# Build LDA model for the 2018 dataset (default text column of get_topics —
# presumably the headline/title column; confirm against get_topics' definition).
# get_topics returns (lda model, corpus, id2word dictionary, lemmatized texts).
mod18, cp18, id2word18, data_lemm18 = get_topics(df18_nw)

# Compute coherence score (c_v measure; higher is better).
coherence_model_lda18 = CoherenceModel(model=mod18, texts=data_lemm18, dictionary=id2word18, coherence='c_v')
print('\nCoherence Score: ', coherence_model_lda18.get_coherence())

# Compute perplexity. NOTE: LdaModel.log_perplexity returns the per-word
# likelihood *bound*, not the perplexity itself; gensim defines
# perplexity = 2 ** (-bound). So a higher (less negative) bound is better,
# which corresponds to a LOWER perplexity.
bound18 = mod18.log_perplexity(cp18)
print('\nLog perplexity bound: ', bound18)
print('Perplexity (2^-bound, lower is better): ', 2 ** -bound18)

# Visualize the topics. The prepared figure only renders when this call is the
# last expression of a notebook cell.
# NOTE(review): `pyLDAvis.gensim` was renamed `pyLDAvis.gensim_models` in
# pyLDAvis >= 3.0 — confirm the installed version supports this import path.
pyLDAvis.enable_notebook()
pyLDAvis.gensim.prepare(mod18, cp18, id2word18, mds='mmds')
# Build LDA model for the 2018 dataset using the combined 'all_text' column.
# get_topics returns (lda model, corpus, id2word dictionary, lemmatized texts).
mod18all, cp18all, id2word18all, data_lemm18all = get_topics(df18_nw, textcol='all_text')

# Compute coherence score (c_v measure; higher is better).
coherence_model_lda18all = CoherenceModel(model=mod18all, texts=data_lemm18all, dictionary=id2word18all, coherence='c_v')
print('\nCoherence Score: ', coherence_model_lda18all.get_coherence())

# Compute perplexity. NOTE: LdaModel.log_perplexity returns the per-word
# likelihood *bound*, not the perplexity itself; gensim defines
# perplexity = 2 ** (-bound). So a higher (less negative) bound is better,
# which corresponds to a LOWER perplexity.
bound18all = mod18all.log_perplexity(cp18all)
print('\nLog perplexity bound: ', bound18all)
print('Perplexity (2^-bound, lower is better): ', 2 ** -bound18all)

# Visualize the topics. The prepared figure only renders when this call is the
# last expression of a notebook cell.
# NOTE(review): `pyLDAvis.gensim` was renamed `pyLDAvis.gensim_models` in
# pyLDAvis >= 3.0 — confirm the installed version supports this import path.
pyLDAvis.enable_notebook()
pyLDAvis.gensim.prepare(mod18all, cp18all, id2word18all, mds='mmds')
# Build LDA model for the 2019 dataset (default text column of get_topics —
# presumably the headline/title column; confirm against get_topics' definition).
# get_topics returns (lda model, corpus, id2word dictionary, lemmatized texts).
mod19, cp19, id2word19, data_lemm19 = get_topics(df19_nw)

# Compute coherence score (c_v measure; higher is better).
coherence_model_lda19 = CoherenceModel(model=mod19, texts=data_lemm19, dictionary=id2word19, coherence='c_v')
print('\nCoherence Score: ', coherence_model_lda19.get_coherence())

# Compute perplexity. NOTE: LdaModel.log_perplexity returns the per-word
# likelihood *bound*, not the perplexity itself; gensim defines
# perplexity = 2 ** (-bound). So a higher (less negative) bound is better,
# which corresponds to a LOWER perplexity.
bound19 = mod19.log_perplexity(cp19)
print('\nLog perplexity bound: ', bound19)
print('Perplexity (2^-bound, lower is better): ', 2 ** -bound19)

# Visualize the topics. The prepared figure only renders when this call is the
# last expression of a notebook cell.
# NOTE(review): `pyLDAvis.gensim` was renamed `pyLDAvis.gensim_models` in
# pyLDAvis >= 3.0 — confirm the installed version supports this import path.
pyLDAvis.enable_notebook()
pyLDAvis.gensim.prepare(mod19, cp19, id2word19, mds='mmds')