# Text-cleaning pipeline (assumes `file` already holds the raw document as a string)
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

cleandata1 = file.lower()                        # lowercase everything
cleandata2 = re.sub(r'[^\w\s]', '', cleandata1)  # strip punctuation
cleandata3 = re.sub(r'\d+', ' ', cleandata2)     # replace digits with a space
stop_words = set(stopwords.words('english'))
# remove the stopwords from the token list
tokens = word_tokenize(cleandata3)
cleandata4 = [i for i in tokens if i not in stop_words]
cleandata4 = " ".join(str(x) for x in cleandata4)
# drop stray single-letter alphabetic tokens left over after cleaning
cleandata5 = ' '.join(i for i in cleandata4.split() if not (i.isalpha() and len(i) == 1))
cleandata6 = cleandata5.strip()
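# The same steps can be wrapped into one reusable helper; a minimal sketch
# (the name clean_text is ours, not from the original notebook):
def clean_text(raw):
    """Lowercase, strip punctuation and digits, drop stopwords and 1-letter tokens."""
    text = raw.lower()
    text = re.sub(r'[^\w\s]', '', text)
    text = re.sub(r'\d+', ' ', text)
    kept = [t for t in word_tokenize(text)
            if t not in stop_words and not (t.isalpha() and len(t) == 1)]
    return ' '.join(kept).strip()

#clean_text(file) should reproduce cleandata6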
## Frequency of words
words_dict = {}
for word in cleandata6.split():
    words_dict[word] = words_dict.get(word, 0) + 1
for key in sorted(words_dict):
    print("{}:{}".format(key, words_dict[key]))
from wordcloud import WordCloud
import matplotlib.pyplot as plt

wordcloud = WordCloud(width=480, height=480, margin=0).generate(cleandata6)
# Display the generated image:
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.margins(x=0, y=0)
plt.show()
# with max_words: keep only the five most frequent words
wordcloud = WordCloud(width=480, height=480, max_words=5).generate(cleandata6)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.margins(x=0, y=0)
plt.show()
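# To keep the image without a display, the wordcloud package can also write
# a PNG directly (the filename here is arbitrary):
wordcloud.to_file("wordcloud.png")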
# Sentiment analysis with TextBlob
from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer
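# These imports are never used below; a minimal usage sketch (the sample
# sentence is our own; NaiveBayesAnalyzer trains on NLTK's movie_reviews
# corpus the first time it runs):
blob = TextBlob("NLTK makes text processing easy.", analyzer=NaiveBayesAnalyzer())
print(blob.sentiment)  # Sentiment(classification=..., p_pos=..., p_neg=...)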
## Bag of Words
from sklearn.feature_extraction.text import CountVectorizer

sentences = ["Hello how are you",
             "Hi students are you all good",
             "Okay lets study bag of words"]
sentences
cv = CountVectorizer()
bow = cv.fit_transform(sentences).toarray()
cv.vocabulary_
cv.get_feature_names_out()  # get_feature_names() was removed in scikit-learn 1.2
bow
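# A labelled view of the count matrix makes the word-to-column mapping obvious;
# a sketch using pandas (not imported in the original notebook):
import pandas as pd
print(pd.DataFrame(bow, columns=cv.get_feature_names_out()))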
## NLTK Basics
import nltk
from nltk.book import *

# words that appear in contexts similar to 'King'
text6.similar('King')
text6.concordance('King')
sents()
len(text1)
# lines caps how many concordance lines are printed; it is optional
text3.concordance('lived', lines=38)
text3.common_contexts(['earth', 'heaven'])
text1.common_contexts(['captain', 'whale'])
#text3.collocations()
text3.collocation_list()
# pass a number to limit how many collocations are returned
text6.collocation_list(5)
text6.generate(5)
len(text3)
from nltk import lm
help(lm)
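# help(lm) only prints the module docs; a minimal bigram language model with
# nltk.lm, trained on the toy `sentences` list from above:
from nltk.lm import MLE
from nltk.lm.preprocessing import padded_everygram_pipeline

tokenized = [word_tokenize(s.lower()) for s in sentences]
train_data, vocab = padded_everygram_pipeline(2, tokenized)
model = MLE(2)                       # maximum-likelihood bigram model
model.fit(train_data, vocab)
print(model.score('you', ['are']))   # P('you' | 'are') under the trained model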
text = "Hello students, we are studying Parts of Speech Tagging. Lets understand the process of\ |
shallow parsing or Chunking. Here were are drawing the tree corresponding to the words \ |
and the POS tags based on a set grammer regex patter." |
words = nltk.word_tokenize(text) |
#words |
tags = nltk.pos_tag(words) |
#tags |
# chunk grammar: an NP chunk is a determiner, then an adjective, then a noun
grammar = (''' NP: {<DT><JJ><NN>} ''')
grammar
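# The grammar is defined but never applied; chunking the tagged words and
# drawing the tree (as the example text describes) looks like this:
chunk_parser = nltk.RegexpParser(grammar)
tree = chunk_parser.parse(tags)
print(tree)    # the chunked sentence as a bracketed tree
#tree.draw()   # opens a Tk window showing the drawn tree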
from nltk import FreqDist

freq = FreqDist(text3)
freq
freq.most_common(50)
freq['father']
freq.plot(20, cumulative=True)
freq.plot(20)
freq.tabulate()
freq.max()
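# FreqDist also exposes relative frequencies and one-off words, for example:
print(freq.freq('father'))  # proportion of the text made up by 'father'
print(freq.hapaxes()[:10])  # ten words that occur exactly once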
[i for i in sent3 if len(i) > 8]
[i for i in sent3 if len(i) != 3]
[i for i in sent3 if len(i) <= 3]
# the last filter written as an explicit loop
l = []
for i in sent3:
    if len(i) <= 3:
        l.append(i)
print(l)