madhavkotecha committed
Commit 7fc6fc7 · verified · 1 Parent(s): e8a041a

Create app.py

Files changed (1)
app.py +189 -0
app.py ADDED
@@ -0,0 +1,189 @@
+ import nltk
+ from sklearn.svm import SVC
+ from sklearn.preprocessing import StandardScaler
+ from sklearn.feature_extraction import DictVectorizer
+ from sklearn.metrics import classification_report
+ from nltk.tokenize import word_tokenize
+ from datasets import load_dataset
+ import numpy as np
+ from tqdm import tqdm
+ import gradio as gr
+ import matplotlib.pyplot as plt
+ from sklearn import metrics
+
+ # word_tokenize and the stopword list need their NLTK data packages;
+ # fetch them up front so a fresh environment does not crash at import
+ # time (punkt_tab is required on newer NLTK releases).
+ nltk.download("stopwords", quiet=True)
+ nltk.download("punkt", quiet=True)
+ nltk.download("punkt_tab", quiet=True)
+
+ SW = set(nltk.corpus.stopwords.words("english"))
+ PUNCT = set([".", ",", "!", "?", ":", ";", "-", "(", ")", "[", "]", "{", "}", "'", '"'])
+ Features_count = 6  # length of the feature vector returned by vectorize()
+ SEED = 42
+
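+ # NEI = Named Entity Identification: a binary tagger that marks each token
+ # as part of a name (1) or not (0), backed by a linear-kernel SVC.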
+ class NEI:
+     def __init__(self):
+         self.model = None
+         self.scaler = StandardScaler()
+         self.vectorizer = DictVectorizer(sparse=True)
+         self.tagset = ['Name[1]', 'No-Name[0]']
+
+     # Parse a whitespace-separated CoNLL-style file into sentences of
+     # (word, pos_tag, nei_tag) triples. Not used by the Gradio app below,
+     # which loads CoNLL-2003 through the `datasets` library instead.
+     def load_dataset(self, file):
+         sentences = []
+         sentence = []
+         with open(file, 'r', encoding='utf-8') as f:
+             for line in f:
+                 if line.strip() == "":
+                     if sentence:
+                         sentences.append(sentence)
+                         sentence = []
+                     continue
+                 word_info = line.strip().split()
+                 if len(word_info) != 4:
+                     continue
+                 word, pos, chunk, nei = word_info
+                 sentence.append((word, pos, nei))
+         if sentence:
+             sentences.append(sentence)
+         return sentences
+
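+     # The three helpers below build dict-valued features in the style of a
+     # CRF tagger. They are kept as written, but the SVC pipeline further
+     # down uses the numeric vectorize()/create_data() path instead.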
+     def sent2features(self, sentence):
+         return [self.word2features(sentence, i) for i in range(len(sentence))]
+
+     def sent2labels(self, sentence):
+         return [label for _, _, label in sentence]
+
+     def word2features(self, sentence, i):
+         word = sentence[i][0]
+         pos_tag = sentence[i][1]
+         features = {
+             'word': word,
+             'pos_tag': pos_tag,
+             'word.isupper': int(word.isupper()),
+             'word.islower': int(word.islower()),
+             'word.istitle': int(word.istitle()),
+             'word.isdigit': int(word.isdigit()),
+             'word.prefix2': word[:2],
+             'word.prefix3': word[:3],
+             'word.suffix2': word[-2:],
+             'word.suffix3': word[-3:],
+         }
+         # Add context features from the previous token (or mark sentence start)
+         if i > 0:
+             prv_word = sentence[i - 1][0]
+             prv_pos_tag = sentence[i - 1][1]
+             features.update({
+                 '-1:word': prv_word,
+                 '-1:pos_tag': prv_pos_tag,
+                 '-1:word.isupper': int(prv_word.isupper()),
+                 '-1:word.istitle': int(prv_word.istitle()),
+             })
+         else:
+             features['BOS'] = True
+         # Context features from the next token (or mark sentence end)
+         if i < len(sentence) - 1:
+             next_word = sentence[i + 1][0]
+             next_pos_tag = sentence[i + 1][1]
+             features.update({
+                 '+1:word': next_word,
+                 '+1:pos_tag': next_pos_tag,
+                 '+1:word.isupper': int(next_word.isupper()),
+                 '+1:word.istitle': int(next_word.istitle()),
+             })
+         else:
+             features['EOS'] = True
+         return features
+
+     # Print a per-class report plus weighted-average precision, recall,
+     # and F-beta scores (beta = 0.5, 1, 2).
+     def performance(self, y_true, y_pred):
+         print(classification_report(y_true, y_pred))
+         precision = metrics.precision_score(y_true, y_pred, average='weighted', zero_division=0)
+         recall = metrics.recall_score(y_true, y_pred, average='weighted', zero_division=0)
+         f05_score = metrics.fbeta_score(y_true, y_pred, beta=0.5, average='weighted', zero_division=0)
+         f1_score = metrics.fbeta_score(y_true, y_pred, beta=1, average='weighted', zero_division=0)
+         f2_score = metrics.fbeta_score(y_true, y_pred, beta=2, average='weighted', zero_division=0)
+         print(f"Average Precision = {precision:.2f}, Average Recall = {recall:.2f}, "
+               f"Average F0.5-Score = {f05_score:.2f}, Average F1-Score = {f1_score:.2f}, "
+               f"Average F2-Score = {f2_score:.2f}")
+
+     # Plot a row-normalized confusion matrix and save it to disk.
+     def confusion_matrix(self, y_true, y_pred):
+         matrix = metrics.confusion_matrix(y_true, y_pred)
+         normalized_matrix = matrix / np.sum(matrix, axis=1, keepdims=True)
+         _, ax = plt.subplots()
+         ax.tick_params(top=True)
+         plt.xticks(np.arange(len(self.tagset)), self.tagset)
+         plt.yticks(np.arange(len(self.tagset)), self.tagset)
+         for i in range(normalized_matrix.shape[0]):
+             for j in range(normalized_matrix.shape[1]):
+                 plt.text(j, i, format(normalized_matrix[i, j], '0.2f'), horizontalalignment="center")
+         plt.imshow(normalized_matrix, interpolation='nearest', cmap=plt.cm.GnBu)
+         plt.colorbar()
+         plt.savefig('Confusion_Matrix.png')
+
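+     # The SVC consumes exactly Features_count = 6 numeric features per token:
+     # title case, all caps, token length, stopword flag, punctuation flag,
+     # and the token's relative position in the sentence.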
+     def vectorize(self, w, scaled_position):
+         title = 1 if w[0].isupper() else 0
+         allcaps = 1 if w.isupper() else 0
+         sw = 1 if w.lower() in SW else 0
+         punct = 1 if w in PUNCT else 0
+         return [title, allcaps, len(w), sw, punct, scaled_position]
+
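+     # Flatten a dataset split into parallel arrays of tokens, feature
+     # vectors, and binary labels. Any non-O NER tag (tag id > 0) is
+     # collapsed to the positive "name" class.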
+     def create_data(self, data):
+         words, features, labels = [], [], []
+         for d in tqdm(data):
+             tags = d["ner_tags"]
+             tokens = d["tokens"]
+             for i, token in enumerate(tokens):
+                 x = self.vectorize(token, scaled_position=(i / len(tokens)))
+                 y = 1 if tags[i] > 0 else 0
+                 features.append(x)
+                 labels.append(y)
+             words.extend(tokens)
+         return np.array(words, dtype="object"), np.array(features, dtype=np.float32), np.array(labels, dtype=np.float32)
+
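+     # Fit the scaler on the training features, then train a linear-kernel
+     # SVC with balanced class weights (name tokens are the minority class).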
+     def train(self, train_dataset):
+         _, X_train, y_train = self.create_data(train_dataset)
+         self.scaler.fit(X_train)
+         X_train = self.scaler.transform(X_train)
+         self.model = SVC(C=1.0, kernel="linear", class_weight="balanced", random_state=SEED, verbose=True)
+         self.model.fit(X_train, y_train)
+
+     def evaluate(self, val_data):
+         _, X_val, y_val = self.create_data(val_data)
+         X_val = self.scaler.transform(X_val)
+         y_pred_val = self.model.predict(X_val)
+         self.confusion_matrix(y_val, y_pred_val)
+         self.performance(y_val, y_pred_val)
+
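+     # Tag raw text: tokenize, vectorize each token with its relative
+     # position, scale with the training-set scaler, and predict 0/1 labels.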
+     def infer(self, sentence):
+         tokens = word_tokenize(sentence)
+         features = [self.vectorize(token, i / len(tokens)) for i, token in enumerate(tokens)]
+         features = np.array(features, dtype=np.float32)
+         scaled_features = self.scaler.transform(features)
+         y_pred = self.model.predict(scaled_features)
+         return list(zip(tokens, y_pred))
+
+
+ data = load_dataset("conll2003", trust_remote_code=True)
+ nei_model = NEI()
+
+ # Training the model
+ nei_model.train(data["train"])
+
+ # Evaluating the model
+ nei_model.evaluate(data["validation"])
+
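+ # A possible extension (not part of the original script): persist the fitted
+ # model and scaler so the app does not retrain on every restart. The file
+ # name here is hypothetical:
+ #
+ # import pickle
+ # with open("nei_svc.pkl", "wb") as f:
+ #     pickle.dump({"model": nei_model.model, "scaler": nei_model.scaler}, f)
+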
+ def annotate(text):
+     # Render each token as "word_label", with label 1 for name tokens
+     # and 0 otherwise.
+     predictions = nei_model.infer(text)
+     annotated_output = " ".join([f"{word}_{int(label)}" for word, label in predictions])
+     return annotated_output
+
+ interface = gr.Interface(
+     fn=annotate,
+     inputs=gr.Textbox(
+         label="Input Sentence",
+         placeholder="Enter your sentence here...",
+     ),
+     outputs=gr.Textbox(
+         label="Tagged Output",
+         placeholder="Tagged sentence appears here...",
+     ),
+     title="Named Entity Recognition",
+     description="CS626 Assignment 2 (Autumn 2024)",
+     theme=gr.themes.Soft(),
+ )
+ interface.launch()