import pickle
import nltk
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics import classification_report
from nltk.tokenize import word_tokenize
from datasets import load_dataset
import numpy as np
from tqdm import tqdm
import gradio as gr
import matplotlib.pyplot as plt
from sklearn import metrics
nltk.download('stopwords')
nltk.download('punkt_tab')

SW = set(nltk.corpus.stopwords.words("english"))
PUNCT = set([".", ",", "!", "?", ":", ";", "-", "(", ")", "[", "]", "{", "}", "'", '"'])
connectors = set(["of", "in", "and", "for", "to", "with", "at", "from"])
start_words = set(["the", "a", "an", "this", "that", "these", "those", "my", "your", "his", "her", "its", "our", "their", "few", "many", "several", "all", "most", "some", "any", "every", "each", "either", "neither", "both", "another", "other", "more", "less", "fewer", "little", "much", "great", "good", "bad", "first", "second", "third", "last", "next", "previous"])

Features_count = 6
SEED = 42
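
# The model below treats named-entity identification as a binary, per-token
# decision (part of a name vs. not): each token is mapped to a small set of
# hand-crafted features, standardised, and classified with a linear-kernel SVM
# trained on CoNLL-2003 (any non-zero ner_tag counts as a name).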
class NEI:
    def __init__(self):
        self.model = None
        self.scaler = StandardScaler()
        self.vectorizer = DictVectorizer(sparse=True)
        self.tagset = ['No-Name[0]', 'Name[1]']
    def load_dataset(self, file):
        # Reads a CoNLL-style file (word, POS, chunk, NER tag per line, blank
        # line between sentences). Kept for offline use; the script below loads
        # the corpus through the Hugging Face `datasets` library instead.
        sentences = []
        sentence = []
        with open(file, 'r', encoding='utf-8') as f:
            for line in f:
                if line.strip() == "":
                    if sentence:
                        sentences.append(sentence)
                        sentence = []
                    continue
                word_info = line.strip().split()
                if len(word_info) != 4:
                    continue
                word, pos, chunk, nei = word_info
                sentence.append((word, pos, nei))
        if sentence:
            sentences.append(sentence)
        return sentences
    def performance(self, y_true, y_pred):
        print(classification_report(y_true, y_pred))
        precision = metrics.precision_score(y_true, y_pred, average='weighted', zero_division=0)
        recall = metrics.recall_score(y_true, y_pred, average='weighted', zero_division=0)
        f05_Score = metrics.fbeta_score(y_true, y_pred, beta=0.5, average='weighted', zero_division=0)
        f1_Score = metrics.fbeta_score(y_true, y_pred, beta=1, average='weighted', zero_division=0)
        f2_Score = metrics.fbeta_score(y_true, y_pred, beta=2, average='weighted', zero_division=0)
        print(
            f"Average Precision = {precision:.2f}, Average Recall = {recall:.2f}, "
            f"Average f05-Score = {f05_Score:.2f}, Average f1-Score = {f1_Score:.2f}, "
            f"Average f2-Score = {f2_Score:.2f}"
        )
    def confusion_matrix(self, y_true, y_pred):
        # Row-normalised confusion matrix, saved to Confusion_Matrix.png.
        matrix = metrics.confusion_matrix(y_true, y_pred)
        normalized_matrix = matrix / np.sum(matrix, axis=1, keepdims=True)
        fig, ax = plt.subplots()
        im = ax.imshow(normalized_matrix, interpolation='nearest', cmap=plt.cm.GnBu)
        fig.colorbar(im)
        ax.xaxis.set_ticks_position('top')
        ax.xaxis.set_label_position('top')
        ax.set_xticks(np.arange(len(self.tagset)))
        ax.set_xticklabels(self.tagset)
        ax.set_yticks(np.arange(len(self.tagset)))
        ax.set_yticklabels(self.tagset)
        for i in range(normalized_matrix.shape[0]):
            for j in range(normalized_matrix.shape[1]):
                ax.text(j, i, f"{normalized_matrix[i, j]:.2f}", ha="center", va="center", color="black")
        ax.set_title("Normalized Confusion Matrix")
        ax.set_xlabel("Predicted Label")
        ax.set_ylabel("True Label")
        fig.savefig('Confusion_Matrix.png')
        plt.close(fig)
    def vectorize(self, w, scaled_position, prev_tag=0, next_tag=0):
        # Hand-crafted features for a single token.
        is_titlecase = 1 if w[0].isupper() else 0
        is_allcaps = 1 if w.isupper() else 0
        is_sw = 1 if w.lower() in SW else 0
        is_punct = 1 if w in PUNCT else 0
        # A connector ("of", "in", ...) counts as entity-internal only when
        # both neighbouring tokens are tagged as names.
        is_connector = 1 if (w.lower() in connectors) and (prev_tag > 0 and next_tag > 0) else 0
        return [is_titlecase, is_allcaps, len(w), is_sw, is_punct, is_connector, scaled_position]
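
    # Worked example (the values follow mechanically from the rules above):
    # vectorize("Obama", 0.3) -> [1, 0, 5, 0, 0, 0, 0.3]
    # i.e. title-cased, not all-caps, length 5, not a stopword, not punctuation,
    # not a connector, positioned 30% of the way into the sentence.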
    def create_data(self, data):
        # Converts a Hugging Face CoNLL-2003 split into feature vectors and
        # binary labels (1 = token belongs to any named entity, 0 = otherwise).
        words, features, labels = [], [], []
        for d in tqdm(data):
            tags = d["ner_tags"]
            tokens = d["tokens"]
            for i, token in enumerate(tokens):
                prev_tag = tags[i - 1] if i > 0 else 0
                next_tag = tags[i + 1] if i < len(tokens) - 1 else 0
                x = self.vectorize(token, scaled_position=(i / len(tokens)), prev_tag=prev_tag, next_tag=next_tag)
                y = 1 if tags[i] > 0 else 0
                features.append(x)
                labels.append(y)
            words.extend(tokens)
        return np.array(words, dtype="object"), np.array(features, dtype=np.float32), np.array(labels, dtype=np.float32)
    def train(self, train_dataset):
        _, X_train, y_train = self.create_data(train_dataset)
        self.scaler.fit(X_train)
        X_train = self.scaler.transform(X_train)
        self.model = SVC(C=1.0, kernel="linear", class_weight="balanced", random_state=SEED, verbose=True)
        self.model.fit(X_train, y_train)
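
    # Note: fitting a kernel SVC on the full CoNLL-2003 train split (roughly
    # 200k tokens) is slow. The already-imported LinearSVC is a reasonable
    # drop-in if training time matters (an untested sketch, not what this
    # Space ships with):
    #   self.model = LinearSVC(C=1.0, class_weight="balanced", random_state=SEED)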
    def evaluate(self, val_data):
        _, X_val, y_val = self.create_data(val_data)
        X_val = self.scaler.transform(X_val)
        y_pred_val = self.model.predict(X_val)
        print(metrics.confusion_matrix(y_val, y_pred_val))
        self.confusion_matrix(y_val, y_pred_val)
        self.performance(y_val, y_pred_val)
    def infer(self, sentence):
        tokens = word_tokenize(sentence)
        if not tokens:
            return []
        # First pass: predict without neighbour-tag context.
        raw_features = [self.vectorize(token, i / len(tokens)) for i, token in enumerate(tokens)]
        raw_features = np.array(raw_features, dtype=np.float32)
        scaled_features = self.scaler.transform(raw_features)
        y_pred = self.model.predict(scaled_features)
        # Second pass: re-vectorize using the first-pass tags of the
        # neighbouring tokens (so the connector feature can fire), then predict again.
        features = []
        for i, token in enumerate(tokens):
            prev_tag = y_pred[i - 1] if i > 0 else 0
            next_tag = y_pred[i + 1] if i < len(tokens) - 1 else 0
            features.append(self.vectorize(token, i / len(tokens), prev_tag, next_tag))
        features = np.array(features, dtype=np.float32)
        scaled_features = self.scaler.transform(features)
        y_pred = self.model.predict(scaled_features)
        return list(zip(tokens, y_pred))
data = load_dataset("conll2003", trust_remote_code=True)
nei_model = NEI()

# Training the model
nei_model.train(data["train"])
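
# Optional: persist the fitted scaler and SVM so the Space does not have to
# retrain on every restart. A minimal sketch using the `pickle` import above;
# the filename is an arbitrary choice, not something the original app defines.
# with open("nei_svm.pkl", "wb") as f:
#     pickle.dump({"scaler": nei_model.scaler, "model": nei_model.model}, f)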
# Evaluating the model
nei_model.evaluate(data["validation"])
def annotate(text):
    # Tag each token with 1 (part of a name) or 0 (not) and render as word_label pairs.
    predictions = nei_model.infer(text)
    return " ".join(f"{word}_{int(label)}" for word, label in predictions)
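
# Example of the output format: an input such as "Sachin lives in Mumbai" comes
# back as space-separated word_label pairs, e.g. "Sachin_1 lives_0 in_0 Mumbai_1"
# (the actual 0/1 labels depend on the trained SVM).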
interface = gr.Interface(
    fn=annotate,
    inputs=gr.Textbox(
        label="Input Sentence",
        placeholder="Enter your sentence here...",
    ),
    outputs=gr.Textbox(
        label="Tagged Output",
        placeholder="Tagged sentence appears here...",
    ),
    title="Named Entity Recognition",
    description="CS626 Assignment 3 (Autumn 2024)",
    theme=gr.themes.Soft(),
)
interface.launch()
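
# Note: on a Hugging Face Space, launch() with no arguments is sufficient; when
# running the script locally, interface.launch(share=True) can be used to get a
# temporary public link.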