|
import gradio as gr |
|
from transformers import BertTokenizer, BertForSequenceClassification |
|
import torch |
|
import torch.nn.functional as F |
|
|
|
|
|
# Indonesian BERT tokenizer (IndoBERT large, phase 1) from the Hugging Face Hub.
tokenizer = BertTokenizer.from_pretrained('indobenchmark/indobert-large-p1')

# NOTE(review): the checkpoint name ("nergrit") suggests an NER /
# token-classification model, but it is loaded with
# BertForSequenceClassification, which produces one set of logits per
# sentence rather than per token — confirm this is intended.
model = BertForSequenceClassification.from_pretrained("hendri/nergrit")
|
|
|
# Raw label names emitted by the model head, in logit order.
labels = [f"LABEL_{i}" for i in range(7)]

# Human-readable IOB-style NER tag for each raw model label.
label_mapping = dict(
    zip(
        labels,
        [
            "I-PERSON",
            "B-ORGANISATION",
            "I-ORGANISATION",
            "B-PLACE",
            "I-PLACE",
            "O",
            "B-PERSON",
        ],
    )
)
|
|
|
|
|
def classify_emotion(text):
    """Run the IndoBERT model on *text* and return per-label probabilities.

    Despite the name, the labels are NER tags (see ``label_mapping``); the
    sequence-classification head yields one probability per label for the
    whole input sentence, not per token.

    Args:
        text: Input sentence (bahasa Indonesia).

    Returns:
        dict mapping each mapped tag name to its softmax probability,
        rounded to 4 decimal places.
    """
    encoded = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        padding=True,
        max_length=128,
    )
    # Inference only — skip gradient bookkeeping.
    with torch.no_grad():
        logits = model(**encoded).logits
    # Turn raw scores into a probability distribution over the labels.
    probs = F.softmax(logits, dim=-1)[0]
    result = {}
    for raw_label, p in zip(labels, probs):
        result[label_mapping[raw_label]] = round(float(p), 4)
    return result
|
|
|
|
|
# Gradio UI: a single text box in, a probability-label widget out.
interface = gr.Interface(
    fn=classify_emotion,
    inputs=gr.Textbox(label="Enter Text for NER"),
    outputs=gr.Label(label="Predicted NER"),
    # Fix: the app performs NER, not emotion classification — the previous
    # title ("Emotion Classification") contradicted the description and the
    # input/output labels.
    title="Named Entity Recognition (NER)",
    description=(
        "This application uses an IndoBERT model fine-tuned for NER. "
        "Enter a sentence (bahasa Indonesia) to see the predicted NER "
        "and their probabilities."
    ),
)

# Start the Gradio server (blocking call).
interface.launch()