import gradio as gr
from transformers import BertTokenizer, BertForSequenceClassification
import torch
import torch.nn.functional as F

# Load the tokenizer and model
tokenizer = BertTokenizer.from_pretrained('indobenchmark/indobert-large-p1')
model = BertForSequenceClassification.from_pretrained("hendri/sentiment")

# Raw labels emitted by the model (order must match the model's config)
labels = ["LABEL_0", "LABEL_1", "LABEL_2"]

# Map these to your actual labels:
label_mapping = {
    "LABEL_0": "positive",
    "LABEL_1": "neutral",
    "LABEL_2": "negative"
}

# Define a function to process user input and return predictions
def classify_emotion(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=128)
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    probabilities = F.softmax(logits, dim=-1)
    predictions = {
        label_mapping[labels[i]]: round(float(prob), 4)
        for i, prob in enumerate(probabilities[0])
    }
    return predictions

# Create the Gradio interface
interface = gr.Interface(
    fn=classify_emotion,
    inputs=gr.Textbox(label="Enter Text for Sentiment Analysis"),
    outputs=gr.Label(label="Predicted Sentiment"),
    title="Sentiment Analysis",
    description="This application uses an IndoBERT model fine-tuned for sentiment analysis. Enter a sentence (in Indonesian) to see the predicted sentiment and the class probabilities."
)

# Launch the Gradio interface
interface.launch()