"""Gradio app: serve a fine-tuned RoBERTa emotion classifier for text input."""

import json

import gradio as gr
import torch
from transformers import (
    AutoTokenizer,
    RobertaConfig,
    RobertaForSequenceClassification,
)

# Build the model skeleton with the fine-tuned head size (3 emotion classes);
# weights are randomly initialized here and overwritten by the checkpoint below.
config = RobertaConfig.from_pretrained(
    'cardiffnlp/twitter-roberta-base-emotion', num_labels=3
)
model = RobertaForSequenceClassification(config)

# Load the fine-tuned weights on CPU. weights_only=True restricts the pickle
# payload to tensors/primitives, preventing arbitrary code execution if the
# checkpoint file is tampered with.
state_dict = torch.load(
    'transferLearningResults/model_state_dict.pt',
    map_location=torch.device('cpu'),
    weights_only=True,
)

# strict=False tolerates key mismatches (e.g. a resized classifier head), but
# report what was skipped so loading the wrong checkpoint does not fail silently.
missing, unexpected = model.load_state_dict(state_dict, strict=False)
if missing or unexpected:
    print(
        f"Warning: state_dict mismatch - missing keys: {missing}; "
        f"unexpected keys: {unexpected}"
    )

# Evaluation mode: disables dropout and other train-time behavior.
model.eval()

tokenizer = AutoTokenizer.from_pretrained('transferLearningResults')

# Map integer class indices back to human-readable emotion labels.
# JSON object keys are always strings, so convert them back to ints.
with open('label_to_int_mapping.json', 'r') as file:
    label_mapping = json.load(file)
int_to_label = {int(k): v for k, v in label_mapping.items()}


def predict_emotion(text):
    """Classify *text* and return a {label: probability} dict over all classes.

    Args:
        text: Raw input string; truncated to 512 tokens by the tokenizer.

    Returns:
        dict mapping each emotion label to its softmax probability (floats).
    """
    inputs = tokenizer(
        text, return_tensors="pt", padding=True, truncation=True, max_length=512
    )
    # No gradients needed for inference; saves memory and time.
    with torch.no_grad():
        outputs = model(**inputs)
    # squeeze() drops the batch dimension (batch size is 1 here).
    probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1).squeeze()
    return {
        int_to_label[i]: prob for i, prob in enumerate(probabilities.tolist())
    }


iface = gr.Interface(fn=predict_emotion, inputs="text", outputs="label")

# Guard the server launch so importing this module (e.g. for testing) does not
# start the web app as a side effect.
if __name__ == "__main__":
    iface.launch()