# 1. Import the required packages
import torch
import gradio as gr
from typing import Dict
from transformers import pipeline
# 2. Define function to use our model on given text
def trait_classifier(text: str) -> Dict[str, float]:
    # Set up the text classification pipeline
    classifier = pipeline(task="text-classification",
                          # Because our model is on Hugging Face already, we can pass in the model name directly
                          model="kar0lina/petarda-pandora1000-neu-xlm-roberta-base",  # link to model on HF Hub
                          device="cuda" if torch.cuda.is_available() else "cpu",
                          top_k=None)  # return all possible scores (not just top-1)

    # Get outputs from the pipeline (a list of dicts, one per label)
    outputs = classifier(text)[0]
    print("outputs:", outputs)

    # Format outputs for Gradio (e.g. {"label_1": probability_1, "label_2": probability_2})
    output_dict = {}
    for item in outputs:
        output_dict[item["label"]] = item["score"]
        print("item:", item)
    print("output_dict:", output_dict)

    return output_dict
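# Example output (an assumption for illustration, not from the original file): for a binary
# neuroticism model the returned dict might look like {"LABEL_0": 0.13, "LABEL_1": 0.87};
# the actual label names come from the model's id2label config on the Hub.
# A minimal local sanity check, commented out so the Space only runs the Gradio app:
# print(trait_classifier("I can't stop worrying about everything lately."))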
# 3. Create a Gradio interface with details about our app
description = """
A demo app for personality trait prediction (NEUROTICISM) using a RoBERTa-based text classifier.
Fine-tuned from [robBERTa](https://huggingface.co/distilbert/distilbert-base-uncased) on the Pandora dataset."""
demo = gr.Interface(fn=trait_classifier,
                    inputs="text",
                    outputs=gr.Label(num_top_classes=2),  # show top 2 classes (that's all we have)
                    title="🤯 Petarda",
                    # theme=gr.themes.Ocean(primary_hue="amber", secondary_hue="cyan"),
                    description=description,
                    examples=[["I have a real problem right now... I am struggling a lot :("],
                              ["He's such a nice, gentle man and it was great"]])
# 4. Launch the interface
if __name__ == "__main__":
    demo.launch()
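# Optional (not part of the original app): demo.launch(share=True) would also create a
# temporary public link when running locally; on Hugging Face Spaces, plain demo.launch()
# is enough.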