import gradio as gr
import torch
import torch._dynamo

from transformers import AutoTokenizer, AutoModelForMaskedLM

# Suppress torch._dynamo compilation errors so the model falls back to eager
# execution instead of crashing on unsupported ops.
torch._dynamo.config.suppress_errors = True

model_id = "answerdotai/ModernBERT-base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForMaskedLM.from_pretrained(model_id)

def conversation(input_text):
    # Tokenize the input text; ModernBERT's mask token is [MASK].
    inputs = tokenizer(input_text, return_tensors="pt")

    # Guard against inputs that do not contain a [MASK] token.
    if tokenizer.mask_token_id not in inputs["input_ids"][0].tolist():
        return "Please include [MASK] in your text."

    # Inference only, so skip gradient tracking.
    with torch.no_grad():
        outputs = model(**inputs)

    # Locate the first [MASK] position and take the highest-scoring
    # vocabulary entry at that position.
    masked_index = inputs["input_ids"][0].tolist().index(tokenizer.mask_token_id)
    predicted_token_id = outputs.logits[0, masked_index].argmax(dim=-1)
    predicted_token = tokenizer.decode(predicted_token_id)

    return f"Predicted response: {predicted_token}"

interface = gr.Interface(
    fn=conversation,
    inputs=gr.Textbox(label="Enter your text (include [MASK]):"),
    outputs=gr.Textbox(label="Predicted Response"),
    title="Masked Language Model Conversation",
    description="Type a sentence with [MASK] to predict the masked word using ModernBERT.",
)

interface.launch()
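
# Illustrative usage (an assumption, not verified output): entering a sentence
# such as "The capital of France is [MASK]." in the textbox should make the app
# return the single filled-in token, e.g. something like "Paris".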