import streamlit as st
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)
from datasets import load_dataset
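
# DailyDialog stores each example as a "dialog" list of utterances; there is
# no "text" column. Depending on your datasets version, load_dataset may also
# need trust_remote_code=True to run this dataset's loading script.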
dataset = load_dataset("daily_dialog")

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")

# GPT-2-family tokenizers (DialoGPT included) have no pad token by default;
# reuse EOS so the padding in tokenize_function below does not raise.
tokenizer.pad_token = tokenizer.eos_token

training_args = TrainingArguments(
    output_dir="./results",
    num_train_epochs=3,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    warmup_steps=500,
    weight_decay=0.01,
    logging_dir="./logs",
    logging_steps=10,
)

def tokenize_function(examples):
    # Join each dialogue's turns with the EOS token (the turn delimiter
    # DialoGPT was pretrained with), then tokenize; a fixed max_length
    # keeps the padded sequences bounded.
    texts = [tokenizer.eos_token.join(turns) for turns in examples["dialog"]]
    return tokenizer(texts, padding="max_length", truncation=True, max_length=128)

tokenized_datasets = dataset.map(
    tokenize_function,
    batched=True,
    remove_columns=dataset["train"].column_names,
)
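
# For causal-LM fine-tuning the collator copies input_ids into labels
# (padding positions become -100); without it the Trainer receives no
# labels and cannot compute a loss.
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)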

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["test"],
    data_collator=data_collator,
)
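
# NOTE: Streamlit re-executes this script on every user interaction, so a
# real app would train offline or cache the fine-tuned model (for example
# with st.cache_resource) instead of retraining here.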
trainer.train()

st.title("Simple Chatbot")
user_input = st.text_input("You: ")

if user_input:
    # Append EOS so the model sees a completed user turn, and move the
    # inputs to whatever device the Trainer left the model on.
    inputs = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt").to(model.device)
    reply_ids = model.generate(inputs, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens; decoding all of reply_ids[0]
    # would echo the user's prompt back as part of the reply.
    reply = tokenizer.decode(reply_ids[0, inputs.shape[-1]:], skip_special_tokens=True)
    st.write("Bot:", reply)