from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import re

# GPT-2 fine-tuned for task-oriented dialogue (TOD) on the Schema-Guided Dialogue (SGD) dataset
model_name = 'armandnlp/gpt2-TOD_finetuned_SGD'
tokenizer_TOD = AutoTokenizer.from_pretrained(model_name)
model_TOD = AutoModelForCausalLM.from_pretrained(model_name)

def generate_response(prompt):
    input_ids = tokenizer_TOD(prompt, return_tensors="pt").input_ids
    # Greedy decoding; generation stops at the special end-of-sequence token (id 50262)
    outputs = model_TOD.generate(input_ids,
                                 do_sample=False,
                                 max_length=1024,
                                 eos_token_id=50262)
    return tokenizer_TOD.batch_decode(outputs)[0]

# Example prompt:
# <|context|> <|user|> I want to go to the restaurant.<|endofcontext|>
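# Illustrative sketch of the decoded output (the layout is assumed from the
# interface description below; only <|response|> / <|endofresponse|> are
# confirmed by the parsing code further down):
#   out = generate_response("<|context|> <|user|> I want to go to the restaurant.<|endofcontext|>")
#   # out should contain the belief state, the actions, and then
#   # "<|response|> ... <|endofresponse|>" with the system reply.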

import gradio as gr

iface = gr.Interface(fn=generate_response,
                     inputs="text",
                     outputs="text",
                     title="gpt2-TOD",
                     examples=[["<|context|> <|user|> I'm super hungry ! I want to go to the restaurant.<|endofcontext|>"],
                               ["<|context|> <|user|> I want to go to the restaurant. <|system|> What food would you like to eat ? <|user|> Italian sounds good. <|endofcontext|>"]],
                     description="Passing in a task-oriented dialogue context generates a belief state, actions to take, and a response based on those actions.",
                     )
iface.launch()
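# A minimal sketch of pulling just the reply text out of the raw generation,
# mirroring the parsing used in the work-in-progress code below:
#   raw = generate_response("<|context|> <|user|> I want to go to the restaurant.<|endofcontext|>")
#   reply = raw.split('<|response|>')[1].replace('<|endofresponse|>', '')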
""" | |
## Work in progress | |
## https://gradio.app/creating_a_chatbot/ | |
## make chatbot interface | |
## can get input and responses for now | |
## would like to add belief state and actions to history response | |
## means modifying the history when appending input during next turn | |
## ie. keeping only the response and adding <|system|> token | |
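# Sketch of the intended history format across turns (cf. the example string
# further down):
#   after turn 1: "<|user|> I want to go to the restaurant. <|system|> What food would you like to eat ?"
# Only user utterances and <|system|>-prefixed responses are kept; the belief
# state and actions are stripped before the next <|context|> ... <|endofcontext|> is built.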
ckpt = 'armandnlp/gpt2-TOD_finetuned_SGD'
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt)

def predict(input, history=[]):
    # history: flat list of token ids covering all previous turns
    # response: list of (user, system) string tuples for the chatbot display

    # Build the model input: <|context|> ...history... <|user|> input <|endofcontext|>
    new_user_input_ids = tokenizer.encode(' <|user|> ' + input, return_tensors='pt')
    context = tokenizer.encode('<|context|>', return_tensors='pt')
    endofcontext = tokenizer.encode(' <|endofcontext|>', return_tensors='pt')
    model_input = torch.cat([context, torch.LongTensor(history), new_user_input_ids, endofcontext], dim=-1)
    out = model.generate(model_input, max_length=1024, eos_token_id=50262).tolist()[0]

    # Update the history for the next turn: append the user utterance, then keep
    # only the generated response (prefixed with <|system|>), dropping the
    # belief state and actions
    history = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
    string_out = tokenizer.decode(out)
    response_only = string_out.split('<|response|>')[1].replace('<|endofresponse|>', '')
    resp_tokenized = tokenizer.encode(' <|system|> ' + response_only, return_tensors='pt')
    history = torch.cat([history, resp_tokenized], dim=-1).tolist()
    # TODO: keep belief state + actions in the history and in the chat output

    # Format the printed output: split the decoded history into dialogue turns
    turns = tokenizer.decode(history[0])
    # e.g. turns = "<|user|> I want to go to the restaurant. <|system|> What food would you like to eat ? <|user|> Italian sounds good. <|system|> Okay then !"
    turns = re.split(r'<\|system\|>|<\|user\|>', turns)[1:]
    # Pair up consecutive user/system turns for the chatbot component
    response = [(turns[i], turns[i + 1]) for i in range(0, len(turns) - 1, 2)]
    return response, history
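# Worked example of the turn pairing: for the decoded history string above,
# re.split yields [' I want to go to the restaurant. ', ' What food would you
# like to eat ? ', ' Italian sounds good. ', ' Okay then !'], and the pairing
# returns two (user, system) tuples, one per dialogue exchange.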

# Quick local test:
# predict("I want to go to the restaurant.")

import gradio as gr

gr.Interface(fn=predict,
             inputs=["text", "state"],
             outputs=["chatbot", "state"]).launch()
""" | |