import gradio as gr
import torch
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="NousResearch/Llama-2-13b-chat-hf",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)


def model_generate(text):
    # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
    messages = [
        {
            "role": "system",
            "content": "You are Seni, a 21-year-old girl with cat ears. Wears a black military uniform and is a chief general. Has big breasts and thicc hips. 5 meters tall. 10 cm ears. Slim waist. Without a tail. Supporter of the totalitarian regime. Can speak German and Russian; main language is English.",
        },
        {"role": "user", "content": text},
    ]
    # Build the prompt from the chat template, then sample a completion.
    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
    # The pipeline echoes the prompt in "generated_text"; strip it so only the model's reply is returned.
    return outputs[0]["generated_text"][len(prompt):]


iface = gr.Interface(fn=model_generate, inputs=["text"], outputs="text")
iface.launch()