import os

import streamlit as st
from huggingface_hub import InferenceClient

# Set up the model and client
model_name = "01-ai/Yi-1.5-34B-Chat"
client = InferenceClient(model_name, token=os.getenv("API_KEY"))
# Keep chat history in session state so it survives Streamlit reruns
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# Streamlit app layout
st.title("Chat with Yi-1.5-34B")
st.write("Type 'quit' to exit the chat.")

# Chat area
chat_container = st.empty()

# Function to handle sending messages; defined before the button whose
# on_click callback references it
def send_message():
    message = st.session_state.input
    if not message or message.strip().lower() == "quit":
        return
    st.session_state.chat_history.append({"role": "user", "content": message})
    # Clear the input field (allowed here: callbacks run before widgets render)
    st.session_state.input = ""
    # Generate a streamed response
    stream = client.chat_completion(
        messages=st.session_state.chat_history,
        max_tokens=500,
        stream=True,
    )
    # Accumulate streamed tokens and display them incrementally
    reply = ""
    with chat_container.container():
        placeholder = st.empty()
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta:
                reply += delta
                placeholder.markdown(reply)
    # Add the full response to chat history
    st.session_state.chat_history.append({"role": "assistant", "content": reply})

# User input area
st.text_input("Enter your message:", key="input")

# Send message button; clicking it runs send_message at the start of the rerun
st.button("Send", on_click=send_message)

# Display chat history
with chat_container.container():
    for message in st.session_state.chat_history:
        if message["role"] == "user":
            st.write(f"**You:** {message['content']}")
        else:
            st.write(f"**Yi:** {message['content']}")