import streamlit as st
from huggingface_hub import InferenceClient
import os
# Set up the model and client
model_name = "01-ai/Yi-1.5-34B-Chat"
client = InferenceClient(model_name, token=os.getenv("API_KEY"))

# Keep the chat history in session state so it survives Streamlit reruns,
# instead of a module-level list that would be reset on every script execution
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
# Streamlit app layout
st.title("Chat with Yi-1.5-34B")
st.write("Type 'quit' to exit the chat.")
# Chat area
chat_container = st.empty()
# Function to handle sending messages (defined before the widgets that
# reference it as a callback)
def send_message():
    message = st.session_state.input
    if not message:
        return
    st.session_state.chat_history.append({"role": "user", "content": message})
    # Clear the input field (writing a widget's state is allowed inside a callback)
    st.session_state.input = ""
    # Generate the response as a stream of chunks
    stream = client.chat_completion(
        messages=st.session_state.chat_history,
        max_tokens=500,
        stream=True,
    )
    # Accumulate the streamed chunks into the full reply; elements written
    # from inside a callback would not persist across the rerun, so the
    # reply is rendered from the history below instead
    reply = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            reply += delta
    # Add the response to the chat history
    st.session_state.chat_history.append({"role": "assistant", "content": reply})

# User input area
st.text_input("Enter your message:", key="input")

# Send message button; the callback runs before the script body re-executes,
# so the new exchange is already in the history when it is rendered below
st.button("Send", on_click=send_message)
# Display chat history
with chat_container.container():
    for message in st.session_state.chat_history:
        if message["role"] == "user":
            st.write(f"**You:** {message['content']}")
        else:
            st.write(f"**Yi:** {message['content']}")