rizkynindra committed · afcdd52
Parent(s): 239890d
sahabat tai
app.py CHANGED
@@ -1,13 +1,9 @@
+import streamlit as st
 import torch
 import transformers
-import os
-from langchain_huggingface import HuggingFaceEndpoint
-import streamlit as st
-from langchain_core.prompts import PromptTemplate
-from langchain_core.output_parsers import StrOutputParser
 
+# Model setup
 model_id = "GoToCompany/gemma2-9b-cpt-sahabatai-v1-instruct"
-HF_TOKEN = os.getenv("HF")  # Ensure this is set correctly
 
 pipeline = transformers.pipeline(
     "text-generation",
@@ -16,164 +12,45 @@ pipeline = transformers.pipeline(
     device_map="auto",
 )
 
-[... old lines 19-50 were not preserved by this view (only a stray ")" at line 39 survived); they presumably held the page setup and the get_llm_hf_inference() helper that is called below but defined nowhere in what remains ...]
-st.session_state.avatars = {'user': None, 'assistant': None}
-
-# Initialize session state for user text input
-if 'user_text' not in st.session_state:
-    st.session_state.user_text = None
-
-# Initialize session state for model parameters
-if "max_response_length" not in st.session_state:
-    st.session_state.max_response_length = 256
-
-if "system_message" not in st.session_state:
-    st.session_state.system_message = "You are a helpful assistant"
-
-if "starter_message" not in st.session_state:
-    st.session_state.starter_message = "Hello, there! How can I help you today?"
-
-# Sidebar for settings
-with st.sidebar:
-    st.header("System Settings")
-
-    # AI Settings
-    st.session_state.system_message = st.text_area(
-        "System Message", value="You are a helpful assistant"
-    )
-    st.session_state.starter_message = st.text_area(
-        'First AI Message', value="Hello, there! How can I help you today?"
-    )
-
-    # Model Settings
-    st.session_state.max_response_length = st.number_input(
-        "Max Response Length", value=128
-    )
-
-    # Avatar Selection
-    st.markdown("*Select Avatars:*")
-    col1, col2 = st.columns(2)
-    with col1:
-        st.session_state.avatars['assistant'] = st.selectbox(
-            "AI Avatar", options=["🤗", "💬", "🤖"], index=0
-        )
-    with col2:
-        st.session_state.avatars['user'] = st.selectbox(
-            "User Avatar", options=["👤", "👱♂️", "👨🏾", "👩", "👧🏾"], index=0
-        )
-    # Reset Chat History
-    reset_history = st.button("Reset Chat History")
-
-# Initialize or reset chat history
-if "chat_history" not in st.session_state or reset_history:
-    st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message}]
-
-
-def get_response(system_message, chat_history, user_text,
-                 max_new_tokens=256):
-    """
-    Generates a response from the chatbot model.
-
-    Args:
-        system_message (str): The system message for the conversation.
-        chat_history (list): The list of previous chat messages.
-        user_text (str): The user's input text.
-        max_new_tokens (int, optional): The maximum number of new tokens to generate.
-
-    Returns:
-        tuple: A tuple containing the generated response and the updated chat history.
-    """
-    # Set up the model
-    hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.1)
-
-    # Create the prompt template
-    prompt = PromptTemplate.from_template(
-        (
-            "[INST] {system_message}"
-            "\nCurrent Conversation:\n{chat_history}\n\n"
-            "\nUser: {user_text}.\n [/INST]"
-            "\nAI:"
-        )
+terminators = [
+    pipeline.tokenizer.eos_token_id,
+    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
+]
+
+# Streamlit App Configuration
+st.set_page_config(page_title="Chatbot", page_icon="🤗")
+st.title("Gemma2 Chatbot")
+st.markdown("A chatbot that understands Javanese and Sundanese using `GoToCompany/gemma2` model.")
+
+# Initialize session state
+if "chat_history" not in st.session_state:
+    st.session_state.chat_history = [{"role": "assistant", "content": "Hello! How can I assist you today?"}]
+
+# User input
+user_input = st.chat_input("Type your message here...")
+
+# Generate response
+if user_input:
+    # Add user message to chat history
+    st.session_state.chat_history.append({"role": "user", "content": user_input})
+
+    # Prepare conversation context
+    conversation = [
+        {"role": msg["role"], "content": msg["content"]} for msg in st.session_state.chat_history
+    ]
+
+    # Generate response using the pipeline
+    outputs = pipeline(
+        conversation,
+        max_new_tokens=256,
+        eos_token_id=terminators,
     )
-    # Make the chain and bind the prompt
-    chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
-
-    # Generate the response
-    response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=chat_history))
-    response = response.split("AI:")[-1]
-
-    # Update the chat history
-    chat_history.append({'role': 'user', 'content': user_text})
-    chat_history.append({'role': 'assistant', 'content': response})
-    return response, chat_history
-
-
-# Chat interface
-chat_interface = st.container()
-with chat_interface:
-    output_container = st.container()
-    st.session_state.user_text = st.chat_input(placeholder="Enter your text here.")
-
-# Display chat messages
-with output_container:
-    # For every message in the history
-    for message in st.session_state.chat_history:
-        # Skip the system message
-        if message['role'] == 'system':
-            continue
-
-        # Display the chat message using the correct avatar
-        with st.chat_message(message['role'],
-                             avatar=st.session_state['avatars'][message['role']]):
-            st.markdown(message['content'])
 
-    # When the user enters new text:
-    if st.session_state.user_text:
-
-        with st.chat_message("user",
-                             avatar=st.session_state.avatars['user']):
-            st.markdown(st.session_state.user_text)
+    # Extract and format the assistant's response
+    assistant_response = outputs[0]["generated_text"][-1] if outputs else "Sorry, I couldn't generate a response."
+    st.session_state.chat_history.append({"role": "assistant", "content": assistant_response})
 
-
-
-
-
-        # Call the Inference API with the system_prompt, user text, and history
-        response, st.session_state.chat_history = get_response(
-            system_message=st.session_state.system_message,
-            user_text=st.session_state.user_text,
-            chat_history=st.session_state.chat_history,
-            max_new_tokens=st.session_state.max_response_length,
-        )
-        st.markdown(response)
+# Display the chat history
+for message in st.session_state.chat_history:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
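One detail of the new code worth a reviewer's note: when the `transformers` text-generation pipeline is given a chat-style list of message dicts, `outputs[0]["generated_text"]` comes back as the full conversation with the model's reply appended as a message dict, so `outputs[0]["generated_text"][-1]` is a `{"role": ..., "content": ...}` dict rather than a string, and `st.markdown` would render its repr. A minimal sketch of the unwrapping this implies (not part of the commit; it reuses `outputs` from app.py):

```python
# Sketch only: unwrap the assistant reply from a chat-format
# text-generation pipeline result held in `outputs`.
last = outputs[0]["generated_text"][-1] if outputs else None
if isinstance(last, dict):
    # Chat input: the reply arrives as a message dict.
    assistant_response = last.get("content", "")
elif isinstance(last, str):
    # Plain-string prompt: generated_text is itself a string, so [-1]
    # would be a single character; take the whole string instead.
    assistant_response = outputs[0]["generated_text"]
else:
    assistant_response = "Sorry, I couldn't generate a response."
```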
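A second note: `<|eot_id|>` is Llama-3's end-of-turn token, while Gemma-2-based checkpoints such as this one normally use `<end_of_turn>`, so `convert_tokens_to_ids("<|eot_id|>")` is likely to resolve to the unknown-token id here. A hedged alternative that keeps only terminator ids the loaded tokenizer actually recognizes (the candidate token names are assumptions, not taken from the commit):

```python
# Sketch only: collect end-of-turn token ids known to the loaded tokenizer.
tokenizer = pipeline.tokenizer
terminators = [tokenizer.eos_token_id]
for token in ("<end_of_turn>", "<|eot_id|>"):  # Gemma-2 / Llama-3 candidates
    token_id = tokenizer.convert_tokens_to_ids(token)
    # convert_tokens_to_ids returns the unk id (or None) for unknown tokens.
    if token_id is not None and token_id != tokenizer.unk_token_id:
        terminators.append(token_id)
```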