added key
- app.py +2 -2
- config.gradio.yaml +1 -1
app.py
CHANGED
@@ -171,6 +171,8 @@ with gr.Blocks(css=css) as demo:
         return state["client"].set_key(openai_api_key)
 
     def add_text(state, chatbot, txt):
+        hugging_face_token = "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"
+        state["client"].set_token(hugging_face_token)
         return state["client"].add_text(chatbot, txt)
 
     def set_token(state, hugging_face_token):
@@ -184,8 +186,6 @@
     # hugging_face_token.submit(set_token, [state, hugging_face_token], [hugging_face_token])
     btn1.click(set_key, [state, openai_api_key], [openai_api_key])
     btn2.click(add_text, [state, chatbot, txt], [chatbot, txt]).then(bot, [state, chatbot], [chatbot, results])
-    hugging_face_token = "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"
-    state["client"].set_token(hugging_face_token)
     # btn3.click(set_token, [state, hugging_face_token], [hugging_face_token])
 
     gr.Examples(
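The new add_text handler sets the Hugging Face token on every message, with the value hardcoded in app.py. A minimal sketch of an alternative, assuming the token is instead stored as a Space secret exposed through an environment variable (HUGGINGFACE_TOKEN is an illustrative name, not one taken from the repo):

import os

def add_text(state, chatbot, txt):
    # Read the token from the environment rather than embedding it in the source.
    # HUGGINGFACE_TOKEN is an assumed variable name; any Space secret would do.
    hugging_face_token = os.environ.get("HUGGINGFACE_TOKEN")
    if hugging_face_token:
        state["client"].set_token(hugging_face_token)
    return state["client"].add_text(chatbot, txt)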
config.gradio.yaml
CHANGED
@@ -8,7 +8,7 @@ log_file: logs/debug.log
 model: text-davinci-003 # text-davinci-003
 use_completion: true
 inference_mode: hybrid # local, huggingface or hybrid
-local_deployment:
+local_deployment: standard # minimal, standard or full
 num_candidate_models: 5
 max_description_length: 100
 proxy:
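The YAML change gives local_deployment an explicit value matching its inline comment. A minimal sketch of how such a setting could be loaded and checked, assuming PyYAML; the keys and allowed values come from the diff, while the loader itself is illustrative rather than the Space's actual startup code:

import yaml

with open("config.gradio.yaml") as f:
    config = yaml.safe_load(f)

# The inline comments in the config list the expected values for these fields.
local_deployment = config.get("local_deployment", "standard")  # minimal, standard or full
inference_mode = config.get("inference_mode", "hybrid")        # local, huggingface or hybrid

if local_deployment not in ("minimal", "standard", "full"):
    raise ValueError(f"unexpected local_deployment: {local_deployment}")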