Update app.py
app.py CHANGED
@@ -1,8 +1,10 @@
 import gradio as gr
-from ctransformers import AutoModelForCausalLM, AutoConfig, Config #import for GGUF/GGML models
+from ctransformers import AutoModelForCausalLM, AutoConfig, Config # import for GGUF/GGML models
 import datetime
 
-
+
+# modelfile = "models/tinyllama-1.1b-1t-openorca.Q4_K_M.gguf"
+# modelfile="TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
 modelfile="TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T"
 
 i_temperature = 0.30
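Note on this hunk: ctransformers loads GGML/GGUF checkpoints, which is what both commented-out modelfile options point at, while the active modelfile is a plain-weights Hugging Face repo. A minimal sketch of the GGUF loading path, assuming the rest of app.py builds the model with ctransformers' AutoModelForCausalLM; the Config values and the quantized file name are illustrative assumptions, not taken from this diff:

from ctransformers import AutoModelForCausalLM, AutoConfig, Config

# Illustrative generation settings; only i_temperature = 0.30 appears in the diff.
conf = AutoConfig(Config(temperature=0.30, top_p=0.95,
                         max_new_tokens=1024, context_length=4096))

llm = AutoModelForCausalLM.from_pretrained(
    "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",           # one of the commented-out options
    model_file="tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",  # assumed quantized file name
    model_type="llama",
    config=conf,
)

# Quick check that generation works with a ChatML-style prompt.
print(llm("<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n", max_new_tokens=32))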
@@ -38,9 +40,9 @@ with gr.Blocks(theme='ParityError/Interstellar') as demo:
 gr.HTML("<center>"
 + "<h1>🦙 TinyLlama 1.1B π 4K context window</h2></center>")
 gr.Markdown("""
-**Currently Running**: [TinyLlama/TinyLlama-1.1B-…
+**Currently Running**: [TinyLlama/TinyLlama-1.1B-Chat-v0.6](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v0.6) **Chat History Log File**: *TinyLlama.1B.txt*
 
-- **Base Model**: TinyLlama/TinyLlama-1.1B-…
+- **Base Model**: TinyLlama/TinyLlama-1.1B-Chat-v0.6, Fine tuned on OpenOrca GPT4 subset for 1 epoch, Using CHATML format.
 - **License**: Apache 2.0, following the TinyLlama base model.
 The model output is not censored and the authors do not endorse the opinions in the generated content. Use at your own risk.
 """)
@@ -89,12 +91,30 @@ with gr.Blocks(theme='ParityError/Interstellar') as demo:
 return "", history + [[user_message, None]]
 
 def bot(history, t, p, m, r):
+# SYSTEM_PROMPT = """<|im_start|>system
+# You are a helpful bot. Your answers are clear and concise.
+# <|im_end|>
+
+# """
+# prompt = f"<|im_start|>system<|im_end|><|im_start|>user\n{history[-1][0]}<|im_end|>\n<|im_start|>assistant\n"
+
 SYSTEM_PROMPT = """<|im_start|>system
-You are a …
+You are a customer support chatbot for an online platform.
+Your purpose is to assist users with their inquiries and provide accurate information.
+You have been trained with a knowledge base that includes rules and limitations regarding chargebacks.
+The knowledge base consists of the following information:
+
+1. Chargebacks beyond 90 days are not possible.
+2. Chargebacks above $1000 are not allowed.
+3. Chargebacks for transactions with a valid 3D secure are not allowed.
+
+Use the provided conversation example as a starting point for training.
+Your goal is to respond to user queries in a helpful and informative manner, ensuring that you adhere to the platform's chargeback policies.
 <|im_end|>
 
 """
-prompt = f"<|im_start|>system<|im_end|><|im_start|>user\n{history[-1][0]}<|im_end|>\n<|im_start|>assistant\n"
+prompt = f"<|im_start|>system<|im_end|><|im_start|>user\n{history[-1][0]}<|im_end|>\n<|im_start|>assistant\n"
+
 print(f"history lenght: {len(history)}")
 if len(history) == 1:
 print("this is the first round")
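Note on the prompt handling in this hunk: the committed prompt f-string interpolates only the latest user message and leaves the system segment empty (<|im_start|>system<|im_end|>), so, at least in the lines shown here, SYSTEM_PROMPT is defined but never reaches the model and earlier turns in history are not replayed. A sketch of one way to splice both in; build_prompt is a hypothetical helper, not part of this change:

def build_prompt(history, system_prompt):
    # system_prompt as defined in the diff already carries its own
    # <|im_start|>system ... <|im_end|> markers, so it is used verbatim.
    prompt = system_prompt
    for user_turn, assistant_turn in history[:-1]:
        # Replay completed turns so the model keeps the conversation context.
        prompt += f"<|im_start|>user\n{user_turn}<|im_end|>\n<|im_start|>assistant\n{assistant_turn}<|im_end|>\n"
    # Open the assistant turn for the pending user message.
    prompt += f"<|im_start|>user\n{history[-1][0]}<|im_end|>\n<|im_start|>assistant\n"
    return prompt

# Possible use inside bot(): prompt = build_prompt(history, SYSTEM_PROMPT)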
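The diff shows the user callback's return value (return "", history + [[user_message, None]]) and the bot(history, t, p, m, r) signature, but not how they are wired to the Gradio components. A typical gr.Blocks wiring for this pattern is sketched below; the component names and slider ranges are assumptions, and bot() is stubbed instead of calling the ctransformers model:

import gradio as gr

with gr.Blocks(theme='ParityError/Interstellar') as demo:
    chatbot = gr.Chatbot(height=400)
    msg = gr.Textbox(label="Your message")
    # Assumed controls feeding the t, p, m, r parameters of bot().
    temp = gr.Slider(0.0, 1.0, value=0.30, label="temperature")
    top_p = gr.Slider(0.0, 1.0, value=0.95, label="top_p")
    max_tok = gr.Slider(64, 2048, value=512, step=64, label="max new tokens")
    rep_pen = gr.Slider(1.0, 2.0, value=1.2, step=0.05, label="repetition penalty")

    def user(user_message, history):
        # Clear the textbox and append the user turn with an empty assistant slot.
        return "", history + [[user_message, None]]

    def bot(history, t, p, m, r):
        # Stub: the real app fills history[-1][1] with the model's reply.
        history[-1][1] = f"(stub reply, temperature={t})"
        return history

    # submit runs user() to update the chat, then bot() to produce the reply.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, [chatbot, temp, top_p, max_tok, rep_pen], chatbot
    )

demo.launch()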