Lyte committed on
Commit
3c59616
·
verified ·
1 Parent(s): a35d071

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -23
app.py CHANGED
@@ -5,47 +5,51 @@ from huggingface_hub import hf_hub_download
5
 
6
  model = Llama(
7
  model_path=hf_hub_download(
8
- repo_id=os.environ.get("REPO_ID", "bartowski/QwQ-32B-Preview-GGUF"),
9
- filename=os.environ.get("MODEL_FILE", "QwQ-32B-Preview-Q3_K_L.gguf"),
10
  )
11
  )
12
 
13
  DESCRIPTION = '''
14
- # QwQ-32B-Preview | Duplicate the space and set it to private for faster & personal inference for free.
15
- Qwen/QwQ-32B-Preview: an experimental research model developed by the Qwen Team.
16
  Focused on advancing AI reasoning capabilities.
17
 
18
  **To start a new chat**, click "clear" and start a new dialog.
19
  '''
20
 
21
  LICENSE = """
22
- --- Apache 2.0 License ---
23
  """
24
 
 
 
 
 
 
 
 
25
  def generate_text(message, history, max_tokens=512, temperature=0.9, top_p=0.95):
26
- """Generate a response using the Llama model."""
27
  temp = ""
28
- response = model.create_chat_completion(
29
- messages=[{"role": "system", "content": "You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step."},
30
- {"role": "user", "content": message}],
31
- temperature=temperature,
32
- max_tokens=max_tokens,
33
- top_p=top_p,
34
- stream=True,
35
- )
36
- for streamed in response:
37
- delta = streamed["choices"][0].get("delta", {})
38
- text_chunk = delta.get("content", "")
39
- temp += text_chunk
40
  yield temp
41
 
 
42
  with gr.Blocks() as demo:
43
  gr.Markdown(DESCRIPTION)
44
 
45
  chatbot = gr.ChatInterface(
46
  generate_text,
47
- title="Qwen/QwQ-32B-Preview | GGUF Demo",
48
- description=" settings below if needed.",
49
  examples=[
50
  ["How many r's are in the word strawberry?"],
51
  ['What is the most optimal way to do Test-Time Scaling?'],
@@ -56,9 +60,9 @@ with gr.Blocks() as demo:
56
  )
57
 
58
  with gr.Accordion("Adjust Parameters", open=False):
59
- gr.Slider(minimum=512, maximum=4096, value=1024, step=1, label="Max Tokens")
60
- gr.Slider(minimum=0.1, maximum=1.5, value=0.9, step=0.1, label="Temperature")
61
- gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
62
 
63
  gr.Markdown(LICENSE)
64
 
 
5
 
6
# Fetch the GGUF weights from the Hugging Face Hub and load them with
# llama.cpp.  Both the repo and the file name can be overridden through
# environment variables so a duplicated Space can point at another model.
model = Llama(
    model_path=hf_hub_download(
        repo_id=os.environ.get("REPO_ID", "Lyte/LLaMA-O1-Supervised-1129-Q4_K_M-GGUF"),
        filename=os.environ.get("MODEL_FILE", "llama-o1-supervised-1129-q4_k_m.gguf"),
    )
)
12
 
13
# Markdown shown at the top of the Gradio page.
DESCRIPTION = '''
# SimpleBerry/LLaMA-O1-Supervised-1129 | Duplicate the space and set it to private for faster & personal inference for free.
SimpleBerry/LLaMA-O1-Supervised-1129: an experimental research model developed by the SimpleBerry.
Focused on advancing AI reasoning capabilities.

**To start a new chat**, click "clear" and start a new dialog.
'''

# Markdown shown at the bottom of the Gradio page.
LICENSE = """
--- MIT License ---
"""
24
 
25
# Prompt scaffold expected by the LLaMA-O1 supervised model: the user problem
# is embedded as a "thought" node, followed by the opening tag of the
# expansion node the model is asked to continue.
template = "<start_of_father_id>-1<end_of_father_id><start_of_local_id>0<end_of_local_id><start_of_thought><problem>{content}<end_of_thought><start_of_rating><positive_rating><end_of_rating>\n<start_of_father_id>0<end_of_father_id><start_of_local_id>1<end_of_local_id><start_of_thought><expansion>"


def llama_o1_template(data):
    """Return *data* (the raw user message) wrapped in the LLaMA-O1 prompt template."""
    return template.format(content=data)
31
+
32
def generate_text(message, history, max_tokens=512, temperature=0.9, top_p=0.95):
    """Stream a completion for *message* from the loaded GGUF model.

    Yields the accumulated response text after every generated token so the
    Gradio ChatInterface can render it incrementally.

    Parameters:
        message: raw user message; wrapped with ``llama_o1_template``.
        history: chat history supplied by ``gr.ChatInterface`` (unused —
            each turn is generated from the single message only).
        max_tokens: upper bound on generated tokens (previously accepted but
            silently ignored, leaving generation uncapped).
        temperature / top_p: sampling parameters forwarded to the model.
    """
    prompt = llama_o1_template(message).replace('<|end_of_text|>', '')
    tokens = model.tokenize(prompt.encode('utf-8'))
    produced = b""
    for count, token in enumerate(model.generate(tokens, top_p=top_p, temp=temperature)):
        if count >= max_tokens:  # honor the cap instead of generating forever
            break
        produced += model.detokenize([token])
        # Decode the whole byte buffer each step: a single detokenized token
        # can end mid-way through a multi-byte UTF-8 character, so decoding
        # per-token could raise UnicodeDecodeError. errors='ignore' drops the
        # trailing partial sequence until the next token completes it.
        yield produced.decode('utf-8', errors='ignore')
44
 
45
+
46
  with gr.Blocks() as demo:
47
  gr.Markdown(DESCRIPTION)
48
 
49
  chatbot = gr.ChatInterface(
50
  generate_text,
51
+ title="SimpleBerry/LLaMA-O1-Supervised-1129 | GGUF Demo",
52
+ description="Edit Settings below if needed.",
53
  examples=[
54
  ["How many r's are in the word strawberry?"],
55
  ['What is the most optimal way to do Test-Time Scaling?'],
 
60
  )
61
 
62
  with gr.Accordion("Adjust Parameters", open=False):
63
+ gr.Slider(minimum=1024, maximum=8192, value=2048, step=1, label="Max Tokens")
64
+ gr.Slider(minimum=0.1, maximum=1.5, value=0.7, step=0.1, label="Temperature")
65
+ gr.Slider(minimum=0.05, maximum=1.0, value=0.95, step=0.01, label="Top-p (nucleus sampling)")
66
 
67
  gr.Markdown(LICENSE)
68