vpcom committed
Commit 7900969 · Parent(s): 2667d9e

fix: the maximum number of tokens

Files changed (1): app.py +4 -4
app.py CHANGED

@@ -31,7 +31,7 @@ def format_prompt(message, history, system_prompt):
     prompt += f"System: {system_prompt}\n"
     for user_prompt, bot_response in history:
         prompt += f"User: {user_prompt}\n"
-        prompt += f"Falcon: {bot_response}\n" # Response already contains "Falcon: "
+        prompt += f"PersianGPT: {bot_response}\n" # Response already contains "PersianGPT: "
     prompt += f"""User: {message}
 Falcon:"""
     return prompt
@@ -39,7 +39,7 @@ Falcon:"""
 seed = 42
 
 def generate(
-    prompt, history, system_prompt="<|endoftext|>", temperature=0.9, max_new_tokens=250, top_p=0.95, repetition_penalty=1.0,
+    prompt, history, system_prompt="<|endoftext|>", temperature=0.9, max_new_tokens=100, top_p=0.95, repetition_penalty=1.0,
 ):
     temperature = float(temperature)
     if temperature < 1e-2:
@@ -86,9 +86,9 @@ additional_inputs=[
         ),
         gr.Slider(
             label="Max new tokens",
-            value=256,
+            value=100,
             minimum=0,
-            maximum=8192,
+            maximum=250,
             step=64,
             interactive=True,
             info="The maximum numbers of new tokens",