chiayewken committed
Commit 3992910 · Parent: f366cea

Update app.py description and examples

Files changed (1): app.py (+8 -12)
app.py CHANGED
@@ -14,21 +14,19 @@ DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
-# Llama-2 7B Chat
+# Reasoning Paths Optimization: Learning to Reason and Explore From Diverse Paths
 
-This Space demonstrates model [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta, a Llama 2 model with 7B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
+This Space demonstrates the reasoning paths optimization (RPO) framework with a Llama 3 model with 8B parameters fine-tuned for math reasoning. Feel free to play with it, or duplicate to run generations without a queue!
 
-🔎 For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).
-
-🔨 Looking for an even more powerful model? Check out the [13B version](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat) or the large [70B model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
+🔎 For more details about the RPO training framework, check out the [paper](https://arxiv.org/abs/2410.10858) or [code](https://github.com/DAMO-NLP-SG/reasoning-paths-optimization).
 """
 
 LICENSE = """
 <p/>
 
 ---
-As a derivate work of [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta,
-this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/USE_POLICY.md).
+As a derivative work of [Llama-3-8b-chat](https://huggingface.co/meta-llama/Meta-Llama-3-8B) by Meta,
+this demo is governed by the original [license](https://huggingface.co/meta-llama/Meta-Llama-3-8B/blob/main/LICENSE) and [acceptable use policy](https://huggingface.co/meta-llama/Meta-Llama-3-8B/blob/main/USE_POLICY.md).
 """
 
 if not torch.cuda.is_available():
@@ -125,11 +123,9 @@ chat_interface = gr.ChatInterface(
     ],
     stop_btn=None,
     examples=[
-        ["Hello there! How are you doing?"],
-        ["Can you explain briefly to me what is the Python programming language?"],
-        ["Explain the plot of Cinderella in a sentence."],
-        ["How many hours does it take a man to eat a Helicopter?"],
-        ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
+        ["Roger has 5 tennis balls. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now?"],
+        ["Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. How many clips did Natalia sell altogether in April and May?"],
+        ["Weng earns $12 an hour for babysitting. Yesterday, she just did 50 minutes of babysitting. How much did she earn?"],
     ],
     cache_examples=False,
     type="messages",
 