eltorio commited on
Commit
48f559e
·
verified ·
1 Parent(s): beaf904

wrong copy/paste

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -4,7 +4,7 @@ app.py
4
 
5
  This script creates a graphical interface to test an automatic evaluation generation model.
6
  The model is available on Hugging Face and is based on the Llama 3.2 3B-instruct model.
7
- model_id: "eltorio/Llama-3.2-3B-appreciation"
8
 
9
  Author: Ronan Le Meillat
10
  License: AGPL-3.0
@@ -19,8 +19,8 @@ if os.environ.get('HF_TOKEN') is None:
19
  raise ValueError("You must set the HF_TOKEN environment variable to use this script, you also need to have access to the Llama 3.2 model family")
20
 
21
  # sets the main parameters
22
- hugging_face_model_id = "eltorio/Llama-3.2-3B-appreciation"
23
- base_model_path = "meta-llama/Llama-3.2-3B-Instruct"
24
  device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
25
 
26
  # Define the title, description, and device description for the Gradio interface
@@ -29,7 +29,7 @@ title = f"Une intelligence artificielle pour écrire des appréciations qui tour
29
  desc = "Ce modèle vous propose une évaluation automatique."
30
 
31
  # Define the long description for the Gradio interface
32
- long_desc = f"Cette démonstration est basée sur le modèle <a href='https://huggingface.co/eltorio/Llama-3.2-3B-appreciation'>Llama-3.2-3B-appreciation</a>, c'est un LLM développé à l'aide de Llama 3.2 3B-instruct!<br><b>{device_desc}</b><br> 2024 - Ronan Le Meillat"
33
 
34
  # Load the model
35
  processor = AutoProcessor.from_pretrained(
@@ -73,7 +73,7 @@ def infere(trimestre: str, moyenne_1: float,moyenne_2: float,moyenne_3: float, c
73
  gr.Warning("""No GPU available.<br>
74
  The answer will appear in around 10 minutes!<br>
75
  But it takes only a few seconds on a decent GPU.<br>
76
- Open a message in the <a href='https://huggingface.co/spaces/eltorio/Llama-3.2-3B-appreciation/discussions'>Community Discussion</a>.<br>
77
  """,
78
  duration=500)
79
  messages = get_conversation(trimestre, moyenne_1, moyenne_2, moyenne_3, comportement, participation, travail)
 
4
 
5
  This script creates a graphical interface to test an automatic evaluation generation model.
6
  The model is available on Hugging Face and is based on the Llama 3.2 3B-instruct model.
7
+ model_id: "eltorio/Llama-3.1-8B-appreciation"
8
 
9
  Author: Ronan Le Meillat
10
  License: AGPL-3.0
 
19
  raise ValueError("You must set the HF_TOKEN environment variable to use this script, you also need to have access to the Llama 3.2 model family")
20
 
21
  # sets the main parameters
22
+ hugging_face_model_id = "eltorio/Llama-3.1-8B-appreciation"
23
+ base_model_path = "meta-llama/Llama-3.1-8B"
24
  device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
25
 
26
  # Define the title, description, and device description for the Gradio interface
 
29
  desc = "Ce modèle vous propose une évaluation automatique."
30
 
31
  # Define the long description for the Gradio interface
32
+ long_desc = f"Cette démonstration est basée sur le modèle <a href='https://huggingface.co/eltorio/Llama-3.1-8B-appreciation'>Llama-3.1-8B-appreciation</a>, c'est un LLM développé à l'aide de Llama 3.2 3B-instruct!<br><b>{device_desc}</b><br> 2024 - Ronan Le Meillat"
33
 
34
  # Load the model
35
  processor = AutoProcessor.from_pretrained(
 
73
  gr.Warning("""No GPU available.<br>
74
  The answer will appear in around 10 minutes!<br>
75
  But it takes only a few seconds on a decent GPU.<br>
76
+ Open a message in the <a href='https://huggingface.co/spaces/eltorio/Llama-3.1-8B-appreciation/discussions'>Community Discussion</a>.<br>
77
  """,
78
  duration=500)
79
  messages = get_conversation(trimestre, moyenne_1, moyenne_2, moyenne_3, comportement, participation, travail)