Titobsala committed on
Commit
3ff7970
·
1 Parent(s): 2e44bfb

app para avaliação do modelo treinado com interface

Browse files
Files changed (1) hide show
  1. app.py +9 -5
app.py CHANGED
@@ -37,9 +37,13 @@ def generate_and_compare(prompt, max_new_tokens, temperature):
37
  finetuned_output = generate_text(finetuned_model, prompt, max_new_tokens, temperature)
38
  return base_output, finetuned_output
39
 
40
- def evaluate(model, output, score):
41
- log_interaction(model, gr.get_state('last_prompt'), output, score)
42
- return f"Avaliação registrada: {score}"
 
 
 
 
43
 
44
  with gr.Blocks() as demo:
45
  gr.Markdown("# Comparação de Modelos: Llama-3.2-1B-Instruct vs. Modelo Fine-tuned para Sustentabilidade")
@@ -66,8 +70,8 @@ with gr.Blocks() as demo:
66
  finetuned_feedback = gr.Textbox(label="Feedback da Avaliação (Fine-tuned)")
67
 
68
  generate_btn.click(generate_and_compare, inputs=[prompt, max_new_tokens, temperature], outputs=[base_output, finetuned_output])
69
- base_submit.click(evaluate, inputs=["Base", base_output, base_rating], outputs=base_feedback)
70
- finetuned_submit.click(evaluate, inputs=["Fine-tuned", finetuned_output, finetuned_rating], outputs=finetuned_feedback)
71
 
72
  demo.load(lambda: gr.update(value=""), outputs=[prompt])
73
  prompt.change(lambda x: gr.set_state(last_prompt=x), inputs=[prompt])
 
37
  finetuned_output = generate_text(finetuned_model, prompt, max_new_tokens, temperature)
38
  return base_output, finetuned_output
39
 
40
def evaluate_base(output, score):
    """Record a user rating for the base model's output.

    Logs the last prompt, the generated output, and the numeric score
    under the "Base" model label, then returns a confirmation message
    for the feedback textbox.

    NOTE(review): ``gr.get_state``/``gr.set_state`` belong to the
    pre-3.0 Gradio API and do not exist alongside ``gr.Blocks`` —
    confirm the installed Gradio version or migrate to a ``gr.State``
    component.
    """
    last_prompt = gr.get_state('last_prompt')
    log_interaction("Base", last_prompt, output, score)
    return f"Avaliação registrada para o modelo Base: {score}"
43
+
44
def evaluate_finetuned(output, score):
    """Record a user rating for the fine-tuned model's output.

    Logs the last prompt, the generated output, and the numeric score
    under the "Fine-tuned" model label, then returns a confirmation
    message for the feedback textbox.

    NOTE(review): ``gr.get_state``/``gr.set_state`` belong to the
    pre-3.0 Gradio API and do not exist alongside ``gr.Blocks`` —
    confirm the installed Gradio version or migrate to a ``gr.State``
    component.
    """
    last_prompt = gr.get_state('last_prompt')
    log_interaction("Fine-tuned", last_prompt, output, score)
    return f"Avaliação registrada para o modelo Fine-tuned: {score}"
47
 
48
  with gr.Blocks() as demo:
49
  gr.Markdown("# Comparação de Modelos: Llama-3.2-1B-Instruct vs. Modelo Fine-tuned para Sustentabilidade")
 
70
  finetuned_feedback = gr.Textbox(label="Feedback da Avaliação (Fine-tuned)")
71
 
72
  generate_btn.click(generate_and_compare, inputs=[prompt, max_new_tokens, temperature], outputs=[base_output, finetuned_output])
73
+ base_submit.click(evaluate_base, inputs=[base_output, base_rating], outputs=base_feedback)
74
+ finetuned_submit.click(evaluate_finetuned, inputs=[finetuned_output, finetuned_rating], outputs=finetuned_feedback)
75
 
76
  demo.load(lambda: gr.update(value=""), outputs=[prompt])
77
  prompt.change(lambda x: gr.set_state(last_prompt=x), inputs=[prompt])