prompt:
  template: |-
    Your task is to check if the Response is accurate to the Evidence.
    Generate 'Accurate' if the Response is accurate when verified according to the Evidence, or 'Inaccurate' if the Response is inaccurate (contradicts the evidence) or cannot be verified.

    **Query**:

    {{user_request}}

    **End of Query**

    **Evidence**

    {{context_document}}

    **End of Evidence**

    **Response**:

    {{response}}

    **End of Response**

    Let's think step-by-step.
  template_variables:
    - user_request
    - context_document
    - response
  metadata:
    description: "An evaluation prompt from the paper 'The FACTS Grounding Leaderboard: Benchmarking LLMs’ Ability to Ground
      Responses to Long-Form Input' by Google DeepMind.\n    The prompt was copied from the evaluation_prompts.csv file from
      Kaggle.\n    This specific prompt elicits a binary accurate/inaccurate classifier for the entire response."
    evaluation_method: response_level
    tags:
      - fact-checking
    version: 1.0.0
    author: Google DeepMind
    source: https://www.kaggle.com/datasets/deepmind/FACTS-grounding-examples?resource=download&select=evaluation_prompts.csv
  client_parameters: {}
  custom_data: {}
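# Usage sketch: the {{...}} placeholders in `template` use Jinja-style
# syntax, so one way to render this prompt is with pyyaml + jinja2. The
# file name below is hypothetical, and treating the template as a Jinja
# template is an assumption about the intended substitution mechanism.
#
#   import yaml
#   import jinja2
#
#   spec = yaml.safe_load(open("facts_grounding_judge.yaml"))["prompt"]
#   rendered = jinja2.Template(spec["template"]).render(
#       user_request="...the original user query...",
#       context_document="...the full grounding document...",
#       response="...the model answer to evaluate...",
#   )
#
#   # `rendered` is then sent to a judge model; per the template, its reply
#   # should contain 'Accurate' or 'Inaccurate' after the step-by-step
#   # reasoning elicited by the final line.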