samidh committed on
Commit
6b6edbb
·
verified ·
1 Parent(s): d0b176c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -11,7 +11,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
11
  device = 'cuda' if torch.cuda.is_available() else 'cpu'
12
 
13
  base_model_name = "google/gemma-2-9b"
14
- adapter_model_name = "samidh/cope.a.r09.2"
15
 
16
  bnb_config = BitsAndBytesConfig(
17
  load_in_8bit=True,
@@ -127,7 +127,7 @@ with gr.Blocks() as demo:
127
  # Left column with inputs
128
  with gr.Column(scale=1):
129
  input1 = gr.Textbox(label="Content (english only)", lines=2, max_lines=4, value=DEFAULT_CONTENT)
130
- input2 = gr.Textbox(label="Policy (make your own)", lines=10, max_lines=16, value=DEFAULT_POLICY)
131
 
132
  # Right column with output
133
  with gr.Column(scale=1):
 
11
  device = 'cuda' if torch.cuda.is_available() else 'cpu'
12
 
13
  base_model_name = "google/gemma-2-9b"
14
+ adapter_model_name = "zentropi-ai/cope-a-9b"
15
 
16
  bnb_config = BitsAndBytesConfig(
17
  load_in_8bit=True,
 
127
  # Left column with inputs
128
  with gr.Column(scale=1):
129
  input1 = gr.Textbox(label="Content (english only)", lines=2, max_lines=4, value=DEFAULT_CONTENT)
130
+ input2 = gr.Textbox(label="Policy (make your own)", lines=10, max_lines=17, value=DEFAULT_POLICY)
131
 
132
  # Right column with output
133
  with gr.Column(scale=1):