prithivMLmods committed on
Commit 419096e · verified · 1 Parent(s): addccd6

Update app.py

Files changed (1)
  app.py  +9 -0
app.py CHANGED
@@ -7,10 +7,18 @@ import spaces
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
+logo_url = "https://huggingface.co/prithivMLmods/GWQ2b/resolve/main/src/images/1.png"
+
 DESCRIPTION = """
 # GWQ PREV
 """
 
+PLACEHOLDER = f"""
+<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
+<img src="{logo_url}" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; ">
+</div>
+"""
+
 MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
@@ -70,6 +78,7 @@ def generate(
 
 demo = gr.ChatInterface(
     fn=generate,
+    placeholder=PLACEHOLDER,
     additional_inputs=[
         gr.Slider(
             label="Max new tokens",
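
The added lines define an HTML placeholder (a dimmed, centered logo) shown while the chat history is still empty, and pass it into gr.ChatInterface. Below is a minimal, self-contained sketch of the same idea; echo() is a hypothetical stand-in for the Space's generate() function, and the placeholder is routed through gr.Chatbot(placeholder=...), which assumes a Gradio version recent enough to expose that argument (the commit itself passes placeholder directly to gr.ChatInterface).

import gradio as gr

logo_url = "https://huggingface.co/prithivMLmods/GWQ2b/resolve/main/src/images/1.png"

# Same empty-chat placeholder markup as the commit: a dimmed, centered logo.
PLACEHOLDER = f"""
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
<img src="{logo_url}" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55;">
</div>
"""

def echo(message, history):
    # Hypothetical stand-in for the Space's generate() function; just echoes the prompt.
    return message

demo = gr.ChatInterface(
    fn=echo,
    # Chatbot.placeholder renders the HTML/Markdown while the conversation is empty.
    chatbot=gr.Chatbot(placeholder=PLACEHOLDER),
)

if __name__ == "__main__":
    demo.launch()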