Akjava committed (verified)
Commit 40d6dba · 1 Parent(s): 38acb52

Update app.py


change release version

Files changed (1)
  1. app.py +2 -4
app.py CHANGED
@@ -14,8 +14,6 @@ def init():
     print("no HUGGINGFACE_TOKEN if you need set secret ")
     #raise ValueError("HUGGINGFACE_TOKEN environment variable is not set")
 
-    model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
-    model_id = "google/gemma-2b"
     model_id = "Qwen/Qwen2.5-0.5B-Instruct"
 
     device = "auto" # torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -94,7 +92,7 @@ def call_generate_text(message, history):
 head = '''
 <script src="https://cdn.jsdelivr.net/npm/onnxruntime-web/dist/ort.webgpu.min.js" ></script>
 <script type="module">
-import { matcha_tts,env } from "https://akjava.github.io/Matcha-TTS-Japanese/js-esm/matcha_tts_onnx_en_dev.js";
+import { matcha_tts,env } from "https://akjava.github.io/Matcha-TTS-Japanese/js-esm/v001-20240921/matcha_tts_onnx_en.js";
 window.MatchaTTSEn = matcha_tts
 </script>
 '''
@@ -104,7 +102,7 @@ with gr.Blocks(title="LLM with TTS",head=head) as demo:
     js = """
     function(chatbot){
     text = (chatbot[chatbot.length -1])["content"]
-    window.MatchaTTSEn(text)
+    window.MatchaTTSEn(text,"./ljspeech_sim.onnx")
     }
     """
     chatbot = gr.Chatbot(type="messages")
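Note on the first hunk: the removed meta-llama/Meta-Llama-3.1-8B-Instruct and google/gemma-2b assignments were dead code, since each was immediately overwritten by the next assignment, so only the Qwen/Qwen2.5-0.5B-Instruct line remains. The model-loading code itself is outside the hunks shown in this commit; the snippet below is a minimal sketch only, assuming the standard transformers API and that the "auto" device string is passed as device_map.

# Minimal sketch, not the Space's actual loading code: assumes the standard
# transformers API; dtype and device_map usage are assumptions, only
# model_id and device come from the diff.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Qwen/Qwen2.5-0.5B-Instruct"
device = "auto"  # torch.device("cuda" if torch.cuda.is_available() else "cpu")

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumed dtype; the diff does not show it
    device_map=device,           # "auto" lets accelerate place the weights
)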
 
14
  print("no HUGGINGFACE_TOKEN if you need set secret ")
15
  #raise ValueError("HUGGINGFACE_TOKEN environment variable is not set")
16
 
 
 
17
  model_id = "Qwen/Qwen2.5-0.5B-Instruct"
18
 
19
  device = "auto" # torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
92
  head = '''
93
  <script src="https://cdn.jsdelivr.net/npm/onnxruntime-web/dist/ort.webgpu.min.js" ></script>
94
  <script type="module">
95
+ import { matcha_tts,env } from "https://akjava.github.io/Matcha-TTS-Japanese/js-esm/v001-20240921/matcha_tts_onnx_en.js";
96
  window.MatchaTTSEn = matcha_tts
97
  </script>
98
  '''
 
102
  js = """
103
  function(chatbot){
104
  text = (chatbot[chatbot.length -1])["content"]
105
+ window.MatchaTTSEn(text,"./ljspeech_sim.onnx")
106
  }
107
  """
108
  chatbot = gr.Chatbot(type="messages")
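The other two hunks pin the browser-side TTS to the v001-20240921 release of matcha_tts_onnx_en.js (replacing the _dev build, matching the "change release version" commit message) and pass an explicit second argument, "./ljspeech_sim.onnx", which appears to be a local ONNX voice model served by the Space. The diff does not show how the js callback is attached to the chatbot; the sketch below is one plausible wiring, assuming a JS-only handler on the chatbot's change event.

# Sketch of how head/js could be wired together; only the head, js, and
# chatbot definitions come from the diff, the event hookup is an assumption.
import gradio as gr

head = '''
<script src="https://cdn.jsdelivr.net/npm/onnxruntime-web/dist/ort.webgpu.min.js" ></script>
<script type="module">
import { matcha_tts,env } from "https://akjava.github.io/Matcha-TTS-Japanese/js-esm/v001-20240921/matcha_tts_onnx_en.js";
window.MatchaTTSEn = matcha_tts
</script>
'''

js = """
function(chatbot){
    text = (chatbot[chatbot.length -1])["content"]
    window.MatchaTTSEn(text,"./ljspeech_sim.onnx")
}
"""

with gr.Blocks(title="LLM with TTS", head=head) as demo:
    chatbot = gr.Chatbot(type="messages")
    # Assumption: run the JS handler (no Python fn) whenever the chat updates,
    # speaking the latest message's "content" field through MatchaTTSEn.
    chatbot.change(fn=None, inputs=chatbot, outputs=None, js=js)

demo.launch()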