Roh committed
Commit 79e4444 · 1 Parent(s): cb63967
Files changed (2)
  1. .gitignore +2 -0
  2. app.py +17 -77
.gitignore CHANGED
@@ -170,3 +170,5 @@ vocoder/*
 models
 *.wav
 /wav_files/
+*.db
+flagged
app.py CHANGED
@@ -5,99 +5,39 @@ import scipy.io.wavfile
 from espnet2.bin.tts_inference import Text2Speech
 from espnet2.utils.types import str_or_none
 
-tagen = 'kan-bayashi/ljspeech_vits'
-vocoder_tagen = "none"
-
+tagen = 'espnet/english_male_ryanspeech_tacotron'
+vocoder_tagen = "parallel_wavegan/ljspeech_melgan.v1.long"
 
 text2speechen = Text2Speech.from_pretrained(
     model_tag=str_or_none(tagen),
     vocoder_tag=str_or_none(vocoder_tagen),
     device="cpu",
-    # Only for Tacotron 2 & Transformer
-    threshold=0.5,
-    # Only for Tacotron 2
-    minlenratio=0.0,
-    maxlenratio=10.0,
-    use_att_constraint=False,
-    backward_window=1,
-    forward_window=3,
-    # Only for FastSpeech & FastSpeech2 & VITS
-    speed_control_alpha=1.0,
-    # Only for VITS
-    noise_scale=0.333,
-    noise_scale_dur=0.333,
-)
-
-
-tagjp = 'kan-bayashi/jsut_full_band_vits_prosody'
-vocoder_tagjp = 'none'
-
-text2speechjp = Text2Speech.from_pretrained(
-    model_tag=str_or_none(tagjp),
-    vocoder_tag=str_or_none(vocoder_tagjp),
-    device="cpu",
-    # Only for Tacotron 2 & Transformer
-    threshold=0.5,
-    # Only for Tacotron 2
-    minlenratio=0.0,
-    maxlenratio=10.0,
-    use_att_constraint=False,
-    backward_window=1,
-    forward_window=3,
-    # Only for FastSpeech & FastSpeech2 & VITS
-    speed_control_alpha=1.0,
-    # Only for VITS
-    noise_scale=0.333,
-    noise_scale_dur=0.333,
 )
 
-tagch = 'kan-bayashi/csmsc_full_band_vits'
-vocoder_tagch = "none"
-
-text2speechch = Text2Speech.from_pretrained(
-    model_tag=str_or_none(tagch),
-    vocoder_tag=str_or_none(vocoder_tagch),
-    device="cpu",
-    # Only for Tacotron 2 & Transformer
-    threshold=0.5,
-    # Only for Tacotron 2
-    minlenratio=0.0,
-    maxlenratio=10.0,
-    use_att_constraint=False,
-    backward_window=1,
-    forward_window=3,
-    # Only for FastSpeech & FastSpeech2 & VITS
-    speed_control_alpha=1.0,
-    # Only for VITS
-    noise_scale=0.333,
-    noise_scale_dur=0.333,
-)
-
-def inference(text,lang):
+def inference(text, gender):
     with torch.no_grad():
-        if lang == "english":
+        if gender == "male":
             wav = text2speechen(text)["wav"]
-            scipy.io.wavfile.write("out.wav",text2speechen.fs , wav.view(-1).cpu().numpy())
-        if lang == "chinese":
-            wav = text2speechch(text)["wav"]
-            scipy.io.wavfile.write("out.wav",text2speechch.fs , wav.view(-1).cpu().numpy())
-        if lang == "japanese":
-            wav = text2speechjp(text)["wav"]
-            scipy.io.wavfile.write("out.wav",text2speechjp.fs , wav.view(-1).cpu().numpy())
-    return "out.wav"
-title = "ESPnet2-TTS"
-description = "Gradio demo for ESPnet2-TTS: Extending the Edge of TTS Research. To use it, simply add your audio, or click one of the examples to load them. Read more at the links below."
-article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2110.07840' target='_blank'>ESPnet2-TTS: Extending the Edge of TTS Research</a> | <a href='https://github.com/espnet/espnet' target='_blank'>Github Repo</a></p>"
+            scipy.io.wavfile.write("out.wav", text2speechen.fs, wav.view(-1).cpu().numpy())
+    return "out.wav"
+
+
+title = "RyanSpeech TTS"
+description = "Gradio demo for RyanSpeech: the first high-quality speech dataset in the domain of conversation (a female voice will be added in the future). You get much better outputs when you use our <a href='https://www.kaggle.com/datasets/roholazandie/conformer-fastspeech2-ryanspeech'>pre-trained vocoder</a>. To use it, simply input a text, or click one of the examples to load it. Please <a href='https://www.isca-speech.org/archive/interspeech_2021/zandie21_interspeech.html'>cite</a> our work."
+article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2106.08468' target='_blank'>RyanSpeech-TTS</a> | <a href='http://mohammadmahoor.com/ryanspeech/' target='_blank'>Website</a> | <a href='https://www.kaggle.com/datasets/roholazandie/ryanspeech' target='_blank'>Download Dataset</a> | <a href='https://github.com/roholazandie/ryan-tts'>Github</a></p>"
 
-examples=[['This paper describes ESPnet2-TTS, an end-to-end text-to-speech (E2E-TTS) toolkit. ESPnet2-TTS extends our earlier version, ESPnet-TTS, by adding many new features, including: on-the-fly flexible pre-processing, joint training with neural vocoders, and state-of-the-art TTS models with extensions like full-band E2E text-to-waveform modeling, which simplify the training pipeline and further enhance TTS performance. The unified design of our recipes enables users to quickly reproduce state-of-the-art E2E-TTS results',"english"],['レシピの統一された設計により、ユーザーは最先端のE2E-TTSの結果をすばやく再現できます。また、推論用の統合Pythonインターフェースで事前にトレーニングされたモデルを多数提供し、ユーザーがベースラインサンプルを生成してデモを構築するための迅速な手段を提供します。',"japanese"],['对英语和日语语料库的实验评估表明,我们提供的模型合成了与真实情况相当的话语,达到了最先进的水平',"chinese"]]
+examples = [['When he reached the suburbs, the light of homes was shining through curtains of all colors', "male"],
+            ['I am a fully autonomous social robot. I can talk, listen, express, understand, and remember. My programming lets me have a conversation with just about anyone.', "male"],
+            ['When in the very midst of our victory, here comes an order to halt.', "male"]]
 
 gr.Interface(
     inference,
-    [gr.inputs.Textbox(label="input text",lines=10),gr.inputs.Radio(choices=["english", "chinese", "japanese"], type="value", default="english", label="language")],
+    [gr.inputs.Textbox(label="input text", lines=10),
+     gr.inputs.Radio(choices=["male", "female"], type="value", default="male", label="Gender")],
    gr.outputs.Audio(type="file", label="Output"),
     title=title,
     description=description,
     article=article,
     enable_queue=True,
     examples=examples
-).launch(debug=True)
+).launch(debug=True)
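
For context, a minimal standalone sketch of the pipeline this commit puts into app.py, run outside the Gradio UI. It reuses only the calls that appear in the diff above; the assumption (not part of the commit) is an environment with espnet, espnet_model_zoo, parallel_wavegan, torch, and scipy installed so the two pretrained tags can be resolved and downloaded.

# Standalone sketch (assumed environment: espnet, espnet_model_zoo,
# parallel_wavegan, torch, scipy); mirrors the calls committed in app.py.
import torch
import scipy.io.wavfile
from espnet2.bin.tts_inference import Text2Speech
from espnet2.utils.types import str_or_none

tag = 'espnet/english_male_ryanspeech_tacotron'           # acoustic model tag from this commit
vocoder_tag = "parallel_wavegan/ljspeech_melgan.v1.long"  # vocoder tag from this commit

text2speech = Text2Speech.from_pretrained(
    model_tag=str_or_none(tag),
    vocoder_tag=str_or_none(vocoder_tag),
    device="cpu",
)

with torch.no_grad():
    # Synthesize one of the demo example sentences.
    wav = text2speech("When in the very midst of our victory, here comes an order to halt.")["wav"]

# Write the waveform to disk at the model's sampling rate, as the app does.
scipy.io.wavfile.write("out.wav", text2speech.fs, wav.view(-1).cpu().numpy())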