# Gradio demo around Coqui XTTS v2: clones a reference voice (uploaded file or
# microphone recording) and synthesizes the given prompt text, with up to nine
# parallel generation slots (predict0..predict8) driven by the UI below.
import sys
import os
import re
import time
import math
import torch
import random
import spaces

# By using XTTS you agree to CPML license https://coqui.ai/cpml
# NOTE: must be set before the TTS model is instantiated so the download is
# not blocked by the interactive TOS prompt — presumably also why it precedes
# the TTS import; keep this ordering.
os.environ["COQUI_TOS_AGREED"] = "1"

import gradio as gr
from TTS.api import TTS
from TTS.utils.manage import ModelManager

# Upper bound for the random seed (largest signed 64-bit integer).
max_64_bit_int = 2**63 - 1

# Debug output: dump the available model catalogue at startup.
model_names = TTS().list_models()
print(model_names.__dict__)
print(model_names.__dir__())
model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
# Short alias used below for language-code adjustments in predict().
m = model_name

# Automatic device detection
if torch.cuda.is_available():
    # cuda only
    device_type = "cuda"
    device_selection = "cuda:0"
    data_type = torch.float16
else:
    # no GPU or Amd
    device_type = "cpu"
    device_selection = "cpu"
    data_type = torch.float32
# NOTE(review): device_selection and data_type are not used anywhere in this
# visible chunk — possibly consumed by code outside this view, or dead.

# Load the XTTS model once at module import and move it to the chosen device.
tts = TTS(model_name, gpu=torch.cuda.is_available())
tts.to(device_type)

def update_output(output_number):
    """Return visibility updates for the 8 optional output slots (#2..#9).

    Slot k (2-based) becomes visible when output_number >= k, so exactly
    `output_number` outputs are shown in total (the first is always visible).
    """
    return [
        gr.update(visible = (2 <= output_number)),
        gr.update(visible = (3 <= output_number)),
        gr.update(visible = (4 <= output_number)),
        gr.update(visible = (5 <= output_number)),
        gr.update(visible = (6 <= output_number)),
        gr.update(visible = (7 <= output_number)),
        gr.update(visible = (8 <= output_number)),
        gr.update(visible = (9 <= output_number))
    ]

# predict0..predict8 are thin per-slot wrappers around predict(); each one is
# presumably bound to its own Gradio event in the UI code below this chunk,
# which is why nine separate named functions exist instead of one. The
# `progress = gr.Progress()` default is introspected by Gradio to inject a
# progress tracker — do not remove it.

def predict0(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    """Generate audio for slot #1 (index 0)."""
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 0, generation_number, temperature, is_randomize_seed, seed, progress)

def predict1(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    """Generate audio for slot #2 (index 1)."""
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 1, generation_number, temperature, is_randomize_seed, seed, progress)

def predict2(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    """Generate audio for slot #3 (index 2)."""
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 2, generation_number, temperature, is_randomize_seed, seed, progress)

def predict3(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    """Generate audio for slot #4 (index 3)."""
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 3, generation_number, temperature, is_randomize_seed, seed, progress)

def predict4(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    """Generate audio for slot #5 (index 4)."""
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 4, generation_number, temperature, is_randomize_seed, seed, progress)

def predict5(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    """Generate audio for slot #6 (index 5)."""
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 5, generation_number, temperature, is_randomize_seed, seed, progress)

def predict6(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    """Generate audio for slot #7 (index 6)."""
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 6, generation_number, temperature, is_randomize_seed, seed, progress)

def predict7(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    """Generate audio for slot #8 (index 7)."""
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 7, generation_number, temperature, is_randomize_seed, seed, progress)

def predict8(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    """Generate audio for slot #9 (index 8)."""
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 8, generation_number, temperature, is_randomize_seed, seed, progress)

def predict(
    prompt,
    language,
    gender,
    audio_file_pth,
    mic_file_path,
    use_mic,
    i,
    generation_number,
    temperature,
    is_randomize_seed,
    seed,
    progress = gr.Progress()
):
    """Validate inputs and synthesize one audio clip for slot index `i`.

    Parameters:
        prompt: text to synthesize (2 to 50,000 characters).
        language: ISO language code selected in the UI.
        gender: "male"/"female"; picks a bundled fallback reference voice
            when no reference audio is provided.
        audio_file_pth: path of the uploaded reference audio, or None.
        mic_file_path: path of the microphone recording, or None.
        use_mic: prefer the microphone recording over the uploaded file.
        i: zero-based slot index of this generation.
        generation_number: total number of generations requested; slots with
            i >= generation_number return immediately.
        temperature: sampling temperature forwarded to the model.
        is_randomize_seed: draw a fresh random seed instead of using `seed`.
        seed: seed used when is_randomize_seed is false.
        progress: Gradio progress tracker (injected).

    Returns:
        (output_filename, information_text) on success, or (None, None) when
        the slot is inactive or validation fails (a gr.Warning is shown).
    """
    # Inactive slot: more predict* callbacks exist than generations requested.
    if generation_number <= i:
        return (
            None,
            None,
        )
    start = time.time()
    progress(0, desc = "Preparing data...")
    # --- input validation -------------------------------------------------
    if len(prompt) < 2:
        gr.Warning("Please give a longer prompt text")
        return (
            None,
            None,
        )
    if 50000 < len(prompt):
        gr.Warning("Text length limited to 50,000 characters for this demo, please try shorter text")
        return (
            None,
            None,
        )
    # --- choose the speaker reference audio -------------------------------
    if use_mic:
        if mic_file_path is None:
            gr.Warning("Please record your voice with Microphone, or uncheck Use Microphone to use reference audios")
            return (
                None,
                None,
            )
        else:
            speaker_wav = mic_file_path
    else:
        speaker_wav = audio_file_pth
    # No reference provided at all: fall back to a bundled example voice.
    if speaker_wav is None:
        if gender == "male":
            speaker_wav = "./examples/male.mp3"
        else:
            speaker_wav = "./examples/female.wav"
    # Build a filesystem-safe output name from slot, language and prompt,
    # truncated to 180 chars before appending the extension.
    output_filename = f"{i + 1}_{re.sub('[^a-zA-Z0-9]', '_', language)}_{re.sub('[^a-zA-Z0-9]', '_', prompt)}"[:180] + ".wav"
    try:
        # Model-specific French language-code quirks, keyed off the model
        # name alias `m` — NOTE(review): the "your" check presumably targets
        # YourTTS-style models ("fr-fr"), and "/fr/" single-language models
        # that take no language argument; confirm against TTS model naming.
        if language == "fr":
            if m.find("your") != -1:
                language = "fr-fr"
            if m.find("/fr/") != -1:
                language = None
        predict_on_gpu(i, generation_number, prompt, speaker_wav, language, output_filename, temperature, is_randomize_seed, seed, progress)
    except RuntimeError as e :
        if "device-assert" in str(e):
            # cannot do anything on cuda device side error, need to restart
            gr.Warning("Unhandled Exception encounter, please retry in a minute")
            print("Cuda device-assert Runtime encountered need restart")
            sys.exit("Exit due to cuda device-assert")
        else:
            raise e
    # --- report elapsed wall-clock time -----------------------------------
    end = time.time()
    secondes = int(end - start)
    minutes = math.floor(secondes / 60)
    secondes = secondes - (minutes * 60)
    hours = math.floor(minutes / 60)
    minutes = minutes - (hours * 60)
    information = ("Start again to get a different result. " if is_randomize_seed else "") + "The sound has been generated in " + ((str(hours) + " h, ") if hours != 0 else "") + ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + str(secondes) + " sec."
    return (
        output_filename,
        information,
    )

# Runs on a ZeroGPU-allocated GPU for at most 60 s per call (spaces.GPU).
@spaces.GPU(duration=60)
def predict_on_gpu(
    i,
    generation_number,
    prompt,
    speaker_wav,
    language,
    output_filename,
    temperature,
    is_randomize_seed,
    seed,
    progress
):
    """Seed the RNGs and run XTTS voice-cloned synthesis to `output_filename`.

    Writes the generated audio file as a side effect; returns None.
    """
    progress((i + .5) / generation_number, desc = "Generating the audio #" + str(i + 1) + "...")
    if is_randomize_seed:
        seed = random.randint(0, max_64_bit_int)
    # Seed both Python and torch RNGs so a fixed seed reproduces the output.
    random.seed(seed)
    torch.manual_seed(seed)
    tts.tts_to_file(
        text = prompt,
        file_path = output_filename,
        speaker_wav = speaker_wav,
        language = language,
        temperature = temperature
    )

# UI definition (continues beyond this chunk; the HTML string is opened here).
with gr.Blocks() as interface:
    gr.HTML(
        """
To avoid the queue, you can duplicate this space on CPU, GPU or ZERO space GPU: