import gradio as gr
import subprocess
import os
import re
import shutil
import tempfile

is_shared_ui = True if "fffiloni/YuE" in os.environ.get('SPACE_ID', '') else False
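# Note: os.environ.get avoids a KeyError when SPACE_ID is unset (e.g. a local run);
# is_shared_ui is typically used to gate features on the public shared Space,
# though it is not referenced again in this snippet.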
# Install required package
def install_flash_attn():
    try:
        print("Installing flash-attn...")
        subprocess.run(
            ["pip", "install", "flash-attn", "--no-build-isolation"],
            check=True
        )
        print("flash-attn installed successfully!")
    except subprocess.CalledProcessError as e:
        print(f"Failed to install flash-attn: {e}")
        exit(1)

# Install flash-attn
install_flash_attn()
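# Note: installing flash-attn at startup may take several minutes if no prebuilt
# wheel matches the environment, since the extension is then compiled from source.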
from huggingface_hub import snapshot_download

# Create xcodec_mini_infer folder
folder_path = './inference/xcodec_mini_infer'

# Create the folder if it doesn't exist
if not os.path.exists(folder_path):
    os.mkdir(folder_path)
    print(f"Folder created at: {folder_path}")
else:
    print(f"Folder already exists at: {folder_path}")

snapshot_download(
    repo_id="m-a-p/xcodec_mini_infer",
    local_dir="./inference/xcodec_mini_infer"
)
# Change to the "inference" directory
inference_dir = "./inference"
try:
    os.chdir(inference_dir)
    print(f"Changed working directory to: {os.getcwd()}")
except FileNotFoundError:
    print(f"Directory not found: {inference_dir}")
    exit(1)
def empty_output_folder(output_dir):
    # List all files in the output directory
    files = os.listdir(output_dir)

    # Iterate over the files and remove them
    for file in files:
        file_path = os.path.join(output_dir, file)
        try:
            if os.path.isdir(file_path):
                # If it's a directory, remove it recursively
                shutil.rmtree(file_path)
            else:
                # If it's a file, delete it
                os.remove(file_path)
        except Exception as e:
            print(f"Error deleting file {file_path}: {e}")
# Function to create a temporary file with string content
def create_temp_file(content, prefix, suffix=".txt"):
    temp_file = tempfile.NamedTemporaryFile(delete=False, mode="w", prefix=prefix, suffix=suffix)
    content = content.strip() + "\n\n"  # Add extra newline at end
    content = content.replace("\r\n", "\n").replace("\r", "\n")
    temp_file.write(content)
    temp_file.close()

    # Debug: Print file contents
    print(f"\nContent written to {prefix}{suffix}:")
    print(content)
    print("---")

    return temp_file.name
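# Illustrative usage (the exact temp path is system-dependent):
#   path = create_temp_file("inspiring female uplifting pop airy vocal", prefix="genre_")
#   -> e.g. "/tmp/genre_ab12cd34.txt" containing the normalized text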
def get_last_mp3_file(output_dir):
    # List all files in the output directory
    files = os.listdir(output_dir)

    # Filter only .mp3 files
    mp3_files = [file for file in files if file.endswith('.mp3')]

    if not mp3_files:
        print("No .mp3 files found in the output folder.")
        return None

    # Get the full path for the mp3 files
    mp3_files_with_path = [os.path.join(output_dir, file) for file in mp3_files]

    # Sort the files based on the modification time (most recent first)
    mp3_files_with_path.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    # Return the most recent .mp3 file
    return mp3_files_with_path[0]
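# Illustrative usage:
#   get_last_mp3_file("./output")  # path of the most recently modified .mp3, or None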
def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
    # Create temporary files
    genre_txt_path = create_temp_file(genre_txt_content, prefix="genre_")
    lyrics_txt_path = create_temp_file(lyrics_txt_content, prefix="lyrics_")

    print(f"Genre TXT path: {genre_txt_path}")
    print(f"Lyrics TXT path: {lyrics_txt_path}")

    # Ensure the output folder exists
    output_dir = "./output"
    os.makedirs(output_dir, exist_ok=True)
    print(f"Output folder ensured at: {output_dir}")

    empty_output_folder(output_dir)
    # Command and arguments with optimized settings
    command = [
        "python", "infer.py",
        "--stage1_model", "m-a-p/YuE-s1-7B-anneal-en-cot",
        "--stage2_model", "m-a-p/YuE-s2-1B-general",
        "--genre_txt", genre_txt_path,
        "--lyrics_txt", lyrics_txt_path,
        "--run_n_segments", str(num_segments),
        "--stage2_batch_size", "4",
        "--output_dir", output_dir,
        "--cuda_idx", "0",
        "--max_new_tokens", str(max_new_tokens)
    ]
    # Set up environment variables for CUDA with optimized settings
    env = os.environ.copy()
    env.update({
        "CUDA_VISIBLE_DEVICES": "0",
        "CUDA_HOME": "/usr/local/cuda",
        "PATH": f"/usr/local/cuda/bin:{env.get('PATH', '')}",
        "LD_LIBRARY_PATH": f"/usr/local/cuda/lib64:{env.get('LD_LIBRARY_PATH', '')}"
    })
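    # Note: these values assume a conventional /usr/local/cuda installation;
    # adjust CUDA_HOME and the library paths if CUDA lives elsewhere.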
    # Execute the command
    try:
        subprocess.run(command, check=True, env=env)
        print("Command executed successfully!")

        # Check and print the contents of the output folder
        output_files = os.listdir(output_dir)
        if output_files:
            print("Output folder contents:")
            for file in output_files:
                print(f"- {file}")

            last_mp3 = get_last_mp3_file(output_dir)
            if last_mp3:
                print("Last .mp3 file:", last_mp3)
                instrumental_mp3_path = "./output/vocoder/stems/instrumental.mp3"
                vocal_mp3_path = "./output/vocoder/stems/vocal.mp3"
                return last_mp3, instrumental_mp3_path, vocal_mp3_path
            else:
                return None, None, None
        else:
            print("Output folder is empty.")
            raise gr.Error("Error occurred: Output folder is empty.")
    except subprocess.CalledProcessError as e:
        print(f"Error occurred: {e}")
        raise gr.Error(f"Error occurred: {e}")
    finally:
        # Clean up temporary files
        os.remove(genre_txt_path)
        os.remove(lyrics_txt_path)
        print("Temporary files deleted.")
# Gradio
with gr.Blocks() as demo:
    with gr.Column():
        gr.Markdown("# YuE: Open Music Foundation Models for Full-Song Generation")
        gr.HTML("""
        <div style="display:flex;column-gap:4px;">
            <a href="https://github.com/multimodal-art-projection/YuE">
                <img src='https://img.shields.io/badge/GitHub-Repo-blue'>
            </a>
            <a href="https://map-yue.github.io">
                <img src='https://img.shields.io/badge/Project-Page-green'>
            </a>
            <a href="https://huggingface.co/spaces/fffiloni/YuE?duplicate=true">
                <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-sm.svg" alt="Duplicate this Space">
            </a>
        </div>
        """)
        with gr.Row():
            with gr.Column():
                with gr.Accordion("Pro Tips", open=False):
                    gr.Markdown("""
                    **Tips:**
                    1. `genres` should include details like instruments, genre, mood, vocal timbre, and vocal gender.
                    2. The length of the `lyrics` segments should match the `--max_new_tokens` value. For example, if `--max_new_tokens` is set to 3000, the maximum duration of a segment is around 30 seconds, so make sure your lyrics fit within that time frame.

                    **Notice:**
                    1. A suitable [Genre] tag consists of five components: genre, instrument, mood, gender, and timbre. All five should be included if possible, separated by spaces. The timbre values should include "vocal" (e.g., "bright vocal").
                    2. Although our tags have an open vocabulary, we have provided the 200 most commonly used <a href="https://github.com/multimodal-art-projection/YuE/blob/main/top_200_tags.json" id="tags_link" target="_blank">tags</a>. It is recommended to select tags from this list for more stable results.
                    3. The order of the tags is flexible. For example, a stable genre control string might look like: "inspiring female uplifting pop airy vocal electronic bright vocal vocal."
                    4. Additionally, we have introduced the "Mandarin" and "Cantonese" tags to distinguish between Mandarin and Cantonese, as their lyrics often share similarities.
                    """)
                genre_txt = gr.Textbox(
                    label="Genre",
                    placeholder="Example: inspiring female uplifting pop airy vocal...",
                    info="Genre tags describing the musical style or characteristics (e.g., instruments, genre, mood, vocal timbre, vocal gender). This text is used as part of the generation prompt."
                )
                lyrics_txt = gr.Textbox(
                    label="Lyrics", lines=12,
                    placeholder="""Type the lyrics here...
Use at least 2 segments and annotate each segment with brackets: [verse] [chorus] [bridge]""",
                    info="Lyrics for the music generation. They will be processed and split into structured segments to guide the generation process."
                )
            with gr.Column():
                num_segments = gr.Number(label="Number of Segments", value=2, interactive=False)
                max_new_tokens = gr.Slider(label="Max New Tokens", minimum=500, maximum=3000, step=500, value=1500, interactive=True)
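                # Per the Pro Tips above, ~3000 new tokens corresponds to roughly
                # 30 seconds of audio per segment, so a smaller token budget yields
                # shorter segments.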
                submit_btn = gr.Button("Submit")
                music_out = gr.Audio(label="Audio Result")
                with gr.Accordion("Vocal & Instrumental", open=False):
                    instrumental = gr.Audio(label="Instrumental")
                    vocal = gr.Audio(label="Vocal")
        gr.Examples(
            examples=[
                [
                    "female blues airy vocal bright vocal piano sad romantic guitar jazz",
                    """[verse]
In the quiet of the evening, shadows start to fall
Whispers of the night wind echo through the hall
Lost within the silence, I hear your gentle voice
Guiding me back homeward, making my heart rejoice
[chorus]
Don't let this moment fade, hold me close tonight
With you here beside me, everything's alright
Can't imagine life alone, don't want to let you go
Stay with me forever, let our love just flow"""
                ],
                [
                    "rap piano street tough piercing vocal hip-hop synthesizer clear vocal male",
                    """[verse]
Woke up in the morning, sun is shining bright
Chasing all my dreams, gotta get my mind right
City lights are fading, but my vision's clear
Got my team beside me, no room for fear
Walking through the streets, beats inside my head
Every step I take, closer to the bread
People passing by, they don't understand
Building up my future with my own two hands
[chorus]
This is my life, and I'm aiming for the top
Never gonna quit, no, I'm never gonna stop
Through the highs and lows, I'mma keep it real
Living out my dreams with this mic and a deal"""
                ]
            ],
            inputs=[genre_txt, lyrics_txt]
        )
    submit_btn.click(
        fn=infer,
        inputs=[genre_txt, lyrics_txt, num_segments, max_new_tokens],
        outputs=[music_out, instrumental, vocal]
    )

demo.queue().launch(show_api=False, show_error=True)