import streamlit as st
from transformers import pipeline
import scipy.io.wavfile
from openai import OpenAI
import time
import numpy as np

# Client for the AI/ML API's OpenAI-compatible endpoint.
client = OpenAI(
    api_key="a99ae8e15f1e439a935b5e1cf2005c8b",
    base_url="https://api.aimlapi.com",
)
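# Note: hard-coding the key works for a quick demo; a common alternative is reading it
# from an environment variable instead (illustrative sketch, assuming `import os` at the top
# and an env var name of your choosing):
#   client = OpenAI(api_key=os.environ["AIML_API_KEY"], base_url="https://api.aimlapi.com")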

st.title("Mood-based Music Generator")

user_feeling = st.text_input("How are you feeling right now?", value="feeling down")
music_style = st.text_input("What music style do you prefer?", value="pop")

if st.button("Generate Music"):
    with st.spinner("Generating music, please wait..."):
        # Ask the LLM to turn the user's mood into a short musical description.
        response = client.chat.completions.create(
            model="meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
            messages=[
                {
                    "role": "system",
                    "content": "You are a musical assistant that, based on a user's feeling, can describe it as a musical instrument. Provide a short one-sentence response."
                },
                {
                    "role": "user",
                    "content": f"I am feeling {user_feeling}. Can you make me happy with a {music_style} style of music?"
                },
            ],
        )

        message = response.choices[0].message.content
        st.write(f"Assistant: {message}")

        # Load the MusicGen text-to-audio pipeline (the checkpoint is downloaded on first use).
        synthesiser = pipeline("text-to-audio", "facebook/musicgen-small")

        # Short pause before starting audio generation.
        time.sleep(2)

        # Generate audio conditioned on the assistant's one-sentence description.
        music = synthesiser(message, forward_params={"do_sample": True, "guidance_scale": 1})

        # Save the generated waveform to a WAV file.
        audio_filename = "musicgen_out.wav"
        scipy.io.wavfile.write(audio_filename, rate=music["sampling_rate"], data=np.array(music["audio"]))

        st.success("Music has been generated!")

        # Play the generated track in the app.
        st.audio(audio_filename)
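
One optional refinement, not part of the original snippet: the script creates the MusicGen pipeline inside the button handler, so the model is reloaded on every click. Streamlit's st.cache_resource can keep a single instance alive across reruns. A minimal sketch, assuming the same facebook/musicgen-small checkpoint:

import streamlit as st
from transformers import pipeline

@st.cache_resource
def load_synthesiser():
    # Loaded once per server process and reused across reruns/button clicks.
    return pipeline("text-to-audio", "facebook/musicgen-small")

synthesiser = load_synthesiser()

With the dependencies installed (streamlit, openai, transformers, torch, scipy, numpy), save the script to a file, e.g. app.py, and start it with streamlit run app.py.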