ayaht committed on
Commit d82f662 · verified · 1 Parent(s): 83a88fb

Update music_recommendations.py

Files changed (1): music_recommendations.py (+59 −59)
music_recommendations.py CHANGED
import streamlit as st
from transformers import pipeline
import scipy.io.wavfile
from openai import OpenAI
import time
import numpy as np

# Initialize the OpenAI-compatible client for the AI/ML API
client = OpenAI(
    api_key="a99ae8e15f1e439a935b5e1cf2005c8b",
    base_url="https://api.aimlapi.com",
)
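# Note: the key above is hardcoded as in the original commit. A common
# alternative is to read it from the environment so it never lands in the
# repo. A minimal sketch, assuming a variable named AIML_API_KEY is exported
# before launch (the variable name is hypothetical):
#
#   import os
#   client = OpenAI(api_key=os.environ["AIML_API_KEY"], base_url="https://api.aimlapi.com")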

# Streamlit app layout
st.title("Mood-based Music Generator")

# Ask the user for their feeling and preferred music style via Streamlit inputs
user_feeling = st.text_input("How are you feeling right now?", value="feeling down")
music_style = st.text_input("What music style do you prefer?", value="pop")

# Button to trigger music generation
if st.button("Generate Music"):
    with st.spinner("Generating music, please wait..."):
        # Ask the chat model (Llama 3.1 via the AI/ML API) for a short musical
        # description of the user's feeling; its reply becomes the MusicGen prompt
        response = client.chat.completions.create(
            model="meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
            messages=[
                {
                    "role": "system",
                    "content": "You are a musical assistant that, based on a user's feeling, can describe it as a musical instrument. Provide a short one-sentence response."
                },
                {
                    "role": "user",
                    "content": f"I am feeling {user_feeling}. Can you make me happy with a {music_style} style of music?"
                },
            ],
        )

        message = response.choices[0].message.content
        st.write(f"Assistant: {message}")

        # Load the synthesizer model for music generation
        synthesiser = pipeline("text-to-audio", "facebook/musicgen-small")
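        # Note: loading the pipeline inside the button handler re-initializes
        # the model on every click. A sketch of caching it with Streamlit's
        # resource cache (assumes Streamlit >= 1.18) would define, at module
        # level:
        #
        #   @st.cache_resource
        #   def load_synthesiser():
        #       return pipeline("text-to-audio", "facebook/musicgen-small")
        #
        # and use synthesiser = load_synthesiser() here instead.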

        # Simulate a short wait to represent loading time for music generation
        time.sleep(2)

        # Generate the music with MusicGen, using the assistant's reply as the prompt
        music = synthesiser(message, forward_params={"do_sample": True, "guidance_scale": 1})

        # Save the generated audio to a WAV file; the pipeline returns a dict
        # with the waveform ("audio") and its sampling rate ("sampling_rate")
        audio_filename = "musicgen_out.wav"
        scipy.io.wavfile.write(audio_filename, rate=music["sampling_rate"], data=np.array(music["audio"]))

        st.success("Music has been generated!")

        # Play the generated audio in Streamlit
        st.audio(audio_filename)
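The app is launched with `streamlit run music_recommendations.py` once streamlit, transformers, openai, scipy, numpy, and a PyTorch backend are installed. For checking the MusicGen step in isolation, a minimal sketch without the Streamlit and chat-completion pieces could look like the following (the prompt string and output filename here are arbitrary examples, not from the app):

# standalone_musicgen_test.py — hypothetical test script, not part of the commit
from transformers import pipeline
import scipy.io.wavfile
import numpy as np

# Load the same text-to-audio model the app uses
synthesiser = pipeline("text-to-audio", "facebook/musicgen-small")

# Generate a short clip straight from a fixed text prompt
music = synthesiser(
    "an upbeat pop tune with a cheerful melody",
    forward_params={"do_sample": True, "guidance_scale": 1},
)

# Write the waveform out at the sampling rate the pipeline reports
scipy.io.wavfile.write(
    "musicgen_test.wav",
    rate=music["sampling_rate"],
    data=np.array(music["audio"]),
)
print("wrote musicgen_test.wav")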