Spaces:
Sleeping
Sleeping
abdullahmubeen10
committed on
Upload 15 files
Browse files
- .streamlit/config.toml +3 -0
- Demo.py +133 -0
- Dockerfile +70 -0
- inputs/audio-1.flac +0 -0
- inputs/audio-10.flac +0 -0
- inputs/audio-2.flac +0 -0
- inputs/audio-3.flac +0 -0
- inputs/audio-4.flac +0 -0
- inputs/audio-5.flac +0 -0
- inputs/audio-6.flac +0 -0
- inputs/audio-7.flac +0 -0
- inputs/audio-8.flac +0 -0
- inputs/audio-9.flac +0 -0
- pages/Workflow & Model Overview.py +201 -0
- requirements.txt +5 -0
.streamlit/config.toml
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
# Streamlit theme configuration for this Space.
[theme]
# Start from the built-in light theme.
base="light"
# Accent color used for interactive widgets (sliders, buttons, links).
primaryColor="#29B4E8"
|
Demo.py
ADDED
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import sparknlp
|
3 |
+
import os
|
4 |
+
import pandas as pd
|
5 |
+
import librosa
|
6 |
+
|
7 |
+
from sparknlp.base import *
|
8 |
+
from sparknlp.common import *
|
9 |
+
from sparknlp.annotator import *
|
10 |
+
from pyspark.ml import Pipeline
|
11 |
+
from sparknlp.pretrained import PretrainedPipeline
|
12 |
+
from pyspark.sql.types import *
|
13 |
+
import pyspark.sql.functions as F
|
14 |
+
|
15 |
+
# Page configuration: wide layout keeps the transcription panel full-width;
# sidebar visibility is left to Streamlit's default heuristic.
st.set_page_config(
    layout="wide",
    initial_sidebar_state="auto"
)

# Custom CSS for styling the title banner and the grey "section" cards
# used throughout the page (injected as raw HTML).
st.markdown("""
    <style>
    .main-title {
        font-size: 36px;
        color: #4A90E2;
        font-weight: bold;
        text-align: center;
    }
    .section {
        background-color: #f9f9f9;
        padding: 10px;
        border-radius: 10px;
        margin-top: 10px;
    }
    .section p, .section ul {
        color: #666666;
    }
    </style>
""", unsafe_allow_html=True)
|
41 |
+
|
42 |
+
@st.cache_resource
def init_spark():
    """Start (or reuse) the Spark NLP session.

    Wrapped in ``st.cache_resource`` so the JVM-backed Spark session is
    created only once per Streamlit server process and shared across reruns.
    """
    session = sparknlp.start()
    return session
|
46 |
+
|
47 |
+
@st.cache_resource
def create_pipeline(model):
    """Build the two-stage Spark NLP pipeline: raw audio -> transcribed text.

    Args:
        model: Name of the pretrained ``HubertForCTC`` model to download.

    Returns:
        An unfitted ``pyspark.ml.Pipeline`` (cached per model name).
    """
    stages = [
        # Wraps the raw float samples into Spark NLP's audio annotation.
        AudioAssembler()
            .setInputCol("audio_content")
            .setOutputCol("audio_assembler"),
        # CTC-based speech-to-text head; emits the transcription as "text".
        HubertForCTC.pretrained(model)
            .setInputCols("audio_assembler")
            .setOutputCol("text"),
    ]
    return Pipeline(stages=stages)
|
64 |
+
|
65 |
+
def fit_data(pipeline, fed_data):
    """Transcribe a single audio file and return its annotation result.

    The audio is loaded and resampled to 16 kHz (the rate the pretrained
    model was trained on), the pipeline is fitted on a one-row DataFrame,
    and a LightPipeline performs the fast single-sample annotation.

    NOTE(review): depends on the module-level ``spark`` session existing
    before this function is called — confirm the call order in the script.

    Args:
        pipeline: The unfitted Spark NLP pipeline from ``create_pipeline``.
        fed_data: Path to the audio file to transcribe.

    Returns:
        The first ``fullAnnotate`` result (a dict of annotation lists).
    """
    samples, _rate = librosa.load(fed_data, sr=16000)
    samples = samples.tolist()
    audio_df = spark.createDataFrame([[samples]], ["audio_content"])

    fitted = pipeline.fit(audio_df)
    light = LightPipeline(fitted)
    return light.fullAnnotate(samples)[0]
|
75 |
+
|
76 |
+
def save_uploadedfile(uploadedfile, path):
    """Persist an uploaded file object under *path*, keeping its name.

    Works with Streamlit's ``UploadedFile`` (which exposes ``getbuffer()``)
    as well as any plain file-like object that only supports ``read()``.

    Args:
        uploadedfile: File-like object with a ``name`` attribute.
        path: Existing directory to write into.
    """
    destination = os.path.join(path, uploadedfile.name)
    # Prefer the zero-copy buffer when available, fall back to read().
    payload = (
        uploadedfile.getbuffer()
        if hasattr(uploadedfile, "getbuffer")
        else uploadedfile.read()
    )
    with open(destination, "wb") as out:
        out.write(payload)
|
84 |
+
|
85 |
+
# Sidebar content: model picker (currently a single pretrained option).
model_list = ["asr_hubert_large_ls960"]
model = st.sidebar.selectbox(
    "Choose the pretrained model",
    model_list,
    help="For more info about the models visit: https://sparknlp.org/models"
)

# Main content
st.markdown('<div class="main-title">Speech Recognition With HubertForCTC</div>', unsafe_allow_html=True)
st.markdown('<div class="section"><p>This demo transcribes audio files into texts using the <code>HubertForCTC</code> Annotator and advanced speech recognition models.</p></div>', unsafe_allow_html=True)

# Reference notebook link in sidebar
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown("""
<a href="https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/open-source-nlp/17.0.Speech_Recognition.ipynb">
    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
</a>
""", unsafe_allow_html=True)

# Load bundled example clips from the inputs/ directory.
AUDIO_FILE_PATH = "inputs"
audio_files = sorted(os.listdir(AUDIO_FILE_PATH))

selected_audio = st.selectbox("Select an audio", audio_files)

# Accepted upload extensions (librosa/soundfile handles the decoding).
audio_file_types = ["mp3", "flac", "wav", "aac", "ogg", "aiff", "wma", "m4a", "ape", "dsf", "dff", "midi", "mid", "opus", "amr"]
uploadedfile = st.file_uploader("Try it for yourself!", type=audio_file_types)

# An upload takes priority over the example selection; either way
# selected_audio ends up as a path inside AUDIO_FILE_PATH.
if uploadedfile:
    selected_audio = f"{AUDIO_FILE_PATH}/{uploadedfile.name}"
    save_uploadedfile(uploadedfile, AUDIO_FILE_PATH)
elif selected_audio:
    selected_audio = f"{AUDIO_FILE_PATH}/{selected_audio}"

# Audio playback and transcription
st.subheader("Play Audio")

with open(selected_audio, 'rb') as audio_file:
    audio_bytes = audio_file.read()
    st.audio(audio_bytes)

spark = init_spark()
pipeline = create_pipeline(model)
output = fit_data(pipeline, selected_audio)

# Fix: these were f-strings with no placeholders (f"Transcription:",
# f"{...}") — the stray f-prefixes are dropped; rendered text is unchanged.
st.subheader("Transcription:")
# Title-case the transcription purely for display.
st.markdown(output['text'][0].result.title())
|
Dockerfile
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Download base image ubuntu 18.04
FROM ubuntu:18.04

# Set environment variables for the non-root notebook-style user.
ENV NB_USER jovyan
ENV NB_UID 1000
ENV HOME /home/${NB_USER}

# Install required packages: build tools, native libs for the Python
# audio/plotting stack, and OpenJDK 8 (required by PySpark / Spark NLP).
RUN apt-get update && apt-get install -y \
    tar \
    wget \
    bash \
    rsync \
    gcc \
    libfreetype6-dev \
    libhdf5-serial-dev \
    libpng-dev \
    libzmq3-dev \
    python3 \
    python3-dev \
    python3-pip \
    unzip \
    pkg-config \
    software-properties-common \
    graphviz \
    openjdk-8-jdk \
    ant \
    ca-certificates-java \
    && apt-get clean \
    && update-ca-certificates -f;

# Install Python 3.8 and pip from the deadsnakes PPA (18.04 ships 3.6).
# NOTE(review): python3/python3-pip are also installed above — the system
# python3 and this 3.8 coexist; all later commands pin `python3.8` explicitly.
RUN add-apt-repository ppa:deadsnakes/ppa \
    && apt-get update \
    && apt-get install -y python3.8 python3-pip \
    && apt-get clean;

# Set up JAVA_HOME so Spark can locate the JVM.
ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/
RUN mkdir -p ${HOME} \
    && echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/" >> ${HOME}/.bashrc \
    && chown -R ${NB_UID}:${NB_UID} ${HOME}

# Create a new user named "jovyan" with user ID 1000
RUN useradd -m -u ${NB_UID} ${NB_USER}

# Switch to the "jovyan" user
USER ${NB_USER}

# Set home and path variables for the user (picks up pip --user installs).
ENV HOME=/home/${NB_USER} \
    PATH=/home/${NB_USER}/.local/bin:$PATH

# Set the working directory to the user's home directory
WORKDIR ${HOME}

# Upgrade pip and install Python dependencies (copied first so the layer
# is cached unless requirements.txt changes).
RUN python3.8 -m pip install --upgrade pip
COPY requirements.txt /tmp/requirements.txt
RUN python3.8 -m pip install -r /tmp/requirements.txt

# Copy the application code into the container at /home/jovyan
COPY --chown=${NB_USER}:${NB_USER} . ${HOME}

# Expose port for Streamlit (7860 is the Hugging Face Spaces convention).
EXPOSE 7860

# Define the entry point for the container
ENTRYPOINT ["streamlit", "run", "Demo.py", "--server.port=7860", "--server.address=0.0.0.0"]
|
inputs/audio-1.flac
ADDED
Binary file (112 kB). View file
|
|
inputs/audio-10.flac
ADDED
Binary file (76 kB). View file
|
|
inputs/audio-2.flac
ADDED
Binary file (49 kB). View file
|
|
inputs/audio-3.flac
ADDED
Binary file (74 kB). View file
|
|
inputs/audio-4.flac
ADDED
Binary file (113 kB). View file
|
|
inputs/audio-5.flac
ADDED
Binary file (138 kB). View file
|
|
inputs/audio-6.flac
ADDED
Binary file (36.5 kB). View file
|
|
inputs/audio-7.flac
ADDED
Binary file (177 kB). View file
|
|
inputs/audio-8.flac
ADDED
Binary file (94.3 kB). View file
|
|
inputs/audio-9.flac
ADDED
Binary file (129 kB). View file
|
|
pages/Workflow & Model Overview.py
ADDED
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st

# Static documentation page: describes the HuBERT model, its use cases,
# a usage example, model metadata, and reference links. All content is
# rendered as raw HTML via st.markdown(..., unsafe_allow_html=True).

# Custom CSS for better styling
st.markdown("""
<style>
    .main-title {
        font-size: 36px;
        color: #4A90E2;
        font-weight: bold;
        text-align: center;
    }
    .sub-title {
        font-size: 24px;
        color: #4A90E2;
        margin-top: 20px;
    }
    .section {
        background-color: #f9f9f9;
        padding: 15px;
        border-radius: 10px;
        margin-top: 20px;
    }
    .section h2 {
        font-size: 22px;
        color: #4A90E2;
    }
    .section p, .section ul {
        color: #666666;
    }
    .link {
        color: #4A90E2;
        text-decoration: none;
    }
    .benchmark-table {
        width: 100%;
        border-collapse: collapse;
        margin-top: 20px;
    }
    .benchmark-table th, .benchmark-table td {
        border: 1px solid #ddd;
        padding: 8px;
        text-align: left;
    }
    .benchmark-table th {
        background-color: #4A90E2;
        color: white;
    }
    .benchmark-table td {
        background-color: #f2f2f2;
    }
</style>
""", unsafe_allow_html=True)

# Main Title
st.markdown('<div class="main-title">HuBERT for Speech Recognition</div>', unsafe_allow_html=True)

# Introduction
st.markdown("""
<div class="section">
    <p><strong>HuBERT</strong> (Hidden-Unit BERT) is a self-supervised speech representation model introduced in the paper <em>HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units</em> by Wei-Ning Hsu et al. It tackles challenges in speech representation by predicting hidden units derived from clustered speech features, enabling the model to learn acoustic and language representations from unsegmented and unannotated audio data.</p>
</div>
""", unsafe_allow_html=True)

# Why, Where, and When to Use HuBERT

st.markdown('<div class="sub-title">Why, Where, and When to Use HuBERT</div>', unsafe_allow_html=True)

# Explanation Section
st.markdown("""
<div class="section">
    <p><strong>HuBERT</strong> is particularly useful in scenarios where high-quality speech-to-text conversion is required and where there is a need for robust speech representation learning. The model’s design makes it suitable for tasks where data may be noisy or unannotated. Key use cases include:</p>
</div>
""", unsafe_allow_html=True)

# Use Cases Section
st.markdown('<div class="sub-title">Use Cases</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
    <ul>
        <li><strong>Noisy Environment Transcription:</strong> Ideal for transcribing speech in noisy or challenging audio environments, such as call centers or field recordings.</li>
        <li><strong>Preprocessing for NLP Tasks:</strong> Converts spoken language into text for NLP tasks like sentiment analysis, topic modeling, or entity recognition.</li>
        <li><strong>Audio Content Analysis:</strong> Efficiently analyzes large volumes of audio content, enabling keyword extraction and content summarization.</li>
        <li><strong>Language Model Enhancement:</strong> Enhances language models by providing robust speech representations, improving accuracy in tasks like machine translation or voice-activated systems.</li>
    </ul>
</div>
""", unsafe_allow_html=True)

# How to Use the Model (code sample shown to the reader, not executed here)
st.markdown('<div class="sub-title">HuBERT Pipeline in Spark NLP</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
    <p>To use the HuBERT model in Spark NLP, follow the example code below. This code demonstrates how to assemble audio data and apply the HubertForCTC annotator to convert speech to text.</p>
</div>
""", unsafe_allow_html=True)
st.code('''
audio_assembler = AudioAssembler()\\
    .setInputCol("audio_content")\\
    .setOutputCol("audio_assembler")

speech_to_text = HubertForCTC.pretrained("asr_hubert_large_ls960", "en")\\
    .setInputCols("audio_assembler")\\
    .setOutputCol("text")

pipeline = Pipeline(stages=[
    audio_assembler,
    speech_to_text,
])

pipelineModel = pipeline.fit(audioDf)

pipelineDF = pipelineModel.transform(audioDf)
''', language='python')

# Model Information
st.markdown('<div class="sub-title">Model Information</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
    <table class="benchmark-table">
        <tr>
            <th>Attribute</th>
            <th>Description</th>
        </tr>
        <tr>
            <td><strong>Model Name</strong></td>
            <td>asr_hubert_large_ls960</td>
        </tr>
        <tr>
            <td><strong>Compatibility</strong></td>
            <td>Spark NLP 4.3.0+</td>
        </tr>
        <tr>
            <td><strong>License</strong></td>
            <td>Open Source</td>
        </tr>
        <tr>
            <td><strong>Edition</strong></td>
            <td>Official</td>
        </tr>
        <tr>
            <td><strong>Input Labels</strong></td>
            <td>[audio_assembler]</td>
        </tr>
        <tr>
            <td><strong>Output Labels</strong></td>
            <td>[text]</td>
        </tr>
        <tr>
            <td><strong>Language</strong></td>
            <td>en</td>
        </tr>
        <tr>
            <td><strong>Size</strong></td>
            <td>1.5 GB</td>
        </tr>
    </table>
</div>
""", unsafe_allow_html=True)

# Data Source Section
st.markdown('<div class="sub-title">Data Source</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
    <p>The HuBERT model is available on <a class="link" href="https://huggingface.co/facebook/hubert-large-ls960-ft" target="_blank">Hugging Face</a>. It was fine-tuned on 960 hours of Librispeech data and is optimized for 16kHz sampled speech audio. Ensure your input audio is sampled at the same rate for optimal performance.</p>
</div>
""", unsafe_allow_html=True)

# Conclusion
st.markdown('<div class="sub-title">Conclusion</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
    <p><strong>HuBERT</strong> offers a powerful solution for self-supervised speech recognition, especially in challenging audio environments. Its ability to learn from unannotated data and predict masked speech units makes it a robust model for various speech-related tasks. Integrated into Spark NLP, HuBERT is ready for large-scale deployment, supporting a wide range of applications from transcription to feature extraction.</p>
    <p>If you’re working on speech recognition projects that require resilience to noise and variability, HuBERT provides an advanced, scalable option.</p>
</div>
""", unsafe_allow_html=True)

# References
st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
    <ul>
        <li><a class="link" href="https://sparknlp.org/2023/02/07/asr_hubert_large_ls960_en.html" target="_blank">HuBERT Model on Sparknlp</a></li>
        <li><a class="link" href="https://huggingface.co/facebook/hubert-large-ls960-ft" target="_blank">HuBERT Model on Hugging Face</a></li>
        <li><a class="link" href="https://github.com/pytorch/fairseq/tree/master/examples/hubert" target="_blank">HuBERT GitHub Repository</a></li>
        <li><a class="link" href="https://arxiv.org/abs/2106.07447" target="_blank">HuBERT Paper on arXiv</a></li>
    </ul>
</div>
""", unsafe_allow_html=True)

# Community & Support
st.markdown('<div class="sub-title">Community & Support</div>', unsafe_allow_html=True)
st.markdown("""
<div class="section">
    <ul>
        <li><a class="link" href="https://sparknlp.org/" target="_blank">Official Website</a>: Documentation and examples</li>
        <li><a class="link" href="https://join.slack.com/t/spark-nlp/shared_invite/zt-198dipu77-L3UWNe_AJ8xqDk0ivmih5Q" target="_blank">Slack</a>: Live discussion with the community and team</li>
        <li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp" target="_blank">GitHub</a>: Bug reports, feature requests, and contributions</li>
        <li><a class="link" href="https://medium.com/spark-nlp" target="_blank">Medium</a>: Spark NLP articles</li>
        <li><a class="link" href="https://www.youtube.com/channel/UCmFOjlpYEhxf_wJUDuz6xxQ/videos" target="_blank">YouTube</a>: Video tutorials</li>
    </ul>
</div>
""", unsafe_allow_html=True)
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Runtime dependencies for the Streamlit + Spark NLP speech-recognition demo.
# NOTE(review): versions are unpinned — consider pinning for reproducible builds.
streamlit
spark-nlp
pyspark
librosa
pandas
|