abdullahmubeen10 commited on
Commit
062d9c4
·
verified ·
1 Parent(s): cfdf9b8

Upload 5 files

Browse files
.streamlit/config.toml ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [theme]
2
+ base="light"
3
+ primaryColor="#29B4E8"
Demo.py ADDED
@@ -0,0 +1,235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import sparknlp
3
+ import pandas as pd
4
+
5
+ from sparknlp.base import *
6
+ from sparknlp.annotator import *
7
+ from pyspark.ml import Pipeline
8
+ from annotated_text import annotated_text
9
+ from streamlit_tags import st_tags
10
+
11
+ # Page configuration
12
+ st.set_page_config(layout="wide", initial_sidebar_state="auto")
13
+
14
+ # CSS for styling
15
+ st.markdown("""
16
+ <style>
17
+ .main-title {
18
+ font-size: 36px;
19
+ color: #4A90E2;
20
+ font-weight: bold;
21
+ text-align: center;
22
+ }
23
+ .section {
24
+ background-color: #f9f9f9;
25
+ padding: 10px;
26
+ border-radius: 10px;
27
+ margin-top: 10px;
28
+ }
29
+ .section p, .section ul {
30
+ color: #666666;
31
+ }
32
+ </style>
33
+ """, unsafe_allow_html=True)
34
+
35
@st.cache_resource
def init_spark():
    """Start (or attach to) the Spark NLP session.

    Cached with st.cache_resource so the JVM/Spark session is created only
    once per Streamlit server process, not on every script rerun.
    """
    return sparknlp.start()
38
+
39
@st.cache_resource
def create_pipeline(task, model_name):
    """Build and cache a Spark NLP pipeline for the selected ALBERT task.

    Args:
        task: One of "Token Classification", "Sequence Classification" or
            "Question Answering" (the keys of ``tasks_models_descriptions``).
        model_name: Name of the pretrained Spark NLP model to load.

    Returns:
        A ``pyspark.ml.Pipeline`` with the stages appropriate for ``task``.

    Raises:
        ValueError: If ``task`` is not one of the supported tasks. (The
            original code fell through every branch and crashed with
            UnboundLocalError on ``return pipeline`` instead.)
    """
    document_assembler = DocumentAssembler().setInputCol('text').setOutputCol('document')
    tokenizer = Tokenizer().setInputCols(['document']).setOutputCol('token')

    if task == "Token Classification":
        token_classifier = AlbertForTokenClassification() \
            .pretrained(model_name, 'en') \
            .setInputCols(['token', 'document']) \
            .setOutputCol('ner') \
            .setCaseSensitive(False) \
            .setMaxSentenceLength(512)

        # Convert token-level NER tags into entity chunks for display.
        ner_converter = NerConverter() \
            .setInputCols(['document', 'token', 'ner']) \
            .setOutputCol('entities')

        pipeline = Pipeline(stages=[document_assembler, tokenizer, token_classifier, ner_converter])

    elif task == "Sequence Classification":
        sequence_classifier = AlbertForSequenceClassification() \
            .pretrained(model_name, 'en') \
            .setInputCols(['token', 'document']) \
            .setOutputCol('class') \
            .setCaseSensitive(False) \
            .setMaxSentenceLength(512)

        pipeline = Pipeline(stages=[document_assembler, tokenizer, sequence_classifier])

    elif task == "Question Answering":
        # QA takes separate question/context inputs, so replace the
        # single-column assembler built above with a multi-document one.
        document_assembler = MultiDocumentAssembler() \
            .setInputCols(["question", "context"]) \
            .setOutputCols(["document_question", "document_context"])

        span_classifier = AlbertForQuestionAnswering() \
            .pretrained(model_name, "en") \
            .setInputCols(["document_question", "document_context"]) \
            .setOutputCol("answer") \
            .setCaseSensitive(False)

        pipeline = Pipeline(stages=[document_assembler, span_classifier])

    else:
        raise ValueError(f"Unsupported task: {task!r}")

    return pipeline
82
+
83
def fit_data(pipeline, task, data, question=None, context=None):
    """Run the pipeline on the user input and return the raw results.

    For the classification tasks, `data` is annotated via a LightPipeline;
    for Question Answering, a one-row (question, context) DataFrame is
    transformed and the `answer.result` column collected.

    NOTE(review): relies on the module-level `spark` session that is created
    later in the script via init_spark() — confirm it exists before any call.
    NOTE(review): if `task` matches none of the handled values, `result` is
    never bound and the final `return` raises UnboundLocalError.
    """
    if task in ['Token Classification', 'Sequence Classification']:
        # Fitting on an empty frame is enough: all stages are pretrained.
        empty_df = spark.createDataFrame([['']]).toDF('text')
        pipeline_model = pipeline.fit(empty_df)
        model = LightPipeline(pipeline_model)
        result = model.fullAnnotate(data)
    elif task == "Question Answering":
        df = spark.createDataFrame([[question, context]]).toDF("question", "context")
        result = pipeline.fit(df).transform(df)
        result = result.select('answer.result').collect()
    return result
94
+
95
+ def annotate_text(data):
96
+ document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
97
+ annotated_words = []
98
+ for chunk, label in zip(chunks, labels):
99
+ parts = document.split(chunk, 1)
100
+ if parts[0]:
101
+ annotated_words.append(parts[0])
102
+ annotated_words.append((chunk, label))
103
+ document = parts[1]
104
+ if document:
105
+ annotated_words.append(document)
106
+ annotated_text(*annotated_words)
107
+
108
+ tasks_models_descriptions = {
109
+ "Token Classification": {
110
+ "models": ["albert_base_token_classifier_conll03"],
111
+ "description": "The 'albert_base_token_classifier_conll03' model excels in identifying and classifying tokens within text. Ideal for tasks like named entity recognition (NER), it accurately extracts entities such as names, dates, and locations."
112
+ },
113
+ "Sequence Classification": {
114
+ "models": ["albert_base_sequence_classifier_ag_news", "albert_base_sequence_classifier_imdb"],
115
+ "description": "The 'albert_base_sequence_classifier_ag_news' model specializes in sentiment analysis and document classification. It accurately assesses the mood of customer reviews, classifies emails, and sorts text corpora."
116
+ },
117
+ "Question Answering": {
118
+ "models": ["albert_base_qa_squad2"],
119
+ "description": "The 'albert_base_qa_squad2' model is designed for answering questions based on provided context. Ideal for chatbots and virtual assistants, it delivers precise answers, improving user interaction and support system efficiency."
120
+ }
121
+ }
122
+
123
# Sidebar content: task and model pickers drive the rest of the script.
task = st.sidebar.selectbox("Choose the task", list(tasks_models_descriptions.keys()))
model_name = st.sidebar.selectbox("Choose the pretrained model", tasks_models_descriptions[task]["models"], help="For more info about the models visit: https://sparknlp.org/models")

# Reference notebook link in sidebar
# NOTE(review): the badge links to the DistilBertForTokenClassification
# notebook although this demo covers ALBERT — confirm the intended URL.
colab_link = """
<a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/DistilBertForTokenClassification.ipynb">
    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
</a>
"""
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown(colab_link, unsafe_allow_html=True)

# Page content: title and a short description of the selected task's model.
st.markdown(f'<div class="main-title">Albert for {task}</div>', unsafe_allow_html=True)
st.write(tasks_models_descriptions[task]["description"])
139
+
140
# Sequence-classification example sentences depend on the selected model:
# news topics for the AG-News classifier, movie reviews for the IMDB one.
# BUG FIX: the original tested the undefined name `model` (NameError at
# runtime); the sidebar selection is stored in `model_name`.
if model_name == 'albert_base_sequence_classifier_ag_news':
    examples_to_select = [
        "The Prime Minister of the country announced new policies aimed at reducing carbon emissions by 40% over the next decade. These measures are part of a global effort to combat climate change.",
        "A devastating earthquake hit the coastal city, leaving thousands homeless. International aid is pouring in to assist in the relief efforts as rescue teams continue to search for survivors.",
        "The basketball star signed a record-breaking contract, making him the highest-paid player in the league. Fans are excited to see how he will perform in the upcoming season.",
        "The Olympic Games concluded with a spectacular closing ceremony. The host nation topped the medal tally with an impressive number of gold medals across various sports.",
        "The stock market saw significant gains today, with the Dow Jones reaching an all-time high. Investors are optimistic about the economic recovery following positive job growth data.",
        "A major retail chain announced the closure of 150 stores nationwide due to declining sales. The company plans to focus more on its online presence to adapt to changing consumer behavior.",
        "Scientists have discovered a new exoplanet that could potentially support life. The planet, located in a nearby star system, has conditions similar to those on Earth.",
        "A leading technology firm unveiled its latest smartphone model, featuring a revolutionary camera system and a powerful new processor. The device is expected to set new standards in mobile computing.",
        "In a historic move, the two rival nations signed a peace treaty, ending decades of hostility. The agreement is seen as a major step toward lasting stability in the region.",
        "The tennis legend announced her retirement after a career spanning two decades, during which she won numerous Grand Slam titles. Tributes are pouring in from across the sporting world.",
        "The energy company reported a significant increase in profits this quarter, driven by higher oil prices. Analysts are predicting continued growth in the coming months.",
        "A new study has revealed that a diet high in fiber can significantly reduce the risk of heart disease. The findings are based on a large-scale analysis of dietary habits over the past decade."
    ]
else:
    examples_to_select = [
        "This movie was absolutely fantastic! The storyline was gripping, the characters were well-developed, and the cinematography was stunning. I was on the edge of my seat the entire time.",
        "A heartwarming and beautiful film. The performances were top-notch, and the direction was flawless. This is easily one of the best movies I've seen this year.",
        "What a delightful surprise! The humor was spot on, and the plot was refreshingly original. The cast did an amazing job bringing the characters to life. Highly recommended!",
        "This was one of the worst movies I’ve ever seen. The plot was predictable, the acting was wooden, and the pacing was painfully slow. I couldn’t wait for it to end.",
        "A complete waste of time. The movie lacked any real substance or direction, and the dialogue was cringe-worthy. I wouldn’t recommend this to anyone.",
        "I had high hopes for this film, but it turned out to be a huge disappointment. The story was disjointed, and the special effects were laughably bad. Don’t bother watching this one.",
        "The movie was okay, but nothing special. It had a few good moments, but overall, it felt pretty average. Not something I would watch again, but it wasn’t terrible either.",
        "An average film with a decent plot. The acting was passable, but it didn't leave much of an impression on me. It's a movie you might watch once and forget about.",
        "This movie was neither good nor bad, just kind of there. It had some interesting ideas, but they weren’t executed very well. It’s a film you could take or leave."
    ]
167
+
168
+ # Load examples
169
+ examples_mapping = {
170
+ "Token Classification": [
171
+ "William Henry Gates III (born October 28, 1955) is an American business magnate, software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft, Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect, while also being the largest individual shareholder until May 2014. He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico; it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect. During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000.[9] He gradually transferred his duties to Ray Ozzie and Craig Mundie. He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.",
172
+ "The Mona Lisa is a 16th century oil painting created by Leonardo. It's held at the Louvre in Paris.",
173
+ "When Sebastian Thrun started working on self-driving cars at Google in 2007, few people outside of the company took him seriously. “I can tell you very senior CEOs of major American car companies would shake my hand and turn away because I wasn’t worth talking to,” said Thrun, now the co-founder and CEO of online higher education startup Udacity, in an interview with Recode earlier this week.",
174
+ "Facebook is a social networking service launched as TheFacebook on February 4, 2004. It was founded by Mark Zuckerberg with his college roommates and fellow Harvard University students Eduardo Saverin, Andrew McCollum, Dustin Moskovitz and Chris Hughes. The website's membership was initially limited by the founders to Harvard students, but was expanded to other colleges in the Boston area, the Ivy League, and gradually most universities in the United States and Canada.",
175
+ "The history of natural language processing generally started in the 1950s, although work can be found from earlier periods. In 1950, Alan Turing published an article titled 'Computing Machinery and Intelligence' which proposed what is now called the Turing test as a criterion of intelligence",
176
+ "Geoffrey Everest Hinton is an English Canadian cognitive psychologist and computer scientist, most noted for his work on artificial neural networks. Since 2013 he divides his time working for Google and the University of Toronto. In 2017, he cofounded and became the Chief Scientific Advisor of the Vector Institute in Toronto.",
177
+ "When I told John that I wanted to move to Alaska, he warned me that I'd have trouble finding a Starbucks there.",
178
+ "Steven Paul Jobs was an American business magnate, industrial designer, investor, and media proprietor. He was the chairman, chief executive officer (CEO), and co-founder of Apple Inc., the chairman and majority shareholder of Pixar, a member of The Walt Disney Company's board of directors following its acquisition of Pixar, and the founder, chairman, and CEO of NeXT. Jobs is widely recognized as a pioneer of the personal computer revolution of the 1970s and 1980s, along with Apple co-founder Steve Wozniak. Jobs was born in San Francisco, California, and put up for adoption. He was raised in the San Francisco Bay Area. He attended Reed College in 1972 before dropping out that same year, and traveled through India in 1974 seeking enlightenment and studying Zen Buddhism.",
179
+ "Titanic is a 1997 American epic romance and disaster film directed, written, co-produced, and co-edited by James Cameron. Incorporating both historical and fictionalized aspects, it is based on accounts of the sinking of the RMS Titanic, and stars Leonardo DiCaprio and Kate Winslet as members of different social classes who fall in love aboard the ship during its ill-fated maiden voyage.",
180
+ "Other than being the king of the north, John Snow is a an english physician and a leader in the development of anaesthesia and medical hygiene. He is considered for being the first one using data to cure cholera outbreak in 1834."
181
+ ],
182
+ "Sequence Classification": examples_to_select,
183
+ "Question Answering": {
184
+ """What does increased oxygen concentrations in the patient’s lungs displace?""": """Hyperbaric (high-pressure) medicine uses special oxygen chambers to increase the partial pressure of O 2 around the patient and, when needed, the medical staff. Carbon monoxide poisoning, gas gangrene, and decompression sickness (the ’bends’) are sometimes treated using these devices. Increased O 2 concentration in the lungs helps to displace carbon monoxide from the heme group of hemoglobin. Oxygen gas is poisonous to the anaerobic bacteria that cause gas gangrene, so increasing its partial pressure helps kill them. Decompression sickness occurs in divers who decompress too quickly after a dive, resulting in bubbles of inert gas, mostly nitrogen and helium, forming in their blood. Increasing the pressure of O 2 as soon as possible is part of the treatment.""",
185
+ """What category of game is Legend of Zelda: Twilight Princess?""": """The Legend of Zelda: Twilight Princess (Japanese: ゼルダの伝説 トワイライトプリンセス, Hepburn: Zeruda no Densetsu: Towairaito Purinsesu?) is an action-adventure game developed and published by Nintendo for the GameCube and Wii home video game consoles. It is the thirteenth installment in the The Legend of Zelda series. Originally planned for release on the GameCube in November 2005, Twilight Princess was delayed by Nintendo to allow its developers to refine the game, add more content, and port it to the Wii. The Wii version was released alongside the console in North America in November 2006, and in Japan, Europe, and Australia the following month. The GameCube version was released worldwide in December 2006.""",
186
+ """Who is founder of Alibaba Group?""": """Alibaba Group founder Jack Ma has made his first appearance since Chinese regulators cracked down on his business empire. His absence had fuelled speculation over his whereabouts amid increasing official scrutiny of his businesses. The billionaire met 100 rural teachers in China via a video meeting on Wednesday, according to local government media. Alibaba shares surged 5% on Hong Kong's stock exchange on the news.""",
187
+ """For what instrument did Frédéric write primarily for?""": """Frédéric François Chopin (/ˈʃoʊpæn/; French pronunciation: ​[fʁe.de.ʁik fʁɑ̃.swa ʃɔ.pɛ̃]; 22 February or 1 March 1810 – 17 October 1849), born Fryderyk Franciszek Chopin,[n 1] was a Polish and French (by citizenship and birth of father) composer and a virtuoso pianist of the Romantic era, who wrote primarily for the solo piano. He gained and has maintained renown worldwide as one of the leading musicians of his era, whose "poetic genius was based on a professional technique that was without equal in his generation." Chopin was born in what was then the Duchy of Warsaw, and grew up in Warsaw, which after 1815 became part of Congress Poland. A child prodigy, he completed his musical education and composed his earlier works in Warsaw before leaving Poland at the age of 20, less than a month before the outbreak of the November 1830 Uprising.""",
188
+ """The most populated city in the United States is which city?""": """New York—often called New York City or the City of New York to distinguish it from the State of New York, of which it is a part—is the most populous city in the United States and the center of the New York metropolitan area, the premier gateway for legal immigration to the United States and one of the most populous urban agglomerations in the world. A global power city, New York exerts a significant impact upon commerce, finance, media, art, fashion, research, technology, education, and entertainment, its fast pace defining the term New York minute. Home to the headquarters of the United Nations, New York is an important center for international diplomacy and has been described as the cultural and financial capital of the world."""
189
+ }
190
+ }
191
+
192
# Input Handling
# BUG FIX: initialize all three inputs up front. Previously `text_to_analyze`
# was bound only in the non-QA branch and `question`/`context` only in the QA
# branch, so the later `fit_data(pipeline, task, text_to_analyze, question,
# context)` call raised NameError for whichever names the branch left unbound.
question = None
context = None
text_to_analyze = None

if task == 'Question Answering':
    st.subheader('Try it yourself!')
    question_input = st.text_input('Enter a question')
    context_input = st.text_area("Enter the context")
    st.subheader('Selected Text')
    if question_input and context_input:
        question = question_input
        context = context_input
    else:
        st.error("Please enter both question and context.")

else:
    st.subheader('Try it yourself!')
    user_input = st.text_input("Enter a sentence")
    # Fall back to a placeholder so the pipeline always has something to annotate.
    text_to_analyze = user_input if user_input else "Example text"
    st.markdown(f"**Text to Analyze:** {text_to_analyze}")
209
+
210
# Initialize Spark and create pipeline
spark = init_spark()
pipeline = create_pipeline(task, model_name)

# Process Data
# NOTE(review): `text_to_analyze` is bound only in the non-QA branch above and
# `question`/`context` only in the QA branch — as written, Question Answering
# hits NameError on `text_to_analyze`; confirm all three names are
# pre-initialized before this call for every task path.
output = fit_data(pipeline, task, text_to_analyze, question, context)

st.subheader("Processed Output:")

if task == 'Token Classification':
    # First fullAnnotate result: original text plus the extracted entity
    # chunks and their labels (from the NerConverter 'entities' column).
    results = {
        'Document': output[0]['document'][0].result,
        'NER Chunk': [n.result for n in output[0]['entities']],
        "NER Label": [n.metadata['entity'] for n in output[0]['entities']]
    }
    annotate_text(results)
    df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
    df.index += 1  # 1-based row numbering for display
    st.dataframe(df)

elif task == 'Sequence Classification':
    st.markdown(f"Classification: **{output[0]['class'][0].result}**")

elif task == "Question Answering":
    # `output` is the collected Rows of `answer.result`; join the first
    # row's answer tokens into a single string.
    output_text = "".join(output[0][0])
    st.markdown(f"Answer: **{output_text}**")
Dockerfile ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Download base image ubuntu 18.04
FROM ubuntu:18.04

# Set environment variables
# NB_USER/NB_UID follow the Jupyter "jovyan" convention expected by the host platform.
ENV NB_USER jovyan
ENV NB_UID 1000
ENV HOME /home/${NB_USER}
# Java 8 is required by Spark, which Spark NLP runs on.
ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/

# Install required packages
RUN apt-get update && apt-get install -y \
    tar \
    wget \
    bash \
    rsync \
    gcc \
    libfreetype6-dev \
    libhdf5-serial-dev \
    libpng-dev \
    libzmq3-dev \
    python3 \
    python3-dev \
    python3-pip \
    unzip \
    pkg-config \
    software-properties-common \
    graphviz \
    openjdk-8-jdk \
    ant \
    ca-certificates-java \
    && apt-get clean \
    && update-ca-certificates -f

# Install Python 3.8 and pip
# (deadsnakes PPA provides 3.8 on ubuntu 18.04, whose default python3 is 3.6)
RUN add-apt-repository ppa:deadsnakes/ppa \
    && apt-get update \
    && apt-get install -y python3.8 python3-pip \
    && apt-get clean

# Set up JAVA_HOME
RUN echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/" >> /etc/profile \
    && echo "export PATH=\$JAVA_HOME/bin:\$PATH" >> /etc/profile
# Create a new user named "jovyan" with user ID 1000
RUN useradd -m -u ${NB_UID} ${NB_USER}

# Switch to the "jovyan" user
USER ${NB_USER}

# Set home and path variables for the user
# (.local/bin first so user-level pip installs take precedence)
ENV HOME=/home/${NB_USER} \
    PATH=/home/${NB_USER}/.local/bin:$PATH

# Set up PySpark to use Python 3.8 for both driver and workers
ENV PYSPARK_PYTHON=/usr/bin/python3.8
ENV PYSPARK_DRIVER_PYTHON=/usr/bin/python3.8

# Set the working directory to the user's home directory
WORKDIR ${HOME}

# Upgrade pip and install Python dependencies
RUN python3.8 -m pip install --upgrade pip
COPY requirements.txt /tmp/requirements.txt
RUN python3.8 -m pip install -r /tmp/requirements.txt

# Copy the application code into the container at /home/jovyan
COPY --chown=${NB_USER}:${NB_USER} . ${HOME}

# Expose port for Streamlit
EXPOSE 7860

# Define the entry point for the container
ENTRYPOINT ["streamlit", "run", "Demo.py", "--server.port=7860", "--server.address=0.0.0.0"]
pages/Workflow & Model Overview.py ADDED
@@ -0,0 +1,610 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+
3
+ # Custom CSS for better styling
4
+ st.markdown("""
5
+ <style>
6
+ .main-title {
7
+ font-size: 36px;
8
+ color: #4A90E2;
9
+ font-weight: bold;
10
+ text-align: center;
11
+ }
12
+ .sub-title {
13
+ font-size: 24px;
14
+ color: #4A90E2;
15
+ margin-top: 20px;
16
+ }
17
+ .section {
18
+ background-color: #f9f9f9;
19
+ padding: 15px;
20
+ border-radius: 10px;
21
+ margin-top: 20px;
22
+ }
23
+ .section h2 {
24
+ font-size: 22px;
25
+ color: #4A90E2;
26
+ }
27
+ .section p, .section ul {
28
+ color: #666666;
29
+ }
30
+ .link {
31
+ color: #4A90E2;
32
+ text-decoration: none;
33
+ }
34
+ .benchmark-table {
35
+ width: 100%;
36
+ border-collapse: collapse;
37
+ margin-top: 20px;
38
+ }
39
+ .benchmark-table th, .benchmark-table td {
40
+ border: 1px solid #ddd;
41
+ padding: 8px;
42
+ text-align: left;
43
+ }
44
+ .benchmark-table th {
45
+ background-color: #4A90E2;
46
+ color: white;
47
+ }
48
+ .benchmark-table td {
49
+ background-color: #f2f2f2;
50
+ }
51
+ </style>
52
+ """, unsafe_allow_html=True)
53
+
54
+ # Title
55
+ st.markdown('<div class="main-title">Introduction to ALBERT Annotators in Spark NLP</div>', unsafe_allow_html=True)
56
+
57
+ # Subtitle
58
+ st.markdown("""
59
+ <div class="section">
60
+ <p>ALBERT (A Lite BERT) offers a more efficient alternative to BERT by implementing two parameter-reduction techniques: splitting the embedding matrix and using repeating layers. It maintains high performance while being more memory-efficient. Below, we provide an overview of the ALBERT annotator for token classification:</p>
61
+ </div>
62
+ """, unsafe_allow_html=True)
63
+
64
+ tab1, tab2, tab3 = st.tabs(["ALBERT for Token Classification", "ALBERT for Sequence Classification", "ALBERT for Question Answering"])
65
+
66
+ with tab1:
67
+ st.markdown("""
68
+ <div class="section">
69
+ <h2>ALBERT for Token Classification</h2>
70
+ <p>The <strong>AlbertForTokenClassification</strong> annotator is designed for Named Entity Recognition (NER) tasks using ALBERT. This model efficiently handles token classification, enabling the identification and classification of entities in text. The ALBERT model, with its parameter-reduction techniques, achieves state-of-the-art performance while being more lightweight compared to BERT.</p>
71
+ <p>Token classification with ALBERT enables:</p>
72
+ <ul>
73
+ <li><strong>Named Entity Recognition (NER):</strong> Identifying and classifying entities such as names, organizations, locations, and other predefined categories.</li>
74
+ <li><strong>Information Extraction:</strong> Extracting key information from unstructured text for further analysis.</li>
75
+ <li><strong>Text Categorization:</strong> Enhancing document retrieval and categorization based on entity recognition.</li>
76
+ </ul>
77
+ <p>Here is an example of how ALBERT token classification works:</p>
78
+ <table class="benchmark-table">
79
+ <tr>
80
+ <th>Entity</th>
81
+ <th>Label</th>
82
+ </tr>
83
+ <tr>
84
+ <td>Google</td>
85
+ <td>ORG</td>
86
+ </tr>
87
+ <tr>
88
+ <td>Satya Nadella</td>
89
+ <td>PER</td>
90
+ </tr>
91
+ <tr>
92
+ <td>Seattle</td>
93
+ <td>LOC</td>
94
+ </tr>
95
+ </table>
96
+ </div>
97
+ """, unsafe_allow_html=True)
98
+
99
+ # ALBERT Token Classification - NER CoNLL
100
+ st.markdown('<div class="sub-title">ALBERT Token Classification - NER CoNLL</div>', unsafe_allow_html=True)
101
+ st.markdown("""
102
+ <div class="section">
103
+ <p>The <strong>albert_base_token_classifier_conll03</strong> is a fine-tuned ALBERT model for token classification tasks, specifically adapted for Named Entity Recognition (NER) on the CoNLL-03 dataset. It recognizes four types of entities: location (LOC), organizations (ORG), person (PER), and Miscellaneous (MISC).</p>
104
+ </div>
105
+ """, unsafe_allow_html=True)
106
+
107
+ # How to Use the Model - Token Classification
108
+ st.markdown('<div class="sub-title">How to Use the Model</div>', unsafe_allow_html=True)
109
+ st.code('''
110
+ from sparknlp.base import *
111
+ from sparknlp.annotator import *
112
+ from pyspark.ml import Pipeline
113
+ from pyspark.sql.functions import col, expr
114
+
115
+ document_assembler = DocumentAssembler() \\
116
+ .setInputCol('text') \\
117
+ .setOutputCol('document')
118
+
119
+ tokenizer = Tokenizer() \\
120
+ .setInputCols(['document']) \\
121
+ .setOutputCol('token')
122
+
123
+ tokenClassifier = AlbertForTokenClassification \\
124
+ .pretrained('albert_base_token_classifier_conll03', 'en') \\
125
+ .setInputCols(['token', 'document']) \\
126
+ .setOutputCol('ner') \\
127
+ .setCaseSensitive(True) \\
128
+ .setMaxSentenceLength(512)
129
+
130
+ # Convert NER labels to entities
131
+ ner_converter = NerConverter() \\
132
+ .setInputCols(['document', 'token', 'ner']) \\
133
+ .setOutputCol('entities')
134
+
135
+ pipeline = Pipeline(stages=[
136
+ document_assembler,
137
+ tokenizer,
138
+ tokenClassifier,
139
+ ner_converter
140
+ ])
141
+
142
+ example = spark.createDataFrame([["My name is John!"]]).toDF("text")
143
+ result = pipeline.fit(example).transform(example)
144
+
145
+ result.select(
146
+ expr("explode(entities) as ner_chunk")
147
+ ).select(
148
+ col("ner_chunk.result").alias("chunk"),
149
+ col("ner_chunk.metadata.entity").alias("ner_label")
150
+ ).show(truncate=False)
151
+ ''', language='python')
152
+
153
+ # Results
154
+ st.text("""
155
+ +-----+---------+
156
+ |chunk|ner_label|
157
+ +-----+---------+
158
+ |John |PER |
159
+ +-----+---------+
160
+ """)
161
+
162
+ # Performance Metrics
163
+ st.markdown('<div class="sub-title">Performance Metrics</div>', unsafe_allow_html=True)
164
+ st.markdown("""
165
+ <div class="section">
166
+ <p>Here are the detailed performance metrics for the ALBERT token classification model:</p>
167
+ <table class="benchmark-table">
168
+ <tr>
169
+ <th>Entity</th>
170
+ <th>Precision</th>
171
+ <th>Recall</th>
172
+ <th>F1-Score</th>
173
+ <th>Support</th>
174
+ </tr>
175
+ <tr>
176
+ <td>B-LOC</td>
177
+ <td>0.95</td>
178
+ <td>0.97</td>
179
+ <td>0.96</td>
180
+ <td>1837</td>
181
+ </tr>
182
+ <tr>
183
+ <td>B-MISC</td>
184
+ <td>0.87</td>
185
+ <td>0.86</td>
186
+ <td>0.87</td>
187
+ <td>922</td>
188
+ </tr>
189
+ <tr>
190
+ <td>B-ORG</td>
191
+ <td>0.90</td>
192
+ <td>0.91</td>
193
+ <td>0.90</td>
194
+ <td>1341</td>
195
+ </tr>
196
+ <tr>
197
+ <td>B-PER</td>
198
+ <td>0.91</td>
199
+ <td>0.97</td>
200
+ <td>0.94</td>
201
+ <td>1842</td>
202
+ </tr>
203
+ <tr>
204
+ <td>I-LOC</td>
205
+ <td>0.88</td>
206
+ <td>0.86</td>
207
+ <td>0.87</td>
208
+ <td>257</td>
209
+ </tr>
210
+ <tr>
211
+ <td>I-MISC</td>
212
+ <td>0.78</td>
213
+ <td>0.76</td>
214
+ <td>0.77</td>
215
+ <td>346</td>
216
+ </tr>
217
+ <tr>
218
+ <td>I-ORG</td>
219
+ <td>0.84</td>
220
+ <td>0.85</td>
221
+ <td>0.85</td>
222
+ <td>751</td>
223
+ </tr>
224
+ <tr>
225
+ <td>I-PER</td>
226
+ <td>0.97</td>
227
+ <td>0.92</td>
228
+ <td>0.94</td>
229
+ <td>1307</td>
230
+ </tr>
231
+ <tr>
232
+ <td>O</td>
233
+ <td>0.99</td>
234
+ <td>0.99</td>
235
+ <td>0.99</td>
236
+ <td>42759</td>
237
+ </tr>
238
+ <tr>
239
+ <td>average</td>
240
+ <td>0.92</td>
241
+ <td>0.92</td>
242
+ <td>0.92</td>
243
+ <td>52000</td>
244
+ </tr>
245
+ </table>
246
+ </div>
247
+
248
+ """, unsafe_allow_html=True)
249
+ # Model Info Section
250
+ st.markdown('<div class="sub-title">Model Info</div>', unsafe_allow_html=True)
251
+ st.markdown("""
252
+ <div class="section">
253
+ <ul>
254
+ <li><strong>Model Name:</strong> ALBERT for Token Classification</li>
255
+ <li><strong>Pretrained Model:</strong> albert_base_token_classifier_conll03</li>
256
+ <li><strong>Training Dataset:</strong> CoNLL-03</li>
257
+ <li><strong>Languages Supported:</strong> English</li>
258
+ <li><strong>Use Cases:</strong>
259
+ <ul>
260
+ <li>Named Entity Recognition (NER)</li>
261
+ <li>Information Extraction</li>
262
+ <li>Text Categorization</li>
263
+ </ul>
264
+ </li>
265
+ <li><strong>Performance:</strong> High accuracy with a focus on memory efficiency</li>
266
+ <li><strong>Implementation:</strong> Spark NLP</li>
267
+ <li><strong>Resource Requirements:</strong> Moderate computational resources; suitable for production environments with optimization</li>
268
+ </ul>
269
+ </div>
270
+ """, unsafe_allow_html=True)
271
+
272
+ # References Section
273
+ st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
274
+ st.markdown("""
275
+ <div class="section">
276
+ <ul>
277
+ <li><a class="link" href="https://arxiv.org/abs/1909.11942" target="_blank">Lan, Z., Chen, J., Goodman, S., Gimpel, K., Sharma, P., & Soricut, R. (2019). ALBERT: A Lite BERT for Self-supervised Learning of Language Representations. arXiv preprint arXiv:1909.11942.</a></li>
278
+ <li><a class="link" href="https://github.com/google-research/albert" target="_blank">Google Research's ALBERT GitHub Repository</a></li>
279
+ <li><a class="link" href="https://sparknlp.org/models?q=albert_base_token_classifier_conll03" target="_blank">Spark NLP Model - albert_base_token_classifier_conll03</a></li>
280
+ <li><a class="link" href="https://nlp.stanford.edu/projects/conll2003/" target="_blank">CoNLL-03 Named Entity Recognition Dataset</a></li>
281
+ </ul>
282
+ </div>
283
+ """, unsafe_allow_html=True)
284
+
285
+ with tab2:
286
+ st.markdown("""
287
+ <div class="section">
288
+ <h2>ALBERT for Sequence Classification</h2>
289
+ <p>The <strong>AlbertForSequenceClassification</strong> annotator is tailored for tasks like sentiment analysis or multi-class text classification using the ALBERT model. This model efficiently handles sequence classification, achieving state-of-the-art performance with reduced parameters compared to BERT.</p>
290
+ <p>Sequence classification with ALBERT enables:</p>
291
+ <ul>
292
+ <li><strong>Sentiment Analysis:</strong> Determining the sentiment expressed in text, such as positive, negative, or neutral.</li>
293
+ <li><strong>Multi-Class Text Classification:</strong> Categorizing text into predefined classes, such as news categories or topics.</li>
294
+ <li><strong>Document Classification:</strong> Enhancing search and categorization of documents based on content classification.</li>
295
+ </ul>
296
+ <p>Here is an example of how ALBERT sequence classification works:</p>
297
+ <table class="benchmark-table">
298
+ <tr>
299
+ <th>Text</th>
300
+ <th>Label</th>
301
+ </tr>
302
+ <tr>
303
+ <td>Disney Comics was a comic book publishing company operated by The Walt Disney Company which ran from 1990 to 1993.</td>
304
+ <td>Business</td>
305
+ </tr>
306
+ </table>
307
+ </div>
308
+ """, unsafe_allow_html=True)
309
+
310
+ # ALBERT Sequence Classification - AG News
311
+ st.markdown('<div class="sub-title">ALBERT Sequence Classification - AG News</div>', unsafe_allow_html=True)
312
+ st.markdown("""
313
+ <div class="section">
314
+ <p>The <strong>albert_base_sequence_classifier_ag_news</strong> is a fine-tuned ALBERT model for sequence classification tasks, specifically adapted for text classification on the AG News dataset. It recognizes four categories: Business, Sci/Tech, Sports, and World.</p>
315
+ </div>
316
+ """, unsafe_allow_html=True)
317
+
318
+ # How to Use the Model - Sequence Classification
319
+ st.markdown('<div class="sub-title">How to Use the Model</div>', unsafe_allow_html=True)
320
+ st.code('''
321
+ from sparknlp.base import *
322
+ from sparknlp.annotator import *
323
+ from pyspark.ml import Pipeline
324
+ from pyspark.sql.functions import col, expr
325
+
326
+ document_assembler = DocumentAssembler() \\
327
+ .setInputCol('text') \\
328
+ .setOutputCol('document')
329
+
330
+ tokenizer = Tokenizer() \\
331
+ .setInputCols(['document']) \\
332
+ .setOutputCol('token')
333
+
334
+ sequenceClassifier = AlbertForSequenceClassification \\
335
+ .pretrained('albert_base_sequence_classifier_ag_news', 'en') \\
336
+ .setInputCols(['token', 'document']) \\
337
+ .setOutputCol('class') \\
338
+ .setCaseSensitive(False) \\
339
+ .setMaxSentenceLength(512)
340
+
341
+ pipeline = Pipeline(stages=[
342
+ document_assembler,
343
+ tokenizer,
344
+ sequenceClassifier
345
+ ])
346
+
347
+ example = spark.createDataFrame([["Disney Comics was a comic book publishing company operated by The Walt Disney Company which ran from 1990 to 1993."]]).toDF("text")
348
+ result = pipeline.fit(example).transform(example)
349
+
350
+ result.select(
351
+ expr("explode(class) as classification_result")
352
+ ).select(
353
+ col("classification_result.result").alias("category")
354
+ ).show(truncate=False)
355
+ ''', language='python')
356
+
357
+ # Results
358
+ st.text("""
359
+ +---------+
360
+ |category |
361
+ +---------+
362
+ |Business |
363
+ +---------+
364
+ """)
365
+
366
+ # Performance Metrics
367
+ st.markdown('<div class="sub-title">Performance Metrics</div>', unsafe_allow_html=True)
368
+ st.markdown("""
369
+ <div class="section">
370
+ <p>Here are the detailed performance metrics for the ALBERT sequence classification model on the AG News dataset:</p>
371
+ <table class="benchmark-table">
372
+ <tr>
373
+ <th>Metric</th>
374
+ <th>Score</th>
375
+ </tr>
376
+ <tr>
377
+ <td>Accuracy</td>
378
+ <td>0.9472</td>
379
+ </tr>
380
+ <tr>
381
+ <td>F1-Score</td>
382
+ <td>0.9472</td>
383
+ </tr>
384
+ <tr>
385
+ <td>Precision</td>
386
+ <td>0.9472</td>
387
+ </tr>
388
+ <tr>
389
+ <td>Recall</td>
390
+ <td>0.9472</td>
391
+ </tr>
392
+ <tr>
393
+ <td>Evaluation Loss</td>
394
+ <td>0.1882</td>
395
+ </tr>
396
+ </table>
397
+ </div>
398
+
399
+ """, unsafe_allow_html=True)
400
+
401
+ # Model Info Section
402
+ st.markdown('<div class="sub-title">Model Info</div>', unsafe_allow_html=True)
403
+ st.markdown("""
404
+ <div class="section">
405
+ <ul>
406
+ <li><strong>Model Name:</strong> ALBERT for Sequence Classification</li>
407
+ <li><strong>Pretrained Model:</strong> albert_base_sequence_classifier_ag_news</li>
408
+ <li><strong>Training Dataset:</strong> AG News</li>
409
+ <li><strong>Languages Supported:</strong> English</li>
410
+ <li><strong>Use Cases:</strong>
411
+ <ul>
412
+ <li>Sentiment Analysis</li>
413
+ <li>Multi-Class Text Classification</li>
414
+ <li>Document Classification</li>
415
+ </ul>
416
+ </li>
417
+ <li><strong>Performance:</strong> High accuracy with a focus on memory efficiency</li>
418
+ <li><strong>Implementation:</strong> Spark NLP</li>
419
+ <li><strong>Resource Requirements:</strong> Moderate computational resources; suitable for production environments with optimization</li>
420
+ </ul>
421
+ </div>
422
+ """, unsafe_allow_html=True)
423
+
424
+ # References Section
425
+ st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
426
+ st.markdown("""
427
+ <div class="section">
428
+ <ul>
429
+ <li><a class="link" href="https://arxiv.org/abs/1909.11942" target="_blank">Lan, Z., Chen, J., Goodman, S., Gimpel, K., Sharma, P., & Soricut, R. (2019). ALBERT: A Lite BERT for Self-supervised Learning of Language Representations. arXiv preprint arXiv:1909.11942.</a></li>
430
+ <li><a class="link" href="https://github.com/google-research/albert" target="_blank">Google Research's ALBERT GitHub Repository</a></li>
431
+ <li><a class="link" href="https://sparknlp.org/2021/12/16/albert_base_sequence_classifier_ag_news_en.html" target="_blank">Spark NLP Model - albert_base_sequence_classifier_ag_news</a></li>
432
+ <li><a class="link" href="https://huggingface.co/datasets/ag_news" target="_blank">AG News Dataset</a></li>
433
+ </ul>
434
+ </div>
435
+ """, unsafe_allow_html=True)
436
+
437
+ with tab3:
438
+ st.markdown("""
439
+ <div class="section">
440
+ <h2>ALBERT for Question Answering</h2>
441
+ <p>The <strong>AlbertForQuestionAnswering</strong> annotator is specialized for tasks involving Question Answering (QA) using the ALBERT model. This model efficiently processes question-context pairs to provide accurate answers, making it ideal for QA systems and information retrieval applications.</p>
442
+ <p>Question Answering with ALBERT enables:</p>
443
+ <ul>
444
+ <li><strong>Information Retrieval:</strong> Extracting precise answers from large text corpora based on user queries.</li>
445
+ <li><strong>Knowledge Management:</strong> Enhancing customer support and information systems by providing accurate answers.</li>
446
+ <li><strong>Contextual Understanding:</strong> Leveraging ALBERT’s capabilities to understand the context of questions and provide relevant answers.</li>
447
+ </ul>
448
+ <p>Here is an example of how ALBERT question answering works:</p>
449
+ <table class="benchmark-table">
450
+ <tr>
451
+ <th>Question</th>
452
+ <th>Context</th>
453
+ <th>Answer</th>
454
+ </tr>
455
+ <tr>
456
+ <td>What is my name?</td>
457
+ <td>My name is Clara and I live in Berkeley.</td>
458
+ <td>Clara</td>
459
+ </tr>
460
+ </table>
461
+ </div>
462
+ """, unsafe_allow_html=True)
463
+
464
+ # ALBERT Question Answering - SQuAD2
465
+ st.markdown('<div class="sub-title">ALBERT Question Answering - SQuAD2</div>', unsafe_allow_html=True)
466
+ st.markdown("""
467
+ <div class="section">
468
+ <p>The <strong>albert_base_qa_squad2</strong> is a fine-tuned ALBERT model for Question Answering tasks, specifically adapted for the SQuAD2 dataset. It is capable of answering questions based on the provided context with high accuracy.</p>
469
+ </div>
470
+ """, unsafe_allow_html=True)
471
+
472
+ # How to Use the Model - Question Answering
473
+ st.markdown('<div class="sub-title">How to Use the Model</div>', unsafe_allow_html=True)
474
+ st.code('''
475
+ from sparknlp.base import *
476
+ from sparknlp.annotator import *
477
+ from pyspark.ml import Pipeline
478
+ from pyspark.sql.functions import col, expr
479
+
480
+ documentAssembler = MultiDocumentAssembler() \\
481
+ .setInputCols(["question", "context"]) \\
482
+ .setOutputCols(["document_question", "document_context"])
483
+
484
+ spanClassifier = AlbertForQuestionAnswering.pretrained("albert_base_qa_squad2","en") \\
485
+ .setInputCols(["document_question", "document_context"]) \\
486
+ .setOutputCol("answer") \\
487
+ .setCaseSensitive(False)
488
+
489
+ pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
490
+
491
+ data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
492
+ result = pipeline.fit(data).transform(data)
493
+
494
+ result.select(
495
+ col("answer.result").alias("predicted_answer")
496
+ ).show(truncate=False)
497
+ ''', language='python')
498
+
499
+ # Results
500
+ st.text("""
501
+ +----------------+
502
+ |predicted_answer|
503
+ +----------------+
504
+ |[clara] |
505
+ +----------------+
506
+ """)
507
+
508
+ # Performance Metrics
509
+ st.markdown('<div class="sub-title">Performance Metrics</div>', unsafe_allow_html=True)
510
+ st.markdown("""
511
+ <div class="section">
512
+ <p>The performance metrics of the ALBERT question answering model on a development subset of the SQuAD2 dataset are:</p>
513
+ <table class="benchmark-table">
514
+ <tr>
515
+ <th>Metric</th>
516
+ <th>Score</th>
517
+ </tr>
518
+ <tr>
519
+ <td>Exact Match</td>
520
+ <td>78.71%</td>
521
+ </tr>
522
+ <tr>
523
+ <td>F1 Score</td>
524
+ <td>81.89%</td>
525
+ </tr>
526
+ <tr>
527
+ <td>Total</td>
528
+ <td>6078</td>
529
+ </tr>
530
+ <tr>
531
+ <td>HasAns Exact Match</td>
532
+ <td>75.40%</td>
533
+ </tr>
534
+ <tr>
535
+ <td>HasAns F1 Score</td>
536
+ <td>82.04%</td>
537
+ </tr>
538
+ <tr>
539
+ <td>HasAns Total</td>
540
+ <td>2910</td>
541
+ </tr>
542
+ <tr>
543
+ <td>NoAns Exact Match</td>
544
+ <td>81.76%</td>
545
+ </tr>
546
+ <tr>
547
+ <td>NoAns F1 Score</td>
548
+ <td>81.76%</td>
549
+ </tr>
550
+ <tr>
551
+ <td>NoAns Total</td>
552
+ <td>3168</td>
553
+ </tr>
554
+ <tr>
555
+ <td>Best Exact Match</td>
556
+ <td>78.73%</td>
557
+ </tr>
558
+ <tr>
559
+ <td>Best F1 Score</td>
560
+ <td>81.91%</td>
561
+ </tr>
562
+ </table>
563
+ </div>
564
+ """, unsafe_allow_html=True)
565
+
566
+ # Model Info Section
567
+ st.markdown('<div class="sub-title">Model Info</div>', unsafe_allow_html=True)
568
+ st.markdown("""
569
+ <div class="section">
570
+ <ul>
571
+ <li><strong>Model Name:</strong> ALBERT for Question Answering</li>
572
+ <li><strong>Pretrained Model:</strong> albert_base_qa_squad2</li>
573
+ <li><strong>Training Dataset:</strong> SQuAD2</li>
574
+ <li><strong>Languages Supported:</strong> English</li>
575
+ <li><strong>Use Cases:</strong>
576
+ <ul>
577
+ <li>Information Retrieval</li>
578
+ <li>Knowledge Management</li>
579
+ <li>Contextual Understanding</li>
580
+ </ul>
581
+ </li>
582
+ <li><strong>Performance:</strong> High accuracy with optimized resource usage</li>
583
+ <li><strong>Implementation:</strong> Spark NLP</li>
584
+ <li><strong>Resource Requirements:</strong> Moderate computational resources; suitable for production environments</li>
585
+ </ul>
586
+ </div>
587
+ """, unsafe_allow_html=True)
588
+
589
+ # References Section
590
+ st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
591
+ st.markdown("""
592
+ <div class="section">
593
+ <ul>
594
+ <li><a class="link" href="https://arxiv.org/abs/1909.11942" target="_blank">Lan, Z., Chen, J., Goodman, S., Gimpel, K., Sharma, P., & Soricut, R. (2019). ALBERT: A Lite BERT for Self-supervised Learning of Language Representations. arXiv preprint arXiv:1909.11942.</a></li>
595
+ <li><a class="link" href="https://sparknlp.org/2022/06/15/albert_base_qa_squad2_en_3_0.html" target="_blank">Spark NLP Model - albert_base_qa_squad2</a></li>
596
+ </ul>
597
+ </div>
598
+ """, unsafe_allow_html=True)
599
+
600
+ # Community & Support
601
+ st.markdown('<div class="sub-title">Community & Support</div>', unsafe_allow_html=True)
602
+ st.markdown("""
603
+ <div class="content">
604
+ <ul>
605
+ <li><a class="link" href="https://sparknlp.org/" target="_blank">Official Website</a>: Documentation and examples</li>
606
+ <li><a class="link" href="https://join.slack.com/t/spark-nlp/shared_invite/zt-198dipu77-L3UWNe_AJ8xqDk0ivmih5Q" target="_blank">Slack</a>: Live discussion with the community and team</li>
607
+ <li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp" target="_blank">GitHub</a>: Bug reports, feature requests, and contributions</li>
608
+ </ul>
609
+ </div>
610
+ """, unsafe_allow_html=True)
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ streamlit
2
+ st-annotated-text
3
+ streamlit-tags
4
+ pandas
5
+ numpy
6
+ spark-nlp
7
+ pyspark