abdullahmubeen10 committed · verified
Commit 6dfd639 · Parent: 3bcf4ed

Upload 5 files
.streamlit/config.toml ADDED
@@ -0,0 +1,3 @@
+ [theme]
+ base="light"
+ primaryColor="#29B4E8"
Demo.py ADDED
@@ -0,0 +1,164 @@
+ import streamlit as st
+ import sparknlp
+ import pandas as pd
+
+ from sparknlp.base import *
+ from sparknlp.annotator import *
+ from pyspark.ml import Pipeline
+ from annotated_text import annotated_text
+
+ # Page configuration
+ st.set_page_config(
+     layout="wide",
+     initial_sidebar_state="auto"
+ )
+
+ # CSS for styling
+ st.markdown("""
+ <style>
+ .main-title {
+     font-size: 36px;
+     color: #4A90E2;
+     font-weight: bold;
+     text-align: center;
+ }
+ .section {
+     background-color: #f9f9f9;
+     padding: 10px;
+     border-radius: 10px;
+     margin-top: 10px;
+ }
+ .section p, .section ul {
+     color: #666666;
+ }
+ </style>
+ """, unsafe_allow_html=True)
+
+ @st.cache_resource
+ def init_spark():
+     return sparknlp.start()
+
+ @st.cache_resource
+ def create_pipeline(model):
+     document_assembler = DocumentAssembler() \
+         .setInputCol('text') \
+         .setOutputCol('document')
+
+     sentence_detector = SentenceDetectorDLModel.pretrained("sentence_detector_dl", "xx") \
+         .setInputCols(["document"]) \
+         .setOutputCol("sentence")
+
+     tokenizer = Tokenizer() \
+         .setInputCols(['sentence']) \
+         .setOutputCol('token')
+
+     token_classifier = XlmRoBertaForTokenClassification.pretrained(model, "xx") \
+         .setInputCols(["sentence", "token"]) \
+         .setOutputCol("ner") \
+         .setCaseSensitive(False) \
+         .setMaxSentenceLength(512)
+
+     ner_converter = NerConverter() \
+         .setInputCols(['sentence', 'token', 'ner']) \
+         .setOutputCol('ner_chunk')
+
+     return Pipeline(stages=[document_assembler, sentence_detector, tokenizer, token_classifier, ner_converter])
+
+ def fit_data(pipeline, data):
+     empty_df = spark.createDataFrame([['']]).toDF('text')
+     pipeline_model = pipeline.fit(empty_df)
+     light_model = LightPipeline(pipeline_model)
+     return light_model.fullAnnotate(data)
+
+ def annotate(data):
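+     """Render the document with each NER chunk highlighted inline.
+
+     Walks the text left to right, splitting on each chunk so that plain
+     spans and (chunk, label) pairs reach annotated_text in order.
+     """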
+     document, chunks, labels = data["Document"], data["NER Chunk"], data["NER Label"]
+     annotated_words = []
+     for chunk, label in zip(chunks, labels):
+         parts = document.split(chunk, 1)
+         if parts[0]:
+             annotated_words.append(parts[0])
+         annotated_words.append((chunk, label))
+         document = parts[1]
+     if document:
+         annotated_words.append(document)
+     annotated_text(*annotated_words)
+
+ tasks_models_descriptions = {
+     "Token Classification": {
+         "models": ["xlmroberta_ner_large_finetuned_conll03_english"],
+         "description": "The 'xlmroberta_ner_large_finetuned_conll03_english' model is adept at token classification tasks, including named entity recognition (NER). It identifies and categorizes tokens in text, such as names, dates, and locations, enhancing the extraction of meaningful information from unstructured data."
+     }
+ }
+
+ # Sidebar content
+ task = "Token Classification"
+ model = st.sidebar.selectbox("Choose the pretrained model", tasks_models_descriptions[task]["models"], help="For more info about the models visit: https://sparknlp.org/models")
+
+ # Reference notebook link in sidebar
+ link = """
+ <a href="https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/357691d18373d6e8f13b5b1015137a398fd0a45f/Spark_NLP_Udemy_MOOC/Open_Source/17.01.Transformers-based_Embeddings.ipynb#L103">
+     <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
+ </a>
+ """
+ st.sidebar.markdown('Reference notebook:')
+ st.sidebar.markdown(link, unsafe_allow_html=True)
+
+ # Page content
+ title, sub_title = (f'XLM-RoBERTa for {task}', tasks_models_descriptions[task]["description"])
+ st.markdown(f'<div class="main-title">{title}</div>', unsafe_allow_html=True)
+ container = st.container(border=True)
+ container.write(sub_title)
+
+ # Load examples
+ examples_mapping = {
+     "Token Classification": [
+         "William Henry Gates III (born October 28, 1955) is an American business magnate, software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft, Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect, while also being the largest individual shareholder until May 2014. He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico; it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect. During the late 1990s, Gates had been criticized for his business tactics, which have been considered anti-competitive. This opinion has been upheld by numerous court rulings. In June 2006, Gates announced that he would be transitioning to a part-time role at Microsoft and full-time work at the Bill & Melinda Gates Foundation, the private charitable foundation that he and his wife, Melinda Gates, established in 2000. He gradually transferred his duties to Ray Ozzie and Craig Mundie. He stepped down as chairman of Microsoft in February 2014 and assumed a new post as technology adviser to support the newly appointed CEO Satya Nadella.",
+         "The Mona Lisa is a 16th century oil painting created by Leonardo. It's held at the Louvre in Paris.",
+         "When Sebastian Thrun started working on self-driving cars at Google in 2007, few people outside of the company took him seriously. “I can tell you very senior CEOs of major American car companies would shake my hand and turn away because I wasn’t worth talking to,” said Thrun, now the co-founder and CEO of online higher education startup Udacity, in an interview with Recode earlier this week.",
+         "Facebook is a social networking service launched as TheFacebook on February 4, 2004. It was founded by Mark Zuckerberg with his college roommates and fellow Harvard University students Eduardo Saverin, Andrew McCollum, Dustin Moskovitz and Chris Hughes. The website's membership was initially limited by the founders to Harvard students, but was expanded to other colleges in the Boston area, the Ivy League, and gradually most universities in the United States and Canada.",
+         "The history of natural language processing generally started in the 1950s, although work can be found from earlier periods. In 1950, Alan Turing published an article titled 'Computing Machinery and Intelligence' which proposed what is now called the Turing test as a criterion of intelligence.",
+         "Geoffrey Everest Hinton is an English Canadian cognitive psychologist and computer scientist, most noted for his work on artificial neural networks. Since 2013 he has divided his time between Google and the University of Toronto. In 2017, he co-founded and became the Chief Scientific Advisor of the Vector Institute in Toronto.",
+         "When I told John that I wanted to move to Alaska, he warned me that I'd have trouble finding a Starbucks there.",
+         "Steven Paul Jobs was an American business magnate, industrial designer, investor, and media proprietor. He was the chairman, chief executive officer (CEO), and co-founder of Apple Inc., the chairman and majority shareholder of Pixar, a member of The Walt Disney Company's board of directors following its acquisition of Pixar, and the founder, chairman, and CEO of NeXT. Jobs is widely recognized as a pioneer of the personal computer revolution of the 1970s and 1980s, along with Apple co-founder Steve Wozniak. Jobs was born in San Francisco, California, and put up for adoption. He was raised in the San Francisco Bay Area. He attended Reed College in 1972 before dropping out that same year, and traveled through India in 1974 seeking enlightenment and studying Zen Buddhism.",
+         "Titanic is a 1997 American epic romance and disaster film directed, written, co-produced, and co-edited by James Cameron. Incorporating both historical and fictionalized aspects, it is based on accounts of the sinking of the RMS Titanic, and stars Leonardo DiCaprio and Kate Winslet as members of different social classes who fall in love aboard the ship during its ill-fated maiden voyage.",
+         "Other than being the king of the north, John Snow is an English physician and a leader in the development of anaesthesia and medical hygiene. He is considered the first to use data to trace a cholera outbreak, in 1854."
+     ]
+ }
+
+ examples = examples_mapping[task]
+ selected_text = st.selectbox("Select an example", examples)
+ custom_input = st.text_input("Try it with your own Sentence!")
+
+ try:
+     text_to_analyze = custom_input if custom_input else selected_text
+     st.subheader('Full example text')
+     HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
+     st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)
+ except Exception:
+     text_to_analyze = selected_text
+
+ # Initialize Spark and create the pipeline
+ spark = init_spark()
+ pipeline = create_pipeline(model)
+ output = fit_data(pipeline, text_to_analyze)
+
+ # Display predictions
+ st.subheader("Prediction:")
+
+ results = {
+     'Document': output[0]['document'][0].result,
+     'NER Chunk': [n.result for n in output[0]['ner_chunk']],
+     'NER Label': [n.metadata['entity'] for n in output[0]['ner_chunk']]
+ }
+ annotate(results)
+ df = pd.DataFrame({'NER Chunk': results['NER Chunk'], 'NER Label': results['NER Label']})
+ df.index += 1
+ st.dataframe(df)
Dockerfile ADDED
@@ -0,0 +1,72 @@
+ # Base image: Ubuntu 18.04
+ FROM ubuntu:18.04
+
+ # Set environment variables
+ ENV NB_USER jovyan
+ ENV NB_UID 1000
+ ENV HOME /home/${NB_USER}
+ ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/
+
+ # Install required packages
+ RUN apt-get update && apt-get install -y \
+     tar \
+     wget \
+     bash \
+     rsync \
+     gcc \
+     libfreetype6-dev \
+     libhdf5-serial-dev \
+     libpng-dev \
+     libzmq3-dev \
+     python3 \
+     python3-dev \
+     python3-pip \
+     unzip \
+     pkg-config \
+     software-properties-common \
+     graphviz \
+     openjdk-8-jdk \
+     ant \
+     ca-certificates-java \
+     && apt-get clean \
+     && update-ca-certificates -f
+
+ # Install Python 3.8 from the deadsnakes PPA
+ RUN add-apt-repository ppa:deadsnakes/ppa \
+     && apt-get update \
+     && apt-get install -y python3.8 python3-pip \
+     && apt-get clean
+
+ # Set up JAVA_HOME
+ RUN echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/" >> /etc/profile \
+     && echo "export PATH=\$JAVA_HOME/bin:\$PATH" >> /etc/profile
+
+ # Create a new user named "jovyan" with user ID 1000
+ RUN useradd -m -u ${NB_UID} ${NB_USER}
+
+ # Switch to the "jovyan" user
+ USER ${NB_USER}
+
+ # Set home and path variables for the user
+ ENV HOME=/home/${NB_USER} \
+     PATH=/home/${NB_USER}/.local/bin:$PATH
+
+ # Point PySpark at Python 3.8 for both the driver and workers
+ ENV PYSPARK_PYTHON=/usr/bin/python3.8
+ ENV PYSPARK_DRIVER_PYTHON=/usr/bin/python3.8
+
+ # Set the working directory to the user's home directory
+ WORKDIR ${HOME}
+
+ # Upgrade pip and install Python dependencies
+ RUN python3.8 -m pip install --upgrade pip
+ COPY requirements.txt /tmp/requirements.txt
+ RUN python3.8 -m pip install -r /tmp/requirements.txt
+
+ # Copy the application code into the container at /home/jovyan
+ COPY --chown=${NB_USER}:${NB_USER} . ${HOME}
+
+ # Expose the Streamlit port (7860, the default for Hugging Face Spaces)
+ EXPOSE 7860
+
+ # Define the entry point for the container
+ ENTRYPOINT ["streamlit", "run", "Demo.py", "--server.port=7860", "--server.address=0.0.0.0"]
pages/Workflow & Model Overview.py ADDED
@@ -0,0 +1,195 @@
+ import streamlit as st
+
+ # Page configuration
+ st.set_page_config(
+     layout="wide",
+     initial_sidebar_state="auto"
+ )
+
+ # Custom CSS for better styling
+ st.markdown("""
+ <style>
+ .main-title {
+     font-size: 36px;
+     color: #4A90E2;
+     font-weight: bold;
+     text-align: center;
+ }
+ .sub-title {
+     font-size: 24px;
+     color: #4A90E2;
+     margin-top: 20px;
+ }
+ .section {
+     background-color: #f9f9f9;
+     padding: 15px;
+     border-radius: 10px;
+     margin-top: 20px;
+ }
+ .section h2 {
+     font-size: 22px;
+     color: #4A90E2;
+ }
+ .section p, .section ul {
+     color: #666666;
+ }
+ .link {
+     color: #4A90E2;
+     text-decoration: none;
+ }
+ .benchmark-table {
+     width: 100%;
+     border-collapse: collapse;
+     margin-top: 20px;
+ }
+ .benchmark-table th, .benchmark-table td {
+     border: 1px solid #ddd;
+     padding: 8px;
+     text-align: left;
+ }
+ .benchmark-table th {
+     background-color: #4A90E2;
+     color: white;
+ }
+ .benchmark-table td {
+     background-color: #f2f2f2;
+ }
+ </style>
+ """, unsafe_allow_html=True)
+
+ # Title
+ st.markdown('<div class="main-title">Introduction to XLM-RoBERTa Annotators in Spark NLP</div>', unsafe_allow_html=True)
+
+ # Subtitle
+ st.markdown("""
+ <div class="section">
+     <p>XLM-RoBERTa (Cross-lingual Robustly Optimized BERT Approach) is an advanced multilingual model that extends the capabilities of RoBERTa to over 100 languages. Pre-trained on a massive, diverse corpus, XLM-RoBERTa is designed to handle various NLP tasks in a multilingual context, making it ideal for applications that require cross-lingual understanding. Below is an overview of the XLM-RoBERTa annotator for token classification:</p>
+ </div>
+ """, unsafe_allow_html=True)
+
+ st.markdown('<div class="sub-title">XLM-RoBERTa for Token Classification</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <p><strong>Token Classification</strong> is a crucial NLP task that involves assigning labels to individual tokens (words or subwords) within a sentence. This task is fundamental for applications like Named Entity Recognition (NER), Part-of-Speech (POS) tagging, and other fine-grained text analyses.</p>
+     <p>XLM-RoBERTa, with its multilingual capabilities, is particularly suited for token classification in diverse linguistic contexts. Leveraging this model in Spark NLP allows for robust and scalable token classification across multiple languages, making it an invaluable tool for multilingual NLP projects.</p>
+     <p>Using XLM-RoBERTa for token classification enables:</p>
+     <ul>
+         <li><strong>Multilingual NER:</strong> Identify and categorize entities in text across various languages, such as persons (PER), organizations (ORG), locations (LOC), and more.</li>
+         <li><strong>Cross-lingual Transfer Learning:</strong> Apply models learned in one language to another, benefiting from XLM-RoBERTa's shared representations across languages.</li>
+         <li><strong>Enhanced Text Processing:</strong> Improve text categorization, information extraction, and data retrieval across multilingual datasets.</li>
+     </ul>
+     <p>Advantages of using XLM-RoBERTa for token classification in Spark NLP include:</p>
+     <ul>
+         <li><strong>Multilingual Expertise:</strong> XLM-RoBERTa's training on a vast multilingual corpus ensures strong performance across different languages.</li>
+         <li><strong>Scalability:</strong> Integrated with Apache Spark, Spark NLP processes large-scale multilingual datasets efficiently.</li>
+         <li><strong>Model Flexibility:</strong> Fine-tune XLM-RoBERTa models or use pre-trained ones based on your specific language needs and tasks.</li>
+     </ul>
+ </div>
+ """, unsafe_allow_html=True)
+
+ # General information about using token classification models
+ st.markdown('<div class="sub-title">How to Use XLM-RoBERTa for Token Classification in Spark NLP</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <p>To harness XLM-RoBERTa for token classification, Spark NLP provides a straightforward pipeline setup. Below is a sample implementation that demonstrates how to use XLM-RoBERTa for Named Entity Recognition (NER). The model's multilingual capabilities ensure that it can be applied effectively across different languages, making it ideal for diverse NLP tasks.</p>
+ </div>
+ """, unsafe_allow_html=True)
+
+ st.code('''
+ from sparknlp.base import *
+ from sparknlp.annotator import *
+ from pyspark.ml import Pipeline
+ from pyspark.sql.functions import col
+
+ documentAssembler = DocumentAssembler() \\
+     .setInputCol("text") \\
+     .setOutputCol("document")
+
+ tokenizer = Tokenizer() \\
+     .setInputCols(["document"]) \\
+     .setOutputCol("token")
+
+ # Load a pretrained token classification model (here, XLM-RoBERTa fine-tuned on CoNLL-03)
+ token_classifier = XlmRoBertaForTokenClassification.pretrained("xlmroberta_ner_large_finetuned_conll03_english", "xx") \\
+     .setInputCols(["document", "token"]) \\
+     .setOutputCol("ner")
+
+ ner_converter = NerConverter() \\
+     .setInputCols(['document', 'token', 'ner']) \\
+     .setOutputCol('ner_chunk')
+
+ pipeline = Pipeline(stages=[
+     documentAssembler,
+     tokenizer,
+     token_classifier,
+     ner_converter
+ ])
+
+ data = spark.createDataFrame([["Spark NLP provides powerful tools for multilingual NLP."]]).toDF("text")
+ result = pipeline.fit(data).transform(data)
+
+ result.selectExpr("explode(ner_chunk) as ner_chunk").select(
+     col("ner_chunk.result").alias("chunk"),
+     col("ner_chunk.metadata.entity").alias("ner_label")
+ ).show(truncate=False)
+ ''', language='python')
+
+ # Example output
+ st.text("""
+ +--------------------------+---------+
+ |chunk                     |ner_label|
+ +--------------------------+---------+
+ |Spark NLP                 |ORG      |
+ +--------------------------+---------+
+ """)
+
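+ st.markdown("""
+ <div class="section">
+     <p>For interactive, single-text inference (as in the demo app), the fitted pipeline can be wrapped in a <code>LightPipeline</code>, which annotates plain Python strings without DataFrame overhead. A minimal sketch, assuming the pipeline defined above:</p>
+ </div>
+ """, unsafe_allow_html=True)
+
+ st.code('''
+ from sparknlp.base import LightPipeline
+
+ # Fit on an empty DataFrame (all stages are pretrained), then annotate raw strings
+ empty_df = spark.createDataFrame([[""]]).toDF("text")
+ light_model = LightPipeline(pipeline.fit(empty_df))
+
+ annotations = light_model.fullAnnotate("Google was founded in California.")[0]
+ for chunk in annotations["ner_chunk"]:
+     print(chunk.result, chunk.metadata["entity"])
+ ''', language='python')
+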
+ # Model info section
+ st.markdown('<div class="sub-title">Choosing the Right XLM-RoBERTa Model</div>', unsafe_allow_html=True)
+ st.markdown("""
+ <div class="section">
+     <p>Spark NLP provides access to various pre-trained XLM-RoBERTa models tailored for token classification tasks. Selecting the appropriate model can significantly impact performance, particularly in multilingual contexts.</p>
+     <p>To explore and choose the most suitable XLM-RoBERTa model for your needs, visit the <a class="link" href="https://sparknlp.org/models?annotator=XlmRoBertaForTokenClassification" target="_blank">Spark NLP Models Hub</a>. Here, you will find detailed descriptions of each model, including their specific applications and supported languages. Swapping in another model is a one-line change, as sketched below.</p>
+ </div>
+ """, unsafe_allow_html=True)
+
+ st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
156
+ st.markdown("""
157
+ <div class="section">
158
+ <ul>
159
+ <li><a class="link" href="https://huggingface.co/xlm-roberta-large-finetuned-conll03-english" target="_blank">Hugging Face: xlm-roberta-large-finetuned-conll03-english</a></li>
160
+ <li><a class="link" href="https://arxiv.org/abs/1911.02116" target="_blank">XLM-RoBERTa: A Multilingual Language Model</a></li>
161
+ <li><a class="link" href="https://github.com/facebookresearch/fairseq/tree/main/examples/xlmr" target="_blank">GitHub: XLM-RoBERTa Examples</a></li>
162
+ <li><a class="link" href="https://aclanthology.org/2021.acl-long.330.pdf" target="_blank">ACL: Multilingual Transfer of NER Models</a></li>
163
+ <li><a class="link" href="https://dl.acm.org/doi/pdf/10.1145/3442188.3445922" target="_blank">ACM: Analysis of Multilingual Models</a></li>
164
+ <li><a class="link" href="https://arxiv.org/pdf/2008.03415.pdf" target="_blank">Efficient Multilingual Language Models</a></li>
165
+ <li><a class="link" href="https://mlco2.github.io/impact#compute" target="_blank">ML CO2 Impact Estimator</a></li>
166
+ <li><a class="link" href="https://arxiv.org/abs/1910.09700" target="_blank">Cross-lingual Transfer with XLM-RoBERTa</a></li>
167
+ </ul>
168
+ </div>
169
+ """, unsafe_allow_html=True)
170
+
171
+ # Footer
172
+ st.markdown("""
173
+ <div class="section">
174
+ <ul>
175
+ <li><a class="link" href="https://sparknlp.org/" target="_blank">Official Website</a>: Documentation and examples</li>
176
+ <li><a class="link" href="https://join.slack.com/t/spark-nlp/shared_invite/zt-198dipu77-L3UWNe_AJ8xqDk0ivmih5Q" target="_blank">Slack</a>: Live discussion with the community and team</li>
177
+ <li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp" target="_blank">GitHub</a>: Bug reports, feature requests, and contributions</li>
178
+ <li><a class="link" href="https://medium.com/spark-nlp" target="_blank">Medium</a>: Spark NLP articles</li>
179
+ <li><a class="link" href="https://www.youtube.com/channel/UCmFOjlpYEhxf_wJUDuz6xxQ/videos" target="_blank">YouTube</a>: Video tutorials</li>
180
+ </ul>
181
+ </div>
182
+ """, unsafe_allow_html=True)
183
+
184
+ st.markdown('<div class="sub-title">Quick Links</div>', unsafe_allow_html=True)
185
+
186
+ st.markdown("""
187
+ <div class="section">
188
+ <ul>
189
+ <li><a class="link" href="https://sparknlp.org/docs/en/quickstart" target="_blank">Getting Started</a></li>
190
+ <li><a class="link" href="https://nlp.johnsnowlabs.com/models" target="_blank">Pretrained Models</a></li>
191
+ <li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp/tree/master/examples/python/annotation/text/english" target="_blank">Example Notebooks</a></li>
192
+ <li><a class="link" href="https://sparknlp.org/docs/en/install" target="_blank">Installation Guide</a></li>
193
+ </ul>
194
+ </div>
195
+ """, unsafe_allow_html=True)
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ streamlit
+ st-annotated-text
+ streamlit-tags
+ pandas
+ numpy
+ spark-nlp
+ pyspark