Update functions.py
functions.py CHANGED (+13 −5)
@@ -138,7 +138,8 @@ def summary_downloader(raw_text):
     st.markdown("#### Download Summary as a File ###")
     href = f'<a href="data:file/txt;base64,{b64}" download="{new_filename}">Click to Download!!</a>'
     st.markdown(href,unsafe_allow_html=True)
-
+
+@st.experimental_memo(suppress_st_warning=True)
 def get_all_entities_per_sentence(text):
     doc = nlp(''.join(text))
 
@@ -166,11 +167,13 @@ def get_all_entities_per_sentence(text):
         entities_all_sentences.append(entities_this_sentence)
 
     return entities_all_sentences
-
+
+@st.experimental_memo(suppress_st_warning=True)
 def get_all_entities(text):
     all_entities_per_sentence = get_all_entities_per_sentence(text)
     return list(itertools.chain.from_iterable(all_entities_per_sentence))
-
+
+@st.experimental_memo(suppress_st_warning=True)
 def get_and_compare_entities(article_content,summary_output):
 
     all_entities_per_sentence = get_all_entities_per_sentence(article_content)
@@ -218,6 +221,7 @@ def get_and_compare_entities(article_content,summary_output):
 
     return matched_entities, unmatched_entities
 
+@st.experimental_memo(suppress_st_warning=True)
 def highlight_entities(article_content,summary_output):
 
     markdown_start_red = "<mark class=\"entity\" style=\"background: rgb(238, 135, 135);\">"
@@ -252,7 +256,8 @@ def display_df_as_table(model,top_k,score='score'):
     df['Score'] = round(df['Score'],2)
 
     return df
-
+
+
 def make_spans(text,results):
     results_list = []
     for i in range(len(results)):
@@ -264,4 +269,7 @@ def make_spans(text,results):
 ##Fiscal Sentiment by Sentence
 def fin_ext(text):
     results = remote_clx(sent_tokenizer(text))
-    return make_spans(text,results)
+    return make_spans(text,results)
+
+nlp = get_spacy()
+asr_model, sent_pipe, sum_pipe, ner_pipe, sbert, cross_encoder = load_models()