Create app.py
app.py ADDED
@@ -0,0 +1,121 @@
+import streamlit as st
+import spacy
+import wikipediaapi
+import wikipedia
+from wikipedia.exceptions import DisambiguationError, PageError
+from transformers import TFAutoModel, AutoTokenizer
+import numpy as np
+import pandas as pd
+import faiss
+
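+# Load the spaCy English model, downloading it on first run if it is missing.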
+try:
+    nlp = spacy.load("en_core_web_sm")
+except OSError:
+    # spacy.load raises OSError when the model package is not installed.
+    spacy.cli.download("en_core_web_sm")
+    nlp = spacy.load("en_core_web_sm")
+
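+# Question words to filter out of the extracted noun chunks.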
+wh_words = ['what', 'who', 'how', 'when', 'which']
+
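+# Extract candidate concepts (noun chunks) from the user's question.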
+def get_concepts(text):
+    text = text.lower()
+    doc = nlp(text)
+    concepts = []
+    for chunk in doc.noun_chunks:
+        if chunk.text not in wh_words:
+            concepts.append(chunk.text)
+    return concepts
+
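+# Greedily pack sentences into passages of roughly k tokens each.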
+def get_passages(text, k=100):
+    doc = nlp(text)
+    passages = []
+    passage = ""
+    passage_len = 0
+    for sen in doc.sents:
+        passage_len += len(sen)
+        if passage_len >= k and passage:
+            # Flush the current passage and start a new one with this sentence.
+            passages.append(passage.strip())
+            passage = sen.text
+            passage_len = len(sen)
+        else:
+            passage += " " + sen.text
+    if passage.strip():
+        # Keep the final passage even if it is shorter than k tokens.
+        passages.append(passage.strip())
+    return passages
+
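+# Search Wikipedia for each concept and split the matching pages into passages.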
+def get_dicts_for_dpr(concepts, n_results=20, k=100):
+    dicts = []
+    for concept in concepts:
+        wikis = wikipedia.search(concept, results=n_results)
+        st.write(f"{concept}: {len(wikis)} Wikipedia results")
+        for wiki in wikis:
+            try:
+                html_page = wikipedia.page(title=wiki, auto_suggest=False)
+            except (DisambiguationError, PageError):
+                # Skip ambiguous titles and titles that resolve to no page.
+                continue
+            passages = get_passages(html_page.content, k=k)
+            for passage in passages:
+                dicts.append({'text': passage, 'title': wiki})
+    return dicts
+
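+# Load the compact DPR context/question encoders and their tokenizers.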
+passage_encoder = TFAutoModel.from_pretrained("nlpconnect/dpr-ctx_encoder_bert_uncased_L-2_H-128_A-2")
+query_encoder = TFAutoModel.from_pretrained("nlpconnect/dpr-question_encoder_bert_uncased_L-2_H-128_A-2")
+p_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/dpr-ctx_encoder_bert_uncased_L-2_H-128_A-2")
+q_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/dpr-question_encoder_bert_uncased_L-2_H-128_A-2")
+
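+# Pair each passage with its page title so both are fed to the tokenizer.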
+def get_title_text_combined(passage_dicts):
+    res = []
+    for p in passage_dicts:
+        res.append((p['title'], p['text']))
+    return res
+
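+# Embed (title, text) passage pairs with the context encoder.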
+def extracted_passage_embeddings(processed_passages, max_length=156):
+    passage_inputs = p_tokenizer.batch_encode_plus(
+        processed_passages,
+        add_special_tokens=True,
+        truncation=True,
+        padding="max_length",
+        max_length=max_length,
+        return_token_type_ids=True
+    )
+    passage_embeddings = passage_encoder.predict(
+        [np.array(passage_inputs['input_ids']),
+         np.array(passage_inputs['attention_mask']),
+         np.array(passage_inputs['token_type_ids'])],
+        batch_size=64,
+        verbose=1
+    )
+    return passage_embeddings
+
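+# Embed the question into the same vector space as the passages.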
+def extracted_query_embeddings(queries, max_length=64):
+    query_inputs = q_tokenizer.batch_encode_plus(
+        queries,
+        add_special_tokens=True,
+        truncation=True,
+        padding="max_length",
+        max_length=max_length,
+        return_token_type_ids=True
+    )
+    query_embeddings = query_encoder.predict(
+        [np.array(query_inputs['input_ids']),
+         np.array(query_inputs['attention_mask']),
+         np.array(query_inputs['token_type_ids'])],
+        batch_size=1,
+        verbose=1
+    )
+    return query_embeddings
+
+# Wikipedia API:
+
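+# Normalize page text by stripping tab characters.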
+def get_pagetext(page):
+    s = str(page).replace("\t", "")
+    return s
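+
+# Fetch a short summary for a search term via the wikipediaapi client.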
+def get_wiki_summary(search):
+    wiki_wiki = wikipediaapi.Wikipedia('en')
+    page = wiki_wiki.page(search)
+    return page.summary if page.exists() else ""