Update app.py
app.py
CHANGED
@@ -1,32 +1,75 @@
+
 import streamlit as st
-import plotly.graph_objects as go
-from transformers import pipeline
-import re
-import time
-import requests
 from PIL import Image
-import
-import
-
-
-
-
-
-
-
-
-
-
-
-
-
+import spacy
+import streamlit as st
+from streamlit_pdf_viewer import pdf_viewer
+
+
+
+st.set_page_config(page_title="FACTOID: FACtual enTailment fOr hallucInation Detection", layout="wide")
+st.title('Welcome to :blue[FACTOID] ')
+
+st.header('FACTOID: FACtual enTailment fOr hallucInation Detection :blue[Web Demo]')
+#image = Image.open('image.png')
+#st.image(image, caption='Traditional Entailment vs Factual Entailment')
+pdf_viewer(input="fac.pdf", width=700)
+
+# List of sentences
+sentence1 = [f"U.S. President Barack Obama declared that the U.S. will refrain from deploying troops in Ukraine."]
+sentence2 = [f"Joe Biden said we’d not send U.S. troops to fight Russian troops in Ukraine, but we would provide robust military assistance and try to unify the Western world against Russia’s aggression."]
+# Create a dropdown menu
+selected_sentence1 = st.selectbox("Select first sentence:", sentence1)
+selected_sentence2 = st.selectbox("Select second sentence:", sentence2)
+
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
 import torch
-
-import tiktoken
-import seaborn as sns
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-# from colorama import Fore, Style
-import openai  # for OpenAI API calls
+device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
 
-
-
+model_name = "MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7"
+tokenizer = AutoTokenizer.from_pretrained(model_name,use_fast=False)
+model = AutoModelForSequenceClassification.from_pretrained(model_name)
+
+
+premise = selected_sentence1
+hypothesis = selected_sentence2
+input = tokenizer(premise, hypothesis, truncation=True, return_tensors="pt")
+output = model(input["input_ids"].to(device))  # device = "cuda:0" or "cpu"
+prediction = torch.softmax(output["logits"][0], -1).tolist()
+label_names = ["support", "neutral", "refute"]
+prediction = {name: float(pred) for pred, name in zip(prediction, label_names)}
+highest_label = max(prediction, key=prediction.get)
+
+
+from transformers import pipeline
+pipe = pipeline("text-classification",model="sileod/deberta-v3-base-tasksource-nli")
+labels=pipe([dict(text=selected_sentence1,
+                  text_pair=selected_sentence2)])
+
+
+import en_core_web_sm
+
+
+def extract_person_names(sentence):
+    nlp = spacy.load("en_core_web_sm")
+    doc = nlp(sentence)
+    person_names = [entity.text for entity in doc.ents if entity.label_ == 'PERSON']
+
+    return person_names[0]
+
+person_name1 = extract_person_names(selected_sentence1)
+person_name2 = extract_person_names(selected_sentence2)
+
+
+col1, col2 = st.columns(2)
+
+with col1:
+    st.write("Without Factual Entailment.")
+    st.write("Textual Entailment Model:\n",highest_label)
+
+with col2:
+    st.write("With Factual Entailment:")
+    st.write("Textual Entailment Model:\n",labels[0]['label'])
+    st.write("Span Detection Model:\n")
+    st.write(f"{person_name1}::{person_name2}")
+
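As written, the entailment step reloads both checkpoints on every Streamlit rerun and feeds only input_ids to a model that is never moved to the selected device, so the .to(device) call can mismatch on a GPU machine. The sketch below shows one way this step could be wrapped, assuming the same checkpoint and label names; load_nli and classify_entailment are illustrative helper names, not part of the committed app.

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

NLI_MODEL = "MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7"
NLI_LABELS = ["support", "neutral", "refute"]  # label names shown in the demo UI


@st.cache_resource  # load the checkpoint once per process instead of on every rerun
def load_nli(model_name: str = NLI_MODEL):
    device = "cuda" if torch.cuda.is_available() else "cpu"
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
    model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)
    model.eval()
    return tokenizer, model, device


def classify_entailment(premise: str, hypothesis: str) -> dict:
    tokenizer, model, device = load_nli()
    encoded = tokenizer(premise, hypothesis, truncation=True, return_tensors="pt").to(device)
    with torch.no_grad():                     # inference only
        logits = model(**encoded).logits[0]   # passes input_ids and attention_mask together
    probs = torch.softmax(logits, dim=-1).tolist()
    return {label: float(p) for label, p in zip(NLI_LABELS, probs)}

With this shape, the page body reduces to scores = classify_entailment(selected_sentence1, selected_sentence2) followed by highest_label = max(scores, key=scores.get).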
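Similarly, extract_person_names reloads en_core_web_sm for every sentence and indexes person_names[0], which raises IndexError when a sentence contains no PERSON entity. A possible variant, again with illustrative names and assuming the small English model is installed:

import spacy
import streamlit as st


@st.cache_resource  # the spaCy pipeline is loaded once and reused across reruns
def load_spacy():
    return spacy.load("en_core_web_sm")


def person_spans(sentence: str) -> list:
    """Return all PERSON entity texts found in the sentence (possibly empty)."""
    doc = load_spacy()(sentence)
    return [ent.text for ent in doc.ents if ent.label_ == "PERSON"]


def first_person(sentence: str, fallback: str = "(no PERSON span found)") -> str:
    names = person_spans(sentence)
    return names[0] if names else fallback

The span-detection output in the second column could then show first_person(selected_sentence1) and first_person(selected_sentence2) without crashing on person-free inputs.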
|