Firefly777a committed
Commit cd851c8 · Parent(s): c292733
create app.py
app.py ADDED
@@ -0,0 +1,178 @@
import os
from typing import Any, Callable, List, Optional, Tuple

import nltk
nltk.download('punkt')
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM

# A folder path where the examples are stored
EXAMPLES_FOLDER_NAME = "examples"

# A list of repo names for the huggingface models available for inference
HF_MODELS = ["huggingface/facebook/bart-large-cnn",
             "huggingface/sshleifer/distilbart-xsum-12-6",
             "huggingface/google/pegasus-xsum",
             "huggingface/philschmid/bart-large-cnn-samsum",
             "huggingface/linydub/bart-large-samsum",
             "huggingface/philschmid/distilbart-cnn-12-6-samsum",
             "huggingface/knkarthick/MEETING-SUMMARY-BART-LARGE-XSUM-SAMSUM-DIALOGSUM-AMI",
             ]
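
# The "huggingface/" prefix tells gr.Interface.load (used in get_hf_interfaces below)
# to run each model through the Hugging Face Inference API rather than downloading
# the weights locally.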

################################################################################
# Functions: Document statistics
################################################################################
# Function that uses a huggingface tokenizer to count how many tokens are in a text
def count_tokens(input_text, model_path='sshleifer/distilbart-cnn-12-6'):
    # Load a huggingface tokenizer (reloaded on every call, which is simple but slow)
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    # Tokenize the text
    tokens = tokenizer(input_text)
    # Count the number of tokens
    return len(tokens['input_ids'])

# Function that uses nltk to count sentences in a text
def count_sentences(input_text):
    # Use nltk to split the text into sentences
    sentences = nltk.sent_tokenize(input_text)
    # Return the number of sentences
    return len(sentences)

# Function that counts the number of words in a text
def count_words(input_text):
    # Use nltk to split the text into words
    words = nltk.word_tokenize(input_text)
    # Return the number of words
    return len(words)

# Function that computes a few document statistics such as the number of tokens, sentences, and words
def compute_stats(input_text, models: Optional[List[str]] = None):
    # Count the number of tokens
    num_tokens = count_tokens(input_text)
    # Count the number of sentences
    num_sentences = count_sentences(input_text)
    # Count the number of words
    num_words = count_words(input_text)
    # Return the document statistics formatted as a string
    output_str = "| Tokens: {0} \n| Sentences: {1} \n| Words: {2}".format(num_tokens, num_sentences, num_words) + "\n"
    output_str += "The max number of tokens for the model is: 1024" + "\n"  # 1024 is set manually; none of the models used here has a smaller context window.
    # output_str += "Number of documents splits: 17.5"
    return output_str
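
# Illustrative example of compute_stats output. The exact numbers depend on the
# tokenizer and the nltk models, so treat the values below as a rough sketch:
#
#   >>> print(compute_stats("Hello there. How are you today?"))
#   | Tokens: 10
#   | Sentences: 2
#   | Words: 8
#   The max number of tokens for the model is: 1024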

# # A function to loop through a list of strings
# # returning the last element in the filepath for each string
# def get_file_names(file_paths):
#     # Create a list of file names
#     file_names = []
#     # Loop through the file paths
#     for file_path in file_paths:
#         # Get the last element in the file path
#         file_name = file_path.split('/')[-2:]
#         # Add the file name to the list
#         file_names.append(file_name)
#     # Loop through the file names and append to a string
#     file_names_str = ""
#     for file_name in file_names:
#         breakpoint()
#         file_names_str += file_name[0] + "\n"
#     # Return the list of file names
#     return file_names_str

################################################################################
# Functions: Huggingface Inference
################################################################################

# Function that uses a huggingface pipeline to predict a summary of a text
# input is a text string of a dialog conversation
def predict(dialog_text):
    # Name of the huggingface model used for the summarization pipeline
    hf_model_name = "philschmid/bart-large-cnn-samsum"  # alternative: 'sshleifer/distilbart-cnn-12-6'
    # Load a huggingface summarization pipeline
    model = pipeline('summarization', model=hf_model_name)
    # Build tokenizer_kwargs to set a max length and truncate the data on inference
    tokenizer_kwargs = {'truncation': True, 'max_length': 1024}
    # Use the model to predict a summary of the text
    summary = model(dialog_text, **tokenizer_kwargs)
    # Return the summary prefixed with the model name (the second value is a placeholder output)
    output = f"{hf_model_name} output: {summary[0]['summary_text']}"
    return output, "output2"
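
# Rough usage sketch for predict() (hypothetical dialog text; running it downloads
# the model on first use, so only try it with network access and enough memory for
# a BART-large checkpoint):
#
#   sample_dialog = "Anna: Are we still on for lunch?\nBen: Yes, noon works for me."
#   summary_text, _ = predict(sample_dialog)
#   print(summary_text)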

# Placeholder for a recursive / chunked summarization strategy. It is not wired
# into the app yet (the interface built on it below is never added to the app),
# so it simply raises until it is implemented.
def recursive_predict(dialog_text: str, hf_model_name: Tuple[str]):
    raise NotImplementedError("recursive_predict has not been implemented yet.")
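
# Purely a sketch of one way recursive_predict could eventually work. This is an
# assumption, not the committed implementation, and nothing in the app calls it:
# split the dialog into sentence chunks that fit the 1024-token window, summarize
# each chunk, then summarize the concatenated chunk summaries.
def _sketch_chunked_summary(dialog_text: str, max_tokens: int = 1024) -> str:
    # Load the tokenizer and summarizer once up front (model choice mirrors predict() above)
    model_name = "philschmid/bart-large-cnn-samsum"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    summarizer = pipeline('summarization', model=model_name)
    # Greedily pack sentences into chunks that stay under the token budget
    chunks, current = [], ""
    for sentence in nltk.sent_tokenize(dialog_text):
        candidate = (current + " " + sentence).strip()
        if current and len(tokenizer(candidate)['input_ids']) > max_tokens:
            chunks.append(current)
            current = sentence
        else:
            current = candidate
    if current:
        chunks.append(current)
    # Summarize each chunk, then summarize the concatenation of the partial summaries
    partial_summaries = [summarizer(chunk, truncation=True)[0]['summary_text'] for chunk in chunks]
    return summarizer(" ".join(partial_summaries), truncation=True)[0]['summary_text']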

################################################################################
# Functions: Gradio Utilities
################################################################################
# Function to build examples for gradio app
# Load text files from the examples folder as a list of strings for gradio
def get_examples(folder_path):
    # Create a list of examples
    examples = []
    # Loop through the files in the folder
    for file in os.listdir(folder_path):
        # Load the file
        with open(os.path.join(folder_path, file), 'r') as f:
            # Add the file's text as one example row (the app has a single Textbox input)
            examples.append([f.read()])
    # Return the list of examples
    return examples
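
# The examples/ folder is expected to hold plain-text dialog files; each file's
# full contents becomes one example row in the Gradio UI. A hypothetical layout:
#
#   examples/
#     meeting_notes_1.txt
#     customer_support_chat.txt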

# A function that loops through a list of model paths, creates a gradio interface
# for each model, and adds it to the list of interfaces
# It outputs a list of interfaces
def get_hf_interfaces(models_to_load):
    # Create a list of interfaces
    interfaces = []
    # Loop through the HF_MODELS
    for model in models_to_load:
        # Create a gradio interface with the model name (title/alias are placeholder test values)
        interface = gr.Interface.load(model, title="this is a test TITLE", alias="this is an ALIAS")
        # Add the interface to the list
        interfaces.append(interface)
    # Return the list of interfaces
    return interfaces

################################################################################
# Build Gradio app
################################################################################
# print_details = gr.Interface(
#     fn=lambda x: get_file_names(HF_MODELS),
#     inputs="text",
#     outputs="text",
#     title="Statistics of the document"
# )
# Outputs a string of various document statistics
document_statistics = gr.Interface(
    fn=compute_stats,
    inputs="text",
    outputs="text",
    title="Statistics of the document"
)
# Interface for the (not yet implemented) recursive summarizer; it is created here
# but never added to the app below.
maddie_mixer_summarization = gr.Interface(
    fn=recursive_predict,
    inputs="text",
    outputs="text",
    title="Recursive summarization"
)

# Build Examples to pass along to the gradio app
examples = get_examples(EXAMPLES_FOLDER_NAME)

# Build a list of huggingface interfaces from model paths,
# then add document statistics, and any custom interfaces
all_interfaces = get_hf_interfaces(HF_MODELS)
all_interfaces.insert(0, document_statistics)  # Insert the statistics interface at the beginning
# all_interfaces.insert(0, print_details)
# all_interfaces.append(maddie_mixer_summarization)  # Add the interface for the maddie mixer
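
# gr.Parallel runs every interface in all_interfaces on the same input text and
# shows their outputs side by side, so one submission yields the document
# statistics plus a summary from each model.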
# Build app
app = gr.Parallel(*all_interfaces,
                  title='Text Summarizer (Maddie Custom)',
                  description="Write a summary of a text",
                  examples=examples,
                  inputs=gr.inputs.Textbox(lines=10, label="Text"),
                  )

# Launch
app.launch(inbrowser=True, show_error=True)
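
# To run this Space locally (assuming gradio, transformers, torch and nltk are
# installed, e.g. from a requirements.txt), launching the script directly should
# be enough:
#
#   python app.py
#
# Gradio serves the app on http://127.0.0.1:7860 by default, and inbrowser=True
# asks it to open that page in a browser automatically.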