import gradio as gr
import os
import pickle
import time

import torch
from transformers import AutoTokenizer, AutoModel
from sentence_transformers import SentenceTransformer

import nltk
nltk.download('punkt')  # sentence tokenizer
nltk.download('averaged_perceptron_tagger')  # POS tagger

from input_format import *
from score import *
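
# Overall flow (a summary of the code below):
#   1. Embed the submission abstract and the reviewer's papers (fetched via a
#      Semantic Scholar profile ID) with SPECTER and rank the papers by affinity.
#   2. For the top-ranked papers, compute sentence- and phrase-level relevance
#      against each submission sentence with a GTR sentence encoder.
#   3. Cache intermediate results in info.pkl and drive the Gradio UI from them.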

# load document scoring model
torch.cuda.is_available = lambda: False  # force CPU inference even when a GPU is visible
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
pretrained_model = 'allenai/specter'
tokenizer = AutoTokenizer.from_pretrained(pretrained_model)
doc_model = AutoModel.from_pretrained(pretrained_model) 
doc_model.to(device)

# load sentence model 
sent_model = SentenceTransformer('sentence-transformers/gtr-t5-base')
sent_model.to(device)

def get_similar_paper(
    abstract_text_input, 
    pdf_file_input, 
    author_id_input, 
    num_papers_show=10
):
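    """
    Retrieve the reviewer's papers most similar to the submission abstract,
    compute document-level affinity scores and sentence/phrase-level highlight
    information, cache the results in info.pkl, and update the UI components.
    """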
    print('retrieving similar papers...')
    start = time.time()
    input_sentences = sent_tokenize(abstract_text_input)
    
    # TODO handle pdf file input
    if pdf_file_input is not None:
        raise ValueError('PDF input is not supported yet; please paste the submission abstract instead.')
    else:
        # Get author papers from id
        name, papers = get_text_from_author_id(author_id_input)
    
    # Compute Doc-level affinity scores for the Papers 
    print('computing scores...') 
    # TODO detect duplicate papers?
    titles, abstracts, doc_scores = compute_document_score(
        doc_model, 
        tokenizer,
        abstract_text_input, 
        papers,
        batch=50
    )
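    # Note: titles/abstracts/doc_scores are assumed to come back sorted by
    # descending affinity, since the top-K slice below is used as the ranked list.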
    
    tmp = {
        'titles': titles,
        'abstracts': abstracts,
        'doc_scores': doc_scores
    }
    
    # Select top K choices of papers to show
    titles = titles[:num_papers_show]
    abstracts = abstracts[:num_papers_show]
    doc_scores = doc_scores[:num_papers_show]
    
    display_title = ['[ %0.3f ] %s'%(s, t) for t, s in zip(titles, doc_scores)]
    end = time.time()
    print('paper retrieval complete in [%0.2f] seconds'%(end - start))
    
    print('obtaining highlights...')
    start = time.time()
    num_sents = len(input_sentences)
    for aa, (tt, ab, ds) in enumerate(zip(titles, abstracts, doc_scores)):
        # Compute sent-level and phrase-level affinity scores for each paper
        sent_ids, sent_scores, info = get_highlight_info(
            sent_model, 
            abstract_text_input, 
            ab,
            K=2
        )
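        # info is expected to expose 'all_words' (tokens of the reviewer's abstract)
        # and, per input-sentence index, per-word relevance scores (used below).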

        word_scores = dict()
        
        # different highlights for each input sentence
        for i in range(num_sents):
            word_scores[str(i)] = {
                "original": ab,
                "interpretation": list(zip(info['all_words'], info[i]['scores']))
            }  # format expected by the Gradio Interpretation component

        tmp[display_title[aa]] = {
            'title': tt,
            'abstract': ab,
            'doc_score': ds,
            'source_sentences': input_sentences,
            'highlight': word_scores
        }
    with open('info.pkl', 'wb') as f:  # TODO better ways of saving intermediate results?
        pickle.dump(tmp, f)
    end = time.time()
    print('done in [%0.2f] seconds'%(end - start))
    
    return (
        gr.update(choices=display_title, interactive=True, visible=True), # set of papers
        gr.update(choices=input_sentences, interactive=True, visible=True), # submission sentences
        gr.update(visible=True),    # title row
        gr.update(visible=True),    # abstract row
    )

def update_name(author_id_input):
    # update the name of the author based on the id input
    name, _ = get_text_from_author_id(author_id_input)
    
    return gr.update(value=name)

def change_output_highlight(selected_papers_radio, source_sent_choice):
    # change the output highlight based on the sentence selected from the submission
    fname = 'info.pkl'
    if os.path.exists(fname):
        with open(fname, 'rb') as f:
            tmp = pickle.load(f)
        source_sents = tmp[selected_papers_radio]['source_sentences']
        highlights = tmp[selected_papers_radio]['highlight']
        for i, s in enumerate(source_sents):
            if source_sent_choice == s:
                return highlights[str(i)]
    else:
        return

def change_paper(selected_papers_radio):
    # change the paper to show based on the paper selected
    fname = 'info.pkl'
    if os.path.exists(fname):
        with open(fname, 'rb') as f:
            tmp = pickle.load(f)
        title = tmp[selected_papers_radio]['title']
        abstract = tmp[selected_papers_radio]['abstract']
        aff_score = tmp[selected_papers_radio]['doc_score']
        highlights = tmp[selected_papers_radio]['highlight']
        return title, abstract, aff_score, highlights['0']

    else:
        return

with gr.Blocks() as demo:
    
    ### TEXT DESCRIPTION
    # Text description about the app and disclaimer
    # TODO add instruction video link
    gr.Markdown(
        """
# Paper Matching Helper

This tool helps assess the match between an academic paper (submission) and a potential peer reviewer by presenting information that may be relevant to the user.
Below we describe how to use the tool. Also feel free to check out the [video]() for a more detailed rundown. 

##### Input
- The tool requires two inputs: (1) an academic paper's abstract in plain text, and (2) a potential reviewer's [Semantic Scholar](https://www.semanticscholar.org/) profile link. Once you enter a valid profile link, the reviewer's name will be displayed.
- Once the name is confirmed, press the `What Makes This a Good Match?` button.
##### Similar Papers From the Reviewer
- Based on the input information above, the tool first searches the reviewer's previous publications for similar papers using the [Semantic Scholar API](https://www.semanticscholar.org/product/api).
- It lists the top 10 most similar papers along with an **affinity score** (ranging from 0 to 1) for each, computed using text representations from a [language model](https://github.com/allenai/specter/tree/master/specter).
- You can click on different papers to see their title, abstract, and affinity score in detail.
##### Relevant Parts
- Below the list of papers, we highlight the parts of the selected paper that are relevant to the submission abstract.
- On the left, you will see individual sentences from the submission abstract you can select from.
- On the right, you will see the abstract of the selected paper, with **highlights**.
- **<span style="color:black;background-color:#DB7262;">Red highlights</span>**: sentences from the reviewer's paper abstract with high semantic similarity to the selected sentence.
- **<span style="color:black;background-color:#5296D5;">Blue highlights</span>**: phrases from the reviewer's paper abstract that also appear in the selected sentence.
- To see the relevant parts of a different paper from the reviewer, select that paper from the list above.
-------
        """
    ) 
    
    ### INPUT
    with gr.Row() as input_row:
        with gr.Column():
            abstract_text_input = gr.Textbox(label='Submission Abstract')
        with gr.Column():
            pdf_file_input = gr.File(label='OR upload a submission PDF File')
        with gr.Column():
            with gr.Row():
                author_id_input = gr.Textbox(label='Reviewer Link or ID (Semantic Scholar)')
            with gr.Row():
                name = gr.Textbox(label='Confirm Reviewer Name', interactive=False)
                author_id_input.change(fn=update_name, inputs=author_id_input, outputs=name)
    with gr.Row():
        compute_btn = gr.Button('What Makes This a Good Match?')  
    
    ### PAPER INFORMATION
     
    # show multiple papers in radio check box to select from
    with gr.Row():
        selected_papers_radio = gr.Radio(
            choices=[],  # will be updated with the button click
            visible=False, # also will be updated with the button click
            label='Top Relevant Papers from the Reviewer'
        )
    
    # selected paper information 
    with gr.Row(visible=False) as title_row:
        with gr.Column(scale=3):
            paper_title = gr.Textbox(label='Title', interactive=False)
        with gr.Column(scale=1):
            affinity = gr.Number(label='Affinity', interactive=False, value=0)
    with gr.Row():
        paper_abstract = gr.Textbox(label='Abstract', interactive=False, visible=False)
        
    ## TODO consider surfacing more information to the user directly before offering interaction options.
 
    ### RELEVANT PARTS (HIGHLIGHTS)
    with gr.Row(): 
        with gr.Column(scale=2): # text from submission
            source_sentences = gr.Radio(
                choices=[], 
                visible=False, 
                label='Sentences from Submission Abstract',
            )
        with gr.Column(scale=3): # highlighted text from paper
            highlight = gr.components.Interpretation(paper_abstract) 
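            # The Interpretation component consumes dicts of the form
            # {"original": ..., "interpretation": [(word, score), ...]},
            # which is what change_output_highlight returns (see word_scores above).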
    
    ### EVENT LISTENERS
    
    # retrieve similar papers
    compute_btn.click(
        fn=get_similar_paper,
        inputs=[
            abstract_text_input, 
            pdf_file_input, 
            author_id_input
        ],
        outputs=[
            selected_papers_radio,
            source_sentences,
            title_row,
            paper_abstract
        ]
    )      
    
    # change highlight based on selected sentences from submission
    source_sentences.change(
        fn=change_output_highlight,
        inputs=[
            selected_papers_radio,
            source_sentences
        ],
        outputs=highlight
    )
    
    # change paper to show based on selected papers
    selected_papers_radio.change(
        fn=change_paper,
        inputs=selected_papers_radio,
        outputs=[
            paper_title,
            paper_abstract,
            affinity,
            highlight
        ]
    )
    
    gr.Markdown(
        """
        ---------
        **Disclaimer.** This tool and its output should not serve as the sole justification for confirming a match for the submission. It is intended as a supplementary aid that the user may consult at their discretion; the correctness of its output is not guaranteed. Output quality may improve as the internal models used to compute affinity scores and sentence relevance are updated, which may require independent research. The tool does not compromise reviewers' privacy, as it relies only on their publicly available information (e.g., names and lists of previously published papers).
        """
    )
    
if __name__ == "__main__":
    demo.launch()
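    # Note: Gradio's launch() also accepts share=True, which creates a temporary
    # public link; this can be handy when testing the demo with collaborators.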