gagan3012 commited on
Commit
ffc84fc
·
verified ·
1 Parent(s): 6bd3297

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +304 -6
app.py CHANGED
@@ -1,28 +1,326 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import requests
2
  import re
3
  import subprocess
4
  import gradio as gr
 
5
  def fetch_bibtex(arxiv_link):
6
 
7
  print(arxiv_link)
8
  # Extract the arXiv ID from the link
9
  arxiv_id = re.findall(r'arxiv\.org\/(?:abs|pdf)\/([\w\.]+)', arxiv_link)[0].replace(".pdf","")
10
 
11
-
12
  # Use an API or web scraping method to fetch the BibTeX
13
  # For simplicity, here's a placeholder for the BibTeX entry
14
  bibtex_entry = "Placeholder BibTeX for " + arxiv_id
15
 
16
- command = "arxiv2bib"
17
 
18
  print(arxiv_id)
19
 
20
- result = subprocess.run([command, arxiv_id], stdout=subprocess.PIPE, text=True)
 
 
21
 
22
  # Get the output
23
- output = result.stdout
24
-
25
- return output
26
 
27
 
28
  interface = gr.Interface(fn=fetch_bibtex,
 
1
+ #! /usr/bin/env python
2
+ #
3
+ # Copyright (c) 2012, Nathan Grigg
4
+ # All rights reserved.
5
+ #
6
+ # Redistribution and use in source and binary forms, with or without
7
+ # modification, are permitted provided that the following conditions are met:
8
+ #
9
+ # * Redistributions of source code must retain the above copyright
10
+ # notice, this list of conditions and the following disclaimer.
11
+ # * Redistributions in binary form must reproduce the above copyright
12
+ # notice, this list of conditions and the following disclaimer in the
13
+ # documentation and/or other materials provided with the distribution.
14
+ # * Neither the name of this package nor the
15
+ # names of its contributors may be used to endorse or promote products
16
+ # derived from this software without specific prior written permission.
17
+ #
18
+ # This software is provided by the copyright holders and contributors "as
19
+ # is" and any express or implied warranties, including, but not limited
20
+ # to, the implied warranties of merchantability and fitness for a
21
+ # particular purpose are disclaimed. In no event shall Nathan Grigg be
22
+ # liable for any direct, indirect, incidental, special, exemplary, or
23
+ # consequential damages (including, but not limited to, procurement of
24
+ # substitute goods or services; loss of use, data, or profits; or business
25
+ # interruption) however caused and on any theory of liability, whether in
26
+ # contract, strict liability, or tort (including negligence or otherwise)
27
+ # arising in any way out of the use of this software, even if advised of
28
+ # the possibility of such damage.
29
+ #
30
+ # (also known as the New BSD License)
31
+ #
32
+ # Indiscriminate automated downloads from arXiv.org are not permitted.
33
+ # For more information, see http://arxiv.org/help/robots
34
+ #
35
+ # This script usually makes only one call to arxiv.org per run.
36
+ # No caching of any kind is performed.
37
+
38
from __future__ import print_function
from xml.etree import ElementTree
import sys
import re
import os

# This tool predates universal Python 3; keep the historical floor check.
if sys.version_info < (2, 6):
    raise Exception("Python 2.6 or higher required")

# Python 2 / 3 compatibility shims: expose urlencode, urlopen, HTTPError
# and a print_bytes(s) helper that writes raw bytes to stdout.
PY2 = sys.version_info[0] == 2
if PY2:
    from urllib import urlencode
    from urllib2 import HTTPError, urlopen

    def print_bytes(s):
        # Python 2: stdout accepts byte strings directly.
        sys.stdout.write(s)
else:
    from urllib.parse import urlencode
    from urllib.request import urlopen
    from urllib.error import HTTPError

    def print_bytes(s):
        # Python 3: bytes must go through stdout's binary buffer.
        sys.stdout.buffer.write(s)
58
+
59
+
60
# XML namespace prefixes used by the arXiv Atom API responses.
ATOM = '{http://www.w3.org/2005/Atom}'
ARXIV = '{http://arxiv.org/schemas/atom}'

# Patterns recognising well-formed arXiv identifiers.
# NEW_STYLE: post-2007 ids such as "1234.5678" or "1234.56789v2".
NEW_STYLE = re.compile(r'^\d{4}\.\d{4,}(v\d+)?$')
# OLD_STYLE: pre-2007 "<archive>(.<subject-class>)?/YYMMNNN(vN)?" ids.
OLD_STYLE = re.compile(r"""(?x)
^(
math-ph
|hep-ph
|nucl-ex
|nucl-th
|gr-qc
|astro-ph
|hep-lat
|quant-ph
|hep-ex
|hep-th
|stat
(\.(AP|CO|ML|ME|TH))?
|q-bio
(\.(BM|CB|GN|MN|NC|OT|PE|QM|SC|TO))?
|cond-mat
(\.(dis-nn|mes-hall|mtrl-sci|other|soft|stat-mech|str-el|supr-con))?
|cs
(\.(AR|AI|CL|CC|CE|CG|GT|CV|CY|CR|DS|DB|DL|DM|DC|GL|GR|HC|IR|IT|LG|LO|
MS|MA|MM|NI|NE|NA|OS|OH|PF|PL|RO|SE|SD|SC))?
|nlin
(\.(AO|CG|CD|SI|PS))?
|physics
(\.(acc-ph|ao-ph|atom-ph|atm-clus|bio-ph|chem-ph|class-ph|comp-ph|
data-an|flu-dyn|gen-ph|geo-ph|hist-ph|ins-det|med-ph|optics|ed-ph|
soc-ph|plasm-ph|pop-ph|space-ph))?
|math
(\.(AG|AT|AP|CT|CA|CO|AC|CV|DG|DS|FA|GM|GN|GT|GR|HO|IT|KT|LO|MP|MG
|NT|NA|OA|OC|PR|QA|RT|RA|SP|ST|SG))?
)/\d{7}(v\d+)?$""")
97
+
98
+
99
def is_valid(arxiv_id):
    """Return True when *arxiv_id* looks like a valid arXiv identifier.

    Accepts both new-style (NEW_STYLE) and old-style (OLD_STYLE) forms.
    """
    return any(pattern.match(arxiv_id) is not None
               for pattern in (NEW_STYLE, OLD_STYLE))
102
+
103
+
104
class FatalError(Exception):
    """Raised when an error prevents any further processing."""
106
+
107
+
108
class NotFoundError(Exception):
    """Raised when the arXiv API does not know the requested reference."""
110
+
111
+
112
class Reference(object):
    """Represents a single arXiv reference.

    Instantiate using Reference(entry_xml), where entry_xml is an
    ElementTree.Element for one Atom <entry> of an arXiv API response.

    Raises NotFoundError when the entry lacks an id, authors, or title,
    which is how the API reports a missing publication.
    """
    def __init__(self, entry_xml):
        self.xml = entry_xml
        self.url = self._field_text('id')
        self.id = self._id()
        self.authors = self._authors()
        self.title = self._field_text('title')
        if len(self.id) == 0 or len(self.authors) == 0 or len(self.title) == 0:
            raise NotFoundError("No such publication", self.id)
        self.summary = self._field_text('summary')
        self.category = self._category()
        self.year, self.month = self._published()
        self.updated = self._field_text('updated')
        # Strip a trailing version suffix ("v1", "v2", ...) to get the bare
        # id.  The original sliced at rfind('v') unconditionally, which cut
        # the last character off any id carrying no version suffix.
        version_at = self.id.rfind('v')
        self.bare_id = self.id[:version_at] if version_at != -1 else self.id
        self.note = self._field_text('journal_ref', namespace=ARXIV)
        self.doi = self._field_text('doi', namespace=ARXIV)

    def _authors(self):
        """Extract the list of author names from the entry xml."""
        xml_list = self.xml.findall(ATOM + 'author/' + ATOM + 'name')
        return [field.text for field in xml_list]

    def _field_text(self, id, namespace=ATOM):
        """Extract stripped text from an arbitrary xml field; "" if absent."""
        try:
            return self.xml.find(namespace + id).text.strip()
        except AttributeError:
            # find() returned None, or the element carried no text
            return ""

    def _category(self):
        """Return the primary category term; "" if absent."""
        try:
            return self.xml.find(ARXIV + 'primary_category').attrib['term']
        except (AttributeError, KeyError):
            return ""

    def _id(self):
        """Return the arXiv id parsed from the entry's /abs/ URL; "" if absent."""
        id_url = self._field_text('id')
        # str.find returns -1 when '/abs/' is missing; -1 + 5 == 4, so a
        # missing marker on an empty url still yields "" here.
        return id_url[id_url.find('/abs/') + 5:]

    def _published(self):
        """Return the published date as a (year, month-abbreviation) pair."""
        published = self._field_text('published')
        if len(published) < 7:
            return "", ""
        y, m = published[:4], published[5:7]
        try:
            m = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul",
                 "Aug", "Sep", "Oct", "Nov", "Dec"][int(m) - 1]
        except (ValueError, IndexError):
            # non-numeric or out-of-range month: keep the raw two characters
            pass
        return y, m

    def bibtex(self):
        """Return a BibTeX @article entry for this reference.

        The citation key is <first-author-surname><year><first-title-word>,
        all lower-cased.
        """
        # Take the last whitespace-separated token of the first author's
        # name as the surname.  The original indexed token [1], which raised
        # IndexError for single-token author names (e.g. collaborations).
        name_parts = self.authors[0].split()
        surname = name_parts[-1].lower() if name_parts else ""
        self.new_id = surname + self.year + self.title.split(' ')[0].lower()

        lines = ["@article{" + self.new_id]
        for k, v in [("Author", " and ".join(self.authors)),
                     ("Title", self.title),
                     ("Eprint", self.id),
                     ("DOI", self.doi),
                     ("ArchivePrefix", "arXiv"),
                     ("PrimaryClass", self.category),
                     ("Abstract", self.summary),
                     ("Year", self.year),
                     ("Month", self.month),
                     ("Note", self.note),
                     ("Url", self.url),
                     ("File", self.id + ".pdf"),
                     ]:
            if len(v):
                lines.append("%-13s = {%s}" % (k, v))

        return ("," + os.linesep).join(lines) + os.linesep + "}"
197
+
198
+
199
class ReferenceErrorInfo(object):
    """Stand-in reference recording why a lookup failed."""

    def __init__(self, message, id):
        self.message = message
        self.id = id
        self.bare_id = id[:id.rfind('v')]
        # mark it as really old, so a real reference supersedes it if possible
        self.updated = '0'

    def bibtex(self):
        """Return a BibTeX comment explaining the error."""
        fields = {'message': self.message, 'id': self.id}
        return "@comment{%(id)s: %(message)s}" % fields

    def __str__(self):
        fields = {'message': self.message, 'id': self.id}
        return "Error: %(message)s (%(id)s)" % fields
216
+
217
+
218
def arxiv2bib(id_list):
    """Return a list of references, one per element of id_list, in order.

    Each element is either a Reference (lookup succeeded) or a
    ReferenceErrorInfo (invalid id or not returned by the API).
    """
    d = arxiv2bib_dict(id_list)
    # The original left a debug print(d) here and used a bare except;
    # missing keys are the only expected failure, so catch KeyError alone.
    results = []
    for arxiv_id in id_list:
        try:
            results.append(d[arxiv_id])
        except KeyError:
            results.append(ReferenceErrorInfo("Not found", arxiv_id))

    return results
230
+
231
+
232
def arxiv_request(ids):
    """Send one query for *ids* to the arXiv API; return the parsed XML root.

    Raises urllib's HTTPError (and kin) on network failure.
    """
    q = urlencode([
        ("id_list", ",".join(ids)),
        ("max_results", len(ids))
    ])
    # Read then close the HTTP response explicitly; the original leaked it
    # (and printed the query string as a debug leftover).
    response = urlopen("http://export.arxiv.org/api/query?" + q)
    try:
        body = response.read()
    finally:
        response.close()
    # body is bytes, but ElementTree.fromstring decodes
    # to unicode when needed (python2) or string (python3)
    return ElementTree.fromstring(body)
243
+
244
+
245
def arxiv2bib_dict(id_list):
    """Fetch citations for ids in id_list into a dictionary indexed by id.

    Invalid ids map to ReferenceErrorInfo immediately; valid ids are sent
    to the API in a single batched query, retried with offending ids
    removed when the API reports an error entry.

    Raises FatalError when the API cannot be reached or its error
    response cannot be parsed.
    """
    valid_ids = []
    d = {}

    # Partition input into well-formed ids and immediate errors.
    for candidate in id_list:
        if is_valid(candidate):
            valid_ids.append(candidate)
        else:
            d[candidate] = ReferenceErrorInfo("Invalid arXiv identifier",
                                              candidate)

    if len(valid_ids) == 0:
        return d

    # Query the API; when the first entry is titled "Error", drop the id it
    # names (last word of its summary) and retry until the response is clean.
    while True:
        xml = arxiv_request(valid_ids)

        entries = xml.findall(ATOM + "entry")
        try:
            first_title = entries[0].find(ATOM + "title")
        except IndexError:
            # No entries at all: the API did not answer meaningfully.
            # (The original bare except also swallowed KeyboardInterrupt.)
            raise FatalError("Unable to connect to arXiv.org API.")

        if first_title is None or first_title.text.strip() != "Error":
            break

        try:
            bad_id = entries[0].find(ATOM + "summary").text.split()[-1]
            valid_ids.remove(bad_id)
        except (AttributeError, IndexError, ValueError):
            # Missing summary/text, empty summary, or an id we never sent.
            raise FatalError("Unable to parse an error returned by arXiv.org.")

    # Parse each reference and store it under both its full and bare id;
    # on bare-id collisions keep the most recently updated reference.
    for entry in entries:
        try:
            ref = Reference(entry)
        except NotFoundError as error:
            message, missing_id = error.args
            ref = ReferenceErrorInfo(message, missing_id)
        if ref.id:
            d[ref.id] = ref
        if ref.bare_id:
            if ref.bare_id not in d or d[ref.bare_id].updated < ref.updated:
                d[ref.bare_id] = ref

    return d
294
+
295
+
296
  import requests
297
  import re
298
  import subprocess
299
  import gradio as gr
300
+
301
def fetch_bibtex(arxiv_link):
    """Resolve an arXiv abs/pdf link to a BibTeX entry string.

    Returns the BibTeX produced by arxiv2bib, or a human-readable error
    message when no arXiv id can be extracted from the input (the
    original indexed findall()[0] unconditionally and crashed with
    IndexError on any non-arXiv input).
    """
    # NOTE(review): this pattern only captures new-style ids; old-style ids
    # containing a slash (e.g. hep-th/9901001) are truncated — TODO confirm
    # whether those need supporting here.
    matches = re.findall(r'arxiv\.org\/(?:abs|pdf)\/([\w\.]+)', arxiv_link)
    if not matches:
        return "Could not find an arXiv ID in: " + arxiv_link
    # pdf links end in ".pdf"; strip it to get the bare identifier.
    arxiv_id = matches[0].replace(".pdf", "")
    return arxiv2bib([arxiv_id])[0].bibtex()
324
 
325
 
326
  interface = gr.Interface(fn=fetch_bibtex,