Dataset columns (one row per source file):

| column | type | range / distinct values |
|---|---|---|
| id | int64 | 0 to 458k |
| file_name | stringlengths | 4 to 119 |
| file_path | stringlengths | 14 to 227 |
| content | stringlengths | 24 to 9.96M |
| size | int64 | 24 to 9.96M |
| language | stringclasses | 1 value |
| extension | stringclasses | 14 values |
| total_lines | int64 | 1 to 219k |
| avg_line_length | float64 | 2.52 to 4.63M |
| max_line_length | int64 | 5 to 9.91M |
| alphanum_fraction | float64 | 0 to 1 |
| repo_name | stringlengths | 7 to 101 |
| repo_stars | int64 | 100 to 139k |
| repo_forks | int64 | 0 to 26.4k |
| repo_open_issues | int64 | 0 to 2.27k |
| repo_license | stringclasses | 12 values |
| repo_extraction_date | stringclasses | 433 values |
id: 400 | file_name: code.py | file_path: internetarchive_openlibrary/openlibrary/plugins/importapi/code.py
"""Open Library Import API
"""
from typing import Any
from infogami.plugins.api.code import add_hook
from infogami.infobase.client import ClientException
from openlibrary.catalog.utils import get_non_isbn_asin
from openlibrary.plugins.openlibrary.code import can_write
from openlibrary.catalog.marc.marc_binary import MarcBinary, MarcException
from openlibrary.catalog.marc.marc_xml import MarcXml
from openlibrary.catalog.marc.parse import read_edition
from openlibrary.catalog import add_book
from openlibrary.catalog.get_ia import get_marc_record_from_ia, get_from_archive_bulk
from openlibrary import accounts, records
from openlibrary.core import ia
from openlibrary.plugins.upstream.utils import (
LanguageNoMatchError,
get_abbrev_from_full_lang_name,
LanguageMultipleMatchError,
get_location_and_publisher,
safeget,
)
from openlibrary.utils.isbn import get_isbn_10s_and_13s, to_isbn_13
import web
import base64
import json
import re
from pydantic import ValidationError
from openlibrary.plugins.importapi import (
import_edition_builder,
import_opds,
import_rdf,
)
from lxml import etree
import logging
import urllib
import lxml.etree
MARC_LENGTH_POS = 5
logger = logging.getLogger('openlibrary.importapi')
class DataError(ValueError):
pass
class BookImportError(Exception):
def __init__(self, error_code, error='Invalid item', **kwargs):
self.error_code = error_code
self.error = error
self.kwargs = kwargs
def parse_meta_headers(edition_builder):
# parse S3-style http headers
# we don't yet support augmenting complex fields like author or language
# string_keys = ['title', 'title_prefix', 'description']
re_meta = re.compile(r'HTTP_X_ARCHIVE_META(?:\d{2})?_(.*)')
for k, v in web.ctx.env.items():
m = re_meta.match(k)
if m:
meta_key = m.group(1).lower()
edition_builder.add(meta_key, v, restrict_keys=False)
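# Illustrative sketch (not part of the upstream flow) of the convention that
# parse_meta_headers() handles: an S3-style request header such as
# "x-archive-meta01-title: Foo" appears in the WSGI environ as
# HTTP_X_ARCHIVE_META01_TITLE, and the captured suffix, lowercased, becomes the
# edition field name. The sample environ below is made up.
def _demo_meta_headers() -> dict[str, str]:
    sample_env = {
        'HTTP_X_ARCHIVE_META01_TITLE': 'A Sample Title',
        'HTTP_X_ARCHIVE_META_PUBLISHER': 'Sample Press',
        'HTTP_USER_AGENT': 'ignored',  # keys that do not match are skipped
    }
    re_meta = re.compile(r'HTTP_X_ARCHIVE_META(?:\d{2})?_(.*)')
    # -> {'title': 'A Sample Title', 'publisher': 'Sample Press'}
    return {
        m.group(1).lower(): v
        for k, v in sample_env.items()
        if (m := re_meta.match(k))
    }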
def parse_data(data: bytes) -> tuple[dict | None, str | None]:
"""
Takes POSTed data, determines the format, and returns an Edition record
suitable for adding to OL.
:param bytes data: Raw data
:return: (Edition record, format (rdf|opds|marcxml|json|marc)) or (None, None)
"""
data = data.strip()
if b'<?xml' in data[:10]:
root = etree.fromstring(
data, parser=lxml.etree.XMLParser(resolve_entities=False)
)
if root.tag == '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF':
edition_builder = import_rdf.parse(root)
format = 'rdf'
elif root.tag == '{http://www.w3.org/2005/Atom}entry':
edition_builder = import_opds.parse(root)
format = 'opds'
elif root.tag == '{http://www.loc.gov/MARC21/slim}record':
if root.tag == '{http://www.loc.gov/MARC21/slim}collection':
root = root[0]
rec = MarcXml(root)
edition = read_edition(rec)
edition_builder = import_edition_builder.import_edition_builder(
init_dict=edition
)
format = 'marcxml'
else:
raise DataError('unrecognized-XML-format')
elif data.startswith(b'{') and data.endswith(b'}'):
obj = json.loads(data)
# Only look to the import_item table if a record is incomplete.
# This is the minimum to achieve a complete record. See:
# https://github.com/internetarchive/openlibrary/issues/9440
# import_validator().validate() requires more fields.
minimum_complete_fields = ["title", "authors", "publish_date"]
is_complete = all(obj.get(field) for field in minimum_complete_fields)
if not is_complete:
isbn_10 = safeget(lambda: obj.get("isbn_10", [])[0])
isbn_13 = safeget(lambda: obj.get("isbn_13", [])[0])
identifier = to_isbn_13(isbn_13 or isbn_10 or "")
if not identifier:
identifier = get_non_isbn_asin(rec=obj)
if identifier:
supplement_rec_with_import_item_metadata(rec=obj, identifier=identifier)
edition_builder = import_edition_builder.import_edition_builder(init_dict=obj)
format = 'json'
elif data[:MARC_LENGTH_POS].isdigit():
# Marc Binary
if len(data) < MARC_LENGTH_POS or len(data) != int(data[:MARC_LENGTH_POS]):
raise DataError('no-marc-record')
record = MarcBinary(data)
edition = read_edition(record)
edition_builder = import_edition_builder.import_edition_builder(
init_dict=edition
)
format = 'marc'
else:
raise DataError('unrecognised-import-format')
parse_meta_headers(edition_builder)
return edition_builder.get_dict(), format
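# A simplified, standalone restatement of the format sniffing used by
# parse_data() above, for illustration only; it mirrors the branching (XML
# declaration, JSON object, MARC binary length prefix) without building an
# edition record.
def _sniff_import_format(data: bytes) -> str:
    data = data.strip()
    if b'<?xml' in data[:10]:
        return 'xml (rdf, opds, or marcxml, depending on the root element)'
    if data.startswith(b'{') and data.endswith(b'}'):
        return 'json'
    if data[:MARC_LENGTH_POS].isdigit():
        return 'marc (binary, length-prefixed)'
    return 'unrecognised'
# e.g. _sniff_import_format(b'{"title": "A Book"}') == 'json'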
def supplement_rec_with_import_item_metadata(
rec: dict[str, Any], identifier: str
) -> None:
"""
Queries for a staged/pending row in `import_item` by identifier, and if found,
uses select metadata to supplement empty fields in `rec`.
Changes `rec` in place.
"""
from openlibrary.core.imports import ImportItem # Evade circular import.
import_fields = [
'authors',
'description',
'isbn_10',
'isbn_13',
'number_of_pages',
'physical_format',
'publish_date',
'publishers',
'title',
'source_records',
]
if import_item := ImportItem.find_staged_or_pending([identifier]).first():
import_item_metadata = json.loads(import_item.get("data", '{}'))
for field in import_fields:
if field == "source_records":
rec[field].extend(import_item_metadata.get(field))
if not rec.get(field) and (staged_field := import_item_metadata.get(field)):
rec[field] = staged_field
class importapi:
"""/api/import endpoint for general data formats."""
def error(self, error_code, error='Invalid item', **kwargs):
content = {'success': False, 'error_code': error_code, 'error': error}
content.update(kwargs)
raise web.HTTPError('400 Bad Request', data=json.dumps(content))
def POST(self):
web.header('Content-Type', 'application/json')
if not can_write():
raise web.HTTPError('403 Forbidden')
data = web.data()
try:
edition, _ = parse_data(data)
except (DataError, json.JSONDecodeError) as e:
return self.error(str(e), 'Failed to parse import data')
except ValidationError as e:
return self.error('invalid-value', str(e).replace('\n', ': '))
if not edition:
return self.error('unknown-error', 'Failed to parse import data')
try:
reply = add_book.load(edition)
# TODO: If any records have been created, return a 201, otherwise 200
return json.dumps(reply)
except add_book.RequiredField as e:
return self.error('missing-required-field', str(e))
except ClientException as e:
return self.error('bad-request', **json.loads(e.json))
except TypeError as e:
return self.error('type-error', repr(e))
except Exception as e:
return self.error('unhandled-exception', repr(e))
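# A hypothetical client-side sketch of calling the /api/import endpoint defined
# above. The host and record contents are placeholders, and the account-based
# authentication the endpoint requires (can_write) is omitted here.
def _demo_import_request():
    import requests  # assumed to be available in the client environment

    record = {
        "title": "A Sample Book",
        "authors": [{"name": "Jane Doe"}],
        "publishers": ["Sample Press"],
        "publish_date": "1999",
        "source_records": ["ia:samplebook1999"],
    }
    resp = requests.post(
        "https://openlibrary.org/api/import",  # placeholder host
        data=json.dumps(record),
        headers={"Content-Type": "application/json"},
        # ...plus whatever authentication the deployment requires
    )
    return resp.json()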
def raise_non_book_marc(marc_record, **kwargs):
details = 'Item rejected'
# Is the item a serial instead of a monograph?
marc_leaders = marc_record.leader()
if marc_leaders[7] == 's':
raise BookImportError('item-is-serial', details, **kwargs)
# insider note: follows Archive.org's approach of
# Item::isMARCXMLforMonograph() which excludes non-books
if not (marc_leaders[7] == 'm' and marc_leaders[6] == 'a'):
raise BookImportError('item-not-book', details, **kwargs)
class ia_importapi(importapi):
"""/api/import/ia import endpoint for Archive.org items, requiring an ocaid identifier rather than direct data upload.
Request Format:
POST /api/import/ia
Content-Type: application/json
Authorization: Basic base64-of-username:password
{
"identifier": "<ocaid>",
"require_marc": "true",
"bulk_marc": "false"
}
"""
@classmethod
def ia_import(
cls, identifier: str, require_marc: bool = True, force_import: bool = False
) -> str:
"""
Performs logic to fetch archive.org item + metadata,
produces a data dict, then loads into Open Library
:param str identifier: archive.org ocaid
:param bool require_marc: require archive.org item have MARC record?
:param bool force_import: force import of this record
:returns: the data of the imported book or raises BookImportError
"""
from_marc_record = False
# Check 1 - Is this a valid Archive.org item?
metadata = ia.get_metadata(identifier)
if not metadata:
raise BookImportError('invalid-ia-identifier', f'{identifier} not found')
# Check 2 - Can the item be loaded into Open Library?
status = ia.get_item_status(identifier, metadata)
if status != 'ok' and not force_import:
raise BookImportError(status, f'Prohibited Item {identifier}')
# Check 3 - Does this item have a MARC record?
marc_record = get_marc_record_from_ia(
identifier=identifier, ia_metadata=metadata
)
if require_marc and not marc_record:
raise BookImportError('no-marc-record')
if marc_record:
from_marc_record = True
if not force_import:
raise_non_book_marc(marc_record)
try:
edition_data = read_edition(marc_record)
except MarcException as e:
logger.error(f'failed to read from MARC record {identifier}: {e}')
raise BookImportError('invalid-marc-record')
else:
try:
edition_data = cls.get_ia_record(metadata)
except KeyError:
raise BookImportError('invalid-ia-metadata')
# Add IA specific fields: ocaid, source_records, and cover
edition_data = cls.populate_edition_data(edition_data, identifier)
return cls.load_book(edition_data, from_marc_record)
def POST(self):
web.header('Content-Type', 'application/json')
if not can_write():
raise web.HTTPError('403 Forbidden')
i = web.input()
require_marc = i.get('require_marc') != 'false'
force_import = i.get('force_import') == 'true'
bulk_marc = i.get('bulk_marc') == 'true'
if 'identifier' not in i:
return self.error('bad-input', 'identifier not provided')
identifier = i.identifier
# First check whether this is a non-book, bulk-marc item
if bulk_marc:
# Get binary MARC by identifier = ocaid/filename:offset:length
re_bulk_identifier = re.compile(r"([^/]*)/([^:]*):(\d*):(\d*)")
try:
ocaid, filename, offset, length = re_bulk_identifier.match(
identifier
).groups()
data, next_offset, next_length = get_from_archive_bulk(identifier)
next_data = {
'next_record_offset': next_offset,
'next_record_length': next_length,
}
rec = MarcBinary(data)
edition = read_edition(rec)
except MarcException as e:
details = f'{identifier}: {e}'
logger.error(f'failed to read from bulk MARC record {details}')
return self.error('invalid-marc-record', details, **next_data)
actual_length = int(rec.leader()[:MARC_LENGTH_POS])
edition['source_records'] = 'marc:%s/%s:%s:%d' % (
ocaid,
filename,
offset,
actual_length,
)
local_id = i.get('local_id')
if local_id:
local_id_type = web.ctx.site.get('/local_ids/' + local_id)
prefix = local_id_type.urn_prefix
force_import = True
id_field, id_subfield = local_id_type.id_location.split('$')
def get_subfield(field, id_subfield):
if isinstance(field[1], str):
return field[1]
subfields = field[1].get_subfield_values(id_subfield)
return subfields[0] if subfields else None
ids = [
get_subfield(f, id_subfield)
for f in rec.read_fields([id_field])
if f and get_subfield(f, id_subfield)
]
edition['local_id'] = [f'urn:{prefix}:{id_}' for id_ in ids]
# Don't add the book if the MARC record is a non-monograph item,
# unless it is a scanning partner record and/or force_import is set.
if not force_import:
try:
raise_non_book_marc(rec, **next_data)
except BookImportError as e:
return self.error(e.error_code, e.error, **e.kwargs)
result = add_book.load(edition)
# Add next_data to the response as location of next record:
result.update(next_data)
return json.dumps(result)
try:
return self.ia_import(
identifier, require_marc=require_marc, force_import=force_import
)
except BookImportError as e:
return self.error(e.error_code, e.error, **e.kwargs)
@staticmethod
def get_ia_record(metadata: dict) -> dict:
"""
Generate Edition record from Archive.org metadata, in lieu of a MARC record
:param dict metadata: metadata retrieved from metadata API
:return: Edition record
"""
authors = [{'name': name} for name in metadata.get('creator', '').split(';')]
description = metadata.get('description')
unparsed_isbns = metadata.get('isbn')
language = metadata.get('language')
lccn = metadata.get('lccn')
subject = metadata.get('subject')
oclc = metadata.get('oclc-id')
imagecount = metadata.get('imagecount')
unparsed_publishers = metadata.get('publisher')
d = {
'title': metadata.get('title', ''),
'authors': authors,
'publish_date': metadata.get('date'),
}
if description:
d['description'] = description
if unparsed_isbns:
isbn_10, isbn_13 = get_isbn_10s_and_13s(unparsed_isbns)
if isbn_10:
d['isbn_10'] = isbn_10
if isbn_13:
d['isbn_13'] = isbn_13
if language:
if len(language) == 3:
d['languages'] = [language]
# Try converting the name of a language to its three character code.
# E.g. English -> eng.
else:
try:
if lang_code := get_abbrev_from_full_lang_name(language):
d['languages'] = [lang_code]
except LanguageMultipleMatchError as e:
logger.warning(
"Multiple language matches for %s. No edition language set for %s.",
e.language_name,
metadata.get("identifier"),
)
except LanguageNoMatchError as e:
logger.warning(
"No language matches for %s. No edition language set for %s.",
e.language_name,
metadata.get("identifier"),
)
if lccn:
d['lccn'] = [lccn]
if subject:
d['subjects'] = subject
if oclc:
d['oclc'] = oclc
# Ensure no negative page number counts.
if imagecount:
if int(imagecount) - 4 >= 1:
d['number_of_pages'] = int(imagecount) - 4
else:
d['number_of_pages'] = int(imagecount)
if unparsed_publishers:
publish_places, publishers = get_location_and_publisher(unparsed_publishers)
if publish_places:
d['publish_places'] = publish_places
if publishers:
d['publishers'] = publishers
return d
@staticmethod
def load_book(edition_data: dict, from_marc_record: bool = False) -> str:
"""
Takes a well constructed full Edition record and sends it to add_book
to check whether it is already in the system, and to add it, and a Work
if they do not already exist.
:param dict edition_data: Edition record
:param bool from_marc_record: whether the record is based on a MARC record.
"""
result = add_book.load(edition_data, from_marc_record=from_marc_record)
return json.dumps(result)
@staticmethod
def populate_edition_data(edition: dict, identifier: str) -> dict:
"""
Adds archive.org specific fields to a generic Edition record, based on identifier.
:param dict edition: Edition record
:param str identifier: ocaid
:return: Edition record
"""
edition['ocaid'] = identifier
edition['source_records'] = 'ia:' + identifier
edition['cover'] = ia.get_cover_url(identifier)
return edition
@staticmethod
def find_edition(identifier: str) -> str | None:
"""
Checks if the given identifier has already been imported into OL.
:param str identifier: ocaid
:return: OL item key of matching item: '/books/OL..M' or None if no item matches
"""
# match ocaid
q = {"type": "/type/edition", "ocaid": identifier}
keys = web.ctx.site.things(q)
if keys:
return keys[0]
# Match source_records
# When there are multiple scans for the same edition, only source_records is updated.
q = {"type": "/type/edition", "source_records": "ia:" + identifier}
keys = web.ctx.site.things(q)
if keys:
return keys[0]
return None
@staticmethod
def status_matched(key):
reply = {'success': True, 'edition': {'key': key, 'status': 'matched'}}
return json.dumps(reply)
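# A hypothetical client-side sketch for /api/import/ia, following the request
# format documented in the ia_importapi docstring above. Host, credentials, and
# ocaid are placeholders; the parameters are sent as form fields, which
# web.input() reads.
def _demo_ia_import_request():
    import requests  # assumed to be available in the client environment

    resp = requests.post(
        "https://openlibrary.org/api/import/ia",  # placeholder host
        data={
            "identifier": "<ocaid>",
            "require_marc": "true",
            "bulk_marc": "false",
        },
        auth=("<username>", "<password>"),  # Basic auth, per the docstring
    )
    return resp.json()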
class ils_search:
"""Search and Import API to use in Koha.
When a new catalog record is added to Koha, it makes a request with all
the metadata to find whether OL has a matching record. OL returns the OLID of
the matching record if one exists; if not, it creates a new record and returns
the new OLID.
Request Format:
POST /api/ils_search
Content-Type: application/json
Authorization: Basic base64-of-username:password
{
'title': '',
'authors': ['...','...',...]
'publisher': '...',
'publish_year': '...',
'isbn': [...],
'lccn': [...],
}
Response Format:
{
'status': 'found | notfound | created',
'olid': 'OL12345M',
'key': '/books/OL12345M',
'cover': {
'small': 'https://covers.openlibrary.org/b/12345-S.jpg',
'medium': 'https://covers.openlibrary.org/b/12345-M.jpg',
'large': 'https://covers.openlibrary.org/b/12345-L.jpg',
},
...
}
When authorization header is not provided and match is not found,
status='notfound' is returned instead of creating a new record.
"""
def POST(self):
try:
rawdata = json.loads(web.data())
except ValueError as e:
raise self.error("Unparsable JSON input \n %s" % web.data())
# step 1: prepare the data
data = self.prepare_input_data(rawdata)
# step 2: search
matches = self.search(data)
# step 3: Check auth
try:
auth_header = http_basic_auth()
self.login(auth_header)
except accounts.ClientException:
raise self.auth_failed("Invalid credentials")
# step 4: create if logged in
keys = []
if auth_header:
keys = self.create(matches)
# step 4: format the result
d = self.format_result(matches, auth_header, keys)
return json.dumps(d)
def error(self, reason):
d = json.dumps({"status": "error", "reason": reason})
return web.HTTPError("400 Bad Request", {"Content-Type": "application/json"}, d)
def auth_failed(self, reason):
d = json.dumps({"status": "error", "reason": reason})
return web.HTTPError(
"401 Authorization Required",
{
"WWW-Authenticate": 'Basic realm="http://openlibrary.org"',
"Content-Type": "application/json",
},
d,
)
def login(self, auth_str):
if not auth_str:
return
auth_str = auth_str.replace("Basic ", "")
try:
auth_str = base64.decodebytes(bytes(auth_str, 'utf-8'))
auth_str = auth_str.decode('utf-8')
except AttributeError:
auth_str = base64.decodestring(auth_str)
username, password = auth_str.split(':')
accounts.login(username, password)
def prepare_input_data(self, rawdata):
data = dict(rawdata)
identifiers = rawdata.get('identifiers', {})
# TODO: Massage single strings here into lists. e.g. {"google" : "123"} into {"google" : ["123"]}.
for i in ["oclc_numbers", "lccn", "ocaid", "isbn"]:
if i in data:
val = data.pop(i)
if not isinstance(val, list):
val = [val]
identifiers[i] = val
data['identifiers'] = identifiers
if "authors" in data:
authors = data.pop("authors")
data['authors'] = [{"name": i} for i in authors]
return {"doc": data}
def search(self, params):
matches = records.search(params)
return matches
def create(self, items):
return records.create(items)
def format_result(self, matches, authenticated, keys):
doc = matches.pop("doc", {})
if doc and doc['key']:
doc = web.ctx.site.get(doc['key']).dict()
# Sanitise for only information that we want to return.
for i in [
"created",
"last_modified",
"latest_revision",
"type",
"revision",
]:
doc.pop(i)
# Main status information
d = {
'status': 'found',
'key': doc['key'],
'olid': doc['key'].split("/")[-1],
}
# Cover information
covers = doc.get('covers') or []
if covers and covers[0] > 0:
d['cover'] = {
"small": "https://covers.openlibrary.org/b/id/%s-S.jpg" % covers[0],
"medium": "https://covers.openlibrary.org/b/id/%s-M.jpg"
% covers[0],
"large": "https://covers.openlibrary.org/b/id/%s-L.jpg" % covers[0],
}
# Pull out identifiers to top level
identifiers = doc.pop("identifiers", {})
for i in identifiers:
d[i] = identifiers[i]
d.update(doc)
else:
if authenticated:
d = {'status': 'created', 'works': [], 'authors': [], 'editions': []}
for i in keys:
if i.startswith('/books'):
d['editions'].append(i)
if i.startswith('/works'):
d['works'].append(i)
if i.startswith('/authors'):
d['authors'].append(i)
else:
d = {'status': 'notfound'}
return d
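# A hypothetical sketch of a Koha-style client calling /api/ils_search as
# described in the ils_search docstring above. Host, credentials, and book data
# are placeholders; without valid credentials a miss returns status='notfound'
# instead of creating a new record.
def _demo_ils_search_request():
    import requests  # assumed to be available in the client environment

    payload = {
        "title": "A Sample Book",
        "authors": ["Jane Doe"],
        "publisher": "Sample Press",
        "publish_year": "1999",
        "isbn": ["9780000000002"],
    }
    resp = requests.post(
        "https://openlibrary.org/api/ils_search",  # placeholder host
        data=json.dumps(payload),
        headers={"Content-Type": "application/json"},
        auth=("<username>", "<password>"),  # Basic auth, per the docstring
    )
    return resp.json()  # e.g. {'status': 'found', 'olid': 'OL12345M', ...}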
def http_basic_auth():
auth = web.ctx.env.get('HTTP_AUTHORIZATION')
return auth and web.lstrips(auth, "")
class ils_cover_upload:
"""Cover Upload API for Koha.
Request Format: Following input fields with enctype multipart/form-data
* olid: Key of the edition. e.g. OL12345M
* file: image file
* url: URL to image
* redirect_url: URL to redirect after upload
Other headers:
Authorization: Basic base64-of-username:password
One of file or url can be provided. If the former, the image is
directly used. If the latter, the image at the URL is fetched and
used.
On Success:
If redirect URL specified,
redirect to redirect_url?status=ok
else
return
{
"status" : "ok"
}
On Failure:
If redirect URL specified,
redirect to redirect_url?status=error&reason=bad+olid
else
return
{
"status" : "error",
"reason" : "bad olid"
}
"""
def error(self, i, reason):
if i.redirect_url:
url = self.build_url(i.redirect_url, status="error", reason=reason)
return web.seeother(url)
else:
d = json.dumps({"status": "error", "reason": reason})
return web.HTTPError(
"400 Bad Request", {"Content-Type": "application/json"}, d
)
def success(self, i):
if i.redirect_url:
url = self.build_url(i.redirect_url, status="ok")
return web.seeother(url)
else:
d = json.dumps({"status": "ok"})
return web.ok(d, {"Content-type": "application/json"})
def auth_failed(self, reason):
d = json.dumps({"status": "error", "reason": reason})
return web.HTTPError(
"401 Authorization Required",
{
"WWW-Authenticate": 'Basic realm="http://openlibrary.org"',
"Content-Type": "application/json",
},
d,
)
def build_url(self, url, **params):
if '?' in url:
return url + "&" + urllib.parse.urlencode(params)
else:
return url + "?" + urllib.parse.urlencode(params)
def login(self, auth_str):
if not auth_str:
raise self.auth_failed("No credentials provided")
auth_str = auth_str.replace("Basic ", "")
try:
auth_str = base64.decodebytes(bytes(auth_str, 'utf-8'))
auth_str = auth_str.decode('utf-8')
except AttributeError:
auth_str = base64.decodestring(auth_str)
username, password = auth_str.split(':')
accounts.login(username, password)
def POST(self):
i = web.input(olid=None, file={}, redirect_url=None, url="")
if not i.olid:
self.error(i, "olid missing")
key = '/books/' + i.olid
book = web.ctx.site.get(key)
if not book:
raise self.error(i, "bad olid")
try:
auth_header = http_basic_auth()
self.login(auth_header)
except accounts.ClientException:
raise self.auth_failed("Invalid credentials")
from openlibrary.plugins.upstream import covers
add_cover = covers.add_cover()
data = add_cover.upload(key, i)
if coverid := data.get('id'):
add_cover.save(book, coverid)
raise self.success(i)
else:
raise self.error(i, "upload failed")
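# A hypothetical sketch of a cover upload to /api/ils_cover_upload using the
# multipart/form-data fields described in the ils_cover_upload docstring above.
# Host, credentials, olid, and file path are placeholders.
def _demo_ils_cover_upload():
    import requests  # assumed to be available in the client environment

    with open("/path/to/cover.jpg", "rb") as f:  # placeholder image file
        resp = requests.post(
            "https://openlibrary.org/api/ils_cover_upload",  # placeholder host
            data={"olid": "OL12345M"},
            files={"file": f},
            auth=("<username>", "<password>"),  # Basic auth, per the docstring
        )
    return resp.json()  # {"status": "ok"} on success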
add_hook("import", importapi)
add_hook("ils_search", ils_search)
add_hook("ils_cover_upload", ils_cover_upload)
add_hook("import/ia", ia_importapi)
size: 28,010 | language: Python | extension: .py | total_lines: 672 | avg_line_length: 30.941964 | max_line_length: 122 | alphanum_fraction: 0.574285 | repo_name: internetarchive/openlibrary | repo_stars: 5,078 | repo_forks: 1,311 | repo_open_issues: 956 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:07:13 PM (Europe/Amsterdam)
id: 401 | file_name: metaxml_to_json.py | file_path: internetarchive_openlibrary/openlibrary/plugins/importapi/metaxml_to_json.py
#!/usr/bin/env python
"""
This example uses the import_edition_builder class to convert
an IA meta.xml into a json object that the Import API can consume.
usage:
> python metaxml_to_json.py romanceonthreele00hafnrich_meta.xml
{
"publishers": ["New York : Bloomsbury"],
"description": "Includes bibliographical references (p. [243]-247) and index",
"title": "A romance on three legs : Glenn Gould's obsessive quest for the perfect piano",
"isbn_10": ["1596915250"],
"isbn_13": ["9781596915251"],
"languages": ["eng"],
"subjects": [
"Lending library",
"protected DAISY",
"Accessible book",
"Gould, Glenn, 1932-1982",
"Steinway piano",
],
"publish_date": "2009",
"authors": [
{
"entity_type": "person",
"name": "Hafner, Katie",
"personal_name": "Hafner, Katie",
}
],
"ocaid": "romanceonthreele00hafnrich",
}
"""
from openlibrary.plugins.importapi.import_edition_builder import import_edition_builder
import lxml.etree
def parse_collection(collection):
collection_dict = {
'printdisabled': ['Protected DAISY', 'Accessible book'],
'lendinglibrary': ['Lending library', 'Protected DAISY', 'Accessible book'],
'inlibrary': ['In library'],
}
return collection_dict.get(collection, [])
def parse_isbn(isbn):
if len(isbn) == 13:
return ('isbn_13', [isbn])
elif len(isbn) == 10:
return ('isbn_10', [isbn])
else:
return ('isbn', [])
def metaxml_to_edition_dict(root):
ia_to_ol_map = {
'identifier': 'ocaid',
'creator': 'author',
'date': 'publish_date',
'boxid': 'ia_box_id',
}
edition_builder = import_edition_builder()
for element in root.iter():
# print("got %s -> %s" % (element.tag, element.text))
if element.tag == 'collection':
key = 'subject'
values = parse_collection(element.text)
elif element.tag == 'isbn':
key, values = parse_isbn(element.text)
elif element.tag in ia_to_ol_map:
key = ia_to_ol_map[element.tag]
values = [element.text]
else:
key = element.tag
values = [element.text]
for value in values:
if key.startswith('ia_'):
edition_builder.add(key, value, restrict_keys=False)
else:
edition_builder.add(key, value)
return edition_builder.get_dict()
if __name__ == '__main__':
from lxml import etree
import sys
assert len(sys.argv) == 2
tree = etree.parse(sys.argv[1], parser=lxml.etree.XMLParser(resolve_entities=False))
root = tree.getroot()
edition_dict = metaxml_to_edition_dict(root)
import json
json_str = json.dumps(edition_dict)
print(json_str)
size: 2,868 | language: Python | extension: .py | total_lines: 84 | avg_line_length: 27.071429 | max_line_length: 93 | alphanum_fraction: 0.604634 | repo_name: internetarchive/openlibrary | repo_stars: 5,078 | repo_forks: 1,311 | repo_open_issues: 956 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:07:13 PM (Europe/Amsterdam)
id: 402 | file_name: import_opds.py | file_path: internetarchive_openlibrary/openlibrary/plugins/importapi/import_opds.py
"""
OL Import API OPDS parser
"""
from openlibrary.plugins.importapi import import_edition_builder
def parse_string(e, key):
return (key, e.text)
def parse_author(e, key):
name = e.find('{http://www.w3.org/2005/Atom}name')
return (key, name.text)
def parse_category(e, key):
return (key, e.get('label'))
def parse_identifier(e, key):
val = e.text
isbn_str = 'urn:ISBN:'
ia_str = 'http://www.archive.org/details/'
if val.startswith(isbn_str):
isbn = val[len(isbn_str) :]
if len(isbn) == 10:
return ('isbn_10', isbn)
elif len(isbn) == 13:
return ('isbn_13', isbn)
elif val.startswith(ia_str):
return ('ocaid', val[len(ia_str) :])
else:
return (None, None)
parser_map = {
'{http://www.w3.org/2005/Atom}title': ['title', parse_string],
'{http://www.w3.org/2005/Atom}author': ['author', parse_author],
'{http://purl.org/dc/terms/}publisher': ['publisher', parse_string],
'{http://purl.org/dc/terms/}issued': ['publish_date', parse_string],
'{http://purl.org/dc/terms/}extent': ['pagination', parse_string],
'{http://www.w3.org/2005/Atom}category': ['subject', parse_category],
'{http://purl.org/dc/terms/}language': ['language', parse_string],
'{http://www.w3.org/2005/Atom}summary': ['description', parse_string],
'{http://purl.org/ontology/bibo/}lccn': ['lccn', parse_string],
'{http://purl.org/ontology/bibo/}oclcnum': ['oclc_number', parse_string],
'{http://purl.org/dc/terms/}identifier': ['identifier', parse_identifier],
'{http://RDVocab.info/elements/}placeOfPublication': [
'publish_place',
parse_string,
],
}
# TODO: {http://purl.org/dc/terms/}identifier (could be ocaid)
# TODO: {http://www.w3.org/2005/Atom}link (could be cover image)
def parse(root):
edition_builder = import_edition_builder.import_edition_builder()
for e in root:
if isinstance(e.tag, str) and e.tag in parser_map:
key = parser_map[e.tag][0]
(new_key, val) = parser_map[e.tag][1](e, key)
if new_key:
edition_builder.add(new_key, val)
return edition_builder
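# A small, self-contained illustration of how parse() dispatches on namespaced
# element tags via parser_map. The Atom entry below is a made-up example; only
# lxml is needed to run it.
def _demo_opds_dispatch():
    from lxml import etree

    entry = etree.fromstring(
        b'<entry xmlns="http://www.w3.org/2005/Atom" '
        b'xmlns:dcterms="http://purl.org/dc/terms/">'
        b'<title>A Sample Book</title>'
        b'<dcterms:publisher>Sample Press</dcterms:publisher>'
        b'</entry>'
    )
    extracted = {}
    for e in entry:
        if isinstance(e.tag, str) and e.tag in parser_map:
            key, parse_fn = parser_map[e.tag]
            new_key, val = parse_fn(e, key)
            if new_key:
                extracted[new_key] = val
    return extracted  # {'title': 'A Sample Book', 'publisher': 'Sample Press'}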
size: 2,196 | language: Python | extension: .py | total_lines: 53 | avg_line_length: 35.54717 | max_line_length: 78 | alphanum_fraction: 0.62453 | repo_name: internetarchive/openlibrary | repo_stars: 5,078 | repo_forks: 1,311 | repo_open_issues: 956 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:07:13 PM (Europe/Amsterdam)
id: 403 | file_name: import_edition_builder.py | file_path: internetarchive_openlibrary/openlibrary/plugins/importapi/import_edition_builder.py
"""
Create a edition dict that can be passed to catalog.add_book.load()
This class encapsulates the logic of creating edition dicts.
You can use add(key) to add a new key to the edition dict. This class
will take care of whether it should be a string or a list. For example,
you can use add('subject') to add an entry to the 'subjects' list.
This class also takes care of creating complex types, such as authors.
For example, you can add an author using add('author', 'Mark Twain') and
we will create {'personal_name': ..., 'name': ..., 'entity_type': 'person'}
which is stored as a list of authors in the edition dict.
A sample dict looks like one of these:
{
"edition_name": "3rd ed.",
"pagination": "xii, 444 p.",
"title": "A course of pure mathematics",
"publishers": ["At the University Press"],
"number_of_pages": 444,
"languages": ["eng"],
"publish_date": "1921",
"location": ["GLAD"],
"authors": [
{
"birth_date": "1877",
"personal_name": "Hardy, G. H.",
"death_date": "1947",
"name": "Hardy, G. H.",
"entity_type": "person",
}
],
"by_statement": "by G.H. Hardy",
"publish_places": ["Cambridge"],
"publish_country": "enk",
}
{
"publishers": ["Ace Books"],
"pagination": "271 p. ;",
"title": "Neuromancer",
"lccn": ["91174394"],
"notes": "Hugo award book, 1985; Nebula award ; Philip K. Dick award",
"number_of_pages": 271,
"isbn_13": ["9780441569595"],
"languages": ["eng"],
"dewey_decimal_class": ["813/.54"],
"lc_classifications": ["PS3557.I2264 N48 1984", "PR9199.3.G53 N49 1984"],
"publish_date": "1984",
"publish_country": "nyu",
"authors": [
{
"birth_date": "1948",
"personal_name": "Gibson, William",
"name": "Gibson, William",
"entity_type": "person",
}
],
"by_statement": "William Gibson",
"oclc_numbers": ["24379880"],
"publish_places": ["New York"],
"isbn_10": ["0441569595"],
}
{
"publishers": ["Grosset & Dunlap"],
"pagination": "156 p.",
"title": "Great trains of all time",
"lccn": ["62051844"],
"number_of_pages": 156,
"languages": ["eng"],
"dewey_decimal_class": ["625.2"],
"lc_classifications": ["TF147 .H8"],
"publish_date": "1962",
"publish_country": "nyu",
"authors": [
{
"birth_date": "1894",
"personal_name": "Hubbard, Freeman H.",
"name": "Hubbard, Freeman H.",
"entity_type": "person",
}
],
"by_statement": "Illustrated by Herb Mott",
"oclc_numbers": ["1413013"],
"publish_places": ["New York"],
}
"""
from openlibrary.plugins.importapi.import_validator import import_validator
class import_edition_builder:
def add_string(self, key, val):
self.edition_dict[key] = val
def add_list(self, key, val):
if key in self.edition_dict:
self.edition_dict[key].append(val)
else:
self.edition_dict[key] = [val]
def add_author(self, key, val):
# We don't know birth_date or death_date.
# Should name and personal_name be the same value?
author_dict = {'personal_name': val, 'name': val, 'entity_type': 'person'}
self.add_list('authors', author_dict)
def add_illustrator(self, key, val):
self.add_list('contributions', val + ' (Illustrator)')
def __init__(self, init_dict=None):
init_dict = init_dict or {}
self.edition_dict = init_dict.copy()
self._validate()
self.type_dict = {
'title': ['title', self.add_string],
'author': ['authors', self.add_author],
'publisher': ['publishers', self.add_list],
'publish_place': ['publish_places', self.add_list],
'publish_date': ['publish_date', self.add_string],
'pagination': ['pagination', self.add_string],
'subject': ['subjects', self.add_list],
'language': ['languages', self.add_list],
'description': ['description', self.add_string],
'lccn': ['lccn', self.add_list],
'oclc_number': ['oclc_numbers', self.add_list],
'isbn_10': ['isbn_10', self.add_list],
'isbn_13': ['isbn_13', self.add_list],
'ocaid': ['ocaid', self.add_string],
'illustrator': ['contributions', self.add_illustrator],
'source_record': ['source_records', self.add_list],
'dewey_decimal_class': ['dewey_decimal_class', self.add_list],
'lc_classification': ['lc_classifications', self.add_list],
}
def _validate(self):
import_validator().validate(self.edition_dict)
def get_dict(self):
return self.edition_dict
def add(self, key, val, restrict_keys=True):
if restrict_keys and key not in self.type_dict:
print('import_edition_builder invalid key: ' + key)
return
if key in self.type_dict:
new_key = self.type_dict[key][0]
add_func = self.type_dict[key][1]
add_func(new_key, val)
else:
self.add_string(key, val)
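# An illustrative sketch of driving the builder directly, assuming the
# openlibrary package is importable. The record below is made up; init_dict must
# already satisfy import_validator (title, source_records, authors, publishers,
# and publish_date, or title plus a strong identifier).
def _demo_builder():
    builder = import_edition_builder(
        init_dict={
            'title': 'A Sample Book',
            'source_records': ['ia:samplebook1999'],
            'authors': [{'name': 'Jane Doe'}],
            'publishers': ['Sample Press'],
            'publish_date': '1999',
        }
    )
    builder.add('subject', 'Sample subjects')   # appended to the 'subjects' list
    builder.add('author', 'John Roe')           # wrapped into a person author dict
    builder.add('isbn_13', '9780000000002')
    return builder.get_dict()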
size: 5,236 | language: Python | extension: .py | total_lines: 137 | avg_line_length: 30.708029 | max_line_length: 82 | alphanum_fraction: 0.578202 | repo_name: internetarchive/openlibrary | repo_stars: 5,078 | repo_forks: 1,311 | repo_open_issues: 956 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:07:13 PM (Europe/Amsterdam)
id: 404 | file_name: import_validator.py | file_path: internetarchive_openlibrary/openlibrary/plugins/importapi/import_validator.py
from typing import Annotated, Any, Final, TypeVar
from annotated_types import MinLen
from pydantic import BaseModel, ValidationError, model_validator
T = TypeVar("T")
NonEmptyList = Annotated[list[T], MinLen(1)]
NonEmptyStr = Annotated[str, MinLen(1)]
STRONG_IDENTIFIERS: Final = {"isbn_10", "isbn_13", "lccn"}
class Author(BaseModel):
name: NonEmptyStr
class CompleteBookPlus(BaseModel):
"""
The model for a complete book, plus source_records and publishers.
A complete book has title, authors, and publish_date. See #9440.
"""
title: NonEmptyStr
source_records: NonEmptyList[NonEmptyStr]
authors: NonEmptyList[Author]
publishers: NonEmptyList[NonEmptyStr]
publish_date: NonEmptyStr
class StrongIdentifierBookPlus(BaseModel):
"""
The model for a book with a title, strong identifier, plus source_records.
Having one or more strong identifiers is sufficient here. See #9440.
"""
title: NonEmptyStr
source_records: NonEmptyList[NonEmptyStr]
isbn_10: NonEmptyList[NonEmptyStr] | None = None
isbn_13: NonEmptyList[NonEmptyStr] | None = None
lccn: NonEmptyList[NonEmptyStr] | None = None
@model_validator(mode="after")
def at_least_one_valid_strong_identifier(self):
if not any([self.isbn_10, self.isbn_13, self.lccn]):
raise ValueError(
f"At least one of the following must be provided: {', '.join(STRONG_IDENTIFIERS)}"
)
return self
class import_validator:
def validate(self, data: dict[str, Any]) -> bool:
"""Validate the given import data.
Return True if the import object is valid.
Successful validation of either model is sufficient, though an error
message will only display for the first model, regardless of whether both
models are invalid. The goal is to encourage complete records.
This does *not* verify data is sane.
See https://github.com/internetarchive/openlibrary/issues/9440.
"""
errors = []
try:
CompleteBookPlus.model_validate(data)
return True
except ValidationError as e:
errors.append(e)
try:
StrongIdentifierBookPlus.model_validate(data)
return True
except ValidationError as e:
errors.append(e)
if errors:
raise errors[0]
return False
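# An illustrative sketch of the two acceptance paths described above, assuming
# the openlibrary package is importable. Both records are made up.
def _demo_validation():
    validator = import_validator()
    complete = {
        'title': 'A Sample Book',
        'source_records': ['ia:samplebook1999'],
        'authors': [{'name': 'Jane Doe'}],
        'publishers': ['Sample Press'],
        'publish_date': '1999',
    }
    strong_identifier_only = {
        'title': 'A Sample Book',
        'source_records': ['ia:samplebook1999'],
        'isbn_13': ['9780000000002'],
    }
    # Each record validates via a different model; an incomplete record with no
    # strong identifier would raise pydantic.ValidationError instead.
    return validator.validate(complete), validator.validate(strong_identifier_only)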
size: 2,426 | language: Python | extension: .py | total_lines: 60 | avg_line_length: 33.216667 | max_line_length: 98 | alphanum_fraction: 0.68475 | repo_name: internetarchive/openlibrary | repo_stars: 5,078 | repo_forks: 1,311 | repo_open_issues: 956 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:07:13 PM (Europe/Amsterdam)
id: 405 | file_name: test_import_edition_builder.py | file_path: internetarchive_openlibrary/openlibrary/plugins/importapi/tests/test_import_edition_builder.py
import pytest
from openlibrary.plugins.importapi.import_edition_builder import import_edition_builder
import_examples = [
{
'edition_name': '3rd ed.',
'pagination': 'xii, 444 p.',
'title': 'A course of pure mathematics',
'publishers': ['At the University Press'],
'number_of_pages': 444,
'languages': ['eng'],
'publish_date': '1921',
'location': ['GLAD'],
'authors': [
{
'birth_date': '1877',
'personal_name': 'Hardy, G. H.',
'death_date': '1947',
'name': 'Hardy, G. H.',
'entity_type': 'person',
}
],
'by_statement': 'by G.H. Hardy',
'publish_places': ['Cambridge'],
'publish_country': 'enk',
'source_records': ['partner:book1'],
},
{
'publishers': ['Ace Books'],
'pagination': '271 p. ;',
'title': 'Neuromancer',
'lccn': ['91174394'],
'notes': 'Hugo award book, 1985; Nebula award ; Philip K. Dick award',
'number_of_pages': 271,
'isbn_13': ['9780441569595'],
'languages': ['eng'],
'dewey_decimal_class': ['813/.54'],
'lc_classifications': ['PS3557.I2264 N48 1984', 'PR9199.3.G53 N49 1984'],
'publish_date': '1984',
'publish_country': 'nyu',
'authors': [
{
'birth_date': '1948',
'personal_name': 'Gibson, William',
'name': 'Gibson, William',
'entity_type': 'person',
}
],
'by_statement': 'William Gibson',
'oclc_numbers': ['24379880'],
'publish_places': ['New York'],
'isbn_10': ['0441569595'],
'source_records': ['partner:book2'],
},
{
'publishers': ['Grosset & Dunlap'],
'pagination': '156 p.',
'title': 'Great trains of all time',
'lccn': ['62051844'],
'number_of_pages': 156,
'languages': ['eng'],
'dewey_decimal_class': ['625.2'],
'lc_classifications': ['TF147 .H8'],
'publish_date': '1962',
'publish_country': 'nyu',
'authors': [
{
'birth_date': '1894',
'personal_name': 'Hubbard, Freeman H.',
'name': 'Hubbard, Freeman H.',
'entity_type': 'person',
}
],
'by_statement': 'Illustrated by Herb Mott',
'oclc_numbers': ['1413013'],
'publish_places': ['New York'],
'source_records': ['partner:book3'],
},
]
@pytest.mark.parametrize('data', import_examples)
def test_import_edition_builder_JSON(data):
edition = import_edition_builder(init_dict=data)
assert isinstance(edition, import_edition_builder)
# JSON with the fields above is NOT altered by import_edition_builder
assert edition.get_dict() == data
size: 2,923 | language: Python | extension: .py | total_lines: 84 | avg_line_length: 25.285714 | max_line_length: 87 | alphanum_fraction: 0.510931 | repo_name: internetarchive/openlibrary | repo_stars: 5,078 | repo_forks: 1,311 | repo_open_issues: 956 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:07:13 PM (Europe/Amsterdam)
id: 406 | file_name: test_import_validator.py | file_path: internetarchive_openlibrary/openlibrary/plugins/importapi/tests/test_import_validator.py
import pytest
from pydantic import ValidationError
from openlibrary.plugins.importapi.import_validator import import_validator, Author
def test_create_an_author_with_no_name():
Author(name="Valid Name")
with pytest.raises(ValidationError):
Author(name="")
valid_values = {
"title": "Beowulf",
"source_records": ["key:value"],
"author": {"name": "Tom Robbins"},
"authors": [{"name": "Tom Robbins"}, {"name": "Dean Koontz"}],
"publishers": ["Harper Collins", "OpenStax"],
"publish_date": "December 2018",
}
valid_values_strong_identifier = {
"title": "Beowulf",
"source_records": ["key:value"],
"isbn_13": ["0123456789012"],
}
validator = import_validator()
def test_validate():
assert validator.validate(valid_values) is True
def test_validate_strong_identifier_minimal():
"""The least amount of data for a strong identifier record to validate."""
assert validator.validate(valid_values_strong_identifier) is True
@pytest.mark.parametrize(
'field', ["title", "source_records", "authors", "publishers", "publish_date"]
)
def test_validate_record_with_missing_required_fields(field):
invalid_values = valid_values.copy()
del invalid_values[field]
with pytest.raises(ValidationError):
validator.validate(invalid_values)
@pytest.mark.parametrize('field', ['title', 'publish_date'])
def test_validate_empty_string(field):
invalid_values = valid_values.copy()
invalid_values[field] = ""
with pytest.raises(ValidationError):
validator.validate(invalid_values)
@pytest.mark.parametrize('field', ['source_records', 'authors', 'publishers'])
def test_validate_empty_list(field):
invalid_values = valid_values.copy()
invalid_values[field] = []
with pytest.raises(ValidationError):
validator.validate(invalid_values)
@pytest.mark.parametrize('field', ['source_records', 'publishers'])
def test_validate_list_with_an_empty_string(field):
invalid_values = valid_values.copy()
invalid_values[field] = [""]
with pytest.raises(ValidationError):
validator.validate(invalid_values)
@pytest.mark.parametrize('field', ['isbn_10', 'lccn'])
def test_validate_multiple_strong_identifiers(field):
"""More than one strong identifier should still validate."""
multiple_valid_values = valid_values_strong_identifier.copy()
multiple_valid_values[field] = ["non-empty"]
assert validator.validate(multiple_valid_values) is True
@pytest.mark.parametrize('field', ['isbn_13'])
def test_validate_not_complete_no_strong_identifier(field):
"""An incomplete record without a strong identifier won't validate."""
invalid_values = valid_values_strong_identifier.copy()
invalid_values[field] = [""]
with pytest.raises(ValidationError):
validator.validate(invalid_values)
size: 2,842 | language: Python | extension: .py | total_lines: 65 | avg_line_length: 39.461538 | max_line_length: 83 | alphanum_fraction: 0.721758 | repo_name: internetarchive/openlibrary | repo_stars: 5,078 | repo_forks: 1,311 | repo_open_issues: 956 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:07:13 PM (Europe/Amsterdam)
id: 407 | file_name: test_code_ils.py | file_path: internetarchive_openlibrary/openlibrary/plugins/importapi/tests/test_code_ils.py
import datetime
from openlibrary.plugins.importapi import code
"""Tests for Koha ILS (Integrated Library System) code.
"""
class Test_ils_cover_upload:
def test_build_url(self):
build_url = code.ils_cover_upload().build_url
assert (
build_url("http://example.com/foo", status="ok")
== "http://example.com/foo?status=ok"
)
assert (
build_url("http://example.com/foo?bar=true", status="ok")
== "http://example.com/foo?bar=true&status=ok"
)
class Test_ils_search:
def test_format_result(self, mock_site):
format_result = code.ils_search().format_result
assert format_result({"doc": {}}, False, "") == {'status': 'notfound'}
doc = {'key': '/books/OL1M', 'type': {'key': '/type/edition'}}
timestamp = datetime.datetime(2010, 1, 2, 3, 4, 5)
mock_site.save(doc, timestamp=timestamp)
assert format_result({'doc': doc}, False, "") == {
'status': 'found',
'olid': 'OL1M',
'key': '/books/OL1M',
}
doc = {
'key': '/books/OL1M',
'type': {'key': '/type/edition'},
'covers': [12345],
}
timestamp = datetime.datetime(2011, 1, 2, 3, 4, 5)
mock_site.save(doc, timestamp=timestamp)
assert format_result({'doc': doc}, False, "") == {
'status': 'found',
'olid': 'OL1M',
'key': '/books/OL1M',
'covers': [12345],
'cover': {
'small': 'https://covers.openlibrary.org/b/id/12345-S.jpg',
'medium': 'https://covers.openlibrary.org/b/id/12345-M.jpg',
'large': 'https://covers.openlibrary.org/b/id/12345-L.jpg',
},
}
def test_prepare_input_data(self):
prepare_input_data = code.ils_search().prepare_input_data
data = {
'isbn': ['1234567890', '9781234567890'],
'ocaid': ['abc123def'],
'publisher': 'Some Books',
'authors': ['baz'],
}
assert prepare_input_data(data) == {
'doc': {
'identifiers': {
'isbn': ['1234567890', '9781234567890'],
'ocaid': ['abc123def'],
},
'publisher': 'Some Books',
'authors': [{'name': 'baz'}],
}
}
size: 2,417 | language: Python | extension: .py | total_lines: 63 | avg_line_length: 27.492063 | max_line_length: 78 | alphanum_fraction: 0.500427 | repo_name: internetarchive/openlibrary | repo_stars: 5,078 | repo_forks: 1,311 | repo_open_issues: 956 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:07:13 PM (Europe/Amsterdam)
id: 408 | file_name: test_code.py | file_path: internetarchive_openlibrary/openlibrary/plugins/importapi/tests/test_code.py
from .. import code
from openlibrary.catalog.add_book.tests.conftest import add_languages # noqa: F401
import web
import pytest
def test_get_ia_record(monkeypatch, mock_site, add_languages) -> None: # noqa F811
"""
Try to test every field that get_ia_record() reads.
"""
monkeypatch.setattr(web, "ctx", web.storage())
web.ctx.lang = "eng"
web.ctx.site = mock_site
ia_metadata = {
"creator": "Drury, Bob",
"date": "2013",
"description": [
"The story of the great Ogala Sioux chief Red Cloud",
],
"identifier": "heartofeverythin0000drur_j2n5",
"isbn": [
"9781451654684",
"1451654685",
],
"language": "French",
"lccn": "2013003200",
"oclc-id": "1226545401",
"publisher": "New York : Simon & Schuster",
"subject": [
"Red Cloud, 1822-1909",
"Oglala Indians",
],
"title": "The heart of everything that is",
"imagecount": "454",
}
expected_result = {
"authors": [{"name": "Drury, Bob"}],
"description": ["The story of the great Ogala Sioux chief Red Cloud"],
"isbn_10": ["1451654685"],
"isbn_13": ["9781451654684"],
"languages": ["fre"],
"lccn": ["2013003200"],
"number_of_pages": 450,
"oclc": "1226545401",
"publish_date": "2013",
"publish_places": ["New York"],
"publishers": ["Simon & Schuster"],
"subjects": ["Red Cloud, 1822-1909", "Oglala Indians"],
"title": "The heart of everything that is",
}
result = code.ia_importapi.get_ia_record(ia_metadata)
assert result == expected_result
@pytest.mark.parametrize(
"tc,exp",
[("Frisian", "Multiple language matches"), ("Fake Lang", "No language matches")],
)
def test_get_ia_record_logs_warning_when_language_has_multiple_matches(
mock_site, monkeypatch, add_languages, caplog, tc, exp # noqa F811
) -> None:
"""
When the IA record uses the language name rather than the language code,
get_ia_record() should log a warning if there are multiple name matches,
and set no language for the edition.
"""
monkeypatch.setattr(web, "ctx", web.storage())
web.ctx.lang = "eng"
web.ctx.site = mock_site
ia_metadata = {
"creator": "The Author",
"date": "2013",
"identifier": "ia_frisian001",
"language": f"{tc}",
"publisher": "The Publisher",
"title": "Frisian is Fun",
}
expected_result = {
"authors": [{"name": "The Author"}],
"publish_date": "2013",
"publishers": ["The Publisher"],
"title": "Frisian is Fun",
}
result = code.ia_importapi.get_ia_record(ia_metadata)
assert result == expected_result
assert exp in caplog.text
@pytest.mark.parametrize("tc,exp", [(5, 1), (4, 4), (3, 3)])
def test_get_ia_record_handles_very_short_books(tc, exp) -> None:
"""
Because scans have extra images for the cover, etc, and the page count from
the IA metadata is based on `imagecount`, 4 pages are subtracted from
number_of_pages. But make sure this doesn't go below 1.
"""
ia_metadata = {
"creator": "The Author",
"date": "2013",
"identifier": "ia_frisian001",
"imagecount": f"{tc}",
"publisher": "The Publisher",
"title": "Frisian is Fun",
}
result = code.ia_importapi.get_ia_record(ia_metadata)
assert result.get("number_of_pages") == exp
size: 3,550 | language: Python | extension: .py | total_lines: 99 | avg_line_length: 28.979798 | max_line_length: 85 | alphanum_fraction: 0.594705 | repo_name: internetarchive/openlibrary | repo_stars: 5,078 | repo_forks: 1,311 | repo_open_issues: 956 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:07:13 PM (Europe/Amsterdam)
id: 409 | file_name: autocomplete.py | file_path: internetarchive_openlibrary/openlibrary/plugins/worksearch/autocomplete.py
import itertools
import web
import json
from infogami.utils import delegate
from infogami.utils.view import safeint
from openlibrary.core.models import Thing
from openlibrary.plugins.upstream import utils
from openlibrary.plugins.worksearch.search import get_solr
from openlibrary.utils import (
find_olid_in_string,
olid_to_key,
)
def to_json(d):
web.header('Content-Type', 'application/json')
return delegate.RawText(json.dumps(d))
class autocomplete(delegate.page):
path = "/_autocomplete"
fq = ['-type:edition']
fl = 'key,type,name,title,score'
olid_suffix: str | None = None
sort: str | None = None
query = 'title:"{q}"^2 OR title:({q}*) OR name:"{q}"^2 OR name:({q}*)'
def db_fetch(self, key: str) -> Thing | None:
if thing := web.ctx.site.get(key):
return thing.as_fake_solr_record()
else:
return None
def doc_wrap(self, doc: dict):
"""Modify the returned solr document in place."""
if 'name' not in doc:
doc['name'] = doc.get('title')
def doc_filter(self, doc: dict) -> bool:
"""Exclude certain documents"""
return True
def GET(self):
return self.direct_get()
def direct_get(self, fq: list[str] | None = None):
i = web.input(q="", limit=5)
i.limit = safeint(i.limit, 5)
solr = get_solr()
# look for ID in query string here
q = solr.escape(i.q).strip()
embedded_olid = None
if self.olid_suffix:
embedded_olid = find_olid_in_string(q, self.olid_suffix)
if embedded_olid:
solr_q = f'key:"{olid_to_key(embedded_olid)}"'
else:
solr_q = self.query.format(q=q)
fq = fq or self.fq
params = {
'q_op': 'AND',
'rows': i.limit,
**({'fq': fq} if fq else {}),
# limit the fields returned for better performance
'fl': self.fl,
**({'sort': self.sort} if self.sort else {}),
}
data = solr.select(solr_q, **params)
docs = data['docs']
if embedded_olid and not docs:
# Grumble! Work not in solr yet. Create a dummy.
fake_doc = self.db_fetch(olid_to_key(embedded_olid))
if fake_doc:
docs = [fake_doc]
result_docs = []
for d in docs:
if self.doc_filter(d):
self.doc_wrap(d)
result_docs.append(d)
return to_json(result_docs)
class languages_autocomplete(delegate.page):
path = "/languages/_autocomplete"
def GET(self):
i = web.input(q="", limit=5)
i.limit = safeint(i.limit, 5)
web.header("Cache-Control", "max-age=%d" % (24 * 3600))
return to_json(
list(itertools.islice(utils.autocomplete_languages(i.q), i.limit))
)
class works_autocomplete(autocomplete):
path = "/works/_autocomplete"
fq = ['type:work']
fl = 'key,title,subtitle,cover_i,first_publish_year,author_name,edition_count'
olid_suffix = 'W'
query = 'title:"{q}"^2 OR title:({q}*)'
def doc_filter(self, doc: dict) -> bool:
# Exclude orphaned editions from autocomplete results
# Note: Do this here instead of with an `fq=key:*W` for performance
# reasons.
return doc['key'][-1] == 'W'
def doc_wrap(self, doc: dict):
doc['full_title'] = doc['title']
if 'subtitle' in doc:
doc['full_title'] += ": " + doc['subtitle']
doc['name'] = doc.get('title')
class authors_autocomplete(autocomplete):
path = "/authors/_autocomplete"
fq = ['type:author']
fl = 'key,name,alternate_names,birth_date,death_date,work_count,top_work,top_subjects'
olid_suffix = 'A'
query = 'name:({q}*) OR alternate_names:({q}*) OR name:"{q}"^2 OR alternate_names:"{q}"^2'
def doc_wrap(self, doc: dict):
if 'top_work' in doc:
doc['works'] = [doc.pop('top_work')]
else:
doc['works'] = []
doc['subjects'] = doc.pop('top_subjects', [])
class subjects_autocomplete(autocomplete):
# can't use /subjects/_autocomplete because the subjects endpoint = /subjects/[^/]+
path = "/subjects_autocomplete"
fq = ['type:subject']
fl = 'key,name,work_count'
query = 'name:({q}*)'
sort = 'work_count desc'
def GET(self):
i = web.input(type="")
fq = self.fq
if i.type:
fq = fq + [f'subject_type:{i.type}']
return super().direct_get(fq=fq)
def setup():
"""Do required setup."""
pass
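# A hypothetical sketch of querying the autocomplete endpoints registered above
# from a client, assuming a running Open Library instance. Host and queries are
# placeholders; each endpoint returns a JSON list of lightweight solr docs.
def _demo_autocomplete_queries():
    import requests  # assumed to be available in the client environment

    base = "https://openlibrary.org"  # placeholder host
    works = requests.get(
        f"{base}/works/_autocomplete", params={"q": "pride and prejudice", "limit": 5}
    ).json()
    authors = requests.get(
        f"{base}/authors/_autocomplete", params={"q": "tolkien", "limit": 5}
    ).json()
    subjects = requests.get(
        f"{base}/subjects_autocomplete", params={"q": "fantasy", "type": "subject"}
    ).json()
    return works, authors, subjects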
size: 4,619 | language: Python | extension: .py | total_lines: 124 | avg_line_length: 29.451613 | max_line_length: 94 | alphanum_fraction: 0.584081 | repo_name: internetarchive/openlibrary | repo_stars: 5,078 | repo_forks: 1,311 | repo_open_issues: 956 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:07:13 PM (Europe/Amsterdam)
id: 410 | file_name: bulk_search.py | file_path: internetarchive_openlibrary/openlibrary/plugins/worksearch/bulk_search.py
import web
from infogami import config
from infogami.utils import delegate
from infogami.utils.view import public, safeint, render
class bulk_search(delegate.page):
path = "/search/bulk"
def GET(self):
return render['bulk_search/bulk_search']()
def setup():
pass
size: 289 | language: Python | extension: .py | total_lines: 10 | avg_line_length: 25.3 | max_line_length: 55 | alphanum_fraction: 0.750916 | repo_name: internetarchive/openlibrary | repo_stars: 5,078 | repo_forks: 1,311 | repo_open_issues: 956 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:07:13 PM (Europe/Amsterdam)
id: 411 | file_name: publishers.py | file_path: internetarchive_openlibrary/openlibrary/plugins/worksearch/publishers.py
"""Publisher pages
"""
from infogami.utils import delegate, stats
from infogami.utils.view import render_template, safeint
import web
import logging
from . import subjects
from . import search
logger = logging.getLogger("openlibrary.worksearch")
class publishers(subjects.subjects):
path = '(/publishers/[^/]+)'
def GET(self, key):
key = key.replace("_", " ")
page = subjects.get_subject(key, details=True)
if not page or page.work_count == 0:
web.ctx.status = "404 Not Found"
return render_template('publishers/notfound.tmpl', key)
return render_template("publishers/view", page)
def is_enabled(self):
return "publishers" in web.ctx.features
class publishers_json(subjects.subjects_json):
path = '(/publishers/[^/]+)'
encoding = "json"
def is_enabled(self):
return "publishers" in web.ctx.features
def normalize_key(self, key):
return key
def process_key(self, key):
return key.replace("_", " ")
class index(delegate.page):
path = "/publishers"
def GET(self):
return render_template("publishers/index")
def is_enabled(self):
return "publishers" in web.ctx.features
class publisher_search(delegate.page):
path = '/search/publishers'
def GET(self):
i = web.input(q="")
result = search.get_solr().select(
{"publisher": i.q, "type": "work"},
facets=["publisher_facet"],
facet_mincount=1,
facet_limit=25,
facet_contains=i.q,
facet_contains_ignoreCase='true',
rows=0,
)
result = self.process_result(result)
return render_template('search/publishers', i.q, result)
def process_result(self, result):
publisher_facets = result['facets']['publisher_facet']
return [
web.storage(
name=p.value,
key="/publishers/" + p.value.replace(" ", "_"),
count=p.count,
)
for p in publisher_facets
]
class PublisherEngine(subjects.SubjectEngine):
def normalize_key(self, key):
return key
def get_ebook_count(self, name, value, publish_year):
# Query solr for this publisher and publish_year combination and read the has_fulltext=true facet
solr = search.get_solr()
q = {"publisher_facet": value}
if isinstance(publish_year, list):
q['publish_year'] = tuple(publish_year) # range
elif publish_year:
q['publish_year'] = publish_year
result = solr.select(q, facets=["has_fulltext"], rows=0)
counts = {v.value: v.count for v in result["facets"]["has_fulltext"]}
return counts.get('true')
def setup():
subjects.SUBJECTS.append(
subjects.SubjectMeta(
name="publisher",
key="publishers",
prefix="/publishers/",
facet="publisher_facet",
facet_key="publisher_facet",
Engine=PublisherEngine,
)
)
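# A small illustration of the key convention used in this module: publisher page
# keys replace spaces with underscores, and publishers.GET() maps underscores
# back to spaces before looking up the subject. The publisher name is made up.
def _demo_publisher_key_roundtrip():
    name = "Sample University Press"
    key = "/publishers/" + name.replace(" ", "_")  # as built in process_result()
    # publishers.GET() reverses the substitution before calling get_subject():
    assert key.replace("_", " ") == "/publishers/Sample University Press"
    return key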
size: 3,088 | language: Python | extension: .py | total_lines: 85 | avg_line_length: 28.023529 | max_line_length: 108 | alphanum_fraction: 0.60928 | repo_name: internetarchive/openlibrary | repo_stars: 5,078 | repo_forks: 1,311 | repo_open_issues: 956 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:07:13 PM (Europe/Amsterdam)
id: 412 | file_name: search.py | file_path: internetarchive_openlibrary/openlibrary/plugins/worksearch/search.py
"""Search utilities.
"""
from openlibrary.utils.solr import Solr
from infogami import config
_ACTIVE_SOLR: Solr | None = None
def get_solr():
global _ACTIVE_SOLR
if not _ACTIVE_SOLR:
base_url = config.plugin_worksearch.get('solr_base_url')
_ACTIVE_SOLR = Solr(base_url)
return _ACTIVE_SOLR
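# An illustrative sketch of using the cached client, assuming the infogami config
# has plugin_worksearch.solr_base_url set and the openlibrary package is
# importable. The query mirrors how other modules in this plugin call select();
# the publisher name is made up.
def _demo_solr_query():
    solr = get_solr()
    result = solr.select(
        {"publisher": "Sample Press", "type": "work"},
        facets=["publisher_facet"],
        rows=0,
    )
    return result["facets"]["publisher_facet"]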
size: 322 | language: Python | extension: .py | total_lines: 11 | avg_line_length: 25.363636 | max_line_length: 64 | alphanum_fraction: 0.703583 | repo_name: internetarchive/openlibrary | repo_stars: 5,078 | repo_forks: 1,311 | repo_open_issues: 956 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:07:13 PM (Europe/Amsterdam)
id: 413 | file_name: code.py | file_path: internetarchive_openlibrary/openlibrary/plugins/worksearch/code.py
from dataclasses import dataclass
import itertools
import time
import copy
import json
import logging
import re
from typing import Any, cast
from collections.abc import Iterable
from unicodedata import normalize
import requests
import web
from requests import Response
import urllib
from infogami import config
from infogami.utils import delegate, stats
from infogami.utils.view import public, render, render_template, safeint
from openlibrary.core import cache
from openlibrary.core.lending import add_availability
from openlibrary.core.models import Edition
from openlibrary.i18n import gettext as _
from openlibrary.plugins.openlibrary.processors import urlsafe
from openlibrary.plugins.upstream.utils import (
get_language_name,
urlencode,
)
from openlibrary.plugins.worksearch.schemes.editions import EditionSearchScheme
from openlibrary.plugins.worksearch.search import get_solr
from openlibrary.plugins.worksearch.schemes import SearchScheme
from openlibrary.plugins.worksearch.schemes.authors import AuthorSearchScheme
from openlibrary.plugins.worksearch.schemes.subjects import SubjectSearchScheme
from openlibrary.plugins.worksearch.schemes.works import (
WorkSearchScheme,
has_solr_editions_enabled,
)
from openlibrary.solr.solr_types import SolrDocument
from openlibrary.solr.query_utils import fully_escape_query
from openlibrary.utils.isbn import normalize_isbn
logger = logging.getLogger("openlibrary.worksearch")
OLID_URLS = {'A': 'authors', 'M': 'books', 'W': 'works'}
re_isbn_field = re.compile(r'^\s*(?:isbn[:\s]*)?([-0-9X]{9,})\s*$', re.I)
re_olid = re.compile(r'^OL\d+([AMW])$')
plurals = {f + 's': f for f in ('publisher', 'author')}
if hasattr(config, 'plugin_worksearch'):
solr_select_url = (
config.plugin_worksearch.get('solr_base_url', 'localhost') + '/select'
)
default_spellcheck_count = config.plugin_worksearch.get('spellcheck_count', 10)
@public
def get_facet_map() -> tuple[tuple[str, str]]:
return (
('has_fulltext', _('eBook?')),
('language', _('Language')),
('author_key', _('Author')),
('subject_facet', _('Subjects')),
('first_publish_year', _('First published')),
('publisher_facet', _('Publisher')),
('person_facet', _('People')),
('place_facet', _('Places')),
('time_facet', _('Times')),
('public_scan_b', _('Classic eBooks')),
)
@public
def get_solr_works(work_key: Iterable[str]) -> dict[str, dict]:
from openlibrary.plugins.worksearch.search import get_solr
return {
doc['key']: doc
for doc in get_solr().get_many(
set(work_key), fields=WorkSearchScheme.default_fetched_fields
)
}
def read_author_facet(author_facet: str) -> tuple[str, str]:
"""
>>> read_author_facet("OL26783A Leo Tolstoy")
('OL26783A', 'Leo Tolstoy')
"""
key, name = author_facet.split(' ', 1)
return key, name
def process_facet(
field: str, facets: Iterable[tuple[str, int]]
) -> tuple[str, str, int]:
if field == 'has_fulltext':
counts = dict(facets)
yield ('true', 'yes', counts.get('true', 0))
yield ('false', 'no', counts.get('false', 0))
else:
for val, count in facets:
if count == 0:
continue
if field == 'author_key':
key, name = read_author_facet(val)
yield (key, name, count)
elif field == 'language':
yield (val, get_language_name(f'/languages/{val}'), count)
else:
yield (val, val, count)
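# A small usage illustration for process_facet(): the has_fulltext branch maps
# raw true/false facet counts to labelled rows. The counts below are made up.
def _demo_process_facet():
    rows = list(process_facet('has_fulltext', [('true', 12), ('false', 3)]))
    # rows == [('true', 'yes', 12), ('false', 'no', 3)]
    return rows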
def process_facet_counts(
facet_counts: dict[str, list]
) -> dict[str, tuple[str, str, int]]:
for field, facets in facet_counts.items():
if field == 'author_facet':
field = 'author_key'
yield field, list(process_facet(field, web.group(facets, 2)))
def execute_solr_query(
solr_path: str, params: dict | list[tuple[str, Any]]
) -> Response | None:
url = solr_path
if params:
url += '&' if '?' in url else '?'
url += urlencode(params)
stats.begin("solr", url=url)
try:
response = get_solr().raw_request(solr_path, urlencode(params))
response.raise_for_status()
except requests.HTTPError:
logger.exception("Failed solr query")
return None
finally:
stats.end()
return response
# Expose this publicly
public(has_solr_editions_enabled)
def run_solr_query( # noqa: PLR0912
scheme: SearchScheme,
param: dict | None = None,
rows=100,
page=1,
sort: str | None = None,
spellcheck_count=None,
offset=None,
fields: str | list[str] | None = None,
facet: bool | Iterable[str] = True,
allowed_filter_params: set[str] | None = None,
extra_params: list[tuple[str, Any]] | None = None,
):
"""
:param param: dict of query parameters
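    Illustrative call (all argument values here are hypothetical):
    >>> run_solr_query(WorkSearchScheme(), {'q': 'tolkien'}, rows=3, fields=['key', 'title'])  # doctest: +SKIP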
"""
param = param or {}
if not fields:
fields = []
elif isinstance(fields, str):
fields = fields.split(',')
# use page when offset is not specified
if offset is None:
offset = rows * (page - 1)
params = [
*(('fq', subquery) for subquery in scheme.universe),
('start', offset),
('rows', rows),
('wt', param.get('wt', 'json')),
] + (extra_params or [])
if spellcheck_count is None:
spellcheck_count = default_spellcheck_count
if spellcheck_count:
params.append(('spellcheck', 'true'))
params.append(('spellcheck.count', spellcheck_count))
facet_fields = scheme.facet_fields if isinstance(facet, bool) else facet
if facet and facet_fields:
params.append(('facet', 'true'))
for facet in facet_fields: # noqa: PLR1704
if isinstance(facet, str):
params.append(('facet.field', facet))
elif isinstance(facet, dict):
params.append(('facet.field', facet['name']))
if 'sort' in facet:
params.append((f'f.{facet["name"]}.facet.sort', facet['sort']))
if 'limit' in facet:
params.append((f'f.{facet["name"]}.facet.limit', facet['limit']))
else:
# Should never get here
raise ValueError(f'Invalid facet type: {facet}')
facet_params = (allowed_filter_params or scheme.facet_fields) & set(param)
for (field, value), rewrite in scheme.facet_rewrites.items():
if param.get(field) == value:
if field in facet_params:
facet_params.remove(field)
params.append(('fq', rewrite() if callable(rewrite) else rewrite))
for field in facet_params:
if field == 'author_facet':
field = 'author_key'
values = param[field]
params += [('fq', f'{field}:"{val}"') for val in values if val]
    # Many fields in solr use the convention of `*_facet` both
    # as a facet key and as the explicit search query key.
    # Examples are publisher_facet and subject_facet.
    # `author_key` & `author_facet` is an example of a mismatch that
    # breaks this rule. This code makes it so that, if e.g. `author_facet` is used
    # where `author_key` is intended, both will be supported (and vice versa).
    # This "doubling up" has no real performance implication,
    # but it does fix cases where the search query key differs from the facet name.
q = None
if param.get('q'):
q = scheme.process_user_query(param['q'])
if params_q := scheme.build_q_from_params(param):
q = f'{q} {params_q}' if q else params_q
if q:
solr_fields = (
set(fields or scheme.default_fetched_fields) - scheme.non_solr_fields
)
if 'editions' in solr_fields:
solr_fields.remove('editions')
solr_fields.add('editions:[subquery]')
if ed_sort := param.get('editions.sort'):
params.append(
('editions.sort', EditionSearchScheme().process_user_sort(ed_sort))
)
params.append(('fl', ','.join(solr_fields)))
params += scheme.q_to_solr_params(q, solr_fields, params)
if sort:
params.append(('sort', scheme.process_user_sort(sort)))
url = f'{solr_select_url}?{urlencode(params)}'
start_time = time.time()
response = execute_solr_query(solr_select_url, params)
solr_result = response.json() if response else None
end_time = time.time()
duration = end_time - start_time
if solr_result is not None:
non_solr_fields = set(fields) & scheme.non_solr_fields
if non_solr_fields:
scheme.add_non_solr_fields(non_solr_fields, solr_result)
return SearchResponse.from_solr_result(solr_result, sort, url, time=duration)
@dataclass
class SearchResponse:
facet_counts: dict[str, tuple[str, str, int]]
sort: str
docs: list
num_found: int
solr_select: str
    raw_resp: dict | None = None
    error: str | None = None
    time: float | None = None
"""Seconds to execute the query"""
@staticmethod
def from_solr_result(
solr_result: dict | None,
sort: str,
solr_select: str,
time: float,
) -> 'SearchResponse':
if not solr_result or 'error' in solr_result:
return SearchResponse(
facet_counts=None,
sort=sort,
docs=[],
num_found=None,
solr_select=solr_select,
error=(solr_result.get('error') if solr_result else None),
time=time,
)
else:
return SearchResponse(
facet_counts=(
dict(
process_facet_counts(
solr_result['facet_counts']['facet_fields']
)
)
if 'facet_counts' in solr_result
else None
),
sort=sort,
raw_resp=solr_result,
docs=solr_result['response']['docs'],
num_found=solr_result['response']['numFound'],
solr_select=solr_select,
time=time,
)
def do_search(
param: dict,
sort: str | None,
page=1,
rows=100,
facet=False,
spellcheck_count=None,
):
"""
:param param: dict of search url parameters
:param sort: csv sort ordering
:param spellcheck_count: Not really used; should probably drop
"""
# If you want work_search page html to extend default_fetched_fields:
extra_fields = {
'editions',
'providers',
'ratings_average',
'ratings_count',
'want_to_read_count',
}
fields = WorkSearchScheme.default_fetched_fields | extra_fields
if web.cookies(sfw="").sfw == 'yes':
fields |= {'subject'}
return run_solr_query(
WorkSearchScheme(),
param,
rows,
page,
sort,
spellcheck_count,
fields=list(fields),
facet=facet,
)
def get_doc(doc: SolrDocument):
"""
Coerce a solr document to look more like an Open Library edition/work. Ish.
called from work_search template
"""
return web.storage(
key=doc['key'],
title=doc['title'],
url=f"{doc['key']}/{urlsafe(doc['title'])}",
edition_count=doc['edition_count'],
ia=doc.get('ia', []),
collections=(
set(doc['ia_collection_s'].split(';'))
if doc.get('ia_collection_s')
else set()
),
has_fulltext=doc.get('has_fulltext', False),
public_scan=doc.get('public_scan_b', bool(doc.get('ia'))),
lending_edition=doc.get('lending_edition_s', None),
lending_identifier=doc.get('lending_identifier_s', None),
authors=[
web.storage(
key=key,
name=name,
url=f"/authors/{key}/{urlsafe(name or 'noname')}",
birth_date=doc.get('birth_date', None),
death_date=doc.get('death_date', None),
)
for key, name in zip(doc.get('author_key', []), doc.get('author_name', []))
],
first_publish_year=doc.get('first_publish_year', None),
first_edition=doc.get('first_edition', None),
subtitle=doc.get('subtitle', None),
cover_edition_key=doc.get('cover_edition_key', None),
languages=doc.get('language', []),
id_project_gutenberg=doc.get('id_project_gutenberg', []),
id_librivox=doc.get('id_librivox', []),
id_standard_ebooks=doc.get('id_standard_ebooks', []),
id_openstax=doc.get('id_openstax', []),
id_cita_press=doc.get('id_cita_press', []),
id_wikisource=doc.get('id_wikisource', []),
editions=[
web.storage(
{
**ed,
'title': ed.get('title', 'Untitled'),
'url': f"{ed['key']}/{urlsafe(ed.get('title', 'Untitled'))}",
}
)
for ed in doc.get('editions', {}).get('docs', [])
],
ratings_average=doc.get('ratings_average', None),
ratings_count=doc.get('ratings_count', None),
want_to_read_count=doc.get('want_to_read_count', None),
)
class scan(delegate.page):
"""
Experimental EAN barcode scanner page to scan and add/view books by their barcodes.
"""
path = "/barcodescanner"
def GET(self):
return render.barcodescanner()
class search(delegate.page):
def redirect_if_needed(self, i):
params = {}
need_redirect = False
for k, v in i.items():
if k in plurals:
params[k] = None
k = plurals[k]
need_redirect = True
if isinstance(v, list):
if v == []:
continue
clean = [normalize('NFC', b.strip()) for b in v]
if clean != v:
need_redirect = True
if len(clean) == 1 and clean[0] == '':
clean = None
else:
clean = normalize('NFC', v.strip())
if clean == '':
need_redirect = True
clean = None
if clean != v:
need_redirect = True
params[k] = clean
if need_redirect:
raise web.seeother(web.changequery(**params))
def isbn_redirect(self, isbn_param):
isbn = normalize_isbn(isbn_param)
if not isbn:
return
if ed := Edition.from_isbn(isbn):
web.seeother(ed.key)
def GET(self):
# Enable patrons to search for query q2 within collection q
# q2 param gets removed and prepended to q via a redirect
_i = web.input(q='', q2='')
if _i.q.strip() and _i.q2.strip():
_i.q = _i.q2.strip() + ' ' + _i.q.strip()
_i.pop('q2')
raise web.seeother('/search?' + urllib.parse.urlencode(_i))
i = web.input(
author_key=[],
language=[],
first_publish_year=[],
publisher_facet=[],
subject_facet=[],
person_facet=[],
place_facet=[],
time_facet=[],
public_scan_b=[],
)
# Send to full-text Search Inside if checkbox checked
if i.get('search-fulltext'):
raise web.seeother(
'/search/inside?' + urllib.parse.urlencode({'q': i.get('q', '')})
)
if i.get('wisbn'):
i.isbn = i.wisbn
self.redirect_if_needed(i)
if 'isbn' in i:
self.isbn_redirect(i.isbn)
q_list = []
if q := i.get('q', '').strip():
m = re_olid.match(q)
if m:
raise web.seeother(f'/{OLID_URLS[m.group(1)]}/{q}')
m = re_isbn_field.match(q)
if m:
self.isbn_redirect(m.group(1))
q_list.append(q)
for k in ('title', 'author', 'isbn', 'subject', 'place', 'person', 'publisher'):
if k in i:
q_list.append(f'{k}:{fully_escape_query(i[k].strip())}')
web_input = i
param = {}
for p in {
'q',
'title',
'author',
'page',
'sort',
'isbn',
'oclc',
'contributor',
'publish_place',
'lccn',
'ia',
'first_sentence',
'publisher',
'author_key',
'debug',
'subject',
'place',
'person',
'time',
'editions.sort',
} | WorkSearchScheme.facet_fields:
if web_input.get(p):
param[p] = web_input[p]
if list(param) == ['has_fulltext']:
param = {}
page = int(param.get('page', 1))
sort = param.get('sort', None)
rows = 20
if param:
search_response = do_search(
param, sort, page, rows=rows, spellcheck_count=3
)
else:
search_response = SearchResponse(
facet_counts=None, sort='', docs=[], num_found=0, solr_select=''
)
return render.work_search(
' '.join(q_list),
search_response,
get_doc,
param,
page,
rows,
)
def works_by_author(
akey: str,
sort='editions',
page=1,
rows=100,
facet=False,
has_fulltext=False,
query: str | None = None,
):
param = {'q': query or '*:*'}
if has_fulltext:
param['has_fulltext'] = 'true'
result = run_solr_query(
WorkSearchScheme(),
param=param,
page=page,
rows=rows,
sort=sort,
facet=(
facet
and [
"subject_facet",
"person_facet",
"place_facet",
"time_facet",
]
),
fields=WorkSearchScheme.default_fetched_fields | {'editions'},
extra_params=[
('fq', f'author_key:{akey}'),
('facet.limit', 25),
],
)
result.docs = [get_doc(doc) for doc in result.docs]
add_availability(
[(work.get('editions') or [None])[0] or work for work in result.docs]
)
return result
def top_books_from_author(akey: str, rows=5) -> SearchResponse:
return run_solr_query(
WorkSearchScheme(),
{'q': f'author_key:{akey}'},
fields=['key', 'title', 'edition_count', 'first_publish_year'],
sort='editions',
rows=rows,
facet=False,
)
class advancedsearch(delegate.page):
path = "/advancedsearch"
def GET(self):
return render_template("search/advancedsearch.html")
class list_search(delegate.page):
path = '/search/lists'
def GET(self):
i = web.input(q='', offset='0', limit='10')
lists = self.get_results(i.q, i.offset, i.limit)
return render_template('search/lists.tmpl', q=i.q, lists=lists)
def get_results(self, q, offset=0, limit=100):
if 'env' not in web.ctx:
delegate.fakeload()
keys = web.ctx.site.things(
{
"type": "/type/list",
"name~": q,
"limit": int(limit),
"offset": int(offset),
}
)
return web.ctx.site.get_many(keys)
class list_search_json(list_search):
path = '/search/lists'
encoding = 'json'
def GET(self):
i = web.input(q='', offset=0, limit=10)
offset = safeint(i.offset, 0)
limit = safeint(i.limit, 10)
limit = min(100, limit)
docs = self.get_results(i.q, offset=offset, limit=limit)
response = {'start': offset, 'docs': [doc.preview() for doc in docs]}
web.header('Content-Type', 'application/json')
return delegate.RawText(json.dumps(response))
class subject_search(delegate.page):
path = '/search/subjects'
def GET(self):
return render_template('search/subjects', self.get_results)
def get_results(self, q, offset=0, limit=100):
response = run_solr_query(
SubjectSearchScheme(),
{'q': q},
offset=offset,
rows=limit,
sort='work_count desc',
)
return response
class subject_search_json(subject_search):
path = '/search/subjects'
encoding = 'json'
def GET(self):
i = web.input(q='', offset=0, limit=100)
offset = safeint(i.offset, 0)
limit = safeint(i.limit, 100)
        limit = min(1000, limit)  # cap the limit at 1000.
response = self.get_results(i.q, offset=offset, limit=limit)
# Backward compatibility :/
raw_resp = response.raw_resp['response']
for doc in raw_resp['docs']:
doc['type'] = doc.get('subject_type', 'subject')
doc['count'] = doc.get('work_count', 0)
web.header('Content-Type', 'application/json')
return delegate.RawText(json.dumps(raw_resp))
class author_search(delegate.page):
path = '/search/authors'
def GET(self):
return render_template('search/authors', self.get_results)
def get_results(self, q, offset=0, limit=100, fields='*', sort=''):
resp = run_solr_query(
AuthorSearchScheme(),
{'q': q},
offset=offset,
rows=limit,
fields=fields,
sort=sort,
)
return resp
class author_search_json(author_search):
path = '/search/authors'
encoding = 'json'
def GET(self):
i = web.input(q='', offset=0, limit=100, fields='*', sort='')
offset = safeint(i.offset, 0)
limit = safeint(i.limit, 100)
        limit = min(1000, limit)  # cap the limit at 1000.
response = self.get_results(
i.q, offset=offset, limit=limit, fields=i.fields, sort=i.sort
)
raw_resp = response.raw_resp['response']
for doc in raw_resp['docs']:
# SIGH the public API exposes the key like this :(
doc['key'] = doc['key'].split('/')[-1]
web.header('Content-Type', 'application/json')
return delegate.RawText(json.dumps(raw_resp))
@public
def random_author_search(limit=10) -> SearchResponse:
return run_solr_query(
AuthorSearchScheme(),
{'q': '*:*'},
rows=limit,
sort='random.hourly',
)
def rewrite_list_query(q, page, offset, limit):
"""Takes a solr query. If it doesn't contain a /lists/ key, then
return the query, unchanged, exactly as it entered the
function. If it does contain a lists key, then use the pagination
information to fetch the right block of keys from the
lists_editions and lists_works API and then feed these editions resulting work
keys into solr with the form key:(OL123W, OL234W). This way, we
can use the solr API to fetch list works and render them in
carousels in the right format.
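    Illustrative example (the list key and the resulting work keys are hypothetical):
    >>> rewrite_list_query('/people/foo/lists/OL1L', 2, 20, 20)  # doctest: +SKIP
    ('key:(/works/OL1W OR /works/OL2W)', 1, 0, 20)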
"""
from openlibrary.core.lists.model import List
def cached_get_list_book_keys(key, offset, limit):
# make cacheable
if 'env' not in web.ctx:
delegate.fakeload()
lst = cast(List, web.ctx.site.get(key))
return list(itertools.islice(lst.get_work_keys(), offset or 0, offset + limit))
if '/lists/' in q:
# we're making an assumption that q is just a list key
book_keys = cache.memcache_memoize(
cached_get_list_book_keys, "search.list_books_query", timeout=5 * 60
)(q, offset, limit)
q = f"key:({' OR '.join(book_keys)})"
# We've applied the offset to fetching get_list_editions to
# produce the right set of discrete work IDs. We don't want
# it applied to paginate our resulting solr query.
offset = 0
page = 1
return q, page, offset, limit
@public
def work_search(
query: dict,
sort: str | None = None,
page: int = 1,
offset: int = 0,
limit: int = 100,
fields: str = '*',
facet: bool = True,
spellcheck_count: int | None = None,
) -> dict:
"""
    :param sort: key of the sorts dict defined on WorkSearchScheme (e.g. 'new', 'editions')
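    Illustrative call (query values are hypothetical):
    >>> work_search({'q': 'dune'}, sort='new', limit=2)  # doctest: +SKIP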
"""
# Ensure we don't mutate the `query` passed in by reference
query = copy.deepcopy(query)
query['wt'] = 'json'
# deal with special /lists/ key queries
query['q'], page, offset, limit = rewrite_list_query(
query['q'], page, offset, limit
)
resp = run_solr_query(
WorkSearchScheme(),
query,
rows=limit,
page=page,
sort=sort,
offset=offset,
fields=fields,
facet=facet,
spellcheck_count=spellcheck_count,
)
response = resp.raw_resp['response']
# backward compatibility
response['num_found'] = response['numFound']
if fields == '*' or 'availability' in fields:
response['docs'] = add_availability(response['docs'])
return response
class search_json(delegate.page):
path = "/search"
encoding = "json"
def GET(self):
i = web.input(
author_key=[],
subject_facet=[],
person_facet=[],
place_facet=[],
time_facet=[],
first_publish_year=[],
publisher_facet=[],
language=[],
public_scan_b=[],
)
if 'query' in i:
query = json.loads(i.query)
else:
query = i
sort = query.get('sort', None)
limit = safeint(query.pop("limit", "100"), default=100)
if "offset" in query:
offset = safeint(query.pop("offset", 0), default=0)
page = None
else:
offset = None
page = safeint(query.pop("page", "1"), default=1)
fields = query.pop('fields', '*').split(',')
spellcheck_count = safeint(
query.pop("_spellcheck_count", default_spellcheck_count),
default=default_spellcheck_count,
)
        # If the query is a /lists/ key, rewrite it into a custom list editions query
q = query.get('q', '').strip()
query['q'], page, offset, limit = rewrite_list_query(q, page, offset, limit)
response = work_search(
query,
sort=sort,
page=page,
offset=offset,
limit=limit,
fields=fields,
# We do not support returning facets from /search.json,
# so disable it. This makes it much faster.
facet=False,
spellcheck_count=spellcheck_count,
)
response['q'] = q
response['offset'] = offset
response['docs'] = response['docs']
web.header('Content-Type', 'application/json')
return delegate.RawText(json.dumps(response, indent=4))
def setup():
from openlibrary.plugins.worksearch import (
autocomplete,
subjects,
languages,
publishers,
bulk_search,
)
bulk_search.setup()
autocomplete.setup()
subjects.setup()
publishers.setup()
languages.setup()
setup()
| 27,307 | Python | .py | 767 | 26.680574 | 88 | 0.568853 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
414 | languages.py | internetarchive_openlibrary/openlibrary/plugins/worksearch/languages.py | """Language pages
"""
from infogami.plugins.api.code import jsonapi
from infogami.utils import delegate, stats
from infogami.utils.view import render_template, safeint
import web
import json
import logging
from openlibrary.plugins.upstream.utils import get_language_name
from . import subjects
from . import search
logger = logging.getLogger("openlibrary.worksearch")
class languages(subjects.subjects):
path = '(/languages/[^_][^/]*)'
def is_enabled(self):
return "languages" in web.ctx.features
class languages_json(subjects.subjects_json):
path = '(/languages/[^_][^/]*)'
encoding = "json"
def is_enabled(self):
return "languages" in web.ctx.features
def normalize_key(self, key):
return key
def process_key(self, key):
return key.replace("_", " ")
def get_top_languages(limit):
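    # Returns rows shaped like (illustrative values):
    #   web.storage(name='English', key='/languages/eng', count=123456)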
from . import search
result = search.get_solr().select(
'type:work', rows=0, facets=['language'], facet_limit=limit
)
return [
web.storage(
name=get_language_name(f'/languages/{row.value}'),
key=f'/languages/{row.value}',
count=row.count,
)
for row in result['facets']['language']
]
class index(delegate.page):
path = "/languages"
def GET(self):
return render_template("languages/index", get_top_languages(500))
def is_enabled(self):
return True
class index_json(delegate.page):
path = "/languages"
encoding = "json"
@jsonapi
def GET(self):
i = web.input(limit=15)
return json.dumps(get_top_languages(safeint(i.limit, 15)))
class language_search(delegate.page):
path = '/search/languages'
def GET(self):
i = web.input(q="")
solr = search.get_solr()
q = {"language": i.q}
result = solr.select(q, facets=["language"], fields=["language"], rows=0)
result = self.process_result(result)
return render_template('search/languages', i.q, result)
def process_result(self, result):
solr = search.get_solr()
def process(p):
return web.storage(
name=p.value,
key="/languages/" + p.value.replace(" ", "_"),
count=solr.select({"language": p.value}, rows=0)['num_found'],
)
language_facets = result['facets']['language'][:25]
return [process(p) for p in language_facets]
class LanguageEngine(subjects.SubjectEngine):
def normalize_key(self, key):
return key
def get_ebook_count(self, name, value, publish_year):
        # Query solr for this language and publish_year combination and read the has_fulltext=true facet
solr = search.get_solr()
q = {"language": value}
if isinstance(publish_year, list):
q['publish_year'] = tuple(publish_year) # range
elif publish_year:
q['publish_year'] = publish_year
result = solr.select(q, facets=["has_fulltext"], rows=0)
counts = {v.value: v.count for v in result["facets"]["has_fulltext"]}
return counts.get('true')
def setup():
subjects.SUBJECTS.append(
subjects.SubjectMeta(
name="language",
key="languages",
prefix="/languages/",
facet="language",
facet_key="language",
Engine=LanguageEngine,
)
)
| 3,411 | Python | .py | 95 | 28.494737 | 108 | 0.622751 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
415 | subjects.py | internetarchive_openlibrary/openlibrary/plugins/worksearch/subjects.py | """Subject pages.
"""
from dataclasses import dataclass
from typing import Literal
import web
import json
import datetime
from infogami.plugins.api.code import jsonapi
from infogami.utils import delegate
from infogami.utils.view import render_template, safeint
from openlibrary.core.models import Subject
from openlibrary.core.lending import add_availability
from openlibrary.solr.query_utils import query_dict_to_str
from openlibrary.utils import str_to_key
__all__ = ["SubjectEngine", "get_subject", "SubjectMeta"]
DEFAULT_RESULTS = 12
MAX_RESULTS = 1000
class subjects(delegate.page):
path = '(/subjects/[^/]+)'
def GET(self, key):
if (nkey := self.normalize_key(key)) != key:
raise web.redirect(nkey)
# this needs to be updated to include:
# q=public_scan_b:true+OR+lending_edition_s:*
subj = get_subject(
key,
details=True,
filters={'public_scan_b': 'false', 'lending_edition_s': '*'},
sort=web.input(sort='readinglog').sort,
)
delegate.context.setdefault('cssfile', 'subject')
if not subj or subj.work_count == 0:
web.ctx.status = "404 Not Found"
page = render_template('subjects/notfound.tmpl', key)
else:
page = render_template("subjects", page=subj)
return page
def normalize_key(self, key):
key = key.lower()
# temporary code to handle url change from /people/ to /person:
if key.count("/") == 3:
key = key.replace("/people/", "/person:")
key = key.replace("/places/", "/place:")
key = key.replace("/times/", "/time:")
return key
class subjects_json(delegate.page):
path = '(/subjects/[^/]+)'
encoding = 'json'
@jsonapi
def GET(self, key):
web.header('Content-Type', 'application/json')
# If the key is not in the normalized form, redirect to the normalized form.
if (nkey := self.normalize_key(key)) != key:
raise web.redirect(nkey)
        # Does the key require any processing before using it to query solr?
key = self.process_key(key)
i = web.input(
offset=0,
limit=DEFAULT_RESULTS,
details='false',
has_fulltext='false',
sort='editions',
available='false',
)
i.limit = safeint(i.limit, DEFAULT_RESULTS)
i.offset = safeint(i.offset, 0)
if i.limit > MAX_RESULTS:
msg = json.dumps(
{'error': 'Specified limit exceeds maximum of %s.' % MAX_RESULTS}
)
raise web.HTTPError('400 Bad Request', data=msg)
filters = {}
if i.get('has_fulltext') == 'true':
filters['has_fulltext'] = 'true'
if i.get('published_in'):
if '-' in i.published_in:
begin, end = i.published_in.split('-', 1)
if safeint(begin, None) is not None and safeint(end, None) is not None:
filters['publish_year'] = f'[{begin} TO {end}]'
else:
y = safeint(i.published_in, None)
if y is not None:
filters['publish_year'] = i.published_in
subject_results = get_subject(
key,
offset=i.offset,
limit=i.limit,
sort=i.sort,
details=i.details.lower() == 'true',
**filters,
)
if i.has_fulltext == 'true':
subject_results['ebook_count'] = subject_results['work_count']
return json.dumps(subject_results)
def normalize_key(self, key):
return key.lower()
def process_key(self, key):
return key
SubjectType = Literal["subject", "place", "person", "time"]
SubjectPseudoKey = str
"""
The key-like paths for a subject, eg:
- `/subjects/foo`
- `/subjects/person:harry_potter`
"""
def get_subject(
key: SubjectPseudoKey,
details=False,
offset=0,
sort='editions',
limit=DEFAULT_RESULTS,
**filters,
) -> Subject:
"""Returns data related to a subject.
By default, it returns a storage object with key, name, work_count and works.
The offset and limit arguments are used to get the works.
>>> get_subject("/subjects/Love") #doctest: +SKIP
{
"key": "/subjects/Love",
"name": "Love",
"work_count": 5129,
"works": [...]
}
When details=True, facets and ebook_count are additionally added to the result.
>>> get_subject("/subjects/Love", details=True) #doctest: +SKIP
{
"key": "/subjects/Love",
"name": "Love",
"work_count": 5129,
"works": [...],
"ebook_count": 94,
"authors": [
{
"count": 11,
"name": "Plato.",
"key": "/authors/OL12823A"
},
...
],
"subjects": [
{
"count": 1168,
"name": "Religious aspects",
"key": "/subjects/religious aspects"
},
...
],
"times": [...],
"places": [...],
"people": [...],
"publishing_history": [[1492, 1], [1516, 1], ...],
"publishers": [
{
"count": 57,
"name": "Sine nomine"
},
...
]
}
    Optional arguments limit and offset can be passed to control the number of works returned and the starting offset.
Optional arguments has_fulltext and published_in can be passed to filter the results.
"""
EngineClass = next(
(d.Engine for d in SUBJECTS if key.startswith(d.prefix)), SubjectEngine
)
return EngineClass().get_subject(
key,
details=details,
offset=offset,
sort=sort,
limit=limit,
**filters,
)
class SubjectEngine:
def get_subject(
self,
key,
details=False,
offset=0,
limit=DEFAULT_RESULTS,
sort='new',
**filters,
):
# Circular imports are everywhere -_-
from openlibrary.plugins.worksearch.code import run_solr_query, WorkSearchScheme
meta = self.get_meta(key)
subject_type = meta.name
path = web.lstrips(key, meta.prefix)
name = path.replace("_", " ")
unescaped_filters = {}
if 'publish_year' in filters:
# Don't want this escaped or used in fq for perf reasons
unescaped_filters['publish_year'] = filters.pop('publish_year')
result = run_solr_query(
WorkSearchScheme(),
{
'q': query_dict_to_str(
{meta.facet_key: self.normalize_key(path)},
unescaped=unescaped_filters,
phrase=True,
),
**filters,
},
offset=offset,
rows=limit,
sort=sort,
fields=[
"key",
"author_name",
"author_key",
"title",
"edition_count",
"ia",
"cover_i",
"first_publish_year",
"cover_edition_key",
"has_fulltext",
"subject",
"ia_collection_s",
"public_scan_b",
"lending_edition_s",
"lending_identifier_s",
],
facet=(
details
and [
{"name": "author_facet", "sort": "count"},
"language",
"publisher_facet",
{"name": "publish_year", "limit": -1},
"subject_facet",
"person_facet",
"place_facet",
"time_facet",
"has_fulltext",
]
),
extra_params=[
('facet.mincount', 1),
('facet.limit', 25),
],
allowed_filter_params={
'has_fulltext',
'publish_year',
},
)
subject = Subject(
key=key,
name=name,
subject_type=subject_type,
work_count=result.num_found,
works=add_availability([self.work_wrapper(d) for d in result.docs]),
)
if details:
result.facet_counts = {
facet_field: [
self.facet_wrapper(facet_field, key, label, count)
for key, label, count in facet_counts
]
for facet_field, facet_counts in result.facet_counts.items()
}
subject.ebook_count = next(
(
count
for key, count in result.facet_counts["has_fulltext"]
if key == "true"
),
0,
)
subject.subjects = result.facet_counts["subject_facet"]
subject.places = result.facet_counts["place_facet"]
subject.people = result.facet_counts["person_facet"]
subject.times = result.facet_counts["time_facet"]
subject.authors = result.facet_counts["author_key"]
subject.publishers = result.facet_counts["publisher_facet"]
subject.languages = result.facet_counts['language']
# Ignore bad dates when computing publishing_history
# year < 1000 or year > current_year+1 are considered bad dates
current_year = datetime.datetime.utcnow().year
subject.publishing_history = [
[year, count]
for year, count in result.facet_counts["publish_year"]
if 1000 < year <= current_year + 1
]
# strip self from subjects and use that to find exact name
for i, s in enumerate(subject[meta.key]):
if "key" in s and s.key.lower() == key.lower():
subject.name = s.name
subject[meta.key].pop(i)
break
q = {"type": "/type/tag", "name": subject.name, "tag_type": "subject"}
match = web.ctx.site.things(q)
if match:
tag = web.ctx.site.get(match[0])
match = {
'name': tag.name,
'id': tag.key,
'description': tag.tag_description,
'plugins': tag.tag_plugins,
}
subject.tag = match
return subject
def get_meta(self, key) -> 'SubjectMeta':
prefix = self.parse_key(key)[0]
meta = next((d for d in SUBJECTS if d.prefix == prefix), None)
        assert meta is not None, f"Invalid subject key: {key}"
return meta
def parse_key(self, key):
"""Returns prefix and path from the key."""
for d in SUBJECTS:
if key.startswith(d.prefix):
return d.prefix, key[len(d.prefix) :]
return None, None
def normalize_key(self, key):
return str_to_key(key).lower()
def facet_wrapper(self, facet: str, value: str, label: str, count: int):
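        # e.g. (illustrative) facet_wrapper("author_key", "OL26783A", "Leo Tolstoy", 5)
        #      -> web.storage(name="Leo Tolstoy", key="/authors/OL26783A", count=5)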
if facet == "publish_year":
return [int(value), count]
elif facet == "publisher_facet":
return web.storage(
name=value, count=count, key="/publishers/" + value.replace(" ", "_")
)
elif facet == "author_key":
return web.storage(name=label, key=f"/authors/{value}", count=count)
elif facet in ["subject_facet", "person_facet", "place_facet", "time_facet"]:
meta = next((d for d in SUBJECTS if d.facet == facet), None)
            assert meta is not None, f"Invalid subject facet: {facet}"
return web.storage(
key=meta.prefix + str_to_key(value).replace(" ", "_"),
name=value,
count=count,
)
elif facet == "has_fulltext":
return [value, count]
else:
return web.storage(name=value, count=count)
@staticmethod
def work_wrapper(w: dict) -> web.storage:
"""
Convert a solr document into the doc returned by the /subjects APIs.
These docs are weird :/ We should be using more standardized results
across our search APIs, but that would be a big breaking change.
"""
ia_collection = w.get('ia_collection_s', '').split(';')
return web.storage(
key=w['key'],
title=w["title"],
edition_count=w["edition_count"],
cover_id=w.get('cover_i'),
cover_edition_key=w.get('cover_edition_key'),
subject=w.get('subject', []),
ia_collection=ia_collection,
lendinglibrary='lendinglibrary' in ia_collection,
printdisabled='printdisabled' in ia_collection,
lending_edition=w.get('lending_edition_s', ''),
lending_identifier=w.get('lending_identifier_s', ''),
authors=[
web.storage(key=f'/authors/{olid}', name=name)
for olid, name in zip(w.get('author_key', []), w.get('author_name', []))
],
first_publish_year=w.get('first_publish_year'),
ia=w.get('ia', [None])[0],
public_scan=w.get('public_scan_b', bool(w.get('ia'))),
has_fulltext=w.get('has_fulltext', False),
)
@dataclass
class SubjectMeta:
name: str
key: str
prefix: str
facet: str
facet_key: str
Engine: type['SubjectEngine'] = SubjectEngine
SUBJECTS = [
SubjectMeta(
name="person",
key="people",
prefix="/subjects/person:",
facet="person_facet",
facet_key="person_key",
),
SubjectMeta(
name="place",
key="places",
prefix="/subjects/place:",
facet="place_facet",
facet_key="place_key",
),
SubjectMeta(
name="time",
key="times",
prefix="/subjects/time:",
facet="time_facet",
facet_key="time_key",
),
SubjectMeta(
name="subject",
key="subjects",
prefix="/subjects/",
facet="subject_facet",
facet_key="subject_key",
),
]
def setup():
"""Placeholder for doing any setup required.
This function is called from code.py.
"""
pass
| 14,574 | Python | .py | 408 | 24.735294 | 112 | 0.524745 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
416 | test_autocomplete.py | internetarchive_openlibrary/openlibrary/plugins/worksearch/tests/test_autocomplete.py | import json
from unittest.mock import patch
from openlibrary.plugins.worksearch.autocomplete import autocomplete, works_autocomplete
import web
from openlibrary.utils.solr import Solr
def test_autocomplete():
ac = autocomplete()
with (
patch('web.input') as mock_web_input,
patch('web.header'),
patch('openlibrary.utils.solr.Solr.select') as mock_solr_select,
patch('openlibrary.plugins.worksearch.autocomplete.get_solr') as mock_get_solr,
):
mock_get_solr.return_value = Solr('http://foohost:8983/solr')
mock_web_input.return_value = web.storage(q='foo', limit=5)
mock_solr_select.return_value = {'docs': []}
ac.GET()
# assert solr_select called with correct params
assert (
mock_solr_select.call_args[0][0]
== 'title:"foo"^2 OR title:(foo*) OR name:"foo"^2 OR name:(foo*)'
)
# check kwargs
assert mock_solr_select.call_args.kwargs['fq'] == ['-type:edition']
assert mock_solr_select.call_args.kwargs['q_op'] == 'AND'
assert mock_solr_select.call_args.kwargs['rows'] == 5
def test_works_autocomplete():
ac = works_autocomplete()
with (
patch('web.input') as mock_web_input,
patch('web.header'),
patch('openlibrary.utils.solr.Solr.select') as mock_solr_select,
patch('openlibrary.plugins.worksearch.autocomplete.get_solr') as mock_get_solr,
):
mock_get_solr.return_value = Solr('http://foohost:8983/solr')
mock_web_input.return_value = web.storage(q='foo', limit=5)
mock_solr_select.return_value = {
'docs': [
{
'key': '/works/OL123W',
'type': 'work',
'title': 'Foo Bar',
'subtitle': 'Baz',
},
{
'key': '/works/OL456W',
'type': 'work',
'title': 'Foo Baz',
},
{
'key': '/works/OL789M',
'type': 'work',
'title': 'Foo Baz',
},
]
}
result = json.loads(ac.GET().rawtext)
# assert solr_select called with correct params
assert mock_solr_select.call_args[0][0] == 'title:"foo"^2 OR title:(foo*)'
# check kwargs
assert mock_solr_select.call_args.kwargs['fq'] == ['type:work']
# check result
assert result == [
{
'key': '/works/OL123W',
'type': 'work',
'title': 'Foo Bar',
'subtitle': 'Baz',
'full_title': 'Foo Bar: Baz',
'name': 'Foo Bar',
},
{
'key': '/works/OL456W',
'type': 'work',
'title': 'Foo Baz',
'full_title': 'Foo Baz',
'name': 'Foo Baz',
},
]
# Test searching for OLID
mock_web_input.return_value = web.storage(q='OL123W', limit=5)
mock_solr_select.return_value = {
'docs': [
{
'key': '/works/OL123W',
'type': 'work',
'title': 'Foo Bar',
},
]
}
ac.GET()
# assert solr_select called with correct params
assert mock_solr_select.call_args[0][0] == 'key:"/works/OL123W"'
# Test searching for OLID missing from solr
mock_web_input.return_value = web.storage(q='OL123W', limit=5)
mock_solr_select.return_value = {'docs': []}
with patch(
'openlibrary.plugins.worksearch.autocomplete.autocomplete.db_fetch'
) as db_fetch:
db_fetch.return_value = {'key': '/works/OL123W', 'title': 'Foo Bar'}
ac.GET()
db_fetch.assert_called_once_with('/works/OL123W')
| 3,951 | Python | .py | 102 | 26.882353 | 88 | 0.510672 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
417 | test_worksearch.py | internetarchive_openlibrary/openlibrary/plugins/worksearch/tests/test_worksearch.py | import web
from openlibrary.plugins.worksearch.code import (
process_facet,
get_doc,
)
def test_process_facet():
facets = [('false', 46), ('true', 2)]
assert list(process_facet('has_fulltext', facets)) == [
('true', 'yes', 2),
('false', 'no', 46),
]
def test_get_doc():
doc = get_doc(
{
'author_key': ['OL218224A'],
'author_name': ['Alan Freedman'],
'cover_edition_key': 'OL1111795M',
'edition_count': 14,
'first_publish_year': 1981,
'has_fulltext': True,
'ia': ['computerglossary00free'],
'key': '/works/OL1820355W',
'lending_edition_s': 'OL1111795M',
'public_scan_b': False,
'title': 'The computer glossary',
'ratings_average': None,
'ratings_count': None,
'want_to_read_count': None,
}
)
assert doc == web.storage(
{
'key': '/works/OL1820355W',
'title': 'The computer glossary',
'url': '/works/OL1820355W/The_computer_glossary',
'edition_count': 14,
'ia': ['computerglossary00free'],
'collections': set(),
'has_fulltext': True,
'public_scan': False,
'lending_edition': 'OL1111795M',
'lending_identifier': None,
'authors': [
web.storage(
{
'key': 'OL218224A',
'name': 'Alan Freedman',
'url': '/authors/OL218224A/Alan_Freedman',
'birth_date': None,
'death_date': None,
}
)
],
'first_publish_year': 1981,
'first_edition': None,
'subtitle': None,
'cover_edition_key': 'OL1111795M',
'languages': [],
'id_project_gutenberg': [],
'id_librivox': [],
'id_standard_ebooks': [],
'id_openstax': [],
'id_cita_press': [],
'id_wikisource': [],
'editions': [],
'ratings_average': None,
'ratings_count': None,
'want_to_read_count': None,
}
)
| 2,300 | Python | .py | 70 | 20.828571 | 66 | 0.451482 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
418 | authors.py | internetarchive_openlibrary/openlibrary/plugins/worksearch/schemes/authors.py | from datetime import datetime
import logging
from collections.abc import Callable
from openlibrary.plugins.worksearch.schemes import SearchScheme
logger = logging.getLogger("openlibrary.worksearch")
class AuthorSearchScheme(SearchScheme):
universe = ['type:author']
all_fields = {
'key',
'name',
'alternate_names',
'birth_date',
'death_date',
'date',
'top_subjects',
'work_count',
}
non_solr_fields: set[str] = set()
facet_fields: set[str] = set()
field_name_map: dict[str, str] = {}
sorts = {
'work_count desc': 'work_count desc',
# Random
'random': 'random_1 asc',
'random asc': 'random_1 asc',
'random desc': 'random_1 desc',
'random.hourly': lambda: f'random_{datetime.now():%Y%m%dT%H} asc',
'random.daily': lambda: f'random_{datetime.now():%Y%m%d} asc',
}
default_fetched_fields = {
'key',
'name',
'birth_date',
'death_date',
'date',
'top_subjects',
'work_count',
}
facet_rewrites: dict[tuple[str, str], str | Callable[[], str]] = {}
def q_to_solr_params(
self,
q: str,
solr_fields: set[str],
cur_solr_params: list[tuple[str, str]],
) -> list[tuple[str, str]]:
return [
('q', q),
('q.op', 'AND'),
('defType', 'edismax'),
('qf', 'name alternate_names'),
('pf', 'name^10 alternate_names^10'),
('bf', 'min(work_count,20)'),
]
| 1,581 | Python | .py | 53 | 22.169811 | 74 | 0.538411 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
419 | works.py | internetarchive_openlibrary/openlibrary/plugins/worksearch/schemes/works.py | from copy import deepcopy
from datetime import datetime
import logging
import re
import sys
from typing import Any, cast
from collections.abc import Callable
import luqum.tree
import web
import infogami
from openlibrary.plugins.upstream.utils import convert_iso_to_marc
from openlibrary.plugins.worksearch.schemes import SearchScheme
from openlibrary.solr.query_utils import (
EmptyTreeError,
fully_escape_query,
luqum_parser,
luqum_remove_child,
luqum_remove_field,
luqum_replace_child,
luqum_traverse,
luqum_replace_field,
)
from openlibrary.utils.ddc import (
normalize_ddc,
normalize_ddc_prefix,
normalize_ddc_range,
)
from openlibrary.utils.isbn import normalize_isbn
from openlibrary.utils.lcc import (
normalize_lcc_prefix,
normalize_lcc_range,
short_lcc_to_sortable_lcc,
)
logger = logging.getLogger("openlibrary.worksearch")
re_author_key = re.compile(r'(OL\d+A)')
class WorkSearchScheme(SearchScheme):
universe = ['type:work']
all_fields = {
"key",
"redirects",
"title",
"subtitle",
"alternative_title",
"alternative_subtitle",
"cover_i",
"ebook_access",
"edition_count",
"edition_key",
"format",
"by_statement",
"publish_date",
"lccn",
"ia",
"oclc",
"isbn",
"contributor",
"publish_place",
"publisher",
"first_sentence",
"author_key",
"author_name",
"author_alternative_name",
"subject",
"person",
"place",
"time",
"has_fulltext",
"title_suggest",
"publish_year",
"language",
"number_of_pages_median",
"ia_count",
"publisher_facet",
"author_facet",
"first_publish_year",
"ratings_count",
"readinglog_count",
"want_to_read_count",
"currently_reading_count",
"already_read_count",
# Subjects
"subject_key",
"person_key",
"place_key",
"time_key",
# Classifications
"lcc",
"ddc",
"lcc_sort",
"ddc_sort",
"osp_count",
}
non_solr_fields = {
'description',
'providers',
}
facet_fields = {
"has_fulltext",
"author_facet",
"language",
"first_publish_year",
"publisher_facet",
"subject_facet",
"person_facet",
"place_facet",
"time_facet",
"public_scan_b",
}
field_name_map = {
'author': 'author_name',
'authors': 'author_name',
'by': 'author_name',
'number_of_pages': 'number_of_pages_median',
'publishers': 'publisher',
'subtitle': 'alternative_subtitle',
'title': 'alternative_title',
'work_subtitle': 'subtitle',
'work_title': 'title',
# "Private" fields
# This is private because we'll change it to a multi-valued field instead of a
# plain string at the next opportunity, which will make it much more usable.
'_ia_collection': 'ia_collection_s',
}
sorts = {
'editions': 'edition_count desc',
'old': 'def(first_publish_year, 9999) asc',
'new': 'first_publish_year desc',
'rating': 'ratings_sortable desc',
'rating asc': 'ratings_sortable asc',
'rating desc': 'ratings_sortable desc',
'readinglog': 'readinglog_count desc',
'want_to_read': 'want_to_read_count desc',
'currently_reading': 'currently_reading_count desc',
'already_read': 'already_read_count desc',
'title': 'title_sort asc',
'scans': 'ia_count desc',
# Classifications
'lcc_sort': 'lcc_sort asc',
'lcc_sort asc': 'lcc_sort asc',
'lcc_sort desc': 'lcc_sort desc',
'ddc_sort': 'ddc_sort asc',
'ddc_sort asc': 'ddc_sort asc',
'ddc_sort desc': 'ddc_sort desc',
# Ebook access
'ebook_access': 'ebook_access desc',
'ebook_access asc': 'ebook_access asc',
'ebook_access desc': 'ebook_access desc',
# Open Syllabus Project
'osp_count': 'osp_count desc',
'osp_count asc': 'osp_count asc',
'osp_count desc': 'osp_count desc',
# Key
'key': 'key asc',
'key asc': 'key asc',
'key desc': 'key desc',
# Random
'random': 'random_1 asc',
'random asc': 'random_1 asc',
'random desc': 'random_1 desc',
'random.hourly': lambda: f'random_{datetime.now():%Y%m%dT%H} asc',
'random.daily': lambda: f'random_{datetime.now():%Y%m%d} asc',
}
default_fetched_fields = {
'key',
'author_name',
'author_key',
'title',
'subtitle',
'edition_count',
'ia',
'has_fulltext',
'first_publish_year',
'cover_i',
'cover_edition_key',
'public_scan_b',
'lending_edition_s',
'lending_identifier_s',
'language',
'ia_collection_s',
        # FIXME: These should be fetched from book_providers, but can't be due to a
        # circular dep
'id_project_gutenberg',
'id_librivox',
'id_standard_ebooks',
'id_openstax',
'id_cita_press',
'id_wikisource',
}
facet_rewrites = {
('public_scan', 'true'): 'ebook_access:public',
('public_scan', 'false'): '-ebook_access:public',
('print_disabled', 'true'): 'ebook_access:printdisabled',
('print_disabled', 'false'): '-ebook_access:printdisabled',
(
'has_fulltext',
'true',
): lambda: f'ebook_access:[{get_fulltext_min()} TO *]',
(
'has_fulltext',
'false',
): lambda: f'ebook_access:[* TO {get_fulltext_min()}}}',
}
def is_search_field(self, field: str):
        # Strip any 'work.'/'edition.' prefix before checking the field name.
if field.startswith(('work.', 'edition.')):
return self.is_search_field(field.partition(".")[2])
return super().is_search_field(field) or field.startswith('id_')
def transform_user_query(
self, user_query: str, q_tree: luqum.tree.Item
) -> luqum.tree.Item:
has_search_fields = False
for node, parents in luqum_traverse(q_tree):
if isinstance(node, luqum.tree.SearchField):
has_search_fields = True
if node.name.lower() in self.field_name_map:
node.name = self.field_name_map[node.name.lower()]
if node.name == 'isbn':
isbn_transform(node)
if node.name in ('lcc', 'lcc_sort'):
lcc_transform(node)
                if node.name in ('ddc', 'ddc_sort'):
ddc_transform(node)
if node.name == 'ia_collection_s':
ia_collection_s_transform(node)
if not has_search_fields:
# If there are no search fields, maybe we want just an isbn?
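            # e.g. (illustrative) a bare query of "978-0140449136" is rewritten to
            # isbn:(9780140449136)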
isbn = normalize_isbn(user_query)
if isbn and len(isbn) in (10, 13):
q_tree = luqum_parser(f'isbn:({isbn})')
return q_tree
def build_q_from_params(self, params: dict[str, Any]) -> str:
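        # Illustrative (hypothetical input): {'author': 'Frank Herbert', 'isbn': '978-0441172719'}
        # produces roughly:
        #   (author_name:(Frank Herbert) OR author_alternative_name:(Frank Herbert)) AND isbn:(9780441172719)
        # (clause order for the other check_params fields may vary, since they come from a set).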
q_list = []
if 'author' in params:
v = params['author'].strip()
m = re_author_key.search(v)
if m:
q_list.append(f"author_key:({m.group(1)})")
else:
v = fully_escape_query(v)
q_list.append(f"(author_name:({v}) OR author_alternative_name:({v}))")
check_params = {
'title',
'publisher',
'oclc',
'lccn',
'contributor',
'subject',
'place',
'person',
'time',
'author_key',
}
# support web.input fields being either a list or string
# when default values used
q_list += [
f'{k}:({fully_escape_query(val)})'
for k in (check_params & set(params))
for val in (params[k] if isinstance(params[k], list) else [params[k]])
]
if params.get('isbn'):
q_list.append(
'isbn:(%s)' % (normalize_isbn(params['isbn']) or params['isbn'])
)
return ' AND '.join(q_list)
def q_to_solr_params( # noqa: C901, PLR0915
self,
q: str,
solr_fields: set[str],
cur_solr_params: list[tuple[str, str]],
) -> list[tuple[str, str]]:
new_params: list[tuple[str, str]] = []
# We need to parse the tree so that it gets transformed using the
# special OL query parsing rules (different from default solr!)
# See luqum_parser for details.
work_q_tree = luqum_parser(q)
# Removes the work prefix from fields; used as the callable argument for 'luqum_replace_field'
def remove_work_prefix(field: str) -> str:
return field.partition('.')[2] if field.startswith('work.') else field
        # Strip the 'work.' prefix from fielded queries before adding the work query to the parameters.
final_work_query = deepcopy(work_q_tree)
luqum_replace_field(final_work_query, remove_work_prefix)
try:
luqum_remove_field(final_work_query, lambda f: f.startswith('edition.'))
except EmptyTreeError:
# If the whole tree is removed, we should just search for everything
final_work_query = luqum_parser('*:*')
new_params.append(('workQuery', str(final_work_query)))
# This full work query uses solr-specific syntax to add extra parameters
# to the way the search is processed. We are using the edismax parser.
# See https://solr.apache.org/guide/8_11/the-extended-dismax-query-parser.html
# This is somewhat synonymous to setting defType=edismax in the
# query, but much more flexible. We wouldn't be able to do our
# complicated parent/child queries with defType!
full_work_query = '({{!edismax q.op="AND" qf="{qf}" pf="{pf}" bf="{bf}" v={v}}})'.format(
# qf: the fields to query un-prefixed parts of the query.
# e.g. 'harry potter' becomes
# 'text:(harry potter) OR alternative_title:(harry potter)^20 OR ...'
qf='text alternative_title^10 author_name^10',
# pf: phrase fields. This increases the score of documents that
# match the query terms in close proximity to each other.
pf='alternative_title^10 author_name^10',
# bf (boost factor): boost results based on the value of this
# field. I.e. results with more editions get boosted, upto a
# max of 100, after which we don't see it as good signal of
# quality.
bf='min(100,edition_count) min(100,def(readinglog_count,0))',
# v: the query to process with the edismax query parser. Note
# we are using a solr variable here; this reads the url parameter
# arbitrarily called workQuery.
v='$workQuery',
)
ed_q = None
full_ed_query = None
editions_fq = []
if has_solr_editions_enabled() and 'editions:[subquery]' in solr_fields:
WORK_FIELD_TO_ED_FIELD: dict[str, str | Callable[[str], str]] = {
# Internals
'edition_key': 'key',
'text': 'text',
# Display data
'title': 'title',
'title_suggest': 'title_suggest',
'subtitle': 'subtitle',
'alternative_title': 'alternative_title',
'alternative_subtitle': 'subtitle',
'cover_i': 'cover_i',
# Duplicate author fields
# Disabled until the next full reindex
# 'author_name': 'author_name',
# 'author_key': 'author_key',
# 'author_alternative_name': 'author_alternative_name',
# 'author_facet': 'author_facet',
# Misc useful data
'format': 'format',
'language': 'language',
'publisher': 'publisher',
'publisher_facet': 'publisher_facet',
'publish_date': 'publish_date',
'publish_year': 'publish_year',
# Identifiers
'isbn': 'isbn',
# 'id_*': 'id_*', # Handled manually for now to match any id field
'ebook_access': 'ebook_access',
# IA
'has_fulltext': 'has_fulltext',
'ia': 'ia',
'ia_collection': 'ia_collection',
'ia_box_id': 'ia_box_id',
'public_scan_b': 'public_scan_b',
}
def convert_work_field_to_edition_field(
field: str,
) -> str | Callable[[str], str] | None:
"""
Convert a SearchField name (eg 'title') to the correct fieldname
for use in an edition query.
If no conversion is possible, return None.
"""
if field in WORK_FIELD_TO_ED_FIELD:
return WORK_FIELD_TO_ED_FIELD[field]
elif field.startswith('id_'):
return field
elif self.is_search_field(field) or field in self.facet_fields:
return None
else:
raise ValueError(f'Unknown field: {field}')
def convert_work_query_to_edition_query(work_query: str) -> str:
"""
Convert a work query to an edition query. Mainly involves removing
invalid fields, or renaming fields as necessary.
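                e.g. (illustrative) a work query of 'title:dune publisher:ace' keeps both
                fields (they map onto editions), while a work-only field such as
                first_publish_year is dropped from the edition query.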
"""
q_tree = luqum_parser(work_query)
for node, parents in luqum_traverse(q_tree):
if isinstance(node, luqum.tree.SearchField) and node.name != '*':
if node.name.startswith('edition.'):
ed_field = node.name.partition('.')[2]
else:
ed_field = node.name
new_name = convert_work_field_to_edition_field(ed_field)
if new_name is None:
try:
luqum_remove_child(node, parents)
except EmptyTreeError:
# Deleted the whole tree! Nothing left
return ''
elif isinstance(new_name, str):
parent = parents[-1] if parents else None
# Prefixing with + makes the field mandatory
if isinstance(
parent,
(
luqum.tree.Not,
luqum.tree.Prohibit,
luqum.tree.OrOperation,
),
):
node.name = new_name
else:
node.name = f'+{new_name}'
if new_name == 'key':
# need to convert eg 'edition_key:OL123M' to
# 'key:(/books/OL123M)'. Or
# key:(/books/OL123M OR /books/OL456M)
for n, n_parents in luqum_traverse(node.expr):
if isinstance(
n, (luqum.tree.Word, luqum.tree.Phrase)
):
val = (
n.value
if isinstance(n, luqum.tree.Word)
else n.value[1:-1]
)
if val.startswith('/books/'):
val = val[7:]
n.value = f'"/books/{val}"'
elif callable(new_name):
# Replace this node with a new one
# First process the expr
new_expr = convert_work_query_to_edition_query(
str(node.expr)
)
new_node = luqum.tree.Group(
luqum_parser(new_name(new_expr))
)
if parents:
luqum_replace_child(parents[-1], node, new_node)
else:
return convert_work_query_to_edition_query(
str(new_node)
)
else:
# Shouldn't happen
raise ValueError(f'Invalid new_name: {new_name}')
return str(q_tree)
# Move over all fq parameters that can be applied to editions.
# These are generally used to handle facets.
editions_fq = ['type:edition']
for param_name, param_value in cur_solr_params:
if param_name != 'fq' or param_value.startswith('type:'):
continue
field_name, field_val = param_value.split(':', 1)
if ed_field := convert_work_field_to_edition_field(field_name):
editions_fq.append(f'{ed_field}:{field_val}')
for fq in editions_fq:
new_params.append(('editions.fq', fq))
user_lang = convert_iso_to_marc(web.ctx.lang or 'en') or 'eng'
ed_q = convert_work_query_to_edition_query(str(work_q_tree))
full_ed_query = '({{!edismax bq="{bq}" v="{v}" qf="{qf}"}})'.format(
# See qf in work_query
qf='text alternative_title^4 author_name^4',
# Because we include the edition query inside the v="..." part,
# we need to escape quotes. Also note that if there is no
# edition query (because no fields in the user's work query apply),
# we use the special value *:* to match everything, but still get
# boosting.
v=ed_q.replace('"', '\\"') or '*:*',
# bq (boost query): Boost which edition is promoted to the top
bq=' '.join(
(
f'language:{user_lang}^40',
'ebook_access:public^10',
'ebook_access:borrowable^8',
'ebook_access:printdisabled^2',
'cover_i:*^2',
)
),
)
if ed_q or len(editions_fq) > 1:
# The elements in _this_ edition query should cause works not to
# match _at all_ if matching editions are not found
new_params.append(('edQuery', cast(str, full_ed_query) if ed_q else '*:*'))
q = (
f'+{full_work_query} '
# This is using the special parent query syntax to, on top of
# the user's `full_work_query`, also only find works which have
# editions matching the edition query.
# Also include edition-less works (i.e. edition_count:0)
'+('
'_query_:"{!parent which=type:work v=$edQuery filters=$editions.fq}" '
'OR edition_count:0'
')'
)
new_params.append(('q', q))
else:
new_params.append(('q', full_work_query))
if full_ed_query:
edition_fields = {
f.split('.', 1)[1] for f in solr_fields if f.startswith('editions.')
}
if not edition_fields:
edition_fields = solr_fields - {
# Default to same fields as for the work...
'editions:[subquery]',
# but exclude the author fields since they're primarily work data;
# they only exist on editions to improve search matches.
'author_name',
'author_key',
'author_alternative_name',
'author_facet',
}
# The elements in _this_ edition query will match but not affect
# whether the work appears in search results
new_params.append(
(
'editions.q',
# Here we use the special terms parser to only filter the
# editions for a given, already matching work '_root_' node.
f'({{!terms f=_root_ v=$row.key}}) AND {full_ed_query}',
)
)
new_params.append(('editions.rows', '1'))
new_params.append(('editions.fl', ','.join(edition_fields)))
return new_params
def add_non_solr_fields(self, non_solr_fields: set[str], solr_result: dict) -> None:
from openlibrary.plugins.upstream.models import Edition
# Augment with data from db
edition_keys = [
ed_doc['key']
for doc in solr_result['response']['docs']
for ed_doc in doc.get('editions', {}).get('docs', [])
]
editions = cast(list[Edition], web.ctx.site.get_many(edition_keys))
ed_key_to_record = {ed.key: ed for ed in editions if ed.key in edition_keys}
from openlibrary.book_providers import get_book_provider
for doc in solr_result['response']['docs']:
for ed_doc in doc.get('editions', {}).get('docs', []):
# `ed` could be `None` if the record has been deleted and Solr not yet updated.
if not (ed := ed_key_to_record.get(ed_doc['key'])):
continue
for field in non_solr_fields:
val = getattr(ed, field)
if field == 'providers':
provider = get_book_provider(ed)
if not provider:
continue
ed_doc[field] = [
p.__dict__ for p in provider.get_acquisitions(ed)
]
elif isinstance(val, infogami.infobase.client.Nothing):
continue
elif field == 'description':
ed_doc[field] = val if isinstance(val, str) else val.value
def lcc_transform(sf: luqum.tree.SearchField):
# e.g. lcc:[NC1 TO NC1000] to lcc:[NC-0001.00000000 TO NC-1000.00000000]
# for proper range search
val = sf.children[0]
if isinstance(val, luqum.tree.Range):
normed_range = normalize_lcc_range(val.low.value, val.high.value)
if normed_range:
val.low.value, val.high.value = normed_range
elif isinstance(val, luqum.tree.Word):
if '*' in val.value and not val.value.startswith('*'):
# Marshals human repr into solr repr
# lcc:A720* should become A--0720*
parts = val.value.split('*', 1)
lcc_prefix = normalize_lcc_prefix(parts[0])
val.value = (lcc_prefix or parts[0]) + '*' + parts[1]
else:
normed = short_lcc_to_sortable_lcc(val.value.strip('"'))
if normed:
val.value = normed
elif isinstance(val, luqum.tree.Phrase):
normed = short_lcc_to_sortable_lcc(val.value.strip('"'))
if normed:
val.value = f'"{normed}"'
elif (
isinstance(val, luqum.tree.Group)
and isinstance(val.expr, luqum.tree.UnknownOperation)
and all(isinstance(c, luqum.tree.Word) for c in val.expr.children)
):
# treat it as a string
normed = short_lcc_to_sortable_lcc(str(val.expr))
if normed:
if ' ' in normed:
sf.expr = luqum.tree.Phrase(f'"{normed}"')
else:
sf.expr = luqum.tree.Word(f'{normed}*')
else:
logger.warning(f"Unexpected lcc SearchField value type: {type(val)}")
def ddc_transform(sf: luqum.tree.SearchField):
val = sf.children[0]
if isinstance(val, luqum.tree.Range):
normed_range = normalize_ddc_range(val.low.value, val.high.value)
val.low.value = normed_range[0] or val.low
val.high.value = normed_range[1] or val.high
elif isinstance(val, luqum.tree.Word) and val.value.endswith('*'):
        val.value = normalize_ddc_prefix(val.value[:-1]) + '*'
elif isinstance(val, (luqum.tree.Word, luqum.tree.Phrase)):
if normed := normalize_ddc(val.value.strip('"')):
val.value = normed
else:
logger.warning(f"Unexpected ddc SearchField value type: {type(val)}")
def isbn_transform(sf: luqum.tree.SearchField):
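    # e.g. (illustrative) isbn:978-0-14-044913-6 becomes isbn:9780140449136;
    # wildcard values such as isbn:978014* are left untouched.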
field_val = sf.children[0]
if isinstance(field_val, luqum.tree.Word) and '*' not in field_val.value:
isbn = normalize_isbn(field_val.value)
if isbn:
field_val.value = isbn
else:
logger.warning(f"Unexpected isbn SearchField value type: {type(field_val)}")
def ia_collection_s_transform(sf: luqum.tree.SearchField):
"""
    Because this field is not a multi-valued field in solr, but a simple ;-separated
string, we have to do searches like this for now.
"""
val = sf.children[0]
if isinstance(val, luqum.tree.Word):
        if not val.value.startswith('*'):
            val.value = '*' + val.value
        if not val.value.endswith('*'):
            val.value += '*'
else:
logger.warning(
f"Unexpected ia_collection_s SearchField value type: {type(val)}"
)
def has_solr_editions_enabled():
if 'pytest' in sys.modules:
return True
def read_query_string():
return web.input(editions=None).get('editions')
def read_cookie():
if "SOLR_EDITIONS" in web.ctx.env.get("HTTP_COOKIE", ""):
return web.cookies().get('SOLR_EDITIONS')
if (qs_value := read_query_string()) is not None:
return qs_value == 'true'
if (cookie_value := read_cookie()) is not None:
return cookie_value == 'true'
return True
def get_fulltext_min():
is_printdisabled = web.cookies().get('pd', False)
return 'printdisabled' if is_printdisabled else 'borrowable'
| 26,861 | Python | .py | 631 | 29.293185 | 110 | 0.518831 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
420 | editions.py | internetarchive_openlibrary/openlibrary/plugins/worksearch/schemes/editions.py | from datetime import datetime
import logging
from openlibrary.plugins.worksearch.schemes import SearchScheme
logger = logging.getLogger("openlibrary.worksearch")
# Mostly a stub for now, since you can't really search editions
# directly, but it's still useful for some things (e.g. editions have custom
# sort logic).
class EditionSearchScheme(SearchScheme):
universe = ['type:work']
all_fields = {
"key",
"title",
"subtitle",
"alternative_title",
"alternative_subtitle",
"cover_i",
"ebook_access",
"publish_date",
"lccn",
"ia",
"isbn",
"publisher",
"has_fulltext",
"title_suggest",
"publish_year",
"language",
"publisher_facet",
}
facet_fields: set[str] = set()
field_name_map = {
'publishers': 'publisher',
'subtitle': 'alternative_subtitle',
'title': 'alternative_title',
# "Private" fields
# This is private because we'll change it to a multi-valued field instead of a
# plain string at the next opportunity, which will make it much more usable.
'_ia_collection': 'ia_collection_s',
}
sorts = {
'old': 'def(publish_year, 9999) asc',
'new': 'publish_year desc',
'title': 'title_sort asc',
# Ebook access
'ebook_access': 'ebook_access desc',
'ebook_access asc': 'ebook_access asc',
'ebook_access desc': 'ebook_access desc',
# Key
'key': 'key asc',
'key asc': 'key asc',
'key desc': 'key desc',
# Random
'random': 'random_1 asc',
'random asc': 'random_1 asc',
'random desc': 'random_1 desc',
'random.hourly': lambda: f'random_{datetime.now():%Y%m%dT%H} asc',
'random.daily': lambda: f'random_{datetime.now():%Y%m%d} asc',
}
default_fetched_fields: set[str] = set()
facet_rewrites = {}
def is_search_field(self, field: str):
return super().is_search_field(field) or field.startswith('id_')
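# Illustrative sketch (not part of the original module): resolving a user-facing sort
# against the `sorts` table above via SearchScheme.process_user_sort.
def _example_edition_sort() -> str:  # hypothetical helper, never called in production
    # 'old, key asc' -> 'def(publish_year, 9999) asc,key asc'
    return EditionSearchScheme().process_user_sort('old, key asc')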
| 2,081 | Python | .py | 61 | 26.803279 | 86 | 0.594045 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
421 | __init__.py | internetarchive_openlibrary/openlibrary/plugins/worksearch/schemes/__init__.py | import logging
from collections.abc import Callable
import luqum.tree
from luqum.exceptions import ParseError
from openlibrary.solr.query_utils import (
escape_unknown_fields,
fully_escape_query,
luqum_parser,
)
logger = logging.getLogger("openlibrary.worksearch")
class SearchScheme:
# Set of queries that define the universe of this scheme
universe: list[str]
# All actual solr fields that can be in a user query
all_fields: set[str]
# Fields that can be read, but which aren't stored in solr
non_solr_fields: set[str]
# These fields are fetched for facets and can also be url params
facet_fields: set[str]
# Mapping of user-only fields to solr fields
field_name_map: dict[str, str]
# Mapping of user sort to solr sort
sorts: dict[str, str | Callable[[], str]]
# Default
default_fetched_fields: set[str]
# Fields that should be rewritten
facet_rewrites: dict[tuple[str, str], str | Callable[[], str]]
def is_search_field(self, field: str):
return field in self.all_fields or field in self.field_name_map
def process_user_sort(self, user_sort: str) -> str:
"""
Convert a user-provided sort to a solr sort
>>> from openlibrary.plugins.worksearch.schemes.works import WorkSearchScheme
>>> scheme = WorkSearchScheme()
>>> scheme.process_user_sort('editions')
'edition_count desc'
>>> scheme.process_user_sort('editions, new')
'edition_count desc,first_publish_year desc'
>>> scheme.process_user_sort('random')
'random_1 asc'
>>> scheme.process_user_sort('random_custom_seed')
'random_1_custom_seed asc'
>>> scheme.process_user_sort('random_custom_seed desc')
'random_1_custom_seed desc'
>>> scheme.process_user_sort('random_custom_seed asc')
'random_1_custom_seed asc'
"""
def process_individual_sort(sort: str) -> str:
if sort.startswith(('random_', 'random.hourly_', 'random.daily_')):
# Allow custom randoms; so anything random_* is allowed
# Also Allow custom time randoms to allow carousels with overlapping
# books to have a fresh ordering when on the same collection
sort_order: str | None = None
if ' ' in sort:
sort, sort_order = sort.split(' ', 1)
random_type, random_seed = sort.split('_', 1)
solr_sort = self.sorts[random_type]
solr_sort_str = solr_sort() if callable(solr_sort) else solr_sort
solr_sort_field, solr_sort_order = solr_sort_str.split(' ', 1)
sort_order = sort_order or solr_sort_order
return f'{solr_sort_field}_{random_seed} {sort_order}'
else:
solr_sort = self.sorts[sort]
return solr_sort() if callable(solr_sort) else solr_sort
return ','.join(
process_individual_sort(s.strip()) for s in user_sort.split(',')
)
def process_user_query(self, q_param: str) -> str:
if q_param == '*:*':
# This is a special solr syntax; don't process
return q_param
try:
q_param = escape_unknown_fields(
(
# Solr 4+ has support for regexes (eg `key:/foo.*/`)! But for now,
# let's not expose that and escape all '/'. Otherwise
# `key:/works/OL1W` is interpreted as a regex.
q_param.strip()
.replace('/', '\\/')
# Also escape unexposed lucene features
.replace('?', '\\?')
.replace('~', '\\~')
),
self.is_search_field,
lower=True,
)
q_tree = luqum_parser(q_param)
except ParseError:
# This isn't a syntactically valid lucene query
logger.warning("Invalid lucene query", exc_info=True)
# Escape everything we can
q_tree = luqum_parser(fully_escape_query(q_param))
q_tree = self.transform_user_query(q_param, q_tree)
return str(q_tree)
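    # Illustrative sketch (not part of the original class): the pre-escaping above is plain
    # string work done before luqum parsing, shown here for the `key:/works/OL1W` case
    # mentioned in the comment.
    @staticmethod
    def _example_pre_escape() -> str:  # hypothetical helper, never called in production
        # Produces a backslash before each '/', so the value is not read as a regex.
        return 'key:/works/OL1W'.replace('/', '\\/').replace('?', '\\?').replace('~', '\\~')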
def transform_user_query(
self,
user_query: str,
q_tree: luqum.tree.Item,
) -> luqum.tree.Item:
return q_tree
def build_q_from_params(self, params: dict) -> str | None:
return None
def q_to_solr_params(
self,
q: str,
solr_fields: set[str],
cur_solr_params: list[tuple[str, str]],
) -> list[tuple[str, str]]:
return [('q', q)]
def add_non_solr_fields(self, solr_fields: set[str], solr_result: dict) -> None:
raise NotImplementedError()
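# Illustrative sketch (not part of the original module): a minimal concrete scheme wiring up
# the attributes documented above; the field names and sorts here are invented for the example.
class _ExampleSearchScheme(SearchScheme):
    universe = ['type:example']
    all_fields = {'key', 'title'}
    non_solr_fields: set[str] = set()
    facet_fields: set[str] = set()
    field_name_map = {'name': 'title'}
    sorts = {'new': 'first_publish_year desc'}
    default_fetched_fields: set[str] = set()
    facet_rewrites = {}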
| 4,797 | Python | .py | 111 | 33.027027 | 86 | 0.586081 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
422 | subjects.py | internetarchive_openlibrary/openlibrary/plugins/worksearch/schemes/subjects.py | from datetime import datetime
import logging
from collections.abc import Callable
from openlibrary.plugins.worksearch.schemes import SearchScheme
logger = logging.getLogger("openlibrary.worksearch")
class SubjectSearchScheme(SearchScheme):
universe = ['type:subject']
all_fields = {
'key',
'name',
'subject_type',
'work_count',
}
non_solr_fields: set[str] = set()
facet_fields: set[str] = set()
field_name_map: dict[str, str] = {}
sorts = {
'work_count desc': 'work_count desc',
# Random
'random': 'random_1 asc',
'random asc': 'random_1 asc',
'random desc': 'random_1 desc',
'random.hourly': lambda: f'random_{datetime.now():%Y%m%dT%H} asc',
'random.daily': lambda: f'random_{datetime.now():%Y%m%d} asc',
}
default_fetched_fields = {
'key',
'name',
'subject_type',
'work_count',
}
facet_rewrites: dict[tuple[str, str], str | Callable[[], str]] = {}
def q_to_solr_params(
self,
q: str,
solr_fields: set[str],
cur_solr_params: list[tuple[str, str]],
) -> list[tuple[str, str]]:
return [
('q', q),
('q.op', 'AND'),
('defType', 'edismax'),
]
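    # For example (sketch, not in the original module), q='science' yields
    #   [('q', 'science'), ('q.op', 'AND'), ('defType', 'edismax')]
    # so subject search always runs through edismax with AND as the default operator.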
| 1,300 | Python | .py | 43 | 23.162791 | 74 | 0.5623 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
423 | test_works.py | internetarchive_openlibrary/openlibrary/plugins/worksearch/schemes/tests/test_works.py | from unittest.mock import patch
import pytest
from openlibrary.plugins.worksearch.schemes.works import WorkSearchScheme
# {'Test name': ('query', 'expected transformed query')}
QUERY_PARSER_TESTS = {
'No fields': ('query here', 'query here'),
'Misc': (
'title:(Holidays are Hell) authors:(Kim Harrison) OR authors:(Lynsay Sands)',
'alternative_title:(Holidays are Hell) author_name:(Kim Harrison) OR author_name:(Lynsay Sands)',
),
'Author field': (
'food rules author:pollan',
'food rules author_name:pollan',
),
'Invalid dashes': (
'foo foo bar -',
'foo foo bar \\-',
),
'Field aliases': (
'title:food rules by:pollan',
'alternative_title:(food rules) author_name:pollan',
),
'Fields are case-insensitive aliases': (
'food rules By:pollan',
'food rules author_name:pollan',
),
'Spaces after fields': (
'title: "Harry Potter"',
'alternative_title:"Harry Potter"',
),
'Quotes': (
'title:"food rules" author:pollan',
'alternative_title:"food rules" author_name:pollan',
),
'Leading text': (
'query here title:food rules author:pollan',
'query here alternative_title:(food rules) author_name:pollan',
),
'Colons in query': (
'flatland:a romance of many dimensions',
'flatland\\:a romance of many dimensions',
),
'Spaced colons in query': (
'flatland : a romance of many dimensions',
'flatland\\: a romance of many dimensions',
),
'Colons in field': (
'title:flatland:a romance of many dimensions',
'alternative_title:(flatland\\:a romance of many dimensions)',
),
'Operators': (
'authors:Kim Harrison OR authors:Lynsay Sands',
'author_name:(Kim Harrison) OR author_name:(Lynsay Sands)',
),
'ISBN-like': (
'978-0-06-093546-7',
'isbn:(9780060935467)',
),
'Normalizes ISBN': (
'isbn:978-0-06-093546-7',
'isbn:9780060935467',
),
'Does not normalize ISBN stars': (
'isbn:979*',
'isbn:979*',
),
# LCCs
'LCC: quotes added if space present': (
'lcc:NC760 .B2813 2004',
'lcc:"NC-0760.00000000.B2813 2004"',
),
'LCC: star added if no space': (
'lcc:NC760 .B2813',
'lcc:NC-0760.00000000.B2813*',
),
'LCC: Noise left as is': (
'lcc:good evening',
'lcc:(good evening)',
),
'LCC: range': (
'lcc:[NC1 TO NC1000]',
'lcc:[NC-0001.00000000 TO NC-1000.00000000]',
),
'LCC: prefix': (
'lcc:NC76.B2813*',
'lcc:NC-0076.00000000.B2813*',
),
'LCC: suffix': (
'lcc:*B2813',
'lcc:*B2813',
),
'LCC: multi-star without prefix': (
'lcc:*B2813*',
'lcc:*B2813*',
),
'LCC: multi-star with prefix': (
'lcc:NC76*B2813*',
'lcc:NC-0076*B2813*',
),
'LCC: quotes preserved': (
'lcc:"NC760 .B2813"',
'lcc:"NC-0760.00000000.B2813"',
),
# TODO Add tests for DDC
}
@pytest.mark.parametrize(
"query,parsed_query", QUERY_PARSER_TESTS.values(), ids=QUERY_PARSER_TESTS.keys()
)
def test_process_user_query(query, parsed_query):
s = WorkSearchScheme()
assert s.process_user_query(query) == parsed_query
EDITION_KEY_TESTS = {
'edition_key:OL123M': '+key:\\"/books/OL123M\\"',
'edition_key:"OL123M"': '+key:\\"/books/OL123M\\"',
'edition_key:"/books/OL123M"': '+key:\\"/books/OL123M\\"',
'edition_key:(OL123M)': '+key:(\\"/books/OL123M\\")',
'edition_key:(OL123M OR OL456M)': '+key:(\\"/books/OL123M\\" OR \\"/books/OL456M\\")',
}
@pytest.mark.parametrize("query,edQuery", EDITION_KEY_TESTS.items())
def test_q_to_solr_params_edition_key(query, edQuery):
import web
web.ctx.lang = 'en'
s = WorkSearchScheme()
with patch(
'openlibrary.plugins.worksearch.schemes.works.convert_iso_to_marc'
) as mock_fn:
mock_fn.return_value = 'eng'
params = s.q_to_solr_params(query, {'editions:[subquery]'}, [])
params_d = dict(params)
assert params_d['workQuery'] == query
assert edQuery in params_d['edQuery']
| 4,209 | Python | .py | 131 | 25.900763 | 105 | 0.591792 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
424 | code.py | internetarchive_openlibrary/openlibrary/plugins/books/code.py | """Open Library Books API
"""
import json
import re
import urllib
import web
from infogami.utils import delegate
from infogami.plugins.api.code import jsonapi
from openlibrary.plugins.books import dynlinks, readlinks
class books_json(delegate.page):
"""
Endpoint for mapping bib keys (e.g. ISBN, LCCN) to certain links associated
with Open Library editions, such as the thumbnail URL.
    - `bibkeys` is expected to be a comma-separated string of ISBNs, LCCNs, etc.
- `'high_priority=true'` will attempt to import an edition from a supplied ISBN
if no matching edition is found. If not `high_priority`, then missed bib_keys
are queued for lookup on the affiliate-server, and any responses are `staged`
in `import_item`.
Example call:
http://localhost:8080/api/books.json?bibkeys=059035342X,0312368615&high_priority=true
Returns a JSONified dictionary of the form:
{"059035342X": {
"bib_key": "059035342X",
"info_url": "http://localhost:8080/books/OL43M/Harry_Potter_and_the_Sorcerer's_Stone",
"preview": "noview",
"preview_url": "https://archive.org/details/lccn_078073006991",
"thumbnail_url": "https://covers.openlibrary.org/b/id/21-S.jpg"
}
"0312368615": {...}
}
"""
path = "/api/books"
@jsonapi
def GET(self):
i = web.input(bibkeys='', callback=None, details="false", high_priority=False)
i.high_priority = i.get("high_priority") == "true"
if web.ctx.path.endswith('.json'):
i.format = 'json'
return dynlinks.dynlinks(bib_keys=i.bibkeys.split(","), options=i)
class read_singleget(delegate.page):
"""Handle the single-lookup form of the Hathi-style API"""
path = (
r"/api/volumes/(brief|full)/(oclc|lccn|issn|isbn|htid|olid|recordnumber)/(.+)"
)
encoding = "json"
@jsonapi
def GET(self, brief_or_full, idtype, idval):
i = web.input()
web.ctx.headers = []
req = f'{idtype}:{idval}'
result = readlinks.readlinks(req, i)
result = result.get(req, [])
return json.dumps(result)
class read_multiget(delegate.page):
"""Handle the multi-lookup form of the Hathi-style API"""
path = r"/api/volumes/(brief|full)/json/(.+)"
path_re = re.compile(path)
@jsonapi
def GET(self, brief_or_full, req): # params aren't used, see below
i = web.input()
# Work around issue with gunicorn where semicolon and after
# get truncated. (web.input() still seems ok)
# see https://github.com/benoitc/gunicorn/issues/215
if raw_uri := web.ctx.env.get("RAW_URI"):
raw_path = urllib.parse.urlsplit(raw_uri).path
# handle e.g. '%7C' for '|'
decoded_path = urllib.parse.unquote(raw_path)
            m = self.path_re.match(decoded_path)
            if not m or len(m.groups()) != 2:
                return json.dumps({})
(brief_or_full, req) = m.groups()
web.ctx.headers = []
result = readlinks.readlinks(req, i)
return json.dumps(result)
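    # Illustrative request sketch (an assumption based on readlinks.readlinks, not an
    # officially documented example): requests are '|'-separated and bib keys within one
    # request are ';'-separated, with an optional leading 'id:' label used as the result key:
    #   /api/volumes/brief/json/id:1;lccn:50006784|id:2;olid:OL6179000M
    # would return a JSON object keyed by "1" and "2" for whichever records are found.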
| 3,137 | Python | .py | 74 | 34.837838 | 98 | 0.633553 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
425 | dynlinks.py | internetarchive_openlibrary/openlibrary/plugins/books/dynlinks.py | from typing import Any
import importlib
import json
import sys
from collections.abc import Hashable, Iterable, Mapping
import web
from openlibrary.core.models import Edition
from openlibrary.core.imports import ImportItem
from openlibrary.plugins.openlibrary.processors import urlsafe
from openlibrary.core import helpers as h
from openlibrary.core import ia
from infogami.utils.delegate import register_exception
# Import from manage-imports, but get around hyphen problem.
imports_module = "scripts.manage-imports"
manage_imports = importlib.import_module(imports_module)
def split_key(bib_key: str) -> tuple[str | None, str | None]:
"""
>>> split_key('1234567890')
('isbn_', '1234567890')
>>> split_key('ISBN:1234567890')
('isbn_', '1234567890')
>>> split_key('ISBN1234567890')
('isbn_', '1234567890')
>>> split_key('ISBN1234567890123')
('isbn_', '1234567890123')
>>> split_key('LCCNsa 64009056')
('lccn', 'sa 64009056')
>>> split_key('badkey')
(None, None)
"""
bib_key = bib_key.strip()
if not bib_key:
return None, None
valid_keys = ['isbn', 'lccn', 'oclc', 'ocaid', 'olid']
key, value = None, None
# split with : when possible
if ':' in bib_key:
key, value = bib_key.split(':', 1)
key = key.lower()
else:
# try prefix match
for k in valid_keys:
if bib_key.lower().startswith(k):
key = k
value = bib_key[len(k) :]
                break
# treat plain number as ISBN
if key is None and bib_key[0].isdigit():
key = 'isbn'
value = bib_key
# treat OLxxxM as OLID
re_olid = web.re_compile(r'OL\d+M(@\d+)?')
if key is None and re_olid.match(bib_key.upper()):
key = 'olid'
value = bib_key.upper()
if key == 'isbn':
# 'isbn_' is a special indexed field that gets both isbn_10 and isbn_13 in the normalized form.
key = 'isbn_'
value = (value or "").replace("-", "") # normalize isbn by stripping hyphens
if key == 'oclc':
key = 'oclc_numbers'
if key == 'olid':
key = 'key'
value = '/books/' + (value or "").upper()
return key, value
def ol_query(name, value):
query = {
'type': '/type/edition',
name: value,
}
if keys := web.ctx.site.things(query):
return keys[0]
def ol_get_many_as_dict(keys: Iterable[str]) -> dict:
"""
Ex.: ol_get_many_as_dict(['/books/OL2058361M', '/works/OL54120W'])
"""
keys_with_revisions = [k for k in keys if '@' in k]
keys2 = [k for k in keys if '@' not in k]
result = {doc['key']: doc for doc in ol_get_many(keys2)}
for k in keys_with_revisions:
key, revision = k.split('@', 1)
revision = h.safeint(revision, None)
doc = web.ctx.site.get(key, revision)
result[k] = doc and doc.dict()
return result
def ol_get_many(keys: Iterable[str]) -> list:
return [doc.dict() for doc in web.ctx.site.get_many(keys)]
def query_keys(bib_keys: Iterable[str]) -> dict:
"""Given a list of bibkeys, returns a mapping from bibkey to OL key.
>> query(["isbn:1234567890"])
{"isbn:1234567890": "/books/OL1M"}
"""
def query(bib_key):
name, value = split_key(bib_key)
if name is None:
return None
elif name == 'key':
return value
else:
return ol_query(name, value)
d = {bib_key: query(bib_key) for bib_key in bib_keys}
return {k: v for k, v in d.items() if v is not None}
def query_docs(bib_keys: Iterable[str]) -> dict:
"""Given a list of bib_keys, returns a mapping from bibkey to OL doc."""
mapping = query_keys(bib_keys)
thingdict = ol_get_many_as_dict(uniq(mapping.values()))
return {
bib_key: thingdict[key] for bib_key, key in mapping.items() if key in thingdict
}
def uniq(values: Iterable[Hashable]) -> list:
return list(set(values))
def process_result(result, jscmd):
d = {
"details": process_result_for_details,
"data": DataProcessor().process,
"viewapi": process_result_for_viewapi,
}
f = d.get(jscmd) or d['viewapi']
return f(result)
def get_many_as_dict(keys: Iterable[str]) -> dict:
return {doc['key']: doc for doc in ol_get_many(keys)}
def get_url(doc: Mapping[str, str]) -> str:
base = web.ctx.get("home", "https://openlibrary.org")
if base == 'http://[unknown]':
base = "https://openlibrary.org"
if doc['key'].startswith(("/books/", "/works/")):
return base + doc['key'] + "/" + urlsafe(doc.get("title", "untitled"))
elif doc['key'].startswith("/authors/"):
return base + doc['key'] + "/" + urlsafe(doc.get("name", "unnamed"))
else:
return base + doc['key']
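# For example (sketch; assumes no overriding "home" is set in web.ctx):
#   get_url({"key": "/books/OL1M", "title": "foo"}) -> "https://openlibrary.org/books/OL1M/foo"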
class DataProcessor:
"""Processor to process the result when jscmd=data."""
def process(self, result):
work_keys = [w['key'] for doc in result.values() for w in doc.get('works', [])]
self.works = get_many_as_dict(work_keys)
author_keys = [
a['author']['key']
for w in self.works.values()
for a in w.get('authors', [])
]
self.authors = get_many_as_dict(author_keys)
return {k: self.process_doc(doc) for k, doc in result.items()}
def get_authors(self, work):
author_keys = [a['author']['key'] for a in work.get('authors', [])]
return [
{
"url": get_url(self.authors[key]),
"name": self.authors[key].get("name", ""),
}
for key in author_keys
]
def get_work(self, doc):
works = [self.works[w['key']] for w in doc.get('works', [])]
if works:
return works[0]
else:
return {}
def process_doc(self, doc):
"""Processes one document.
Should be called only after initializing self.authors and self.works.
"""
w = self.get_work(doc)
def subject(name, prefix):
# handle bad subjects loaded earlier.
if isinstance(name, dict):
if 'value' in name:
name = name['value']
elif 'key' in name:
name = name['key'].split("/")[-1].replace("_", " ")
else:
return {}
return {
"name": name,
"url": "https://openlibrary.org/subjects/{}{}".format(
prefix, name.lower().replace(" ", "_")
),
}
def get_subjects(name, prefix):
return [subject(s, prefix) for s in w.get(name, '')]
def get_value(v):
if isinstance(v, dict):
return v.get('value', '')
else:
return v
def format_excerpt(e):
return {
"text": get_value(e.get("excerpt", {})),
"comment": e.get("comment", ""),
}
def format_table_of_contents(toc):
# after openlibrary.plugins.upstream.models.get_table_of_contents
def row(r):
if isinstance(r, str):
level = 0
label = ""
title = r
pagenum = ""
else:
level = h.safeint(r.get('level', '0'), 0)
label = r.get('label', '')
title = r.get('title', '')
pagenum = r.get('pagenum', '')
r = {'level': level, 'label': label, 'title': title, 'pagenum': pagenum}
return r
d = [row(r) for r in toc]
return [row for row in d if any(row.values())]
d = {
"url": get_url(doc),
"key": doc['key'],
"title": doc.get("title", ""),
"subtitle": doc.get("subtitle", ""),
"authors": self.get_authors(w),
"number_of_pages": doc.get("number_of_pages", ""),
"pagination": doc.get("pagination", ""),
"weight": doc.get("weight", ""),
"by_statement": doc.get("by_statement", ""),
'identifiers': web.dictadd(
doc.get('identifiers', {}),
{
'isbn_10': doc.get('isbn_10', []),
'isbn_13': doc.get('isbn_13', []),
'lccn': doc.get('lccn', []),
'oclc': doc.get('oclc_numbers', []),
'openlibrary': [doc['key'].split("/")[-1]],
},
),
'classifications': web.dictadd(
doc.get('classifications', {}),
{
'lc_classifications': doc.get('lc_classifications', []),
'dewey_decimal_class': doc.get('dewey_decimal_class', []),
},
),
"publishers": [{"name": p} for p in doc.get("publishers", "")],
"publish_places": [{"name": p} for p in doc.get("publish_places", "")],
"publish_date": doc.get("publish_date"),
"subjects": get_subjects("subjects", ""),
"subject_places": get_subjects("subject_places", "place:"),
"subject_people": get_subjects("subject_people", "person:"),
"subject_times": get_subjects("subject_times", "time:"),
"excerpts": [format_excerpt(e) for e in w.get("excerpts", [])],
"notes": get_value(doc.get("notes", "")),
"table_of_contents": format_table_of_contents(
doc.get("table_of_contents", [])
),
"links": [
{'title': link.get("title"), 'url': link['url']}
for link in w.get('links', '')
if link.get('url')
],
}
for fs in [doc.get("first_sentence"), w.get('first_sentence')]:
if fs:
e = {"text": get_value(fs), "comment": "", "first_sentence": True}
d['excerpts'].insert(0, e)
break
def ebook(doc):
itemid = doc['ocaid']
availability = get_ia_availability(itemid)
d = {
"preview_url": "https://archive.org/details/" + itemid,
"availability": availability,
"formats": {},
}
prefix = f"https://archive.org/download/{itemid}/{itemid}"
if availability == 'full':
d["read_url"] = "https://archive.org/stream/%s" % (itemid)
d['formats'] = {
"pdf": {"url": prefix + ".pdf"},
"epub": {"url": prefix + ".epub"},
"text": {"url": prefix + "_djvu.txt"},
}
elif availability == "borrow":
d['borrow_url'] = "https://openlibrary.org{}/{}/borrow".format(
doc['key'], h.urlsafe(doc.get("title", "untitled"))
)
loanstatus = web.ctx.site.store.get(
'ebooks/' + doc['ocaid'], {'borrowed': 'false'}
)
d['checkedout'] = loanstatus['borrowed'] == 'true'
return d
if doc.get("ocaid"):
d['ebooks'] = [ebook(doc)]
if doc.get('covers'):
cover_id = doc['covers'][0]
d['cover'] = {
"small": "https://covers.openlibrary.org/b/id/%s-S.jpg" % cover_id,
"medium": "https://covers.openlibrary.org/b/id/%s-M.jpg" % cover_id,
"large": "https://covers.openlibrary.org/b/id/%s-L.jpg" % cover_id,
}
d['identifiers'] = trim(d['identifiers'])
d['classifications'] = trim(d['classifications'])
return trim(d)
def trim(d):
"""Remove empty values from given dictionary.
>>> trim({"a": "x", "b": "", "c": [], "d": {}})
{'a': 'x'}
"""
return {k: v for k, v in d.items() if v}
def get_authors(docs):
"""Returns a dict of author_key to {"key", "...", "name": "..."} for all authors in docs."""
authors = [a['key'] for doc in docs for a in doc.get('authors', [])]
author_dict = {}
if authors:
for a in ol_get_many(uniq(authors)):
author_dict[a['key']] = {"key": a['key'], "name": a.get("name", "")}
return author_dict
def process_result_for_details(result):
def f(bib_key, doc):
d = process_doc_for_viewapi(bib_key, doc)
if 'authors' in doc:
doc['authors'] = [author_dict[a['key']] for a in doc['authors']]
d['details'] = doc
return d
author_dict = get_authors(result.values())
return {k: f(k, doc) for k, doc in result.items()}
def process_result_for_viewapi(result):
return {k: process_doc_for_viewapi(k, doc) for k, doc in result.items()}
def get_ia_availability(itemid):
collections = ia.get_metadata(itemid).get('collection', [])
if 'lendinglibrary' in collections or 'inlibrary' in collections:
return 'borrow'
elif 'printdisabled' in collections:
return 'restricted'
else:
return 'full'
def process_doc_for_viewapi(bib_key, page):
key = page['key']
url = get_url(page)
if 'ocaid' in page:
preview = get_ia_availability(page['ocaid'])
preview_url = 'https://archive.org/details/' + page['ocaid']
else:
preview = 'noview'
preview_url = url
d = {
'bib_key': bib_key,
'info_url': url,
'preview': preview,
'preview_url': preview_url,
}
if page.get('covers'):
d['thumbnail_url'] = (
'https://covers.openlibrary.org/b/id/%s-S.jpg' % page["covers"][0]
)
return d
def format_result(result: dict, options: web.storage) -> str:
"""Format result as js or json.
>>> format_result({'x': 1}, {})
'var _OLBookInfo = {"x": 1};'
>>> format_result({'x': 1}, {'callback': 'f'})
'{"x": 1}'
"""
format = options.get('format', '').lower()
if format == 'json':
return json.dumps(result)
else: # js
json_data = json.dumps(result)
callback = options.get("callback")
if callback:
# the API handles returning the data as a callback
return "%s" % json_data
else:
return "var _OLBookInfo = %s;" % json_data
def is_isbn(bib_key: str) -> bool:
"""Return True if the bib_key is ostensibly an ISBN (i.e. 10 or 13 characters)."""
return len(bib_key) in {10, 13}
def get_missed_isbn_bib_keys(bib_keys: Iterable[str], found_records: dict) -> list[str]:
"""
    Return a list of all ISBN bib_keys not in `found_records`.
"""
return [
bib_key
for bib_key in bib_keys
if bib_key not in found_records and is_isbn(bib_key)
]
def get_isbn_editiondict_map(
isbns: Iterable[str], high_priority: bool = False
) -> dict[str, Any]:
"""
Attempts to import items from their ISBN, returning a mapping of possibly
imported edition_dicts in the following form:
        {isbn_string: edition_dict, ...}
"""
# Supplement non-ISBN ASIN records with BookWorm metadata for that ASIN.
if high_priority:
for isbn in isbns:
if not isbn.upper().startswith("B"):
continue
if item_to_import := ImportItem.find_staged_or_pending([isbn]).first():
item_edition = ImportItem(item_to_import)
manage_imports.do_import(item_edition, require_marc=False)
# Get a mapping of ISBNs to new Editions (or `None`)
isbn_edition_map = {
isbn: Edition.from_isbn(isbn_or_asin=isbn, high_priority=high_priority)
for isbn in isbns
}
# Convert editions to dicts, dropping ISBNs for which no edition was created.
return {
isbn: edition.dict() for isbn, edition in isbn_edition_map.items() if edition
}
def dynlinks(bib_keys: Iterable[str], options: web.storage) -> str:
"""
Return a JSONified dictionary of bib_keys (e.g. ISBN, LCCN) and select URLs
associated with the corresponding edition, if any.
If a bib key is an ISBN, options.high_priority=True, and no edition is found,
an import is attempted with high priority; otherwise missed bib_keys are queued
for lookup via the affiliate-server and responses are `staged` in `import_item`.
Example return value for a bib key of the ISBN "1452303886":
'{"1452303886": {"bib_key": "1452303886", "info_url": '
'"http://localhost:8080/books/OL24630277M/Fires_of_Prophecy_The_Morcyth_Saga_Book_Two", '
'"preview": "restricted", "preview_url": '
'"https://archive.org/details/978-1-4523-0388-8"}}'
"""
# for backward-compatibility
if options.get("details", "").lower() == "true":
options["jscmd"] = "details"
high_priority = options.get("high_priority", False)
try:
edition_dicts = query_docs(bib_keys)
# For any ISBN bib_keys without hits, attempt to import+use immediately if
# `high_priority`. Otherwise, queue them for lookup via the AMZ Products
# API and process whatever editions were found in existing data.
if missed_isbns := get_missed_isbn_bib_keys(bib_keys, edition_dicts):
new_editions = get_isbn_editiondict_map(
isbns=missed_isbns, high_priority=high_priority
)
edition_dicts.update(new_editions)
edition_dicts = process_result(edition_dicts, options.get('jscmd'))
except:
print("Error in processing Books API", file=sys.stderr)
register_exception()
edition_dicts = {}
return format_result(edition_dicts, options)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17,850 | Python | .py | 438 | 31.244292 | 103 | 0.550546 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
426 | readlinks.py | internetarchive_openlibrary/openlibrary/plugins/books/readlinks.py | """'Read' api implementation. This is modeled after the HathiTrust
Bibliographic API, but also includes information about loans and other
editions of the same work that might be available.
"""
import sys
import re
import requests
import web
from openlibrary.core import ia
from openlibrary.core import helpers
from openlibrary.api import OpenLibrary
from openlibrary.plugins.books import dynlinks
from infogami.utils.delegate import register_exception
from infogami.utils import stats
from infogami import config
def key_to_olid(key):
return key.split('/')[-1]
def ol_query(name, value):
query = {
'type': '/type/edition',
name: value,
}
if keys := web.ctx.site.things(query):
return keys[0]
def get_solr_select_url():
c = config.get("plugin_worksearch")
base_url = c and c.get('solr_base_url')
return base_url and (base_url + "/select")
def get_work_iaids(wkey):
# wid = wkey.split('/')[2]
solr_select_url = get_solr_select_url()
filter = 'ia'
q = 'key:' + wkey
stats.begin('solr', url=wkey)
solr_select = (
solr_select_url
+ f"?version=2.2&q.op=AND&q={q}&rows=10&fl={filter}&qt=standard&wt=json&fq=type:work"
)
reply = requests.get(solr_select).json()
stats.end()
print(reply)
if reply['response']['numFound'] == 0:
return []
return reply["response"]['docs'][0].get(filter, [])
def get_solr_fields_for_works(
field: str,
wkeys: list[str],
clip_limit: int | None = None,
) -> dict[str, list[str]]:
from openlibrary.plugins.worksearch.search import get_solr
docs = get_solr().get_many(wkeys, fields=['key', field])
return {doc['key']: doc.get(field, [])[:clip_limit] for doc in docs}
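# For example (sketch with made-up identifiers):
#   get_solr_fields_for_works('ia', ['/works/OL1W'], clip_limit=2)
#   -> {'/works/OL1W': ['someitem00', 'someitem01']}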
def get_eids_for_wids(wids):
"""To support testing by passing in a list of work-ids - map each to
it's first edition ID"""
solr_select_url = get_solr_select_url()
filter = 'edition_key'
q = '+OR+'.join(wids)
solr_select = (
solr_select_url
+ f"?version=2.2&q.op=AND&q={q}&rows=10&fl=key,{filter}&qt=standard&wt=json&fq=type:work"
)
reply = requests.get(solr_select).json()
if reply['response']['numFound'] == 0:
return []
rows = reply['response']['docs']
result = {r['key']: r[filter][0] for r in rows if len(r.get(filter, []))}
return result
# Not yet used. Solr editions aren't up-to-date (6/2011)
def get_solr_edition_records(iaids):
solr_select_url = get_solr_select_url()
filter = 'title'
q = '+OR+'.join('ia:' + id for id in iaids)
solr_select = (
solr_select_url
+ f"?version=2.2&q.op=AND&q={q}&rows=10&fl=key,{filter}&qt=standard&wt=json"
)
reply = requests.get(solr_select).json()
if reply['response']['numFound'] == 0:
return []
rows = reply['response']['docs']
return rows
result = {r['key']: r[filter][0] for r in rows if len(r.get(filter, []))}
return result
class ReadProcessor:
def __init__(self, options):
self.options = options
def get_item_status(self, ekey, iaid, collections, subjects) -> str:
if 'lendinglibrary' in collections:
status = 'lendable' if 'Lending library' in subjects else 'restricted'
elif 'inlibrary' in collections:
status = 'restricted'
if 'In library' in subjects: # self.get_inlibrary() is deprecated
if self.options.get('debug_items'):
status = 'restricted - not inlib'
elif self.options.get('show_inlibrary'):
status = 'lendable'
else:
status = 'restricted' if 'printdisabled' in collections else 'full access'
if status == 'lendable':
loanstatus = web.ctx.site.store.get(f'ebooks/{iaid}', {'borrowed': 'false'})
if loanstatus['borrowed'] == 'true':
status = 'checked out'
return status
def get_readitem(self, iaid, orig_iaid, orig_ekey, wkey, status, publish_date):
meta = self.iaid_to_meta.get(iaid)
if meta is None:
return None
collections = meta.get("collection", [])
if status == 'missing':
return None
if (
status.startswith('restricted') or status == 'checked out'
) and not self.options.get('show_all_items'):
return None
edition = self.iaid_to_ed.get(iaid)
ekey = edition.get('key', '')
if status == 'full access':
itemURL = 'http://www.archive.org/stream/%s' % (iaid)
else:
            # this could be rewritten in terms of iaid...
itemURL = 'http://openlibrary.org{}/{}/borrow'.format(
ekey, helpers.urlsafe(edition.get('title', 'untitled'))
)
result = {
# XXX add lastUpdate
'enumcron': False,
'match': 'exact' if iaid == orig_iaid else 'similar',
'status': status,
'fromRecord': orig_ekey,
'ol-edition-id': key_to_olid(ekey),
'ol-work-id': key_to_olid(wkey),
'publishDate': publish_date,
'contributor': '',
'itemURL': itemURL,
}
if edition.get('covers'):
cover_id = edition['covers'][0]
            # can be rewritten in terms of iaid
# XXX covers url from yaml?
result['cover'] = {
"small": "https://covers.openlibrary.org/b/id/%s-S.jpg" % cover_id,
"medium": "https://covers.openlibrary.org/b/id/%s-M.jpg" % cover_id,
"large": "https://covers.openlibrary.org/b/id/%s-L.jpg" % cover_id,
}
return result
date_pat = r'\D*(\d\d\d\d)\D*'
date_re = re.compile(date_pat)
def make_record(self, bib_keys):
# XXX implement hathi no-match logic?
found = False
for k in bib_keys:
if k in self.docs:
found = True
break
if not found:
return None
doc = self.docs[k]
data = self.datas[k]
details = self.detailss.get(k)
# determine potential ia items for this identifier,
orig_iaid = doc.get('ocaid')
doc_works = doc.get('works')
if doc_works and len(doc_works) > 0:
wkey = doc_works[0]['key']
else:
wkey = None
work = None
subjects = []
if wkey:
work = self.works.get(wkey)
subjects = work.get('subjects', [])
iaids = self.wkey_to_iaids[wkey]
# rearrange so any scan for this edition is first
if orig_iaid and orig_iaid in iaids:
iaids.pop(iaids.index(orig_iaid))
iaids.insert(0, orig_iaid)
elif orig_iaid:
# attempt to handle work-less editions
iaids = [orig_iaid]
else:
iaids = []
orig_ekey = data['key']
# Sort iaids. Is there a more concise way?
def getstatus(self, iaid):
meta = self.iaid_to_meta.get(iaid)
if not meta:
status = 'missing'
edition = None
else:
collections = meta.get("collection", [])
edition = self.iaid_to_ed.get(iaid)
if not edition:
status = 'missing'
else:
ekey = edition.get('key', '')
status = self.get_item_status(ekey, iaid, collections, subjects)
return status
def getdate(self, iaid):
if edition := self.iaid_to_ed.get(iaid):
m = self.date_re.match(edition.get('publish_date', ''))
if m:
return m.group(1)
return ''
iaids_tosort = [
(iaid, getstatus(self, iaid), getdate(self, iaid)) for iaid in iaids
]
def sortfn(sortitem):
iaid, status, date = sortitem
if iaid == orig_iaid and status in {'full access', 'lendable'}:
isexact = '000'
else:
isexact = '999'
# sort dateless to end
if date == '':
date = 5000
date = int(date)
# reverse-sort modern works by date
if status in {'lendable', 'checked out'}:
date = 10000 - date
statusvals = {
'full access': 1,
'lendable': 2,
'checked out': 3,
'restricted': 4,
'restricted - not inlib': 4,
'missing': 5,
}
return (isexact, statusvals[status], date)
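        # Worked example (sketch): a lendable exact-match scan from 1999 sorts as
        # ('000', 2, 8001), because loanable modern works are reverse-sorted by date
        # (10000 - 1999), while a dateless restricted non-exact scan sorts as ('999', 4, 5000).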
iaids_tosort.sort(key=sortfn)
items = [
self.get_readitem(iaid, orig_iaid, orig_ekey, wkey, status, date)
for iaid, status, date in iaids_tosort
] # if status != 'missing'
items = [item for item in items if item]
ids = data.get('identifiers', {})
if self.options.get('no_data'):
returned_data = None
else:
returned_data = data
result = {
'records': {
data['key']: {
'isbns': [
subitem
for sublist in (ids.get('isbn_10', []), ids.get('isbn_13', []))
for subitem in sublist
],
'issns': [],
'lccns': ids.get('lccn', []),
'oclcs': ids.get('oclc', []),
'olids': [key_to_olid(data['key'])],
'publishDates': [data.get('publish_date', '')],
'recordURL': data['url'],
'data': returned_data,
'details': details,
}
},
'items': items,
}
if self.options.get('debug_items'):
result['tosort'] = iaids_tosort
return result
def process(self, req):
requests = req.split('|')
bib_keys = [item for r in requests for item in r.split(';')]
# filter out 'id:foo' before passing to dynlinks
bib_keys = [k for k in bib_keys if k[:3].lower() != 'id:']
self.docs = dynlinks.query_docs(bib_keys)
if not self.options.get('no_details'):
self.detailss = dynlinks.process_result_for_details(self.docs)
else:
self.detailss = {}
dp = dynlinks.DataProcessor()
self.datas = dp.process(self.docs)
self.works = dp.works
# XXX control costs below with iaid_limit - note that this may result
# in no 'exact' item match, even if one exists
# Note that it's available thru above works/docs
self.wkey_to_iaids = get_solr_fields_for_works('ia', self.works, 500)
iaids = [value for sublist in self.wkey_to_iaids.values() for value in sublist]
self.iaid_to_meta = {iaid: ia.get_metadata(iaid) for iaid in iaids}
def lookup_iaids(iaids):
step = 10
if len(iaids) > step and not self.options.get('debug_things'):
result = []
while iaids:
result += lookup_iaids(iaids[:step])
iaids = iaids[step:]
return result
query = {
'type': '/type/edition',
'ocaid': iaids,
}
result = web.ctx.site.things(query)
return result
ekeys = lookup_iaids(iaids)
# If returned order were reliable, I could skip the below.
eds = dynlinks.ol_get_many_as_dict(ekeys)
self.iaid_to_ed = {ed['ocaid']: ed for ed in eds.values()}
# self.iaid_to_ekey = dict((iaid, ed['key'])
# for iaid, ed in self.iaid_to_ed.items())
# Work towards building a dict of iaid loanability,
# def has_lending_collection(meta):
# collections = meta.get("collection", [])
# return 'lendinglibrary' in collections or 'inlibrary' in collections
# in case site.store supports get_many (unclear)
# maybe_loanable_iaids = [iaid for iaid in iaids
# if has_lending_collection(self.iaid_to_meta.get(iaid, {}))]
# loanable_ekeys = [self.iaid_to_ekey.get(iaid) for iaid in maybe_loanable_iaids]
# loanstatus = web.ctx.site.store.get('ebooks' + ekey, {'borrowed': 'false'})
result = {}
for r in requests:
bib_keys = r.split(';')
if r.lower().startswith('id:'):
result_key = bib_keys.pop(0)[3:]
else:
result_key = r
sub_result = self.make_record(bib_keys)
if sub_result:
result[result_key] = sub_result
if self.options.get('debug_items'):
result['ekeys'] = ekeys
result['eds'] = eds
result['iaids'] = iaids
return result
def readlinks(req, options):
try:
dbstr = 'debug|'
if req.startswith(dbstr):
options = {
'stats': True,
'show_exception': True,
'no_data': True,
'no_details': True,
'show_all_items': True,
}
req = req[len(dbstr) :]
rp = ReadProcessor(options)
if options.get('listofworks'):
"""For load-testing, handle a special syntax"""
wids = req.split('|')
mapping = get_solr_fields_for_works('edition_key', wids[:5])
req = '|'.join(('olid:' + k) for k in mapping.values())
result = rp.process(req)
if options.get('stats'):
summary = stats.stats_summary()
s = {}
result['stats'] = s
s['summary'] = summary
s['stats'] = web.ctx.get('stats', [])
except:
print('Error in processing Read API', file=sys.stderr)
if options.get('show_exception'):
register_exception()
result = {'success': False}
else:
register_exception()
result = {}
return result
| 14,211 | Python | .py | 360 | 28.791667 | 97 | 0.537084 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
427 | test_dynlinks.py | internetarchive_openlibrary/openlibrary/plugins/books/tests/test_dynlinks.py | """Test suite for dynlinks.
Most of the tests here use 3 sets of data.
data0: This contains OL0A, OL0M and OL0W with each having just name/title.
data1: This contains OL1A, OL1M, OL1W with each having name/title and interconnections.
data9: This contains OL9A, OL9M and OL9W with interconnections and almost all fields.
"""
import pytest
import re
import json
import web
from openlibrary.core import ia
from openlibrary.mocks import mock_infobase
from openlibrary.plugins.books import dynlinks
@pytest.fixture
def data0(request):
return {
"/books/OL0M": {"key": "/books/OL0M", "title": "book-0"},
"/authors/OL0A": {"key": "/authors/OL0A", "name": "author-0"},
"/works/OL0W": {"key": "/works/OL0W", "title": "work-0"},
"result": {
"data": {
"url": "https://openlibrary.org/books/OL0M/book-0",
"key": "/books/OL0M",
"title": "book-0",
"identifiers": {"openlibrary": ["OL0M"]},
}
},
}
@pytest.fixture
def data1(request):
return {
"/books/OL1M": {
"key": "/books/OL1M",
"title": "foo",
"works": [{"key": "/works/OL1W"}],
},
"/authors/OL1A": {"key": "/authors/OL1A", "name": "Mark Twain"},
"/works/OL1W": {
"key": "/works/OL1W",
"title": "Foo",
"authors": [{"author": {"key": "/authors/OL1A"}}],
},
}
@pytest.fixture
def data9(request):
return {
"/authors/OL9A": {"key": "/authors/OL9A", "name": "Mark Twain"},
"/works/OL9W": {
"key": "/works/OL9W",
"title": "Foo",
"authors": [{"author": {"key": "/authors/OL9A"}}],
"links": [
{
"title": "wikipedia article",
"url": "http://en.wikipedia.org/wiki/foo",
}
],
"subjects": ["Test Subject"],
"subject_people": ["Test Person"],
"subject_places": ["Test Place"],
"subject_times": ["Test Time"],
"excerpts": [
{
"excerpt": {"type": "/type/text", "value": "This is an excerpt."},
"comment": "foo",
},
{
# sometimes excerpt was plain string instead of /type/text.
"excerpt": "This is another excerpt.",
"comment": "bar",
},
],
},
"/books/OL9M": {
"key": "/books/OL9M",
"title": "foo",
"subtitle": "bar",
"by_statement": "Mark Twain",
"works": [{"key": "/works/OL9W"}],
"publishers": ["Dover Publications"],
"publish_places": ["New York"],
"identifiers": {"goodreads": ["12345"]},
"isbn_10": ["1234567890"],
"lccn": ["lccn-1"],
"oclc_numbers": ["oclc-1"],
"classifications": {"indcat": ["12345"]},
"lc_classifications": ["LC1234"],
"covers": [42, 53],
"ocaid": "foo12bar",
"number_of_pages": "100",
"pagination": "100 p.",
},
"result": {
"viewapi": {
"info_url": "https://openlibrary.org/books/OL9M",
"thumbnail_url": "https://covers.openlibrary.org/b/id/42-S.jpg",
"preview": "noview",
"preview_url": "https://openlibrary.org/books/OL9M",
},
"data": {
"url": "https://openlibrary.org/books/OL9M/foo",
"key": "/books/OL9M",
"title": "foo",
"subtitle": "bar",
"by_statement": "Mark Twain",
"authors": [
{
"url": "https://openlibrary.org/authors/OL9A/Mark_Twain",
"name": "Mark Twain",
}
],
"identifiers": {
"isbn_10": ["1234567890"],
"lccn": ["lccn-1"],
"oclc": ["oclc-1"],
"goodreads": ["12345"],
"openlibrary": ["OL9M"],
},
"classifications": {
"lc_classifications": ["LC1234"],
"indcat": ["12345"],
},
"publishers": [{"name": "Dover Publications"}],
"publish_places": [{"name": "New York"}],
"links": [
{
"title": "wikipedia article",
"url": "http://en.wikipedia.org/wiki/foo",
}
],
'subjects': [
{
'url': 'https://openlibrary.org/subjects/test_subject',
'name': 'Test Subject',
}
],
'subject_places': [
{
'url': 'https://openlibrary.org/subjects/place:test_place',
'name': 'Test Place',
}
],
'subject_people': [
{
'url': 'https://openlibrary.org/subjects/person:test_person',
'name': 'Test Person',
}
],
'subject_times': [
{
'url': 'https://openlibrary.org/subjects/time:test_time',
'name': 'Test Time',
}
],
"cover": {
"small": "https://covers.openlibrary.org/b/id/42-S.jpg",
"medium": "https://covers.openlibrary.org/b/id/42-M.jpg",
"large": "https://covers.openlibrary.org/b/id/42-L.jpg",
},
"excerpts": [
{
"text": "This is an excerpt.",
"comment": "foo",
},
{"text": "This is another excerpt.", "comment": "bar"},
],
"ebooks": [
{
"preview_url": "https://archive.org/details/foo12bar",
"read_url": "https://archive.org/stream/foo12bar",
"availability": "full",
"formats": {
"pdf": {
"url": "https://archive.org/download/foo12bar/foo12bar.pdf"
},
"epub": {
"url": "https://archive.org/download/foo12bar/foo12bar.epub"
},
"text": {
"url": "https://archive.org/download/foo12bar/foo12bar_djvu.txt"
},
},
}
],
"number_of_pages": "100",
"pagination": "100 p.",
},
},
}
class Mock:
def __init__(self):
self.calls = []
self.default = None
def __call__(self, *a, **kw):
for a2, kw2, _return in self.calls:
if (a, kw) == (a2, kw2):
return _return
return self.default
def setup_call(self, *a, **kw):
_return = kw.pop("_return", None)
call = a, kw, _return
self.calls.append(call)
def monkeypatch_ol(monkeypatch):
mock = Mock()
mock.setup_call("isbn_", "1234567890", _return="/books/OL1M")
mock.setup_call("key", "/books/OL2M", _return="/books/OL2M")
monkeypatch.setattr(dynlinks, "ol_query", mock)
mock = Mock()
mock.setup_call(["/books/OL1M"], _return=[{"key": "/books/OL1M", "title": "foo"}])
mock.setup_call(
["/books/OL2M"],
_return=[{"key": "/books/OL2M", "title": "bar", "ocaid": "ia-bar"}],
)
mock.default = []
monkeypatch.setattr(dynlinks, "ol_get_many", mock)
monkeypatch.setattr(ia, "get_metadata", lambda itemid: web.storage())
def test_query_keys(monkeypatch):
monkeypatch_ol(monkeypatch)
assert dynlinks.query_keys(["isbn:1234567890"]) == {
"isbn:1234567890": "/books/OL1M"
}
assert dynlinks.query_keys(["isbn:9876543210"]) == {}
assert dynlinks.query_keys(["isbn:1234567890", "isbn:9876543210"]) == {
"isbn:1234567890": "/books/OL1M"
}
def test_query_docs(monkeypatch):
monkeypatch_ol(monkeypatch)
assert dynlinks.query_docs(["isbn:1234567890"]) == {
"isbn:1234567890": {"key": "/books/OL1M", "title": "foo"}
}
assert dynlinks.query_docs(["isbn:9876543210"]) == {}
assert dynlinks.query_docs(["isbn:1234567890", "isbn:9876543210"]) == {
"isbn:1234567890": {"key": "/books/OL1M", "title": "foo"}
}
def test_process_doc_for_view_api(monkeypatch):
monkeypatch_ol(monkeypatch)
bib_key = "isbn:1234567890"
doc = {"key": "/books/OL1M", "title": "foo"}
expected_result = {
"bib_key": "isbn:1234567890",
"info_url": "https://openlibrary.org/books/OL1M/foo",
"preview": "noview",
"preview_url": "https://openlibrary.org/books/OL1M/foo",
}
assert dynlinks.process_doc_for_viewapi(bib_key, doc) == expected_result
doc['ocaid'] = "ia-foo"
expected_result["preview"] = "full"
expected_result["preview_url"] = "https://archive.org/details/ia-foo"
assert dynlinks.process_doc_for_viewapi(bib_key, doc) == expected_result
doc['covers'] = [42, 53]
expected_result["thumbnail_url"] = "https://covers.openlibrary.org/b/id/42-S.jpg"
assert dynlinks.process_doc_for_viewapi(bib_key, doc) == expected_result
def test_process_result_for_details(monkeypatch):
assert dynlinks.process_result_for_details(
{"isbn:1234567890": {"key": "/books/OL1M", "title": "foo"}}
) == {
"isbn:1234567890": {
"bib_key": "isbn:1234567890",
"info_url": "https://openlibrary.org/books/OL1M/foo",
"preview": "noview",
"preview_url": "https://openlibrary.org/books/OL1M/foo",
"details": {"key": "/books/OL1M", "title": "foo"},
}
}
OL1A = {
"key": "/authors/OL1A",
"type": {"key": "/type/author"},
"name": "Mark Twain",
}
mock = Mock()
mock.setup_call(["/authors/OL1A"], _return=[OL1A])
monkeypatch.setattr(dynlinks, "ol_get_many", mock)
result = {
"isbn:1234567890": {
"key": "/books/OL1M",
"title": "foo",
"authors": [{"key": "/authors/OL1A"}],
}
}
expected_result = {
"isbn:1234567890": {
"bib_key": "isbn:1234567890",
"info_url": "https://openlibrary.org/books/OL1M/foo",
"preview": "noview",
"preview_url": "https://openlibrary.org/books/OL1M/foo",
"details": {
"key": "/books/OL1M",
"title": "foo",
"authors": [{"key": "/authors/OL1A", "name": "Mark Twain"}],
},
}
}
assert dynlinks.process_result_for_details(result) == expected_result
def test_dynlinks(monkeypatch):
monkeypatch_ol(monkeypatch)
expected_result = {
"isbn:1234567890": {
"bib_key": "isbn:1234567890",
"info_url": "https://openlibrary.org/books/OL1M/foo",
"preview": "noview",
"preview_url": "https://openlibrary.org/books/OL1M/foo",
}
}
js = dynlinks.dynlinks(["isbn:1234567890"], {})
match = re.match('^var _OLBookInfo = ({.*});$', js)
assert match is not None
assert json.loads(match.group(1)) == expected_result
js = dynlinks.dynlinks(["isbn:1234567890"], {"callback": "func"})
match = re.match('^({.*})$', js)
assert match is not None
assert json.loads(match.group(1)) == expected_result
js = dynlinks.dynlinks(["isbn:1234567890"], {"format": "json"})
assert json.loads(js) == expected_result
def test_isbnx(monkeypatch):
site = mock_infobase.MockSite()
site.save(
{
"key": "/books/OL1M",
"type": {"key": "/type/edition"},
"isbn_10": ["123456789X"],
}
)
monkeypatch.setattr(web.ctx, "site", site, raising=False)
json_data = dynlinks.dynlinks(["isbn:123456789X"], {"format": "json"})
d = json.loads(json_data)
assert list(d) == ["isbn:123456789X"]
def test_dynlinks_ia(monkeypatch):
monkeypatch_ol(monkeypatch)
expected_result = {
"OL2M": {
"bib_key": "OL2M",
"info_url": "https://openlibrary.org/books/OL2M/bar",
"preview": "full",
"preview_url": "https://archive.org/details/ia-bar",
}
}
json_data = dynlinks.dynlinks(["OL2M"], {"format": "json"})
assert json.loads(json_data) == expected_result
def test_dynlinks_details(monkeypatch):
monkeypatch_ol(monkeypatch)
expected_result = {
"OL2M": {
"bib_key": "OL2M",
"info_url": "https://openlibrary.org/books/OL2M/bar",
"preview": "full",
"preview_url": "https://archive.org/details/ia-bar",
"details": {"key": "/books/OL2M", "title": "bar", "ocaid": "ia-bar"},
},
}
json_data = dynlinks.dynlinks(["OL2M"], {"format": "json", "details": "true"})
assert json.loads(json_data) == expected_result
class TestDataProcessor:
def test_get_authors0(self, data0):
p = dynlinks.DataProcessor()
p.authors = data0
assert p.get_authors(data0['/books/OL0M']) == []
def test_get_authors1(self, data1):
p = dynlinks.DataProcessor()
p.authors = data1
assert p.get_authors(data1['/works/OL1W']) == [
{
"url": "https://openlibrary.org/authors/OL1A/Mark_Twain",
"name": "Mark Twain",
}
]
def test_process_doc0(self, data0):
p = dynlinks.DataProcessor()
assert p.process_doc(data0['/books/OL0M']) == data0['result']['data']
def test_process_doc9(self, monkeypatch, data9):
monkeypatch_ol(monkeypatch)
p = dynlinks.DataProcessor()
p.authors = data9
p.works = data9
assert p.process_doc(data9['/books/OL9M']) == data9['result']['data']
| 14,480 | Python | .py | 374 | 26.906417 | 96 | 0.48872 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
428 | test_readlinks.py | internetarchive_openlibrary/openlibrary/plugins/books/tests/test_readlinks.py | import pytest
import web
from openlibrary.plugins.books import readlinks
@pytest.mark.parametrize(
"collections,subjects,options,expected",
[
(['lendinglibrary'], ['Lending library'], {}, 'lendable'),
(['lendinglibrary'], ['Some other subject'], {}, 'restricted'),
(['inlibrary'], ['In library'], {}, 'restricted'),
(
['inlibrary'],
['In library'],
{'debug_items': True},
'restricted - not inlib',
),
(['inlibrary'], ['In library'], {'show_inlibrary': True}, 'lendable'),
(['printdisabled'], [], {}, 'restricted'),
(['some other collection'], [], {}, 'full access'),
],
)
def test_get_item_status(collections, subjects, options, expected, mock_site):
read_processor = readlinks.ReadProcessor(options=options)
status = read_processor.get_item_status('ekey', 'iaid', collections, subjects)
assert status == expected
@pytest.mark.parametrize(
"borrowed,expected",
[
('true', 'checked out'),
('false', 'lendable'),
],
)
def test_get_item_status_monkeypatched(borrowed, expected, monkeypatch, mock_site):
read_processor = readlinks.ReadProcessor(options={})
monkeypatch.setattr(web.ctx.site.store, 'get', lambda _, __: {'borrowed': borrowed})
collections = ['lendinglibrary']
subjects = ['Lending library']
status = read_processor.get_item_status('ekey', 'iaid', collections, subjects)
assert status == expected
| 1,500 | Python | .py | 38 | 33.394737 | 88 | 0.628003 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
429 | test_doctests.py | internetarchive_openlibrary/openlibrary/plugins/books/tests/test_doctests.py | import doctest
import pytest
def find_doctests(modules):
finder = doctest.DocTestFinder()
for m in modules:
mod = __import__(m, None, None, ['x'])
yield from finder.find(mod, mod.__name__)
@pytest.mark.parametrize('test', find_doctests(["openlibrary.plugins.books.dynlinks"]))
def test_doctest(test):
runner = doctest.DocTestRunner(verbose=True)
failures, tries = runner.run(test)
if failures:
pytest.fail("doctest failed: " + test.name)
| 486 | Python | .py | 13 | 32.692308 | 87 | 0.690832 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
430 | code.py | internetarchive_openlibrary/openlibrary/plugins/inside/code.py | from time import time
import json
import web
from infogami.utils import delegate
from infogami.utils.view import render_template, safeint
from openlibrary.core.fulltext import fulltext_search
RESULTS_PER_PAGE = 20
class search_inside(delegate.page):
path = '/search/inside'
def GET(self):
search_start = time() # should probably use a @timeit decorator
i = web.input(q='', page=1)
query = i.q
page = int(i.page)
results = fulltext_search(query, page=page, limit=RESULTS_PER_PAGE)
search_time = time() - search_start
return render_template(
'search/inside.tmpl',
query,
results,
search_time,
page=page,
results_per_page=RESULTS_PER_PAGE,
)
class search_inside_json(delegate.page):
path = "/search/inside"
encoding = "json"
def GET(self):
i = web.input(q='', page=1, limit=RESULTS_PER_PAGE)
limit = min(safeint(i.limit, RESULTS_PER_PAGE), RESULTS_PER_PAGE)
query = i.q
page = int(i.page)
results = fulltext_search(query, page=page, limit=limit, js=True, facets=True)
web.header('Content-Type', 'application/json')
return delegate.RawText(json.dumps(results, indent=4))
| 1,291 | Python | .py | 35 | 29.514286 | 86 | 0.64257 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
431 | memory.py | internetarchive_openlibrary/openlibrary/plugins/admin/memory.py | """memory profiler
"""
import gc
import web
from collections import defaultdict
_mark = {}
_mark_ids = {}
class Storage(web.Storage):
pass
def mark():
"""Mark the current counts to show the difference."""
global _mark, _mark_ids
objects = get_objects()
d = defaultdict(set)
for obj in objects:
d[_get_type(obj)].add(id(obj))
_mark_ids = d
_mark = get_all_counts()
def get_counts():
counts = get_all_counts()
d = [
Storage(
type=type,
count=count,
mark=_mark.get(type, 0),
diff=count - _mark.get(type, 0),
)
for type, count in counts.items()
]
return d
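# Illustrative usage sketch (an assumption; see the admin views in mem.py for the real callers):
#   mark()                                     # snapshot current live-object counts
#   ... exercise the app ...
#   [c for c in get_counts() if c.diff > 0]    # object types that grew since the snapshot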
def get_all_counts():
"""Returns the counts of live objects."""
objects = get_objects()
d = defaultdict(lambda: 0)
for obj in objects:
d[_get_type(obj)] += 1
return d
def get_objects():
"""Returns a list of live objects."""
objects = gc.get_objects()
dicts = {id(o.__dict__) for o in objects if hasattr(o, "__dict__")}
return (
obj for obj in gc.get_objects() if obj is not _mark and id(obj) not in dicts
)
def get_objects_by_type(type):
return (obj for obj in get_objects() if _get_type(obj) == type)
def _get_type(obj):
"""Returns the type of given object as string."""
try:
t = obj.__class__
except:
t = type(obj)
mod = t.__module__
name = t.__name__
if mod != "__builtin__":
name = mod + "." + name
return name
| 1,529 | Python | .py | 57 | 21.22807 | 84 | 0.58 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
432 | mem.py | internetarchive_openlibrary/openlibrary/plugins/admin/mem.py | from infogami.utils import delegate
from infogami.utils.view import render, safeint
from openlibrary.plugins.admin import memory
import web
import gc
def render_template(name, *a, **kw):
return render[name](*a, **kw)
class Object:
def __init__(self, obj, name=None):
self.obj = obj
self.name = name
def get_id(self):
return id(self.obj)
def get_type(self):
return memory._get_type(self.obj)
def repr(self):
try:
if isinstance(self.obj, (dict, web.threadeddict)):
from infogami.infobase.utils import prepr
return prepr(self.obj)
else:
return repr(self.obj)
except:
return "failed"
return render_template("admin/memory/object", self.obj)
def get_referrers(self):
d = []
for o in gc.get_referrers(self.obj):
name = None
            if type(o) is dict:
name = web.dictfind(o, self.obj)
for r in gc.get_referrers(o):
if getattr(r, "__dict__", None) is o:
o = r
break
elif isinstance(o, dict): # other dict types
name = web.dictfind(o, self.obj)
if not isinstance(name, str):
name = None
d.append(Object(o, name))
return d
def get_referents(self):
d = []
_dict = getattr(self.obj, "__dict__", None)
if _dict:
for k, v in self.obj.__dict__.items():
d.append(Object(v, name=k))
for o in gc.get_referents(self.obj):
if o is not _dict:
d.append(Object(o))
return d
class _memory:
path = "/memory"
def GET(self):
i = web.input(page=1, sort="diff", prefix="")
page = safeint(i.page, 1)
end = page * 50
begin = end - 50
if i.sort not in ["count", "mark", "diff"]:
i.sort = "diff"
counts = [c for c in memory.get_counts() if c.type.startswith(i.prefix)]
counts.sort(key=lambda c: c[i.sort], reverse=True)
return render_template(
"admin/memory/index", counts[begin:end], page, sort=i.sort
)
def POST(self):
memory.mark()
raise web.seeother(web.ctx.fullpath)
class _memory_type:
path = "/memory/type/(.*)"
def GET(self, type):
objects = memory.get_objects_by_type(type)
i = web.input(page=1, diff="false")
page = safeint(i.page, 1)
end = page * 50
begin = end - 50
objects = [Object(obj) for obj in memory.get_objects_by_type(type)]
if i.diff == "true":
marked = memory._mark_ids.get(type, [])
objects = [obj for obj in objects if obj.get_id() not in marked]
return render_template("admin/memory/type", type, objects, page)
def first(it):
try:
return next(it)
except StopIteration:
return None
class _memory_id:
path = "/memory/id/(.*)"
def get_object(self, _id):
for obj in memory.get_objects():
if str(id(obj)) == _id:
return Object(obj)
def GET(self, _id):
obj = self.get_object(_id)
if not obj:
raise web.notfound()
return render_template("admin/memory/object", obj)
| 3,393 | Python | .py | 97 | 25.268041 | 80 | 0.546793 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
433 | code.py | internetarchive_openlibrary/openlibrary/plugins/admin/code.py | """Plugin to provide admin interface.
"""
import os
from collections.abc import Iterable
import requests
import sys
import web
import subprocess
import datetime
import traceback
import logging
import json
from internetarchive.exceptions import ItemLocateError
from infogami import config
from infogami.utils import delegate
from infogami.utils.view import render, public
from infogami.utils.context import context
from infogami.utils.view import add_flash_message
from infogami.plugins.api.code import jsonapi
from openlibrary.catalog.add_book import (
update_ia_metadata_for_ol_edition,
create_ol_subjects_for_ocaid,
)
import openlibrary
from openlibrary import accounts
from openlibrary.accounts.model import Account, clear_cookies
from openlibrary.accounts.model import OpenLibraryAccount
from openlibrary.core import admin as admin_stats, helpers as h, imports, cache
from openlibrary.core.models import Work
from openlibrary.plugins.upstream import forms, spamcheck
from openlibrary.plugins.upstream.account import send_forgot_password_email
logger = logging.getLogger("openlibrary.admin")
def render_template(name, *a, **kw):
if "." in name:
name = name.rsplit(".", 1)[0]
return render[name](*a, **kw)
admin_tasks = []
def register_admin_page(path, cls, label=None, visible=True, librarians=False):
label = label or cls.__name__
t = web.storage(
path=path, cls=cls, label=label, visible=visible, librarians=librarians
)
admin_tasks.append(t)
def revert_all_user_edits(account: Account) -> tuple[int, int]:
"""
:return: tuple of (number of edits reverted, number of documents deleted)
"""
i = 0
edit_count = 0
stop = False
keys_to_delete = set()
while not stop:
changes = account.get_recentchanges(limit=100, offset=100 * i)
added_records: list[list[dict]] = [
c.changes for c in changes if c.kind == 'add-book'
]
flattened_records: list[dict] = [
record for lst in added_records for record in lst
]
keys_to_delete |= {r['key'] for r in flattened_records}
keys_to_revert: dict[str, list[int]] = {
item.key: [] for change in changes for item in change.changes
}
for change in changes:
for item in change.changes:
keys_to_revert[item.key].append(change.id)
deleted_keys = web.ctx.site.things(
{'key': list(keys_to_revert), 'type': {'key': '/type/delete'}}
)
changesets_with_deleted_works = {
change_id for key in deleted_keys for change_id in keys_to_revert[key]
}
changeset_ids = [
c.id for c in changes if c.id not in changesets_with_deleted_works
]
_, len_docs = revert_changesets(changeset_ids, "Reverted Spam")
edit_count += len_docs
i += 1
if len(changes) < 100:
stop = True
delete_payload = [
{'key': key, 'type': {'key': '/type/delete'}} for key in keys_to_delete
]
web.ctx.site.save_many(delete_payload, 'Delete spam')
return edit_count, len(delete_payload)
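# See tests/test_code.py (TestRevertAllUserEdits) for end-to-end examples: works added by a
# spam account end up deleted, while its edits to other documents are rolled back to the
# revision that existed before the spam edit.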
def revert_changesets(changeset_ids: Iterable[int], comment: str):
"""
    An aggressive revert function; it rolls back all the documents to
the revision that existed before the changeset was applied.
Note this means that any edits made _after_ the given changeset will
also be lost.
"""
def get_doc(key: str, revision: int) -> dict:
if revision == 0:
return {"key": key, "type": {"key": "/type/delete"}}
else:
return web.ctx.site.get(key, revision).dict()
site = web.ctx.site
docs = [
get_doc(c['key'], c['revision'] - 1)
for cid in changeset_ids
for c in site.get_change(cid).changes
]
docs = [doc for doc in docs if doc.get('type', {}).get('key') != '/type/delete']
data = {"reverted_changesets": [str(cid) for cid in changeset_ids]}
manifest = web.ctx.site.save_many(docs, action="revert", data=data, comment=comment)
return manifest, len(docs)
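# Hedged usage sketch (changeset ids illustrative): revert_changesets([123, 456], "Reverted Spam")
# saves, for every document touched by those changesets, the revision that existed just before
# the changeset, so later edits to those documents are lost as the docstring warns. Documents
# whose prior state would be a deletion are filtered out above, so this function restores
# content but never writes /type/delete records itself.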
class admin(delegate.page):
path = "/admin(?:/.*)?"
def delegate(self):
if web.ctx.path == "/admin":
return self.handle(admin_index)
for t in admin_tasks:
m = web.re_compile('^' + t.path + '$').match(web.ctx.path)
if m:
return self.handle(t.cls, m.groups(), librarians=t.librarians)
raise web.notfound()
def handle(self, cls, args=(), librarians=False):
# Use admin theme
context.cssfile = "admin"
m = getattr(cls(), web.ctx.method, None)
if not m:
raise web.nomethod(cls=cls)
else:
if (
context.user
and context.user.is_librarian()
and web.ctx.path == '/admin/solr'
):
return m(*args)
if self.is_admin() or (
librarians and context.user and context.user.is_super_librarian()
):
return m(*args)
else:
return render.permission_denied(web.ctx.path, "Permission denied.")
GET = POST = delegate
def is_admin(self):
"""Returns True if the current user is in admin usergroup."""
return context.user and context.user.key in [
m.key for m in web.ctx.site.get('/usergroup/admin').members
]
class admin_index:
def GET(self):
return web.seeother('/stats')
class gitpull:
def GET(self):
root = os.path.join(os.path.dirname(openlibrary.__file__), os.path.pardir)
root = os.path.normpath(root)
p = subprocess.Popen(
'cd %s && git pull' % root,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out = p.stdout.read()
p.wait()
return '<pre>' + web.websafe(out) + '</pre>'
class reload:
def GET(self):
if servers := config.get("plugin_admin", {}).get("webservers", []):
body = "".join(self.reload(servers))
else:
body = "No webservers specified in the configuration file."
return render_template("message", "Reload", body)
def reload(self, servers):
for s in servers:
s = web.rstrips(s, "/") + "/_reload"
yield "<h3>" + s + "</h3>"
try:
response = requests.get(s).text
yield "<p><pre>" + response[:100] + "</pre></p>"
except:
yield "<p><pre>%s</pre></p>" % traceback.format_exc()
@web.memoize
def local_ip():
import socket
return socket.gethostbyname(socket.gethostname())
class _reload(delegate.page):
def GET(self):
# make sure the request is coming from the LAN.
if (
web.ctx.ip not in ['127.0.0.1', '0.0.0.0']
and web.ctx.ip.rsplit(".", 1)[0] != local_ip().rsplit(".", 1)[0]
):
return render.permission_denied(
web.ctx.fullpath, "Permission denied to reload templates/macros."
)
from infogami.plugins.wikitemplates import code as wikitemplates
wikitemplates.load_all()
from openlibrary.plugins.upstream import code as upstream
upstream.reload()
return delegate.RawText("done")
class any:
def GET(self):
path = web.ctx.path
class people:
def GET(self):
i = web.input(email=None, ia_id=None)
account = None
if i.email:
account = accounts.find(email=i.email)
if i.ia_id:
account = OpenLibraryAccount.get_by_link(i.ia_id)
if account:
raise web.seeother(f"/admin/people/{account.username}")
return render_template("admin/people/index", email=i.email, ia_id=i.ia_id)
class add_work_to_staff_picks:
def GET(self):
return render_template("admin/sync")
def POST(self):
i = web.input(action="add", work_id='', subjects='openlibrary_staff_picks')
results = {}
work_ids = i.work_id.split(',')
subjects = i.subjects.split(',')
for work_id in work_ids:
work = web.ctx.site.get('/works/%s' % work_id)
editions = work.editions
ocaids = [edition.ocaid for edition in editions if edition.ocaid]
results[work_id] = {}
for ocaid in ocaids:
try:
results[work_id][ocaid] = create_ol_subjects_for_ocaid(
ocaid, subjects=subjects
)
except ItemLocateError as err:
results[work_id][
ocaid
] = f'Failed to add to staff picks. Error message: {err}'
return delegate.RawText(json.dumps(results), content_type="application/json")
class resolve_redirects:
def GET(self):
return self.main(test=True)
def POST(self):
return self.main(test=False)
def main(self, test=False):
params = web.input(key='', test='')
# Provide an escape hatch to let GET requests resolve
if test is True and params.test == 'false':
test = False
# Provide an escape hatch to let POST requests preview
elif test is False and params.test:
test = True
summary = Work.resolve_redirect_chain(params.key, test=test)
return delegate.RawText(json.dumps(summary), content_type="application/json")
class sync_ol_ia:
def GET(self):
"""Updates an Open Library edition's Archive.org item by writing its
latest openlibrary_work and openlibrary_edition to the
Archive.org item's metadata.
"""
i = web.input(edition_id='')
data = update_ia_metadata_for_ol_edition(i.edition_id)
return delegate.RawText(json.dumps(data), content_type="application/json")
class people_view:
def GET(self, key):
account = accounts.find(username=key) or accounts.find(email=key)
if account:
if "@" in key:
raise web.seeother("/admin/people/" + account.username)
else:
return render_template('admin/people/view', account)
else:
raise web.notfound()
def POST(self, key):
user = accounts.find(username=key)
if not user:
raise web.notfound()
i = web.input(action=None, tag=None, bot=None, dry_run=None)
if i.action == "update_email":
return self.POST_update_email(user, i)
elif i.action == "update_password":
return self.POST_update_password(user, i)
elif i.action == "resend_link":
return self.POST_resend_link(user)
elif i.action == "activate_account":
return self.POST_activate_account(user)
elif i.action == "send_password_reset_email":
return self.POST_send_password_reset_email(user)
elif i.action == "block_account":
return self.POST_block_account(user)
elif i.action == "block_account_and_revert":
return self.POST_block_account_and_revert(user)
elif i.action == "unblock_account":
return self.POST_unblock_account(user)
elif i.action == "add_tag":
return self.POST_add_tag(user, i.tag)
elif i.action == "remove_tag":
return self.POST_remove_tag(user, i.tag)
elif i.action == "set_bot_flag":
return self.POST_set_bot_flag(user, i.bot)
elif i.action == "su":
return self.POST_su(user)
elif i.action == "anonymize_account":
test = bool(i.dry_run)
return self.POST_anonymize_account(user, test)
else:
raise web.seeother(web.ctx.path)
def POST_activate_account(self, user):
user.activate()
raise web.seeother(web.ctx.path)
def POST_send_password_reset_email(self, user):
send_forgot_password_email(user.username, user.email)
raise web.seeother(web.ctx.path)
def POST_block_account(self, account):
account.block()
raise web.seeother(web.ctx.path)
def POST_block_account_and_revert(self, account: Account):
account.block()
edit_count, deleted_count = revert_all_user_edits(account)
add_flash_message(
"info",
f"Blocked the account and reverted all {edit_count} edits. {deleted_count} records deleted.",
)
raise web.seeother(web.ctx.path)
def POST_unblock_account(self, account):
account.unblock()
raise web.seeother(web.ctx.path)
def POST_resend_link(self, user):
key = "account/%s/verify" % user.username
activation_link = web.ctx.site.store.get(key)
del activation_link
user.send_verification_email()
add_flash_message("info", "Activation mail has been resent.")
raise web.seeother(web.ctx.path)
def POST_update_email(self, account, i):
user = account.get_user()
if not forms.vemail.valid(i.email):
return render_template(
"admin/people/view", user, i, {"email": forms.vemail.msg}
)
if not forms.email_not_already_used.valid(i.email):
return render_template(
"admin/people/view",
user,
i,
{"email": forms.email_not_already_used.msg},
)
account.update_email(i.email)
add_flash_message("info", "Email updated successfully!")
raise web.seeother(web.ctx.path)
def POST_update_password(self, account, i):
user = account.get_user()
if not forms.vpass.valid(i.password):
return render_template(
"admin/people/view", user, i, {"password": forms.vpass.msg}
)
account.update_password(i.password)
logger.info("updated password of %s", user.key)
add_flash_message("info", "Password updated successfully!")
raise web.seeother(web.ctx.path)
def POST_add_tag(self, account, tag):
account.add_tag(tag)
return delegate.RawText('{"ok": "true"}', content_type="application/json")
def POST_remove_tag(self, account, tag):
account.remove_tag(tag)
return delegate.RawText('{"ok": "true"}', content_type="application/json")
def POST_set_bot_flag(self, account, bot):
bot = (bot and bot.lower()) == "true"
account.set_bot_flag(bot)
raise web.seeother(web.ctx.path)
def POST_su(self, account):
code = account.generate_login_code()
# Clear all existing admin cookies before logging in as another user
clear_cookies()
web.setcookie(config.login_cookie_name, code, expires="")
return web.seeother("/")
def POST_anonymize_account(self, account, test):
results = account.anonymize(test=test)
msg = (
f"Account anonymized. New username: {results['new_username']}. "
f"Notes deleted: {results['booknotes_count']}. "
f"Ratings updated: {results['ratings_count']}. "
f"Observations updated: {results['observations_count']}. "
f"Bookshelves updated: {results['bookshelves_count']}."
f"Merge requests updated: {results['merge_request_count']}"
)
add_flash_message("info", msg)
raise web.seeother(web.ctx.path)
class people_edits:
def GET(self, username):
account = accounts.find(username=username)
if not account:
raise web.notfound()
else:
return render_template("admin/people/edits", account)
def POST(self, username):
i = web.input(changesets=[], comment="Revert", action="revert")
if i.action == "revert" and i.changesets:
revert_changesets(i.changesets, i.comment)
raise web.redirect(web.ctx.path)
class ipaddress:
def GET(self):
return render_template('admin/ip/index')
class ipaddress_view:
def GET(self, ip):
return render_template('admin/ip/view', ip)
def POST(self, ip):
i = web.input(changesets=[], comment="Revert", action="revert")
if i.action == "block":
self.block(ip)
else:
revert_changesets(i.changesets, i.comment)
raise web.redirect(web.ctx.path)
def block(self, ip):
ips = get_blocked_ips()
if ip not in ips:
ips.append(ip)
block().block_ips(ips)
class stats:
def GET(self, today):
json = web.ctx.site._conn.request(
web.ctx.site.name, '/get', 'GET', {'key': '/admin/stats/' + today}
)
return delegate.RawText(json)
def POST(self, today):
"""Update stats for today."""
doc = self.get_stats(today)
doc._save()
raise web.seeother(web.ctx.path)
def get_stats(self, today):
stats = web.ctx.site._request("/stats/" + today)
key = '/admin/stats/' + today
doc = web.ctx.site.new(key, {'key': key, 'type': {'key': '/type/object'}})
doc.edits = {
'human': stats.edits - stats.edits_by_bots,
'bot': stats.edits_by_bots,
'total': stats.edits,
}
doc.members = stats.new_accounts
return doc
class block:
def GET(self):
page = web.ctx.site.get("/admin/block") or web.storage(
ips=[web.storage(ip="127.0.0.1", duration="1 week", since="1 day")]
)
return render_template("admin/block", page)
def POST(self):
i = web.input()
ips = [ip.strip() for ip in i.ips.splitlines()]
self.block_ips(ips)
add_flash_message("info", "Saved!")
raise web.seeother("/admin/block")
def block_ips(self, ips):
page = web.ctx.get("/admin/block") or web.ctx.site.new(
"/admin/block", {"key": "/admin/block", "type": "/type/object"}
)
page.ips = [{'ip': ip} for ip in ips]
page._save("updated blocked IPs")
def get_blocked_ips():
if doc := web.ctx.site.get("/admin/block"):
return [d.ip for d in doc.ips]
else:
return []
def block_ip_processor(handler):
if (
not web.ctx.path.startswith("/admin")
and (web.ctx.method == "POST" or web.ctx.path.endswith("/edit"))
and web.ctx.ip in get_blocked_ips()
):
return render_template(
"permission_denied", web.ctx.path, "Your IP address is blocked."
)
else:
return handler()
def daterange(date, *slice):
return [date + datetime.timedelta(i) for i in range(*slice)]
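# e.g. daterange(date, 0, -7, -1) walks backwards over the last seven days (today included);
# that is how the "thisweek" figures in get_admin_stats below are gathered.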
def storify(d):
if isinstance(d, dict):
return web.storage((k, storify(v)) for k, v in d.items())
elif isinstance(d, list):
return [storify(v) for v in d]
else:
return d
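# Illustrative values: storify({"edits": {"human": 1}, "days": [{"n": 2}]}) returns nested
# web.storage objects, so callers can use attribute access such as result.edits.human.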
def get_counts():
"""Generate counts for various operations which will be given to the
index page"""
retval = admin_stats.get_stats(100)
return storify(retval)
def get_admin_stats():
def f(dates):
keys = ["/admin/stats/" + date.isoformat() for date in dates]
docs = web.ctx.site.get_many(keys)
return g(docs)
def has_doc(date):
return bool(web.ctx.site.get('/admin/stats/' + date.isoformat()))
def g(docs):
return {
'edits': {
'human': sum(doc['edits']['human'] for doc in docs),
'bot': sum(doc['edits']['bot'] for doc in docs),
'total': sum(doc['edits']['total'] for doc in docs),
},
'members': sum(doc['members'] for doc in docs),
}
date = datetime.datetime.utcnow().date()
if has_doc(date):
today = f([date])
else:
today = g([stats().get_stats(date.isoformat())])
yesterday = f(daterange(date, -1, 0, 1))
thisweek = f(daterange(date, 0, -7, -1))
thismonth = f(daterange(date, 0, -30, -1))
xstats = {
'edits': {
'today': today['edits'],
'yesterday': yesterday['edits'],
'thisweek': thisweek['edits'],
'thismonth': thismonth['edits'],
},
'members': {
'today': today['members'],
'yesterday': yesterday['members'],
'thisweek': thisweek['members'],
'thismonth': thismonth['members'],
},
}
return storify(xstats)
from openlibrary.plugins.upstream import borrow
class inspect:
def GET(self, section):
if section == "/store":
return self.GET_store()
elif section == "/memcache":
return self.GET_memcache()
else:
raise web.notfound()
def GET_store(self):
i = web.input(key=None, type=None, name=None, value=None)
if i.key:
doc = web.ctx.site.store.get(i.key)
if doc:
docs = [doc]
else:
docs = []
else:
docs = web.ctx.site.store.values(
type=i.type or None,
name=i.name or None,
value=i.value or None,
limit=100,
)
return render_template("admin/inspect/store", docs, input=i)
def GET_memcache(self):
i = web.input(action="read")
i.setdefault("keys", "")
mc = cache.get_memcache().memcache
keys = [k.strip() for k in i["keys"].split() if k.strip()]
if i.action == "delete":
mc.delete_multi(keys)
add_flash_message("info", "Deleted %s keys from memcache" % len(keys))
return render_template("admin/inspect/memcache", [], {})
else:
mapping = keys and mc.get_multi(keys)
return render_template("admin/inspect/memcache", keys, mapping)
class spamwords:
def GET(self):
spamwords = spamcheck.get_spam_words()
domains = spamcheck.get_spam_domains()
return render_template("admin/spamwords.html", spamwords, domains)
def POST(self):
i = web.input(spamwords="", domains="", action="")
if i.action == "save-spamwords":
spamcheck.set_spam_words(i.spamwords.strip().split("\n"))
add_flash_message("info", "Updated spam words successfully.")
elif i.action == "save-domains":
spamcheck.set_spam_domains(i.domains.strip().split("\n"))
add_flash_message("info", "Updated domains successfully.")
raise web.redirect("/admin/spamwords")
class _graphs:
def GET(self):
return render_template("admin/graphs")
class permissions:
def GET(self):
perm_pages = self.get_permission("/")
        # assuming that the permission of books and authors is the same as that of works
perm_records = self.get_permission("/works")
return render_template("admin/permissions", perm_records, perm_pages)
def get_permission(self, key):
doc = web.ctx.site.get(key)
perm = doc and doc.child_permission
return perm and perm.key or "/permission/open"
def set_permission(self, key, permission):
"""Returns the doc with permission set.
The caller must save the doc.
"""
doc = web.ctx.site.get(key)
doc = doc and doc.dict() or {"key": key, "type": {"key": "/type/page"}}
# so that only admins can modify the permission
doc["permission"] = {"key": "/permission/restricted"}
doc["child_permission"] = {"key": permission}
return doc
def POST(self):
i = web.input(
perm_pages="/permission/loggedinusers",
perm_records="/permission/loggedinusers",
)
root = self.set_permission("/", i.perm_pages)
works = self.set_permission("/works", i.perm_records)
books = self.set_permission("/books", i.perm_records)
authors = self.set_permission("/authors", i.perm_records)
web.ctx.site.save_many(
[root, works, books, authors], comment="Updated edit policy."
)
add_flash_message("info", "Edit policy has been updated!")
return self.GET()
class attach_debugger:
def GET(self):
python_version = "{}.{}.{}".format(*sys.version_info)
return render_template("admin/attach_debugger", python_version)
def POST(self):
import debugpy # noqa: T100
        # Allow other computers to attach to debugpy at this IP address and port.
web.debug("Enabling debugger attachment")
debugpy.listen(('0.0.0.0', 3000)) # noqa: T100
web.debug("Waiting for debugger to attach...")
debugpy.wait_for_client() # noqa: T100
web.debug("Debugger attached to port 3000")
add_flash_message("info", "Debugger attached!")
return self.GET()
class solr:
def GET(self):
return render_template("admin/solr")
def POST(self):
i = web.input(keys="")
keys = i['keys'].strip().split()
web.ctx.site.store['solr-force-update'] = {
"type": "solr-force-update",
"keys": keys,
"_rev": None,
}
add_flash_message("info", "Added the specified keys to solr update queue.!")
return self.GET()
class imports_home:
def GET(self):
return render_template("admin/imports", imports.Stats)
class imports_public(delegate.page):
path = "/imports"
def GET(self):
return imports_home().GET()
class imports_add:
def GET(self):
return render_template("admin/imports-add")
def POST(self):
i = web.input("identifiers")
identifiers = [
line.strip() for line in i.identifiers.splitlines() if line.strip()
]
batch_name = "admin"
batch = imports.Batch.find(batch_name, create=True)
batch.add_items(identifiers)
add_flash_message("info", "Added the specified identifiers to import queue.")
raise web.seeother("/admin/imports")
class imports_by_date:
def GET(self, date):
return render_template("admin/imports_by_date", imports.Stats(), date)
class show_log:
def GET(self):
i = web.input(name='')
logname = i.name
filepath = config.get('errorlog', 'errors') + '/' + logname + '.html'
if os.path.exists(filepath):
with open(filepath) as f:
return f.read()
def setup():
register_admin_page('/admin/git-pull', gitpull, label='git-pull')
register_admin_page('/admin/reload', reload, label='Reload Templates')
register_admin_page('/admin/people', people, label='People')
register_admin_page('/admin/people/([^/]*)', people_view, label='View People')
register_admin_page('/admin/people/([^/]*)/edits', people_edits, label='Edits')
register_admin_page('/admin/ip', ipaddress, label='IP')
register_admin_page('/admin/ip/(.*)', ipaddress_view, label='View IP')
register_admin_page(r'/admin/stats/(\d\d\d\d-\d\d-\d\d)', stats, label='Stats JSON')
register_admin_page('/admin/block', block, label='')
register_admin_page(
'/admin/attach_debugger', attach_debugger, label='Attach Debugger'
)
register_admin_page('/admin/inspect(?:(/.+))?', inspect, label="")
register_admin_page('/admin/graphs', _graphs, label="")
register_admin_page('/admin/logs', show_log, label="")
register_admin_page('/admin/permissions', permissions, label="")
register_admin_page('/admin/solr', solr, label="", librarians=True)
register_admin_page('/admin/sync', sync_ol_ia, label="", librarians=True)
register_admin_page(
'/admin/resolve_redirects', resolve_redirects, label="Resolve Redirects"
)
register_admin_page(
'/admin/staffpicks', add_work_to_staff_picks, label="", librarians=True
)
register_admin_page('/admin/imports', imports_home, label="")
register_admin_page('/admin/imports/add', imports_add, label="")
register_admin_page(
r'/admin/imports/(\d\d\d\d-\d\d-\d\d)', imports_by_date, label=""
)
register_admin_page('/admin/spamwords', spamwords, label="")
from openlibrary.plugins.admin import mem
for p in [mem._memory, mem._memory_type, mem._memory_id]:
register_admin_page('/admin' + p.path, p)
public(get_admin_stats)
public(get_blocked_ips)
delegate.app.add_processor(block_ip_processor)
from openlibrary.plugins.admin import graphs
graphs.setup()
setup()
| 28,542 | Python | .py | 704 | 31.816761 | 105 | 0.603616 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
434 | graphs.py | internetarchive_openlibrary/openlibrary/plugins/admin/graphs.py | """Utilities for rendering Graphite graphs.
"""
import web
from infogami import config
import urllib
def get_graphite_base_url():
return config.get("graphite_base_url", "")
class GraphiteGraph:
"""Representation of Graphite graph.
Usage:
g = GraphiteGraph()
g.add("stats.timers.ol.pageload.all.mean").apply("movingAverage", 20).alias("all")
        print(g.render())
In templates:
$ g = GraphiteGraph()
$g.add("stats.timers.ol.pageload.all.mean").apply("movingAverage", 20).alias("all")
$:g.render()
"""
def __init__(self):
self.series_list = []
def add(self, name):
s = Series(name)
self.series_list.append(s)
return s
def get_queryparams(self, **options):
"""Returns query params to be passed to the image URL for rendering this graph."""
options["target"] = [s.name for s in self.series_list]
return options
def render(self, **options):
"""Renders the graphs as an img tag.
Usage in templates:
$:g.render(yLimit=100, width=300, height=400)
"""
return f'<img src="{get_graphite_base_url()}/render/?{urllib.parse.urlencode(self.get_queryparams(**options), doseq=True)}"/>'
class Series:
"""One series in the GraphiteGraph."""
def __init__(self, name):
self.name = name
def apply(self, funcname, *args):
"""Applies a function to this series.
:return: Returns self
"""
self.name = "{}({}, {})".format(
funcname, self.name, ", ".join(repr(a) for a in args)
)
return self
def alias(self, name):
"""Shorthand for calling s.apply("alias", name)"""
return self.apply("alias", name)
def __repr__(self):
return "<series: %r>" % self.name
def __str__(self):
# Returning empty string to allow template use $g.add("foo") without printing anything.
return ""
def setup():
web.template.Template.globals.update(
{
'GraphiteGraph': GraphiteGraph,
}
)
| 2,105 | Python | .py | 60 | 27.783333 | 134 | 0.601783 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
435 | services.py | internetarchive_openlibrary/openlibrary/plugins/admin/services.py | """
Contains stuff needed to list services and modules run by OpenLibrary
for the admin panel
"""
import re
import requests
from collections import defaultdict
from bs4 import BeautifulSoup
class Nagios:
def __init__(self, url):
try:
self.data = BeautifulSoup(requests.get(url).content, "lxml")
except Exception as m:
print(m)
self.data = None
def get_service_status(self, service):
"Returns the stats of the service `service`"
if not self.data:
return "error-api"
        # The service name is kept inside a bunch of nested nodes. We
        # walk up the nodes to find the enclosing <tr> that contains
        # the service in question. A single step is not enough since
        # there are nested tables in the layout.
service = self.data.find(text=re.compile(service))
if service:
service_tr = service.findParents("tr")[2]
status_td = service_tr.find(
"td",
attrs={
"class": re.compile(r"status(OK|RECOVERY|UNKNOWN|WARNING|CRITICAL)")
},
)
return status_td['class'].replace("status", "")
else:
return "error-nosuchservice"
class Service:
"""
An OpenLibrary service with all the stuff that we need to
manipulate it.
"""
def __init__(self, node, name, nagios, logs=False):
self.node = node
self.name = name
self.logs = logs
self.status = "Service status(TBD)"
self.nagios = nagios.get_service_status(name)
def __repr__(self):
return (
f"Service(name = '{self.name}', node = '{self.node}', logs = '{self.logs}')"
)
def load_all(config, nagios_url):
"""Loads all services specified in the config dictionary and returns
the list of Service"""
d = defaultdict(list)
nagios = Nagios(nagios_url)
for node in config:
services = config[node].get('services', [])
if services:
for service in services:
d[node].append(Service(node=node, name=service, nagios=nagios))
return d
| 2,177 | Python | .py | 61 | 27.42623 | 88 | 0.605226 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
436 | conftest.py | internetarchive_openlibrary/openlibrary/plugins/admin/tests/conftest.py | import pytest
@pytest.fixture
def serviceconfig(request):
import os
import yaml
root = os.path.dirname(__file__)
with open(os.path.join(root, "sample_services.yml")) as in_file:
return yaml.safe_load(in_file)
| 236 | Python | .py | 8 | 25.125 | 68 | 0.702222 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
437 | test_services.py | internetarchive_openlibrary/openlibrary/plugins/admin/tests/test_services.py | """
Tests for the services module used by the admin interface.
"""
def test_loader(serviceconfig):
"Make sure services are loaded"
from .. import services
services = services.load_all(serviceconfig, "http://nagios.url")
assert len(list(services)) == 2
s = sorted(services)
assert s[0] == "ol-web0"
assert s[1] == "ol-web1"
assert services['ol-web0'][0].name == "7071-ol-gunicorn"
assert services['ol-web0'][1].name == "7060-memcached"
assert services['ol-web1'][0].name == "7072-ol-gunicorn"
assert services['ol-web1'][1].name == "7061-memcached"
| 594 | Python | .py | 15 | 35.466667 | 68 | 0.668403 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
438 | test_code.py | internetarchive_openlibrary/openlibrary/plugins/admin/tests/test_code.py | from typing import cast
from openlibrary.accounts.model import Account, OpenLibraryAccount
from openlibrary.plugins.admin.code import revert_all_user_edits
import web
from openlibrary.plugins.upstream.models import Changeset
def make_test_account(username: str) -> OpenLibraryAccount:
web.ctx.site.register(
username=username,
email=f"{username}@foo.org",
password="password",
displayname=f"{username} User",
)
web.ctx.site.activate_account(username)
return cast(OpenLibraryAccount, OpenLibraryAccount.get(username=username))
def make_thing(key: str, title: str = '', thing_type: str | None = None) -> dict:
if thing_type == '/type/delete':
return {
"key": key,
"type": {"key": "/type/delete"},
}
if key.startswith("/works/"):
return {
"key": key,
"type": {"key": "/type/work"},
"title": title,
}
elif '/lists/' in key:
return {
"key": key,
"type": {"key": "/type/list"},
"name": title,
}
else:
raise NotImplementedError(
f"make_thing not implemented for {key} or {thing_type}"
)
class TestRevertAllUserEdits:
def test_no_edits(self, mock_site):
alice = make_test_account("alice")
revert_all_user_edits(alice)
def test_deletes_spam_works(self, mock_site):
good_alice = make_test_account("good_alice")
spam_alice = make_test_account("spam_alice")
web.ctx.site.save(
author=good_alice.get_user(),
query=make_thing("/works/OL123W", "Good Book Title"),
action='add-book',
)
web.ctx.site.save(
author=spam_alice.get_user(),
query=make_thing("/works/OL789W", "Spammy New Book"),
action='add-book',
)
web.ctx.site.save(
author=spam_alice.get_user(),
query=make_thing("/works/OL345W", "Spammy New Book 2"),
action='add-book',
)
web.ctx.site.save(
author=good_alice.get_user(),
query=make_thing("/works/OL12333W", "Good Book Title 2"),
action='add-book',
)
revert_all_user_edits(spam_alice)
# Good books un-altered
assert web.ctx.site.get("/works/OL123W").revision == 1
assert web.ctx.site.get("/works/OL123W").title == "Good Book Title"
assert web.ctx.site.get("/works/OL12333W").revision == 1
assert web.ctx.site.get("/works/OL12333W").title == "Good Book Title 2"
# Spam books deleted
assert web.ctx.site.get("/works/OL789W").revision == 2
assert web.ctx.site.get("/works/OL789W").type.key == "/type/delete"
assert web.ctx.site.get("/works/OL345W").revision == 2
assert web.ctx.site.get("/works/OL345W").type.key == "/type/delete"
def test_reverts_spam_edits(self, mock_site):
good_alice = make_test_account("good_alice")
spam_alice = make_test_account("spam_alice")
web.ctx.site.save(
author=good_alice.get_user(),
query=make_thing("/works/OL123W", "Good Book Title"),
action='add-book',
)
web.ctx.site.save(
author=spam_alice.get_user(),
query=make_thing("/works/OL123W", "Spammy Book Title"),
action='edit-book',
)
revert_all_user_edits(spam_alice)
# Reverted back to good edit
assert web.ctx.site.get("/works/OL123W").revision == 3
assert web.ctx.site.get("/works/OL123W").title == "Good Book Title"
assert web.ctx.site.get("/works/OL123W").type.key == "/type/work"
def test_does_not_undelete(self, mock_site):
spam_alice = make_test_account("spam_alice")
web.ctx.site.save(
author=spam_alice.get_user(),
query=make_thing("/people/spam_alice/lists/OL123L", "spam spam spam"),
action='lists',
)
web.ctx.site.save(
author=spam_alice.get_user(),
query=make_thing(
"/people/spam_alice/lists/OL123L", thing_type='/type/delete'
),
action='lists',
)
revert_all_user_edits(spam_alice)
assert web.ctx.site.get("/people/spam_alice/lists/OL123L").revision == 2
assert (
web.ctx.site.get("/people/spam_alice/lists/OL123L").type.key
== "/type/delete"
)
def test_two_spammy_editors(self, mock_site):
spam_alice = make_test_account("spam_alice")
spam_bob = make_test_account("spam_bob")
web.ctx.site.save(
author=spam_alice.get_user(),
query=make_thing("/works/OL1W", "Alice is Awesome"),
action='add-book',
)
web.ctx.site.save(
author=spam_bob.get_user(),
query=make_thing("/works/OL2W", "Bob is Awesome"),
action='add-book',
)
web.ctx.site.save(
author=spam_alice.get_user(),
query=make_thing("/works/OL2W", "Bob Sucks"),
action='edit-book',
)
web.ctx.site.save(
author=spam_bob.get_user(),
query=make_thing("/works/OL1W", "Alice Sucks"),
action='edit-book',
)
revert_all_user_edits(spam_alice)
# Reverted back to good edit
assert web.ctx.site.get("/works/OL1W").revision == 3
assert web.ctx.site.get("/works/OL1W").type.key == "/type/delete"
assert web.ctx.site.get("/works/OL2W").revision == 3
assert web.ctx.site.get("/works/OL2W").title == "Bob is Awesome"
revert_all_user_edits(spam_bob)
# Reverted back to good edit
assert web.ctx.site.get("/works/OL1W").revision == 3
assert web.ctx.site.get("/works/OL1W").type.key == "/type/delete"
assert web.ctx.site.get("/works/OL2W").revision == 4
assert web.ctx.site.get("/works/OL2W").type.key == "/type/delete"
| 6,033 | Python | .py | 147 | 31.278912 | 82 | 0.580403 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
439 | models.py | internetarchive_openlibrary/openlibrary/plugins/upstream/models.py | import logging
import re
from functools import cached_property
import sys
import web
from collections import defaultdict
from isbnlib import canonical, mask, NotValidISBNError
from infogami import config
from infogami.infobase import client
from infogami.utils.view import safeint
from infogami.utils import stats
from openlibrary.core import models, ia
from openlibrary.core.models import Image
from openlibrary.core import lending
from openlibrary.plugins.upstream.table_of_contents import TableOfContents
from openlibrary.plugins.upstream.utils import MultiDict, get_edition_config
from openlibrary.plugins.upstream import account
from openlibrary.plugins.upstream import borrow
from openlibrary.plugins.worksearch.code import works_by_author
from openlibrary.plugins.worksearch.search import get_solr
from openlibrary.utils import dateutil
from openlibrary.utils.isbn import isbn_10_to_isbn_13, isbn_13_to_isbn_10
from openlibrary.utils.lccn import normalize_lccn
def follow_redirect(doc):
if isinstance(doc, str) and doc.startswith("/a/"):
# Some edition records have authors as ["/a/OL1A""] instead of [{"key": "/a/OL1A"}].
# Hack to fix it temporarily.
doc = web.ctx.site.get(doc.replace("/a/", "/authors/"))
if doc and doc.type.key == "/type/redirect":
key = doc.location
return web.ctx.site.get(key)
else:
return doc
class Edition(models.Edition):
def get_title(self):
if self['title_prefix']:
return self['title_prefix'] + ' ' + self['title']
else:
return self['title']
def get_title_prefix(self):
return ''
# let title be title_prefix + title
title = property(get_title)
title_prefix = property(get_title_prefix)
def get_authors(self):
"""Added to provide same interface for work and edition"""
work_authors = self.works[0].get_authors() if self.works else []
authors = [follow_redirect(a) for a in self.authors]
authors = [a for a in authors if a and a.type.key == "/type/author"]
return work_authors + authors
def get_covers(self):
"""
        This method excludes covers that are -1 or None, which are in the data
but should not be.
"""
return [Image(self._site, 'b', c) for c in self.covers if c and c > 0]
def get_cover(self):
covers = self.get_covers()
return covers and covers[0] or None
def get_cover_url(self, size):
if cover := self.get_cover():
return cover.url(size)
elif self.ocaid:
return self.get_ia_cover(self.ocaid, size)
def get_ia_cover(self, itemid, size):
image_sizes = {"S": (116, 58), "M": (180, 360), "L": (500, 500)}
w, h = image_sizes[size.upper()]
return f"https://archive.org/download/{itemid}/page/cover_w{w}_h{h}.jpg"
def get_isbn10(self):
"""Fetches either isbn_10 or isbn_13 from record and returns canonical
isbn_10
"""
isbn_10 = self.isbn_10 and canonical(self.isbn_10[0])
if not isbn_10:
isbn_13 = self.get_isbn13()
return isbn_13 and isbn_13_to_isbn_10(isbn_13)
return isbn_10
def get_isbn13(self):
"""Fetches either isbn_13 or isbn_10 from record and returns canonical
isbn_13
"""
isbn_13 = self.isbn_13 and canonical(self.isbn_13[0])
if not isbn_13:
isbn_10 = self.isbn_10 and self.isbn_10[0]
return isbn_10 and isbn_10_to_isbn_13(isbn_10)
return isbn_13
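    # Illustrative conversion: an edition holding only isbn_10 == ["0140328726"] reports
    # get_isbn13() == "9780140328721" via isbn_10_to_isbn_13; get_isbn10() converts the other way.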
def get_worldcat_url(self):
        url = 'https://search.worldcat.org'
if self.get('oclc_numbers'):
return f'{url}/title/{self.oclc_numbers[0]}'
elif self.get_isbn13():
# Handles both isbn13 & 10
return f'{url}/isbn/{self.get_isbn13()}'
return f'{url}/search?q={self.title}'
def get_isbnmask(self):
"""Returns a masked (hyphenated) ISBN if possible."""
isbns = self.get('isbn_13', []) + self.get('isbn_10', [None])
try:
isbn = mask(isbns[0])
except NotValidISBNError:
return isbns[0]
return isbn or isbns[0]
def get_identifiers(self):
"""Returns (name, value) pairs of all available identifiers."""
names = ['ocaid', 'isbn_10', 'isbn_13', 'lccn', 'oclc_numbers']
return self._process_identifiers(
get_edition_config().identifiers, names, self.identifiers
)
def get_ia_meta_fields(self):
# Check for cached value
# $$$ we haven't assigned _ia_meta_fields the first time around but there's apparently
# some magic that lets us check this way (and breaks using hasattr to check if defined)
if self._ia_meta_fields:
return self._ia_meta_fields
if not self.get('ocaid', None):
meta = {}
else:
meta = ia.get_metadata(self.ocaid)
meta.setdefault('external-identifier', [])
meta.setdefault('collection', [])
self._ia_meta_fields = meta
return self._ia_meta_fields
def is_daisy_encrypted(self):
meta_fields = self.get_ia_meta_fields()
if not meta_fields:
return
v = meta_fields['collection']
return 'printdisabled' in v or 'lendinglibrary' in v
# def is_lending_library(self):
# collections = self.get_ia_collections()
# return 'lendinglibrary' in collections
def get_lending_resources(self):
"""Returns the loan resource identifiers (in meta.xml format for ACS4 resources) for books hosted on archive.org
Returns e.g. ['bookreader:lettertoannewarr00west',
'acs:epub:urn:uuid:0df6f344-7ce9-4038-885e-e02db34f2891',
'acs:pdf:urn:uuid:7f192e62-13f5-4a62-af48-be4bea67e109']
"""
# The entries in meta.xml look like this:
# <external-identifier>
# acs:epub:urn:uuid:0df6f344-7ce9-4038-885e-e02db34f2891
# </external-identifier>
itemid = self.ocaid
if not itemid:
return []
lending_resources = []
# Check if available for in-browser lending - marked with 'browserlending' collection
browserLendingCollections = ['browserlending']
for collection in self.get_ia_meta_fields()['collection']:
if collection in browserLendingCollections:
lending_resources.append('bookreader:%s' % self.ocaid)
break
lending_resources.extend(self.get_ia_meta_fields()['external-identifier'])
return lending_resources
def get_lending_resource_id(self, type):
if type == 'bookreader':
desired = 'bookreader:'
else:
desired = 'acs:%s:' % type
for urn in self.get_lending_resources():
if urn.startswith(desired):
# Got a match
# $$$ a little icky - prune the acs:type if present
if urn.startswith('acs:'):
urn = urn[len(desired) :]
return urn
return None
def get_current_and_available_loans(self):
current_loans = borrow.get_edition_loans(self)
current_and_available_loans = (
current_loans,
self._get_available_loans(current_loans),
)
return current_and_available_loans
def get_current_loans(self):
return borrow.get_edition_loans(self)
def get_available_loans(self):
"""
Get the resource types currently available to be loaned out for this edition. Does NOT
take into account the user's status (e.g. number of books out, in-library status, etc).
This is like checking if this book is on the shelf.
Returns [{'resource_id': uuid, 'resource_type': type, 'size': bytes}]
size may be None"""
# no ebook
if not self.ocaid:
return []
# already checked out
if lending.is_loaned_out(self.ocaid):
return []
# find available loans. there are no current loans
return self._get_available_loans([])
def _get_available_loans(self, current_loans):
default_type = 'bookreader'
loans = []
# Check if we have a possible loan - may not yet be fulfilled in ACS4
if current_loans:
# There is a current loan or offer
return []
# Create list of possible loan formats
resource_pattern = r'acs:(\w+):(.*)'
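        # e.g. 'acs:epub:urn:uuid:0df6f344-7ce9-4038-885e-e02db34f2891' parses to
        # type='epub', resource_id='urn:uuid:0df6f344-7ce9-4038-885e-e02db34f2891'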
for resource_urn in self.get_lending_resources():
if resource_urn.startswith('acs:'):
(type, resource_id) = re.match(resource_pattern, resource_urn).groups()
loans.append(
{'resource_id': resource_id, 'resource_type': type, 'size': None}
)
elif resource_urn.startswith('bookreader'):
loans.append(
{
'resource_id': resource_urn,
'resource_type': 'bookreader',
'size': None,
}
)
# Put default type at start of list, then sort by type name
def loan_key(loan):
if loan['resource_type'] == default_type:
return '1-%s' % loan['resource_type']
else:
return '2-%s' % loan['resource_type']
loans = sorted(loans, key=loan_key)
# For each possible loan, check if it is available We
# shouldn't be out of sync (we already checked
# get_edition_loans for current loans) but we fail safe, for
# example the book may have been borrowed in a dev instance
# against the live ACS4 server
for loan in loans:
if borrow.is_loaned_out(loan['resource_id']):
# Only a single loan of an item is allowed
# $$$ log out of sync state
return []
return loans
def update_loan_status(self):
"""Update the loan status"""
if self.ocaid:
lending.sync_loan(self.ocaid)
def _process_identifiers(self, config_, names, values):
id_map = {}
for id in config_:
id_map[id.name] = id
id.setdefault("label", id.name)
id.setdefault("url_format", None)
d = MultiDict()
def process(name, value):
if value:
if not isinstance(value, list):
value = [value]
id = id_map.get(name) or web.storage(
name=name, label=name, url_format=None
)
for v in value:
d[id.name] = web.storage(
name=id.name,
label=id.label,
value=v,
url=id.get('url') and id.url.replace('@@@', v.replace(' ', '')),
)
for name in names:
process(name, self[name])
for name in values:
process(name, values[name])
return d
def set_identifiers(self, identifiers):
"""Updates the edition from identifiers specified as (name, value) pairs."""
names = (
'isbn_10',
'isbn_13',
'lccn',
'oclc_numbers',
'ocaid',
'dewey_decimal_class',
'lc_classifications',
)
d = {}
for id in identifiers:
# ignore bad values
if 'name' not in id or 'value' not in id:
continue
name, value = id['name'], id['value']
if name == 'lccn':
value = normalize_lccn(value)
# `None` in this field causes errors. See #7999.
if value is not None:
d.setdefault(name, []).append(value)
# clear existing value first
for name in names:
self._getdata().pop(name, None)
self.identifiers = {}
for name, value in d.items():
# ocaid is not a list
if name == 'ocaid':
self.ocaid = value[0]
elif name in names:
self[name] = value
else:
self.identifiers[name] = value
def get_classifications(self):
names = ["dewey_decimal_class", "lc_classifications"]
return self._process_identifiers(
get_edition_config().classifications, names, self.classifications
)
def set_classifications(self, classifications):
names = ["dewey_decimal_class", "lc_classifications"]
d = defaultdict(list)
for c in classifications:
if (
'name' not in c
or 'value' not in c
or not web.re_compile("[a-z0-9_]*").match(c['name'])
):
continue
d[c['name']].append(c['value'])
for name in names:
self._getdata().pop(name, None)
self.classifications = {}
for name, value in d.items():
if name in names:
self[name] = value
else:
self.classifications[name] = value
def get_weight(self):
"""returns weight as a storage object with value and units fields."""
w = self.weight
return w and UnitParser(["value"]).parse(w)
def set_weight(self, w):
self.weight = w and UnitParser(["value"]).format(w)
def get_physical_dimensions(self):
d = self.physical_dimensions
return d and UnitParser(["height", "width", "depth"]).parse(d)
def set_physical_dimensions(self, d):
# don't overwrite physical dimensions if nothing was passed in - there
# may be dimensions in the database that don't conform to the d x d x d format
if d:
self.physical_dimensions = UnitParser(["height", "width", "depth"]).format(
d
)
def get_toc_text(self) -> str:
if toc := self.get_table_of_contents():
return toc.to_markdown()
return ""
def get_table_of_contents(self) -> TableOfContents | None:
if not self.table_of_contents:
return None
return TableOfContents.from_db(self.table_of_contents)
def set_toc_text(self, text: str | None):
if text:
self.table_of_contents = TableOfContents.from_markdown(text).to_db()
else:
self.table_of_contents = None
def get_links(self):
links1 = [
web.storage(url=url, title=title)
for url, title in zip(self.uris, self.uri_descriptions)
]
links2 = list(self.links)
return links1 + links2
def get_olid(self):
return self.key.split('/')[-1]
@property
def wp_citation_fields(self):
"""
Builds a Wikipedia book citation as defined by https://en.wikipedia.org/wiki/Template:Cite_book
"""
citation = {}
authors = [ar.author for ar in self.works[0].authors]
if len(authors) == 1:
citation['author'] = authors[0].name
else:
for i, a in enumerate(authors, 1):
citation[f'author{i}'] = a.name
citation.update(
{
'date': self.get('publish_date'),
'orig-date': self.works[0].get('first_publish_date'),
                'title': self.title.replace("[", "&#91;").replace("]", "&#93;"),
'url': (
f'https://archive.org/details/{self.ocaid}' if self.ocaid else None
),
'publication-place': self.get('publish_places', [None])[0],
'publisher': self.get('publishers', [None])[0],
'isbn': self.get_isbnmask(),
'issn': self.get('identifiers', {}).get('issn', [None])[0],
}
)
if self.lccn and (lccn := normalize_lccn(self.lccn[0])):
citation['lccn'] = lccn
if self.get('oclc_numbers'):
citation['oclc'] = self.oclc_numbers[0]
citation['ol'] = str(self.get_olid())[2:]
# TODO: add 'ol-access': 'free' if the item is free to read.
if citation['date'] == citation['orig-date']:
citation.pop('orig-date')
return citation
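    # Illustrative result (values hypothetical): {'author': 'Roald Dahl', 'date': '1988',
    # 'title': 'Fantastic Mr Fox', 'isbn': '978-0-14-032872-1', 'ol': '7353617M', ...},
    # i.e. the keyword parameters of a Wikipedia {{cite book}} template.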
def is_fake_record(self):
"""Returns True if this is a record is not a real record from database,
but created on the fly.
The /books/ia:foo00bar records are not stored in the database, but
created at runtime using the data from archive.org metadata API.
"""
return "/ia:" in self.key
def set_provider_data(self, data):
if not self.providers:
self.providers = []
self.providers.append(data)
def set_providers(self, providers):
self.providers = providers
class Author(models.Author):
def get_photos(self):
return [Image(self._site, "a", id) for id in self.photos if id > 0]
def get_photo(self):
photos = self.get_photos()
return photos and photos[0] or None
def get_photo_url(self, size):
photo = self.get_photo()
return photo and photo.url(size)
def get_olid(self):
return self.key.split('/')[-1]
def get_books(self, q=''):
i = web.input(sort='editions', page=1, rows=20, mode="")
try:
# safeguard from passing zero/negative offsets to solr
page = max(1, int(i.page))
except ValueError:
page = 1
return works_by_author(
self.get_olid(),
sort=i.sort,
page=page,
rows=i.rows,
has_fulltext=i.mode == "ebooks",
query=q,
facet=True,
)
def get_work_count(self):
"""Returns the number of works by this author."""
# TODO: avoid duplicate works_by_author calls
result = works_by_author(self.get_olid(), rows=0)
return result.num_found
def as_fake_solr_record(self):
record = {
'key': self.key,
'name': self.name,
'top_subjects': [],
'work_count': 0,
'type': 'author',
}
if self.death_date:
record['death_date'] = self.death_date
if self.birth_date:
record['birth_date'] = self.birth_date
return record
re_year = re.compile(r'(\d{4})$')
class Work(models.Work):
def get_olid(self):
return self.key.split('/')[-1]
def get_covers(self, use_solr=True):
if self.covers:
return [Image(self._site, "w", id) for id in self.covers if id > 0]
elif use_solr:
return self.get_covers_from_solr()
else:
return []
def get_covers_from_solr(self):
try:
w = self._solr_data
except Exception as e:
logging.getLogger("openlibrary").exception(
'Unable to retrieve covers from solr'
)
return []
if w:
if 'cover_id' in w:
return [Image(self._site, "w", int(w['cover_id']))]
elif 'cover_edition_key' in w:
cover_edition = web.ctx.site.get("/books/" + w['cover_edition_key'])
cover = cover_edition and cover_edition.get_cover()
if cover:
return [cover]
return []
@cached_property
def _solr_data(self):
from openlibrary.book_providers import get_solr_keys
fields = [
"key",
"cover_edition_key",
"cover_id",
"edition_key",
"first_publish_year",
"has_fulltext",
"lending_edition_s",
"public_scan_b",
] + get_solr_keys()
solr = get_solr()
stats.begin("solr", get=self.key, fields=fields)
try:
return solr.get(self.key, fields=fields)
except Exception as e:
logging.getLogger("openlibrary").exception("Failed to get solr data")
return None
finally:
stats.end()
def get_cover(self, use_solr=True):
covers = self.get_covers(use_solr=use_solr)
return covers and covers[0] or None
def get_cover_url(self, size, use_solr=True):
cover = self.get_cover(use_solr=use_solr)
return cover and cover.url(size)
def get_author_names(self, blacklist=None):
author_names = []
for author in self.get_authors():
author_name = author if isinstance(author, str) else author.name
if not blacklist or author_name.lower() not in blacklist:
author_names.append(author_name)
return author_names
def get_authors(self):
authors = [a.author for a in self.authors]
authors = [follow_redirect(a) for a in authors]
authors = [a for a in authors if a and a.type.key == "/type/author"]
return authors
def get_subjects(self):
"""Return subject strings."""
subjects = self.subjects
def flip(name):
if name.count(",") == 1:
a, b = name.split(",")
return b.strip() + " " + a.strip()
return name
if subjects and not isinstance(subjects[0], str):
subjects = [flip(s.name) for s in subjects]
return subjects
@staticmethod
def filter_problematic_subjects(subjects, filter_unicode=True):
def is_ascii(s):
try:
return s.isascii()
except AttributeError:
return all(ord(c) < 128 for c in s)
blacklist = [
'accessible_book',
'protected_daisy',
'in_library',
'overdrive',
'large_type_books',
'internet_archive_wishlist',
'fiction',
'popular_print_disabled_books',
'fiction_in_english',
'open_library_staff_picks',
'inlibrary',
'printdisabled',
'browserlending',
'biographies',
'open_syllabus_project',
'history',
'long_now_manual_for_civilization',
'Popular works',
]
blacklist_chars = ['(', ',', '\'', ':', '&', '-', '.']
ok_subjects = []
for subject in subjects:
_subject = subject.lower().replace(' ', '_')
subject = subject.replace('_', ' ')
if (
_subject not in blacklist
and (
not filter_unicode
or (subject.replace(' ', '').isalnum() and is_ascii(subject))
)
and all(char not in subject for char in blacklist_chars)
):
ok_subjects.append(subject)
return ok_subjects
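    # e.g. ["Accessible book", "History", "Science fiction"] filters down to
    # ["Science fiction"], since the first two normalise to blacklisted slugs.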
def get_related_books_subjects(self, filter_unicode=True):
return self.filter_problematic_subjects(self.get_subjects(), filter_unicode)
def get_sorted_editions(
self,
ebooks_only: bool = False,
limit: int | None = None,
keys: list[str] | None = None,
) -> list[Edition]:
"""
Get this work's editions sorted by publication year
:param list[str] keys: ensure keys included in fetched editions
"""
db_query = {"type": "/type/edition", "works": self.key}
db_query['limit'] = limit or 10000 # type: ignore[assignment]
edition_keys = []
if ebooks_only:
if self._solr_data:
from openlibrary.book_providers import get_book_providers
# Always use solr data whether it's up to date or not
# to determine which providers this book has
# We only make additional queries when a
# trusted book provider identifier is present
for provider in get_book_providers(self._solr_data):
query = {**db_query, **provider.editions_query}
edition_keys += web.ctx.site.things(query)
else:
db_query["ocaid~"] = "*"
if not edition_keys:
solr_is_up_to_date = (
self._solr_data
and self._solr_data.get('edition_key')
and len(self._solr_data.get('edition_key')) == self.edition_count
)
if solr_is_up_to_date:
edition_keys += [
"/books/" + olid for olid in self._solr_data.get('edition_key')
]
else:
# given librarians are probably doing this, show all editions
edition_keys += web.ctx.site.things(db_query)
edition_keys.extend(keys or [])
editions = web.ctx.site.get_many(list(set(edition_keys)))
editions.sort(
key=lambda ed: ed.get_publish_year() or -sys.maxsize, reverse=True
)
# 2022-03 Once we know the availability-type of editions (e.g. open)
# via editions-search, we can sidestep get_availability to only
# check availability for borrowable editions
ocaids = [ed.ocaid for ed in editions if ed.ocaid]
availability = lending.get_availability_of_ocaids(ocaids) if ocaids else {}
for ed in editions:
ed.availability = availability.get(ed.ocaid) or {"status": "error"}
return editions
def has_ebook(self):
w = self._solr_data or {}
return w.get("has_fulltext", False)
first_publish_year = property(
lambda self: self._solr_data.get("first_publish_year")
)
def get_edition_covers(self):
editions = web.ctx.site.get_many(
web.ctx.site.things(
{"type": "/type/edition", "works": self.key, "limit": 1000}
)
)
existing = {int(c.id) for c in self.get_covers()}
covers = [e.get_cover() for e in editions]
return [c for c in covers if c and int(c.id) not in existing]
def as_fake_solr_record(self):
record = {
'key': self.key,
'title': self.get('title'),
}
if self.subtitle:
record['subtitle'] = self.subtitle
return record
class Subject(client.Thing):
pass
class SubjectPlace(Subject):
pass
class SubjectPerson(Subject):
pass
class User(models.User):
displayname: str | None
def get_name(self):
return self.displayname or self.key.split('/')[-1]
name = property(get_name)
def get_edit_history(self, limit=10, offset=0):
return web.ctx.site.versions(
{"author": self.key, "limit": limit, "offset": offset}
)
def get_users_settings(self):
settings = web.ctx.site.get('%s/preferences' % self.key)
return settings.dict().get('notifications') if settings else {}
def get_creation_info(self):
if web.ctx.path.startswith("/admin"):
d = web.ctx.site.versions(
{'key': self.key, "sort": "-created", "limit": 1}
)[0]
return web.storage({"ip": d.ip, "member_since": d.created})
def get_edit_count(self):
if web.ctx.path.startswith("/admin"):
return web.ctx.site._request('/count_edits_by_user', data={"key": self.key})
else:
return 0
def get_loan_count(self):
return len(borrow.get_loans(self))
def get_loans(self):
self.update_loan_status()
return lending.get_loans_of_user(self.key)
def update_loan_status(self):
"""Update the status of this user's loans."""
loans = lending.get_loans_of_user(self.key)
for loan in loans:
lending.sync_loan(loan['ocaid'])
def get_safe_mode(self):
return (self.get_users_settings() or {}).get('safe_mode', "").lower()
class UnitParser:
"""Parsers values like dimensions and weight.
>>> p = UnitParser(["height", "width", "depth"])
>>> parsed = p.parse("9 x 3 x 2 inches")
>>> isinstance(parsed, web.utils.Storage)
True
>>> sorted(parsed.items())
[('depth', '2'), ('height', '9'), ('units', 'inches'), ('width', '3')]
>>> p.format({"height": "9", "width": 3, "depth": 2, "units": "inches"})
'9 x 3 x 2 inches'
"""
def __init__(self, fields):
self.fields = fields
def format(self, d):
return (
" x ".join(str(d.get(k, '')) for k in self.fields)
+ ' '
+ d.get('units', '')
)
def parse(self, s):
"""Parse the string and return storage object with specified fields and units."""
pattern = "^" + " *x *".join("([0-9.]*)" for f in self.fields) + " *(.*)$"
rx = web.re_compile(pattern)
m = rx.match(s)
return m and web.storage(zip(self.fields + ["units"], m.groups()))
class Changeset(client.Changeset):
def can_undo(self):
return False
def _get_doc(self, key, revision):
if revision == 0:
return {"key": key, "type": {"key": "/type/delete"}}
else:
d = web.ctx.site.get(key, revision).dict()
return d
def process_docs_before_undo(self, docs):
"""Hook to process docs before saving for undo.
        This is called by the _undo method to allow subclasses to check
        for validity or redirects so that undo doesn't fail.
        Subclasses may override this as required.
"""
return docs
def _undo(self):
"""Undo this transaction."""
docs = [self._get_doc(c['key'], c['revision'] - 1) for c in self.changes]
docs = self.process_docs_before_undo(docs)
data = {"parent_changeset": self.id}
comment = 'undo ' + self.comment
return web.ctx.site.save_many(docs, action="undo", data=data, comment=comment)
def get_undo_changeset(self):
"""Returns the changeset that undone this transaction if one exists, None otherwise."""
try:
return self._undo_changeset
except AttributeError:
pass
changesets = web.ctx.site.recentchanges(
{"kind": "undo", "data": {"parent_changeset": self.id}}
)
# return the first undo changeset
self._undo_changeset = changesets and changesets[-1] or None
return self._undo_changeset
class NewAccountChangeset(Changeset):
def get_user(self):
keys = [c.key for c in self.get_changes()]
user_key = "/people/" + keys[0].split("/")[2]
return web.ctx.site.get(user_key)
class MergeAuthors(Changeset):
def can_undo(self):
return self.get_undo_changeset() is None
def get_master(self):
master = self.data.get("master")
return master and web.ctx.site.get(master, lazy=True)
def get_duplicates(self):
duplicates = self.data.get("duplicates")
changes = {c['key']: c['revision'] for c in self.changes}
return duplicates and [
web.ctx.site.get(key, revision=changes[key] - 1, lazy=True)
for key in duplicates
if key in changes
]
class MergeWorks(Changeset):
def can_undo(self):
return self.get_undo_changeset() is None
def get_master(self):
master = self.data.get("master")
return master and web.ctx.site.get(master, lazy=True)
def get_duplicates(self):
duplicates = self.data.get("duplicates")
changes = {c['key']: c['revision'] for c in self.changes}
return duplicates and [
web.ctx.site.get(key, revision=changes[key] - 1, lazy=True)
for key in duplicates
if key in changes
]
class Undo(Changeset):
def can_undo(self):
return False
def get_undo_of(self):
undo_of = self.data['undo_of']
return web.ctx.site.get_change(undo_of)
def get_parent_changeset(self):
parent = self.data['parent_changeset']
return web.ctx.site.get_change(parent)
class AddBookChangeset(Changeset):
def get_work(self):
book = self.get_edition()
return (book and book.works and book.works[0]) or None
def get_edition(self):
for doc in self.get_changes():
if doc.key.startswith("/books/"):
return doc
def get_author(self):
for doc in self.get_changes():
if doc.key.startswith("/authors/"):
return doc
class Tag(models.Tag):
"""Class to represent /type/tag objects in Open Library."""
pass
def setup():
models.register_models()
client.register_thing_class('/type/edition', Edition)
client.register_thing_class('/type/author', Author)
client.register_thing_class('/type/work', Work)
client.register_thing_class('/type/subject', Subject)
client.register_thing_class('/type/place', SubjectPlace)
client.register_thing_class('/type/person', SubjectPerson)
client.register_thing_class('/type/user', User)
client.register_thing_class('/type/tag', Tag)
client.register_changeset_class(None, Changeset) # set the default class
client.register_changeset_class('merge-authors', MergeAuthors)
client.register_changeset_class('merge-works', MergeWorks)
client.register_changeset_class('undo', Undo)
client.register_changeset_class('add-book', AddBookChangeset)
client.register_changeset_class('new-account', NewAccountChangeset)
| 33,567 | Python | .py | 821 | 30.677223 | 120 | 0.576141 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
440 | checkins.py | internetarchive_openlibrary/openlibrary/plugins/upstream/checkins.py | """Reading log check-ins handler and services.
"""
import json
import web
from datetime import datetime
from math import floor
from infogami.utils import delegate
from infogami.utils.view import public
from openlibrary.accounts import get_current_user
from openlibrary.app import render_template
from openlibrary.core.yearly_reading_goals import YearlyReadingGoals
from openlibrary.utils import extract_numeric_id_from_olid
from openlibrary.core.bookshelves_events import BookshelfEvent, BookshelvesEvents
from openlibrary.utils.decorators import authorized_for
MAX_READING_GOAL = 10_000
def make_date_string(year: int, month: int | None, day: int | None) -> str:
"""Creates a date string in the expected format, given the year, month, and day.
Event dates can take one of three forms:
"YYYY"
"YYYY-MM"
"YYYY-MM-DD"
"""
result = f'{year}'
if month:
result += f'-{month:02}'
if day:
result += f'-{day:02}'
return result
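# Illustrative examples (not part of the original module) of the three date
# forms described above, using hypothetical values:
#     make_date_string(2023, 7, 14)      -> "2023-07-14"
#     make_date_string(2023, 7, None)    -> "2023-07"
#     make_date_string(2023, None, None) -> "2023"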
def is_valid_date(year: int, month: int | None, day: int | None) -> bool:
"""Validates dates.
Dates are considered valid if there is:
1. A year only.
2. A year and a month only.
3. A year, month, and day.
"""
if not year:
return False
return not day or bool(month)
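# Illustrative examples (not part of the original module): a year alone is
# valid, but a day without a month is not:
#     is_valid_date(2023, None, None) -> True
#     is_valid_date(2023, 7, 14)      -> True
#     is_valid_date(2023, None, 14)   -> False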
@public
def get_latest_read_date(work_olid: str) -> dict | None:
user = get_current_user()
if not user:
return None
username = user['key'].split('/')[-1]
work_id = extract_numeric_id_from_olid(work_olid)
result = BookshelvesEvents.get_latest_event_date(
username, work_id, BookshelfEvent.FINISH
)
return result
class patron_check_ins(delegate.page):
path = r'/works/OL(\d+)W/check-ins'
encoding = 'json'
def POST(self, work_id):
"""Validates data, constructs date string, and persists check-in event.
Data object should have the following:
event_type : number
year : number
month : number : optional
day : number : optional
edition_key : string : optional
event_id : int : optional
"""
data = json.loads(web.data())
if not self.validate_data(data):
raise web.badrequest(message='Invalid date submitted')
user = get_current_user()
if not user:
raise web.unauthorized(message='Requires login')
username = user['key'].split('/')[-1]
edition_key = data.get('edition_key', None)
edition_id = extract_numeric_id_from_olid(edition_key) if edition_key else None
event_type = data.get('event_type')
date_str = make_date_string(
data.get('year', None),
data.get('month', None),
data.get('day', None),
)
event_id = data.get('event_id', None)
if event_id:
# update existing event
events = BookshelvesEvents.select_by_id(event_id)
if not events:
raise web.notfound(message='Event does not exist')
event = events[0]
if username != event['username']:
raise web.forbidden()
BookshelvesEvents.update_event(
event_id, event_date=date_str, edition_id=edition_id
)
else:
# create new event
result = BookshelvesEvents.create_event(
username, work_id, edition_id, date_str, event_type=event_type
)
event_id = result
return delegate.RawText(json.dumps({'status': 'ok', 'id': event_id}))
def validate_data(self, data):
"""Validates data submitted from check-in dialog."""
# Event type must exist:
if 'event_type' not in data:
return False
if not BookshelfEvent.has_value(data.get('event_type')):
return False
# Date must be valid:
return is_valid_date(
data.get('year', None),
data.get('month', None),
data.get('day', None),
)
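# A minimal sketch (hypothetical values, not from the original source) of the
# JSON body a client might POST to /works/OL123W/check-ins, matching the
# fields validated above; event_type must be a value accepted by
# BookshelfEvent.has_value():
#     {"event_type": 3, "year": 2023, "month": 7, "day": 14,
#      "edition_key": "OL456M"}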
class patron_check_in(delegate.page):
path = r'/check-ins/(\d+)'
def DELETE(self, check_in_id):
user = get_current_user()
if not user:
raise web.unauthorized(message="Requires login")
events = BookshelvesEvents.select_by_id(check_in_id)
if not events:
raise web.notfound(message='Event does not exist')
event = events[0]
username = user['key'].split('/')[-1]
if username != event['username']:
raise web.forbidden()
BookshelvesEvents.delete_by_id(check_in_id)
return web.ok()
class yearly_reading_goal_json(delegate.page):
path = '/reading-goal'
encoding = 'json'
def GET(self):
i = web.input(year=None)
user = get_current_user()
if not user:
raise web.unauthorized(message='Requires login')
username = user['key'].split('/')[-1]
if i.year:
results = [
{'year': i.year, 'goal': record.target, 'progress': record.current}
for record in YearlyReadingGoals.select_by_username_and_year(
username, i.year
)
]
else:
results = [
{'year': record.year, 'goal': record.target, 'progress': record.current}
for record in YearlyReadingGoals.select_by_username(username)
]
return delegate.RawText(json.dumps({'status': 'ok', 'goal': results}))
def POST(self):
i = web.input(goal=0, year=None, is_update=None)
goal = min(int(i.goal), MAX_READING_GOAL)
if i.is_update:
if goal < 0:
raise web.badrequest(
message='Reading goal update must be 0 or a positive integer'
)
elif not goal or goal < 1:
raise web.badrequest(message='Reading goal must be a positive integer')
if i.is_update and not i.year:
raise web.badrequest(message='Year required to update reading goals')
user = get_current_user()
if not user:
raise web.unauthorized(message='Requires login')
username = user['key'].split('/')[-1]
current_year = i.year or datetime.now().year
if i.is_update:
if goal == 0:
# Delete goal if "0" was submitted:
YearlyReadingGoals.delete_by_username_and_year(username, i.year)
else:
# Update goal normally:
YearlyReadingGoals.update_target(username, i.year, goal)
else:
YearlyReadingGoals.create(username, current_year, goal)
return delegate.RawText(json.dumps({'status': 'ok'}))
@public
def get_reading_goals(year=None):
user = get_current_user()
if not user:
return None
username = user['key'].split('/')[-1]
if not year:
year = datetime.now().year
if not (data := YearlyReadingGoals.select_by_username_and_year(username, year)):
return None
books_read = BookshelvesEvents.select_distinct_by_user_type_and_year(
username, BookshelfEvent.FINISH, year
)
read_count = len(books_read)
result = YearlyGoal(data[0].year, data[0].target, read_count)
return result
class YearlyGoal:
def __init__(self, year, goal, books_read):
self.year = year
self.goal = goal
self.books_read = books_read
self.progress = floor((books_read / goal) * 100)
@classmethod
def calc_progress(cls, books_read, goal):
return floor((books_read / goal) * 100)
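# Illustrative arithmetic (not part of the original module): reading 7 books
# toward a 12-book goal gives floor((7 / 12) * 100) == 58 percent progress.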
class ui_partials(delegate.page):
path = '/reading-goal/partials'
encoding = 'json'
def GET(self):
i = web.input(year=None)
year = i.year or datetime.now().year
goal = get_reading_goals(year=year)
component = render_template('check_ins/reading_goal_progress', [goal])
partials = {"partials": str(component)}
return delegate.RawText(json.dumps(partials))
def setup():
pass
| 8,086 | Python | .py | 210 | 29.77619 | 88 | 0.609481 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
441 | jsdef.py | internetarchive_openlibrary/openlibrary/plugins/upstream/jsdef.py | """Templetor extension to support javascript templates.
During AJAX development, there is often a need to generate HTML and update
some part of the DOM. It is clumsy to do that in javascript. Even though
there are some javascript template engines, this often ends up in duplication
because of writing a Python template and a Javascript template for doing
the same thing.
This extension adds a new block `jsdef` to Templetor, which provides
a template function just like `def` and also generates an equivalent
javascript function.
USAGE::
import jsdef
render = web.template.render("templates/", extensions=[jsdef.extension])
Sample Template::
$def with (page)
<h1>$page.title</h1>
$jsdef render_books(books):
<ul>
$for book in books:
<li><a href="$book.key">$book.title</a></li>
</ul>
<div id="books">
$:render_books(page.books)
</div>
<script type="text/javascript">
        function update_books(books) {
document.getElementById("books").innerHTML = render_books(books);
}
</script>
For more details, see:
http://github.com/anandology/notebook/tree/master/2010/03/jsdef/
"""
__author__ = "Anand Chitipothu <[email protected]>"
__version__ = "0.3"
"""change notes:
0.1: first release
0.2: python to javascript conversion for "and", "or" and "not" keywords
0.3: Added support for elif.
"""
import json
import web
from web.template import (
Template,
Parser,
LineNode,
SuiteNode,
DefNode,
PythonTokenizer,
# INDENT,
)
INDENT = " "
def extension(parser):
r"""jsdef extension. Adds support for `jsdef` block to template parser.::
>>> t = Template("$jsdef hello(name):\n Hello $name!", extensions=[extension])
        >>> print(t()) #doctest:+NORMALIZE_WHITESPACE
<script type="text/javascript">
function hello(name){
var self = [], loop;
self.push("Hello "); self.push(websafe(name)); self.push("!\n");
return self.join("");
}
</script>
"""
parser.statement_nodes['jsdef'] = JSDefNode
return parser
class JSDefNode(DefNode):
"""Node to represent jsdef block."""
def __init__(self, *a, **kw):
DefNode.__init__(self, *a, **kw)
self.suite.sections.append(JSNode(self))
self.stmt = self.stmt.replace("jsdef", "def")
class JSNode:
def __init__(self, node):
self.node = node
self._count = 0
def emit(self, indent, text_indent=""):
# Code generation logic is changed in version 0.34
if web.__version__ < "0.34":
return indent[4:] + 'yield "", %s\n' % repr(self.jsemit(self.node, ""))
else:
return indent[4:] + 'self.extend(%s)\n' % repr(self.jsemit(self.node, ""))
def jsemit(self, node, indent):
r"""Emit Javascript for given node.::
>>> jsemit = JSNode(None).jsemit
>>> jsemit(web.template.StatementNode("break"), "")
'break;\n'
>>> jsemit(web.template.AssignmentNode("x = 1"), "")
'var x = 1;\n'
"""
name = "jsemit_" + node.__class__.__name__
if f := getattr(self, name, None):
return f(node, indent)
else:
return ""
def jsemit_SuiteNode(self, node, indent):
return "".join(self.jsemit(s, indent) for s in node.sections)
def jsemit_LineNode(self, node, indent):
text = ["self.push(%s);" % self.jsemit(n, "") for n in node.nodes]
return indent + " ".join(text) + "\n"
def jsemit_TextNode(self, node, indent):
return json.dumps(node.value)
def jsemit_ExpressionNode(self, node, indent):
if node.escape:
return "websafe(%s)" % py2js(node.value)
else:
return py2js(node.value)
def jsemit_AssignmentNode(self, node, indent):
return indent + "var " + py2js(node.code) + ";\n"
def jsemit_StatementNode(self, node, indent):
return indent + py2js(node.stmt) + ";\n"
def jsemit_BlockNode(self, node, indent):
text = ""
jsnames = {"elif": "else if"}
for n in ["if", "elif", "else", "for"]:
if node.stmt.startswith(n):
name = n
break
else:
return ""
expr = node.stmt[len(name) :].strip(": ")
expr = expr and "(" + expr + ")"
jsname = jsnames.get(name, name)
text += indent + f"{jsname} {py2js(expr)} {{\n"
text += self.jsemit(node.suite, indent + INDENT)
text += indent + "}\n"
return text
jsemit_IfNode = jsemit_BlockNode
jsemit_ElseNode = jsemit_BlockNode
jsemit_ElifNode = jsemit_BlockNode
def jsemit_ForNode(self, node, indent):
tok = PythonTokenizer(node.stmt)
tok.consume_till('in')
a = node.stmt[: tok.index].strip() # for i in
a = a[len("for") : -len("in")].strip() # strip `for` and `in`
b = node.stmt[tok.index : -1].strip() # rest of for stmt excluding :
b = web.re_compile(r"loop.setup\((.*)\)").match(b).group(1)
text = ""
text += indent + f"foreach({py2js(b)}, loop, function(loop, {a}) {{\n"
text += self.jsemit(node.suite, indent + INDENT)
text += indent + "});\n"
return text
def jsemit_JSDefNode(self, node, indent):
text = ""
text += '<script type="text/javascript"><!--\n'
text += node.stmt.replace("def ", "function ").strip(": ") + "{\n"
text += ' var self = [], loop;\n'
text += self.jsemit(node.suite, indent + INDENT)
text += ' return self.join("");\n'
text += "}\n"
text += "//--></script>\n"
return text
def tokenize(code):
"""Tokenize python code.::
>>> list(tokenize("x + y"))
['x', ' ', '+', ' ', 'y']
"""
end = 0
tok = PythonTokenizer(code)
try:
while True:
x = next(tok)
begin = x.begin[1]
if begin > end:
yield ' ' * (begin - end)
if x.value:
yield x.value
end = x.end[1]
except StopIteration:
pass
def py2js(expr):
"""Converts a python expression to javascript.::
>>> py2js("x + y")
'x + y'
>>> py2js("x and y")
'x && y'
>>> py2js("x or not y")
'x || ! y'
"""
d = {"and": "&&", "or": "||", "not": "!", "None": "null"}
def f(tokens):
for t in tokens:
yield d.get(t, t)
return "".join(f(tokenize(expr)))
def _testrun(code):
parser = extension(web.template.Parser())
root = parser.parse(code)
node = root.suite
jnode = JSNode(node)
return jnode.jsemit(node, "")
def _test():
r"""
>>> t = _testrun
>>> t("$x")
'self.push(websafe(x));\n'
>>> t("$:x")
'self.push(x);\n'
>>> t("$ x = 1")
'var x = 1;\n'
>>> t("$ x = a and b")
'var x = a && b;\n'
>>> t("$if a or not b: $a")
u'if (a || ! b) {\n self.push(websafe(a));\n}\n'
>>> t("$for i in a and a.data or []: $i")
u'foreach(a && a.data || [], loop, function(loop, i) {\n self.push(websafe(i));\n});\n'
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7,243 | Python | .py | 207 | 28.120773 | 94 | 0.564176 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
442 | mybooks.py | internetarchive_openlibrary/openlibrary/plugins/upstream/mybooks.py | import json
import web
from web.template import TemplateResult
from typing import Final, Literal, cast, TYPE_CHECKING
from infogami import config
from infogami.utils import delegate
from infogami.utils.view import public, safeint, render
from openlibrary.i18n import gettext as _
from openlibrary import accounts
from openlibrary.accounts.model import OpenLibraryAccount
from openlibrary.utils import extract_numeric_id_from_olid
from openlibrary.utils.dateutil import current_year
from openlibrary.core.booknotes import Booknotes
from openlibrary.core.bookshelves import Bookshelves
from openlibrary.core.lending import (
add_availability,
get_loans_of_user,
)
from openlibrary.core.observations import Observations, convert_observation_ids
from openlibrary.core.models import LoggedBooksData
from openlibrary.core.models import User
from openlibrary.core.follows import PubSub
from openlibrary.core.yearly_reading_goals import YearlyReadingGoals
if TYPE_CHECKING:
from openlibrary.core.lists.model import List
from openlibrary.plugins.upstream.models import Work
RESULTS_PER_PAGE: Final = 25
class avatar(delegate.page):
path = "/people/([^/]+)/avatar"
def GET(self, username: str):
url = User.get_avatar_url(username)
raise web.seeother(url)
class mybooks_home(delegate.page):
path = "/people/([^/]+)/books"
def GET(self, username: str) -> TemplateResult:
"""Renders the template for the my books overview page
The other way to get to this page is /account/books which is
defined in /plugins/account.py account_my_books. But we don't
need to update that redirect because it already just redirects
here.
"""
mb = MyBooksTemplate(username, key='mybooks')
template = self.render_template(mb)
return mb.render(header_title=_("Books"), template=template)
def render_template(self, mb):
# Marshal loans into homogeneous data that carousel can render
want_to_read, currently_reading, already_read, loans = [], [], [], []
if mb.me:
myloans = get_loans_of_user(mb.me.key)
            loans = web.Storage({"docs": [], "total_results": len(myloans)})
# TODO: should do in one web.ctx.get_many fetch
for loan in myloans:
book = web.ctx.site.get(loan['book'])
book.loan = loan
loans.docs.append(book)
if mb.me or mb.is_public:
params = {'sort': 'created', 'limit': 6, 'sort_order': 'desc', 'page': 1}
want_to_read = mb.readlog.get_works(key='want-to-read', **params)
currently_reading = mb.readlog.get_works(key='currently-reading', **params)
already_read = mb.readlog.get_works(key='already-read', **params)
# Ideally, do all 3 lookups in one add_availability call
want_to_read.docs = add_availability(
[d for d in want_to_read.docs if d.get('title')]
)[:5]
currently_reading.docs = add_availability(
[d for d in currently_reading.docs if d.get('title')]
)[:5]
already_read.docs = add_availability(
[d for d in already_read.docs if d.get('title')]
)[:5]
docs = {
'loans': loans,
'want-to-read': want_to_read,
'currently-reading': currently_reading,
'already-read': already_read,
}
return render['account/mybooks'](
mb.user,
docs,
key=mb.key,
public=mb.is_public,
owners_page=mb.is_my_page,
counts=mb.counts,
lists=mb.lists,
component_times=mb.component_times,
)
class mybooks_notes(delegate.page):
path = "/people/([^/]+)/books/notes"
def GET(self, username):
i = web.input(page=1)
mb = MyBooksTemplate(username, key='notes')
if mb.is_my_page:
docs = PatronBooknotes(mb.user).get_notes(page=int(i.page))
template = render['account/notes'](
docs, mb.user, mb.counts['notes'], page=int(i.page)
)
return mb.render(header_title=_("Notes"), template=template)
raise web.seeother(mb.user.key)
class mybooks_reviews(delegate.page):
path = "/people/([^/]+)/books/observations"
def GET(self, username):
i = web.input(page=1)
mb = MyBooksTemplate(username, key='observations')
if mb.is_my_page:
docs = PatronBooknotes(mb.user).get_observations(page=int(i.page))
template = render['account/observations'](
docs, mb.user, mb.counts['observations'], page=int(i.page)
)
return mb.render(header_title=_("Reviews"), template=template)
raise web.seeother(mb.user.key)
class mybooks_feed(delegate.page):
path = "/people/([^/]+)/books/feed"
def GET(self, username):
mb = MyBooksTemplate(username, key='feed')
if mb.is_my_page:
docs = PubSub.get_feed(username)
doc_count = len(docs)
template = render['account/reading_log'](
docs,
mb.key,
doc_count,
doc_count,
mb.is_my_page,
current_page=1,
user=mb.me,
)
return mb.render(header_title=_("My Feed"), template=template)
raise web.seeother(mb.user.key)
class readinglog_stats(delegate.page):
path = "/people/([^/]+)/books/(want-to-read|currently-reading|already-read)/stats"
def GET(self, username, key='want-to-read'):
user = web.ctx.site.get('/people/%s' % username)
if not user:
return render.notfound("User %s" % username, create=False)
cur_user = accounts.get_current_user()
if not cur_user or cur_user.key.split('/')[-1] != username:
return render.permission_denied(web.ctx.path, 'Permission Denied')
readlog = ReadingLog(user=user)
works = readlog.get_works(key, page=1, limit=2000).docs
works_json = [
{
# Fallback to key if it is a redirect
'title': w.get('title') or w.key,
'subtitle': w.get('subtitle'),
'key': w.get('key'),
'author_keys': ['/authors/' + key for key in w.get('author_key', [])],
'first_publish_year': w.get('first_publish_year') or None,
'subjects': w.get('subject'),
'subject_people': w.get('person'),
'subject_places': w.get('place'),
'subject_times': w.get('time'),
}
for w in works
]
author_keys = {a for work in works_json for a in work['author_keys']}
authors_json = [
{
'key': a.key,
'name': a.name,
'birth_date': a.get('birth_date'),
}
for a in web.ctx.site.get_many(list(author_keys))
]
return render['account/readinglog_stats'](
works_json,
authors_json,
len(works_json),
user.key,
user.displayname,
web.ctx.path.rsplit('/', 1)[0],
key,
lang=web.ctx.lang,
)
class readinglog_yearly(delegate.page):
path = "/people/([^/]+)/books/already-read/year/([0-9]+)"
def GET(self, username, year=None):
year = int(year or current_year())
if year < 1000:
# The year is used in a LIKE statement when we query for the yearly summary, so
# ensuring that the year is at least four digits long avoids incorrect results.
raise web.badrequest(message="Year must be four digits")
mb = MyBooksTemplate(username, 'already-read')
mb.selected_year = str(year)
template = mybooks_readinglog().render_template(mb, year=year)
return mb.render(template=template, header_title=_("Already Read"))
class mybooks_readinglog(delegate.page):
path = r'/people/([^/]+)/books/(want-to-read|currently-reading|already-read)'
def GET(self, username, key='want-to-read'):
mb = MyBooksTemplate(username, key)
if mb.is_my_page or mb.is_public:
KEYS_TITLES = {
'currently-reading': _(
"Currently Reading (%(count)d)",
count=mb.counts['currently-reading'],
),
'want-to-read': _(
"Want to Read (%(count)d)", count=mb.counts['want-to-read']
),
'already-read': _(
"Already Read (%(count)d)", count=mb.counts['already-read']
),
}
template = self.render_template(mb)
return mb.render(header_title=KEYS_TITLES[key], template=template)
raise web.seeother(mb.user.key)
def render_template(self, mb, year=None):
i = web.input(page=1, sort='desc', q="", results_per_page=RESULTS_PER_PAGE)
# Limit reading log filtering to queries of 3+ characters
# because filtering the reading log can be computationally expensive.
if len(i.q) < 3:
i.q = ""
logged_book_data: LoggedBooksData = mb.readlog.get_works(
key=mb.key, page=i.page, sort='created', sort_order=i.sort, q=i.q, year=year
)
docs = add_availability(logged_book_data.docs, mode="openlibrary_work")
doc_count = logged_book_data.total_results
# Add ratings to "already-read" items.
if include_ratings := mb.key == "already-read" and mb.is_my_page:
logged_book_data.load_ratings()
# Add yearly reading goals to the MyBooksTemplate
if mb.key == 'already-read' and mb.is_my_page:
mb.reading_goals = [
str(result.year)
for result in YearlyReadingGoals.select_by_username(
mb.username, order='year DESC'
)
]
ratings = logged_book_data.ratings
return render['account/reading_log'](
docs,
mb.key,
mb.counts[mb.key],
doc_count,
mb.is_my_page,
i.page,
sort_order=i.sort,
user=mb.user,
include_ratings=include_ratings,
q=i.q,
results_per_page=i.results_per_page,
ratings=ratings,
checkin_year=year,
)
class public_my_books_json(delegate.page):
path = r"/people/([^/]+)/books/(want-to-read|currently-reading|already-read)"
encoding = "json"
def GET(self, username, key='want-to-read'):
i = web.input(page=1, limit=100, q="")
key = cast(ReadingLog.READING_LOG_KEYS, key.lower())
if len(i.q) < 3:
i.q = ""
page = safeint(i.page, 1)
limit = safeint(i.limit, 100)
# check if user's reading log is public
user = web.ctx.site.get('/people/%s' % username)
if not user:
return delegate.RawText(
json.dumps({'error': 'User %s not found' % username}),
content_type="application/json",
)
is_public = user.preferences().get('public_readlog', 'no') == 'yes'
logged_in_user = accounts.get_current_user()
if (
is_public
or logged_in_user
and logged_in_user.key.split('/')[-1] == username
):
readlog = ReadingLog(user=user)
books = readlog.get_works(key, page, limit, q=i.q).docs
records_json = [
{
'work': {
'title': w.get('title'),
'key': w.key,
'author_keys': [
'/authors/' + key for key in w.get('author_key', [])
],
'author_names': w.get('author_name', []),
'first_publish_year': w.get('first_publish_year') or None,
'lending_edition_s': (w.get('lending_edition_s') or None),
'edition_key': (w.get('edition_key') or None),
'cover_id': (w.get('cover_i') or None),
'cover_edition_key': (w.get('cover_edition_key') or None),
},
'logged_edition': w.get('logged_edition') or None,
'logged_date': (
w.get('logged_date').strftime("%Y/%m/%d, %H:%M:%S")
if w.get('logged_date')
else None
),
}
for w in books
]
if page == 1 and len(records_json) < limit:
num_found = len(records_json)
else:
num_found = readlog.count_shelf(key)
return delegate.RawText(
json.dumps(
{
'page': page,
'numFound': num_found,
'reading_log_entries': records_json,
}
),
content_type="application/json",
)
else:
return delegate.RawText(
json.dumps({'error': 'Shelf %s not found or not accessible' % key}),
content_type="application/json",
)
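# A minimal sketch (hypothetical values, not from the original source) of the
# JSON this endpoint returns for an accessible reading log shelf:
#     {"page": 1,
#      "numFound": 2,
#      "reading_log_entries": [
#          {"work": {"title": "...", "key": "/works/OL123W", ...},
#           "logged_edition": "/books/OL456M",
#           "logged_date": "2023/07/14, 10:00:00"}]}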
@public
def get_patrons_work_read_status(username: str, work_key: str) -> int | None:
if not username:
return None
work_id = extract_numeric_id_from_olid(work_key)
status_id = Bookshelves.get_users_read_status_of_work(username, work_id)
return status_id
@public
class MyBooksTemplate:
# Reading log shelves
READING_LOG_KEYS = {"currently-reading", "want-to-read", "already-read"}
# Keys that can be accessed when not logged in
PUBLIC_KEYS = READING_LOG_KEYS | {"lists", "list"} | {"mybooks"}
# Keys that are only accessible when logged in
# unioned with the public keys
ALL_KEYS = PUBLIC_KEYS | {
"loans",
"feed",
"waitlist",
"notes",
"observations",
"imports",
}
def __init__(self, username: str, key: str) -> None:
"""The following is data required by every My Books sub-template (e.g. sidebar)"""
self.username = username
self.user = web.ctx.site.get('/people/%s' % self.username)
if not self.user:
raise render.notfound("User %s" % self.username, create=False)
self.is_public = self.user.preferences().get('public_readlog', 'no') == 'yes'
self.user_itemname = self.user.get_account().get('internetarchive_itemname')
self.me = accounts.get_current_user()
self.is_my_page = self.me and self.me.key.split('/')[-1] == self.username
self.is_subscribed = (
self.me.is_subscribed_user(self.username)
if self.me and self.is_public
else -1
)
self.key = key.lower()
self.readlog = ReadingLog(user=self.user)
self.lists = self.readlog.lists
self.counts = (
self.readlog.reading_log_counts
if (self.is_my_page or self.is_public)
else {}
)
self.reading_goals: list = []
self.selected_year = None
if self.me and self.is_my_page or self.is_public:
self.counts['followers'] = PubSub.count_followers(self.username)
self.counts['following'] = PubSub.count_following(self.username)
if self.me and self.is_my_page:
self.counts.update(PatronBooknotes.get_counts(self.username))
self.component_times: dict = {}
def render_sidebar(self) -> TemplateResult:
return render['account/sidebar'](
self.username,
self.key,
self.is_my_page,
self.is_public,
self.counts,
self.lists,
self.component_times,
)
def render(
self, template: TemplateResult, header_title: str, page: "List | None" = None
) -> TemplateResult:
"""
Gather the data necessary to render the My Books template, and then
render the template.
"""
return render['account/view'](
mb=self, template=template, header_title=header_title, page=page
)
class ReadingLog:
"""Manages the user's account page books (reading log, waitlists, loans)"""
# Constants
PRESET_SHELVES = Literal["Want to Read", "Already Read", "Currently Reading"]
READING_LOG_KEYS = Literal["want-to-read", "already-read", "currently-reading"]
READING_LOG_KEY_TO_SHELF: dict[READING_LOG_KEYS, PRESET_SHELVES] = {
"want-to-read": "Want to Read",
"already-read": "Already Read",
"currently-reading": "Currently Reading",
}
def __init__(self, user=None):
self.user = user or accounts.get_current_user()
@property
def lists(self) -> list:
return self.user.get_lists()
@property
def booknotes_counts(self):
return PatronBooknotes.get_counts(self.user.get_username())
@property
def get_sidebar_counts(self):
counts = self.reading_log_counts
counts.update(self.booknotes_counts)
return counts
@property
def reading_log_counts(self) -> dict[str, int]:
counts = (
Bookshelves.count_total_books_logged_by_user_per_shelf(
self.user.get_username()
)
if self.user.get_username()
else {}
)
return {
'want-to-read': counts.get(
Bookshelves.PRESET_BOOKSHELVES['Want to Read'], 0
),
'currently-reading': counts.get(
Bookshelves.PRESET_BOOKSHELVES['Currently Reading'], 0
),
'already-read': counts.get(
Bookshelves.PRESET_BOOKSHELVES['Already Read'], 0
),
}
def count_shelf(self, key: READING_LOG_KEYS) -> int:
username = self.user.get_username()
assert username
shelf_id = Bookshelves.PRESET_BOOKSHELVES[self.READING_LOG_KEY_TO_SHELF[key]]
return Bookshelves.count_user_books_on_shelf(username, shelf_id)
def get_works(
self,
key: READING_LOG_KEYS,
page: int = 1,
limit: int = RESULTS_PER_PAGE,
sort: str = 'created',
sort_order: str = 'desc',
q: str = "",
year: int | None = None,
) -> LoggedBooksData:
"""
Get works for want-to-read, currently-reading, and already-read as
determined by {key}.
See LoggedBooksData for specifics on what's returned.
"""
shelf = self.READING_LOG_KEY_TO_SHELF[key]
# Mypy is unhappy about the sort argument not being a literal string.
# Although this doesn't satisfy Mypy, at least make sure sort is either
# "created asc" or "created desc"
if sort + " " + sort_order == "created asc":
sort_literal = "created_asc"
else:
sort_literal = "created desc"
logged_books: LoggedBooksData = Bookshelves.get_users_logged_books(
self.user.get_username(),
bookshelf_id=Bookshelves.PRESET_BOOKSHELVES[shelf],
page=page,
limit=limit,
sort=sort_literal, # type: ignore[arg-type]
checkin_year=year,
q=q,
)
return logged_books
@public
def get_read_status(work_key, username):
work_id = extract_numeric_id_from_olid(work_key.split('/')[-1])
return Bookshelves.get_users_read_status_of_work(username, work_id)
@public
def add_read_statuses(username, works):
work_ids = [extract_numeric_id_from_olid(work.key.split('/')[-1]) for work in works]
results = Bookshelves.get_users_read_status_of_works(username, work_ids)
results_map = {}
for result in results:
results_map[f"OL{result['work_id']}W"] = result['bookshelf_id']
for work in works:
work_olid = work.key.split('/')[-1]
work['readinglog'] = results_map.get(work_olid)
return works
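# Illustrative shape (hypothetical values) of the lookup built above: work
# OLIDs map to numeric Bookshelves shelf ids, which are then attached to each
# work as `readinglog`, e.g. {"OL123W": 1, "OL456W": 3}.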
class PatronBooknotes:
"""Manages the patron's book notes and observations"""
def __init__(self, user: User) -> None:
self.user = user
self.username = user.key.split('/')[-1]
def get_notes(self, limit: int = RESULTS_PER_PAGE, page: int = 1) -> list:
notes = Booknotes.get_notes_grouped_by_work(
self.username, limit=limit, page=page
)
for entry in notes:
entry['work_key'] = f"/works/OL{entry['work_id']}W"
entry['work'] = self._get_work(entry['work_key'])
entry['work_details'] = self._get_work_details(entry['work'])
entry['notes'] = {i['edition_id']: i['notes'] for i in entry['notes']}
entry['editions'] = {
k: web.ctx.site.get(f'/books/OL{k}M')
for k in entry['notes']
if k != Booknotes.NULL_EDITION_VALUE
}
return notes
def get_observations(self, limit: int = RESULTS_PER_PAGE, page: int = 1) -> list:
observations = Observations.get_observations_grouped_by_work(
self.username, limit=limit, page=page
)
for entry in observations:
entry['work_key'] = f"/works/OL{entry['work_id']}W"
entry['work'] = self._get_work(entry['work_key'])
entry['work_details'] = self._get_work_details(entry['work'])
ids = {}
for item in entry['observations']:
ids[item['observation_type']] = item['observation_values']
entry['observations'] = convert_observation_ids(ids)
return observations
def _get_work(self, work_key: str) -> "Work | None":
return web.ctx.site.get(work_key)
def _get_work_details(
self, work: "Work"
) -> dict[str, list[str] | str | int | None]:
author_keys = [a.author.key for a in work.get('authors', [])]
return {
'cover_url': (
work.get_cover_url('S')
or 'https://openlibrary.org/images/icons/avatar_book-sm.png'
),
'title': work.get('title'),
'authors': [a.name for a in web.ctx.site.get_many(author_keys)],
'first_publish_year': work.first_publish_year or None,
}
@classmethod
def get_counts(cls, username: str) -> dict[str, int]:
return {
'notes': Booknotes.count_works_with_notes_by_user(username),
'observations': Observations.count_distinct_observations(username),
}
| 22,798 | Python | .py | 536 | 31.733209 | 91 | 0.571203 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
443 | covers.py | internetarchive_openlibrary/openlibrary/plugins/upstream/covers.py | """Handle book cover/author photo upload.
"""
from logging import getLogger
import requests
import web
from io import BytesIO
from infogami.utils import delegate
from infogami.utils.view import safeint
from openlibrary import accounts
from openlibrary.plugins.upstream.models import Image
from openlibrary.plugins.upstream.utils import (
get_coverstore_url,
get_coverstore_public_url,
render_template,
)
logger = getLogger("openlibrary.plugins.upstream.covers")
def setup():
pass
class add_cover(delegate.page):
path = r"(/books/OL\d+M)/add-cover"
cover_category = "b"
def GET(self, key):
book = web.ctx.site.get(key)
return render_template('covers/add', book)
def POST(self, key):
book = web.ctx.site.get(key)
if not book:
raise web.notfound("")
user = accounts.get_current_user()
if user and user.is_read_only():
raise web.forbidden(message="Patron not permitted to upload images")
i = web.input(file={}, url="")
# remove references to field storage objects
web.ctx.pop("_fieldstorage", None)
data = self.upload(key, i)
if coverid := data.get('id'):
if isinstance(i.url, bytes):
i.url = i.url.decode("utf-8")
self.save(book, coverid, url=i.url)
cover = Image(web.ctx.site, "b", coverid)
return render_template("covers/saved", cover)
else:
return render_template("covers/add", book, {'url': i.url}, data)
def upload(self, key, i):
"""Uploads a cover to coverstore and returns the response."""
olid = key.split("/")[-1]
if i.file is not None and hasattr(i.file, 'file'):
data = i.file.file
else:
data = None
if i.url and i.url.strip() == "https://":
i.url = ""
user = accounts.get_current_user()
params = {
"author": user and user.key,
"source_url": i.url,
"olid": olid,
"ip": web.ctx.ip,
}
upload_url = f'{get_coverstore_url()}/{self.cover_category}/upload2'
if upload_url.startswith("//"):
upload_url = "http:" + upload_url
try:
files = {'data': data}
response = requests.post(upload_url, data=params, files=files)
return web.storage(response.json())
except requests.HTTPError as e:
logger.exception("Covers upload failed")
return web.storage({'error': str(e)})
def save(self, book, coverid, url=None):
book.covers = [coverid] + [cover.id for cover in book.get_covers()]
book._save(
f'{get_coverstore_public_url()}/b/id/{coverid}-S.jpg',
action="add-cover",
data={"url": url},
)
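# A minimal sketch (hypothetical file and OLID; the endpoint and fields mirror
# upload() above) of an equivalent direct coverstore call:
#     import requests
#     with open("cover.jpg", "rb") as f:
#         requests.post(
#             "https://covers.openlibrary.org/b/upload2",
#             data={"olid": "OL123M", "source_url": "", "ip": "127.0.0.1"},
#             files={"data": f},
#         )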
class add_work_cover(add_cover):
path = r"(/works/OL\d+W)/add-cover"
cover_category = "w"
def upload(self, key, i):
if "coverid" in i and safeint(i.coverid):
return web.storage(id=int(i.coverid))
else:
return add_cover.upload(self, key, i)
class add_photo(add_cover):
path = r"(/authors/OL\d+A)/add-photo"
cover_category = "a"
def save(self, author, photoid, url=None):
author.photos = [photoid] + [photo.id for photo in author.get_photos()]
author._save("Added new photo", action="add-photo", data={"url": url})
class manage_covers(delegate.page):
path = r"(/books/OL\d+M)/manage-covers"
def GET(self, key):
book = web.ctx.site.get(key)
if not book:
raise web.notfound()
return render_template("covers/manage", key, self.get_images(book))
def get_images(self, book):
return book.get_covers()
def get_image(self, book):
return book.get_cover()
def save_images(self, book, covers):
book.covers = covers
book._save('Update covers')
def POST(self, key):
book = web.ctx.site.get(key)
if not book:
raise web.notfound()
images = web.input(image=[]).image
if '-' in images:
images = [int(id) for id in images[: images.index('-')]]
self.save_images(book, images)
return render_template("covers/saved", self.get_image(book), showinfo=False)
else:
# ERROR
pass
class manage_work_covers(manage_covers):
path = r"(/works/OL\d+W)/manage-covers"
class manage_photos(manage_covers):
path = r"(/authors/OL\d+A)/manage-photos"
def get_images(self, author):
return author.get_photos()
def get_image(self, author):
return author.get_photo()
def save_images(self, author, photos):
author.photos = photos
author._save('Update photos')
| 4,823 | Python | .py | 127 | 29.669291 | 88 | 0.599957 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
444 | code.py | internetarchive_openlibrary/openlibrary/plugins/upstream/code.py | """Upstream customizations."""
import datetime
import hashlib
import json
import os.path
import random
import web
from infogami import config
from infogami.core import code as core
from infogami.plugins.api.code import jsonapi, make_query
from infogami.plugins.api.code import request as infogami_request
from infogami.infobase import client
from infogami.utils import delegate, app, types
from infogami.utils.view import public, safeint, render
from infogami.utils.view import render_template # used for its side effects
from infogami.utils.context import context
from openlibrary import accounts
from openlibrary.plugins.upstream import addbook, addtag, covers, models, utils
from openlibrary.plugins.upstream import spamcheck
from openlibrary.plugins.upstream import merge_authors
from openlibrary.plugins.upstream import edits
from openlibrary.plugins.upstream import checkins
from openlibrary.plugins.upstream import borrow, recentchanges # TODO: unused imports?
from openlibrary.plugins.upstream.utils import render_component
if not config.get('coverstore_url'):
config.coverstore_url = "https://covers.openlibrary.org" # type: ignore[attr-defined]
import logging
logger = logging.getLogger('openlibrary.plugins.upstream.code')
# Note: This is done in web_nginx.conf on production; this endpoint is
# only used in development/gitpod.
class static(delegate.page):
path = "/images/.*"
def GET(self):
return web.seeother(f'/static{web.ctx.path}')
class history(delegate.mode):
"""Overwrite ?m=history to remove IP"""
encoding = "json"
@jsonapi
def GET(self, path):
query = make_query(web.input(), required_keys=['author', 'offset', 'limit'])
query['key'] = path
query['sort'] = '-created'
# Possibly use infogami.plugins.upstream.utils get_changes to avoid json load/dump?
history = json.loads(
infogami_request('/versions', data={'query': json.dumps(query)})
)
        for row in history:
row.pop("ip")
return json.dumps(history)
class edit(core.edit):
"""Overwrite ?m=edit behaviour for author, book, work, and people pages."""
def GET(self, key):
page = web.ctx.site.get(key)
editable_keys_re = web.re_compile(
r"/(authors|books|works|tags|(people/[^/]+/)?lists)/OL.*"
)
if editable_keys_re.match(key):
if page is None:
return web.seeother(key)
else:
return addbook.safe_seeother(page.url(suffix="/edit"))
else:
return core.edit.GET(self, key)
def POST(self, key):
if web.re_compile('/(people/[^/]+)').match(key) and spamcheck.is_spam():
return render_template(
'message.html', 'Oops', 'Something went wrong. Please try again later.'
)
return core.edit.POST(self, key)
# handlers for change photo and change cover
class change_cover(delegate.mode):
path = r"(/books/OL\d+M)/cover"
def GET(self, key):
page = web.ctx.site.get(key)
if page is None or page.type.key not in ['/type/edition', '/type/author']:
raise web.seeother(key)
return render.change_cover(page)
class change_photo(change_cover):
path = r"(/authors/OL\d+A)/photo"
del delegate.modes[
'change_cover'
] # delete change_cover mode added by openlibrary plugin
class components_test(delegate.page):
path = "/_dev/components/HelloWorld"
def GET(self):
return render_component('HelloWorld') + render_component('HelloWorld')
class library_explorer(delegate.page):
path = "/explore"
def GET(self):
return render_template('library_explorer')
class merge_work(delegate.page):
path = "/works/merge"
def GET(self):
i = web.input(records='', mrid=None, primary=None)
user = web.ctx.site.get_user()
has_access = user and (
(user.is_admin() or user.is_librarian()) or user.is_super_librarian()
)
if not has_access:
raise web.HTTPError('403 Forbidden')
optional_kwargs = {}
if not (user.is_admin() or user.is_super_librarian()):
optional_kwargs['can_merge'] = 'false'
return render_template(
'merge/works', mrid=i.mrid, primary=i.primary, **optional_kwargs
)
@web.memoize
@public
def vendor_js():
pardir = os.path.pardir
path = os.path.abspath(
os.path.join(
__file__,
pardir,
pardir,
pardir,
pardir,
'static',
'upstream',
'js',
'vendor.js',
)
)
with open(path, 'rb') as in_file:
digest = hashlib.md5(in_file.read()).hexdigest()
return '/static/upstream/js/vendor.js?v=' + digest
@web.memoize
@public
def static_url(path):
"""Takes path relative to static/ and constructs url to that resource with hash."""
pardir = os.path.pardir
fullpath = os.path.abspath(
os.path.join(__file__, pardir, pardir, pardir, pardir, "static", path)
)
with open(fullpath, 'rb') as in_file:
digest = hashlib.md5(in_file.read()).hexdigest()
return f"/static/{path}?v={digest}"
class DynamicDocument:
"""Dynamic document is created by concatenating various rawtext documents in the DB.
Used to generate combined js/css using multiple js/css files in the system.
"""
def __init__(self, root):
self.root = web.rstrips(root, '/')
self.docs = None
self._text = None
self.last_modified = None
def update(self):
keys = web.ctx.site.things({'type': '/type/rawtext', 'key~': self.root + '/*'})
docs = sorted(web.ctx.site.get_many(keys), key=lambda doc: doc.key)
if docs:
self.last_modified = min(doc.last_modified for doc in docs)
self._text = "\n\n".join(doc.get('body', '') for doc in docs)
else:
self.last_modified = datetime.datetime.utcnow()
self._text = ""
def get_text(self):
"""Returns text of the combined documents"""
if self._text is None:
self.update()
return self._text
def md5(self):
"""Returns md5 checksum of the combined documents"""
return hashlib.md5(self.get_text().encode('utf-8')).hexdigest()
def create_dynamic_document(url, prefix):
"""Creates a handler for `url` for servering combined js/css for `prefix/*` pages"""
doc = DynamicDocument(prefix)
if url.endswith('.js'):
content_type = "text/javascript"
elif url.endswith(".css"):
content_type = "text/css"
else:
content_type = "text/plain"
class page(delegate.page):
"""Handler for serving the combined content."""
path = "__registered_later_without_using_this__"
def GET(self):
i = web.input(v=None)
v = doc.md5()
if v != i.v:
raise web.seeother(web.changequery(v=v))
if web.modified(etag=v):
oneyear = 365 * 24 * 3600
web.header("Content-Type", content_type)
web.header("Cache-Control", "Public, max-age=%d" % oneyear)
web.lastmodified(doc.last_modified)
web.expires(oneyear)
return delegate.RawText(doc.get_text())
def url(self):
return url + "?v=" + doc.md5()
def reload(self):
doc.update()
class hook(client.hook):
"""Hook to update the DynamicDocument when any of the source pages is updated."""
def on_new_version(self, page):
if page.key.startswith(doc.root):
doc.update()
# register the special page
delegate.pages[url] = {}
delegate.pages[url][None] = page
return page
all_js = create_dynamic_document("/js/all.js", config.get("js_root", "/js"))
web.template.Template.globals['all_js'] = all_js()
all_css = create_dynamic_document("/css/all.css", config.get("css_root", "/css"))
web.template.Template.globals['all_css'] = all_css()
def reload():
"""Reload all.css and all.js"""
all_css().reload()
all_js().reload()
def user_can_revert_records():
user = web.ctx.site.get_user()
return user and (user.is_admin() or user.is_super_librarian())
@public
def get_document(key, limit_redirs=5):
doc = None
for i in range(limit_redirs):
doc = web.ctx.site.get(key)
if doc is None:
return None
if doc.type.key == "/type/redirect":
key = doc.location
else:
return doc
return doc
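# Illustrative behaviour (hypothetical keys): if /books/OL1M is a
# /type/redirect pointing at /books/OL2M, get_document("/books/OL1M") returns
# the /books/OL2M document, following at most `limit_redirs` redirects.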
class revert(delegate.mode):
def GET(self, key):
raise web.seeother(web.changequery(m=None))
def POST(self, key):
i = web.input("v", _comment=None)
v = i.v and safeint(i.v, None)
if v is None:
raise web.seeother(web.changequery({}))
if not web.ctx.site.can_write(key) or not user_can_revert_records():
return render.permission_denied(
web.ctx.fullpath, "Permission denied to edit " + key + "."
)
thing = web.ctx.site.get(key, i.v)
if not thing:
raise web.notfound()
def revert(thing):
if thing.type.key == "/type/delete" and thing.revision > 1:
prev = web.ctx.site.get(thing.key, thing.revision - 1)
if prev.type.key in ["/type/delete", "/type/redirect"]:
return revert(prev)
else:
prev._save("revert to revision %d" % prev.revision)
return prev
elif thing.type.key == "/type/redirect":
redirect = web.ctx.site.get(thing.location)
if redirect and redirect.type.key not in [
"/type/delete",
"/type/redirect",
]:
return redirect
else:
# bad redirect. Try the previous revision
prev = web.ctx.site.get(thing.key, thing.revision - 1)
return revert(prev)
else:
return thing
def process(value):
if isinstance(value, list):
return [process(v) for v in value]
elif isinstance(value, client.Thing):
if value.key:
if value.type.key in ['/type/delete', '/type/revert']:
return revert(value)
else:
return value
else:
for k in value:
value[k] = process(value[k])
return value
else:
return value
for k in thing:
thing[k] = process(thing[k])
comment = i._comment or "reverted to revision %d" % v
thing._save(comment)
raise web.seeother(key)
def setup():
"""Setup for upstream plugin"""
models.setup()
utils.setup()
addbook.setup()
addtag.setup()
covers.setup()
merge_authors.setup()
# merge_works.setup() # ILE code
edits.setup()
checkins.setup()
from openlibrary.plugins.upstream import data, jsdef
data.setup()
# setup template globals
from openlibrary.i18n import ugettext, ungettext, gettext_territory
web.template.Template.globals.update(
{
"gettext": ugettext,
"ugettext": ugettext,
"_": ugettext,
"ungettext": ungettext,
"gettext_territory": gettext_territory,
"random": random.Random(),
"commify": web.commify,
"group": web.group,
"storage": web.storage,
"all": all,
"any": any,
"locals": locals,
}
)
web.template.STATEMENT_NODES["jsdef"] = jsdef.JSDefNode
setup()
| 11,994 | Python | .py | 312 | 29.599359 | 91 | 0.60176 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
445 | borrow.py | internetarchive_openlibrary/openlibrary/plugins/upstream/borrow.py | """Handlers for borrowing books"""
import copy
import hashlib
import hmac
import json
import logging
import re
from typing import Literal
import requests
import time
from datetime import datetime
import web
from infogami import config
from infogami.utils import delegate
from infogami.utils.view import public, render_template, add_flash_message
from infogami.infobase.utils import parse_datetime
from openlibrary.core import models
from openlibrary.core import stats
from openlibrary.core import lending
from openlibrary.core import vendors
from openlibrary.core import waitinglist
from openlibrary.i18n import gettext as _
from openlibrary.accounts.model import OpenLibraryAccount
from openlibrary import accounts
from openlibrary.utils import dateutil
from lxml import etree
import urllib
import lxml.etree
logger = logging.getLogger("openlibrary.borrow")
# ######### Constants
lending_library_subject = 'Lending library'
in_library_subject = 'In library'
lending_subjects = {lending_library_subject, in_library_subject}
loanstatus_url = config.get('loanstatus_url')
# ACS4 resource ids start with 'urn:uuid:'. The meta.xml on archive.org
# adds 'acs:epub:' or 'acs:pdf:' to distinguish the file type.
acs_resource_id_prefixes = ['urn:uuid:', 'acs:epub:', 'acs:pdf:']
# Max loans a user can have at once
user_max_loans = 5
# When we generate a loan offer (.acsm) for a user we assume that the loan has occurred.
# Once the loan is fulfilled inside Digital Editions, the book status server will know
# the loan has occurred. We allow this timeout so that we don't delete the OL loan
# record before fulfillment just because we can't yet find it in the book status server.
# $$$ If a user borrows an ACS4 book and immediately returns it, the book loan will show as
# "not yet downloaded" for the duration of the timeout.
# BookReader loan status is always current.
loan_fulfillment_timeout_seconds = 60 * 5
# How long the auth token given to the BookReader should last. After the auth token
# expires the BookReader will not be able to access the book. The BookReader polls
# OL periodically to get fresh tokens.
BOOKREADER_AUTH_SECONDS = dateutil.MINUTE_SECS * 10
READER_AUTH_SECONDS = dateutil.MINUTE_SECS * 2
# Base URL for BookReader
try:
bookreader_host = config.bookreader_host # type: ignore[attr-defined]
except AttributeError:
bookreader_host = 'archive.org'
bookreader_stream_base = f'https://{bookreader_host}/stream'
# ######### Page Handlers
# Handler for /books/{bookid}/{title}/borrow
class checkout_with_ocaid(delegate.page):
path = "/borrow/ia/(.*)"
def GET(self, ocaid):
"""Redirect shim: Translate an IA identifier into an OL identifier and
then redirects user to the canonical OL borrow page.
"""
i = web.input()
params = urllib.parse.urlencode(i)
ia_edition = web.ctx.site.get('/books/ia:%s' % ocaid)
if not ia_edition:
raise web.notfound()
edition = web.ctx.site.get(ia_edition.location)
url = '%s/x/borrow' % edition.key
raise web.seeother(url + '?' + params)
def POST(self, ocaid):
"""Redirect shim: Translate an IA identifier into an OL identifier and
then forwards a borrow request to the canonical borrow
endpoint with this OL identifier.
"""
ia_edition = web.ctx.site.get('/books/ia:%s' % ocaid)
if not ia_edition:
raise web.notfound()
borrow().POST(ia_edition.location)
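# Illustrative flow (hypothetical identifiers): GET /borrow/ia/somebook00unkn
# looks up /books/ia:somebook00unkn, resolves its location (e.g. /books/OL123M),
# and redirects to /books/OL123M/x/borrow, preserving any query parameters.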
# Handler for /books/{bookid}/{title}/borrow
class borrow(delegate.page):
path = "(/books/.*)/borrow"
def GET(self, key):
return self.POST(key)
def POST(self, key): # noqa: PLR0915
"""Called when the user wants to borrow the edition"""
i = web.input(
action='borrow',
format=None,
ol_host=None,
_autoReadAloud=None,
q="",
redirect="",
)
ol_host = i.ol_host or 'openlibrary.org'
action = i.action
edition = web.ctx.site.get(key)
if not edition:
raise web.notfound()
from openlibrary.book_providers import get_book_provider
if action == 'locate':
raise web.seeother(edition.get_worldcat_url())
# Direct to the first web book if at least one is available.
if (
action in ["borrow", "read"]
and (provider := get_book_provider(edition))
and provider.short_name != "ia"
and (acquisitions := provider.get_acquisitions(edition))
and acquisitions[0].access == "open-access"
):
stats.increment('ol.loans.webbook')
raise web.seeother(acquisitions[0].url)
archive_url = get_bookreader_stream_url(edition.ocaid) + '?ref=ol'
if i._autoReadAloud is not None:
archive_url += '&_autoReadAloud=show'
if i.q:
_q = urllib.parse.quote(i.q, safe='')
raise web.seeother(archive_url + "#page/-/mode/2up/search/%s" % _q)
        # Make a call to availability v2 and, if the result status is `open`,
        # redirect to the bookreader
response = lending.get_availability_of_ocaid(edition.ocaid)
availability = response[edition.ocaid] if response else {}
if availability and availability['status'] == 'open':
raise web.seeother(archive_url)
error_redirect = archive_url
edition_redirect = urllib.parse.quote(i.redirect or edition.url())
user = accounts.get_current_user()
if user:
account = OpenLibraryAccount.get_by_email(user.email)
ia_itemname = account.itemname if account else None
s3_keys = web.ctx.site.store.get(account._key).get('s3_keys')
lending.get_cached_loans_of_user.memcache_delete(
user.key, {}
) # invalidate cache for user loans
if not user or not ia_itemname or not s3_keys:
web.setcookie(config.login_cookie_name, "", expires=-1)
redirect_url = (
f"/account/login?redirect={edition_redirect}/borrow?action={action}"
)
if i._autoReadAloud is not None:
redirect_url += '&_autoReadAloud=' + i._autoReadAloud
raise web.seeother(redirect_url)
if action == 'return':
lending.s3_loan_api(s3_keys, ocaid=edition.ocaid, action='return_loan')
stats.increment('ol.loans.return')
edition.update_loan_status()
user.update_loan_status()
raise web.seeother(edition_redirect)
elif action == 'join-waitinglist':
lending.get_cached_user_waiting_loans.memcache_delete(
user.key, {}
) # invalidate cache for user waiting loans
lending.s3_loan_api(s3_keys, ocaid=edition.ocaid, action='join_waitlist')
stats.increment('ol.loans.joinWaitlist')
raise web.redirect(edition_redirect)
elif action == 'leave-waitinglist':
lending.get_cached_user_waiting_loans.memcache_delete(
user.key, {}
) # invalidate cache for user waiting loans
lending.s3_loan_api(s3_keys, ocaid=edition.ocaid, action='leave_waitlist')
stats.increment('ol.loans.leaveWaitlist')
raise web.redirect(edition_redirect)
elif action in ('borrow', 'browse') and not user.has_borrowed(edition):
borrow_access = user_can_borrow_edition(user, edition)
if not (s3_keys and borrow_access):
stats.increment('ol.loans.outdatedAvailabilityStatus')
raise web.seeother(error_redirect)
try:
lending.s3_loan_api(
s3_keys, ocaid=edition.ocaid, action='%s_book' % borrow_access
)
stats.increment('ol.loans.bookreader')
stats.increment('ol.loans.%s' % borrow_access)
except lending.PatronAccessException as e:
stats.increment('ol.loans.blocked')
add_flash_message(
'error',
_(
'Your account has hit a lending limit. Please try again later or contact [email protected].'
),
)
raise web.seeother(key)
if action in ('borrow', 'browse', 'read'):
bookPath = '/stream/' + edition.ocaid
if i._autoReadAloud is not None:
bookPath += '?_autoReadAloud=show'
# Look for loans for this book
user.update_loan_status()
loans = get_loans(user)
for loan in loans:
if loan['book'] == edition.key:
raise web.seeother(
make_bookreader_auth_link(
loan['_key'],
edition.ocaid,
bookPath,
ol_host,
ia_userid=ia_itemname,
)
)
# Action not recognized
raise web.seeother(error_redirect)
# Handler for /books/{bookid}/{title}/_borrow_status
class borrow_status(delegate.page):
path = "(/books/.*)/_borrow_status"
def GET(self, key):
global lending_subjects
i = web.input(callback=None)
edition = web.ctx.site.get(key)
if not edition:
raise web.notfound()
edition.update_loan_status()
available_formats = [
loan['resource_type'] for loan in edition.get_available_loans()
]
loan_available = len(available_formats) > 0
subjects = set()
for work in edition.get('works', []):
for subject in work.get_subjects():
if subject in lending_subjects:
subjects.add(subject)
output = {
'id': key,
'loan_available': loan_available,
'available_formats': available_formats,
'lending_subjects': list(subjects),
}
output_text = json.dumps(output)
content_type = "application/json"
if i.callback:
content_type = "text/javascript"
output_text = f'{i.callback} ( {output_text} );'
return delegate.RawText(output_text, content_type=content_type)
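# A minimal sketch (hypothetical values) of the JSON written by this handler:
#     {"id": "/books/OL123M",
#      "loan_available": true,
#      "available_formats": ["bookreader"],
#      "lending_subjects": ["Lending library"]}
# With ?callback=cb the same payload is wrapped as `cb ( {...} );`.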
class ia_loan_status(delegate.page):
path = r"/ia_loan_status/(.*)"
def GET(self, itemid):
d = get_borrow_status(itemid, include_resources=False, include_ia=False)
return delegate.RawText(json.dumps(d), content_type="application/json")
@public
def get_borrow_status(itemid, include_resources=True, include_ia=True, edition=None):
"""Returns borrow status for each of the sources and formats.
    If the optional argument edition is provided, it uses that edition instead
    of finding the edition from itemid. This is done for performance reasons.
"""
loan = lending.get_loan(itemid)
has_loan = bool(loan)
if edition:
editions = [edition]
else:
edition_keys = web.ctx.site.things({"type": "/type/edition", "ocaid": itemid})
editions = web.ctx.site.get_many(edition_keys)
has_waitinglist = editions and any(e.get_waitinglist_size() > 0 for e in editions)
d = {
'identifier': itemid,
'checkedout': has_loan or has_waitinglist,
'has_loan': has_loan,
'has_waitinglist': has_waitinglist,
}
if include_ia:
ia_checkedout = lending.is_loaned_out_on_ia(itemid)
d['checkedout'] = d['checkedout'] or ia_checkedout
d['checkedout_on_ia'] = ia_checkedout
if include_resources:
d.update(
{
'resource_bookreader': 'absent',
'resource_pdf': 'absent',
'resource_epub': 'absent',
}
)
if editions:
resources = editions[0].get_lending_resources()
resource_pattern = r'acs:(\w+):(.*)'
for resource_urn in resources:
if resource_urn.startswith('acs:'):
(resource_type, resource_id) = re.match(
resource_pattern, resource_urn
).groups()
else:
resource_type, resource_id = "bookreader", resource_urn
resource_type = "resource_" + resource_type
if is_loaned_out(resource_id):
d[resource_type] = 'checkedout'
else:
d[resource_type] = 'available'
return web.storage(d)
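# Illustrative sketch of the storage object returned above (added as
# documentation; the identifier and field values are hypothetical examples):
#
#   >>> get_borrow_status('foo00bar', include_resources=True)   # doctest: +SKIP
#   <Storage {'identifier': 'foo00bar', 'checkedout': False, 'has_loan': False,
#             'has_waitinglist': False, 'checkedout_on_ia': False,
#             'resource_bookreader': 'available', 'resource_pdf': 'absent',
#             'resource_epub': 'absent'}>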
# Handler for /ia_auth/{itemid}
class ia_auth(delegate.page):
path = r"/ia_auth/(.*)"
def GET(self, item_id):
i = web.input(_method='GET', callback=None, loan=None, token=None)
content_type = "application/json"
# check that identifier is valid
user = accounts.get_current_user()
auth_json = json.dumps(get_ia_auth_dict(user, item_id, i.loan, i.token))
output = auth_json
if i.callback:
content_type = "text/javascript"
output = f'{i.callback} ( {output} );'
return delegate.RawText(output, content_type=content_type)
# Handler for /borrow/receive_notification - receive ACS4 status update notifications
class borrow_receive_notification(delegate.page):
path = r"/borrow/receive_notification"
def GET(self):
web.header('Content-Type', 'application/json')
output = json.dumps({'success': False, 'error': 'Only POST is supported'})
return delegate.RawText(output, content_type='application/json')
def POST(self):
data = web.data()
try:
etree.fromstring(data, parser=lxml.etree.XMLParser(resolve_entities=False))
output = json.dumps({'success': True})
except Exception as e:
output = json.dumps({'success': False, 'error': str(e)})
return delegate.RawText(output, content_type='application/json')
class ia_borrow_notify(delegate.page):
"""Invoked by archive.org to notify about change in loan/waiting list
status of an item.
The payload will be of the following format:
{"identifier": "foo00bar"}
"""
path = "/borrow/notify"
def POST(self):
payload = web.data()
d = json.loads(payload)
identifier = d and d.get('identifier')
if identifier:
lending.sync_loan(identifier)
waitinglist.on_waitinglist_update(identifier)
# ######### Public Functions
@public
def is_loan_available(edition, type) -> bool:
resource_id = edition.get_lending_resource_id(type)
if not resource_id:
return False
return not is_loaned_out(resource_id)
@public
def datetime_from_isoformat(expiry):
"""Returns datetime object, or None"""
return None if expiry is None else parse_datetime(expiry)
@public
def datetime_from_utc_timestamp(seconds):
return datetime.utcfromtimestamp(seconds)
@public
def can_return_resource_type(resource_type: str) -> bool:
"""Returns true if this resource can be returned from the OL site."""
return resource_type.startswith('bookreader')
@public
def ia_identifier_is_valid(item_id: str) -> bool:
"""Returns false if the item id is obviously malformed. Not currently checking length."""
return bool(re.match(r'^[a-zA-Z0-9][a-zA-Z0-9\.\-_]*$', item_id))
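# Examples of the identifier check above (illustrative; follows directly from
# the regular expression):
#
#   >>> ia_identifier_is_valid('foo00bar')
#   True
#   >>> ia_identifier_is_valid('item_1.v2-final')
#   True
#   >>> ia_identifier_is_valid('../etc/passwd')   # must start with an alphanumeric
#   False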
@public
def get_bookreader_stream_url(itemid: str) -> str:
return bookreader_stream_base + '/' + itemid
@public
def get_bookreader_host() -> str:
return bookreader_host
# ######### Helper Functions
def get_all_store_values(**query) -> list:
"""Get all values by paging through all results. Note: adds store_key with the row id."""
query = copy.deepcopy(query)
if 'limit' not in query:
query['limit'] = 500
query['offset'] = 0
values = []
got_all = False
while not got_all:
# new_values = web.ctx.site.store.values(**query)
new_items = web.ctx.site.store.items(**query)
for new_item in new_items:
new_item[1].update({'store_key': new_item[0]})
# XXX-Anand: Handling the existing loans
new_item[1].setdefault("ocaid", None)
values.append(new_item[1])
if len(new_items) < query['limit']:
got_all = True
query['offset'] += len(new_items)
return values
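# Usage sketch for the paging helper above (illustrative; the query kwargs are
# hypothetical and the call needs an active web.ctx site connection):
#
#   >>> loans = get_all_store_values(type='/type/loan')          # doctest: +SKIP
#   >>> [loan['store_key'] for loan in loans]                    # doctest: +SKIP
#   ['loan-abc123', ...]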
def get_all_loans() -> list:
# return web.ctx.site.store.values(type='/type/loan')
return get_all_store_values(type='/type/loan')
def get_loans(user):
return lending.get_loans_of_user(user.key)
def get_edition_loans(edition):
if edition.ocaid:
loan = lending.get_loan(edition.ocaid)
if loan:
return [loan]
return []
def get_loan_link(edition, type):
"""Get the loan link, which may be an ACS4 link or BookReader link depending on the loan type"""
resource_id = edition.get_lending_resource_id(type)
if type == 'bookreader':
# link to bookreader
return (resource_id, get_bookreader_stream_url(edition.ocaid))
    raise Exception(
        'Unknown resource type %s for loan of edition %s' % (type, edition.key)
    )
def get_loan_key(resource_id: str):
"""Get the key for the loan associated with the resource_id"""
# Find loan in OL
loan_keys = web.ctx.site.store.query('/type/loan', 'resource_id', resource_id)
if not loan_keys:
# No local records
return None
# Only support single loan of resource at the moment
if len(loan_keys) > 1:
# raise Exception('Found too many local loan records for resource %s' % resource_id)
logger.error(
"Found too many loan records for resource %s: %s", resource_id, loan_keys
)
loan_key = loan_keys[0]['key']
return loan_key
def get_loan_status(resource_id: str):
"""Should only be used for ACS4 loans. Get the status of the loan from the ACS4 server,
via the Book Status Server (BSS)
Typical BSS response for ACS4 looks like this:
[
{
"loanuntil": "2010-06-25T00:52:04",
"resourceid": "a8b600e2-32fd-4aeb-a2b5-641103583254",
"returned": "F",
"until": "2010-06-25T00:52:04"
}
]
"""
global loanstatus_url
if not loanstatus_url:
raise Exception('No loanstatus_url -- cannot check loan status')
url = f'{loanstatus_url}/is_loaned_out/{resource_id}'
try:
response = requests.get(url).json()
if len(response) == 0:
# No outstanding loans
return None
else:
return response[0]
except OSError:
# status server is down
# $$$ be more graceful
# raise Exception('Loan status server not available - tried at %s', url)
# XXX-Anand: don't crash
return None
raise Exception(
'Error communicating with loan status server for resource %s' % resource_id
)
def get_all_loaned_out():
"""Returns array of BSS status for all resources currently loaned out (according to BSS)"""
global loanstatus_url
if not loanstatus_url:
raise Exception('No loanstatus_url -- cannot check loan status')
url = '%s/is_loaned_out/' % loanstatus_url
try:
return requests.get(url).json()
except OSError:
raise Exception('Loan status server not available')
def is_loaned_out(resource_id: str) -> bool | None:
# bookreader loan status is stored in the private data store
# Check our local status
loan_key = get_loan_key(resource_id)
if not loan_key:
# No loan recorded
identifier = resource_id[len('bookreader:') :]
return lending.is_loaned_out_on_ia(identifier)
# Find the loan and check if it has expired
loan = web.ctx.site.store.get(loan_key)
    # Loaned out only if the loan record exists and has not yet expired
    return bool(loan and datetime_from_isoformat(loan['expiry']) >= datetime.utcnow())
def is_loaned_out_from_status(status) -> bool:
return status and status['returned'] != 'T'
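# Illustrative check against the sample BSS payload shown in the
# get_loan_status() docstring above:
#
#   >>> is_loaned_out_from_status({'loanuntil': '2010-06-25T00:52:04',
#   ...                            'resourceid': 'a8b600e2-32fd-4aeb-a2b5-641103583254',
#   ...                            'returned': 'F',
#   ...                            'until': '2010-06-25T00:52:04'})
#   True
#   >>> bool(is_loaned_out_from_status(None))   # no BSS record -> falsy
#   False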
def update_loan_status(resource_id: str) -> None:
"""Update the loan status in OL based off status in ACS4. Used to check for early returns."""
# Get local loan record
loan_key = get_loan_key(resource_id)
if not loan_key:
# No loan recorded, nothing to do
return
loan = web.ctx.site.store.get(loan_key)
_update_loan_status(loan_key, loan, None)
def _update_loan_status(loan_key, loan, bss_status=None) -> None:
# If this is a BookReader loan, local version of loan is authoritative
if loan['resource_type'] == 'bookreader':
# delete loan record if has expired
# $$$ consolidate logic for checking expiry. keep loan record for some time after it expires.
if loan['expiry'] and loan['expiry'] < datetime.utcnow().isoformat():
logger.info("%s: loan expired. deleting...", loan_key)
web.ctx.site.store.delete(loan_key)
return
# Load status from book status server
if bss_status is None:
bss_status = get_loan_status(loan['resource_id'])
update_loan_from_bss_status(loan_key, loan, bss_status)
def update_loan_from_bss_status(loan_key, loan, status) -> None:
"""Update the loan status in the private data store from BSS status"""
global loan_fulfillment_timeout_seconds
if not resource_uses_bss(loan['resource_id']):
raise Exception(
'Tried to update loan %s with ACS4/BSS status when it should not use BSS'
% loan_key
)
if not is_loaned_out_from_status(status):
# No loan record, or returned or expired
# Check if our local loan record is fresh -- allow some time for fulfillment
if loan['expiry'] is None:
now = time.time()
# $$$ loan_at in the store is in timestamp seconds until updated (from BSS) to isoformat string
if now - loan['loaned_at'] < loan_fulfillment_timeout_seconds:
# Don't delete the loan record - give it time to complete
return
# Was returned, expired, or timed out
web.ctx.site.store.delete(loan_key)
logger.info("%s: loan returned or expired or timedout, deleting...", loan_key)
return
# Book has non-returned status
# Update expiry
if loan['expiry'] != status['until']:
loan['expiry'] = status['until']
web.ctx.site.store[loan_key] = loan
logger.info("%s: updated expiry to %s", loan_key, loan['expiry'])
def update_all_loan_status() -> None:
"""Update the status of all loans known to Open Library by cross-checking with the book status server.
This is called once an hour from a cron job.
"""
# Get book status records of everything loaned out
bss_statuses = get_all_loaned_out()
bss_resource_ids = [status['resourceid'] for status in bss_statuses]
loans = web.ctx.site.store.values(type='/type/loan', limit=-1)
acs4_loans = [loan for loan in loans if loan['resource_type'] in ['epub', 'pdf']]
for i, loan in enumerate(acs4_loans):
logger.info("processing loan %s (%s)", loan['_key'], i)
bss_status = None
if resource_uses_bss(loan['resource_id']):
try:
bss_status = bss_statuses[bss_resource_ids.index(loan['resource_id'])]
except ValueError:
bss_status = None
_update_loan_status(loan['_key'], loan, bss_status)
def resource_uses_bss(resource_id: str) -> bool:
"""Returns true if the resource should use the BSS for status"""
global acs_resource_id_prefixes
if resource_id:
for prefix in acs_resource_id_prefixes:
if resource_id.startswith(prefix):
return True
return False
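# Illustrative behaviour of the prefix check above. The actual prefixes come
# from the module-level `acs_resource_id_prefixes` configuration (defined
# earlier in this file), so the 'acs:' prefix below is an assumption:
#
#   >>> resource_uses_bss('acs:epub:urn:uuid:1234')   # doctest: +SKIP
#   True
#   >>> resource_uses_bss('bookreader:foo00bar')      # doctest: +SKIP
#   False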
def user_can_borrow_edition(user, edition) -> Literal['borrow', 'browse', False]:
"""Returns the type of borrow for which patron is eligible, favoring
"browse" over "borrow" where available, otherwise return False if
patron is not eligible.
"""
lending_st = lending.get_groundtruth_availability(edition.ocaid, {})
book_is_lendable = lending_st.get('is_lendable', False)
book_is_waitlistable = lending_st.get('available_to_waitlist', False)
user_is_below_loan_limit = user.get_loan_count() < user_max_loans
if book_is_lendable:
if web.cookies().get('pd', False):
return 'borrow'
elif user_is_below_loan_limit:
if lending_st.get('available_to_browse'):
return 'browse'
elif lending_st.get('available_to_borrow') or (
book_is_waitlistable and is_users_turn_to_borrow(user, edition)
):
return 'borrow'
return False
def is_users_turn_to_borrow(user, edition) -> bool:
"""If this user is waiting on this edition, it can only borrowed if
user is the user is the first in the waiting list.
"""
waiting_loan = user.get_waiting_loan_for(edition.ocaid)
return (
waiting_loan
and waiting_loan['status'] == 'available'
and waiting_loan['position'] == 1
)
def is_admin() -> bool:
"""Returns True if the current user is in admin usergroup."""
user = accounts.get_current_user()
return user is not None and user.key in [
m.key for m in web.ctx.site.get('/usergroup/admin').members
]
def return_resource(resource_id):
"""Return the book to circulation! This object is invalid and should not be used after
this is called. Currently only possible for bookreader loans."""
loan_key = get_loan_key(resource_id)
if not loan_key:
raise Exception('Asked to return %s but no loan recorded' % resource_id)
loan = web.ctx.site.store.get(loan_key)
delete_loan(loan_key, loan)
def delete_loan(loan_key, loan=None) -> None:
if not loan:
loan = web.ctx.site.store.get(loan_key)
if not loan:
        raise Exception('Could not find store record for %s' % loan_key)
loan.delete()
def get_ia_auth_dict(user, item_id: str, user_specified_loan_key, access_token):
"""Returns response similar to one of these:
{'success':true,'token':'1287185207-fa72103dd21073add8f87a5ad8bce845','borrowed':true}
    {'success':false,'msg':'Book is checked out','borrowed':false, 'resolution': 'You can visit <a href="http://openlibrary.org/ia/someid">this book\'s page on Open Library</a>.'}
""" # noqa: E501
base_url = 'http://' + web.ctx.host
resolution_dict = {'base_url': base_url, 'item_id': item_id}
error_message = None
user_has_current_loan = False
# Sanity checks
if not ia_identifier_is_valid(item_id):
return {
'success': False,
'msg': 'Invalid item id',
'resolution': 'This book does not appear to have a valid item identifier.',
}
# Lookup loan information
loan = lending.get_loan(item_id)
loan_key = loan and loan.get_key()
if loan_key is None:
# Book is not checked out as a BookReader loan - may still be checked out in ACS4
error_message = 'Lending Library Book'
resolution_message = (
'This book is part of the <a href="%(base_url)s/subjects/Lending_library">'
'lending library</a>. Please <a href="%(base_url)s/ia/%(item_id)s/borrow">'
'visit this book\'s page on Open Library</a> to access the book.'
% resolution_dict
)
else:
# If we know who this user is, from third-party cookies and they are logged into openlibrary.org, check if they have the loan
if user:
if loan['user'] != user.key:
                # Borrowed by someone else - OR possibly came in through ezproxy and there's a stale login on openlibrary.org
error_message = 'This book is checked out'
resolution_message = (
'This book is currently checked out. You can '
'<a href="%(base_url)s/ia/%(item_id)s">visit this book\'s page on '
'Open Library</a> or '
'<a href="%(base_url)s/subjects/Lending_library">look at other '
'books available to borrow</a>.' % resolution_dict
)
elif loan['expiry'] < datetime.utcnow().isoformat():
# User has the loan, but it's expired
error_message = 'Your loan has expired'
resolution_message = (
'Your loan for this book has expired. You can <a href="%(base_url)s/ia/%(item_id)s">visit this book\'s page on Open Library</a>.'
% resolution_dict
)
else:
# User holds the loan - win!
user_has_current_loan = True
else:
# Don't have user context - not logged in or third-party cookies disabled
# Check if the loan id + token is valid
if (
user_specified_loan_key
and access_token
and ia_token_is_current(item_id, access_token)
):
# Win!
user_has_current_loan = True
else:
# Couldn't validate using token - they need to go to Open Library
error_message = "Lending Library Book"
resolution_message = (
'This book is part of the <a href="%(base_url)s/subjects/Lending_'
'library" title="Open Library Lending Library">lending library</a>. '
'Please <a href="%(base_url)s/ia/%(item_id)s/borrow" title="Borrow '
'book page on Open Library">visit this book\'s page on Open Library'
'</a> to access the book. You must have cookies enabled for '
'archive.org and openlibrary.org to access borrowed books.'
% resolution_dict
)
if error_message:
return {
'success': False,
'msg': error_message,
'resolution': resolution_message,
}
else:
# No error message, make sure we thought the loan was current as sanity check
if not user_has_current_loan:
raise Exception(
'lending: no current loan for this user found but no error condition specified'
)
return {'success': True, 'token': make_ia_token(item_id, BOOKREADER_AUTH_SECONDS)}
def ia_hash(token_data: str) -> str:
access_key = make_access_key()
return hmac.new(access_key, token_data.encode('utf-8'), hashlib.md5).hexdigest()
def make_access_key():
try:
return config.ia_access_secret.encode('utf-8')
except AttributeError:
raise RuntimeError(
"config value config.ia_access_secret is not present -- check your config"
)
def make_ia_token(item_id: str, expiry_seconds: int) -> str:
"""Make a key that allows a client to access the item on archive.org for the number of
seconds from now.
"""
# $timestamp = $time+600; //access granted for ten minutes
# $hmac = hash_hmac('md5', "{$id}-{$timestamp}", configGetValue('ol-loan-secret'));
# return "{$timestamp}-{$hmac}";
timestamp = int(time.time() + expiry_seconds)
token_data = '%s-%d' % (item_id, timestamp)
token = '%d-%s' % (timestamp, ia_hash(token_data))
return token
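# Token format sketch (illustrative). With config.ia_access_secret set, the
# result is "<unix timestamp of expiry>-<hex md5 HMAC of '<item_id>-<timestamp>'>".
# The digest shown below is a made-up placeholder:
#
#   >>> make_ia_token('foo00bar', 600)   # doctest: +SKIP
#   '1718000600-9b74c9897bac770ffc029102a200c5de'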
def ia_token_is_current(item_id: str, access_token: str) -> bool:
    # Check if token has expired
    try:
        token_timestamp = access_token.split('-')[0]
        token_time = int(token_timestamp)
    except (AttributeError, ValueError):
        # Missing or malformed token
        return False
    now = int(time.time())
    if token_time < now:
        return False
# Verify token is valid
try:
token_hmac = access_token.split('-')[1]
except:
return False
expected_data = f'{item_id}-{token_timestamp}'
expected_hmac = ia_hash(expected_data)
return token_hmac == expected_hmac
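# Round-trip sketch tying the two helpers above together (requires
# config.ia_access_secret; the item ids are hypothetical):
#
#   >>> token = make_ia_token('foo00bar', 600)    # doctest: +SKIP
#   >>> ia_token_is_current('foo00bar', token)    # doctest: +SKIP
#   True
#   >>> ia_token_is_current('otheritem', token)   # doctest: +SKIP
#   False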
def make_bookreader_auth_link(
loan_key, item_id, book_path, ol_host, ia_userid=None
) -> str:
"""
Generate a link to BookReaderAuth.php that starts the BookReader
with the information to initiate reading a borrowed book
"""
auth_link = 'https://%s/bookreader/BookReaderAuth.php?' % bookreader_host
params = {
'uuid': loan_key,
'token': make_ia_token(item_id, BOOKREADER_AUTH_SECONDS),
'id': item_id,
'bookPath': book_path,
'olHost': ol_host,
'olAuthUrl': f"https://{ol_host}/ia_auth/XXX",
'iaUserId': ia_userid,
        'iaAuthToken': make_ia_token(ia_userid, BOOKREADER_AUTH_SECONDS),
}
return auth_link + urllib.parse.urlencode(params)
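# Illustrative shape of the generated link (hosts and values are hypothetical;
# the real bookreader_host and ol_host come from configuration):
#
#   https://<bookreader_host>/bookreader/BookReaderAuth.php?
#       uuid=<loan_key>&token=<ia token>&id=<item_id>&bookPath=/stream/<item_id>
#       &olHost=<ol_host>&olAuthUrl=https%3A%2F%2F<ol_host>%2Fia_auth%2FXXX
#       &iaUserId=<ia_userid>&iaAuthToken=<ia token>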
lending.setup(config)
vendors.setup(config)
| 32,862 | Python | .py | 744 | 35.375 | 178 | 0.625729 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
446 | recentchanges.py | internetarchive_openlibrary/openlibrary/plugins/upstream/recentchanges.py |
"""New recentchanges implementation.
This should go into infogami.
"""
import web
import json
import yaml
from infogami.utils import delegate
from infogami.utils.view import public, render, render_template, safeint
from infogami.utils.view import add_flash_message # TODO: unused import?
from infogami.utils import features
from openlibrary.utils import dateutil
from openlibrary.plugins.upstream.utils import get_changes
@public
def recentchanges(query):
return web.ctx.site.recentchanges(query)
class index2(delegate.page):
path = "/recentchanges"
def GET(self):
if features.is_enabled("recentchanges_v2"):
return index().render()
else:
return render.recentchanges()
class index(delegate.page):
path = "/recentchanges(/[^/0-9][^/]*)"
def is_enabled(self):
return features.is_enabled("recentchanges_v2")
def GET(self, kind):
return self.render(kind=kind)
def render(self, date=None, kind=None):
query = {}
if date:
begin_date, end_date = dateutil.parse_daterange(date)
query['begin_date'] = begin_date.isoformat()
query['end_date'] = end_date.isoformat()
if kind:
query['kind'] = kind and kind.strip("/")
if web.ctx.encoding in ["json", "yml"]:
return self.handle_encoding(query, web.ctx.encoding)
return render_template("recentchanges/index", query)
def handle_encoding(self, query, encoding):
i = web.input(bot="", limit=100, offset=0, text="false")
# The bot stuff is handled in the template for the regular path.
# We need to handle it here for api.
if i.bot.lower() == "true":
query['bot'] = True
elif i.bot.lower() == "false":
query['bot'] = False
# and limit and offset business too
limit = safeint(i.limit, 100)
offset = safeint(i.offset, 0)
def constrain(value, low, high):
if value < low:
return low
elif value > high:
return high
else:
return value
# constrain limit and offset for performance reasons
limit = constrain(limit, 0, 1000)
offset = constrain(offset, 0, 10000)
query['limit'] = limit
query['offset'] = offset
result = [c.dict() for c in web.ctx.site.recentchanges(query)]
if encoding == "json":
response = json.dumps(result)
content_type = "application/json"
elif encoding == "yml":
response = self.yaml_dump(result)
content_type = "text/x-yaml"
else:
response = ""
content_type = "text/plain"
if i.text.lower() == "true":
web.header('Content-Type', 'text/plain')
else:
web.header('Content-Type', content_type)
return delegate.RawText(response)
def yaml_dump(self, d):
return yaml.safe_dump(d, indent=4, allow_unicode=True, default_flow_style=False)
class index_with_date(index):
path = r"/recentchanges/(\d\d\d\d(?:/\d\d)?(?:/\d\d)?)(/[^/]*)?"
def GET(self, date, kind):
date = date.replace("/", "-")
return self.render(kind=kind, date=date)
class recentchanges_redirect(delegate.page):
path = r"/recentchanges/goto/(\d+)"
def is_enabled(self):
return features.is_enabled("recentchanges_v2")
def GET(self, id):
id = int(id)
change = web.ctx.site.get_change(id)
if not change:
web.ctx.status = "404 Not Found"
return render.notfound(web.ctx.path)
raise web.found(change.url())
class recentchanges_view(delegate.page):
path = r"/recentchanges/\d\d\d\d/\d\d/\d\d/[^/]*/(\d+)"
def is_enabled(self):
return features.is_enabled("recentchanges_v2")
def get_change_url(self, change):
t = change.timestamp
return "/recentchanges/%04d/%02d/%02d/%s/%s" % (
t.year,
t.month,
t.day,
change.kind,
change.id,
)
def GET(self, id):
id = int(id)
change = web.ctx.site.get_change(id)
if not change:
web.ctx.status = "404 Not Found"
return render.notfound(web.ctx.path)
if web.ctx.encoding == 'json':
return self.render_json(change)
path = self.get_change_url(change)
if path != web.ctx.path:
raise web.redirect(path)
else:
kind = "merge" if change.kind.startswith("merge-") else change.kind
tname = "recentchanges/" + kind + "/view"
if tname in render:
return render_template(tname, change)
else:
return render_template("recentchanges/default/view", change)
def render_json(self, change):
return delegate.RawText(
json.dumps(change.dict()), content_type="application/json"
)
def POST(self, id):
if not features.is_enabled("undo"):
return render_template(
"permission_denied", web.ctx.path, "Permission denied to undo."
)
id = int(id)
change = web.ctx.site.get_change(id)
change._undo()
raise web.seeother(change.url())
class history(delegate.mode):
def GET(self, path):
page = web.ctx.site.get(path)
if not page:
raise web.seeother(path)
i = web.input(page=0)
offset = 20 * safeint(i.page)
limit = 20
history = get_changes({"key": path, "limit": limit, "offset": offset})
return render.history(page, history)
| 5,717 | Python | .py | 149 | 29.308725 | 88 | 0.593405 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
447 | account.py | internetarchive_openlibrary/openlibrary/plugins/upstream/account.py |
from datetime import datetime
import json
import logging
import re
import requests
from typing import Any, TYPE_CHECKING, Final
from collections.abc import Callable
from collections.abc import Iterable, Mapping
from math import ceil
import web
from infogami.utils import delegate
from infogami import config
from infogami.utils.view import (
require_login,
render,
render_template,
add_flash_message,
)
from infogami.infobase.client import ClientException
import infogami.core.code as core
from openlibrary import accounts
from openlibrary.i18n import gettext as _
from openlibrary.core import stats
from openlibrary.core import helpers as h, lending
from openlibrary.core.booknotes import Booknotes
from openlibrary.core.bookshelves import Bookshelves
from openlibrary.core.lending import (
get_items_and_add_availability,
s3_loan_api,
)
from openlibrary.core.observations import Observations
from openlibrary.core.ratings import Ratings
from openlibrary.core.follows import PubSub
from openlibrary.plugins.recaptcha import recaptcha
from openlibrary.plugins.upstream.mybooks import MyBooksTemplate
from openlibrary.plugins import openlibrary as olib
from openlibrary.accounts import (
audit_accounts,
Account,
OpenLibraryAccount,
InternetArchiveAccount,
valid_email,
clear_cookies,
OLAuthenticationError,
)
from openlibrary.plugins.upstream import borrow, forms, utils
from openlibrary.utils.dateutil import elapsed_time
if TYPE_CHECKING:
from openlibrary.plugins.upstream.models import Work
logger = logging.getLogger("openlibrary.account")
CONFIG_IA_DOMAIN: Final = config.get('ia_base_url', 'https://archive.org')
USERNAME_RETRIES = 3
RESULTS_PER_PAGE: Final = 25
# XXX: These need to be cleaned up
send_verification_email = accounts.send_verification_email
create_link_doc = accounts.create_link_doc
sendmail = accounts.sendmail
def get_login_error(error_key):
"""Nesting the LOGIN_ERRORS dictionary inside a function prevents
an AttributeError with the web.ctx.lang library"""
LOGIN_ERRORS = {
"invalid_email": _('The email address you entered is invalid'),
"account_blocked": _('This account has been blocked'),
"account_locked": _('This account has been locked'),
"account_not_found": _(
'No account was found with this email. Please try again'
),
"account_incorrect_password": _(
'The password you entered is incorrect. Please try again'
),
"account_bad_password": _('Wrong password. Please try again'),
"account_not_verified": _(
'Please verify your Open Library account before logging in'
),
"ia_account_not_verified": _(
'Please verify your Internet Archive account before logging in'
),
"missing_fields": _('Please fill out all fields and try again'),
"email_registered": _('This email is already registered'),
"username_registered": _('This username is already registered'),
"max_retries_exceeded": _(
'A problem occurred and we were unable to log you in.'
),
"invalid_s3keys": _(
'Login attempted with invalid Internet Archive s3 credentials.'
),
"undefined_error": _('A problem occurred and we were unable to log you in'),
}
return LOGIN_ERRORS[error_key]
class availability(delegate.page):
path = "/internal/fake/availability"
def POST(self):
"""Internal private API required for testing on localhost"""
return delegate.RawText(json.dumps({}), content_type="application/json")
class loans(delegate.page):
path = "/internal/fake/loans"
def POST(self):
"""Internal private API required for testing on localhost"""
return delegate.RawText(json.dumps({}), content_type="application/json")
class xauth(delegate.page):
path = "/internal/fake/xauth"
def POST(self):
"""Internal private API required for testing login on localhost
which normally would have to hit archive.org's xauth
service. This service is spoofable to return successful and
unsuccessful login attempts depending on the provided GET parameters
"""
i = web.input(email='', op=None)
result = {"error": "incorrect option specified"}
if i.op == "authenticate":
result = {
"success": True,
"version": 1,
"values": {
"access": 'foo',
"secret": 'foo',
},
}
elif i.op == "info":
result = {
"success": True,
"values": {
"locked": False,
"email": "[email protected]",
"itemname": "@openlibrary",
"screenname": "openlibrary",
"verified": True,
},
"version": 1,
}
return delegate.RawText(json.dumps(result), content_type="application/json")
class internal_audit(delegate.page):
path = "/internal/account/audit"
def GET(self):
"""Internal API endpoint used for authorized test cases and
administrators to unlink linked OL and IA accounts.
"""
i = web.input(
email='', username='', itemname='', key='', unlink='', new_itemname=''
)
if i.key != lending.config_internal_tests_api_key:
result = {'error': 'Authentication failed for private API'}
else:
try:
result = OpenLibraryAccount.get(
email=i.email, link=i.itemname, username=i.username
)
if result is None:
raise ValueError('Invalid Open Library account email or itemname')
result.enc_password = 'REDACTED'
if i.new_itemname:
result.link(i.new_itemname)
if i.unlink:
result.unlink()
except ValueError as e:
result = {'error': str(e)}
return delegate.RawText(json.dumps(result), content_type="application/json")
class account_migration(delegate.page):
path = "/internal/account/migration"
def GET(self):
i = web.input(username='', email='', key='')
if i.key != lending.config_internal_tests_api_key:
return delegate.RawText(
json.dumps({'error': 'Authentication failed for private API'}),
content_type="application/json",
)
try:
if i.username:
ol_account = OpenLibraryAccount.get(username=i.username)
elif i.email:
ol_account = OpenLibraryAccount.get(email=i.email)
except Exception as e:
return delegate.RawText(
json.dumps({'error': 'bad-account'}), content_type="application/json"
)
if ol_account:
ol_account.enc_password = 'REDACTED'
if ol_account.itemname:
return delegate.RawText(
json.dumps(
{
'status': 'link-exists',
'username': ol_account.username,
'itemname': ol_account.itemname,
'email': ol_account.email.lower(),
}
),
content_type="application/json",
)
if not ol_account.itemname:
ia_account = InternetArchiveAccount.get(email=ol_account.email.lower())
if ia_account:
ol_account.link(ia_account.itemname)
return delegate.RawText(
json.dumps(
{
'username': ol_account.username,
'status': 'link-found',
'itemname': ia_account.itemname,
'ol-itemname': ol_account.itemname,
'email': ol_account.email.lower(),
'ia': ia_account,
}
),
content_type="application/json",
)
password = OpenLibraryAccount.generate_random_password(16)
ia_account = InternetArchiveAccount.create(
ol_account.username or ol_account.displayname,
ol_account.email,
password,
verified=True,
retries=USERNAME_RETRIES,
)
return delegate.RawText(
json.dumps(
{
'username': ol_account.username,
'email': ol_account.email,
'itemname': ia_account.itemname,
'password': password,
'status': 'link-created',
}
),
content_type="application/json",
)
class account(delegate.page):
"""Account preferences."""
@require_login
def GET(self):
user = accounts.get_current_user()
return render.account(user)
class account_create(delegate.page):
"""New account creation.
Account remains in the pending state until the email is activated.
"""
path = "/account/create"
def GET(self):
f = self.get_form()
return render['account/create'](f)
def get_form(self) -> forms.RegisterForm:
f = forms.Register()
recap = self.get_recap()
f.has_recaptcha = recap is not None
if f.has_recaptcha:
f.inputs = list(f.inputs) + [recap]
return f
def get_recap(self):
if self.is_plugin_enabled('recaptcha'):
public_key = config.plugin_invisible_recaptcha.public_key
private_key = config.plugin_invisible_recaptcha.private_key
if public_key and private_key:
return recaptcha.Recaptcha(public_key, private_key)
def is_plugin_enabled(self, name):
return (
name in delegate.get_plugins()
or "openlibrary.plugins." + name in delegate.get_plugins()
)
def POST(self):
f: forms.RegisterForm = self.get_form()
if f.validates(web.input()):
try:
# Create ia_account: require they activate via IA email
# and then login to OL. Logging in after activation with
# IA credentials will auto create and link OL account.
"""NOTE: the values for the notifications must be kept in sync
with the values in the `MAILING_LIST_KEYS` array in
https://git.archive.org/ia/petabox/blob/master/www/common/MailSync/Settings.inc
Currently, per the fundraising/development team, the
"announcements checkbox" should map to BOTH `ml_best_of` and
`ml_updates`
""" # nopep8
mls = ['ml_best_of', 'ml_updates']
notifications = mls if f.ia_newsletter.checked else []
InternetArchiveAccount.create(
screenname=f.username.value,
email=f.email.value,
password=f.password.value,
notifications=notifications,
verified=False,
retries=USERNAME_RETRIES,
)
return render['account/verify'](
username=f.username.value, email=f.email.value
)
except OLAuthenticationError as e:
f.note = get_login_error(e.__str__())
from openlibrary.plugins.openlibrary.sentry import sentry
if sentry.enabled:
sentry.capture_exception(e)
return render['account/create'](f)
del delegate.pages['/account/register']
class account_login_json(delegate.page):
encoding = "json"
path = "/account/login"
def POST(self):
"""Overrides `account_login` and infogami.login to prevent users from
logging in with Open Library username and password if the
payload is json. Instead, if login attempted w/ json
credentials, requires Archive.org s3 keys.
"""
from openlibrary.plugins.openlibrary.code import BadRequest
d = json.loads(web.data())
email = d.get('email', "")
remember = d.get('remember', "")
access = d.get('access', None)
secret = d.get('secret', None)
test = d.get('test', False)
# Try S3 authentication first, fallback to infogami user, pass
if access and secret:
audit = audit_accounts(
None,
None,
require_link=True,
s3_access_key=access,
s3_secret_key=secret,
test=test,
)
error = audit.get('error')
if error:
resp = {
'error': error,
'errorDisplayString': get_login_error(error),
}
raise olib.code.BadRequest(json.dumps(resp))
expires = 3600 * 24 * 365 if remember.lower() == 'true' else ""
web.setcookie(config.login_cookie_name, web.ctx.conn.get_auth_token())
if audit.get('ia_email'):
ol_account = OpenLibraryAccount.get(email=audit['ia_email'])
if ol_account and ol_account.get_user().get_safe_mode() == 'yes':
web.setcookie('sfw', 'yes', expires=expires)
if (
ol_account
and 'yrg_banner_pref' in ol_account.get_user().preferences()
):
web.setcookie(
ol_account.get_user().preferences()['yrg_banner_pref'],
'1',
expires=(3600 * 24 * 365),
)
# Fallback to infogami user/pass
else:
from infogami.plugins.api.code import login as infogami_login
infogami_login().POST()
class account_login(delegate.page):
"""Account login.
Login can fail because of the following reasons:
* account_not_found: Error message is displayed.
* account_bad_password: Error message is displayed with a link to reset password.
* account_not_verified: Error page is displayed with button to "resend verification email".
"""
path = "/account/login"
def render_error(self, error_key, i):
f = forms.Login()
f.fill(i)
f.note = get_login_error(error_key)
return render.login(f)
def GET(self):
referer = web.ctx.env.get('HTTP_REFERER', '')
# Don't set referer if request is from offsite
if 'openlibrary.org' not in referer or referer.endswith('openlibrary.org/'):
referer = None
i = web.input(redirect=referer)
f = forms.Login()
f['redirect'].value = i.redirect
return render.login(f)
def POST(self):
i = web.input(
username="",
connect=None,
password="",
remember=False,
redirect='/',
test=False,
access=None,
secret=None,
)
email = i.username # XXX username is now email
audit = audit_accounts(
email,
i.password,
require_link=True,
s3_access_key=i.access or web.ctx.env.get('HTTP_X_S3_ACCESS'),
s3_secret_key=i.secret or web.ctx.env.get('HTTP_X_S3_SECRET'),
test=i.test,
)
if error := audit.get('error'):
return self.render_error(error, i)
expires = 3600 * 24 * 365 if i.remember else ""
web.setcookie('pd', int(audit.get('special_access')) or '', expires=expires)
web.setcookie(
config.login_cookie_name, web.ctx.conn.get_auth_token(), expires=expires
)
ol_account = OpenLibraryAccount.get(email=email)
if ol_account and ol_account.get_user().get_safe_mode() == 'yes':
web.setcookie('sfw', 'yes', expires=expires)
if ol_account and 'yrg_banner_pref' in ol_account.get_user().preferences():
web.setcookie(
ol_account.get_user().preferences()['yrg_banner_pref'],
'1',
expires=(3600 * 24 * 365),
)
blacklist = [
"/account/login",
"/account/create",
]
if i.redirect == "" or any(path in i.redirect for path in blacklist):
i.redirect = "/account/books"
stats.increment('ol.account.xauth.login')
raise web.seeother(i.redirect)
def POST_resend_verification_email(self, i):
try:
ol_login = OpenLibraryAccount.authenticate(i.email, i.password)
except ClientException as e:
code = e.get_data().get("code")
if code != "account_not_verified":
return self.error("account_incorrect_password", i)
account = OpenLibraryAccount.get(email=i.email)
account.send_verification_email()
title = _("Hi, %(user)s", user=account.displayname)
message = _(
"We've sent the verification email to %(email)s. You'll need to read that and click on the verification link to verify your email.",
email=account.email,
)
return render.message(title, message)
class account_logout(delegate.page):
"""Account logout.
This registers a handler to the /account/logout endpoint in infogami so that additional logic, such as clearing admin cookies,
can be handled prior to the calling of infogami's standard logout procedure
"""
path = "/account/logout"
def POST(self):
clear_cookies()
from infogami.core.code import logout as infogami_logout
return infogami_logout().POST()
class account_verify(delegate.page):
"""Verify user account."""
path = "/account/verify/([0-9a-f]*)"
def GET(self, code):
docs = web.ctx.site.store.values(type="account-link", name="code", value=code)
if docs:
doc = docs[0]
account = accounts.find(username=doc['username'])
if account and account['status'] != "pending":
return render['account/verify/activated'](account)
account.activate()
user = web.ctx.site.get("/people/" + doc['username']) # TBD
return render['account/verify/success'](account)
else:
return render['account/verify/failed']()
def POST(self, code=None):
"""Called to regenerate account verification code."""
i = web.input(email=None)
account = accounts.find(email=i.email)
if not account:
return render_template("account/verify/failed", email=i.email)
elif account['status'] != "pending":
return render['account/verify/activated'](account)
else:
account.send_verification_email()
title = _("Hi, %(user)s", user=account.displayname)
message = _(
"We've sent the verification email to %(email)s. You'll need to read that and click on the verification link to verify your email.",
email=account.email,
)
return render.message(title, message)
class account_verify_old(account_verify):
"""Old account verification code.
This takes username, email and code as url parameters. The new one takes just the code as part of the url.
"""
path = "/account/verify"
def GET(self):
# It is too long since we switched to the new account verification links.
# All old links must be expired by now.
# Show failed message without thinking.
return render['account/verify/failed']()
class account_validation(delegate.page):
path = '/account/validate'
@staticmethod
def ia_username_exists(username):
url = "https://archive.org/metadata/@%s" % username
try:
return bool(requests.get(url).json())
except (OSError, ValueError):
return
@staticmethod
def validate_username(username):
ol_account = OpenLibraryAccount.get(username=username)
if ol_account:
return _("Username unavailable")
ia_account = account_validation.ia_username_exists(username)
if ia_account:
return _("Username unavailable")
@staticmethod
def validate_email(email):
ol_account = OpenLibraryAccount.get(email=email)
if ol_account:
return _('Email already registered')
ia_account = InternetArchiveAccount.get(email=email)
if ia_account:
return _('An Internet Archive account already exists with this email')
def GET(self):
i = web.input()
errors = {'email': None, 'username': None}
if i.get('email') is not None:
errors['email'] = self.validate_email(i.email)
if i.get('username') is not None:
errors['username'] = self.validate_username(i.username)
return delegate.RawText(json.dumps(errors), content_type="application/json")
class account_email_verify(delegate.page):
path = "/account/email/verify/([0-9a-f]*)"
def GET(self, code):
if link := accounts.get_link(code):
username = link['username']
email = link['email']
link.delete()
return self.update_email(username, email)
else:
return self.bad_link()
def update_email(self, username, email):
if accounts.find(email=email):
title = _("Email address is already used.")
message = _(
"Your email address couldn't be updated. The specified email address is already used."
)
else:
logger.info("updated email of %s to %s", username, email)
accounts.update_account(username=username, email=email, status="active")
title = _("Email verification successful.")
message = _(
'Your email address has been successfully verified and updated in your account.'
)
return render.message(title, message)
def bad_link(self):
title = _("Email address couldn't be verified.")
message = _(
"Your email address couldn't be verified. The verification link seems invalid."
)
return render.message(title, message)
class account_email_verify_old(account_email_verify):
path = "/account/email/verify"
def GET(self):
# It is too long since we switched to the new email verification links.
# All old links must be expired by now.
# Show failed message without thinking.
return self.bad_link()
class account_ia_email_forgot(delegate.page):
path = "/account/email/forgot-ia"
def GET(self):
return render_template('account/email/forgot-ia')
def POST(self):
i = web.input(email='', password='')
err = ""
if valid_email(i.email):
act = OpenLibraryAccount.get(email=i.email)
if act:
if OpenLibraryAccount.authenticate(i.email, i.password) == "ok":
ia_act = act.get_linked_ia_account()
if ia_act:
return render_template(
'account/email/forgot-ia', email=ia_act.email
)
else:
err = "Open Library Account not linked. Login with your Open Library credentials to connect or create an Archive.org account"
else:
err = "Incorrect password"
else:
err = "Sorry, this Open Library account does not exist"
else:
err = "Please enter a valid Open Library email"
return render_template('account/email/forgot-ia', err=err)
class account_password_forgot(delegate.page):
path = "/account/password/forgot"
def GET(self):
f = forms.ForgotPassword()
return render['account/password/forgot'](f)
def POST(self):
i = web.input(email='')
f = forms.ForgotPassword()
if not f.validates(i):
return render['account/password/forgot'](f)
account = accounts.find(email=i.email)
if account.is_blocked():
f.note = utils.get_error("account_blocked")
return render_template('account/password/forgot', f)
send_forgot_password_email(account.username, i.email)
return render['account/password/sent'](i.email)
class account_password_reset(delegate.page):
path = "/account/password/reset/([0-9a-f]*)"
def GET(self, code):
docs = web.ctx.site.store.values(type="account-link", name="code", value=code)
if not docs:
title = _("Password reset failed.")
message = "Your password reset link seems invalid or expired."
return render.message(title, message)
f = forms.ResetPassword()
return render['account/password/reset'](f)
def POST(self, code):
link = accounts.get_link(code)
if not link:
title = _("Password reset failed.")
message = "The password reset link seems invalid or expired."
return render.message(title, message)
username = link['username']
i = web.input()
accounts.update_account(username, password=i.password)
link.delete()
return render_template("account/password/reset_success", username=username)
class account_audit(delegate.page):
path = "/account/audit"
def POST(self):
"""When the user attempts a login, an audit is performed to determine
whether their account is already linked (in which case we can
proceed to log the user in), whether there is an error
authenticating their account, or whether a /account/connect
        must first be performed.
Note: Emails are case sensitive behind the scenes and
functions which require them as lower will make them so
"""
i = web.input(email='', password='')
test = i.get('test', '').lower() == 'true'
email = i.get('email')
password = i.get('password')
result = audit_accounts(email, password, test=test)
return delegate.RawText(json.dumps(result), content_type="application/json")
class account_privacy(delegate.page):
path = "/account/privacy"
@require_login
def GET(self):
user = accounts.get_current_user()
return render['account/privacy'](user.preferences())
@require_login
def POST(self):
i = web.input(public_readlog="", safe_mode="")
user = accounts.get_current_user()
if user.get_safe_mode() != 'yes' and i.safe_mode == 'yes':
stats.increment('ol.account.safe_mode')
user.save_preferences(i)
username = user.key.split('/')[-1]
PubSub.toggle_privacy(username, private=i.public_readlog == 'no')
web.setcookie(
'sfw', i.safe_mode, expires="" if i.safe_mode.lower() == 'yes' else -1
)
add_flash_message(
'note', _("Notification preferences have been updated successfully.")
)
web.seeother("/account")
class account_notifications(delegate.page):
path = "/account/notifications"
@require_login
def GET(self):
user = accounts.get_current_user()
email = user.email
return render['account/notifications'](user.preferences(), email)
@require_login
def POST(self):
user = accounts.get_current_user()
user.save_preferences(web.input())
add_flash_message(
'note', _("Notification preferences have been updated successfully.")
)
web.seeother("/account")
class account_lists(delegate.page):
path = "/account/lists"
@require_login
def GET(self):
user = accounts.get_current_user()
raise web.seeother(user.key + '/lists')
class account_my_books_redirect(delegate.page):
path = "/account/books/(.*)"
@require_login
def GET(self, rest='loans'):
i = web.input(page=1)
user = accounts.get_current_user()
username = user.key.split('/')[-1]
query_params = f'?page={i.page}' if h.safeint(i.page) > 1 else ''
raise web.seeother(f'/people/{username}/books/{rest}{query_params}')
class account_my_books(delegate.page):
path = "/account/books"
@require_login
def GET(self):
user = accounts.get_current_user()
username = user.key.split('/')[-1]
raise web.seeother(f'/people/{username}/books')
class import_books(delegate.page):
path = "/account/import"
@require_login
def GET(self):
user = accounts.get_current_user()
username = user['key'].split('/')[-1]
template = render['account/import']()
return MyBooksTemplate(username, 'imports').render(
header_title=_("Imports and Exports"), template=template
)
class fetch_goodreads(delegate.page):
path = "/account/import/goodreads"
def GET(self):
raise web.seeother("/account/import")
@require_login
def POST(self):
books, books_wo_isbns = process_goodreads_csv(web.input())
return render['account/import'](books, books_wo_isbns)
def csv_header_and_format(row: Mapping[str, Any]) -> tuple[str, str]:
"""
Convert the keys of a dict into csv header and format strings for generating a
comma separated values string. This will only be run on the first row of data.
>>> csv_header_and_format({"item_zero": 0, "one_id_id": 1, "t_w_o": 2, "THREE": 3})
('Item Zero,One Id ID,T W O,Three', '{item_zero},{one_id_id},{t_w_o},{THREE}')
"""
    return (  # The .replace(" Id,", " ID,") converts "Edition Id" --> "Edition ID"
",".join(fld.replace("_", " ").title() for fld in row).replace(" Id,", " ID,"),
",".join("{%s}" % field for field in row),
)
@elapsed_time("csv_string")
def csv_string(source: Iterable[Mapping], row_formatter: Callable | None = None) -> str:
"""
Given a list of dicts, generate comma-separated values where each dict is a row.
An optional reformatter function can be provided to transform or enrich each dict.
The order and names of the formatter's output dict keys will determine the order
and header column titles of the resulting csv string.
:param source: An iterable of all the rows that should appear in the csv string.
:param formatter: A Callable that accepts a Mapping and returns a dict.
>>> csv = csv_string([{"row_id": x, "t w o": 2, "upper": x.upper()} for x in "ab"])
>>> csv.splitlines()
['Row ID,T W O,Upper', 'a,2,A', 'b,2,B']
"""
if not row_formatter: # The default formatter reuses the inbound dict unmodified
def row_formatter(row: Mapping) -> Mapping:
return row
def csv_body() -> Iterable[str]:
"""
On the first row, use csv_header_and_format() to get and yield the csv_header.
Then use csv_format to yield each row as a string of comma-separated values.
"""
assert row_formatter, "Placate mypy."
for i, row in enumerate(source):
if i == 0: # Only on first row, make header and format from the dict keys
csv_header, csv_format = csv_header_and_format(row_formatter(row))
yield csv_header
yield csv_format.format(**row_formatter(row))
return '\n'.join(csv_body())
class export_books(delegate.page):
path = "/account/export"
date_format = '%Y-%m-%d %H:%M:%S'
@require_login
def GET(self):
i = web.input(type='')
        filename = ''
        data = ''
user = accounts.get_current_user()
username = user.key.split('/')[-1]
if i.type == 'reading_log':
data = self.generate_reading_log(username)
filename = 'OpenLibrary_ReadingLog.csv'
elif i.type == 'book_notes':
data = self.generate_book_notes(username)
filename = 'OpenLibrary_BookNotes.csv'
elif i.type == 'reviews':
data = self.generate_reviews(username)
filename = 'OpenLibrary_Reviews.csv'
elif i.type == 'lists':
with elapsed_time("user.get_lists()"):
lists = user.get_lists(limit=1000)
with elapsed_time("generate_list_overview()"):
data = self.generate_list_overview(lists)
filename = 'Openlibrary_ListOverview.csv'
elif i.type == 'ratings':
data = self.generate_star_ratings(username)
filename = 'OpenLibrary_Ratings.csv'
web.header('Content-Type', 'text/csv')
web.header('Content-disposition', f'attachment; filename={filename}')
        return delegate.RawText(data, content_type="text/csv")
def escape_csv_field(self, raw_string: str) -> str:
"""
Formats given CSV field string such that it conforms to definition outlined
in RFC #4180.
Note: We should probably use
https://docs.python.org/3/library/csv.html
"""
escaped_string = raw_string.replace('"', '""')
return f'"{escaped_string}"'
def get_work_from_id(self, work_id: str) -> "Work":
"""
Gets work data for a given work ID (OLxxxxxW format), used to access work author, title, etc. for CSV generation.
"""
work_key = f"/works/{work_id}"
work: Work = web.ctx.site.get(work_key)
if not work:
raise ValueError(f"No Work found for {work_key}.")
if work.type.key == '/type/redirect':
# Fetch actual work and resolve redirects before exporting:
work = web.ctx.site.get(work.location)
work.resolve_redirect_chain(work_key)
return work
def generate_reading_log(self, username: str) -> str:
bookshelf_map = {1: 'Want to Read', 2: 'Currently Reading', 3: 'Already Read'}
def get_subjects(work: "Work", subject_type: str) -> str:
return " | ".join(s.title for s in work.get_subject_links(subject_type))
def format_reading_log(book: dict) -> dict:
"""
Adding, deleting, renaming, or reordering the fields of the dict returned
below will automatically be reflected in the CSV that is generated.
"""
work_id = f"OL{book['work_id']}W"
if edition_id := book.get("edition_id") or "":
edition_id = f"OL{edition_id}M"
work = self.get_work_from_id(work_id)
ratings = work.get_rating_stats() or {"average": "", "count": ""}
ratings_average, ratings_count = ratings.values()
return {
"work_id": work_id,
"title": self.escape_csv_field(work.title),
"authors": self.escape_csv_field(" | ".join(work.get_author_names())),
"first_publish_year": work.first_publish_year,
"edition_id": edition_id,
"edition_count": work.edition_count,
"bookshelf": bookshelf_map[work.get_users_read_status(username)],
"my_ratings": work.get_users_rating(username) or "",
"ratings_average": ratings_average,
"ratings_count": ratings_count,
"has_ebook": work.has_ebook(),
"subjects": self.escape_csv_field(
get_subjects(work=work, subject_type="subject")
),
"subject_people": self.escape_csv_field(
get_subjects(work=work, subject_type="person")
),
"subject_places": self.escape_csv_field(
get_subjects(work=work, subject_type="place")
),
"subject_times": self.escape_csv_field(
get_subjects(work=work, subject_type="time")
),
}
books = Bookshelves.iterate_users_logged_books(username)
return csv_string(books, format_reading_log)
def generate_book_notes(self, username: str) -> str:
def format_booknote(booknote: Mapping) -> dict:
escaped_note = booknote['notes'].replace('"', '""')
return {
"work_id": f"OL{booknote['work_id']}W",
"edition_id": f"OL{booknote['edition_id']}M",
"note": f'"{escaped_note}"',
"created_on": booknote['created'].strftime(self.date_format),
}
return csv_string(Booknotes.select_all_by_username(username), format_booknote)
def generate_reviews(self, username: str) -> str:
def format_observation(observation: Mapping) -> dict:
return {
"work_id": f"OL{observation['work_id']}W",
"review_category": f'"{observation["observation_type"]}"',
"review_value": f'"{observation["observation_value"]}"',
"created_on": observation['created'].strftime(self.date_format),
}
observations = Observations.select_all_by_username(username)
return csv_string(observations, format_observation)
def generate_list_overview(self, lists):
row = {
"list_id": "",
"list_name": "",
"list_description": "",
"entry": "",
"created_on": "",
"last_updated": "",
}
def lists_as_csv(lists) -> Iterable[str]:
for i, list in enumerate(lists):
if i == 0: # Only on first row, make header and format from dict keys
csv_header, csv_format = csv_header_and_format(row)
yield csv_header
row["list_id"] = list.key.split('/')[-1]
row["list_name"] = (list.name or '').replace('"', '""')
row["list_description"] = (list.description or '').replace('"', '""')
row["created_on"] = list.created.strftime(self.date_format)
if (last_updated := list.last_modified or "") and isinstance(
last_updated, datetime
): # placate mypy
last_updated = last_updated.strftime(self.date_format)
row["last_updated"] = last_updated
for seed in list.seeds:
row["entry"] = seed if isinstance(seed, str) else seed.key
yield csv_format.format(**row)
return "\n".join(lists_as_csv(lists))
def generate_star_ratings(self, username: str) -> str:
def format_rating(rating: Mapping) -> dict:
work_id = f"OL{rating['work_id']}W"
if edition_id := rating.get("edition_id") or "":
edition_id = f"OL{edition_id}M"
work = self.get_work_from_id(work_id)
return {
"Work ID": work_id,
"Edition ID": edition_id,
"Title": self.escape_csv_field(work.title),
"Author(s)": self.escape_csv_field(" | ".join(work.get_author_names())),
"Rating": f"{rating['rating']}",
"Created On": rating['created'].strftime(self.date_format),
}
return csv_string(Ratings.select_all_by_username(username), format_rating)
def _validate_follows_page(page, per_page, hits):
min_page = 1
max_page = max(min_page, ceil(hits / per_page))
if isinstance(page, int):
return min(max_page, max(min_page, page))
if isinstance(page, str) and page.isdigit():
return min(max_page, max(min_page, int(page)))
return min_page
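# Examples of the clamping above (with 120 total follows at 25 per page there
# are ceil(120 / 25) == 5 pages):
#
#   >>> _validate_follows_page("3", 25, 120)
#   3
#   >>> _validate_follows_page(99, 25, 120)    # clamped to the last page
#   5
#   >>> _validate_follows_page("not-a-number", 25, 120)
#   1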
class my_follows(delegate.page):
path = r"/people/([^/]+)/(followers|following)"
def GET(self, username, key=""):
page_size = 25
i = web.input(page=1)
# Validate page ID, force between 1 and max allowed by size and total count
follow_count = (
PubSub.count_followers(username)
if key == 'followers'
else PubSub.count_following(username)
)
page = _validate_follows_page(i.page, page_size, follow_count)
# Get slice of follows belonging to this page
offset = max(0, (page - 1) * page_size)
follows = (
PubSub.get_followers(username, page_size, offset)
if key == 'followers'
else PubSub.get_following(username, page_size, offset)
)
mb = MyBooksTemplate(username, 'following')
manage = key == 'following' and mb.is_my_page
template = render['account/follows'](
mb.user, follow_count, page, page_size, follows, manage=manage
)
return mb.render(header_title=_(key.capitalize()), template=template)
class account_loans(delegate.page):
path = "/account/loans"
@require_login
def GET(self):
from openlibrary.core.lending import get_loans_of_user
user = accounts.get_current_user()
user.update_loan_status()
username = user['key'].split('/')[-1]
mb = MyBooksTemplate(username, 'loans')
docs = get_loans_of_user(user.key)
template = render['account/loans'](user, docs)
return mb.render(header_title=_("Loans"), template=template)
class account_loans_json(delegate.page):
encoding = "json"
path = "/account/loans"
@require_login
def GET(self):
user = accounts.get_current_user()
user.update_loan_status()
loans = borrow.get_loans(user)
web.header('Content-Type', 'application/json')
return delegate.RawText(json.dumps({"loans": loans}))
class account_loan_history(delegate.page):
path = "/account/loan-history"
@require_login
def GET(self):
i = web.input(page=1)
page = int(i.page)
user = accounts.get_current_user()
username = user['key'].split('/')[-1]
mb = MyBooksTemplate(username, key='loan_history')
loan_history_data = get_loan_history_data(page=page, mb=mb)
template = render['account/loan_history'](
docs=loan_history_data['docs'],
current_page=page,
show_next=loan_history_data['show_next'],
ia_base_url=CONFIG_IA_DOMAIN,
)
return mb.render(header_title=_("Loan History"), template=template)
class account_loan_history_json(delegate.page):
encoding = "json"
path = "/account/loan-history"
@require_login
def GET(self):
i = web.input(page=1)
page = int(i.page)
user = accounts.get_current_user()
username = user['key'].split('/')[-1]
mb = MyBooksTemplate(username, key='loan_history')
loan_history_data = get_loan_history_data(page=page, mb=mb)
# Ensure all `docs` are `dicts`, as some are `Edition`s.
loan_history_data['docs'] = [
loan.dict() if not isinstance(loan, dict) else loan
for loan in loan_history_data['docs']
]
web.header('Content-Type', 'application/json')
return delegate.RawText(json.dumps({"loans_history": loan_history_data}))
class account_waitlist(delegate.page):
path = "/account/waitlist"
def GET(self):
raise web.seeother("/account/loans")
# Disabling because it prevents account_my_books_redirect from working for some reason.
# The purpose of this class is to not show the "Create" link for /account pages since
# that doesn't make any sense.
# class account_others(delegate.page):
# path = "(/account/.*)"
#
# def GET(self, path):
# return render.notfound(path, create=False)
def send_forgot_password_email(username: str, email: str) -> None:
key = f"account/{username}/password"
doc = create_link_doc(key, username, email)
web.ctx.site.store[key] = doc
link = web.ctx.home + "/account/password/reset/" + doc['code']
msg = render_template(
"email/password/reminder", username=username, email=email, link=link
)
sendmail(email, msg)
def as_admin(f):
"""Infobase allows some requests only from admin user. This decorator logs in as admin, executes the function and clears the admin credentials."""
def g(*a, **kw):
try:
delegate.admin_login()
return f(*a, **kw)
finally:
web.ctx.headers = []
return g
def process_goodreads_csv(i):
import csv
csv_payload = i.csv if isinstance(i.csv, str) else i.csv.decode()
csv_file = csv.reader(csv_payload.splitlines(), delimiter=',', quotechar='"')
header = next(csv_file)
books = {}
books_wo_isbns = {}
for book in list(csv_file):
_book = dict(zip(header, book))
isbn = _book['ISBN'] = _book['ISBN'].replace('"', '').replace('=', '')
isbn_13 = _book['ISBN13'] = _book['ISBN13'].replace('"', '').replace('=', '')
if isbn != '':
books[isbn] = _book
elif isbn_13 != '':
books[isbn_13] = _book
books[isbn_13]['ISBN'] = isbn_13
else:
books_wo_isbns[_book['Book Id']] = _book
return books, books_wo_isbns
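# Hedged usage sketch (illustrative only): `i` is any object with a `csv` attribute
# holding the raw Goodreads export; the header names ('Book Id', 'ISBN', 'ISBN13')
# come from the Goodreads CSV format. The example payload below is an assumption.
#   i = web.storage(csv='Book Id,ISBN,ISBN13\n1,="0140328726",="9780140328721"\n')
#   books, books_wo_isbns = process_goodreads_csv(i)
#   # books -> {'0140328726': {'Book Id': '1', 'ISBN': '0140328726',
#   #           'ISBN13': '9780140328721'}}; books_wo_isbns -> {}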
def get_loan_history_data(page: int, mb: "MyBooksTemplate") -> dict[str, Any]:
"""
Retrieve IA loan history data for page `page` of the patron's history.
This will use a patron's S3 keys to query the IA loan history API,
    get the IA IDs, get the OLIDs if available, and then convert this
into editions and IA-only items for display in the loan history.
This returns both editions and IA-only items because the loan history API
includes items that are not in Open Library, and displaying only IA
items creates pagination and navigation issues. For further discussion,
see https://github.com/internetarchive/openlibrary/pull/8375.
"""
if not (account := OpenLibraryAccount.get(username=mb.username)):
raise render.notfound(
"Account for not found for %s" % mb.username, create=False
)
s3_keys = web.ctx.site.store.get(account._key).get('s3_keys')
limit = RESULTS_PER_PAGE
offset = page * limit - limit
loan_history = s3_loan_api(
s3_keys=s3_keys,
action='user_borrow_history',
limit=limit + 1,
offset=offset,
newest=True,
).json()['history']['items']
# We request limit+1 to see if there is another page of history to display,
# and then pop the +1 off if it's present.
show_next = len(loan_history) == limit + 1
if show_next:
loan_history.pop()
ocaids = [loan_record['identifier'] for loan_record in loan_history]
loan_history_map = {
loan_record['identifier']: loan_record for loan_record in loan_history
}
# Get editions and attach their loan history.
editions_map = get_items_and_add_availability(ocaids=ocaids)
for edition in editions_map.values():
edition_loan_history = loan_history_map.get(edition.get('ocaid'))
edition['last_loan_date'] = (
edition_loan_history.get('updatedate') if edition_loan_history else ''
)
# Create 'placeholders' dicts for items in the Internet Archive loan history,
# but absent from Open Library, and then add loan history.
# ia_only['loan'] isn't set because `LoanStatus.html` reads it as a current
    # loan. No apparent way to distinguish between current and past loans with
# this API call.
ia_only_loans = [{'ocaid': ocaid} for ocaid in ocaids if ocaid not in editions_map]
for ia_only_loan in ia_only_loans:
loan_data = loan_history_map[ia_only_loan['ocaid']]
ia_only_loan['last_loan_date'] = loan_data.get('updatedate', '')
# Determine the macro to load for loan-history items only.
ia_only_loan['ia_only'] = True # type: ignore[typeddict-unknown-key]
editions_and_ia_loans = list(editions_map.values()) + ia_only_loans
editions_and_ia_loans.sort(
key=lambda item: item.get('last_loan_date', ''), reverse=True
)
return {
'docs': editions_and_ia_loans,
'show_next': show_next,
'limit': limit,
'page': page,
}
| 48,653 | Python | .py | 1,098 | 33.925319 | 150 | 0.59531 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
448 | utils.py | internetarchive_openlibrary/openlibrary/plugins/upstream/utils.py | import functools
import os
from typing import Any, Protocol, TYPE_CHECKING, TypeVar
from collections.abc import Callable, Generator, Iterable, Iterator
import unicodedata
import web
import json
import babel
import babel.core
import babel.dates
from babel.lists import format_list
from collections import defaultdict
import re
import random
import xml.etree.ElementTree as ET
import datetime
import logging
from html.parser import HTMLParser
from pathlib import Path
import yaml
import requests
from html import unescape
import urllib
from collections.abc import MutableMapping
from urllib.parse import (
parse_qs,
urlencode as parse_urlencode,
urlparse,
urlunparse,
)
from infogami import config
from infogami.utils import view, delegate, stats
from infogami.utils.view import render, get_template, public, query_param
from infogami.utils.macro import macro
from infogami.utils.context import InfogamiContext, context
from infogami.infobase.client import Changeset, Nothing, Thing, storify
from openlibrary.core.helpers import commify, parse_datetime, truncate
from openlibrary.core.middleware import GZipMiddleware
from openlibrary.core import cache
from web.utils import Storage
from web.template import TemplateResult
if TYPE_CHECKING:
from openlibrary.plugins.upstream.models import (
Work,
Author,
Edition,
)
STRIP_CHARS = ",'\" "
REPLACE_CHARS = "]["
class LanguageMultipleMatchError(Exception):
"""Exception raised when more than one possible language match is found."""
def __init__(self, language_name):
self.language_name = language_name
class LanguageNoMatchError(Exception):
"""Exception raised when no matching languages are found."""
def __init__(self, language_name):
self.language_name = language_name
class MultiDict(MutableMapping):
"""Ordered Dictionary that can store multiple values.
Must be initialized without an `items` parameter, or `items` must be an
iterable of two-value sequences. E.g., items=(('a', 1), ('b', 2))
>>> d = MultiDict()
>>> d['x'] = 1
>>> d['x'] = 2
>>> d['y'] = 3
>>> d['x']
2
>>> d['y']
3
>>> d['z']
Traceback (most recent call last):
...
KeyError: 'z'
>>> list(d)
['x', 'x', 'y']
>>> list(d.items())
[('x', 1), ('x', 2), ('y', 3)]
>>> list(d.multi_items())
[('x', [1, 2]), ('y', [3])]
    >>> d1 = MultiDict(items=(('a', 1), ('b', 2)), a=('x', 10, 11, 12))
    >>> d1.multi_items()
    [('a', [1, ('x', 10, 11, 12)]), ('b', [2])]
"""
def __init__(self, items: Iterable[tuple[Any, Any]] = (), **kw) -> None:
self._items: list = []
for k, v in items:
self[k] = v
self.update(kw)
def __getitem__(self, key):
if values := self.getall(key):
return values[-1]
else:
raise KeyError(key)
def __setitem__(self, key: str, value: Any) -> None:
self._items.append((key, value))
def __delitem__(self, key):
self._items = [(k, v) for k, v in self._items if k != key]
def __iter__(self):
yield from self.keys()
def __len__(self):
return len(list(self.keys()))
def getall(self, key):
return [v for k, v in self._items if k == key]
def keys(self):
return [k for k, _ in self._items]
# Subclasses of MutableMapping should return a dictionary view object for
# the values() method, but this implementation returns a list.
# https://docs.python.org/3/library/stdtypes.html#dict-views
def values(self) -> list[Any]: # type: ignore[override]
return [v for _, v in self._items]
def items(self):
return self._items[:]
def multi_items(self) -> list[tuple[str, list]]:
"""Returns items as list of tuples of key and a list of values."""
items = []
d: dict = {}
for k, v in self._items:
if k not in d:
d[k] = []
items.append((k, d[k]))
d[k].append(v)
return items
@macro
@public
def render_template(name: str, *a, **kw) -> TemplateResult:
if "." in name:
name = name.rsplit(".", 1)[0]
return render[name](*a, **kw)
def kebab_case(upper_camel_case: str) -> str:
"""
:param str upper_camel_case: Text in upper camel case (e.g. "HelloWorld")
:return: text in kebab case (e.g. 'hello-world')
>>> kebab_case('HelloWorld')
'hello-world'
>>> kebab_case("MergeUI")
'merge-u-i'
"""
parts = re.findall(r'[A-Z][^A-Z]*', upper_camel_case)
return '-'.join(parts).lower()
@public
def render_component(
name: str,
attrs: dict | None = None,
json_encode: bool = True,
asyncDefer=False,
) -> str:
"""
:param str name: Name of the component (excluding extension)
:param dict attrs: attributes to add to the component element
"""
from openlibrary.plugins.upstream.code import static_url
attrs = attrs or {}
attrs_str = ''
for key, val in attrs.items():
        if json_encode and isinstance(val, (dict, list)):
val = json.dumps(val)
# On the Vue side use decodeURIComponent to decode
val = urllib.parse.quote(val)
attrs_str += f' {key}="{val}"'
html = ''
included = web.ctx.setdefault("included-components", [])
if len(included) == 0:
# Need to include Vue
html += '<script src="%s"></script>' % static_url('build/vue.js')
if name not in included:
url = static_url('build/components/production/ol-%s.min.js' % name)
script_attrs = '' if not asyncDefer else 'async defer'
html += f'<script {script_attrs} src="{url}"></script>'
included.append(name)
html += f'<ol-{kebab_case(name)} {attrs_str}></ol-{kebab_case(name)}>'
return html
def render_macro(name, args, **kwargs):
return dict(web.template.Template.globals['macros'][name](*args, **kwargs))
@public
def render_cached_macro(name: str, args: tuple, **kwargs):
from openlibrary.plugins.openlibrary.home import caching_prethread
def get_key_prefix():
lang = web.ctx.lang
key_prefix = f'{name}.{lang}'
if web.cookies().get('pd', False):
key_prefix += '.pd'
if web.cookies().get('sfw', ''):
key_prefix += '.sfw'
return key_prefix
five_minutes = 5 * 60
key_prefix = get_key_prefix()
mc = cache.memcache_memoize(
render_macro,
key_prefix=key_prefix,
timeout=five_minutes,
prethread=caching_prethread(),
hash_args=True, # this avoids cache key length overflow
)
try:
page = mc(name, args, **kwargs)
return web.template.TemplateResult(page)
    except (ValueError, TypeError):
return '<span>Failed to render macro</span>'
@public
def get_error(name, *args):
"""Return error with the given name from errors.tmpl template."""
return get_message_from_template("errors", name, args)
@public
def get_message(name: str, *args) -> str:
"""Return message with given name from messages.tmpl template"""
return get_message_from_template("messages", name, args)
def get_message_from_template(
    template_name: str, name: str, args: tuple[Any, ...]
) -> str:
d = render_template(template_name).get("messages", {})
msg = d.get(name) or name.lower().replace("_", " ")
if msg and args:
return msg % args
else:
return msg
@public
def list_recent_pages(path, limit=100, offset=0):
"""Lists all pages with name path/* in the order of last_modified."""
q = {}
q['key~'] = path + '/*'
# don't show /type/delete and /type/redirect
q['a:type!='] = '/type/delete'
q['b:type!='] = '/type/redirect'
q['sort'] = 'key'
q['limit'] = limit
q['offset'] = offset
q['sort'] = '-last_modified'
# queries are very slow with != conditions
# q['type'] != '/type/delete'
return web.ctx.site.get_many(web.ctx.site.things(q))
@public
def commify_list(items: Iterable[Any]) -> str:
# Not sure why lang is sometimes ''
lang = web.ctx.lang or 'en'
# If the list item is a template/html element, we strip it
# so that there is no space before the comma.
return format_list([str(x).strip() for x in items], locale=lang)
@public
def json_encode(d) -> str:
return json.dumps(d)
def unflatten(d: dict, separator: str = "--") -> dict:
"""Convert flattened data into nested form.
>>> unflatten({"a": 1, "b--x": 2, "b--y": 3, "c--0": 4, "c--1": 5})
{'a': 1, 'c': [4, 5], 'b': {'y': 3, 'x': 2}}
>>> unflatten({"a--0--x": 1, "a--0--y": 2, "a--1--x": 3, "a--1--y": 4})
{'a': [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]}
"""
def isint(k: Any) -> bool:
try:
int(k)
return True
except ValueError:
return False
def setvalue(data: dict, k, v) -> None:
if '--' in k:
k, k2 = k.split(separator, 1)
setvalue(data.setdefault(k, {}), k2, v)
else:
data[k] = v
def makelist(d):
"""Convert d into a list if all the keys of d are integers."""
if isinstance(d, dict):
if all(isint(k) for k in d):
return [makelist(d[k]) for k in sorted(d, key=int)]
else:
return Storage((k, makelist(v)) for k, v in d.items())
else:
return d
d2: dict = {}
for k, v in d.items():
setvalue(d2, k, v)
return makelist(d2)
def fuzzy_find(value, options, stopwords=None):
    """Try to find the option nearest to the value.
    >>> fuzzy_find("O'Reilly", ["O'Reilly Inc", "Addison-Wesley"])
    "O'Reilly Inc"
    """
    stopwords = stopwords or []
if not options:
return value
rx = web.re_compile(r"[-_\.&, ]+")
# build word frequency
d = defaultdict(list)
for option in options:
for t in rx.split(option):
d[t].append(option)
# find score for each option
score = defaultdict(lambda: 0)
for t in rx.split(value):
if t.lower() in stopwords:
continue
for option in d[t]:
score[option] += 1
# take the option with maximum score
return max(options, key=score.__getitem__)
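# Illustrative sketch (not in the original module): each option is scored by how
# many tokens it shares with `value`, and the highest-scoring option wins, e.g.
#   fuzzy_find("Penguin Books Ltd", ["Penguin Books", "Random House"])
# returns 'Penguin Books' (two shared tokens versus none).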
@public
def radio_input(checked=False, **params) -> str:
params['type'] = 'radio'
if checked:
params['checked'] = "checked"
return "<input %s />" % " ".join(
[f'{k}="{web.websafe(v)}"' for k, v in params.items()]
)
def get_coverstore_url() -> str:
return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')
@public
def get_coverstore_public_url() -> str:
if OL_COVERSTORE_PUBLIC_URL := os.environ.get('OL_COVERSTORE_PUBLIC_URL'):
return OL_COVERSTORE_PUBLIC_URL.rstrip('/')
else:
return config.get('coverstore_public_url', get_coverstore_url()).rstrip('/')
def _get_changes_v1_raw(
query: dict[str, str | int], revision: int | None = None
) -> list[Storage]:
"""Returns the raw versions response.
Revision is taken as argument to make sure a new cache entry is used when a new revision of the page is created.
"""
if 'env' not in web.ctx:
delegate.fakeload()
versions = web.ctx.site.versions(query)
for v in versions:
v.created = v.created.isoformat()
v.author = v.author and v.author.key
# XXX-Anand: hack to avoid too big data to be stored in memcache.
# v.changes is not used and it contributes to memcache bloat in a big way.
v.changes = '[]'
return versions
def get_changes_v1(
query: dict[str, str | int], revision: int | None = None
) -> list[Storage]:
# uses the cached function _get_changes_v1_raw to get the raw data
    # and processes it before returning.
def process(v):
v = Storage(v)
v.created = parse_datetime(v.created)
v.author = v.author and web.ctx.site.get(v.author, lazy=True)
return v
return [process(v) for v in _get_changes_v1_raw(query, revision)]
def _get_changes_v2_raw(
query: dict[str, str | int], revision: int | None = None
) -> list[dict]:
"""Returns the raw recentchanges response.
Revision is taken as argument to make sure a new cache entry is used when a new revision of the page is created.
"""
if 'env' not in web.ctx:
delegate.fakeload()
changes = web.ctx.site.recentchanges(query)
return [c.dict() for c in changes]
# XXX-Anand: disabled temporarily to avoid too much memcache usage.
# _get_changes_v2_raw = cache.memcache_memoize(_get_changes_v2_raw, key_prefix="upstream._get_changes_v2_raw", timeout=10*60)
def get_changes_v2(
query: dict[str, str | int], revision: int | None = None
) -> list[Changeset]:
page = web.ctx.site.get(query['key'])
def first(seq, default=None):
try:
return next(seq)
except StopIteration:
return default
def process_change(change):
change = Changeset.create(web.ctx.site, storify(change))
change.thing = page
change.key = page.key
change.revision = first(c.revision for c in change.changes if c.key == page.key)
change.created = change.timestamp
change.get = change.__dict__.get
change.get_comment = lambda: get_comment(change)
change.machine_comment = change.data.get("machine_comment")
return change
def get_comment(change):
t = get_template("recentchanges/" + change.kind + "/comment") or get_template(
"recentchanges/default/comment"
)
return t(change, page)
query['key'] = page.key
changes = _get_changes_v2_raw(query, revision=page.revision)
return [process_change(c) for c in changes]
def get_changes(
query: dict[str, str | int], revision: int | None = None
) -> list[Changeset]:
return get_changes_v2(query, revision=revision)
@public
def get_history(page: "Work | Author | Edition") -> Storage:
h = Storage(
revision=page.revision, lastest_revision=page.revision, created=page.created
)
if h.revision < 5:
h.recent = get_changes({"key": page.key, "limit": 5}, revision=page.revision)
h.initial = h.recent[-1:]
h.recent = h.recent[:-1]
else:
h.initial = get_changes(
{"key": page.key, "limit": 1, "offset": h.revision - 1},
revision=page.revision,
)
h.recent = get_changes({"key": page.key, "limit": 4}, revision=page.revision)
return h
@public
def get_version(key, revision):
try:
return web.ctx.site.versions({"key": key, "revision": revision, "limit": 1})[0]
except IndexError:
return None
@public
def get_recent_author(doc: "Work") -> "Thing | None":
versions = get_changes_v1(
{'key': doc.key, 'limit': 1, "offset": 0}, revision=doc.revision
)
if versions:
return versions[0].author
return None
@public
def get_recent_accounts(limit=5, offset=0):
versions = web.ctx.site.versions(
{'type': '/type/user', 'revision': 1, 'limit': limit, 'offset': offset}
)
return web.ctx.site.get_many([v.key for v in versions])
def get_locale():
try:
return babel.Locale(web.ctx.get("lang") or "en")
except babel.core.UnknownLocaleError:
return babel.Locale("en")
class HasGetKeyRevision(Protocol):
key: str
revision: int
def get(self, item) -> Any: ...
@public
def process_version(v: HasGetKeyRevision) -> HasGetKeyRevision:
"""Looks at the version and adds machine_comment required for showing "View MARC" link."""
comments = [
"found a matching marc record",
"add publisher and source",
]
if v.key.startswith('/books/') and not v.get('machine_comment'):
thing = v.get('thing') or web.ctx.site.get(v.key, v.revision)
if (
thing.source_records
and v.revision == 1
or (v.comment and v.comment.lower() in comments) # type: ignore [attr-defined]
):
marc = thing.source_records[-1]
if marc.startswith('marc:'):
v.machine_comment = marc[len("marc:") :] # type: ignore [attr-defined]
else:
v.machine_comment = marc # type: ignore [attr-defined]
return v
@public
def is_thing(t) -> bool:
return isinstance(t, Thing)
@public
def putctx(key: str, value: str | bool) -> str:
"""Save a value in the context."""
context[key] = value
return ""
class Metatag:
def __init__(self, tag: str = "meta", **attrs) -> None:
self.tag = tag
self.attrs = attrs
def __str__(self) -> str:
attrs = ' '.join(f'{k}="{websafe(v)}"' for k, v in self.attrs.items())
return f'<{self.tag} {attrs} />'
def __repr__(self) -> str:
return 'Metatag(%s)' % str(self)
@public
def add_metatag(tag: str = "meta", **attrs) -> None:
context.setdefault('metatags', [])
context.metatags.append(Metatag(tag, **attrs))
@public
def url_quote(text: str | bytes) -> str:
if isinstance(text, str):
text = text.encode('utf8')
return urllib.parse.quote_plus(text)
@public
def urlencode(dict_or_list_of_tuples: dict | list[tuple[str, Any]]) -> str:
"""
You probably want to use this, if you're looking to urlencode parameters. This will
encode things to utf8 that would otherwise cause urlencode to error.
"""
from urllib.parse import urlencode as og_urlencode
tuples = dict_or_list_of_tuples
if isinstance(dict_or_list_of_tuples, dict):
tuples = list(dict_or_list_of_tuples.items())
params = [(k, v.encode('utf-8') if isinstance(v, str) else v) for (k, v) in tuples]
return og_urlencode(params)
@public
def entity_decode(text: str) -> str:
return unescape(text)
@public
def set_share_links(
url: str = '#', title: str = '', view_context: InfogamiContext | None = None
) -> None:
"""
    Constructs a list of share links for social platforms and assigns it to the
    view context's `share_links` attribute.
    Args:
url (str or unicode) - complete canonical url to page being shared
title (str or unicode) - title of page being shared
view_context (object that has/can-have share_links attribute)
"""
encoded_url = url_quote(url)
text = url_quote("Check this out: " + entity_decode(title))
links = [
{
'text': 'Facebook',
'url': 'https://www.facebook.com/sharer/sharer.php?u=' + encoded_url,
},
{
'text': 'Twitter',
'url': f'https://twitter.com/intent/tweet?url={encoded_url}&via=openlibrary&text={text}',
},
{
'text': 'Pinterest',
'url': f'https://pinterest.com/pin/create/link/?url={encoded_url}&description={text}',
},
]
if view_context is not None:
view_context.share_links = links
T = TypeVar('T')
def safeget(func: Callable[[], T], default=None) -> T:
"""
TODO: DRY with solrbuilder copy
>>> safeget(lambda: {}['foo'])
>>> safeget(lambda: {}['foo']['bar'][0])
>>> safeget(lambda: {'foo': []}['foo'][0])
>>> safeget(lambda: {'foo': {'bar': [42]}}['foo']['bar'][0])
42
>>> safeget(lambda: {'foo': 'blah'}['foo']['bar'])
"""
try:
return func()
except (KeyError, IndexError, TypeError):
return default
def strip_accents(s: str) -> str:
# http://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string
try:
s.encode('ascii')
return s
except UnicodeEncodeError:
return ''.join(
c
for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
@functools.cache
def get_languages(limit: int = 1000) -> dict:
keys = web.ctx.site.things({"type": "/type/language", "limit": limit})
return {
lang.key: lang for lang in web.ctx.site.get_many(keys) if not lang.deprecated
}
def word_prefix_match(prefix: str, text: str) -> bool:
# Compare to each word of `text` for more accurate matching
# Eg. the prefix 'greek' will match with 'ancient greek' as well as 'greek'
return any(piece.startswith(prefix) for piece in text.split())
def autocomplete_languages(prefix: str) -> Iterator[Storage]:
"""
Given, e.g., "English", this returns an iterator of the following:
<Storage {'key': '/languages/ang', 'code': 'ang', 'name': 'English, Old (ca. 450-1100)'}>
<Storage {'key': '/languages/cpe', 'code': 'cpe', 'name': 'Creoles and Pidgins, English-based (Other)'}>
<Storage {'key': '/languages/eng', 'code': 'eng', 'name': 'English'}>
<Storage {'key': '/languages/enm', 'code': 'enm', 'name': 'English, Middle (1100-1500)'}>
"""
def get_names_to_try(lang: dict) -> Generator[str | None, None, None]:
# For each language attempt to match based on:
# The language's name translated into the current user's chosen language (user_lang)
user_lang = web.ctx.lang or 'en'
yield safeget(lambda: lang['name_translated'][user_lang][0])
# The language's name translated into its native name (lang_iso_code)
lang_iso_code = safeget(lambda: lang['identifiers']['iso_639_1'][0])
yield safeget(lambda: lang['name_translated'][lang_iso_code][0])
# The language's name as it was fetched from get_languages() (None)
yield lang['name']
def normalize_for_search(s: str) -> str:
return strip_accents(s).lower()
prefix = normalize_for_search(prefix)
for lang in get_languages().values():
for lang_name in get_names_to_try(lang):
if lang_name and word_prefix_match(prefix, normalize_for_search(lang_name)):
yield Storage(
key=lang.key,
code=lang.code,
name=lang_name,
)
break
def get_abbrev_from_full_lang_name(input_lang_name: str, languages=None) -> str:
"""
Take a language name, in English, such as 'English' or 'French' and return
'eng' or 'fre', respectively, if there is one match.
If there are zero matches, raise LanguageNoMatchError.
If there are multiple matches, raise a LanguageMultipleMatchError.
"""
if languages is None:
languages = get_languages().values()
target_abbrev = ""
def normalize(s: str) -> str:
return strip_accents(s).lower()
for language in languages:
if normalize(language.name) == normalize(input_lang_name):
if target_abbrev:
raise LanguageMultipleMatchError(input_lang_name)
target_abbrev = language.code
continue
for key in language.name_translated:
if normalize(language.name_translated[key][0]) == normalize(
input_lang_name
):
if target_abbrev:
raise LanguageMultipleMatchError(input_lang_name)
target_abbrev = language.code
break
if not target_abbrev:
raise LanguageNoMatchError(input_lang_name)
return target_abbrev
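# Hedged example (illustrative; the Storage-based stand-ins below are assumptions,
# used to avoid the web.ctx dependency of get_languages()):
#   langs = [web.storage(code='eng', name='English', name_translated={}),
#            web.storage(code='fre', name='French', name_translated={})]
#   get_abbrev_from_full_lang_name('French', languages=langs)   # -> 'fre'
#   get_abbrev_from_full_lang_name('Klingon', languages=langs)  # raises LanguageNoMatchError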
def get_language(lang_or_key: str) -> "None | Thing | Nothing":
if isinstance(lang_or_key, str):
return get_languages().get(lang_or_key)
else:
return lang_or_key
def get_marc21_language(language: str) -> str | None:
"""
Get a three character MARC 21 language abbreviation from another abbreviation format:
https://www.loc.gov/marc/languages/language_code.html
https://www.loc.gov/standards/iso639-2/php/code_list.php
Note: This does not contain all possible languages/abbreviations and is
biased towards abbreviations in ISBNdb.
"""
language_map = {
'ab': 'abk',
'af': 'afr',
'afr': 'afr',
'afrikaans': 'afr',
'agq': 'agq',
'ak': 'aka',
'akk': 'akk',
'alb': 'alb',
'alg': 'alg',
'am': 'amh',
'amh': 'amh',
'ang': 'ang',
'apa': 'apa',
'ar': 'ara',
'ara': 'ara',
'arabic': 'ara',
'arc': 'arc',
'arm': 'arm',
'asa': 'asa',
'aus': 'aus',
'ave': 'ave',
'az': 'aze',
'aze': 'aze',
'ba': 'bak',
'baq': 'baq',
'be': 'bel',
'bel': 'bel',
'bem': 'bem',
'ben': 'ben',
'bengali': 'ben',
'bg': 'bul',
'bis': 'bis',
'bislama': 'bis',
'bm': 'bam',
'bn': 'ben',
'bos': 'bos',
'br': 'bre',
'bre': 'bre',
'breton': 'bre',
'bul': 'bul',
'bulgarian': 'bul',
'bur': 'bur',
'ca': 'cat',
'cat': 'cat',
'catalan': 'cat',
'cau': 'cau',
'cel': 'cel',
'chi': 'chi',
'chinese': 'chi',
'chu': 'chu',
'cop': 'cop',
'cor': 'cor',
'cos': 'cos',
'cpe': 'cpe',
'cpf': 'cpf',
'cre': 'cre',
'croatian': 'hrv',
'crp': 'crp',
'cs': 'cze',
'cy': 'wel',
'cze': 'cze',
'czech': 'cze',
'da': 'dan',
'dan': 'dan',
'danish': 'dan',
'de': 'ger',
'dut': 'dut',
'dutch': 'dut',
'dv': 'div',
'dz': 'dzo',
'ebu': 'ceb',
'egy': 'egy',
'el': 'gre',
'en': 'eng',
'en_us': 'eng',
'enf': 'enm',
'eng': 'eng',
'english': 'eng',
'enm': 'enm',
'eo': 'epo',
'epo': 'epo',
'es': 'spa',
'esk': 'esk',
'esp': 'und',
'est': 'est',
'et': 'est',
'eu': 'eus',
'f': 'fre',
'fa': 'per',
'ff': 'ful',
'fi': 'fin',
'fij': 'fij',
'filipino': 'fil',
'fin': 'fin',
'finnish': 'fin',
'fle': 'fre',
'fo': 'fao',
'fon': 'fon',
'fr': 'fre',
'fra': 'fre',
'fre': 'fre',
'french': 'fre',
'fri': 'fri',
'frm': 'frm',
'fro': 'fro',
'fry': 'fry',
'ful': 'ful',
'ga': 'gae',
'gae': 'gae',
'gem': 'gem',
'geo': 'geo',
'ger': 'ger',
'german': 'ger',
'gez': 'gez',
'gil': 'gil',
'gl': 'glg',
'gla': 'gla',
'gle': 'gle',
'glg': 'glg',
'gmh': 'gmh',
'grc': 'grc',
'gre': 'gre',
'greek': 'gre',
'gsw': 'gsw',
'guj': 'guj',
'hat': 'hat',
'hau': 'hau',
'haw': 'haw',
'heb': 'heb',
'hebrew': 'heb',
'her': 'her',
'hi': 'hin',
'hin': 'hin',
'hindi': 'hin',
'hmn': 'hmn',
'hr': 'hrv',
'hrv': 'hrv',
'hu': 'hun',
'hun': 'hun',
'hy': 'hye',
'ice': 'ice',
'id': 'ind',
'iku': 'iku',
'in': 'ind',
'ind': 'ind',
'indonesian': 'ind',
'ine': 'ine',
'ira': 'ira',
'iri': 'iri',
'irish': 'iri',
'is': 'ice',
'it': 'ita',
'ita': 'ita',
'italian': 'ita',
'iw': 'heb',
'ja': 'jpn',
'jap': 'jpn',
'japanese': 'jpn',
'jpn': 'jpn',
'ka': 'kat',
'kab': 'kab',
'khi': 'khi',
'khm': 'khm',
'kin': 'kin',
'kk': 'kaz',
'km': 'khm',
'ko': 'kor',
'kon': 'kon',
'kor': 'kor',
'korean': 'kor',
'kur': 'kur',
'ky': 'kir',
'la': 'lat',
'lad': 'lad',
'lan': 'und',
'lat': 'lat',
'latin': 'lat',
'lav': 'lav',
'lcc': 'und',
'lit': 'lit',
'lo': 'lao',
'lt': 'ltz',
'ltz': 'ltz',
'lv': 'lav',
'mac': 'mac',
'mal': 'mal',
'mao': 'mao',
'map': 'map',
'mar': 'mar',
'may': 'may',
'mfe': 'mfe',
'mic': 'mic',
'mis': 'mis',
'mk': 'mkh',
'ml': 'mal',
'mla': 'mla',
'mlg': 'mlg',
'mlt': 'mlt',
'mn': 'mon',
'moh': 'moh',
'mon': 'mon',
'mr': 'mar',
'ms': 'msa',
'mt': 'mlt',
'mul': 'mul',
'my': 'mya',
'myn': 'myn',
'nai': 'nai',
'nav': 'nav',
'nde': 'nde',
'ndo': 'ndo',
'ne': 'nep',
'nep': 'nep',
'nic': 'nic',
'nl': 'dut',
'nor': 'nor',
'norwegian': 'nor',
'nso': 'sot',
'ny': 'nya',
'oc': 'oci',
'oci': 'oci',
'oji': 'oji',
'old norse': 'non',
'opy': 'und',
'ori': 'ori',
'ota': 'ota',
'paa': 'paa',
'pal': 'pal',
'pan': 'pan',
'per': 'per',
'persian': 'per',
'farsi': 'per',
'pl': 'pol',
'pli': 'pli',
'pol': 'pol',
'polish': 'pol',
'por': 'por',
'portuguese': 'por',
'pra': 'pra',
'pro': 'pro',
'ps': 'pus',
'pt': 'por',
'pt-br': 'por',
'que': 'que',
'ro': 'rum',
'roa': 'roa',
'roh': 'roh',
'romanian': 'rum',
'ru': 'rus',
'rum': 'rum',
'rus': 'rus',
'russian': 'rus',
'rw': 'kin',
'sai': 'sai',
'san': 'san',
'scc': 'srp',
'sco': 'sco',
'scottish gaelic': 'gla',
'scr': 'scr',
'sesotho': 'sot',
'sho': 'sna',
'shona': 'sna',
'si': 'sin',
'sl': 'slv',
'sla': 'sla',
'slo': 'slv',
'slovenian': 'slv',
'slv': 'slv',
'smo': 'smo',
'sna': 'sna',
'som': 'som',
'sot': 'sot',
'sotho': 'sot',
'spa': 'spa',
'spanish': 'spa',
'sq': 'alb',
'sr': 'srp',
'srp': 'srp',
'srr': 'srr',
'sso': 'sso',
'ssw': 'ssw',
'st': 'sot',
'sux': 'sux',
'sv': 'swe',
'sw': 'swa',
'swa': 'swa',
'swahili': 'swa',
'swe': 'swe',
'swedish': 'swe',
'swz': 'ssw',
'syc': 'syc',
'syr': 'syr',
'ta': 'tam',
'tag': 'tgl',
'tah': 'tah',
'tam': 'tam',
'tel': 'tel',
'tg': 'tgk',
'tgl': 'tgl',
'th': 'tha',
'tha': 'tha',
'tib': 'tib',
'tl': 'tgl',
'tr': 'tur',
'tsn': 'tsn',
'tso': 'sot',
'tsonga': 'tsonga',
'tsw': 'tsw',
'tswana': 'tsw',
'tur': 'tur',
'turkish': 'tur',
'tut': 'tut',
'uk': 'ukr',
'ukr': 'ukr',
'un': 'und',
'und': 'und',
'urd': 'urd',
'urdu': 'urd',
'uz': 'uzb',
'uzb': 'uzb',
'ven': 'ven',
'vi': 'vie',
'vie': 'vie',
'wel': 'wel',
'welsh': 'wel',
'wen': 'wen',
'wol': 'wol',
'xho': 'xho',
'xhosa': 'xho',
'yid': 'yid',
'yor': 'yor',
'yu': 'ypk',
'zh': 'chi',
'zh-cn': 'chi',
'zh-tw': 'chi',
'zul': 'zul',
'zulu': 'zul',
}
return language_map.get(language.casefold())
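# Illustrative usage (values taken from the mapping above; lookups are
# case-insensitive via casefold, and unmapped inputs return None):
#   get_marc21_language('en')      -> 'eng'
#   get_marc21_language('English') -> 'eng'
#   get_marc21_language('xx')      -> None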
@public
def get_language_name(lang_or_key: "Nothing | str | Thing") -> Nothing | str:
if isinstance(lang_or_key, str):
lang = get_language(lang_or_key)
if not lang:
return lang_or_key
else:
lang = lang_or_key
user_lang = web.ctx.lang or 'en'
return safeget(lambda: lang['name_translated'][user_lang][0]) or lang.name # type: ignore[index]
@functools.cache
def convert_iso_to_marc(iso_639_1: str) -> str | None:
"""
e.g. 'en' -> 'eng'
"""
for lang in get_languages().values():
code = safeget(lambda: lang['identifiers']['iso_639_1'][0])
if code == iso_639_1:
return lang.code
return None
@public
def get_author_config():
return _get_author_config()
@web.memoize
def _get_author_config():
"""Returns the author config.
The results are cached on the first invocation.
Any changes to /config/author page require restarting the app.
"""
# Load the author config from the author.yml file in the author directory
with open(
'openlibrary/plugins/openlibrary/config/author/identifiers.yml'
) as in_file:
id_config = yaml.safe_load(in_file)
identifiers = [
Storage(id) for id in id_config.get('identifiers', []) if 'name' in id
]
return Storage(identifiers=identifiers)
@public
def get_edition_config() -> Storage:
return _get_edition_config()
@web.memoize
def _get_edition_config():
"""Returns the edition config.
The results are cached on the first invocation. Any changes to /config/edition page require restarting the app.
This is cached because fetching and creating the Thing object was taking about 20ms of time for each book request.
"""
thing = web.ctx.site.get('/config/edition')
classifications = [Storage(t.dict()) for t in thing.classifications if 'name' in t]
roles = thing.roles
with open(
'openlibrary/plugins/openlibrary/config/edition/identifiers.yml'
) as in_file:
id_config = yaml.safe_load(in_file)
identifiers = [
Storage(id) for id in id_config.get('identifiers', []) if 'name' in id
]
return Storage(
classifications=classifications, identifiers=identifiers, roles=roles
)
from openlibrary.core.olmarkdown import OLMarkdown
def get_markdown(text: str, safe_mode: bool = False) -> OLMarkdown:
md = OLMarkdown(source=text, safe_mode=safe_mode)
view._register_mdx_extensions(md)
md.postprocessors += view.wiki_processors
return md
class HTML(str):
__slots__ = ()
def __init__(self, html):
str.__init__(self, web.safeunicode(html))
def __repr__(self):
return "<html: %s>" % str.__repr__(self)
_websafe = web.websafe
def websafe(text: str) -> str:
if isinstance(text, HTML):
return text
elif isinstance(text, TemplateResult):
return web.safestr(text)
else:
return _websafe(text)
from openlibrary.plugins.upstream import adapter
from openlibrary.utils.olcompress import OLCompressor
from openlibrary.utils import olmemcache
import memcache
class UpstreamMemcacheClient:
"""Wrapper to memcache Client to handle upstream specific conversion and OL specific compression.
Compatible with memcache Client API.
"""
def __init__(self, servers):
self._client = memcache.Client(servers)
compressor = OLCompressor()
self.compress = compressor.compress
def decompress(*args, **kw) -> str:
d = json.loads(compressor.decompress(*args, **kw))
return json.dumps(adapter.unconvert_dict(d))
self.decompress = decompress
def get(self, key: str | None):
key = adapter.convert_key(key)
if key is None:
return None
try:
value = self._client.get(web.safestr(key))
except memcache.Client.MemcachedKeyError:
return None
return value and self.decompress(value)
def get_multi(self, keys):
keys = [adapter.convert_key(k) for k in keys]
keys = [web.safestr(k) for k in keys]
d = self._client.get_multi(keys)
return {
web.safeunicode(adapter.unconvert_key(k)): self.decompress(v)
for k, v in d.items()
}
if config.get('upstream_memcache_servers'):
olmemcache.Client = UpstreamMemcacheClient # type: ignore[assignment, misc]
# set config.memcache_servers only after olmemcache.Client is updated
config.memcache_servers = config.upstream_memcache_servers # type: ignore[attr-defined]
def _get_recent_changes():
site = web.ctx.get('site') or delegate.create_site()
web.ctx.setdefault("ip", "127.0.0.1")
# The recentchanges can have multiple revisions for a document if it has been
# modified more than once. Take only the most recent revision in that case.
visited = set()
def is_visited(key):
if key in visited:
return True
else:
visited.add(key)
return False
# ignore reverts
re_revert = web.re_compile(r"reverted to revision \d+")
def is_revert(r):
return re_revert.match(r.comment or "")
# take the 100 recent changes, filter them and take the first 50
q = {"bot": False, "limit": 100}
result = site.versions(q)
result = [r for r in result if not is_visited(r.key) and not is_revert(r)]
result = result[:50]
def process_thing(thing):
t = Storage()
for k in ["key", "title", "name", "displayname"]:
t[k] = thing[k]
t['type'] = Storage(key=thing.type.key)
return t
for r in result:
r.author = r.author and process_thing(r.author)
r.thing = process_thing(site.get(r.key, r.revision))
return result
def _get_recent_changes2():
"""New recent changes for around the library.
This function returns the message to display for each change.
    The message is obtained by calling the `recentchanges/$kind/message.html` template.
If `$var ignore=True` is set by the message template, the change is ignored.
"""
if 'env' not in web.ctx:
delegate.fakeload()
q = {"bot": False, "limit": 100}
changes = web.ctx.site.recentchanges(q)
def is_ignored(c):
return (
# c.kind=='update' allow us to ignore update recent changes on people
c.kind == 'update'
or
# ignore change if author has been deleted (e.g. spammer)
(c.author and c.author.type.key == '/type/delete')
)
def render(c):
t = get_template("recentchanges/" + c.kind + "/message") or get_template(
"recentchanges/default/message"
)
return t(c)
messages = [render(c) for c in changes if not is_ignored(c)]
messages = [m for m in messages if str(m.get("ignore", "false")).lower() != "true"]
return messages
_get_recent_changes = web.memoize(_get_recent_changes, expires=5 * 60, background=True)
_get_recent_changes2 = web.memoize(
_get_recent_changes2, expires=5 * 60, background=True
)
@public
def _get_blog_feeds():
url = "https://blog.openlibrary.org/feed/"
try:
stats.begin("get_blog_feeds", url=url)
tree = ET.fromstring(requests.get(url).text)
except Exception:
# Handle error gracefully.
logging.getLogger("openlibrary").error(
"Failed to fetch blog feeds", exc_info=True
)
return []
finally:
stats.end()
def parse_item(item):
pubdate = datetime.datetime.strptime(
item.find("pubDate").text, '%a, %d %b %Y %H:%M:%S +0000'
).isoformat()
return {
"title": item.find("title").text,
"link": item.find("link").text,
"pubdate": pubdate,
}
return [parse_item(item) for item in tree.findall(".//item")]
_get_blog_feeds = cache.memcache_memoize(
_get_blog_feeds, key_prefix="upstream.get_blog_feeds", timeout=5 * 60
)
@public
def is_jsdef():
return False
@public
def jsdef_get(obj, key, default=None):
"""
foo.get(KEY, default) isn't defined in js, so we can't use that construct
in our jsdef methods. This helper function provides a workaround, and works
in both environments.
"""
return obj.get(key, default)
@public
def get_donation_include() -> str:
ia_host = get_ia_host(allow_dev=True)
# The following allows archive.org staff to test banners without
# needing to reload openlibrary services
if ia_host != "archive.org":
script_src = f"https://{ia_host}/includes/donate.js"
else:
script_src = "/cdn/archive.org/donate.js"
if 'ymd' in (web_input := web.input()):
# Should be eg 20220101 (YYYYMMDD)
if len(web_input.ymd) == 8 and web_input.ymd.isdigit():
script_src += '?' + urllib.parse.urlencode({'ymd': web_input.ymd})
else:
raise ValueError('?ymd should be 8 digits (eg 20220101)')
html = (
"""
<div id="donato"></div>
<script src="%s" data-platform="ol"></script>
"""
% script_src
)
return html
@public
def get_ia_host(allow_dev: bool = False) -> str:
if allow_dev:
web_input = web.input()
dev_host = web_input.pop("dev_host", "") # e.g. `www-user`
if dev_host and re.match('^[a-zA-Z0-9-.]+$', dev_host):
return dev_host + ".archive.org"
return "archive.org"
@public
def item_image(image_path: str | None, default: str | None = None) -> str | None:
if image_path is None:
return default
if image_path.startswith('https:'):
return image_path
return "https:" + image_path
@public
def get_blog_feeds() -> list[Storage]:
def process(post):
post = Storage(post)
post.pubdate = parse_datetime(post.pubdate)
return post
return [process(post) for post in _get_blog_feeds()]
class Request:
path = property(lambda self: web.ctx.path)
home = property(lambda self: web.ctx.home)
domain = property(lambda self: web.ctx.host)
fullpath = property(lambda self: web.ctx.fullpath)
@property
def canonical_url(self) -> str:
"""Returns the https:// version of the URL.
Used for adding <meta rel="canonical" ..> tag in all web pages.
Required to make OL retain the page rank after https migration.
"""
readable_path = web.ctx.get('readable_path', web.ctx.path) or ''
query = web.ctx.query or ''
host = web.ctx.host or ''
if url := host + readable_path + query:
url = "https://" + url
parsed_url = urlparse(url)
parsed_query = parse_qs(parsed_url.query)
queries_to_exclude = ['sort', 'mode', 'v', 'type', 'debug']
canonical_query = {
q: v for q, v in parsed_query.items() if q not in queries_to_exclude
}
query = parse_urlencode(canonical_query, doseq=True)
parsed_url = parsed_url._replace(query=query)
url = urlunparse(parsed_url)
return url
return ''
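# Hedged behaviour sketch (illustrative; the host/path/query values are assumptions):
# with web.ctx.host='openlibrary.org', readable_path='/search' and
# query='?q=tolkien&sort=new&debug=true', Request().canonical_url would be
# 'https://openlibrary.org/search?q=tolkien' -- the 'sort', 'mode', 'v', 'type' and
# 'debug' parameters are excluded from the canonical form.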
@public
def render_once(key: str) -> bool:
rendered = web.ctx.setdefault('render_once', {})
if key in rendered:
return False
else:
rendered[key] = True
return True
@public
def today():
return datetime.datetime.today()
@public
def to_datetime(time: str):
return datetime.datetime.fromisoformat(time)
class HTMLTagRemover(HTMLParser):
def __init__(self):
super().__init__()
self.data = []
def handle_data(self, data):
self.data.append(data.strip())
def handle_endtag(self, tag):
self.data.append('\n' if tag in ('p', 'li') else ' ')
@public
def reformat_html(html_str: str, max_length: int | None = None) -> str:
"""
Reformats an HTML string, removing all opening and closing tags.
Adds a line break element between each set of text content.
Optionally truncates contents that exceeds the given max length.
returns: A reformatted HTML string
"""
parser = HTMLTagRemover()
# Must have a root node, otherwise the parser will fail
parser.feed(f'<div>{html_str}</div>')
content = [web.websafe(s) for s in parser.data if s]
if max_length:
return truncate(''.join(content), max_length).strip().replace('\n', '<br>')
else:
return ''.join(content).strip().replace('\n', '<br>')
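# Hedged example (illustrative): closing <p>/<li> tags become line breaks, which are
# then emitted as <br>, and text content is HTML-escaped, e.g.
#   reformat_html('<p>First</p><p>Second & third</p>')
#   # -> 'First<br>Second &amp; third'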
def get_colon_only_loc_pub(pair: str) -> tuple[str, str]:
"""
Get a tuple of a location and publisher name from an Internet Archive
publisher string. For use in simple location-publisher pairs with one colon.
>>> get_colon_only_loc_pub('City : Publisher Name')
('City', 'Publisher Name')
"""
pairs = pair.split(":")
if len(pairs) == 2:
location = pairs[0].strip(STRIP_CHARS)
publisher = pairs[1].strip(STRIP_CHARS)
return (location, publisher)
# Fall back to using the entire string as the publisher.
return ("", pair.strip(STRIP_CHARS))
def get_location_and_publisher(loc_pub: str) -> tuple[list[str], list[str]]:
"""
Parses locations and publisher names out of Internet Archive metadata
`publisher` strings. For use when there is no MARC record.
Returns a tuple of list[location_strings], list[publisher_strings].
E.g.
>>> get_location_and_publisher("[New York] : Random House")
(['New York'], ['Random House'])
>>> get_location_and_publisher("Londres ; New York ; Paris : Berlitz Publishing")
(['Londres', 'New York', 'Paris'], ['Berlitz Publishing'])
>>> get_location_and_publisher("Paris : Pearson ; San Jose (Calif.) : Adobe")
(['Paris', 'San Jose (Calif.)'], ['Pearson', 'Adobe'])
"""
if not loc_pub or not isinstance(loc_pub, str):
return ([], [])
if "Place of publication not identified" in loc_pub:
loc_pub = loc_pub.replace("Place of publication not identified", "")
loc_pub = loc_pub.translate({ord(char): None for char in REPLACE_CHARS})
# This operates on the notion that anything, even multiple items, to the
# left of a colon is a location, and the item immediately to the right of
# the colon is a publisher. This can be exploited by using
# string.split(";") because everything to the 'left' of a colon is a
# location, and whatever is to the right is a publisher.
if ":" in loc_pub:
locations: list[str] = []
publishers: list[str] = []
parts = loc_pub.split(";") if ";" in loc_pub else [loc_pub]
        # Track the indices of values placed into locations or publishers.
last_placed_index = 0
        # For each part, look for a colon, then extract everything to
        # the left as a location, and the item on the right as a publisher.
for index, part in enumerate(parts):
# This expects one colon per part. Two colons breaks our pattern.
# Breaking here gives the chance of extracting a
# `location : publisher` from one or more pairs with one semi-colon.
if part.count(":") > 1:
break
# Per the pattern, anything "left" of a colon in a part is a place.
if ":" in part:
location, publisher = get_colon_only_loc_pub(part)
publishers.append(publisher)
# Every index value between last_placed_index and the current
# index is a location.
for place in parts[last_placed_index:index]:
locations.append(place.strip(STRIP_CHARS))
locations.append(location) # Preserve location order.
last_placed_index = index + 1
        # Clean up any empty list items left over from strip() string replacement.
locations = [item for item in locations if item]
publishers = [item for item in publishers if item]
return (locations, publishers)
    # Fall back to an empty locations list, with the whole input as the publisher.
return ([], [loc_pub.strip(STRIP_CHARS)])
def setup() -> None:
"""Do required initialization"""
# monkey-patch get_markdown to use OL Flavored Markdown
view.get_markdown = get_markdown
# Provide alternate implementations for websafe and commify
web.websafe = websafe
web.template.Template.FILTERS['.html'] = websafe
web.template.Template.FILTERS['.xml'] = websafe
web.commify = commify
web.template.Template.globals.update(
{
'HTML': HTML,
'request': Request(),
'logger': logging.getLogger("openlibrary.template"),
'sum': sum,
'websafe': web.websafe,
}
)
from openlibrary.core import helpers as h
web.template.Template.globals.update(h.helpers)
if config.get('use_gzip') is True:
config.middleware.append(GZipMiddleware)
if __name__ == '__main__':
import doctest
doctest.testmod()
| 48,570 | Python | .py | 1,381 | 27.876177 | 125 | 0.57675 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
449 | table_of_contents.py | internetarchive_openlibrary/openlibrary/plugins/upstream/table_of_contents.py | from dataclasses import dataclass
from functools import cached_property
import json
from typing import Required, TypeVar, TypedDict
from infogami.infobase.client import Nothing, Thing
from openlibrary.core.models import ThingReferenceDict
import web
@dataclass
class TableOfContents:
entries: list['TocEntry']
@cached_property
def min_level(self) -> int:
return min(e.level for e in self.entries)
def is_complex(self) -> bool:
return any(e.extra_fields for e in self.entries)
@staticmethod
def from_db(
db_table_of_contents: list[dict] | list[str] | list[str | dict],
) -> 'TableOfContents':
def row(r: dict | str) -> 'TocEntry':
if isinstance(r, str):
# Legacy, can be just a plain string
return TocEntry(level=0, title=r)
else:
return TocEntry.from_dict(r)
return TableOfContents(
[
toc_entry
for r in db_table_of_contents
if not (toc_entry := row(r)).is_empty()
]
)
def to_db(self) -> list[dict]:
return [r.to_dict() for r in self.entries]
@staticmethod
def from_markdown(text: str) -> 'TableOfContents':
return TableOfContents(
[
TocEntry.from_markdown(line)
for line in text.splitlines()
if line.strip(" |")
]
)
def to_markdown(self) -> str:
return "\n".join(
(' ' * (r.level - self.min_level)) + r.to_markdown()
for r in self.entries
)
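# Hedged round-trip sketch (illustrative, not part of the original module):
#   toc = TableOfContents.from_markdown(
#       "* Chapter 1 | Introduction | 1\n* Chapter 2 | Methods | 15"
#   )
#   toc.to_db()        # -> [{'level': 1, 'label': 'Chapter 1', 'title': 'Introduction',
#                      #      'pagenum': '1'}, ...]
#   toc.to_markdown()  # -> '* Chapter 1 | Introduction | 1\n* Chapter 2 | Methods | 15'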
class AuthorRecord(TypedDict, total=False):
name: Required[str]
author: ThingReferenceDict | None
@dataclass
class TocEntry:
level: int
label: str | None = None
title: str | None = None
pagenum: str | None = None
authors: list[AuthorRecord] | None = None
subtitle: str | None = None
description: str | None = None
@cached_property
def extra_fields(self) -> dict:
required_fields = ('level', 'label', 'title', 'pagenum')
extra_fields = self.__annotations__.keys() - required_fields
return {
field: getattr(self, field)
for field in extra_fields
if getattr(self, field) is not None
}
@staticmethod
def from_dict(d: dict) -> 'TocEntry':
return TocEntry(
level=d.get('level', 0),
label=d.get('label'),
title=d.get('title'),
pagenum=d.get('pagenum'),
authors=d.get('authors'),
subtitle=d.get('subtitle'),
description=d.get('description'),
)
def to_dict(self) -> dict:
return {key: value for key, value in self.__dict__.items() if value is not None}
@staticmethod
def from_markdown(line: str) -> 'TocEntry':
"""
Parse one row of table of contents.
>>> def f(text):
... d = TocEntry.from_markdown(text)
... return (d.level, d.label, d.title, d.pagenum)
...
>>> f("* chapter 1 | Welcome to the real world! | 2")
(1, 'chapter 1', 'Welcome to the real world!', '2')
>>> f("Welcome to the real world!")
(0, None, 'Welcome to the real world!', None)
>>> f("** | Welcome to the real world! | 2")
(2, None, 'Welcome to the real world!', '2')
>>> f("|Preface | 1")
(0, None, 'Preface', '1')
>>> f("1.1 | Apple")
(0, '1.1', 'Apple', None)
"""
RE_LEVEL = web.re_compile(r"(\**)(.*)")
level, text = RE_LEVEL.match(line.strip()).groups()
if "|" in text:
tokens = text.split("|", 3)
label, title, page, extra_fields = pad(tokens, 4, '')
else:
title = text
label = page = ""
extra_fields = ''
return TocEntry(
level=len(level),
label=label.strip() or None,
title=title.strip() or None,
pagenum=page.strip() or None,
**json.loads(extra_fields or '{}'),
)
def to_markdown(self) -> str:
result = ' | '.join(
(
'*' * self.level
+ (' ' if self.label and self.level else '')
+ (self.label or ''),
self.title or '',
self.pagenum or '',
)
)
if self.extra_fields:
# We might have `Thing` objects instead of plain dicts...
result += ' | ' + json.dumps(self.extra_fields, cls=InfogamiThingEncoder)
return result
def is_empty(self) -> bool:
return all(
getattr(self, field) is None
for field in self.__annotations__
if field != 'level'
)
T = TypeVar('T')
def pad(seq: list[T], size: int, e: T) -> list[T]:
"""
>>> pad([1, 2], 4, 0)
[1, 2, 0, 0]
"""
seq = seq[:]
while len(seq) < size:
seq.append(e)
return seq
class InfogamiThingEncoder(json.JSONEncoder):
def default(self, obj):
"""
Custom JSON encoder for Infogami Thing objects.
"""
if isinstance(obj, Thing):
return obj.dict()
if isinstance(obj, Nothing):
return None
return super().default(obj)
| 5,351 | Python | .py | 157 | 24.923567 | 88 | 0.535943 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
450 | spamcheck.py | internetarchive_openlibrary/openlibrary/plugins/upstream/spamcheck.py | import re
from collections.abc import Iterable
import web
def get_spam_words() -> list[str]:
doc = web.ctx.site.store.get("spamwords") or {}
return doc.get("spamwords", [])
def get_spam_domains() -> list[str]:
doc = web.ctx.site.store.get("spamwords") or {}
return doc.get("domains", [])
def set_spam_words(words: Iterable[str]) -> None:
words = [w.strip() for w in words]
_update_spam_doc(spamwords=words)
def set_spam_domains(domains: Iterable[str]) -> None:
domains = [d.strip() for d in domains]
_update_spam_doc(domains=domains)
def _update_spam_doc(**kwargs) -> None:
doc = web.ctx.site.store.get("spamwords") or {}
doc.update(_key="spamwords", **kwargs)
web.ctx.site.store["spamwords"] = doc
def is_spam(i=None, allow_privileged_edits: bool = False) -> bool:
user = web.ctx.site.get_user()
if user:
# Allow admins and librarians to make edits:
if allow_privileged_edits and (user.is_admin() or user.is_librarian()):
return False
if user.is_read_only():
return True
# Prevent deleted users from making edits:
if user.type.key == '/type/delete':
return True
email = user and user.get_email() or ""
if is_spam_email(email):
return True
# For some odd reason, blocked accounts are still allowed to make edits.
# Hack to stop that.
account = user and user.get_account()
if account and account.get('status') != 'active':
return True
spamwords = get_spam_words()
if i is None:
i = web.input()
text = str(dict(i)).lower()
return any(re.search(w.lower(), text) for w in spamwords)
def is_spam_email(email: str) -> bool:
domain = email.split("@")[-1].lower()
return domain in get_spam_domains()
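# Illustrative sketch (assumes the "spamwords" store document lists the domain;
# 'example-spam.com' is a made-up value):
#   set_spam_domains(['example-spam.com'])
#   is_spam_email('bot@example-spam.com')  # -> True
#   is_spam_email('reader@gmail.com')      # -> False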
| 1,810 | Python | .py | 46 | 33.608696 | 79 | 0.643757 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
451 | addtag.py | internetarchive_openlibrary/openlibrary/plugins/upstream/addtag.py | """Handlers for adding and editing tags."""
import web
from typing import NoReturn
from infogami.core.db import ValidationException
from infogami.utils.view import add_flash_message, public
from infogami.infobase.client import ClientException
from infogami.utils import delegate
from openlibrary.accounts import get_current_user
from openlibrary.plugins.upstream import spamcheck, utils
from openlibrary.plugins.upstream.models import Tag
from openlibrary.plugins.upstream.addbook import safe_seeother, trim_doc
from openlibrary.plugins.upstream.utils import render_template
@public
def get_tag_types():
return ["subject", "work", "collection"]
def validate_tag(tag):
return tag.get('name', '') and tag.get('tag_type', '')
class addtag(delegate.page):
path = '/tag/add'
def GET(self):
"""Main user interface for adding a tag to Open Library."""
if not (patron := get_current_user()):
raise web.seeother(f'/account/login?redirect={self.path}')
if not self.has_permission(patron):
raise web.unauthorized(message='Permission denied to add tags')
i = web.input(name=None, type=None, sub_type=None)
return render_template('tag/add', i.name, i.type, subject_type=i.sub_type)
def has_permission(self, user) -> bool:
"""
Can a tag be added?
"""
return user and (
user.is_librarian() or user.is_super_librarian() or user.is_admin()
)
def POST(self):
i = web.input(
name="",
tag_type="",
tag_description="",
tag_plugins="",
)
if spamcheck.is_spam(i, allow_privileged_edits=True):
return render_template(
"message.html", "Oops", 'Something went wrong. Please try again later.'
)
if not (patron := get_current_user()):
raise web.seeother(f'/account/login?redirect={self.path}')
if not self.has_permission(patron):
raise web.unauthorized(message='Permission denied to add tags')
i = utils.unflatten(i)
if not validate_tag(i):
raise web.badrequest()
match = self.find_match(i) # returns None or Tag (if match found)
if match:
# tag match
return self.tag_match(match)
else:
# no match
return self.no_match(i)
def find_match(self, i: web.utils.Storage):
"""
Tries to find an existing tag that matches the data provided by the user.
"""
return Tag.find(i.name, i.tag_type)
def tag_match(self, match: list) -> NoReturn:
"""
Action for when an existing tag has been found.
Redirect user to the found tag's edit page to add any missing details.
"""
tag = web.ctx.site.get(match[0])
raise safe_seeother(tag.key + "/edit")
def no_match(self, i: web.utils.Storage) -> NoReturn:
"""
Action to take when no tags are found.
Creates a new Tag.
Redirects the user to the tag's home page
"""
key = Tag.create(i.name, i.tag_description, i.tag_type, i.tag_plugins)
raise safe_seeother(key)
class tag_edit(delegate.page):
path = r"(/tags/OL\d+T)/edit"
def GET(self, key):
if not web.ctx.site.can_write(key):
return render_template(
"permission_denied",
web.ctx.fullpath,
"Permission denied to edit " + key + ".",
)
tag = web.ctx.site.get(key)
if tag is None:
raise web.notfound()
return render_template('type/tag/edit', tag)
def POST(self, key):
tag = web.ctx.site.get(key)
if tag is None:
raise web.notfound()
i = web.input(_comment=None)
formdata = self.process_input(i)
try:
if not formdata or not validate_tag(formdata):
raise web.badrequest()
elif "_delete" in i:
tag = web.ctx.site.new(
key, {"key": key, "type": {"key": "/type/delete"}}
)
tag._save(comment=i._comment)
raise safe_seeother(key)
else:
tag.update(formdata)
tag._save(comment=i._comment)
raise safe_seeother(key)
except (ClientException, ValidationException) as e:
add_flash_message('error', str(e))
return render_template("type/tag/edit", tag)
def process_input(self, i):
i = utils.unflatten(i)
tag = trim_doc(i)
return tag
def setup():
"""Do required setup."""
pass
| 4,715 | Python | .py | 121 | 29.636364 | 87 | 0.596753 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
452 | adapter.py | internetarchive_openlibrary/openlibrary/plugins/upstream/adapter.py | """Adapter to provide upstream URL structure over existing Open Library Infobase interface.
Upstream requires:
/user/.* -> /people/.*
/b/.* -> /books/.*
/a/.* -> /authors/.*
This adapter module is a filter that sits above an Infobase server and fakes the new URL structure.
"""
import json
import web
import urllib
urls = (
'/([^/]*)/get',
'get',
'/([^/]*)/get_many',
'get_many',
'/([^/]*)/things',
'things',
'/([^/]*)/versions',
'versions',
'/([^/]*)/new_key',
'new_key',
'/([^/]*)/save(/.*)',
'save',
'/([^/]*)/save_many',
'save_many',
'/([^/]*)/reindex',
'reindex',
'/([^/]*)/account/(.*)',
'account',
'/([^/]*)/count_edits_by_user',
'count_edits_by_user',
'/.*',
'proxy',
)
app = web.application(urls, globals())
conversions = {
# '/people/': '/user/',
# '/books/': '/b/',
# '/authors/': '/a/',
# '/languages/': '/l/',
'/templates/': '/upstream/templates/',
'/macros/': '/upstream/macros/',
'/js/': '/upstream/js/',
'/css/': '/upstream/css/',
'/old/templates/': '/templates/',
'/old/macros/': '/macros/',
'/old/js/': '/js/',
'/old/css/': '/css/',
}
# inverse of conversions
iconversions = {v: k for k, v in conversions.items()}
class proxy:
def delegate(self, *args):
self.args = args
self.input = web.input(_method='GET', _unicode=False)
self.path = web.ctx.path
if web.ctx.method in ['POST', 'PUT']:
self.data = web.data()
else:
self.data = None
headers = {
k[len('HTTP_') :].replace('-', '_').lower(): v
for k, v in web.ctx.environ.items()
}
self.before_request()
try:
server = web.config.infobase_server
req = urllib.request.Request(
server + self.path + '?' + urllib.parse.urlencode(self.input),
self.data,
headers=headers,
)
req.get_method = lambda: web.ctx.method
response = urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
response = e
self.status_code = response.code
self.status_msg = response.msg
self.output = response.read()
self.headers = dict(response.headers.items())
for k in ['transfer-encoding', 'server', 'connection', 'date']:
self.headers.pop(k, None)
if self.status_code == 200:
self.after_request()
else:
self.process_error()
web.ctx.status = f"{self.status_code} {self.status_msg}"
web.ctx.headers = self.headers.items()
return self.output
GET = POST = PUT = DELETE = delegate
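    # Descriptive sketch of the request lifecycle: delegate() lets each subclass
    # rewrite the incoming request in before_request() (typically via convert_key/
    # convert_dict), forwards it to the configured infobase_server, and then lets
    # after_request() or process_error() translate keys in the response back with
    # unconvert_key/unconvert_dict before returning the output.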
def before_request(self):
if 'key' in self.input:
self.input.key = convert_key(self.input.key)
def after_request(self):
if self.output:
d = json.loads(self.output)
d = unconvert_dict(d)
self.output = json.dumps(d)
def process_error(self):
if self.output:
d = json.loads(self.output)
if 'key' in d:
d['key'] = unconvert_key(d['key'])
self.output = json.dumps(d)
def convert_key(key: str | None, mapping: dict[str, str] | None = None) -> str | None:
"""
>>> convert_key("/authors/OL1A", {'/authors/': '/a/'})
'/a/OL1A'
"""
mapping = mapping or conversions
if key is None:
return None
elif key == '/':
return '/upstream'
for new, old in mapping.items():
if key.startswith(new):
key2 = old + key[len(new) :]
return key2
return key
def convert_dict(d, mapping: dict[str, str] | None = None):
"""
>>> convert_dict({'author': {'key': '/authors/OL1A'}}, {'/authors/': '/a/'})
{'author': {'key': '/a/OL1A'}}
"""
mapping = mapping or conversions
if isinstance(d, dict):
if 'key' in d:
d['key'] = convert_key(d['key'], mapping)
for k, v in d.items():
d[k] = convert_dict(v, mapping)
return d
elif isinstance(d, list):
return [convert_dict(x, mapping) for x in d]
else:
return d
def unconvert_key(key: str | None) -> str | None:
if key == '/upstream':
return '/'
return convert_key(key, iconversions)
def unconvert_dict(d):
return convert_dict(d, iconversions)
class get(proxy):
def before_request(self):
if 'key' in (i := self.input):
i.key = convert_key(i.key)
class get_many(proxy):
def before_request(self):
if 'keys' in self.input:
keys = self.input['keys']
keys = json.loads(keys)
keys = [convert_key(k) for k in keys]
self.input['keys'] = json.dumps(keys)
def after_request(self):
d = json.loads(self.output)
d = {unconvert_key(k): unconvert_dict(v) for k, v in d.items()}
self.output = json.dumps(d)
class things(proxy):
def before_request(self):
if 'query' in self.input:
q = self.input.query
q = json.loads(q)
def convert_keys(q):
if isinstance(q, dict):
return {k: convert_keys(v) for k, v in q.items()}
elif isinstance(q, list):
return [convert_keys(x) for x in q]
elif isinstance(q, str):
return convert_key(q)
else:
return q
self.input.query = json.dumps(convert_keys(q))
def after_request(self):
if self.output:
d = json.loads(self.output)
if self.input.get('details', '').lower() == 'true':
d = unconvert_dict(d)
else:
d = [unconvert_key(key) for key in d]
self.output = json.dumps(d)
class versions(proxy):
def before_request(self):
if 'query' in self.input:
q = self.input.query
q = json.loads(q)
if 'key' in q:
q['key'] = convert_key(q['key'])
if 'author' in q:
q['author'] = convert_key(q['author'])
self.input.query = json.dumps(q)
def after_request(self):
if self.output:
d = json.loads(self.output)
for v in d:
v['author'] = v['author'] and unconvert_key(v['author'])
v['key'] = unconvert_key(v['key'])
self.output = json.dumps(d)
class new_key(proxy):
def after_request(self):
if self.output:
d = json.loads(self.output)
d = unconvert_key(d)
self.output = json.dumps(d)
class save(proxy):
def before_request(self):
self.path = f'/{self.args[0]}/save{convert_key(self.args[1])}'
d = json.loads(self.data)
d = convert_dict(d)
self.data = json.dumps(d)
class save_many(proxy):
def before_request(self):
i = web.input(_method="POST")
if 'query' in i:
q = json.loads(i['query'])
q = convert_dict(q)
i['query'] = json.dumps(q)
self.data = urllib.parse.urlencode(i)
class reindex(proxy):
def before_request(self):
i = web.input(_method="POST")
if 'keys' in i:
keys = [convert_key(k) for k in json.loads(i['keys'])]
i['keys'] = json.dumps(keys)
self.data = urllib.parse.urlencode(i)
class account(proxy):
def before_request(self):
i = self.input
if 'username' in i and i.username.startswith('/'):
i.username = convert_key(i.username)
def main():
import sys
import os
web.config.infobase_server = sys.argv[1].rstrip('/')
os.environ['REAL_SCRIPT_NAME'] = ''
sys.argv[1:] = sys.argv[2:]
app.run()
if __name__ == '__main__':
main()
# File: openlibrary/plugins/upstream/merge_authors.py
"""Merge authors.
"""
import re
import json
import web
from typing import Any
from infogami.infobase.client import ClientException
from infogami.utils import delegate
from infogami.utils.view import render_template, safeint
from openlibrary.accounts import get_current_user
from openlibrary.plugins.upstream.edits import process_merge_request
from openlibrary.plugins.worksearch.code import top_books_from_author
from openlibrary.utils import uniq, dicthash
class BasicRedirectEngine:
"""
Creates redirects whilst updating any references to the now-defunct record to point
to the newly identified canonical record.
"""
def make_redirects(self, master: str, duplicates: list[str]) -> list[dict]:
# Create the actual redirect objects
docs_to_save = [make_redirect_doc(key, master) for key in duplicates]
# find the references of each duplicate and convert them
references = self.find_all_references(duplicates)
docs = get_many(references)
docs_to_save.extend(
self.update_references(doc, master, duplicates) for doc in docs
)
return docs_to_save
def find_references(self, key):
"""
Returns keys of all the docs which have a reference to the given key.
All the subclasses must provide an implementation for this method.
:param str key: e.g. /works/OL1W
:rtype: list of str
"""
raise NotImplementedError()
def find_all_references(self, keys):
refs = {ref for key in keys for ref in self.find_references(key)}
return list(refs)
def update_references(self, doc: Any, master: str, duplicates: list[str]) -> Any:
"""
Converts references to any of the duplicates in the given doc to the master.
"""
if isinstance(doc, dict):
if list(doc) == ['key']:
return {"key": master} if doc['key'] in duplicates else doc
else:
return {
k: self.update_references(v, master, duplicates)
for k, v in doc.items()
}
elif isinstance(doc, list):
values = [self.update_references(v, master, duplicates) for v in doc]
return uniq(values, key=dicthash)
else:
return doc
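    # Illustrative example (editorial sketch): with master='/authors/OL1A' and
    # duplicates=['/authors/OL2A'], a referencing doc such as
    #   {'authors': [{'author': {'key': '/authors/OL2A'}}]}
    # is rewritten to point at '/authors/OL1A', and any duplicate list entries
    # produced by the rewrite are collapsed by uniq(..., key=dicthash).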
class BasicMergeEngine:
"""
Generic merge functionality useful for all types of merges.
"""
def __init__(self, redirect_engine):
"""
:param BasicRedirectEngine redirect_engine:
"""
self.redirect_engine = redirect_engine
def merge(self, master, duplicates):
docs = self.do_merge(master, duplicates)
return self.save(docs, master, duplicates)
def do_merge(self, master: str, duplicates: list[str]) -> list:
"""
Performs the merge and returns the list of docs to save.
:param str master: key of master doc
:param list of str duplicates: keys of duplicates
        :return: List of documents to save
"""
docs_to_save = []
docs_to_save.extend(self.redirect_engine.make_redirects(master, duplicates))
# Merge all the duplicates into the master.
master_doc = web.ctx.site.get(master).dict()
dups = get_many(duplicates)
for d in dups:
master_doc = self.merge_docs(master_doc, d)
docs_to_save.append(master_doc)
return docs_to_save
def save(self, docs, master, duplicates):
"""Saves the effected docs because of merge.
All the subclasses must provide an implementation for this method.
"""
raise NotImplementedError()
def merge_docs(self, master, dup):
"""Merge duplicate doc into master doc."""
keys = set(list(master) + list(dup))
return {k: self.merge_property(master.get(k), dup.get(k)) for k in keys}
def merge_property(self, a, b):
if isinstance(a, list) and isinstance(b, list):
return uniq(a + b, key=dicthash)
elif not a:
return b
else:
return a
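    # Illustrative behaviour (editorial sketch): two lists are concatenated and
    # de-duplicated, e.g. merge_property(['a'], ['a', 'b']) -> ['a', 'b']; for
    # scalar values the master's value wins unless it is empty, in which case the
    # duplicate's value is used.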
class AuthorRedirectEngine(BasicRedirectEngine):
def find_references(self, key):
q = {"type": "/type/edition", "authors": key, "limit": 10000}
edition_keys = web.ctx.site.things(q)
editions = get_many(edition_keys)
work_keys_1 = [w['key'] for e in editions for w in e.get('works', [])]
q = {"type": "/type/work", "authors": {"author": {"key": key}}, "limit": 10000}
work_keys_2 = web.ctx.site.things(q)
return edition_keys + work_keys_1 + work_keys_2
class AuthorMergeEngine(BasicMergeEngine):
def merge_docs(self, master, dup):
# avoid merging other types.
if dup['type']['key'] == '/type/author':
master = BasicMergeEngine.merge_docs(self, master, dup)
if dup.get('name') and not name_eq(dup['name'], master.get('name') or ''):
master.setdefault('alternate_names', []).append(dup['name'])
if 'alternate_names' in master:
master['alternate_names'] = uniq(
master['alternate_names'], key=space_squash_and_strip
)
return master
def save(self, docs, master, duplicates):
# There is a bug (#89) due to which old revisions of the docs are being sent to
# save. Collecting all the possible information to detect the problem and
# saving it in datastore. See that data here:
# https://openlibrary.org/admin/inspect/store?type=merge-authors-debug&name=bad_merge&value=true
mc = self._get_memcache()
debug_doc = {
'type': 'merge-authors-debug',
'memcache': mc
and {
k: json.loads(v)
for k, v in mc.get_multi([doc['key'] for doc in docs]).items()
},
'docs': docs,
}
result = web.ctx.site.save_many(
docs,
comment='merge authors',
action="merge-authors",
data={"master": master, "duplicates": list(duplicates)},
)
before_revs = {doc['key']: doc.get('revision') for doc in docs}
after_revs = {row['key']: row['revision'] for row in result}
# Bad merges are happening when we are getting non-recent docs. That can be
# identified by checking difference in the revision numbers before/after save
bad_merge = any(
after_revs[key] > before_revs[key] + 1
for key in after_revs
if before_revs[key] is not None
)
debug_doc['bad_merge'] = str(bad_merge).lower()
debug_doc['result'] = result
key = 'merge_authors/%d' % web.ctx.site.seq.next_value('merge-authors-debug')
web.ctx.site.store[key] = debug_doc
return result
def _get_memcache(self):
from openlibrary.plugins.openlibrary import connection
return connection._memcache
re_whitespace = re.compile(r'\s+')
def space_squash_and_strip(s):
return re_whitespace.sub(' ', s).strip()
def name_eq(n1, n2):
return space_squash_and_strip(n1) == space_squash_and_strip(n2)
def fix_table_of_contents(table_of_contents: list[str | dict]) -> list:
"""
    Some books have a malformed table_of_contents; convert it into the correct format.
"""
def row(r):
if isinstance(r, str):
level = 0
label = ""
title = web.safeunicode(r)
pagenum = ""
elif 'value' in r:
level = 0
label = ""
title = web.safeunicode(r['value'])
pagenum = ""
else:
level = safeint(r.get('level', '0'), 0)
label = r.get('label', '')
title = r.get('title', '')
pagenum = r.get('pagenum', '')
r = web.storage(level=level, label=label, title=title, pagenum=pagenum)
return r
return [row for row in map(row, table_of_contents) if any(row.values())]
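# Illustrative example (editorial sketch): legacy rows may be plain strings or
# {'value': ...} dicts; both are normalised into storage rows, e.g.
#   fix_table_of_contents(['Intro', {'value': 'Chapter 1'}])
# yields two rows titled 'Intro' and 'Chapter 1' (level 0, empty label/pagenum),
# while rows whose fields are all empty are dropped.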
def get_many(keys: list[str]) -> list[dict]:
def process(doc):
# some books have bad table_of_contents. Fix them to avoid failure on save.
if doc['type']['key'] == "/type/edition" and 'table_of_contents' in doc:
doc['table_of_contents'] = fix_table_of_contents(doc['table_of_contents'])
return doc
return [process(thing.dict()) for thing in web.ctx.site.get_many(list(keys))]
def make_redirect_doc(key, redirect):
return {"key": key, "type": {"key": "/type/redirect"}, "location": redirect}
class merge_authors(delegate.page):
path = '/authors/merge'
def is_enabled(self):
user = web.ctx.site.get_user()
return "merge-authors" in web.ctx.features or (user and user.is_admin())
def filter_authors(self, keys):
docs = web.ctx.site.get_many(["/authors/" + k for k in keys])
d = {doc.key: doc.type.key for doc in docs}
return [k for k in keys if d.get("/authors/" + k) == '/type/author']
def GET(self):
i = web.input(key=[], mrid=None)
keys = uniq(i.key)
# filter bad keys
keys = self.filter_authors(keys)
# sort keys by lowest OL number
keys = sorted(keys, key=lambda key: int(key[2:-1]))
user = get_current_user()
can_merge = user and (user.is_admin() or user.is_super_librarian())
return render_template(
'merge/authors',
keys,
top_books_from_author=top_books_from_author,
mrid=i.mrid,
can_merge=can_merge,
)
def POST(self):
i = web.input(key=[], master=None, merge_key=[], mrid=None, comment=None)
keys = uniq(i.key)
selected = uniq(i.merge_key)
user = get_current_user()
can_merge = user and (user.is_admin() or user.is_super_librarian())
can_request_merge = not can_merge and (user and user.is_librarian())
# filter bad keys
keys = self.filter_authors(keys)
        # it doesn't make sense to merge the master with itself.
if i.master in selected:
selected.remove(i.master)
formdata = web.storage(master=i.master, selected=selected)
if not i.master or len(selected) == 0:
return render_template(
"merge/authors",
keys,
top_books_from_author=top_books_from_author,
formdata=formdata,
mrid=i.mrid,
)
elif can_request_merge:
# Create merge author request:
selected.insert(0, i.master)
data = {
'mr_type': 2,
'action': 'create-pending',
'olids': ','.join(selected),
}
if i.comment:
data['comment'] = i.comment
result = process_merge_request('create-request', data)
mrid = result.get('id', None)
username = user.get('key').split('/')[-1]
redir_url = f'/merges?submitter={username}'
if mrid:
redir_url = f'{redir_url}#mrid-{mrid}'
raise web.seeother(redir_url)
else:
            # Redirect to the master. The master page will display a progress bar and call merge_authors_json to trigger the merge.
redir_url = (
f'/authors/{i.master}/-/?merge=true&duplicates={",".join(selected)}'
)
if i.mrid:
redir_url = f'{redir_url}&mrid={i.mrid}'
if i.comment:
redir_url = f'{redir_url}&comment={i.comment}'
raise web.seeother(redir_url)
class merge_authors_json(delegate.page):
"""JSON API for merge authors.
This is called from the master author page to trigger the merge while displaying progress.
"""
path = "/authors/merge"
encoding = "json"
def is_enabled(self):
user = web.ctx.site.get_user()
return "merge-authors" in web.ctx.features or (user and user.is_admin())
def POST(self):
data = json.loads(web.data())
master = data['master']
duplicates = data['duplicates']
mrid = data.get('mrid', None)
comment = data.get('comment', None)
olids = data.get('olids', '')
engine = AuthorMergeEngine(AuthorRedirectEngine())
try:
result = engine.merge(master, duplicates)
if mrid:
# Update the request
rtype = 'update-request'
data = {'action': 'approve', 'mrid': mrid}
else:
# Create new request
rtype = 'create-request'
data = {'mr_type': 2, 'olids': olids, 'action': 'create-merged'}
if comment:
data['comment'] = comment
process_merge_request(rtype, data)
except ClientException as e:
raise web.badrequest(json.loads(e.json))
return delegate.RawText(json.dumps(result), content_type="application/json")
def setup():
pass
# File: openlibrary/plugins/upstream/edits.py
"""Librarian Edits
"""
import json
import web
from openlibrary import accounts
from openlibrary.core.edits import CommunityEditsQueue, get_status_for_view
from infogami.utils import delegate
from infogami.utils.view import render_template
def response(status='ok', **kwargs):
return {'status': status, **kwargs}
def process_merge_request(rtype, data):
user = accounts.get_current_user()
username = user['key'].split('/')[-1]
# Request types can be: create-request, update-request
if rtype == 'create-request':
resp = community_edits_queue.create_request(username, **data)
elif rtype == 'update-request':
resp = community_edits_queue.update_request(username, **data)
else:
resp = response(status='error', error='Unknown request type')
return resp
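# Illustrative payload (editorial sketch; the field values are hypothetical):
# a pending work-merge request could be created with
#   process_merge_request('create-request', {
#       'mr_type': CommunityEditsQueue.TYPE['WORK_MERGE'],
#       'action': 'create-pending',
#       'olids': 'OL1W,OL2W',
#       'comment': 'Duplicate import of the same work.',
#   })
# which delegates to community_edits_queue.create_request() below.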
class community_edits_queue(delegate.page):
path = '/merges'
def GET(self):
i = web.input(
page=1,
limit=25,
mode="open",
submitter=None,
reviewer=None,
order='desc',
status=None,
)
merge_requests = CommunityEditsQueue.get_requests(
page=int(i.page),
limit=int(i.limit),
mode=i.mode,
submitter=i.submitter,
reviewer=i.reviewer,
order=f'updated {i.order}',
status=i.status,
).list()
total_found = {
"open": CommunityEditsQueue.get_counts_by_mode(
mode='open', submitter=i.submitter, reviewer=i.reviewer
),
"closed": CommunityEditsQueue.get_counts_by_mode(
mode='closed', submitter=i.submitter, reviewer=i.reviewer
),
"submitters": CommunityEditsQueue.get_submitters(),
"reviewers": CommunityEditsQueue.get_reviewers(),
}
librarians = {
'submitters': CommunityEditsQueue.get_submitters(),
'reviewers': CommunityEditsQueue.get_reviewers(),
}
return render_template(
'merge_request_table/merge_request_table',
total_found,
librarians,
merge_requests=merge_requests,
)
def POST(self):
data = json.loads(web.data())
resp = process_merge_request(data.pop('rtype', ''), data)
return delegate.RawText(json.dumps(resp), content_type='application/json')
@staticmethod
def create_request(
username,
action='',
mr_type=None,
olids='',
comment: str | None = None,
primary: str | None = None,
):
def is_valid_action(action):
return action in ('create-pending', 'create-merged')
def needs_unique_url(mr_type):
return mr_type in (
CommunityEditsQueue.TYPE['WORK_MERGE'],
CommunityEditsQueue.TYPE['AUTHOR_MERGE'],
)
if is_valid_action(action):
olid_list = olids.split(',')
title = community_edits_queue.create_title(mr_type, olid_list)
url = community_edits_queue.create_url(mr_type, olid_list, primary=primary)
# Validate URL
is_valid_url = True
if needs_unique_url(mr_type) and CommunityEditsQueue.exists(url):
is_valid_url = False
if is_valid_url:
if action == 'create-pending':
result = CommunityEditsQueue.submit_request(
url, username, title=title, comment=comment, mr_type=mr_type
)
elif action == 'create-merged':
result = CommunityEditsQueue.submit_request(
url,
username,
title=title,
comment=comment,
reviewer=username,
status=CommunityEditsQueue.STATUS['MERGED'],
mr_type=mr_type,
)
resp = (
response(id=result)
if result
else response(status='error', error='Request creation failed.')
)
else:
resp = response(
status='error',
error='A merge request for these items already exists.',
)
else:
resp = response(
status='error',
error=f'Action "{action}" is invalid for this request type.',
)
return resp
@staticmethod
def update_request(username, action='', mrid=None, comment=None):
# Comment on existing request:
if action == 'comment':
if comment:
CommunityEditsQueue.comment_request(mrid, username, comment)
resp = response()
else:
resp = response(status='error', error='No comment sent in request.')
# Assign to existing request:
elif action == 'claim':
result = CommunityEditsQueue.assign_request(mrid, username)
resp = response(**result)
# Unassign from existing request:
elif action == 'unassign':
CommunityEditsQueue.unassign_request(mrid)
status = get_status_for_view(CommunityEditsQueue.STATUS['PENDING'])
resp = response(newStatus=status)
# Close request by approving:
elif action == 'approve':
CommunityEditsQueue.update_request_status(
mrid, CommunityEditsQueue.STATUS['MERGED'], username, comment=comment
)
resp = response()
# Close request by declining:
elif action == 'decline':
CommunityEditsQueue.update_request_status(
mrid, CommunityEditsQueue.STATUS['DECLINED'], username, comment=comment
)
resp = response()
# Unknown request:
else:
resp = response(
status='error',
error=f'Action "{action}" is invalid for this request type.',
)
return resp
@staticmethod
def create_url(mr_type: int, olids: list[str], primary: str | None = None) -> str:
if mr_type == CommunityEditsQueue.TYPE['WORK_MERGE']:
primary_param = f'&primary={primary}' if primary else ''
return f'/works/merge?records={",".join(olids)}{primary_param}'
elif mr_type == CommunityEditsQueue.TYPE['AUTHOR_MERGE']:
return f'/authors/merge?key={"&key=".join(olids)}'
return ''
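    # Illustrative output (editorial sketch): a work merge of OL1W and OL2W with
    # OL1W as primary yields '/works/merge?records=OL1W,OL2W&primary=OL1W', and an
    # author merge of OL1A and OL2A yields '/authors/merge?key=OL1A&key=OL2A'.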
@staticmethod
def create_title(mr_type: int, olids: list[str]) -> str:
if mr_type == CommunityEditsQueue.TYPE['WORK_MERGE']:
for olid in olids:
book = web.ctx.site.get(f'/works/{olid}')
if book and book.title:
return book.title
elif mr_type == CommunityEditsQueue.TYPE['AUTHOR_MERGE']:
for olid in olids:
author = web.ctx.site.get(f'/authors/{olid}')
if author and author.name:
return author.name
return 'Unknown record'
def setup():
pass
# File: openlibrary/plugins/upstream/addbook.py
"""Handlers for adding and editing books."""
import io
import web
import json
import csv
import datetime
from typing import Literal, overload, NoReturn
from infogami import config
from infogami.core.db import ValidationException
from infogami.utils import delegate
from infogami.utils.view import safeint, add_flash_message
from infogami.infobase.client import ClientException
from openlibrary.plugins.worksearch.search import get_solr
from openlibrary.core.helpers import uniq
from openlibrary.i18n import gettext as _
from openlibrary import accounts
import logging
from openlibrary.plugins.upstream import spamcheck, utils
from openlibrary.plugins.upstream.models import Author, Edition, Work
from openlibrary.plugins.upstream.utils import render_template, fuzzy_find
from openlibrary.plugins.upstream.account import as_admin
from openlibrary.plugins.recaptcha import recaptcha
import urllib
from web.webapi import SeeOther
logger = logging.getLogger("openlibrary.book")
def get_recaptcha():
def recaptcha_exempt() -> bool:
"""Check to see if account is an admin, or more than two years old."""
user = web.ctx.site.get_user()
account = user and user.get_account()
if not (user and account):
return False
if account.has_tag("trusted-user") or user.is_admin() or user.is_librarian():
return True
create_dt = account.creation_time()
now_dt = datetime.datetime.utcnow()
delta = now_dt - create_dt
return delta.days > 30
def is_plugin_enabled(name) -> bool:
plugin_names = delegate.get_plugins()
return name in plugin_names or "openlibrary.plugins." + name in plugin_names
if is_plugin_enabled('recaptcha') and not recaptcha_exempt():
public_key = config.plugin_recaptcha.public_key
private_key = config.plugin_recaptcha.private_key
return recaptcha.Recaptcha(public_key, private_key)
else:
return None
def make_author(key: str, name: str) -> Author:
"""
Use author_key and author_name and return an Author.
>>> make_author("OL123A", "Samuel Clemens")
<Author: '/authors/OL123A'>
"""
key = "/authors/" + key
return web.ctx.site.new(
key, {"key": key, "type": {"key": "/type/author"}, "name": name}
)
def make_work(doc: dict[str, str | list]) -> web.Storage:
"""
    Take a dictionary and convert it into a work in web.Storage format. This is used as a
wrapper for results from solr.select() when adding books from /books/add and
checking for existing works or editions.
"""
w = web.storage(doc)
w.authors = [
make_author(key, name)
for key, name in zip(doc.get('author_key', []), doc.get('author_name', []))
]
w.cover_url = "/images/icons/avatar_book-sm.png"
w.setdefault('ia', [])
w.setdefault('first_publish_year', None)
return w
@overload
def new_doc(type_: Literal["/type/author"], **data) -> Author: ...
@overload
def new_doc(type_: Literal["/type/edition"], **data) -> Edition: ...
@overload
def new_doc(type_: Literal["/type/work"], **data) -> Work: ...
def new_doc(type_: str, **data) -> Author | Edition | Work:
"""
    Create a new OL doc item.
:param str type_: object type e.g. /type/edition
:return: the newly created document
"""
key = web.ctx.site.new_key(type_)
data['key'] = key
data['type'] = {"key": type_}
return web.ctx.site.new(key, data)
class DocSaveHelper:
"""Simple utility to collect the saves and save them together at the end."""
def __init__(self):
self.docs = []
def save(self, doc) -> None:
"""Adds the doc to the list of docs to be saved."""
if not isinstance(doc, dict): # thing
doc = doc.dict()
self.docs.append(doc)
def commit(self, **kw) -> None:
"""Saves all the collected docs."""
if self.docs:
web.ctx.site.save_many(self.docs, **kw)
def create_authors_from_form_data(
self, authors: list[dict], author_names: list[str], _test: bool = False
) -> bool:
"""
Create any __new__ authors in the provided array. Updates the authors
dicts _in place_ with the new key.
:param list[dict] authors: e.g. [{author: {key: '__new__'}}]
:return: Whether new author(s) were created
"""
created = False
for author_dict, author_name in zip(authors, author_names):
if author_dict['author']['key'] == '__new__':
created = True
if not _test:
doc = new_doc('/type/author', name=author_name)
self.save(doc)
author_dict['author']['key'] = doc.key
return created
def encode_url_path(url: str) -> str:
"""Encodes the path part of the url to avoid issues with non-latin characters as
non-latin characters was breaking `web.seeother`.
>>> encode_url_path('/books/OL10M/Вас_ил/edit?mode=add-work')
'/books/OL10M/%D0%92%D0%B0%D1%81_%D0%B8%D0%BB/edit?mode=add-work'
>>> encode_url_path('')
''
>>> encode_url_path('/')
'/'
>>> encode_url_path('/books/OL11M/进入该海域?mode=add-work')
'/books/OL11M/%E8%BF%9B%E5%85%A5%E8%AF%A5%E6%B5%B7%E5%9F%9F?mode=add-work'
""" # noqa: RUF002
result = urllib.parse.urlparse(url)
correct_path = "/".join(urllib.parse.quote(part) for part in result.path.split("/"))
result = result._replace(path=correct_path)
return result.geturl()
def safe_seeother(url: str) -> SeeOther:
"""Safe version of `web.seeother` which encodes the url path appropriately using
`encode_url_path`."""
return web.seeother(encode_url_path(url))
class addbook(delegate.page):
path = "/books/add"
def GET(self):
"""Main user interface for adding a book to Open Library."""
if not self.has_permission():
return safe_seeother(f"/account/login?redirect={self.path}")
i = web.input(work=None, author=None)
work = i.work and web.ctx.site.get(i.work)
author = i.author and web.ctx.site.get(i.author)
# pre-filling existing author(s) if adding new edition from existing work page
authors = (work and work.authors) or []
if work and authors:
authors = [a.author for a in authors]
# pre-filling existing author if adding new work from author page
if author and author not in authors:
authors.append(author)
return render_template(
'books/add', work=work, authors=authors, recaptcha=get_recaptcha()
)
def has_permission(self) -> bool:
"""
Can a book be added?
"""
return web.ctx.site.can_write("/books/add")
def POST(self):
i = web.input(
title="",
book_title="",
publisher="",
publish_date="",
id_name="",
id_value="",
web_book_url="",
_test="false",
)
i.title = i.book_title
if spamcheck.is_spam(i, allow_privileged_edits=True):
return render_template(
"message.html", "Oops", 'Something went wrong. Please try again later.'
)
if not web.ctx.site.get_user():
recap = get_recaptcha()
if recap and not recap.validate():
return render_template(
'message.html',
'Recaptcha solution was incorrect',
'Please <a href="javascript:history.back()">go back</a> and try again.',
)
i = utils.unflatten(i)
saveutil = DocSaveHelper()
created_author = saveutil.create_authors_from_form_data(
i.authors, i.author_names, _test=i._test == 'true'
)
match = None if created_author else self.find_matches(i)
if i._test == 'true' and not isinstance(match, list):
if match:
return f'Matched <a href="{match.key}">{match.key}</a>'
else:
return 'No match found'
if isinstance(match, list):
# multiple matches
return render_template('books/check', i, match)
elif match and match.key.startswith('/books'):
# work match and edition match, match is an Edition
if i.web_book_url:
match.provider = [{"url": i.web_book_url, "format": "web"}]
return self.work_edition_match(match)
elif match and match.key.startswith('/works'):
# work match but not edition
work = match
return self.work_match(saveutil, work, i)
else:
# no match
return self.no_match(saveutil, i)
def find_matches(
self, i: web.utils.Storage
) -> None | Work | Edition | list[web.utils.Storage]:
"""
Tries to find an edition, or work, or multiple work candidates that match the
given input data.
Case#1: No match. None is returned.
Case#2: Work match but not edition. Work is returned.
        Case#3: Work match and edition match. Edition is returned.
Case#4: Multiple work match. List of works is returned.
:param web.utils.Storage i: addbook user supplied formdata
:return: None or Work or Edition or list of Works (as Storage objects) that are
likely matches.
"""
i.publish_year = i.publish_date and self.extract_year(i.publish_date)
author_key = i.authors and i.authors[0].author.key
# work is set from the templates/books/check.html page.
work_key = i.get('work')
        # work_key is set to 'none-of-these' when the user selects the none-of-these link.
if work_key == 'none-of-these':
return None # Case 1, from check page
work = work_key and web.ctx.site.get(work_key)
if work:
edition = self.try_edition_match(
work=work,
publisher=i.publisher,
publish_year=i.publish_year,
id_name=i.id_name,
id_value=i.id_value,
)
return edition or work # Case 3 or 2, from check page
edition = self.try_edition_match(
title=i.title,
author_key=author_key,
publisher=i.publisher,
publish_year=i.publish_year,
id_name=i.id_name,
id_value=i.id_value,
)
if edition:
return edition # Case 2 or 3 or 4, from add page
solr = get_solr()
        # Less exact Solr search than try_edition_match(); search by the supplied title and author only.
result = solr.select(
{'title': i.title, 'author_key': author_key.split("/")[-1]},
doc_wrapper=make_work,
q_op="AND",
)
if result.num_found == 0:
return None # Case 1, from add page
elif result.num_found == 1:
return result.docs[0] # Case 2
else:
return result.docs # Case 4
def extract_year(self, value: str) -> str:
"""
Extract just the 4 digit year from a date string.
:param str value: A freeform string representing a publication date.
:return: a four digit year
"""
m = web.re_compile(r"(\d\d\d\d)").search(value)
return m and m.group(1)
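    # Illustrative behaviour (editorial sketch): extract_year('Published July 1998')
    # returns '1998'; a value without a four-digit year returns None.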
def try_edition_match(
self,
work: web.Storage | None = None,
title: str | None = None,
author_key: str | None = None,
publisher: str | None = None,
publish_year: str | None = None,
id_name: str | None = None,
id_value: str | None = None,
) -> None | Edition | list[web.Storage]:
"""
Searches solr for potential edition matches.
:param str author_key: e.g. /author/OL1234A
:param str publish_year: yyyy
:param str id_name: from list of values in mapping below
:return: None, an Edition, or a list of Works (as web.Storage objects)
"""
# insufficient data
if not publisher and not publish_year and not id_value:
return None
q: dict = {}
work and q.setdefault('key', work.key.split("/")[-1])
title and q.setdefault('title', title)
author_key and q.setdefault('author_key', author_key.split('/')[-1])
publisher and q.setdefault('publisher', publisher)
        # There are some errors in the indexing of publish_year. Use publish_date until it is fixed.
publish_year and q.setdefault('publish_date', publish_year)
mapping = {
'isbn_10': 'isbn',
'isbn_13': 'isbn',
'lccn': 'lccn',
'oclc_numbers': 'oclc',
'ocaid': 'ia',
}
if id_value and id_name in mapping:
if id_name.startswith('isbn'):
id_value = id_value.replace('-', '')
q[mapping[id_name]] = id_value
solr = get_solr()
result = solr.select(q, doc_wrapper=make_work, q_op="AND")
if len(result.docs) > 1:
# found multiple work matches
return result.docs
elif len(result.docs) == 1:
# found one work match
work = result.docs[0]
publisher = publisher and fuzzy_find(
publisher, work.publisher, stopwords=("publisher", "publishers", "and")
)
editions = web.ctx.site.get_many(
["/books/" + key for key in work.edition_key]
)
for e in editions:
d: dict = {}
if publisher and (not e.publishers or e.publishers[0] != publisher):
continue
if publish_year and (
not e.publish_date
or publish_year != self.extract_year(e.publish_date)
):
continue
if id_value and id_name in mapping: # noqa: SIM102
if id_name not in e or id_value not in e[id_name]:
continue
# return the first good likely matching Edition
return e
return None
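    # Illustrative query (editorial sketch; values are hypothetical): for
    # title='Dune', publisher='Ace Books', publish_year='1990' and an ISBN-10,
    # try_edition_match() builds roughly
    #   {'title': 'Dune', 'publisher': 'Ace Books',
    #    'publish_date': '1990', 'isbn': '0441172717'}
    # and queries Solr with q_op="AND", so only works whose editions satisfy all
    # of the supplied fields are considered.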
def work_match(
self, saveutil: DocSaveHelper, work: Work, i: web.utils.Storage
) -> NoReturn:
"""
Action for when a work, but not edition, is matched.
        Saves a new edition of the work, created from the formdata i.
Redirects the user to the newly created edition page in edit
mode to add more details.
:param Work work: the matched work for this book
:param web.utils.Storage i: user supplied book formdata
"""
edition = self._make_edition(work, i)
saveutil.save(edition)
comment = utils.get_message("comment_add_book")
saveutil.commit(comment=comment, action="add-book")
raise safe_seeother(edition.url("/edit?mode=add-book"))
def work_edition_match(self, edition: Edition) -> NoReturn:
"""
Action for when an exact work and edition match have been found.
Redirect user to the found item's edit page to add any missing details.
"""
raise safe_seeother(edition.url("/edit?mode=found"))
def no_match(self, saveutil: DocSaveHelper, i: web.utils.Storage) -> NoReturn:
"""
Action to take when no matches are found.
Creates and saves both a Work and Edition.
Redirects the user to the work/edition edit page
in `add-work` mode.
"""
# Any new author has been created and added to
# saveutil, and author_key added to i
work = new_doc("/type/work", title=i.title, authors=i.authors)
edition = self._make_edition(work, i)
saveutil.save(work)
saveutil.save(edition)
comment = utils.get_message("comment_add_book")
saveutil.commit(action="add-book", comment=comment)
raise safe_seeother(edition.url("/edit?mode=add-work"))
def _make_edition(self, work: Work, i: web.utils.Storage) -> Edition:
"""
Uses formdata 'i' to create (but not save) an edition of 'work'.
"""
edition = new_doc(
"/type/edition",
works=[{"key": work.key}],
title=i.title,
publishers=[i.publisher],
publish_date=i.publish_date,
)
if i.get('web_book_url'):
edition.set_provider_data({"url": i.web_book_url, "format": "web"})
if i.get("id_name") and i.get("id_value"):
edition.set_identifiers([{"name": i.id_name, "value": i.id_value}])
return edition
# remove existing definitions of addbook and addauthor
delegate.pages.pop('/addbook', None)
delegate.pages.pop('/addauthor', None)
class addbook(delegate.page): # type: ignore[no-redef] # noqa: F811
def GET(self):
raise web.redirect("/books/add")
class addauthor(delegate.page):
def GET(self):
raise web.redirect("/authors")
def trim_value(value):
"""Trim strings, lists and dictionaries to remove empty/None values.
>>> trim_value("hello ")
'hello'
>>> trim_value("")
>>> trim_value([1, 2, ""])
[1, 2]
>>> trim_value({'x': 'a', 'y': ''})
{'x': 'a'}
>>> trim_value({'x': [""]})
None
"""
if isinstance(value, str):
value = value.strip()
return value or None
elif isinstance(value, list):
value = [v2 for v in value for v2 in [trim_value(v)] if v2 is not None]
return value or None
elif isinstance(value, dict):
value = {
k: v2 for k, v in value.items() for v2 in [trim_value(v)] if v2 is not None
}
return value or None
else:
return value
def trim_doc(doc):
"""Replace empty values in the document with Nones."""
return web.storage((k, trim_value(v)) for k, v in doc.items() if k[:1] not in "_{")
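# Illustrative note (editorial sketch): trim_doc also drops form bookkeeping keys
# whose names start with '_' or '{', so trim_doc({'title': ' Dune ', '_comment': 'x'})
# keeps only {'title': 'Dune'}.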
class SaveBookHelper:
"""Helper to save edition and work using the form data coming from edition edit and work edit pages.
This does the required trimming and processing of input data before saving.
"""
def __init__(self, work: Work | None, edition: Edition | None):
"""
:param Work|None work: None if editing an orphan edition
:param Edition|None edition: None if just editing work
"""
self.work = work
self.edition = edition
def save(self, formdata: web.Storage) -> None:
"""
Update work and edition documents according to the specified formdata.
"""
comment = formdata.pop('_comment', '')
user = accounts.get_current_user()
delete = (
user
and (user.is_admin() or user.is_super_librarian())
and formdata.pop('_delete', '')
)
formdata = utils.unflatten(formdata)
work_data, edition_data = self.process_input(formdata)
if not delete:
self.process_new_fields(formdata)
saveutil = DocSaveHelper()
if delete:
if self.edition:
self.delete(self.edition.key, comment=comment)
if self.work and self.work.edition_count == 0:
self.delete(self.work.key, comment=comment)
return
just_editing_work = edition_data is None
if work_data:
# Create any new authors that were added
saveutil.create_authors_from_form_data(
work_data.get("authors") or [], formdata.get('authors') or []
)
if not just_editing_work:
# Mypy misses that "not just_editing_work" means there is edition data.
assert self.edition
# Handle orphaned editions
new_work_key = (edition_data.get('works') or [{'key': None}])[0]['key']
if self.work is None and (
new_work_key is None or new_work_key == '__new__'
):
# i.e. not moving to another work, create empty work
self.work = self.new_work(self.edition)
edition_data.works = [{'key': self.work.key}]
work_data.key = self.work.key
elif self.work is not None and new_work_key is None:
# we're trying to create an orphan; let's not do that
edition_data.works = [{'key': self.work.key}]
if self.work is not None:
self.work.update(work_data)
saveutil.save(self.work)
if self.edition and edition_data:
# Create a new work if so desired
new_work_key = (edition_data.get('works') or [{'key': None}])[0]['key']
if new_work_key == "__new__" and self.work is not None:
new_work = self.new_work(self.edition)
edition_data.works = [{'key': new_work.key}]
new_work_options = formdata.get(
'new_work_options',
{
'copy_authors': 'no',
'copy_subjects': 'no',
},
)
if (
new_work_options.get('copy_authors') == 'yes'
and 'authors' in self.work
):
new_work.authors = self.work.authors
if new_work_options.get('copy_subjects') == 'yes':
for field in (
'subjects',
'subject_places',
'subject_times',
'subject_people',
):
if field in self.work:
new_work[field] = self.work[field]
self.work = new_work
saveutil.save(self.work)
identifiers = edition_data.pop('identifiers', [])
self.edition.set_identifiers(identifiers)
classifications = edition_data.pop('classifications', [])
self.edition.set_classifications(classifications)
self.edition.set_physical_dimensions(
edition_data.pop('physical_dimensions', None)
)
self.edition.set_weight(edition_data.pop('weight', None))
self.edition.set_toc_text(edition_data.pop('table_of_contents', None))
if edition_data.pop('translation', None) != 'yes':
edition_data.translation_of = None
edition_data.translated_from = None
if 'contributors' not in edition_data:
self.edition.contributors = []
providers = edition_data.pop('providers', [])
self.edition.set_providers(providers)
self.edition.update(edition_data)
saveutil.save(self.edition)
saveutil.commit(comment=comment, action="edit-book")
@staticmethod
def new_work(edition: Edition) -> Work:
return new_doc(
'/type/work',
title=edition.get('title'),
subtitle=edition.get('subtitle'),
covers=edition.get('covers', []),
)
@staticmethod
def delete(key, comment=""):
doc = web.ctx.site.new(key, {"key": key, "type": {"key": "/type/delete"}})
doc._save(comment=comment)
def process_new_fields(self, formdata: dict):
def f(name: str):
val = formdata.get(name)
return val and json.loads(val)
new_roles = f('select-role-json')
new_ids = f('select-id-json')
new_classifications = f('select-classification-json')
if new_roles or new_ids or new_classifications:
edition_config = web.ctx.site.get('/config/edition')
# TODO: take care of duplicate names
if new_roles:
edition_config.roles += [d.get('value') or '' for d in new_roles]
if new_ids:
edition_config.identifiers += [
{
"name": d.get('value') or '',
"label": d.get('label') or '',
"website": d.get("website") or '',
"notes": d.get("notes") or '',
}
for d in new_ids
]
if new_classifications:
edition_config.classifications += [
{
"name": d.get('value') or '',
"label": d.get('label') or '',
"website": d.get("website") or '',
"notes": d.get("notes") or '',
}
for d in new_classifications
]
as_admin(edition_config._save)("add new fields")
def process_input(self, i):
if 'edition' in i:
edition = self.process_edition(i.edition)
else:
edition = None
if 'work' in i and self.use_work_edits(i):
work = self.process_work(i.work)
else:
work = None
return work, edition
def process_edition(self, edition):
"""Process input data for edition."""
edition.publishers = edition.get('publishers', '').split(';')
edition.publish_places = edition.get('publish_places', '').split(';')
edition = trim_doc(edition)
if list(edition.get('physical_dimensions', [])) == ['units']:
edition.physical_dimensions = None
if list(edition.get('weight', [])) == ['units']:
edition.weight = None
for k in ['roles', 'identifiers', 'classifications']:
edition[k] = edition.get(k) or []
self._prevent_ocaid_deletion(edition)
return edition
def process_work(self, work: web.Storage) -> web.Storage:
"""
Process input data for work.
:param web.storage work: form data work info
"""
def read_subject(subjects):
"""
>>> list(read_subject("A,B,C,B")) == [u'A', u'B', u'C'] # str
True
>>> list(read_subject(r"A,B,C,B")) == [u'A', u'B', u'C'] # raw
True
>>> list(read_subject(u"A,B,C,B")) == [u'A', u'B', u'C'] # Unicode
True
>>> list(read_subject(""))
[]
"""
if not subjects:
return
f = io.StringIO(subjects.replace('\r\n', ''))
dedup = set()
for s in next(csv.reader(f, dialect='excel', skipinitialspace=True)):
if s.casefold() not in dedup:
yield s
dedup.add(s.casefold())
work.subjects = list(read_subject(work.get('subjects', '')))
work.subject_places = list(read_subject(work.get('subject_places', '')))
work.subject_times = list(read_subject(work.get('subject_times', '')))
work.subject_people = list(read_subject(work.get('subject_people', '')))
if ': ' in work.get('title', ''):
work.title, work.subtitle = work.title.split(': ', 1)
else:
work.subtitle = None
for k in ('excerpts', 'links'):
work[k] = work.get(k) or []
# ignore empty authors
work.authors = [
a
for a in work.get('authors', [])
if a.get('author', {}).get('key', '').strip()
]
return trim_doc(work)
def _prevent_ocaid_deletion(self, edition) -> None:
# Allow admins to modify ocaid
user = accounts.get_current_user()
if user and (user.is_admin() or user.is_super_librarian()):
return
# read ocaid from form data
ocaid = next(
(
id_['value']
for id_ in edition.get('identifiers', [])
if id_['name'] == 'ocaid'
),
None,
)
# 'self.edition' is the edition doc from the db and 'edition' is the doc from formdata
if (
self.edition
and self.edition.get('ocaid')
and self.edition.get('ocaid') != ocaid
):
logger.warning(
"Attempt to change ocaid of %s from %r to %r.",
self.edition.key,
self.edition.get('ocaid'),
ocaid,
)
raise ValidationException("Changing Internet Archive ID is not allowed.")
@staticmethod
def use_work_edits(formdata: web.Storage) -> bool:
"""
Check if the form data's work OLID matches the form data's edition's work OLID.
If they don't, then we ignore the work edits.
:param web.storage formdata: form data (parsed into a nested dict)
"""
if 'edition' not in formdata:
# No edition data -> just editing work, so work data matters
return True
has_edition_work = (
'works' in formdata.edition
and formdata.edition.works
and formdata.edition.works[0].key
)
if has_edition_work:
old_work_key = formdata.work.key
new_work_key = formdata.edition.works[0].key
return old_work_key == new_work_key
else:
# i.e. editing an orphan; so we care about the work
return True
class book_edit(delegate.page):
path = r"(/books/OL\d+M)/edit"
def GET(self, key):
i = web.input(v=None)
v = i.v and safeint(i.v, None)
if not web.ctx.site.can_write(key):
return render_template(
"permission_denied",
web.ctx.fullpath,
"Permission denied to edit " + key + ".",
)
edition = web.ctx.site.get(key, v)
if edition is None:
raise web.notfound()
work = (
edition.works
and edition.works[0]
or edition.make_work_from_orphaned_edition()
)
return render_template('books/edit', work, edition, recaptcha=get_recaptcha())
def POST(self, key):
i = web.input(v=None, work_key=None, _method="GET")
if spamcheck.is_spam(allow_privileged_edits=True):
return render_template(
"message.html", "Oops", 'Something went wrong. Please try again later.'
)
recap = get_recaptcha()
if recap and not recap.validate():
return render_template(
"message.html",
'Recaptcha solution was incorrect',
'Please <a href="javascript:history.back()">go back</a> and try again.',
)
v = i.v and safeint(i.v, None)
edition = web.ctx.site.get(key, v)
if edition is None:
raise web.notfound()
if edition.works:
work = edition.works[0]
else:
work = None
add = (
edition.revision == 1
and work
and work.revision == 1
and work.edition_count == 1
)
try:
helper = SaveBookHelper(work, edition)
helper.save(web.input())
if add:
add_flash_message("info", utils.get_message("flash_book_added"))
else:
add_flash_message("info", utils.get_message("flash_book_updated"))
if i.work_key and i.work_key.startswith('/works/'):
url = i.work_key
else:
url = edition.url()
raise safe_seeother(url)
except ClientException as e:
add_flash_message('error', e.args[-1] or e.json)
return self.GET(key)
except ValidationException as e:
add_flash_message('error', str(e))
return self.GET(key)
class work_edit(delegate.page):
path = r"(/works/OL\d+W)/edit"
def GET(self, key):
i = web.input(v=None, _method="GET")
v = i.v and safeint(i.v, None)
if not web.ctx.site.can_write(key):
return render_template(
"permission_denied",
web.ctx.fullpath,
"Permission denied to edit " + key + ".",
)
work = web.ctx.site.get(key, v)
if work is None:
raise web.notfound()
return render_template('books/edit', work, recaptcha=get_recaptcha())
def POST(self, key):
i = web.input(v=None, _method="GET")
if spamcheck.is_spam(allow_privileged_edits=True):
return render_template(
"message.html", "Oops", 'Something went wrong. Please try again later.'
)
recap = get_recaptcha()
if recap and not recap.validate():
return render_template(
"message.html",
'Recaptcha solution was incorrect',
'Please <a href="javascript:history.back()">go back</a> and try again.',
)
v = i.v and safeint(i.v, None)
work = web.ctx.site.get(key, v)
if work is None:
raise web.notfound()
try:
helper = SaveBookHelper(work, None)
helper.save(web.input())
add_flash_message("info", utils.get_message("flash_work_updated"))
raise safe_seeother(work.url())
except (ClientException, ValidationException) as e:
add_flash_message('error', str(e))
return self.GET(key)
class author_edit(delegate.page):
path = r"(/authors/OL\d+A)/edit"
def GET(self, key):
if not web.ctx.site.can_write(key):
return render_template(
"permission_denied",
web.ctx.fullpath,
"Permission denied to edit " + key + ".",
)
author = web.ctx.site.get(key)
if author is None:
raise web.notfound()
return render_template("type/author/edit", author)
def POST(self, key):
author = web.ctx.site.get(key)
if author is None:
raise web.notfound()
i = web.input(_comment=None)
formdata = self.process_input(i)
try:
if not formdata:
raise web.badrequest()
elif "_save" in i:
author.update(formdata)
author._save(comment=i._comment)
raise safe_seeother(key)
elif "_delete" in i:
author = web.ctx.site.new(
key, {"key": key, "type": {"key": "/type/delete"}}
)
author._save(comment=i._comment)
raise safe_seeother(key)
except (ClientException, ValidationException) as e:
add_flash_message('error', str(e))
author.update(formdata)
author['comment_'] = i._comment
return render_template("type/author/edit", author)
def process_input(self, i):
i = utils.unflatten(i)
if 'author' in i:
author = trim_doc(i.author)
alternate_names = author.get('alternate_names', None) or ''
author.alternate_names = uniq(
[author.name]
+ [
name.strip() for name in alternate_names.split('\n') if name.strip()
],
)[1:]
author.links = author.get('links') or []
return author
class daisy(delegate.page):
path = "(/books/.*)/daisy"
def GET(self, key):
page = web.ctx.site.get(key)
if not page:
raise web.notfound()
return render_template("books/daisy", page)
class work_identifiers(delegate.view):
# TODO: (cclauss) Fix typing in infogami.utils.delegate and remove type: ignore
suffix = "identifiers" # type: ignore[assignment]
types = ["/type/edition"] # type: ignore[assignment]
def POST(self, edition):
saveutil = DocSaveHelper()
i = web.input(isbn="")
isbn = i.get("isbn")
# Need to do some simple validation here. Perhaps just check if it's a number?
if len(isbn) == 10:
typ = "ISBN 10"
data = [{'name': 'isbn_10', 'value': isbn}]
elif len(isbn) == 13:
typ = "ISBN 13"
data = [{'name': 'isbn_13', 'value': isbn}]
else:
add_flash_message("error", "The ISBN number you entered was not valid")
raise web.redirect(web.ctx.path)
if edition.works:
work = edition.works[0]
else:
work = None
edition.set_identifiers(data)
saveutil.save(edition)
saveutil.commit(comment="Added an %s identifier." % typ, action="edit-book")
add_flash_message("info", "Thank you very much for improving that record!")
raise web.redirect(web.ctx.path)
def setup():
"""Do required setup."""
pass
# File: openlibrary/plugins/upstream/data.py
"""Code for handling /data/*.txt.gz URLs.
"""
import web
from infogami import config
from infogami.utils import delegate
from infogami.utils.view import public
import requests
IA_BASE_URL = config.get('ia_base_url')
def get_ol_dumps():
"""Get list of all archive.org items in the ol_exports collection uploaded by archive.org staff."""
url = (
IA_BASE_URL
+ '/advancedsearch.php?q=(ol_dump+OR+ol_cdump)+AND+collection:ol_exports&fl[]=identifier&output=json&rows=1000'
)
docs = requests.get(url).json()['response']['docs']
return sorted(doc['identifier'] for doc in docs)
# cache the result for half an hour
get_ol_dumps = web.memoize(get_ol_dumps, 30 * 60, background=True)
# public(get_ol_dumps)
def download_url(item, filename):
return f"{IA_BASE_URL}/download/{item}/{filename}"
# Should include openlibrary/data/dump.py split_dump's types at least
DUMP_PREFIXES = (
'',
'_authors',
'_covers_metadata',
'_editions',
'_works',
'_redirects',
'_deletes',
'_lists',
'_other',
'_deworks',
'_ratings',
'_reading-log',
)
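# Illustrative routing (editorial sketch): a request for
# /data/ol_dump_editions_latest.txt.gz matches ol_dump_latest with prefix
# '_editions', picks the newest ol_dump_* item returned by get_ol_dumps(), and
# redirects to the corresponding archive.org file built by download_url().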
class ol_dump_latest(delegate.page):
path = f"/data/ol_dump({'|'.join(DUMP_PREFIXES)})_latest.txt.gz"
def GET(self, prefix):
items = [item for item in get_ol_dumps() if item.startswith("ol_dump")]
if not items:
raise web.notfound()
item = items[-1]
filename = item.replace("dump", "dump" + prefix) + ".txt.gz"
raise web.found(download_url(item, filename))
class ol_cdump_latest(delegate.page):
path = "/data/ol_cdump_latest.txt.gz"
def GET(self):
items = [item for item in get_ol_dumps() if item.startswith("ol_cdump")]
if not items:
raise web.notfound()
item = items[-1]
raise web.found(download_url(item, item + ".txt.gz"))
class ol_dumps(delegate.page):
path = rf"/data/ol_dump({'|'.join(DUMP_PREFIXES)})_(\d\d\d\d-\d\d-\d\d).txt.gz"
def GET(self, prefix, date):
item = "ol_dump_" + date
if item not in get_ol_dumps():
raise web.notfound()
else:
filename = "ol_dump" + prefix + "_" + date + ".txt.gz"
raise web.found(download_url(item, filename))
class ol_cdumps(delegate.page):
path = r"/data/ol_cdump_(\d\d\d\d-\d\d-\d\d).txt.gz"
def GET(self, date):
item = "ol_cdump_" + date
if item not in get_ol_dumps():
raise web.notfound()
else:
raise web.found(download_url(item, item + ".txt.gz"))
def setup():
pass
# File: openlibrary/plugins/upstream/forms.py
import web
from infogami.infobase.client import ClientException
from infogami.core import forms
from openlibrary.i18n import lgettext as _
from openlibrary.utils.form import (
Form,
Textbox,
Email,
Password,
Checkbox,
Hidden,
Validator,
RegexpValidator,
)
from openlibrary import accounts
from openlibrary.accounts import InternetArchiveAccount
from . import spamcheck
def find_account(username=None, lusername=None, email=None):
return accounts.find(username=username, lusername=lusername, email=email)
def find_ia_account(email=None):
ia_account = InternetArchiveAccount.get(email=email)
return ia_account
Login = Form(
Textbox('username', description=_('Username'), klass='required'),
Password('password', description=_('Password'), klass='required'),
Hidden('redirect'),
)
forms.login = Login
email_already_used = Validator(
_("No user registered with this email address"),
lambda email: find_account(email=email) is not None,
)
email_not_already_used = Validator(
_("Email already registered"), lambda email: not find_ia_account(email=email)
)
email_not_disposable = Validator(
_("Disposable email not permitted"),
lambda email: not email.lower().endswith('dispostable.com'),
)
email_domain_not_blocked = Validator(
_("Your email provider is not recognized."),
lambda email: not spamcheck.is_spam_email(email),
)
username_validator = Validator(
_("Username already used"),
lambda username: not find_account(lusername=username.lower()),
)
vlogin = RegexpValidator(
r"^[A-Za-z0-9\-_]{3,20}$", _('Must be between 3 and 20 letters and numbers')
)
vpass = RegexpValidator(r".{3,20}", _('Must be between 3 and 20 characters'))
vemail = RegexpValidator(
r".*@.*\..*",
_("Must be a valid email address"),
)
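# Illustrative behaviour (editorial sketch): vlogin accepts 'book_lover-99' but
# rejects 'ab' (too short) and names containing spaces; vemail only enforces a
# loose user@host.tld shape, so stricter checks are left to the other validators.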
class EqualToValidator(Validator):
def __init__(self, fieldname, message):
Validator.__init__(self, message, None)
self.fieldname = fieldname
self.form = None
def valid(self, value):
# self.form will be set by RegisterForm
return self.form[self.fieldname].value == value
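# Descriptive note: EqualToValidator.valid() relies on self.form being set;
# RegisterForm.validates() below assigns validator.form = self on every input's
# validators before delegating to Form.validates(), which is what makes
# cross-field checks (such as confirming that two fields match) possible.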
class RegisterForm(Form):
INPUTS = [
Email(
'email',
description=_('Email'),
klass='required',
id='emailAddr',
required="true",
validators=[
vemail,
email_not_already_used,
email_not_disposable,
email_domain_not_blocked,
],
),
Textbox(
'username',
description=_("Screen Name"),
klass='required',
help=_("Public and cannot be changed later."),
autocapitalize="off",
validators=[vlogin, username_validator],
pattern=vlogin.rexp.pattern,
title=vlogin.msg,
required="true",
),
Password(
'password',
description=_('Password'),
klass='required',
validators=[vpass],
minlength="3",
maxlength="20",
required="true",
),
Checkbox(
'ia_newsletter',
description=_(
'I want to receive news, announcements, and resources from the '
'<a href="https://archive.org/">Internet Archive</a>, the non-profit '
'that runs Open Library.'
),
),
]
def __init__(self):
Form.__init__(self, *self.INPUTS)
def validates(self, source):
# Set form in each validator so that validators
# like EqualToValidator can work
for input in self.inputs:
for validator in input.validators:
validator.form = self
return Form.validates(self, source)
Register = RegisterForm()
forms.register = RegisterForm()
def verify_password(password):
user = accounts.get_current_user()
if user is None:
return False
try:
username = user.key.split('/')[-1]
web.ctx.site.login(username, password)
except ClientException:
return False
return True
validate_password = Validator(_("Invalid password"), verify_password)
ForgotPassword = Form(
Textbox(
'email',
description=_("Your email address"),
validators=[vemail, email_already_used],
)
)
ResetPassword = Form(
Password('password', description=_("Choose a password"), validators=[vpass])
)
# File: openlibrary/plugins/upstream/tests/test_checkins.py
from openlibrary.plugins.upstream.checkins import (
is_valid_date,
make_date_string,
patron_check_ins,
)
class TestMakeDateString:
def test_formatting(self):
date_str = make_date_string(2000, 12, 22)
assert date_str == "2000-12-22"
def test_zero_padding(self):
date_str = make_date_string(2000, 2, 2)
split_date = date_str.split('-')
assert len(split_date) == 3
# Year has four characters:
assert len(split_date[0]) == 4
# Month has two characters:
assert len(split_date[1]) == 2
# Day has two characters:
assert len(split_date[2]) == 2
def test_partial_dates(self):
year_resolution = make_date_string(1998, None, None)
assert year_resolution == "1998"
month_resolution = make_date_string(1998, 10, None)
assert month_resolution == "1998-10"
missing_month = make_date_string(1998, None, 10)
assert missing_month == "1998"
class TestIsValidDate:
def test_date_validation(self):
assert is_valid_date(1999, None, None) is True
assert is_valid_date(1999, 2, None) is True
assert is_valid_date(1999, 2, 30) is True
# Must have a year:
assert is_valid_date(None, 1, 21) is False
# Must have a month if there is a day:
assert is_valid_date(1999, None, 22) is False
class TestValidateData:
def setup_method(self):
self.checkins = patron_check_ins()
self.valid_data = {
'edition_key': '/books/OL1234M',
'event_type': 3,
'year': 2000,
'month': 3,
'day': 7,
}
self.missing_event = {
'edition_key': '/books/OL1234M',
'year': 2000,
'month': 3,
'day': 7,
}
self.invalid_date = {
'edition_key': '/books/OL1234M',
'event_type': 3,
'month': 3,
'day': 7,
}
self.unknown_event = {
'edition_key': '/books/OL1234M',
'event_type': 54321,
'year': 2000,
'month': 3,
'day': 7,
}
def test_validate_data(self):
assert self.checkins.validate_data(self.valid_data) is True
assert self.checkins.validate_data(self.missing_event) is False
assert self.checkins.validate_data(self.invalid_date) is False
assert self.checkins.validate_data(self.unknown_event) is False
| 2,493 | Python | .py | 69 | 27.043478 | 71 | 0.574151 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
459 | test_utils.py | internetarchive_openlibrary/openlibrary/plugins/upstream/tests/test_utils.py | from openlibrary.mocks.mock_infobase import MockSite
from .. import utils
from openlibrary.catalog.add_book.tests.conftest import add_languages # noqa: F401
import web
import pytest
def test_url_quote():
assert utils.url_quote('https://foo bar') == 'https%3A%2F%2Ffoo+bar'
assert utils.url_quote('abc') == 'abc'
assert utils.url_quote('Kabitā') == 'Kabit%C4%81'
assert utils.url_quote('Kabit\u0101') == 'Kabit%C4%81'
def test_urlencode():
f = utils.urlencode
assert f({}) == '', 'empty dict'
assert f([]) == '', 'empty list'
assert f({'q': 'hello'}) == 'q=hello', 'basic dict'
assert f({'q': ''}) == 'q=', 'empty param value'
assert f({'q': None}) == 'q=None', 'None param value'
assert f([('q', 'hello')]) == 'q=hello', 'basic list'
assert f([('x', '3'), ('x', '5')]) == 'x=3&x=5', 'list with multi keys'
assert f({'q': 'a b c'}) == 'q=a+b+c', 'handles spaces'
assert f({'q': 'a$$'}) == 'q=a%24%24', 'handles special ascii chars'
assert f({'q': 'héé'}) == 'q=h%C3%A9%C3%A9'
assert f({'q': 'héé'}) == 'q=h%C3%A9%C3%A9', 'handles unicode without the u?'
assert f({'q': 1}) == 'q=1', 'numbers'
assert f({'q': ['test']}) == 'q=%5B%27test%27%5D', 'list'
assert f({'q': 'αβγ'}) == 'q=%CE%B1%CE%B2%CE%B3', 'unicode without the u'
assert f({'q': 'αβγ'.encode()}) == 'q=%CE%B1%CE%B2%CE%B3', 'uf8 encoded unicode'
assert f({'q': 'αβγ'}) == 'q=%CE%B1%CE%B2%CE%B3', 'unicode'
def test_entity_decode():
    assert utils.entity_decode('&gt;foo') == '>foo'
    assert utils.entity_decode('&lt;h1&gt;') == '<h1>'
def test_set_share_links():
class TestContext:
def __init__(self):
self.share_links = None
test_context = TestContext()
utils.set_share_links(url='https://foo.com', title="bar", view_context=test_context)
assert test_context.share_links == [
{
'text': 'Facebook',
'url': 'https://www.facebook.com/sharer/sharer.php?u=https%3A%2F%2Ffoo.com',
},
{
'text': 'Twitter',
'url': 'https://twitter.com/intent/tweet?url=https%3A%2F%2Ffoo.com&via=openlibrary&text=Check+this+out%3A+bar',
},
{
'text': 'Pinterest',
'url': 'https://pinterest.com/pin/create/link/?url=https%3A%2F%2Ffoo.com&description=Check+this+out%3A+bar',
},
]
def test_set_share_links_unicode():
# example work that has a unicode title: https://openlibrary.org/works/OL14930766W/Kabit%C4%81
class TestContext:
def __init__(self):
self.share_links = None
test_context = TestContext()
utils.set_share_links(
url='https://foo.\xe9', title='b\u0101', view_context=test_context
)
assert test_context.share_links == [
{
'text': 'Facebook',
'url': 'https://www.facebook.com/sharer/sharer.php?u=https%3A%2F%2Ffoo.%C3%A9',
},
{
'text': 'Twitter',
'url': 'https://twitter.com/intent/tweet?url=https%3A%2F%2Ffoo.%C3%A9&via=openlibrary&text=Check+this+out%3A+b%C4%81',
},
{
'text': 'Pinterest',
'url': 'https://pinterest.com/pin/create/link/?url=https%3A%2F%2Ffoo.%C3%A9&description=Check+this+out%3A+b%C4%81',
},
]
def test_item_image():
assert utils.item_image('//foo') == 'https://foo'
assert utils.item_image(None, 'bar') == 'bar'
assert utils.item_image(None) is None
def test_canonical_url():
web.ctx.path = '/authors/Ayn_Rand'
web.ctx.query = ''
web.ctx.host = 'www.openlibrary.org'
request = utils.Request()
url = 'https://www.openlibrary.org/authors/Ayn_Rand'
assert request.canonical_url == url
web.ctx.query = '?sort=newest'
url = 'https://www.openlibrary.org/authors/Ayn_Rand'
assert request.canonical_url == url
web.ctx.query = '?page=2'
url = 'https://www.openlibrary.org/authors/Ayn_Rand?page=2'
assert request.canonical_url == url
web.ctx.query = '?page=2&sort=newest'
url = 'https://www.openlibrary.org/authors/Ayn_Rand?page=2'
assert request.canonical_url == url
web.ctx.query = '?sort=newest&page=2'
url = 'https://www.openlibrary.org/authors/Ayn_Rand?page=2'
assert request.canonical_url == url
web.ctx.query = '?sort=newest&page=2&mode=e'
url = 'https://www.openlibrary.org/authors/Ayn_Rand?page=2'
assert request.canonical_url == url
web.ctx.query = '?sort=newest&page=2&mode=e&test=query'
url = 'https://www.openlibrary.org/authors/Ayn_Rand?page=2&test=query'
assert request.canonical_url == url
web.ctx.query = '?sort=new&mode=2'
url = 'https://www.openlibrary.org/authors/Ayn_Rand'
assert request.canonical_url == url
def test_get_coverstore_url(monkeypatch):
from infogami import config
monkeypatch.delattr(config, "coverstore_url", raising=False)
assert utils.get_coverstore_url() == "https://covers.openlibrary.org"
monkeypatch.setattr(config, "coverstore_url", "https://0.0.0.0:80", raising=False)
assert utils.get_coverstore_url() == "https://0.0.0.0:80"
# make sure trailing / is always stripped
monkeypatch.setattr(config, "coverstore_url", "https://0.0.0.0:80/", raising=False)
assert utils.get_coverstore_url() == "https://0.0.0.0:80"
def test_reformat_html():
f = utils.reformat_html
input_string = '<p>This sentence has 32 characters.</p>'
assert f(input_string, 10) == 'This sente...'
assert f(input_string) == 'This sentence has 32 characters.'
assert f(input_string, 5000) == 'This sentence has 32 characters.'
multi_line_string = """<p>This sentence has 32 characters.</p>
<p>This new sentence has 36 characters.</p>"""
assert (
f(multi_line_string) == 'This sentence has 32 '
'characters.<br>This new sentence has 36 characters.'
)
assert f(multi_line_string, 34) == 'This sentence has 32 characters.<br>T...'
assert f("<script>alert('hello')</script>", 34) == "alert('hello')"
assert f("<script>") == "<script>"
def test_strip_accents():
f = utils.strip_accents
assert f('Plain ASCII text') == 'Plain ASCII text'
assert f('Des idées napoléoniennes') == 'Des idees napoleoniennes'
# It only modifies Unicode Nonspacing Mark characters:
assert f('Bokmål : Standard Østnorsk') == 'Bokmal : Standard Østnorsk'
def test_get_abbrev_from_full_lang_name(
mock_site: MockSite, monkeypatch, add_languages # noqa F811
) -> None:
utils.get_languages.cache_clear()
monkeypatch.setattr(web, "ctx", web.storage())
web.ctx.site = mock_site
web.ctx.site.save(
{
"code": "eng",
"key": "/languages/eng",
"name": "English",
"type": {"key": "/type/language"},
"name_translated": {
"tg": ["ингилисӣ"],
"en": ["English"],
"ay": ["Inlish aru"],
"pnb": ["انگریزی"],
"na": ["Dorerin Ingerand"],
},
}
)
web.ctx.site.save(
{
"code": "fre",
"key": "/languages/fre",
"name": "French",
"type": {"key": "/type/language"},
"name_translated": {
"ay": ["Inlish aru"],
"fr": ["anglais"],
"es": ["spanish"],
},
}
)
web.ctx.site.save(
{
"code": "spa",
"key": "/languages/spa",
"name": "Spanish",
"type": {"key": "/type/language"},
}
)
assert utils.get_abbrev_from_full_lang_name("EnGlish") == "eng"
assert utils.get_abbrev_from_full_lang_name("Dorerin Ingerand") == "eng"
assert utils.get_abbrev_from_full_lang_name("ингилисӣ") == "eng"
assert utils.get_abbrev_from_full_lang_name("ингилиси") == "eng"
assert utils.get_abbrev_from_full_lang_name("Anglais") == "fre"
# See openlibrary/catalog/add_book/tests/conftest.py for imported languages.
with pytest.raises(utils.LanguageMultipleMatchError):
utils.get_abbrev_from_full_lang_name("frisian")
with pytest.raises(utils.LanguageMultipleMatchError):
utils.get_abbrev_from_full_lang_name("inlish aru")
with pytest.raises(utils.LanguageMultipleMatchError):
utils.get_abbrev_from_full_lang_name("Spanish")
with pytest.raises(utils.LanguageNoMatchError):
utils.get_abbrev_from_full_lang_name("Missing or non-existent language")
def test_get_colon_only_loc_pub() -> None:
# This is intended as a helper function, and its caller,
# get_location_and_publisher(), replaces certain characters,
# including "[" and "]".
test_cases = [
("", ("", "")),
("New York : Random House", ("New York", "Random House")),
("[New York] : [Random House]", ("[New York]", "[Random House]")),
("Random House,", ("", "Random House")),
]
for tc, expected in test_cases:
result = utils.get_colon_only_loc_pub(tc)
assert result == expected, f"For {tc}, expected {expected}, but got {result}"
def test_get_location_and_publisher() -> None:
# Empty string
assert utils.get_location_and_publisher("") == ([], [])
# Test simple case of "City : Publisher".
loc_pub = "Sŏul T'ŭkpyŏlsi : [Kimyŏngsa]"
assert utils.get_location_and_publisher(loc_pub) == (
["Sŏul T'ŭkpyŏlsi"],
["Kimyŏngsa"],
)
# Test multiple locations and one publisher.
loc_pub = "Londres ; [New York] ; Paris : Berlitz Publishing"
assert utils.get_location_and_publisher(loc_pub) == (
["Londres", "New York", "Paris"],
["Berlitz Publishing"],
)
# Test two locations and two corresponding publishers.
loc_pub = "Paris : Pearson ; San Jose (Calif.) : Adobe"
assert utils.get_location_and_publisher(loc_pub) == (
["Paris", "San Jose (Calif.)"],
["Pearson", "Adobe"],
)
# Test location not identified.
loc_pub = "[Place of publication not identified] : Pearson"
assert utils.get_location_and_publisher(loc_pub) == ([], ["Pearson"])
# "Pattern" breaker insofar as it has two colons separators in a row.
loc_pub = "London : Wise Publications ; Bury St. Edmunds, Suffolk : Exclusive Distributors : Music Sales Limited"
assert utils.get_location_and_publisher(loc_pub) == (
["London"],
["Wise Publications"],
)
# Bad input where Python thinks the IA metadata is a Python list
loc_pub = [ # type: ignore[assignment]
'Charleston, SC : Monkeypaw Press, LLC',
'Charleston, SC : [manufactured by CreateSpace]',
]
assert utils.get_location_and_publisher(loc_pub) == ([], [])
# Separating a not identified place with a comma
loc_pub = "[Place of publication not identified], BARBOUR PUB INC"
assert utils.get_location_and_publisher(loc_pub) == ([], ["BARBOUR PUB INC"])
| 11,048 | Python | .py | 244 | 37.934426 | 130 | 0.609526 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
460 | test_addbook.py | internetarchive_openlibrary/openlibrary/plugins/upstream/tests/test_addbook.py | """py.test tests for addbook"""
import web
from .. import addbook
from openlibrary import accounts
from openlibrary.mocks.mock_infobase import MockSite
def strip_nones(d):
return {k: v for k, v in d.items() if v is not None}
def mock_user():
return type(
'MockUser',
(object,),
{
'is_admin': lambda slf: False,
'is_super_librarian': lambda slf: False,
'is_librarian': lambda slf: False,
'is_usergroup_member': lambda slf, grp: False,
},
)()
class TestSaveBookHelper:
def setup_method(self, method):
web.ctx.site = MockSite()
def test_authors(self, monkeypatch):
monkeypatch.setattr(accounts, "get_current_user", mock_user)
s = addbook.SaveBookHelper(None, None)
def f(data):
return strip_nones(s.process_work(web.storage(data)))
assert f({}) == {}
assert f({"authors": []}) == {}
assert f({"authors": [{"type": "/type/author_role"}]}) == {}
def test_editing_orphan_creates_work(self, monkeypatch):
monkeypatch.setattr(accounts, "get_current_user", mock_user)
web.ctx.site.save_many(
[
{
"type": {"key": "/type/edition"},
"key": "/books/OL1M",
"title": "Original Edition Title",
}
]
)
edition = web.ctx.site.get("/books/OL1M")
formdata = web.storage(
{
"work--key": "",
"work--title": "Original Edition Title",
"edition--title": "Original Edition Title",
}
)
s = addbook.SaveBookHelper(None, edition)
s.save(formdata)
assert len(web.ctx.site.docs) == 2
assert web.ctx.site.get("/works/OL1W") is not None
assert web.ctx.site.get("/works/OL1W").title == "Original Edition Title"
def test_never_create_an_orphan(self, monkeypatch):
monkeypatch.setattr(accounts, "get_current_user", mock_user)
web.ctx.site.save_many(
[
{
"type": {"key": "/type/work"},
"key": "/works/OL1W",
"title": "Original Work Title",
},
{
"type": {"key": "/type/edition"},
"key": "/books/OL1M",
"title": "Original Edition Title",
"works": [{"key": "/works/OL1W"}],
},
]
)
edition = web.ctx.site.get("/books/OL1M")
work = web.ctx.site.get("/works/OL1W")
formdata = web.storage(
{
"work--key": "/works/OL1W",
"work--title": "Original Work Title",
"edition--title": "Original Edition Title",
"edition--works--0--key": "",
}
)
s = addbook.SaveBookHelper(work, edition)
s.save(formdata)
print(web.ctx.site.get("/books/OL1M").title)
assert web.ctx.site.get("/books/OL1M").works[0].key == "/works/OL1W"
def test_moving_orphan(self, monkeypatch):
monkeypatch.setattr(accounts, "get_current_user", mock_user)
web.ctx.site.save_many(
[
{
"type": {"key": "/type/edition"},
"key": "/books/OL1M",
"title": "Original Edition Title",
}
]
)
edition = web.ctx.site.get("/books/OL1M")
formdata = web.storage(
{
"work--key": "",
"work--title": "Original Edition Title",
"edition--title": "Original Edition Title",
"edition--works--0--key": "/works/OL1W",
}
)
s = addbook.SaveBookHelper(None, edition)
s.save(formdata)
assert len(web.ctx.site.docs) == 1
assert web.ctx.site.get("/books/OL1M").works[0].key == "/works/OL1W"
def test_moving_orphan_ignores_work_edits(self, monkeypatch):
monkeypatch.setattr(accounts, "get_current_user", mock_user)
web.ctx.site.save_many(
[
{
"type": {"key": "/type/work"},
"key": "/works/OL1W",
"title": "Original Work Title",
},
{
"type": {"key": "/type/edition"},
"key": "/books/OL1M",
"title": "Original Edition Title",
},
]
)
edition = web.ctx.site.get("/books/OL1M")
formdata = web.storage(
{
"work--key": "",
"work--title": "Modified Work Title",
"edition--title": "Original Edition Title",
"edition--works--0--key": "/works/OL1W",
}
)
s = addbook.SaveBookHelper(None, edition)
s.save(formdata)
assert web.ctx.site.get("/works/OL1W").title == "Original Work Title"
def test_editing_work(self, monkeypatch):
monkeypatch.setattr(accounts, "get_current_user", mock_user)
web.ctx.site.save_many(
[
{
"type": {"key": "/type/work"},
"key": "/works/OL1W",
"title": "Original Work Title",
},
{
"type": {"key": "/type/edition"},
"key": "/books/OL1M",
"title": "Original Edition Title",
"works": [{"key": "/works/OL1W"}],
},
]
)
work = web.ctx.site.get("/works/OL1W")
edition = web.ctx.site.get("/books/OL1M")
formdata = web.storage(
{
"work--key": "/works/OL1W",
"work--title": "Modified Work Title",
"edition--title": "Original Edition Title",
"edition--works--0--key": "/works/OL1W",
}
)
s = addbook.SaveBookHelper(work, edition)
s.save(formdata)
assert web.ctx.site.get("/works/OL1W").title == "Modified Work Title"
assert web.ctx.site.get("/books/OL1M").title == "Original Edition Title"
def test_editing_edition(self, monkeypatch):
monkeypatch.setattr(accounts, "get_current_user", mock_user)
web.ctx.site.save_many(
[
{
"type": {"key": "/type/work"},
"key": "/works/OL1W",
"title": "Original Work Title",
},
{
"type": {"key": "/type/edition"},
"key": "/books/OL1M",
"title": "Original Edition Title",
"works": [{"key": "/works/OL1W"}],
},
]
)
work = web.ctx.site.get("/works/OL1W")
edition = web.ctx.site.get("/books/OL1M")
formdata = web.storage(
{
"work--key": "/works/OL1W",
"work--title": "Original Work Title",
"edition--title": "Modified Edition Title",
"edition--works--0--key": "/works/OL1W",
}
)
s = addbook.SaveBookHelper(work, edition)
s.save(formdata)
assert web.ctx.site.get("/works/OL1W").title == "Original Work Title"
assert web.ctx.site.get("/books/OL1M").title == "Modified Edition Title"
def test_editing_work_and_edition(self, monkeypatch):
monkeypatch.setattr(accounts, "get_current_user", mock_user)
web.ctx.site.save_many(
[
{
"type": {"key": "/type/work"},
"key": "/works/OL1W",
"title": "Original Work Title",
},
{
"type": {"key": "/type/edition"},
"key": "/books/OL1M",
"title": "Original Edition Title",
"works": [{"key": "/works/OL1W"}],
},
]
)
work = web.ctx.site.get("/works/OL1W")
edition = web.ctx.site.get("/books/OL1M")
formdata = web.storage(
{
"work--key": "/works/OL1W",
"work--title": "Modified Work Title",
"edition--title": "Modified Edition Title",
"edition--works--0--key": "/works/OL1W",
}
)
s = addbook.SaveBookHelper(work, edition)
s.save(formdata)
assert web.ctx.site.get("/works/OL1W").title == "Modified Work Title"
assert web.ctx.site.get("/books/OL1M").title == "Modified Edition Title"
def test_moving_edition(self, monkeypatch):
monkeypatch.setattr(accounts, "get_current_user", mock_user)
web.ctx.site.save_many(
[
{
"type": {"key": "/type/work"},
"key": "/works/OL1W",
"title": "Original Work Title",
},
{
"type": {"key": "/type/edition"},
"key": "/books/OL1M",
"title": "Original Edition Title",
"works": [{"key": "/works/OL1W"}],
},
]
)
work = web.ctx.site.get("/works/OL1W")
edition = web.ctx.site.get("/books/OL1M")
formdata = web.storage(
{
"work--key": "/works/OL1W",
"work--title": "Original Work Title",
"edition--title": "Original Edition Title",
"edition--works--0--key": "/works/OL2W",
}
)
s = addbook.SaveBookHelper(work, edition)
s.save(formdata)
assert web.ctx.site.get("/books/OL1M").works[0].key == "/works/OL2W"
def test_moving_edition_ignores_changes_to_work(self, monkeypatch):
monkeypatch.setattr(accounts, "get_current_user", mock_user)
web.ctx.site.save_many(
[
{
"type": {"key": "/type/work"},
"key": "/works/OL1W",
"title": "Original Work Title",
},
{
"type": {"key": "/type/edition"},
"key": "/books/OL1M",
"title": "Original Edition Title",
"works": [{"key": "/works/OL1W"}],
},
]
)
work = web.ctx.site.get("/works/OL1W")
edition = web.ctx.site.get("/books/OL1M")
formdata = web.storage(
{
"work--key": "/works/OL1W",
"work--title": "Modified Work Title",
"edition--title": "Original Edition Title",
"edition--works--0--key": "/works/OL2W", # Changing work
}
)
s = addbook.SaveBookHelper(work, edition)
s.save(formdata)
assert web.ctx.site.get("/works/OL1W").title == "Original Work Title"
def test_moving_edition_to_new_work(self, monkeypatch):
monkeypatch.setattr(accounts, "get_current_user", mock_user)
web.ctx.site.save_many(
[
{
"type": {"key": "/type/work"},
"key": "/works/OL100W",
"title": "Original Work Title",
},
{
"type": {"key": "/type/edition"},
"key": "/books/OL1M",
"title": "Original Edition Title",
"works": [{"key": "/works/OL100W"}],
},
]
)
work = web.ctx.site.get("/works/OL100W")
edition = web.ctx.site.get("/books/OL1M")
formdata = web.storage(
{
"work--key": "/works/OL100W",
"work--title": "FOO BAR",
"edition--title": "Original Edition Title",
"edition--works--0--key": "__new__",
}
)
s = addbook.SaveBookHelper(work, edition)
s.save(formdata)
assert len(web.ctx.site.docs) == 3
# Should create new work with edition data
assert web.ctx.site.get("/works/OL1W") is not None
new_work = web.ctx.site.get("/books/OL1M").works[0]
assert new_work.key == "/works/OL1W"
assert new_work.title == "Original Edition Title"
# Should ignore edits to work data
assert web.ctx.site.get("/works/OL100W").title == "Original Work Title"
class TestMakeWork:
def test_make_author_adds_the_correct_key(self):
author_key = "OL123A"
author_name = "Samuel Clemens"
author = web.ctx.site.new(
"/authors/OL123A",
{"key": author_key, "type": {"key": "/type/author"}, "name": author_name},
)
assert addbook.make_author(author_key, author_name) == author
def test_make_work_does_indeed_make_a_work(self):
doc = {
"author_key": ["OL123A"],
"author_name": ["Samuel Clemens"],
"key": "/works/OL123W",
"type": "work",
"language": ["eng"],
"title": "The Celebrated Jumping Frog of Calaveras County",
}
author_key = "OL123A"
author_name = "Samuel Clemens"
author = web.ctx.site.new(
"/authors/OL123A",
{"key": author_key, "type": {"key": "/type/author"}, "name": author_name},
)
web_doc = web.Storage(
{
"author_key": ["OL123A"],
"author_name": ["Samuel Clemens"],
"key": "/works/OL123W",
"type": "work",
"language": ["eng"],
"title": "The Celebrated Jumping Frog of Calaveras County",
"authors": [author],
"cover_url": "/images/icons/avatar_book-sm.png",
"ia": [],
"first_publish_year": None,
}
)
assert addbook.make_work(doc) == web_doc
def test_make_work_handles_no_author(self):
doc = {
"key": "/works/OL123W",
"type": "work",
"language": ["eng"],
"title": "The Celebrated Jumping Frog of Calaveras County",
}
web_doc = web.Storage(
{
"key": "/works/OL123W",
"type": "work",
"language": ["eng"],
"title": "The Celebrated Jumping Frog of Calaveras County",
"authors": [],
"cover_url": "/images/icons/avatar_book-sm.png",
"ia": [],
"first_publish_year": None,
}
)
assert addbook.make_work(doc) == web_doc
| 14,902 | Python | .py | 385 | 25.397403 | 86 | 0.470087 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
461 | test_merge_authors.py | internetarchive_openlibrary/openlibrary/plugins/upstream/tests/test_merge_authors.py | import web
from infogami.infobase import client, common
from openlibrary.plugins.upstream.merge_authors import (
AuthorMergeEngine,
AuthorRedirectEngine,
BasicMergeEngine,
BasicRedirectEngine,
get_many,
make_redirect_doc,
space_squash_and_strip,
)
from openlibrary.utils import dicthash
def setup_module(mod):
# delegate.fakeload()
# models module imports openlibrary.code, which imports ol_infobase and that expects db_parameters.
web.config.db_parameters = {"dbn": "sqlite", "db": ":memory:"}
from openlibrary.plugins.upstream import models
models.setup()
class MockSite(client.Site):
class Seq:
def next_value(self, name):
return 1
def __init__(self):
self.seq = self.Seq()
self.store = {}
self.docs = {}
self.query_results = {}
def get(self, key):
doc = self.docs[key]
# black magic
data = self._process_dict(common.parse_query(doc))
return client.create_thing(self, key, data)
def things(self, query):
return self.query_results.get(dicthash(query), [])
def add_query(self, query, result):
self.query_results[dicthash(query)] = result
def get_dict(self, key):
return self.get(key).dict()
def get_many(self, keys):
return [self.get(k) for k in keys]
def _get_backreferences(self, thing):
return {}
def save_many(self, docs, comment=None, data=None, action=None):
data = data or {}
self.add(docs)
return [{'key': d['key'], 'revision': 1} for d in docs]
def add(self, docs):
self.docs.update((doc['key'], doc) for doc in docs)
def test_MockSite():
site = MockSite()
assert list(site.docs) == []
site.add(
[
{"key": "a", "type": {"key": "/type/object"}},
{"key": "b", "type": {"key": "/type/object"}},
]
)
assert list(site.docs) == ["a", "b"]
TEST_AUTHORS = web.storage(
{
"a": {"key": "/authors/a", "type": {"key": "/type/author"}, "name": "a"},
"b": {"key": "/authors/b", "type": {"key": "/type/author"}, "name": "b"},
"c": {"key": "/authors/c", "type": {"key": "/type/author"}, "name": "c"},
}
)
def test_make_redirect_doc():
assert make_redirect_doc("/a", "/b") == {
"key": "/a",
"type": {"key": "/type/redirect"},
"location": "/b",
}
class TestBasicRedirectEngine:
def test_update_references(self):
engine = BasicRedirectEngine()
doc = {
"key": "/a",
"type": {"key": "/type/object"},
"x1": [{"key": "/b"}],
"x2": [{"key": "/b"}, {"key": "/c"}],
"y1": {"a": "foo", "b": {"key": "/b"}},
"y2": [{"a": "foo", "b": {"key": "/b"}}, {"a": "foo", "b": {"key": "/c"}}],
}
assert engine.update_references(doc, "/c", ["/b"]) == {
"key": "/a",
"type": {"key": "/type/object"},
"x1": [{"key": "/c"}],
"x2": [{"key": "/c"}],
"y1": {"a": "foo", "b": {"key": "/c"}},
"y2": [{"a": "foo", "b": {"key": "/c"}}],
}
class TestBasicMergeEngine:
def test_merge_property(self):
engine = BasicMergeEngine(BasicRedirectEngine())
assert engine.merge_property(None, "hello") == "hello"
assert engine.merge_property("hello", None) == "hello"
assert engine.merge_property("foo", "bar") == "foo"
assert engine.merge_property(["foo"], ["bar"]) == ["foo", "bar"]
assert engine.merge_property(None, ["bar"]) == ["bar"]
def test_get_many():
web.ctx.site = MockSite()
# get_many should handle bad table_of_contents in the edition.
edition = {
"key": "/books/OL1M",
"type": {"key": "/type/edition"},
"table_of_contents": [{"type": "/type/text", "value": "foo"}],
}
type_edition = {"key": "/type/edition", "type": {"key": "/type/type"}}
web.ctx.site.add([edition, type_edition])
assert web.ctx.site.get("/books/OL1M").type.key == "/type/edition"
assert get_many(["/books/OL1M"])[0] == {
"key": "/books/OL1M",
"type": {"key": "/type/edition"},
"table_of_contents": [{"label": "", "level": 0, "pagenum": "", "title": "foo"}],
}
class TestAuthorRedirectEngine:
def setup_method(self, method):
web.ctx.site = MockSite()
def test_fix_edition(self):
update_references = AuthorRedirectEngine().update_references
edition = {
"key": "/books/OL2M",
"authors": [{"key": "/authors/OL2A"}],
"title": "book 1",
}
# edition having duplicate author
assert update_references(edition, "/authors/OL1A", ["/authors/OL2A"]) == {
"key": "/books/OL2M",
"authors": [{"key": "/authors/OL1A"}],
"title": "book 1",
}
# edition not having duplicate author
assert update_references(edition, "/authors/OL1A", ["/authors/OL3A"]) == {
"key": "/books/OL2M",
"authors": [{"key": "/authors/OL2A"}],
"title": "book 1",
}
def test_fix_work(self):
update_references = AuthorRedirectEngine().update_references
work = {
"key": "/works/OL2W",
"authors": [
{
"type": {"key": "/type/author_role"},
"author": {"key": "/authors/OL2A"},
}
],
"title": "book 1",
}
# work having duplicate author
assert update_references(work, "/authors/OL1A", ["/authors/OL2A"]) == {
"key": "/works/OL2W",
"authors": [
{
"type": {"key": "/type/author_role"},
"author": {"key": "/authors/OL1A"},
}
],
"title": "book 1",
}
# work not having duplicate author
assert update_references(work, "/authors/OL1A", ["/authors/OL3A"]) == {
"key": "/works/OL2W",
"authors": [
{
"type": {"key": "/type/author_role"},
"author": {"key": "/authors/OL2A"},
}
],
"title": "book 1",
}
class TestAuthorMergeEngine:
def setup_method(self, method):
self.engine = AuthorMergeEngine(AuthorRedirectEngine())
web.ctx.site = MockSite()
def test_redirection(self):
web.ctx.site.add([TEST_AUTHORS.a, TEST_AUTHORS.b, TEST_AUTHORS.c])
self.engine.merge("/authors/a", ["/authors/b", "/authors/c"])
# assert redirection
assert web.ctx.site.get("/authors/b").dict() == {
"key": "/authors/b",
"type": {"key": "/type/redirect"},
"location": "/authors/a",
}
assert web.ctx.site.get("/authors/c").dict() == {
"key": "/authors/c",
"type": {"key": "/type/redirect"},
"location": "/authors/a",
}
def test_alternate_names(self):
web.ctx.site.add([TEST_AUTHORS.a, TEST_AUTHORS.b, TEST_AUTHORS.c])
self.engine.merge("/authors/a", ["/authors/b", "/authors/c"])
assert web.ctx.site.get("/authors/a").alternate_names == ["b", "c"]
def test_photos(self):
a = dict(TEST_AUTHORS.a, photos=[1, 2])
b = dict(TEST_AUTHORS.b, photos=[3, 4])
web.ctx.site.add([a, b])
self.engine.merge("/authors/a", ["/authors/b"])
photos = web.ctx.site.get("/authors/a").photos
assert photos == [1, 2, 3, 4]
def test_links(self):
link_a = {"title": "link a", "url": "http://example.com/a"}
link_b = {"title": "link b", "url": "http://example.com/b"}
a = dict(TEST_AUTHORS.a, links=[link_a])
b = dict(TEST_AUTHORS.b, links=[link_b])
web.ctx.site.add([a, b])
self.engine.merge("/authors/a", ["/authors/b"])
links = web.ctx.site.get("/authors/a").dict()['links']
assert links == [link_a, link_b]
def test_new_field(self):
"""When the duplicate has a new field which is not there in the master,
        the new field must be copied to the master.
"""
birth_date = "1910-01-02"
a = TEST_AUTHORS.a
b = dict(TEST_AUTHORS.b, birth_date=birth_date)
web.ctx.site.add([a, b])
self.engine.merge("/authors/a", ["/authors/b"])
master_birth_date = web.ctx.site.get("/authors/a").get('birth_date')
assert master_birth_date == birth_date
def test_work_authors(self):
a = TEST_AUTHORS.a
b = TEST_AUTHORS.b
work_b = {
"key": "/works/OL1W",
"type": {"key": "/type/work"},
"authors": [{"type": "/type/author_role", "author": {"key": "/authors/b"}}],
}
web.ctx.site.add([a, b, work_b])
q = {
"type": "/type/work",
"authors": {"author": {"key": "/authors/b"}},
"limit": 10000,
}
web.ctx.site.add_query(q, ["/works/OL1W"])
self.engine.merge("/authors/a", ["/authors/b"])
assert web.ctx.site.get_dict("/works/OL1W") == {
"key": "/works/OL1W",
"type": {"key": "/type/work"},
"authors": [{"type": "/type/author_role", "author": {"key": "/authors/a"}}],
}
def test_dicthash():
assert dicthash({}) == dicthash({})
assert dicthash({"a": 1}) == dicthash({"a": 1})
assert dicthash({"a": 1, "b": 2}) == dicthash({"b": 2, "a": 1})
assert dicthash({}) != dicthash({"a": 1})
assert dicthash({"b": 1}) != dicthash({"a": 1})
def test_space_squash_and_strip():
f = space_squash_and_strip
assert f("Hello") == f("Hello")
assert f("Hello") != f("hello")
assert f("") == f("")
assert f("hello world") == f("hello world ")
| 9,917 | Python | .py | 252 | 30.376984 | 103 | 0.518276 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
462 | test_forms.py | internetarchive_openlibrary/openlibrary/plugins/upstream/tests/test_forms.py | from .. import forms
from .. import spamcheck
class TestRegister:
def test_validate(self, monkeypatch):
monkeypatch.setattr(forms, 'find_account', lambda **kw: None)
monkeypatch.setattr(forms, 'find_ia_account', lambda **kw: None)
monkeypatch.setattr(spamcheck, "get_spam_domains", list)
f = forms.Register()
d = {
'username': 'foo',
'email': '[email protected]',
'password': 'foo123',
'password2': 'foo123',
}
assert f.validates(d)
| 542 | Python | .py | 15 | 27.733333 | 72 | 0.591603 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
463 | test_related_carousels.py | internetarchive_openlibrary/openlibrary/plugins/upstream/tests/test_related_carousels.py | from .. import models
def test_related_subjects():
subjects = {
"In library",
"Conduct of life",
"London (England)",
"Science Fiction & Fantasy",
"Self-experimentation in medicine in fiction",
"Accessible book",
"Physicians in fiction",
"Fiction",
"England in fiction",
"OverDrive",
"Supernatural",
"Scottish Horror tales",
"Horror fiction",
"Mystery and detective stories",
"Physicians",
"Horror",
"Classic Literature",
"Open Library Staff Picks",
"Protected DAISY",
"Self-experimentation in medicine",
"open_syllabus_project",
"Multiple personality in fiction",
"Conduct of life in fiction",
"Supernatural in fiction",
"Juvenile fiction",
"History and criticism",
"Horror tales",
"English fiction",
"Social conditions",
"Horror stories",
"Multiple personality",
"Internet Archive Wishlist",
"François",
"Remove period.",
"Remove &",
"remove '",
}
expected_subjects = {
"Conduct of life",
"Physicians in fiction",
"England in fiction",
"Supernatural",
"Scottish Horror tales",
"Horror fiction",
"Mystery and detective stories",
"Physicians",
"Horror",
"Classic Literature",
"Multiple personality in fiction",
"Conduct of life in fiction",
"Supernatural in fiction",
"Juvenile fiction",
"History and criticism",
"Horror tales",
"English fiction",
"Social conditions",
"Horror stories",
"Multiple personality",
}
actual_subjects = set(models.Work.filter_problematic_subjects(subjects))
assert (actual_subjects ^ expected_subjects) == set()
| 1,914 | Python | .py | 64 | 21.5 | 76 | 0.577922 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
464 | test_account.py | internetarchive_openlibrary/openlibrary/plugins/upstream/tests/test_account.py | from .. import account
import web
import os
import re
import pytest
import sys
def open_test_data(filename):
"""Returns a file handle to file with specified filename inside test_data directory."""
root = os.path.dirname(__file__)
fullpath = os.path.join(root, 'test_data', filename)
return open(fullpath, mode='rb')
def test_create_list_doc(wildcard):
key = "account/foo/verify"
username = "foo"
email = "[email protected]"
doc = account.create_link_doc(key, username, email)
assert doc == {
"_key": key,
"_rev": None,
"type": "account-link",
"username": username,
"email": email,
"code": wildcard,
"created_on": wildcard,
"expires_on": wildcard,
}
class TestGoodReadsImport:
def setup_method(self, method):
with open_test_data('goodreads_library_export.csv') as reader:
self.csv_data = reader.read()
self.expected_books = {
"0142402494": {
"Additional Authors": "Florence Lamborn, Louis S. Glanzman",
"Author": "Astrid Lindgren",
"Author l-f": "Lindgren, Astrid",
"Average Rating": "4.13",
"BCID": "",
"Binding": "Mass Market Paperback",
"Book Id": "19302",
"Bookshelves": "to-read",
"Bookshelves with positions": "to-read (#2)",
"Condition": "",
"Condition Description": "",
"Date Added": "2020/12/13",
"Date Read": "",
"Exclusive Shelf": "to-read",
"ISBN": "0142402494",
"ISBN13": "9780142402498",
"My Rating": "0",
"My Review": "",
"Number of Pages": "160",
"Original Publication Year": "1945",
"Original Purchase Date": "",
"Original Purchase Location": "",
"Owned Copies": "0",
"Private Notes": "",
"Publisher": "Puffin Books",
"Read Count": "0",
"Recommended By": "",
"Recommended For": "",
"Spoiler": "",
"Title": "Pippi Longstocking (Pippi LÃ¥ngstrump, #1)",
"Year Published": "2005",
},
"0735214484": {
"Additional Authors": "",
"Author": "David Epstein",
"Author l-f": "Epstein, David",
"Average Rating": "4.16",
"BCID": "",
"Binding": "Hardcover",
"Book Id": "41795733",
"Bookshelves": "to-read",
"Bookshelves with positions": "to-read (#1)",
"Condition": "",
"Condition Description": "",
"Date Added": "2020/12/13",
"Date Read": "",
"Exclusive Shelf": "to-read",
"ISBN": "0735214484",
"ISBN13": "9780735214484",
"My Rating": "0",
"My Review": "",
"Number of Pages": "352",
"Original Publication Year": "2019",
"Original Purchase Date": "",
"Original Purchase Location": "",
"Owned Copies": "0",
"Private Notes": "",
"Publisher": "Riverhead Books",
"Read Count": "0",
"Recommended By": "",
"Recommended For": "",
"Spoiler": "",
"Title": "Range: Why Generalists Triumph in a Specialized World",
"Year Published": "2019",
},
}
self.expected_books_wo_isbns = {
"99999999999": {
"Additional Authors": "",
"Author": "AuthorWith NoISBN",
"Author l-f": "NoISBN, AuthorWith",
"Average Rating": "4.16",
"BCID": "",
"Binding": "Hardcover",
"Book Id": "99999999999",
"Bookshelves": "to-read",
"Bookshelves with positions": "to-read (#1)",
"Condition": "",
"Condition Description": "",
"Date Added": "2020/12/13",
"Date Read": "",
"Exclusive Shelf": "to-read",
"ISBN": "",
"ISBN13": "",
"My Rating": "0",
"My Review": "",
"Number of Pages": "352",
"Original Publication Year": "2019",
"Original Purchase Date": "",
"Original Purchase Location": "",
"Owned Copies": "0",
"Private Notes": "",
"Publisher": "Test Publisher",
"Read Count": "0",
"Recommended By": "",
"Recommended For": "",
"Spoiler": "",
"Title": "Test Book Title With No ISBN",
"Year Published": "2019",
}
}
@pytest.mark.skipif(
sys.version_info < (3, 0), reason="Python2's csv module doesn't support Unicode"
)
def test_process_goodreads_csv_with_utf8(self):
books, books_wo_isbns = account.process_goodreads_csv(
web.storage({'csv': self.csv_data.decode('utf-8')})
)
assert books == self.expected_books
assert books_wo_isbns == self.expected_books_wo_isbns
def test_process_goodreads_csv_with_bytes(self):
# Note: In Python2, reading data as bytes returns a string, which should
# also be supported by account.process_goodreads_csv()
books, books_wo_isbns = account.process_goodreads_csv(
web.storage({'csv': self.csv_data})
)
assert books == self.expected_books
assert books_wo_isbns == self.expected_books_wo_isbns
@pytest.mark.xfail
class TestAccount:
def signup(self, b, displayname, username, password, email):
b.open("/account/create")
b.select_form(name="signup")
b['displayname'] = displayname
b['username'] = username
b['password'] = password
b['email'] = email
b['agreement'] = ['yes']
b.submit()
def login(self, b, username, password):
"""Attempt login and return True if successful."""
b.open("/account/login")
b.select_form(name="register") # wrong name
b["username"] = username
b["password"] = password
b.submit()
return b.path == "/"
def test_create(self, ol):
b = ol.browser()
self.signup(
b,
displayname="Foo",
username="foo",
password="blackgoat",
email="[email protected]",
)
assert "Hi, foo!" in b.get_text(id="contentHead")
assert "sent an email to [email protected]" in b.get_text(id="contentBody")
assert ol.sentmail["from_address"] == "Open Library <[email protected]>"
assert ol.sentmail["to_address"] == "[email protected]"
assert ol.sentmail["subject"] == "Welcome to Open Library"
link = ol.sentmail.extract_links()[0]
assert re.match("^http://0.0.0.0:8080/account/verify/[0-9a-f]{32}$", link)
def test_activate(self, ol):
b = ol.browser()
self.signup(
b,
displayname="Foo",
username="foo",
password="secret",
email="[email protected]",
)
link = ol.sentmail.extract_links()[0]
b.open(link)
assert "Hi, Foo!" in b.get_text(id="contentHead")
assert "Yay! Your email address has been verified." in b.get_text(
id="contentBody"
)
self.login(b, "foo", "secret")
assert b.path == "/"
assert "Log out" in b.get_text()
def test_forgot_password(self, ol):
b = ol.browser()
self.signup(
b,
displayname="Foo",
username="foo",
password="secret",
email="[email protected]",
)
link = ol.sentmail.extract_links()[0]
b.open(link)
b.open("/account/password/forgot")
b.select_form(name="register") # why is the form called register?
b['email'] = "[email protected]"
b.submit()
assert "Thanks" in b.get_text(id="contentHead")
assert "We've sent an email to [email protected] with instructions" in b.get_text(
id="contentBody"
)
link = ol.sentmail.extract_links()[0]
assert re.match(
"^http://0.0.0.0:8080/account/password/reset/[0-9a-f]{32}$", link
)
b.open(link)
assert "Reset Password" in b.get_text(id="contentHead")
assert (
"Please enter a new password for your Open Library account"
in b.get_text(id="contentBody")
)
b.select_form(name="reset")
b['password'] = "secret2"
b.submit()
self.login(b, "foo", "secret2")
assert b.path == "/"
assert "Log out" in b.get_text()
b.reset()
self.login(b, "foo", "secret")
assert b.path == "/account/login"
assert "That password seems incorrect" in b.get_text()
def test_change_password(self, ol):
b = ol.browser()
self.signup(
b,
displayname="Foo",
username="foo",
password="secret",
email="[email protected]",
)
link = ol.sentmail.extract_links()[0]
b.open(link)
self.login(b, "foo", "secret")
b.open("/account/password")
b.select_form(name="register")
b['password'] = "secret"
b['new_password'] = "more_secret"
b.submit()
assert b.path == "/account"
b.reset()
assert self.login(b, "foo", "more_secret") is True
def test_change_email(self, ol):
b = ol.browser()
self.signup(
b,
displayname="Foo",
username="foo",
password="secret",
email="[email protected]",
)
link = ol.sentmail.extract_links()[0]
b.open(link)
self.login(b, "foo", "secret")
b.open("/account/email")
assert "[email protected]" in b.data
b.select_form(name="register")
b['email'] = "[email protected]"
b.submit()
assert "Hi Foo" in b.get_text(id="contentHead")
assert "We've sent an email to [email protected]" in b.get_text(
id="contentBody"
)
link = ol.sentmail.extract_links()[0]
b.open(link)
assert "Email verification successful" in b.get_text(id="contentHead")
b.open("/account/email")
assert "[email protected]" in b.data
| 10,881 | Python | .py | 288 | 26.090278 | 91 | 0.506069 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
465 | test_models.py | internetarchive_openlibrary/openlibrary/plugins/upstream/tests/test_models.py | """
Capture some of the unintuitive aspects of Storage, Things, and Works
"""
import web
from infogami.infobase import client
from openlibrary.mocks.mock_infobase import MockSite
import openlibrary.core.lists.model as list_model
from .. import models
class TestModels:
def setup_method(self, method):
web.ctx.site = MockSite()
def test_setup(self):
expected_things = {
'/type/edition': models.Edition,
'/type/author': models.Author,
'/type/work': models.Work,
'/type/subject': models.Subject,
'/type/place': models.SubjectPlace,
'/type/person': models.SubjectPerson,
'/type/user': models.User,
'/type/list': list_model.List,
}
expected_changesets = {
None: models.Changeset,
'merge-authors': models.MergeAuthors,
'undo': models.Undo,
'add-book': models.AddBookChangeset,
'lists': list_model.ListChangeset,
'new-account': models.NewAccountChangeset,
}
models.setup()
for key, value in expected_things.items():
assert client._thing_class_registry[key] == value
for key, value in expected_changesets.items():
assert client._changeset_class_register[key] == value
def test_work_without_data(self):
work = models.Work(web.ctx.site, '/works/OL42679M')
assert repr(work) == str(work) == "<Work: '/works/OL42679M'>"
assert isinstance(work, client.Thing)
assert isinstance(work, models.Work)
assert work._site == web.ctx.site
assert work.key == '/works/OL42679M'
assert work._data is None
# assert isinstance(work.data, client.Nothing) # Fails!
# assert work.data is None # Fails!
# assert not work.hasattr('data') # Fails!
assert work._revision is None
# assert work.revision is None # Fails!
# assert not work.revision('data') # Fails!
def test_work_with_data(self):
work = models.Work(web.ctx.site, '/works/OL42679M', web.Storage())
assert repr(work) == str(work) == "<Work: '/works/OL42679M'>"
assert isinstance(work, client.Thing)
assert isinstance(work, models.Work)
assert work._site == web.ctx.site
assert work.key == '/works/OL42679M'
assert isinstance(work._data, web.Storage)
assert isinstance(work._data, dict)
assert hasattr(work, 'data')
assert isinstance(work.data, client.Nothing)
assert hasattr(work, 'any_attribute') # hasattr() is True for all keys!
assert isinstance(work.any_attribute, client.Nothing)
assert repr(work.any_attribute) == '<Nothing>'
assert str(work.any_attribute) == ''
work.new_attribute = 'new_attribute'
assert isinstance(work.data, client.Nothing) # Still Nothing
assert work.new_attribute == 'new_attribute'
assert work['new_attribute'] == 'new_attribute'
assert work.get('new_attribute') == 'new_attribute'
assert not work.hasattr('new_attribute')
assert work._data == {'new_attribute': 'new_attribute'}
assert repr(work.data) == '<Nothing>'
assert str(work.data) == ''
assert callable(work.get_sorted_editions) # Issue #3633
assert work.get_sorted_editions() == []
def test_user_settings(self):
user = models.User(web.ctx.site, 'user')
assert user.get_safe_mode() == ""
user.save_preferences({'safe_mode': 'yes'})
assert user.get_safe_mode() == 'yes'
user.save_preferences({'safe_mode': "no"})
assert user.get_safe_mode() == "no"
user.save_preferences({'safe_mode': 'yes'})
assert user.get_safe_mode() == 'yes'
| 3,810 | Python | .py | 84 | 36.547619 | 80 | 0.620625 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
466 | test_table_of_contents.py | internetarchive_openlibrary/openlibrary/plugins/upstream/tests/test_table_of_contents.py | from openlibrary.plugins.upstream.table_of_contents import TableOfContents, TocEntry
class TestTableOfContents:
def test_from_db_well_formatted(self):
db_table_of_contents = [
{"level": 1, "title": "Chapter 1"},
{"level": 2, "title": "Section 1.1"},
{"level": 2, "title": "Section 1.2"},
{"level": 1, "title": "Chapter 2"},
]
toc = TableOfContents.from_db(db_table_of_contents)
assert toc.entries == [
TocEntry(level=1, title="Chapter 1"),
TocEntry(level=2, title="Section 1.1"),
TocEntry(level=2, title="Section 1.2"),
TocEntry(level=1, title="Chapter 2"),
]
def test_from_db_empty(self):
db_table_of_contents = []
toc = TableOfContents.from_db(db_table_of_contents)
assert toc.entries == []
def test_from_db_string_rows(self):
db_table_of_contents = [
"Chapter 1",
"Section 1.1",
"Section 1.2",
"Chapter 2",
]
toc = TableOfContents.from_db(db_table_of_contents)
assert toc.entries == [
TocEntry(level=0, title="Chapter 1"),
TocEntry(level=0, title="Section 1.1"),
TocEntry(level=0, title="Section 1.2"),
TocEntry(level=0, title="Chapter 2"),
]
def test_to_db(self):
toc = TableOfContents(
[
TocEntry(level=1, title="Chapter 1"),
TocEntry(level=2, title="Section 1.1"),
TocEntry(level=2, title="Section 1.2"),
TocEntry(level=1, title="Chapter 2"),
]
)
assert toc.to_db() == [
{"level": 1, "title": "Chapter 1"},
{"level": 2, "title": "Section 1.1"},
{"level": 2, "title": "Section 1.2"},
{"level": 1, "title": "Chapter 2"},
]
def test_from_db_complex(self):
db_table_of_contents = [
{
"level": 1,
"title": "Chapter 1",
"authors": [{"name": "Author 1"}],
"subtitle": "Subtitle 1",
"description": "Description 1",
},
{"level": 2, "title": "Section 1.1"},
{"level": 2, "title": "Section 1.2"},
{"level": 1, "title": "Chapter 2"},
]
toc = TableOfContents.from_db(db_table_of_contents)
assert toc.entries == [
TocEntry(
level=1,
title="Chapter 1",
authors=[{"name": "Author 1"}],
subtitle="Subtitle 1",
description="Description 1",
),
TocEntry(level=2, title="Section 1.1"),
TocEntry(level=2, title="Section 1.2"),
TocEntry(level=1, title="Chapter 2"),
]
def test_from_markdown(self):
text = """\
| Chapter 1 | 1
| Section 1.1 | 2
| Section 1.2 | 3
"""
toc = TableOfContents.from_markdown(text)
assert toc.entries == [
TocEntry(level=0, title="Chapter 1", pagenum="1"),
TocEntry(level=0, title="Section 1.1", pagenum="2"),
TocEntry(level=0, title="Section 1.2", pagenum="3"),
]
def test_from_markdown_empty_lines(self):
text = """\
        | Chapter 1 | 1

        | Section 1.1 | 2
| Section 1.2 | 3
"""
toc = TableOfContents.from_markdown(text)
assert toc.entries == [
TocEntry(level=0, title="Chapter 1", pagenum="1"),
TocEntry(level=0, title="Section 1.1", pagenum="2"),
TocEntry(level=0, title="Section 1.2", pagenum="3"),
]
class TestTocEntry:
def test_from_dict(self):
d = {
"level": 1,
"label": "Chapter 1",
"title": "Chapter 1",
"pagenum": "1",
"authors": [{"name": "Author 1"}],
"subtitle": "Subtitle 1",
"description": "Description 1",
}
entry = TocEntry.from_dict(d)
assert entry == TocEntry(
level=1,
label="Chapter 1",
title="Chapter 1",
pagenum="1",
authors=[{"name": "Author 1"}],
subtitle="Subtitle 1",
description="Description 1",
)
def test_from_dict_missing_fields(self):
d = {"level": 1}
entry = TocEntry.from_dict(d)
assert entry == TocEntry(level=1)
def test_to_dict(self):
entry = TocEntry(
level=1,
label="Chapter 1",
title="Chapter 1",
pagenum="1",
authors=[{"name": "Author 1"}],
subtitle="Subtitle 1",
description="Description 1",
)
assert entry.to_dict() == {
"level": 1,
"label": "Chapter 1",
"title": "Chapter 1",
"pagenum": "1",
"authors": [{"name": "Author 1"}],
"subtitle": "Subtitle 1",
"description": "Description 1",
}
def test_to_dict_missing_fields(self):
entry = TocEntry(level=1)
assert entry.to_dict() == {"level": 1}
entry = TocEntry(level=1, title="")
assert entry.to_dict() == {"level": 1, "title": ""}
def test_from_markdown(self):
line = "| Chapter 1 | 1"
entry = TocEntry.from_markdown(line)
assert entry == TocEntry(level=0, title="Chapter 1", pagenum="1")
line = " ** | Chapter 1 | 1"
entry = TocEntry.from_markdown(line)
assert entry == TocEntry(level=2, title="Chapter 1", pagenum="1")
line = "Chapter missing pipe"
entry = TocEntry.from_markdown(line)
assert entry == TocEntry(level=0, title="Chapter missing pipe")
line = ' | Just title | | {"authors": [{"name": "Author 1"}]}'
entry = TocEntry.from_markdown(line)
assert entry == TocEntry(
level=0, title="Just title", authors=[{"name": "Author 1"}]
)
def test_to_markdown(self):
entry = TocEntry(level=0, title="Chapter 1", pagenum="1")
assert entry.to_markdown() == " | Chapter 1 | 1"
entry = TocEntry(level=2, title="Chapter 1", pagenum="1")
assert entry.to_markdown() == "** | Chapter 1 | 1"
entry = TocEntry(level=0, title="Just title")
assert entry.to_markdown() == " | Just title | "
entry = TocEntry(level=0, title="", authors=[{"name": "Author 1"}])
assert entry.to_markdown() == ' | | | {"authors": [{"name": "Author 1"}]}'
| 6,664 | Python | .py | 172 | 27.610465 | 84 | 0.503642 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
467 | recaptcha.py | internetarchive_openlibrary/openlibrary/plugins/recaptcha/recaptcha.py | """Recaptcha Input to use in web.py forms."""
import web
import requests
import logging
class Recaptcha(web.form.Input):
def __init__(self, public_key, private_key):
self.public_key = public_key
self._private_key = private_key
validator = web.form.Validator('Recaptcha failed', self.validate)
web.form.Input.__init__(self, 'recaptcha', validator)
self.description = 'Validator for recaptcha v2'
self.help = ''
self.error = None
def validate(self, value=None):
i = web.input()
url = "https://www.google.com/recaptcha/api/siteverify"
params = {
'secret': self._private_key,
'response': i.get('g-recaptcha-response'),
'remoteip': web.ctx.ip,
}
try:
r = requests.get(url, params=params, timeout=3)
except requests.exceptions.RequestException as e:
logging.getLogger("openlibrary").exception(
'Recaptcha call failed: letting user through'
)
return True
return r.json().get('success', '')
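# Illustrative wiring (placeholder keys, not part of this module): as a
# web.form.Input subclass, the widget is composed into a form so web.py runs
# its validator on submit, e.g.
#     form = web.form.Form(web.form.Textbox('email'), Recaptcha('site-key', 'secret-key'))
# The commented-out plugin code in code.py appends an instance to the register
# form's inputs in a similar way.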
| 1,110 | Python | .py | 29 | 29.413793 | 73 | 0.604846 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
468 | code.py | internetarchive_openlibrary/openlibrary/plugins/recaptcha/code.py | # from infogami.core.forms import register
# from infogami import config
#
# import recaptcha
#
# if config.get('plugin_recaptcha') is not None:
# public_key = config.plugin_recaptcha.public_key
# private_key = config.plugin_recaptcha.private_key
# else:
# public_key = config.recaptcha_public_key
# private_key = config.recaptcha_private_key
#
# register.inputs = list(register.inputs)
# register.inputs.append(recaptcha.Recaptcha(public_key, private_key))
| 474 | Python | .py | 14 | 32.857143 | 70 | 0.76087 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
469 | test_docker_compose.py | internetarchive_openlibrary/tests/test_docker_compose.py | import os
import yaml
def p(*paths):
"""Util to get absolute path from relative path"""
return os.path.join(os.path.dirname(__file__), *paths)
class TestDockerCompose:
def test_all_root_services_must_be_in_prod(self):
"""
Each service in compose.yaml should also be in
compose.production.yaml with a profile. Services without profiles will
match with any profile, meaning the service would get deployed everywhere!
"""
with open(p('..', 'compose.yaml')) as f:
root_dc: dict = yaml.safe_load(f)
with open(p('..', 'compose.production.yaml')) as f:
prod_dc: dict = yaml.safe_load(f)
root_services = set(root_dc['services'])
prod_services = set(prod_dc['services'])
missing = root_services - prod_services
assert missing == set(), "compose.production.yaml missing services"
def test_all_prod_services_need_profile(self):
"""
Without the profiles field, a service will get deployed to _every_ server. That
is not likely what you want. If that is what you want, add all server names to
this service to make things explicit.
"""
with open(p('..', 'compose.production.yaml')) as f:
prod_dc: dict = yaml.safe_load(f)
for serv, opts in prod_dc['services'].items():
assert 'profiles' in opts, f"{serv} is missing 'profiles' field"
def test_shared_constants(self):
# read the value in compose.yaml
with open(p('..', 'compose.yaml')) as f:
prod_dc: dict = yaml.safe_load(f)
solr_service = prod_dc['services']['solr']
solr_opts = next(
var.split('=', 1)[1]
for var in solr_service['environment']
if var.startswith('SOLR_OPTS=')
)
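        # For example, a (hypothetical) environment entry such as
        #   SOLR_OPTS=-Xmx10g -Dsolr.max.booleanClauses=30000
        # yields solr_opts == '-Xmx10g -Dsolr.max.booleanClauses=30000' here.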
solr_opts_max_boolean_clauses = next(
int(opt.split('=', 1)[1])
for opt in solr_opts.split()
if opt.startswith('-Dsolr.max.booleanClauses')
)
# read the value in openlibrary/core/bookshelves.py
from openlibrary.core.bookshelves import FILTER_BOOK_LIMIT
assert solr_opts_max_boolean_clauses >= FILTER_BOOK_LIMIT
| 2,211 | Python | .py | 48 | 37.0625 | 87 | 0.617633 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
470 | expire_accounts.py | internetarchive_openlibrary/scripts/expire_accounts.py | from datetime import datetime
import web
def delete_old_links():
for doc in web.ctx.site.store.values(type="account-link"):
expiry_date = datetime.strptime(doc["expires_on"], "%Y-%m-%dT%H:%M:%S.%f")
now = datetime.utcnow()
key = doc["_key"]
if expiry_date > now:
print("Deleting link %s" % (key))
del web.ctx.site.store[key]
else:
print("Retaining link %s" % (key))
def main():
delete_old_links()
return 0
if __name__ == "__main__":
import sys
sys.exit(main())
| 565 | Python | .py | 18 | 24.611111 | 82 | 0.573284 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
471 | get_web_error.py | internetarchive_openlibrary/scripts/get_web_error.py | #!/usr/bin/env python3
"""
% `python3 get_web_error.py 2023-02-03/011556535629.html | lynx --stdin`
To see the contents of an Open Library user-reported error like 2023-02-03/011556535629.html:
1. Copy this script into your user directory on any Open Library host.
2. Type the command above substituting in the error code you are looking for.
This script will ssh into hosts ol-web1 and ol-web2 and if that file is found in the
docker_container openlibrary_web_1, it will print its contents to stdout.
Type `yes` if ssh prompts you to add the hosts to your known_hosts file.
If lynx is not yet installed, please type `sudo apt-get install lynx` to install it.
"""
import subprocess
import sys
usage = f"""Usage: {sys.argv[0]} [filename] | lynx --stdin
filename is like 2023-02-03/000008077313.html"""
def get_web_error(
filename: str,
hosts=("ol-web1.us.archive.org", "ol-web2.us.archive.org"),
docker_container: str = "openlibrary_web_1",
) -> str:
"""
ssh into hosts ol-web1 and ol-web2 and if filename is found in the
docker_container openlibrary_web_1 then return its contents.
"""
file_path = f"/var/log/openlibrary/ol-errors/{filename}"
for host in hosts:
cmd = f"ssh -A -t {host} 'docker exec -i {docker_container} cat {file_path}'"
try:
if output := subprocess.check_output(cmd, shell=True, text=True):
return output
except subprocess.CalledProcessError as e:
print(f"Error: {e!r}")
raise (e)
return f"Error: {file_path} was not found on {' or '.join(hosts)}."
if __name__ == "__main__":
if len(sys.argv) > 1:
print(get_web_error(sys.argv[1]))
else:
print(usage)
| 1,716 | Python | .py | 39 | 39.128205 | 87 | 0.683673 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
472 | delete_import_items.py | internetarchive_openlibrary/scripts/delete_import_items.py | """
Deletes entries from the import_item table.
Reads ia_ids that should be deleted from an input file. The input file is expected to be tab-delimited, and each line will have the following format:
{N number of ia_ids in this line} {edition_key} {ia_id 1} [...] {ia_id N}
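For example, a (hypothetical) line naming two ia_ids could look like this, with tabs shown as <TAB>:
2<TAB>/books/OL123M<TAB>some-ia-id<TAB>another-ia-id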
Requires a configuration file in order to run. You can use the following as a template for the configuration file:
[args]
in_file=./import-item-cleanup/in.txt
state_file=./import-item-cleanup/curline.txt
error_file=./import-item-cleanup/errors.txt
batch_size=1000
dry_run=True
ol_config=/path/to/openlibrary.yml
"""
import argparse
import time
from configparser import ConfigParser
from pathlib import Path
import _init_path # Imported for its side effect of setting PYTHONPATH
from openlibrary.config import load_config
from openlibrary.core.imports import ImportItem
from openlibrary.core.edits import CommunityEditsQueue
class DeleteImportItemJob:
def __init__(
self, in_file='', state_file='', error_file='', batch_size=1000, dry_run=False
):
self.in_file = in_file
self.state_file = state_file
self.error_file = error_file
self.batch_size = batch_size
self.dry_run = dry_run
self.start_line = 1
state_path = Path(state_file)
if state_path.exists():
with state_path.open('r') as f:
line = f.readline()
if line:
self.start_line = int(line)
def run(self):
with open(self.in_file) as f:
# Seek to start line
for _ in range(1, self.start_line):
f.readline()
# Delete a batch of records
lines_processed = 0
affected_records = 0
num_deleted = 0
for _ in range(self.batch_size):
line = f.readline()
if not line:
break
fields = line.strip().split('\t')
ia_ids = fields[2:]
try:
result = ImportItem.delete_items(ia_ids, _test=self.dry_run)
if self.dry_run:
# Result is string "DELETE FROM ..."
print(result)
else:
# Result is number of records deleted
num_deleted += result
except Exception as e:
print(f'Error when deleting: {e}')
if not self.dry_run:
write_to(self.error_file, line, mode='a+')
lines_processed += 1
affected_records += int(fields[0])
# Write next line number to state file:
if not self.dry_run:
write_to(self.state_file, f'{self.start_line + lines_processed}')
return {
'lines_processed': lines_processed,
'num_deleted': num_deleted,
'affected_records': affected_records,
}
def write_to(filepath, s, mode='w+'):
print(mode)
path = Path(filepath)
path.parent.mkdir(exist_ok=True, parents=True)
with path.open(mode=mode) as f:
f.write(s)
def read_args_from_config(config_path):
path = Path(config_path)
if not path.exists():
raise Exception(f'No configuration file found at {config_path}')
config = ConfigParser()
config.read(path)
args = config['args']
return {
'in_file': args.get('in_file'),
'state_file': args.get('state_file'),
'error_file': args.get('error_file'),
'batch_size': args.getint('batch_size'),
        'dry_run': args.getboolean('dry_run'),
'ol_config': args.get('ol_config'),
}
def init_and_start(args):
# Read arguments from config file:
config_args = read_args_from_config(args.config_file)
# Set up Open Library
load_config(config_args['ol_config'])
del config_args['ol_config']
# Delete affected files
results = DeleteImportItemJob(**config_args).run()
print(f'Lines of input processed: {results["lines_processed"]}')
print(f'Records read from input file: {results["affected_records"]}')
print(f'Records deleted: {results["num_deleted"]}')
def build_parser():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
'config_file', metavar='config_path', help='Path to configuration file'
)
parser.set_defaults(func=init_and_start)
return parser
if __name__ == '__main__':
start_time = time.time()
parser = build_parser()
args = parser.parse_args()
try:
args.func(args)
except Exception as e:
print(f'Error: {e}')
print('Stopping script early.')
end_time = time.time()
print(f'\nTime elapsed: {end_time - start_time} seconds\n')
print('Program terminated...')
| 4,909 | Python | .py | 127 | 29.92126 | 150 | 0.605939 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
473 | store_counts.py | internetarchive_openlibrary/scripts/store_counts.py | #!/usr/bin/env python
import sys
import _init_path # Imported for its side effect of setting PYTHONPATH
from openlibrary.admin import stats
if __name__ == "__main__":
if len(sys.argv) != 5:
print(
"Usage : %s infobase_config openlibrary_config coverstore_config number_of_days",
file=sys.stderr,
)
sys.exit(-1)
sys.exit(stats.main(*sys.argv[1:]))
| 408 | Python | .py | 12 | 28 | 93 | 0.637755 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
474 | _init_path.py | internetarchive_openlibrary/scripts/_init_path.py | """Helper to add openlibrary module to sys.path.
"""
import os
from os.path import abspath, realpath, join, dirname, pardir
import sys
path = __file__.replace('.pyc', '.py')
scripts_root = dirname(realpath(path))
OL_PATH = abspath(join(scripts_root, pardir))
sys.path.insert(0, OL_PATH)
# Add the PWD as the first entry in the path.
# The path we get from __file__ and abspath will have all the links expanded.
# This creates trouble in symlink based deployments. Work-around is to add the
# current directory to path and let the app run from that directory.
sys.path.insert(0, os.getcwd())
| 595 | Python | .py | 14 | 41.214286 | 78 | 0.753899 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
475 | fake_loan_server.py | internetarchive_openlibrary/scripts/fake_loan_server.py | #!/usr/bin/env python
"""
Fake loan status server to make dev instance work with borrowing books.
"""
import web
urls = ("/is_loaned_out/(.*)", "is_loaned_out")
app = web.application(urls, globals())
class is_loaned_out:
def GET(self, resource_id):
web.header("Content-type", "application/json")
return "[]"
if __name__ == "__main__":
app.run()
| 374 | Python | .py | 13 | 25.538462 | 71 | 0.646067 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
476 | mail_bad_author_query.py | internetarchive_openlibrary/scripts/mail_bad_author_query.py | #!/usr/bin/env python
import web
import os
import smtplib
import sys
from email.mime.text import MIMEText
password = open(os.path.expanduser('~/.openlibrary_db_password')).read()
if password.endswith('\n'):
password = password[:-1]
db_error = web.database(
dbn='postgres', db='ol_errors', host='localhost', user='openlibrary', pw=password
)
db_error.printing = False
body = '''When using the Open Library query interface the results should only include
authors. There shouldn't be any redirects or deleted authors in the results.
Below is a list of bad results for author queries from imports that have
run today.
'''
seen = set()
bad_count = 0
for row in db_error.query(
"select t, query, result from errors where t between 'yesterday' and 'today'"
):
author = row.query
if author in seen:
continue
seen.add(author)
bad_count += 1
body += '-' * 60 + '\nAuthor name: ' + author + '\n'
body += (
'http://openlibrary.org/query.json?type=/type/author&name=%s'
% web.urlquote(author)
+ '\n\n'
)
body += row.result + '\n'
if bad_count == 0:
sys.exit(0)
# print body
addr_from = '[email protected]'
addr_to = '[email protected]'
# msg = MIMEText(body, 'plain', 'utf-8')
try:
msg = MIMEText(body, 'plain', 'iso-8859-15')
except UnicodeEncodeError:
msg = MIMEText(body, 'plain', 'utf-8')
msg['From'] = addr_from
msg['To'] = addr_to
msg['Subject'] = "import error report: %d bad author queries" % bad_count
s = smtplib.SMTP('mail.archive.org')
s.sendmail(addr_from, [addr_to], msg.as_string())
s.quit()
| 1,597 | Python | .py | 51 | 28.431373 | 85 | 0.684655 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
477 | generate-api-docs.py | internetarchive_openlibrary/scripts/generate-api-docs.py | import web
import os
import re
import shutil
from collections import defaultdict
template = """\
$def with (mod, submodules)
$ name = mod.split(".")[-1]
$name
$("=" * len(name))
$if submodules:
Submodules
----------
.. toctree::
:maxdepth: 1
$for m in submodules: $m
Documentation
-------------
.. automodule:: $mod
$else:
.. automodule:: $mod
"""
t = web.template.Template(template)
def docpath(path):
return "docs/api/" + path.replace(".py", ".rst").replace("__init__", "index")
def modname(path):
return path.replace(".py", "").replace("/__init__", "").replace("/", ".")
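# Illustrative mapping (editor-added; the path is just an example):
#   docpath("openlibrary/core/__init__.py") -> "docs/api/openlibrary/core/index.rst"
#   modname("openlibrary/core/__init__.py") -> "openlibrary.core"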
def write(path, text):
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
print("writing", path)
f = open(path, "w")
f.write(text)
f.close()
def find_python_sources(dir):
ignores = [
"openlibrary/catalog.*",
"openlibrary/solr.*",
"openlibrary/plugins/recaptcha.*",
".*tests",
"infogami/plugins.*",
"infogami.utils.markdown",
]
re_ignore = re.compile("|".join(ignores))
for dirpath, dirnames, filenames in os.walk(dir):
if re_ignore.match(dirpath):
print("ignoring", dirpath)
continue
for f in filenames:
if f.endswith(".py"):
yield os.path.join(dirpath, f)
def generate_docs(dir):
shutil.rmtree(docpath(dir), ignore_errors=True)
paths = list(find_python_sources(dir))
submodule_dict = defaultdict(list)
for path in paths:
dir = os.path.dirname(path)
if path.endswith("__init__.py"):
dir = os.path.dirname(dir)
submodule_dict[dir].append(path)
for path in paths:
dirname = os.path.dirname(path)
if path.endswith("__init__.py"):
submodules = [
web.lstrips(docpath(s), docpath(dirname) + "/")
for s in submodule_dict[dirname]
]
else:
submodules = []
submodules.sort()
mod = modname(path)
text = str(t(mod, submodules))
write(docpath(path), text)
# set the modification time same as the source file
mtime = os.stat(path).st_mtime
os.utime(docpath(path), (mtime, mtime))
def generate_index():
filenames = sorted(os.listdir("docs/api"))
f = open("docs/api/index.rst", "w")
f.write("API Documentation\n")
f.write("=================\n")
f.write("\n")
f.write(".. toctree::\n")
f.write(" :maxdepth: 1\n")
f.write("\n")
f.write("\n".join(" " + filename for filename in filenames))
def main():
generate_docs("openlibrary")
generate_docs("infogami")
# generate_index()
if __name__ == "__main__":
main()
| 2,810 | Python | .py | 93 | 23.763441 | 81 | 0.578358 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
478 | import_open_textbook_library.py | internetarchive_openlibrary/scripts/import_open_textbook_library.py | #!/usr/bin/env python
import json
import requests
import time
from itertools import islice
from typing import Any
from collections.abc import Generator
from openlibrary.core.imports import Batch
from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
from openlibrary.config import load_config
FEED_URL = 'https://open.umn.edu/opentextbooks/textbooks.json?'
def get_feed() -> Generator[dict[str, Any], None, None]:
"""Fetches and yields each book in the feed."""
next_url = FEED_URL
while next_url:
r = requests.get(next_url)
response = r.json()
# Yield each book in the response
yield from response.get('data', [])
# Get the next page URL from the links section
next_url = response.get('links', {}).get('next')
def map_data(data) -> dict[str, Any]:
"""Maps Open Textbooks data to Open Library import record."""
import_record: dict[str, Any] = {}
import_record["identifiers"] = {'open_textbook_library': [str(data['id'])]}
import_record["source_records"] = ['open_textbook_library:%s' % data['id']]
if data.get("title"):
import_record["title"] = data["title"]
if data.get('ISBN10'):
import_record['isbn_10'] = [data['ISBN10']]
if data.get('ISBN13'):
import_record['isbn_13'] = [data['ISBN13']]
if data.get('language'):
import_record['languages'] = [data['language']]
if data.get('description'):
import_record['description'] = data['description']
if data.get('subjects'):
subjects = [
subject["name"] for subject in data['subjects'] if subject.get("name")
]
if subjects:
import_record['subjects'] = subjects
if data.get('publishers'):
import_record['publishers'] = [
publisher["name"] for publisher in data["publishers"]
]
if data.get("copyright_year"):
import_record['publish_date'] = str(data["copyright_year"])
if data.get('contributors'):
authors = []
ol_contributors = []
for contributor in data["contributors"]:
name = " ".join(
name
for name in (
contributor.get("first_name"),
contributor.get("middle_name"),
contributor.get("last_name"),
)
if name
)
if (
contributor.get("primary") is True
or contributor.get("contribution") == 'Author'
):
authors.append({"name": name})
else:
ol_contributors.append(
{
"role": contributor.get("contribution"),
"name": name,
}
)
if authors:
import_record["authors"] = authors
if ol_contributors:
import_record["contributors"] = ol_contributors
if data.get('subjects'):
lc_classifications = [
subject["call_number"]
for subject in data['subjects']
if subject.get("call_number")
]
if lc_classifications:
import_record["lc_classifications"] = lc_classifications
return import_record
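# Illustrative sketch (editor-added): what map_data() yields for a minimal, invented feed
# item; every value below is a hypothetical placeholder, not real Open Textbook Library data.
def _example_map_data() -> dict[str, Any]:
    sample = {
        'id': 1234,
        'title': 'Example Open Textbook',
        'ISBN13': '9780000000002',
        'language': 'eng',
        'copyright_year': 2020,
        'contributors': [
            {'first_name': 'Ada', 'last_name': 'Author', 'contribution': 'Author', 'primary': True}
        ],
    }
    # Expected keys: identifiers, source_records, title, isbn_13, languages,
    # publish_date and authors ([{'name': 'Ada Author'}]).
    return map_data(sample)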
def create_import_jobs(records: list[dict[str, str]]) -> None:
"""Creates Open Textbooks batch import job.
Attempts to find existing Open Textbooks import batch.
If nothing is found, a new batch is created. All of the
given import records are added to the batch job as JSON strings.
"""
now = time.gmtime(time.time())
batch_name = f'open_textbook_library-{now.tm_year}{now.tm_mon}'
batch = Batch.find(batch_name) or Batch.new(batch_name)
batch.add_items([{'ia_id': r['source_records'][0], 'data': r} for r in records])
def import_job(ol_config: str, dry_run: bool = False, limit: int = 10) -> None:
"""
Fetch and process the feed.
:param limit: Specify -1 for no limit
"""
feed = get_feed()
# Use islice to limit the number of items yielded by get_feed
import_objects = map(map_data, islice(feed, limit) if limit != -1 else feed)
if not dry_run:
load_config(ol_config)
batch_items = list(import_objects)
create_import_jobs(batch_items)
print(f'{len(batch_items)} entries added to the batch import job.')
else:
for record in import_objects:
print(json.dumps(record))
if __name__ == '__main__':
FnToCLI(import_job).run()
| 4,544 | Python | .py | 114 | 31 | 84 | 0.596268 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
479 | import_pressbooks.py | internetarchive_openlibrary/scripts/import_pressbooks.py | """
To run:
PYTHONPATH=. python ./scripts/import_pressbooks.py /olsystem/etc/openlibrary.yml ./path/to/pressbooks.json
"""
import json
import datetime
import logging
import requests
import html
from infogami import config
from openlibrary.config import load_config
from openlibrary.core.imports import Batch
from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
logger = logging.getLogger("openlibrary.importer.pressbooks")
langs = {
lang['identifiers']['iso_639_1'][0]: lang['code']
for lang in requests.get(
'https://openlibrary.org/query.json',
{
'limit': '500',
'type': '/type/language',
'identifiers.iso_639_1~': '*',
'identifiers': '',
'code': '',
},
).json()
}
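# Editor's note (hedged): the mapping above is built from the live /query.json endpoint at
# import time and looks roughly like {'en': 'eng', 'fr': 'fre', ...}; its exact contents
# depend on the Open Library language records returned at run time.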
def convert_pressbooks_to_ol(data):
book = {"source_records": ['pressbooks:%s' % data['url']]}
if data.get('isbn'):
book['isbn_13'] = [
isbn.split(' ')[0].replace('-', '') for isbn in data['isbn'].split('; ')
]
if data.get('name'):
book['title'] = html.unescape(data['name'])
if data.get('languageCode'):
book['languages'] = [langs[data['languageCode'].split('-', 1)[0]]]
if data.get('author'):
book['authors'] = [{"name": a} for a in data.get('author')]
if data.get('image') and not data['image'].endswith('default-book-cover.jpg'):
book['cover'] = data['image']
description = (
(data.get('description') or '')
+ '\n\n'
+ (data.get('disambiguatingDescription') or '')
).strip()
if description:
book['description'] = description
if data.get('alternateName'):
book['other_titles'] = [data['alternateName']]
if data.get('alternativeHeadline'):
book['edition_name'] = data['alternativeHeadline']
book['publish_date'] = (
data.get('datePublished')
or data.get('copyrightYear')
or datetime.datetime.fromtimestamp(data.get('lastUpdated')).date().isoformat()
)
assert book['publish_date'], data
subjects = (data.get('about') or []) + (data.get('keywords') or '').split(', ')
if subjects:
book['subjects'] = [
s.strip().capitalize() for s in subjects if s # Sometimes they're null?
]
book['publishers'] = [p for p in (data.get('networkName'), "Pressbooks") if p]
book['providers'] = [
{
'provider': 'pressbooks',
'url': data['url'],
}
]
book['physical_format'] = 'Ebook'
copyright_line = ' '.join(
[
data.get('copyrightYear') or '',
data.get('copyrightHolderName') or '',
]
).strip()
if copyright_line:
book['copyright_date'] = copyright_line
if data.get('wordCount'):
book['word_count'] = data['wordCount']
contributors_map = {
'translator': 'Translator',
'editor': 'Editor',
'illustrator': 'Illustrator',
'reviewedBy': 'Reviewer',
'contributor': 'Contributor',
}
contributors = [
[
{"name": person, "role": ol_role}
for person in (data.get(pressbooks_field) or [])
]
for pressbooks_field, ol_role in contributors_map.items()
]
contributors = [contributor for lst in contributors if lst for contributor in lst]
if contributors:
book['contributors'] = contributors
return book
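# Illustrative sketch (editor-added): a minimal, invented Pressbooks record and the call
# that converts it. languageCode is omitted so the example does not depend on the live
# `langs` lookup above; all values are hypothetical placeholders.
def _example_convert_pressbooks():
    sample = {
        'url': 'https://example.pressbooks.pub/sample-book',
        'name': 'Sample Book',
        'author': ['Ada Author'],
        'datePublished': '2021',
        'wordCount': 12345,
    }
    # Produces source_records ['pressbooks:<url>'], title, authors, publish_date,
    # publishers ['Pressbooks'], providers, physical_format and word_count.
    return convert_pressbooks_to_ol(sample)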
def main(ol_config: str, filename: str, batch_size=5000, dry_run=False):
if not dry_run:
load_config(ol_config)
date = datetime.date.today()
batch_name = f"pressbooks-{date:%Y%m}"
batch = Batch.find(batch_name) or Batch.new(batch_name)
with open(filename, 'rb') as f:
book_items = []
books = json.load(f)
for line_num, record in enumerate(books):
# try:
b = convert_pressbooks_to_ol(record)
book_items.append({'ia_id': b['source_records'][0], 'data': b})
# except (AssertionError, IndexError) as e:
# logger.info(f"Error: {e} from {line}")
if dry_run:
print(json.dumps(b))
# If we have enough items, submit a batch
elif not ((line_num + 1) % batch_size):
batch.add_items(book_items)
book_items = [] # clear added items
# Add any remaining book_items to batch
if not dry_run and book_items:
batch.add_items(book_items)
if __name__ == '__main__':
FnToCLI(main).run()
| 4,553 | Python | .py | 125 | 28.736 | 106 | 0.581744 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
480 | oclc_to_marc.py | internetarchive_openlibrary/scripts/oclc_to_marc.py | """Find marc record URL from oclc number.
Usage: python oclc_to_marc.py oclc_1 oclc_2
"""
import requests
import urllib
root = "https://openlibrary.org"
def wget(path):
return requests.get(root + path).json()
def find_marc_url(d):
if d.get('source_records'):
return d['source_records'][0]
    # Sometimes the initial revision is 2 instead of 1, so take the first 3 revisions
    # (in reverse order) and pick the machine comment from the last one.
result = wget('%s.json?m=history&offset=%d' % (d['key'], d['revision'] - 3))
if result:
return result[-1]['machine_comment'] or ""
else:
return ""
def main(oclc):
query = urllib.parse.urlencode(
{'type': '/type/edition', 'oclc_numbers': oclc, '*': ''}
)
result = wget('/query.json?' + query)
for d in result:
print("\t".join([oclc, d['key'], find_marc_url(d)]))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1 or "-h" in sys.argv or "--help" in sys.argv:
print(__doc__, file=sys.stderr)
else:
for oclc in sys.argv[1:]:
main(oclc)
| 1,119 | Python | .py | 32 | 29.59375 | 99 | 0.611578 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
481 | test_py3.sh | internetarchive_openlibrary/scripts/test_py3.sh | #!/bin/sh
pytest . \
--ignore=scripts/2011 \
--ignore=infogami \
--ignore=vendor
RETURN_CODE=$?
ruff --exit-zero --select=E722,F403 --show-source # Show bare exceptions and wildcard (*) imports
safety check || true # Show any insecure dependencies
exit ${RETURN_CODE}
| 294 | Python | .py | 9 | 29 | 98 | 0.680851 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
482 | affiliate_server.py | internetarchive_openlibrary/scripts/affiliate_server.py | #!/usr/bin/env python
"""Run affiliate server.
Usage:
start affiliate-server using dev webserver:
./scripts/affiliate_server.py openlibrary.yml 31337
start affiliate-server as fastcgi:
./scripts/affiliate_server.py openlibrary.yml fastcgi 31337
start affiliate-server using gunicorn webserver:
./scripts/affiliate_server.py openlibrary.yml --gunicorn -b 0.0.0.0:31337
Testing Amazon API:
ol-home0% `docker exec -it openlibrary-affiliate-server-1 bash`
openlibrary@ol-home0:/openlibrary$ `python`
```
import web
import infogami
from openlibrary.config import load_config
load_config('/olsystem/etc/openlibrary.yml')
infogami._setup()
from infogami import config;
from openlibrary.core.vendors import AmazonAPI
params=[config.amazon_api.get('key'), config.amazon_api.get('secret'),config.amazon_api.get('id')]
web.amazon_api = AmazonAPI(*params, throttling=0.9)
products = web.amazon_api.get_products(["195302114X", "0312368615"], serialize=True)
```
"""
import itertools
import json
import logging
import os
import queue
import sys
import threading
import time
from collections.abc import Callable, Collection
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import Any, Final
import requests
import web
import _init_path # noqa: F401 Imported for its side effect of setting PYTHONPATH
import infogami
from infogami import config
from openlibrary.config import load_config as openlibrary_load_config
from openlibrary.core import cache, stats
from openlibrary.core.imports import Batch, ImportItem
from openlibrary.core.vendors import AmazonAPI, clean_amazon_metadata_for_load
from openlibrary.utils.dateutil import WEEK_SECS
from openlibrary.utils.isbn import (
normalize_identifier,
isbn_10_to_isbn_13,
)
logger = logging.getLogger("affiliate-server")
# fmt: off
urls = (
'/isbn/([bB]?[0-9a-zA-Z-]+)', 'Submit',
'/status', 'Status',
'/clear', 'Clear',
)
# fmt: on
API_MAX_ITEMS_PER_CALL = 10
API_MAX_WAIT_SECONDS = 0.9
# TODO: make a map for Google Books.
AZ_OL_MAP = {
'cover': 'covers',
'title': 'title',
'authors': 'authors',
'publishers': 'publishers',
'publish_date': 'publish_date',
'number_of_pages': 'number_of_pages',
}
RETRIES: Final = 5
batch: Batch | None = None
web.amazon_queue = (
queue.PriorityQueue()
) # a thread-safe multi-producer, multi-consumer queue
web.amazon_lookup_thread = None
class Priority(Enum):
"""
Priority for the `PrioritizedIdentifier` class.
`queue.PriorityQueue` has a lowest-value-is-highest-priority system, but
setting `PrioritizedIdentifier.priority` to 0 can make it look as if priority is
disabled. Using an `Enum` can help with that.
"""
HIGH = 0
LOW = 1
def __lt__(self, other):
if isinstance(other, Priority):
return self.value < other.value
return NotImplemented
@dataclass(order=True, slots=True)
class PrioritizedIdentifier:
"""
    Represent an identifier's priority in the queue. Sorting is based on the `priority`
attribute, then the `timestamp` to solve tie breaks within a specific priority,
with priority going to whatever `min([items])` would return.
For more, see https://docs.python.org/3/library/queue.html#queue.PriorityQueue.
Therefore, priority 0, which is equivalent to `Priority.HIGH`, is the highest
priority.
This exists so certain identifiers can go to the front of the queue for faster
processing as their look-ups are time sensitive and should return look up data
to the caller (e.g. interactive API usage through `/isbn`).
"""
identifier: str = field(compare=False)
"""identifier is an ISBN 13 or B* ASIN."""
stage_import: bool = True
"""Whether to stage the item for import."""
priority: Priority = field(default=Priority.LOW)
timestamp: datetime = field(default_factory=datetime.now)
def __hash__(self):
"""Only consider the `identifier` attribute when hashing (e.g. for `set` uniqueness)."""
return hash(self.identifier)
def __eq__(self, other):
"""Two instances of PrioritizedIdentifier are equal if their `identifier` attribute is equal."""
if isinstance(other, PrioritizedIdentifier):
return self.identifier == other.identifier
return False
def to_dict(self):
"""
Convert the PrioritizedIdentifier object to a dictionary representation suitable
for JSON serialization.
"""
return {
"isbn": self.identifier,
"priority": self.priority.name,
"stage_import": self.stage_import,
"timestamp": self.timestamp.isoformat(),
}
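# Illustrative sketch (editor-added): HIGH-priority identifiers come off the queue before
# LOW-priority ones; the ISBNs below are hypothetical placeholders.
def _example_priority_ordering() -> list[str]:
    q: queue.PriorityQueue = queue.PriorityQueue()
    q.put(PrioritizedIdentifier(identifier="9780000000002"))  # defaults to Priority.LOW
    q.put(PrioritizedIdentifier(identifier="9780000000019", priority=Priority.HIGH))
    return [q.get().identifier for _ in range(2)]  # -> ["9780000000019", "9780000000002"]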
class BaseLookupWorker(threading.Thread):
"""
A base class for creating API look up workers on their own threads.
"""
def __init__(
self,
queue: queue.PriorityQueue,
process_item: Callable,
stats_client: stats.StatsClient,
logger: logging.Logger,
name: str,
) -> None:
self.queue = queue
self.process_item = process_item
self.stats_client = stats_client
self.logger = logger
self.name = name
def run(self):
while True:
try:
item = self.queue.get(timeout=API_MAX_WAIT_SECONDS)
self.logger.info(f"{self.name} lookup: processing item {item}")
self.process_item(item)
except queue.Empty:
continue
except Exception as e:
self.logger.exception(f"{self.name} Lookup Thread died: {e}")
self.stats_client.incr(f"ol.affiliate.{self.name}.lookup_thread_died")
class AmazonLookupWorker(BaseLookupWorker):
"""
A look up worker for the Amazon Products API.
A separate thread of execution that uses the time up to API_MAX_WAIT_SECONDS to
create a list of isbn_10s that is not larger than API_MAX_ITEMS_PER_CALL and then
passes them to process_amazon_batch()
"""
def run(self):
while True:
start_time = time.time()
asins: set[PrioritizedIdentifier] = set() # no duplicates in the batch
while len(asins) < API_MAX_ITEMS_PER_CALL and self._seconds_remaining(
start_time
):
try: # queue.get() will block (sleep) until successful or it times out
asins.add(
self.queue.get(timeout=self._seconds_remaining(start_time))
)
except queue.Empty:
pass
self.logger.info(f"Before amazon_lookup(): {len(asins)} items")
if asins:
                time.sleep(self._seconds_remaining(start_time))
try:
process_amazon_batch(asins)
self.logger.info(f"After amazon_lookup(): {len(asins)} items")
except Exception:
self.logger.exception("Amazon Lookup Thread died")
self.stats_client.incr("ol.affiliate.amazon.lookup_thread_died")
def _seconds_remaining(self, start_time: float) -> float:
return max(API_MAX_WAIT_SECONDS - (time.time() - start_time), 0)
def fetch_google_book(isbn: str) -> dict | None:
"""
Get Google Books metadata, if it exists.
"""
url = f"https://www.googleapis.com/books/v1/volumes?q=isbn:{isbn}"
headers = {"User-Agent": "Open Library BookWorm/1.0"}
try:
r = requests.get(url, headers=headers)
if r.status_code == 200:
return r.json()
except Exception as e:
logger.exception(f"Error processing ISBN {isbn} on Google Books: {e!s}")
stats.increment("ol.affiliate.google.total_fetch_exceptions")
return None
return None
# TODO: See AZ_OL_MAP and do something similar here.
def process_google_book(google_book_data: dict[str, Any]) -> dict[str, Any] | None:
"""
Returns a dict-edition record suitable for import via /api/import
Processing https://www.googleapis.com/books/v1/volumes?q=isbn:9785699350131:
{'isbn_10': ['5699350136'],
'isbn_13': ['9785699350131'],
'title': 'Бал моей мечты',
'subtitle': '[для сред. шк. возраста]',
'authors': [{'name': 'Светлана Лубенец'}],
'source_records': ['google_books:9785699350131'],
'publishers': [],
'publish_date': '2009',
'number_of_pages': 153}
"""
result = {}
isbn_10 = []
isbn_13 = []
if not (data := google_book_data.get("items", [])):
return None
if len(data) != 1:
logger.warning("Google Books had more than one result for an ISBN.")
return None
# Permanent URL: https://www.googleapis.com/books/v1/volumes/{id}
# google_books_identifier = data[0].get("id")
if not (book := data[0].get("volumeInfo", {})):
return None
# Extract ISBNs, if any.
for identifier in book.get("industryIdentifiers", []):
if identifier.get("type") == "ISBN_10":
isbn_10.append(identifier.get("identifier"))
elif identifier.get("type") == "ISBN_13":
isbn_13.append(identifier.get("identifier"))
result["isbn_10"] = isbn_10 if isbn_10 else []
result["isbn_13"] = isbn_13 if isbn_13 else []
result["title"] = book.get("title", "")
result["subtitle"] = book.get("subtitle")
result["authors"] = (
[{"name": author} for author in book.get("authors", [])]
if book.get("authors")
else []
)
# result["identifiers"] = {
# "google": [isbn_13]
# } # Assuming so far is there is always an ISBN 13.
google_books_identifier = isbn_13[0] if isbn_13 else isbn_10[0]
result["source_records"] = [f"google_books:{google_books_identifier}"]
# has publisher: https://www.googleapis.com/books/v1/volumes/YJ1uQwAACAAJ
# does not have publisher: https://www.googleapis.com/books/v1/volumes?q=isbn:9785699350131
result["publishers"] = [book.get("publisher")] if book.get("publisher") else []
result["publish_date"] = book.get("publishedDate", "")
# Language needs converting. 2 character code -> 3 character.
# result["languages"] = [book.get("language")] if book.get("language") else []
result["number_of_pages"] = book.get("pageCount", None)
result["description"] = book.get("description", None)
return result
def stage_from_google_books(isbn: str) -> bool:
"""
Stage `isbn` from the Google Books API. Can be ISBN 10 or 13.
See https://developers.google.com/books.
"""
if google_book_data := fetch_google_book(isbn):
if google_book := process_google_book(google_book_data=google_book_data):
get_current_batch("google").add_items(
[
{
'ia_id': google_book['source_records'][0],
'status': 'staged',
'data': google_book,
}
]
)
stats.increment("ol.affiliate.google.total_items_fetched")
return True
stats.increment("ol.affiliate.google.total_items_not_found")
return False
return False
def get_current_batch(name: str) -> Batch:
"""
At startup, get the `name` (e.g. amz) openlibrary.core.imports.Batch() for global use.
"""
global batch
if not batch:
batch = Batch.find(name) or Batch.new(name)
assert batch
return batch
def get_isbns_from_book(book: dict) -> list[str]: # Singular: book
return [str(isbn) for isbn in book.get('isbn_10', []) + book.get('isbn_13', [])]
def get_isbns_from_books(books: list[dict]) -> list[str]: # Plural: books
return sorted(set(itertools.chain(*[get_isbns_from_book(book) for book in books])))
def is_book_needed(book: dict, edition: dict) -> list[str]:
"""
Should an OL edition's metadata be updated with Amazon book data?
:param book: dict from openlibrary.core.vendors.clean_amazon_metadata_for_load()
:param edition: dict from web.ctx.site.get_many(edition_ids)
"""
needed_book_fields = [] # book fields that should be copied to the edition
for book_field, edition_field in AZ_OL_MAP.items():
        if book.get(book_field) and not edition.get(edition_field):
needed_book_fields.append(book_field)
if needed_book_fields == ["authors"]: # noqa: SIM102
        if (works := edition.get("works")) and (work_key := works[0].get("key")):
work = web.ctx.site.get(work_key)
if work.get("authors"):
needed_book_fields = []
if needed_book_fields: # Log book fields that should to be copied to the edition
fields = ", ".join(needed_book_fields)
logger.debug(f"{edition.get('key') or 'New Edition'} needs {fields}")
return needed_book_fields
def get_editions_for_books(books: list[dict]) -> list[dict]:
"""
Get the OL editions for a list of ISBNs.
    :param books: list of book dicts
:return: list of OL editions dicts
"""
isbns = get_isbns_from_books(books)
unique_edition_ids = set(
web.ctx.site.things({'type': '/type/edition', 'isbn_': isbns})
)
return web.ctx.site.get_many(list(unique_edition_ids))
def get_pending_books(books):
pending_books = []
editions = get_editions_for_books(books) # Make expensive call just once
# For each amz book, check that we need its data
for book in books:
ed = next(
(
ed
for ed in editions
if set(book.get('isbn_13')).intersection(set(ed.isbn_13))
or set(book.get('isbn_10')).intersection(set(ed.isbn_10))
),
{},
)
if is_book_needed(book, ed):
pending_books.append(book)
return pending_books
def make_cache_key(product: dict[str, Any]) -> str:
"""
Takes a `product` returned from `vendor.get_products()` and returns a cache key to
identify the product. For a given product, the cache key will be either (1) its
    ISBN 13, or (2) its non-ISBN-10 ASIN (i.e. one that starts with `B`).
"""
if (isbn_13s := product.get("isbn_13")) and len(isbn_13s):
return isbn_13s[0]
if product.get("isbn_10") and (
cache_key := isbn_10_to_isbn_13(product.get("isbn_10", [])[0])
):
return cache_key
if (source_records := product.get("source_records")) and (
amazon_record := next(
(record for record in source_records if record.startswith("amazon:")), ""
)
):
return amazon_record.split(":")[1]
return ""
def process_amazon_batch(asins: Collection[PrioritizedIdentifier]) -> None:
"""
Call the Amazon API to get the products for a list of isbn_10s/ASINs and store
each product in memcache using amazon_product_{isbn_13 or b_asin} as the cache key.
"""
logger.info(f"process_amazon_batch(): {len(asins)} items")
try:
identifiers = [
prioritized_identifier.identifier for prioritized_identifier in asins
]
products = web.amazon_api.get_products(identifiers, serialize=True)
# stats_ol_affiliate_amazon_imports - Open Library - Dashboards - Grafana
# http://graphite.us.archive.org Metrics.stats.ol...
stats.increment(
"ol.affiliate.amazon.total_items_fetched",
n=len(products),
)
except Exception:
logger.exception(f"amazon_api.get_products({asins}, serialize=True)")
return
for product in products:
cache_key = make_cache_key(product) # isbn_13 or non-ISBN-10 ASIN.
cache.memcache_cache.set( # Add each product to memcache
f'amazon_product_{cache_key}', product, expires=WEEK_SECS
)
# Only proceed if config finds infobase db creds
if not config.infobase.get('db_parameters'): # type: ignore[attr-defined]
logger.debug("DB parameters missing from affiliate-server infobase")
return
    # Skip staging no_import_identifiers for import by checking the AMZ source record.
no_import_identifiers = {
identifier.identifier for identifier in asins if not identifier.stage_import
}
books = [
clean_amazon_metadata_for_load(product)
for product in products
if product.get("source_records")[0].split(":")[1] not in no_import_identifiers
]
if books:
stats.increment(
"ol.affiliate.amazon.total_items_batched_for_import",
n=len(books),
)
get_current_batch(name="amz").add_items(
[
{'ia_id': b['source_records'][0], 'status': 'staged', 'data': b}
for b in books
]
)
def seconds_remaining(start_time: float) -> float:
return max(API_MAX_WAIT_SECONDS - (time.time() - start_time), 0)
def amazon_lookup(site, stats_client, logger) -> None:
"""
A separate thread of execution that uses the time up to API_MAX_WAIT_SECONDS to
create a list of isbn_10s that is not larger than API_MAX_ITEMS_PER_CALL and then
passes them to process_amazon_batch()
"""
stats.client = stats_client
web.ctx.site = site
while True:
start_time = time.time()
asins: set[PrioritizedIdentifier] = set() # no duplicates in the batch
while len(asins) < API_MAX_ITEMS_PER_CALL and seconds_remaining(start_time):
try: # queue.get() will block (sleep) until successful or it times out
asins.add(web.amazon_queue.get(timeout=seconds_remaining(start_time)))
except queue.Empty:
pass
logger.info(f"Before amazon_lookup(): {len(asins)} items")
if asins:
time.sleep(seconds_remaining(start_time))
try:
process_amazon_batch(asins)
logger.info(f"After amazon_lookup(): {len(asins)} items")
except Exception:
logger.exception("Amazon Lookup Thread died")
stats_client.incr("ol.affiliate.amazon.lookup_thread_died")
def make_amazon_lookup_thread() -> threading.Thread:
"""Called from start_server() and assigned to web.amazon_lookup_thread."""
thread = threading.Thread(
target=amazon_lookup,
args=(web.ctx.site, stats.client, logger),
daemon=True,
)
thread.start()
return thread
class Status:
def GET(self) -> str:
return json.dumps(
{
"thread_is_alive": bool(
web.amazon_lookup_thread and web.amazon_lookup_thread.is_alive()
),
"queue_size": web.amazon_queue.qsize(),
"queue": [isbn.to_dict() for isbn in web.amazon_queue.queue],
}
)
class Clear:
"""Clear web.amazon_queue and return the queue size before it was cleared."""
def GET(self) -> str:
qsize = web.amazon_queue.qsize()
web.amazon_queue.queue.clear()
stats.put(
"ol.affiliate.amazon.currently_queued_isbns",
web.amazon_queue.qsize(),
)
return json.dumps({"Cleared": "True", "qsize": qsize})
class Submit:
def GET(self, identifier: str) -> str:
"""
GET endpoint looking up ISBNs and B* ASINs via the affiliate server.
URL Parameters:
- high_priority='true' or 'false': whether to wait and return result.
- stage_import='true' or 'false': whether to stage result for import.
By default this is 'true'. Setting this to 'false' is useful when you
          want to return AMZ metadata but don't want to import; in that case
          high_priority must also be 'true', or this returns nothing and
stages nothing (unless the result is cached).
If `identifier` is in memcache, then return the `hit` (which is marshalled
into a format appropriate for import on Open Library if `?high_priority=true`).
By default `stage_import=true`, and results will be staged for import if they have
requisite fields. Disable staging with `stage_import=false`.
If no hit, then queue the identifier for look up and either attempt to return
a promise as `submitted`, or if `?high_priority=true`, return marshalled data
from the cache.
`Priority.HIGH` is set when `?high_priority=true` and is the highest priority.
It is used when the caller is waiting for a response with the AMZ data, if
available. See `PrioritizedIdentifier` for more on prioritization.
NOTE: For this API, "ASINs" are ISBN 10s when valid ISBN 10s, and otherwise
they are Amazon-specific identifiers starting with "B".
"""
        # web.amazon_api could be None if reached before it is initialized (mypy)
if not web.amazon_api:
return json.dumps({"error": "not_configured"})
# Handle URL query parameters.
input = web.input(high_priority=False, stage_import=True)
priority = (
Priority.HIGH if input.get("high_priority") == "true" else Priority.LOW
)
stage_import = input.get("stage_import") != "false"
b_asin, isbn_10, isbn_13 = normalize_identifier(identifier)
key = isbn_10 or b_asin
# For ISBN 13, conditionally go straight to Google Books.
if not key and isbn_13 and priority == Priority.HIGH and stage_import:
return (
json.dumps({"status": "success"})
if stage_from_google_books(isbn=isbn_13)
else json.dumps({"status": "not found"})
)
if not (key := isbn_10 or b_asin):
return json.dumps({"error": "rejected_isbn", "identifier": identifier})
# Cache lookup by isbn_13 or b_asin. If there's a hit return the product to
# the caller.
if product := cache.memcache_cache.get(f'amazon_product_{isbn_13 or b_asin}'):
return json.dumps(
{
"status": "success",
"hit": clean_amazon_metadata_for_load(product),
}
)
# Cache misses will be submitted to Amazon as ASINs (isbn10 if possible, or
# a 'true' ASIN otherwise) and the response will be `staged` for import.
if key not in web.amazon_queue.queue:
key_queue_item = PrioritizedIdentifier(
identifier=key, priority=priority, stage_import=stage_import
)
web.amazon_queue.put_nowait(key_queue_item)
# Give us a snapshot over time of how many new isbns are currently queued
stats.put(
"ol.affiliate.amazon.currently_queued_isbns",
web.amazon_queue.qsize(),
rate=0.2,
)
# Check the cache a few times for product data to return to the client,
# or otherwise return.
if priority == Priority.HIGH:
for _ in range(RETRIES):
time.sleep(1)
if product := cache.memcache_cache.get(
f'amazon_product_{isbn_13 or b_asin}'
):
# If not importing, return whatever data AMZ returns, even if it's unimportable.
cleaned_metadata = clean_amazon_metadata_for_load(product)
if not stage_import:
return json.dumps(
{"status": "success", "hit": cleaned_metadata}
)
# When importing, return a result only if the item can be imported.
source, pid = cleaned_metadata['source_records'][0].split(":")
if ImportItem.find_staged_or_pending(
identifiers=[pid], sources=[source]
):
return json.dumps(
{"status": "success", "hit": cleaned_metadata}
)
stats.increment("ol.affiliate.amazon.total_items_not_found")
# Fall back to Google Books
# TODO: Any point in having option not to stage and just return metadata?
if isbn_13 and stage_from_google_books(isbn=isbn_13):
return json.dumps({"status": "success"})
return json.dumps({"status": "not found"})
else:
return json.dumps(
{"status": "submitted", "queue": web.amazon_queue.qsize()}
)
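# Illustrative usage (editor-added; the identifiers are hypothetical placeholders):
#   GET /isbn/9780000000002?high_priority=true
#       -> waits briefly and returns {"status": "success", "hit": {...}} if metadata is found
#   GET /isbn/9780000000002
#       -> queues the lookup and returns {"status": "submitted", "queue": <size>}
#   GET /isbn/B012345678?high_priority=true&stage_import=false
#       -> returns whatever AMZ metadata is found, without staging it for import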
def load_config(configfile):
# This loads openlibrary.yml + infobase.yml
openlibrary_load_config(configfile)
stats.client = stats.create_stats_client(cfg=config)
web.amazon_api = None
args = [
config.amazon_api.get('key'),
config.amazon_api.get('secret'),
config.amazon_api.get('id'),
]
if all(args):
web.amazon_api = AmazonAPI(*args, throttling=0.9)
logger.info("AmazonAPI Initialized")
else:
raise RuntimeError(f"{configfile} is missing required keys.")
def setup_env():
# make sure PYTHON_EGG_CACHE is writable
os.environ['PYTHON_EGG_CACHE'] = "/tmp/.python-eggs"
# required when run as fastcgi
os.environ['REAL_SCRIPT_NAME'] = ""
def start_server():
sysargs = sys.argv[1:]
configfile, args = sysargs[0], sysargs[1:]
web.ol_configfile = configfile
# # type: (str) -> None
load_config(web.ol_configfile)
# sentry loaded by infogami
infogami._setup()
if "pytest" not in sys.modules:
web.amazon_lookup_thread = make_amazon_lookup_thread()
thread_is_alive = bool(
web.amazon_lookup_thread and web.amazon_lookup_thread.is_alive()
)
logger.critical(f"web.amazon_lookup_thread.is_alive() is {thread_is_alive}")
else:
logger.critical("Not starting amazon_lookup_thread in pytest")
sys.argv = [sys.argv[0]] + list(args)
app.run()
def start_gunicorn_server():
"""Starts the affiliate server using gunicorn server."""
from gunicorn.app.base import Application
configfile = sys.argv.pop(1)
class WSGIServer(Application):
def init(self, parser, opts, args):
pass
def load(self):
load_config(configfile)
# init_setry(app)
return app.wsgifunc(https_middleware)
WSGIServer("%prog openlibrary.yml --gunicorn [options]").run()
def https_middleware(app):
"""Hack to support https even when the app server http only.
The nginx configuration has changed to add the following setting:
proxy_set_header X-Scheme $scheme;
Using that value to overwrite wsgi.url_scheme in the WSGI environ,
which is used by all redirects and other utilities.
"""
def wrapper(environ, start_response):
if environ.get('HTTP_X_SCHEME') == 'https':
environ['wsgi.url_scheme'] = 'https'
return app(environ, start_response)
return wrapper
def runfcgi(func, addr=('localhost', 8000)):
"""Runs a WSGI function as a FastCGI pre-fork server."""
config = dict(web.config.get("fastcgi", {}))
mode = config.pop("mode", None)
if mode == "prefork":
import flup.server.fcgi_fork as flups
else:
import flup.server.fcgi as flups
return flups.WSGIServer(func, multiplexed=True, bindAddress=addr, **config).run()
web.config.debug = False
web.wsgi.runfcgi = runfcgi
app = web.application(urls, locals())
if __name__ == "__main__":
setup_env()
if "--gunicorn" in sys.argv:
sys.argv.pop(sys.argv.index("--gunicorn"))
start_gunicorn_server()
else:
start_server()
| 27,759 | Python | .py | 643 | 34.797823 | 104 | 0.63134 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
483 | copydocs.py | internetarchive_openlibrary/scripts/copydocs.py | #!/usr/bin/env python
from __future__ import annotations
from collections import namedtuple
import json
import os
import sys
from collections.abc import Iterator
import web
from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
sys.path.insert(0, ".") # Enable scripts/copydocs.py to be run.
import scripts._init_path
import scripts.tests.test_copydocs
from openlibrary.api import OpenLibrary, marshal
__version__ = "0.2"
def find(server, prefix):
q = {'key~': prefix, 'limit': 1000}
# until all properties and backreferences are deleted on production server
if prefix == '/type':
q['type'] = '/type/type'
return [str(x) for x in server.query(q)]
class Disk:
"""Lets us copy templates from and records to the disk as files"""
def __init__(self, root):
self.root = root
def get_many(self, keys: list[str]) -> dict:
"""
Only gets templates
"""
def f(k):
return {
"key": k,
"type": {"key": "/type/template"},
"body": {
"type": "/type/text",
"value": open(self.root + k.replace(".tmpl", ".html")).read(),
},
}
return {k: f(k) for k in keys}
def save_many(
self, docs: list[dict | web.storage], comment: str | None = None
) -> None:
"""
:param typing.List[dict or web.storage] docs:
:param str or None comment: only here to match the signature of OpenLibrary api
"""
def write(path, text):
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
if isinstance(text, dict):
text = text['value']
try:
print("writing", path)
f = open(path, "w")
f.write(text)
f.close()
except OSError:
print("failed", path)
for doc in marshal(docs):
path = os.path.join(self.root, doc['key'][1:])
if doc['type']['key'] == '/type/template':
path = path.replace(".tmpl", ".html")
write(path, doc['body'])
elif doc['type']['key'] == '/type/macro':
path = path + ".html"
write(path, doc['macro'])
else:
path = path + ".json"
write(path, json.dumps(doc, indent=2))
def expand(server: Disk | OpenLibrary, keys: Iterator):
"""
Expands keys like "/templates/*" to be all template keys.
:param Disk or OpenLibrary server:
:param typing.Iterable[str] keys:
:return: typing.Iterator[str]
"""
if isinstance(server, Disk):
yield from keys
else:
for key in keys:
if key.endswith('*'):
yield from find(server, key)
else:
yield key
def read_lines(filename):
try:
return [line.strip() for line in open(filename)]
except OSError:
return []
def get_references(doc, result=None):
if result is None:
result = []
if isinstance(doc, list):
for v in doc:
get_references(v, result)
elif isinstance(doc, dict):
if 'key' in doc and len(doc) == 1:
result.append(doc['key'])
for v in doc.values():
get_references(v, result)
return result
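# Illustrative sketch (editor-added): get_references() collects bare {"key": ...} dicts
# found anywhere in a document; the keys below are hypothetical.
def _example_get_references() -> list[str]:
    doc = {"type": {"key": "/type/edition"}, "works": [{"key": "/works/OL1W"}]}
    return get_references(doc)  # -> ["/type/edition", "/works/OL1W"]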
class KeyVersionPair(namedtuple('KeyVersionPair', 'key version')):
"""Helper class to store uri's like /works/OL1W?v=2"""
__slots__ = ()
@staticmethod
def from_uri(uri: str) -> KeyVersionPair:
"""
:param str uri: either something like /works/OL1W, /books/OL1M?v=3, etc.
"""
if '?v=' in uri:
key, version = uri.split('?v=')
else:
key, version = uri, None
return KeyVersionPair._make([key, version])
def to_uri(self) -> str:
""" """
uri = self.key
if self.version:
uri += '?v=' + self.version
return uri
def __str__(self):
return self.to_uri()
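# Illustrative sketch (editor-added): parsing and re-serialising a versioned key.
def _example_key_version_pair() -> tuple[str, str | None, str]:
    pair = KeyVersionPair.from_uri("/books/OL1M?v=3")
    return pair.key, pair.version, pair.to_uri()  # -> ("/books/OL1M", "3", "/books/OL1M?v=3")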
def copy(
src: Disk | OpenLibrary,
dest: Disk | OpenLibrary,
keys: list[str],
comment: str,
recursive: bool = False,
editions: bool = False,
saved: set[str] | None = None,
cache: dict | None = None,
) -> None:
"""
    :param src: where we'll be copying from
    :param dest: where we'll be saving to
    :param comment: comment to write when saving the documents
    :param recursive: Whether to recursively fetch any referenced docs
:param editions: Whether to fetch editions of works as well
:param saved: keys saved so far
"""
if saved is None:
saved = set()
if cache is None:
cache = {}
def get_many(keys):
docs = marshal(src.get_many(keys).values())
# work records may contain excerpts, which reference the author of the excerpt.
# Deleting them to prevent loading the users.
for doc in docs:
doc.pop('excerpts', None)
# Authors are now with works. We don't need authors at editions.
if doc['type']['key'] == '/type/edition':
doc.pop('authors', None)
return docs
def fetch(uris: list[str]) -> list[dict | web.storage]:
# The remaining code relies on cache being a dict.
if not isinstance(cache, dict):
return []
key_pairs = [KeyVersionPair.from_uri(uri) for uri in uris]
docs = [cache[pair.key] for pair in key_pairs if pair.key in cache]
key_pairs = [pair for pair in key_pairs if pair.to_uri() not in cache]
unversioned_keys = [pair.key for pair in key_pairs if pair.version is None]
versioned_to_get = [pair for pair in key_pairs if pair.version is not None]
if unversioned_keys:
print("fetching", unversioned_keys)
docs2 = get_many(unversioned_keys)
cache.update((doc['key'], doc) for doc in docs2)
docs.extend(docs2)
# Do versioned second so they can overwrite if necessary
if versioned_to_get:
print("fetching versioned", versioned_to_get)
# src is type Disk | OpenLibrary, and here must be OpenLibrary for the get()
# method, But using isinstance(src, OpenLibrary) causes pytest to fail
# because TestServer is type scripts.tests.test_copydocs.FakeServer.
assert isinstance(
src, (OpenLibrary, scripts.tests.test_copydocs.FakeServer)
), "fetching editions only works with OL src"
docs2 = [src.get(pair.key, int(pair.version)) for pair in versioned_to_get]
cache.update((doc['key'], doc) for doc in docs2)
docs.extend(docs2)
return docs
keys = [
k
for k in keys
# Ignore /scan_record and /scanning_center ; they can cause infinite loops?
if k not in saved and not k.startswith('/scan')
]
docs = fetch(keys)
if editions:
work_keys = [key for key in keys if key.startswith('/works/')]
assert isinstance(src, OpenLibrary), "fetching editions only works with OL src"
if work_keys:
# eg https://openlibrary.org/search.json?q=key:/works/OL102584W
resp = src.search(
'key:' + ' OR '.join(work_keys),
limit=len(work_keys),
fields=['edition_key'],
)
edition_keys = [
f"/books/{olid}" for doc in resp['docs'] for olid in doc['edition_key']
]
if edition_keys:
print("copying edition keys")
copy(
src,
dest,
edition_keys,
comment,
recursive=recursive,
saved=saved,
cache=cache,
)
if recursive:
refs = get_references(docs)
refs = [r for r in set(refs) if not r.startswith(("/type/", "/languages/"))]
if refs:
print("found references", refs)
copy(src, dest, refs, comment, recursive=True, saved=saved, cache=cache)
docs = [doc for doc in docs if doc['key'] not in saved]
keys = [doc['key'] for doc in docs]
print("saving", keys)
    # Sometimes saves inexplicably error; check infobase logs
# group things up to avoid a bad apple failing the batch
for group in web.group(docs, 50):
try:
print(dest.save_many(group, comment=comment))
except BaseException as e:
print(f"Something went wrong saving this batch! {e}")
saved.update(keys)
def copy_list(src, dest, list_key, comment):
keys = set()
def jsonget(url):
url = url.encode("utf-8")
text = src._request(url).read()
return json.loads(text)
def get(key):
print("get", key)
return marshal(src.get(list_key))
def query(**q):
print("query", q)
return [x['key'] for x in marshal(src.query(q))]
def get_list_seeds(list_key):
d = jsonget(list_key + "/seeds.json")
return d['entries'] # [x['url'] for x in d['entries']]
def add_seed(seed):
if seed['type'] in ('edition', 'work'):
keys.add(seed['url'])
elif seed['type'] == 'subject':
doc = jsonget(seed['url'] + '.json')
keys.update(w['key'] for w in doc['works'])
seeds = get_list_seeds(list_key)
for seed in seeds:
add_seed(seed)
edition_keys = {k for k in keys if k.startswith("/books/")}
work_keys = {k for k in keys if k.startswith("/works/")}
for w in work_keys:
edition_keys.update(query(type='/type/edition', works=w, limit=500))
keys = list(edition_keys) + list(work_keys)
copy(src, dest, keys, comment=comment, recursive=True)
def main(
keys: list[str],
src: str = "http://openlibrary.org/",
dest: str = "http://localhost:8080",
comment: str = "",
recursive: bool = True,
editions: bool = True,
lists: list[str] | None = None,
search: str | None = None,
search_limit: int = 10,
) -> None:
"""
Script to copy docs from one OL instance to another.
Typically used to copy templates, macros, css and js from
openlibrary.org to dev instance. paths can end with wildcards.
USAGE:
# Copy all templates
./scripts/copydocs.py --src http://openlibrary.org /templates/*
# Copy specific records
./scripts/copydocs.py /authors/OL113592A /works/OL1098727W?v=2
# Copy search results
./scripts/copydocs.py --search "publisher:librivox" --search-limit 10
:param src: URL of the source open library server
:param dest: URL of the destination open library server
:param recursive: Recursively fetch all the referred docs
:param editions: Also fetch all the editions of works
:param lists: Copy docs from list(s)
:param search: Run a search on open library and copy docs from the results
"""
# Mypy doesn't handle union-ing types across if statements -_-
# https://github.com/python/mypy/issues/6233
src_ol: Disk | OpenLibrary = (
OpenLibrary(src) if src.startswith("http://") else Disk(src)
)
dest_ol: Disk | OpenLibrary = (
OpenLibrary(dest) if dest.startswith("http://") else Disk(dest)
)
if isinstance(dest_ol, OpenLibrary):
section = "[%s]" % web.lstrips(dest, "http://").strip("/")
if section in read_lines(os.path.expanduser("~/.olrc")):
dest_ol.autologin()
else:
dest_ol.login("admin", "admin123")
for list_key in lists or []:
copy_list(src_ol, dest_ol, list_key, comment=comment)
if search:
assert isinstance(src_ol, OpenLibrary), "Search only works with OL src"
keys += [
doc['key']
for doc in src_ol.search(search, limit=search_limit, fields=['key'])['docs']
]
keys = list(expand(src_ol, ('/' + k.lstrip('/') for k in keys)))
copy(src_ol, dest_ol, keys, comment=comment, recursive=recursive, editions=editions)
if __name__ == '__main__':
FnToCLI(main).run()
| 12,281 | Python | .py | 314 | 30.165605 | 88 | 0.582072 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
484 | update-loans.py | internetarchive_openlibrary/scripts/update-loans.py | #!/usr/bin/env python
"""Script to update loans and waiting loans on regular intervals.
Tasks done:
* delete all waiting-loans that are expired
"""
import sys
import web
from openlibrary.core import waitinglist
from openlibrary.plugins.upstream import borrow
web.config.debug = False
def usage():
print(
"python scripts/openlibrary-server openlibrary.yml runscript scripts/update-loans.py [update-loans | update-waitinglists]"
)
def main():
try:
cmd = sys.argv[1]
except IndexError:
cmd = "help"
if cmd == "update-loans":
borrow.update_all_loan_status()
elif cmd == "update-waitinglists":
waitinglist.prune_expired_waitingloans()
waitinglist.update_all_waitinglists()
elif cmd == "update-waitinglist":
waitinglist.update_waitinglist(sys.argv[2])
else:
usage()
if __name__ == "__main__":
main()
| 903 | Python | .py | 30 | 25.466667 | 130 | 0.695602 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
485 | solr_dump_xisbn.py | internetarchive_openlibrary/scripts/solr_dump_xisbn.py | #!/usr/bin/env python
"""
Script for creating a file of similar ISBNs or LCCNs from Solr.
Run on ol-solr1 (the staging solr). Turn off solr-next-updater on
ol-home0, to avoid extra strain on the server, and then kick it off with
`tmux` (use tmux so the process continues if the ssh connection disconnects).
```sh
# Took ~10.5 hours 2024-04
time docker run --rm \
--name similarities-dump \
--network host \
-e PYTHONPATH=/openlibrary \
openlibrary/olbase:latest \
python scripts/solr_dump_xisbn.py --solr-base http://localhost:8983/solr/openlibrary \
> unwanted_isbns_$(date +"%Y-%m-%d").txt
# Took ~8.5 hours 2024-04
time docker run --rm \
--name similarities-dump \
--network host \
-e PYTHONPATH=/openlibrary \
openlibrary/olbase:latest \
python scripts/solr_dump_xisbn.py --solr-base http://localhost:8983/solr/openlibrary --id-field lccn \
> unwanted_lccns_$(date +"%Y-%m-%d").txt
```
"""
import asyncio
import sys
from typing import Literal
from collections.abc import AsyncGenerator
import httpx
# EG http://localhost:8984/solr/openlibrary/select?editions.fl=key%2Cisbn&editions.q=(%7B!terms%20f%3D_root_%20v%3D%24row.key%7D)%20AND%20language%3Aeng%20AND%20isbn%3A*%20AND%20type%3Aedition&editions.rows=1000000&fl=key%2Ceditions%3A%5Bsubquery%5D&fq=type%3Awork&indent=true&q=isbn%3A*%20AND%20NOT%20subject%3Atextbook%20AND%20_query_%3A(%7B!parent%20which%3Dtype%3Awork%20v%3D%22language%3Aeng%20AND%20ia_box_id%3A*%22%20filters%3D%22type%3Aedition%22%7D)&sort=key%20asc&wt=json
async def fetch_docs(
params: dict[str, str | int],
solr_base: str,
page_size=100,
) -> list[dict]:
"""Stream results from a Solr query. Uses cursors."""
params = params.copy()
params['rows'] = page_size
async with httpx.AsyncClient() as client:
for attempt in range(5):
try:
response = await client.get(
f'{solr_base}/select',
params=params,
timeout=60,
)
response.raise_for_status()
break
except (httpx.RequestError, httpx.HTTPStatusError):
if attempt == 4:
raise
await asyncio.sleep(2)
data = response.json()
return data['response']['docs']
async def stream_bounds(
params: dict[str, str],
solr_base: str,
page_size=100,
) -> AsyncGenerator[tuple[str, str], None]:
"""Stream bounds from a Solr query. Uses cursors."""
params = params.copy()
params['rows'] = page_size
params['cursorMark'] = '*'
numFound = None
seen = 0
# Keep session open and retry on connection errors
transport = httpx.AsyncHTTPTransport()
async with httpx.AsyncClient(transport=transport) as client:
while True:
print(f'FETCH {params["cursorMark"]}', file=sys.stderr)
if numFound:
print(f'{seen/numFound=}', file=sys.stderr)
for attempt in range(5):
try:
response = await client.get(
f'{solr_base}/select',
params=params,
timeout=60,
)
response.raise_for_status()
break
except (httpx.RequestError, httpx.HTTPStatusError):
if attempt == 4:
raise
await asyncio.sleep(2)
data = response.json()
numFound = data['response']['numFound']
docs = data['response']['docs']
if docs:
seen += len(docs)
yield docs[0]['key'], docs[-1]['key']
else:
break
if params['cursorMark'] == data['nextCursorMark']:
break
else:
params['cursorMark'] = data['nextCursorMark']
async def main(
solr_base='http://localhost:8984/solr/openlibrary',
workers=10,
page_size=100,
id_field: Literal['isbn', 'lccn'] = 'isbn',
) -> None:
"""
:param solr_base: Base URL of Solr instance
:param workers: Number of workers to use
:param page_size: Number of results to fetch per query
:param id_field: Which identifier to use for matching
"""
id_filter = f'{id_field}:*'
work_exclusions = 'subject:textbook'
galloping_params = {
# Find works that have at least one edition with an IA box id
'q': f"""
{id_filter}
AND NOT ({work_exclusions})
AND _query_:({{!parent which=type:work v="language:eng AND ia_box_id:*" filters="type:edition"}})
""",
'fq': 'type:work',
'sort': 'key asc',
'fl': 'key',
'wt': 'json',
}
# This is a performance hack
# this returns pairs like ('/works/OL1W', '/works/OL200W'), ('/works/OL201W', '/works/OL300W')
# which we can use to make multiple queries in parallel
# Now create an async worker pool to fetch the actual data
async def fetch_bounds(bounds):
print(f'[ ] FETCH {bounds=}', file=sys.stderr)
start, end = bounds
result = ''
for doc in await fetch_docs(
{
'q': f'key:["{start}" TO "{end}"] AND {galloping_params["q"]}',
'fq': 'type:work',
'fl': 'key,editions:[subquery]',
'editions.q': f'({{!terms f=_root_ v=$row.key}}) AND language:eng AND {id_filter}',
'editions.fq': 'type:edition',
'editions.fl': f'key,{id_field}',
'editions.rows': 1_000_000,
'wt': 'json',
},
solr_base,
page_size=page_size * 2,
):
identifiers = {
identifier
for ed in doc['editions']['docs']
for identifier in ed[id_field]
if (len(identifier) == 13 if id_field == 'isbn' else True)
}
if len(identifiers) > 1:
result += ' '.join(identifiers) + '\n'
print(f'[x] FETCH {bounds=}', file=sys.stderr)
if result:
print(result, flush=True)
# now run N workers in async pool to process the bounds
running: set[asyncio.Task] = set()
async for bounds in stream_bounds(galloping_params, solr_base, page_size=page_size):
if len(running) >= workers:
done, running = await asyncio.wait(
running, return_when=asyncio.FIRST_COMPLETED
)
for task in done:
task.result()
running.add(asyncio.create_task(fetch_bounds(bounds)))
    # wait for all workers to finish
    if running:
        await asyncio.wait(running)
if __name__ == '__main__':
from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
FnToCLI(main).run()
| 6,968 | Python | .py | 172 | 30.424419 | 481 | 0.571977 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
486 | manage-imports.py | internetarchive_openlibrary/scripts/manage-imports.py | #!/usr/bin/env python
import datetime
import json
import logging
import os
import sys
import time
import web
from openlibrary.api import OLError, OpenLibrary
from openlibrary.config import load_config
from openlibrary.core.ia import get_candidate_ocaids
from openlibrary.core.imports import Batch, ImportItem
logger = logging.getLogger("openlibrary.importer")
@web.memoize
def get_ol(servername=None):
if os.getenv('LOCAL_DEV'):
ol = OpenLibrary(base_url="http://localhost:8080")
ol.login("admin", "admin123")
else:
ol = OpenLibrary(base_url=servername)
ol.autologin()
return ol
def ol_import_request(item, retries=5, servername=None, require_marc=True):
"""Requests OL to import an item and retries on server errors."""
# logger uses batch_id:id for item.data identifier if no item.ia_id
_id = item.ia_id or f"{item.batch_id}:{item.id}"
logger.info(f"importing {_id}")
for i in range(retries):
if i != 0:
logger.info("sleeping for 5 seconds before next attempt.")
time.sleep(5)
try:
ol = get_ol(servername=servername)
if item.data:
return ol.import_data(item.data)
return ol.import_ocaid(item.ia_id, require_marc=require_marc)
except OSError as e:
logger.warning(f"Failed to contact OL server. error={e!r}")
except OLError as e:
logger.warning(f"Failed to contact OL server. error={e!r}")
if e.code < 500:
return e.text
def do_import(item, servername=None, require_marc=True):
logger.info(f"do_import START (pid:{os.getpid()})")
response = ol_import_request(item, servername=servername, require_marc=require_marc)
if response and response.startswith('{'):
d = json.loads(response)
if d.get('success') and 'edition' in d:
edition = d['edition']
logger.info(f"success: {edition['status']} {edition['key']}")
item.set_status(edition['status'], ol_key=edition['key'])
else:
error_code = d.get('error_code', 'unknown-error')
logger.error(f"failed with error code: {error_code}")
item.set_status("failed", error=error_code)
else:
logger.error(f"failed with internal error: {response}")
item.set_status("failed", error='internal-error')
logger.info(f"do_import END (pid:{os.getpid()})")
def add_items(batch_name, filename):
batch = Batch.find(batch_name) or Batch.new(batch_name)
batch.load_items(filename)
def import_ocaids(*ocaids, **kwargs):
"""This method is mostly for testing. It allows you to import one more
archive.org items into Open Library by ocaid
Usage:
$ sudo -u openlibrary \
HOME=/home/openlibrary OPENLIBRARY_RCFILE=/olsystem/etc/olrc-importbot \
python scripts/manage-imports.py \
--config /olsystem/etc/openlibrary.yml \
      import-ocaids <ocaid> [<ocaid> ...]
"""
servername = kwargs.get('servername', None)
require_marc = not kwargs.get('no_marc', False)
date = datetime.date.today()
if not ocaids:
raise ValueError("Must provide at least one ocaid")
batch_name = f"import-{ocaids[0]}-{date.year:04}{date.month:02}"
    batch = Batch.find(batch_name) or Batch.new(batch_name)
    try:
        batch.add_items(ocaids)
    except Exception:
        logger.info("skipping batch adding, already present")
for ocaid in ocaids:
item = ImportItem.find_by_identifier(ocaid)
if item:
do_import(item, servername=servername, require_marc=require_marc)
else:
logger.error(f"{ocaid} is not found in the import queue")
def add_new_scans(args):
"""Adds new scans from yesterday."""
if args:
datestr = args[0]
yyyy, mm, dd = datestr.split("-")
date = datetime.date(int(yyyy), int(mm), int(dd))
else:
# yesterday
date = datetime.date.today() - datetime.timedelta(days=1)
items = list(get_candidate_ocaids(date))
batch_name = f"new-scans-{date.year:04}{date.month:02}"
batch = Batch.find(batch_name) or Batch.new(batch_name)
batch.add_items(items)
def import_batch(args, **kwargs):
servername = kwargs.get('servername', None)
require_marc = not kwargs.get('no_marc', False)
batch_name = args[0]
batch = Batch.find(batch_name)
if not batch:
print("Unknown batch", batch, file=sys.stderr)
sys.exit(1)
for item in batch.get_items():
do_import(item, servername=servername, require_marc=require_marc)
def import_item(args, **kwargs):
servername = kwargs.get('servername', None)
require_marc = not kwargs.get('no_marc', False)
ia_id = args[0]
if item := ImportItem.find_by_identifier(ia_id):
do_import(item, servername=servername, require_marc=require_marc)
else:
logger.error(f"{ia_id} is not found in the import queue")
def import_all(args, **kwargs):
import multiprocessing
servername = kwargs.get('servername', None)
require_marc = not kwargs.get('no_marc', False)
# Use multiprocessing to call do_import on each item
with multiprocessing.Pool(processes=8) as pool:
while True:
logger.info("find_pending START")
items = ImportItem.find_pending()
logger.info("find_pending END")
if not items:
logger.info("No pending items found. sleeping for a minute.")
time.sleep(60)
continue
logger.info("starmap START")
pool.starmap(
do_import, ((item, servername, require_marc) for item in items)
)
logger.info("starmap END")
def main():
if "--config" in sys.argv:
index = sys.argv.index("--config")
configfile = sys.argv[index + 1]
del sys.argv[index : index + 2]
else:
configfile = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
os.pardir,
os.pardir,
'openlibrary',
'conf',
'openlibrary.yml',
)
)
load_config(configfile)
from infogami import config
cmd = sys.argv[1]
args, flags = [], {
'servername': config.get('servername', 'https://openlibrary.org')
}
for i in sys.argv[2:]:
if i.startswith('--'):
flags[i[2:]] = True
else:
args.append(i)
if cmd == "import-ocaids":
return import_ocaids(*args, **flags)
if cmd == "add-items":
return add_items(*args)
elif cmd == "add-new-scans":
return add_new_scans(args)
elif cmd == "import-batch":
return import_batch(args, **flags)
elif cmd == "import-all":
return import_all(args, **flags)
elif cmd == "import-item":
return import_item(args, **flags)
else:
logger.error(f"Unknown command: {cmd}")
if __name__ == "__main__":
main()
| 7,148 | Python | .py | 187 | 30.31016 | 88 | 0.62052 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
487 | pull-templates.py | internetarchive_openlibrary/scripts/pull-templates.py | #!/usr/bin/env python
"""Script to pull templates and macros from an openlibrary instance to repository.
"""
import _init_path # Imported for its side effect of setting PYTHONPATH
import os
import web
from optparse import OptionParser
from openlibrary.api import OpenLibrary, marshal
def parse_options(args=None):
parser = OptionParser(args)
parser.add_option(
"-s",
"--server",
dest="server",
default="http://openlibrary.org/",
help="URL of the openlibrary website (default: %default)",
)
parser.add_option(
"--template-root",
dest="template_root",
default="/upstream",
help="Template root (default: %default)",
)
parser.add_option(
"--default-plugin",
dest="default_plugin",
default="/upstream",
help="Default plugin (default: %default)",
)
options, args = parser.parse_args()
options.template_root = options.template_root.rstrip("/")
return options, args
def write(path, text):
print("saving", path)
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
text = text.replace("\r\n", "\n").replace("\r", "\n")
f = open(path, "w")
f.write(text.encode("utf-8"))
f.close()
def delete(path):
print("deleting", path)
if os.path.exists(path):
os.remove(path)
def make_path(doc):
if doc['key'].endswith(".css"):
return "static/css/" + doc['key'].split("/")[-1]
elif doc['key'].endswith(".js"):
return "openlibrary/plugins/openlibrary/js/" + doc['key'].split("/")[-1]
else:
        key = doc['key'].rsplit(".", 1)[0]
key = web.lstrips(key, options.template_root)
plugin = doc.get("plugin", options.default_plugin)
return f"openlibrary/plugins/{plugin}{key}.html"
def get_value(doc, property):
value = doc.get(property, "")
if isinstance(value, dict) and "value" in value:
return value['value']
else:
return value
def main():
global options
options, args = parse_options()
ol = OpenLibrary(options.server)
for pattern in args:
docs = ol.query({"key~": pattern, "*": None}, limit=1000)
for doc in marshal(docs):
# Anand: special care to ignore bad documents in the database.
if "--duplicate" in doc['key']:
continue
if doc['type']['key'] == '/type/template':
write(make_path(doc), get_value(doc, 'body'))
elif doc['type']['key'] == '/type/macro':
write(make_path(doc), get_value(doc, 'macro'))
elif doc['type']['key'] == '/type/rawtext':
write(make_path(doc), get_value(doc, 'body'))
else:
delete(make_path(doc))
if __name__ == "__main__":
main()
| 2,843 | Python | .py | 81 | 27.975309 | 82 | 0.595325 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
488 | detect_missing_i18n.py | internetarchive_openlibrary/scripts/detect_missing_i18n.py | #!/usr/bin/env python
"""Utility script to list html files which might be missing i18n strings."""
import _init_path # noqa: F401 Imported for its side effect of setting PYTHONPATH
import re
import sys
from pathlib import Path
from enum import Enum
from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
import glob
# This is a list of files that are intentionally excluded from the i18n process
EXCLUDE_LIST = {
# This is being left untranslated because it is rarely used
"openlibrary/templates/admin/sync.html",
# These are excluded because they require more info to fix
"openlibrary/templates/books/edit.html",
"openlibrary/templates/history/sources.html",
# This can't be fixed because it's not in the i18n directories
"openlibrary/admin/templates/admin/index.html",
# These can't be fixed since they're rendered as static html
"static/offline.html",
"static/status-500.html",
# Uses jsdef and the current stance is no i18n in JS.
"openlibrary/templates/jsdef/LazyAuthorPreview.html",
}
default_directories = ('openlibrary/templates/', 'openlibrary/macros/')
class Errtype(str, Enum):
WARN = "\033[93mWARN\033[0m"
ERR = "\033[91mERRO\033[0m"
SKIP = "\033[94mSKIP\033[0m"
skip_directive = r"# detect-missing-i18n-skip-line"
regex_skip_inline = r"\$" + skip_directive
regex_skip_previous_line = r"^\s*\$?" + skip_directive
# Assumptions:
# - Not concerned about HTML elements whose untranslated contents follow a newline, e.g. <p>\nsome untranslated text\n</p>.
# - Don't want to flag false positives where > characters are not part of tags, so this regex looks for a complete opening tag.
# TODO: replace the huge punctuation array with \p{L} - only supported in pip regex and not re
punctuation = r"[\(\)\{\}\[\]\/\\:;\-_\s+=*^%#\.•·\?♥|≡0-9,!xX✓×@\"'†★]"
htmlents = r"&[a-z0-9]+;"
variables = r"\$:?[^\s]+|\$[^\s\(]+[\(][^\)]+[\)]|\$[^\s\[]+[\[][^\]]+[\]]|\$[\{][^\}]+[\}]|%\(?[a-z_]+\)?|\{\{[^\}]+\}\}"
urls_domains = r"https?:\/\/[^\s]+|[a-z\-]+\.[A-Za-z]{2}[a-z]?"
opening_tag_open = r"<(?!code|link|!--)[a-z][^>]*?"
opening_tag_end = r"[^\/\-\s]>"
opening_tag_syntax = opening_tag_open + opening_tag_end
ignore_after_opening_tag = (
r"(?![<\r\n]|$|\\\$\$|\$:?_?\(|\$:?ungettext\(|(?:"
+ punctuation
+ r"|"
+ htmlents
+ r"|"
+ variables
+ r"|"
+ urls_domains
+ r")+(?:[\r\n<]|$))"
)
warn_after_opening_tag = r"\$\(['\"]"
i18n_element_missing_regex = opening_tag_syntax + ignore_after_opening_tag
i18n_element_warn_regex = opening_tag_syntax + warn_after_opening_tag
attr_syntax = r"(title|placeholder|alt)="
ignore_double_quote = (
r"\"(?!\$:?_?\(|\$:?ungettext\(|\\\$\$|(?:"
+ punctuation
+ r"|"
+ variables
+ r"|"
+ urls_domains
+ r")*\")"
)
ignore_single_quote = (
r"\'(?!\$:?_?\(|\$:?ungettext\(|\\\$\$|(?:"
+ punctuation
+ r"|"
+ variables
+ r"|"
+ urls_domains
+ r")*\')"
)
i18n_attr_missing_regex = (
opening_tag_open
+ attr_syntax
+ r"(?:"
+ ignore_double_quote
+ r"|"
+ ignore_single_quote
+ r")[^>]*?>"
)
i18n_attr_warn_regex = opening_tag_open + attr_syntax + r"\"\$\(\'"
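# Illustrative examples (not used by the checks themselves) of what these regexes flag:
#   <td>Hello world</td>          -> error   (untranslated element text)
#   <td>$_("Hello world")</td>    -> ignored (wrapped in an i18n function)
#   <td>$("Hello world")</td>     -> warning (i18n bypassed with plain $())
#   <img alt="A cover">           -> error   (untranslated attribute)
#   <img alt="$_('A cover')">     -> ignored
# A flagged line can also be suppressed with the skip directive, either inline
# ($# detect-missing-i18n-skip-line) or on the preceding line.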
def terminal_underline(text: str) -> str:
return f"\033[4m{text}\033[0m"
def print_analysis(
errtype: str,
filename: Path,
details: str,
spacing_base: int,
line_number: int = 0,
line_position: int = 0,
):
linestr = (
f":{line_number}:{line_position}"
if line_number > 0 and line_position > 0
else ""
)
filestring = f'{filename}{linestr}'
print(
'\t'.join(
[errtype, terminal_underline(filestring).ljust(spacing_base + 12), details]
)
)
def main(files: list[Path], skip_excluded: bool = True):
"""
:param files: The html files to check for missing i18n. Leave empty to run over all html files.
:param skip_excluded: If --no-skip-excluded is supplied as an arg, files in the EXCLUDE_LIST slice will be processed
"""
if not files:
files = [
Path(file_path)
for ddir in default_directories
for file_path in glob.glob(f'{ddir}**/*.html', recursive=True)
]
# Figure out how much padding to put between the filename and the error output
longest_filename_length = max(len(str(f)) for f in files)
spacing_base = longest_filename_length + len(':XXX:XXX')
errcount: int = 0
warnings: int = 0
for file in files:
contents = file.read_text()
lines = contents.splitlines()
if skip_excluded and str(file) in EXCLUDE_LIST:
print_analysis(Errtype.SKIP, file, "", spacing_base)
continue
for line_number, line in enumerate(lines, start=1):
includes_error_element = re.search(i18n_element_missing_regex, line)
includes_warn_element = re.search(i18n_element_warn_regex, line)
includes_error_attribute = re.search(i18n_attr_missing_regex, line)
includes_warn_attribute = re.search(i18n_attr_warn_regex, line)
char_index = -1
# Element with untranslated elements
if includes_error_element:
char_index = includes_error_element.start()
errtype = Errtype.ERR
# Element with bypassed elements
elif includes_warn_element:
char_index = includes_warn_element.start()
errtype = Errtype.WARN
# Element with untranslated attributes
elif includes_error_attribute:
char_index = includes_error_attribute.start()
errtype = Errtype.ERR
# Element with bypassed attributes
elif includes_warn_attribute:
char_index = includes_warn_attribute.start()
errtype = Errtype.WARN
# Don't proceed if the line doesn't match any of the four cases.
else:
continue
preceding_text = line[:char_index]
regex_match = line[char_index:]
# Don't proceed if the line is likely commented out or part of a $: function.
if (
"<!--" in preceding_text
or "$:" in preceding_text
or "$ " in preceding_text
):
continue
# Don't proceed if skip directive is included inline.
if re.search(regex_skip_inline, regex_match):
continue
# Don't proceed if the previous line is a skip directive.
if re.match(regex_skip_previous_line, lines[line_number - 2]):
continue
print_position = char_index + 1
print_analysis(
errtype,
file,
regex_match,
spacing_base,
line_number,
print_position,
)
if errtype == Errtype.WARN:
warnings += 1
elif errtype == Errtype.ERR:
errcount += 1
print(
f"{len(files)} file{'s' if len(files) != 1 else ''} scanned. {errcount} error{'s' if errcount != 1 else ''} found."
)
if errcount > 0 or warnings > 0:
print(
"Learn how to fix these errors by reading our i18n documentation: https://github.com/internetarchive/openlibrary/wiki/Internationalization#internationalization-i18n-developers-guide"
)
if errcount > 0:
sys.exit(1)
if __name__ == "__main__":
FnToCLI(main).run()
| 7,573 | Python | .py | 193 | 31.699482 | 194 | 0.596862 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
489 | open_syllabus_project_parser.py | internetarchive_openlibrary/scripts/open_syllabus_project_parser.py | '''
Run from root of openlibrary like so:
PYTHONPATH=$(PWD) python3 scripts/open_syllabus_project_parser.py
A python script that takes as an argument one directory.
In that directory there are files named as follows:
part-00000-d2b72298-1996-464d-b238-27e4737d69ab-c000.json.gz
part-00001-d2b72298-1996-464d-b238-27e4737d69ab-c000.json.gz
part-00002-d2b72298-1996-464d-b238-27e4737d69ab-c000.json.gz
etc
The contents of the uncompressed JSON files have JSON like this,
one object per line:
{
"ol_id": "/works/OL194763W",
"Accounting": 0,
"Agriculture": 0,
"Anthropology": 0,
"Architecture": 0,
"Astronomy": 0,
"Atmospheric Sciences": 0,
"Basic Computer Skills": 0,
"Basic Skills": 0,
"Biology": 0,
"Business": 0,
"Career Skills": 0,
"Chemistry": 0,
"Chinese": 0,
"Classics": 0,
"Computer Science": 0,
"Construction": 0,
"Cosmetology": 0,
"Criminal Justice": 0,
"Criminology": 0,
"Culinary Arts": 0,
"Dance": 0,
"Dentistry": 0,
"Earth Sciences": 0,
"Economics": 0,
"Education": 0,
"Engineering": 0,
"Engineering Technician": 0,
"English Literature": 0,
"Film and Photography": 0,
"Fine Arts": 0,
"Fitness and Leisure": 0,
"French": 0,
"Geography": 0,
"German": 0,
"Health Technician": 0,
"Hebrew": 0,
"History": 0,
"Japanese": 0,
"Journalism": 0,
"Law": 0,
"Liberal Arts": 0,
"Library Science": 0,
"Linguistics": 0,
"Marketing": 0,
"Mathematics": 0,
"Mechanic / Repair Tech": 0,
"Media / Communications": 0,
"Medicine": 0,
"Military Science": 0,
"Music": 0,
"Natural Resource Management": 0,
"Nursing": 0,
"Nutrition": 0,
"Philosophy": 0,
"Physics": 0,
"Political Science": 0,
"Psychology": 0,
"Public Administration": 0,
"Public Safety": 0,
"Religion": 0,
"Sign Language": 0,
"Social Work": 0,
"Sociology": 0,
"Spanish": 0,
"Theatre Arts": 0,
"Theology": 1,
"Transportation": 0,
"Veterinary Medicine": 0,
"Women's Studies": 0,
"total": 1
}
'''
from openlibrary.utils.open_syllabus_project import generate_osp_db
from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
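# Illustrative sketch (not part of the original script): how one of the
# part-*.json.gz files described in the docstring could be read, one JSON object
# per line. The path below is hypothetical; generate_osp_db() does the real work.
def _example_read_osp_part(path: str = 'part-00000-example.json.gz'):
    import gzip
    import json

    with gzip.open(path, 'rt') as f:
        for line in f:
            record = json.loads(line)
            yield record['ol_id'], record['total']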
FnToCLI(generate_osp_db).run()
| 2,293 | Python | .py | 88 | 21.772727 | 67 | 0.639091 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
490 | lc_marc_update.py | internetarchive_openlibrary/scripts/lc_marc_update.py | #!/usr/bin/env python
from openlibrary.catalog.importer.scribe import BadImport
from openlibrary.catalog.read_rc import read_rc
from openlibrary import config
from ftplib import FTP
from time import sleep
from lxml import etree
import sys
import httplib
import json
import argparse
import lxml.etree
parser = argparse.ArgumentParser(description='Library of Congress MARC update')
parser.add_argument('--config', default='openlibrary.yml')
args = parser.parse_args()
config_file = args.config
config.load(config_file)
c = config.runtime_config['lc_marc_update']
base_url = 'http://openlibrary.org'
import_api_url = base_url + '/api/import'
internal_error = '<Code>InternalError</Code>'
no_bucket_error = '<Code>NoSuchBucket</Code>'
def put_file(con, ia, filename, data):
print('uploading %s' % filename)
headers = {
'authorization': "LOW " + c['s3_key'] + ':' + c['s3_secret'],
# 'x-archive-queue-derive': 0,
}
url = 'http://s3.us.archive.org/' + ia + '/' + filename
print(url)
for attempt in range(5):
con.request('PUT', url, data, headers)
try:
res = con.getresponse()
except httplib.BadStatusLine as bad:
print('bad status line:', bad.line)
raise
body = res.read()
if '<Error>' not in body:
return
print('error')
print(body)
if no_bucket_error not in body and internal_error not in body:
sys.exit(0)
print('retry')
sleep(5)
print('too many failed attempts')
url = 'http://archive.org/download/marc_loc_updates/marc_loc_updates_files.xml'
attempts = 10
wait = 5
for attempt in range(attempts):
try:
root = etree.parse(
url, parser=lxml.etree.XMLParser(resolve_entities=False)
).getroot()
break
except:
if attempt == attempts - 1:
raise
print('error on attempt %d, retrying in %s seconds' % (attempt, wait))
sleep(wait)
existing = {f.attrib['name'] for f in root}
# existing.remove("v40.i32.records.utf8") # for testing
# existing.remove("v40.i32.report") # for testing
host = 'rs7.loc.gov'
to_upload = set()
def print_line(f):
if 'books.test' not in f and f not in existing:
to_upload.add(f)
def read_block(block):
global data
data += block
ftp = FTP(host)
ftp.set_pasv(False)
welcome = ftp.getwelcome()
ftp.login(c['lc_update_user'], c['lc_update_pass'])
ftp.cwd('/emds/books/all')
ftp.retrlines('NLST', print_line)
if to_upload:
print(welcome)
else:
ftp.close()
sys.exit(0)
bad = open(c['log_location'] + 'lc_marc_bad_import', 'a')
def iter_marc(data):
pos = 0
while pos < len(data):
length = data[pos : pos + 5]
int_length = int(length)
yield (pos, int_length, data[pos : pos + int_length])
pos += int_length
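# Note (illustrative): a MARC21 record starts with a 5-character ASCII record
# length, so iter_marc() walks a concatenated *.records.utf8 file by reading that
# length and advancing the offset, yielding (offset, length, record) triples.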
def login(h1, password):
body = json.dumps({'username': 'LCImportBot', 'password': password})
headers = {'Content-Type': 'application/json'}
h1.request('POST', base_url + '/account/login', body, headers)
print(base_url + '/account/login')
res = h1.getresponse()
print(res.read())
print('status:', res.status)
assert res.status == 200
cookies = res.getheader('set-cookie').split(',')
cookie = ';'.join([c.split(';')[0] for c in cookies])
return cookie
h1 = httplib.HTTPConnection('openlibrary.org')
headers = {
'Content-type': 'application/marc',
'Cookie': login(h1, c['ol_bot_pass']),
}
h1.close()
item_id = 'marc_loc_updates'
for f in to_upload:
data = ''
print('downloading', f)
ftp.retrbinary('RETR ' + f, read_block)
print('done')
con = httplib.HTTPConnection('s3.us.archive.org')
con.connect()
put_file(con, item_id, f, data)
con.close()
if not f.endswith('.records.utf8'):
continue
loc_file = item_id + '/' + f
for pos, length, marc_data in iter_marc(data):
loc = '%s:%d:%d' % (loc_file, pos, length)
headers['x-archive-meta-source-record'] = 'marc:' + loc
try:
h1 = httplib.HTTPConnection('openlibrary.org')
h1.request('POST', import_api_url, marc_data, headers)
try:
res = h1.getresponse()
except httplib.BadStatusLine:
raise BadImport
body = res.read()
if res.status != 200:
raise BadImport
else:
try:
reply = json.loads(body)
except ValueError:
print(('not JSON:', repr(body)))
raise BadImport
assert res.status == 200
print(reply)
assert reply['success']
h1.close()
except BadImport:
print(loc, file=bad)
bad.flush()
ftp.close()
| 4,857 | Python | .py | 149 | 26.248322 | 79 | 0.613165 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
491 | partner_batch_imports.py | internetarchive_openlibrary/scripts/partner_batch_imports.py | """
Process partner bibliographic csv data into importable json book
records and then batch submit into the ImportBot
`import_item` table (http://openlibrary.org/admin/imports)
which queues items to be imported via the
Open Library JSON import API: https://openlibrary.org/api/import
To Run:
PYTHONPATH=. python ./scripts/partner_batch_imports.py /olsystem/etc/openlibrary.yml
"""
from collections.abc import Mapping
import datetime
import logging
import os
import re
from typing import cast
import requests
from infogami import config
from openlibrary.config import load_config
from openlibrary.core.imports import Batch
from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
logger = logging.getLogger("openlibrary.importer.bwb")
EXCLUDED_AUTHORS = {
x.casefold()
for x in (
"1570 publishing",
"bad bad girl",
"bahija",
"bruna murino",
"creative elegant edition",
"delsee notebooks",
"grace garcia",
"holo",
"jeryx publishing",
"mado",
"mazzo",
"mikemix",
"mitch allison",
"pickleball publishing",
"pizzelle passion",
"punny cuaderno",
"razal koraya",
"t. d. publishing",
"tobias publishing",
)
}
EXCLUDED_INDEPENDENTLY_PUBLISHED_TITLES = {
x.casefold()
for x in (
# Noisy classic re-prints
'annotated',
'annoté',
'classic',
'classics',
'illustarted', # Some books have typos in their titles!
'illustrated',
'Illustrée',
'original',
'summary',
'version',
# Not a book
'calendar',
'diary',
'journal',
'logbook',
'notebook',
'notizbuch',
'planner',
'sketchbook',
)
}
SCHEMA_URL = (
"https://raw.githubusercontent.com/internetarchive"
"/openlibrary-client/master/olclient/schemata/import.schema.json"
)
class Biblio:
ACTIVE_FIELDS = [
'title',
'isbn_13',
'publish_date',
'publishers',
'weight',
'authors',
'lc_classifications',
'number_of_pages',
'pagination',
'languages',
'subjects',
'source_records',
'lccn',
'identifiers',
'dewey_decimal_class',
]
INACTIVE_FIELDS = [
"copyright",
"length",
"width",
"height",
]
REQUIRED_FIELDS = requests.get(SCHEMA_URL).json()['required']
NONBOOK = """A2 AA AB AJ AVI AZ BK BM C3 CD CE CF CR CRM CRW CX D3 DA DD DF DI DL
DO DR DRM DRW DS DV EC FC FI FM FR FZ GB GC GM GR H3 H5 L3 L5 LP MAC MC MF MG MH ML
MS MSX MZ N64 NGA NGB NGC NGE NT OR OS PC PP PRP PS PSC PY QU RE RV SA SD SG SH SK
SL SMD SN SO SO1 SO2 SR SU TA TB TR TS TY UX V35 V8 VC VD VE VF VK VM VN VO VP VS
VU VY VZ WA WC WI WL WM WP WT WX XL XZ ZF ZZ""".split()
def __init__(self, data):
self.primary_format = data[6]
self.product_type = data[121]
assert (
not self.isnonbook()
), f"{self.primary_format}/{self.product_type} is NONBOOK"
self.isbn = data[124]
self.source_id = f'bwb:{self.isbn}'
self.isbn_13 = [self.isbn]
self.title = data[10]
self.publish_date = data[20][:4] # YYYY
self.publishers = [data[135]]
self.weight = data[39]
self.authors = self.contributors(data)
self.lc_classifications = [data[147]] if data[147] else []
if data[36] and data[36].isnumeric():
self.number_of_pages = int(data[36])
self.pagination = None
else:
self.number_of_pages = None
self.pagination = data[36]
self.languages = [data[37].lower()]
self.source_records = [self.source_id]
self.subjects = [
s.capitalize().replace('_', ', ')
for s in data[91:100]
# + data[101:120]
# + data[153:158]
if s
]
self.identifiers = {
**({'issn': [data[54]]} if data[54] else {}),
**({'doi': [data[145]]} if data[145] else {}),
}
self.lccn = [data[146]] if data[146] else []
self.dewey_decimal_class = [data[49]] if data[49] else []
# Inactive fields
self.copyright = data[19]
# physical_dimensions
# e.g. "5.4 x 4.7 x 0.2 inches"
self.length, self.width, self.height = data[40:43]
# Assert importable
for field in self.REQUIRED_FIELDS + ['isbn_13']:
assert getattr(self, field), field
@staticmethod
def contributors(data):
def make_author(name, _, typ):
author = {'name': name}
if typ == 'X':
# set corporate contributor
author['entity_type'] = 'org'
# TODO: sort out contributor types
# AU = author
# ED = editor
return author
contributors = (
(data[21 + i * 3], data[22 + i * 3], data[23 + i * 3]) for i in range(5)
)
# form list of author dicts
authors = [make_author(*c) for c in contributors if c[0]]
return authors
def isnonbook(self):
return self.primary_format in self.NONBOOK or 'OTH' in self.product_type
def json(self):
return {
field: getattr(self, field)
for field in self.ACTIVE_FIELDS
if getattr(self, field)
}
def load_state(path, logfile):
"""Retrieves starting point from logfile, if log exists
Takes as input a path which expands to an ordered candidate list
of bettworldbks* filenames to process, the location of the
logfile, and determines which of those files are remaining, as
well as what our offset is in that file.
e.g. if we request path containing f1, f2, f3 and our log
says f2,100 then we start our processing at f2 at the 100th line.
This assumes the script is being called w/ e.g.:
/1/var/tmp/imports/2021-08/Bibliographic/*/
"""
filenames = sorted(
os.path.join(path, f) for f in os.listdir(path) if f.startswith("bettworldbks")
)
try:
with open(logfile) as fin:
active_fname, offset = next(fin).strip().split(',')
unfinished_filenames = filenames[filenames.index(active_fname) :]
return unfinished_filenames, int(offset)
except (ValueError, OSError):
return filenames, 0
def update_state(logfile, fname, line_num=0):
"""Records the last file we began processing and the current line"""
with open(logfile, 'w') as fout:
fout.write(f'{fname},{line_num}\n')
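# Example (illustrative, hypothetical path): after 250 lines of
# /imports/2021-08/Bibliographic/bettworldbks_0001 have been processed,
# import.log contains the single line
#   /imports/2021-08/Bibliographic/bettworldbks_0001,250
# and a later run resumes from that file and offset via load_state().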
def csv_to_ol_json_item(line):
"""converts a line to a book item"""
try:
data = line.decode().strip().split('|')
except UnicodeDecodeError:
data = line.decode('ISO-8859-1').strip().split('|')
b = Biblio(data)
return {'ia_id': b.source_id, 'data': b.json()}
def is_low_quality_book(book_item) -> bool:
"""
Check if a book item is of low quality which means that 1) one of its authors
(regardless of case) is in the set of excluded authors.
"""
authors = {a['name'].casefold() for a in book_item.get('authors') or []}
if authors & EXCLUDED_AUTHORS: # Leverage Python set intersection for speed.
return True
# A recent independently published book with excluded key words in its title
# (regardless of case) is also considered a low quality book.
title_words = set(re.split(r'\W+', book_item["title"].casefold()))
publishers = {p.casefold() for p in book_item.get('publishers') or []}
publish_year = int(book_item.get("publish_date", "0")[:4]) # YYYY
return bool(
"independently published" in publishers
and publish_year >= 2018
and title_words & EXCLUDED_INDEPENDENTLY_PUBLISHED_TITLES
)
def is_published_in_future_year(book_item: Mapping[str, str | list]) -> bool:
"""
Prevent import of books with a publication after the current year.
Some import sources have publication dates in a future year, and the likelihood
is high that this is bad data. So we don't want to import these.
"""
publish_year = int(cast(str, book_item.get("publish_date", "0")[:4])) # YYYY
this_year = datetime.datetime.now().year
return publish_year > this_year
def batch_import(path, batch, batch_size=5000):
logfile = os.path.join(path, 'import.log')
filenames, offset = load_state(path, logfile)
for fname in filenames:
book_items = []
with open(fname, 'rb') as f:
logger.info(f"Processing: {fname} from line {offset}")
for line_num, line in enumerate(f):
# skip over already processed records
if offset:
if offset > line_num:
continue
offset = 0
try:
book_item = csv_to_ol_json_item(line)
if not any(
[
is_low_quality_book(book_item["data"]),
is_published_in_future_year(book_item["data"]),
]
):
book_items.append(book_item)
except (AssertionError, IndexError) as e:
logger.info(f"Error: {e} from {line}")
# If we have enough items, submit a batch
if not ((line_num + 1) % batch_size):
batch.add_items(book_items)
update_state(logfile, fname, line_num)
book_items = [] # clear added items
# Add any remaining book_items to batch
if book_items:
batch.add_items(book_items)
update_state(logfile, fname, line_num)
def main(ol_config: str, batch_path: str):
load_config(ol_config)
# Partner data is offset ~15 days from start of month
date = datetime.date.today() - datetime.timedelta(days=15)
batch_name = "%s-%04d%02d" % ('bwb', date.year, date.month)
batch = Batch.find(batch_name) or Batch.new(batch_name)
batch_import(batch_path, batch)
if __name__ == '__main__':
FnToCLI(main).run()
| 10,312 | Python | .py | 275 | 28.963636 | 87 | 0.591287 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
492 | import_standard_ebooks.py | internetarchive_openlibrary/scripts/import_standard_ebooks.py | #!/usr/bin/env python
import json
import requests
from requests.auth import AuthBase, HTTPBasicAuth
import time
from typing import Any
import feedparser
from openlibrary.core.imports import Batch
from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
from openlibrary.config import load_config
from infogami import config
FEED_URL = 'https://standardebooks.org/opds/all'
IMAGE_REL = 'http://opds-spec.org/image'
def get_feed(auth: AuthBase):
"""Fetches and returns Standard Ebook's feed."""
with requests.get(FEED_URL, auth=auth, stream=True) as r:
r.raise_for_status()
return feedparser.parse(r.raw, response_headers=r.headers)
def map_data(entry: dict) -> dict[str, Any]:
"""Maps Standard Ebooks feed entry to an Open Library import object."""
std_ebooks_id = entry['id'].replace('https://standardebooks.org/ebooks/', '')
    # Standard Ebooks only has English works at this time; because we don't have an
    # easy way to translate the language codes they store in the feed to the MARC
    # language codes, we only handle English for now and raise an error
    # if Standard Ebooks ever adds non-English works.
lang = entry.get('dcterms_language')
if not lang or not lang.startswith('en-'):
raise ValueError(f'Feed entry language {lang} is not supported.')
import_record = {
"title": entry['title'],
"source_records": [f"standard_ebooks:{std_ebooks_id}"],
"publishers": ['Standard Ebooks'],
"publish_date": entry['published'][0:4],
"authors": [{"name": author['name']} for author in entry['authors']],
"description": entry['content'][0]['value'],
"subjects": [tag['term'] for tag in entry['tags']],
"identifiers": {"standard_ebooks": [std_ebooks_id]},
"languages": ['eng'],
}
cover_url = next(
(link['href'] for link in entry['links'] if link['rel'] == IMAGE_REL),
None,
)
if cover_url:
# This used to be a relative URL; ensure the API doesn't change.
assert cover_url.startswith('https://')
import_record['cover'] = cover_url
return import_record
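# Illustrative sketch (not part of the original script): a minimal, hypothetical
# feed entry showing the shape map_data() expects. All values are invented.
def _example_map_data() -> dict[str, Any]:
    sample_entry = {
        'id': 'https://standardebooks.org/ebooks/jane-austen/persuasion',
        'title': 'Persuasion',
        'published': '2020-01-01T00:00:00Z',
        'authors': [{'name': 'Jane Austen'}],
        'content': [{'value': 'A description of the book.'}],
        'tags': [{'term': 'Fiction'}],
        'dcterms_language': 'en-GB',
        'links': [{'rel': IMAGE_REL, 'href': 'https://standardebooks.org/cover.jpg'}],
    }
    return map_data(sample_entry)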
def create_batch(records: list[dict[str, str]]) -> None:
"""Creates Standard Ebook batch import job.
Attempts to find existing Standard Ebooks import batch.
If nothing is found, a new batch is created. All of the
given import records are added to the batch job as JSON strings.
"""
now = time.gmtime(time.time())
batch_name = f'standardebooks-{now.tm_year}{now.tm_mon}'
batch = Batch.find(batch_name) or Batch.new(batch_name)
batch.add_items([{'ia_id': r['source_records'][0], 'data': r} for r in records])
def import_job(
ol_config: str,
dry_run: bool = False,
) -> None:
"""
:param str ol_config: Path to openlibrary.yml file
:param bool dry_run: If true, only print out records to import
"""
load_config(ol_config)
if not config.get('standard_ebooks_key'):
print('Standard Ebooks key not found in config. Exiting.')
return
auth = HTTPBasicAuth(config.get('standard_ebooks_key'), '')
feed = map(map_data, get_feed(auth).entries)
if not dry_run:
list_feed = list(feed)
create_batch(list_feed)
print(f'{len(list_feed)} entries added to the batch import job.')
else:
for record in feed:
print(json.dumps(record))
if __name__ == '__main__':
print("Start: Standard Ebooks import job")
FnToCLI(import_job).run()
print("End: Standard Ebooks import job")
| 3,581 | Python | .py | 83 | 37.614458 | 84 | 0.669638 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
493 | update_stale_work_references.py | internetarchive_openlibrary/scripts/update_stale_work_references.py | """
PYTHONPATH=. python ./scripts/update_stale_work_references.py /olsystem/etc/openlibrary.yml
"""
import web
import infogami
from infogami import config
from openlibrary.config import load_config
from openlibrary.core.models import Work
from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
import datetime
def main(ol_config: str, days=1, skip=7):
load_config(ol_config)
infogami._setup()
Work.resolve_redirects_bulk(
batch_size=1000, days=days, grace_period_days=skip, test=False
)
if __name__ == '__main__':
FnToCLI(main).run()
| 576 | Python | .py | 18 | 29.166667 | 91 | 0.754069 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
494 | pr_slack_digest.py | internetarchive_openlibrary/scripts/pr_slack_digest.py | from datetime import datetime
import requests
import os
def send_slack_message(message: str):
response = requests.post(
'https://slack.com/api/chat.postMessage',
headers={
'Authorization': f"Bearer {os.environ.get('SLACK_TOKEN')}",
'Content-Type': 'application/json; charset=utf-8',
},
json={
'channel': '#team-abc-plus',
'text': message,
},
)
if response.status_code != 200:
print(f"Failed to send message to Slack. Status code: {response.status_code}")
else:
print("Message sent to Slack successfully!")
print(response.content)
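# Example (illustrative) of the GitHub search query assembled below, shown wrapped
# for readability (the script builds it as a single space-separated string):
#   repo:internetarchive/openlibrary is:open is:pr -is:draft
#   author:mekarpeles author:cdrini author:scottbarnes author:jimchamp
#   -label:"Needs: Submitter Input" -label:"State: Blocked"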
if __name__ == "__main__":
GH_LOGIN_TO_SLACK = {
'cdrini': '<@cdrini>',
'jimchamp': '<@U01ARTHG9EV>',
'mekarpeles': '<@mek>',
'scottbarnes': '<@U03MNR6T7FH>',
}
LABEL_EMOJI = {
'Priority: 0': 'üö® ',
'Priority: 1': '❗️ ',
}
INCLUDE_AUTHORS = ['mekarpeles', 'cdrini', 'scottbarnes', 'jimchamp']
EXCLUDE_LABELS = [
'Needs: Submitter Input',
'State: Blocked',
]
query = 'repo:internetarchive/openlibrary is:open is:pr -is:draft'
# apparently `author` acts like an OR in this API and only this API -_-
included_authors = " ".join([f"author:{author}" for author in INCLUDE_AUTHORS])
excluded_labels = " ".join([f'-label:"{label}"' for label in EXCLUDE_LABELS])
query = f'{query} {included_authors} {excluded_labels}'
prs = requests.get(
"https://api.github.com/search/issues",
params={
"q": query,
},
).json()["items"]
message = f"{len(prs)} open staff PRs:\n\n"
for pr in prs:
pr_url = pr['html_url']
pr_age_days = (
datetime.now() - datetime.strptime(pr['created_at'], '%Y-%m-%dT%H:%M:%SZ')
).days
message += f"<{pr_url}|*#{pr['number']}* | {pr['title']}>\n"
message += ' | '.join(
[
f"by {pr['user']['login']} {pr_age_days} days ago",
f"Assigned: {GH_LOGIN_TO_SLACK[pr['assignee']['login']] if pr['assignee'] else '⚠️ None'}",
f"{', '.join(LABEL_EMOJI.get(label['name'], '') + label['name'] for label in pr['labels'])}\n\n",
]
)
send_slack_message(message)
| 2,331 | Python | .py | 62 | 29.564516 | 113 | 0.549757 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
495 | migrate_db.py | internetarchive_openlibrary/scripts/migrate_db.py | #!/usr/bin/env python
"""Script to migrate the OL database to latest schema.
"""
import itertools
import json
import os
import sys
import web
changelog = """\
2010-04-22: 10 - Created unique index on thing.key
2010-07-01: 11 - Added `seq` table
2010-08-02: 12 - Added `data` column to transaction table.
2010-08-03: 13 - Added `changes` column to transaction table
2010-08-13: 14 - Added `transaction_index` table to index data in the transaction table.
"""
LATEST_VERSION = 14
class Upgrader:
def upgrade(self, db):
v = self.get_database_version(db)
print("current db version:", v)
print("latest version:", LATEST_VERSION)
t = db.transaction()
try:
for i in range(v, LATEST_VERSION):
print("upgrading to", i + 1)
f = getattr(self, "upgrade_%03d" % (i + 1))
f(db)
except:
print()
print("**ERROR**: Failed to complete the upgrade. rolling back...")
print()
t.rollback()
raise
else:
t.commit()
print("done")
def upgrade_011(self, db):
"""Add seq table."""
q = """CREATE TABLE seq (
id serial primary key,
name text unique,
value int default 0
)"""
db.query(q)
def upgrade_012(self, db):
"""Add data column to transaction table."""
db.query("ALTER TABLE transaction ADD COLUMN data text")
def upgrade_013(self, db):
"""Add changes column to transaction table."""
db.query("ALTER TABLE transaction ADD COLUMN changes text")
# populate changes
rows = db.query(
"SELECT thing.key, version.revision, version.transaction_id"
" FROM thing, version"
" WHERE thing.id=version.thing_id"
" ORDER BY version.transaction_id"
)
for tx_id, changes in itertools.groupby(rows, lambda row: row.transaction_id):
changes = [{"key": row.key, "revision": row.revision} for row in changes]
db.update(
"transaction",
where="id=$tx_id",
changes=json.dumps(changes),
vars=locals(),
)
def upgrade_014(self, db):
"""Add transaction_index table."""
q = """
create table transaction_index (
tx_id int references transaction,
key text,
value text
);
create index transaction_index_key_value_idx ON transaction_index(key, value);
create index transaction_index_tx_id_idx ON transaction_index(tx_id);
"""
db.query(q)
def get_database_version(self, db):
schema = self.read_schema(db)
if 'seq' not in schema:
return 10
elif 'data' not in schema['transaction']:
return 11
elif 'changes' not in schema['transaction']:
return 12
elif 'transaction_index' not in schema:
return 13
else:
return LATEST_VERSION
def read_schema(self, db):
rows = db.query(
"SELECT table_name, column_name, data_type "
" FROM information_schema.columns"
" WHERE table_schema = 'public'"
)
schema = web.storage()
for row in rows:
t = schema.setdefault(row.table_name, web.storage())
t[row.column_name] = row
return schema
def usage():
print(file=sys.stderr)
print("USAGE: %s dbname" % sys.argv[0], file=sys.stderr)
print(file=sys.stderr)
def main():
if len(sys.argv) != 2:
usage()
sys.exit(1)
elif sys.argv[1] in ["-h", "--help"]:
usage()
else:
dbname = sys.argv[1]
db = web.database(dbn='postgres', db=dbname, user=os.getenv('USER'), pw='')
Upgrader().upgrade(db)
if __name__ == "__main__":
main()
| 3,954 | Python | .py | 116 | 25.181034 | 88 | 0.566483 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
496 | solr_updater.py | internetarchive_openlibrary/scripts/solr_updater.py | """New script to handle solr updates.
Author: Anand Chitipothu
Changes:
2013-02-25: First version
2018-02-11: Use newer config method
"""
import asyncio
import datetime
import json
import logging
from pathlib import Path
import re
import socket
import sys
import urllib
from collections.abc import Iterator
import _init_path # Imported for its side effect of setting PYTHONPATH
import aiofiles
import web
from openlibrary.solr import update
from openlibrary.config import load_config
from infogami import config
from openlibrary.utils.open_syllabus_project import set_osp_dump_location
logger = logging.getLogger("openlibrary.solr-updater")
# FIXME: Some kind of hack introduced to work around DB connectivity issue
args: dict = {}
def read_state_file(path, initial_state: str | None = None):
    try:
        with open(path) as f:
            return f.read()
except OSError:
logger.error(
"State file %s is not found. Reading log from the beginning of today", path
)
return initial_state or f"{datetime.date.today().isoformat()}:0"
def get_default_offset():
return datetime.date.today().isoformat() + ":0"
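# Example (illustrative): offsets are "<ISO date>:<binary offset>" strings, e.g.
# "2024-01-15:0" means "read the infobase log from the start of 2024-01-15".
# The same format is persisted to the state file between runs.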
class InfobaseLog:
def __init__(self, hostname: str, exclude: str | None = None):
"""
:param str hostname:
:param str|None exclude: if specified, excludes records that include the string
"""
self.base_url = 'http://%s/openlibrary.org/log' % hostname
self.offset = get_default_offset()
self.exclude = exclude
def tell(self):
return self.offset
def seek(self, offset):
self.offset = offset.strip()
def read_records(self, max_fetches=10):
"""Reads all the available log records from the server."""
for i in range(max_fetches):
url = f"{self.base_url}/{self.offset}?limit=100"
logger.debug("Reading log from %s", url)
try:
jsontext = urllib.request.urlopen(url).read()
except urllib.error.URLError as e:
logger.error("Failed to open URL %s", url, exc_info=True)
if e.args and e.args[0].args == (111, 'Connection refused'):
logger.error(
'make sure infogami server is working, connection refused from %s',
url,
)
sys.exit(1)
raise
try:
d = json.loads(jsontext)
except:
logger.error("Bad JSON: %s", jsontext)
raise
data = d['data']
# no more data is available
if not data:
logger.debug("no more records found")
# There's an infobase bug where we'll sometimes get 0 items, but the
# binary offset will have incremented...?
if 'offset' in d:
# There's _another_ infobase bug where if you query a future date,
# it'll return back 2020-12-01. To avoid solrupdater getting stuck
# in a loop, only update the offset if it's newer than the current
old_day, old_boffset = self.offset.split(':')
old_boffset = int(old_boffset)
new_day, new_boffset = d['offset'].split(':')
new_boffset = int(new_boffset)
if new_day >= old_day and new_boffset >= old_boffset:
self.offset = d['offset']
return
for record in data:
if self.exclude and self.exclude in json.dumps(record):
continue
yield record
self.offset = d['offset']
def find_keys(d: dict | list) -> Iterator[str]:
"""
Find any keys in the given dict or list.
>>> list(find_keys({'key': 'foo'}))
['foo']
>>> list(find_keys([{}, {'key': 'bar'}]))
['bar']
>>> list(find_keys([{'key': 'blue'}, {'key': 'bar'}]))
['blue', 'bar']
>>> list(find_keys({'title': 'foo'}))
[]
>>> list(find_keys({ 'works': [ {'key': 'foo'} ] }))
['foo']
>>> list(find_keys({ 'authors': [ { 'author': {'key': 'foo'} } ] }))
['foo']
"""
if isinstance(d, dict):
if 'key' in d:
yield d['key']
for val in d.values():
yield from find_keys(val)
elif isinstance(d, list):
for val in d:
yield from find_keys(val)
else:
# All other types are not recursed
return
def parse_log(records, load_ia_scans: bool):
for rec in records:
action = rec.get('action')
if action in ('save', 'save_many'):
changeset = rec['data'].get('changeset', {})
old_docs = changeset.get('old_docs', [])
new_docs = changeset.get('docs', [])
for before, after in zip(old_docs, new_docs):
yield after['key']
# before is None if the item is new
if before:
before_keys = set(find_keys(before))
after_keys = set(find_keys(after))
# If a key was changed or was removed, the previous keys
# also need to be updated
yield from before_keys - after_keys
elif action == 'store.put':
# A sample record looks like this:
# {
# "action": "store.put",
# "timestamp": "2011-12-01T00:00:44.241604",
# "data": {
# "data": {"borrowed": "false", "_key": "ebooks/books/OL5854888M", "_rev": "975708", "type": "ebook", "book_key": "/books/OL5854888M"},
# "key": "ebooks/books/OL5854888M"
# },
# "site": "openlibrary.org"
# }
data = rec.get('data', {}).get("data", {})
key = data.get("_key", "")
if data.get("type") == "ebook" and key.startswith("ebooks/books/"):
edition_key = data.get('book_key')
if edition_key:
yield edition_key
elif (
load_ia_scans
and data.get("type") == "ia-scan"
and key.startswith("ia-scan/")
):
identifier = data.get('identifier')
if identifier and is_allowed_itemid(identifier):
yield "/books/ia:" + identifier
# Hack to force updating something from admin interface
# The admin interface writes the keys to update to a document named
# 'solr-force-update' in the store and whatever keys are written to that
# are picked by this script
elif key == 'solr-force-update':
keys = data.get('keys')
yield from keys
elif action == 'store.delete':
key = rec.get("data", {}).get("key")
# An ia-scan key is deleted when that book is deleted/darked from IA.
# Delete it from OL solr by updating that key
if key.startswith("ia-scan/"):
ol_key = "/works/ia:" + key.split("/")[-1]
yield ol_key
def is_allowed_itemid(identifier):
if not re.match("^[a-zA-Z0-9_.-]*$", identifier):
return False
# items starts with these prefixes are not books. Ignore them.
ignore_prefixes = config.get("ia_ignore_prefixes", [])
return all(not identifier.startswith(prefix) for prefix in ignore_prefixes)
async def update_keys(keys):
if not keys:
return 0
# FIXME: Some kind of hack introduced to work around DB connectivity issue
global args
logger.debug("Args: %s" % str(args))
update.load_configs(args['ol_url'], args['ol_config'], 'default')
keys = [k for k in keys if update.can_update_key(k)]
count = 0
for chunk in web.group(keys, 100):
chunk = list(chunk)
count += len(chunk)
await update.do_updates(chunk)
# Caches should not persist between different calls to update_keys!
update.data_provider.clear_cache()
if count:
logger.info("updated %d documents", count)
return count
async def main(
ol_config: str,
osp_dump: Path | None = None,
debugger: bool = False,
state_file: str = 'solr-update.state',
exclude_edits_containing: str | None = None,
ol_url='http://openlibrary.org/',
solr_url: str | None = None,
solr_next: bool = False,
socket_timeout: int = 10,
load_ia_scans: bool = False,
initial_state: str | None = None,
):
"""
:param debugger: Wait for a debugger to attach before beginning
:param exclude_edits_containing: Don't index matching edits
:param solr_url: If wanting to override what's in the config file
:param solr_next: Whether to assume new schema/etc are used
:param initial_state: State to use if state file doesn't exist. Defaults to today.
"""
FORMAT = "%(asctime)-15s %(levelname)s %(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger.info("BEGIN solr_updater")
if debugger:
import debugpy # noqa: T100
logger.info("Enabling debugger attachment (attach if it hangs here)")
debugpy.listen(address=('0.0.0.0', 3000)) # noqa: T100
logger.info("Waiting for debugger to attach...")
debugpy.wait_for_client() # noqa: T100
logger.info("Debugger attached to port 3000")
# Sometimes archive.org requests blocks forever.
# Setting a timeout will make the request fail instead of waiting forever.
socket.setdefaulttimeout(socket_timeout)
# set OL URL when running on a dev-instance
if ol_url:
host = web.lstrips(ol_url, "http://").strip("/")
update.set_query_host(host)
if solr_url:
update.set_solr_base_url(solr_url)
update.set_solr_next(solr_next)
set_osp_dump_location(osp_dump)
logger.info("loading config from %s", ol_config)
load_config(ol_config)
offset = read_state_file(state_file, initial_state)
logfile = InfobaseLog(
config.get('infobase_server'), exclude=exclude_edits_containing
)
logfile.seek(offset)
while True:
records = logfile.read_records()
keys = parse_log(records, load_ia_scans)
count = await update_keys(keys)
if logfile.tell() != offset:
offset = logfile.tell()
logger.info("saving offset %s", offset)
async with aiofiles.open(state_file, "w") as f:
await f.write(offset)
# don't sleep after committing some records.
# While the commit was on, some more edits might have happened.
if count == 0:
logger.debug("No more log records available, sleeping...")
await asyncio.sleep(5)
if __name__ == "__main__":
from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
cli = FnToCLI(main)
args = cli.args_dict()
cli.run()
| 11,014 | Python | .py | 266 | 31.890977 | 153 | 0.583949 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
497 | cron_watcher.py | internetarchive_openlibrary/scripts/cron_watcher.py | #!/usr/bin/env python
"""
Daily Cron-audit task (Python) sentry (who watches the watchers)
If not dump and cdump uploaded for last YYYY-MM on archive.org
If not sitemaps updated for this YYYY-MM on www
If not partner dumps uploaded for this YYYY-MM on archive.org
If no imports in last 48 hours (i.e. 2 days)
If DD>17 for YYYY-MM and bwb `batchname` doesn't exist in import psql table
Send daily email with failures only or slack failures
"""
from datetime import date, timedelta
from internetarchive import search_items
# Last day of last month is the first day of this month minus one day.
last_day_of_last_month = date.today().replace(day=1) - timedelta(days=1)
yyyy_mm = f"{last_day_of_last_month:%Y-%m}"
def find_last_months_dumps_on_ia(yyyy_mm: str = yyyy_mm) -> bool:
"""
Return True if both ol_dump_yyyy_mm and ol_cdump_yyyy_mm files
have been saved on Internet Archive collection:ol_exports.
>>> next_month = date.today().replace(day=1) + timedelta(days=31)
>>> find_last_months_dumps_on_ia(f"{next_month:%Y-%m}")
False
"""
prefixes = {f"ol_dump_{yyyy_mm}": 0, f"ol_cdump_{yyyy_mm}": 0}
# print(prefixes)
for item in search_items("collection:ol_exports"):
for prefix in prefixes:
if item["identifier"].startswith(prefix):
prefixes[prefix] += 1
# Is there at least one item id starting with each prefix?
if files_with_both_prefixes_found := all(prefixes.values()):
return files_with_both_prefixes_found
return all(prefixes.values())
if __name__ == "__main__":
import sys
files_with_both_prefixes_found = find_last_months_dumps_on_ia()
print(f"{files_with_both_prefixes_found = }")
if not files_with_both_prefixes_found:
sys.exit(1)
| 1,806 | Python | .py | 39 | 41.051282 | 76 | 0.68469 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
498 | oldump.py | internetarchive_openlibrary/scripts/oldump.py | #!/usr/bin/env python
import logging
import os
import sys
from datetime import datetime
import _init_path # Imported for its side effect of setting PYTHONPATH
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
def log(*args) -> None:
args_str = " ".join(str(a) for a in args)
msg = f"{datetime.now():%Y-%m-%d %H:%M:%S} [openlibrary.dump] {args_str}"
logger.info(msg)
print(msg, file=sys.stderr)
if __name__ == "__main__":
from contextlib import redirect_stdout
from infogami import config
from openlibrary.config import load_config
from openlibrary.data import dump
from openlibrary.utils.sentry import Sentry
log("{} on Python {}.{}.{}".format(sys.argv, *sys.version_info)) # Python 3.12.2
ol_config = os.getenv("OL_CONFIG")
if ol_config:
logger.info(f"loading config from {ol_config}")
# Squelch output from infobase (needed for sentry setup)
# So it doesn't end up in our data dumps body
with open(os.devnull, 'w') as devnull, redirect_stdout(devnull):
load_config(ol_config)
sentry = Sentry(getattr(config, "sentry_cron_jobs", {}))
if sentry.enabled:
sentry.init()
log(f"sentry.enabled = {bool(ol_config and sentry.enabled)}")
dump.main(sys.argv[1], sys.argv[2:])
| 1,325 | Python | .py | 32 | 36.09375 | 85 | 0.672642 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
499 | promise_batch_imports.py | internetarchive_openlibrary/scripts/promise_batch_imports.py | """
As of 2022-12: Run on `ol-home0 cron container as
```
$ ssh -A ol-home0
$ docker exec -it -uopenlibrary openlibrary-cron-jobs-1 bash
$ PYTHONPATH="/openlibrary" python3 /openlibrary/scripts/promise_batch_imports.py /olsystem/etc/openlibrary.yml
```
The imports can be monitored for their statuses and rolled up / counted using this query on `ol-db1`:
```
=# select count(*) from import_item where batch_id in (select id from import_batch where name like 'bwb_daily_pallets_%');
```
"""
from __future__ import annotations
import datetime
import json
import ijson
import requests
import logging
from typing import Any
from urllib.parse import urlencode
import _init_path # Imported for its side effect of setting PYTHONPATH
from infogami import config
from openlibrary.config import load_config
from openlibrary.core import stats
from openlibrary.core.imports import Batch, ImportItem
from openlibrary.core.vendors import get_amazon_metadata, stage_bookworm_metadata
from openlibrary.plugins.upstream.utils import safeget
from openlibrary.utils.isbn import to_isbn_13
from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
logger = logging.getLogger("openlibrary.importer.promises")
def format_date(date: str, only_year: bool) -> str:
"""
Format date as "yyyy-mm-dd" or only "yyyy"
:param date: Date in "yyyymmdd" format.
"""
return date[:4] if only_year else f"{date[0:4]}-{date[4:6]}-{date[6:8]}"
def map_book_to_olbook(book, promise_id):
def clean_null(val: str | None) -> str | None:
if val in ('', 'null', 'null--'):
return None
return val
asin_is_isbn_10 = book.get('ASIN') and book.get('ASIN')[0].isdigit()
product_json = book.get('ProductJSON', {})
publish_date = clean_null(product_json.get('PublicationDate'))
title = product_json.get('Title')
isbn = book.get('ISBN') or ' '
sku = book['BookSKUB'] or book['BookSKU'] or book['BookBarcode']
olbook = {
'local_id': [f"urn:bwbsku:{sku.upper()}"],
'identifiers': {
**({'amazon': [book.get('ASIN')]} if not asin_is_isbn_10 else {}),
**({'better_world_books': [isbn]} if not is_isbn_13(isbn) else {}),
},
**({'isbn_13': [isbn]} if is_isbn_13(isbn) else {}),
**({'isbn_10': [book.get('ASIN')]} if asin_is_isbn_10 else {}),
**({'title': title} if title else {}),
'authors': (
[{"name": clean_null(product_json.get('Author'))}]
if clean_null(product_json.get('Author'))
else []
),
'publishers': [clean_null(product_json.get('Publisher')) or '????'],
'source_records': [f"promise:{promise_id}:{sku}"],
# format_date adds hyphens between YYYY-MM-DD, or use only YYYY if date is suspect.
'publish_date': (
format_date(
date=publish_date, only_year=publish_date[-4:] in ('0000', '0101')
)
if publish_date
else ''
),
}
if not olbook['identifiers']:
del olbook['identifiers']
return olbook
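# Illustrative sketch (not part of the original script): a minimal, hypothetical
# promise-item record showing the shape map_book_to_olbook() expects.
def _example_map_book_to_olbook() -> dict:
    sample_book = {
        'ASIN': 'B000000000',
        'ISBN': '9780000000000',
        'BookSKUB': 'abc123',
        'BookSKU': '',
        'BookBarcode': '',
        'ProductJSON': {
            'Title': 'A Sample Title',
            'Author': 'A. Author',
            'Publisher': 'A Publisher',
            'PublicationDate': '20200101',
        },
    }
    return map_book_to_olbook(sample_book, 'bwb_daily_pallets_2022-12-01')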
def is_isbn_13(isbn: str):
"""
    Naive check for ISBN-13 identifiers: returns True if the value starts with a
    digit. This is not a full ISBN-13 validation.
"""
return isbn and isbn[0].isdigit()
def stage_incomplete_records_for_import(olbooks: list[dict[str, Any]]) -> None:
"""
For incomplete records, try to stage more complete records from BookWorm.
    The `staged` record created here is later used to supplement the incomplete
    record when it is processed via `/api/import`: any additional metadata found
    in the `staged` record is merged into the incoming import `rec`.
An incomplete record lacks one or more of: title, authors, or publish_date.
See https://github.com/internetarchive/openlibrary/issues/9440.
"""
total_records = len(olbooks)
incomplete_records = 0
timestamp = datetime.datetime.now(datetime.UTC)
required_fields = ["title", "authors", "publish_date"]
for book in olbooks:
# Only look to BookWorm if the current record is incomplete.
if all(book.get(field) for field in required_fields):
continue
incomplete_records += 1
# Prefer ISBN 13 as an identifier.
isbn_10 = safeget(lambda: book.get("isbn_10", [])[0])
isbn_13 = safeget(lambda: book.get("isbn_13", [])[0])
identifier = to_isbn_13(isbn_13 or isbn_10 or "")
# Fall back to B* ASIN as a last resort.
if not identifier:
if not (amazon := book.get('identifiers', {}).get('amazon', [])):
continue
identifier = amazon[0]
try:
stage_bookworm_metadata(identifier=identifier)
except requests.exceptions.ConnectionError:
logger.exception("Affiliate Server unreachable")
continue
# Record promise item completeness rate over time.
stats.gauge(f"ol.imports.bwb.{timestamp}.total_records", total_records)
stats.gauge(f"ol.imports.bwb.{timestamp}.incomplete_records", incomplete_records)
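# Rough sketch of the completeness rule above, with hypothetical records: a book like
# {"title": "T", "authors": [{"name": "A"}], "publish_date": "2022"} is skipped, while
# {"title": "T", "authors": [], "publish_date": ""} counts as incomplete and its
# ISBN-13 (or ISBN-10 converted via to_isbn_13, or failing that its Amazon ASIN) is
# passed to stage_bookworm_metadata().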
def batch_import(promise_id, batch_size=1000, dry_run=False):
url = "https://archive.org/download/"
date = promise_id.split("_")[-1]
resp = requests.get(f"{url}{promise_id}/DailyPallets__{date}.json", stream=True)
olbooks_gen = (
map_book_to_olbook(book, promise_id) for book in ijson.items(resp.raw, 'item')
)
# Note: dry_run won't include BookWorm data.
if dry_run:
for book in olbooks_gen:
print(json.dumps(book), flush=True)
return
olbooks = list(olbooks_gen)
# Stage incomplete records for import so as to supplement their metadata via
# `load()`. See https://github.com/internetarchive/openlibrary/issues/9440.
stage_incomplete_records_for_import(olbooks)
batch = Batch.find(promise_id) or Batch.new(promise_id)
batch_items = [{'ia_id': b['local_id'][0], 'data': b} for b in olbooks]
for i in range(0, len(batch_items), batch_size):
batch.add_items(batch_items[i : i + batch_size])
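# Batching arithmetic for the add_items loop above: with the default batch_size of
# 1000, e.g. 2,500 mapped records are added in three calls (1000 + 1000 + 500).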
def get_promise_items_url(start_date: str, end_date: str):
"""
>>> get_promise_items_url('2022-12-01', '2022-12-31')
'https://archive.org/advancedsearch.php?q=collection:bookdonationsfrombetterworldbooks+identifier:bwb_daily_pallets_*+publicdate:[2022-12-01+TO+2022-12-31]&sort=addeddate+desc&fl=identifier&rows=5000&output=json'
>>> get_promise_items_url('2022-12-01', '2022-12-01')
'https://archive.org/advancedsearch.php?q=collection:bookdonationsfrombetterworldbooks+identifier:bwb_daily_pallets_*&sort=addeddate+desc&fl=identifier&rows=5000&output=json'
>>> get_promise_items_url('2022-12-01', '*')
'https://archive.org/advancedsearch.php?q=collection:bookdonationsfrombetterworldbooks+identifier:bwb_daily_pallets_*+publicdate:[2022-12-01+TO+*]&sort=addeddate+desc&fl=identifier&rows=5000&output=json'
"""
is_exact_date = start_date == end_date
selector = start_date if is_exact_date else '*'
q = f"collection:bookdonationsfrombetterworldbooks identifier:bwb_daily_pallets_{selector}"
if not is_exact_date:
q += f' publicdate:[{start_date} TO {end_date}]'
return "https://archive.org/advancedsearch.php?" + urlencode(
{
'q': q,
'sort': 'addeddate desc',
'fl': 'identifier',
'rows': '5000',
'output': 'json',
}
)
def main(ol_config: str, dates: str, dry_run: bool = False):
"""
:param ol_config: Path to openlibrary.yml
    :param dates: Get all promise items for this date or date range.
        E.g. "yyyy-mm-dd:yyyy-mm-dd" or just "yyyy-mm-dd" for a single date.
        "yyyy-mm-dd:*" for all dates after a certain date.
    :param dry_run: If True, print the promise item identifiers and mapped
        records instead of creating import batches.
"""
if ':' in dates:
start_date, end_date = dates.split(':')
else:
start_date = end_date = dates
url = get_promise_items_url(start_date, end_date)
r = requests.get(url)
identifiers = [d['identifier'] for d in r.json()['response']['docs']]
if not identifiers:
logger.info("No promise items found for date(s) %s", dates)
return
if not dry_run:
load_config(ol_config)
for promise_id in identifiers:
if dry_run:
print([promise_id, dry_run], flush=True)
batch_import(promise_id, dry_run=dry_run)
if __name__ == '__main__':
FnToCLI(main).run()
| 8,449 | Python | .py | 185 | 39.183784 | 216 | 0.656687 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |