id (int64, 0-458k) | file_name (string, 4-119 chars) | file_path (string, 14-227 chars) | content (string, 24-9.96M chars) | size (int64, 24-9.96M) | language (1 class) | extension (14 classes) | total_lines (int64, 1-219k) | avg_line_length (float64, 2.52-4.63M) | max_line_length (int64, 5-9.91M) | alphanum_fraction (float64, 0-1) | repo_name (string, 7-101 chars) | repo_stars (int64, 100-139k) | repo_forks (int64, 0-26.4k) | repo_open_issues (int64, 0-2.27k) | repo_license (12 classes) | repo_extraction_date (433 classes) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
200 | solr_types.py | internetarchive_openlibrary/openlibrary/solr/solr_types.py | # This file is auto-generated by types_generator.py
# fmt: off
from typing import Literal, TypedDict, Optional
class SolrDocument(TypedDict):
key: str
type: Literal['work', 'author', 'subject']
redirects: Optional[list[str]]
has_fulltext: Optional[bool]
title: Optional[str]
title_suggest: Optional[str]
title_sort: Optional[str]
subtitle: Optional[str]
alternative_title: Optional[list[str]]
alternative_subtitle: Optional[list[str]]
edition_count: Optional[int]
edition_key: Optional[list[str]]
cover_edition_key: Optional[str]
by_statement: Optional[list[str]]
publish_date: Optional[list[str]]
publish_year: Optional[list[int]]
first_publish_year: Optional[int]
first_edition: Optional[str]
first_publisher: Optional[str]
language: Optional[list[str]]
number_of_pages_median: Optional[int]
lccn: Optional[list[str]]
ia: Optional[list[str]]
ia_box_id: Optional[list[str]]
ia_loaded_id: Optional[list[str]]
ia_count: Optional[int]
ia_collection: Optional[list[str]]
oclc: Optional[list[str]]
isbn: Optional[list[str]]
ebook_access: Optional[Literal['no_ebook', 'unclassified', 'printdisabled', 'borrowable', 'public']]
lcc: Optional[list[str]]
lcc_sort: Optional[str]
ddc: Optional[list[str]]
ddc_sort: Optional[str]
contributor: Optional[list[str]]
publish_place: Optional[list[str]]
publisher: Optional[list[str]]
format: Optional[list[str]]
publisher_facet: Optional[list[str]]
first_sentence: Optional[list[str]]
author_key: Optional[list[str]]
author_name: Optional[list[str]]
author_alternative_name: Optional[list[str]]
author_facet: Optional[list[str]]
subject: Optional[list[str]]
subject_facet: Optional[list[str]]
subject_key: Optional[list[str]]
place: Optional[list[str]]
place_facet: Optional[list[str]]
place_key: Optional[list[str]]
person: Optional[list[str]]
person_facet: Optional[list[str]]
person_key: Optional[list[str]]
time: Optional[list[str]]
time_facet: Optional[list[str]]
time_key: Optional[list[str]]
ratings_average: Optional[float]
ratings_sortable: Optional[float]
ratings_count: Optional[int]
ratings_count_1: Optional[int]
ratings_count_2: Optional[int]
ratings_count_3: Optional[int]
ratings_count_4: Optional[int]
ratings_count_5: Optional[int]
readinglog_count: Optional[int]
want_to_read_count: Optional[int]
currently_reading_count: Optional[int]
already_read_count: Optional[int]
osp_count: Optional[int]
text: Optional[list[str]]
seed: Optional[list[str]]
name: Optional[str]
name_str: Optional[str]
alternate_names: Optional[list[str]]
birth_date: Optional[str]
death_date: Optional[str]
date: Optional[str]
work_count: Optional[int]
top_work: Optional[str]
top_subjects: Optional[list[str]]
subject_type: Optional[str]
public_scan_b: Optional[bool]
printdisabled_s: Optional[str]
lending_edition_s: Optional[str]
ia_collection_s: Optional[str]
ebook_count_i: Optional[int]
# fmt: on
| 3,176 | Python | .py | 91 | 30.087912 | 104 | 0.703115 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
201 | query_utils.py | internetarchive_openlibrary/openlibrary/solr/query_utils.py | from typing import Literal, Optional
from collections.abc import Callable
from luqum.parser import parser
from luqum.tree import Item, SearchField, BaseOperation, Group, Word, Unary
import re
class EmptyTreeError(Exception):
pass
def luqum_remove_child(child: Item, parents: list[Item]):
"""
Removes a child from a luqum parse tree. If the tree
ends up being empty, errors.
:param child: Node to remove
:param parents: Path of parent nodes leading from the root of the tree
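    Typically driven by luqum_traverse, e.g. (an illustrative sketch, not from
    the original docstring):
        tree = parser.parse('title:foo OR author:bar')
        for node, parents in luqum_traverse(tree):
            if isinstance(node, SearchField) and node.name == 'author':
                luqum_remove_child(node, parents)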
"""
parent = parents[-1] if parents else None
if parent is None:
# We cannot remove the element if it is the root of the tree
raise EmptyTreeError()
elif isinstance(parent, (BaseOperation, Group, Unary)):
new_children = tuple(c for c in parent.children if c != child)
if not new_children:
# If we have deleted all the children, we need to delete the parent
# as well. And potentially recurse up the tree.
luqum_remove_child(parent, parents[:-1])
else:
parent.children = new_children
else:
raise NotImplementedError(
f"Not implemented for Item subclass: {parent.__class__.__name__}"
)
def luqum_replace_child(parent: Item, old_child: Item, new_child: Item):
"""
Replaces a child in a luqum parse tree.
"""
if isinstance(parent, (BaseOperation, Group, Unary)):
new_children = tuple(
new_child if c == old_child else c for c in parent.children
)
parent.children = new_children
else:
raise ValueError("Not supported for generic class Item")
def luqum_traverse(item: Item, _parents: list[Item] | None = None):
"""
Traverses every node in the parse tree in depth-first order.
Does not make any guarantees about what will happen if you
modify the tree while traversing it 😅 But we do it anyways.
:param item: Node to traverse
:param _parents: Internal parameter for tracking parents
"""
parents = _parents or []
yield item, parents
new_parents = [*parents, item]
for child in item.children:
yield from luqum_traverse(child, new_parents)
def escape_unknown_fields(
query: str,
is_valid_field: Callable[[str], bool],
lower=True,
) -> str:
"""
Escapes the colon of any search field that is not deemed valid by the
predicate function `is_valid_field`.
:param query: Query to escape
:param is_valid_field: Predicate function that determines if a field is valid
:param lower: If true, the field will be lowercased before being checked
>>> escape_unknown_fields('title:foo', lambda field: False)
'title\\\\:foo'
>>> escape_unknown_fields('title:foo bar blah:bar baz:boo', lambda field: False)
'title\\\\:foo bar blah\\\\:bar baz\\\\:boo'
>>> escape_unknown_fields('title:foo bar', {'title'}.__contains__)
'title:foo bar'
>>> escape_unknown_fields('title:foo bar baz:boo', {'title'}.__contains__)
'title:foo bar baz\\\\:boo'
>>> escape_unknown_fields('title:foo bar baz:boo', {'TITLE'}.__contains__, lower=False)
'title\\\\:foo bar baz\\\\:boo'
>>> escape_unknown_fields('hi', {'title'}.__contains__)
'hi'
>>> escape_unknown_fields('(title:foo) OR (blah:bah)', {'title'}.__contains__)
'(title:foo) OR (blah\\\\:bah)'
"""
tree = parser.parse(query)
# Note we use the string of the tree, because it strips spaces
# like: "title : foo" -> "title:foo"
escaped_query = str(tree)
offset = 0
for sf, _ in luqum_traverse(tree):
if isinstance(sf, SearchField) and not is_valid_field(
sf.name.lower() if lower else sf.name
):
field = sf.name + r'\:'
if hasattr(sf, 'head'):
# head and tail are used for whitespace between fields;
                # copy it along to the right space to avoid things smashing
# together
field = sf.head + field
# We will be moving left to right, so we need to adjust the offset
# to account for the characters we have already replaced
escaped_query = (
escaped_query[: sf.pos + offset]
+ field
+ escaped_query[sf.pos + len(field) - 1 + offset :]
)
offset += 1
return escaped_query
def fully_escape_query(query: str) -> str:
"""
Try to convert a query to basically a plain lucene string.
>>> fully_escape_query('title:foo')
'title\\\\:foo'
>>> fully_escape_query('title:foo bar')
'title\\\\:foo bar'
>>> fully_escape_query('title:foo (bar baz:boo)')
'title\\\\:foo \\\\(bar baz\\\\:boo\\\\)'
>>> fully_escape_query('x:[A TO Z}')
'x\\\\:\\\\[A TO Z\\\\}'
>>> fully_escape_query('foo AND bar')
'foo and bar'
>>> fully_escape_query("foo's bar")
"foo\\\\'s bar"
"""
escaped = query
# Escape special characters
escaped = re.sub(r'[\[\]\(\)\{\}:"\-+?~^/\\,\']', r'\\\g<0>', escaped)
# Remove boolean operators by making them lowercase
escaped = re.sub(r'AND|OR|NOT', lambda _1: _1.group(0).lower(), escaped)
return escaped
def luqum_parser(query: str) -> Item:
"""
Parses a lucene-like query, with the special binding rules of Open Library.
In our queries, unlike native solr/lucene, field names are greedy, and
affect the rest of the query until another field is hit.
Here are some examples. The first query is the native solr/lucene
parsing. The second is the parsing we want.
Query : title:foo bar
Lucene: (title:foo) bar
OL : (title:foo bar)
Query : title:foo OR bar AND author:blah
Lucene: (title:foo) OR (bar) AND (author:blah)
OL : (title:foo OR bar) AND (author:blah)
This requires an annoying amount of manipulation of the default
Luqum parser, unfortunately.
Also, OL queries allow spaces after fields.
"""
tree = parser.parse(query)
def find_next_word(item: Item) -> tuple[Word, BaseOperation | None] | None:
if isinstance(item, Word):
return item, None
elif isinstance(item, BaseOperation) and isinstance(item.children[0], Word):
return item.children[0], item
else:
return None
for node, parents in luqum_traverse(tree):
if isinstance(node, BaseOperation):
# if any of the children are SearchField followed by one or more words,
# we bundle them together
            last_sf: SearchField | None = None
to_rem = []
for child in node.children:
if isinstance(child, SearchField) and isinstance(child.expr, Word):
last_sf = child
elif last_sf and (next_word := find_next_word(child)):
word, parent_op = next_word
# Add it over
if not isinstance(last_sf.expr, Group):
last_sf.expr = Group(type(node)(last_sf.expr, word))
last_sf.expr.tail = word.tail
word.tail = ''
else:
last_sf.expr.expr.children[-1].tail = last_sf.expr.tail
last_sf.expr.expr.children += (word,)
last_sf.expr.tail = word.tail
word.tail = ''
if parent_op:
                        # A query like: 'title:foo blah OR author:bar'
# Lucene parses as: (title:foo) ? (blah OR author:bar)
# We want : (title:foo ? blah) OR (author:bar)
node.op = parent_op.op
node.children += (*parent_op.children[1:],)
to_rem.append(child)
else:
last_sf = None
if len(to_rem) == len(node.children) - 1:
# We only have the searchfield left!
if parents:
# Move the head to the next element
last_sf.head = node.head
parents[-1].children = tuple(
child if child is not node else last_sf
for child in parents[-1].children
)
else:
tree = last_sf
break
else:
node.children = tuple(
child for child in node.children if child not in to_rem
)
# Remove spaces before field names
for node, parents in luqum_traverse(tree):
if isinstance(node, SearchField):
node.expr.head = ''
return tree
def query_dict_to_str(
escaped: dict | None = None,
unescaped: dict | None = None,
op: Literal['AND', 'OR', ''] = '',
phrase: bool = False,
) -> str:
"""
Converts a query dict to a search query.
>>> query_dict_to_str({'title': 'foo'})
'title:(foo)'
>>> query_dict_to_str({'title': 'foo bar', 'author': 'bar'})
'title:(foo bar) author:(bar)'
>>> query_dict_to_str({'title': 'foo bar', 'author': 'bar'}, op='OR')
'title:(foo bar) OR author:(bar)'
>>> query_dict_to_str({'title': 'foo ? to escape'})
'title:(foo \\\\? to escape)'
>>> query_dict_to_str({'title': 'YES AND'})
'title:(YES and)'
>>> query_dict_to_str({'publisher_facet': 'Running Press'}, phrase=True)
'publisher_facet:"Running Press"'
"""
result = ''
if escaped:
result += f' {op} '.join(
(
f'{k}:"{fully_escape_query(v)}"'
if phrase
else f'{k}:({fully_escape_query(v)})'
)
for k, v in escaped.items()
)
if unescaped:
if result:
result += f' {op} '
result += f' {op} '.join(f'{k}:{v}' for k, v in unescaped.items())
return result
def luqum_replace_field(query: Item, replacer: Callable[[str], str]) -> None:
"""
In-place replaces portions of a field, as indicated by the replacement function.
:param query: Passed in the form of a luqum tree
    :param replacer: Function called on each field name; its return value becomes the new field name.
"""
for sf, _ in luqum_traverse(query):
if isinstance(sf, SearchField):
sf.name = replacer(sf.name)
def luqum_remove_field(query: Item, predicate: Callable[[str], bool]) -> None:
"""
In-place removes fields from a query, as indicated by the predicate function.
:param query: Passed in the form of a luqum tree
    :param predicate: Called on each field name; fields for which it returns True are removed.
"""
for sf, parents in luqum_traverse(query):
if isinstance(sf, SearchField) and predicate(sf.name):
luqum_remove_child(sf, parents)
| 10,787 | Python | .py | 256 | 33.011719 | 91 | 0.584692 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
202 | utils.py | internetarchive_openlibrary/openlibrary/solr/utils.py | from dataclasses import dataclass, field
import json
import logging
import httpx
from httpx import HTTPError, HTTPStatusError, TimeoutException
from openlibrary import config
from openlibrary.solr.solr_types import SolrDocument
from openlibrary.utils.retry import MaxRetriesExceeded, RetryStrategy
logger = logging.getLogger("openlibrary.solr")
solr_base_url = None
solr_next: bool | None = None
def load_config(c_config='conf/openlibrary.yml'):
if not config.runtime_config:
config.load(c_config)
config.load_config(c_config)
def get_solr_base_url():
"""
Get Solr host
:rtype: str
"""
global solr_base_url
load_config()
if not solr_base_url:
solr_base_url = config.runtime_config['plugin_worksearch']['solr_base_url']
return solr_base_url
def set_solr_base_url(solr_url: str):
global solr_base_url
solr_base_url = solr_url
def get_solr_next() -> bool:
"""
    Get whether this is the next version of solr, i.e. new schema configs/fields, etc.
"""
global solr_next
if solr_next is None:
load_config()
solr_next = config.runtime_config['plugin_worksearch'].get('solr_next', False)
return solr_next
def set_solr_next(val: bool):
global solr_next
solr_next = val
@dataclass
class SolrUpdateRequest:
adds: list[SolrDocument] = field(default_factory=list)
"""Records to be added/modified"""
deletes: list[str] = field(default_factory=list)
"""Records to be deleted"""
commit: bool = False
# Override the + operator
def __add__(self, other):
if isinstance(other, SolrUpdateRequest):
return SolrUpdateRequest(
adds=self.adds + other.adds,
deletes=self.deletes + other.deletes,
commit=self.commit or other.commit,
)
else:
raise TypeError(f"Cannot add {type(self)} and {type(other)}")
def has_changes(self) -> bool:
return bool(self.adds or self.deletes)
def to_solr_requests_json(self, indent: int | str | None = None, sep=',') -> str:
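        """
        Serialize this request into the JSON body expected by Solr's /update
        handler. Illustrative shape of the output (not taken from the original
        source): '{"delete": ["/works/OL1W"],"add": {"doc": {...}},"commit": {}}'
        Repeated "add" keys (one per document) are intentional; Solr's JSON
        update handler accepts duplicate command names.
        """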
result = '{'
if self.deletes:
result += f'"delete": {json.dumps(self.deletes, indent=indent)}' + sep
for doc in self.adds:
result += f'"add": {json.dumps({"doc": doc}, indent=indent)}' + sep
if self.commit:
result += '"commit": {}' + sep
if result.endswith(sep):
result = result[: -len(sep)]
result += '}'
return result
def clear_requests(self) -> None:
self.adds.clear()
self.deletes.clear()
def solr_update(
update_request: SolrUpdateRequest,
skip_id_check=False,
solr_base_url: str | None = None,
) -> None:
content = update_request.to_solr_requests_json()
solr_base_url = solr_base_url or get_solr_base_url()
params = {
# Don't fail the whole batch if one bad apple
'update.chain': 'tolerant-chain'
}
if skip_id_check:
params['overwrite'] = 'false'
def make_request():
logger.debug(f"POSTing update to {solr_base_url}/update {params}")
try:
resp = httpx.post(
f'{solr_base_url}/update',
# Large batches especially can take a decent chunk of time
timeout=300,
params=params,
headers={'Content-Type': 'application/json'},
content=content,
)
if resp.status_code == 400:
resp_json = resp.json()
indiv_errors = resp_json.get('responseHeader', {}).get('errors', [])
if indiv_errors:
for e in indiv_errors:
logger.error(f'Individual Solr POST Error: {e}')
global_error = resp_json.get('error')
if global_error:
logger.error(f'Global Solr POST Error: {global_error.get("msg")}')
if not (indiv_errors or global_error):
# We can handle the above errors. Any other 400 status codes
# are fatal and should cause a retry
resp.raise_for_status()
else:
resp.raise_for_status()
except HTTPStatusError as e:
logger.error(f'HTTP Status Solr POST Error: {e}')
raise
except TimeoutException:
logger.error(f'Timeout Solr POST Error: {content}')
raise
except HTTPError as e:
logger.error(f'HTTP Solr POST Error: {e}')
raise
retry = RetryStrategy(
[HTTPStatusError, TimeoutException, HTTPError],
max_retries=5,
delay=8,
)
try:
return retry(make_request)
except MaxRetriesExceeded as e:
logger.error(f'Max retries exceeded for Solr POST: {e.last_exception}')
async def solr_insert_documents(
documents: list[dict],
solr_base_url: str | None = None,
skip_id_check=False,
):
"""
Note: This has only been tested with Solr 8, but might work with Solr 3 as well.
"""
solr_base_url = solr_base_url or get_solr_base_url()
params = {}
if skip_id_check:
params['overwrite'] = 'false'
logger.debug(f"POSTing update to {solr_base_url}/update {params}")
async with httpx.AsyncClient() as client:
resp = await client.post(
f'{solr_base_url}/update',
timeout=30, # seconds; the default timeout is silly short
params=params,
headers={'Content-Type': 'application/json'},
content=json.dumps(documents),
)
resp.raise_for_status()
def str_to_key(s):
"""
Convert a string to a valid Solr field name.
TODO: this exists in openlibrary/utils/__init__.py str_to_key(), DRY
:param str s:
:rtype: str
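    Illustrative examples:
    >>> str_to_key('Foo Bar')
    'foo_bar'
    >>> str_to_key('foo/bar?')
    'foobar'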
"""
to_drop = set(''';/?:@&=+$,<>#%"{}|\\^[]`\n\r''')
return ''.join(c if c != ' ' else '_' for c in s.lower() if c not in to_drop)
| 6,011 | Python | .py | 162 | 28.697531 | 86 | 0.601723 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
203 | types_generator.py | internetarchive_openlibrary/openlibrary/solr/types_generator.py | #!/usr/bin/env python
import os
root = os.path.dirname(__file__)
OVERRIDES = {
'type': "Literal['work', 'author', 'subject']",
'public_scan_b': 'Optional[bool]',
'printdisabled_s': 'Optional[str]',
'lending_edition_s': 'Optional[str]',
'ia_collection_s': 'Optional[str]',
'ebook_count_i': 'Optional[int]',
}
def generate():
"""This function generates the types.py file."""
import xml.etree.ElementTree as ET
# read the managed-schema xml file
solr_schema = ET.parse(
os.path.join(root, '../../conf/solr/conf/managed-schema.xml')
)
python_fields: list[str] = []
seen_names: set[str] = set()
for field in solr_schema.getroot().findall('field'):
name = field.get('name')
if name.startswith('_'):
continue
required = field.get('required') == 'true'
typ = field.get('type')
multivalued = field.get('multiValued') == 'true'
type_map = {
'pint': 'int',
'string': 'str',
'text_en_splitting': 'str',
'text_general': 'str',
'text_international': 'str',
'text_title_sort': 'str',
'boolean': 'bool',
'pfloat': 'float',
}
if name in OVERRIDES:
python_type = OVERRIDES[name]
elif typ in type_map:
python_type = type_map[typ]
elif (
field_type := solr_schema.find(f".//fieldType[@name='{typ}']")
) is not None:
field_class = field_type.get('class')
if field_class == 'solr.EnumFieldType':
enumsConfigFile = field_type.get('enumsConfig')
enumsConfig = ET.parse(
os.path.join(root, '../../conf/solr/conf/', enumsConfigFile)
)
enum_values = [
el.text
for el in enumsConfig.findall(
f".//enum[@name='{field_type.get('enumName')}']/value"
)
]
python_type = f"Literal[{', '.join(map(repr, enum_values))}]"
else:
raise Exception(f"Unknown field type class {field_class}")
else:
raise Exception(f"Unknown field type {typ}")
if name not in OVERRIDES:
if multivalued:
python_type = f"list[{python_type}]"
if not required:
python_type = f"Optional[{python_type}]"
seen_names.add(name)
python_fields.append(f" {name}: {python_type}")
for key in OVERRIDES:
if key not in seen_names:
python_fields.append(f" {key}: {OVERRIDES[key]}")
body = '\n'.join(python_fields)
python = f"""# This file is auto-generated by types_generator.py
# fmt: off
from typing import Literal, TypedDict, Optional
class SolrDocument(TypedDict):
{body}
# fmt: on"""
return python
if __name__ == '__main__':
print(generate())
| 2,981 | Python | .py | 81 | 27.111111 | 80 | 0.539528 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
204 | update.py | internetarchive_openlibrary/openlibrary/solr/update.py | import functools
import logging
from pathlib import Path
from typing import Literal, cast
import aiofiles
import json
import web
from openlibrary.catalog.utils.query import set_query_host
from openlibrary.solr.data_provider import (
get_data_provider,
DataProvider,
ExternalDataProvider,
)
from openlibrary.solr.updater.abstract import AbstractSolrUpdater
from openlibrary.solr.updater.author import AuthorSolrUpdater
from openlibrary.solr.updater.edition import EditionSolrUpdater
from openlibrary.solr.updater.list import ListSolrUpdater
from openlibrary.solr.updater.work import WorkSolrUpdater
from openlibrary.solr.utils import (
SolrUpdateRequest,
load_config,
set_solr_base_url,
set_solr_next,
solr_update,
)
from openlibrary.utils import uniq
from openlibrary.utils.open_syllabus_project import set_osp_dump_location
logger = logging.getLogger("openlibrary.solr")
# This will be set to a data provider; have faith, mypy!
data_provider = cast(DataProvider, None)
@functools.cache
def get_solr_updaters() -> list[AbstractSolrUpdater]:
global data_provider
assert data_provider is not None
return [
# ORDER MATTERS
EditionSolrUpdater(data_provider),
WorkSolrUpdater(data_provider),
AuthorSolrUpdater(data_provider),
ListSolrUpdater(data_provider),
]
def can_update_key(key: str) -> bool:
return any(updater.key_test(key) for updater in get_solr_updaters())
async def update_keys(
keys: list[str],
commit=True,
output_file=None,
skip_id_check=False,
update: Literal['update', 'print', 'pprint', 'quiet'] = 'update',
) -> SolrUpdateRequest:
"""
Insert/update the documents with the provided keys in Solr.
:param list[str] keys: Keys to update (ex: ["/books/OL1M"]).
    :param bool commit: Create <commit> tags to make Solr persist the changes (and make them public/searchable).
:param str output_file: If specified, will save all update actions to output_file **instead** of sending to Solr.
Each line will be JSON object.
FIXME Updates to editions/subjects ignore output_file and will be sent (only) to Solr regardless.
"""
logger.debug("BEGIN update_keys")
def _solr_update(update_state: SolrUpdateRequest):
if update == 'update':
return solr_update(update_state, skip_id_check)
elif update == 'pprint':
print(update_state.to_solr_requests_json(sep='\n', indent=4))
elif update == 'print':
print(update_state.to_solr_requests_json(sep='\n'))
elif update == 'quiet':
pass
global data_provider
if data_provider is None:
data_provider = get_data_provider('default')
net_update = SolrUpdateRequest(commit=commit)
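    # Each updater handles its own key prefix. An updater can also emit extra
    # keys (e.g. an edition enqueues its parent work), which are appended to
    # `keys` so that a later updater in the list picks them up.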
for updater in get_solr_updaters():
update_state = SolrUpdateRequest(commit=commit)
updater_keys = uniq(k for k in keys if updater.key_test(k))
await updater.preload_keys(updater_keys)
for key in updater_keys:
logger.debug(f"processing {key}")
try:
thing = await data_provider.get_document(key)
if thing and thing['type']['key'] == '/type/redirect':
logger.warning("Found redirect to %r", thing['location'])
# When the given key is not found or redirects to another thing,
# explicitly delete the key. It won't get deleted otherwise.
update_state.deletes.append(thing['key'])
thing = await data_provider.get_document(thing['location'])
if not thing:
logger.warning("No thing found for key %r. Ignoring...", key)
continue
if thing['type']['key'] == '/type/delete':
logger.info(
"%r has type %r. queuing for deleting it solr.",
thing['key'],
thing['type']['key'],
)
update_state.deletes.append(thing['key'])
else:
new_update_state, new_keys = await updater.update_key(thing)
update_state += new_update_state
keys += new_keys
except: # noqa: E722
logger.error("Failed to update %r", key, exc_info=True)
if update_state.has_changes():
if output_file:
async with aiofiles.open(output_file, "w") as f:
for doc in update_state.adds:
await f.write(f"{json.dumps(doc)}\n")
else:
_solr_update(update_state)
net_update += update_state
logger.debug("END update_keys")
return net_update
async def do_updates(keys):
logging.basicConfig(
level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s"
)
await update_keys(keys, commit=False)
def load_configs(
c_host: str,
c_config: str,
c_data_provider: (
DataProvider | Literal["default", "legacy", "external"]
) = 'default',
) -> DataProvider:
host = web.lstrips(c_host, "http://").strip("/")
set_query_host(host)
load_config(c_config)
global data_provider
if data_provider is None:
if isinstance(c_data_provider, DataProvider):
data_provider = c_data_provider
elif c_data_provider == 'external':
data_provider = ExternalDataProvider(host)
else:
data_provider = get_data_provider(c_data_provider)
return data_provider
async def main(
keys: list[str],
osp_dump: Path | None = None,
ol_url="http://openlibrary.org",
ol_config="openlibrary.yml",
output_file: str | None = None,
commit=True,
data_provider: Literal['default', 'legacy', 'external'] = "default",
solr_base: str | None = None,
solr_next=False,
update: Literal['update', 'print', 'pprint'] = 'update',
):
"""
Insert the documents with the given keys into Solr.
:param keys: The keys of the items to update (ex: /books/OL1M)
:param ol_url: URL of the openlibrary website
:param ol_config: Open Library config file
:param output_file: Where to save output
:param commit: Whether to also trigger a Solr commit
:param data_provider: Name of the data provider to use
:param solr_base: If wanting to override openlibrary.yml
:param solr_next: Whether to assume schema of next solr version is active
:param update: Whether/how to do the actual solr update call
"""
load_configs(ol_url, ol_config, data_provider)
logging.basicConfig(
level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s"
)
if keys[0].startswith('//'):
keys = [k[1:] for k in keys]
if solr_base:
set_solr_base_url(solr_base)
set_solr_next(solr_next)
set_osp_dump_location(osp_dump)
await update_keys(keys, commit=commit, output_file=output_file, update=update)
if __name__ == '__main__':
from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
FnToCLI(main).run()
| 7,160 | Python | .py | 174 | 33.241379 | 117 | 0.645941 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
205 | data_provider.py | internetarchive_openlibrary/openlibrary/solr/data_provider.py | """Module to provide data for solr indexer.
This module has all the logic for querying different sources for getting the
data required for solr.
Multiple data providers are supported, each is good for different use case.
"""
import asyncio
import itertools
import logging
import re
from typing import Optional, TypedDict, cast
from collections.abc import Iterable, Sized
import httpx
from httpx import HTTPError
import requests
import web
from web import DB
from infogami.infobase.client import Site
from openlibrary.core import ia
from openlibrary.core.bookshelves import Bookshelves
from openlibrary.core.ratings import Ratings, WorkRatingsSummary
from openlibrary.utils import extract_numeric_id_from_olid
logger = logging.getLogger("openlibrary.solr.data_provider")
IA_METADATA_FIELDS = ('identifier', 'boxid', 'collection', 'access-restricted-item')
OCAID_PATTERN = re.compile(r'^[^\s&#?/]+$')
def get_data_provider(type="default"):
"""Returns the data provider of given type."""
if type == "default":
return BetterDataProvider()
elif type == "legacy":
return LegacyDataProvider()
else:
raise ValueError("unknown data provider type: %s" % type)
def is_valid_ocaid(ocaid: str):
return bool(OCAID_PATTERN.match(ocaid))
def batch(items: list, max_batch_len: int):
"""
>>> list(batch([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
>>> list(batch([], 2))
[]
>>> list(batch([1,2,3,4,5], 3))
[[1, 2, 3], [4, 5]]
>>> list(batch([1,2,3,4,5], 5))
[[1, 2, 3, 4, 5]]
>>> list(batch([1,2,3,4,5], 6))
[[1, 2, 3, 4, 5]]
"""
start = 0
while start < len(items):
yield items[start : start + max_batch_len]
start += max_batch_len
def batch_until_len(items: Iterable[Sized], max_batch_len: int):
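    """
    Group items into batches whose combined length stays at or below
    max_batch_len; a single oversized item still gets its own batch.
    Illustrative doctests:
    >>> list(batch_until_len(['ab', 'cd', 'efg'], 4))
    [['ab', 'cd'], ['efg']]
    >>> list(batch_until_len(['abcdef'], 4))
    [['abcdef']]
    >>> list(batch_until_len([], 4))
    []
    """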
batch_len = 0
batch: list[Sized] = []
for item in items:
if batch_len + len(item) > max_batch_len and batch:
yield batch
batch = [item]
batch_len = len(item)
else:
batch.append(item)
batch_len += len(item)
if batch:
yield batch
def partition(lst: list, parts: int):
"""
>>> list(partition([1,2,3,4,5,6], 1))
[[1, 2, 3, 4, 5, 6]]
>>> list(partition([1,2,3,4,5,6], 2))
[[1, 2, 3], [4, 5, 6]]
>>> list(partition([1,2,3,4,5,6], 3))
[[1, 2], [3, 4], [5, 6]]
>>> list(partition([1,2,3,4,5,6], 4))
[[1], [2], [3], [4, 5, 6]]
>>> list(partition([1,2,3,4,5,6], 5))
[[1], [2], [3], [4], [5, 6]]
>>> list(partition([1,2,3,4,5,6], 6))
[[1], [2], [3], [4], [5], [6]]
>>> list(partition([1,2,3,4,5,6], 7))
[[1], [2], [3], [4], [5], [6]]
>>> list(partition([1,2,3,4,5,6,7], 3))
[[1, 2], [3, 4], [5, 6, 7]]
>>> list(partition([], 5))
[]
"""
if not lst:
return
total_len = len(lst)
parts = min(total_len, parts)
size = total_len // parts
for i in range(parts):
start = i * size
end = total_len if (i == parts - 1) else ((i + 1) * size)
yield lst[start:end]
class WorkReadingLogSolrSummary(TypedDict):
readinglog_count: int
want_to_read_count: int
currently_reading_count: int
already_read_count: int
class DataProvider:
"""
DataProvider is the interface for solr updater
to get additional information for building solr index.
This is an abstract class and multiple implementations are provided
in this module.
"""
def __init__(self) -> None:
self.ia_cache: dict[str, dict | None] = {}
@staticmethod
async def _get_lite_metadata(ocaids: list[str], _recur_depth=0, _max_recur_depth=3):
"""
For bulk fetch, some of the ocaids in Open Library may be bad
and break archive.org ES fetches. When this happens, we (up to
3 times) recursively split up the pool of ocaids to do as many
successful sub-bulk fetches as we can and then when limit is
reached, downstream code will fetch remaining ocaids individually
(and skip bad ocaids)
"""
if not ocaids or _recur_depth > _max_recur_depth:
logger.warning(
                'Max recursion exceeded trying to fetch IA data', extra={'ocaids': ocaids}
)
return []
try:
async with httpx.AsyncClient() as client:
r = await client.get(
"https://archive.org/advancedsearch.php",
timeout=30, # The default is silly short
headers={
'x-application-id': 'ol-solr',
},
params={
'q': f"identifier:({' OR '.join(ocaids)})",
'rows': len(ocaids),
'fl': ','.join(IA_METADATA_FIELDS),
'page': 1,
'output': 'json',
'save': 'yes',
'service': 'metadata__unlimited',
},
)
r.raise_for_status()
return r.json()['response']['docs']
except HTTPError:
logger.warning("IA bulk query failed")
except (ValueError, KeyError):
logger.warning(f"IA bulk query failed {r.status_code}: {r.json()['error']}")
# Only here if an exception occurred
# there's probably a bad apple; try splitting the batch
parts = await asyncio.gather(
*(
DataProvider._get_lite_metadata(part, _recur_depth=_recur_depth + 1)
for part in partition(ocaids, 6)
)
)
return list(itertools.chain(*parts))
@staticmethod
async def _get_lite_metadata_direct(ocaid: str):
try:
async with httpx.AsyncClient() as client:
r = await client.get(
f"https://archive.org/metadata/{ocaid}/metadata",
timeout=30, # The default is silly short
)
r.raise_for_status()
response = r.json()
if 'error' not in response:
lite_metadata = {
key: response['result'][key]
for key in IA_METADATA_FIELDS
if key in response['result']
}
return lite_metadata
else:
return {
'error': response['error'],
'identifier': ocaid,
}
except HTTPError:
logger.warning(f'Error fetching metadata for {ocaid}')
return None
async def get_document(self, key):
"""Returns the document with specified key from the database.
:param str key: type-prefixed key (ex: /books/OL1M)
:rtype: dict
"""
raise NotImplementedError()
def get_metadata(self, identifier: str):
if identifier in self.ia_cache:
logger.debug("IA metadata cache hit")
return self.ia_cache[identifier]
elif not is_valid_ocaid(identifier):
return None
else:
logger.debug("IA metadata cache miss")
return ia.get_metadata_direct(identifier)
async def preload_documents(self, keys: Iterable[str]):
"""
Preload a set of documents in a single request. Should make subsequent calls to
get_document faster.
"""
pass
async def preload_metadata(self, ocaids: list[str]):
invalid_ocaids = {ocaid for ocaid in ocaids if not is_valid_ocaid(ocaid)}
if invalid_ocaids:
logger.warning(f"Trying to cache invalid OCAIDs: {invalid_ocaids}")
valid_ocaids = list(set(ocaids) - invalid_ocaids)
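        # First try bulk lookups, batched so that roughly no more than 3000
        # characters of identifiers go into a single query; whatever is still
        # missing afterwards is fetched individually, six at a time.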
batches = list(batch_until_len(valid_ocaids, 3000))
# Start them all async
tasks = [asyncio.create_task(self._get_lite_metadata(b)) for b in batches]
for task in tasks:
for doc in await task:
self.ia_cache[doc['identifier']] = doc
missing_ocaids = [ocaid for ocaid in valid_ocaids if ocaid not in self.ia_cache]
missing_ocaid_batches = list(batch(missing_ocaids, 6))
for missing_batch in missing_ocaid_batches:
# Start them all async
tasks = [
asyncio.create_task(self._get_lite_metadata_direct(ocaid))
for ocaid in missing_batch
]
for task in tasks:
lite_metadata = await task
if lite_metadata:
self.ia_cache[lite_metadata['identifier']] = lite_metadata
def preload_editions_of_works(self, work_keys: Iterable[str]):
"""
Preload the editions of the provided works. Should make subsequent calls to
get_editions_of_work faster.
:param list of str work_keys: type-prefixed keys to work keys (ex: /works/OL1W)
:return: None
"""
pass
def find_redirects(self, key):
"""
Returns keys of all things which redirect to this one.
:param str key: type-prefixed key
:rtype: list of str
"""
raise NotImplementedError()
def get_editions_of_work(self, work):
"""
:param dict work: work object
:rtype: list of dict
"""
raise NotImplementedError()
def get_work_ratings(self, work_key: str) -> WorkRatingsSummary | None:
raise NotImplementedError()
def get_work_reading_log(self, work_key: str) -> WorkReadingLogSolrSummary | None:
raise NotImplementedError()
def clear_cache(self):
self.ia_cache.clear()
class LegacyDataProvider(DataProvider):
def __init__(self):
from openlibrary.catalog.utils.query import query_iter, withKey
super().__init__()
self._query_iter = query_iter
self._withKey = withKey
def find_redirects(self, key):
"""Returns keys of all things which are redirected to this one."""
logger.info("find_redirects %s", key)
q = {'type': '/type/redirect', 'location': key}
return [r['key'] for r in self._query_iter(q)]
def get_editions_of_work(self, work):
logger.info("find_editions_of_work %s", work['key'])
q = {'type': '/type/edition', 'works': work['key'], '*': None}
return list(self._query_iter(q))
async def get_document(self, key):
logger.info("get_document %s", key)
return self._withKey(key)
def get_work_ratings(self, work_key: str) -> WorkRatingsSummary | None:
work_id = int(work_key[len('/works/OL') : -len('W')])
return Ratings.get_work_ratings_summary(work_id)
def get_work_reading_log(self, work_key: str) -> WorkReadingLogSolrSummary:
work_id = extract_numeric_id_from_olid(work_key)
counts = Bookshelves.get_work_summary(work_id)
return cast(
WorkReadingLogSolrSummary,
{
'readinglog_count': sum(counts.values()),
**{f'{shelf}_count': count for shelf, count in counts.items()},
},
)
def clear_cache(self):
# Nothing's cached, so nothing to clear!
return
class ExternalDataProvider(DataProvider):
"""
Only used for local env, this data provider fetches data using public OL apis
"""
def __init__(self, ol_host: str):
super().__init__()
self.ol_host = ol_host
def find_redirects(self, key: str):
# NOT IMPLEMENTED
return []
def get_editions_of_work(self, work):
resp = requests.get(
f"http://{self.ol_host}{work['key']}/editions.json", params={'limit': 500}
).json()
if 'next' in resp['links']:
logger.warning(f"Too many editions for {work['key']}")
return resp['entries']
async def get_document(self, key: str):
async with httpx.AsyncClient() as client:
response = await client.get(f"http://{self.ol_host}{key}.json")
return response.json()
class BetterDataProvider(LegacyDataProvider):
def __init__(
self,
site: Site | None = None,
db: DB | None = None,
):
"""Test with
import web; import infogami
from openlibrary.config import load_config
load_config('/openlibrary/config/openlibrary.yml')
infogami._setup()
from infogami import config
"""
super().__init__()
# cache for documents
self.cache: dict[str, dict] = {}
# cache for redirects
self.redirect_cache: dict[str, list[str]] = {}
self.edition_keys_of_works_cache: dict[str, list[str]] = {}
import infogami
from infogami.utils import delegate
# web.ctx might not be defined at this time -_-
self.get_site = lambda: site or web.ctx.site
if not db:
infogami._setup()
delegate.fakeload()
from openlibrary.core.db import get_db
self.db: DB = get_db()
else:
self.db = db
async def get_document(self, key):
# logger.info("get_document %s", key)
if key not in self.cache:
await self.preload_documents([key])
if key not in self.cache:
logger.warning("NOT FOUND %s", key)
return self.cache.get(key) or {"key": key, "type": {"key": "/type/delete"}}
async def preload_documents(self, keys: Iterable[str]):
keys2 = set(keys)
# keys2.update(k for k in self.ia_redirect_cache.values() if k is not None)
self.preload_documents0(keys2)
self._preload_works()
self._preload_authors()
self._preload_editions()
await self._preload_metadata_of_editions()
        # for all works and authors, find redirects as they'll be requested later
keys3 = [k for k in self.cache if k.startswith(("/works/", "/authors/"))]
self.preload_redirects(keys3)
def preload_documents0(self, keys):
keys = [k for k in keys if k not in self.cache]
if not keys:
return
logger.info("preload_documents0 %s", keys)
for chunk in web.group(keys, 100):
docs = self.get_site().get_many(list(chunk))
for doc in docs:
self.cache[doc['key']] = doc.dict()
def _preload_works(self):
"""Preloads works for all editions in the cache."""
keys = []
for doc in self.cache.values():
if doc and doc['type']['key'] == '/type/edition' and doc.get('works'):
keys.append(doc['works'][0]['key'])
# print "preload_works, found keys", keys
self.preload_documents0(keys)
def _preload_editions(self):
keys = []
for doc in self.cache.values():
if doc and doc['type']['key'] == '/type/work':
keys.append(doc['key'])
self.preload_editions_of_works(keys)
async def _preload_metadata_of_editions(self):
identifiers = []
for doc in self.cache.values():
if doc and doc['type']['key'] == '/type/edition' and doc.get('ocaid'):
identifiers.append(doc['ocaid'])
# source_records = doc.get("source_records", [])
# identifiers.extend(r[len("ia:"):] for r in source_records if r.startswith("ia:"))
await self.preload_metadata(identifiers)
def _preload_authors(self):
"""Preloads authors for all works in the cache."""
keys = []
for doc in self.cache.values():
if doc and doc['type']['key'] == '/type/work' and doc.get('authors'):
keys.extend(a['author']['key'] for a in doc['authors'])
if doc and doc['type']['key'] == '/type/edition' and doc.get('authors'):
keys.extend(a['key'] for a in doc['authors'])
self.preload_documents0(list(set(keys)))
def find_redirects(self, key):
"""Returns all the keys that are redirected to this."""
self.preload_redirects([key])
return self.redirect_cache[key]
def preload_redirects(self, keys):
keys = [k for k in keys if k not in self.redirect_cache]
if not keys:
return
logger.info("preload_redirects %s", keys)
for chunk in web.group(keys, 100):
self._preload_redirects0(list(chunk))
def _preload_redirects0(self, keys):
query = {
"type": "/type/redirect",
"location": keys,
"a:location": None, # asking it to fill location in results
}
for k in keys:
self.redirect_cache.setdefault(k, [])
matches = self.get_site().things(query, details=True)
for thing in matches:
# we are trying to find documents that are redirecting to each of the given keys
self.redirect_cache[thing.location].append(thing.key)
def get_editions_of_work(self, work):
wkey = work['key']
self.preload_editions_of_works([wkey])
edition_keys = self.edition_keys_of_works_cache.get(wkey, [])
return [self.cache[k] for k in edition_keys]
def preload_editions_of_works(self, work_keys: Iterable[str]):
work_keys = [
wkey for wkey in work_keys if wkey not in self.edition_keys_of_works_cache
]
if not work_keys:
return
logger.info("preload_editions_of_works %s ..", work_keys[:5])
        # Infobase doesn't have a way to find editions of multiple works at once.
# Using raw SQL to avoid making individual infobase queries, which is very
# time consuming.
key_query = (
"select id from property where name='works'"
" and type=(select id from thing where key='/type/edition')"
)
q = (
"SELECT edition.key as edition_key, work.key as work_key"
" FROM thing as edition, thing as work, edition_ref"
" WHERE edition_ref.thing_id=edition.id"
" AND edition_ref.value=work.id"
f" AND edition_ref.key_id=({key_query})"
" AND work.key in $keys"
)
result = self.db.query(q, vars={"keys": work_keys})
for row in result:
self.edition_keys_of_works_cache.setdefault(row.work_key, []).append(
row.edition_key
)
keys = [k for _keys in self.edition_keys_of_works_cache.values() for k in _keys]
self.preload_documents0(keys)
return
def clear_cache(self):
super().clear_cache()
self.cache.clear()
self.redirect_cache.clear()
self.edition_keys_of_works_cache.clear()
| 18,714 | Python | .py | 458 | 31.360262 | 99 | 0.580283 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
206 | author.py | internetarchive_openlibrary/openlibrary/solr/updater/author.py | from typing import cast
import typing
import httpx
from openlibrary.solr.solr_types import SolrDocument
from openlibrary.solr.updater.abstract import AbstractSolrBuilder, AbstractSolrUpdater
from openlibrary.solr.utils import SolrUpdateRequest, get_solr_base_url
from openlibrary.solr.data_provider import WorkReadingLogSolrSummary
from openlibrary.core.ratings import WorkRatingsSummary, Ratings
SUBJECT_FACETS = ['subject_facet', 'time_facet', 'person_facet', 'place_facet']
class AuthorSolrUpdater(AbstractSolrUpdater):
key_prefix = '/authors/'
thing_type = '/type/author'
async def update_key(self, author: dict) -> tuple[SolrUpdateRequest, list[str]]:
author_id = author['key'].split("/")[-1]
base_url = get_solr_base_url() + '/query'
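        # A single Solr JSON-facet query aggregates stats across all of this
        # author's works: it sums the ratings/reading-log counters and collects
        # the top subject/place/person/time facets.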
json: dict[str, typing.Any] = {
"params": {
"json.nl": "arrarr",
"q": "author_key:%s " % author_id,
"fq": "type:work",
"fl": "title, subtitle",
"sort": "edition_count desc",
},
'facet': {
"ratings_count_1": "sum(ratings_count_1)",
"ratings_count_2": "sum(ratings_count_2)",
"ratings_count_3": "sum(ratings_count_3)",
"ratings_count_4": "sum(ratings_count_4)",
"ratings_count_5": "sum(ratings_count_5)",
"readinglog_count": "sum(readinglog_count)",
"want_to_read_count": "sum(want_to_read_count)",
"currently_reading_count": "sum(currently_reading_count)",
"already_read_count": "sum(already_read_count)",
},
}
for field in SUBJECT_FACETS:
json["facet"][field] = {
"type": "terms",
"field": field,
}
async with httpx.AsyncClient() as client:
response = await client.post(
base_url,
timeout=30,
json=json,
)
reply = response.json()
doc = AuthorSolrBuilder(author, reply).build()
return SolrUpdateRequest(adds=[doc]), []
class AuthorSolrBuilder(AbstractSolrBuilder):
def __init__(self, author: dict, solr_reply: dict):
self._author = author
self._solr_reply = solr_reply
@property
def key(self) -> str:
return self._author['key']
@property
def type(self) -> str:
return 'author'
@property
def name(self) -> str | None:
return self._author.get('name')
@property
def alternate_names(self) -> list[str]:
return self._author.get('alternate_names', [])
@property
def birth_date(self) -> str | None:
return self._author.get('birth_date')
@property
def death_date(self) -> str | None:
return self._author.get('death_date')
@property
def date(self) -> str | None:
"""I think this is legacy?"""
return self._author.get('date')
@property
def top_work(self) -> str | None:
docs = self._solr_reply['response'].get('docs', [])
if docs and docs[0].get('title', None):
top_work = docs[0]['title']
if docs[0].get('subtitle', None):
top_work += ': ' + docs[0]['subtitle']
return top_work
return None
@property
def work_count(self) -> int:
return self._solr_reply['response']['numFound']
@property
def top_subjects(self) -> list[str]:
all_subjects = []
for field in SUBJECT_FACETS:
if facet := self._solr_reply['facets'].get(field):
for bucket in facet['buckets']:
all_subjects.append((bucket["count"], bucket["val"]))
all_subjects.sort(reverse=True)
return [top_facets for num, top_facets in all_subjects[:10]]
def build(self) -> SolrDocument:
doc = cast(dict, super().build())
doc |= self.build_ratings()
doc |= self.build_reading_log()
return cast(SolrDocument, doc)
def build_ratings(self) -> WorkRatingsSummary:
return Ratings.work_ratings_summary_from_counts(
[
self._solr_reply["facets"].get(f"ratings_count_{index}", 0)
for index in range(1, 6)
]
)
def build_reading_log(self) -> WorkReadingLogSolrSummary:
reading_log = {
"want_to_read_count": self._solr_reply["facets"].get(
"want_to_read_count", 0.0
),
"already_read_count": self._solr_reply["facets"].get(
"already_read_count", 0.0
),
"currently_reading_count": self._solr_reply["facets"].get(
"currently_reading_count", 0.0
),
"readinglog_count": self._solr_reply["facets"].get("readinglog_count", 0.0),
}
return cast(WorkReadingLogSolrSummary, reading_log)
| 4,951 | Python | .py | 122 | 30.47541 | 88 | 0.569288 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
207 | edition.py | internetarchive_openlibrary/openlibrary/solr/updater/edition.py | from functools import cached_property
import logging
import re
from typing import TYPE_CHECKING, cast
import requests
import openlibrary.book_providers as bp
from openlibrary.solr.solr_types import SolrDocument
from openlibrary.solr.updater.abstract import AbstractSolrBuilder, AbstractSolrUpdater
from openlibrary.solr.utils import SolrUpdateRequest, get_solr_base_url
from openlibrary.utils import uniq
from openlibrary.utils.isbn import opposite_isbn
if TYPE_CHECKING:
from openlibrary.solr.updater.work import WorkSolrBuilder
logger = logging.getLogger("openlibrary.solr")
re_edition_key_basename = re.compile("^[a-zA-Z0-9:.-]+$")
re_lang_key = re.compile(r'^/(?:l|languages)/([a-z]{3})$')
re_year = re.compile(r'\b(\d{4})\b')
re_solr_field = re.compile(r'^[-\w]+$', re.U)
re_not_az = re.compile('[^a-zA-Z]')
class EditionSolrUpdater(AbstractSolrUpdater):
key_prefix = '/books/'
thing_type = '/type/edition'
async def update_key(self, thing: dict) -> tuple[SolrUpdateRequest, list[str]]:
update = SolrUpdateRequest()
new_keys: list[str] = []
if thing['type']['key'] == self.thing_type:
if thing.get("works"):
new_keys.append(thing["works"][0]['key'])
# Make sure we remove any fake works created from orphaned editions
update.deletes.append(thing['key'].replace('/books/', '/works/'))
else:
# index the edition as it does not belong to any work
new_keys.append(thing['key'].replace('/books/', '/works/'))
else:
logger.info(
"%r is a document of type %r. Checking if any work has it as edition in solr...",
thing['key'],
thing['type']['key'],
)
work_key = solr_select_work(thing['key'])
if work_key:
logger.info("found %r, updating it...", work_key)
new_keys.append(work_key)
return update, new_keys
def solr_select_work(edition_key):
"""
Get corresponding work key for given edition key in Solr.
:param str edition_key: (ex: /books/OL1M)
:return: work_key
:rtype: str or None
"""
# solr only uses the last part as edition_key
edition_key = edition_key.split("/")[-1]
if not re_edition_key_basename.match(edition_key):
return None
edition_key = solr_escape(edition_key)
reply = requests.get(
f'{get_solr_base_url()}/select',
params={
'wt': 'json',
'q': f'edition_key:{edition_key}',
'rows': 1,
'fl': 'key',
},
).json()
if docs := reply['response'].get('docs', []):
return docs[0]['key'] # /works/ prefix is in solr
def solr_escape(query):
"""
Escape special characters in Solr query.
:param str query:
:rtype: str
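    Illustrative doctest:
    >>> solr_escape('foo:bar')
    'foo\\\\:bar'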
"""
return re.sub(r'([\s\-+!()|&{}\[\]^"~*?:\\])', r'\\\1', query)
def is_sine_nomine(pub: str) -> bool:
"""Check if the publisher is 'sn' (excluding non-letter characters)."""
return re_not_az.sub('', pub).lower() == 'sn'
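# For example, is_sine_nomine('[s.n.]') and is_sine_nomine('S. N.') are True,
# while is_sine_nomine('Sine nomine') is False ('sinenomine' != 'sn').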
class EditionSolrBuilder(AbstractSolrBuilder):
def __init__(
self,
edition: dict,
solr_work: 'WorkSolrBuilder | None' = None,
ia_metadata: bp.IALiteMetadata | None = None,
):
self._edition = edition
self._solr_work = solr_work
self._ia_metadata = ia_metadata
self._provider = bp.get_book_provider(edition)
@property
def key(self):
return self._edition['key']
@property
def title(self) -> str | None:
return self._edition.get('title')
@property
def subtitle(self) -> str | None:
return self._edition.get('subtitle')
@property
def alternative_title(self) -> set[str]:
"""Get titles from the editions as alternative titles."""
result: set[str] = set()
full_title = self._edition.get('title')
if not full_title:
return result
if self._edition.get('subtitle'):
full_title += ': ' + cast(str, self._edition['subtitle'])
result.add(full_title)
result.update(self._edition.get('work_titles', []))
result.update(self._edition.get('other_titles', []))
return result
@property
def cover_i(self) -> int | None:
return next(
(
cover_id
for cover_id in self._edition.get('covers', [])
if cover_id != -1
),
None,
)
@property
def language(self) -> list[str]:
"""Gets the 3 letter language codes (eg ['ger', 'fre'])"""
result: list[str] = []
for lang in self._edition.get('languages', []):
m = re_lang_key.match(lang['key'] if isinstance(lang, dict) else lang)
if m:
result.append(m.group(1))
return uniq(result)
@property
def publisher(self) -> list[str]:
return uniq(
publisher if not is_sine_nomine(publisher) else 'Sine nomine'
for publisher in self._edition.get('publishers', [])
)
@property
def number_of_pages(self) -> int | None:
try:
return int(self._edition.get('number_of_pages', None)) or None
except (TypeError, ValueError): # int(None) -> TypeErr, int("vii") -> ValueErr
return None
@property
def translation_of(self) -> str | None:
return self._edition.get("translation_of")
@property
def format(self) -> str | None:
return self._edition.get('physical_format')
@property
def isbn(self) -> list[str]:
"""
Get all ISBNs of the given edition. Calculates complementary ISBN13 for each
ISBN10 and vice-versa. Does not remove '-'s.
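        For example (an illustrative pair), an edition listing isbn_10
        ['0140328726'] will also get the corresponding ISBN-13 '9780140328721'.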
"""
isbns = []
isbns += [
isbn.replace("_", "").strip() for isbn in self._edition.get("isbn_13", [])
]
isbns += [
isbn.replace("_", "").strip() for isbn in self._edition.get("isbn_10", [])
]
# Get the isbn13 when isbn10 is present and vice-versa.
isbns += [opposite_isbn(v) for v in isbns]
return uniq(isbn for isbn in isbns if isbn)
@property
def lccn(self) -> list[str]:
return uniq(lccn.strip() for lccn in self._edition.get('lccn', []))
@property
def publish_date(self) -> str | None:
return self._edition.get('publish_date')
@property
def publish_year(self) -> int | None:
if self.publish_date:
m = re_year.search(self.publish_date)
return int(m.group(1)) if m else None
else:
return None
@property
def ia(self) -> str | None:
ocaid = self._edition.get('ocaid')
return ocaid.strip() if ocaid else None
@property
def ia_collection(self) -> list[str]:
collections = self._ia_metadata['collection'] if self._ia_metadata else set()
# Exclude fav-* collections because they're not useful to us.
return [c for c in collections if not c.startswith('fav-')]
@property
def ia_box_id(self) -> list[str]:
boxids = []
if 'ia_box_id' in self._edition:
if isinstance(self._edition['ia_box_id'], str):
boxids = [self._edition['ia_box_id']]
elif isinstance(self._edition['ia_box_id'], list):
boxids = self._edition['ia_box_id']
else:
logger.warning(
f'Bad ia_box_id on {self.key}: "{self._edition["ia_box_id"]}"'
)
if self._ia_metadata:
boxids += list(self._ia_metadata.get('boxid') or [])
return uniq(boxids, key=lambda x: x.lower())
@property
def identifiers(self) -> dict:
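        # e.g. (illustrative) an edition with {'identifiers': {'librarything': ['123']}}
        # contributes {'id_librarything': ['123']} to the solr document.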
identifiers = {}
for key, id_list in self._edition.get('identifiers', {}).items():
solr_key = (
key.replace('.', '_')
.replace(',', '_')
.replace('(', '')
.replace(')', '')
.replace(':', '_')
.replace('/', '')
.replace('#', '')
.lower()
)
m = re_solr_field.match(solr_key)
if not m:
logger.warning(f'Bad identifier on {self.key}: "{key}"')
continue
identifiers[f'id_{solr_key}'] = uniq(v.strip() for v in id_list)
return identifiers
@cached_property
def ebook_access(self) -> bp.EbookAccess:
if not self._provider:
return bp.EbookAccess.NO_EBOOK
elif isinstance(self._provider, bp.InternetArchiveProvider):
return self._provider.get_access(self._edition, self._ia_metadata)
else:
return self._provider.get_access(self._edition)
@property
def has_fulltext(self) -> bool:
return self.ebook_access > bp.EbookAccess.UNCLASSIFIED
@property
def public_scan_b(self) -> bool:
return self.ebook_access == bp.EbookAccess.PUBLIC
def build(self) -> SolrDocument:
"""
Build the solr document for the given edition to store as a nested
document
Completely override parent class method to handle some peculiar
fields
"""
solr_doc: SolrDocument = cast(
SolrDocument,
{
'key': self.key,
'type': 'edition',
# Display data
'title': self.title,
'subtitle': self.subtitle,
'alternative_title': list(self.alternative_title),
'cover_i': self.cover_i,
'language': self.language,
# Duplicate the author data from the work
**(
{
'author_name': self._solr_work.author_name,
'author_key': self._solr_work.author_key,
'author_alternative_name': list(
self._solr_work.author_alternative_name
),
'author_facet': self._solr_work.author_facet,
}
if self._solr_work
else {}
),
# Misc useful data
'publisher': self.publisher,
'format': [self.format] if self.format else None,
'publish_date': [self.publish_date] if self.publish_date else None,
'publish_year': [self.publish_year] if self.publish_year else None,
# Identifiers
'isbn': self.isbn,
'lccn': self.lccn,
**self.identifiers,
# IA
'ia': [self.ia] if self.ia else None,
'ia_collection': self.ia_collection,
'ia_box_id': self.ia_box_id,
# Ebook access
'ebook_access': self.ebook_access.to_solr_str(),
'has_fulltext': self.has_fulltext,
'public_scan_b': self.public_scan_b,
},
)
return cast(
SolrDocument,
{
key: solr_doc[key] # type: ignore
for key in solr_doc
if solr_doc[key] not in (None, [], '') # type: ignore
},
)
| 11,402 | Python | .py | 292 | 28.705479 | 97 | 0.549358 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
208 | work.py | internetarchive_openlibrary/openlibrary/solr/updater/work.py | from collections import defaultdict
from collections.abc import Iterable
import datetime
from functools import cached_property
import itertools
import logging
from math import ceil
import re
from statistics import median
import time
from typing import Optional, TypedDict, cast
from openlibrary.core import helpers as h
import openlibrary.book_providers as bp
from openlibrary.core.ratings import WorkRatingsSummary
from openlibrary.plugins.upstream.utils import safeget
from openlibrary.plugins.worksearch.subjects import SubjectPseudoKey
from openlibrary.solr.data_provider import DataProvider, WorkReadingLogSolrSummary
from openlibrary.solr.solr_types import SolrDocument
from openlibrary.solr.updater.abstract import AbstractSolrBuilder, AbstractSolrUpdater
from openlibrary.solr.updater.edition import EditionSolrBuilder
from openlibrary.solr.utils import SolrUpdateRequest, str_to_key
from openlibrary.utils import uniq
from openlibrary.utils.ddc import choose_sorting_ddc, normalize_ddc
from openlibrary.utils.lcc import choose_sorting_lcc, short_lcc_to_sortable_lcc
from openlibrary.utils.open_syllabus_project import get_total_by_olid
logger = logging.getLogger("openlibrary.solr")
re_author_key = re.compile(r'^/(?:a|authors)/(OL\d+A)')
re_edition_key = re.compile(r"/books/([^/]+)")
re_subject = re.compile("[, _]+")
class WorkSolrUpdater(AbstractSolrUpdater):
key_prefix = '/works/'
thing_type = '/type/work'
async def preload_keys(self, keys: Iterable[str]):
await super().preload_keys(keys)
self.data_provider.preload_editions_of_works(keys)
async def update_key(self, work: dict) -> tuple[SolrUpdateRequest, list[str]]:
"""
Get the Solr requests necessary to insert/update this work into Solr.
:param dict work: Work to insert/update
"""
wkey = work['key']
update = SolrUpdateRequest()
# q = {'type': '/type/redirect', 'location': wkey}
# redirect_keys = [r['key'][7:] for r in query_iter(q)]
# redirect_keys = [k[7:] for k in data_provider.find_redirects(wkey)]
# deletes += redirect_keys
# deletes += [wkey[7:]] # strip /works/ from /works/OL1234W
# Handle edition records as well
# When an edition does not contain a works list, create a fake work and index it.
if work['type']['key'] == '/type/edition':
fake_work = {
# Solr uses type-prefixed keys. It's required to be unique across
# all types of documents. The website takes care of redirecting
# /works/OL1M to /books/OL1M.
'key': wkey.replace("/books/", "/works/"),
'type': {'key': '/type/work'},
'title': work.get('title'),
'editions': [work],
'authors': [
{'type': '/type/author_role', 'author': {'key': a['key']}}
for a in work.get('authors', [])
],
}
# Hack to add subjects when indexing /books/ia:xxx
if work.get("subjects"):
fake_work['subjects'] = work['subjects']
return await self.update_key(fake_work)
elif work['type']['key'] == '/type/work':
try:
# Anand - Oct 2013
                # For /works/ia:xxx, editions are already supplied. Querying would return an empty response.
# Fetch editions
if "editions" in work:
editions = work['editions']
else:
editions = self.data_provider.get_editions_of_work(work)
# Fetch authors
author_keys = [
author['author']['key']
for author in normalize_authors(work.get('authors', []))
]
authors = [
await self.data_provider.get_document(key) for key in author_keys
]
if any(a['type']['key'] != '/type/author' for a in authors):
                    # Don't raise an exception; just log a warning and drop the non-author records.
logger.warning('Unexpected author type error: %s', work['key'])
authors = [a for a in authors if a['type']['key'] == '/type/author']
# Fetch ia_metadata
iaids = [e["ocaid"] for e in editions if "ocaid" in e]
ia_metadata = {
iaid: get_ia_collection_and_box_id(iaid, self.data_provider)
for iaid in iaids
}
solr_doc = WorkSolrBuilder(
work, editions, authors, self.data_provider, ia_metadata
).build()
except: # noqa: E722
logger.error("failed to update work %s", work['key'], exc_info=True)
else:
if solr_doc is not None:
iaids = solr_doc.get('ia') or []
# Delete all ia:foobar keys
if iaids:
update.deletes += [f"/works/ia:{iaid}" for iaid in iaids]
update.adds.append(solr_doc)
else:
logger.error("unrecognized type while updating work %s", wkey)
return update, []
def get_ia_collection_and_box_id(
ia: str, data_provider: DataProvider
) -> Optional['bp.IALiteMetadata']:
"""
Get the collections and boxids of the provided IA id
TODO Make the return type of this a namedtuple so that it's easier to reference
:param str ia: Internet Archive ID
    :return: A dict of the form
        `{ boxid: set[str], collection: set[str], access_restricted_item: str | None }`,
        or None if the IA id is invalid or its metadata is unavailable.
    :rtype: dict | None
"""
if len(ia) == 1:
return None
def get_list(d, key):
"""
Return d[key] as some form of list, regardless of if it is or isn't.
:param dict or None d:
:param str key:
:rtype: list
"""
if not d:
return []
value = d.get(key, [])
if not value:
return []
elif value and not isinstance(value, list):
return [value]
else:
return value
metadata = data_provider.get_metadata(ia)
if metadata is None:
# It's none when the IA id is not found/invalid.
        # TODO: It would be better if get_metadata raised an error.
return None
return {
'boxid': set(get_list(metadata, 'boxid')),
'collection': set(get_list(metadata, 'collection')),
'access_restricted_item': metadata.get('access-restricted-item'),
}
class KeyDict(TypedDict):
key: str
class NormalizedAuthor(TypedDict):
type: KeyDict
author: KeyDict
def normalize_authors(authors: list[dict]) -> list[NormalizedAuthor]:
"""
Need to normalize to a predictable format because of inconsistencies in data
>>> normalize_authors([
... {'type': {'key': '/type/author_role'}, 'author': '/authors/OL1A'}
... ])
[{'type': {'key': '/type/author_role'}, 'author': {'key': '/authors/OL1A'}}]
>>> normalize_authors([{
... "type": {"key": "/type/author_role"},
... "author": {"key": "/authors/OL1A"}
... }])
[{'type': {'key': '/type/author_role'}, 'author': {'key': '/authors/OL1A'}}]
"""
return [
cast(
NormalizedAuthor,
{
'type': {'key': safeget(lambda: a['type']['key'], '/type/author_role')},
'author': (
a['author']
if isinstance(a['author'], dict)
else {'key': a['author']}
),
},
)
for a in authors
# TODO: Remove after
# https://github.com/internetarchive/openlibrary-client/issues/126
if 'author' in a
]
def extract_edition_olid(key: str) -> str:
m = re_edition_key.match(key)
if not m:
raise ValueError(f'Invalid key: {key}')
return m.group(1)
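# A minimal sketch of extract_edition_olid() behaviour; the `_demo_` helper and
# the OLID below are illustrative only, not part of this module's API.
def _demo_extract_edition_olid() -> None:
    assert extract_edition_olid('/books/OL7353617M') == 'OL7353617M'
    try:
        extract_edition_olid('OL7353617M')  # missing the /books/ prefix
        raise AssertionError('expected ValueError')
    except ValueError:
        pass  # keys that are not of the /books/... form are rejected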
def datetimestr_to_int(datestr):
"""
Convert an OL datetime to a timestamp integer.
:param str or dict datestr: Either a string like `"2017-09-02T21:26:46.300245"` or a dict like
`{"value": "2017-09-02T21:26:46.300245"}`
:rtype: int
"""
if isinstance(datestr, dict):
datestr = datestr['value']
if datestr:
try:
t = h.parse_datetime(datestr)
except (TypeError, ValueError):
t = datetime.datetime.now()
else:
t = datetime.datetime.now()
return int(time.mktime(t.timetuple()))
def subject_name_to_key(subject_type: str, name: str) -> SubjectPseudoKey:
prefix = '/subjects/'
if subject_type != 'subject':
prefix += f'{subject_type}:'
return prefix + re_subject.sub("_", name.lower()).strip("_")
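# A minimal sketch of the subject pseudo-key normalisation; the sample subject
# names and the `_demo_` helper are invented for illustration only.
def _demo_subject_name_to_key() -> None:
    assert subject_name_to_key('subject', 'Science Fiction') == '/subjects/science_fiction'
    assert subject_name_to_key('place', 'New York, N.Y.') == '/subjects/place:new_york_n.y.'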
class WorkSolrBuilder(AbstractSolrBuilder):
def __init__(
self,
work: dict,
editions: list[dict],
authors: list[dict],
data_provider: DataProvider,
ia_metadata: dict[str, Optional['bp.IALiteMetadata']],
):
self._work = work
self._editions = editions
self._authors = authors
self._ia_metadata = ia_metadata
self._data_provider = data_provider
self._solr_editions = [
EditionSolrBuilder(
e, self, self._ia_metadata.get(e.get('ocaid', '').strip())
)
for e in self._editions
]
def build(self) -> SolrDocument:
doc = cast(dict, super().build())
doc |= self.build_identifiers()
doc |= self.build_subjects()
doc |= self.build_legacy_ia_fields()
doc |= self.build_ratings() or {}
doc |= self.build_reading_log() or {}
return cast(SolrDocument, doc)
@property
def key(self):
return self._work['key']
@property
def type(self):
return 'work'
@property
def seed(self) -> list[str]:
w = self._work
return uniq(
itertools.chain(
(e.key for e in self._solr_editions),
(self.key,),
(author['key'] for author in self._authors),
(subject_name_to_key("subject", s) for s in w.get("subjects", [])),
(subject_name_to_key("person", s) for s in w.get("subject_people", [])),
(subject_name_to_key("place", s) for s in w.get("subject_places", [])),
(subject_name_to_key("time", s) for s in w.get("subject_times", [])),
)
)
@property
def title(self) -> str | None:
if self._work.get('title'):
return self._work['title']
else:
# Some works are missing a title, but have titles on their editions
            logger.warning('Work missing title %s', self.key)
return next(
(ed.title for ed in self._solr_editions if ed.title), '__None__'
)
@property
def subtitle(self) -> str | None:
return self._work.get('subtitle')
@property
def alternative_title(self) -> set[str]:
alt_title_set = set()
for book in (EditionSolrBuilder(self._work), *self._solr_editions):
alt_title_set.update(book.alternative_title)
if book.translation_of:
alt_title_set.add(book.translation_of)
return alt_title_set
@property
def alternative_subtitle(self) -> set[str]:
"""Get subtitles from the editions as alternative titles."""
return {
bookish['subtitle'] for bookish in self._editions if bookish.get('subtitle')
}
@property
def edition_count(self) -> int:
return len(self._editions)
@property
def osp_count(self) -> int | None:
return get_total_by_olid(self.key)
@property
def edition_key(self) -> list[str]:
return [extract_edition_olid(e['key']) for e in self._editions]
@property
def by_statement(self) -> set[str]:
return {e["by_statement"] for e in self._editions if "by_statement" in e}
@property
def publish_date(self) -> set[str]:
return {e.publish_date for e in self._solr_editions if e.publish_date}
@property
def publish_year(self) -> set[int]:
return {
year for e in self._solr_editions if (year := e.publish_year) is not None
}
@property
def first_publish_year(self) -> int | None:
if publish_years := self.publish_year:
return min(publish_years)
else:
return None
@property
def number_of_pages_median(self) -> int | None:
number_of_pages = [
pages
for e in self._solr_editions
if (pages := e.number_of_pages) is not None
]
if number_of_pages:
return ceil(median(number_of_pages))
else:
return None
@property
def editions(self) -> list[SolrDocument]:
return [ed.build() for ed in self._solr_editions]
@property
def lccn(self) -> set[str]:
return {lccn for ed in self._solr_editions for lccn in ed.lccn}
@property
def publish_place(self) -> set[str]:
return {v for e in self._editions for v in e.get('publish_places', [])}
@property
def oclc(self) -> set[str]:
return {v for e in self._editions for v in e.get('oclc_numbers', [])}
@property
def contributor(self) -> set[str]:
return {
v
for e in self._editions
for v in (
e.get('contributions', [])
# TODO: contributors wasn't included here in the past, but
# we likely want it to be edition-only if possible?
# Excluding for now to avoid a possible perf hit in the
# next full reindex which is already pretty loaded
# + [c.get('name') for c in e.get('contributors', [])]
)
if v
}
@property
def lcc(self) -> set[str]:
raw_lccs = {
lcc for ed in self._editions for lcc in ed.get('lc_classifications', [])
}
return {lcc for lcc in map(short_lcc_to_sortable_lcc, raw_lccs) if lcc}
@property
def lcc_sort(self) -> str | None:
if lccs := self.lcc:
return choose_sorting_lcc(lccs)
else:
return None
@property
def ddc(self) -> set[str]:
raw_ddcs = {ddc for ed in self._editions for ddc in get_edition_ddcs(ed)}
return {ddc for raw_ddc in raw_ddcs for ddc in normalize_ddc(raw_ddc) if ddc}
@property
def ddc_sort(self) -> str | None:
if ddcs := self.ddc:
return choose_sorting_ddc(ddcs)
else:
return None
@property
def isbn(self) -> set[str]:
return {isbn for ed in self._editions for isbn in EditionSolrBuilder(ed).isbn}
@property
def last_modified_i(self) -> int:
return max(
datetimestr_to_int(doc.get('last_modified'))
for doc in (self._work, *self._editions)
)
@property
def ebook_count_i(self) -> int:
return sum(
1 for e in self._solr_editions if e.ebook_access > bp.EbookAccess.NO_EBOOK
)
@cached_property
def ebook_access(self) -> bp.EbookAccess:
return max(
(e.ebook_access for e in self._solr_editions),
default=bp.EbookAccess.NO_EBOOK,
)
@property
def has_fulltext(self) -> bool:
return any(e.has_fulltext for e in self._solr_editions)
@property
def public_scan_b(self) -> bool:
return any(e.public_scan_b for e in self._solr_editions)
@cached_property
def ia(self) -> list[str]:
return [cast(str, e.ia) for e in self._ia_editions]
@property
def ia_collection(self) -> list[str]:
return sorted(uniq(c for e in self._solr_editions for c in e.ia_collection))
@property
def ia_collection_s(self) -> str:
return ';'.join(self.ia_collection)
@cached_property
def _ia_editions(self) -> list[EditionSolrBuilder]:
def get_ia_sorting_key(ed: EditionSolrBuilder) -> tuple[int, str]:
return (
# -1 to sort in reverse and make public first
-1 * ed.ebook_access.value,
# De-prioritize google scans because they are lower quality
'0: non-goog' if not cast(str, ed.ia).endswith('goog') else '1: goog',
)
return sorted((e for e in self._solr_editions if e.ia), key=get_ia_sorting_key)
# --- These should be deprecated and removed ---
@property
def lending_edition_s(self) -> str | None:
if (
not self._ia_editions
or self._ia_editions[0].ebook_access <= bp.EbookAccess.PRINTDISABLED
):
return None
else:
return extract_edition_olid(self._ia_editions[0].key)
@property
def lending_identifier_s(self) -> str | None:
if (
not self._ia_editions
or self._ia_editions[0].ebook_access <= bp.EbookAccess.PRINTDISABLED
):
return None
else:
return self._ia_editions[0].ia
@property
def printdisabled_s(self) -> str | None:
printdisabled_eds = [
ed for ed in self._ia_editions if 'printdisabled' in ed.ia_collection
]
if not printdisabled_eds:
return None
else:
return ';'.join(
cast(str, extract_edition_olid(ed.key)) for ed in printdisabled_eds
)
# ^^^ These should be deprecated and removed ^^^
def build_ratings(self) -> WorkRatingsSummary | None:
return self._data_provider.get_work_ratings(self._work['key'])
def build_reading_log(self) -> WorkReadingLogSolrSummary | None:
return self._data_provider.get_work_reading_log(self._work['key'])
@cached_property
def cover_i(self) -> int | None:
work_cover_id = next(
itertools.chain(
(
cover_id
for cover_id in self._work.get('covers', [])
if cover_id != -1
),
[None],
)
)
return work_cover_id or next(
(ed.cover_i for ed in self._solr_editions if ed.cover_i is not None), None
)
@property
def cover_edition_key(self) -> str | None:
if self.cover_i is None:
return None
return next(
(
extract_edition_olid(ed['key'])
for ed in self._editions
if self.cover_i in ed.get('covers', [])
),
None,
)
@property
def first_sentence(self) -> set[str]:
return {
s['value'] if isinstance(s, dict) else s
for ed in self._editions
if (s := ed.get('first_sentence', None))
}
@property
def publisher(self) -> set[str]:
return {publisher for ed in self._solr_editions for publisher in ed.publisher}
@property
def format(self) -> set[str]:
return {ed.format for ed in self._solr_editions if ed.format}
@property
def language(self) -> set[str]:
return {lang for ed in self._solr_editions for lang in ed.language}
def build_legacy_ia_fields(self) -> dict:
ia_loaded_id = set()
ia_box_id = set()
for e in self._editions:
# When do we write these to the actual edition?? This code might
# be dead.
if e.get('ia_loaded_id'):
if isinstance(e['ia_loaded_id'], str):
ia_loaded_id.add(e['ia_loaded_id'])
else:
try:
assert isinstance(e['ia_loaded_id'], list)
assert isinstance(e['ia_loaded_id'][0], str)
except AssertionError:
logger.error(
"AssertionError: ia=%s, ia_loaded_id=%s",
e.get("ia"),
e['ia_loaded_id'],
)
raise
ia_loaded_id.update(e['ia_loaded_id'])
if e.get('ia_box_id'):
if isinstance(e['ia_box_id'], str):
ia_box_id.add(e['ia_box_id'])
else:
try:
assert isinstance(e['ia_box_id'], list)
assert isinstance(e['ia_box_id'][0], str)
except AssertionError:
logger.error("AssertionError: %s", e['key'])
raise
ia_box_id.update(e['ia_box_id'])
doc = {}
if ia_loaded_id:
doc['ia_loaded_id'] = list(ia_loaded_id)
if ia_box_id:
doc['ia_box_id'] = list(ia_box_id)
return doc
@cached_property
def author_key(self) -> list[str]:
return [
m.group(1)
for m in (re_author_key.match(a['key']) for a in self._authors)
if m
]
@cached_property
def author_name(self) -> list[str]:
return [a.get('name', '') for a in self._authors]
@cached_property
def author_alternative_name(self) -> set[str]:
return {
alt_name for a in self._authors for alt_name in a.get('alternate_names', [])
}
@cached_property
def author_facet(self) -> list[str]:
return [f'{key} {name}' for key, name in zip(self.author_key, self.author_name)]
def build_identifiers(self) -> dict[str, list[str]]:
identifiers: dict[str, list[str]] = defaultdict(list)
for ed in self._solr_editions:
for k, v in ed.identifiers.items():
identifiers[k] += v
return dict(identifiers)
def build_subjects(self) -> dict:
doc: dict = {}
field_map = {
'subjects': 'subject',
'subject_places': 'place',
'subject_times': 'time',
'subject_people': 'person',
}
for work_field, subject_type in field_map.items():
if not self._work.get(work_field):
continue
doc |= {
subject_type: self._work[work_field],
f'{subject_type}_facet': self._work[work_field],
f'{subject_type}_key': [str_to_key(s) for s in self._work[work_field]],
}
return doc
def get_edition_ddcs(ed: dict):
ddcs: list[str] = ed.get('dewey_decimal_class', [])
if len(ddcs) > 1:
# In DDC, `92` or `920` is sometimes appended to a DDC to denote
# "Biography". We have a clause to handle this if it's part of the same
# DDC (See utils/ddc.py), but some books have it as an entirely separate
# DDC; e.g.:
# * [ "979.4/830046872073", "92" ]
# https://openlibrary.org/books/OL3029363M.json
# * [ "813/.54", "B", "92" ]
# https://openlibrary.org/books/OL2401343M.json
# * [ "092", "823.914" ]
# https://openlibrary.org/books/OL24767417M
ddcs = [ddc for ddc in ddcs if ddc not in ('92', '920', '092')]
return ddcs
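# A minimal sketch of how stand-alone biography DDCs are dropped; the sample
# edition dicts and the `_demo_` helper are invented for illustration only.
def _demo_get_edition_ddcs() -> None:
    assert get_edition_ddcs({'dewey_decimal_class': ['813/.54', '92']}) == ['813/.54']
    # A lone value is returned unchanged, even if it is a biography DDC.
    assert get_edition_ddcs({'dewey_decimal_class': ['92']}) == ['92']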
| 23,289 | Python | .py | 587 | 29.529813 | 98 | 0.562085 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
209 | abstract.py | internetarchive_openlibrary/openlibrary/solr/updater/abstract.py | from collections.abc import Iterable
from typing import cast
import openlibrary.book_providers as bp
from openlibrary.solr.data_provider import DataProvider
from openlibrary.solr.solr_types import SolrDocument
from openlibrary.solr.utils import SolrUpdateRequest
class AbstractSolrUpdater:
key_prefix: str
thing_type: str
data_provider: DataProvider
def __init__(self, data_provider: DataProvider):
self.data_provider = data_provider
def key_test(self, key: str) -> bool:
return key.startswith(self.key_prefix)
async def preload_keys(self, keys: Iterable[str]):
await self.data_provider.preload_documents(keys)
async def update_key(self, thing: dict) -> tuple[SolrUpdateRequest, list[str]]:
"""
:return: (update, new keys to update)
"""
raise NotImplementedError()
class AbstractSolrBuilder:
def build(self) -> SolrDocument:
# Iterate over all non-_ properties of this instance and add them to the
# document.
# Allow @property and @cached_property though!
doc: dict = {}
for field in dir(self):
if field.startswith('_'):
continue
val = getattr(self, field)
if callable(val):
continue
elif val is None or (isinstance(val, Iterable) and not val):
# Skip if empty list/string
continue
elif isinstance(val, set):
doc[field] = list(val)
elif isinstance(val, bp.EbookAccess):
doc[field] = val.to_solr_str()
elif isinstance(val, (str, int, float, bool, list)):
doc[field] = val
else:
raise ValueError(f'Unknown type for {field}: {type(val)}')
return cast(SolrDocument, doc)
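# A minimal sketch of how a subclass's public properties become Solr fields:
# None/empty values are dropped and sets become lists. The subclass and its
# values are invented for illustration and are not used by the real updaters.
class _DemoSolrBuilder(AbstractSolrBuilder):
    @property
    def key(self) -> str:
        return '/works/OL1W'
    @property
    def type(self) -> str:
        return 'work'
    @property
    def subject(self) -> set[str]:
        return {'Fiction'}
    @property
    def subtitle(self) -> str | None:
        return None  # omitted from the built document
# _DemoSolrBuilder().build() == {'key': '/works/OL1W', 'type': 'work', 'subject': ['Fiction']}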
| 1,838 | Python | .py | 45 | 31.622222 | 83 | 0.629277 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
210 | list.py | internetarchive_openlibrary/openlibrary/solr/updater/list.py | from collections import defaultdict
import re
from typing import cast
import httpx
from openlibrary.plugins.openlibrary.lists import (
SeedType,
seed_key_to_seed_type,
)
from openlibrary.plugins.worksearch.subjects import SubjectType
from openlibrary.solr.solr_types import SolrDocument
from openlibrary.solr.updater.abstract import AbstractSolrBuilder, AbstractSolrUpdater
from openlibrary.solr.utils import SolrUpdateRequest, get_solr_base_url, str_to_key
class ListSolrUpdater(AbstractSolrUpdater):
key_prefix = '/lists/'
thing_type = '/type/list'
def key_test(self, key: str) -> bool:
return bool(re.match(r'^(/people/[^/]+)?/lists/[^/]+$', key))
async def update_key(self, list: dict) -> tuple[SolrUpdateRequest, list[str]]:
seeds = ListSolrBuilder(list).seed
lst = ListSolrBuilder(list, await fetch_seeds_facets(seeds))
doc = lst.build()
return SolrUpdateRequest(adds=[doc]), []
async def fetch_seeds_facets(seeds: list[str]):
base_url = get_solr_base_url() + '/select'
facet_fields: list[SubjectType] = ['subject', 'time', 'person', 'place']
seeds_by_type: defaultdict[SeedType, list] = defaultdict(list)
for seed in seeds:
seeds_by_type[seed_key_to_seed_type(seed)].append(seed)
query: list[str] = []
for seed_type, seed_values in seeds_by_type.items():
match seed_type:
case 'edition' | 'author':
edition_olids = " OR ".join(key.split('/')[-1] for key in seed_values)
query.append(f'edition_key:( {edition_olids} )')
case 'work':
seed_keys = " OR ".join(f'"{key}"' for key in seed_values)
query.append(f'key:( {seed_keys} )')
case 'subject':
pass
case _:
raise NotImplementedError(f'Unknown seed type {seed_type}')
async with httpx.AsyncClient() as client:
response = await client.post(
base_url,
timeout=30,
data={
'wt': 'json',
'json.nl': 'arrarr',
'q': ' OR '.join(query),
'fq': 'type:work',
'rows': 0,
'facet': 'true',
'facet.mincount': 1,
'facet.limit': 50,
'facet.field': [f"{field}_facet" for field in facet_fields],
},
)
return response.json()
class ListSolrBuilder(AbstractSolrBuilder):
def __init__(self, list: dict, solr_reply: dict | None = None):
self._list = list
self._solr_reply = solr_reply
def build(self) -> SolrDocument:
doc = cast(dict, super().build())
doc |= self.build_subjects()
return cast(SolrDocument, doc)
def build_subjects(self) -> dict:
if not self._solr_reply:
return {}
doc: dict = {}
for facet, counts in self._solr_reply['facet_counts']['facet_fields'].items():
subject_type = cast(SubjectType, facet.split('_')[0])
subjects = [s for s, count in counts]
doc |= {
subject_type: subjects,
f'{subject_type}_facet': subjects,
f'{subject_type}_key': [str_to_key(s) for s in subjects],
}
return doc
@property
def key(self) -> str:
return self._list['key']
@property
def type(self) -> str:
return 'list'
@property
def name(self) -> str | None:
return self._list.get('name')
@property
def seed(self) -> list[str]:
return [
(
(seed.get('key') or seed['thing']['key'])
if isinstance(seed, dict)
else seed
)
for seed in self._list.get('seeds', [])
]
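# A minimal sketch of seed extraction from a stored list document; the list
# key, seeds and `_demo_` helper are invented for illustration only.
def _demo_list_seeds() -> list[str]:
    example_list = {
        'key': '/people/example/lists/OL1L',
        'name': 'Example reading list',
        'seeds': [
            {'key': '/works/OL45883W'},
            {'thing': {'key': '/books/OL7353617M'}},
            'subject:science_fiction',
        ],
    }
    # -> ['/works/OL45883W', '/books/OL7353617M', 'subject:science_fiction']
    return ListSolrBuilder(example_list).seed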
| 3,819 | Python | .py | 98 | 29.44898 | 86 | 0.571043 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
211 | get_ia.py | internetarchive_openlibrary/openlibrary/catalog/get_ia.py | import requests
from infogami import config
from lxml import etree
from time import sleep
from openlibrary.catalog.marc.marc_binary import MarcBinary
from openlibrary.catalog.marc.marc_xml import MarcXml
from openlibrary.core import ia
import lxml.etree
IA_BASE_URL = config.get('ia_base_url')
IA_DOWNLOAD_URL = f'{IA_BASE_URL}/download/'
MAX_MARC_LENGTH = 100000
def urlopen_keep_trying(url: str, headers=None, **kwargs):
"""Tries to request the url three times, raises HTTPError if 403, 404, or 416. Returns a requests.Response"""
for i in range(3):
try:
resp = requests.get(url, headers=headers, **kwargs)
resp.raise_for_status()
return resp
except requests.HTTPError as error:
if error.response and error.response.status_code in (403, 404, 416):
raise
sleep(2)
def get_marc_record_from_ia(
identifier: str, ia_metadata: dict | None = None
) -> MarcBinary | MarcXml | None:
"""
    Takes an IA identifier and optional IA metadata and returns a MARC record instance, or None if the item has no MARC record.
08/2018: currently called by openlibrary/plugins/importapi/code.py
when the /api/import/ia endpoint is POSTed to.
:param ia_metadata: The full ia metadata; e.g. https://archive.org/metadata/goody,
not https://archive.org/metadata/goody/metadata
"""
if ia_metadata is None:
ia_metadata = ia.get_metadata(identifier)
filenames = ia_metadata['_filenames'] # type: ignore[index]
marc_xml_filename = identifier + '_marc.xml'
marc_bin_filename = identifier + '_meta.mrc'
item_base = f'{IA_DOWNLOAD_URL}{identifier}/'
# Try marc.bin first
if marc_bin_filename in filenames:
data = urlopen_keep_trying(item_base + marc_bin_filename).content
return MarcBinary(data)
# If that fails, try marc.xml
if marc_xml_filename in filenames:
data = urlopen_keep_trying(item_base + marc_xml_filename).content
root = etree.fromstring(
data, parser=lxml.etree.XMLParser(resolve_entities=False)
)
return MarcXml(root)
return None
def get_from_archive_bulk(locator):
"""
Gets a single binary MARC record from within an Archive.org
bulk MARC item, and return the offset and length of the next
item.
If offset or length are `None`, then there is no next record.
:param str locator: Locator ocaid/filename:offset:length
    :rtype: (bytes|None, int|None, int|None)
:return: (Binary MARC data, Next record offset, Next record length)
"""
if locator.startswith('marc:'):
locator = locator[5:]
filename, offset, length = locator.split(":")
offset = int(offset)
length = int(length)
r0, r1 = offset, offset + length - 1
# get the next record's length in this request
r1 += 5
url = IA_DOWNLOAD_URL + filename
assert 0 < length < MAX_MARC_LENGTH
response = urlopen_keep_trying(url, headers={'Range': 'bytes=%d-%d' % (r0, r1)})
data = None
if response:
# this truncates the data to MAX_MARC_LENGTH, but is probably not necessary here?
data = response.content[:MAX_MARC_LENGTH]
len_in_rec = int(data[:5])
if len_in_rec != length:
data, next_offset, next_length = get_from_archive_bulk(
'%s:%d:%d' % (filename, offset, len_in_rec)
)
else:
next_length = data[length:]
data = data[:length]
if len(next_length) == 5:
# We have data for the next record
next_offset = offset + len_in_rec
next_length = int(next_length)
else:
next_offset = next_length = None
return data, next_offset, next_length
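# A minimal sketch (network access assumed) of walking every record in a bulk
# MARC item by chaining the returned offset/length pairs; the filename and the
# starting offset/length passed in are hypothetical, and the helper is
# illustrative only.
def _demo_iterate_bulk_marc(filename, offset, length):
    while offset is not None and length is not None:
        data, offset, length = get_from_archive_bulk(f'{filename}:{offset}:{length}')
        if data:
            yield data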
| 3,774 | Python | .py | 90 | 34.677778 | 114 | 0.651842 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
212 | match.py | internetarchive_openlibrary/openlibrary/catalog/add_book/match.py | import re
import unicodedata
import web
# fields needed for matching:
# title, subtitle, isbn, publish_country, lccn, publishers, publish_date, number_of_pages, authors
re_amazon_title_paren = re.compile(r'^(.*) \([^)]+?\)$')
re_brackets = re.compile(r'^(.+)\[.*?\]$')
re_whitespace_and_punct = re.compile(r'[-\s,;:.]+')
ISBN_MATCH = 85
THRESHOLD = 875
def editions_match(rec: dict, existing) -> bool:
"""
Converts the existing edition into a comparable dict and performs a
thresholded comparison to decide whether they are the same.
Used by add_book.load() -> add_book.find_match() to check whether two
editions match.
:param dict rec: Import record candidate
:param Thing existing: Edition object to be tested against candidate
:rtype: bool
:return: Whether candidate is sufficiently the same as the 'existing' edition
"""
thing_type = existing.type.key
if thing_type == '/type/delete':
return False
assert thing_type == '/type/edition'
rec2 = {}
for f in (
'title',
'subtitle',
'isbn',
'isbn_10',
'isbn_13',
'lccn',
'publish_country',
'publishers',
'publish_date',
):
if existing.get(f):
rec2[f] = existing[f]
rec2['authors'] = []
# Transfer authors as Dicts str: str
for a in existing.get_authors():
author = {'name': a['name']}
if birth := a.get('birth_date'):
author['birth_date'] = birth
if death := a.get('death_date'):
author['death_date'] = death
rec2['authors'].append(author)
return threshold_match(rec, rec2, THRESHOLD)
def normalize(s: str) -> str:
"""
Normalizes a title for matching purposes, not display,
by lowercasing, unicode -> NFC,
stripping extra whitespace and punctuation, and replacing ampersands.
"""
s = unicodedata.normalize('NFC', s)
s = s.replace(' & ', ' and ')
s = re_whitespace_and_punct.sub(' ', s.lower()).strip()
return s
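# A minimal sketch of normalize(); the sample title is invented.
def _demo_normalize() -> None:
    assert normalize('War & Peace: A Novel') == 'war and peace a novel'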
def mk_norm(s: str) -> str:
"""
Normalizes titles and strips ALL spaces and small words
to aid with string comparisons of two titles.
Used in comparing Work titles.
:param str s: A book title to normalize and strip.
:return: a lowercase string with no spaces, containing the main words of the title.
"""
if m := re_brackets.match(s):
s = m.group(1)
norm = normalize(s).replace(' and ', '')
return strip_articles(norm).replace(' ', '')
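# A minimal sketch of mk_norm(): articles, bracketed text, "and" and spaces are
# all stripped, so common title variants collapse to the same string. The
# sample titles are invented.
def _demo_mk_norm() -> None:
    assert mk_norm('The War of the Worlds') == 'waroftheworlds'
    assert mk_norm('War of the Worlds [a new translation]') == mk_norm('The War of the Worlds')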
def strip_articles(s: str) -> str:
"""
Strip articles for matching purposes.
TODO: Expand using
https://web.archive.org/web/20230320141510/https://www.loc.gov/marc/bibliographic/bdapndxf.html
or something sensible.
"""
if s.lower().startswith('the '):
s = s[4:]
elif s.lower().startswith('a '):
s = s[2:]
return s
def add_db_name(rec: dict) -> None:
"""
db_name = Author name followed by dates.
    Adds 'db_name' in place for each author.
"""
if 'authors' not in rec:
return
for a in rec['authors'] or []:
date = None
if 'date' in a:
assert 'birth_date' not in a
assert 'death_date' not in a
date = a['date']
elif 'birth_date' in a or 'death_date' in a:
date = a.get('birth_date', '') + '-' + a.get('death_date', '')
a['db_name'] = ' '.join([a['name'], date]) if date else a['name']
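# A minimal sketch of add_db_name(); the author data are invented.
def _demo_add_db_name() -> None:
    rec = {
        'authors': [
            {'name': 'Mark Twain', 'birth_date': '1835', 'death_date': '1910'},
            {'name': 'Anonymous'},
        ]
    }
    add_db_name(rec)
    assert rec['authors'][0]['db_name'] == 'Mark Twain 1835-1910'
    assert rec['authors'][1]['db_name'] == 'Anonymous'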
def expand_record(rec: dict) -> dict[str, str | list[str]]:
"""
Returns an expanded representation of an edition dict,
usable for accurate comparisons between existing and new
records.
:param dict rec: Import edition representation
    :return: An expanded version of an edition dict with:
        - more title variants (normalized and short),
        - all ISBNs gathered under "isbn", and
        - authors given a db_name (name with dates).
"""
rec['full_title'] = rec['title']
if subtitle := rec.get('subtitle'):
rec['full_title'] += ' ' + subtitle
expanded_rec = build_titles(rec['full_title'])
expanded_rec['isbn'] = []
for f in 'isbn', 'isbn_10', 'isbn_13':
expanded_rec['isbn'].extend(rec.get(f, []))
if 'publish_country' in rec and rec['publish_country'] not in (
' ',
'|||',
):
expanded_rec['publish_country'] = rec['publish_country']
for f in (
'lccn',
'publishers',
'publish_date',
'number_of_pages',
'authors',
'contribs',
):
if f in rec:
expanded_rec[f] = rec[f]
add_db_name(expanded_rec)
return expanded_rec
def build_titles(title: str):
"""
Uses a full title to create normalized and short title versions.
Used for expanding a set of title variants for matching,
not for storing on records or display.
:param str title: Full title of an edition
:rtype: dict
:return: An expanded set of title variations
"""
normalized_title = normalize(title)
titles = [ # TODO: how different and helpful are these titles variants?
title,
normalized_title,
strip_articles(normalized_title),
]
if m := re_amazon_title_paren.match(normalized_title):
titles.append(m.group(1))
titles.append(strip_articles(m.group(1)))
return {
'full_title': title,
'normalized_title': normalized_title,
'titles': list(set(titles)),
'short_title': normalized_title[:25],
}
def within(a, b, distance):
return abs(a - b) <= distance
def compare_country(e1: dict, e2: dict):
field = 'publish_country'
if field not in e1 or field not in e2:
return (field, 'value missing', 0)
if e1[field] == e2[field]:
return (field, 'match', 40)
# West Berlin (wb) == Germany (gw)
if e1[field] in ('gw ', 'wb ') and e2[field] in ('gw ', 'wb '):
return (field, 'match', 40)
return (field, 'mismatch', -205)
def compare_lccn(e1: dict, e2: dict):
field = 'lccn'
if field not in e1 or field not in e2:
return (field, 'value missing', 0)
if e1[field] == e2[field]:
return (field, 'match', 200)
return (field, 'mismatch', -320)
def compare_date(e1: dict, e2: dict):
if 'publish_date' not in e1 or 'publish_date' not in e2:
return ('date', 'value missing', 0)
if e1['publish_date'] == e2['publish_date']:
return ('date', 'exact match', 200)
try:
e1_pub = int(e1['publish_date'])
e2_pub = int(e2['publish_date'])
if within(e1_pub, e2_pub, 2):
return ('date', '+/-2 years', -25)
else:
return ('date', 'mismatch', -250)
    except (ValueError, TypeError):
return ('date', 'mismatch', -250)
def compare_isbn(e1: dict, e2: dict):
if len(e1['isbn']) == 0 or len(e2['isbn']) == 0:
return ('ISBN', 'missing', 0)
for i in e1['isbn']:
for j in e2['isbn']:
if i == j:
return ('ISBN', 'match', ISBN_MATCH)
return ('ISBN', 'mismatch', -225)
# 450 + 200 + 85 + 200
def level1_match(e1: dict, e2: dict):
"""
:param dict e1: Expanded Edition, output of expand_record()
:param dict e2: Expanded Edition, output of expand_record()
:rtype: list
:return: a list of tuples (field/category, result str, score int)
"""
score = []
if e1['short_title'] == e2['short_title']:
score.append(('short-title', 'match', 450))
else:
score.append(('short-title', 'mismatch', 0))
score.append(compare_lccn(e1, e2))
score.append(compare_date(e1, e2))
score.append(compare_isbn(e1, e2))
return score
def level2_match(e1: dict, e2: dict):
"""
:param dict e1: Expanded Edition, output of expand_record()
:param dict e2: Expanded Edition, output of expand_record()
:rtype: list
:return: a list of tuples (field/category, result str, score int)
"""
score = []
score.append(compare_date(e1, e2))
score.append(compare_country(e1, e2))
score.append(compare_isbn(e1, e2))
score.append(compare_title(e1, e2))
score.append(compare_lccn(e1, e2))
if page_score := compare_number_of_pages(e1, e2):
score.append(page_score)
score.append(compare_publisher(e1, e2))
score.append(compare_authors(e1, e2))
return score
def compare_author_fields(e1_authors, e2_authors):
for i in e1_authors:
for j in e2_authors:
if normalize(i['db_name']) == normalize(j['db_name']):
return True
if normalize(i['name']).strip('.') == normalize(j['name']).strip('.'):
return True
return False
def compare_author_keywords(e1_authors, e2_authors):
max_score = 0
for i in e1_authors:
for j in e2_authors:
percent, ordered = keyword_match(i['name'], j['name'])
if percent > 0.50:
score = percent * 80
if ordered:
score += 10
max_score = max(score, max_score)
if max_score:
return ('authors', 'keyword match', max_score)
else:
return ('authors', 'mismatch', -200)
def compare_authors(e1: dict, e2: dict):
"""
Compares the authors of two edition representations and
returns a evaluation and score.
:param dict e1: Expanded Edition, output of expand_record()
:param dict e2: Expanded Edition, output of expand_record()
:rtype: tuple
    :return: (field, message, score) tuple
"""
if 'authors' in e1 and 'authors' in e2: # noqa: SIM102
if compare_author_fields(e1['authors'], e2['authors']):
return ('authors', 'exact match', 125)
if 'authors' in e1 and 'contribs' in e2: # noqa: SIM102
if compare_author_fields(e1['authors'], e2['contribs']):
return ('authors', 'exact match', 125)
if 'contribs' in e1 and 'authors' in e2: # noqa: SIM102
if compare_author_fields(e1['contribs'], e2['authors']):
return ('authors', 'exact match', 125)
if 'authors' in e1 and 'authors' in e2:
return compare_author_keywords(e1['authors'], e2['authors'])
if 'authors' not in e1 and 'authors' not in e2:
if (
'contribs' in e1
and 'contribs' in e2
and compare_author_fields(e1['contribs'], e2['contribs'])
):
return ('authors', 'exact match', 125)
return ('authors', 'no authors', 75)
return ('authors', 'field missing from one record', -25)
def title_replace_amp(amazon):
return normalize(amazon['full-title'].replace(" & ", " and ")).lower()
def substr_match(a: str, b: str):
return a.find(b) != -1 or b.find(a) != -1
def keyword_match(in1, in2):
s1, s2 = (i.split() for i in (in1, in2))
s1_set = set(s1)
s2_set = set(s2)
match = s1_set & s2_set
if len(s1) == 0 and len(s2) == 0:
return 0, True
ordered = [x for x in s1 if x in match] == [x for x in s2 if x in match]
return float(len(match)) / max(len(s1), len(s2)), ordered
def compare_title(amazon, marc):
amazon_title = amazon['normalized_title'].lower()
marc_title = normalize(marc['full_title']).lower()
short = False
if len(amazon_title) < 9 or len(marc_title) < 9:
short = True
if not short:
for a in amazon['titles']:
for m in marc['titles']:
if a == m:
return ('full-title', 'exact match', 600)
for a in amazon['titles']:
for m in marc['titles']:
if substr_match(a, m):
                return ('full-title', 'contained within other title', 350)
max_score = 0
for a in amazon['titles']:
for m in marc['titles']:
percent, ordered = keyword_match(a, m)
score = percent * 450
if ordered:
score += 50
if score and score > max_score:
max_score = score
if max_score:
return ('full-title', 'keyword match', max_score)
elif short:
return ('full-title', 'shorter than 9 characters', 0)
else:
return ('full-title', 'mismatch', -600)
def compare_number_of_pages(amazon, marc):
if 'number_of_pages' not in amazon or 'number_of_pages' not in marc:
return
amazon_pages = amazon['number_of_pages']
marc_pages = marc['number_of_pages']
if amazon_pages == marc_pages:
if amazon_pages > 10:
return ('pagination', 'match exactly and > 10', 100)
else:
return ('pagination', 'match exactly and < 10', 50)
elif within(amazon_pages, marc_pages, 10):
if amazon_pages > 10 and marc_pages > 10:
return ('pagination', 'match within 10 and both are > 10', 50)
else:
return ('pagination', 'match within 10 and either are < 10', 20)
else:
return ('pagination', 'non-match (by more than 10)', -225)
def short_part_publisher_match(p1, p2):
pub1 = p1.split()
pub2 = p2.split()
if len(pub1) == 1 or len(pub2) == 1:
return False
return all(substr_match(i, j) for i, j in zip(pub1, pub2))
def compare_publisher(e1: dict, e2: dict):
if 'publishers' in e1 and 'publishers' in e2:
for e1_pub in e1['publishers']:
e1_norm = normalize(e1_pub)
for e2_pub in e2['publishers']:
e2_norm = normalize(e2_pub)
if e1_norm == e2_norm:
return ('publisher', 'match', 100)
elif substr_match(e1_norm, e2_norm) or substr_match(
e1_norm.replace(' ', ''), e2_norm.replace(' ', '')
):
return ('publisher', 'occur within the other', 100)
elif short_part_publisher_match(e1_norm, e2_norm):
return ('publisher', 'match', 100)
return ('publisher', 'mismatch', -51)
if 'publishers' not in e1 or 'publishers' not in e2:
return ('publisher', 'either missing', 0)
def threshold_match(
rec1: dict, rec2: dict, threshold: int, debug: bool = False
) -> bool:
"""
Determines (according to a threshold) whether two edition representations are
sufficiently the same. Used when importing new books.
    :param dict rec1: dict representing an import schema edition
    :param dict rec2: dict representing an import schema edition
:param int threshold: each field match or difference adds or subtracts a score. Example: 875 for standard edition matching
:rtype: bool
:return: Whether two editions have sufficient fields in common to be considered the same
"""
e1 = expand_record(rec1)
e2 = expand_record(rec2)
level1 = level1_match(e1, e2)
total = sum(i[2] for i in level1)
if debug:
print(f"E1: {e1}\nE2: {e2}", flush=True)
print(f"TOTAL 1 = {total} : {level1}", flush=True)
if total >= threshold:
return True
level2 = level2_match(e1, e2)
total = sum(i[2] for i in level2)
if debug:
print(f"TOTAL 2 = {total} : {level2}", flush=True)
return total >= threshold
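# A minimal worked example of threshold matching; both records are invented.
# The shared title, date, ISBN, publisher and author push the level-2 score
# (200 + 85 + 600 + 100 + 125) past THRESHOLD, so the two records are treated
# as the same edition. The `_demo_` helper is illustrative only.
def _demo_threshold_match() -> bool:
    rec_a = {
        'title': 'The Adventures of Tom Sawyer',
        'publishers': ['Harper'],
        'publish_date': '1920',
        'isbn_10': ['1111111111'],
        'authors': [{'name': 'Mark Twain'}],
    }
    rec_b = {
        'title': 'The Adventures of Tom Sawyer',
        'publishers': ['Harper & Brothers'],
        'publish_date': '1920',
        'isbn_10': ['1111111111'],
        'authors': [{'name': 'Mark Twain'}],
    }
    return threshold_match(rec_a, rec_b, THRESHOLD)  # -> True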
| 15,119 | Python | .py | 394 | 31.147208 | 126 | 0.59828 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
213 | __init__.py | internetarchive_openlibrary/openlibrary/catalog/add_book/__init__.py | """Module to load books into Open Library.
This is used to load books from various MARC sources, including
Internet Archive.
For loading a book, the available metadata is compiled as a dict,
called a record internally. Here is a sample record:
{
"title": "The Adventures of Tom Sawyer",
"source_records": ["ia:TheAdventuresOfTomSawyer_201303"],
"authors": [{
"name": "Mark Twain"
}]
}
The title and source_records fields are mandatory.
A record is loaded by calling the load function.
record = {...}
response = load(record)
"""
import itertools
import re
from typing import TYPE_CHECKING, Any, Final
import web
from collections import defaultdict
from copy import copy
from time import sleep
import requests
from infogami import config
from openlibrary import accounts
from openlibrary.catalog.utils import (
EARLIEST_PUBLISH_YEAR_FOR_BOOKSELLERS,
get_non_isbn_asin,
get_publication_year,
is_independently_published,
is_promise_item,
needs_isbn_and_lacks_one,
publication_too_old_and_not_exempt,
published_in_future_year,
)
from openlibrary.core import lending
from openlibrary.plugins.upstream.utils import strip_accents, safeget
from openlibrary.utils import uniq, dicthash
from openlibrary.utils.isbn import normalize_isbn
from openlibrary.utils.lccn import normalize_lccn
from openlibrary.catalog.add_book.load_book import (
build_query,
east_in_by_statement,
import_author,
InvalidLanguage,
)
from openlibrary.catalog.add_book.match import editions_match, mk_norm
if TYPE_CHECKING:
from openlibrary.plugins.upstream.models import Edition
re_normalize = re.compile('[^[:alphanum:] ]', re.U)
re_lang = re.compile('^/languages/([a-z]{3})$')
ISBD_UNIT_PUNCT = ' : ' # ISBD cataloging title-unit separator punctuation
SUSPECT_PUBLICATION_DATES: Final = ["1900", "January 1, 1900", "1900-01-01"]
SOURCE_RECORDS_REQUIRING_DATE_SCRUTINY: Final = ["amazon", "bwb", "promise"]
type_map = {
'description': 'text',
'notes': 'text',
'number_of_pages': 'int',
}
class CoverNotSaved(Exception):
def __init__(self, f):
self.f = f
def __str__(self):
return "coverstore responded with: '%s'" % self.f
class RequiredField(Exception):
def __init__(self, f):
self.f = f
def __str__(self):
return "missing required field(s): %s" % ", ".join(self.f)
class PublicationYearTooOld(Exception):
def __init__(self, year):
self.year = year
def __str__(self):
return f"publication year is too old (i.e. earlier than {EARLIEST_PUBLISH_YEAR_FOR_BOOKSELLERS}): {self.year}"
class PublishedInFutureYear(Exception):
def __init__(self, year):
self.year = year
def __str__(self):
return f"published in future year: {self.year}"
class IndependentlyPublished(Exception):
def __init__(self):
pass
def __str__(self):
return "book is independently published"
class SourceNeedsISBN(Exception):
def __init__(self):
pass
def __str__(self):
return "this source needs an ISBN"
# don't use any of these as work titles
bad_titles = {
'Publications',
'Works. English',
'Missal',
'Works',
'Report',
'Letters',
'Calendar',
'Bulletin',
'Plays',
'Sermons',
'Correspondence',
'Bill',
'Bills',
'Selections',
'Selected works',
'Selected works. English',
'The Novels',
'Laws, etc',
}
subject_fields = ['subjects', 'subject_places', 'subject_times', 'subject_people']
def normalize(s):
"""Strip non-alphanums and truncate at 25 chars."""
norm = strip_accents(s).lower()
norm = norm.replace(' and ', ' ')
if norm.startswith('the '):
norm = norm[4:]
elif norm.startswith('a '):
norm = norm[2:]
# strip bracketed text
norm = re.sub(r' ?\(.*\)', '', norm)
return norm.replace(' ', '')[:25]
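# A minimal sketch of the title normalization used for work matching; the
# sample title is invented.
def _demo_normalize_title() -> None:
    assert normalize('The Count of Monte Cristo (Illustrated)') == 'countofmontecristo'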
def is_redirect(thing):
"""
:param Thing thing:
:rtype: bool
"""
if not thing:
return False
return thing.type.key == '/type/redirect'
def get_title(e):
if not e.get('work_titles'):
return e['title']
wt = e['work_titles'][0]
    return e['title'] if wt in bad_titles else wt
def split_subtitle(full_title):
"""
Splits a title into (title, subtitle),
strips parenthetical tags. Used for bookseller
catalogs which do not pre-separate subtitles.
:param str full_title:
:rtype: (str, str | None)
:return: (title, subtitle | None)
"""
# strip parenthetical blocks wherever they occur
# can handle 1 level of nesting
re_parens_strip = re.compile(r'\(([^\)\(]*|[^\(]*\([^\)]*\)[^\)]*)\)')
clean_title = re.sub(re_parens_strip, '', full_title)
titles = clean_title.split(':')
subtitle = titles.pop().strip() if len(titles) > 1 else None
title = ISBD_UNIT_PUNCT.join([unit.strip() for unit in titles])
return (title, subtitle)
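# A minimal sketch of split_subtitle(); the sample titles are invented.
def _demo_split_subtitle() -> None:
    assert split_subtitle('Flatland: A Romance of Many Dimensions (Illustrated)') == (
        'Flatland',
        'A Romance of Many Dimensions',
    )
    assert split_subtitle('Flatland') == ('Flatland', None)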
def find_matching_work(e):
"""
Looks for an existing Work representing the new import edition by
comparing normalized titles for every work by each author of the current edition.
Returns the first match found, or None.
:param dict e: An OL edition suitable for saving, has a key, and has full Authors with keys
but has not yet been saved.
:rtype: None or str
:return: the matched work key "/works/OL..W" if found
"""
seen = set()
for a in e['authors']:
q = {'type': '/type/work', 'authors': {'author': {'key': a['key']}}}
work_keys = list(web.ctx.site.things(q))
for wkey in work_keys:
w = web.ctx.site.get(wkey)
if wkey in seen:
continue
seen.add(wkey)
if not w.get('title'):
continue
if mk_norm(w['title']) == mk_norm(get_title(e)):
assert w.type.key == '/type/work'
return wkey
def build_author_reply(authors_in, edits, source):
"""
Steps through an import record's authors, and creates new records if new,
adding them to 'edits' to be saved later.
:param list authors_in: import author dicts [{"name:" "Bob"}, ...], maybe dates
:param list edits: list of Things to be saved later. Is modified by this method.
:param str source: Source record e.g. marc:marc_ex/part01.dat:26456929:680
:rtype: tuple
:return: (list, list) authors [{"key": "/author/OL..A"}, ...], author_reply
"""
authors = []
author_reply = []
for a in authors_in:
new_author = 'key' not in a
if new_author:
a['key'] = web.ctx.site.new_key('/type/author')
a['source_records'] = [source]
edits.append(a)
authors.append({'key': a['key']})
author_reply.append(
{
'key': a['key'],
'name': a['name'],
'status': ('created' if new_author else 'matched'),
}
)
return (authors, author_reply)
def new_work(edition, rec, cover_id=None):
"""
:param dict edition: New OL Edition
:param dict rec: Edition import data
:param (int|None) cover_id: cover id
:rtype: dict
:return: a work to save
"""
w = {
'type': {'key': '/type/work'},
'title': get_title(rec),
}
for s in subject_fields:
if s in rec:
w[s] = rec[s]
if 'authors' in edition:
w['authors'] = [
{'type': {'key': '/type/author_role'}, 'author': akey}
for akey in edition['authors']
]
if 'description' in rec:
w['description'] = {'type': '/type/text', 'value': rec['description']}
wkey = web.ctx.site.new_key('/type/work')
if edition.get('covers'):
w['covers'] = edition['covers']
w['key'] = wkey
return w
def add_cover(cover_url, ekey, account_key=None):
"""
Adds a cover to coverstore and returns the cover id.
:param str cover_url: URL of cover image
:param str ekey: Edition key /book/OL..M
:rtype: int or None
:return: Cover id, or None if upload did not succeed
"""
olid = ekey.split('/')[-1]
coverstore_url = config.get('coverstore_url').rstrip('/')
upload_url = coverstore_url + '/b/upload2'
if upload_url.startswith('//'):
upload_url = '{}:{}'.format(web.ctx.get('protocol', 'http'), upload_url)
if not account_key:
user = accounts.get_current_user()
if not user:
raise RuntimeError("accounts.get_current_user() failed")
account_key = user.get('key') or user.get('_key')
params = {
'author': account_key,
'data': None,
'source_url': cover_url,
'olid': olid,
'ip': web.ctx.ip,
}
reply = None
for attempt in range(10):
try:
payload = requests.compat.urlencode(params).encode('utf-8')
response = requests.post(upload_url, data=payload)
except requests.HTTPError:
sleep(2)
continue
body = response.text
if response.status_code == 500:
raise CoverNotSaved(body)
if body not in ['', 'None']:
reply = response.json()
if response.status_code == 200 and 'id' in reply:
break
sleep(2)
if not reply or reply.get('message') == 'Invalid URL':
return
cover_id = int(reply['id'])
return cover_id
def get_ia_item(ocaid):
import internetarchive as ia
cfg = {'general': {'secure': False}}
item = ia.get_item(ocaid, config=cfg)
return item
def modify_ia_item(item, data):
access_key = (
lending.config_ia_ol_metadata_write_s3
and lending.config_ia_ol_metadata_write_s3['s3_key']
)
secret_key = (
lending.config_ia_ol_metadata_write_s3
and lending.config_ia_ol_metadata_write_s3['s3_secret']
)
return item.modify_metadata(data, access_key=access_key, secret_key=secret_key)
def create_ol_subjects_for_ocaid(ocaid, subjects):
item = get_ia_item(ocaid)
openlibrary_subjects = copy(item.metadata.get('openlibrary_subject')) or []
if not isinstance(openlibrary_subjects, list):
openlibrary_subjects = [openlibrary_subjects]
for subject in subjects:
if subject not in openlibrary_subjects:
openlibrary_subjects.append(subject)
r = modify_ia_item(item, {'openlibrary_subject': openlibrary_subjects})
if r.status_code != 200:
return f'{item.identifier} failed: {r.content}'
else:
return "success for %s" % item.identifier
def update_ia_metadata_for_ol_edition(edition_id):
"""
Writes the Open Library Edition and Work id to a linked
archive.org item.
:param str edition_id: of the form OL..M
:rtype: dict
:return: error report, or modified archive.org metadata on success
"""
data = {'error': 'No qualifying edition'}
if edition_id:
ed = web.ctx.site.get('/books/%s' % edition_id)
if ed.ocaid:
work = ed.works[0] if ed.get('works') else None
if work and work.key:
item = get_ia_item(ed.ocaid)
work_id = work.key.split('/')[2]
r = modify_ia_item(
item,
{'openlibrary_work': work_id, 'openlibrary_edition': edition_id},
)
if r.status_code != 200:
data = {'error': f'{item.identifier} failed: {r.content}'}
else:
data = item.metadata
return data
def normalize_record_bibids(rec):
"""
Returns the Edition import record with all ISBN fields and LCCNs cleaned.
:param dict rec: Edition import record
:rtype: dict
:return: A record with cleaned LCCNs, and ISBNs in the various possible ISBN locations.
"""
for field in ('isbn_13', 'isbn_10', 'isbn'):
if rec.get(field):
rec[field] = [
normalize_isbn(isbn) for isbn in rec.get(field) if normalize_isbn(isbn)
]
if rec.get('lccn'):
rec['lccn'] = [
normalize_lccn(lccn) for lccn in rec.get('lccn') if normalize_lccn(lccn)
]
return rec
def isbns_from_record(rec):
"""
Returns a list of all isbns from the various possible isbn fields.
:param dict rec: Edition import record
:rtype: list
"""
isbns = rec.get('isbn', []) + rec.get('isbn_10', []) + rec.get('isbn_13', [])
return isbns
def build_pool(rec):
"""
Searches for existing edition matches on title and bibliographic keys.
:param dict rec: Edition record
:rtype: dict
:return: {<identifier: title | isbn | lccn etc>: [list of /books/OL..M keys that match rec on <identifier>]}
"""
pool = defaultdict(set)
match_fields = ('title', 'oclc_numbers', 'lccn', 'ocaid')
# Find records with matching fields
for field in match_fields:
pool[field] = set(editions_matched(rec, field))
# update title pool with normalized title matches
pool['title'].update(
set(editions_matched(rec, 'normalized_title_', normalize(rec['title'])))
)
# Find records with matching ISBNs
if isbns := isbns_from_record(rec):
pool['isbn'] = set(editions_matched(rec, 'isbn_', isbns))
return {k: list(v) for k, v in pool.items() if v}
def find_quick_match(rec: dict) -> str | None:
"""
Attempts to quickly find an existing item match using bibliographic keys.
:param dict rec: Edition record
:return: First key matched of format "/books/OL..M" or None if no match found.
"""
if 'openlibrary' in rec:
return '/books/' + rec['openlibrary']
ekeys = editions_matched(rec, 'ocaid')
if ekeys:
return ekeys[0]
if isbns := isbns_from_record(rec):
ekeys = editions_matched(rec, 'isbn_', isbns)
if ekeys:
return ekeys[0]
# Look for a matching non-ISBN ASIN identifier (e.g. from a BWB promise item).
if (non_isbn_asin := get_non_isbn_asin(rec)) and (
ekeys := editions_matched(rec, "identifiers.amazon", non_isbn_asin)
):
return ekeys[0]
# Only searches for the first value from these lists
for f in 'source_records', 'oclc_numbers', 'lccn':
if rec.get(f):
if f == 'source_records' and not rec[f][0].startswith('ia:'):
continue
if ekeys := editions_matched(rec, f, rec[f][0]):
return ekeys[0]
return None
def editions_matched(rec, key, value=None):
"""
Search OL for editions matching record's 'key' value.
:param dict rec: Edition import record
:param str key: Key to search on, e.g. 'isbn_'
:param list|str value: Value or Values to use, overriding record values
    :rtype: list
:return: List of edition keys ["/books/OL..M",]
"""
if value is None and key not in rec:
return []
if value is None:
value = rec[key]
q = {'type': '/type/edition', key: value}
ekeys = list(web.ctx.site.things(q))
return ekeys
def find_threshold_match(rec: dict, edition_pool: dict) -> str | None:
"""
Find the best match for rec in edition_pool and return its key.
:param dict rec: the new edition we are trying to match.
:param list edition_pool: list of possible edition key matches, output of build_pool(import record)
:return: None or the edition key '/books/OL...M' of the best edition match for enriched_rec in edition_pool
"""
seen = set()
for edition_keys in edition_pool.values():
for edition_key in edition_keys:
if edition_key in seen:
continue
thing = None
while not thing or is_redirect(thing):
seen.add(edition_key)
thing = web.ctx.site.get(edition_key)
if thing is None:
break
if is_redirect(thing):
edition_key = thing['location']
if thing and editions_match(rec, thing):
return edition_key
return None
def load_data(
rec: dict,
account_key: str | None = None,
existing_edition: "Edition | None" = None,
):
"""
Adds a new Edition to Open Library, or overwrites existing_edition with rec data.
The overwrite option exists for cases where the existing edition data
should be (nearly) completely overwritten by rec data. Revision 1 promise
items are an example.
Checks for existing Works.
Creates a new Work, and Author, if required,
otherwise associates the new Edition with the existing Work.
:param dict rec: Edition record to add (no further checks at this point)
:rtype: dict
:return:
{
"success": False,
"error": <error msg>
}
OR
{
"success": True,
"work": {"key": <key>, "status": "created" | "modified" | "matched"},
"edition": {"key": <key>, "status": "created"},
"authors": [{"status": "matched", "name": "John Smith", "key": <key>}, ...]
}
"""
cover_url = None
if 'cover' in rec:
cover_url = rec['cover']
del rec['cover']
try:
# get an OL style edition dict
rec_as_edition = build_query(rec)
edition: dict[str, Any]
if existing_edition:
# Note: This will overwrite any fields in the existing edition. This is ok for
# now, because we'll really only come here when overwriting a promise
# item
edition = existing_edition.dict() | rec_as_edition
# Preserve source_records to avoid data loss.
edition['source_records'] = existing_edition.get(
'source_records', []
) + rec.get('source_records', [])
# Preserve existing authors, if any.
if authors := existing_edition.get('authors'):
edition['authors'] = authors
else:
edition = rec_as_edition
except InvalidLanguage as e:
return {
'success': False,
'error': str(e),
}
if not (edition_key := edition.get('key')):
edition_key = web.ctx.site.new_key('/type/edition')
cover_id = None
if cover_url:
cover_id = add_cover(cover_url, edition_key, account_key=account_key)
if cover_id:
edition['covers'] = [cover_id]
edits: list[dict] = [] # Things (Edition, Work, Authors) to be saved
reply = {}
# edition.authors may have already been processed by import_authors() in build_query(),
# but not necessarily
author_in = [
(
import_author(a, eastern=east_in_by_statement(rec, a))
if isinstance(a, dict)
else a
)
for a in edition.get('authors', [])
]
# build_author_reply() adds authors to edits
(authors, author_reply) = build_author_reply(
author_in, edits, rec['source_records'][0]
)
if authors:
edition['authors'] = authors
reply['authors'] = author_reply
work_key = safeget(lambda: edition['works'][0]['key'])
work_state = 'created'
# Look for an existing work
if not work_key and 'authors' in edition:
work_key = find_matching_work(edition)
if work_key:
work = web.ctx.site.get(work_key)
work_state = 'matched'
need_update = False
for k in subject_fields:
if k not in rec:
continue
for s in rec[k]:
if normalize(s) not in [
normalize(existing) for existing in work.get(k, [])
]:
work.setdefault(k, []).append(s)
need_update = True
if cover_id:
work.setdefault('covers', []).append(cover_id)
need_update = True
if need_update:
work_state = 'modified'
edits.append(work.dict())
else:
# Create new work
work = new_work(edition, rec, cover_id)
work_state = 'created'
work_key = work['key']
edits.append(work)
assert work_key
if not edition.get('works'):
edition['works'] = [{'key': work_key}]
edition['key'] = edition_key
edits.append(edition)
comment = "overwrite existing edition" if existing_edition else "import new book"
web.ctx.site.save_many(edits, comment=comment, action='add-book')
# Writes back `openlibrary_edition` and `openlibrary_work` to
# archive.org item after successful import:
if 'ocaid' in rec:
update_ia_metadata_for_ol_edition(edition_key.split('/')[-1])
reply['success'] = True
reply['edition'] = (
{'key': edition_key, 'status': 'modified'}
if existing_edition
else {'key': edition_key, 'status': 'created'}
)
reply['work'] = {'key': work_key, 'status': work_state}
return reply
def normalize_import_record(rec: dict) -> None:
"""
Normalize the import record by:
- Verifying required fields;
- Ensuring source_records is a list;
- Splitting subtitles out of the title field;
- Cleaning all ISBN and LCCN fields ('bibids');
    - Deduplicating authors;
    - Removing throw-away data used for validation; and
    - Removing suspect publication dates (e.g. 1900) for AMZ/BWB/Promise records.
NOTE: This function modifies the passed-in rec in place.
"""
required_fields = [
'title',
'source_records',
] # ['authors', 'publishers', 'publish_date']
for field in required_fields:
if not rec.get(field):
raise RequiredField(field)
# Ensure source_records is a list.
if not isinstance(rec['source_records'], list):
rec['source_records'] = [rec['source_records']]
publication_year = get_publication_year(rec.get('publish_date'))
if publication_year and published_in_future_year(publication_year):
del rec['publish_date']
# Split subtitle if required and not already present
if ':' in rec.get('title', '') and not rec.get('subtitle'):
title, subtitle = split_subtitle(rec.get('title'))
if subtitle:
rec['title'] = title
rec['subtitle'] = subtitle
rec = normalize_record_bibids(rec)
# deduplicate authors
rec['authors'] = uniq(rec.get('authors', []), dicthash)
# Validation by parse_data(), prior to calling load(), requires facially
# valid publishers. If data are unavailable, we provide throw-away data
# which validates. We use ["????"] as an override, but this must be
# removed prior to import.
if rec.get('publishers') == ["????"]:
rec.pop('publishers')
# Remove suspect publication dates from certain sources (e.g. 1900 from Amazon).
if any(
source_record.split(":")[0] in SOURCE_RECORDS_REQUIRING_DATE_SCRUTINY
and rec.get('publish_date') in SUSPECT_PUBLICATION_DATES
for source_record in rec['source_records']
):
rec.pop('publish_date')
def validate_record(rec: dict) -> None:
"""
Check for:
- publication years too old from non-exempt sources (e.g. Amazon);
- publish dates in a future year;
- independently published books; and
- books that need an ISBN and lack one.
Each check raises an error or returns None.
If all the validations pass, implicitly return None.
"""
# Only validate publication year if a year is found.
if publication_year := get_publication_year(rec.get('publish_date')):
if publication_too_old_and_not_exempt(rec):
raise PublicationYearTooOld(publication_year)
elif published_in_future_year(publication_year):
raise PublishedInFutureYear(publication_year)
if is_independently_published(rec.get('publishers', [])):
raise IndependentlyPublished
if needs_isbn_and_lacks_one(rec):
raise SourceNeedsISBN
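# Illustrative sketch (hypothetical record): an Amazon-sourced record without an
# ISBN is expected to fail the last check above and never reach matching.
#     rec = {
#         'title': 'Some Title',
#         'source_records': ['amazon:B00000000X'],
#         'publish_date': '2009',
#     }
#     validate_record(rec)  # expected to raise SourceNeedsISBN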
def find_match(rec: dict, edition_pool: dict) -> str | None:
"""Use rec to try to find an existing edition key that matches."""
return find_quick_match(rec) or find_threshold_match(rec, edition_pool)
def update_edition_with_rec_data(
rec: dict, account_key: str | None, edition: "Edition"
) -> bool:
"""
Enrich the Edition by adding certain fields present in rec but absent
in edition.
NOTE: This modifies the passed-in Edition in place.
"""
need_edition_save = False
# Add cover to edition
if 'cover' in rec and not edition.get_covers():
cover_url = rec['cover']
cover_id = add_cover(cover_url, edition.key, account_key=account_key)
if cover_id:
edition['covers'] = [cover_id]
need_edition_save = True
# Add ocaid to edition (str), if needed
if 'ocaid' in rec and not edition.ocaid:
edition['ocaid'] = rec['ocaid']
need_edition_save = True
# Fields which have their VALUES added if absent.
edition_list_fields = [
'local_id',
'lccn',
'lc_classifications',
'oclc_numbers',
'source_records',
]
for f in edition_list_fields:
if f not in rec or not rec[f]:
continue
# ensure values is a list
values = rec[f] if isinstance(rec[f], list) else [rec[f]]
if f in edition:
# get values from rec field that are not currently on the edition
case_folded_values = {v.casefold() for v in edition[f]}
to_add = [v for v in values if v.casefold() not in case_folded_values]
edition[f] += to_add
else:
edition[f] = to_add = values
if to_add:
need_edition_save = True
# Fields that are added as a whole if absent. (Individual values are not added.)
other_edition_fields = [
'description',
'number_of_pages',
'publishers',
'publish_date',
]
for f in other_edition_fields:
if f not in rec or not rec[f]:
continue
if f not in edition:
edition[f] = rec[f]
need_edition_save = True
# Add new identifiers
if 'identifiers' in rec:
identifiers = defaultdict(list, edition.dict().get('identifiers', {}))
for k, vals in rec['identifiers'].items():
identifiers[k].extend(vals)
identifiers[k] = list(set(identifiers[k]))
if edition.dict().get('identifiers') != identifiers:
edition['identifiers'] = identifiers
need_edition_save = True
return need_edition_save
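# Illustrative sketch of the list-field merge above (hypothetical values): if the
# edition already has source_records == ['ia:foo'] and rec brings
# ['IA:FOO', 'bwb:9780000000002'], only 'bwb:9780000000002' is appended, because
# 'IA:FOO' case-folds to an existing value; need_edition_save becomes True.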
def update_work_with_rec_data(
rec: dict, edition: "Edition", work: dict[str, Any], need_work_save: bool
) -> bool:
"""
Enrich the Work by adding certain fields present in rec but absent
in work.
NOTE: This modifies the passed-in Work in place.
"""
# Add subjects to work, if not already present
if 'subjects' in rec:
work_subjects: list[str] = list(work.get('subjects', []))
rec_subjects: list[str] = rec.get('subjects', [])
deduped_subjects = uniq(
itertools.chain(work_subjects, rec_subjects), lambda item: item.casefold()
)
if work_subjects != deduped_subjects:
work['subjects'] = deduped_subjects
need_work_save = True
# Add cover to work, if needed
if not work.get('covers') and edition.get_covers():
work['covers'] = [edition['covers'][0]]
need_work_save = True
# Add description to work, if needed
if not work.get('description') and edition.get('description'):
work['description'] = edition['description']
need_work_save = True
# Add authors to work, if needed
if not work.get('authors'):
authors = [import_author(a) for a in rec.get('authors', [])]
work['authors'] = [
{'type': {'key': '/type/author_role'}, 'author': a.get('key')}
for a in authors
if a.get('key')
]
if work.get('authors'):
need_work_save = True
return need_work_save
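# Illustrative sketch of the subject merge above (hypothetical values): a work
# with subjects == ['Dogs'] and rec['subjects'] == ['dogs', 'Working dogs'] ends
# up with ['Dogs', 'Working dogs'] -- the case-insensitive duplicate is dropped,
# the new subject is appended, and need_work_save becomes True.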
def should_overwrite_promise_item(
edition: "Edition", from_marc_record: bool = False
) -> bool:
"""
Returns True for revision 1 promise items with MARC data available.
    Promise items frequently have low quality data, while MARC data is high
    quality. Overwriting revision 1 promise items with MARC data therefore yields
    higher quality records and, because a revision 1 record has no human edits
    yet, carries no risk of obliterating them.
"""
if edition.get('revision') != 1 or not from_marc_record:
return False
# Promise items are always index 0 in source_records.
return bool(safeget(lambda: edition['source_records'][0], '').startswith("promise"))
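# Illustrative sketch (hypothetical edition): with revision == 1,
# from_marc_record == True, and
# source_records == ['promise:bwb_daily_pallets_2022-01-01', 'ia:someitem'],
# the function returns True; a later revision, or a non-promise first source
# record, returns False.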
def load(rec: dict, account_key=None, from_marc_record: bool = False) -> dict:
"""Given a record, tries to add/match that edition in the system.
Record is a dictionary containing all the metadata of the edition.
The following fields are mandatory:
* title: str
* source_records: list
:param dict rec: Edition record to add
:param bool from_marc_record: whether the record is based on a MARC record.
:rtype: dict
:return: a dict to be converted into a JSON HTTP response, same as load_data()
"""
if not is_promise_item(rec):
validate_record(rec)
normalize_import_record(rec)
# Resolve an edition if possible, or create and return one if not.
edition_pool = build_pool(rec)
if not edition_pool:
# No match candidates found, add edition
return load_data(rec, account_key=account_key)
match = find_match(rec, edition_pool)
if not match:
# No match found, add edition
return load_data(rec, account_key=account_key)
# We have an edition match at this point
need_work_save = need_edition_save = False
work: dict[str, Any]
existing_edition: Edition = web.ctx.site.get(match)
# check for, and resolve, author redirects
for a in existing_edition.authors:
while is_redirect(a):
if a in existing_edition.authors:
existing_edition.authors.remove(a)
a = web.ctx.site.get(a.location)
if not is_redirect(a):
existing_edition.authors.append(a)
if existing_edition.get('works'):
work = existing_edition.works[0].dict()
work_created = False
else:
# Found an edition without a work
work_created = need_work_save = need_edition_save = True
work = new_work(existing_edition.dict(), rec)
existing_edition.works = [{'key': work['key']}]
# Send revision 1 promise item editions to the same pipeline as new editions
# because we want to overwrite most of their data.
if should_overwrite_promise_item(
edition=existing_edition, from_marc_record=from_marc_record
):
return load_data(
rec, account_key=account_key, existing_edition=existing_edition
)
    # The `or` keeps a True flag set above when a new work was created and the
    # edition's `works` link still needs saving.
    need_edition_save = (
        update_edition_with_rec_data(
            rec=rec, account_key=account_key, edition=existing_edition
        )
        or need_edition_save
    )
need_work_save = update_work_with_rec_data(
rec=rec, edition=existing_edition, work=work, need_work_save=need_work_save
)
edits = []
reply = {
'success': True,
'edition': {'key': match, 'status': 'matched'},
'work': {'key': work['key'], 'status': 'matched'},
}
if need_edition_save:
reply['edition']['status'] = 'modified' # type: ignore[index]
edits.append(existing_edition.dict())
if need_work_save:
reply['work']['status'] = 'created' if work_created else 'modified' # type: ignore[index]
edits.append(work)
if edits:
web.ctx.site.save_many(
edits, comment='import existing book', action='edit-book'
)
if 'ocaid' in rec:
update_ia_metadata_for_ol_edition(match.split('/')[-1])
return reply
| 32,077 | Python | .py | 833 | 31.103241 | 118 | 0.618973 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
214 | load_book.py | internetarchive_openlibrary/openlibrary/catalog/add_book/load_book.py | from typing import TYPE_CHECKING, Any, Final
import web
from openlibrary.catalog.utils import flip_name, author_dates_match, key_int
from openlibrary.core.helpers import extract_year
if TYPE_CHECKING:
from openlibrary.plugins.upstream.models import Author
# Sort by descending length to remove the _longest_ match.
# E.g. remove "señorita" and not "señor", when both match.
HONORIFICS: Final = sorted(
[
'countess',
'doctor',
'doktor',
'dr',
'dr.',
'frau',
'fräulein',
'herr',
'lady',
'lord',
'm.',
'madame',
'mademoiselle',
'miss',
'mister',
'mistress',
'mixter',
'mlle',
'mlle.',
'mme',
'mme.',
'monsieur',
'mr',
'mr.',
'mrs',
'mrs.',
'ms',
'ms.',
'mx',
'mx.',
'professor',
'señor',
'señora',
'señorita',
'sir',
'sr.',
'sra.',
'srta.',
],
key=lambda x: len(x),
reverse=True,
)
HONORIFC_NAME_EXECPTIONS = frozenset(
{
"dr. seuss",
"dr seuss",
"dr oetker",
"doctor oetker",
}
)
def east_in_by_statement(rec: dict[str, Any], author: dict[str, Any]) -> bool:
"""
Returns False if there is no by_statement in rec.
    Otherwise returns whether the author's name uses eastern name order.
TODO: elaborate on what this actually means, and how it is used.
"""
if 'by_statement' not in rec:
return False
if 'authors' not in rec:
return False
name = author['name']
flipped = flip_name(name)
name = name.replace('.', '')
name = name.replace(', ', '')
if name == flipped.replace('.', ''):
# name was not flipped
return False
return rec['by_statement'].find(name) != -1
def do_flip(author: dict[str, Any]) -> None:
"""
Given an author import dict, flip its name in place
i.e. Smith, John => John Smith
"""
if 'personal_name' in author and author['personal_name'] != author['name']:
# Don't flip names if name is more complex than personal_name (legacy behaviour)
return
first_comma = author['name'].find(', ')
if first_comma == -1:
return
# e.g: Harper, John Murdoch, 1845-
if author['name'].find(',', first_comma + 1) != -1:
return
if author['name'].find('i.e.') != -1:
return
if author['name'].find('i. e.') != -1:
return
name = flip_name(author['name'])
author['name'] = name
if 'personal_name' in author:
author['personal_name'] = name
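# Illustrative examples (hypothetical author dicts):
#     a = {'name': 'Smith, John'}
#     do_flip(a)   # a['name'] becomes 'John Smith'
#     b = {'name': 'Harper, John Murdoch, 1845-'}
#     do_flip(b)   # unchanged: a second comma means dates or titles are attached
#     c = {'name': 'Smith, John i.e. Jon'}
#     do_flip(c)   # unchanged: names containing 'i.e.' are left alone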
def pick_from_matches(author: dict[str, Any], match: list["Author"]) -> "Author":
"""
Finds the best match for author from a list of OL authors records, match.
:param dict author: Author import representation
:param list match: List of matching OL author records
    :rtype: Author
:return: A single OL author record from match
"""
maybe = []
if 'birth_date' in author and 'death_date' in author:
maybe = [m for m in match if 'birth_date' in m and 'death_date' in m]
elif 'date' in author:
maybe = [m for m in match if 'date' in m]
if not maybe:
maybe = match
if len(maybe) == 1:
return maybe[0]
return min(maybe, key=key_int)
def find_author(author: dict[str, Any]) -> list["Author"]:
"""
Searches OL for an author by a range of queries.
"""
def walk_redirects(obj, seen):
seen.add(obj['key'])
while obj['type']['key'] == '/type/redirect':
assert obj['location'] != obj['key']
obj = web.ctx.site.get(obj['location'])
seen.add(obj['key'])
return obj
# Try for an 'exact' (case-insensitive) name match, but fall back to alternate_names,
# then last name with identical birth and death dates (that are not themselves `None` or '').
name = author["name"].replace("*", r"\*")
queries = [
{"type": "/type/author", "name~": name},
{"type": "/type/author", "alternate_names~": name},
{
"type": "/type/author",
"name~": f"* {name.split()[-1]}",
"birth_date~": f"*{extract_year(author.get('birth_date', '')) or -1}*",
"death_date~": f"*{extract_year(author.get('death_date', '')) or -1}*",
}, # Use `-1` to ensure an empty string from extract_year doesn't match empty dates.
]
for query in queries:
if reply := list(web.ctx.site.things(query)):
break
authors = [web.ctx.site.get(k) for k in reply]
if any(a.type.key != '/type/author' for a in authors):
        seen: set[str] = set()
authors = [walk_redirects(a, seen) for a in authors if a['key'] not in seen]
return authors
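# Illustrative sketch (hypothetical author dict): for
#     {'name': 'William H. Brewer', 'birth_date': '1829', 'death_date': '1910'}
# the queries above run in order -- exact (case-insensitive) name, then
# alternate_names, then the surname wildcard '* Brewer' restricted to authors
# whose birth and death dates contain 1829 and 1910 -- and the first query that
# returns results wins.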
def find_entity(author: dict[str, Any]) -> "Author | None":
"""
Looks for an existing Author record in OL
and returns it if found.
:param dict author: Author import dict {"name": "Some One"}
:return: Existing Author record if found, or None.
"""
assert isinstance(author, dict)
things = find_author(author)
if author.get('entity_type', 'person') != 'person':
return things[0] if things else None
match = []
seen = set()
for a in things:
key = a['key']
if key in seen:
continue
seen.add(key)
orig_key = key
assert a.type.key == '/type/author'
if 'birth_date' in author and 'birth_date' not in a:
continue
if 'birth_date' not in author and 'birth_date' in a:
continue
if not author_dates_match(author, a):
continue
match.append(a)
if not match:
return None
if len(match) == 1:
return match[0]
return pick_from_matches(author, match)
def remove_author_honorifics(name: str) -> str:
"""
Remove honorifics from an author's name field.
If the author's name is only an honorific, it will return the original name.
"""
if name.casefold() in HONORIFC_NAME_EXECPTIONS:
return name
if honorific := next(
(
honorific
for honorific in HONORIFICS
if name.casefold().startswith(f"{honorific} ") # Note the trailing space.
),
None,
):
return name[len(f"{honorific} ") :].lstrip() or name
return name
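# Illustrative examples (mirroring the parametrized tests for this function):
#     remove_author_honorifics('Mr. Blobby')   # -> 'Blobby'
#     remove_author_honorifics('Dr. Seuss')    # -> 'Dr. Seuss'  (exception list)
#     remove_author_honorifics('Mr.')          # -> 'Mr.'  (name is only an honorific)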
def import_author(author: dict[str, Any], eastern=False) -> "Author | dict[str, Any]":
"""
Converts an import style new-author dictionary into an
Open Library existing author, or new author candidate, representation.
Does NOT create new authors.
:param dict author: Author import record {"name": "Some One"}
:param bool eastern: Eastern name order
:return: Open Library style Author representation, either existing Author with "key",
or new candidate dict without "key".
"""
assert isinstance(author, dict)
if author.get('entity_type') != 'org' and not eastern:
do_flip(author)
if existing := find_entity(author):
assert existing.type.key == '/type/author'
        for k in 'last_modified', 'id', 'revision', 'created':
            # Use the loop variable as a key; `existing.k` would only ever look
            # up a literal attribute named "k", making this loop a no-op.
            if k in existing:
                del existing[k]
new = existing
if 'death_date' in author and 'death_date' not in existing:
new['death_date'] = author['death_date']
return new
a = {'type': {'key': '/type/author'}}
for f in 'name', 'title', 'personal_name', 'birth_date', 'death_date', 'date':
if f in author:
a[f] = author[f]
return a
class InvalidLanguage(Exception):
def __init__(self, code):
self.code = code
def __str__(self):
return f"invalid language code: '{self.code}'"
type_map = {'description': 'text', 'notes': 'text', 'number_of_pages': 'int'}
def build_query(rec: dict[str, Any]) -> dict[str, Any]:
"""
Takes an edition record dict, rec, and returns an Open Library edition
suitable for saving.
:return: Open Library style edition dict representation
"""
book: dict[str, Any] = {
'type': {'key': '/type/edition'},
}
for k, v in rec.items():
if k == 'authors':
if v and v[0]:
book['authors'] = []
for author in v:
author['name'] = remove_author_honorifics(author['name'])
east = east_in_by_statement(rec, author)
book['authors'].append(import_author(author, eastern=east))
continue
if k in ('languages', 'translated_from'):
for language in v:
if web.ctx.site.get('/languages/' + language.lower()) is None:
raise InvalidLanguage(language.lower())
book[k] = [{'key': '/languages/' + language.lower()} for language in v]
continue
if k in type_map:
t = '/type/' + type_map[k]
if isinstance(v, list):
book[k] = [{'type': t, 'value': i} for i in v]
else:
book[k] = {'type': t, 'value': v}
else:
book[k] = v
return book
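# Illustrative sketch (mirroring test_build_query; the languages must exist on
# the site or InvalidLanguage is raised):
#     build_query({'title': 'magic', 'languages': ['eng'], 'description': 'test'})
#     # -> {'type': {'key': '/type/edition'}, 'title': 'magic',
#     #     'languages': [{'key': '/languages/eng'}],
#     #     'description': {'type': '/type/text', 'value': 'test'}}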
| 9,244 | Python | .py | 265 | 27.211321 | 97 | 0.5717 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
215 | conftest.py | internetarchive_openlibrary/openlibrary/catalog/add_book/tests/conftest.py | import pytest
@pytest.fixture
def add_languages(mock_site):
languages = [
('eng', 'English'),
('spa', 'Spanish'),
('fre', 'French'),
('yid', 'Yiddish'),
('fri', 'Frisian'),
('fry', 'Frisian'),
]
for code, name in languages:
mock_site.save(
{
'code': code,
'key': '/languages/' + code,
'name': name,
'type': {'key': '/type/language'},
}
)
| 504 | Python | .py | 20 | 15.9 | 50 | 0.417012 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
216 | test_match.py | internetarchive_openlibrary/openlibrary/catalog/add_book/tests/test_match.py | import pytest
from copy import deepcopy
from openlibrary.catalog.add_book import load
from openlibrary.catalog.add_book.match import (
THRESHOLD,
add_db_name,
build_titles,
compare_authors,
compare_publisher,
editions_match,
expand_record,
normalize,
mk_norm,
threshold_match,
)
def test_editions_match_identical_record(mock_site):
rec = {
'title': 'Test item',
'lccn': ['12345678'],
'authors': [{'name': 'Smith, John', 'birth_date': '1980'}],
'source_records': ['ia:test_item'],
}
reply = load(rec)
ekey = reply['edition']['key']
e = mock_site.get(ekey)
assert editions_match(rec, e) is True
def test_add_db_name():
authors = [
{'name': 'Smith, John'},
{'name': 'Smith, John', 'date': '1950'},
{'name': 'Smith, John', 'birth_date': '1895', 'death_date': '1964'},
]
orig = deepcopy(authors)
add_db_name({'authors': authors})
orig[0]['db_name'] = orig[0]['name']
orig[1]['db_name'] = orig[1]['name'] + ' 1950'
orig[2]['db_name'] = orig[2]['name'] + ' 1895-1964'
assert authors == orig
rec = {}
add_db_name(rec)
assert rec == {}
# Handle `None` authors values.
rec = {'authors': None}
add_db_name(rec)
assert rec == {'authors': None}
titles = [
('Hello this is a Title', 'hello this is a title'), # Spaces
('Kitāb Yatīmat ud-Dahr', 'kitāb yatīmat ud dahr'), # Unicode
('This and That', 'this and that'),
('This & That', 'this and that'), # ampersand
('A Title.', 'a title'), # period and space stripping
('A Title. ', 'a title'),
('A Title .', 'a title'),
('The Fish and Chips', 'the fish and chips'),
('A Fish & Chip shop', 'a fish and chip shop'),
]
@pytest.mark.parametrize('title,normalized', titles)
def test_normalize(title, normalized):
assert normalize(title) == normalized
mk_norm_conversions = [
("Hello I'm a title.", "helloi'matitle"),
("Hello I'm a title.", "helloi'matitle"),
('Forgotten Titles: A Novel.', 'forgottentitlesanovel'),
('Kitāb Yatīmat ud-Dahr', 'kitābyatīmatuddahr'),
('The Fish and Chips', 'fishchips'),
('A Fish & Chip shop', 'fishchipshop'),
]
@pytest.mark.parametrize('title,expected', mk_norm_conversions)
def test_mk_norm(title, expected):
assert mk_norm(title) == expected
mk_norm_matches = [
("Your Baby's First Word Will Be DADA", "Your baby's first word will be DADA"),
]
@pytest.mark.parametrize('a,b', mk_norm_matches)
def test_mk_norm_equality(a, b):
assert mk_norm(a) == mk_norm(b)
class TestExpandRecord:
rec = {
'title': 'A test full title',
'subtitle': 'subtitle (parens).',
'source_records': ['ia:test-source'],
}
def test_expand_record(self):
edition = self.rec.copy()
expanded_record = expand_record(edition)
assert isinstance(expanded_record['titles'], list)
assert self.rec['title'] not in expanded_record['titles']
expected_titles = [
edition['full_title'],
'a test full title subtitle (parens)',
'test full title subtitle (parens)',
'a test full title subtitle',
'test full title subtitle',
]
for t in expected_titles:
assert t in expanded_record['titles']
assert len(set(expanded_record['titles'])) == len(set(expected_titles))
assert (
expanded_record['normalized_title'] == 'a test full title subtitle (parens)'
)
assert expanded_record['short_title'] == 'a test full title subtitl'
def test_expand_record_publish_country(self):
edition = self.rec.copy()
expanded_record = expand_record(edition)
assert 'publish_country' not in expanded_record
for publish_country in (' ', '|||'):
edition['publish_country'] = publish_country
assert 'publish_country' not in expand_record(edition)
for publish_country in ('USA', 'usa'):
edition['publish_country'] = publish_country
assert expand_record(edition)['publish_country'] == publish_country
def test_expand_record_transfer_fields(self):
edition = self.rec.copy()
expanded_record = expand_record(edition)
transfer_fields = (
'lccn',
'publishers',
'publish_date',
'number_of_pages',
'authors',
'contribs',
)
for field in transfer_fields:
assert field not in expanded_record
for field in transfer_fields:
edition[field] = []
expanded_record = expand_record(edition)
for field in transfer_fields:
assert field in expanded_record
def test_expand_record_isbn(self):
edition = self.rec.copy()
expanded_record = expand_record(edition)
assert expanded_record['isbn'] == []
edition.update(
{
'isbn': ['1234567890'],
'isbn_10': ['123', '321'],
'isbn_13': ['1234567890123'],
}
)
expanded_record = expand_record(edition)
assert expanded_record['isbn'] == ['1234567890', '123', '321', '1234567890123']
class TestAuthors:
@pytest.mark.xfail(
reason=(
'This expected result taken from the amazon and '
'merge versions of compare_author, '
'Current merge_marc.compare_authors() '
'does NOT take by_statement into account.'
)
)
def test_compare_authors_by_statement(self):
rec1 = {
'title': 'Full Title, required',
'authors': [{'name': 'Alistair Smith'}],
}
rec2 = {
'title': 'A different Full Title, only matching authors here.',
'authors': [
{
'name': 'National Gallery (Great Britain)',
'entity_type': 'org',
}
],
'by_statement': 'Alistair Smith.',
}
result = compare_authors(expand_record(rec1), expand_record(rec2))
assert result == ('main', 'exact match', 125)
def test_author_contrib(self):
rec1 = {
'authors': [{'name': 'Bruner, Jerome S.'}],
'title': 'Contemporary approaches to cognition ',
'subtitle': 'a symposium held at the University of Colorado.',
'number_of_pages': 210,
'publish_country': 'xxu',
'publish_date': '1957',
'publishers': ['Harvard U.P'],
}
rec2 = {
'authors': [
{
'name': (
'University of Colorado (Boulder campus). '
'Dept. of Psychology.'
)
}
],
# TODO: the contrib db_name needs to be populated by expand_record() to be useful
'contribs': [{'name': 'Bruner, Jerome S.', 'db_name': 'Bruner, Jerome S.'}],
'title': 'Contemporary approaches to cognition ',
'subtitle': 'a symposium held at the University of Colorado',
'lccn': ['57012963'],
'number_of_pages': 210,
'publish_country': 'mau',
'publish_date': '1957',
'publishers': ['Harvard University Press'],
}
assert compare_authors(expand_record(rec1), expand_record(rec2)) == (
'authors',
'exact match',
125,
)
threshold = 875
assert threshold_match(rec1, rec2, threshold) is True
class TestTitles:
def test_build_titles(self):
        # Used by openlibrary.catalog.add_book.match.expand_record()
full_title = 'This is a title.' # Input title
normalized = 'this is a title' # Expected normalization
result = build_titles(full_title)
assert isinstance(result['titles'], list)
assert result['full_title'] == full_title
assert result['short_title'] == normalized
assert result['normalized_title'] == normalized
assert len(result['titles']) == 2
assert full_title in result['titles']
assert normalized in result['titles']
def test_build_titles_ampersand(self):
full_title = 'This & that'
result = build_titles(full_title)
assert 'this and that' in result['titles']
assert 'This & that' in result['titles']
def test_build_titles_complex(self):
full_title = 'A test full title : subtitle (parens)'
full_title_period = 'A test full title : subtitle (parens).'
titles_period = build_titles(full_title_period)['titles']
assert isinstance(titles_period, list)
assert full_title_period in titles_period
titles = build_titles(full_title)['titles']
assert full_title in titles
common_titles = [
'a test full title subtitle (parens)',
'test full title subtitle (parens)',
]
for t in common_titles:
assert t in titles
assert t in titles_period
assert 'test full title subtitle' in titles
assert 'a test full title subtitle' in titles
# Check for duplicates:
assert len(titles_period) == len(set(titles_period))
assert len(titles) == len(set(titles))
assert len(titles) == len(titles_period)
def test_compare_publisher():
foo = {'publishers': ['foo']}
bar = {'publishers': ['bar']}
foo2 = {'publishers': ['foo']}
both = {'publishers': ['foo', 'bar']}
assert compare_publisher({}, {}) == ('publisher', 'either missing', 0)
assert compare_publisher(foo, {}) == ('publisher', 'either missing', 0)
assert compare_publisher({}, bar) == ('publisher', 'either missing', 0)
assert compare_publisher(foo, foo2) == ('publisher', 'match', 100)
assert compare_publisher(foo, bar) == ('publisher', 'mismatch', -51)
assert compare_publisher(bar, both) == ('publisher', 'match', 100)
assert compare_publisher(both, foo) == ('publisher', 'match', 100)
class TestRecordMatching:
def test_match_without_ISBN(self):
# Same year, different publishers
# one with ISBN, one without
bpl = {
'authors': [
{
'birth_date': '1897',
'entity_type': 'person',
'name': 'Green, Constance McLaughlin',
'personal_name': 'Green, Constance McLaughlin',
}
],
'title': 'Eli Whitney and the birth of American technology',
'isbn': ['188674632X'],
'number_of_pages': 215,
'publish_date': '1956',
'publishers': ['HarperCollins', '[distributed by Talman Pub.]'],
'source_records': ['marc:bpl/bpl101.mrc:0:1226'],
}
lc = {
'authors': [
{
'birth_date': '1897',
'entity_type': 'person',
'name': 'Green, Constance McLaughlin',
'personal_name': 'Green, Constance McLaughlin',
}
],
'title': 'Eli Whitney and the birth of American technology.',
'isbn': [],
'number_of_pages': 215,
'publish_date': '1956',
'publishers': ['Little, Brown'],
'source_records': [
'marc:marc_records_scriblio_net/part04.dat:119539872:591'
],
}
assert compare_authors(expand_record(bpl), expand_record(lc)) == (
'authors',
'exact match',
125,
)
threshold = 875
assert threshold_match(bpl, lc, threshold) is True
def test_match_low_threshold(self):
# year is off by < 2 years, counts a little
e1 = {
'publishers': ['Collins'],
'isbn_10': ['0002167530'],
'number_of_pages': 287,
'title': 'Sea Birds Britain Ireland',
'publish_date': '1975',
'authors': [{'name': 'Stanley Cramp'}],
}
e2 = {
'publishers': ['Collins'],
'isbn_10': ['0002167530'],
'title': 'seabirds of Britain and Ireland',
'publish_date': '1974',
'authors': [
{
'entity_type': 'person',
'name': 'Stanley Cramp.',
'personal_name': 'Cramp, Stanley.',
}
],
'source_records': [
'marc:marc_records_scriblio_net/part08.dat:61449973:855'
],
}
threshold = 515
assert threshold_match(e1, e2, threshold) is True
assert threshold_match(e1, e2, threshold + 1) is False
def test_matching_title_author_and_publish_year_but_not_publishers(self) -> None:
"""
Matching only title, author, and publish_year should not be sufficient for
meeting the match threshold if the publisher is truthy and doesn't match,
as a book published in different publishers in the same year would easily meet
the criteria.
"""
existing_edition = {
'authors': [{'name': 'Edgar Lee Masters'}],
'publish_date': '2022',
'publishers': ['Creative Media Partners, LLC'],
'title': 'Spoon River Anthology',
}
potential_match1 = {
'authors': [{'name': 'Edgar Lee Masters'}],
'publish_date': '2022',
'publishers': ['Standard Ebooks'],
'title': 'Spoon River Anthology',
}
assert threshold_match(existing_edition, potential_match1, THRESHOLD) is False
potential_match2 = {
'authors': [{'name': 'Edgar Lee Masters'}],
'publish_date': '2022',
'title': 'Spoon River Anthology',
}
# If there is no publisher and nothing else to match, the editions should be
        # indistinguishable, and therefore match.
assert threshold_match(existing_edition, potential_match2, THRESHOLD) is True
def test_noisbn_record_should_not_match_title_only(self):
# An existing light title + ISBN only record
existing_edition = {
# NO author
# NO date
#'publishers': ['Creative Media Partners, LLC'],
'title': 'Just A Title',
'isbn_13': ['9780000000002'],
}
potential_match = {
'authors': [{'name': 'Bob Smith'}],
'publish_date': '1913',
'publishers': ['Early Editions'],
'title': 'Just A Title',
'source_records': ['marc:somelibrary/some_marc.mrc'],
}
assert threshold_match(existing_edition, potential_match, THRESHOLD) is False
| 14,923 | Python | .py | 374 | 30.010695 | 93 | 0.559343 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
217 | test_load_book.py | internetarchive_openlibrary/openlibrary/catalog/add_book/tests/test_load_book.py | import pytest
from openlibrary.catalog.add_book import load_book
from openlibrary.catalog.add_book.load_book import (
find_entity,
import_author,
build_query,
InvalidLanguage,
remove_author_honorifics,
)
from openlibrary.core.models import Author
@pytest.fixture
def new_import(monkeypatch):
monkeypatch.setattr(load_book, 'find_entity', lambda a: None)
# These authors will be imported with natural name order
# i.e. => Forename Surname
natural_names = [
{'name': 'Forename Surname'},
{'name': 'Surname, Forename', 'personal_name': 'Surname, Forename'},
{'name': 'Surname, Forename'},
{'name': 'Surname, Forename', 'entity_type': 'person'},
]
# These authors will be imported with 'name' unchanged
unchanged_names = [
{'name': 'Forename Surname'},
{
'name': 'Smith, John III, King of Coats, and Bottles',
'personal_name': 'Smith, John',
},
{'name': 'Smith, John III, King of Coats, and Bottles'},
{'name': 'Harper, John Murdoch, 1845-'},
{'entity_type': 'org', 'name': 'Organisation, Place'},
{
'entity_type': 'org',
'name': 'Shou du shi fan da xue (Beijing, China). Zhongguo shi ge yan jiu zhong xin',
},
]
@pytest.mark.parametrize('author', natural_names)
def test_import_author_name_natural_order(author, new_import):
result = import_author(author)
assert isinstance(result, dict)
assert result['name'] == 'Forename Surname'
@pytest.mark.parametrize('author', unchanged_names)
def test_import_author_name_unchanged(author, new_import):
expect = author['name']
result = import_author(author)
assert isinstance(result, dict)
assert result['name'] == expect
def test_build_query(add_languages):
rec = {
'title': 'magic',
'languages': ['ENG', 'fre'],
'translated_from': ['yid'],
'authors': [{'name': 'Surname, Forename'}],
'description': 'test',
}
q = build_query(rec)
assert q['title'] == 'magic'
assert q['authors'][0]['name'] == 'Forename Surname'
assert q['description'] == {'type': '/type/text', 'value': 'test'}
assert q['type'] == {'key': '/type/edition'}
assert q['languages'] == [{'key': '/languages/eng'}, {'key': '/languages/fre'}]
assert q['translated_from'] == [{'key': '/languages/yid'}]
pytest.raises(InvalidLanguage, build_query, {'languages': ['wtf']})
class TestImportAuthor:
def add_three_existing_authors(self, mock_site):
for num in range(3):
existing_author = {
"name": f"John Smith {num}",
"key": f"/authors/OL{num}A",
"type": {"key": "/type/author"},
}
mock_site.save(existing_author)
@pytest.mark.parametrize(
["name", "expected"],
[
("Drake von Drake", "Drake von Drake"),
("Dr. Seuss", "Dr. Seuss"),
("dr. Seuss", "dr. Seuss"),
("Dr Seuss", "Dr Seuss"),
("M. Anicet-Bourgeois", "Anicet-Bourgeois"),
("Mr Blobby", "Blobby"),
("Mr. Blobby", "Blobby"),
("monsieur Anicet-Bourgeois", "Anicet-Bourgeois"),
# Don't strip from last name.
("Anicet-Bourgeois M.", "Anicet-Bourgeois M."),
('Doctor Ivo "Eggman" Robotnik', 'Ivo "Eggman" Robotnik'),
("John M. Keynes", "John M. Keynes"),
("Mr.", 'Mr.'),
],
)
def test_author_importer_drops_honorifics(self, name, expected):
got = remove_author_honorifics(name=name)
assert got == expected
def test_author_match_is_case_insensitive_for_names(self, mock_site):
"""Ensure name searches for John Smith and JOHN SMITH return the same record."""
self.add_three_existing_authors(mock_site)
existing_author = {
'name': "John Smith",
"key": "/authors/OL3A",
"type": {"key": "/type/author"},
}
mock_site.save(existing_author)
author = {"name": "John Smith"}
case_sensitive_author = find_entity(author)
author = {"name": "JoHN SmITh"}
case_insensitive_author = find_entity(author)
assert case_insensitive_author is not None
assert case_sensitive_author == case_insensitive_author
def test_author_wildcard_match_with_no_matches_creates_author_with_wildcard(
self, mock_site
):
"""This test helps ensure compatibility with production; we should not use this."""
self.add_three_existing_authors(mock_site)
author = {"name": "Mr. Blobby*"}
new_author_name = import_author(author)
assert author["name"] == new_author_name["name"]
def test_first_match_priority_name_and_dates(self, mock_site):
"""
Highest priority match is name, birth date, and death date.
"""
self.add_three_existing_authors(mock_site)
# Exact name match with no birth or death date
author = {
"name": "William H. Brewer",
"key": "/authors/OL3A",
"type": {"key": "/type/author"},
}
# An alternate name is an exact match.
author_alternate_name = {
"name": "William Brewer",
"key": "/authors/OL4A",
"alternate_names": ["William H. Brewer"],
"type": {"key": "/type/author"},
}
# Exact name, birth, and death date matches.
author_with_birth_and_death = {
"name": "William H. Brewer",
"key": "/authors/OL5A",
"type": {"key": "/type/author"},
"birth_date": "1829",
"death_date": "1910",
}
mock_site.save(author)
mock_site.save(author_alternate_name)
mock_site.save(author_with_birth_and_death)
# Look for exact match on author name and date.
searched_author = {
"name": "William H. Brewer",
"birth_date": "1829",
"death_date": "1910",
}
found = import_author(searched_author)
assert found.key == author_with_birth_and_death["key"]
def test_non_matching_birth_death_creates_new_author(self, mock_site):
"""
If a year in birth or death date isn't an exact match, create a new record,
other things being equal.
"""
author_with_birth_and_death = {
"name": "William H. Brewer",
"key": "/authors/OL3A",
"type": {"key": "/type/author"},
"birth_date": "1829",
"death_date": "1910",
}
mock_site.save(author_with_birth_and_death)
searched_and_not_found_author = {
"name": "William H. Brewer",
"birth_date": "1829",
"death_date": "1911",
}
found = import_author(searched_and_not_found_author)
assert isinstance(found, dict)
assert found["death_date"] == searched_and_not_found_author["death_date"]
def test_second_match_priority_alternate_names_and_dates(self, mock_site):
"""
Matching, as a unit, alternate name, birth date, and death date, get
second match priority.
"""
self.add_three_existing_authors(mock_site)
# No exact name match.
author = {
"name": "Фёдор Михайлович Достоевский",
"key": "/authors/OL3A",
"type": {"key": "/type/author"},
}
# Alternate name match with no birth or death date
author_alternate_name = {
"name": "Фёдор Михайлович Достоевский",
"key": "/authors/OL4A",
"alternate_names": ["Fyodor Dostoevsky"],
"type": {"key": "/type/author"},
}
# Alternate name match with matching birth and death date.
author_alternate_name_with_dates = {
"name": "Фёдор Михайлович Достоевский",
"key": "/authors/OL5A",
"alternate_names": ["Fyodor Dostoevsky"],
"type": {"key": "/type/author"},
"birth_date": "1821",
"death_date": "1881",
}
mock_site.save(author)
mock_site.save(author_alternate_name)
mock_site.save(author_alternate_name_with_dates)
searched_author = {
"name": "Fyodor Dostoevsky",
"birth_date": "1821",
"death_date": "1881",
}
found = import_author(searched_author)
assert isinstance(found, Author)
assert found.key == author_alternate_name_with_dates["key"]
def test_last_match_on_surname_and_dates(self, mock_site):
"""
The lowest priority match is an exact surname match + birth and death date matches.
"""
author = {
"name": "William Brewer",
"key": "/authors/OL3A",
"type": {"key": "/type/author"},
"birth_date": "1829",
"death_date": "1910",
}
mock_site.save(author)
searched_author = {
"name": "Mr. William H. brewer",
"birth_date": "1829",
"death_date": "1910",
}
found = import_author(searched_author)
assert found.key == author["key"]
# But non-exact birth/death date doesn't match.
searched_author = {
"name": "Mr. William H. brewer",
"birth_date": "1829",
"death_date": "1911",
}
found = import_author(searched_author)
# No match, so create a new author.
assert found == {
'type': {'key': '/type/author'},
'name': 'Mr. William H. brewer',
'birth_date': '1829',
'death_date': '1911',
}
def test_last_match_on_surname_and_dates_and_dates_are_required(self, mock_site):
"""
Like above, but ensure dates must exist for this match (so don't match on
falsy dates).
"""
author = {
"name": "William Brewer",
"key": "/authors/OL3A",
"type": {"key": "/type/author"},
}
mock_site.save(author)
searched_author = {
"name": "Mr. William J. Brewer",
}
found = import_author(searched_author)
# No match, so a new author is created.
assert found == {
'name': 'Mr. William J. Brewer',
'type': {'key': '/type/author'},
}
def test_birth_and_death_date_match_is_on_year_strings(self, mock_site):
"""
The lowest priority match is an exact surname match + birth and death date matches,
as shown above, but additionally, use only years on *both* sides, and only for four
digit years.
"""
author = {
"name": "William Brewer",
"key": "/authors/OL3A",
"type": {"key": "/type/author"},
"birth_date": "September 14th, 1829",
"death_date": "11/2/1910",
}
mock_site.save(author)
searched_author = {
"name": "Mr. William H. brewer",
"birth_date": "1829-09-14",
"death_date": "November 1910",
}
found = import_author(searched_author)
assert found.key == author["key"]
| 11,307 | Python | .py | 287 | 29.916376 | 93 | 0.56219 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
218 | test_add_book.py | internetarchive_openlibrary/openlibrary/catalog/add_book/tests/test_add_book.py | import os
import pytest
from datetime import datetime
from infogami.infobase.client import Nothing
from infogami.infobase.core import Text
from openlibrary.catalog import add_book
from openlibrary.catalog.add_book import (
build_pool,
editions_matched,
find_match,
IndependentlyPublished,
isbns_from_record,
load,
load_data,
normalize_import_record,
PublicationYearTooOld,
PublishedInFutureYear,
RequiredField,
should_overwrite_promise_item,
SourceNeedsISBN,
split_subtitle,
validate_record,
)
from openlibrary.catalog.marc.parse import read_edition
from openlibrary.catalog.marc.marc_binary import MarcBinary
def open_test_data(filename):
"""Returns a file handle to file with specified filename inside test_data directory."""
root = os.path.dirname(__file__)
fullpath = os.path.join(root, 'test_data', filename)
return open(fullpath, mode='rb')
@pytest.fixture
def ia_writeback(monkeypatch):
"""Prevent ia writeback from making live requests."""
monkeypatch.setattr(add_book, 'update_ia_metadata_for_ol_edition', lambda olid: {})
def test_isbns_from_record():
rec = {'title': 'test', 'isbn_13': ['9780190906764'], 'isbn_10': ['0190906766']}
result = isbns_from_record(rec)
assert isinstance(result, list)
assert '9780190906764' in result
assert '0190906766' in result
assert len(result) == 2
bookseller_titles = [
# Original title, title, subtitle
['Test Title', 'Test Title', None],
[
'Killers of the Flower Moon: The Osage Murders and the Birth of the FBI',
'Killers of the Flower Moon',
'The Osage Murders and the Birth of the FBI',
],
['Pachinko (National Book Award Finalist)', 'Pachinko', None],
['Trapped in a Video Game (Book 1) (Volume 1)', 'Trapped in a Video Game', None],
[
"An American Marriage (Oprah's Book Club): A Novel",
'An American Marriage',
'A Novel',
],
['A Növel (German Edition)', 'A Növel', None],
[
(
'Vietnam Travel Guide 2019: Ho Chi Minh City - First Journey : '
'10 Tips For an Amazing Trip'
),
'Vietnam Travel Guide 2019 : Ho Chi Minh City - First Journey',
'10 Tips For an Amazing Trip',
],
[
'Secrets of Adobe(r) Acrobat(r) 7. 150 Best Practices and Tips (Russian Edition)',
'Secrets of Adobe Acrobat 7. 150 Best Practices and Tips',
None,
],
[
(
'Last Days at Hot Slit: The Radical Feminism of Andrea Dworkin '
'(Semiotext(e) / Native Agents)'
),
'Last Days at Hot Slit',
'The Radical Feminism of Andrea Dworkin',
],
[
'Bloody Times: The Funeral of Abraham Lincoln and the Manhunt for Jefferson Davis',
'Bloody Times',
'The Funeral of Abraham Lincoln and the Manhunt for Jefferson Davis',
],
]
@pytest.mark.parametrize('full_title,title,subtitle', bookseller_titles)
def test_split_subtitle(full_title, title, subtitle):
assert split_subtitle(full_title) == (title, subtitle)
def test_editions_matched_no_results(mock_site):
rec = {'title': 'test', 'isbn_13': ['9780190906764'], 'isbn_10': ['0190906766']}
isbns = isbns_from_record(rec)
result = editions_matched(rec, 'isbn_', isbns)
# returns no results because there are no existing editions
assert result == []
def test_editions_matched(mock_site, add_languages, ia_writeback):
rec = {
'title': 'test',
'isbn_13': ['9780190906764'],
'isbn_10': ['0190906766'],
'source_records': ['test:001'],
}
load(rec)
isbns = isbns_from_record(rec)
result_10 = editions_matched(rec, 'isbn_10', '0190906766')
assert result_10 == ['/books/OL1M']
result_13 = editions_matched(rec, 'isbn_13', '9780190906764')
assert result_13 == ['/books/OL1M']
    # Searching on the 'isbn_' key prefix matches a record on either the isbn_10 or isbn_13 field.
result = editions_matched(rec, 'isbn_', isbns)
assert result == ['/books/OL1M']
def test_load_without_required_field():
    rec = {'ocaid': 'test_item'}
    pytest.raises(RequiredField, load, rec)
def test_load_test_item(mock_site, add_languages, ia_writeback):
rec = {
'ocaid': 'test_item',
'source_records': ['ia:test_item'],
'title': 'Test item',
'languages': ['eng'],
}
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'created'
e = mock_site.get(reply['edition']['key'])
assert e.type.key == '/type/edition'
assert e.title == 'Test item'
assert e.ocaid == 'test_item'
assert e.source_records == ['ia:test_item']
languages = e.languages
assert len(languages) == 1
assert languages[0].key == '/languages/eng'
assert reply['work']['status'] == 'created'
w = mock_site.get(reply['work']['key'])
assert w.title == 'Test item'
assert w.type.key == '/type/work'
def test_load_deduplicates_authors(mock_site, add_languages, ia_writeback):
"""
    Tests that authors are deduplicated before being added.
    This will only work if all the author dicts are identical;
    it is not certain that is the case when we get the data for import.
"""
rec = {
'ocaid': 'test_item',
'source_records': ['ia:test_item'],
'authors': [{'name': 'John Brown'}, {'name': 'John Brown'}],
'title': 'Test item',
'languages': ['eng'],
}
reply = load(rec)
assert reply['success'] is True
assert len(reply['authors']) == 1
def test_load_with_subjects(mock_site, ia_writeback):
rec = {
'ocaid': 'test_item',
'title': 'Test item',
'subjects': ['Protected DAISY', 'In library'],
'source_records': 'ia:test_item',
}
reply = load(rec)
assert reply['success'] is True
w = mock_site.get(reply['work']['key'])
assert w.title == 'Test item'
assert w.subjects == ['Protected DAISY', 'In library']
def test_load_with_new_author(mock_site, ia_writeback):
rec = {
'ocaid': 'test_item',
'title': 'Test item',
'authors': [{'name': 'John Döe'}],
'source_records': 'ia:test_item',
}
reply = load(rec)
assert reply['success'] is True
w = mock_site.get(reply['work']['key'])
assert reply['authors'][0]['status'] == 'created'
assert reply['authors'][0]['name'] == 'John Döe'
akey1 = reply['authors'][0]['key']
assert akey1 == '/authors/OL1A'
a = mock_site.get(akey1)
assert w.authors
assert a.type.key == '/type/author'
# Tests an existing author is modified if an Author match is found, and more data is provided
# This represents an edition of another work by the above author.
rec = {
'ocaid': 'test_item1b',
'title': 'Test item1b',
'authors': [{'name': 'Döe, John', 'entity_type': 'person'}],
'source_records': 'ia:test_item1b',
}
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'created'
assert reply['work']['status'] == 'created'
akey2 = reply['authors'][0]['key']
# TODO: There is no code that modifies an author if more data is provided.
# previously the status implied the record was always 'modified', when a match was found.
# assert reply['authors'][0]['status'] == 'modified'
# a = mock_site.get(akey2)
# assert 'entity_type' in a
# assert a.entity_type == 'person'
assert reply['authors'][0]['status'] == 'matched'
assert akey1 == akey2 == '/authors/OL1A'
# Tests same title with different ocaid and author is not overwritten
rec = {
'ocaid': 'test_item2',
'title': 'Test item',
'authors': [{'name': 'James Smith'}],
'source_records': 'ia:test_item2',
}
reply = load(rec)
akey3 = reply['authors'][0]['key']
assert akey3 == '/authors/OL2A'
assert reply['authors'][0]['status'] == 'created'
assert reply['work']['status'] == 'created'
assert reply['edition']['status'] == 'created'
w = mock_site.get(reply['work']['key'])
e = mock_site.get(reply['edition']['key'])
assert e.ocaid == 'test_item2'
assert len(w.authors) == 1
assert len(e.authors) == 1
def test_load_with_redirected_author(mock_site, add_languages):
"""Test importing existing editions without works
which have author redirects. A work should be created with
the final author.
"""
redirect_author = {
'type': {'key': '/type/redirect'},
'name': 'John Smith',
'key': '/authors/OL55A',
'location': '/authors/OL10A',
}
final_author = {
'type': {'key': '/type/author'},
'name': 'John Smith',
'key': '/authors/OL10A',
}
orphaned_edition = {
'title': 'Test item HATS',
'key': '/books/OL10M',
'publishers': ['TestPub'],
'publish_date': '1994',
'authors': [{'key': '/authors/OL55A'}],
'type': {'key': '/type/edition'},
}
mock_site.save(orphaned_edition)
mock_site.save(redirect_author)
mock_site.save(final_author)
rec = {
'title': 'Test item HATS',
'authors': [{'name': 'John Smith'}],
'publishers': ['TestPub'],
'publish_date': '1994',
'source_records': 'ia:test_redir_author',
}
reply = load(rec)
assert reply['edition']['status'] == 'modified'
assert reply['edition']['key'] == '/books/OL10M'
assert reply['work']['status'] == 'created'
e = mock_site.get(reply['edition']['key'])
assert e.authors[0].key == '/authors/OL10A'
w = mock_site.get(reply['work']['key'])
assert w.authors[0].author.key == '/authors/OL10A'
def test_duplicate_ia_book(mock_site, add_languages, ia_writeback):
rec = {
'ocaid': 'test_item',
'source_records': ['ia:test_item'],
'title': 'Test item',
'languages': ['eng'],
}
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'created'
e = mock_site.get(reply['edition']['key'])
assert e.type.key == '/type/edition'
assert e.source_records == ['ia:test_item']
rec = {
'ocaid': 'test_item',
'source_records': ['ia:test_item'],
# Titles MUST match to be considered the same
'title': 'Test item',
'languages': ['fre'],
}
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'matched'
class Test_From_MARC:
def test_from_marc_author(self, mock_site, add_languages):
ia = 'flatlandromanceo00abbouoft'
marc = MarcBinary(open_test_data(ia + '_meta.mrc').read())
rec = read_edition(marc)
rec['source_records'] = ['ia:' + ia]
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'created'
a = mock_site.get(reply['authors'][0]['key'])
assert a.type.key == '/type/author'
assert a.name == 'Edwin Abbott Abbott'
assert a.birth_date == '1838'
assert a.death_date == '1926'
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'matched'
@pytest.mark.parametrize(
'ia',
(
'coursepuremath00hardrich',
'roadstogreatness00gall',
'treatiseonhistor00dixo',
),
)
def test_from_marc(self, ia, mock_site, add_languages):
data = open_test_data(ia + '_meta.mrc').read()
assert len(data) == int(data[:5])
rec = read_edition(MarcBinary(data))
rec['source_records'] = ['ia:' + ia]
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'created'
e = mock_site.get(reply['edition']['key'])
assert e.type.key == '/type/edition'
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'matched'
def test_author_from_700(self, mock_site, add_languages):
ia = 'sexuallytransmit00egen'
data = open_test_data(ia + '_meta.mrc').read()
rec = read_edition(MarcBinary(data))
rec['source_records'] = ['ia:' + ia]
reply = load(rec)
assert reply['success'] is True
# author from 700
akey = reply['authors'][0]['key']
a = mock_site.get(akey)
assert a.type.key == '/type/author'
assert a.name == 'Laura K. Egendorf'
assert a.birth_date == '1973'
def test_from_marc_reimport_modifications(self, mock_site, add_languages):
src = 'v38.i37.records.utf8--16478504-1254'
marc = MarcBinary(open_test_data(src).read())
rec = read_edition(marc)
rec['source_records'] = ['marc:' + src]
reply = load(rec)
assert reply['success'] is True
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'matched'
src = 'v39.i28.records.utf8--5362776-1764'
marc = MarcBinary(open_test_data(src).read())
rec = read_edition(marc)
rec['source_records'] = ['marc:' + src]
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'modified'
def test_missing_ocaid(self, mock_site, add_languages, ia_writeback):
ia = 'descendantsofhug00cham'
src = ia + '_meta.mrc'
marc = MarcBinary(open_test_data(src).read())
rec = read_edition(marc)
rec['source_records'] = ['marc:testdata.mrc']
reply = load(rec)
assert reply['success'] is True
rec['source_records'] = ['ia:' + ia]
rec['ocaid'] = ia
reply = load(rec)
assert reply['success'] is True
e = mock_site.get(reply['edition']['key'])
assert e.ocaid == ia
assert 'ia:' + ia in e.source_records
def test_from_marc_fields(self, mock_site, add_languages):
ia = 'isbn_9781419594069'
data = open_test_data(ia + '_meta.mrc').read()
rec = read_edition(MarcBinary(data))
rec['source_records'] = ['ia:' + ia]
reply = load(rec)
assert reply['success'] is True
# author from 100
assert reply['authors'][0]['name'] == 'Adam Weiner'
edition = mock_site.get(reply['edition']['key'])
# Publish place, publisher, & publish date - 260$a, $b, $c
assert edition['publishers'][0] == 'Kaplan Publishing'
assert edition['publish_date'] == '2007'
assert edition['publish_places'][0] == 'New York'
# Pagination 300
assert edition['number_of_pages'] == 264
assert edition['pagination'] == 'viii, 264 p.'
# 8 subjects, 650
assert len(edition['subjects']) == 8
assert sorted(edition['subjects']) == [
'Action and adventure films',
'Cinematography',
'Miscellanea',
'Physics',
'Physics in motion pictures',
'Popular works',
'Science fiction films',
'Special effects',
]
# Edition description from 520
desc = (
'Explains the basic laws of physics, covering such topics '
'as mechanics, forces, and energy, while deconstructing '
'famous scenes and stunts from motion pictures, including '
'"Apollo 13" and "Titanic," to determine if they are possible.'
)
assert isinstance(edition['description'], Text)
assert edition['description'] == desc
# Work description from 520
work = mock_site.get(reply['work']['key'])
assert isinstance(work['description'], Text)
assert work['description'] == desc
def test_build_pool(mock_site):
assert build_pool({'title': 'test'}) == {}
etype = '/type/edition'
ekey = mock_site.new_key(etype)
e = {
'title': 'test',
'type': {'key': etype},
'lccn': ['123'],
'oclc_numbers': ['456'],
'ocaid': 'test00test',
'key': ekey,
}
mock_site.save(e)
pool = build_pool(e)
assert pool == {
'lccn': ['/books/OL1M'],
'oclc_numbers': ['/books/OL1M'],
'title': ['/books/OL1M'],
'ocaid': ['/books/OL1M'],
}
pool = build_pool(
{
'lccn': ['234'],
'oclc_numbers': ['456'],
'title': 'test',
'ocaid': 'test00test',
}
)
assert pool == {
'oclc_numbers': ['/books/OL1M'],
'title': ['/books/OL1M'],
'ocaid': ['/books/OL1M'],
}
def test_load_multiple(mock_site):
rec = {
'title': 'Test item',
'lccn': ['123'],
'source_records': ['ia:test_item'],
'authors': [{'name': 'Smith, John', 'birth_date': '1980'}],
}
reply = load(rec)
assert reply['success'] is True
ekey1 = reply['edition']['key']
reply = load(rec)
assert reply['success'] is True
ekey2 = reply['edition']['key']
assert ekey1 == ekey2
reply = load(
{'title': 'Test item', 'source_records': ['ia:test_item2'], 'lccn': ['456']}
)
assert reply['success'] is True
ekey3 = reply['edition']['key']
assert ekey3 != ekey1
reply = load(rec)
assert reply['success'] is True
ekey4 = reply['edition']['key']
assert ekey1 == ekey2 == ekey4
def test_extra_author(mock_site, add_languages):
mock_site.save(
{
"name": "Hubert Howe Bancroft",
"death_date": "1918.",
"alternate_names": ["HUBERT HOWE BANCROFT", "Hubert Howe Bandcroft"],
"key": "/authors/OL563100A",
"birth_date": "1832",
"personal_name": "Hubert Howe Bancroft",
"type": {"key": "/type/author"},
}
)
mock_site.save(
{
"title": "The works of Hubert Howe Bancroft",
"covers": [6060295, 5551343],
"first_sentence": {
"type": "/type/text",
"value": (
"When it first became known to Europe that a new continent had "
"been discovered, the wise men, philosophers, and especially the "
"learned ecclesiastics, were sorely perplexed to account for such "
"a discovery.",
),
},
"subject_places": [
"Alaska",
"America",
"Arizona",
"British Columbia",
"California",
"Canadian Northwest",
"Central America",
"Colorado",
"Idaho",
"Mexico",
"Montana",
"Nevada",
"New Mexico",
"Northwest Coast of North America",
"Northwest boundary of the United States",
"Oregon",
"Pacific States",
"Texas",
"United States",
"Utah",
"Washington (State)",
"West (U.S.)",
"Wyoming",
],
"excerpts": [
{
"excerpt": (
"When it first became known to Europe that a new continent "
"had been discovered, the wise men, philosophers, and "
"especially the learned ecclesiastics, were sorely perplexed "
"to account for such a discovery."
)
}
],
"first_publish_date": "1882",
"key": "/works/OL3421434W",
"authors": [
{
"type": {"key": "/type/author_role"},
"author": {"key": "/authors/OL563100A"},
}
],
"subject_times": [
"1540-1810",
"1810-1821",
"1821-1861",
"1821-1951",
"1846-1850",
"1850-1950",
"1859-",
"1859-1950",
"1867-1910",
"1867-1959",
"1871-1903",
"Civil War, 1861-1865",
"Conquest, 1519-1540",
"European intervention, 1861-1867",
"Spanish colony, 1540-1810",
"To 1519",
"To 1821",
"To 1846",
"To 1859",
"To 1867",
"To 1871",
"To 1889",
"To 1912",
"Wars of Independence, 1810-1821",
],
"type": {"key": "/type/work"},
"subjects": [
"Antiquities",
"Archaeology",
"Autobiography",
"Bibliography",
"California Civil War, 1861-1865",
"Comparative Literature",
"Comparative civilization",
"Courts",
"Description and travel",
"Discovery and exploration",
"Early accounts to 1600",
"English essays",
"Ethnology",
"Foreign relations",
"Gold discoveries",
"Historians",
"History",
"Indians",
"Indians of Central America",
"Indians of Mexico",
"Indians of North America",
"Languages",
"Law",
"Mayas",
"Mexican War, 1846-1848",
"Nahuas",
"Nahuatl language",
"Oregon question",
"Political aspects of Law",
"Politics and government",
"Religion and mythology",
"Religions",
"Social life and customs",
"Spanish",
"Vigilance committees",
"Writing",
"Zamorano 80",
"Accessible book",
"Protected DAISY",
],
}
)
ia = 'workshuberthowe00racegoog'
src = ia + '_meta.mrc'
marc = MarcBinary(open_test_data(src).read())
rec = read_edition(marc)
rec['source_records'] = ['ia:' + ia]
reply = load(rec)
assert reply['success'] is True
w = mock_site.get(reply['work']['key'])
reply = load(rec)
assert reply['success'] is True
w = mock_site.get(reply['work']['key'])
assert len(w['authors']) == 1
def test_missing_source_records(mock_site, add_languages):
mock_site.save(
{
'key': '/authors/OL592898A',
'name': 'Michael Robert Marrus',
'personal_name': 'Michael Robert Marrus',
'type': {'key': '/type/author'},
}
)
mock_site.save(
{
'authors': [
{'author': '/authors/OL592898A', 'type': {'key': '/type/author_role'}}
],
'key': '/works/OL16029710W',
'subjects': [
'Nuremberg Trial of Major German War Criminals, Nuremberg, Germany, 1945-1946',
'Protected DAISY',
'Lending library',
],
'title': 'The Nuremberg war crimes trial, 1945-46',
'type': {'key': '/type/work'},
}
)
mock_site.save(
{
"number_of_pages": 276,
"subtitle": "a documentary history",
"series": ["The Bedford series in history and culture"],
"covers": [6649715, 3865334, 173632],
"lc_classifications": ["D804.G42 N87 1997"],
"ocaid": "nurembergwarcrim00marr",
"contributions": ["Marrus, Michael Robert."],
"uri_descriptions": ["Book review (H-Net)"],
"title": "The Nuremberg war crimes trial, 1945-46",
"languages": [{"key": "/languages/eng"}],
"subjects": [
"Nuremberg Trial of Major German War Criminals, Nuremberg, Germany, 1945-1946"
],
"publish_country": "mau",
"by_statement": "[compiled by] Michael R. Marrus.",
"type": {"key": "/type/edition"},
"uris": ["http://www.h-net.org/review/hrev-a0a6c9-aa"],
"publishers": ["Bedford Books"],
"ia_box_id": ["IA127618"],
"key": "/books/OL1023483M",
"authors": [{"key": "/authors/OL592898A"}],
"publish_places": ["Boston"],
"pagination": "xi, 276 p. :",
"lccn": ["96086777"],
"notes": {
"type": "/type/text",
"value": "Includes bibliographical references (p. 262-268) and index.",
},
"identifiers": {"goodreads": ["326638"], "librarything": ["1114474"]},
"url": ["http://www.h-net.org/review/hrev-a0a6c9-aa"],
"isbn_10": ["031216386X", "0312136919"],
"publish_date": "1997",
"works": [{"key": "/works/OL16029710W"}],
}
)
ia = 'nurembergwarcrim1997marr'
src = ia + '_meta.mrc'
marc = MarcBinary(open_test_data(src).read())
rec = read_edition(marc)
rec['source_records'] = ['ia:' + ia]
reply = load(rec)
assert reply['success'] is True
e = mock_site.get(reply['edition']['key'])
assert 'source_records' in e
def test_no_extra_author(mock_site, add_languages):
author = {
"name": "Paul Michael Boothe",
"key": "/authors/OL1A",
"type": {"key": "/type/author"},
}
mock_site.save(author)
work = {
"title": "A Separate Pension Plan for Alberta",
"covers": [1644794],
"key": "/works/OL1W",
"authors": [{"type": "/type/author_role", "author": {"key": "/authors/OL1A"}}],
"type": {"key": "/type/work"},
}
mock_site.save(work)
edition = {
"number_of_pages": 90,
"subtitle": "Analysis and Discussion (Western Studies in Economic Policy, No. 5)",
"weight": "6.2 ounces",
"covers": [1644794],
"latest_revision": 6,
"title": "A Separate Pension Plan for Alberta",
"languages": [{"key": "/languages/eng"}],
"subjects": [
"Economics",
"Alberta",
"Political Science / State & Local Government",
"Government policy",
"Old age pensions",
"Pensions",
"Social security",
],
"type": {"key": "/type/edition"},
"physical_dimensions": "9 x 6 x 0.2 inches",
"publishers": ["The University of Alberta Press"],
"physical_format": "Paperback",
"key": "/books/OL1M",
"authors": [{"key": "/authors/OL1A"}],
"identifiers": {"goodreads": ["4340973"], "librarything": ["5580522"]},
"isbn_13": ["9780888643513"],
"isbn_10": ["0888643519"],
"publish_date": "May 1, 2000",
"works": [{"key": "/works/OL1W"}],
}
mock_site.save(edition)
src = 'v39.i34.records.utf8--186503-1413'
marc = MarcBinary(open_test_data(src).read())
rec = read_edition(marc)
rec['source_records'] = ['marc:' + src]
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'modified'
assert reply['work']['status'] == 'modified'
assert 'authors' not in reply
assert reply['edition']['key'] == edition['key']
assert reply['work']['key'] == work['key']
e = mock_site.get(reply['edition']['key'])
w = mock_site.get(reply['work']['key'])
assert 'source_records' in e
assert 'subjects' in w
assert len(e['authors']) == 1
assert len(w['authors']) == 1
def test_same_twice(mock_site, add_languages):
rec = {
'source_records': ['ia:test_item'],
"publishers": ["Ten Speed Press"],
"pagination": "20 p.",
"description": (
"A macabre mash-up of the children's classic Pat the Bunny and the "
"present-day zombie phenomenon, with the tactile features of the original "
"book revoltingly re-imagined for an adult audience.",
),
"title": "Pat The Zombie",
"isbn_13": ["9781607740360"],
"languages": ["eng"],
"isbn_10": ["1607740362"],
"authors": [
{
"entity_type": "person",
"name": "Aaron Ximm",
"personal_name": "Aaron Ximm",
}
],
"contributions": ["Kaveh Soofi (Illustrator)"],
}
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'created'
assert reply['work']['status'] == 'created'
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'matched'
assert reply['work']['status'] == 'matched'
def test_existing_work(mock_site, add_languages):
author = {
'type': {'key': '/type/author'},
'name': 'John Smith',
'key': '/authors/OL20A',
}
existing_work = {
'authors': [{'author': '/authors/OL20A', 'type': {'key': '/type/author_role'}}],
'key': '/works/OL16W',
'title': 'Finding existing works',
'type': {'key': '/type/work'},
}
mock_site.save(author)
mock_site.save(existing_work)
rec = {
'source_records': 'non-marc:test',
'title': 'Finding Existing Works',
'authors': [{'name': 'John Smith'}],
'publishers': ['Black Spot'],
'publish_date': 'Jan 09, 2011',
'isbn_10': ['1250144051'],
}
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'created'
assert reply['work']['status'] == 'matched'
assert reply['work']['key'] == '/works/OL16W'
assert reply['authors'][0]['status'] == 'matched'
e = mock_site.get(reply['edition']['key'])
assert e.works[0]['key'] == '/works/OL16W'
def test_existing_work_with_subtitle(mock_site, add_languages):
author = {
'type': {'key': '/type/author'},
'name': 'John Smith',
'key': '/authors/OL20A',
}
existing_work = {
'authors': [{'author': '/authors/OL20A', 'type': {'key': '/type/author_role'}}],
'key': '/works/OL16W',
'title': 'Finding existing works',
'type': {'key': '/type/work'},
}
mock_site.save(author)
mock_site.save(existing_work)
rec = {
'source_records': 'non-marc:test',
'title': 'Finding Existing Works',
'subtitle': 'the ongoing saga!',
'authors': [{'name': 'John Smith'}],
'publishers': ['Black Spot'],
'publish_date': 'Jan 09, 2011',
'isbn_10': ['1250144051'],
}
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'created'
assert reply['work']['status'] == 'matched'
assert reply['work']['key'] == '/works/OL16W'
assert reply['authors'][0]['status'] == 'matched'
e = mock_site.get(reply['edition']['key'])
assert e.works[0]['key'] == '/works/OL16W'
def test_subtitle_gets_split_from_title(mock_site) -> None:
"""
Ensures that if there is a subtitle (designated by a colon) in the title
that it is split and put into the subtitle field.
"""
rec = {
'source_records': 'non-marc:test',
'title': 'Work with a subtitle: not yet split',
'publishers': ['Black Spot'],
'publish_date': 'Jan 09, 2011',
'isbn_10': ['1250144051'],
}
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'created'
assert reply['work']['status'] == 'created'
assert reply['work']['key'] == '/works/OL1W'
e = mock_site.get(reply['edition']['key'])
assert e.works[0]['title'] == "Work with a subtitle"
assert isinstance(
e.works[0]['subtitle'], Nothing
) # FIX: this is presumably a bug. See `new_work` not assigning 'subtitle'
assert e['title'] == "Work with a subtitle"
assert e['subtitle'] == "not yet split"
# This documents the fact that titles DO NOT have trailing periods stripped (at this point)
def test_title_with_trailing_period_is_not_stripped() -> None:
rec = {
'source_records': 'non-marc:test',
'title': 'Title with period.',
}
normalize_import_record(rec)
assert rec['title'] == 'Title with period.'
def test_find_match_is_used_when_looking_for_edition_matches(mock_site) -> None:
"""
This tests the case where there is an edition_pool, but `find_quick_match()`
finds no matches. This should return a match from `find_threshold_match()`.
This also indirectly tests `add_book.match.editions_match()`
"""
author = {
'type': {'key': '/type/author'},
'name': 'John Smith',
'key': '/authors/OL20A',
}
existing_work = {
'authors': [
{'author': {'key': '/authors/OL20A'}, 'type': {'key': '/type/author_role'}}
],
'key': '/works/OL16W',
'title': 'Finding Existing',
'subtitle': 'sub',
'type': {'key': '/type/work'},
}
existing_edition_1 = {
'key': '/books/OL16M',
'title': 'Finding Existing',
'subtitle': 'sub',
'publishers': ['Black Spot'],
'type': {'key': '/type/edition'},
'source_records': ['non-marc:test'],
'works': [{'key': '/works/OL16W'}],
}
existing_edition_2 = {
'key': '/books/OL17M',
'source_records': ['non-marc:test'],
'title': 'Finding Existing',
'subtitle': 'sub',
'publishers': ['Black Spot'],
'type': {'key': '/type/edition'},
'publish_country': 'usa',
'publish_date': 'Jan 09, 2011',
'works': [{'key': '/works/OL16W'}],
}
mock_site.save(author)
mock_site.save(existing_work)
mock_site.save(existing_edition_1)
mock_site.save(existing_edition_2)
rec = {
'source_records': ['non-marc:test'],
'title': 'Finding Existing',
'subtitle': 'sub',
'authors': [{'name': 'John Smith'}],
'publishers': ['Black Spot substring match'],
'publish_date': 'Jan 09, 2011',
'isbn_10': ['1250144051'],
'publish_country': 'usa',
}
reply = load(rec)
assert reply['edition']['key'] == '/books/OL17M'
e = mock_site.get(reply['edition']['key'])
assert e['key'] == '/books/OL17M'
def test_covers_are_added_to_edition(mock_site, monkeypatch) -> None:
"""Ensures a cover from rec is added to a matched edition."""
author = {
'type': {'key': '/type/author'},
'name': 'John Smith',
'key': '/authors/OL20A',
}
existing_work = {
'authors': [
{'author': {'key': '/authors/OL20A'}, 'type': {'key': '/type/author_role'}}
],
'key': '/works/OL16W',
'title': 'Covers',
'type': {'key': '/type/work'},
}
existing_edition = {
'key': '/books/OL16M',
'title': 'Covers',
'publishers': ['Black Spot'],
# TODO: only matches if the date is exact. 2011 != Jan 09, 2011
#'publish_date': '2011',
'publish_date': 'Jan 09, 2011',
'type': {'key': '/type/edition'},
'source_records': ['non-marc:test'],
'works': [{'key': '/works/OL16W'}],
}
mock_site.save(author)
mock_site.save(existing_work)
mock_site.save(existing_edition)
rec = {
'source_records': ['non-marc:test'],
'title': 'Covers',
'authors': [{'name': 'John Smith'}],
'publishers': ['Black Spot'],
'publish_date': 'Jan 09, 2011',
'cover': 'https://www.covers.org/cover.jpg',
}
monkeypatch.setattr(add_book, "add_cover", lambda _, __, account_key: 1234)
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'modified'
e = mock_site.get(reply['edition']['key'])
assert e['covers'] == [1234]
def test_add_description_to_work(mock_site) -> None:
"""
Ensure that if an edition has a description, and the associated work does
not, that the edition's description is added to the work.
"""
author = {
'type': {'key': '/type/author'},
'name': 'John Smith',
'key': '/authors/OL20A',
}
existing_work = {
'authors': [{'author': '/authors/OL20A', 'type': {'key': '/type/author_role'}}],
'key': '/works/OL16W',
'title': 'Finding Existing Works',
'type': {'key': '/type/work'},
}
existing_edition = {
'key': '/books/OL16M',
'title': 'Finding Existing Works',
'publishers': ['Black Spot'],
'type': {'key': '/type/edition'},
'source_records': ['non-marc:test'],
'publish_date': 'Jan 09, 2011',
'isbn_10': ['1250144051'],
'works': [{'key': '/works/OL16W'}],
'description': 'An added description from an existing edition',
}
mock_site.save(author)
mock_site.save(existing_work)
mock_site.save(existing_edition)
rec = {
'source_records': 'non-marc:test',
'title': 'Finding Existing Works',
'authors': [{'name': 'John Smith'}],
'publishers': ['Black Spot'],
'publish_date': 'Jan 09, 2011',
'isbn_10': ['1250144051'],
}
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'matched'
assert reply['work']['status'] == 'modified'
assert reply['work']['key'] == '/works/OL16W'
e = mock_site.get(reply['edition']['key'])
assert e.works[0]['key'] == '/works/OL16W'
assert e.works[0]['description'] == 'An added description from an existing edition'
def test_add_subjects_to_work_deduplicates(mock_site) -> None:
"""
Ensure a rec's subjects, after a case insensitive check, are added to an
existing Work if not already present.
"""
author = {
'type': {'key': '/type/author'},
'name': 'John Smith',
'key': '/authors/OL1A',
}
existing_work = {
'authors': [{'author': '/authors/OL1A', 'type': {'key': '/type/author_role'}}],
'key': '/works/OL1W',
'subjects': ['granite', 'GRANITE', 'Straße', 'ΠΑΡΆΔΕΙΣΟΣ'],
'title': 'Some Title',
'type': {'key': '/type/work'},
}
existing_edition = {
'key': '/books/OL1M',
'title': 'Some Title',
'publishers': ['Black Spot'],
'type': {'key': '/type/edition'},
'source_records': ['non-marc:test'],
'publish_date': 'Jan 09, 2011',
'isbn_10': ['1250144051'],
'works': [{'key': '/works/OL1W'}],
}
mock_site.save(author)
mock_site.save(existing_work)
mock_site.save(existing_edition)
rec = {
'authors': [{'name': 'John Smith'}],
'isbn_10': ['1250144051'],
'publish_date': 'Jan 09, 2011',
'publishers': ['Black Spot'],
'source_records': 'non-marc:test',
'subjects': [
'granite',
'Granite',
'SANDSTONE',
'sandstone',
'strasse',
'παράδεισος',
],
'title': 'Some Title',
}
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'matched'
assert reply['work']['status'] == 'modified'
assert reply['work']['key'] == '/works/OL1W'
w = mock_site.get(reply['work']['key'])
def get_casefold(item_list: list[str]):
return [item.casefold() for item in item_list]
expected = ['granite', 'Straße', 'ΠΑΡΆΔΕΙΣΟΣ', 'sandstone']
got = w.subjects
assert get_casefold(got) == get_casefold(expected)
def test_add_identifiers_to_edition(mock_site) -> None:
"""
Ensure a rec's identifiers that are not present in a matched edition are
added to that matched edition.
"""
author = {
'type': {'key': '/type/author'},
'name': 'John Smith',
'key': '/authors/OL20A',
}
existing_work = {
'authors': [{'author': '/authors/OL20A', 'type': {'key': '/type/author_role'}}],
'key': '/works/OL19W',
'title': 'Finding Existing Works',
'type': {'key': '/type/work'},
}
existing_edition = {
'key': '/books/OL19M',
'title': 'Finding Existing Works',
'publishers': ['Black Spot'],
'type': {'key': '/type/edition'},
'source_records': ['non-marc:test'],
'publish_date': 'Jan 09, 2011',
'isbn_10': ['1250144051'],
'works': [{'key': '/works/OL19W'}],
}
mock_site.save(author)
mock_site.save(existing_work)
mock_site.save(existing_edition)
rec = {
'source_records': 'non-marc:test',
'title': 'Finding Existing Works',
'authors': [{'name': 'John Smith'}],
'publishers': ['Black Spot'],
'publish_date': 'Jan 09, 2011',
'isbn_10': ['1250144051'],
'identifiers': {'goodreads': ['1234'], 'librarything': ['5678']},
}
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'modified'
assert reply['work']['status'] == 'matched'
assert reply['work']['key'] == '/works/OL19W'
e = mock_site.get(reply['edition']['key'])
assert e.works[0]['key'] == '/works/OL19W'
assert e.identifiers._data == {'goodreads': ['1234'], 'librarything': ['5678']}
def test_adding_list_field_items_to_edition_deduplicates_input(mock_site) -> None:
"""
Ensure a rec's edition_list_fields that are not present in a matched
edition are added to that matched edition.
"""
author = {
'type': {'key': '/type/author'},
'name': 'John Smith',
'key': '/authors/OL1A',
}
existing_work = {
'authors': [{'author': '/authors/OL1A', 'type': {'key': '/type/author_role'}}],
'key': '/works/OL1W',
'title': 'Some Title',
'type': {'key': '/type/work'},
}
existing_edition = {
'isbn_10': ['1250144051'],
'key': '/books/OL1M',
'lccn': ['agr25000003'],
'publish_date': 'Jan 09, 2011',
'publishers': ['Black Spot'],
'source_records': ['non-marc:test'],
'title': 'Some Title',
'type': {'key': '/type/edition'},
'works': [{'key': '/works/OL1W'}],
}
mock_site.save(author)
mock_site.save(existing_work)
mock_site.save(existing_edition)
rec = {
'authors': [{'name': 'John Smith'}],
'isbn_10': ['1250144051'],
'lccn': ['AGR25000003', 'AGR25-3'],
'publish_date': 'Jan 09, 2011',
'publishers': ['Black Spot', 'Second Publisher'],
'source_records': ['NON-MARC:TEST', 'ia:someid'],
'title': 'Some Title',
}
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'modified'
assert reply['work']['status'] == 'matched'
assert reply['work']['key'] == '/works/OL1W'
e = mock_site.get(reply['edition']['key'])
assert e.works[0]['key'] == '/works/OL1W'
assert e.lccn == ['agr25000003']
assert e.source_records == ['non-marc:test', 'ia:someid']
@pytest.mark.parametrize(
'name, rec, error',
[
(
"Books prior to 1400 CANNOT be imported if from a bookseller requiring additional validation",
{
'title': 'a book',
'source_records': ['amazon:123'],
'publish_date': '1399',
'isbn_10': ['1234567890'],
},
PublicationYearTooOld,
),
(
"Books published on or after 1400 CE+ can be imported from any source",
{
'title': 'a book',
'source_records': ['amazon:123'],
'publish_date': '1400',
'isbn_10': ['1234567890'],
},
None,
),
(
"Trying to import a book from a future year raises an error",
{'title': 'a book', 'source_records': ['ia:ocaid'], 'publish_date': '3000'},
PublishedInFutureYear,
),
(
"Independently published books CANNOT be imported",
{
'title': 'a book',
'source_records': ['ia:ocaid'],
'publishers': ['Independently Published'],
},
IndependentlyPublished,
),
(
"Non-independently published books can be imported",
{
'title': 'a book',
'source_records': ['ia:ocaid'],
'publishers': ['Best Publisher'],
},
None,
),
(
"Import sources that require an ISBN CANNOT be imported without an ISBN",
{'title': 'a book', 'source_records': ['amazon:amazon_id'], 'isbn_10': []},
SourceNeedsISBN,
),
(
"Can import sources that require an ISBN and have ISBN",
{
'title': 'a book',
'source_records': ['amazon:amazon_id'],
'isbn_10': ['1234567890'],
},
None,
),
(
"Can import from sources that don't require an ISBN",
{'title': 'a book', 'source_records': ['ia:wheeee'], 'isbn_10': []},
None,
),
],
)
def test_validate_record(name, rec, error) -> None:
if error:
with pytest.raises(error):
validate_record(rec)
else:
assert validate_record(rec) is None, f"Test failed: {name}" # type: ignore [func-returns-value]
def test_reimport_updates_edition_and_work_description(mock_site) -> None:
author = {
'type': {'key': '/type/author'},
'name': 'John Smith',
'key': '/authors/OL1A',
}
existing_work = {
'authors': [{'author': '/authors/OL1A', 'type': {'key': '/type/author_role'}}],
'key': '/works/OL1W',
'title': 'A Good Book',
'type': {'key': '/type/work'},
}
existing_edition = {
'key': '/books/OL1M',
'title': 'A Good Book',
'publishers': ['Black Spot'],
'type': {'key': '/type/edition'},
'source_records': ['ia:someocaid'],
'publish_date': 'Jan 09, 2011',
'isbn_10': ['1234567890'],
'works': [{'key': '/works/OL1W'}],
}
mock_site.save(author)
mock_site.save(existing_work)
mock_site.save(existing_edition)
rec = {
'source_records': 'ia:someocaid',
'title': 'A Good Book',
'authors': [{'name': 'John Smith'}],
'publishers': ['Black Spot'],
'publish_date': 'Jan 09, 2011',
'isbn_10': ['1234567890'],
'description': 'A genuinely enjoyable read.',
}
reply = load(rec)
assert reply['success'] is True
assert reply['edition']['status'] == 'modified'
assert reply['work']['status'] == 'modified'
assert reply['work']['key'] == '/works/OL1W'
edition = mock_site.get(reply['edition']['key'])
work = mock_site.get(reply['work']['key'])
assert edition.description == "A genuinely enjoyable read."
assert work.description == "A genuinely enjoyable read."
@pytest.mark.parametrize(
"name, edition, marc, expected",
[
(
"Overwrites revision 1 promise items with MARC data",
{'revision': 1, 'source_records': ['promise:bwb_daily_pallets_2022-03-17']},
True,
True,
),
(
"Doesn't overwrite rev 1 promise items WITHOUT MARC data",
{'revision': 1, 'source_records': ['promise:bwb_daily_pallets_2022-03-17']},
False,
False,
),
(
"Doesn't overwrite non-revision 1 promise items",
{'revision': 2, 'source_records': ['promise:bwb_daily_pallets_2022-03-17']},
True,
False,
),
(
"Doesn't overwrite revision 1 NON-promise items",
{'revision': 1, 'source_records': ['ia:test']},
True,
False,
),
(
"Can handle editions with an empty source record",
{'revision': 1, 'source_records': ['']},
True,
False,
),
("Can handle editions without a source record", {'revision': 1}, True, False),
(
"Can handle editions without a revision",
{'source_records': ['promise:bwb_daily_pallets_2022-03-17']},
True,
False,
),
],
)
def test_overwrite_if_rev1_promise_item(name, edition, marc, expected) -> None:
"""
Specifically unit test the function that determines if a promise
item should be overwritten.
"""
result = should_overwrite_promise_item(edition=edition, from_marc_record=marc)
assert (
result == expected
), f"Test {name} failed. Expected {expected}, but got {result}"
@pytest.fixture
def setup_load_data(mock_site):
existing_author = {
'key': '/authors/OL1A',
'name': 'John Smith',
'type': {'key': '/type/author'},
}
existing_work = {
'authors': [{'author': '/authors/OL1A', 'type': {'key': '/type/author_role'}}],
'key': '/works/OL1W',
'title': 'Finding Existing Works',
'type': {'key': '/type/work'},
}
existing_edition = {
'isbn_10': ['1234567890'],
'key': '/books/OL1M',
'publish_date': 'Jan 1st, 3000',
'publishers': ['BOOK BOOK BOOK'],
'source_records': ['promise:bwb_daily_pallets_2022-03-17'],
'title': 'Originally A Promise Item',
'type': {'key': '/type/edition'},
'works': [{'key': '/works/OL1W'}],
}
incoming_rec = {
'authors': [{'name': 'John Smith'}],
'description': 'A really fun book.',
'dewey_decimal_class': ['853.92'],
'identifiers': {'goodreads': ['1234'], 'librarything': ['5678']},
'isbn_10': ['1234567890'],
'ocaid': 'newlyscannedpromiseitem',
'publish_country': 'fr',
'publish_date': '2017',
'publish_places': ['Paris'],
'publishers': ['Gallimard'],
'series': ['Folio, Policier : roman noir -- 820'],
'source_records': ['ia:newlyscannedpromiseitem'],
'title': 'Originally A Promise Item',
'translated_from': ['yid'],
}
mock_site.save(existing_author)
mock_site.save(existing_work)
mock_site.save(existing_edition)
return incoming_rec
class TestLoadDataWithARev1PromiseItem:
"""
Test the process of overwriting a rev1 promise item by passing it, and
an incoming record with MARC data, to load_data.
"""
def test_passing_edition_to_load_data_overwrites_edition_with_rec_data(
self, mock_site, add_languages, ia_writeback, setup_load_data
) -> None:
rec: dict = setup_load_data
edition = mock_site.get('/books/OL1M')
reply = load_data(rec=rec, existing_edition=edition)
assert reply['edition']['status'] == 'modified'
assert reply['success'] is True
assert reply['work']['key'] == '/works/OL1W'
assert reply['work']['status'] == 'matched'
edition = mock_site.get(reply['edition']['key'])
assert edition.dewey_decimal_class == ['853.92']
assert edition.publish_date == '2017'
assert edition.publish_places == ['Paris']
assert edition.publishers == ['Gallimard']
assert edition.series == ['Folio, Policier : roman noir -- 820']
assert edition.source_records == [
'promise:bwb_daily_pallets_2022-03-17',
'ia:newlyscannedpromiseitem',
]
assert edition.works[0]['key'] == '/works/OL1W'
class TestNormalizeImportRecord:
@pytest.mark.parametrize(
'year, expected',
[
("2000-11-11", True),
(str(datetime.now().year), True),
(str(datetime.now().year + 1), False),
("9999-01-01", False),
],
)
def test_future_publication_dates_are_deleted(self, year, expected):
"""It should be impossible to import books publish_date in a future year."""
rec = {
'title': 'test book',
'source_records': ['ia:blob'],
'publish_date': year,
}
normalize_import_record(rec=rec)
result = 'publish_date' in rec
assert result == expected
@pytest.mark.parametrize(
'rec, expected',
[
(
{
'title': 'first title',
'source_records': ['ia:someid'],
'publishers': ['????'],
'authors': [{'name': 'an author'}],
'publish_date': '2000',
},
{
'title': 'first title',
'source_records': ['ia:someid'],
'authors': [{'name': 'an author'}],
'publish_date': '2000',
},
),
(
{
'title': 'second title',
'source_records': ['ia:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
'publish_date': '2000',
},
{
'title': 'second title',
'source_records': ['ia:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
'publish_date': '2000',
},
),
],
)
def test_dummy_data_to_satisfy_parse_data_is_removed(self, rec, expected):
normalize_import_record(rec=rec)
assert rec == expected
@pytest.mark.parametrize(
["rec", "expected"],
[
(
# 1900 publication from non AMZ/BWB is okay.
{
'title': 'a title',
'source_records': ['ia:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
'publish_date': '1900',
},
{
'title': 'a title',
'source_records': ['ia:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
'publish_date': '1900',
},
),
(
# 1900 publication from AMZ disappears.
{
'title': 'a title',
'source_records': ['amazon:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
'publish_date': '1900',
},
{
'title': 'a title',
'source_records': ['amazon:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
},
),
(
# 1900 publication from bwb item disappears.
{
'title': 'a title',
'source_records': ['bwb:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
'publish_date': '1900',
},
{
'title': 'a title',
'source_records': ['bwb:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
},
),
(
# 1900 publication from promise item disappears.
{
'title': 'a title',
'source_records': ['promise:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
'publish_date': 'January 1, 1900',
},
{
'title': 'a title',
'source_records': ['promise:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
},
),
(
# An otherwise valid date from AMZ is okay.
{
'title': 'a title',
'source_records': ['amazon:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
'publish_date': 'January 2, 1900',
},
{
'title': 'a title',
'source_records': ['amazon:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
'publish_date': 'January 2, 1900',
},
),
(
# An otherwise valid date from promise is okay.
{
'title': 'a title',
'source_records': ['promise:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
'publish_date': 'January 2, 1900',
},
{
'title': 'a title',
'source_records': ['promise:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
'publish_date': 'January 2, 1900',
},
),
(
# Handle records without publish_date.
{
'title': 'a title',
'source_records': ['promise:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
},
{
'title': 'a title',
'source_records': ['promise:someid'],
'publishers': ['a publisher'],
'authors': [{'name': 'an author'}],
},
),
],
)
def test_year_1900_removed_from_amz_and_bwb_promise_items(self, rec, expected):
"""
A few import sources (e.g. promise items, BWB, and Amazon) have `publish_date`
values that are known to be inaccurate, so those `publish_date` values are
removed.
"""
normalize_import_record(rec=rec)
assert rec == expected
def test_find_match_title_only_promiseitem_against_noisbn_marc(mock_site):
# An existing light title + ISBN only record
existing_edition = {
'key': '/books/OL113M',
# NO author
# NO date
# NO publisher
'title': 'Just A Title',
'isbn_13': ['9780000000002'],
'source_records': ['promise:someid'],
'type': {'key': '/type/edition'},
}
marc_import = {
'authors': [{'name': 'Bob Smith'}],
'publish_date': '1913',
'publishers': ['Early Editions'],
'title': 'Just A Title',
'source_records': ['marc:somelibrary/some_marc.mrc'],
}
mock_site.save(existing_edition)
result = find_match(marc_import, {'title': [existing_edition['key']]})
assert result != '/books/OL113M'
assert result is None
| 59,742 | Python | .py | 1,614 | 27.512392 | 106 | 0.529031 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
219 | edit.py | internetarchive_openlibrary/openlibrary/catalog/utils/edit.py | import re
import requests
import web
from openlibrary.catalog.utils.query import get_mc
from openlibrary.api import unmarshal
from time import sleep
re_meta_mrc = re.compile('([^/]+)_(meta|marc).(mrc|xml)')
re_skip = re.compile(r'\b([A-Z]|Co|Dr|Jr|Capt|Mr|Mrs|Ms|Prof|Rev|Revd|Hon)\.$')
db_amazon = web.database(dbn='postgres', db='amazon')
db_amazon.printing = False
def query_with_retry(ol, q):
for attempt in range(50):
try:
return ol.query(q)
except:
sleep(5)
print('retry attempt', attempt)
def get_with_retry(ol, k):
for attempt in range(50):
try:
return ol.get(k)
except:
sleep(5)
print('retry attempt', attempt)
def amazon_source_records(asin):
iter = db_amazon.select('amazon', where='asin = $asin', vars={'asin': asin})
return ["amazon:%s:%s:%d:%d" % (asin, r.seg, r.start, r.length) for r in iter]
def has_dot(s):
return s.endswith('.') and not re_skip.search(s)
def fix_toc(e):
toc = e.get('table_of_contents', None)
if not toc:
return
if isinstance(toc[0], dict) and toc[0]['type'] == '/type/toc_item':
if len(toc) == 1 and 'title' not in toc[0]:
del e['table_of_contents'] # remove empty toc
return
new_toc = [{'title': str(i), 'type': '/type/toc_item'} for i in toc if i]
e['table_of_contents'] = new_toc
def fix_subject(e):
if e.get('subjects', None) and any(has_dot(s) for s in e['subjects']):
subjects = [s[:-1] if has_dot(s) else s for s in e['subjects']]
e['subjects'] = subjects
def undelete_author(a, ol):
key = a['key']
assert a['type'] == '/type/delete'
url = 'http://openlibrary.org' + key + '.json?v=' + str(a['revision'] - 1)
prev = unmarshal(requests.get(url).json())
assert prev['type'] == '/type/author'
ol.save(key, prev, 'undelete author')
def undelete_authors(authors, ol):
for a in authors:
if a['type'] == '/type/delete':
undelete_author(a, ol)
else:
assert a['type'] == '/type/author'
def fix_authors(e, ol):
if 'authors' not in e:
return
authors = [get_with_retry(ol, akey) for akey in e['authors']]
while any(a['type'] == '/type/redirect' for a in authors):
print('following redirects')
authors = [
get_with_retry(ol, a['location']) if a['type'] == '/type/redirect' else a
for a in authors
]
e['authors'] = [{'key': a['key']} for a in authors]
undelete_authors(authors, ol)
def fix_edition(key, e, ol):
existing = get_mc(key)
if 'source_records' not in e and existing:
amazon = 'amazon:'
if existing.startswith('ia:'):
sr = [existing]
elif existing.startswith(amazon):
sr = amazon_source_records(existing[len(amazon) :]) or [existing]
else:
print('existing:', existing)
m = re_meta_mrc.search(existing)
sr = ['marc:' + existing if not m else 'ia:' + m.group(1)]
e['source_records'] = sr
if 'ocaid' in e:
ia = 'ia:' + e['ocaid']
if 'source_records' not in e:
e['source_records'] = [ia]
elif ia not in e['source_records']:
e['source_records'].append(ia)
fix_toc(e)
fix_subject(e)
fix_authors(e, ol)
return e
| 3,388 | Python | .py | 91 | 30.263736 | 85 | 0.582468 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
220 | __init__.py | internetarchive_openlibrary/openlibrary/catalog/utils/__init__.py | import datetime
import re
from typing import TYPE_CHECKING
import web
from unicodedata import normalize
if TYPE_CHECKING:
from openlibrary.plugins.upstream.models import Author
EARLIEST_PUBLISH_YEAR_FOR_BOOKSELLERS = 1400
BOOKSELLERS_WITH_ADDITIONAL_VALIDATION = ['amazon', 'bwb']
def cmp(x, y):
return (x > y) - (x < y)
re_date = [
    re.compile(pattern)
    for pattern in (
        r'(?P<birth_date>\d+\??)-(?P<death_date>\d+\??)',
        r'(?P<birth_date>\d+\??)-',
        r'b\.? (?P<birth_date>(?:ca\. )?\d+\??)',
        r'd\.? (?P<death_date>(?:ca\. )?\d+\??)',
        r'(?P<birth_date>.*\d+.*)-(?P<death_date>.*\d+.*)',
        r'^(?P<birth_date>[^-]*\d+[^-]+ cent\.[^-]*)$',
    )
]
re_ad_bc = re.compile(r'\b(B\.C\.?|A\.D\.?)')
re_date_fl = re.compile('^fl[., ]')
re_number_dot = re.compile(r'\d{2,}[- ]*(\.+)$')
re_l_in_date = re.compile(r'(l\d|\dl)')
re_end_dot = re.compile(r'[^ .][^ .]\.$', re.UNICODE)
re_marc_name = re.compile('^(.*?),+ (.*)$')
re_year = re.compile(r'\b(\d{4})\b')
def key_int(rec):
# extract the number from a key like /a/OL1234A
return int(web.numify(rec['key']))
def author_dates_match(a: dict, b: "dict | Author") -> bool:
"""
Checks if the years of two authors match. Only compares years,
not names or keys. Works by returning False if any year specified in one record
does not match that in the other, otherwise True. If any one author does not have
dates, it will return True.
:param dict a: Author import dict {"name": "Some One", "birth_date": "1960"}
:param dict b: Author import dict {"name": "Some One"}
"""
for k in ['birth_date', 'death_date', 'date']:
if k not in a or a[k] is None or k not in b or b[k] is None:
continue
if a[k] == b[k] or a[k].startswith(b[k]) or b[k].startswith(a[k]):
continue
m1 = re_year.search(a[k])
if not m1:
return False
m2 = re_year.search(b[k])
if m2 and m1.group(1) == m2.group(1):
continue
return False
return True
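# Illustrative behaviour of author_dates_match (example values only, not from any
# real record); partial dates that share a prefix count as a match:
# author_dates_match({'birth_date': '1960'}, {'birth_date': '1960-11-11'}) -> True
# author_dates_match({'birth_date': '1960'}, {}) -> True (no date to conflict with)
# author_dates_match({'birth_date': '1960'}, {'birth_date': '1961'}) -> False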
def flip_name(name: str) -> str:
"""
Flip author name about the comma, stripping the comma, and removing non
abbreviated end dots. Returns name with end dot stripped if no comma+space found.
The intent is to convert a Library indexed name to natural name order.
:param str name: e.g. "Smith, John." or "Smith, J."
:return: e.g. "John Smith" or "J. Smith"
"""
m = re_end_dot.search(name)
if m:
name = name[:-1]
if name.find(', ') == -1:
return name
if m := re_marc_name.match(name):
return m.group(2) + ' ' + m.group(1)
return ''
def remove_trailing_number_dot(date):
if m := re_number_dot.search(date):
return date[: -len(m.group(1))]
else:
return date
def remove_trailing_dot(s):
if s.endswith(' Dept.'):
return s
elif m := re_end_dot.search(s):
return s[:-1]
return s
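# Illustrative examples (made-up values) for the trailing-dot helpers:
# remove_trailing_dot('Paris.') -> 'Paris'
# remove_trailing_dot('Smith, J.') -> 'Smith, J.' (abbreviations keep their dot)
# remove_trailing_dot('History Dept.') -> 'History Dept.' (' Dept.' is exempt)
# remove_trailing_number_dot('1856-1934.') -> '1856-1934'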
def fix_l_in_date(date):
if 'l' not in date:
return date
return re_l_in_date.sub(lambda m: m.group(1).replace('l', '1'), date)
re_ca = re.compile(r'ca\.([^ ])')
def parse_date(date):
if re_date_fl.match(date):
return {}
date = remove_trailing_number_dot(date)
date = re_ca.sub(lambda m: 'ca. ' + m.group(1), date)
if date.find('-') == -1:
for r in re_date:
m = r.search(date)
if m:
return {k: fix_l_in_date(v) for k, v in m.groupdict().items()}
return {}
parts = date.split('-')
i = {'birth_date': parts[0].strip()}
if len(parts) == 2:
parts[1] = parts[1].strip()
if parts[1]:
i['death_date'] = fix_l_in_date(parts[1])
if not re_ad_bc.search(i['birth_date']):
m = re_ad_bc.search(i['death_date'])
if m:
i['birth_date'] += ' ' + m.group(1)
if 'birth_date' in i and 'l' in i['birth_date']:
i['birth_date'] = fix_l_in_date(i['birth_date'])
return i
re_cent = re.compile(r'^[\dl][^-]+ cent\.$')
def pick_first_date(dates):
# this is to handle this case:
# 100: $aLogan, Olive (Logan), $cSikes, $dMrs., $d1839-
# see http://archive.org/download/gettheebehindmes00logaiala/gettheebehindmes00logaiala_meta.mrc
# or http://pharosdb.us.archive.org:9090/show-marc?record=gettheebehindmes00logaiala/gettheebehindmes00logaiala_meta.mrc:0:521
dates = list(dates)
if len(dates) == 1 and re_cent.match(dates[0]):
return {'date': fix_l_in_date(dates[0])}
for date in dates:
result = parse_date(date)
if result != {}:
return result
return {
'date': fix_l_in_date(' '.join([remove_trailing_number_dot(d) for d in dates]))
}
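# Illustrative examples (made-up values):
# pick_first_date(['1900-1983.']) -> {'birth_date': '1900', 'death_date': '1983'}
# pick_first_date(['fl. 1822']) -> {'date': 'fl. 1822'} (flourished dates are not split)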
re_drop = re.compile('[?,]')
def match_with_bad_chars(a, b):
if str(a) == str(b):
return True
a = normalize('NFKD', str(a)).lower()
b = normalize('NFKD', str(b)).lower()
if a == b:
return True
a = a.encode('ASCII', 'ignore')
b = b.encode('ASCII', 'ignore')
if a == b:
return True
def drop(s):
return re_drop.sub('', s.decode() if isinstance(s, bytes) else s)
return drop(a) == drop(b)
def accent_count(s):
return len([c for c in norm(s) if ord(c) > 127])
def norm(s):
return normalize('NFC', s) if isinstance(s, str) else s
def pick_best_name(names):
names = [norm(n) for n in names]
n1 = names[0]
assert all(match_with_bad_chars(n1, n2) for n2 in names[1:])
names.sort(key=lambda n: accent_count(n), reverse=True)
assert '?' not in names[0]
return names[0]
def pick_best_author(authors):
n1 = authors[0]['name']
assert all(match_with_bad_chars(n1, a['name']) for a in authors[1:])
authors.sort(key=lambda a: accent_count(a['name']), reverse=True)
assert '?' not in authors[0]['name']
return authors[0]
def tidy_isbn(input):
output = []
for i in input:
i = i.replace('-', '')
if len(i) in (10, 13):
output.append(i)
continue
if len(i) == 20 and all(c.isdigit() for c in i):
output.extend([i[:10], i[10:]])
continue
if len(i) == 21 and not i[10].isdigit():
output.extend([i[:10], i[11:]])
continue
if i.find(';') != -1:
no_semicolon = i.replace(';', '')
if len(no_semicolon) in (10, 13):
output.append(no_semicolon)
continue
split = i.split(';')
if all(len(j) in (10, 13) for j in split):
output.extend(split)
continue
output.append(i)
return output
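# Illustrative examples (made-up ISBN strings):
# tidy_isbn(['0-19-852663-6']) -> ['0198526636'] (hyphens dropped, 10 chars kept)
# tidy_isbn(['12345678901234567890']) -> ['1234567890', '1234567890'] (20 digits split 10+10)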
def strip_count(counts):
foo = {}
for i, j in counts:
foo.setdefault(i.rstrip('.').lower() if isinstance(i, str) else i, []).append(
(i, j)
)
ret = {}
for v in foo.values():
m = max(v, key=lambda x: len(x[1]))[0]
bar = []
for i, j in v:
bar.extend(j)
ret[m] = bar
return sorted(ret.items(), key=lambda x: len(x[1]), reverse=True)
def fmt_author(a):
if 'birth_date' in a or 'death_date' in a:
return "{} ({}-{})".format(
a['name'], a.get('birth_date', ''), a.get('death_date', '')
)
return a['name']
def get_title(e):
if e.get('title_prefix', None) is not None:
prefix = e['title_prefix']
if prefix[-1] != ' ':
prefix += ' '
title = prefix + e['title']
else:
title = e['title']
return title
def get_publication_year(publish_date: str | int | None) -> int | None:
"""
Return the publication year from a book in YYYY format by looking for four
consecutive digits not followed by another digit. If no match, return None.
>>> get_publication_year('1999-01')
1999
>>> get_publication_year('January 1, 1999')
1999
"""
if publish_date is None:
return None
match = re_year.search(str(publish_date))
return int(match.group(0)) if match else None
def published_in_future_year(publish_year: int) -> bool:
"""
Return True if a book is published in a future year as compared to the
current year.
Some import sources have publication dates in a future year, and the
likelihood is high that this is bad data. So we don't want to import these.
"""
return publish_year > datetime.datetime.now().year
def publication_too_old_and_not_exempt(rec: dict) -> bool:
"""
Returns True for books that are 'too old' per
EARLIEST_PUBLISH_YEAR_FOR_BOOKSELLERS, but that only applies to
source records in BOOKSELLERS_WITH_ADDITIONAL_VALIDATION.
For sources not in BOOKSELLERS_WITH_ADDITIONAL_VALIDATION, return False,
as there is higher trust in their publication dates.
"""
def source_requires_date_validation(rec: dict) -> bool:
return any(
record.split(":")[0] in BOOKSELLERS_WITH_ADDITIONAL_VALIDATION
for record in rec.get('source_records', [])
)
if (
publish_year := get_publication_year(rec.get('publish_date'))
) and source_requires_date_validation(rec):
return publish_year < EARLIEST_PUBLISH_YEAR_FOR_BOOKSELLERS
return False
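# Illustrative behaviour (example records only): the 1400 cut-off applies solely to
# the booksellers listed in BOOKSELLERS_WITH_ADDITIONAL_VALIDATION, e.g.
# publication_too_old_and_not_exempt(
#     {'source_records': ['amazon:X'], 'publish_date': '1399'}) -> True
# publication_too_old_and_not_exempt(
#     {'source_records': ['ia:X'], 'publish_date': '1399'}) -> False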
def is_independently_published(publishers: list[str]) -> bool:
"""
Return True if the book is independently published.
"""
independent_publisher_names = ['independently published', 'independent publisher']
return any(
publisher.casefold() in independent_publisher_names for publisher in publishers
)
def needs_isbn_and_lacks_one(rec: dict) -> bool:
"""
Return True if the book is identified as requiring an ISBN.
If an ISBN is NOT required, return False. If an ISBN is required:
- return False if an ISBN is present (because the rec needs an ISBN and
has one); or
- return True if there's no ISBN.
This exists because certain sources do not have great records and requiring
an ISBN may help improve quality:
https://docs.google.com/document/d/1dlN9klj27HeidWn3G9GUYwDNZ2F5ORoEZnG4L-7PcgA/edit#heading=h.1t78b24dg68q
:param dict rec: an import dictionary record.
"""
def needs_isbn(rec: dict) -> bool:
# Exception for Amazon-specific ASINs, which often accompany ebooks
if any(
name == "amazon" and identifier.startswith("B")
for record in rec.get("source_records", [])
if record and ":" in record
for name, identifier in [record.split(":", 1)]
):
return False
return any(
record.split(":")[0] in BOOKSELLERS_WITH_ADDITIONAL_VALIDATION
for record in rec.get('source_records', [])
)
def has_isbn(rec: dict) -> bool:
return any(rec.get('isbn_10', []) or rec.get('isbn_13', []))
return needs_isbn(rec) and not has_isbn(rec)
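# Illustrative behaviour (example records only):
# needs_isbn_and_lacks_one({'source_records': ['amazon:123']}) -> True
# needs_isbn_and_lacks_one(
#     {'source_records': ['amazon:123'], 'isbn_10': ['1234567890']}) -> False
# needs_isbn_and_lacks_one({'source_records': ['ia:xyz']}) -> False (ISBN not required)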
def is_promise_item(rec: dict) -> bool:
"""Returns True if the record is a promise item."""
return any(
record.startswith("promise:".lower())
for record in rec.get('source_records', "")
)
def get_non_isbn_asin(rec: dict) -> str | None:
"""
Return a non-ISBN ASIN (e.g. B012345678) if one exists.
There is a tacit assumption that at most one will exist.
"""
# Look first in identifiers.
amz_identifiers = rec.get("identifiers", {}).get("amazon", [])
if asin := next(
(identifier for identifier in amz_identifiers if identifier.startswith("B")),
None,
):
return asin
# Finally, check source_records.
if asin := next(
(
record.split(":")[-1]
for record in rec.get("source_records", [])
if record.startswith("amazon:B")
),
None,
):
return asin
return None
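# Illustrative behaviour (example records only):
# get_non_isbn_asin({'identifiers': {'amazon': ['B012345678']}}) -> 'B012345678'
# get_non_isbn_asin({'source_records': ['amazon:B012345678']}) -> 'B012345678'
# get_non_isbn_asin({'source_records': ['amazon:1234567890']}) -> None (ISBN-style ASIN)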
def is_asin_only(rec: dict) -> bool:
"""Returns True if the rec has only an ASIN and no ISBN, and False otherwise."""
# Immediately return False if any ISBNs are present
if any(isbn_type in rec for isbn_type in ("isbn_10", "isbn_13")):
return False
# Check for Amazon source records starting with "B".
if any(record.startswith("amazon:B") for record in rec.get("source_records", [])):
return True
# Check for Amazon identifiers starting with "B".
amz_identifiers = rec.get("identifiers", {}).get("amazon", [])
return any(identifier.startswith("B") for identifier in amz_identifiers)
def get_missing_fields(rec: dict) -> list[str]:
"""Return missing fields, if any."""
required_fields = [
'title',
'source_records',
]
return [field for field in required_fields if rec.get(field) is None]
| 12,779 | Python | .py | 333 | 31.531532 | 130 | 0.599158 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
221 | query.py | internetarchive_openlibrary/openlibrary/catalog/utils/query.py | import requests
import web
import json
from time import sleep
import urllib
import sys
query_host = 'openlibrary.org'
def urlopen(url, data=None):
version = "%s.%s.%s" % sys.version_info[:3]
user_agent = f'Mozilla/5.0 (openlibrary; {__name__}) Python/{version}'
headers = {'User-Agent': user_agent}
return requests.get(url, data=data, headers=headers)
def jsonload(url):
return urlopen(url).json()
def urlread(url):
return urlopen(url).content
def set_query_host(host):
global query_host
query_host = host
def has_cover(key):
url = 'https://covers.openlibrary.org/' + key[1] + '/query?olid=' + key[3:]
    return urlread(url).strip() != b'[]'
def has_cover_retry(key):
for attempt in range(5):
try:
return has_cover(key)
except KeyboardInterrupt:
raise
except:
pass
sleep(2)
def base_url():
return "http://" + query_host
def query_url():
return base_url() + "/query.json?query="
def get_all_ia():
print('c')
q = {'source_records~': 'ia:*', 'type': '/type/edition'}
limit = 10
q['limit'] = limit
q['offset'] = 0
while True:
url = base_url() + "/api/things?query=" + web.urlquote(json.dumps(q))
ret = jsonload(url)['result']
yield from ret
if not ret:
return
q['offset'] += limit
def query(q):
url = query_url() + urllib.parse.quote(json.dumps(q))
ret = None
for i in range(20):
try:
ret = urlread(url)
while ret.startswith(b'canceling statement due to statement timeout'):
ret = urlread(url)
if not ret:
print('ret == None')
except OSError:
pass
if ret:
try:
data = json.loads(ret)
if isinstance(data, dict):
if 'error' in data:
print('error:')
print(ret)
assert 'error' not in data
return data
except:
print(ret)
print(url)
sleep(20)
def query_iter(q, limit=500, offset=0):
q['limit'] = limit
q['offset'] = offset
while True:
ret = query(q)
if not ret:
return
yield from ret
        # We haven't got as many as we requested. No point making one more request
if len(ret) < limit:
break
q['offset'] += limit
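# Illustrative use of query_iter (mirrors the query built in get_all_ia above);
# each yielded `e` is one matching thing from the query API:
# for e in query_iter({'type': '/type/edition', 'source_records~': 'ia:*'}):
#     ...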
def get_editions_with_covers_by_author(author, count):
q = {
'type': '/type/edition',
'title_prefix': None,
'subtitle': None,
'title': None,
'authors': author,
}
with_covers = []
for e in query_iter(q, limit=count):
if not has_cover(e['key']):
continue
with_covers.append(e)
if len(with_covers) == count:
return with_covers
return with_covers
def version_iter(q, limit=500, offset=0):
q['limit'] = limit
q['offset'] = offset
while True:
url = base_url() + '/version'
v = jsonload(url)
if not v:
return
yield from query(q)
q['offset'] += limit
def withKey(key):
url = base_url() + key + '.json'
for i in range(20):
try:
return jsonload(url)
except:
pass
print('retry:', i)
print(url)
def get_marc_src(e):
mc = get_mc(e['key'])
if mc:
yield mc
if not e.get('source_records', []):
return
for src in e['source_records']:
if src.startswith('marc:') and src != 'marc:' + mc:
yield src[5:]
def get_mc(key): # get machine comment
v = jsonload(base_url() + key + '.json?m=history')
comments = [
i['machine_comment']
for i in v
if i.get('machine_comment', None) and ':' in i['machine_comment']
]
if len(comments) == 0:
return None
if len(set(comments)) != 1:
print(key)
print(comments)
assert len(set(comments)) == 1
if comments[0] == 'initial import':
return None
return comments[0]
| 4,179 | Python | .py | 145 | 20.862069 | 84 | 0.542407 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
222 | marc_binary.py | internetarchive_openlibrary/openlibrary/catalog/marc/marc_binary.py | from pymarc import MARC8ToUnicode
from unicodedata import normalize
from collections.abc import Iterator
from openlibrary.catalog.marc import mnemonics
from openlibrary.catalog.marc.marc_base import (
MarcBase,
MarcFieldBase,
MarcException,
BadMARC,
)
marc8 = MARC8ToUnicode(quiet=True)
class BadLength(MarcException):
pass
def handle_wrapped_lines(_iter):
"""
Handles wrapped MARC fields, which appear to be multiple
fields with the same field number ending with ++
Have not found an official spec which describe this.
"""
cur_lines = []
cur_tag = None
for tag, line in _iter:
if len(line) > 500 and line.endswith(b'++\x1e'):
assert not cur_tag or cur_tag == tag
cur_tag = tag
cur_lines.append(line)
continue
if cur_lines:
yield cur_tag, cur_lines[0][:-3] + b''.join(
i[2:-3] for i in cur_lines[1:]
) + line[2:]
cur_tag = None
cur_lines = []
continue
yield tag, line
assert not cur_lines
class BinaryDataField(MarcFieldBase):
def __init__(self, rec, line: bytes) -> None:
"""
:param rec MarcBinary:
:param line bytes: Content of a MARC21 binary field
"""
self.rec: MarcBinary = rec
if line:
while line[-2] == b'\x1e'[0]: # ia:engineercorpsofhe00sher
line = line[:-1]
self.line = line
def translate(self, data: bytes) -> str:
"""
:param data bytes: raw MARC21 field data content, in either utf8 or marc8 encoding
:rtype: str
:return: A NFC normalized unicode str
"""
if self.rec.marc8():
data = mnemonics.read(data)
return marc8.translate(data)
return normalize('NFC', data.decode('utf8'))
def ind1(self) -> str:
return chr(self.line[0])
def ind2(self) -> str:
return chr(self.line[1])
def get_all_subfields(self) -> Iterator[tuple[str, str]]:
for i in self.line[3:-1].split(b'\x1f'):
if i:
j = self.translate(i)
yield j[0], j[1:]
class MarcBinary(MarcBase):
def __init__(self, data: bytes) -> None:
try:
assert len(data)
assert isinstance(data, bytes)
length = int(data[:5])
except AssertionError:
raise BadMARC("No MARC data found")
if len(data) != length:
raise BadLength(
f"Record length {len(data)} does not match reported length {length}."
)
self.data = data
self.directory_end = data.find(b'\x1e')
if self.directory_end == -1:
raise BadMARC("MARC directory not found")
def iter_directory(self):
data = self.data
directory = data[24 : self.directory_end]
if len(directory) % 12 != 0:
# directory is the wrong size
# sometimes the leader includes some utf-8 by mistake
directory = data[: self.directory_end].decode('utf-8')[24:]
if len(directory) % 12 != 0:
raise BadMARC("MARC directory invalid length")
iter_dir = (
directory[i * 12 : (i + 1) * 12] for i in range(len(directory) // 12)
)
return iter_dir
def leader(self) -> str:
return self.data[:24].decode('utf-8', errors='replace')
def marc8(self) -> bool:
"""
Is this binary MARC21 MARC8 encoded? (utf-8 if False)
"""
return self.leader()[9] == ' '
def read_fields(
self, want: list[str] | None = None
) -> Iterator[tuple[str, str | BinaryDataField]]:
"""
:param want list | None: list of str, 3 digit MARC field ids, or None for all fields (no limit)
:rtype: generator
:return: Generator of (tag (str), field (str if 00x, otherwise BinaryDataField))
"""
if want is None:
fields = self.get_all_tag_lines()
else:
fields = self.get_tag_lines(want)
for tag, line in handle_wrapped_lines(fields):
if want and tag not in want:
continue
if tag.startswith('00'):
# marc_upei/marc-for-openlibrary-bigset.mrc:78997353:588
if tag == '008' and line == b'':
continue
assert line[-1] == b'\x1e'[0]
# Tag contents should be strings in utf-8 by this point
# if not, the MARC is corrupt in some way. Attempt to rescue
# using 'replace' error handling. We don't want to change offsets
                # in positionally defined control fields like 008
yield tag, line[:-1].decode('utf-8', errors='replace')
else:
yield tag, BinaryDataField(self, line)
def get_all_tag_lines(self):
for line in self.iter_directory():
yield (line[:3].decode(), self.get_tag_line(line))
def get_tag_lines(self, want):
"""
Returns a list of selected fields, (tag, field contents)
:param want list: List of str, 3 digit MARC field ids
:rtype: list
        :return: list of tuples (MARC tag (str), field contents (bytes))
"""
return [
(line[:3].decode(), self.get_tag_line(line))
for line in self.iter_directory()
if line[:3].decode() in want
]
def get_tag_line(self, line):
length = int(line[3:7])
offset = int(line[7:12])
data = self.data[self.directory_end :]
# handle off-by-one errors in MARC records
try:
if data[offset] != b'\x1e':
offset += data[offset:].find(b'\x1e')
last = offset + length
if data[last] != b'\x1e':
length += data[last:].find(b'\x1e')
except IndexError:
pass
tag_line = data[offset + 1 : offset + length + 1]
        # marc_western_washington_univ/wwu_bibs.mrc_revrev.mrc:636441290:1277
        # Repair a stray '{llig}' mnemonic: compare the tag as bytes and keep the
        # result as bytes so the concatenation cannot raise a TypeError.
        if line[0:2] != b'00' and tag_line[1:8] == b'{llig}\x1f':
            tag_line = tag_line[0:1] + '\uFE20'.encode('utf-8') + tag_line[7:]
return tag_line
| 6,266 | Python | .py | 162 | 28.839506 | 103 | 0.561678 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
223 | marc_base.py | internetarchive_openlibrary/openlibrary/catalog/marc/marc_base.py | import re
from abc import abstractmethod
from collections import defaultdict
from collections.abc import Iterator
re_isbn = re.compile(r'([^ ()]+[\dX])(?: \((?:v\. (\d+)(?: : )?)?(.*)\))?')
# handle ISBN like: 1402563884c$26.95
re_isbn_and_price = re.compile(r'^([-\d]+X?)c\$[\d.]+$')
class MarcException(Exception):
# Base MARC exception class
pass
class BadMARC(MarcException):
pass
class NoTitle(MarcException):
pass
class MarcFieldBase:
rec: "MarcBase"
@abstractmethod
def ind1(self) -> str:
raise NotImplementedError
@abstractmethod
def ind2(self) -> str:
raise NotImplementedError
def get_subfield_values(self, want: str) -> list[str]:
return [v.strip() for _, v in self.get_subfields(want) if v]
@abstractmethod
def get_all_subfields(self) -> Iterator[tuple[str, str]]:
raise NotImplementedError
def get_contents(self, want: str) -> dict[str, list[str]]:
contents = defaultdict(list)
for k, v in self.get_subfields(want):
if v:
contents[k].append(v)
return contents
def get_subfields(self, want: str) -> Iterator[tuple[str, str]]:
for k, v in self.get_all_subfields():
if k in want:
yield k, v
def get_lower_subfield_values(self) -> Iterator[str]:
for k, v in self.get_all_subfields():
if k.islower():
yield v
class MarcBase:
def read_isbn(self, f: MarcFieldBase) -> list[str]:
found = []
for v in f.get_subfield_values('az'):
m = re_isbn_and_price.match(v)
if not m:
m = re_isbn.match(v)
if not m:
continue
found.append(m.group(1))
return found
def get_control(self, tag: str) -> str | None:
control = self.read_fields([tag])
_, v = next(control, (tag, None))
assert isinstance(v, (str, type(None)))
if tag == '008' and v: # noqa: SIM102
# Handle duplicate 008s, even though control fields are non-repeatable.
if others := [str(d) for _, d in list(control) if len(str(d)) == 40]:
return min(others + [v], key=lambda s: s.count(' '))
return v
def get_fields(self, tag: str) -> list[MarcFieldBase]:
return [v for _, v in self.read_fields([tag]) if isinstance(v, MarcFieldBase)]
@abstractmethod
def read_fields(self, want: list[str]) -> Iterator[tuple[str, str | MarcFieldBase]]:
raise NotImplementedError
def get_linkage(self, original: str, link: str) -> MarcFieldBase | None:
"""
:param original str: The original field e.g. '245'
:param link str: The linkage {original}$6 value e.g. '880-01'
:rtype: MarcFieldBase | None
:return: alternate script field (880) corresponding to original, or None
"""
linkages = self.read_fields(['880'])
target = link.replace('880', original)
for tag, f in linkages:
assert isinstance(f, MarcFieldBase)
if f.get_subfield_values('6')[0].startswith(target):
return f
return None
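# Illustrative use of get_linkage (example values only): for an original '245'
# field whose $6 is '880-01', rec.get_linkage('245', '880-01') returns the 880
# field whose own $6 starts with '245-01', or None if no such field exists.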
| 3,200 | Python | .py | 80 | 31.725 | 88 | 0.597805 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
224 | get_subjects.py | internetarchive_openlibrary/openlibrary/catalog/marc/get_subjects.py | from collections import defaultdict
import re
from openlibrary.catalog.utils import remove_trailing_dot, flip_name
re_flip_name = re.compile('^(.+), ([A-Z].+)$')
# 'Rhodes, Dan (Fictitious character)'
re_fictitious_character = re.compile(r'^(.+), (.+)( \(.* character\))$')
re_etc = re.compile('^(.+?)[, .]+etc[, .]?$', re.I)
re_comma = re.compile('^([A-Z])([A-Za-z ]+?) *, ([A-Z][A-Z a-z]+)$')
re_place_comma = re.compile('^(.+), (.+)$')
re_paren = re.compile('[()]')
def flip_place(s: str) -> str:
s = remove_trailing_dot(s).strip()
# Whitechapel (London, England)
# East End (London, England)
# Whitechapel (Londres, Inglaterra)
if re_paren.search(s):
return s
if m := re_place_comma.match(s):
return f'{m.group(2)} {m.group(1)}'.strip()
return s
def flip_subject(s: str) -> str:
if m := re_comma.match(s):
return m.group(3) + ' ' + m.group(1).lower() + m.group(2)
else:
return s
def tidy_subject(s: str) -> str:
s = remove_trailing_dot(s.strip()).strip()
if len(s) > 1:
s = s[0].upper() + s[1:]
if m := re_etc.search(s):
return m.group(1)
if m := re_fictitious_character.match(s):
return f'{m.group(2)} {m.group(1)}{m.group(3)}'
if m := re_comma.match(s):
return f'{m.group(3)} {m.group(1)}{m.group(2)}'
return s
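# Illustrative examples (made-up heading strings):
# tidy_subject('Rhodes, Dan (Fictitious character)') -> 'Dan Rhodes (Fictitious character)'
# tidy_subject('book reviews.') -> 'Book reviews'
# tidy_subject('Conduct of life, etc.') -> 'Conduct of life'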
def four_types(i):
want = {'subject', 'time', 'place', 'person'}
ret = {k: i[k] for k in want if k in i}
for j in (j for j in i if j not in want):
for k, v in i[j].items():
if 'subject' in ret:
ret['subject'][k] = ret['subject'].get(k, 0) + v
else:
ret['subject'] = {k: v}
return ret
def read_subjects(rec):
subject_fields = {'600', '610', '611', '630', '648', '650', '651', '662'}
subjects = defaultdict(lambda: defaultdict(int))
# {'subject': defaultdict(<class 'int'>, {'Japanese tea ceremony': 1, 'Book reviews': 1})}
for tag, field in rec.read_fields(subject_fields):
if tag == '600': # people
name_and_date = []
for k, v in field.get_subfields('abcd'):
v = '(' + v.strip('.() ') + ')' if k == 'd' else v.strip(' /,;:')
if k == 'a' and re_flip_name.match(v):
v = flip_name(v)
name_and_date.append(v)
if name := remove_trailing_dot(' '.join(name_and_date)).strip():
subjects['person'][name] += 1
elif tag == '610': # org
if v := tidy_subject(' '.join(field.get_subfield_values('abcd'))):
subjects['org'][v] += 1
elif tag == '611': # Meeting Name (event)
v = ' '.join(
j.strip() for i, j in field.get_all_subfields() if i not in 'vxyz'
)
subjects['event'][tidy_subject(v)] += 1
elif tag == '630': # Uniform Title (work)
for v in field.get_subfield_values('a'):
subjects['work'][tidy_subject(v)] += 1
elif tag == '650': # Topical Term (subject)
for v in field.get_subfield_values('a'):
subjects['subject'][tidy_subject(v)] += 1
elif tag == '651': # Geographical Name (place)
for v in field.get_subfield_values('a'):
subjects['place'][flip_place(v)] += 1
for v in field.get_subfield_values('vx'): # Form and General subdivisions
subjects['subject'][tidy_subject(v)] += 1
for v in field.get_subfield_values('y'): # Chronological subdivision
subjects['time'][tidy_subject(v)] += 1
for v in field.get_subfield_values('z'): # Geographic subdivision
subjects['place'][flip_place(v)] += 1
return {k: dict(v) for k, v in subjects.items()}
def subjects_for_work(rec):
field_map = {
'subject': 'subjects',
'place': 'subject_places',
'time': 'subject_times',
'person': 'subject_people',
}
subjects = four_types(read_subjects(rec))
return {field_map[k]: list(v) for k, v in subjects.items()}
| 4,088 | Python | .py | 93 | 35.645161 | 94 | 0.540588 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
225 | html.py | internetarchive_openlibrary/openlibrary/catalog/marc/html.py | import re
from pymarc.record import Record
trans = {'&': '&', '<': '<', '>': '>', '\n': '<br>', '\x1b': '<b>[esc]</b>'}
re_html_replace = re.compile('([&<>\n\x1b])')
def esc(s):
return re_html_replace.sub(lambda m: trans[m.group(1)], s)
def subfields(line):
if isinstance(line, str):
return esc(line)
return f"{line['ind1']}{line['ind2']} " + ''.join(
[f'<b>${k}</b>{esc(v)}' for s in line['subfields'] for k, v in s.items()]
)
class html_record:
def __init__(self, data):
assert len(data) == int(data[:5])
self.data = data
self.record = Record(data)
self.leader = self.record.leader
def html(self):
return '<br>\n'.join(
[
f'<b>{tag}</b> <code>{subfields(value)}</code>'
for r in self.record.as_dict()['fields']
for tag, value in r.items()
]
)
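# Illustrative use (assumes `data` holds exactly one binary MARC21 record, e.g. a
# single-record .mrc file read in binary mode; the filename is a placeholder):
# rec = html_record(open('record_meta.mrc', 'rb').read())
# html_fragment = rec.html()  # one '<b>tag</b> <code>...</code>' line per field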
| 924 | Python | .py | 26 | 28.038462 | 86 | 0.514061 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
226 | parse.py | internetarchive_openlibrary/openlibrary/catalog/marc/parse.py | import logging
import re
from typing import Any
from collections.abc import Callable
from openlibrary.catalog.marc.get_subjects import subjects_for_work
from openlibrary.catalog.marc.marc_base import (
MarcBase,
MarcFieldBase,
BadMARC,
NoTitle,
MarcException,
)
from openlibrary.catalog.utils import (
pick_first_date,
remove_trailing_dot,
remove_trailing_number_dot,
tidy_isbn,
)
DNB_AGENCY_CODE = 'DE-101'
logger = logging.getLogger('openlibrary.catalog.marc')
max_number_of_pages = 50000 # no monograph should be longer than 50,000 pages
re_bad_char = re.compile('\ufffd')
re_date = re.compile(r'^[0-9]+u*$')
re_question = re.compile(r'^\?+$')
re_lccn = re.compile(r'([ \dA-Za-z\-]{3}[\d/-]+).*')
re_oclc = re.compile(r'^\(OCoLC\).*?0*(\d+)')
re_ocolc = re.compile('^ocolc *$', re.I)
re_ocn_or_ocm = re.compile(r'^oc[nm]0*(\d+) *$')
re_int = re.compile(r'\d{2,}')
re_bracket_field = re.compile(r'^\s*(\[.*\])\.?\s*$')
def strip_foc(s: str) -> str:
foc = '[from old catalog]'
return s[: -len(foc)].rstrip() if s.endswith(foc) else s
class SeeAlsoAsTitle(MarcException):
pass
# FIXME: This is SUPER hard to find when needing to add a new field. Why not just decode everything?
FIELDS_WANTED = (
[
'001',
'003', # for OCLC
'008', # publish date, country and language
'010', # lccn
'016', # National Bibliographic Agency Control Number (for DNB)
'020', # isbn
'022', # issn
'035', # oclc
'041', # languages
'050', # lc classification
'082', # dewey
'100',
'110',
'111', # authors
'130',
'240', # work title
'245', # title
'250', # edition
'260',
'264', # publisher
'300', # pagination
'440',
'490',
'830', # series
]
+ [str(i) for i in range(500, 588)]
+ [ # notes + toc + description
# 6XX subjects are extracted separately by get_subjects.subjects_for_work()
'700',
'710',
'711',
'720', # contributions
'246',
'730',
'740', # other titles
'852', # location
'856', # electronic location / URL
]
)
def read_dnb(rec: MarcBase) -> dict[str, list[str]] | None:
fields = rec.get_fields('016')
for f in fields:
(source,) = f.get_subfield_values('2') or ['']
(control_number,) = f.get_subfield_values('a') or ['']
if source == DNB_AGENCY_CODE and control_number:
return {'dnb': [control_number]}
return None
def read_issn(rec: MarcBase) -> dict[str, list[str]] | None:
fields = rec.get_fields('022')
if not fields:
return None
return {'issn': [v for f in fields for v in f.get_subfield_values('a')]}
def read_lccn(rec: MarcBase) -> list[str]:
fields = rec.get_fields('010')
found = []
for f in fields:
for lccn in f.get_subfield_values('a'):
if re_question.match(lccn):
continue
m = re_lccn.search(lccn)
if not m:
continue
lccn = m.group(1).strip()
# zero-pad any dashes so the final digit group has size = 6
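            # e.g. 'agr25-9' -> 'agr25000009' (illustrative LCCN, not from the source)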
lccn = lccn.replace('-', '0' * (7 - (len(lccn) - lccn.find('-'))))
if lccn:
found.append(lccn)
return found
def remove_duplicates(seq: list[Any]) -> list[Any]:
u = []
for x in seq:
if x not in u:
u.append(x)
return u
def read_oclc(rec: MarcBase) -> list[str]:
found = []
tag_001 = rec.get_control('001')
tag_003 = rec.get_control('003')
if tag_001 and tag_003 and re_ocolc.match(tag_003):
oclc = tag_001
m = re_ocn_or_ocm.match(oclc)
if m:
oclc = m.group(1)
if oclc.isdigit():
found.append(oclc)
for f in rec.get_fields('035'):
for v in f.get_subfield_values('a'):
m = re_oclc.match(v)
if not m:
m = re_ocn_or_ocm.match(v)
if m and not m.group(1).isdigit():
m = None
if m:
oclc = m.group(1)
if oclc not in found:
found.append(oclc)
return remove_duplicates(found)
def read_lc_classification(rec: MarcBase) -> list[str]:
fields = rec.get_fields('050')
found = []
for f in fields:
contents = f.get_contents('ab')
if 'b' in contents:
b = ' '.join(contents['b'])
if 'a' in contents:
found += [f'{a} {b}' for a in contents['a']]
else:
found += [b]
# https://openlibrary.org/show-marc/marc_university_of_toronto/uoft.marc:671135731:596
elif 'a' in contents:
found += contents['a']
return found
def read_isbn(rec: MarcBase) -> dict[str, list[str]] | None:
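    # Example drawn from tests/test_marc.py: 020 $a '0-14-118250-4' yields
    # {'isbn_10': ['0141182504']}; 13-character values go to 'isbn_13' instead.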
fields = rec.get_fields('020')
if not fields:
return None
found = [isbn for f in fields for isbn in tidy_isbn(rec.read_isbn(f))]
isbns: dict[str, Any] = {'isbn_10': [], 'isbn_13': []}
for isbn in remove_duplicates(found):
if len(isbn) == 13:
isbns['isbn_13'].append(isbn)
elif len(isbn) <= 16:
isbns['isbn_10'].append(isbn)
return {k: v for k, v in isbns.items() if v}
def read_dewey(rec: MarcBase) -> list[str]:
fields = rec.get_fields('082')
return [v for f in fields for v in f.get_subfield_values('a')]
def read_work_titles(rec: MarcBase) -> list[str]:
found = []
if tag_240 := rec.get_fields('240'):
for f in tag_240:
parts = f.get_subfield_values('amnpr')
found.append(remove_trailing_dot(' '.join(parts).strip(',')))
if tag_130 := rec.get_fields('130'):
for f in tag_130:
title = title_from_list(
[v for k, v in f.get_all_subfields() if k.islower() and k != 'n']
)
found.append(title)
return remove_duplicates(found)
def title_from_list(title_parts: list[str], delim: str = ' ') -> str:
# For cataloging punctuation complexities, see https://www.oclc.org/bibformats/en/onlinecataloging.html#punctuation
STRIP_CHARS = r' /,;:=' # Typical trailing punctuation for 245 subfields in ISBD cataloging standards
return delim.join(remove_trailing_dot(s.strip(STRIP_CHARS)) for s in title_parts)
def read_title(rec: MarcBase) -> dict[str, Any]:
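    # Worked example from tests/test_marc.py: a 245 with $a 'Railroad construction.'
    # and two $b parts returns {'title': 'Railroad construction'} plus the two $b
    # values joined as a ' : '-delimited 'subtitle'.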
fields = rec.get_fields('245') or rec.get_fields('740')
if not fields:
raise NoTitle('No Title found in either 245 or 740 fields.')
# example MARC record with multiple titles:
# https://openlibrary.org/show-marc/marc_western_washington_univ/wwu_bibs.mrc_revrev.mrc:299505697:862
contents = fields[0].get_contents('ach')
linkages = fields[0].get_contents('6')
bnps = fields[0].get_subfield_values('bnps')
ret: dict[str, Any] = {}
title = alternate = None
if '6' in linkages:
alternate = rec.get_linkage('245', linkages['6'][0])
# MARC record with 245$a missing:
# https://openlibrary.org/show-marc/marc_western_washington_univ/wwu_bibs.mrc_revrev.mrc:516779055:1304
if 'a' in contents:
title = title_from_list(contents['a'])
elif bnps:
title = title_from_list([bnps.pop(0)])
# talis_openlibrary_contribution/talis-openlibrary-contribution.mrc:183427199:255
if title in ('See', 'See also'):
raise SeeAlsoAsTitle(f'Title is: {title}')
# talis_openlibrary_contribution/talis-openlibrary-contribution.mrc:5654086:483
if not title:
subfields = fields[0].get_lower_subfield_values()
title = title_from_list(list(subfields))
if not title: # ia:scrapbooksofmoun03tupp
raise NoTitle('No title found from joining subfields.')
if alternate:
ret['title'] = title_from_list(list(alternate.get_subfield_values('a')))
ret['other_titles'] = [title]
else:
ret['title'] = title
# Subtitle
if bnps:
ret['subtitle'] = title_from_list(bnps, delim=' : ')
elif alternate:
subtitle = alternate.get_subfield_values('bnps')
if subtitle:
ret['subtitle'] = title_from_list(subtitle, delim=' : ')
if 'subtitle' in ret and re_bracket_field.match(ret['subtitle']):
# Remove entirely bracketed subtitles
ret.pop('subtitle')
# By statement
if 'c' in contents:
ret['by_statement'] = remove_trailing_dot(' '.join(contents['c']))
# Physical format
if 'h' in contents:
h = ' '.join(contents['h']).strip(' ')
m = re_bracket_field.match(h)
if m:
h = m.group(1)
assert h
ret['physical_format'] = h
return ret
def read_edition_name(rec: MarcBase) -> str:
fields = rec.get_fields('250')
found = [v for f in fields for v in f.get_lower_subfield_values()]
return ' '.join(found).strip('[]')
lang_map = {
'ser': 'srp', # https://www.archive.org/details/zadovoljstvauivo00lubb
'end': 'eng',
'enk': 'eng',
'ent': 'eng',
'jap': 'jpn',
'fra': 'fre',
'fle': 'dut', # Flemish -> Dutch
# 2 character to 3 character codes
'fr ': 'fre',
'it ': 'ita',
# LOC MARC Deprecated code updates
# Only covers deprecated codes where there
# is a direct 1-to-1 mapping to a single new code.
'cam': 'khm', # Khmer
'esp': 'epo', # Esperanto
'eth': 'gez', # Ethiopic
'far': 'fao', # Faroese
'fri': 'fry', # Frisian
'gae': 'gla', # Scottish Gaelic
'gag': 'glg', # Galician
'gal': 'orm', # Oromo
'gua': 'grn', # Guarani
'int': 'ina', # Interlingua (International Auxiliary Language Association)
'iri': 'gle', # Irish
'lan': 'oci', # Occitan (post 1500)
'lap': 'smi', # Sami
'mla': 'mlg', # Malagasy
'mol': 'rum', # Romanian
'sao': 'smo', # Samoan
'scc': 'srp', # Serbian
'scr': 'hrv', # Croatian
'sho': 'sna', # Shona
'snh': 'sin', # Sinhalese
'sso': 'sot', # Sotho
'swz': 'ssw', # Swazi
'tag': 'tgl', # Tagalog
'taj': 'tgk', # Tajik
'tar': 'tat', # Tatar
'tsw': 'tsn', # Tswana
}
def read_original_languages(rec: MarcBase) -> list[str]:
found = []
fields = rec.get_fields('041')
for f in fields:
is_translation = f.ind1() == '1'
found += [v.lower() for v in f.get_subfield_values('h') if len(v) == 3]
return [lang_map.get(v, v) for v in found if v != 'zxx']
def read_languages(rec: MarcBase, lang_008: str | None = None) -> list[str]:
"""Read languages from 041, if present, and combine with language from 008:35-37"""
found = []
if lang_008:
lang_008 = lang_008.lower()
if lang_008 not in (' ', '###', '|||', '', '???', 'zxx', 'n/a'):
found.append(lang_008)
for f in rec.get_fields('041'):
if f.ind2() == '7':
code_source = ' '.join(f.get_subfield_values('2'))
logger.error(f'Unrecognised language source = {code_source}')
continue # Skip anything which is using a non-MARC code source e.g. iso639-1
for value in f.get_subfield_values('a'):
value = value.replace(' ', '').replace('-', '') # remove pad/separators
if len(value) % 3 == 0:
# Obsolete cataloging practice was to concatenate all language codes in a single subfield
for k in range(0, len(value), 3):
code = value[k : k + 3].lower()
if code != 'zxx' and code not in found:
found.append(code)
else:
logger.error(f'Unrecognised MARC language code(s) = {value}')
return [lang_map.get(code, code) for code in found]
def read_pub_date(rec: MarcBase) -> str | None:
"""
Read publish date from 260$c.
"""
def publish_date(s: str) -> str:
date = s.strip('[]')
if date.lower() in ('n.d.', 's.d.'): # No date
date = '[n.d.]'
return remove_trailing_number_dot(date)
found = [v for f in rec.get_fields('260') for v in f.get_subfield_values('c')]
return publish_date(found[0]) if found else None
def read_publisher(rec: MarcBase) -> dict[str, Any] | None:
def publisher_name(s: str) -> str:
name = s.strip(' /,;:[]')
if name.lower().startswith('s.n'): # Sine nomine
name = '[s.n.]'
return name
def publish_place(s: str) -> str:
place = s.strip(' /.,;:')
# remove encompassing []
if (place[0], place[-1]) == ('[', ']'):
place = place[1:-1]
# clear unbalanced []
if place.count('[') != place.count(']'):
place = place.strip('[]')
if place.lower().startswith('s.l'): # Sine loco
place = '[s.l.]'
return place
fields = (
rec.get_fields('260')
or rec.get_fields('264')[:1]
or [link for link in [rec.get_linkage('260', '880')] if link]
)
if not fields:
return None
publisher = []
publish_places = []
for f in fields:
contents = f.get_contents('ab')
if 'b' in contents:
publisher += [publisher_name(v) for v in contents['b']]
if 'a' in contents:
publish_places += [publish_place(v) for v in contents['a']]
edition = {}
if publisher:
edition['publishers'] = publisher
if len(publish_places) and publish_places[0]:
edition['publish_places'] = publish_places
return edition
def name_from_list(name_parts: list[str]) -> str:
STRIP_CHARS = r' /,;:[]'
name = ' '.join(strip_foc(s).strip(STRIP_CHARS) for s in name_parts)
return remove_trailing_dot(name)
def read_author_person(field: MarcFieldBase, tag: str = '100') -> dict | None:
"""
    This takes either a MARC 100 Main Entry - Personal Name (non-repeatable) field
or
700 Added Entry - Personal Name (repeatable)
or
720 Added Entry - Uncontrolled Name (repeatable)
and returns an author import dict.
"""
author = {}
contents = field.get_contents('abcde6')
if 'a' not in contents and 'c' not in contents:
# Should have at least a name or title.
return None
if 'd' in contents:
author = pick_first_date(strip_foc(d).strip(',[]') for d in contents['d'])
author['name'] = name_from_list(field.get_subfield_values('abc'))
author['entity_type'] = 'person'
subfields = [
('a', 'personal_name'),
('b', 'numeration'),
('c', 'title'),
('e', 'role'),
]
for subfield, field_name in subfields:
if subfield in contents:
author[field_name] = name_from_list(contents[subfield])
if 'q' in contents:
author['fuller_name'] = ' '.join(contents['q'])
if '6' in contents: # noqa: SIM102 - alternate script name exists
if (link := field.rec.get_linkage(tag, contents['6'][0])) and (
alt_name := link.get_subfield_values('a')
):
author['alternate_names'] = [name_from_list(alt_name)]
return author
# 1. if authors in 100, 110, 111 use them
# 2. if first contrib is 700, 710, or 711 use it
def person_last_name(field: MarcFieldBase) -> str:
v = field.get_subfield_values('a')[0]
return v[: v.find(', ')] if ', ' in v else v
def last_name_in_245c(rec: MarcBase, person: MarcFieldBase) -> bool:
fields = rec.get_fields('245')
last_name = person_last_name(person).lower()
return any(
any(last_name in v.lower() for v in f.get_subfield_values('c')) for f in fields
)
def read_authors(rec: MarcBase) -> list[dict] | None:
count = 0
fields_100 = rec.get_fields('100')
fields_110 = rec.get_fields('110')
fields_111 = rec.get_fields('111')
if not any([fields_100, fields_110, fields_111]):
return None
# talis_openlibrary_contribution/talis-openlibrary-contribution.mrc:11601515:773 has two authors:
# 100 1 $aDowling, James Walter Frederick.
# 111 2 $aConference on Civil Engineering Problems Overseas.
found = [a for a in (read_author_person(f, tag='100') for f in fields_100) if a]
for f in fields_110:
name = name_from_list(f.get_subfield_values('ab'))
found.append({'entity_type': 'org', 'name': name})
for f in fields_111:
name = name_from_list(f.get_subfield_values('acdn'))
found.append({'entity_type': 'event', 'name': name})
return found or None
def read_pagination(rec: MarcBase) -> dict[str, Any] | None:
fields = rec.get_fields('300')
if not fields:
return None
pagination = []
edition: dict[str, Any] = {}
for f in fields:
pagination += f.get_subfield_values('a')
if pagination:
edition['pagination'] = ' '.join(pagination)
# strip trailing characters from pagination
edition['pagination'] = edition['pagination'].strip(' ,:;')
num = []
for x in pagination:
num += [int(i) for i in re_int.findall(x.replace(',', ''))]
num += [int(i) for i in re_int.findall(x)]
valid = [i for i in num if i < max_number_of_pages]
if valid:
edition['number_of_pages'] = max(valid)
return edition
def read_series(rec: MarcBase) -> list[str]:
found = []
for tag in ('440', '490', '830'):
fields = rec.get_fields(tag)
for f in fields:
this = []
for v in f.get_subfield_values('av'):
if v := v.rstrip('.,; '):
this.append(v)
if this:
found.append(' -- '.join(this))
return remove_duplicates(found)
def read_notes(rec: MarcBase) -> str:
found = []
for tag in range(500, 590):
if tag in (505, 520):
continue
fields = rec.get_fields(str(tag))
for f in fields:
found.append(' '.join(f.get_lower_subfield_values()).strip())
return '\n\n'.join(found)
def read_description(rec: MarcBase) -> str:
fields = rec.get_fields('520')
found = [v for f in fields for v in f.get_subfield_values('a')]
return "\n\n".join(found)
def read_url(rec: MarcBase) -> list:
found = []
for f in rec.get_fields('856'):
contents = f.get_contents('uy3zx')
if not contents.get('u'):
continue
parts = (
contents.get('y')
or contents.get('3')
or contents.get('z')
or contents.get('x', ['External source'])
)
if parts:
title = parts[0].strip()
found += [{'url': u.strip(), 'title': title} for u in contents['u']]
return found
def read_other_titles(rec: MarcBase):
return (
[' '.join(f.get_subfield_values('a')) for f in rec.get_fields('246')]
+ [' '.join(f.get_lower_subfield_values()) for f in rec.get_fields('730')]
+ [' '.join(f.get_subfield_values('apn')) for f in rec.get_fields('740')]
)
def read_location(rec: MarcBase) -> list[str] | None:
fields = rec.get_fields('852')
found = [v for f in fields for v in f.get_subfield_values('a')]
return remove_duplicates(found) if fields else None
def read_contributions(rec: MarcBase) -> dict[str, Any]:
"""
    Reads contributors from a MARC record
    and uses values in 7xx fields to set 'authors'
    if the 1xx fields do not exist. Otherwise it sets
    additional 'contributions'.
:param (MarcBinary | MarcXml) rec:
:rtype: dict
"""
want = {
'700': 'abcdeq',
'710': 'ab',
'711': 'acdn',
'720': 'a',
}
ret: dict[str, Any] = {}
skip_authors = set()
for tag in ('100', '110', '111'):
fields = rec.get_fields(tag)
for f in fields:
skip_authors.add(tuple(f.get_all_subfields()))
if not skip_authors:
for tag, marc_field_base in rec.read_fields(['700', '710', '711', '720']):
assert isinstance(marc_field_base, MarcFieldBase)
f = marc_field_base
if tag in ('700', '720'):
if 'authors' not in ret or last_name_in_245c(rec, f):
ret.setdefault('authors', []).append(read_author_person(f, tag=tag))
skip_authors.add(tuple(f.get_subfields(want[tag])))
continue
elif 'authors' in ret:
break
if tag == '710':
name = [v.strip(' /,;:') for v in f.get_subfield_values(want[tag])]
ret['authors'] = [
{'entity_type': 'org', 'name': remove_trailing_dot(' '.join(name))}
]
skip_authors.add(tuple(f.get_subfields(want[tag])))
break
if tag == '711':
name = [v.strip(' /,;:') for v in f.get_subfield_values(want[tag])]
ret['authors'] = [
{
'entity_type': 'event',
'name': remove_trailing_dot(' '.join(name)),
}
]
skip_authors.add(tuple(f.get_subfields(want[tag])))
break
for tag, marc_field_base in rec.read_fields(['700', '710', '711', '720']):
assert isinstance(marc_field_base, MarcFieldBase)
f = marc_field_base
sub = want[tag]
cur = tuple(f.get_subfields(sub))
if tuple(cur) in skip_authors:
continue
name = remove_trailing_dot(' '.join(strip_foc(i[1]) for i in cur).strip(','))
ret.setdefault('contributions', []).append(name) # need to add flip_name
return ret
def read_toc(rec: MarcBase) -> list:
fields = rec.get_fields('505')
toc = []
for f in fields:
toc_line: list[str] = []
for k, v in f.get_all_subfields():
if k == 'a':
toc_split = [i.strip() for i in v.split('--')]
if any(len(i) > 2048 for i in toc_split):
toc_split = [i.strip() for i in v.split(' - ')]
# http://openlibrary.org/show-marc/marc_miami_univ_ohio/allbibs0036.out:3918815:7321
if any(len(i) > 2048 for i in toc_split):
toc_split = [i.strip() for i in v.split('; ')]
# FIXME:
# http://openlibrary.org/show-marc/marc_western_washington_univ/wwu_bibs.mrc_revrev.mrc:938969487:3862
if any(len(i) > 2048 for i in toc_split):
toc_split = [i.strip() for i in v.split(' / ')]
assert isinstance(toc_split, list)
toc.extend(toc_split)
continue
if k == 't':
if toc_line:
toc.append(' -- '.join(toc_line))
if len(v) > 2048:
toc_line = [i.strip() for i in v.strip('/').split('--')]
else:
toc_line = [v.strip('/')]
continue
if k.islower(): # Exclude numeric, non-display subfields like $6, $7, $8
toc_line.append(v.strip(' -'))
if toc_line:
toc.append('-- '.join(toc_line))
return [{'title': s, 'type': '/type/toc_item'} for s in toc]
def update_edition(
rec: MarcBase, edition: dict[str, Any], func: Callable, field: str
) -> None:
if v := func(rec):
if field in edition and isinstance(edition[field], list):
edition[field] += v
else:
edition[field] = v
def read_edition(rec: MarcBase) -> dict[str, Any]:
"""
    Converts a MARC record object into a dict representation of an edition
suitable for importing into Open Library.
:param (MarcBinary | MarcXml) rec:
:rtype: dict
:return: Edition representation
"""
handle_missing_008 = True
edition: dict[str, Any] = {}
if tag_008 := rec.get_control('008'):
f = re_bad_char.sub(' ', tag_008)
if not f:
raise BadMARC("'008' field must not be blank")
publish_date = f[7:11]
if re_date.match(publish_date) and publish_date not in ('0000', '9999'):
edition['publish_date'] = publish_date
if f[6] == 'r' and f[11:15] > publish_date:
# Incorrect reprint date order
update_edition(rec, edition, read_pub_date, 'publish_date')
elif f[6] == 't': # Copyright date
edition['copyright_date'] = f[11:15]
if 'publish_date' not in edition: # Publication date fallback to 260$c
update_edition(rec, edition, read_pub_date, 'publish_date')
publish_country = f[15:18]
if publish_country not in ('|||', ' ', '\x01\x01\x01', '???'):
edition['publish_country'] = publish_country.strip()
if languages := read_languages(rec, lang_008=f[35:38].lower()):
edition['languages'] = languages
elif handle_missing_008:
update_edition(rec, edition, read_languages, 'languages')
update_edition(rec, edition, read_pub_date, 'publish_date')
else:
raise BadMARC("single '008' field required")
update_edition(rec, edition, read_work_titles, 'work_titles')
try:
edition.update(read_title(rec))
except NoTitle:
if 'work_titles' in edition:
assert len(edition['work_titles']) == 1
edition['title'] = edition['work_titles'][0]
del edition['work_titles']
else:
raise
update_edition(rec, edition, read_lccn, 'lccn')
update_edition(rec, edition, read_dnb, 'identifiers')
update_edition(rec, edition, read_issn, 'identifiers')
update_edition(rec, edition, read_authors, 'authors')
update_edition(rec, edition, read_oclc, 'oclc_numbers')
update_edition(rec, edition, read_lc_classification, 'lc_classifications')
update_edition(rec, edition, read_dewey, 'dewey_decimal_class')
update_edition(rec, edition, read_other_titles, 'other_titles')
update_edition(rec, edition, read_edition_name, 'edition_name')
update_edition(rec, edition, read_series, 'series')
update_edition(rec, edition, read_notes, 'notes')
update_edition(rec, edition, read_description, 'description')
update_edition(rec, edition, read_location, 'location')
update_edition(rec, edition, read_toc, 'table_of_contents')
update_edition(rec, edition, read_url, 'links')
update_edition(rec, edition, read_original_languages, 'translated_from')
edition.update(read_contributions(rec))
edition.update(subjects_for_work(rec))
for func in (read_publisher, read_isbn, read_pagination):
v = func(rec)
if v:
edition.update(v)
return edition
| 26,720 | Python | .py | 667 | 31.923538 | 119 | 0.573283 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
227 | mnemonics.py | internetarchive_openlibrary/openlibrary/catalog/marc/mnemonics.py | # read MARC mnemonics
# result is in MARC8 and still needs to be converted to Unicode
import re
re_brace = re.compile(b'(\\{.+?\\})')
mapping = {
b'{00}': b'\x00',
b'{01}': b'\x01',
b'{02}': b'\x02',
b'{03}': b'\x03',
b'{04}': b'\x04',
b'{05}': b'\x05',
b'{06}': b'\x06',
b'{07}': b'\x07',
b'{08}': b'\x08',
b'{09}': b'\t',
b'{0A}': b'\n',
b'{0B}': b'\x0b',
b'{0C}': b'\x0c',
b'{0D}': b'\r',
b'{0E}': b'\x0e',
b'{0F}': b'\x0f',
b'{0}': b'0',
b'{10}': b'\x10',
b'{11}': b'\x11',
b'{12}': b'\x12',
b'{13}': b'\x13',
b'{14}': b'\x14',
b'{15}': b'\x15',
b'{16}': b'\x16',
b'{17}': b'\x17',
b'{18}': b'\x18',
b'{19}': b'\x19',
b'{1A}': b'\x1a',
b'{1B}': b'\x1b',
b'{1C}': b'\x1c',
b'{1D}': b'\x1d',
b'{1E}': b'\x1e',
b'{1F}': b'\x1f',
b'{1}': b'1',
b'{20}': b' ',
b'{21}': b'!',
b'{22}': b'"',
b'{23}': b'#',
b'{24}': b'$',
b'{25}': b'%',
b'{26}': b'&',
    b'{27}': b"'",
b'{28}': b'(',
b'{29}': b')',
b'{2A}': b'*',
b'{2B}': b'+',
b'{2C}': b',',
b'{2D}': b'-',
b'{2E}': b'.',
b'{2F}': b'/',
b'{2}': b'2',
b'{30}': b'0',
b'{31}': b'1',
b'{32}': b'2',
b'{33}': b'3',
b'{34}': b'4',
b'{35}': b'5',
b'{36}': b'6',
b'{37}': b'7',
b'{38}': b'8',
b'{39}': b'9',
b'{3A}': b':',
b'{3B}': b';',
b'{3C}': b'<',
b'{3D}': b'=',
b'{3E}': b'>',
b'{3F}': b'?',
b'{3}': b'3',
b'{40}': b'@',
b'{41}': b'A',
b'{42}': b'B',
b'{43}': b'C',
b'{44}': b'D',
b'{45}': b'E',
b'{46}': b'F',
b'{47}': b'G',
b'{48}': b'H',
b'{49}': b'I',
b'{4A}': b'J',
b'{4B}': b'K',
b'{4C}': b'L',
b'{4D}': b'M',
b'{4E}': b'N',
b'{4F}': b'O',
b'{4}': b'4',
b'{50}': b'P',
b'{51}': b'Q',
b'{52}': b'R',
b'{53}': b'S',
b'{54}': b'T',
b'{55}': b'U',
b'{56}': b'V',
b'{57}': b'W',
b'{58}': b'X',
b'{59}': b'Y',
b'{5A}': b'Z',
b'{5B}': b'[',
b'{5C}': b'\\',
b'{5D}': b']',
b'{5E}': b'^',
b'{5F}': b'_',
b'{5}': b'5',
b'{60}': b'`',
b'{61}': b'a',
b'{62}': b'b',
b'{63}': b'c',
b'{64}': b'd',
b'{65}': b'e',
b'{66}': b'f',
b'{67}': b'g',
b'{68}': b'h',
b'{69}': b'i',
b'{6A}': b'j',
b'{6B}': b'k',
b'{6C}': b'l',
b'{6D}': b'm',
b'{6E}': b'n',
b'{6F}': b'o',
b'{6}': b'6',
b'{70}': b'p',
b'{71}': b'q',
b'{72}': b'r',
b'{73}': b's',
b'{74}': b't',
b'{75}': b'u',
b'{76}': b'v',
b'{77}': b'w',
b'{78}': b'x',
b'{79}': b'y',
b'{7A}': b'z',
b'{7B}': b'{',
b'{7C}': b'|',
b'{7D}': b'}',
b'{7E}': b'~',
b'{7F}': b'\x7f',
b'{7}': b'7',
b'{80}': b'\x80',
b'{81}': b'\x81',
b'{82}': b'\x82',
b'{83}': b'\x83',
b'{84}': b'\x84',
b'{85}': b'\x85',
b'{86}': b'\x86',
b'{87}': b'\x87',
b'{88}': b'\x88',
b'{89}': b'\x89',
b'{8A}': b'\x8a',
b'{8B}': b'\x8b',
b'{8C}': b'\x8c',
b'{8D}': b'\x8d',
b'{8E}': b'\x8e',
b'{8F}': b'\x8f',
b'{8}': b'8',
b'{90}': b'\x90',
b'{91}': b'\x91',
b'{92}': b'\x92',
b'{93}': b'\x93',
b'{94}': b'\x94',
b'{95}': b'\x95',
b'{96}': b'\x96',
b'{97}': b'\x97',
b'{98}': b'\x98',
b'{99}': b'\x99',
b'{9A}': b'\x9a',
b'{9B}': b'\x9b',
b'{9C}': b'\x9c',
b'{9D}': b'\x9d',
b'{9E}': b'\x9e',
b'{9F}': b'\x9f',
b'{9}': b'9',
b'{A0}': b'\xa0',
b'{A1}': b'\xa1',
b'{A2}': b'\xa2',
b'{A3}': b'\xa3',
b'{A4}': b'\xa4',
b'{A5}': b'\xa5',
b'{A6}': b'\xa6',
b'{A7}': b'\xa7',
b'{A8}': b'\xa8',
b'{A9}': b'\xa9',
b'{AA}': b'\xaa',
b'{AB}': b'\xab',
b'{AC}': b'\xac',
b'{AD}': b'\xad',
b'{AElig}': b'\xa5',
b'{AE}': b'\xae',
b'{AF}': b'\xaf',
b'{Aacute}': b'\xe2A',
b'{Abreve}': b'\xe6A',
b'{Acirc}': b'\xe3A',
b'{Acy}': b'A',
b'{Agrave}': b'\xe1A',
b'{Aogon}': b'\xf1A',
b'{Aring}': b'\xeaA',
b'{Atilde}': b'\xe4A',
b'{Auml}': b'\xe8A',
b'{A}': b'A',
b'{B0}': b'\xb0',
b'{B1}': b'\xb1',
b'{B2}': b'\xb2',
b'{B3}': b'\xb3',
b'{B4}': b'\xb4',
b'{B5}': b'\xb5',
b'{B6}': b'\xb6',
b'{B7}': b'\xb7',
b'{B8}': b'\xb8',
b'{B9}': b'\xb9',
b'{BA}': b'\xba',
b'{BB}': b'\xbb',
b'{BC}': b'\xbc',
b'{BD}': b'\xbd',
b'{BE}': b'\xbe',
b'{BF}': b'\xbf',
b'{Bcy}': b'B',
b'{B}': b'B',
b'{C0}': b'\xc0',
b'{C1}': b'\xc1',
b'{C2}': b'\xc2',
b'{C3}': b'\xc3',
b'{C4}': b'\xc4',
b'{C5}': b'\xc5',
b'{C6}': b'\xc6',
b'{C7}': b'\xc7',
b'{C8}': b'\xc8',
b'{C9}': b'\xc9',
b'{CA}': b'\xca',
b'{CB}': b'\xcb',
b'{CC}': b'\xcc',
b'{CD}': b'\xcd',
b'{CE}': b'\xce',
b'{CF}': b'\xcf',
b'{CHcy}': b'Ch',
b'{Cacute}': b'\xe2C',
b'{Ccaron}': b'\xe9C',
b'{Ccedil}': b'\xf0C',
b'{C}': b'C',
b'{D0}': b'\xd0',
b'{D1}': b'\xd1',
b'{D2}': b'\xd2',
b'{D3}': b'\xd3',
b'{D4}': b'\xd4',
b'{D5}': b'\xd5',
b'{D6}': b'\xd6',
b'{D7}': b'\xd7',
b'{D8}': b'\xd8',
b'{D9}': b'\xd9',
b'{DA}': b'\xda',
b'{DB}': b'\xdb',
b'{DC}': b'\xdc',
b'{DD}': b'\xdd',
b'{DE}': b'\xde',
b'{DF}': b'\xdf',
b'{DJEcy}': b'\xa3',
b'{DZEcy}': b'Dz',
b'{DZHEcy}': b'D\xe9z',
b'{Dagger}': b'|',
b'{Dcaron}': b'\xe9D',
b'{Dcy}': b'D',
b'{Dstrok}': b'\xa3',
b'{D}': b'D',
b'{E0}': b'\xe0',
b'{E1}': b'\xe1',
b'{E2}': b'\xe2',
b'{E3}': b'\xe3',
b'{E4}': b'\xe4',
b'{E5}': b'\xe5',
b'{E6}': b'\xe6',
b'{E7}': b'\xe7',
b'{E8}': b'\xe8',
b'{E9}': b'\xe9',
b'{EA}': b'\xea',
b'{EB}': b'\xeb',
b'{EC}': b'\xec',
b'{ED}': b'\xed',
b'{EE}': b'\xee',
b'{EF}': b'\xef',
b'{ETH}': b'\xa3',
b'{Eacute}': b'\xe2E',
b'{Ecaron}': b'\xe9E',
b'{Ecirc}': b'\xe3E',
b'{Ecy}': b'\xe7E',
b'{Egrave}': b'\xe1E',
b'{Ehookr}': b'\xf1E',
b'{Eogon}': b'\xf1E',
b'{Euml}': b'\xe8E',
b'{E}': b'E',
b'{F0}': b'\xf0',
b'{F1}': b'\xf1',
b'{F2}': b'\xf2',
b'{F3}': b'\xf3',
b'{F4}': b'\xf4',
b'{F5}': b'\xf5',
b'{F6}': b'\xf6',
b'{F7}': b'\xf7',
b'{F8}': b'\xf8',
b'{F9}': b'\xf9',
b'{FA}': b'\xfa',
b'{FB}': b'\xfb',
b'{FC}': b'\xfc',
b'{FD}': b'\xfd',
b'{FE}': b'\xfe',
b'{FF}': b'\xff',
b'{Fcy}': b'F',
b'{F}': b'F',
b'{GEcy}': b'G',
b'{GHcy}': b'G',
b'{GJEcy}': b'\xe2G',
b'{Gcy}': b'G',
b'{G}': b'G',
b'{HARDcy}': b'\xb7',
b'{Hcy}': b'H',
b'{H}': b'H',
b'{IEcy}': b'\xebI\xecE',
b'{IJlig}': b'IJ',
b'{IOcy}': b'\xebI\xecO',
b'{IYcy}': b'Y',
b'{Iacute}': b'\xe2I',
b'{Icaron}': b'\xe9I',
b'{Icirc}': b'\xe3I',
b'{Icy}': b'I',
b'{Idot}': b'\xe7I',
b'{Igrave}': b'\xe1I',
b'{Iumlcy}': b'\xe8I',
b'{Iuml}': b'\xe8I',
b'{I}': b'I',
b'{JEcy}': b'J',
b'{JIcy}': b'\xe8I',
b'{Jcy}': b'\xe6I',
b'{J}': b'J',
b'{KHcy}': b'Kh',
b'{KJEcy}': b'\xe2K',
b'{Kcy}': b'K',
b'{K}': b'K',
b'{LJEcy}': b'Lj',
b'{Lacute}': b'\xe2L',
b'{Lcy}': b'L',
b'{Lstrok}': b'\xa1',
b'{L}': b'L',
b'{Mcy}': b'M',
b'{M}': b'M',
b'{NJEcy}': b'Nj',
b'{Nacute}': b'\xe2N',
b'{Ncaron}': b'\xe9N',
b'{Ncy}': b'N',
b'{No}': b'No.',
b'{Ntilde}': b'\xb4N',
b'{N}': b'N',
b'{OElig}': b'\xa6',
b'{Oacute}': b'\xe2O',
b'{Ocirc}': b'\xe3O',
b'{Ocy}': b'O',
b'{Odblac}': b'\xeeO',
b'{Ograve}': b'\xe1O',
b'{Ohorn}': b'\xac',
b'{Ostrok}': b'\xa2',
b'{Otilde}': b'\xe4O',
b'{Ouml}': b'\xe8O',
b'{O}': b'O',
b'{Pcy}': b'P',
b'{P}': b'P',
b'{Q}': b'Q',
b'{Racute}': b'\xe2R',
b'{Rcaron}': b'\xe9R',
b'{Rcy}': b'R',
b'{R}': b'R',
b'{SHCHcy}': b'Shch',
b'{SHcy}': b'Sh',
b'{SOFTcy}': b'\xa7',
b'{Sacute}': b'\xe2S',
b'{Scommab}': b'\xf7S',
b'{Scy}': b'S',
b'{S}': b'S',
b'{THORN}': b'\xa4',
b'{TSHEcy}': b'\xe2C',
b'{TScy}': b'\xebT\xecS',
b'{Tcaron}': b'\xe9T',
b'{Tcommab}': b'\xf7T',
b'{Tcy}': b'T',
b'{T}': b'T',
b'{Uacute}': b'\xe2U',
b'{Ubrevecy}': b'\xe6U',
b'{Ucirc}': b'\xe3U',
b'{Ucy}': b'U',
b'{Udblac}': b'\xeeU',
b'{Ugrave}': b'\xe1U',
b'{Uhorn}': b'\xad',
b'{Uring}': b'\xeaU',
b'{Uuml}': b'\xe8U',
b'{U}': b'U',
b'{Vcy}': b'V',
b'{V}': b'V',
b'{W}': b'W',
b'{X}': b'X',
b'{YAcy}': b'\xebI\xecA',
b'{YEcy}': b'E',
b'{YIcy}': b'I',
b'{YUcy}': b'\xebI\xecU',
b'{Yacute}': b'\xe2Y',
b'{Ycy}': b'Y',
b'{Y}': b'Y',
b'{ZHcy}': b'Zh',
b'{ZHuacy}': b'\xebZ\xech',
b'{Zacute}': b'\xe2Z',
b'{Zcy}': b'Z',
b'{Zdot}': b'\xe7Z',
b'{Z}': b'Z',
b'{aacute}': b'\xe2a',
b'{abreve}': b'\xe6a',
b'{acirc}': b'\xe3a',
b'{acute}': b'\xe2',
b'{acy}': b'a',
b'{aelig}': b'\xb5',
b'{agrave}': b'\xe1a',
b'{agr}': b'b',
b'{alif}': b'\xae',
b'{amp}': b'&',
b'{aogon}': b'\xf1a',
b'{apos}': b"'",
b'{arab}': b'(3',
b'{aring}': b'\xeaa',
b'{ast}': b'*',
b'{asuper}': b'a',
b'{atilde}': b'\xe4a',
b'{auml}': b'\xe8a',
b'{ayn}': b'\xb0',
b'{a}': b'a',
b'{bcy}': b'b',
b'{bgr}': b'c',
b'{breveb}': b'\xf9',
b'{breve}': b'\xe6',
b'{brvbar}': b'|',
b'{bsol}': b'\\',
b'{bull}': b'*',
b'{b}': b'b',
b'{cacute}': b'\xe2c',
b'{candra}': b'\xef',
b'{caron}': b'\xe9',
b'{ccaron}': b'\xe9c',
b'{ccedil}': b'\xf0c',
b'{cedil}': b'\xf0',
b'{cent}': b'c',
b'{chcy}': b'ch',
b'{circb}': b'\xf4',
b'{circ}': b'\xe3',
b'{cjk}': b'$1',
b'{colon}': b':',
b'{commaa}': b'\xfe',
b'{commab}': b'\xf7',
b'{commat}': b'@',
b'{comma}': b',',
b'{copy}': b'\xc3',
b'{curren}': b'*',
b'{cyril}': b'(N',
b'{c}': b'c',
b'{dagger}': b'|',
b'{dblac}': b'\xee',
b'{dbldotb}': b'\xf3',
b'{dblunder}': b'\xf5',
b'{dcaron}': b'\xe9d',
b'{dcy}': b'd',
b'{deg}': b'\xc0',
b'{diaer}': b'\xe8',
b'{divide}': b'/',
b'{djecy}': b'\xb3',
b'{dollar}': b'$',
b'{dotb}': b'\xf2',
b'{dot}': b'\xe7',
b'{dstrok}': b'\xb3',
b'{dzecy}': b'dz',
b'{dzhecy}': b'd\xe9z',
b'{d}': b'd',
b'{eacute}': b'\xe2e',
b'{ea}': b'\xea',
b'{ecaron}': b'\xe9e',
b'{ecirc}': b'\xe3e',
b'{ecy}': b'\xe7e',
b'{egrave}': b'\xe1e',
b'{ehookr}': b'\xf1e',
b'{eogon}': b'\xf1e',
b'{equals}': b'=',
b'{esc}': b'\x1b',
b'{eth}': b'\xba',
b'{euml}': b'\xe8e',
b'{excl}': b'!',
b'{e}': b'e',
b'{fcy}': b'f',
b'{flat}': b'\xa9',
b'{fnof}': b'f',
b'{frac12}': b'1/2',
b'{frac14}': b'1/4',
b'{frac34}': b'3/4',
b'{f}': b'f',
b'{gcy}': b'g',
b'{gecy}': b'g',
b'{ggr}': b'g',
b'{ghcy}': b'g',
b'{gjecy}': b'\xe2g',
b'{grave}': b'\xe1',
b'{greek}': b'g',
b'{gs}': b'\x1d',
b'{gt}': b'>',
b'{g}': b'g',
b'{hardcy}': b'\xb7',
b'{hardsign}': b'\xb7',
b'{hcy}': b'h',
b'{hebrew}': b'(2',
b'{hellip}': b'...',
b'{hooka}': b'\xe0',
b'{hookl}': b'\xf7',
b'{hookr}': b'\xf1',
b'{hyphen}': b'-',
b'{h}': b'h',
b'{iacute}': b'\xe2i',
b'{icaron}': b'\xe9i',
b'{icirc}': b'\xe3i',
b'{icy}': b'i',
b'{iecy}': b'\xebi\xece',
b'{iexcl}': b'\xc6',
b'{igrave}': b'\xe1i',
b'{ijlig}': b'ij',
b'{inodot}': b'\xb8',
b'{iocy}': b'\xebi\xeco',
b'{iquest}': b'\xc5',
b'{iumlcy}': b'\xe8i',
b'{iuml}': b'\xe8i',
b'{iycy}': b'y',
b'{i}': b'i',
b'{jcy}': b'\xe6i',
b'{jecy}': b'j',
b'{jicy}': b'\xe8i',
b'{joiner}': b'\x8d',
b'{j}': b'j',
b'{kcy}': b'k',
b'{khcy}': b'kh',
b'{kjecy}': b'\xe2k',
b'{k}': b'k',
b'{lacute}': b'\xe2l',
b'{laquo}': b'"',
b'{latin}': b'(B',
b'{lcub}': b'{',
b'{lcy}': b'l',
b'{ldbltil}': b'\xfa',
b'{ldquo}': b'"',
b'{ljecy}': b'lj',
b'{llig}': b'\xeb',
b'{lpar}': b'(',
b'{lsqb}': b'[',
b'{lsquor}': b"'",
b'{lsquo}': b"'",
b'{lstrok}': b'\xb1',
b'{lt}': b'<',
b'{l}': b'l',
b'{macr}': b'\xe5',
b'{mcy}': b'm',
b'{mdash}': b'--',
b'{middot}': b'\xa8',
b'{mlPrime}': b'\xb7',
b'{mllhring}': b'\xb0',
b'{mlprime}': b'\xa7',
b'{mlrhring}': b'\xae',
b'{m}': b'm',
b'{nacute}': b'\xe2n',
b'{ncaron}': b'\xe9n',
b'{ncy}': b'n',
b'{ndash}': b'--',
b'{njecy}': b'nj',
b'{nonjoin}': b'\x8e',
b'{ntilde}': b'\xb4n',
b'{num}': b'#',
b'{n}': b'n',
b'{oacute}': b'\xe2o',
b'{ocirc}': b'\xe3o',
b'{ocy}': b'o',
b'{odblac}': b'\xeeo',
b'{oelig}': b'\xb6',
b'{ogon}': b'\xf1',
b'{ograve}': b'\xe1o',
b'{ohorn}': b'\xbc',
b'{ordf}': b'a',
b'{ordm}': b'o',
b'{ostrok}': b'\xb2',
b'{osuper}': b'o',
b'{otilde}': b'\xe4o',
b'{ouml}': b'\xe8o',
b'{o}': b'o',
b'{para}': b'|',
b'{pcy}': b'p',
b'{percnt}': b'%',
b'{period}': b'.',
b'{phono}': b'\xc2',
b'{pipe}': b'|',
b'{plusmn}': b'\xab',
b'{plus}': b'+',
b'{pound}': b'\xb9',
b'{p}': b'p',
b'{quest}': b'?',
b'{quot}': b'"',
b'{q}': b'q',
b'{racute}': b'\xe2r',
b'{raquo}': b'"',
b'{rcaron}': b'\xe9r',
b'{rcedil}': b'\xf8',
b'{rcommaa}': b'\xed',
b'{rcub}': b'}',
b'{rcy}': b'r',
b'{rdbltil}': b'\xfb',
b'{rdquofh}': b'"',
b'{rdquor}': b'"',
b'{reg}': b'\xaa',
b'{ringb}': b'\xf4',
b'{ring}': b'\xea',
b'{rlig}': b'\xec',
b'{rpar}': b')',
b'{rsqb}': b']',
b'{rsquor}': b"'",
b'{rsquo}': b"'",
b'{rs}': b'\x1e',
b'{r}': b'r',
b'{sacute}': b'\xe2s',
b'{scommab}': b'\xf7s',
b'{scriptl}': b'\xc1',
b'{scy}': b's',
b'{sect}': b'|',
b'{semi}': b';',
b'{sharp}': b'\xc4',
b'{shchcy}': b'shch',
b'{shcy}': b'sh',
b'{shy}': b'-',
b'{softcy}': b'\xa7',
b'{softsign}': b'\xa7',
b'{sol}': b'/',
b'{space}': b' ',
b'{spcirc}': b'^',
b'{spgrave}': b'`',
b'{sptilde}': b'~',
b'{spundscr}': b'_',
b'{squf}': b'|',
b'{sub}': b'b',
b'{sup1}': b'\x1bp1\x1bs',
b'{sup2}': b'\x1bp2\x1bs',
b'{sup3}': b'\x1bp3\x1bs',
b'{super}': b'p',
b'{szlig}': b'ss',
b'{s}': b's',
b'{tcaron}': b'\xe9t',
b'{tcommab}': b'\xf7t',
b'{tcy}': b't',
b'{thorn}': b'\xb4',
b'{tilde}': b'\xe4',
b'{times}': b'x',
b'{trade}': b'(Tm)',
b'{tscy}': b'\xebt\xecs',
b'{tshecy}': b'\xe2c',
b'{t}': b't',
b'{uacute}': b'\xe2u',
b'{ubrevecy}': b'\xe6u',
b'{ucirc}': b'\xe3u',
b'{ucy}': b'u',
b'{udblac}': b'\xeeu',
b'{ugrave}': b'\xe1u',
b'{uhorn}': b'\xbd',
b'{uml}': b'\xe8',
b'{under}': b'\xf6',
b'{uring}': b'\xeau',
b'{us}': b'\x1f',
b'{uuml}': b'\xe8u',
b'{u}': b'u',
b'{vcy}': b'v',
b'{verbar}': b'|',
b'{vlineb}': b'\xf2',
b'{v}': b'v',
b'{w}': b'w',
b'{x}': b'x',
b'{yacute}': b'\xe2y',
b'{yacy}': b'\xebi\xeca',
b'{ycy}': b'y',
b'{yecy}': b'e',
b'{yen}': b'Y',
b'{yicy}': b'i',
b'{yucy}': b'\xebi\xecu',
b'{y}': b'y',
b'{zacute}': b'\xe2z',
b'{zcy}': b'z',
b'{zdot}': b'\xe7z',
b'{zhcy}': b'zh',
b'{zhuacy}': b'\xebz\xech',
b'{z}': b'z',
}
def load_table(filename):
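    # Inferred from the parsing below (the table file is not included here):
    # comma-separated lines such as '{mnemonic},<description>,065d 066d ...'
    # where each 'NNNd' token is a decimal byte value, so '065d' contributes chr(65) == 'A'.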
mapping = {}
for line in (i.split(',') for i in open(filename) if i.startswith('{')):
key = line[0]
value = ''
for d in line[2].strip().split(" "):
assert len(d) == 4
assert d[3] == 'd'
value += chr(int(d[0:3]))
mapping[key] = value
return mapping
def read(input):
"""
:param input bytes: MARC21 binary field data
:rtype: bytes
"""
return re_brace.sub(lambda x: mapping.get(x.group(1), x.group(1)), input)
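# Round-trip example, abridged from tests/test_mnemonics.py:
#   read(b'Tha{mllhring}{macr}alib{macr}i,') == b'Tha\xb0\xe5alib\xe5i,'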
| 16,118 | Python | .py | 710 | 17.678873 | 77 | 0.369481 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
228 | marc_xml.py | internetarchive_openlibrary/openlibrary/catalog/marc/marc_xml.py | from lxml import etree
from unicodedata import normalize
from collections.abc import Iterator
from openlibrary.catalog.marc.marc_base import MarcBase, MarcFieldBase, MarcException
data_tag = '{http://www.loc.gov/MARC21/slim}datafield'
control_tag = '{http://www.loc.gov/MARC21/slim}controlfield'
subfield_tag = '{http://www.loc.gov/MARC21/slim}subfield'
leader_tag = '{http://www.loc.gov/MARC21/slim}leader'
record_tag = '{http://www.loc.gov/MARC21/slim}record'
collection_tag = '{http://www.loc.gov/MARC21/slim}collection'
class BlankTag(MarcException):
pass
class BadSubtag(MarcException):
pass
def read_marc_file(f):
for event, elem in etree.iterparse(f, tag=record_tag):
yield MarcXml(elem)
elem.clear()
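# Illustrative use (assumed file name): stream a large MARCXML file lazily with
#   for rec in read_marc_file(open('records.xml', 'rb')):
#       ...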
def norm(s: str) -> str:
return normalize('NFC', str(s.replace('\xa0', ' ')))
def get_text(e: etree._Element) -> str:
return norm(e.text) if e.text else ''
class DataField(MarcFieldBase):
def __init__(self, rec, element: etree._Element) -> None:
assert element.tag == data_tag, f'Got {element.tag}'
self.element = element
assert isinstance(element, etree._Element)
self.rec = rec
self.tag = element.tag
def ind1(self) -> str:
return self.element.attrib['ind1']
def ind2(self) -> str:
return self.element.attrib['ind2']
def read_subfields(self) -> Iterator[tuple[str, etree._Element]]:
for sub in self.element:
assert sub.tag == subfield_tag
k = sub.attrib['code']
if k == '':
raise BadSubtag
yield k, sub
def get_all_subfields(self) -> Iterator[tuple[str, str]]:
for k, v in self.read_subfields():
yield k, get_text(v)
class MarcXml(MarcBase):
def __init__(self, record: etree._Element) -> None:
if record.tag == collection_tag:
record = record[0]
assert record.tag == record_tag
self.record = record
def leader(self) -> str:
leader_element = self.record[0]
if not isinstance(leader_element.tag, str):
leader_element = self.record[1]
assert leader_element.tag == leader_tag, (
'MARC XML is possibly corrupt in conversion. Unexpected non-Leader tag: '
f'{leader_element.tag}'
)
return get_text(leader_element)
def read_fields(self, want: list[str]) -> Iterator[tuple[str, str | DataField]]:
non_digit = False
for f in self.record:
if f.tag not in {data_tag, control_tag}:
continue
tag = f.attrib['tag']
if tag == '':
raise BlankTag
if tag == 'FMT':
continue
if not tag.isdigit():
non_digit = True
else:
if tag[0] != '9' and non_digit:
raise BadSubtag
if f.attrib['tag'] not in want:
continue
yield f.attrib['tag'], self.decode_field(f)
def decode_field(self, field: etree._Element) -> str | DataField:
if field.tag == control_tag:
return get_text(field)
elif field.tag == data_tag:
return DataField(self, field)
else:
return ''
| 3,278 | Python | .py | 83 | 30.746988 | 85 | 0.600567 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
229 | test_marc.py | internetarchive_openlibrary/openlibrary/catalog/marc/tests/test_marc.py | from openlibrary.catalog.marc.get_subjects import subjects_for_work
from openlibrary.catalog.marc.marc_base import MarcBase
from openlibrary.catalog.marc.parse import read_isbn, read_pagination, read_title
class MockField:
def __init__(self, subfields):
self.subfield_sequence = subfields
self.contents = {}
for k, v in subfields:
self.contents.setdefault(k, []).append(v)
def get_contents(self, want):
contents = {}
for k, v in self.get_subfields(want):
if v:
contents.setdefault(k, []).append(v)
return contents
def get_all_subfields(self):
return self.get_subfields(self.contents)
def get_subfields(self, want):
for w in want:
if w in self.contents:
for i in self.contents.get(w):
yield w, i
def get_subfield_values(self, want):
return [v for k, v in self.get_subfields(want)]
class MockRecord(MarcBase):
"""usage: MockRecord('020', [('a', 'value'), ('c', 'value'), ('c', 'value')])
Currently only supports a single tag per Record."""
def __init__(self, marc_field, subfields):
self.tag = marc_field
self.field = MockField(subfields)
def decode_field(self, field):
return field
def read_fields(self, want):
if self.tag in want:
yield self.tag, self.field
def get_fields(self, tag):
if tag == self.tag:
return [self.field]
def test_read_isbn():
data = [
('0300067003 (cloth : alk. paper)', '0300067003'),
('0197263771 (cased)', '0197263771'),
('8831789589 (pbk.)', '8831789589'),
('9788831789585 (pbk.)', '9788831789585'),
('1402051891 (hd.bd.)', '1402051891'),
('9061791308', '9061791308'),
('9788831789530', '9788831789530'),
('8831789538', '8831789538'),
('0-14-118250-4', '0141182504'),
('0321434250 (textbook)', '0321434250'),
# 12 character ISBNs currently get assigned to isbn_10
        # unsure whether this is a common / valid use case:
('97883178953X ', '97883178953X'),
]
for value, expect in data:
rec = MockRecord('020', [('a', value)])
output = read_isbn(rec)
isbn_type = 'isbn_13' if len(expect) == 13 else 'isbn_10'
assert output[isbn_type][0] == expect
def test_read_pagination():
data = [
('xx, 1065 , [57] p.', 1065),
('193 p., 31 p. of plates', 193),
]
for value, expect in data:
rec = MockRecord('300', [('a', value)])
output = read_pagination(rec)
assert output['number_of_pages'] == expect
assert output['pagination'] == value
def test_subjects_for_work():
data = [
(
[
('a', 'Authors, American'),
('y', '19th century'),
('x', 'Biography.'),
],
{
'subject_times': ['19th century'],
'subjects': ['American Authors', 'Biography'],
},
),
(
[('a', 'Western stories'), ('x', 'History and criticism.')],
{'subjects': ['Western stories', 'History and criticism']},
),
(
[
('a', 'United States'),
('x', 'History'),
('y', 'Revolution, 1775-1783'),
('x', 'Influence.'),
],
# TODO: this expectation does not capture the intent or ordering of the original MARC, investigate x subfield!
{
'subject_times': ['Revolution, 1775-1783'],
'subjects': ['United States', 'Influence', 'History'],
},
),
# 'United States -- History -- Revolution, 1775-1783 -- Influence.'
(
[
('a', 'West Indies, British'),
('x', 'History'),
('y', '18th century.'),
],
{
'subject_times': ['18th century'],
'subjects': ['British West Indies', 'History'],
},
),
# 'West Indies, British -- History -- 18th century.'),
(
[
('a', 'Great Britain'),
('x', 'Relations'),
('z', 'West Indies, British.'),
],
{
'subject_places': ['British West Indies'],
'subjects': ['Great Britain', 'Relations'],
},
),
# 'Great Britain -- Relations -- West Indies, British.'),
(
[
('a', 'West Indies, British'),
('x', 'Relations'),
('z', 'Great Britain.'),
],
{
'subject_places': ['Great Britain'],
'subjects': ['British West Indies', 'Relations'],
},
),
# 'West Indies, British -- Relations -- Great Britain.')
]
for value, expect in data:
output = subjects_for_work(MockRecord('650', value))
assert sorted(output) == sorted(expect)
for key in ('subjects', 'subject_places', 'subject_times'):
assert sorted(output.get(key, [])) == sorted(expect.get(key, []))
def test_read_title():
data = [
(
[
('a', 'Railroad construction.'),
('b', 'Theory and practice.'),
(
'b',
'A textbook for the use of students in colleges and technical schools.',
),
],
{
'title': 'Railroad construction',
# TODO: Investigate whether this colon between subtitles is spaced correctly
'subtitle': 'Theory and practice : A textbook for the use of students in colleges and technical schools',
},
)
]
for value, expect in data:
output = read_title(MockRecord('245', value))
assert output == expect
def test_by_statement():
data = [
(
[
('a', 'Trois contes de No\u0308el'),
('c', '[par] Madame Georges Renard,'),
('c', 'edited by F. Th. Meylan ...'),
],
{
'title': 'Trois contes de No\u0308el',
'by_statement': '[par] Madame Georges Renard, edited by F. Th. Meylan ...',
},
)
]
for value, expect in data:
output = read_title(MockRecord('245', value))
assert output == expect
| 6,578 | Python | .py | 180 | 25.644444 | 122 | 0.500314 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
230 | test_get_subjects.py | internetarchive_openlibrary/openlibrary/catalog/marc/tests/test_get_subjects.py | from openlibrary.catalog.marc.marc_xml import MarcXml
from openlibrary.catalog.marc.marc_binary import MarcBinary
from openlibrary.catalog.marc.get_subjects import four_types, read_subjects
from lxml import etree
from pathlib import Path
import pytest
import lxml.etree
xml_samples = [
('bijouorannualofl1828cole', {}),
('flatlandromanceo00abbouoft', {}),
('lesabndioeinas00sche', {}),
('onquietcomedyint00brid', {}),
('zweibchersatir01horauoft', {}),
('00schlgoog', {'subject': {'Jewish law': 1}}),
(
'0descriptionofta1682unit',
{
'place': {'United States': 1},
'subject': {
"Decedents' estates": 1,
'Taxation': 1,
'S. 1983 97th Congress': 1,
'S. 2479 97th Congress': 1,
},
},
),
(
'13dipolarcycload00burk',
{
'subject': {
'Allene': 1,
'Ring formation (Chemistry)': 1,
'Trimethylenemethane': 1,
}
},
),
(
'1733mmoiresdel00vill',
{'place': {'Spain': 1}, 'subject': {'Courts and court life': 1, 'History': 1}},
),
(
'39002054008678_yale_edu',
{
'place': {'Ontario': 2},
'subject': {'Description and travel': 1, 'History': 1},
},
),
(
'abhandlungender01ggoog',
{
'place': {'Lusatia': 1, 'Germany': 1},
'subject': {'Natural history': 2, 'Periodicals': 1},
},
),
(
'nybc200247',
{
'person': {'Simon Dubnow (1860-1941)': 1},
'subject': {'Philosophy': 1, 'Jews': 1, 'History': 1},
},
),
(
'scrapbooksofmoun03tupp',
{
'person': {'William Vaughn Tupper (1835-1898)': 1},
'subject': {
'Photographs': 4,
'Sources': 1,
'Description and travel': 2,
'Travel': 1,
'History': 1,
'Travel photography': 1,
},
'place': {'Europe': 3, 'Egypt': 2},
'time': {'19th century': 1},
},
),
('secretcodeofsucc00stjo', {'subject': {'Success in business': 1}}),
(
'warofrebellionco1473unit',
{
'time': {'Civil War, 1861-1865': 2},
'place': {'United States': 2, 'Confederate States of America': 1},
'subject': {'Sources': 2, 'Regimental histories': 1, 'History': 3},
},
),
]
bin_samples = [
('bpl_0486266893.mrc', {}),
('flatlandromanceo00abbouoft_meta.mrc', {}),
('lc_1416500308.mrc', {}),
('talis_245p.mrc', {}),
('talis_740.mrc', {}),
('talis_empty_245.mrc', {}),
('talis_multi_work_tiles.mrc', {}),
('talis_no_title2.mrc', {}),
('talis_no_title.mrc', {}),
('talis_see_also.mrc', {}),
('talis_two_authors.mrc', {}),
('zweibchersatir01horauoft_meta.mrc', {}),
(
'1733mmoiresdel00vill_meta.mrc',
{'place': {'Spain': 1}, 'subject': {'Courts and court life': 1, 'History': 1}},
),
(
'collingswood_520aa.mrc',
{
'subject': {
'Learning disabilities': 1,
'People with disabilities': 1,
'Talking books': 1,
'Juvenile literature': 1,
'Juvenile fiction': 3,
'Friendship': 1,
}
},
),
('collingswood_bad_008.mrc', {'subject': {'War games': 1, 'Battles': 1}}),
(
'histoirereligieu05cr_meta.mrc',
{'org': {'Jesuits': 2}, 'subject': {'Influence': 1, 'History': 1}},
),
(
'ithaca_college_75002321.mrc',
{
'place': {'New Jersey': 3},
'subject': {
'Congresses': 3,
'Negative income tax': 1,
'Guaranteed annual income': 1,
'Labor supply': 1,
},
},
),
(
'ithaca_two_856u.mrc',
{'place': {'Great Britain': 2}, 'subject': {'Statistics': 1, 'Periodicals': 2}},
),
(
'lc_0444897283.mrc',
{
'subject': {
'Shipyards': 1,
'Shipbuilding': 1,
'Data processing': 2,
'Congresses': 3,
'Naval architecture': 1,
'Automation': 1,
}
},
),
(
'ocm00400866.mrc',
{'subject': {'School songbooks': 1, 'Choruses (Mixed voices) with piano': 1}},
),
(
'scrapbooksofmoun03tupp_meta.mrc',
{
'person': {'William Vaughn Tupper (1835-1898)': 1},
'subject': {
'Photographs': 4,
'Sources': 1,
'Description and travel': 2,
'Travel': 1,
'History': 1,
'Travel photography': 1,
},
'place': {'Europe': 3, 'Egypt': 2},
'time': {'19th century': 1},
},
),
('secretcodeofsucc00stjo_meta.mrc', {'subject': {'Success in business': 1}}),
(
'talis_856.mrc',
{
'subject': {
'Politics and government': 1,
'Jewish-Arab relations': 1,
'Middle East': 1,
'Arab-Israeli conflict': 1,
},
'time': {'1945-': 1},
},
),
(
'uoft_4351105_1626.mrc',
{'subject': {'Aesthetics': 1, 'History and criticism': 1}},
),
(
'upei_broken_008.mrc',
{'place': {'West Africa': 1}, 'subject': {'Social life and customs': 1}},
),
(
'upei_short_008.mrc',
{
'place': {'Charlottetown (P.E.I.)': 1, 'Prince Edward Island': 1},
'subject': {
'Social conditions': 1,
'Economic conditions': 1,
'Guidebooks': 1,
'Description and travel': 2,
},
},
),
(
'warofrebellionco1473unit_meta.mrc',
{
'time': {'Civil War, 1861-1865': 2},
'place': {'United States': 2, 'Confederate States of America': 1},
'subject': {'Sources': 2, 'Regimental histories': 1, 'History': 3},
},
),
(
'wrapped_lines.mrc',
{
'org': {
'United States. Congress. House. Committee on Foreign Affairs': 1,
},
'place': {'United States': 1},
'subject': {'Foreign relations': 1},
},
),
(
'wwu_51323556.mrc',
{
'subject': {
'Statistical methods': 1,
'Spatial analysis (Statistics)': 1,
'Population geography': 1,
}
},
),
]
record_tag = '{http://www.loc.gov/MARC21/slim}record'
TEST_DATA = Path(__file__).with_name('test_data')
class TestSubjects:
@pytest.mark.parametrize('item,expected', xml_samples)
def test_subjects_xml(self, item, expected):
filepath = TEST_DATA / 'xml_input' / f'{item}_marc.xml'
element = etree.parse(
filepath, parser=lxml.etree.XMLParser(resolve_entities=False)
).getroot()
if element.tag != record_tag and element[0].tag == record_tag:
element = element[0]
rec = MarcXml(element)
assert read_subjects(rec) == expected
@pytest.mark.parametrize('item,expected', bin_samples)
def test_subjects_bin(self, item, expected):
filepath = TEST_DATA / 'bin_input' / item
rec = MarcBinary(filepath.read_bytes())
assert read_subjects(rec) == expected
def test_four_types_combine(self):
subjects = {'subject': {'Science': 2}, 'event': {'Party': 1}}
expect = {'subject': {'Science': 2, 'Party': 1}}
assert four_types(subjects) == expect
def test_four_types_event(self):
subjects = {'event': {'Party': 1}}
expect = {'subject': {'Party': 1}}
assert four_types(subjects) == expect
| 8,063 | Python | .py | 259 | 21.467181 | 88 | 0.481272 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
231 | test_mnemonics.py | internetarchive_openlibrary/openlibrary/catalog/marc/tests/test_mnemonics.py | from openlibrary.catalog.marc.mnemonics import read
def test_read_conversion_to_marc8():
input_ = (
b'Tha{mllhring}{macr}alib{macr}i, {mllhring}Abd al-Malik ibn Mu{dotb}hammad,'
)
output = b'Tha\xb0\xe5alib\xe5i, \xb0Abd al-Malik ibn Mu\xf2hammad,'
assert read(input_) == output
def test_read_no_change():
input_ = b'El Ing.{eniero} Federico E. Capurro y el nacimiento de la profesi\xe2on bibliotecaria en el Uruguay.'
assert read(input_) == input_
| 485 | Python | .py | 10 | 43.9 | 116 | 0.707006 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
232 | test_marc_html.py | internetarchive_openlibrary/openlibrary/catalog/marc/tests/test_marc_html.py | from pathlib import Path
from openlibrary.catalog.marc.html import html_record
TEST_DATA = Path(__file__).with_name('test_data') / 'bin_input'
def test_html_line_marc8():
filepath = TEST_DATA / 'uoft_4351105_1626.mrc'
expected_utf8 = (
'<b>700</b> <code>1 <b>$a</b>Ovsi︠a︡nnikov, Mikhail Fedotovich.</code><br>'
)
record = html_record(filepath.read_bytes())
result = record.html()
assert expected_utf8 in result
| 453 | Python | .py | 11 | 36.545455 | 84 | 0.679724 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
233 | test_marc_binary.py | internetarchive_openlibrary/openlibrary/catalog/marc/tests/test_marc_binary.py | from pathlib import Path
from openlibrary.catalog.marc.marc_binary import BinaryDataField, MarcBinary
TEST_DATA = Path(__file__).with_name('test_data') / 'bin_input'
class MockMARC:
def __init__(self, encoding):
"""
:param encoding str: 'utf8' or 'marc8'
"""
self.encoding = encoding
def marc8(self):
return self.encoding == 'marc8'
def test_wrapped_lines():
filepath = TEST_DATA / 'wrapped_lines.mrc'
rec = MarcBinary(filepath.read_bytes())
ret = list(rec.read_fields(['520']))
assert len(ret) == 2
a, b = ret
assert a[0] == '520'
assert b[0] == '520'
a_content = next(iter(a[1].get_all_subfields()))[1]
assert len(a_content) == 2290
b_content = next(iter(b[1].get_all_subfields()))[1]
assert len(b_content) == 243
class Test_BinaryDataField:
def test_translate(self):
bdf = BinaryDataField(MockMARC('marc8'), b'')
assert (
bdf.translate(b'Vieira, Claudio Bara\xe2una,') == 'Vieira, Claudio Baraúna,'
)
def test_bad_marc_line(self):
line = (
b'0 \x1f\xe2aEtude objective des ph\xe2enom\xe1enes neuro-psychiques;\x1e'
)
bdf = BinaryDataField(MockMARC('marc8'), line)
assert list(bdf.get_all_subfields()) == [
('á', 'Etude objective des phénomènes neuro-psychiques;')
]
class Test_MarcBinary:
def test_read_fields_returns_all(self):
filepath = TEST_DATA / 'onquietcomedyint00brid_meta.mrc'
rec = MarcBinary(filepath.read_bytes())
fields = list(rec.read_fields())
assert len(fields) == 13
assert fields[0][0] == '001'
for f, v in fields:
if f == '001':
f001 = v
elif f == '008':
f008 = v
elif f == '100':
f100 = v
assert isinstance(f001, str)
assert isinstance(f008, str)
assert isinstance(f100, BinaryDataField)
def test_get_subfield_value(self):
filepath = TEST_DATA / 'onquietcomedyint00brid_meta.mrc'
rec = MarcBinary(filepath.read_bytes())
author_field = rec.get_fields('100')
assert isinstance(author_field, list)
assert isinstance(author_field[0], BinaryDataField)
subfields = author_field[0].get_subfields('a')
assert next(subfields) == ('a', 'Bridgham, Gladys Ruth. [from old catalog]')
values = author_field[0].get_subfield_values('a')
(name,) = values # 100$a is non-repeatable, there will be only one
assert name == 'Bridgham, Gladys Ruth. [from old catalog]'
| 2,631 | Python | .py | 65 | 32.4 | 88 | 0.607843 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
234 | test_parse.py | internetarchive_openlibrary/openlibrary/catalog/marc/tests/test_parse.py | import json
import pytest
from openlibrary.catalog.marc.parse import (
read_author_person,
read_edition,
NoTitle,
SeeAlsoAsTitle,
)
from openlibrary.catalog.marc.marc_binary import MarcBinary
from openlibrary.catalog.marc.marc_xml import DataField, MarcXml
from lxml import etree
from pathlib import Path
from collections.abc import Iterable
import lxml.etree
collection_tag = '{http://www.loc.gov/MARC21/slim}collection'
record_tag = '{http://www.loc.gov/MARC21/slim}record'
xml_samples = [
'39002054008678_yale_edu',
'flatlandromanceo00abbouoft',
'nybc200247',
'secretcodeofsucc00stjo',
'warofrebellionco1473unit',
'zweibchersatir01horauoft',
'onquietcomedyint00brid',
'00schlgoog',
'0descriptionofta1682unit',
'1733mmoiresdel00vill',
'13dipolarcycload00burk',
'bijouorannualofl1828cole',
'soilsurveyrepor00statgoog',
'cu31924091184469', # MARC XML collection record
'engineercorpsofh00sher',
]
bin_samples = [
'bijouorannualofl1828cole_meta.mrc',
'onquietcomedyint00brid_meta.mrc', # LCCN with leading characters
'merchantsfromcat00ben_meta.mrc',
'memoirsofjosephf00fouc_meta.mrc', # MARC8 encoded with e-acute
'equalsign_title.mrc', # Title ending in '='
'bpl_0486266893.mrc',
'flatlandromanceo00abbouoft_meta.mrc',
'histoirereligieu05cr_meta.mrc',
'ithaca_college_75002321.mrc',
'lc_0444897283.mrc',
'lc_1416500308.mrc',
'lesnoirsetlesrou0000garl_meta.mrc',
'ocm00400866.mrc',
'secretcodeofsucc00stjo_meta.mrc',
'uoft_4351105_1626.mrc',
'warofrebellionco1473unit_meta.mrc',
'wrapped_lines.mrc',
'wwu_51323556.mrc',
'zweibchersatir01horauoft_meta.mrc',
'talis_two_authors.mrc',
'talis_no_title.mrc',
'talis_740.mrc',
'talis_245p.mrc',
'talis_856.mrc',
'talis_multi_work_tiles.mrc',
'talis_empty_245.mrc',
'ithaca_two_856u.mrc',
'collingswood_bad_008.mrc',
'collingswood_520aa.mrc',
'upei_broken_008.mrc',
'upei_short_008.mrc',
'diebrokeradical400poll_meta.mrc',
'cu31924091184469_meta.mrc',
'engineercorpsofh00sher_meta.mrc',
'henrywardbeecher00robauoft_meta.mrc',
'thewilliamsrecord_vol29b_meta.mrc',
'13dipolarcycload00burk_meta.mrc',
'710_org_name_in_direct_order.mrc',
'830_series.mrc',
'880_alternate_script.mrc',
'880_table_of_contents.mrc',
'880_Nihon_no_chasho.mrc',
'880_publisher_unlinked.mrc',
'880_arabic_french_many_linkages.mrc',
'test-publish-sn-sl.mrc',
'test-publish-sn-sl-nd.mrc',
]
date_tests = [ # MARC, expected publish_date
('9999_sd_dates.mrc', '[n.d.]'),
('reprint_date_wrong_order.mrc', '2010'),
('9999_with_correct_date_in_260.mrc', '2003'),
]
TEST_DATA = Path(__file__).with_name('test_data')
class TestParseMARCXML:
@pytest.mark.parametrize('i', xml_samples)
def test_xml(self, i):
expect_filepath = (TEST_DATA / 'xml_expect' / i).with_suffix('.json')
filepath = TEST_DATA / 'xml_input' / f'{i}_marc.xml'
element = etree.parse(
filepath, parser=lxml.etree.XMLParser(resolve_entities=False)
).getroot()
# Handle MARC XML collection elements in our test_data expectations:
if element.tag == collection_tag and element[0].tag == record_tag:
element = element[0]
rec = MarcXml(element)
edition_marc_xml = read_edition(rec)
assert edition_marc_xml
j = json.load(expect_filepath.open())
assert j, f'Unable to open test data: {expect_filepath}'
msg = (
f'Processed MARCXML values do not match expectations in {expect_filepath}.'
)
assert sorted(edition_marc_xml) == sorted(j), msg
msg += ' Key: '
for key, value in edition_marc_xml.items():
if isinstance(value, Iterable): # can not sort a list of dicts
assert len(value) == len(j[key]), msg + key
for item in j[key]:
assert item in value, msg + key
else:
assert value == j[key], msg + key
class TestParseMARCBinary:
@pytest.mark.parametrize('i', bin_samples)
def test_binary(self, i):
expect_filepath = (TEST_DATA / 'bin_expect' / i).with_suffix('.json')
filepath = TEST_DATA / 'bin_input' / i
rec = MarcBinary(filepath.read_bytes())
edition_marc_bin = read_edition(rec)
assert edition_marc_bin
if not Path(expect_filepath).is_file():
# Missing test expectations file. Create a template from the input, but fail the current test.
data = json.dumps(edition_marc_bin, indent=2)
pytest.fail(
f'Expectations file {expect_filepath} not found: Please review and commit this JSON:\n{data}'
)
j = json.load(expect_filepath.open())
assert j, f'Unable to open test data: {expect_filepath}'
assert sorted(edition_marc_bin) == sorted(
j
), f'Processed binary MARC fields do not match expectations in {expect_filepath}'
msg = f'Processed binary MARC values do not match expectations in {expect_filepath}'
for key, value in edition_marc_bin.items():
if isinstance(value, Iterable): # can not sort a list of dicts
assert len(value) == len(j[key]), msg
for item in j[key]:
assert item in value, f'{msg}. Key: {key}'
else:
assert value == j[key], msg
def test_raises_see_also(self):
filepath = TEST_DATA / 'bin_input' / 'talis_see_also.mrc'
rec = MarcBinary(filepath.read_bytes())
with pytest.raises(SeeAlsoAsTitle):
read_edition(rec)
def test_raises_no_title(self):
filepath = TEST_DATA / 'bin_input' / 'talis_no_title2.mrc'
rec = MarcBinary(filepath.read_bytes())
with pytest.raises(NoTitle):
read_edition(rec)
@pytest.mark.parametrize('marcfile,expect', date_tests)
def test_dates(self, marcfile, expect):
filepath = TEST_DATA / 'bin_input' / marcfile
rec = MarcBinary(filepath.read_bytes())
edition = read_edition(rec)
assert edition['publish_date'] == expect
class TestParse:
def test_read_author_person(self):
xml_author = """
<datafield xmlns="http://www.loc.gov/MARC21/slim" tag="100" ind1="1" ind2="0">
<subfield code="a">Rein, Wilhelm,</subfield>
<subfield code="d">1809-1865.</subfield>
</datafield>"""
test_field = DataField(
None,
etree.fromstring(
xml_author, parser=lxml.etree.XMLParser(resolve_entities=False)
),
)
result = read_author_person(test_field)
# Name order remains unchanged from MARC order
assert result['name'] == result['personal_name'] == 'Rein, Wilhelm'
assert result['birth_date'] == '1809'
assert result['death_date'] == '1865'
assert result['entity_type'] == 'person'
| 7,077 | Python | .py | 177 | 32.723164 | 109 | 0.64541 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
235 | __init__.py | internetarchive_openlibrary/openlibrary/olbase/__init__.py | """Infobase extension for Open Library.
"""
from . import events
from ..plugins import ol_infobase
def init_plugin():
ol_infobase.init_plugin()
events.setup()
| 170 | Python | .py | 7 | 21.714286 | 39 | 0.7375 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
236 | events.py | internetarchive_openlibrary/openlibrary/olbase/events.py | """Infobase event hooks for Open Library.
Triggers and handles various events from Infobase. All the events are triggered using eventer.
List of events:
* infobase.all: Triggered for any change in Infobase. The infobase event object is passed as argument.
* infobase.edit: Triggered for edits. Changeset is passed as argument.
"""
import logging
import web
import eventer
from infogami.infobase import config, server
from openlibrary.utils import olmemcache
logger = logging.getLogger("openlibrary.olbase")
def setup():
setup_event_listener()
def setup_event_listener():
logger.info("setting up infobase events for Open Library")
ol = server.get_site('openlibrary.org')
ib = server._infobase
# Convert infobase event into generic eventer event
ib.add_event_listener(lambda event: eventer.trigger("infobase.all", event))
@eventer.bind("infobase.all")
def trigger_subevents(event):
"""Trigger infobase.edit event for edits."""
if event.name in ['save', 'save_many']:
changeset = event.data['changeset']
author = changeset['author'] or changeset['ip']
keys = [c['key'] for c in changeset['changes']]
logger.info(
"Edit by %s, changeset_id=%s, changes=%s", author, changeset["id"], keys
)
eventer.trigger("infobase.edit", changeset)
@eventer.bind("infobase.edit")
def invalidate_memcache(changeset):
"""Invalidate memcache entries effected by this change."""
if memcache_client := get_memcache():
keys = MemcacheInvalidater().find_keys(changeset)
if keys:
logger.info("invalidating %s", keys)
memcache_client.delete_multi(keys)
class MemcacheInvalidater:
"""Class to find keys to invalidate from memcache on edit."""
def find_keys(self, changeset):
"""Returns keys for the effected entries by this change."""
methods = [
self.find_data,
self.find_lists,
self.find_edition_counts,
]
keys = set()
for m in methods:
keys.update(m(changeset))
return list(keys)
def find_data(self, changeset):
"""Returns the data entries effected by this change.
The data entry stores the history, lists and edition_count of a page.
"""
return ["d" + c['key'] for c in changeset['changes']]
def find_lists(self, changeset):
"""Returns the list entries effected by this change.
When a list is modified, the data of the user and the data of each
seed are invalidated.
"""
docs = changeset['docs'] + changeset['old_docs']
rx = web.re_compile(r"(/people/[^/]*)?/lists/OL\d+L")
for doc in docs:
if match := doc and rx.match(doc['key']):
if owner := match.group(1):
yield "d" + owner # d/people/foo
for seed in doc.get('seeds', []):
yield "d" + self.seed_to_key(seed)
def find_edition_counts(self, changeset):
"""Returns the edition_count entries effected by this change."""
docs = changeset['docs'] + changeset['old_docs']
return {k for doc in docs for k in self.find_edition_counts_for_doc(doc)}
def find_edition_counts_for_doc(self, doc):
"""Returns the memcache keys to be invalided for edition_counts effected by editing this doc."""
if doc and doc['type']['key'] == '/type/edition':
return ["d" + w['key'] for w in doc.get("works", [])]
else:
return []
def seed_to_key(self, seed):
"""Converts seed to key.
>>> invalidater = MemcacheInvalidater()
>>> invalidater.seed_to_key({"key": "/books/OL1M"})
'/books/OL1M'
>>> invalidater.seed_to_key("subject:love")
'/subjects/love'
>>> invalidater.seed_to_key("place:san_francisco")
'/subjects/place:san_francisco'
"""
if isinstance(seed, dict):
return seed['key']
elif seed.startswith("subject:"):
return "/subjects/" + seed[len("subject:") :]
else:
return "/subjects/" + seed
@web.memoize
def get_memcache():
"""Returns memcache client created from infobase configuration."""
cache = config.get("cache", {})
if cache.get("type") == "memcache":
return olmemcache.Client(cache['servers'])
| 4,406 | Python | .py | 102 | 35.343137 | 106 | 0.631172 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
237 | test_events.py | internetarchive_openlibrary/openlibrary/olbase/tests/test_events.py | from .. import events
class TestMemcacheInvalidater:
def test_seed_to_key(self):
m = events.MemcacheInvalidater()
assert m.seed_to_key({"key": "/books/OL1M"}) == "/books/OL1M"
assert m.seed_to_key("subject:love") == "/subjects/love"
assert m.seed_to_key("place:san_francisco") == "/subjects/place:san_francisco"
assert m.seed_to_key("person:mark_twain") == "/subjects/person:mark_twain"
assert m.seed_to_key("time:2000") == "/subjects/time:2000"
def test_find_lists(self):
changeset = {
"changes": [{"key": "/people/anand/lists/OL1L", "revision": 1}],
"old_docs": [None],
"docs": [
{
"key": "/people/anand/lists/OL1L",
"type": {"key": "/type/list"},
"revision": 1,
"seeds": [{"key": "/books/OL1M"}, "subject:love"],
}
],
}
m = events.MemcacheInvalidater()
assert sorted(m.find_lists(changeset)) == [
"d/books/OL1M",
"d/people/anand",
"d/subjects/love",
]
def test_find_lists2(self):
changeset = {
"changes": [{"key": "/people/anand/lists/OL1L", "revision": 2}],
"old_docs": [
{
"key": "/people/anand/lists/OL1L",
"type": {"key": "/type/list"},
"revision": 1,
"seeds": [{"key": "/books/OL1M"}, "subject:love"],
}
],
"docs": [
{
"key": "/people/anand/lists/OL1L",
"type": {"key": "/type/list"},
"revision": 2,
"seeds": [
{"key": "/authors/OL1A"},
"subject:love",
"place:san_francisco",
],
}
],
}
m = events.MemcacheInvalidater()
keys = sorted(set(m.find_lists(changeset)))
assert keys == [
"d/authors/OL1A",
"d/books/OL1M",
"d/people/anand",
"d/subjects/love",
"d/subjects/place:san_francisco",
]
def test_edition_count_for_doc(self):
m = events.MemcacheInvalidater()
assert m.find_edition_counts_for_doc(None) == []
doc = {
"key": "/books/OL1M",
"type": {"key": "/type/edition"},
"works": [{"key": "/works/OL1W"}],
}
assert m.find_edition_counts_for_doc(doc) == ["d/works/OL1W"]
def test_find_keys(self):
m = events.MemcacheInvalidater()
changeset = {
"changes": [{"key": "/sandbox", "revision": 1}],
"old_docs": [None],
"docs": [
{
"key": "/sandbox",
"type": {"key": "/type/page"},
"revision": 1,
"title": "Sandbox",
}
],
}
assert m.find_keys(changeset) == ["d/sandbox"]
| 3,137 | Python | .py | 85 | 23.364706 | 86 | 0.431624 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
238 | test_ol_infobase.py | internetarchive_openlibrary/openlibrary/olbase/tests/test_ol_infobase.py | from openlibrary.plugins.ol_infobase import OLIndexer
class TestOLIndexer:
def test_expand_isbns(self):
indexer = OLIndexer()
isbn_10 = ['123456789X']
isbn_13 = ['9781234567897']
both = isbn_10 + isbn_13
assert indexer.expand_isbns([]) == []
assert sorted(indexer.expand_isbns(isbn_10)) == both
assert sorted(indexer.expand_isbns(isbn_13)) == both
assert sorted(indexer.expand_isbns(both)) == both
| 468 | Python | .py | 11 | 35.181818 | 60 | 0.650549 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
239 | showmarc.py | internetarchive_openlibrary/openlibrary/views/showmarc.py | """
Hook to show MARC or other source record details in Open Library.
"""
from .. import app
import web
import re
import requests
class old_show_marc(app.view):
path = "/show-marc/(.*)"
def GET(self, param):
raise web.seeother('/show-records/' + param)
class show_ia(app.view):
path = "/show-records/ia:(.*)"
def GET(self, ia):
error_404 = False
url = f'https://archive.org/download/{ia}/{ia}_meta.mrc'
try:
response = requests.get(url)
response.raise_for_status()
data = response.content
except requests.HTTPError as e:
if e.response.status_code == 404:
error_404 = True
else:
return "ERROR:" + str(e)
if error_404: # no MARC record
url = f'https://archive.org/download/{ia}/{ia}_meta.xml'
try:
response = requests.get(url)
response.raise_for_status()
data = response.content
except requests.HTTPError as e:
return "ERROR:" + str(e)
raise web.seeother('https://archive.org/details/' + ia)
books = web.ctx.site.things(
{
'type': '/type/edition',
'source_records': 'ia:' + ia,
}
) or web.ctx.site.things(
{
'type': '/type/edition',
'ocaid': ia,
}
)
from openlibrary.catalog.marc import html
try:
leader_len = int(data[:5])
except ValueError:
return "ERROR reading MARC for " + ia
if len(data) != leader_len:
data = data.decode('utf-8').encode('raw_unicode_escape')
assert len(data) == int(data[:5])
try:
record = html.html_record(data)
except ValueError:
record = None
return app.render_template("showia", ia, record, books)
class show_amazon(app.view):
path = "/show-records/amazon:(.*)"
def GET(self, asin):
return app.render_template("showamazon", asin)
class show_bwb(app.view):
path = "/show-records/bwb:(.*)"
def GET(self, isbn):
return app.render_template("showbwb", isbn)
class show_google_books(app.view):
path = "/show-records/google_books:(.*)"
def GET(self, isbn):
return app.render_template("showgoogle_books", isbn)
re_bad_meta_mrc = re.compile(r'^([^/]+)_meta\.mrc$')
re_lc_sanfranpl = re.compile(r'^sanfranpl(\d+)/sanfranpl(\d+)\.out')
class show_marc(app.view):
path = r"/show-records/(.*):(\d+):(\d+)"
def GET(self, filename, offset, length):
m = re_bad_meta_mrc.match(filename)
if m:
raise web.seeother('/show-records/ia:' + m.group(1))
m = re_lc_sanfranpl.match(filename)
if m: # archive.org is case-sensitive
mixed_case = (
f'SanFranPL{m.group(1)}/SanFranPL{m.group(2)}.out:{offset}:{length}'
)
raise web.seeother('/show-records/' + mixed_case)
if filename == 'collingswoodlibrarymarcdump10-27-2008/collingswood.out':
loc = f'CollingswoodLibraryMarcDump10-27-2008/Collingswood.out:{offset}:{length}'
raise web.seeother('/show-records/' + loc)
loc = f"marc:{filename}:{offset}:{length}"
books = web.ctx.site.things(
{
'type': '/type/edition',
'source_records': loc,
}
)
offset = int(offset)
length = int(length)
r0, r1 = offset, offset + 100000
url = 'https://archive.org/download/%s' % filename
headers = {'Range': 'bytes=%d-%d' % (r0, r1)}
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
result = response.content[:100000]
except requests.HTTPError as e:
return "ERROR:" + str(e)
if (len_in_rec := int(result[:5])) != length:
raise web.seeother(
'/show-records/%s:%d:%d' % (filename, offset, len_in_rec)
)
from openlibrary.catalog.marc import html
try:
record = html.html_record(result[0:length])
except ValueError:
record = None
return app.render_template("showmarc", record, filename, offset, length, books)
| 4,403 | Python | .py | 115 | 28.252174 | 93 | 0.554013 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
240 | loanstats.py | internetarchive_openlibrary/openlibrary/views/loanstats.py | """Loan Stats"""
import web
from infogami.utils import delegate
from ..core.lending import get_availabilities
from infogami.utils.view import public
from ..utils import dateutil
from .. import app
from ..core import cache
from ..core.observations import Observations
from ..core.booknotes import Booknotes
from ..core.follows import PubSub
from ..core.bookshelves import Bookshelves
from ..core.yearly_reading_goals import YearlyReadingGoals
from ..core.ratings import Ratings
from ..plugins.admin.code import get_counts
from ..plugins.worksearch.code import get_solr_works
LENDING_TYPES = '(libraries|regions|countries|collections|subjects|format)'
SINCE_DAYS = {
'now': 0,
'daily': 1,
'weekly': 7,
'monthly': 30,
'yearly': 365,
'forever': None,
}
def reading_log_summary():
# enable to work w/ cached
if 'env' not in web.ctx:
delegate.fakeload()
stats = Bookshelves.summary()
stats.update(YearlyReadingGoals.summary())
stats.update(Ratings.summary())
stats.update(Observations.summary())
stats.update(Booknotes.summary())
stats.update(PubSub.summary())
return stats
cached_reading_log_summary = cache.memcache_memoize(
reading_log_summary, 'stats.readling_log_summary', timeout=dateutil.HOUR_SECS
)
@public
def get_trending_books(
since_days=1,
since_hours=0,
limit=18,
page=1,
books_only=False,
sort_by_count=True,
minimum=None,
):
logged_books = (
Bookshelves.fetch(get_activity_stream(limit=limit, page=page)) # i.e. "now"
if (since_days == 0 and since_hours == 0)
else Bookshelves.most_logged_books(
since=dateutil.todays_date_minus(days=since_days, hours=since_hours),
limit=limit,
page=page,
fetch=True,
sort_by_count=sort_by_count,
minimum=minimum,
)
)
return (
[book['work'] for book in logged_books if book.get('work')]
if books_only
else logged_books
)
def cached_get_most_logged_books(shelf_id=None, since_days=1, limit=20, page=1):
def get_cachable_trending_books(shelf_id=None, since_days=1, limit=20, page=1):
# enable to work w/ cached
if 'env' not in web.ctx:
delegate.fakeload()
# Return as dict to enable cache serialization
return [
dict(book)
for book in Bookshelves.most_logged_books(
shelf_id=shelf_id,
since=dateutil.date_n_days_ago(since_days),
limit=limit,
page=page,
)
]
return cache.memcache_memoize(
get_cachable_trending_books, 'stats.trending', timeout=dateutil.HOUR_SECS
)(shelf_id=shelf_id, since_days=since_days, limit=limit, page=page)
def reading_log_leaderboard(limit=None):
# enable to work w/ cached
if 'env' not in web.ctx:
delegate.fakeload()
most_read = Bookshelves.most_logged_books(
Bookshelves.PRESET_BOOKSHELVES['Already Read'], limit=limit
)
most_wanted_all = Bookshelves.most_logged_books(
Bookshelves.PRESET_BOOKSHELVES['Want to Read'], limit=limit
)
most_wanted_month = Bookshelves.most_logged_books(
Bookshelves.PRESET_BOOKSHELVES['Want to Read'],
limit=limit,
since=dateutil.DATE_ONE_MONTH_AGO,
)
return {
'leaderboard': {
'most_read': most_read,
'most_wanted_all': most_wanted_all,
'most_wanted_month': most_wanted_month,
'most_rated_all': Ratings.most_rated_books(),
}
}
def cached_reading_log_leaderboard(limit=None):
return cache.memcache_memoize(
reading_log_leaderboard,
'stats.readling_log_leaderboard',
timeout=dateutil.HOUR_SECS,
)(limit)
def get_cached_reading_log_stats(limit):
stats = cached_reading_log_summary()
stats.update(cached_reading_log_leaderboard(limit))
return stats
class stats(app.view):
path = "/stats"
def GET(self):
counts = get_counts()
counts.reading_log = cached_reading_log_summary()
return app.render_template("admin/index", counts)
class lending_stats(app.view):
path = "/stats/lending(?:/%s/(.+))?" % LENDING_TYPES
def GET(self, key, value):
raise web.seeother("/")
def get_activity_stream(limit=None, page=1):
# enable to work w/ cached
if 'env' not in web.ctx:
delegate.fakeload()
return Bookshelves.get_recently_logged_books(limit=limit, page=page)
def get_cached_activity_stream(limit):
return cache.memcache_memoize(
get_activity_stream,
'stats.activity_stream',
timeout=dateutil.HOUR_SECS,
)(limit)
class activity_stream(app.view):
path = "/trending(/?.*)"
def GET(self, mode=''):
i = web.input(page=1)
page = i.page
if not mode:
raise web.seeother("/trending/now")
mode = mode[1:] # remove slash
limit = 20
if mode == "now":
logged_books = Bookshelves.fetch(
get_activity_stream(limit=limit, page=page)
)
else:
shelf_id = None # optional; get from web.input()?
logged_books = Bookshelves.fetch(
cached_get_most_logged_books(
since_days=SINCE_DAYS[mode], limit=limit, page=page
)
)
return app.render_template("trending", logged_books=logged_books, mode=mode)
class readinglog_stats(app.view):
path = "/stats/readinglog"
def GET(self):
MAX_LEADERBOARD_SIZE = 50
i = web.input(limit="10", mode="all")
        limit = min(int(i.limit), MAX_LEADERBOARD_SIZE)
stats = get_cached_reading_log_stats(limit=limit)
solr_docs = get_solr_works(
f"/works/OL{item['work_id']}W"
for leaderboard in stats['leaderboard'].values()
for item in leaderboard
)
# Fetch works from solr and inject into leaderboard
for leaderboard in stats['leaderboard'].values():
for item in leaderboard:
key = f"/works/OL{item['work_id']}W"
if key in solr_docs:
item['work'] = solr_docs[key]
else:
item['work'] = web.ctx.site.get(key)
works = [
item['work']
for leaderboard in stats['leaderboard'].values()
for item in leaderboard
]
availabilities = get_availabilities(works)
for leaderboard in stats['leaderboard'].values():
for item in leaderboard:
if availabilities.get(item['work']['key']):
item['availability'] = availabilities.get(item['work']['key'])
return app.render_template("stats/readinglog", stats=stats)
| 6,853 | Python | .py | 190 | 28.221053 | 84 | 0.628286 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
241 | processors.py | internetarchive_openlibrary/openlibrary/utils/processors.py | """Generic web.py application processors.
"""
import web
import time
__all__ = ["RateLimitProcessor"]
class RateLimitProcessor:
"""Application processor to ratelimit the access per ip."""
def __init__(
self, limit: int, window_size: int = 600, path_regex: str = "/.*"
) -> None:
"""Creates a rate-limit processor to limit the number of
requests/ip in the time frame.
:param int limit: the maximum number of requests allowed in the given time window.
:param int window_size: the time frame in seconds during which the requests are measured.
        :param str path_regex: regular expression to specify which urls are rate-limited.
"""
self.path_regex = web.re_compile(path_regex)
self.limit = limit
self.window_size = window_size
self.reset(None)
def reset(self, timestamp):
self.window = {}
self.window_timestamp = timestamp
def get_window(self):
t = int(time.time() / self.window_size)
if t != self.window_timestamp:
self.reset(t)
return self.window
def check_rate(self):
"""Returns True if access rate for the current IP address is
less than the allowed limit.
"""
window = self.get_window()
ip = web.ctx.ip
if window.get(ip, 0) < self.limit:
window[ip] = 1 + window.get(ip, 0)
return True
else:
return False
    def __call__(self, handler):
        if self.path_regex.match(web.ctx.path):
            if self.check_rate():
                return handler()
            else:
                raise web.HTTPError("503 Service Unavailable")
        else:
            # requests outside the rate-limited paths pass through untouched
            return handler()
| 1,699 | Python | .py | 45 | 29.311111 | 97 | 0.608643 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
242 | compress.py | internetarchive_openlibrary/openlibrary/utils/compress.py | # incremental zlib compression, written by solrize, August 2009
import zlib
__doc__ = """
Compressor object for medium-sized, statistically-similar strings.
The idea is that you have a lot of moderate-sized strings (short email
messages or the like) that you would like to compress independently,
for storage in a lookup table where space is at a premium. They
strings might be a few hundred bytes long on average. That's not
enough to get much compression by gzipping without context. gzip
works by starting with no knowledge, then building up knowledge (and
improving its compression ratio) as it goes along.
The trick is to "pre-seed" the gzip compressor with a bunch of text
(say a few kilobytes of messages concatenated) similar to the ones
that you want to compress separately, and pre-seed the gzip
decompressor with the same initial text. That lets the compressor and
decompressor both start with enough knowledge to get good compression
even for fairly short strings. This class puts a compressor and
decompressor into the same object, called a Compressor for convenience.
Usage: running the three lines
compressor = Compressor(initial_seed)
compressed_record = compressor.compress(some_record)
restored_record = compressor.decompress(compressed_record)
where initial_seed is a few kilobytes of messages, and some_record is
a single record of maybe a few hundred bytes, for typical text, should
result in compressed_record being 50% or less of the size of
some_record, and restored_record being identical to some_record.
"""
class Compressor:
def __init__(self, seed):
c = zlib.compressobj(9)
d_seed = c.compress(seed.encode())
d_seed += c.flush(zlib.Z_SYNC_FLUSH)
self.c_context = c.copy()
d = zlib.decompressobj()
d.decompress(d_seed)
while d.unconsumed_tail:
d.decompress(d.unconsumed_tail)
self.d_context = d.copy()
def compress(self, text):
if not isinstance(text, str):
text = text.decode()
c = self.c_context.copy()
t = c.compress(text.encode())
t2 = c.flush(zlib.Z_FINISH)
return t + t2
def decompress(self, ctext):
if not isinstance(ctext, bytes):
ctext = ctext.encode()
d = self.d_context.copy()
t = d.decompress(ctext)
while d.unconsumed_tail:
t += d.decompress(d.unconsumed_tail)
return t.decode()
def test_compressor():
"""
>>> test_compressor() # Self-doctest this code.
"""
c = Compressor(__doc__)
test_string = "zlib is a pretty good compression algorithm"
ct = c.compress(test_string)
# print('initial length=%d, compressed=%d' % (len(test_string), len(ct)))
# the above string compresses from 43 bytes to 29 bytes using the
# current doc text as compression seed, not bad for such short input.
dt = c.decompress(ct)
assert dt == test_string, (dt, test_string)
# Test that utf-8 encoded bytes return the utf-8 string
ct = c.compress(test_string.encode("utf-8"))
# print('initial length=%d, compressed=%d' % (len(test_string), len(ct)))
# the above string compresses from 43 bytes to 29 bytes using the
# current doc text as compression seed, not bad for such short input.
dt = c.decompress(ct)
assert dt == test_string, (dt, test_string)
if __name__ == "__main__":
test_compressor()
| 3,422 | Python | .py | 74 | 41.094595 | 77 | 0.70447 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
243 | lccn.py | internetarchive_openlibrary/openlibrary/utils/lccn.py | import re
REV_RE = re.compile(r'rev.*')
REMOVESUFFIX_RE = re.compile(r'[^\/]+')
HYPHEN_RE = re.compile(r'(.+)-+([0-9]+)')
# Validates the syntax described at https://www.loc.gov/marc/lccn-namespace.html
LCCN_NORM_RE = re.compile(
r'([a-z]|[a-z]?([a-z]{2}|[0-9]{2})|[a-z]{2}[0-9]{2})?[0-9]{8}$'
)
def normalize_lccn(lccn):
lccn = lccn.strip().replace(' ', '')
lccn = lccn.strip('-').lower()
# remove any 'revised' text:
lccn = REV_RE.sub('', lccn)
m = REMOVESUFFIX_RE.match(lccn)
lccn = m.group(0) if m else ''
if hyph := HYPHEN_RE.match(lccn):
lccn = hyph.group(1) + hyph.group(2).zfill(6)
if LCCN_NORM_RE.match(lccn):
return lccn
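# Editor's illustration (not part of the original module): normalization
# examples following the LCCN namespace rules referenced above.
if __name__ == "__main__":
    assert normalize_lccn('n78-890351') == 'n78890351'
    assert normalize_lccn('85-2 ') == '85000002'
    assert normalize_lccn('not an lccn') is None  # invalid input yields None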
| 687 | Python | .py | 19 | 32.210526 | 80 | 0.591867 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
244 | olmemcache.py | internetarchive_openlibrary/openlibrary/utils/olmemcache.py | """Memcache interface to store data in memcached."""
import memcache
from openlibrary.utils.olcompress import OLCompressor
import web
class Client:
"""Wrapper to memcache Client to enable OL specific compression and unicode keys.
Compatible with memcache Client API.
"""
def __init__(self, servers):
self._client = memcache.Client(servers)
compressor = OLCompressor()
self.compress = compressor.compress
self.decompress = compressor.decompress
def get(self, key):
try:
value = self._client.get(web.safestr(key))
except memcache.Client.MemcachedKeyError:
return None
return value and self.decompress(value)
def get_multi(self, keys):
keys = [web.safestr(k) for k in keys]
d = self._client.get_multi(keys)
return {web.safeunicode(k): self.decompress(v) for k, v in d.items()}
def set(self, key, val, time=0):
return self._client.set(web.safestr(key), self.compress(val), time=time)
def set_multi(self, mapping, time=0):
mapping = {web.safestr(k): self.compress(v) for k, v in mapping.items()}
return self._client.set_multi(mapping, time=time)
def add(self, key, val, time=0):
return self._client.add(web.safestr(key), self.compress(val), time=time)
def delete(self, key, time=0):
key = web.safestr(key)
return self._client.delete(key, time=time)
def delete_multi(self, keys, time=0):
keys = [web.safestr(k) for k in keys]
return self._client.delete_multi(keys, time=time)
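# Editor's sketch (not part of the original module): a typical round trip
# against a local memcached; the server address is illustrative and a running
# memcached is required. Values are JSON strings, as stored by Open Library.
if __name__ == "__main__":
    client = Client(['localhost:11211'])
    client.set('/books/OL1M', '{"title": "Example edition"}')
    print(client.get('/books/OL1M'))
    client.delete('/books/OL1M')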
| 1,594 | Python | .py | 36 | 37.055556 | 85 | 0.663001 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
245 | dateutil.py | internetarchive_openlibrary/openlibrary/utils/dateutil.py | """Generic date utilities.
"""
import calendar
import datetime
from contextlib import contextmanager
from sys import stderr
from time import perf_counter
from infogami.utils.view import public
MINUTE_SECS = 60
HALF_HOUR_SECS = MINUTE_SECS * 30
HOUR_SECS = MINUTE_SECS * 60
HALF_DAY_SECS = HOUR_SECS * 12
DAY_SECS = HOUR_SECS * 24
WEEK_SECS = DAY_SECS * 7
def days_in_current_month() -> int:
now = datetime.datetime.now()
return calendar.monthrange(now.year, now.month)[1]
def todays_date_minus(**kwargs) -> datetime.date:
return datetime.date.today() - datetime.timedelta(**kwargs)
def date_n_days_ago(n: int | None = None, start=None) -> datetime.date | None:
"""
Args:
n (int) - number of days since start
start (date) - date to start counting from (default: today)
Returns:
A (datetime.date) of `n` days ago if n is provided, else None
"""
_start = start or datetime.date.today()
return (_start - datetime.timedelta(days=n)) if n else None
DATE_ONE_YEAR_AGO = date_n_days_ago(n=365)
DATE_ONE_MONTH_AGO = date_n_days_ago(n=days_in_current_month())
DATE_ONE_WEEK_AGO = date_n_days_ago(n=7)
DATE_ONE_DAY_AGO = date_n_days_ago(n=1)
def parse_date(datestr: str) -> datetime.date:
"""Parses date string.
>>> parse_date("2010")
datetime.date(2010, 1, 1)
>>> parse_date("2010-02")
datetime.date(2010, 2, 1)
>>> parse_date("2010-02-04")
datetime.date(2010, 2, 4)
"""
tokens = datestr.split("-")
_resize_list(tokens, 3)
yyyy, mm, dd = tokens[:3]
return datetime.date(int(yyyy), mm and int(mm) or 1, dd and int(dd) or 1)
def parse_daterange(datestr: str) -> tuple[datetime.date, datetime.date]:
"""Parses date range.
>>> parse_daterange("2010-02")
(datetime.date(2010, 2, 1), datetime.date(2010, 3, 1))
"""
date = parse_date(datestr)
tokens = datestr.split("-")
if len(tokens) == 1: # only year specified
return date, nextyear(date)
elif len(tokens) == 2: # year and month specified
return date, nextmonth(date)
else:
return date, nextday(date)
def nextday(date: datetime.date) -> datetime.date:
return date + datetime.timedelta(1)
def nextmonth(date: datetime.date) -> datetime.date:
"""Returns a new date object with first day of the next month."""
year, month = date.year, date.month
month = month + 1
if month > 12:
month = 1
year += 1
return datetime.date(year, month, 1)
def nextyear(date: datetime.date) -> datetime.date:
"""Returns a new date object with first day of the next year."""
return datetime.date(date.year + 1, 1, 1)
def _resize_list(x, size: int) -> None:
"""Increase the size of the list x to the specified size it is smaller."""
if len(x) < size:
x += [None] * (size - len(x))
@public
def current_year() -> int:
return datetime.datetime.now().year
@contextmanager
def elapsed_time(name: str = "elapsed_time"):
"""
Two ways to use elapsed_time():
1. As a decorator to time the execution of an entire function:
@elapsed_time("my_slow_function")
def my_slow_function(n=10_000_000):
pass
2. As a context manager to time the execution of a block of code inside a function:
with elapsed_time("my_slow_block_of_code"):
pass
"""
start = perf_counter()
yield
print(f"Elapsed time ({name}): {perf_counter() - start:0.8} seconds", file=stderr)
| 3,499 | Python | .py | 94 | 32.5 | 87 | 0.659745 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
246 | form.py | internetarchive_openlibrary/openlibrary/utils/form.py | """New form library to use instead of web.form.
(this should go to web.py)
"""
import web
import copy
import re
from infogami.utils.view import render
class AttributeList(dict):
"""List of attributes of input.
>>> a = AttributeList(type='text', name='x', value=20)
>>> a
<attrs: 'type="text" name="x" value="20"'>
"""
def copy(self):
return AttributeList(self)
def __str__(self):
return " ".join(f'{k}="{web.websafe(v)}"' for k, v in self.items())
def __repr__(self):
return '<attrs: %s>' % repr(str(self))
class Input:
def __init__(self, name, description=None, value=None, **kw):
self.name = name
self.description = description or ""
self.value = value
self.validators = kw.pop('validators', [])
self.help = kw.pop('help', None)
self.note = kw.pop('note', None)
self.id = kw.pop('id', name)
self.__dict__.update(kw)
if 'klass' in kw:
kw['class'] = kw.pop('klass')
self.attrs = AttributeList(kw)
def get_type(self):
raise NotImplementedError
def is_hidden(self):
return False
def render(self):
attrs = self.attrs.copy()
attrs['id'] = self.id
attrs['type'] = self.get_type()
attrs['name'] = self.name
attrs['value'] = self.value or ''
return '<input ' + str(attrs) + ' />'
def validate(self, value):
self.value = value
for v in self.validators:
if not v.valid(value):
self.note = v.msg
return False
return True
class Textbox(Input):
"""Textbox input.
>>> t = Textbox("name", description='Name', value='joe')
>>> t.render()
'<input type="text" id="name" value="joe" name="name" />'
>>> t = Textbox("name", description='Name', value='joe', id='name', klass='input', size=10)
>>> t.render()
'<input name="name" value="joe" class="input" type="text" id="name" size="10" />'
"""
def get_type(self):
return "text"
class Password(Input):
"""Password input.
>>> Password("password", description='Password', value='secret').render()
'<input type="password" id="password" value="secret" name="password" />'
"""
def get_type(self):
return "password"
class Email(Input):
"""Email input.
>>> Email("email", value='[email protected]').render()
'<input type="email" id="email" value="[email protected]" name="email" />'
"""
def get_type(self):
return "email"
class Checkbox(Input):
"""Checkbox input."""
@property
def checked(self):
return self.value is not None
def get_type(self):
return "checkbox"
def render(self):
if self.value is not None:
self.attrs['checked'] = ''
return Input.render(self)
class Hidden(Input):
"""Hidden input."""
def is_hidden(self):
return True
def get_type(self):
return "hidden"
class Form:
def __init__(self, *inputs, **kw):
self.inputs = inputs
self.validators = kw.pop('validators', [])
self.note = None
def __call__(self):
return copy.deepcopy(self)
def __str__(self):
return web.safestr(self.render())
def __getitem__(self, key):
for i in self.inputs:
if i.name == key:
return i
raise KeyError(key)
def __getattr__(self, name):
# don't interfere with deepcopy
inputs = self.__dict__.get('inputs') or []
for x in inputs:
if x.name == name:
return x
raise AttributeError(name)
def render(self):
return render.form(self)
def validates(self, source):
valid = True
for i in self.inputs:
v = source.get(i.name)
valid = i.validate(v) and valid
valid = self._validate(source) and valid
self.valid = valid
return valid
fill = validates
def _validate(self, value):
for v in self.validators:
if not v.valid(value):
self.note = v.msg
return False
return True
class Validator:
def __init__(self, msg, test):
self.msg = msg
self.test = test
def __deepcopy__(self, memo):
return copy.copy(self)
    def valid(self, value):
        return self.test(value)
def __repr__(self):
return "<validator: %r >" % self.msg
notnull = Validator("Required", bool)
class RegexpValidator(Validator):
def __init__(self, rexp, msg):
self.rexp = re.compile(rexp)
self.msg = msg
def valid(self, value):
return bool(self.rexp.match(value))
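# Editor's example (not part of the original module): composing the pieces
# above into a small form and validating request input. Field names are made up.
def _example_login_form():
    """Build a two-field form and validate a dict of request input.
    >>> form = _example_login_form()
    >>> form.validates({'email': '[email protected]', 'password': 'secret'})
    True
    >>> form.validates({'email': '[email protected]', 'password': ''})
    False
    """
    return Form(
        Email('email', description='Email', validators=[notnull]),
        Password('password', description='Password', validators=[notnull]),
    )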
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4,912 | Python | .py | 154 | 24.324675 | 95 | 0.568657 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
247 | sentry.py | internetarchive_openlibrary/openlibrary/utils/sentry.py | from dataclasses import dataclass
import logging
import re
import sentry_sdk
import web
from sentry_sdk.utils import capture_internal_exceptions
from sentry_sdk.tracing import Transaction, TRANSACTION_SOURCE_ROUTE
from infogami.utils.app import find_page, find_view, modes
from infogami.utils.types import type_patterns
from openlibrary.utils import get_software_version
def header_name_from_env(env_name: str) -> str:
"""
Convert an env name as stored in web.ctx.env to a "normal"
header name
>>> header_name_from_env('HTTP_FOO_BAR')
'foo-bar'
>>> header_name_from_env('CONTENT_LENGTH')
'content-length'
"""
header_name = env_name
if env_name.startswith('HTTP_'):
header_name = env_name[5:]
return header_name.lower().replace('_', '-')
def add_web_ctx_to_event(event: dict, hint: dict) -> dict:
if not web.ctx:
return event
with capture_internal_exceptions():
request_info = event.setdefault("request", {})
headers = {}
env = {}
for k, v in web.ctx.env.items():
# Don't forward cookies to Sentry
if k == 'HTTP_COOKIE':
continue
if k.startswith('HTTP_') or k in ('CONTENT_LENGTH', 'CONTENT_TYPE'):
headers[header_name_from_env(k)] = v
else:
env[k] = v
request_info["url"] = web.ctx.home + web.ctx.fullpath
request_info["headers"] = headers
request_info["env"] = env
request_info["method"] = web.ctx.method
return event
class Sentry:
def __init__(self, config: dict) -> None:
self.config = config
self.enabled = bool(config.get('enabled'))
self.logger = logging.getLogger("sentry")
self.logger.info(f"Setting up sentry (enabled={self.enabled})")
def init(self):
sentry_sdk.init(
dsn=self.config['dsn'],
environment=self.config['environment'],
traces_sample_rate=self.config.get('traces_sample_rate', 0.0),
release=get_software_version(),
)
def bind_to_webpy_app(self, app: web.application) -> None:
_internalerror = app.internalerror
def capture_exception():
self.capture_exception_webpy()
return _internalerror()
app.internalerror = capture_exception
app.add_processor(WebPySentryProcessor(app))
def capture_exception_webpy(self):
with sentry_sdk.push_scope() as scope:
scope.add_event_processor(add_web_ctx_to_event)
sentry_sdk.capture_exception()
def capture_exception(self, ex):
with sentry_sdk.push_scope() as scope:
scope.add_event_processor(add_web_ctx_to_event)
sentry_sdk.capture_exception(ex)
@dataclass
class InfogamiRoute:
route: str
mode: str = 'view'
encoding: str | None = None
def to_sentry_name(self) -> str:
return (
self.route
+ (f'.{self.encoding}' if self.encoding else '')
+ (f'?m={self.mode}' if self.mode != 'view' else '')
)
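# Editor's illustration (not part of the original module): how mode and
# encoding are folded into the Sentry transaction name. The routes are made up.
def _example_route_names() -> list[str]:
    """
    >>> _example_route_names()
    ['/type/edition', '/type/edition.json', '/type/edition?m=edit']
    """
    return [
        InfogamiRoute('/type/edition').to_sentry_name(),
        InfogamiRoute('/type/edition', encoding='json').to_sentry_name(),
        InfogamiRoute('/type/edition', mode='edit').to_sentry_name(),
    ]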
class WebPySentryProcessor:
def __init__(self, app: web.application):
self.app = app
def find_route_name(self) -> str:
handler, groups = self.app._match(self.app.mapping, web.ctx.path)
web.debug('ROUTE HANDLER', handler, groups)
return handler or '<other>'
def __call__(self, handler):
route_name = self.find_route_name()
hub = sentry_sdk.Hub.current
with sentry_sdk.Hub(hub) as hub:
with hub.configure_scope() as scope:
scope.clear_breadcrumbs()
scope.add_event_processor(add_web_ctx_to_event)
environ = dict(web.ctx.env)
# Don't forward cookies to Sentry
if 'HTTP_COOKIE' in environ:
del environ['HTTP_COOKIE']
transaction = Transaction.continue_from_environ(
environ,
op="http.server",
name=route_name,
source=TRANSACTION_SOURCE_ROUTE,
)
with hub.start_transaction(transaction):
try:
return handler()
finally:
transaction.set_http_status(int(web.ctx.status.split()[0]))
class InfogamiSentryProcessor(WebPySentryProcessor):
"""
Processor to profile the webpage and send a transaction to Sentry.
"""
def find_route_name(self) -> str:
def find_type() -> tuple[str, str] | None:
return next(
(
(pattern, typename)
for pattern, typename in type_patterns.items()
if re.search(pattern, web.ctx.path)
),
None,
)
def find_route() -> InfogamiRoute:
result = InfogamiRoute('<other>')
cls, args = find_page()
if cls:
if hasattr(cls, 'path'):
result.route = cls.path
else:
result.route = web.ctx.path
elif type_page := find_type():
result.route = type_page[0]
if web.ctx.get('encoding'):
result.encoding = web.ctx.encoding
requested_mode = web.input(_method='GET').get('m', 'view')
if requested_mode in modes:
result.mode = requested_mode
return result
return find_route().to_sentry_name()
| 5,526 | Python | .py | 142 | 28.78169 | 80 | 0.582383 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
248 | solr.py | internetarchive_openlibrary/openlibrary/utils/solr.py | """Python library for accessing Solr"""
import logging
import re
from typing import Optional, TypeVar
from collections.abc import Callable, Iterable
import requests
import web
from urllib.parse import urlencode, urlsplit
logger = logging.getLogger("openlibrary.logger")
T = TypeVar('T')
class Solr:
def __init__(self, base_url):
"""
:param base_url: The base url of the solr server/collection. E.g. http://localhost:8983/solr/openlibrary
"""
self.base_url = base_url
self.host = urlsplit(self.base_url)[1]
self.session = requests.Session()
def escape(self, query):
r"""Escape special characters in the query string
>>> solr = Solr("")
>>> solr.escape("a[b]c")
'a\\[b\\]c'
"""
chars = r'+-!(){}[]^"~*?:\\'
pattern = "([%s])" % re.escape(chars)
return web.re_compile(pattern).sub(r'\\\1', query)
def get(
self,
key: str,
fields: list[str] | None = None,
doc_wrapper: Callable[[dict], T] = web.storage,
) -> T | None:
"""Get a specific item from solr"""
logger.info(f"solr /get: {key}, {fields}")
resp = self.session.get(
f"{self.base_url}/get",
# It's unclear how field=None is getting in here; a better fix would be at the source.
params={
'id': key,
**(
{'fl': ','.join([field for field in fields if field])}
if fields
else {}
),
},
).json()
# Solr returns {doc: null} if the record isn't there
return doc_wrapper(resp['doc']) if resp['doc'] else None
def get_many(
self,
keys: Iterable[str],
fields: Iterable[str] | None = None,
doc_wrapper: Callable[[dict], T] = web.storage,
) -> list[T]:
if not keys:
return []
logger.info(f"solr /get: {keys}, {fields}")
resp = self.session.post(
f"{self.base_url}/get",
data={
'ids': ','.join(keys),
**({'fl': ','.join(fields)} if fields else {}),
},
).json()
return [doc_wrapper(doc) for doc in resp['response']['docs']]
def select(
self,
query,
fields=None,
facets=None,
rows=None,
start=None,
doc_wrapper=None,
facet_wrapper=None,
**kw,
):
"""Execute a solr query.
query can be a string or a dictionary. If query is a dictionary, query
is constructed by concatenating all the key-value pairs with AND condition.
"""
params = {'wt': 'json'}
for k, v in kw.items():
# convert keys like facet_field to facet.field
params[k.replace('_', '.')] = v
params['q'] = self._prepare_select(query)
if rows is not None:
params['rows'] = rows
params['start'] = start or 0
if fields:
params['fl'] = ",".join(fields)
if facets:
params['facet'] = "true"
params['facet.field'] = []
for f in facets:
if isinstance(f, dict):
name = f.pop("name")
for k, v in f.items():
params[f"f.{name}.facet.{k}"] = v
else:
name = f
params['facet.field'].append(name)
json_data = self.raw_request(
'select',
urlencode(params, doseq=True),
).json()
return self._parse_solr_result(
json_data, doc_wrapper=doc_wrapper, facet_wrapper=facet_wrapper
)
def raw_request(self, path_or_url: str, payload: str) -> requests.Response:
if path_or_url.startswith("http"):
# TODO: Should this only take a path, not a full url? Would need to
# update worksearch.code.execute_solr_query accordingly.
url = path_or_url
else:
url = f'{self.base_url}/{path_or_url.lstrip("/")}'
# switch to POST request when the payload is too big.
# XXX: would it be a good idea to switch to POST always?
if len(payload) < 500:
sep = '&' if '?' in url else '?'
url = url + sep + payload
logger.info("solr request: %s", url)
return self.session.get(url, timeout=10)
else:
logger.info("solr request: %s ...", url)
headers = {
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"
}
return self.session.post(url, data=payload, headers=headers, timeout=10)
def _parse_solr_result(self, result, doc_wrapper, facet_wrapper):
response = result['response']
doc_wrapper = doc_wrapper or web.storage
facet_wrapper = facet_wrapper or (
lambda name, value, count: web.storage(locals())
)
d = web.storage()
d.num_found = response['numFound']
d.docs = [doc_wrapper(doc) for doc in response['docs']]
if 'facet_counts' in result:
d.facets = {}
for k, v in result['facet_counts']['facet_fields'].items():
d.facets[k] = [
facet_wrapper(k, value, count) for value, count in web.group(v, 2)
]
if 'highlighting' in result:
d.highlighting = result['highlighting']
if 'spellcheck' in result:
d.spellcheck = result['spellcheck']
return d
def _prepare_select(self, query):
def escape(v):
# TODO: improve this
return v.replace('"', r'\"').replace("(", "\\(").replace(")", "\\)")
def escape_value(v):
if isinstance(v, tuple): # hack for supporting range
return f"[{escape(v[0])} TO {escape(v[1])}]"
elif isinstance(v, list): # one of
return "(%s)" % " OR ".join(escape_value(x) for x in v)
else:
return '"%s"' % escape(v)
if isinstance(query, dict):
op = query.pop("_op", "AND")
if op.upper() != "OR":
op = "AND"
op = " " + op + " "
q = op.join(f'{k}:{escape_value(v)}' for k, v in query.items())
else:
q = query
return q
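# Editor's illustration (not part of the original module): how a dict query is
# flattened into a Lucene query string by Solr._prepare_select. The field names
# and values are made up; no request is sent.
def _example_prepare_select() -> str:
    """
    >>> _example_prepare_select()
    'title:"war and peace" AND publish_year:[1900 TO 1950]'
    """
    solr = Solr("http://localhost:8983/solr/openlibrary")
    return solr._prepare_select(
        {'title': 'war and peace', 'publish_year': ('1900', '1950')}
    )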
if __name__ == '__main__':
import doctest
doctest.testmod()
| 6,506 | Python | .py | 172 | 27.133721 | 112 | 0.517554 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
249 | olcompress.py | internetarchive_openlibrary/openlibrary/utils/olcompress.py | """Initial seed to create compress object.
see compress module for details.
"""
from openlibrary.utils.compress import Compressor
# using 4 random records from OL seed
seed1 = '{"subject_place": ["Great Britain", "Great Britain."], "lc_classifications": ["RT85.5 .K447 1994"], "contributions": ["Richardson, Eileen, RGN."], "id": 1875537, "title": "nursing process and quality care", "languages": [{"key": "/l/eng"}], "subjects": ["Nursing -- Quality control.", "Nursing -- Standards.", "Nursing audit.", "Nursing -- Great Britain -- Quality control.", "Nursing -- Standards -- Great Britain.", "Nursing audit -- Great Britain."], "publish_country": "cau", "title_prefix": "The ", "type": {"key": "/type/edition"}, "by_statement": "Nan Kemp, Eileen Richardson.", "revision": 1, "other_titles": ["Nursing process & quality care."], "publishers": ["Singular Pub. Group"], "last_modified": {"type": "/type/datetime", "value": "2008-04-01 03:28:50.625462"}, "key": "/b/OL1234567M", "authors": [{"key": "/a/OL448883A"}], "publish_places": ["San Diego, Calif"], "pagination": "132 p. :", "dewey_decimal_class": ["362.1/73/0685"], "notes": {"type": "/type/text", "value": "Includes bibliographical references and index.\nCover title: The nursing process & quality care."}, "number_of_pages": 132, "lccn": ["94237442"], "isbn_10": ["1565933834"], "publish_date": "1994"}' # noqa: E501
seed2 = '{"subtitle": "exploration & celebration : papers delivered at an academic conference honoring twenty years of women in the rabbinate, 1972-1992", "subject_place": ["United States"], "lc_classifications": ["BM652 .W66 1996"], "contributions": ["Zola, Gary Phillip."], "id": 1523482, "title": "Women rabbis", "languages": [{"key": "/l/eng"}], "subjects": ["Women rabbis -- United States -- Congresses.", "Reform Judaism -- United States -- Congresses.", "Women in Judaism -- Congresses."], "publish_country": "ohu", "by_statement": "edited by Gary P. Zola.", "type": {"key": "/type/edition"}, "revision": 1, "publishers": ["HUC-JIR Rabbinic Alumni Association Press"], "last_modified": {"type": "/type/datetime", "value": "2008-04-01 03:28:50.625462"}, "key": "/b/OL987654M", "publish_places": ["Cincinnati"], "pagination": "x, 135 p. ;", "dewey_decimal_class": ["296.6/1/082"], "notes": {"type": "/type/text", "value": "Includes bibliographical references and index."}, "number_of_pages": 135, "lccn": ["96025781"], "isbn_10": ["0878202145"], "publish_date": "1996"}' # noqa: E501
seed3 = '{"name": "W. Wilkins Davis", "personal_name": "W. Wilkins Davis", "death_date": "1866.", "last_modified": {"type": "/type/datetime", "value": "2008-08-21 07:57:48.336414"}, "key": "/a/OL948765A", "birth_date": "1842", "type": {"key": "/type/author"}, "id": 2985386, "revision": 2}' # noqa: E501
seed4 = '{"name": "Alberto Tebechrani", "personal_name": "Alberto Tebechrani", "last_modified": {"type": "/type/datetime", "value": "2008-09-08 15:29:34.798941"}, "key": "/a/OL94792A", "type": {"key": "/type/author"}, "id": 238564, "revision": 2}' # noqa: E501
seed = seed1 + seed2 + seed3 + seed4
def OLCompressor():
"""Create a compressor object for compressing OL data."""
return Compressor(seed)
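# Editor's example (not part of the original module): round-tripping a small
# OL-style JSON record through the pre-seeded compressor.
if __name__ == "__main__":
    compressor = OLCompressor()
    record = '{"key": "/books/OL1M", "type": {"key": "/type/edition"}, "title": "Example"}'
    compressed = compressor.compress(record)
    assert compressor.decompress(compressed) == record
    print('%d -> %d bytes' % (len(record), len(compressed)))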
| 3,189 | Python | .py | 13 | 243.153846 | 1,209 | 0.655727 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
250 | __init__.py | internetarchive_openlibrary/openlibrary/utils/__init__.py | """Generic utilities"""
from enum import Enum
import re
from subprocess import CalledProcessError, run
from typing import TypeVar, Literal
from collections.abc import Iterable, Callable
to_drop = set(''';/?:@&=+$,<>#%"{}|\\^[]`\n\r''')
def str_to_key(s: str) -> str:
"""
>>> str_to_key("?H$e##l{o}[0] -world!")
'helo0_-world!'
>>> str_to_key("".join(to_drop))
''
>>> str_to_key("")
''
"""
return ''.join(c if c != ' ' else '_' for c in s.lower() if c not in to_drop)
T = TypeVar('T')
def uniq(values: Iterable[T], key=None) -> list[T]:
"""Returns the unique entries from the given values in the original order.
The value of the optional `key` parameter should be a function that takes
a single argument and returns a key to test the uniqueness.
TODO: Moved this to core/utils.py
>>> uniq("abcbcddefefg")
['a', 'b', 'c', 'd', 'e', 'f', 'g']
>>> uniq("011223344556677889")
['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
"""
key = key or (lambda x: x)
s = set()
result = []
for v in values:
k = key(v)
if k not in s:
s.add(k)
result.append(v)
return result
def take_best(
items: list[T],
optimization: Literal["min", "max"],
scoring_fn: Callable[[T], float],
) -> list[T]:
"""
>>> take_best([], 'min', lambda x: x)
[]
>>> take_best([3, 2, 1], 'min', lambda x: x)
[1]
>>> take_best([3, 4, 5], 'max', lambda x: x)
[5]
>>> take_best([4, 1, -1, -1], 'min', lambda x: x)
[-1, -1]
"""
best_score = float("-inf") if optimization == "max" else float("inf")
besties = []
for item in items:
score = scoring_fn(item)
if (optimization == "max" and score > best_score) or (
optimization == "min" and score < best_score
):
best_score = score
besties = [item]
elif score == best_score:
besties.append(item)
else:
continue
return besties
def multisort_best(
items: list[T], specs: list[tuple[Literal["min", "max"], Callable[[T], float]]]
) -> T | None:
"""
Takes the best item, taking into account the multisorts
>>> multisort_best([], [])
>>> multisort_best([3,4,5], [('max', lambda x: x)])
5
>>> multisort_best([
... {'provider': 'ia', 'size': 4},
... {'provider': 'ia', 'size': 12},
... {'provider': None, 'size': 42},
... ], [
... ('min', lambda x: 0 if x['provider'] == 'ia' else 1),
... ('max', lambda x: x['size']),
... ])
{'provider': 'ia', 'size': 12}
"""
if not items:
return None
pool = items
for optimization, fn in specs:
# Shrink the pool down each time
pool = take_best(pool, optimization, fn)
return pool[0]
def dicthash(d):
"""Dictionaries are not hashable. This function converts dictionary into nested
tuples, so that it can hashed.
"""
if isinstance(d, dict):
return tuple((k, dicthash(d[k])) for k in sorted(d))
elif isinstance(d, list):
return tuple(dicthash(v) for v in d)
else:
return d
olid_re = re.compile(r'OL\d+[A-Z]', re.IGNORECASE)
def find_olid_in_string(s: str, olid_suffix: str | None = None) -> str | None:
"""
>>> find_olid_in_string("ol123w")
'OL123W'
>>> find_olid_in_string("/authors/OL123A/DAVIE_BOWIE")
'OL123A'
>>> find_olid_in_string("/authors/OL123A/DAVIE_BOWIE", "W")
>>> find_olid_in_string("some random string")
"""
found = re.search(olid_re, s)
if not found:
return None
olid = found.group(0).upper()
if olid_suffix and not olid.endswith(olid_suffix):
return None
return olid
def olid_to_key(olid: str) -> str:
"""
>>> olid_to_key('OL123W')
'/works/OL123W'
>>> olid_to_key('OL123A')
'/authors/OL123A'
>>> olid_to_key('OL123M')
'/books/OL123M'
>>> olid_to_key("OL123L")
'/lists/OL123L'
"""
typ = {
'A': 'authors',
'W': 'works',
'M': 'books',
'L': 'lists',
    }.get(olid[-1])
if not typ:
raise ValueError(f"Invalid olid: {olid}")
return f"/{typ}/{olid}"
def extract_numeric_id_from_olid(olid):
"""
>>> extract_numeric_id_from_olid("OL123W")
'123'
>>> extract_numeric_id_from_olid("/authors/OL123A")
'123'
"""
if '/' in olid:
olid = olid.split('/')[-1]
if olid.lower().startswith('ol'):
olid = olid[2:]
if not is_number(olid[-1].lower()):
olid = olid[:-1]
return olid
def is_number(s):
"""
>>> all(is_number(n) for n in (1234, "1234", -1234, "-1234", 123.4, -123.4))
True
>>> not any(is_number(n) for n in ("123.4", "-123.4", "123a", "--1234"))
True
"""
try:
int(s)
return True
except ValueError:
return False
def get_software_version() -> str:
"""
assert get_software_version() # Should never return a falsy value
"""
cmd = "git rev-parse --short HEAD --".split()
try:
return run(cmd, capture_output=True, text=True, check=True).stdout.strip()
except CalledProcessError:
return "unknown"
# See https://docs.python.org/3/library/enum.html#orderedenum
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
| 5,931 | Python | .py | 191 | 25.120419 | 83 | 0.555595 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
251 | ia.py | internetarchive_openlibrary/openlibrary/utils/ia.py | from socket import socket, AF_INET, SOCK_DGRAM, SOL_UDP, SO_BROADCAST
import re
re_loc = re.compile(r'^(ia\d+\.us\.archive\.org):(/\d+/items/(.*))$')
class FindItemError(Exception):
pass
def find_item(ia):
s = socket(AF_INET, SOCK_DGRAM, SOL_UDP)
s.setblocking(1)
s.settimeout(2.0)
s.setsockopt(1, SO_BROADCAST, 1)
s.sendto(ia, ('<broadcast>', 8010))
for attempt in range(5):
(loc, address) = s.recvfrom(1024)
m = re_loc.match(loc)
ia_host = m.group(1)
ia_path = m.group(2)
if m.group(3) == ia:
return (ia_host, ia_path)
raise FindItemError
| 631 | Python | .py | 19 | 27.473684 | 69 | 0.610561 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
252 | retry.py | internetarchive_openlibrary/openlibrary/utils/retry.py | import time
class MaxRetriesExceeded(Exception):
last_exception: Exception
def __init__(self, last_exception: Exception):
self.last_exception = last_exception
class RetryStrategy:
retry_count = 0
last_exception: Exception | None = None
def __init__(self, exceptions: list[type[Exception]], max_retries=3, delay=1):
self.exceptions = exceptions
self.max_retries = max_retries
self.delay = delay
def __call__(self, func):
try:
return func()
except Exception as e:
if not any(isinstance(e, exc) for exc in self.exceptions):
raise e
self.last_exception = e
if self.retry_count < self.max_retries:
self.retry_count += 1
time.sleep(self.delay)
return self(func)
else:
raise MaxRetriesExceeded(self.last_exception)
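# Editor's example (not part of the original module): retrying a flaky HTTP
# call a few times before giving up. The URL and helper function are hypothetical.
if __name__ == "__main__":
    import requests
    def fetch_inventory():
        return requests.get("https://example.com/inventory.json", timeout=5)
    retry = RetryStrategy(
        [requests.ConnectionError, requests.Timeout], max_retries=3, delay=1
    )
    try:
        response = retry(fetch_inventory)
        print(response.status_code)
    except MaxRetriesExceeded as e:
        print("giving up:", e.last_exception)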
| 929 | Python | .py | 25 | 27.32 | 82 | 0.601117 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
253 | bulkimport.py | internetarchive_openlibrary/openlibrary/utils/bulkimport.py | """Utility to bulk import documents into Open Library database without
going through infobase API.
"""
import json
import os
import web
import datetime
from collections import defaultdict
class DocumentLoader:
def __init__(self, **params):
params = params.copy()
params.setdefault('dbn', 'postgres')
params.setdefault('user', os.getenv('USER'))
self.db = web.database(**params)
self.db.printing = False
def new_work_keys(self, n):
"""Returns n new works keys."""
return ['/works/OL%dW' % i for i in self.incr_seq('type_work_seq', n)]
def incr_seq(self, seqname, n):
"""Increment a sequence by n and returns the latest value of
that sequence and returns list of n numbers.
"""
rows = self.db.query(
"SELECT setval($seqname, $n + (select last_value from %s)) as value"
% seqname,
vars=locals(),
)
end = rows[0].value + 1 # lastval is inclusive
begin = end - n
return range(begin, end)
def get_thing_ids(self, keys):
keys = list(set(keys))
rows = self.db.query(
"SELECT id, key FROM thing WHERE key in $keys", vars=locals()
)
return {r.key: r.id for r in rows}
def get_thing_id(self, key):
return self.get_thing_ids([key]).get(key)
def _with_transaction(f):
"""Decorator to run a method in a transaction."""
def g(self, *a, **kw):
t = self.db.transaction()
try:
value = f(self, *a, **kw)
except:
t.rollback()
raise
else:
t.commit()
return value
return g
def bulk_new(self, documents, author="/user/ImportBot", comment=None):
"""Create new documents in the database without going through
infobase. This approach is very fast, but this can lead to
errors in the database if the caller is not careful.
All records must contain "key" and "type"
properties. "last_modified" and "created" properties are
automatically added to all records.
Entries are not added to xxx_str, xxx_ref ... tables. reindex
method must be called separately to do that.
"""
return self._bulk_new(documents, author, comment)
@_with_transaction # type: ignore[arg-type]
def _bulk_new(self, documents, author, comment):
timestamp = datetime.datetime.utcnow()
type_ids = self.get_thing_ids(doc['type']['key'] for doc in documents)
# insert things
things = [
{
'key': doc['key'],
'type': type_ids[doc['type']['key']],
'created': timestamp,
'last_modified': timestamp,
}
for doc in documents
]
thing_ids = self.db.multiple_insert('thing', things)
# prepare documents
created = {'type': '/type/datetime', "value": timestamp.isoformat()}
for doc, thing_id in zip(documents, thing_ids):
doc['id'] = thing_id
doc['revision'] = 1
doc['latest_revision'] = 1
doc['created'] = created
doc['last_modified'] = created
# insert data
return self._insert_data(
documents, author=author, timestamp=timestamp, comment=comment
)
def _insert_data(self, documents, author, timestamp, comment, ip="127.0.0.1"):
"""Add entries in transaction and version tables for inseting
above documents.
It is assumed that correct value of id, revision and
last_modified is already set in all documents.
"""
author_id = author and self.get_thing_id(author)
# add an entry in the transaction table
txn_id = self.db.insert(
'transaction',
action="import",
comment=comment,
author_id=author_id,
created=timestamp,
ip=ip,
)
# add versions
versions = [
{
'transaction_id': txn_id,
'thing_id': doc['id'],
'revision': doc['revision'],
}
for doc in documents
]
self.db.multiple_insert('version', versions, seqname=False)
result = [
{'key': doc['key'], 'revision': doc['revision'], 'id': doc['id']}
for doc in documents
]
# insert data
data = []
for doc in documents:
try:
data.append(
{
'thing_id': doc.pop('id'),
'revision': doc['revision'],
'data': json.dumps(doc),
}
)
except UnicodeDecodeError:
print(repr(doc))
raise
self.db.multiple_insert('data', data, seqname=False)
return result
def bulk_update(self, documents, author='/user/ImportBot', comment=None):
"""Update existing documents in the database.
        When adding new properties, it is sufficient to specify the key and
        the new properties.
db.bulk_update([
            {'key': '/b/OL1M', 'work': {'key': '/works/OL1W'}},
            {'key': '/b/OL2M', 'work': {'key': '/works/OL2W'}}],
comment="link works")
        When updating an existing property, it is sufficient to specify the key and the new value of that property.
db.bulk_update([
{'key': '/b/OL1M', 'title': 'New title'}],
comment="unicode normalize titles")
        When appending a new value to an existing property, the entire list must be provided.
db.bulk_update([{
'key': '/a/OL1A',
'links': ['http://en.wikipedia.org/wiki/Foo', 'http://de.wikipedia.org/wiki/Foo']
}, comment="add german wikipedia links")
WARNING: This function should not be used to change the "type" property of documents.
"""
return self._bulk_update(documents, author, comment)
@_with_transaction # type: ignore[arg-type]
def _bulk_update(self, documents, author, comment):
timestamp = datetime.datetime.utcnow()
keys = [doc['key'] for doc in documents]
# update latest_revision and last_modified in thing table
self.db.query(
"UPDATE thing"
" SET last_modified=$timestamp, latest_revision=latest_revision+1"
" WHERE key IN $keys",
vars=locals(),
)
# fetch the current data
rows = self.db.query(
"SELECT thing.id, thing.key, thing.created, thing.latest_revision, data.data"
" FROM thing, data"
" WHERE data.thing_id=thing.id AND data.revision=thing.latest_revision-1 and thing.key in $keys",
vars=locals(),
)
rows = {r.key: r for r in rows}
last_modified = {'type': '/type/datetime', 'value': timestamp.isoformat()}
def prepare(doc):
"""Takes the existing document from db, update it with doc
and add revision, latest_revision, last_modified
properties.
"""
r = rows[doc['key']]
d = json.loads(r.data)
d.update(
doc,
revision=r.latest_revision,
latest_revision=r.latest_revision,
last_modified=last_modified,
id=r.id,
)
return d
documents = [prepare(doc) for doc in documents]
return self._insert_data(
documents, author=author, timestamp=timestamp, comment=comment
)
def reindex(self, keys, tables=None):
"""Delete existing entries and add new entries to xxx_str,
xxx_ref .. tables for the documents specified by keys.
If optional tables argument is specified then reindex is done only for values in those tables.
"""
return Reindexer(self.db).reindex(keys, tables)
# this is not required anymore
del _with_transaction
class Reindexer:
"""Utility to reindex documents."""
def __init__(self, db):
self.db = db
import openlibrary.plugins.openlibrary.schema
self.schema = openlibrary.plugins.openlibrary.schema.get_schema()
self.noindex = {
"id",
"key",
"type",
"type_id",
"revision",
"latest_revision",
"created",
"last_modified",
"permission",
"child_permission",
}
self._property_cache = {}
def reindex(self, keys, tables=None):
"""Reindex documents specified by the keys.
If tables is specified, index is recomputed only for those tables and other tables are ignored.
"""
t = self.db.transaction()
try:
documents = self.get_documents(keys)
self.delete_earlier_index(documents, tables)
self.create_new_index(documents, tables)
except:
t.rollback()
raise
else:
t.commit()
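    # Typical flow (editorial sketch; 'edition_str' is only an illustrative table name):
    #
    #   Reindexer(loader.db).reindex(['/b/OL1M'])                    # all index tables
    #   Reindexer(loader.db).reindex(['/b/OL1M'], ['edition_str'])   # selected tables only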
def get_documents(self, keys):
"""Get documents with given keys from database and add "id" and "type_id" to them."""
rows = self.db.query(
"SELECT thing.id, thing.type, data.data"
" FROM thing, data"
" WHERE data.thing_id=thing.id AND data.revision=thing.latest_revision and thing.key in $keys",
vars=locals(),
)
documents = [
dict(json.loads(row.data), id=row.id, type_id=row.type) for row in rows
]
return documents
def delete_earlier_index(self, documents, tables=None):
"""Remove all previous entries corresponding to the given documents"""
all_tables = tables or {
r.relname
for r in self.db.query("SELECT relname FROM pg_class WHERE relkind='r'")
}
data = defaultdict(list)
for doc in documents:
for table in self.schema.find_tables(doc['type']['key']):
if table in all_tables:
data[table].append(doc['id'])
for table in data:
self.db.delete(table, where="thing_id IN $thing_ids", vars=locals())
def create_new_index(self, documents, tables=None):
"""Insert data in to index tables for the specified documents."""
data = defaultdict(list)
def insert(doc, name, value, ordering=None):
# these are present in thing table. No need to index these keys
if name in [
"id",
"type",
"created",
"last_modified",
"permission",
"child_permission",
]:
return
if isinstance(value, list):
for i, v in enumerate(value):
insert(doc, name, v, ordering=i)
elif isinstance(value, dict) and 'key' not in value:
for k, v in value.items():
if k == "type": # no need to index type
continue
insert(doc, name + '.' + k, v, ordering=ordering)
else:
datatype = self._find_datatype(value)
table = datatype and self.schema.find_table(
doc['type']['key'], datatype, name
)
# when asked to index only some tables
if tables and table not in tables:
return
if table:
self.prepare_insert(
data[table],
doc['id'],
doc['type_id'],
name,
value,
ordering=ordering,
)
for doc in documents:
for name, value in doc.items():
insert(doc, name, value)
# replace keys with thing ids in xxx_ref tables
self.process_refs(data)
# insert the data
for table, rows in data.items():
self.db.multiple_insert(table, rows, seqname=False)
def get_property_id(self, type_id, name):
if (type_id, name) not in self._property_cache:
self._property_cache[type_id, name] = self._get_property_id(type_id, name)
return self._property_cache[type_id, name]
def _get_property_id(self, type_id, name):
d = self.db.select(
'property', where='name=$name AND type=$type_id', vars=locals()
)
if d:
return d[0].id
else:
return self.db.insert('property', type=type_id, name=name)
def prepare_insert(self, rows, thing_id, type_id, name, value, ordering=None):
"""Add data to be inserted to rows list."""
if name in self.noindex:
return
elif isinstance(value, list):
for i, v in enumerate(value):
self.prepare_insert(rows, thing_id, type_id, name, v, ordering=i)
else:
rows.append(
{
'thing_id': thing_id,
'key_id': self.get_property_id(type_id, name),
'value': value,
'ordering': ordering,
}
)
def process_refs(self, data):
"""Convert key values to thing ids for xxx_ref tables."""
keys = []
for table, rows in data.items():
if table.endswith('_ref'):
keys += [r['value']['key'] for r in rows]
if not keys:
return
thing_ids = {
r.key: r.id
for r in self.db.query(
"SELECT id, key FROM thing WHERE key in $keys", vars=locals()
)
}
for table, rows in data.items():
if table.endswith('_ref'):
for r in rows:
r['value'] = thing_ids[r['value']['key']]
def _find_datatype(self, value):
"""Find datatype of given value.
>>> _find_datatype = Reindexer(None)._find_datatype
>>> _find_datatype(1)
'int'
>>> _find_datatype('hello')
'str'
>>> _find_datatype({'key': '/a/OL1A'})
'ref'
>>> _find_datatype([{'key': '/a/OL1A'}])
'ref'
>>> _find_datatype({'type': '/type/text', 'value': 'foo'})
>>> _find_datatype({'type': '/type/datetime', 'value': '2009-10-10'})
'datetime'
"""
if isinstance(value, int):
return 'int'
elif isinstance(value, str):
return 'str'
elif isinstance(value, dict):
if 'key' in value:
return 'ref'
elif 'type' in value:
return {
'/type/int': 'int',
'/type/string': 'str',
'/type/datetime': 'datetime',
}.get(value['type'])
elif isinstance(value, list):
return value and self._find_datatype(value[0])
else:
return None
def _test():
loader = DocumentLoader(db='ol')
loader.db.printing = True
n = 2
print(
loader.bulk_new(
[
{
'key': "/b/OL%dM" % i,
'title': "book %d" % i,
'type': {"key": "/type/edition"},
'table_of_contents': [
{
"type": {"key": "/type/toc_item"},
"class": "part",
"label": "test",
"title": "test",
"pagenum": "10",
}
],
}
for i in range(1, n + 1)
],
comment="add books",
)
)
loader.reindex(["/b/OL%dM" % i for i in range(1, n + 1)])
if __name__ == "__main__":
_test()
| 16,125 | Python | .py | 414 | 26.898551 | 109 | 0.521937 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
254 | isbn.py | internetarchive_openlibrary/openlibrary/utils/isbn.py | from __future__ import annotations
from isbnlib import canonical
def check_digit_10(isbn):
"""Takes the first 9 digits of an ISBN10 and returns the calculated final checkdigit."""
if len(isbn) != 9:
raise ValueError("%s is not a valid ISBN 10" % isbn)
sum = 0
for i in range(len(isbn)):
c = int(isbn[i])
w = i + 1
sum += w * c
r = sum % 11
if r == 10:
return 'X'
else:
return str(r)
def check_digit_13(isbn):
"""Takes the first 12 digits of an ISBN13 and returns the calculated final checkdigit."""
if len(isbn) != 12:
raise ValueError
sum = 0
for i in range(len(isbn)):
c = int(isbn[i])
if i % 2:
w = 3
else:
w = 1
sum += w * c
r = 10 - (sum % 10)
if r == 10:
return '0'
else:
return str(r)
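# Worked example (editorial note): for ISBN-10 "0940787083" the first nine digits
# "094078708" give 0*1 + 9*2 + 4*3 + 0*4 + 7*5 + 8*6 + 7*7 + 0*8 + 8*9 = 234 and
# 234 % 11 == 3, so check_digit_10('094078708') returns '3'. The ISBN-13 form
# weights digits alternately 1 and 3: "978094078708" sums to 133, and
# 10 - (133 % 10) == 7, so check_digit_13('978094078708') returns '7'.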
def isbn_13_to_isbn_10(isbn_13: str) -> str | None:
isbn_13 = canonical(isbn_13)
if (
len(isbn_13) != 13
or not isbn_13.isdigit()
or not isbn_13.startswith('978')
or check_digit_13(isbn_13[:-1]) != isbn_13[-1]
):
return None
return isbn_13[3:-1] + check_digit_10(isbn_13[3:-1])
def isbn_10_to_isbn_13(isbn_10: str) -> str | None:
isbn_10 = canonical(isbn_10)
if (
len(isbn_10) != 10
or not isbn_10[:-1].isdigit()
or check_digit_10(isbn_10[:-1]) != isbn_10[-1]
):
return None
isbn_13 = '978' + isbn_10[:-1]
return isbn_13 + check_digit_13(isbn_13)
def to_isbn_13(isbn: str) -> str | None:
"""
Tries to make an isbn into an isbn13; regardless of input isbn type
"""
isbn = normalize_isbn(isbn) or isbn
return isbn and (isbn if len(isbn) == 13 else isbn_10_to_isbn_13(isbn))
def opposite_isbn(isbn): # ISBN10 -> ISBN13 and ISBN13 -> ISBN10
for f in isbn_13_to_isbn_10, isbn_10_to_isbn_13:
alt = f(canonical(isbn))
if alt:
return alt
def normalize_isbn(isbn: str) -> str | None:
"""
Takes an isbn-like string, keeps only numbers and X/x, and returns an ISBN-like
string or None.
Does NOT validate length or checkdigits.
"""
return isbn and canonical(isbn) or None
def get_isbn_10_and_13(isbn: str) -> tuple[str | None, str | None]:
"""
    Takes an ISBN 10 or 13 and returns an optional ISBN 10 and an optional
    ISBN 13, both in canonical form.
"""
if canonical_isbn := normalize_isbn(isbn):
isbn_13 = (
canonical_isbn if len(canonical_isbn) == 13 else isbn_10_to_isbn_13(isbn)
)
isbn_10 = isbn_13_to_isbn_10(isbn_13) if isbn_13 else canonical_isbn
return isbn_10, isbn_13
return None, None
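# Examples (editorial sketch; values follow from the conversion helpers above):
#
#   get_isbn_10_and_13('978-0-940787-08-7')  -> ('0940787083', '9780940787087')
#   get_isbn_10_and_13('0940787083')         -> ('0940787083', '9780940787087')
#   get_isbn_10_and_13('56')                 -> (None, None)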
def normalize_identifier(
identifier: str,
) -> tuple[str | None, str | None, str | None]:
"""
Takes an identifier (e.g. an ISBN 10/13 or B* ASIN) and returns a tuple of:
ASIN, ISBN_10, ISBN_13 or None, with the ISBNs in canonical form.
"""
asin = identifier.upper() if identifier.upper().startswith("B") else None
return asin, *get_isbn_10_and_13(identifier)
def get_isbn_10s_and_13s(isbns: str | list[str]) -> tuple[list[str], list[str]]:
"""
Returns a tuple of list[isbn_10_strings], list[isbn_13_strings]
    Internet Archive stores ISBNs as a string, or a list of strings,
with no differentiation between ISBN 10 and ISBN 13. Open Library
records need ISBNs in `isbn_10` and `isbn_13` fields.
>>> get_isbn_10s_and_13s('1576079457')
(['1576079457'], [])
>>> get_isbn_10s_and_13s(['1576079457', '9781576079454', '1576079392'])
(['1576079457', '1576079392'], ['9781576079454'])
Notes:
- this does no validation whatsoever--it merely checks length.
- this assumes the ISBNs have no hyphens, etc.
"""
isbn_10 = []
isbn_13 = []
# If the input is a string, it's a single ISBN, so put it in a list.
isbns = [isbns] if isinstance(isbns, str) else isbns
# Handle the list of ISBNs
for isbn in isbns:
isbn = isbn.strip()
match len(isbn):
case 10:
isbn_10.append(isbn)
case 13:
isbn_13.append(isbn)
return (isbn_10, isbn_13)
| 4,242 | Python | .py | 119 | 29.142857 | 93 | 0.601807 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
255 | ddc.py | internetarchive_openlibrary/openlibrary/utils/ddc.py | """
Dewey Decimal Numbers
Known issues
## Further Reading
https://www.oclc.org/bibformats/en/0xx/082.html
"""
from __future__ import annotations
import re
from string import printable
from collections.abc import Iterable
MULTIPLE_SPACES_RE = re.compile(r'\s+')
DDC_RE = re.compile(
r'''
(
# Prefix
(?P<prestar>\*)? # Should be suffix
(?P<neg>-)? # Old standard; no longer used
(?P<j>j)? # Juvenile prefix
C? # Canadian CIP
# The number
(?P<number>\d{1,3}(\.+\s?\d+)?)
# Suffix
(?P<poststar>\*?)
(?P<s>\s?s)? # Series suffix
(?P<B>\s?\[?B\]?)? # Biographical
(?P<ninetwo>\s(092|920|92))? # No clue; shouldn't be its own DDC though
)
|
(\[?(?P<fic>Fic|E)\.?\]?)
''',
re.IGNORECASE | re.X,
)
def collapse_multiple_space(s: str) -> str:
return MULTIPLE_SPACES_RE.sub(' ', s)
VALID_CHARS = set(printable) - set("/'′’,")
def normalize_ddc(ddc: str) -> list[str]:
ddc = ''.join(
char for char in collapse_multiple_space(ddc.strip()) if char in VALID_CHARS
)
results: list[str] = []
for match in DDC_RE.finditer(ddc):
parts = match.groupdict()
prefix = ''
suffix = ''
# DDCs should start at word boundaries
start = match.start()
if start > 0 and re.search(r'\b', ddc[start - 1]):
continue
# And end at them
end = match.end()
if end < len(ddc) and re.search(r'\b', ddc[end]):
continue
# Some old standard which isn't used anymore; might need to filter these
# out, but they should sort OK so let's keep them.
if parts['neg']:
prefix += '-'
# Juvenile prefix
if parts['j']:
prefix += 'j'
# Star should be at end
if parts['prestar'] or parts['poststar']:
suffix = '*'
# Series suffix
if parts['s']:
suffix += ' s'
# Biographical
if parts['B']:
suffix += ' B'
# Not at all sure
if parts['ninetwo']:
suffix += parts['ninetwo']
# And now the actual number!
if parts['number']:
# Numbers in parenthesis are "series" numbers
end = match.end('number')
if end < len(ddc) and ddc[end] == ')':
suffix += ' s'
# pad the integer part of the number
number_parts = parts['number'].split('.')
integer = number_parts[0]
# Copy decimal without losing precision
decimal = '.' + number_parts[-1].strip() if len(number_parts) > 1 else ''
number = '%03d%s' % (int(integer), decimal)
# Discard catalog edition number
# At least one classification number available
# And number is without decimal component
if len(results) and re.search(r'(^0?\d{1,2}$)', parts['number']):
continue
# Handle [Fic] or [E]
elif parts['fic']:
number = '[%s]' % parts['fic'].title()
else:
continue
results.append(prefix + number + suffix)
# Include the non-j-prefixed form as well for correct sorting
if prefix == 'j':
results.append(number + suffix)
return results
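# Examples (editorial sketch; cases drawn from the OCLC-style test suite):
#
#   normalize_ddc("370.19'342")  -> ['370.19342']     # segmentation mark dropped
#   normalize_ddc('j574')        -> ['j574', '574']   # juvenile form plus plain form
#   normalize_ddc('942.082 [B]') -> ['942.082 B']     # biographical suffix
#   normalize_ddc('1')           -> ['001']           # integer part zero-padded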
def normalize_ddc_range(start: str, end: str) -> list[str | None]:
"""
Normalizes the pieces of a lucene (i.e. solr)-style range.
E.g. ('23.23', '*')
>>> normalize_ddc_range('23.23', '*')
['023.23', '*']
"""
ddc_range_norm: list[str | None] = []
for ddc in start, end:
if ddc == '*':
ddc_range_norm.append('*')
else:
normed = normalize_ddc(ddc)
if normed:
ddc_range_norm.append(normed[0])
else:
ddc_range_norm.append(None)
return ddc_range_norm
def normalize_ddc_prefix(prefix: str) -> str:
"""
Normalizes a DDC prefix to be used in searching. Integer prefixes are not modified
>>> normalize_ddc_prefix('1')
'1'
>>> normalize_ddc_prefix('1.1')
'001.1'
"""
# 23.* should become 023*
# 23.45* should become 023.45*
if '.' in prefix:
normed = normalize_ddc(prefix)
return normed[0] if normed else prefix
# 0* should stay as is
# 23* should stay as is
# j* should stay as is
else:
return prefix
def choose_sorting_ddc(normalized_ddcs: Iterable[str]) -> str:
# Prefer unprefixed DDCs (so they sort correctly)
preferred_ddcs = [ddc for ddc in normalized_ddcs if ddc[0] in '0123456789']
# Choose longest; theoretically most precise?
return sorted(preferred_ddcs or normalized_ddcs, key=len, reverse=True)[0]
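# Example (editorial sketch; mirrors the sorting tests):
#
#   choose_sorting_ddc(['123', '123.554'])  -> '123.554'  # longest (most precise) wins
#   choose_sorting_ddc(['j123', '123'])     -> '123'      # unprefixed form preferred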
| 4,798 | Python | .py | 139 | 26.482014 | 86 | 0.557239 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
256 | lcc.py | internetarchive_openlibrary/openlibrary/utils/lcc.py | """
Crash course Library of Congress Classification (LCC)
Examples:
- HB1951 .R64 1995
- DP402.C8 O46 1995
- CS879 .R3 1995
- NC248.S22 A4 1992
- TJ563 .P66 1998
- PQ3919.2.M2866 C83 1994
- NA2500 .H64 1995
- DT423.E26 9th.ed. 2012
- PZ73.S758345255 2011
- PZ8.3.G276Lo 1971
Has 3 parts:
1. The class number: e.g. PQ3919.2 ; should match `[A-Z]{1,3}(\\d{1,4}(\\.\\d+)?)?`
2. Cutter number(s): e.g. .M2866 C83
3. Specification: Basically everything else, e.g. 2012, 9th.ed. 2012
### 1. The Class Number
The class number is what's used to determine the Library of Congress Subject. It has
a pretty well-defined structure, and should match `[A-Z]{1,3}(\\d{1,4}(\\.\\d+)?)?`
For example:
- QH -> Biology
- QH426-470 -> Genetics
_Note_: According to [1] (page 36), the first cutter is sometimes considered a part of
the class number. But this isn't discussed in [2], so it seems like it might not be
entirely well-defined.
### 2. Cutter Numbers
Cutter numbers are a somewhat phonetic hashing of a piece of "extra" information like
author last name, city, or whatever. Each character maps to a letter range, so for
example:
- Idaho -> I33 -> I[d][a-d]
- Campbell -> C36 -> C[a][m-o]
For the full mapping of character to letter ranges, see [1] Appendix B1 (page 355).
Because the number part of a cutter number maps to letters, even the numeral is sorted
lexicographically, so for example this is the correct sorting:
[I2, I23, I3], **not** [I2, I3, I23]
In essence they are sort of sorted as if they were decimal numbers.
### 3. Specification
These aren't very well defined and could be just about anything. They usually include at
least the publication year of the edition, but might include edition numbers.
## Sorting
To get _fully_ sortable LCCs, you likely need to use a multipart scheme (as described in
[2]). That's not really feasible for our Solr instance (especially since we store info
at the work level, which likely has multiple LCCs). The added complexity of that
approach is also not immediately worth it right now (but might be in the future).
As a compromise, we make the class number and the first cutter sortable by making the
class number fixed-width. For example:
- PZ73.S758345255 2011 -> PZ-0073.00000000.S758345255 2011
- PZ8.3.G276Lo 1971 -> PZ-0008.30000000.G276Lo 1971
This allows for range queries that could include the first cutter. It sorts incorrectly
if:
- The decimal of the class number is longer than 8 digits (few such cases in OL db)
- The sort is determined by information that appears after the first cutter
- The first cutter is a "double cutter", e.g. B945.D4B65 199
But it works for subject-related range queries, so we consider it sufficient.
## Further Reading
- Wagner, Scott et al. "A Comprehensive Approach to Algorithmic Machine Sorting of
Library of Congress Call Numbers" (2019) [1]
- LCTS/CCS-PCC Task Force on Library of Congress Classification Training. "Fundamentals
of Library of Congress Classification" (????) [2]
- LCC subjects as PDFs https://www.loc.gov/catdir/cpso/lcco/
- LCC subjects "walkable" tree http://id.loc.gov/authorities/classification.html
## References
[1]: https://www.terkko.helsinki.fi/files/9666/classify_trnee_manual.pdf
[2]: https://ejournals.bc.edu/index.php/ital/article/download/11585/9839/
"""
from __future__ import annotations
import re
from collections.abc import Iterable
from openlibrary.utils.ddc import collapse_multiple_space
# WARNING: Parts of this code have been translated into JS in
# LibraryExplorer/utils/lcc.js :(
# KEEP IN SYNC!
LCC_PARTS_RE = re.compile(
r'''
^
# trailing dash only valid in "sortable" LCCs
# Include W, even though technically part of NLM system
(?P<letters>[A-HJ-NP-VWZ][A-Z-]{0,2})
\s?
(?P<number>\d{1,4}(\.\d+)?)?
(?P<cutter1>\s*\.\s*[^\d\s\[]{1,3}\d*\S*)?
(?P<rest>\s.*)?
$
''',
re.IGNORECASE | re.X,
)
def short_lcc_to_sortable_lcc(lcc: str) -> str | None:
"""
See Sorting section of doc above
:param str lcc: unformatted lcc
"""
m = LCC_PARTS_RE.match(clean_raw_lcc(lcc))
if not m:
return None
parts = m.groupdict()
parts['letters'] = parts['letters'].upper().ljust(3, '-')
parts['number'] = float(parts['number'] or 0)
parts['cutter1'] = '.' + parts['cutter1'].lstrip(' .') if parts['cutter1'] else ''
parts['rest'] = ' ' + parts['rest'].strip() if parts['rest'] else ''
# There will often be a CPB Box No (whatever that is) in the LCC field;
# E.g. "CPB Box no. 1516 vol. 17"
# Although this might be useful to search by, it's not really an LCC,
# so considering it invalid here.
if parts['letters'] == 'CPB':
return None
return '%(letters)s%(number)013.8f%(cutter1)s%(rest)s' % parts
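# Examples (editorial sketch; values mirror the accompanying test suite):
#
#   short_lcc_to_sortable_lcc('PZ73.S758345255 2011')    -> 'PZ-0073.00000000.S758345255 2011'
#   short_lcc_to_sortable_lcc('pz73')                    -> 'PZ-0073.00000000'
#   short_lcc_to_sortable_lcc('9608 BOOK NOT YET IN LC') -> None (not an LCC)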
def sortable_lcc_to_short_lcc(lcc: str) -> str:
"""
As close to the inverse of make_sortable_lcc as possible
"""
m = LCC_PARTS_RE.match(lcc)
assert m, f'Unable to parse LCC "{lcc}"'
parts = m.groupdict()
parts['letters'] = parts['letters'].strip('-')
parts['number'] = parts['number'].strip('0').strip('.') # Need to do in order!
parts['cutter1'] = parts['cutter1'].strip(' ') if parts['cutter1'] else ''
parts['rest'] = ' ' + parts['rest'].strip() if parts['rest'] else ''
return '%(letters)s%(number)s%(cutter1)s%(rest)s' % parts
def clean_raw_lcc(raw_lcc: str) -> str:
"""
Remove noise in lcc before matching to LCC_PARTS_RE
"""
lcc = collapse_multiple_space(raw_lcc.replace('\\', ' ').strip(' '))
if (lcc.startswith('[') and lcc.endswith(']')) or (
lcc.startswith('(') and lcc.endswith(')')
):
lcc = lcc[1:-1]
return lcc
def normalize_lcc_prefix(prefix: str) -> str | None:
"""
:param str prefix: An LCC prefix
:return: Prefix transformed to be a prefix for sortable LCC
>>> normalize_lcc_prefix('A123')
'A--0123'
>>> normalize_lcc_prefix('A123.')
'A--0123'
>>> normalize_lcc_prefix('A123.0')
'A--0123.0'
>>> normalize_lcc_prefix('A123.C')
'A--0123.00000000.C'
>>> normalize_lcc_prefix('A123.C0')
'A--0123.00000000.C0'
>>> normalize_lcc_prefix('E--')
'E--'
>>> normalize_lcc_prefix('PN-')
'PN-'
"""
if re.match(r'^[A-Z]+$', prefix, re.I):
return prefix
else:
lcc_norm = short_lcc_to_sortable_lcc(prefix.rstrip('.'))
if lcc_norm:
result = lcc_norm.rstrip('0')
if '.' in prefix and prefix.endswith('0'):
zeros_to_add = len(prefix) - len(prefix.rstrip('0'))
result += '0' * zeros_to_add
elif result.endswith('-0000.'):
result = result.rstrip('0.')
return result.rstrip('.')
else:
return None
def normalize_lcc_range(start: str, end: str) -> list[str | None]:
"""
:param str start: LCC prefix to start range
:param str end: LCC prefix to end range
:return: range with prefixes being prefixes for sortable LCCs
"""
return [
lcc if lcc == '*' else short_lcc_to_sortable_lcc(lcc) for lcc in (start, end)
]
def choose_sorting_lcc(sortable_lccs: Iterable[str]) -> str:
# Choose longest; theoretically most precise?
# Note we go to short-form first, so eg 'A123' beats 'A'
def short_len(lcc: str) -> int:
return len(sortable_lcc_to_short_lcc(lcc))
return sorted(sortable_lccs, key=short_len, reverse=True)[0]
| 7,492 | Python | .py | 177 | 38.067797 | 88 | 0.668087 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
257 | schema.py | internetarchive_openlibrary/openlibrary/utils/schema.py | """utility to generate db schema for any database engine.
(should go to web.py)
"""
__all__ = [
"Schema",
"Table",
"Column",
"register_datatype",
"register_constant",
]
_datatypes = {}
def register_datatype(name, datatype):
_datatypes[name] = datatype
_adapters = {}
def register_adapter(name, adapter):
_adapters[name] = adapter
def get_adapter(name):
if isinstance(name, AbstractAdapter):
return name
else:
return _adapters[name]()
_constants = {}
def register_constant(name, constant):
_constants[name] = constant
def get_constant(name):
    return _constants[name]
class AbstractAdapter:
def type_to_sql(self, type, limit=None):
sql = self.get_native_type(type)
if limit:
sql += '(%s)' % limit
return sql
def get_native_type(self, type):
return self.native_types[type]
def index_name(self, table, columns):
return "_".join([table] + columns + ["idx"])
def references_to_sql(self, column_name, value):
# foreign key constraints are not supported by default
return None
def column_option_to_sql(self, column, option, value):
if option == 'primary_key' and value is True:
return 'primary key'
elif option == 'unique' and value is True:
return 'unique'
elif option == 'default':
if hasattr(value, 'sql'):
value = value.sql(self)
else:
value = sqlrepr(value)
return "default %s" % (value)
elif option == 'null':
return {True: 'null', False: 'not null'}[value]
elif option == 'references':
return self.references_to_sql(column, value)
def get_constant(self, name):
return self.constants[name]
    def quote(self, value):
raise NotImplementedError()
class MockAdapter(AbstractAdapter):
def get_native_type(self, type):
return type
def references_to_sql(self, column_name, value):
return 'references ' + value
def quote(self, value):
return repr(value)
class MySQLAdapter(AbstractAdapter):
native_types = {
'serial': 'int auto_increment not null',
'integer': 'int',
'float': 'float',
'string': 'varchar',
'text': 'text',
'datetime': 'datetime',
'timestamp': 'datetime',
'time': 'time',
'date': 'date',
'binary': 'blob',
'boolean': 'boolean',
}
constants = {
'CURRENT_TIMESTAMP': 'CURRENT_TIMESTAMP',
'CURRENT_DATE': 'CURRENT_DATE',
'CURRENT_TIME': 'CURRENT_TIME',
'CURRENT_UTC_TIMESTAMP': 'UTC_TIMESTAMP',
'CURRENT_UTC_DATE': 'UTC_DATE',
'CURRENT_UTC_TIME': 'UTC_TIME',
}
def references_to_sql(self, column_name, value):
return {'constraint': f'foreign key ({column_name}) references {value}'}
class PostgresAdapter(AbstractAdapter):
native_types = {
'serial': 'serial',
'integer': 'int',
'float': 'float',
'string': 'character varying',
'text': 'text',
'datetime': 'timestamp',
'timestamp': 'timestamp',
'time': 'time',
'date': 'date',
'binary': 'bytea',
'boolean': 'boolean',
}
constants = {
'CURRENT_TIMESTAMP': 'current_timestamp',
'CURRENT_DATE': 'current_date',
'CURRENT_TIME': 'current_time',
'CURRENT_UTC_TIMESTAMP': "(current_timestamp at time zone 'utc')",
'CURRENT_UTC_DATE': "(date (current_timestamp at timezone 'utc'))",
'CURRENT_UTC_TIME': "(current_time at time zone 'utc')",
}
def references_to_sql(self, column_name, value):
return 'references ' + value
class SQLiteAdapter(AbstractAdapter):
native_types = {
'serial': 'integer autoincrement',
'integer': 'integer',
'float': 'float',
'string': 'varchar',
'text': 'text',
'datetime': 'datetime',
'timestamp': 'datetime',
'time': 'datetime',
'date': 'date',
'binary': 'blob',
'boolean': 'boolean',
}
constants = {
'CURRENT_TIMESTAMP': "CURRENT_TIMESTAMP",
'CURRENT_DATE': "CURRENT_DATE",
'CURRENT_TIME': "CURRENT_TIME",
'CURRENT_UTC_TIMESTAMP': "CURRENT_TIMESTAMP",
'CURRENT_UTC_DATE': "CURRENT_DATE",
'CURRENT_UTC_TIME': "CURRENT_TIME",
}
register_adapter('mysql', MySQLAdapter)
register_adapter('postgres', PostgresAdapter)
register_adapter('sqlite', SQLiteAdapter)
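# Example (editorial sketch): adapters map generic column types to engine-specific SQL.
#
#   get_adapter('postgres').type_to_sql('string', 255)  -> 'character varying(255)'
#   get_adapter('mysql').type_to_sql('serial')          -> 'int auto_increment not null'
#   get_adapter('sqlite').type_to_sql('binary')         -> 'blob'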
def sqlrepr(s):
if isinstance(s, str):
return repr(s)
else:
return s
class Datatype:
def __init__(self, name=None):
self.name = name
def sql(self, engine):
return get_adapter(engine).type_to_sql(self.name)
class Constant:
def __init__(self, name=None):
self.name = name
def sql(self, engine):
return get_adapter(engine).get_constant(self.name)
for c in [
'CURRENT_TIMESTAMP',
'CURRENT_DATE',
'CURRENT_TIME',
'CURRENT_UTC_TIMESTAMP',
'CURRENT_UTC_DATE',
'CURRENT_UTC_TIME',
]:
register_constant(c, Constant(c))
class Schema:
def __init__(self):
self.tables = []
self.indexes = []
for name, value in _constants.items():
setattr(self, name, value)
def column(self, name, type, **options):
return Column(name, type, **options)
def add_table(self, name, *columns, **options):
t = Table(name, *columns, **options)
self.tables.append(t)
def add_index(self, table, columns, **options):
i = Index(table, columns, **options)
self.indexes.append(i)
def sql(self, engine):
return "\n".join(x.sql(engine) for x in self.tables + self.indexes)
class Table:
"""Database table.
>>> t = Table('user', Column('name', 'string'))
>>> print(t.sql('postgres'))
create table user (
name character varying(255)
);
"""
def __init__(self, name, *columns, **options):
self.name = name
self.columns = columns
self.options = options
def sql(self, engine):
columns = [c.sql(engine) for c in self.columns]
for c in self.columns:
for constraint in c.constraints:
columns.append(constraint)
return "create table {} (\n {}\n);".format(
self.name, ",\n ".join(columns)
)
class Column:
"""Column in a database table.
>>> Column('name', 'text').sql('mock')
'name text'
>>> Column('id', 'serial', primary_key=True).sql('mock')
'id serial primary key'
>>> Column('revision', 'integer', default=1).sql('mock')
'revision integer default 1'
>>> Column('name', 'string', default='joe').sql('mock')
"name string(255) default 'joe'"
"""
def __init__(self, name, type, **options):
self.name = name
self.type = type
self.options = options
self.limit = self.options.pop('limit', None)
self.constraints = []
self.primary_key = self.options.get('primary_key')
self.unique = self.options.get('unique')
self.references = self.options.get('references')
# string type is of variable length. set default length as 255.
if type == 'string':
self.limit = self.limit or 255
def sql(self, engine):
adapter = get_adapter(engine)
tokens = [self.name, adapter.type_to_sql(self.type, self.limit)]
for k, v in self.options.items():
result = adapter.column_option_to_sql(self.name, k, v)
if result is None:
continue
elif isinstance(
result, dict
): # a way for column options to add constraints
self.constraints.append(result['constraint'])
else:
tokens.append(result)
return " ".join(tokens)
class Index:
"""Database index.
>>> Index('user', 'email').sql('mock')
'create index user_email_idx on user(email);'
>>> Index('user', 'email', unique=True).sql('mock')
'create unique index user_email_idx on user(email);'
>>> Index('page', ['path', 'revision']).sql('mock')
'create index page_path_revision_idx on page(path, revision);'
"""
def __init__(self, table, columns, **options):
self.table = table
if not isinstance(columns, list):
self.columns = [columns]
else:
self.columns = columns
self.unique = options.get('unique')
self.name = options.get('name')
def sql(self, engine):
adapter = get_adapter(engine)
name = self.name or adapter.index_name(self.table, self.columns)
if self.unique:
s = 'create unique index '
else:
s = 'create index '
        s += name
s += ' on {}({});'.format(self.table, ", ".join(self.columns))
return s
def _test():
"""
Define a sample schema.
>>> s = Schema()
>>> s.add_table('posts',
... s.column('id', 'serial', primary_key=True),
... s.column('slug', 'string', unique=True, null=False),
... s.column('title', 'string', null=False),
... s.column('body', 'text'),
... s.column('created_on', 'timestamp', default=s.CURRENT_UTC_TIMESTAMP))
...
>>> s.add_table('comments',
... s.column('id', 'serial', primary_key=True),
... s.column('post_id', 'integer', references='posts(id)'),
... s.column('comment', 'text'))
...
>>> s.add_index('posts', 'slug')
Validate postgres schema.
>>> print(s.sql('postgres'))
create table posts (
id serial primary key,
slug character varying(255) unique not null,
title character varying(255) not null,
body text,
created_on timestamp default (current_timestamp at time zone 'utc')
);
create table comments (
id serial primary key,
post_id int references posts(id),
comment text
);
create index posts_slug_idx on posts(slug);
Validate MySQL schema.
>>> print(s.sql('mysql'))
create table posts (
id int auto_increment not null primary key,
slug varchar(255) unique not null,
title varchar(255) not null,
body text,
created_on datetime default UTC_TIMESTAMP
);
create table comments (
id int auto_increment not null primary key,
post_id int,
comment text,
foreign key (post_id) references posts(id)
);
create index posts_slug_idx on posts(slug);
That's all.
"""
if __name__ == "__main__":
register_adapter('mock', MockAdapter)
import doctest
doctest.testmod()
| 11,039 | Python | .py | 316 | 26.971519 | 85 | 0.578373 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
258 | open_syllabus_project.py | internetarchive_openlibrary/openlibrary/utils/open_syllabus_project.py | import logging
import os
import json
import sqlite3
import gzip
from contextlib import closing
from pathlib import Path
osp_dump_location: Path | None = None
logger = logging.getLogger("openlibrary.open_syllabus_project")
def get_osp_dump_location() -> Path | None:
"""
    Get the location of the Open Syllabus Project counts dump, if one has been configured.
"""
global osp_dump_location
return osp_dump_location
def set_osp_dump_location(val: Path | None):
global osp_dump_location
osp_dump_location = val
# Function to get the total based on OLID
def get_total_by_olid(olid: str) -> int | None:
"""
Retrieves the total number of times a book with the given Open Library ID (OLID) has been assigned in syllabi
from the Open Syllabus Project database.
:param olid: The Open Library ID (OLID) of the book to retrieve the total for. (eg `/works/OL123W` or `OL123W`)
Raises:
Exception: If there is an error querying the database.
"""
olid_int = olid.replace("/works/", "").replace("OL", "").replace("W", "")
db_file = get_osp_dump_location()
if not db_file:
logger.warning("Open Syllabus Project database not found.")
return None
with closing(sqlite3.connect(db_file)) as conn:
cursor = conn.cursor()
# Query the database for the total based on OLID
cursor.execute("SELECT total FROM data WHERE olid = ?", (olid_int,))
result = cursor.fetchone()
if result:
return result[0]
return None
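# Illustrative usage sketch (editorial addition; the database path is hypothetical):
#
#   set_osp_dump_location(Path('/data/osp_totals.db'))
#   get_total_by_olid('/works/OL123W')   # -> total assignments, or None if unknown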
def generate_osp_db(input_directory: Path, output_file: str) -> None:
"""
This function generates an SQLite database from a directory of .json.gz files.
The database contains data extracted from the JSON files, including the OLID and total fields.
The function excludes lines where the 'total' is less than one.
The function creates an index on the OLID column for faster querying.
Args:
        input_directory (Path): The directory containing the .json.gz files.
        output_file (str): Path of the SQLite database file to create (an existing
            data table is dropped and rebuilt).
Returns:
None
"""
# Initialize a list to store the data
data = []
# Create an SQLite database and table
with closing(sqlite3.connect(output_file)) as conn:
cursor = conn.cursor()
# Drop the table if it exists so we only have fresh data
cursor.execute('DROP TABLE IF EXISTS data;')
cursor.execute(
'''
CREATE TABLE IF NOT EXISTS data (
olid INTEGER PRIMARY KEY,
total INTEGER
)
'''
)
# Iterate through the files in the input directory
# input_directory_path = Path(input_directory)
for i, filename in enumerate(input_directory.iterdir()):
print(i)
if str(filename).endswith(".json.gz"):
with gzip.open(os.path.join(input_directory, filename), "rt") as file:
for line in file:
# Parse the JSON data
json_data = json.loads(line)
# Extract the 'ol_id' and 'total' fields
ol_id = int(
json_data["ol_id"].replace("/works/OL", "").replace("W", "")
)
total = json_data["total"]
# Exclude lines where the 'total' is less than one
if total >= 1:
data.append((ol_id, total))
# Insert the filtered data into the SQLite database
cursor.executemany("INSERT INTO data (olid, total) VALUES (?, ?)", data)
# Commit changes, sort the olid column in ascending order, and close the database connection
cursor.execute("CREATE INDEX IF NOT EXISTS olid_index ON data (olid)")
conn.commit()
print(f'SQLite database created successfully: {output_file}')
| 3,885 | Python | .py | 89 | 34.348315 | 115 | 0.621385 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
259 | decorators.py | internetarchive_openlibrary/openlibrary/utils/decorators.py | import web
from functools import wraps
from openlibrary.accounts import get_current_user
def authorized_for(*expected_args):
"""Check for membership in any given usergroup before proceeding."""
def decorator_authorized(func):
@wraps(func)
def wrapper_authorized(*args, **kwargs):
user = get_current_user()
if not user:
raise web.unauthorized(message='Requires log-in.')
authorized = False
for usergroup in expected_args:
if user.is_usergroup_member(usergroup):
authorized = True
if not authorized:
# Throw some authorization error
raise web.forbidden(message='Requires elevated permissions.')
            return func(*args, **kwargs)
return wrapper_authorized
return decorator_authorized
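# Illustrative usage sketch (editorial addition; the usergroup and function are hypothetical):
#
#   @authorized_for('/usergroup/admin')
#   def purge_cache():
#       ...   # only runs for logged-in members of /usergroup/admin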
| 876 | Python | .py | 21 | 31.142857 | 77 | 0.634752 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
260 | test_dateutil.py | internetarchive_openlibrary/openlibrary/utils/tests/test_dateutil.py | from .. import dateutil
import datetime
def test_parse_date():
assert dateutil.parse_date("2010") == datetime.date(2010, 1, 1)
assert dateutil.parse_date("2010-02") == datetime.date(2010, 2, 1)
assert dateutil.parse_date("2010-02-03") == datetime.date(2010, 2, 3)
def test_nextday():
assert dateutil.nextday(datetime.date(2008, 1, 1)) == datetime.date(2008, 1, 2)
assert dateutil.nextday(datetime.date(2008, 1, 31)) == datetime.date(2008, 2, 1)
assert dateutil.nextday(datetime.date(2008, 2, 28)) == datetime.date(2008, 2, 29)
assert dateutil.nextday(datetime.date(2008, 2, 29)) == datetime.date(2008, 3, 1)
assert dateutil.nextday(datetime.date(2008, 12, 31)) == datetime.date(2009, 1, 1)
def test_nextmonth():
assert dateutil.nextmonth(datetime.date(2008, 1, 1)) == datetime.date(2008, 2, 1)
assert dateutil.nextmonth(datetime.date(2008, 1, 12)) == datetime.date(2008, 2, 1)
assert dateutil.nextmonth(datetime.date(2008, 12, 12)) == datetime.date(2009, 1, 1)
def test_nextyear():
assert dateutil.nextyear(datetime.date(2008, 1, 1)) == datetime.date(2009, 1, 1)
assert dateutil.nextyear(datetime.date(2008, 2, 12)) == datetime.date(2009, 1, 1)
def test_parse_daterange():
assert dateutil.parse_daterange("2010") == (
datetime.date(2010, 1, 1),
datetime.date(2011, 1, 1),
)
assert dateutil.parse_daterange("2010-02") == (
datetime.date(2010, 2, 1),
datetime.date(2010, 3, 1),
)
assert dateutil.parse_daterange("2010-02-03") == (
datetime.date(2010, 2, 3),
datetime.date(2010, 2, 4),
)
| 1,622 | Python | .py | 32 | 45.40625 | 87 | 0.667724 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
261 | test_lccn.py | internetarchive_openlibrary/openlibrary/utils/tests/test_lccn.py | import pytest
from openlibrary.utils.lccn import normalize_lccn
def test_normalize_lccn_prenormalized():
prenormalized = '94200274'
assert normalize_lccn(prenormalized) == prenormalized
lccns = [
('96-39190', '96039190'),
('agr 62000298', 'agr62000298'),
('agr 62-298', 'agr62000298'),
('agr62000298', 'agr62000298'),
('agr 62-298 Revised', 'agr62000298'),
]
# Cases from https://www.loc.gov/marc/lccn-namespace.html
lccns += [
('n78-89035', 'n78089035'),
('n 78890351 ', 'n78890351'),
(' 85000002 ', '85000002'),
('85-2 ', '85000002'),
('2001-000002', '2001000002'),
('75-425165//r75', '75425165'),
(' 79139101 /AC/r932', '79139101'),
]
@pytest.mark.parametrize('raw,norm', lccns)
def test_normalize_lccn(raw, norm):
assert normalize_lccn(raw) == norm
| 821 | Python | .py | 25 | 29.12 | 57 | 0.661168 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
262 | test_utils.py | internetarchive_openlibrary/openlibrary/utils/tests/test_utils.py | from openlibrary.utils import (
extract_numeric_id_from_olid,
str_to_key,
)
def test_str_to_key():
assert str_to_key('x') == 'x'
assert str_to_key('X') == 'x'
assert str_to_key('[X]') == 'x'
assert str_to_key('!@<X>;:') == '!x'
assert str_to_key('!@(X);:') == '!(x)'
def test_extract_numeric_id_from_olid():
assert extract_numeric_id_from_olid('/works/OL123W') == '123'
assert extract_numeric_id_from_olid('OL123W') == '123'
| 465 | Python | .py | 13 | 31.692308 | 65 | 0.595982 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
263 | test_retry.py | internetarchive_openlibrary/openlibrary/utils/tests/test_retry.py | from unittest.mock import Mock
import pytest
from openlibrary.utils.retry import MaxRetriesExceeded, RetryStrategy
class TestRetryStrategy:
def test_exception_filter(self, monkeytime):
foo = Mock(side_effect=ZeroDivisionError)
retry = RetryStrategy([ZeroDivisionError], max_retries=3)
with pytest.raises(MaxRetriesExceeded):
retry(foo)
assert foo.call_count == 4
def test_no_retry(self):
foo = Mock(return_value=1)
retry = RetryStrategy([ZeroDivisionError], max_retries=3)
assert retry(foo) == 1
assert foo.call_count == 1
def test_retry(self, monkeytime):
foo = Mock(side_effect=[ZeroDivisionError, 1])
retry = RetryStrategy([ZeroDivisionError], max_retries=3)
assert retry(foo) == 1
assert foo.call_count == 2
def test_unhandled_error(self):
foo = Mock(side_effect=ZeroDivisionError)
retry = RetryStrategy([ValueError], max_retries=3)
with pytest.raises(ZeroDivisionError):
retry(foo)
assert foo.call_count == 1
def test_last_exception(self, monkeytime):
retry = RetryStrategy([ZeroDivisionError], max_retries=3)
with pytest.raises(MaxRetriesExceeded):
try:
retry(lambda: 1 / 0)
except MaxRetriesExceeded as e:
assert isinstance(e.last_exception, ZeroDivisionError)
raise
| 1,440 | Python | .py | 34 | 33.529412 | 70 | 0.66 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
264 | test_processors.py | internetarchive_openlibrary/openlibrary/utils/tests/test_processors.py | import web
import time
from ..processors import RateLimitProcessor
class TestRateLimitProcessor:
"""py.test testcase for testing RateLimitProcessor."""
def setup_method(self, method):
web.ctx.ip = "127.0.0.1"
def test_check_rate(self, monkeypatch):
monkeypatch.setattr(time, "time", lambda: 123456)
p = RateLimitProcessor(10)
for i in range(10):
assert p.check_rate() is True
assert p.check_rate() is False
def test_get_window(self, monkeypatch):
p = RateLimitProcessor(10, window_size=10)
d = web.storage(time=1)
monkeypatch.setattr(time, "time", lambda: d.time)
# window should continue to be the same from time 1 to 9.
w = p.get_window()
w['foo'] = 'bar'
d.time = 9
assert p.get_window() == {'foo': 'bar'}
# and the window should get cleared when time becomes 10.
d.time = 10
assert p.get_window() == {}
| 974 | Python | .py | 25 | 31.28 | 65 | 0.624733 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
265 | test_solr.py | internetarchive_openlibrary/openlibrary/utils/tests/test_solr.py | from ..solr import Solr
def test_prepare_select():
solr = Solr("http://localhost:8983/solr")
assert solr._prepare_select("foo") == "foo"
assert solr._prepare_select({"isbn": "1234567890"}) == 'isbn:"1234567890"'
assert (
solr._prepare_select({"isbn": ["1234567890", "9876543210"]})
== 'isbn:("1234567890" OR "9876543210")'
)
assert (
solr._prepare_select({"publish_year": ("1990", "2000")})
== 'publish_year:[1990 TO 2000]'
)
| 490 | Python | .py | 13 | 31.769231 | 78 | 0.598309 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
266 | test_lcc.py | internetarchive_openlibrary/openlibrary/utils/tests/test_lcc.py | import pytest
from openlibrary.utils.lcc import (
choose_sorting_lcc,
normalize_lcc_prefix,
normalize_lcc_range,
short_lcc_to_sortable_lcc,
sortable_lcc_to_short_lcc,
)
TESTS = [
('PZ-0073.00000000', 'pz73', 'PZ73', 'lower case'),
('PZ-0000.00000000', 'PZ', 'PZ', 'just class'),
('PZ-0123.00000000 [P]', 'PZ123 [P]', 'PZ123 [P]', 'keeps brackets at end'),
(
'BP-0166.94000000.S277 1999',
'\\BP\\166.94\\.S277\\1999',
'BP166.94.S277 1999',
'backslashes instead of spaces',
),
(
'LC-6252.00000000.T4 T4 vol. 33, no. 10',
'[LC6252.T4 T4 vol. 33, no. 10]',
'LC6252.T4 T4 vol. 33, no. 10',
'brackets',
),
('SF-0427.00000000.G74', 'SF427 . G74', 'SF427.G74', 'space in cutter1'),
]
@pytest.mark.parametrize(
"sortable_lcc,raw_lcc,short_lcc,name", TESTS, ids=[t[-1] for t in TESTS]
)
def test_to_sortable(sortable_lcc, raw_lcc, short_lcc, name):
assert short_lcc_to_sortable_lcc(raw_lcc) == sortable_lcc
@pytest.mark.parametrize(
"sortable_lcc,raw_lcc,short_lcc,name", TESTS, ids=[t[-1] for t in TESTS]
)
def test_to_short_lcc(sortable_lcc, raw_lcc, short_lcc, name):
assert sortable_lcc_to_short_lcc(sortable_lcc) == short_lcc
INVALID_TESTS = [
('6113 .136', 'dewey decimal'),
('9608 BOOK NOT YET IN LC', 'noise'),
('#M8184', 'hash prefixed'),
('', 'empty'),
('MLCS 92/14990', 'too much class'),
('PZ123.234.234', 'too much decimal'),
# The following are "real world" data from open library
('IN PROCESS', 'noise'),
('African Section Pamphlet Coll', 'real ol data'),
('Microfilm 99/20', 'real ol data'),
('Microfilm 61948 E', 'real ol data'),
('Microfiche 92/80965 (G)', 'real ol data'),
('MLCSN+', 'real ol data'),
('UNCLASSIFIED 809 (S)', 'real ol data'),
('CPB Box no. 1516 vol. 17', 'CPB box number'),
]
@pytest.mark.parametrize("text,name", INVALID_TESTS, ids=[t[-1] for t in INVALID_TESTS])
def test_invalid_lccs(text, name):
assert short_lcc_to_sortable_lcc(text) is None
# Note: we don't handle all of these _entirely_ correctly as the paper says they should
# be, but we handle enough (See lcc.py)
# Src: https://ejournals.bc.edu/index.php/ital/article/download/11585/9839/
WAGNER_2019_EXAMPLES = [
('B--1190.00000000 1951', 'B1190 1951', 'no Cutter string'),
('DT-0423.00000000.E26 9th.ed. 2012', 'DT423.E26 9th.ed. 2012', 'compound spec'),
('E--0505.50000000 102nd.F57 1999', 'E505.5 102nd.F57 1999', 'ordinal in classif.'),
('HB-3717.00000000 1929.E37 2015', 'HB3717 1929.E37 2015 ', 'date in classif.'),
('KBD0000.00000000.G189s', 'KBD.G189s ', 'no caption number, no specification'),
('N--8354.00000000.B67 2000x', 'N8354.B67 2000x', 'date with suffix '),
('PS-0634.00000000.B4 1958-63', 'PS634.B4 1958-63', 'hyphenated range of dates'),
('PS-3557.00000000.A28R4 1955', 'PS3557.A28R4 1955', '"double Cutter"'),
('PZ-0008.30000000.G276Lo 1971', 'PZ8.3.G276Lo 1971 ', 'Cutter with "work mark"'),
('PZ-0073.00000000.S758345255 2011', 'PZ73.S758345255 2011', 'long Cutter decimal'),
]
@pytest.mark.parametrize(
"sortable_lcc,short_lcc,name",
WAGNER_2019_EXAMPLES,
ids=[t[-1] for t in WAGNER_2019_EXAMPLES],
)
def test_wagner_2019_to_sortable(sortable_lcc, short_lcc, name):
assert short_lcc_to_sortable_lcc(short_lcc) == sortable_lcc
@pytest.mark.parametrize(
"sortable_lcc,short_lcc,name",
WAGNER_2019_EXAMPLES,
ids=[t[-1] for t in WAGNER_2019_EXAMPLES],
)
def test_wagner_2019_to_short_lcc(sortable_lcc, short_lcc, name):
assert sortable_lcc_to_short_lcc(sortable_lcc) == short_lcc.strip()
PREFIX_TESTS = [
('A', 'A', 'Single letter'),
('ADC', 'ADC', 'multi letter'),
('A5', 'A--0005', 'Alphanum'),
('A5.00', 'A--0005.00', 'Alphanum'),
('A10', 'A--0010', 'Alphanum trailing 0'),
('A10.5', 'A--0010.5', 'Alphanum with decimal'),
('A10.', 'A--0010', 'Alphanum with trailing decimal'),
('A10.C', 'A--0010.00000000.C', 'Alphanum with partial cutter'),
('F349.N2 A77', 'F--0349.00000000.N2 A77', '2 cutters'),
('123', None, 'Invalid returns None'),
('*B55', None, 'Invalid returns None'),
]
@pytest.mark.parametrize(
"prefix,normed,name", PREFIX_TESTS, ids=[t[-1] for t in PREFIX_TESTS]
)
def test_normalize_lcc_prefix(prefix, normed, name):
assert normalize_lcc_prefix(prefix) == normed
RANGE_TESTS = [
(['A', 'B'], ['A--0000.00000000', 'B--0000.00000000'], 'Single letters'),
(['A1', 'A100'], ['A--0001.00000000', 'A--0100.00000000'], 'Letter nums'),
(['A1', 'B1.13.C89'], ['A--0001.00000000', 'B--0001.13000000.C89'], 'Cutter num'),
(['A1', 'noise'], ['A--0001.00000000', None], 'One Invalid'),
(['blah', 'blah'], [None, None], 'Both invalid'),
(['A1', '*'], ['A--0001.00000000', '*'], 'Star'),
]
@pytest.mark.parametrize(
"raw,normed,name", RANGE_TESTS, ids=[t[-1] for t in RANGE_TESTS]
)
def test_normalize_lcc_range(raw, normed, name):
assert normalize_lcc_range(*raw) == normed
SORTING_TESTS = [
(['A--0001.00000000', 'B--0001.13000000.C89'], 1, 'Chooses longest'),
(['A--0001.00000000', 'A--0001.13000000'], 1, 'Chooses most precise'),
]
@pytest.mark.parametrize(
"lccs,result,name", SORTING_TESTS, ids=[t[-1] for t in SORTING_TESTS]
)
def test_choose_sorting_lcc(lccs, result, name):
assert choose_sorting_lcc(lccs) == lccs[result]
| 5,438 | Python | .py | 125 | 39.336 | 88 | 0.639735 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
267 | test_isbn.py | internetarchive_openlibrary/openlibrary/utils/tests/test_isbn.py | import pytest
from openlibrary.utils.isbn import (
get_isbn_10_and_13,
isbn_10_to_isbn_13,
isbn_13_to_isbn_10,
normalize_identifier,
normalize_isbn,
opposite_isbn,
get_isbn_10s_and_13s,
)
def test_isbn_13_to_isbn_10():
assert isbn_13_to_isbn_10('978-0-940787-08-7') == '0940787083'
assert isbn_13_to_isbn_10('9780940787087') == '0940787083'
assert isbn_13_to_isbn_10('BAD-ISBN') is None
def test_isbn_10_to_isbn_13():
assert isbn_10_to_isbn_13('0-940787-08-3') == '9780940787087'
assert isbn_10_to_isbn_13('0940787083') == '9780940787087'
assert isbn_10_to_isbn_13('BAD-ISBN') is None
def test_opposite_isbn():
assert opposite_isbn('0-940787-08-3') == '9780940787087'
assert opposite_isbn('978-0-940787-08-7') == '0940787083'
assert opposite_isbn('BAD-ISBN') is None
def test_normalize_isbn_returns_None():
assert normalize_isbn(None) is None
assert normalize_isbn('') is None
assert normalize_isbn('a') is None
isbn_cases = [
('1841151866', '1841151866'),
('184115186x', '184115186X'),
('184115186X', '184115186X'),
('184-115-1866', '1841151866'),
('9781841151861', '9781841151861'),
('978-1841151861', '9781841151861'),
('123-456-789-X ', '123456789X'),
('ISBN: 123-456-789-X ', '123456789X'),
('56', None),
]
@pytest.mark.parametrize('isbnlike,expected', isbn_cases)
def test_normalize_isbn(isbnlike, expected):
assert normalize_isbn(isbnlike) == expected
def test_get_isbn_10s_and_13s() -> None:
# isbn 10 only
result = get_isbn_10s_and_13s(["1576079457"])
assert result == (["1576079457"], [])
# isbn 13 only
result = get_isbn_10s_and_13s(["9781576079454"])
assert result == ([], ["9781576079454"])
# mixed isbn 10 and 13, with multiple elements in each, one which has an extra space.
result = get_isbn_10s_and_13s(
["9781576079454", "1576079457", "1576079392 ", "9781280711190"]
)
assert result == (["1576079457", "1576079392"], ["9781576079454", "9781280711190"])
# an empty list
result = get_isbn_10s_and_13s([])
assert result == ([], [])
# not an isbn
result = get_isbn_10s_and_13s(["flop"])
assert result == ([], [])
# isbn 10 string, with an extra space.
result = get_isbn_10s_and_13s(" 1576079457")
assert result == (["1576079457"], [])
# isbn 13 string
result = get_isbn_10s_and_13s("9781280711190")
assert result == ([], ["9781280711190"])
@pytest.mark.parametrize(
["isbn", "expected"],
[
("1111111111", ("1111111111", "9781111111113")),
("9781111111113", ("1111111111", "9781111111113")),
("979-1-23456-789-6", (None, "9791234567896")),
("", (None, None)),
(None, (None, None)),
],
)
def test_get_isbn_10_and_13(isbn, expected) -> None:
got = get_isbn_10_and_13(isbn)
assert got == expected
@pytest.mark.parametrize(
["identifier", "expected"],
[
("B01234678", ("B01234678", None, None)),
("1111111111", (None, "1111111111", "9781111111113")),
("9781111111113", (None, "1111111111", "9781111111113")),
("", (None, None, None)),
],
)
def test_normalize_identifier(identifier, expected) -> None:
got = normalize_identifier(identifier)
assert got == expected
| 3,320 | Python | .py | 89 | 32.393258 | 89 | 0.639539 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
268 | test_ddc.py | internetarchive_openlibrary/openlibrary/utils/tests/test_ddc.py | import pytest
from openlibrary.utils.ddc import (
choose_sorting_ddc,
normalize_ddc,
normalize_ddc_prefix,
normalize_ddc_range,
)
# Src: https://www.oclc.org/bibformats/en/0xx/082.html
TESTS_FROM_OCLC = [
("370.19'342", ['370.19342'], "Segmentation (prime) marks"),
("370.19/342", ['370.19342'], "Segmentation (prime) marks"),
("j574", ['j574', '574'], "Juvenile works."),
("[E]", ['[E]'], "Juvenile works with [E]"),
("[Fic]", ['[Fic]'], "Juvenile works with [Fic]."),
("658.404 92", ['658.404 92'], "Dewey numbers followed by 92 or 920."),
("658.404 920", ['658.404 920'], "Dewey numbers followed by 92 or 920."),
("942.082 [B]", ['942.082 B'], "Uppercase B in post-1971 numbers."),
(
"*657.6",
['657.6*'],
"LC assigned Dewey numbers according to both the 14th and the 15th editions of "
"the Dewey schedules.",
),
(
"*735.29 735.42",
['735.29*', '735.42'],
"LC assigned Dewey numbers according to both the 14th and the 15th editions of "
"the Dewey schedules.",
),
("081s", ['081 s'], "Series numbers."),
("081 s", ['081 s'], "Series numbers."),
(
"(015.73)",
['015.73 s'],
"Parentheses indicating Dewey numbers assigned to a series.",
),
(
"015.73 s",
['015.73 s'],
"Parentheses indicating Dewey numbers assigned to a series.",
),
(
"(015.73) 015.791",
['015.73 s', '015.791'],
"Two Dewey numbers: one in parentheses, one not.",
),
("-222.14", ['-222.14'], "Dewey numbers with minus signs."),
("-222.14 (927.5)", ['-222.14', '927.5 s'], "Dewey numbers with minus signs."),
("[320.9777]", ['320.9777'], "Dewey numbers in brackets."),
("[016.3584] 012", ['016.3584'], "Dewey numbers in brackets."),
(
"081s [370.19'342]",
['081 s', '370.19342'],
"Dewey number followed by lowercase s and a second number in brackets.",
),
("C364/.971", ['364.971'], "Canadian CIP"),
]
TESTS = [
('123', ['123'], 'whole number'),
('1', ['001'], 'whole number padding'),
('hello world!', [], 'junk'),
('978123412341', [], 'junk'),
(
'338.9/009171/7 019',
['338.90091717'],
'Dewey number with segmentation and edition number',
),
('332.6 021', ['332.6'], 'Dewey number and DDC edition number'),
('[E] 021', ['[E]'], 'Juvenile work and DDC edition number'),
('015', ['015'], 'Single Dewey number with edition number pattern'),
(
'(015.73) 015.791 021',
['015.73 s', '015.791'],
'Two Dewey numbers and one edition number',
),
('813. 54', ['813.54'], 'Space after decimal'),
('813.′54', ['813.54'], 'Curly quote separator (real world)'),
('813’.54', ['813.54'], 'Other kind of curly quote (real world)'),
('813. 54 (ddc21)', ['813.54'], 'catalog number with ddc prefix (real world)'),
('823/.92 22', ['823.92'], 'catalog number without leading 0 (real world)'),
("813.' 54", ['813.54'], 'Space and quote separate (real world)'),
("726. 6' 0945' 51 (ddc21)", ['726.6'], 'Random spaces (real world)'),
(
'813./54', # noqa: PLE2515
['813.54'],
'Random non-printable chars (real world)',
),
('868 G216m', ['868'], 'Cutter number (real world)'),
('863.,64', ['863.64'], 'Random comma (real world)'),
('616..8/3', ['616.83'], 'Double dot (real world)'),
('813.54 P38 1995', ['813.54'], 'Cutter/year (real world)'),
('21ddc', [], 'DDCs must end at word boundaries'),
('123; 216;', ['123', '216'], 'DDCs ending at word boundaries are ok'),
('[Fic] 2 21', ['[Fic]'], 'Ignores single digits after Fic'),
('[Fic] 813', ['[Fic]', '813'], 'Does not ignore tridigits after Fic'),
('813/.52/.52', ['813.52'], 'Too much decimal'),
]
@pytest.mark.parametrize("raw_ddc,expected,name", TESTS, ids=[t[2] for t in TESTS])
def test_normalize_ddc(raw_ddc, expected, name):
assert normalize_ddc(raw_ddc) == expected
@pytest.mark.parametrize(
"raw_ddc,expected,name", TESTS_FROM_OCLC, ids=[t[2] for t in TESTS_FROM_OCLC]
)
def test_normalize_ddc_with_oclc_spec(raw_ddc, expected, name):
assert normalize_ddc(raw_ddc) == expected
PREFIX_TESTS = [
('0', '0', 'Single number'),
('j', 'j', 'Only juvenile'),
('12', '12', 'Integer'),
('12.3', '012.3', 'Decimal'),
('12.300', '012.300', 'Trailing decimal zeros'),
('100', '100', 'Trailing zeros'),
('noise', 'noise', 'Noise'),
('j100', 'j100', 'Limited juvenile'),
]
@pytest.mark.parametrize(
"prefix,normed,name", PREFIX_TESTS, ids=[t[-1] for t in PREFIX_TESTS]
)
def test_normalize_ddc_prefix(prefix, normed, name):
assert normalize_ddc_prefix(prefix) == normed
RANGE_TESTS = [
(['0', '3'], ['000', '003'], 'Single numbers'),
(['100', '300'], ['100', '300'], 'Single numbers'),
(['100', '*'], ['100', '*'], 'Star'),
]
@pytest.mark.parametrize(
"raw,normed,name", RANGE_TESTS, ids=[t[-1] for t in RANGE_TESTS]
)
def test_normalize_ddc_range(raw, normed, name):
assert normalize_ddc_range(*raw) == normed
SORTING_DDC_TEST = [
(['123', '123.554'], '123.554', 'Chooses longest'),
(['j123', '123'], '123', 'Prefer without j'),
(['-222.14', '927.5'], '927.5', 'Prefer without -'),
(['-222.14'], '-222.14', 'Begrudgingly uses prefixed if only option'),
]
@pytest.mark.parametrize(
"ddcs,outpt,name", SORTING_DDC_TEST, ids=[t[-1] for t in SORTING_DDC_TEST]
)
def test_choose_sorting_ddc(ddcs, outpt, name):
assert choose_sorting_ddc(ddcs) == outpt
| 5,634 | Python | .py | 141 | 34.758865 | 88 | 0.574877 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
269 | __init__.py | internetarchive_openlibrary/openlibrary/accounts/__init__.py | import web
# FIXME: several modules import things from accounts.model
# directly through openlibrary.accounts
from .model import * # noqa: F403
from .model import Account, Link
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from openlibrary.plugins.upstream.models import User
# Unconfirmed functions (I'm not sure that these should be here)
def get_group(name):
"""
Returns the group named 'name'.
"""
return web.ctx.site.get("/usergroup/%s" % name)
class RunAs:
"""
Escalates privileges to become username, performs action as user,
and then de-escalates to original user.
"""
def __init__(self, username: str) -> None:
"""
        :param str username: Username (e.g. mekBot, as in /people/mekBot) of the
            user to run the action as
"""
self.tmp_account = find(username=username)
self.calling_user_auth_token = None
if not self.tmp_account:
raise KeyError('Invalid username')
def __enter__(self):
# Save token of currently logged in user (or no-user)
account = get_current_user()
self.calling_user_auth_token = account and account.generate_login_code()
# Temporarily become user
web.ctx.conn.set_auth_token(self.tmp_account.generate_login_code())
return self.tmp_account
def __exit__(self, exc_type, exc_val, exc_tb):
# Return auth token to original user or no-user
web.ctx.conn.set_auth_token(self.calling_user_auth_token)
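# Illustrative usage sketch (added for clarity, not in the original module); the
# username below is a placeholder:
#
#     with RunAs('someBotUser') as tmp_account:
#         # any web.ctx.site call made in this block runs as someBotUser
#         ...
#     # on exit the original user's auth token (or anonymous session) is restored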
# Confirmed functions (these have to be here)
def get_current_user() -> "User | None":
"""
Returns the currently logged in user. None if not logged in.
"""
return web.ctx.site.get_user()
def find(
username: str | None = None, lusername: str | None = None, email: str | None = None
) -> Account | None:
"""Finds an account by username, email or lowercase username."""
def query(name, value):
try:
return web.ctx.site.store.values(
type="account", name=name, value=value, limit=1
)[0]
except IndexError:
return None
if username:
doc = web.ctx.site.store.get("account/" + username)
elif lusername:
doc = query("lusername", lusername)
elif email:
# the email stored in account doc is case-sensitive.
# The lowercase of email is used in the account-email document.
# querying that first and taking the username from there to make
# the email search case-insensitive.
#
# There are accounts with case-variation of emails. To handle those,
# searching with the original case and using lower case if that fails.
email_doc = web.ctx.site.store.get(
"account-email/" + email
) or web.ctx.site.store.get("account-email/" + email.lower())
doc = email_doc and web.ctx.site.store.get("account/" + email_doc['username'])
else:
doc = None
return doc and Account(doc)
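# Illustrative sketch (added for clarity, not in the original module); the values
# below are placeholders:
#
#     account = find(username='someuser')        # exact username lookup
#     account = find(email='[email protected]')  # case-insensitive via account-email doc
#
# Each call returns an Account instance, or None when no match is found.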
def register(username, email, password, displayname):
web.ctx.site.register(
username=username, email=email, password=password, displayname=displayname
)
def login(username, password):
web.ctx.site.login(username, password)
def update_account(username, **kargs):
web.ctx.site.update_account(username, **kargs)
def get_link(code: str) -> Link | bool:
docs = web.ctx.site.store.values(type="account-link", name="code", value=code)
if docs:
doc = docs[0]
return Link(doc)
else:
return False
| 3,526 | Python | .py | 88 | 33.488636 | 87 | 0.661096 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
270 | model.py | internetarchive_openlibrary/openlibrary/accounts/model.py | """
"""
import secrets
import time
import datetime
import hashlib
import hmac
import random
import string
from typing import TYPE_CHECKING, Any
import uuid
import logging
import requests
from validate_email import validate_email
import web
from infogami import config
from infogami.utils.view import render_template, public
from infogami.infobase.client import ClientException
from openlibrary.core import stats, helpers
from openlibrary.core.booknotes import Booknotes
from openlibrary.core.bookshelves import Bookshelves
from openlibrary.core.observations import Observations
from openlibrary.core.ratings import Ratings
from openlibrary.core.edits import CommunityEditsQueue
try:
from simplejson.errors import JSONDecodeError
except ImportError:
from json.decoder import JSONDecodeError # type: ignore[misc, assignment]
if TYPE_CHECKING:
from openlibrary.plugins.upstream.models import User
logger = logging.getLogger("openlibrary.account.model")
class OLAuthenticationError(Exception):
pass
def append_random_suffix(text, limit=9999):
return f'{text}{random.randint(0, limit)}'
def valid_email(email):
return validate_email(email)
def sendmail(to, msg, cc=None):
cc = cc or []
if config.get('dummy_sendmail'):
message = (
f"To: {to}\n"
f"From:{config.from_address}\n"
f"Subject: {msg.subject}\n"
f"\n{web.safestr(msg)}"
)
print("sending email", message, file=web.debug)
else:
web.sendmail(
config.from_address,
to,
subject=msg.subject.strip(),
message=web.safestr(msg),
cc=cc,
)
def verify_hash(secret_key, text, hash):
"""Verifies if the hash is generated"""
salt = hash.split('$', 1)[0]
return generate_hash(secret_key, text, salt) == hash
def generate_hash(secret_key, text, salt=None):
if not isinstance(secret_key, bytes):
secret_key = secret_key.encode('utf-8')
salt = (
salt
or hmac.HMAC(
secret_key, str(random.random()).encode('utf-8'), hashlib.md5
).hexdigest()[:5]
)
hash = hmac.HMAC(
secret_key, (salt + web.safestr(text)).encode('utf-8'), hashlib.md5
).hexdigest()
return f'{salt}${hash}'
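# Illustrative sketch (added for clarity, not in the original module): the value
# returned by generate_hash() has the form "<salt>$<hmac-md5 hexdigest>"; because
# the salt is embedded as a prefix, verify_hash() can recompute and compare it:
#
#     hashed = generate_hash('not-the-real-secret', 'some text')
#     assert verify_hash('not-the-real-secret', 'some text', hashed)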
def get_secret_key():
return config.infobase['secret_key']
def generate_uuid():
return str(uuid.uuid4()).replace("-", "")
def send_verification_email(username, email):
"""Sends account verification email."""
key = "account/%s/verify" % username
doc = create_link_doc(key, username, email)
web.ctx.site.store[key] = doc
link = web.ctx.home + "/account/verify/" + doc['code']
msg = render_template(
"email/account/verify", username=username, email=email, password=None, link=link
)
sendmail(email, msg)
def create_link_doc(key, username, email):
"""Creates doc required for generating verification link email.
The doc contains username, email and a generated code.
"""
code = generate_uuid()
now = datetime.datetime.now()
expires = now + datetime.timedelta(days=14)
return {
"_key": key,
"_rev": None,
"type": "account-link",
"username": username,
"email": email,
"code": code,
"created_on": now.isoformat(),
"expires_on": expires.isoformat(),
}
def clear_cookies():
web.setcookie('pd', "", expires=-1)
web.setcookie('sfw', "", expires=-1)
class Link(web.storage):
def get_expiration_time(self):
d = self['expires_on'].split(".")[0]
return datetime.datetime.strptime(d, "%Y-%m-%dT%H:%M:%S")
def get_creation_time(self):
d = self['created_on'].split(".")[0]
return datetime.datetime.strptime(d, "%Y-%m-%dT%H:%M:%S")
def delete(self):
del web.ctx.site.store[self['_key']]
class Account(web.storage):
@property
def username(self):
return self._key.split("/")[-1]
def get_edit_count(self):
user = self.get_user()
return user and user.get_edit_count() or 0
@property
def registered_on(self):
"""Returns the registration time."""
t = self.get("created_on")
return t and helpers.parse_datetime(t)
@property
def activated_on(self):
user = self.get_user()
return user and user.created
@property
def displayname(self):
if doc := self.get_user():
return doc.displayname or self.username
elif "data" in self:
return self.data.get("displayname") or self.username
else:
return self.username
def creation_time(self):
d = self['created_on'].split(".")[0]
return datetime.datetime.strptime(d, "%Y-%m-%dT%H:%M:%S")
def get_recentchanges(self, limit=100, offset=0):
q = {"author": self.get_user().key, "limit": limit, "offset": offset}
return web.ctx.site.recentchanges(q)
def verify_password(self, password):
return verify_hash(get_secret_key(), password, self.enc_password)
def update_password(self, new_password):
web.ctx.site.update_account(self.username, password=new_password)
def update_email(self, email):
web.ctx.site.update_account(self.username, email=email)
def send_verification_email(self):
send_verification_email(self.username, self.email)
def activate(self):
web.ctx.site.activate_account(username=self.username)
def block(self):
"""Blocks this account."""
web.ctx.site.update_account(self.username, status="blocked")
def unblock(self):
"""Unblocks this account."""
web.ctx.site.update_account(self.username, status="active")
def is_blocked(self) -> bool:
"""Tests if this account is blocked."""
return getattr(self, 'status', '') == "blocked"
def login(self, password):
"""Tries to login with the given password and returns the status.
The return value can be one of the following:
* ok
* account_not_verified
* account_not_found
* account_incorrect_password
* account_blocked
If the login is successful, the `last_login` time is updated.
"""
if self.is_blocked():
return "account_blocked"
try:
web.ctx.site.login(self.username, password)
except ClientException as e:
code = e.get_data().get("code")
return code
else:
self['last_login'] = datetime.datetime.utcnow().isoformat()
self._save()
return "ok"
@classmethod
def generate_random_password(cls, n=12):
return ''.join(
random.SystemRandom().choice(string.ascii_uppercase + string.digits)
for _ in range(n)
)
def generate_login_code(self):
"""Returns a string that can be set as login cookie to log in as this user."""
user_key = "/people/" + self.username
t = datetime.datetime(*time.gmtime()[:6]).isoformat()
text = f"{user_key},{t}"
return text + "," + generate_hash(get_secret_key(), text)
def _save(self):
"""Saves this account in store."""
web.ctx.site.store[self._key] = self
@property
def last_login(self):
"""Returns the last_login time of the user, if available.
        The `last_login` will not be available for accounts that haven't
        logged in since this feature was added.
"""
t = self.get("last_login")
return t and helpers.parse_datetime(t)
def get_user(self) -> 'User':
"""A user is where preferences are attached to an account. An
"Account" is outside of infogami in a separate table and is
used to store private user information.
:rtype: User
:returns: Not an Account obj, but a /people/xxx User
"""
key = "/people/" + self.username
return web.ctx.site.get(key)
def get_creation_info(self):
key = "/people/" + self.username
doc = web.ctx.site.get(key)
return doc.get_creation_info()
def get_activation_link(self):
key = "account/%s/verify" % self.username
if doc := web.ctx.site.store.get(key):
return Link(doc)
else:
return False
def get_password_reset_link(self):
key = "account/%s/password" % self.username
if doc := web.ctx.site.store.get(key):
return Link(doc)
else:
return False
def get_links(self):
"""Returns all the verification links present in the database."""
return web.ctx.site.store.values(
type="account-link", name="username", value=self.username
)
def get_tags(self) -> list[str]:
"""Returns list of tags that this user has."""
return self.get("tags", [])
def has_tag(self, tag: str) -> bool:
return tag in self.get_tags()
def add_tag(self, tag):
tags = self.get_tags()
if tag not in tags:
tags.append(tag)
self['tags'] = tags
self._save()
def remove_tag(self, tag):
tags = self.get_tags()
if tag in tags:
tags.remove(tag)
self['tags'] = tags
self._save()
def set_bot_flag(self, flag):
"""Enables/disables the bot flag."""
self.bot = flag
self._save()
def anonymize(self, test=False):
# Generate new unique username for patron:
# Note: Cannot test get_activation_link() locally
uuid = (
self.get_activation_link()['code']
if self.get_activation_link()
else generate_uuid()
)
new_username = f'anonymous-{uuid}'
results = {'new_username': new_username}
# Delete all of the patron's book notes:
results['booknotes_count'] = Booknotes.delete_all_by_username(
self.username, _test=test
)
# Anonymize patron's username in OL DB tables:
results['ratings_count'] = Ratings.update_username(
self.username, new_username, _test=test
)
results['observations_count'] = Observations.update_username(
self.username, new_username, _test=test
)
results['bookshelves_count'] = Bookshelves.update_username(
self.username, new_username, _test=test
)
results['merge_request_count'] = CommunityEditsQueue.update_submitter_name(
self.username, new_username, _test=test
)
if not test:
patron = self.get_user()
email = self.email
username = self.username
# Remove patron from all usergroups:
for grp in patron.usergroups:
grp.remove_user(patron.key)
# Set preferences to default:
patron.save_preferences({'updates': 'no', 'public_readlog': 'no'})
# Clear patron's profile page:
data = {'key': patron.key, 'type': '/type/delete'}
patron.set_data(data)
# Remove account information from store:
del web.ctx.site.store[f'account/{username}']
del web.ctx.site.store[f'account/{username}/verify']
del web.ctx.site.store[f'account/{username}/password']
del web.ctx.site.store[f'account-email/{email}']
return results
@property
def itemname(self) -> str | None:
"""Retrieves the Archive.org itemname which links Open Library and
Internet Archive accounts
"""
return getattr(self, 'internetarchive_itemname', None)
def get_linked_ia_account(self):
if self.itemname:
act = InternetArchiveAccount.xauth('info', itemname=self.itemname)
if 'values' in act and 'email' in act['values']:
return InternetArchiveAccount.get(email=act['values']['email'])
def render_link(self):
return f'<a href="/people/{self.username}">{web.net.htmlquote(self.displayname)}</a>'
class OpenLibraryAccount(Account):
@classmethod
def create(
cls,
username,
email,
password,
displayname=None,
verified=False,
retries=0,
test=False,
):
"""
Args:
username (unicode) - the username (slug) of the account.
Usernames must be unique
email (unicode) - the login and email of the account
password (unicode)
displayname (unicode) - human readable, changeable screenname
retries (int) - If the username is unavailable, how many
subsequent attempts should be made to find
an available username.
"""
if cls.get(email=email):
raise ValueError('email_registered')
username = username[1:] if username[0] == '@' else username
displayname = displayname or username
# tests whether a user w/ this username exists
_user = cls.get(username=username)
new_username = username
attempt = 0
while _user:
if attempt >= retries:
ve = ValueError('username_registered')
ve.value = username
raise ve
new_username = append_random_suffix(username)
attempt += 1
_user = cls.get(username=new_username)
username = new_username
if test:
return cls(
itemname=f'@{username}',
email=email,
username=username,
displayname=displayname,
test=True,
)
try:
account = web.ctx.site.register(
username=username,
email=email,
password=password,
displayname=displayname,
)
except ClientException as e:
raise ValueError('something_went_wrong')
if verified:
key = "account/%s/verify" % username
doc = create_link_doc(key, username, email)
web.ctx.site.store[key] = doc
web.ctx.site.activate_account(username=username)
ol_account = cls.get(email=email)
# Update user preferences; reading log public by default
from openlibrary.accounts import RunAs
with RunAs(username):
ol_account.get_user().save_preferences({'public_readlog': 'yes'})
return ol_account
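    # Illustrative sketch (added for clarity, not in the original source); all
    # values below are placeholders:
    #
    #     ol_account = OpenLibraryAccount.create(
    #         'someuser', '[email protected]', 'some-password',
    #         displayname='Some User', verified=True, retries=5)
    #
    # Raises ValueError('email_registered') or ValueError('username_registered')
    # when the email or username is already taken (after exhausting retries).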
@classmethod
def get(
cls,
link: str | None = None,
email: str | None = None,
username: str | None = None,
key: str | None = None,
test: bool = False,
) -> 'OpenLibraryAccount | None':
"""Utility method retrieve an openlibrary account by its email,
username or archive.org itemname (i.e. link)
"""
if link:
return cls.get_by_link(link, test=test)
elif email:
return cls.get_by_email(email, test=test)
elif username:
return cls.get_by_username(username, test=test)
elif key:
return cls.get_by_key(key, test=test)
raise ValueError("Open Library email or Archive.org itemname required.")
@classmethod
def get_by_key(cls, key, test=False):
username = key.split('/')[-1]
return cls.get_by_username(username)
@classmethod
def get_by_username(
cls, username: str, test: bool = False
) -> 'OpenLibraryAccount | None':
"""Retrieves and OpenLibraryAccount by username if it exists or"""
match = web.ctx.site.store.values(
type="account", name="username", value=username, limit=1
)
if len(match):
return cls(match[0])
lower_match = web.ctx.site.store.values(
type="account", name="lusername", value=username, limit=1
)
if len(lower_match):
return cls(lower_match[0])
return None
@classmethod
def get_by_link(cls, link: str, test: bool = False) -> 'OpenLibraryAccount | None':
"""
:rtype: OpenLibraryAccount or None
"""
ol_accounts = web.ctx.site.store.values(
type="account", name="internetarchive_itemname", value=link
)
return cls(ol_accounts[0]) if ol_accounts else None
@classmethod
def get_by_email(
cls, email: str, test: bool = False
) -> 'OpenLibraryAccount | None':
"""the email stored in account doc is case-sensitive.
The lowercase of email is used in the account-email document.
querying that first and taking the username from there to make
the email search case-insensitive.
There are accounts with case-variation of emails. To handle
those, searching with the original case and using lower case
if that fails.
"""
email = email.strip()
email_doc = web.ctx.site.store.get(
"account-email/" + email
) or web.ctx.site.store.get("account-email/" + email.lower())
if email_doc and 'username' in email_doc:
doc = web.ctx.site.store.get("account/" + email_doc['username'])
return cls(doc) if doc else None
return None
@property
def verified(self):
return getattr(self, 'status', '') != 'pending'
@property
def blocked(self):
return getattr(self, 'status', '') == 'blocked'
def unlink(self):
"""Careful, this will save any other changes to the ol user object as
well
"""
_ol_account = web.ctx.site.store.get(self._key)
_ol_account['internetarchive_itemname'] = None
web.ctx.site.store[self._key] = _ol_account
self.internetarchive_itemname = None
stats.increment('ol.account.xauth.unlinked')
def link(self, itemname):
"""Careful, this will save any other changes to the ol user object as
well
"""
itemname = itemname if itemname.startswith('@') else '@%s' % itemname
_ol_account = web.ctx.site.store.get(self._key)
_ol_account['internetarchive_itemname'] = itemname
web.ctx.site.store[self._key] = _ol_account
self.internetarchive_itemname = itemname
stats.increment('ol.account.xauth.linked')
def save_s3_keys(self, s3_keys):
_ol_account = web.ctx.site.store.get(self._key)
_ol_account['s3_keys'] = s3_keys
web.ctx.site.store[self._key] = _ol_account
self.s3_keys = s3_keys
def update_last_login(self):
_ol_account = web.ctx.site.store.get(self._key)
last_login = datetime.datetime.utcnow().isoformat()
_ol_account['last_login'] = last_login
web.ctx.site.store[self._key] = _ol_account
self.last_login = last_login
@classmethod
def authenticate(cls, email, password, test=False):
ol_account = cls.get(email=email, test=test)
if not ol_account:
return "account_not_found"
if ol_account.is_blocked():
return "account_blocked"
try:
web.ctx.site.login(ol_account.username, password)
except ClientException as e:
code = e.get_data().get("code")
return code
else:
return "ok"
class InternetArchiveAccount(web.storage):
def __init__(self, **kwargs):
for k in kwargs:
setattr(self, k, kwargs[k])
@classmethod
def create(
cls,
screenname,
email,
password,
notifications=None,
retries=0,
verified=False,
test=None,
):
"""
:param unicode screenname: changeable human readable archive.org username.
The slug / itemname is generated automatically from this value.
:param unicode email:
:param unicode password:
:param List[Union[
Literal['ml_best_of'], Literal['ml_donors'],
Literal['ml_events'], Literal['ml_updates']
]] notifications:
newsletters to subscribe user to (NOTE: these must be kept in sync
with the values in the `MAILING_LIST_KEYS` array in
https://git.archive.org/ia/petabox/blob/master/www/common/MailSync/Settings.inc)
:param int retries: If the username is unavailable, how many
subsequent attempts should be made to find an available
username.
"""
email = email.strip().lower()
screenname = screenname[1:] if screenname[0] == '@' else screenname
notifications = notifications or []
if cls.get(email=email):
raise OLAuthenticationError('email_registered')
if not screenname:
raise OLAuthenticationError('missing_fields')
_screenname = screenname
attempt = 0
while True:
response = cls.xauth(
'create',
email=email,
password=password,
screenname=_screenname,
notifications=notifications,
test=test,
verified=verified,
service='openlibrary',
)
if response.get('success'):
ia_account = cls.get(email=email)
if test:
ia_account.test = True
return ia_account
elif 'screenname' not in response.get('values', {}):
raise OLAuthenticationError('undefined_error')
elif attempt >= retries:
e = OLAuthenticationError('username_registered')
e.value = _screenname
raise e
_screenname = append_random_suffix(screenname)
attempt += 1
@classmethod
def xauth(cls, op, test=None, s3_key=None, s3_secret=None, xauth_url=None, **data):
"""
See https://git.archive.org/ia/petabox/tree/master/www/sf/services/xauthn
"""
from openlibrary.core import lending
url = xauth_url or lending.config_ia_xauth_api_url
params = {'op': op}
data.update(
{
'access': s3_key or lending.config_ia_ol_xauth_s3.get('s3_key'),
'secret': s3_secret or lending.config_ia_ol_xauth_s3.get('s3_secret'),
}
)
# Currently, optional parameters, like `service` are passed as
# **kwargs (i.e. **data). The xauthn service uses the named
# parameter `activation-type` which contains a dash and thus
# is unsuitable as a kwarg name. Therefore, if we're
# performing an account `create` xauthn operation and the
# `service` parameter is present, we need to rename `service`
# as `activation-type` so it is forwarded correctly to xauth:
if op == 'create' and 'service' in data:
data['activation-type'] = data.pop('service')
if test:
params['developer'] = test
response = requests.post(url, params=params, json=data)
try:
# This API should always return json, even on error (Unless
# the server is down or something :P)
return response.json()
except ValueError:
return {'error': response.text, 'code': response.status_code}
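    # Illustrative sketch (added for clarity, not in the original source); the
    # email/password values below are placeholders:
    #
    #     InternetArchiveAccount.xauth(
    #         'authenticate', email='[email protected]', password='...')
    #
    # posts to the configured xauthn endpoint with params {'op': 'authenticate'}
    # and a JSON body containing the kwargs plus the configured s3 access/secret.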
@classmethod
def s3auth(cls, access_key, secret_key):
"""Authenticates an Archive.org user based on s3 keys"""
from openlibrary.core import lending
url = lending.config_ia_s3_auth_url
try:
response = requests.get(
url,
headers={
'Content-Type': 'application/json',
'authorization': f'LOW {access_key}:{secret_key}',
},
)
response.raise_for_status()
return response.json()
except requests.HTTPError as e:
return {'error': e.response.text, 'code': e.response.status_code}
except JSONDecodeError as e:
return {'error': str(e), 'code': response.status_code}
@classmethod
def get(
cls, email, test=False, _json=False, s3_key=None, s3_secret=None, xauth_url=None
):
email = email.strip().lower()
response = cls.xauth(
email=email,
test=test,
op="info",
s3_key=s3_key,
s3_secret=s3_secret,
xauth_url=xauth_url,
)
if 'success' in response:
values = response.get('values', {})
return values if _json else cls(**values)
@classmethod
def authenticate(cls, email, password, test=False):
email = email.strip().lower()
response = cls.xauth('authenticate', test=test, email=email, password=password)
if not response.get('success'):
reason = response['values'].get('reason')
if reason == 'account_not_verified':
response['values']['reason'] = 'ia_account_not_verified'
return response
def audit_accounts(
email,
password,
require_link=False,
s3_access_key=None,
s3_secret_key=None,
test=False,
):
"""Performs an audit of the IA or OL account having this email.
The audit:
- verifies the password is correct for this account
- aborts if any sort of error (e.g. account blocked, unverified)
- reports whether the account is linked (to a secondary account)
- if unlinked, reports whether a secondary account exists w/
matching email
Args:
email (unicode)
password (unicode)
require_link (bool) - if True, returns `accounts_not_connected`
if accounts are not linked
        test (bool) - not currently used; exists to allow testing in
            the absence of the archive.org dependency
"""
if s3_access_key and s3_secret_key:
r = InternetArchiveAccount.s3auth(s3_access_key, s3_secret_key)
if not r.get('authorized', False):
return {'error': 'invalid_s3keys'}
ia_login = {
'success': True,
'values': {'access': s3_access_key, 'secret': s3_secret_key},
}
email = r['username']
else:
if not valid_email(email):
return {'error': 'invalid_email'}
ia_login = InternetArchiveAccount.authenticate(email, password)
if 'values' in ia_login and any(
ia_login['values'].get('reason') == err
for err in ['account_blocked', 'account_locked']
):
return {'error': 'account_locked'}
if not ia_login.get('success'):
# Prioritize returning other errors over `account_not_found`
if ia_login['values'].get('reason') != "account_not_found":
return {'error': ia_login['values'].get('reason')}
return {'error': 'account_not_found'}
else:
ia_account = InternetArchiveAccount.get(email=email, test=test)
# Get the OL account which links to this IA account
ol_account = OpenLibraryAccount.get(link=ia_account.itemname, test=test)
link = ol_account.itemname if ol_account else None
# The fact that there is no link implies either:
# 1. There was no Open Library account ever linked to this IA account
        # 2. There is an OL account that was linked to this IA account at some
        #    point, but the link was later broken.
# Today, it is possible for #2 to occur if a patron creates an IA account, deletes said
# account, then creates a new IA account using the same email that was used to create the
# original account.
if not link:
# If no account linkage is found, then check if there's an Open Library account
# which shares the same email as this IA account.
ol_account = OpenLibraryAccount.get(email=email, test=test)
# If an Open Library account with a matching email account exists...
# Check if it is linked already, i.e. has an itemname set. We already
# determined that no OL account is linked to our IA account. Therefore this
# Open Library account having the same email as our IA account must have
# been linked to a different Internet Archive account.
if ol_account and ol_account.itemname:
logger.error(
'IA <-> OL itemname mismatch',
extra={
'ol_itemname': ol_account.itemname,
'ia_itemname': ia_account.itemname,
},
)
ol_account.unlink()
ol_account.link(ia_account.itemname)
# At this point, it must either be the case that
# (a) `ol_account` already links to our IA account (in which case `link` has a
# correct value),
# (b) that an unlinked `ol_account` shares the same email as our IA account and
# thus can and should be safely linked to our IA account, or
# (c) no `ol_account` which is linked or can be linked has been found and
# therefore, assuming lending.config_ia_auth_only is enabled, we need to
# create and link it.
if not ol_account:
try:
ol_account = OpenLibraryAccount.create(
ia_account.itemname,
email,
# since switching to IA creds, OL password not used; make
# challenging random
secrets.token_urlsafe(32),
displayname=ia_account.screenname,
verified=True,
retries=5,
test=test,
)
except ValueError as e:
return {'error': 'max_retries_exceeded'}
ol_account.link(ia_account.itemname)
stats.increment('ol.account.xauth.ia-auto-created-ol')
# So long as there's either a linked OL account, or an unlinked OL account with
# the same email, set them as linked (and let the finalize logic link them, if
# needed)
else:
if not ol_account.itemname:
ol_account.link(ia_account.itemname)
stats.increment('ol.account.xauth.auto-linked')
if not ol_account.verified:
# The IA account is activated (verifying the integrity of their email),
# so we make a judgement call to safely activate them.
ol_account.activate()
if ol_account.blocked:
return {'error': 'account_blocked'}
if require_link:
ol_account = OpenLibraryAccount.get(link=ia_account.itemname, test=test)
if ol_account and not ol_account.itemname:
return {'error': 'accounts_not_connected'}
if 'values' in ia_login:
s3_keys = {
'access': ia_login['values'].pop('access'),
'secret': ia_login['values'].pop('secret'),
}
ol_account.save_s3_keys(s3_keys)
# When a user logs in with OL credentials, the web.ctx.site.login() is called with
# their OL user credentials, which internally sets an auth_token enabling the
# user's session. The web.ctx.site.login method requires OL credentials which are
# not present in the case where a user logs in with their IA credentials. As a
# result, when users login with their valid IA credentials, the following kludge
# allows us to fetch the OL account linked to their IA account, bypass this
# web.ctx.site.login method (which requires OL credentials), and directly set an
# auth_token to enable the user's session.
web.ctx.conn.set_auth_token(ol_account.generate_login_code())
ol_account.update_last_login()
return {
'authenticated': True,
'special_access': getattr(ia_account, 'has_disability_access', False),
'ia_email': ia_account.email,
'ol_email': ol_account.email,
'ia_username': ia_account.screenname,
'ol_username': ol_account.username,
'link': ol_account.itemname,
}
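# Illustrative note (added for clarity): on success audit_accounts() returns the
# dict built above ({'authenticated': True, 'ia_email': ..., 'ol_username': ...,
# 'link': ...}); every failure path instead returns a single-key dict such as
# {'error': 'account_not_found'} or {'error': 'invalid_s3keys'}.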
@public
def get_internet_archive_id(key: str) -> str | None:
username = key.split('/')[-1]
ol_account = OpenLibraryAccount.get(username=username)
return ol_account.itemname if ol_account else None
| 32,512 | Python | .py | 794 | 31.309824 | 97 | 0.602953 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
271 | test_dump.py | internetarchive_openlibrary/openlibrary/tests/data/test_dump.py | import json
from openlibrary.data.dump import print_dump, pgdecode
class TestPrintDump:
def test_fixes_prefixes(self, capsys):
records = [
{
"key": "/b/OL1M",
"type": {"key": "/type/edition"},
"revision": 1,
"last_modified": {"value": "2019-01-01T00:00:00.000"},
},
]
print_dump(map(json.dumps, records))
assert capsys.readouterr().out.strip() == "\t".join(
[
"/type/edition",
"/books/OL1M",
"1",
"2019-01-01T00:00:00.000",
json.dumps(
{
"key": "/books/OL1M",
"type": {"key": "/type/edition"},
"revision": 1,
"last_modified": {"value": "2019-01-01T00:00:00.000"},
}
),
]
)
def test_excludes_sensitive_pages(self, capsys):
records = [
{"key": "/people/foo"},
{"key": "/user/foo"},
{"key": "/admin/foo"},
]
print_dump(map(json.dumps, records))
assert capsys.readouterr().out == ""
def test_excludes_obsolete_pages(self, capsys):
records = [
{"key": "/scan_record/foo"},
{"key": "/old/what"},
]
print_dump(map(json.dumps, records))
assert capsys.readouterr().out == ""
class TestPgDecode:
def test_pgdecode_substitute(self):
assert pgdecode(r"\n\r\t\\") == "\n\r\t\\"
def test_pgdecode_ascii_printable(self):
import string
assert pgdecode(string.printable) == string.printable
| 1,737 | Python | .py | 50 | 22.48 | 78 | 0.4642 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
272 | test_utils.py | internetarchive_openlibrary/openlibrary/tests/solr/test_utils.py | import json
from unittest.mock import MagicMock
import httpx
from httpx import Response, ConnectError
from openlibrary.solr.utils import SolrUpdateRequest, solr_update
class TestSolrUpdate:
def sample_response_200(self):
return Response(
200,
request=MagicMock(),
content=json.dumps(
{
"responseHeader": {
"errors": [],
"maxErrors": -1,
"status": 0,
"QTime": 183,
}
}
),
)
def sample_global_error(self):
return Response(
400,
request=MagicMock(),
content=json.dumps(
{
'responseHeader': {
'errors': [],
'maxErrors': -1,
'status': 400,
'QTime': 76,
},
'error': {
'metadata': [
'error-class',
'org.apache.solr.common.SolrException',
'root-error-class',
'org.apache.solr.common.SolrException',
],
'msg': "Unknown key 'key' at [14]",
'code': 400,
},
}
),
)
def sample_individual_error(self):
return Response(
400,
request=MagicMock(),
content=json.dumps(
{
'responseHeader': {
'errors': [
{
'type': 'ADD',
'id': '/books/OL1M',
'message': '[doc=/books/OL1M] missing required field: type',
}
],
'maxErrors': -1,
'status': 0,
'QTime': 10,
}
}
),
)
def sample_response_503(self):
return Response(
503,
request=MagicMock(),
content=b"<html><body><h1>503 Service Unavailable</h1>",
)
def test_successful_response(self, monkeypatch, monkeytime):
mock_post = MagicMock(return_value=self.sample_response_200())
monkeypatch.setattr(httpx, "post", mock_post)
solr_update(
SolrUpdateRequest(commit=True),
solr_base_url="http://localhost:8983/solr/foobar",
)
assert mock_post.call_count == 1
def test_non_json_solr_503(self, monkeypatch, monkeytime):
mock_post = MagicMock(return_value=self.sample_response_503())
monkeypatch.setattr(httpx, "post", mock_post)
solr_update(
SolrUpdateRequest(commit=True),
solr_base_url="http://localhost:8983/solr/foobar",
)
assert mock_post.call_count > 1
def test_solr_offline(self, monkeypatch, monkeytime):
mock_post = MagicMock(side_effect=ConnectError('', request=None))
monkeypatch.setattr(httpx, "post", mock_post)
solr_update(
SolrUpdateRequest(commit=True),
solr_base_url="http://localhost:8983/solr/foobar",
)
assert mock_post.call_count > 1
def test_invalid_solr_request(self, monkeypatch, monkeytime):
mock_post = MagicMock(return_value=self.sample_global_error())
monkeypatch.setattr(httpx, "post", mock_post)
solr_update(
SolrUpdateRequest(commit=True),
solr_base_url="http://localhost:8983/solr/foobar",
)
assert mock_post.call_count == 1
def test_bad_apple_in_solr_request(self, monkeypatch, monkeytime):
mock_post = MagicMock(return_value=self.sample_individual_error())
monkeypatch.setattr(httpx, "post", mock_post)
solr_update(
SolrUpdateRequest(commit=True),
solr_base_url="http://localhost:8983/solr/foobar",
)
assert mock_post.call_count == 1
def test_other_non_ok_status(self, monkeypatch, monkeytime):
mock_post = MagicMock(
return_value=Response(500, request=MagicMock(), content="{}")
)
monkeypatch.setattr(httpx, "post", mock_post)
solr_update(
SolrUpdateRequest(commit=True),
solr_base_url="http://localhost:8983/solr/foobar",
)
assert mock_post.call_count > 1
| 4,647 | Python | .py | 123 | 23.439024 | 92 | 0.492109 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
273 | test_types_generator.py | internetarchive_openlibrary/openlibrary/tests/solr/test_types_generator.py | import os
from openlibrary.solr.types_generator import generate
root = os.path.dirname(__file__)
def test_up_to_date():
types_path = os.path.join(root, '..', '..', 'solr', 'solr_types.py')
assert (
generate().strip() == open(types_path).read().strip()
), """
This auto-generated file is out-of-date. Run:
./openlibrary/solr/types_generator.py > ./openlibrary/solr/solr_types.py
"""
| 419 | Python | .py | 11 | 33.727273 | 76 | 0.650124 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
274 | test_update.py | internetarchive_openlibrary/openlibrary/tests/solr/test_update.py | import pytest
from openlibrary.core.ratings import WorkRatingsSummary
from openlibrary.solr import update
from openlibrary.solr.data_provider import DataProvider, WorkReadingLogSolrSummary
author_counter = 0
edition_counter = 0
work_counter = 0
def make_author(**kw):
"""
Create a fake author
:param kw: author data
:rtype: dict
"""
global author_counter
author_counter += 1
kw.setdefault("key", "/authors/OL%dA" % author_counter)
kw.setdefault("type", {"key": "/type/author"})
kw.setdefault("name", "Foo")
return kw
def make_edition(work=None, **kw):
"""
Create a fake edition
:param dict work: Work dict which this is an edition of
:param kw: edition data
:rtype: dict
"""
global edition_counter
edition_counter += 1
kw.setdefault("key", "/books/OL%dM" % edition_counter)
kw.setdefault("type", {"key": "/type/edition"})
kw.setdefault("title", "Foo")
if work:
kw.setdefault("works", [{"key": work["key"]}])
return kw
def make_work(**kw):
"""
Create a fake work
:param kw:
:rtype: dict
"""
global work_counter
work_counter += 1
kw.setdefault("key", "/works/OL%dW" % work_counter)
kw.setdefault("type", {"key": "/type/work"})
kw.setdefault("title", "Foo")
return kw
class FakeDataProvider(DataProvider):
"""Stub data_provider and methods which are used by build_data."""
docs: list = []
docs_by_key: dict = {}
def __init__(self, docs=None):
        """
        :param list[dict] docs: Documents in the DataProvider
        """
        docs = docs or []
self.docs = docs
self.docs_by_key = {doc["key"]: doc for doc in docs}
def add_docs(self, docs):
self.docs.extend(docs)
self.docs_by_key.update({doc["key"]: doc for doc in docs})
def find_redirects(self, key):
return []
async def get_document(self, key):
return self.docs_by_key.get(key)
def get_editions_of_work(self, work):
return [
doc for doc in self.docs if {"key": work["key"]} in doc.get("works", [])
]
def get_metadata(self, id):
return {}
def get_work_ratings(self, work_key: str) -> WorkRatingsSummary | None:
return None
def get_work_reading_log(self, work_key: str) -> WorkReadingLogSolrSummary | None:
return None
class Test_update_keys:
@classmethod
def setup_class(cls):
update.data_provider = FakeDataProvider()
@pytest.mark.asyncio
async def test_delete(self):
update.data_provider.add_docs(
[
{'key': '/works/OL23W', 'type': {'key': '/type/delete'}},
make_author(key='/authors/OL23A', type={'key': '/type/delete'}),
{'key': '/books/OL23M', 'type': {'key': '/type/delete'}},
]
)
update_state = await update.update_keys(
[
'/works/OL23W',
'/authors/OL23A',
'/books/OL23M',
],
update='quiet',
)
assert set(update_state.deletes) == {
'/works/OL23W',
'/authors/OL23A',
'/books/OL23M',
}
assert update_state.adds == []
@pytest.mark.asyncio
async def test_redirects(self):
update.data_provider.add_docs(
[
{
'key': '/books/OL23M',
'type': {'key': '/type/redirect'},
'location': '/books/OL24M',
},
{'key': '/books/OL24M', 'type': {'key': '/type/delete'}},
]
)
update_state = await update.update_keys(['/books/OL23M'], update='quiet')
assert update_state.deletes == ['/books/OL23M', '/books/OL24M']
assert update_state.adds == []
| 3,860 | Python | .py | 116 | 25.362069 | 86 | 0.569123 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
275 | test_data_provider.py | internetarchive_openlibrary/openlibrary/tests/solr/test_data_provider.py | from unittest.mock import MagicMock
import pytest
from infogami.infobase.client import Thing
from openlibrary.solr.data_provider import BetterDataProvider
class TestBetterDataProvider:
@pytest.mark.asyncio
async def test_get_document(self):
mock_site = MagicMock()
dp = BetterDataProvider(
site=mock_site,
db=MagicMock(),
)
mock_site.get_many.return_value = [
Thing(
mock_site,
'/works/OL1W',
{
'key': '/works/OL1W',
'type': {'key': '/type/work'},
},
)
]
assert mock_site.get_many.call_count == 0
await dp.get_document('/works/OL1W')
assert mock_site.get_many.call_count == 1
await dp.get_document('/works/OL1W')
assert mock_site.get_many.call_count == 1
@pytest.mark.asyncio
async def test_clear_cache(self):
mock_site = MagicMock()
dp = BetterDataProvider(
site=mock_site,
db=MagicMock(),
)
mock_site.get_many.return_value = [
Thing(
mock_site,
'/works/OL1W',
{
'key': '/works/OL1W',
'type': {'key': '/type/work'},
},
)
]
assert mock_site.get_many.call_count == 0
await dp.get_document('/works/OL1W')
assert mock_site.get_many.call_count == 1
dp.clear_cache()
await dp.get_document('/works/OL1W')
assert mock_site.get_many.call_count == 2
| 1,634 | Python | .py | 50 | 21.82 | 61 | 0.522483 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
276 | test_query_utils.py | internetarchive_openlibrary/openlibrary/tests/solr/test_query_utils.py | import pytest
from openlibrary.solr.query_utils import (
EmptyTreeError,
luqum_parser,
luqum_remove_child,
luqum_replace_child,
luqum_traverse,
luqum_replace_field,
luqum_remove_field,
)
REMOVE_TESTS = {
'Complete match': ('title:foo', 'title:foo', ''),
'Binary Op Left': ('title:foo OR bar:baz', 'bar:baz', 'title:foo'),
'Binary Op Right': ('title:foo OR bar:baz', 'title:foo', 'bar:baz'),
'Group': ('(title:foo)', 'title:foo', ''),
'Unary': ('NOT title:foo', 'title:foo', ''),
}
@pytest.mark.parametrize(
"query,to_rem,expected", REMOVE_TESTS.values(), ids=REMOVE_TESTS.keys()
)
def test_luqum_remove_child(query: str, to_rem: str, expected: str):
def fn(query: str, remove: str) -> str:
q_tree = luqum_parser(query)
for node, parents in luqum_traverse(q_tree):
if str(node).strip() == remove:
try:
luqum_remove_child(node, parents)
except EmptyTreeError:
return ''
return str(q_tree).strip()
assert fn(query, to_rem) == expected
REPLACE_TESTS = {
'Complex replace': (
'title:foo OR id:1',
'title:foo',
'(title:foo OR bar:foo)',
'(title:foo OR bar:foo)OR id:1',
),
'Deeply nested': (
'title:foo OR (id:1 OR id:2)',
'id:2',
'(subject:horror)',
'title:foo OR (id:1 OR(subject:horror))',
),
}
@pytest.mark.parametrize(
"query,to_rep,rep_with,expected", REPLACE_TESTS.values(), ids=REPLACE_TESTS.keys()
)
def test_luqum_replace_child(query: str, to_rep: str, rep_with: str, expected: str):
def fn(query: str, to_replace: str, replace_with: str) -> str:
q_tree = luqum_parser(query)
for node, parents in luqum_traverse(q_tree):
if str(node).strip() == to_replace:
luqum_replace_child(parents[-1], node, luqum_parser(replace_with))
break
return str(q_tree).strip()
assert fn(query, to_rep, rep_with) == expected
def test_luqum_parser():
def fn(query: str) -> str:
return str(luqum_parser(query))
assert fn('title:foo') == 'title:foo'
assert fn('title:foo bar') == 'title:(foo bar)'
assert fn('title:foo AND bar') == 'title:(foo AND bar)'
assert fn('title:foo AND bar AND by:boo') == 'title:(foo AND bar) AND by:boo'
assert (
fn('title:foo AND bar AND by:boo blah blah')
== 'title:(foo AND bar) AND by:(boo blah blah)'
)
assert (
fn('title:foo AND bar AND NOT by:boo') == 'title:(foo AND bar) AND NOT by:boo'
)
assert (
fn('title:(foo bar) AND NOT title:blue') == 'title:(foo bar) AND NOT title:blue'
)
assert fn('no fields here!') == 'no fields here!'
# This is non-ideal
assert fn('NOT title:foo bar') == 'NOT title:foo bar'
def test_luqum_replace_field():
def replace_work_prefix(string: str):
return string.partition(".")[2] if string.startswith("work.") else string
def fn(query: str) -> str:
q = luqum_parser(query)
luqum_replace_field(q, replace_work_prefix)
return str(q)
assert fn('work.title:Bob') == 'title:Bob'
assert fn('title:Joe') == 'title:Joe'
assert fn('work.title:Bob work.title:OL5M') == 'title:Bob title:OL5M'
assert fn('edition_key:Joe OR work.title:Bob') == 'edition_key:Joe OR title:Bob'
def test_luqum_remove_field():
def fn(query: str) -> str:
q = luqum_parser(query)
try:
luqum_remove_field(q, lambda x: x.startswith("edition."))
return str(q).strip()
except EmptyTreeError:
return '*:*'
assert fn('edition.title:Bob') == '*:*'
assert fn('title:Joe') == 'title:Joe'
assert fn('edition.title:Bob edition.title:OL5M') == '*:*'
assert fn('edition_key:Joe OR edition.title:Bob') == 'edition_key:Joe'
assert fn('edition.title:Joe OR work.title:Bob') == 'work.title:Bob'
# Test brackets
assert fn('(edition.title:Bob)') == '*:*'
assert fn('(edition.title:Bob OR edition.title:OL5M)') == '*:*'
# Note some weirdness with spaces
assert fn('(edition.title:Bob OR work.title:OL5M)') == '( work.title:OL5M)'
assert fn('edition.title:Bob OR (work.title:OL5M)') == '(work.title:OL5M)'
assert fn('edition.title: foo bar bar author: blah') == 'author:blah'
| 4,376 | Python | .py | 108 | 33.851852 | 88 | 0.605461 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
277 | test_author.py | internetarchive_openlibrary/openlibrary/tests/solr/updater/test_author.py | import httpx
import pytest
from openlibrary.solr.updater.author import AuthorSolrUpdater
from openlibrary.tests.solr.test_update import FakeDataProvider, make_author
class MockResponse:
def __init__(self, json_data, status_code=200):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
class TestAuthorUpdater:
@pytest.mark.asyncio
async def test_workless_author(self, monkeypatch):
class MockAsyncClient:
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
pass
async def post(self, *a, **kw):
return MockResponse(
{
"facets": {
"ratings_count_1": 0.0,
"ratings_count_2": 0.0,
"ratings_count_3": 0.0,
"ratings_count_4": 0.0,
"ratings_count_5": 0.0,
"subject_facet": {"buckets": []},
"place_facet": {"buckets": []},
"time_facet": {"buckets": []},
"person_facet": {"buckets": []},
},
"response": {"numFound": 0},
}
)
monkeypatch.setattr(httpx, 'AsyncClient', MockAsyncClient)
req, _ = await AuthorSolrUpdater(FakeDataProvider()).update_key(
make_author(key='/authors/OL25A', name='Somebody')
)
assert req.deletes == []
assert len(req.adds) == 1
assert req.adds[0]['key'] == "/authors/OL25A"
| 1,756 | Python | .py | 42 | 27 | 76 | 0.490035 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
278 | test_work.py | internetarchive_openlibrary/openlibrary/tests/solr/updater/test_work.py | import pytest
from openlibrary.solr.updater.work import (
WorkSolrBuilder,
WorkSolrUpdater,
)
from openlibrary.tests.solr.test_update import (
FakeDataProvider,
make_author,
make_edition,
make_work,
)
def sorted_split_semicolon(s):
"""
>>> sorted_split_semicolon("z;c;x;a;y;b")
['a', 'b', 'c', 'x', 'y', 'z']
"""
return sorted(s.split(';'))
sss = sorted_split_semicolon
class TestWorkSolrUpdater:
@pytest.mark.asyncio
async def test_no_title(self):
req, _ = await WorkSolrUpdater(FakeDataProvider()).update_key(
{'key': '/books/OL1M', 'type': {'key': '/type/edition'}}
)
assert len(req.deletes) == 0
assert len(req.adds) == 1
assert req.adds[0]['title'] == "__None__"
req, _ = await WorkSolrUpdater(FakeDataProvider()).update_key(
{'key': '/works/OL23W', 'type': {'key': '/type/work'}}
)
assert len(req.deletes) == 0
assert len(req.adds) == 1
assert req.adds[0]['title'] == "__None__"
@pytest.mark.asyncio
async def test_work_no_title(self):
work = {'key': '/works/OL23W', 'type': {'key': '/type/work'}}
ed = make_edition(work)
ed['title'] = 'Some Title!'
req, _ = await WorkSolrUpdater(FakeDataProvider([work, ed])).update_key(work)
assert len(req.deletes) == 0
assert len(req.adds) == 1
assert req.adds[0]['title'] == "Some Title!"
@pytest.mark.asyncio
async def test_edition_count_when_editions_in_data_provider(self):
work = make_work()
req, _ = await WorkSolrUpdater(FakeDataProvider()).update_key(work)
assert req.adds[0]['edition_count'] == 0
req, _ = await WorkSolrUpdater(
FakeDataProvider([work, make_edition(work)])
).update_key(work)
assert req.adds[0]['edition_count'] == 1
req, _ = await WorkSolrUpdater(
FakeDataProvider([work, make_edition(work), make_edition(work)])
).update_key(work)
assert req.adds[0]['edition_count'] == 2
class TestWorkSolrBuilder:
def test_simple_work(self):
work = {"key": "/works/OL1M", "type": {"key": "/type/work"}, "title": "Foo"}
wsb = WorkSolrBuilder(work, [], [], FakeDataProvider(), {})
assert wsb.key == "/works/OL1M"
assert wsb.title == "Foo"
assert wsb.has_fulltext is False
assert wsb.edition_count == 0
def test_edition_count_when_editions_on_work(self):
work = make_work()
wsb = WorkSolrBuilder(work, [], [], FakeDataProvider(), {})
assert wsb.edition_count == 0
wsb = WorkSolrBuilder(work, [make_edition()], [], FakeDataProvider(), {})
assert wsb.edition_count == 1
wsb = WorkSolrBuilder(
work, [make_edition(), make_edition()], [], FakeDataProvider(), {}
)
assert wsb.edition_count == 2
def test_edition_key(self):
wsb = WorkSolrBuilder(
work={},
editions=[
{'key': '/books/OL1M'},
{'key': '/books/OL2M'},
{'key': '/books/OL3M'},
],
authors=[],
data_provider=FakeDataProvider(),
ia_metadata={},
)
assert wsb.edition_key == ["OL1M", "OL2M", "OL3M"]
def test_publish_year(self):
test_dates = [
"2000",
"Another 2000",
"2001-01-02", # ISO 8601 formatted dates now supported
"01-02-2003",
"2004 May 23",
"Jan 2002",
"Bad date 12",
"Bad date 123412314",
]
work = make_work()
wsb = WorkSolrBuilder(
work=work,
editions=[make_edition(work, publish_date=date) for date in test_dates],
authors=[],
data_provider=FakeDataProvider(),
ia_metadata={},
)
assert wsb.publish_year == {2000, 2001, 2002, 2003, 2004}
assert wsb.first_publish_year == 2000
def test_isbns(self):
work = make_work()
wsb = WorkSolrBuilder(
work,
[make_edition(work, isbn_10=["123456789X"])],
[],
FakeDataProvider(),
{},
)
assert wsb.isbn == {'123456789X', '9781234567897'}
wsb = WorkSolrBuilder(
work,
[make_edition(work, isbn_10=["9781234567897"])],
[],
FakeDataProvider(),
{},
)
assert wsb.isbn == {'123456789X', '9781234567897'}
def test_other_identifiers(self):
work = make_work()
wsb = WorkSolrBuilder(
work,
editions=[
make_edition(work, oclc_numbers=["123"], lccn=["lccn-1", "lccn-2"]),
make_edition(work, oclc_numbers=["234"], lccn=["lccn-2", "lccn-3"]),
],
authors=[],
data_provider=FakeDataProvider(),
ia_metadata={},
)
assert wsb.oclc == {'123', '234'}
assert wsb.lccn == {'lccn-1', 'lccn-2', 'lccn-3'}
def test_identifiers(self):
work = make_work()
d = WorkSolrBuilder(
work=work,
editions=[
make_edition(work, identifiers={"librarything": ["lt-1"]}),
make_edition(work, identifiers={"librarything": ["lt-2"]}),
],
authors=[],
data_provider=FakeDataProvider(),
ia_metadata={},
).build_identifiers()
assert sorted(d.get('id_librarything', [])) == ['lt-1', 'lt-2']
def test_ia_boxid(self):
w = make_work()
d = WorkSolrBuilder(
w, [make_edition(w)], [], FakeDataProvider(), {}
).build_legacy_ia_fields()
assert 'ia_box_id' not in d
w = make_work()
d = WorkSolrBuilder(
w, [make_edition(w, ia_box_id='foo')], [], FakeDataProvider(), {}
).build_legacy_ia_fields()
assert d['ia_box_id'] == ['foo']
def test_with_one_lending_edition(self):
w = make_work()
d = WorkSolrBuilder(
work=w,
editions=[make_edition(w, key="/books/OL1M", ocaid='foo00bar')],
authors=[],
data_provider=FakeDataProvider(),
ia_metadata={"foo00bar": {"collection": ['inlibrary', 'americana']}},
)
assert d.has_fulltext is True
assert d.public_scan_b is False
assert d.printdisabled_s is None
assert d.lending_edition_s == 'OL1M'
assert d.ia == ['foo00bar']
assert sss(d.ia_collection_s) == sss("americana;inlibrary")
assert d.edition_count == 1
assert d.ebook_count_i == 1
def test_with_two_lending_editions(self):
w = make_work()
d = WorkSolrBuilder(
work=w,
editions=[
make_edition(w, key="/books/OL1M", ocaid='foo01bar'),
make_edition(w, key="/books/OL2M", ocaid='foo02bar'),
],
authors=[],
data_provider=FakeDataProvider(),
ia_metadata={
"foo01bar": {"collection": ['inlibrary', 'americana']},
"foo02bar": {"collection": ['inlibrary', 'internetarchivebooks']},
},
)
assert d.has_fulltext is True
assert d.public_scan_b is False
assert d.printdisabled_s is None
assert d.lending_edition_s == 'OL1M'
assert sorted(d.ia) == ['foo01bar', 'foo02bar']
assert sss(d.ia_collection_s) == sss("inlibrary;americana;internetarchivebooks")
assert d.edition_count == 2
assert d.ebook_count_i == 2
def test_with_one_inlibrary_edition(self):
w = make_work()
d = WorkSolrBuilder(
work=w,
editions=[make_edition(w, key="/books/OL1M", ocaid='foo00bar')],
authors=[],
data_provider=FakeDataProvider(),
ia_metadata={"foo00bar": {"collection": ['printdisabled', 'inlibrary']}},
)
assert d.has_fulltext is True
assert d.public_scan_b is False
assert d.printdisabled_s == 'OL1M'
assert d.lending_edition_s == 'OL1M'
assert d.ia == ['foo00bar']
assert sss(d.ia_collection_s) == sss("printdisabled;inlibrary")
assert d.edition_count == 1
assert d.ebook_count_i == 1
def test_with_one_printdisabled_edition(self):
w = make_work()
d = WorkSolrBuilder(
work=w,
editions=[make_edition(w, key="/books/OL1M", ocaid='foo00bar')],
authors=[],
data_provider=FakeDataProvider(),
ia_metadata={"foo00bar": {"collection": ['printdisabled', 'americana']}},
)
assert d.has_fulltext is True
assert d.public_scan_b is False
assert d.printdisabled_s == 'OL1M'
assert d.lending_edition_s is None
assert d.ia == ['foo00bar']
assert sss(d.ia_collection_s) == sss("printdisabled;americana")
assert d.edition_count == 1
assert d.ebook_count_i == 1
def test_alternative_title(self):
def f(editions):
return WorkSolrBuilder(
{'key': '/works/OL1W'}, editions, [], FakeDataProvider(), {}
).alternative_title
no_title = make_work()
del no_title['title']
only_title = make_work(title='foo')
with_subtitle = make_work(title='foo 2', subtitle='bar')
assert f([]) == set()
assert f([no_title]) == set()
assert f([only_title, no_title]) == {'foo'}
assert f([with_subtitle, only_title]) == {'foo 2: bar', 'foo'}
def test_with_multiple_editions(self):
w = make_work()
d = WorkSolrBuilder(
work=w,
editions=[
make_edition(w, key="/books/OL1M"),
make_edition(w, key="/books/OL2M", ocaid='foo00bar'),
make_edition(w, key="/books/OL3M", ocaid='foo01bar'),
make_edition(w, key="/books/OL4M", ocaid='foo02bar'),
],
authors=[],
data_provider=FakeDataProvider(),
ia_metadata={
"foo00bar": {"collection": ['americana']},
"foo01bar": {"collection": ['inlibrary', 'americana']},
"foo02bar": {"collection": ['printdisabled', 'inlibrary']},
},
)
assert d.has_fulltext is True
assert d.public_scan_b is True
assert d.printdisabled_s == 'OL4M'
assert d.lending_edition_s == 'OL2M'
assert sorted(d.ia) == ['foo00bar', 'foo01bar', 'foo02bar']
assert sss(d.ia_collection_s) == sss("americana;inlibrary;printdisabled")
assert d.edition_count == 4
assert d.ebook_count_i == 3
def test_subjects(self):
w = make_work(subjects=["a", "b c"])
d = WorkSolrBuilder(w, [], [], FakeDataProvider(), {}).build_subjects()
assert d['subject'] == ['a', "b c"]
assert d['subject_facet'] == ['a', "b c"]
assert d['subject_key'] == ['a', "b_c"]
assert "people" not in d
assert "place" not in d
assert "time" not in d
w = make_work(
subjects=["a", "b c"],
subject_places=["a", "b c"],
subject_people=["a", "b c"],
subject_times=["a", "b c"],
)
d = WorkSolrBuilder(w, [], [], FakeDataProvider(), {}).build_subjects()
for k in ['subject', 'person', 'place', 'time']:
assert d[k] == ['a', "b c"]
assert d[k + '_facet'] == ['a', "b c"]
assert d[k + '_key'] == ['a', "b_c"]
def test_author_info(self):
authors = [
{
'key': "/authors/OL1A",
'name': "Author One",
'alternate_names': ["Author 1"],
},
{'key': "/authors/OL2A", 'name': "Author Two"},
]
w = make_work(
authors=[make_author(key='/authors/OL1A'), make_author(key='/authors/OL2A')]
)
d = WorkSolrBuilder(w, [], authors, FakeDataProvider(), {})
assert d.author_name == ["Author One", "Author Two"]
assert d.author_key == ['OL1A', 'OL2A']
assert d.author_facet == ['OL1A Author One', 'OL2A Author Two']
assert d.author_alternative_name == {"Author 1"}
# {'Test name': (doc_lccs, solr_lccs, sort_lcc_index)}
LCC_TESTS = {
'Remove dupes': (['A', 'A'], ['A--0000.00000000'], 0),
'Ignores garbage': (['$9.99'], None, None),
'Handles none': ([], None, None),
'Handles empty string': ([''], None, None),
'Stores multiple': (
['A123', 'B42'],
['A--0123.00000000', 'B--0042.00000000'],
None,
),
'Handles full LCC': (
['PT2603.0.E46 Z589 1991'],
['PT-2603.00000000.E46 Z589 1991'],
0,
),
'Stores longest for sorting': (
['A123.C14', 'B42'],
['A--0123.00000000.C14', 'B--0042.00000000'],
0,
),
'Ignores ISBNs/DDCs': (
['9781234123411', 'ML410', '123.4'],
['ML-0410.00000000'],
0,
),
}
@pytest.mark.parametrize(
"doc_lccs,solr_lccs,sort_lcc_index", LCC_TESTS.values(), ids=LCC_TESTS.keys()
)
def test_lccs(self, doc_lccs, solr_lccs, sort_lcc_index):
work = make_work()
d = WorkSolrBuilder(
work,
editions=[make_edition(work, lc_classifications=doc_lccs)],
authors=[],
data_provider=FakeDataProvider(),
ia_metadata={},
)
if solr_lccs:
assert d.lcc == set(solr_lccs)
if sort_lcc_index is not None:
assert d.lcc_sort == solr_lccs[sort_lcc_index]
else:
assert d.lcc == set()
assert d.lcc_sort is None
DDC_TESTS = {
'Remove dupes': (['123.5', '123.5'], ['123.5'], 0),
'Handles none': ([], None, None),
'Handles empty string': ([''], None, None),
'Stores multiple': (['05', '123.5'], ['005', '123.5'], 1),
'Handles full DDC': (['j132.452939 [B]'], ['132.452939 B', 'j132.452939 B'], 0),
'Handles alternate DDCs': (['132.52 153.6'], ['132.52', '153.6'], 0),
'Stores longest for sorting': (
['123.4', '123.41422'],
['123.4', '123.41422'],
1,
),
'Ignores ISBNs/LCCs': (['9781234123411', 'ML410', '132.3'], ['132.3'], 0),
'Ignores superfluous 920s': (['123.5', '920'], ['123.5'], 0),
'Ignores superfluous 92s': (['123.5', '92'], ['123.5'], 0),
'Ignores superfluous 92s (2)': (['123.5', 'B', '92'], ['123.5'], 0),
'Skips 920s': (['920', '123.5'], ['123.5'], 0),
'Skips 92s': (['92', '123.5'], ['123.5'], 0),
'Skips 092s': (['092', '123.5'], ['123.5'], 0),
}
@pytest.mark.asyncio
@pytest.mark.parametrize(
"doc_ddcs,solr_ddcs,sort_ddc_index", DDC_TESTS.values(), ids=DDC_TESTS.keys()
)
async def test_ddcs(self, doc_ddcs, solr_ddcs, sort_ddc_index):
work = make_work()
d = WorkSolrBuilder(
work,
[make_edition(work, dewey_decimal_class=doc_ddcs)],
[],
FakeDataProvider(),
{},
)
if solr_ddcs:
assert d.ddc == set(solr_ddcs)
assert d.ddc_sort == solr_ddcs[sort_ddc_index]
else:
assert d.ddc == set()
assert d.ddc_sort is None
def test_contributor(self):
work = make_work()
d = WorkSolrBuilder(
work,
[make_edition(work, contributors=[{'role': 'Illustrator', 'name': 'Foo'}])],
[],
FakeDataProvider(),
{},
)
# For now it should ignore it and not error
assert d.contributor == set()
class Test_number_of_pages_median:
def test_no_editions(self):
wsb = WorkSolrBuilder(
{"key": "/works/OL1W", "type": {"key": "/type/work"}},
[],
[],
FakeDataProvider(),
{},
)
assert wsb.number_of_pages_median is None
def test_invalid_type(self):
wsb = WorkSolrBuilder(
{"key": "/works/OL1W", "type": {"key": "/type/work"}},
[make_edition(number_of_pages='spam')],
[],
FakeDataProvider(),
{},
)
assert wsb.number_of_pages_median is None
wsb = WorkSolrBuilder(
{"key": "/works/OL1W", "type": {"key": "/type/work"}},
[make_edition(number_of_pages=n) for n in [123, 122, 'spam']],
[],
FakeDataProvider(),
{},
)
assert wsb.number_of_pages_median == 123
def test_normal_case(self):
wsb = WorkSolrBuilder(
{"key": "/works/OL1W", "type": {"key": "/type/work"}},
[make_edition(number_of_pages=n) for n in [123, 122, 1]],
[],
FakeDataProvider(),
{},
)
assert wsb.number_of_pages_median == 122
wsb = WorkSolrBuilder(
{"key": "/works/OL1W", "type": {"key": "/type/work"}},
[make_edition(), make_edition()]
+ [make_edition(number_of_pages=n) for n in [123, 122, 1]],
[],
FakeDataProvider(),
{},
)
assert wsb.number_of_pages_median == 122
class Test_Sort_Editions_Ocaids:
def test_sort(self):
wsb = WorkSolrBuilder(
work={},
editions=[
{"key": "/books/OL789M", "ocaid": "ocaid_restricted"},
{"key": "/books/OL567M", "ocaid": "ocaid_printdisabled"},
{"key": "/books/OL234M", "ocaid": "ocaid_borrowable"},
{"key": "/books/OL123M", "ocaid": "ocaid_open"},
],
authors=[],
data_provider=FakeDataProvider(),
ia_metadata={
"ocaid_restricted": {
"access_restricted_item": "true",
'collection': {},
},
"ocaid_printdisabled": {
"access_restricted_item": "true",
"collection": {"printdisabled"},
},
"ocaid_borrowable": {
"access_restricted_item": "true",
"collection": {"inlibrary"},
},
"ocaid_open": {
"access_restricted_item": "false",
"collection": {"americanlibraries"},
},
},
)
assert wsb.ia == [
"ocaid_open",
"ocaid_borrowable",
"ocaid_printdisabled",
"ocaid_restricted",
]
def test_goog_deprioritized(self):
wsb = WorkSolrBuilder(
work={},
editions=[
{"key": "/books/OL789M", "ocaid": "foobargoog"},
{"key": "/books/OL789M", "ocaid": "foobarblah"},
],
authors=[],
data_provider=FakeDataProvider(),
ia_metadata={},
)
assert wsb.ia == [
"foobarblah",
"foobargoog",
]
def test_excludes_fav_ia_collections(self):
wsb = WorkSolrBuilder(
work={},
editions=[
{"key": "/books/OL789M", "ocaid": "foobargoog"},
{"key": "/books/OL789M", "ocaid": "foobarblah"},
],
authors=[],
data_provider=FakeDataProvider(),
ia_metadata={
"foobargoog": {"collection": ['americanlibraries', 'fav-foobar']},
"foobarblah": {"collection": ['fav-bluebar', 'blah']},
},
)
assert wsb.ia_collection_s == "americanlibraries;blah"
| 19,960 | Python | .py | 521 | 27.570058 | 88 | 0.509443 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
279 | test_edition.py | internetarchive_openlibrary/openlibrary/tests/solr/updater/test_edition.py | import pytest
from openlibrary.solr.updater.edition import EditionSolrUpdater
from openlibrary.tests.solr.test_update import FakeDataProvider
class TestEditionSolrUpdater:
@pytest.mark.asyncio
async def test_deletes_old_orphans(self):
req, new_keys = await EditionSolrUpdater(FakeDataProvider()).update_key(
{
'key': '/books/OL1M',
'type': {'key': '/type/edition'},
'works': [{'key': '/works/OL1W'}],
}
)
assert req.deletes == ['/works/OL1M']
assert req.adds == []
assert new_keys == ['/works/OL1W']
@pytest.mark.asyncio
async def test_enqueues_orphans_as_works(self):
req, new_keys = await EditionSolrUpdater(FakeDataProvider()).update_key(
{'key': '/books/OL1M', 'type': {'key': '/type/edition'}}
)
assert req.deletes == []
assert req.adds == []
assert new_keys == ['/works/OL1M']
| 969 | Python | .py | 24 | 31.625 | 80 | 0.597444 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
280 | test_get_ia.py | internetarchive_openlibrary/openlibrary/tests/catalog/test_get_ia.py | import pytest
from pathlib import Path
from openlibrary.catalog import get_ia
from openlibrary.core import ia
from openlibrary.catalog.marc.marc_xml import MarcXml
from openlibrary.catalog.marc.marc_binary import MarcBinary, BadLength, BadMARC
TEST_DATA = Path(__file__).parents[2] / 'catalog' / 'marc' / 'tests' / 'test_data'
class MockResponse:
"""MockResponse is used to pass the contents of the read file back as an object that acts like a requests.Response
object instead of a file object. This is because the urlopen_keep_trying function was moved from urllib to requests.
"""
def __init__(self, data):
self.content = data
self.text = data.decode('utf-8')
def return_test_marc_bin(url):
return return_test_marc_data(url, 'bin_input')
def return_test_marc_xml(url):
return return_test_marc_data(url, 'xml_input')
def return_test_marc_data(url, test_data_subdir='xml_input'):
filename = url.split('/')[-1]
path = TEST_DATA / test_data_subdir / filename
return MockResponse(path.read_bytes())
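# The return_test_marc_* helpers above stand in for get_ia.urlopen_keep_trying
# (monkeypatched in the tests below), serving MARC fixtures from the local
# test_data directory instead of fetching anything from archive.org.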
class TestGetIA:
bad_marcs = [
'dasrmischepriv00rein', # binary representation of unicode interpreted as unicode codepoints
'lesabndioeinas00sche', # Original MARC8 0xE2 interpreted as u00E2 => \xC3\xA2, leader still MARC8
'poganucpeoplethe00stowuoft', # junk / unexpected character at end of publishers in field 260
]
bin_items = [
'0descriptionofta1682unit',
'13dipolarcycload00burk',
'bijouorannualofl1828cole',
'cu31924091184469',
'diebrokeradical400poll',
'engineercorpsofh00sher',
'flatlandromanceo00abbouoft',
'henrywardbeecher00robauoft',
'lincolncentenary00horn',
'livrodostermosh00bragoog',
'mytwocountries1954asto',
'onquietcomedyint00brid',
'secretcodeofsucc00stjo',
'thewilliamsrecord_vol29b',
'warofrebellionco1473unit',
]
xml_items = [
'1733mmoiresdel00vill', # no <?xml
'0descriptionofta1682unit', # has <?xml
'cu31924091184469', # is <collection>
'00schlgoog',
'13dipolarcycload00burk',
'39002054008678_yale_edu',
'abhandlungender01ggoog',
'bijouorannualofl1828cole',
'dasrmischepriv00rein',
'engineercorpsofh00sher',
'flatlandromanceo00abbouoft',
'lesabndioeinas00sche',
'lincolncentenary00horn',
'livrodostermosh00bragoog',
'mytwocountries1954asto',
'nybc200247',
'onquietcomedyint00brid',
'scrapbooksofmoun03tupp',
'secretcodeofsucc00stjo',
'soilsurveyrepor00statgoog',
'warofrebellionco1473unit',
'zweibchersatir01horauoft',
]
@pytest.mark.parametrize('item', bin_items)
def test_get_marc_record_from_ia(self, item, monkeypatch):
"""Tests the method returning MARC records from IA
used by the import API. It should return a binary MARC if one exists."""
monkeypatch.setattr(get_ia, 'urlopen_keep_trying', return_test_marc_bin)
monkeypatch.setattr(
ia,
'get_metadata',
lambda itemid: {
'_filenames': [f'{itemid}_{s}' for s in ('marc.xml', 'meta.mrc')]
},
)
result = get_ia.get_marc_record_from_ia(item)
assert isinstance(
result, MarcBinary
), f"{item}: expected instanceof MarcBinary, got {type(result)}"
@pytest.mark.parametrize('item', xml_items)
def test_no_marc_xml(self, item, monkeypatch):
"""When no binary MARC is listed in _filenames, the MARC XML should be fetched."""
monkeypatch.setattr(get_ia, 'urlopen_keep_trying', return_test_marc_xml)
monkeypatch.setattr(
ia, 'get_metadata', lambda itemid: {'_filenames': [f'{itemid}_marc.xml']}
)
result = get_ia.get_marc_record_from_ia(item)
assert isinstance(
result, MarcXml
), f"{item}: expected instanceof MarcXml, got {type(result)}"
@pytest.mark.parametrize('bad_marc', bad_marcs)
def test_incorrect_length_marcs(self, bad_marc, monkeypatch):
"""If a Binary MARC has a different length than stated in the MARC leader, it is probably due to bad character conversions."""
monkeypatch.setattr(get_ia, 'urlopen_keep_trying', return_test_marc_bin)
monkeypatch.setattr(
ia, 'get_metadata', lambda itemid: {'_filenames': [f'{itemid}_meta.mrc']}
)
with pytest.raises(BadLength):
result = get_ia.get_marc_record_from_ia(bad_marc)
def test_bad_binary_data(self):
with pytest.raises(BadMARC):
result = MarcBinary('nonMARCdata')
| 4,747 | Python | .py | 108 | 36.037037 | 134 | 0.664719 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
281 | test_utils.py | internetarchive_openlibrary/openlibrary/tests/catalog/test_utils.py | import pytest
from datetime import datetime, timedelta
from openlibrary.catalog.utils import (
author_dates_match,
flip_name,
get_missing_fields,
get_non_isbn_asin,
get_publication_year,
is_asin_only,
is_independently_published,
is_promise_item,
match_with_bad_chars,
needs_isbn_and_lacks_one,
pick_best_author,
pick_best_name,
pick_first_date,
publication_too_old_and_not_exempt,
published_in_future_year,
remove_trailing_dot,
remove_trailing_number_dot,
strip_count,
)
def test_author_dates_match():
_atype = {'key': '/type/author'}
basic = {
'name': 'John Smith',
'death_date': '1688',
'key': '/a/OL6398451A',
'birth_date': '1650',
'type': _atype,
}
full_dates = {
'name': 'John Smith',
'death_date': '23 June 1688',
'key': '/a/OL6398452A',
'birth_date': '01 January 1650',
'type': _atype,
}
full_different = {
'name': 'John Smith',
'death_date': '12 June 1688',
'key': '/a/OL6398453A',
'birth_date': '01 December 1650',
'type': _atype,
}
no_death = {
'name': 'John Smith',
'key': '/a/OL6398454A',
'birth_date': '1650',
'type': _atype,
}
no_dates = {'name': 'John Smith', 'key': '/a/OL6398455A', 'type': _atype}
non_match = {
'name': 'John Smith',
'death_date': '1999',
'key': '/a/OL6398456A',
'birth_date': '1950',
'type': _atype,
}
different_name = {'name': 'Jane Farrier', 'key': '/a/OL6398457A', 'type': _atype}
assert author_dates_match(basic, basic)
assert author_dates_match(basic, full_dates)
assert author_dates_match(basic, no_death)
assert author_dates_match(basic, no_dates)
assert author_dates_match(no_dates, no_dates)
# Without dates, the match returns True
assert author_dates_match(no_dates, non_match)
# This method only compares dates and ignores names
assert author_dates_match(no_dates, different_name)
assert author_dates_match(basic, non_match) is False
# FIXME: the following should properly be False:
assert author_dates_match(
full_different, full_dates
) # this shows matches are only occurring on year, full dates are ignored!
def test_flip_name():
assert flip_name('Smith, John.') == 'John Smith'
assert flip_name('Smith, J.') == 'J. Smith'
assert flip_name('No comma.') == 'No comma'
def test_pick_first_date():
assert pick_first_date(["Mrs.", "1839-"]) == {'birth_date': '1839'}
assert pick_first_date(["1882-."]) == {'birth_date': '1882'}
assert pick_first_date(["1900-1990.."]) == {
'birth_date': '1900',
'death_date': '1990',
}
assert pick_first_date(["4th/5th cent."]) == {'date': '4th/5th cent.'}
def test_pick_best_name():
names = [
'Andre\u0301 Joa\u0303o Antonil',
'Andr\xe9 Jo\xe3o Antonil',
'Andre? Joa?o Antonil',
]
best = names[1]
assert pick_best_name(names) == best
names = [
'Antonio Carvalho da Costa',
'Anto\u0301nio Carvalho da Costa',
'Ant\xf3nio Carvalho da Costa',
]
best = names[2]
assert pick_best_name(names) == best
def test_pick_best_author():
a1 = {
'name': 'Bretteville, Etienne Dubois abb\xe9 de',
'death_date': '1688',
'key': '/a/OL6398452A',
'birth_date': '1650',
'title': 'abb\xe9 de',
'personal_name': 'Bretteville, Etienne Dubois',
'type': {'key': '/type/author'},
}
a2 = {
'name': 'Bretteville, \xc9tienne Dubois abb\xe9 de',
'death_date': '1688',
'key': '/a/OL4953701A',
'birth_date': '1650',
'title': 'abb\xe9 de',
'personal_name': 'Bretteville, \xc9tienne Dubois',
'type': {'key': '/type/author'},
}
assert pick_best_author([a1, a2])['key'] == a2['key']
def combinations(items, n):
if n == 0:
yield []
else:
for i in range(len(items)):
for cc in combinations(items[i + 1 :], n - 1):
yield [items[i]] + cc
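# Illustrative behaviour of the helper above (a sketch, not part of the suite):
# list(combinations(['a', 'b', 'c'], 2)) == [['a', 'b'], ['a', 'c'], ['b', 'c']]
# i.e. it mirrors itertools.combinations but yields lists instead of tuples.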
def test_match_with_bad_chars():
samples = [
['Machiavelli, Niccolo, 1469-1527', 'Machiavelli, Niccol\xf2 1469-1527'],
['Humanitas Publica\xe7\xf5es', 'Humanitas Publicac?o?es'],
[
'A pesquisa ling\xfc\xedstica no Brasil',
'A pesquisa lingu?i?stica no Brasil',
],
['S\xe3o Paulo', 'Sa?o Paulo'],
[
'Diccionario espa\xf1ol-ingl\xe9s de bienes ra\xedces',
'Diccionario Espan\u0303ol-Ingle\u0301s de bienes rai\u0301ces',
],
[
'Konfliktunterdru?ckung in O?sterreich seit 1918',
'Konfliktunterdru\u0308ckung in O\u0308sterreich seit 1918',
'Konfliktunterdr\xfcckung in \xd6sterreich seit 1918',
],
[
'Soi\ufe20u\ufe21z khudozhnikov SSSR.',
'Soi?u?z khudozhnikov SSSR.',
'Soi\u0361uz khudozhnikov SSSR.',
],
['Andrzej Weronski', 'Andrzej Wero\u0144ski', 'Andrzej Weron\u0301ski'],
]
for sample in samples:
for a, b in combinations(sample, 2):
assert match_with_bad_chars(a, b)
def test_strip_count():
input = [
('Side by side', ['a', 'b', 'c', 'd']),
('Side by side.', ['e', 'f', 'g']),
('Other.', ['h', 'i']),
]
expect = [
('Side by side', ['a', 'b', 'c', 'd', 'e', 'f', 'g']),
('Other.', ['h', 'i']),
]
assert strip_count(input) == expect
def test_remove_trailing_dot():
data = [
('Test', 'Test'),
('Test.', 'Test'),
('Test J.', 'Test J.'),
('Test...', 'Test...'),
# ('Test Jr.', 'Test Jr.'),
]
for input, expect in data:
output = remove_trailing_dot(input)
assert output == expect
@pytest.mark.parametrize(
'year, expected',
[
('1999-01', 1999),
('1999', 1999),
('01-1999', 1999),
('May 5, 1999', 1999),
('May 5, 19999', None),
('1999-01-01', 1999),
('1999/1/1', 1999),
('01-01-1999', 1999),
('1/1/1999', 1999),
('199', None),
('19990101', None),
(None, None),
(1999, 1999),
(19999, None),
],
)
def test_publication_year(year, expected) -> None:
assert get_publication_year(year) == expected
@pytest.mark.parametrize(
'years_from_today, expected',
[
(1, True),
(0, False),
(-1, False),
],
)
def test_published_in_future_year(years_from_today, expected) -> None:
"""Test with last year, this year, and next year."""
def get_datetime_for_years_from_now(years: int) -> datetime:
"""Get a datetime for now +/- x years."""
now = datetime.now()
return now + timedelta(days=365 * years)
year = get_datetime_for_years_from_now(years_from_today).year
assert published_in_future_year(year) == expected
@pytest.mark.parametrize(
'name, rec, expected',
[
(
"1399 is too old for an Amazon source",
{'source_records': ['amazon:123'], 'publish_date': '1399'},
True,
),
(
"1400 is acceptable for an Amazon source",
{'source_records': ['amazon:123'], 'publish_date': '1400'},
False,
),
(
"1401 is acceptable for an Amazon source",
{'source_records': ['amazon:123'], 'publish_date': '1401'},
False,
),
(
"1399 is acceptable for an IA source",
{'source_records': ['ia:123'], 'publish_date': '1399'},
False,
),
(
"1400 is acceptable for an IA source",
{'source_records': ['ia:123'], 'publish_date': '1400'},
False,
),
(
"1401 is acceptable for an IA source",
{'source_records': ['ia:123'], 'publish_date': '1401'},
False,
),
],
)
def test_publication_too_old_and_not_exempt(name, rec, expected) -> None:
"""
See publication_too_old_and_not_exempt() for an explanation of which sources require
which publication years.
"""
assert publication_too_old_and_not_exempt(rec) == expected, f"Test failed: {name}"
@pytest.mark.parametrize(
'publishers, expected',
[
(['INDEPENDENTLY PUBLISHED'], True),
(['Independent publisher'], True),
(['Another Publisher', 'independently published'], True),
(['Another Publisher', 'independent publisher'], True),
(['Another Publisher'], False),
],
)
def test_independently_published(publishers, expected) -> None:
assert is_independently_published(publishers) == expected
@pytest.mark.parametrize(
'rec, expected',
[
({'source_records': ['bwb:123'], 'isbn_10': ['1234567890']}, False),
({'source_records': ['amazon:123'], 'isbn_13': ['1234567890123']}, False),
({'source_records': ['bwb:123'], 'isbn_10': []}, True),
({'source_records': ['bwb:123']}, True),
({'source_records': ['ia:someocaid']}, False),
({'source_records': ['amazon:123']}, True),
],
)
def test_needs_isbn_and_lacks_one(rec, expected) -> None:
assert needs_isbn_and_lacks_one(rec) == expected
@pytest.mark.parametrize(
'rec, expected',
[
({'source_records': ['promise:123', 'ia:456']}, True),
({'source_records': ['ia:456']}, False),
({'source_records': []}, False),
({}, False),
],
)
def test_is_promise_item(rec, expected) -> None:
assert is_promise_item(rec) == expected
@pytest.mark.parametrize(
["rec", "expected"],
[
({"source_records": ["amazon:B01234568"]}, "B01234568"),
({"source_records": ["amazon:123456890"]}, None),
({"source_records": ["ia:BLOB"]}, None),
({"source_records": []}, None),
({"identifiers": {"ia": ["B01234568"]}}, None),
({"identifiers": {"amazon": ["123456890"]}}, None),
({"identifiers": {"amazon": ["B01234568"]}}, "B01234568"),
({"identifiers": {"amazon": []}}, None),
({"identifiers": {}}, None),
({}, None),
],
)
def test_get_non_isbn_asin(rec, expected) -> None:
got = get_non_isbn_asin(rec)
assert got == expected
@pytest.mark.parametrize(
["rec", "expected"],
[
({"isbn_10": "123456890", "source_records": ["amazon:B01234568"]}, False),
({"isbn_13": "1234567890123", "source_records": ["amazon:B01234568"]}, False),
({"isbn_10": "1234567890", "identifiers": {"amazon": ["B01234568"]}}, False),
({"source_records": ["amazon:1234567890"]}, False),
({"identifiers": {"amazon": ["123456890"]}}, False),
({}, False),
({"identifiers": {"amazon": ["B01234568"]}}, True),
({"source_records": ["amazon:B01234568"]}, True),
],
)
def test_is_asin_only(rec, expected) -> None:
got = is_asin_only(rec)
assert got == expected
@pytest.mark.parametrize(
'name, rec, expected',
[
(
"Returns an empty list if no fields are missing",
{'title': 'A Great Book', 'source_records': ['ia:123']},
[],
),
(
"Catches a missing required field",
{'source_records': ['ia:123']},
['title'],
),
(
"Catches multiple missing required fields",
{'publish_date': '1999'},
['source_records', 'title'],
),
],
)
def test_get_missing_field(name, rec, expected) -> None:
assert sorted(get_missing_fields(rec=rec)) == sorted(
expected
), f"Test failed: {name}"
@pytest.mark.parametrize(
("date, expected"),
[
("", ""),
("1865.", "1865"),
("1865", "1865"), # No period to remove
("1865.5", "1865.5"), # Period not at the end
("1865,", "1865,"), # Comma instead of period
("18.", "18"), # Minimum digits
("1.", "1."), # Fewer than minimum digits with period
("18651.", "18651"), # More than minimum digits
("123blap.", "123blap."), # Non-digit before period
("123...", "123"), # Multiple periods at the end
("123 -..", "123 -"), # Spaces and hyphens before multiple periods
("123-.", "123-"), # Hyphen directly before single period
(" 123 .", " 123 "), # Spaces around digits and single period
("123 - .", "123 - "), # Space between hyphen and single period
("abc123...", "abc123"), # Leading characters
("123...xyz", "123...xyz"), # Trailing characters after periods
("12 34..", "12 34"), # Spaces within digits before periods
("123", "123"), # Spaces between periods
("12-34.", "12-34"), # Hyphens within digits
("100-200.", "100-200"), # Hyphens within digits, ending with period
],
)
def test_remove_trailing_number_dot(date: str, expected: str) -> None:
got = remove_trailing_number_dot(date)
assert got == expected
| 13,158 | Python | .py | 378 | 27.537037 | 88 | 0.554605 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
282 | test_models.py | internetarchive_openlibrary/openlibrary/tests/accounts/test_models.py | from openlibrary.accounts import model, InternetArchiveAccount, OpenLibraryAccount
from requests.models import Response
from unittest import mock
def get_username(account):
return account and account.value
def test_verify_hash():
secret_key = b"aqXwLJVOcV"
hash = model.generate_hash(secret_key, b"foo")
assert model.verify_hash(secret_key, b"foo", hash)
def test_xauth_http_error_without_json(monkeypatch):
xauth = InternetArchiveAccount.xauth
resp = Response()
resp.status_code = 500
resp._content = b'Internal Server Error'
monkeypatch.setattr(model.requests, 'post', lambda url, **kwargs: resp)
assert xauth('create', s3_key='_', s3_secret='_') == {
'code': 500,
'error': 'Internal Server Error',
}
def test_xauth_http_error_with_json(monkeypatch):
xauth = InternetArchiveAccount.xauth
resp = Response()
resp.status_code = 400
resp._content = b'{"error": "Unknown Parameter Blah"}'
monkeypatch.setattr(model.requests, 'post', lambda url, **kwargs: resp)
assert xauth('create', s3_key='_', s3_secret='_') == {
"error": "Unknown Parameter Blah"
}
@mock.patch("openlibrary.accounts.model.web")
def test_get(mock_web):
test = True
email = "[email protected]"
account = OpenLibraryAccount.get_by_email(email)
assert account is None
test_account = OpenLibraryAccount.create(
username="test",
email=email,
password="password",
displayname="Test User",
verified=True,
retries=0,
test=True,
)
mock_site = mock_web.ctx.site
mock_site.store.get.return_value = {
"username": "test",
"itemname": "@test",
"email": "[email protected]",
"displayname": "Test User",
"test": test,
}
key = "test/test"
test_username = test_account.username
retrieved_account = OpenLibraryAccount.get(email=email, test=test)
assert retrieved_account == test_account
mock_site = mock_web.ctx.site
mock_site.store.values.return_value = [
{
"username": "test",
"itemname": "@test",
"email": "[email protected]",
"displayname": "Test User",
"test": test,
"type": "account",
"name": "internetarchive_itemname",
"value": test_username,
}
]
retrieved_account = OpenLibraryAccount.get(link=test_username, test=test)
assert retrieved_account
retrieved_username = get_username(retrieved_account)
assert retrieved_username == test_username
mock_site.store.values.return_value[0]["name"] = "username"
retrieved_account = OpenLibraryAccount.get(username=test_username, test=test)
assert retrieved_account
retrieved_username = get_username(retrieved_account)
assert retrieved_username == test_username
key = f'test/{retrieved_username}'
retrieved_account = OpenLibraryAccount.get(key=key, test=test)
assert retrieved_account
| 3,011 | Python | .py | 80 | 31.2 | 82 | 0.665179 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
283 | conftest.py | internetarchive_openlibrary/openlibrary/tests/core/conftest.py | import os
import tempfile
import pytest
@pytest.fixture
def dummy_crontabfile(request):
"Creates a dummy crontab file that can be used for to try things"
cronfile = os.tmpnam()
ip = """* * * * * /bin/true
* * * * * /bin/true"""
f = open(cronfile, "w")
f.write(ip)
f.close()
request.addfinalizer(lambda: os.remove(cronfile))
return cronfile
@pytest.fixture
def crontabfile(request):
"""Creates a file with an actual command that we can use to test
running of cron lines"""
if os.path.exists("/tmp/crontest"):
os.unlink("/tmp/crontest")
    # os.tmpnam() no longer exists on Python 3, so create a named temp file.
    fd, cronfile = tempfile.mkstemp()
    os.close(fd)
    ip = "* * * * * touch /tmp/crontest"
    with open(cronfile, "w") as f:
        f.write(ip)
request.addfinalizer(lambda: os.remove(cronfile))
return cronfile
@pytest.fixture
def counter(request):
"""Returns a decorator that will create a 'counted' version of the
functions. The number of times it's been called is kept in the
.invocations attribute"""
def counter(fn):
def _counted(*largs, **kargs):
_counted.invocations += 1
fn(*largs, **kargs)
_counted.invocations = 0
return _counted
return counter
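# Illustrative use of the fixture above (a sketch; the test name is hypothetical):
#
#     def test_counted_calls(counter):
#         @counter
#         def noop():
#             pass
#         noop()
#         noop()
#         assert noop.invocations == 2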
@pytest.fixture
def sequence(request):
"""Returns a function that can be called for sequence numbers
similar to web.ctx.site.sequence.get_next"""
t = (x for x in range(100))
def seq_counter(*largs, **kargs):
return next(t)
import web
# Clean up this mess to mock sequences
web.ctx = lambda: 0
web.ctx.site = lambda: 0
web.ctx.site.seq = lambda: 0
web.ctx.site.seq.next_value = seq_counter
# Now run the test
return seq_counter
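# Illustrative use of the fixture above (a sketch; the test name is hypothetical):
#
#     def test_sequence_numbers(sequence):
#         assert sequence() == 0
#         assert sequence() == 1
#
# The same counter is also reachable through the patched
# web.ctx.site.seq.next_value.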
| 1,680 | Python | .py | 53 | 26.584906 | 70 | 0.656541 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
284 | test_ia.py | internetarchive_openlibrary/openlibrary/tests/core/test_ia.py | from openlibrary.core import ia
def test_get_metadata(monkeypatch, mock_memcache):
metadata = {
"metadata": {
"title": "Foo",
"identifier": "foo00bar",
"collection": ["printdisabled", "inlibrary"],
}
}
monkeypatch.setattr(ia, 'get_api_response', lambda *args: metadata)
assert ia.get_metadata('foo00bar') == {
"title": "Foo",
"identifier": "foo00bar",
"collection": ["printdisabled", "inlibrary"],
"access-restricted": False,
"_filenames": [],
}
def test_get_metadata_empty(monkeypatch, mock_memcache):
monkeypatch.setattr(ia, 'get_api_response', lambda *args: {})
assert ia.get_metadata('foo02bar') == {}
| 731 | Python | .py | 20 | 29.3 | 71 | 0.603399 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
285 | test_observations.py | internetarchive_openlibrary/openlibrary/tests/core/test_observations.py | from openlibrary.core.observations import _sort_values
def test_sort_values():
orders_list = [3, 4, 2, 1]
values_list = [
{'id': 1, 'name': 'order'},
{'id': 2, 'name': 'in'},
{'id': 3, 'name': 'this'},
{'id': 4, 'name': 'is'},
]
# sorted values returned given unsorted list
assert _sort_values(orders_list, values_list) == ['this', 'is', 'in', 'order']
# no errors thrown when orders list contains an ID not found in the values list
orders_list.insert(0, 5)
assert _sort_values(orders_list, values_list) == ['this', 'is', 'in', 'order']
# value with ID that is not in orders list will not be included in sorted list
values_list.append({'id': 100, 'name': 'impossible!'})
assert _sort_values(orders_list, values_list) == ['this', 'is', 'in', 'order']
| 831 | Python | .py | 17 | 43.117647 | 83 | 0.605686 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
286 | test_processors_invalidation.py | internetarchive_openlibrary/openlibrary/tests/core/test_processors_invalidation.py | import web
import datetime
from infogami.infobase import client
from openlibrary.core.processors import invalidation
from openlibrary.mocks.mock_infobase import MockSite
class MockHook:
def __init__(self):
self.call_count = 0
self.recent_doc = None
def on_new_version(self, doc):
self.recent_doc = doc
self.call_count += 1
class MockDatetime:
"""Class to mock datetime.datetime to overwrite now() method."""
def __init__(self, mock_now):
self._now = mock_now
def now(self):
return self._now
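# MockDatetime is applied in the tests below via
#     monkeypatch.setattr(datetime, "datetime", MockDatetime(mock_now))
# so that code calling datetime.datetime.now() during a test sees a fixed instant.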
class TestInvalidationProcessor:
def test_hook(self, monkeypatch):
"""When a document is saved, cookie must be set with its timestamp."""
self._monkeypatch_web(monkeypatch)
doc = {"key": "/templates/site.tmpl", "type": "/type/template"}
web.ctx.site.save(doc, timestamp=datetime.datetime(2010, 1, 1))
hook = invalidation._InvalidationHook(
"/templates/site.tmpl", cookie_name="invalidation-cookie", expire_time=120
)
hook.on_new_version(web.ctx.site.get(doc['key']))
assert self.cookie == {
"name": "invalidation-cookie",
"value": "2010-01-01T00:00:00",
"expires": 120,
}
def test_reload(self, monkeypatch):
"""If reload is called and there are some modifications, each hook should get called."""
self._monkeypatch_web(monkeypatch)
self._monkeypatch_hooks(monkeypatch)
# create the processor
p = invalidation.InvalidationProcessor(prefixes=['/templates/'])
# save a doc after creating the processor
doc = {"key": "/templates/site.tmpl", "type": "/type/template"}
web.ctx.site.save(doc)
# reload and make sure the hook gets called
p.reload()
assert self.hook.call_count == 1
assert (
self.hook.recent_doc.dict()
== web.ctx.site.get("/templates/site.tmpl").dict()
)
# last_update_time must get updated
assert (
p.last_update_time == web.ctx.site.get("/templates/site.tmpl").last_modified
)
def test_reload_on_timeout(self, monkeypatch):
# create the processor at 60 seconds past in time
mock_now = datetime.datetime.now() - datetime.timedelta(seconds=60)
monkeypatch.setattr(datetime, "datetime", MockDatetime(mock_now))
p = invalidation.InvalidationProcessor(prefixes=['/templates'], timeout=60)
# come back to real time
monkeypatch.undo()
# monkeypatch web
self._monkeypatch_web(monkeypatch)
self._monkeypatch_hooks(monkeypatch)
# save a doc
doc = {"key": "/templates/site.tmpl", "type": "/type/template"}
web.ctx.site.save(doc)
# call the processor
p(lambda: None)
assert self.hook.call_count == 1
assert (
self.hook.recent_doc.dict()
== web.ctx.site.get("/templates/site.tmpl").dict()
)
def test_is_timeout(self, monkeypatch):
# create the processor at 60 seconds past in time
mock_now = datetime.datetime.now() - datetime.timedelta(seconds=60)
monkeypatch.setattr(datetime, "datetime", MockDatetime(mock_now))
p = invalidation.InvalidationProcessor(prefixes=['/templates'], timeout=60)
# come back to real time
monkeypatch.undo()
# monkeypatch web
self._monkeypatch_web(monkeypatch)
self._monkeypatch_hooks(monkeypatch)
p.reload()
# until next 60 seconds, is_timeout must be false.
assert p.is_timeout() is False
def test_reload_on_cookie(self, monkeypatch):
self._monkeypatch_web(monkeypatch)
self._monkeypatch_hooks(monkeypatch)
p = invalidation.InvalidationProcessor(
prefixes=['/templates'], cookie_name="invalidation_cookie"
)
# save a doc
doc = {"key": "/templates/site.tmpl", "type": "/type/template"}
web.ctx.site.save(doc)
# call the processor
p(lambda: None)
# no cookie, no hook call
assert self.hook.call_count == 0
web.ctx.env['HTTP_COOKIE'] = (
"invalidation_cookie=" + datetime.datetime.now().isoformat()
)
# Clear parsed cookie cache to force our new value to be parsed
if "_parsed_cookies" in web.ctx:
del web.ctx._parsed_cookies
p(lambda: None)
# cookie is set, hook call is expected
assert self.hook.call_count == 1
assert (
self.hook.recent_doc.dict()
== web.ctx.site.get("/templates/site.tmpl").dict()
)
def test_setcookie_after_reload(self, monkeypatch):
self._monkeypatch_web(monkeypatch)
self._monkeypatch_hooks(monkeypatch)
p = invalidation.InvalidationProcessor(
prefixes=['/templates'], cookie_name="invalidation_cookie", timeout=60
)
# save a doc
doc = {"key": "/templates/site.tmpl", "type": "/type/template"}
web.ctx.site.save(doc)
p.reload()
# A cookie must be set when there is a recent update known to the processor
p(lambda: None)
assert self.cookie == {
"name": "invalidation_cookie",
"expires": p.expire_time,
"value": web.ctx.site.get("/templates/site.tmpl").last_modified.isoformat(),
}
def _load_fake_context(self):
app = web.application()
env = {'PATH_INFO': '/', 'HTTP_METHOD': 'GET'}
app.load(env)
def _monkeypatch_web(self, monkeypatch):
monkeypatch.setattr(web, "ctx", web.storage(x=1))
monkeypatch.setattr(web.webapi, "ctx", web.ctx)
self._load_fake_context()
web.ctx.site = MockSite()
def setcookie(name, value, expires):
self.cookie = {"name": name, "value": value, "expires": expires}
monkeypatch.setattr(web, "setcookie", setcookie)
def _monkeypatch_hooks(self, monkeypatch):
self.hook = MockHook()
monkeypatch.setattr(client, "hooks", [self.hook])
| 6,150 | Python | .py | 144 | 33.666667 | 96 | 0.621222 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
287 | test_unmarshal.py | internetarchive_openlibrary/openlibrary/tests/core/test_unmarshal.py | from datetime import datetime
import re
import pytest
from openlibrary.api import unmarshal
class Text(str):
__slots__ = ()
def __repr__(self):
return "<text: %s>" % str.__repr__(self)
class Reference(str):
__slots__ = ()
def __repr__(self):
return "<ref: %s>" % str.__repr__(self)
def parse_datetime(value: datetime | str) -> datetime:
"""Parses ISO datetime formatted string.::
>>> parse_datetime("2009-01-02T03:04:05.006789")
datetime.datetime(2009, 1, 2, 3, 4, 5, 6789)
"""
if isinstance(value, datetime):
return value
tokens = re.split(r'-|T|:|\.| ', value)
return datetime(*(int(token) for token in tokens)) # type: ignore[arg-type]
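# Illustrative equivalence (an added note, not part of the original test):
# parse_datetime("2009-01-02 03:04:05.006789") == datetime(2009, 1, 2, 3, 4, 5, 6789)
# because the regex also splits on the space between the date and time parts.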
@pytest.mark.parametrize(
"data,expected",
[
({}, {}),
({"value": "", "type": "/type/text"}, ""),
({"value": "hello, world"}, {"value": "hello, world"}),
({"value": "hello, world", "type": "/type/text"}, Text("hello, world")),
({"type": "/type/invalid", "value": "hello, world"}, "hello, world"),
([{"type": "/type/invalid", "value": "hello, world"}], ["hello, world"]),
(
{"value": "2009-01-02T03:04:05.006789", "type": "/type/datetime"},
parse_datetime("2009-01-02T03:04:05.006789"),
),
(
[
{"type": "/type/text", "value": "hello, world"},
{"type": "/type/datetime", "value": "2009-01-02T03:04:05.006789"},
],
[
Text("hello, world"),
parse_datetime("2009-01-02T03:04:05.006789"),
],
),
(
{
"key1": "value1",
"key2": {"value2": "value2", "type": "/type/text"},
"key3": "2009-01-02T03:04:05.006789",
},
{
"key1": "value1",
"key2": {"value2": "value2", "type": "/type/text"},
"key3": "2009-01-02T03:04:05.006789",
},
),
],
)
def test_unmarshal(data, expected) -> None:
assert unmarshal(data) == expected
| 2,116 | Python | .py | 60 | 26.383333 | 82 | 0.493392 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
288 | test_connections.py | internetarchive_openlibrary/openlibrary/tests/core/test_connections.py | # This will be moved to core soon.
from openlibrary.plugins.openlibrary import connection as connections
import json
class MockConnection:
def __init__(self):
self.docs = {}
def request(self, sitename, path, method="GET", data=None):
data = data or {}
if path == "/get":
key = data['key']
if key in self.docs:
return json.dumps(self.docs[key])
if path == "/get_many":
keys = json.loads(data['keys'])
return json.dumps({k: self.docs[k] for k in keys})
else:
return None
class TestMigrationMiddleware:
def test_title_prefix(self):
conn = connections.MigrationMiddleware(MockConnection())
def add(doc):
conn.conn.docs[doc['key']] = doc
def get(key):
json_data = conn.request("openlibrary.org", "/get", data={"key": key})
return json.loads(json_data)
add(
{
"key": "/books/OL1M",
"type": {"key": "/type/edition"},
"title_prefix": "The",
"title": "Book",
}
)
assert get("/books/OL1M") == {
"key": "/books/OL1M",
"type": {"key": "/type/edition"},
"title": "The Book",
}
add(
{
"key": "/books/OL2M",
"type": {"key": "/type/edition"},
"title_prefix": "The ",
"title": "Book",
}
)
assert get("/books/OL2M") == {
"key": "/books/OL2M",
"type": {"key": "/type/edition"},
"title": "The Book",
}
add(
{
"key": "/books/OL3M",
"type": {"key": "/type/edition"},
"title_prefix": "The Book",
}
)
assert get("/books/OL3M") == {
"key": "/books/OL3M",
"type": {"key": "/type/edition"},
"title": "The Book",
}
def test_authors(self):
conn = connections.MigrationMiddleware(MockConnection())
def add(doc):
conn.conn.docs[doc['key']] = doc
def get(key):
json_data = conn.request("openlibrary.org", "/get", data={"key": key})
return json.loads(json_data)
def get_many(keys):
data = {"keys": json.dumps(keys)}
json_data = conn.request("openlibrary.org", "/get_many", data=data)
return json.loads(json_data)
add(
{
"key": "/works/OL1W",
"type": {"key": "/type/work"},
"authors": [{"type": {"key": "/type/author_role"}}],
}
)
assert get("/works/OL1W") == {
"key": "/works/OL1W",
"type": {"key": "/type/work"},
"authors": [],
}
assert get_many(["/works/OL1W"]) == {
"/works/OL1W": {
"key": "/works/OL1W",
"type": {"key": "/type/work"},
"authors": [],
}
}
OL2W = {
"key": "/works/OL2W",
"type": {"key": "/type/work"},
"authors": [
{
"type": {"key": "/type/author_role"},
"author": {"key": "/authors/OL2A"},
}
],
}
add(OL2W)
assert get("/works/OL2W") == OL2W
| 3,476 | Python | .py | 105 | 21.161905 | 82 | 0.430746 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
289 | test_helpers.py | internetarchive_openlibrary/openlibrary/tests/core/test_helpers.py | import web
from openlibrary.core import helpers as h
from openlibrary.mocks.mock_infobase import MockSite
def _load_fake_context():
app = web.application()
env = {
"PATH_INFO": "/",
"HTTP_METHOD": "GET",
}
app.load(env)
def _monkeypatch_web(monkeypatch):
monkeypatch.setattr(web, "ctx", web.storage(x=1))
monkeypatch.setattr(web.webapi, "ctx", web.ctx)
_load_fake_context()
web.ctx.lang = 'en'
web.ctx.site = MockSite()
def test_sanitize():
# plain html should pass through
assert h.sanitize("hello") == "hello"
assert h.sanitize("<p>hello</p>") == "<p>hello</p>"
# broken html must be corrected
assert h.sanitize("<p>hello") == "<p>hello</p>"
# css class is fine
assert h.sanitize('<p class="foo">hello</p>') == '<p class="foo">hello</p>'
# style attribute must be stripped
assert h.sanitize('<p style="color: red">hello</p>') == '<p>hello</p>'
# style tags must be stripped
assert (
h.sanitize('<style type="text/css">p{color: red;}</style><p>hello</p>')
== '<p>hello</p>'
)
# script tags must be stripped
assert h.sanitize('<script>alert("dhoom")</script>hello') == 'hello'
# rel="nofollow" must be added absolute links
assert (
h.sanitize('<a href="https://example.com">hello</a>')
== '<a href="https://example.com" rel="nofollow">hello</a>'
)
# relative links should pass through
assert h.sanitize('<a href="relpath">hello</a>') == '<a href="relpath">hello</a>'
def test_safesort():
from datetime import datetime
y2000 = datetime(2000, 1, 1)
y2005 = datetime(2005, 1, 1)
y2010 = datetime(2010, 1, 1)
assert h.safesort([y2005, y2010, y2000, None]) == [None, y2000, y2005, y2010]
assert h.safesort([y2005, y2010, y2000, None], reverse=True) == [
y2010,
y2005,
y2000,
None,
]
assert h.safesort([[y2005], [None]], key=lambda x: x[0]) == [[None], [y2005]]
def test_datestr(monkeypatch):
from datetime import datetime
then = datetime(2010, 1, 1, 0, 0, 0)
_monkeypatch_web(monkeypatch)
# assert h.datestr(then, datetime(2010, 1, 1, 0, 0, 0, 10)) == u"just moments ago"
assert h.datestr(then, datetime(2010, 1, 1, 0, 0, 1)) == "1 second ago"
assert h.datestr(then, datetime(2010, 1, 1, 0, 0, 9)) == "9 seconds ago"
assert h.datestr(then, datetime(2010, 1, 1, 0, 1, 1)) == "1 minute ago"
assert h.datestr(then, datetime(2010, 1, 1, 0, 9, 1)) == "9 minutes ago"
assert h.datestr(then, datetime(2010, 1, 1, 1, 0, 1)) == "1 hour ago"
assert h.datestr(then, datetime(2010, 1, 1, 9, 0, 1)) == "9 hours ago"
assert h.datestr(then, datetime(2010, 1, 2, 0, 0, 1)) == "1 day ago"
assert h.datestr(then, datetime(2010, 1, 9, 0, 0, 1)) == "January 1, 2010"
assert h.datestr(then, datetime(2010, 1, 9, 0, 0, 1), lang='fr') == '1 janvier 2010'
def test_sprintf():
assert h.sprintf('hello %s', 'python') == 'hello python'
assert h.sprintf('hello %(name)s', name='python') == 'hello python'
def test_commify():
assert h.commify(123) == "123"
assert h.commify(1234) == "1,234"
assert h.commify(1234567) == "1,234,567"
assert h.commify(123, lang="te") == "123"
assert h.commify(1234, lang="te") == "1,234"
assert h.commify(1234567, lang="te") == "12,34,567"
def test_truncate():
assert h.truncate("hello", 6) == "hello"
assert h.truncate("hello", 5) == "hello"
assert h.truncate("hello", 4) == "hell..."
def test_urlsafe():
assert h.urlsafe("a b") == "a_b"
assert h.urlsafe("a?b") == "a_b"
assert h.urlsafe("a?&b") == "a_b"
assert h.urlsafe("?a") == "a"
assert h.urlsafe("a?") == "a"
def test_texsafe():
assert h.texsafe("hello") == r"hello"
assert h.texsafe("a_b") == r"a\_{}b"
assert h.texsafe("a < b") == r"a \textless{} b"
def test_percentage():
assert h.percentage(1, 10) == 10.0
assert h.percentage(0, 0) == 0
| 3,995 | Python | .py | 94 | 37.244681 | 88 | 0.609428 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
290 | test_imports.py | internetarchive_openlibrary/openlibrary/tests/core/test_imports.py | import pytest
from typing import Final
import web
from openlibrary.core.db import get_db
from openlibrary.core.imports import Batch, ImportItem
IMPORT_ITEM_DDL: Final = """
CREATE TABLE import_item (
id serial primary key,
batch_id integer,
status text default 'pending',
error text,
ia_id text,
data text,
ol_key text,
comments text,
UNIQUE (batch_id, ia_id)
);
"""
IMPORT_BATCH_DDL: Final = """
CREATE TABLE import_batch (
id integer primary key,
name text,
submitter text,
submit_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
"""
IMPORT_ITEM_DATA: Final = [
{
'id': 1,
'batch_id': 1,
'ia_id': 'unique_id_1',
'status': 'pending',
},
{
'id': 2,
'batch_id': 1,
'ia_id': 'unique_id_2',
'status': 'pending',
},
{
'id': 3,
'batch_id': 2,
'ia_id': 'unique_id_1',
'status': 'pending',
},
]
IMPORT_ITEM_DATA_STAGED: Final = [
{
'id': 1,
'batch_id': 1,
'ia_id': 'unique_id_1',
'status': 'staged',
},
{
'id': 2,
'batch_id': 1,
'ia_id': 'unique_id_2',
'status': 'staged',
},
{
'id': 3,
'batch_id': 2,
'ia_id': 'unique_id_1',
'status': 'staged',
},
]
IMPORT_ITEM_DATA_STAGED_AND_PENDING: Final = [
{
'id': 1,
'batch_id': 1,
'ia_id': 'idb:unique_id_1',
'status': 'pending',
},
{
'id': 2,
'batch_id': 1,
'ia_id': 'idb:unique_id_2',
'status': 'staged',
},
{
'id': 3,
'batch_id': 2,
'ia_id': 'idb:unique_id_1',
'status': 'staged',
},
]
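# Note: the "idb:" prefix on ia_id in the staged-and-pending rows above is what
# TestImportItem.test_find_staged_or_pending matches against via sources=["idb"].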
@pytest.fixture(scope="module")
def setup_item_db():
web.config.db_parameters = {'dbn': 'sqlite', 'db': ':memory:'}
db = get_db()
db.query(IMPORT_ITEM_DDL)
yield db
db.query('delete from import_item;')
@pytest.fixture
def import_item_db(setup_item_db):
setup_item_db.multiple_insert('import_item', IMPORT_ITEM_DATA)
yield setup_item_db
setup_item_db.query('delete from import_item;')
@pytest.fixture
def import_item_db_staged(setup_item_db):
setup_item_db.multiple_insert('import_item', IMPORT_ITEM_DATA_STAGED)
yield setup_item_db
setup_item_db.query('delete from import_item;')
@pytest.fixture
def import_item_db_staged_and_pending(setup_item_db):
setup_item_db.multiple_insert('import_item', IMPORT_ITEM_DATA_STAGED_AND_PENDING)
yield setup_item_db
setup_item_db.query('delete from import_item;')
class TestImportItem:
def test_delete(self, import_item_db):
assert len(list(import_item_db.select('import_item'))) == 3
ImportItem.delete_items(['unique_id_1'])
assert len(list(import_item_db.select('import_item'))) == 1
def test_delete_with_batch_id(self, import_item_db):
assert len(list(import_item_db.select('import_item'))) == 3
ImportItem.delete_items(['unique_id_1'], batch_id=1)
assert len(list(import_item_db.select('import_item'))) == 2
ImportItem.delete_items(['unique_id_1'], batch_id=2)
assert len(list(import_item_db.select('import_item'))) == 1
def test_find_pending_returns_none_with_no_results(self, import_item_db_staged):
"""Try with only staged items in the DB."""
assert ImportItem.find_pending() is None
def test_find_pending_returns_pending(self, import_item_db):
"""Try with some pending items now."""
items = ImportItem.find_pending()
assert isinstance(items, map)
@pytest.mark.parametrize(
'ia_id, expected',
[
('unique_id_1', [1, 3]),
('unique_id_2', [2]),
('unique_id_4', []),
],
)
def test_find_staged_or_pending(
self, import_item_db_staged_and_pending, ia_id, expected
):
"""Get some staged and pending items by ia_id identifiers."""
items = ImportItem.find_staged_or_pending([ia_id], sources=["idb"])
assert [item['id'] for item in items] == expected
@pytest.fixture(scope="module")
def setup_batch_db():
web.config.db_parameters = {'dbn': 'sqlite', 'db': ':memory:'}
db = get_db()
db.query(IMPORT_BATCH_DDL)
yield db
db.query('delete from import_batch;')
class TestBatchItem:
def test_add_items_legacy(self, setup_batch_db):
"""This tests the legacy format of list[str] for items."""
legacy_items = ["ocaid_1", "ocaid_2"]
batch = Batch.new("test-legacy-batch")
result = batch.normalize_items(legacy_items)
assert result == [
{'batch_id': 1, 'ia_id': 'ocaid_1'},
{'batch_id': 1, 'ia_id': 'ocaid_2'},
]
| 4,775 | Python | .py | 157 | 24.191083 | 85 | 0.589107 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
291 | test_olmarkdown.py | internetarchive_openlibrary/openlibrary/tests/core/test_olmarkdown.py | from openlibrary.core.olmarkdown import OLMarkdown
def test_olmarkdown():
def md(text):
return OLMarkdown(text).convert().strip()
def p(html):
# markdown always wraps the result in <p>.
return "<p>%s\n</p>" % html
assert md("**foo**") == p("<strong>foo</strong>")
assert md("<b>foo</b>") == p('<b>foo</b>')
assert md("https://openlibrary.org") == p(
'<a href="https://openlibrary.org" rel="nofollow">'
'https://openlibrary.org'
'</a>'
)
assert md("http://example.org") == p(
'<a href="http://example.org" rel="nofollow">http://example.org</a>'
)
# why extra spaces?
assert md("a\nb") == p("a<br/>\n b")
| 706 | Python | .py | 19 | 30.842105 | 76 | 0.56305 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
292 | test_lists_model.py | internetarchive_openlibrary/openlibrary/tests/core/test_lists_model.py | from typing import cast
from openlibrary.core.lists.model import List, Seed, ThingReferenceDict
def test_seed_with_string():
lst = List(None, "/list/OL1L", None)
seed = Seed(lst, "subject/Politics and government")
assert seed._list == lst
assert seed.value == "subject/Politics and government"
assert seed.key == "subject/Politics and government"
assert seed.type == "subject"
def test_seed_with_nonstring():
lst = List(None, "/list/OL1L", None)
not_a_string = cast(ThingReferenceDict, {"key": "not_a_string.key"})
seed = Seed.from_json(lst, not_a_string)
assert seed._list == lst
assert seed.key == "not_a_string.key"
assert hasattr(seed, "type") is False
| 710 | Python | .py | 16 | 40.0625 | 72 | 0.698113 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
293 | test_db.py | internetarchive_openlibrary/openlibrary/tests/core/test_db.py | import web
from openlibrary.core.db import get_db
from openlibrary.core.bookshelves import Bookshelves
from openlibrary.core.bookshelves_events import BookshelvesEvents
from openlibrary.core.booknotes import Booknotes
from openlibrary.core.edits import CommunityEditsQueue
from openlibrary.core.observations import Observations
from openlibrary.core.ratings import Ratings
from openlibrary.core.yearly_reading_goals import YearlyReadingGoals
READING_LOG_DDL = """
CREATE TABLE bookshelves_books (
username text NOT NULL,
work_id integer NOT NULL,
bookshelf_id INTEGER references bookshelves(id) ON DELETE CASCADE ON UPDATE CASCADE,
edition_id integer default null,
primary key (username, work_id, bookshelf_id)
);
"""
BOOKNOTES_DDL = """
CREATE TABLE booknotes (
username text NOT NULL,
work_id integer NOT NULL,
edition_id integer NOT NULL default -1,
notes text NOT NULL,
primary key (username, work_id, edition_id)
);
"""
RATINGS_DDL = """
CREATE TABLE ratings (
username text NOT NULL,
work_id integer NOT NULL,
rating integer,
edition_id integer default null,
primary key (username, work_id)
);
"""
OBSERVATIONS_DDL = """
CREATE TABLE observations (
work_id INTEGER not null,
edition_id INTEGER default -1,
username text not null,
observation_type INTEGER not null,
observation_value INTEGER not null,
primary key (work_id, edition_id, username, observation_value, observation_type)
);
"""
COMMUNITY_EDITS_QUEUE_DDL = """
CREATE TABLE community_edits_queue (
title text,
submitter text not null,
reviewer text default null,
url text not null,
status int not null default 1
);
"""
BOOKSHELVES_EVENTS_DDL = """
CREATE TABLE bookshelves_events (
id serial primary key,
username text not null,
work_id integer not null,
edition_id integer not null,
event_type integer not null,
event_date text not null,
updated timestamp
);
"""
YEARLY_READING_GOALS_DDL = """
CREATE TABLE yearly_reading_goals (
username text not null,
year integer not null,
target integer not null,
current integer default 0,
updated timestamp
);
"""
class TestUpdateWorkID:
@classmethod
def setup_class(cls):
web.config.db_parameters = {"dbn": "sqlite", "db": ":memory:"}
db = get_db()
db.query(READING_LOG_DDL)
db.query(BOOKNOTES_DDL)
@classmethod
def teardown_class(cls):
db = get_db()
db.query("delete from bookshelves_books;")
db.query("delete from booknotes;")
def setup_method(self, method):
self.db = get_db()
self.source_book = {
"username": "@cdrini",
"work_id": "1",
"edition_id": "1",
"bookshelf_id": "1",
}
assert not len(list(self.db.select("bookshelves_books")))
self.db.insert("bookshelves_books", **self.source_book)
def teardown_method(self):
self.db.query("delete from bookshelves_books;")
def test_update_collision(self):
existing_book = {
"username": "@cdrini",
"work_id": "2",
"edition_id": "2",
"bookshelf_id": "1",
}
self.db.insert("bookshelves_books", **existing_book)
assert len(list(self.db.select("bookshelves_books"))) == 2
Bookshelves.update_work_id(
self.source_book['work_id'], existing_book['work_id']
)
assert len(
list(
self.db.select(
"bookshelves_books",
where={"username": "@cdrini", "work_id": "2", "edition_id": "2"},
)
)
), "failed to update 1 to 2"
assert not len(
list(
self.db.select(
"bookshelves_books",
where={"username": "@cdrini", "work_id": "1", "edition_id": "1"},
)
)
), "old work_id 1 present"
def test_update_simple(self):
assert len(list(self.db.select("bookshelves_books"))) == 1
Bookshelves.update_work_id(self.source_book['work_id'], "2")
def test_no_allow_delete_on_conflict(self):
rows = [
{"username": "@mek", "work_id": 1, "edition_id": 1, "notes": "Jimmeny"},
{"username": "@mek", "work_id": 2, "edition_id": 1, "notes": "Cricket"},
]
self.db.multiple_insert("booknotes", rows)
resp = Booknotes.update_work_id("1", "2")
assert resp == {'rows_changed': 0, 'rows_deleted': 0, 'failed_deletes': 1}
assert [dict(row) for row in self.db.select("booknotes")] == rows
class TestUsernameUpdate:
READING_LOG_SETUP_ROWS = [
{
"username": "@kilgore_trout",
"work_id": 1,
"edition_id": 1,
"bookshelf_id": 1,
},
{
"username": "@kilgore_trout",
"work_id": 2,
"edition_id": 2,
"bookshelf_id": 1,
},
{
"username": "@billy_pilgrim",
"work_id": 1,
"edition_id": 1,
"bookshelf_id": 2,
},
]
BOOKNOTES_SETUP_ROWS = [
{"username": "@kilgore_trout", "work_id": 1, "edition_id": 1, "notes": "Hello"},
{"username": "@billy_pilgrim", "work_id": 1, "edition_id": 1, "notes": "World"},
]
RATINGS_SETUP_ROWS = [
{"username": "@kilgore_trout", "work_id": 1, "edition_id": 1, "rating": 4},
{"username": "@billy_pilgrim", "work_id": 5, "edition_id": 1, "rating": 2},
]
OBSERVATIONS_SETUP_ROWS = [
{
"username": "@kilgore_trout",
"work_id": 1,
"edition_id": 3,
"observation_type": 1,
"observation_value": 2,
},
{
"username": "@billy_pilgrim",
"work_id": 2,
"edition_id": 4,
"observation_type": 4,
"observation_value": 1,
},
]
EDITS_QUEUE_SETUP_ROWS = [
{
"title": "One Fish, Two Fish, Red Fish, Blue Fish",
"submitter": "@kilgore_trout",
"reviewer": None,
"url": "/works/merge?records=OL1W,OL2W,OL3W",
"status": 1,
},
{
"title": "The Lorax",
"submitter": "@kilgore_trout",
"reviewer": "@billy_pilgrim",
"url": "/works/merge?records=OL4W,OL5W,OL6W",
"status": 2,
},
{
"title": "Green Eggs and Ham",
"submitter": "@eliot_rosewater",
"reviewer": None,
"url": "/works/merge?records=OL10W,OL11W,OL12W,OL13W",
"status": 1,
},
]
@classmethod
def setup_class(cls):
web.config.db_parameters = {"dbn": "sqlite", "db": ":memory:"}
db = get_db()
db.query(RATINGS_DDL)
db.query(OBSERVATIONS_DDL)
db.query(COMMUNITY_EDITS_QUEUE_DDL)
def setup_method(self):
self.db = get_db()
self.db.multiple_insert("bookshelves_books", self.READING_LOG_SETUP_ROWS)
self.db.multiple_insert("booknotes", self.BOOKNOTES_SETUP_ROWS)
self.db.multiple_insert("ratings", self.RATINGS_SETUP_ROWS)
self.db.multiple_insert("observations", self.OBSERVATIONS_SETUP_ROWS)
def teardown_method(self):
self.db.query("delete from bookshelves_books;")
self.db.query("delete from booknotes;")
self.db.query("delete from ratings;")
self.db.query("delete from observations;")
def test_delete_all_by_username(self):
assert len(list(self.db.select("bookshelves_books"))) == 3
Bookshelves.delete_all_by_username("@kilgore_trout")
assert len(list(self.db.select("bookshelves_books"))) == 1
assert len(list(self.db.select("booknotes"))) == 2
Booknotes.delete_all_by_username('@kilgore_trout')
assert len(list(self.db.select("booknotes"))) == 1
assert len(list(self.db.select("ratings"))) == 2
Ratings.delete_all_by_username("@kilgore_trout")
assert len(list(self.db.select("ratings"))) == 1
assert len(list(self.db.select("observations"))) == 2
Observations.delete_all_by_username("@kilgore_trout")
assert len(list(self.db.select("observations"))) == 1
def test_update_username(self):
self.db.multiple_insert("community_edits_queue", self.EDITS_QUEUE_SETUP_ROWS)
before_where = {"username": "@kilgore_trout"}
after_where = {"username": "@anonymous"}
assert len(list(self.db.select("bookshelves_books", where=before_where))) == 2
Bookshelves.update_username("@kilgore_trout", "@anonymous")
assert len(list(self.db.select("bookshelves_books", where=before_where))) == 0
assert len(list(self.db.select("bookshelves_books", where=after_where))) == 2
assert len(list(self.db.select("booknotes", where=before_where))) == 1
Booknotes.update_username("@kilgore_trout", "@anonymous")
assert len(list(self.db.select("booknotes", where=before_where))) == 0
assert len(list(self.db.select("booknotes", where=after_where))) == 1
assert len(list(self.db.select("ratings", where=before_where))) == 1
Ratings.update_username("@kilgore_trout", "@anonymous")
assert len(list(self.db.select("ratings", where=before_where))) == 0
assert len(list(self.db.select("ratings", where=after_where))) == 1
assert len(list(self.db.select("observations", where=before_where))) == 1
Observations.update_username("@kilgore_trout", "@anonymous")
assert len(list(self.db.select("observations", where=before_where))) == 0
assert len(list(self.db.select("observations", where=after_where))) == 1
results = self.db.select(
"community_edits_queue", where={"submitter": "@kilgore_trout"}
)
assert len(list(results)) == 2
CommunityEditsQueue.update_submitter_name('@kilgore_trout', '@anonymous')
results = self.db.select(
"community_edits_queue", where={"submitter": "@kilgore_trout"}
)
assert len(list(results)) == 0
results = self.db.select(
"community_edits_queue", where={"submitter": "@anonymous"}
)
assert len(list(results)) == 2
self.db.query('delete from community_edits_queue;')
class TestCheckIns:
BOOKSHELVES_EVENTS_SETUP_ROWS = [
{
"id": 1,
"username": "@kilgore_trout",
"work_id": 1,
"edition_id": 2,
"event_type": 1,
"event_date": "2022-04-17",
},
{
"id": 2,
"username": "@kilgore_trout",
"work_id": 1,
"edition_id": 2,
"event_type": 2,
"event_date": "2022-05-10",
},
{
"id": 3,
"username": "@kilgore_trout",
"work_id": 1,
"edition_id": 2,
"event_type": 3,
"event_date": "2022-06-20",
},
{
"id": 4,
"username": "@billy_pilgrim",
"work_id": 3,
"edition_id": 4,
"event_type": 1,
"event_date": "2020",
},
{
"id": 5,
"username": "@eliot_rosewater",
"work_id": 3,
"edition_id": 4,
"event_type": 3,
"event_date": "2019-08-20",
},
{
"id": 6,
"username": "@eliot_rosewater",
"work_id": 3,
"edition_id": 4,
"event_type": 3,
"event_date": "2019-10",
},
]
@classmethod
def setup_class(cls):
web.config.db_parameters = {"dbn": "sqlite", "db": ":memory:"}
db = get_db()
db.query(BOOKSHELVES_EVENTS_DDL)
def setup_method(self):
self.db = get_db()
self.db.multiple_insert(
'bookshelves_events', self.BOOKSHELVES_EVENTS_SETUP_ROWS
)
def teardown_method(self):
self.db.query("delete from bookshelves_events;")
def test_create_event(self):
assert len(list(self.db.select('bookshelves_events'))) == 6
assert (
len(
list(
self.db.select(
'bookshelves_events', where={"username": "@billy_pilgrim"}
)
)
)
== 1
)
BookshelvesEvents.create_event('@billy_pilgrim', 5, 6, '2022-01', event_type=1)
assert len(list(self.db.select('bookshelves_events'))) == 7
assert (
len(
list(
self.db.select(
'bookshelves_events', where={"username": "@billy_pilgrim"}
)
)
)
== 2
)
def test_select_all_by_username(self):
assert len(list(self.db.select('bookshelves_events'))) == 6
assert (
len(
list(
self.db.select(
'bookshelves_events', where={"username": "@kilgore_trout"}
)
)
)
== 3
)
BookshelvesEvents.create_event(
'@kilgore_trout', 7, 8, '2011-01-09', event_type=1
)
assert len(list(self.db.select('bookshelves_events'))) == 7
assert (
len(
list(
self.db.select(
'bookshelves_events', where={"username": "@kilgore_trout"}
)
)
)
== 4
)
def test_update_event_date(self):
assert len(list(self.db.select('bookshelves_events', where={"id": 1}))) == 1
row = self.db.select('bookshelves_events', where={"id": 1})[0]
assert row['event_date'] == "2022-04-17"
new_date = "1999-01-01"
BookshelvesEvents.update_event_date(1, new_date)
row = self.db.select('bookshelves_events', where={"id": 1})[0]
assert row['event_date'] == new_date
def test_delete_by_id(self):
assert len(list(self.db.select('bookshelves_events'))) == 6
assert len(list(self.db.select('bookshelves_events', where={"id": 1}))) == 1
BookshelvesEvents.delete_by_id(1)
assert len(list(self.db.select('bookshelves_events'))) == 5
assert len(list(self.db.select('bookshelves_events', where={"id": 1}))) == 0
def test_delete_by_username(self):
assert len(list(self.db.select('bookshelves_events'))) == 6
assert (
len(
list(
self.db.select(
'bookshelves_events', where={"username": "@kilgore_trout"}
)
)
)
== 3
)
BookshelvesEvents.delete_by_username('@kilgore_trout')
assert len(list(self.db.select('bookshelves_events'))) == 3
assert (
len(
list(
self.db.select(
'bookshelves_events', where={"username": "@kilgore_trout"}
)
)
)
== 0
)
def test_get_latest_event_date(self):
assert (
BookshelvesEvents.get_latest_event_date('@eliot_rosewater', 3, 3)[
'event_date'
]
== "2019-10"
)
assert (
BookshelvesEvents.get_latest_event_date('@eliot_rosewater', 3, 3)['id'] == 6
)
assert BookshelvesEvents.get_latest_event_date('@eliot_rosewater', 3, 1) is None
class TestYearlyReadingGoals:
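    # Fixture rows: one goal exceeded (@billy_pilgrim 2022), one with no progress yet
    # (@billy_pilgrim 2023), and one met exactly (@kilgore_trout 2022).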
SETUP_ROWS = [
{
'username': '@billy_pilgrim',
'year': 2022,
'target': 5,
'current': 6,
},
{
'username': '@billy_pilgrim',
'year': 2023,
'target': 7,
'current': 0,
},
{
'username': '@kilgore_trout',
'year': 2022,
'target': 4,
'current': 4,
},
]
TABLENAME = YearlyReadingGoals.TABLENAME
@classmethod
def setup_class(cls):
web.config.db_parameters = {"dbn": 'sqlite', "db": ':memory:'}
db = get_db()
db.query(YEARLY_READING_GOALS_DDL)
def setup_method(self):
self.db = get_db()
self.db.multiple_insert(self.TABLENAME, self.SETUP_ROWS)
def teardown_method(self):
self.db.query('delete from yearly_reading_goals')
def test_create(self):
assert len(list(self.db.select(self.TABLENAME))) == 3
assert (
len(
list(
self.db.select(self.TABLENAME, where={'username': '@kilgore_trout'})
)
)
== 1
)
YearlyReadingGoals.create('@kilgore_trout', 2023, 5)
assert (
len(
list(
self.db.select(self.TABLENAME, where={'username': '@kilgore_trout'})
)
)
== 2
)
new_row = list(
self.db.select(
self.TABLENAME, where={'username': '@kilgore_trout', 'year': 2023}
)
)
assert len(new_row) == 1
assert new_row[0]['current'] == 0
def test_select_by_username_and_year(self):
assert (
len(YearlyReadingGoals.select_by_username_and_year('@billy_pilgrim', 2022))
== 1
)
def test_has_reached_goal(self):
assert YearlyReadingGoals.has_reached_goal('@billy_pilgrim', 2022)
assert not YearlyReadingGoals.has_reached_goal('@billy_pilgrim', 2023)
assert YearlyReadingGoals.has_reached_goal('@kilgore_trout', 2022)
def test_update_current_count(self):
assert (
next(
iter(
self.db.select(
self.TABLENAME,
where={'username': '@billy_pilgrim', 'year': 2023},
)
)
)['current']
== 0
)
YearlyReadingGoals.update_current_count('@billy_pilgrim', 2023, 10)
assert (
next(
iter(
self.db.select(
self.TABLENAME,
where={'username': '@billy_pilgrim', 'year': 2023},
)
)
)['current']
== 10
)
def test_update_target(self):
assert (
next(
iter(
self.db.select(
self.TABLENAME,
where={'username': '@billy_pilgrim', 'year': 2023},
)
)
)['target']
== 7
)
YearlyReadingGoals.update_target('@billy_pilgrim', 2023, 14)
assert (
next(
iter(
self.db.select(
self.TABLENAME,
where={'username': '@billy_pilgrim', 'year': 2023},
)
)
)['target']
== 14
)
def test_delete_by_username(self):
assert (
len(
list(
self.db.select(self.TABLENAME, where={'username': '@billy_pilgrim'})
)
)
== 2
)
YearlyReadingGoals.delete_by_username('@billy_pilgrim')
assert (
len(
list(
self.db.select(self.TABLENAME, where={'username': '@billy_pilgrim'})
)
)
== 0
)
| 19,855 | Python | .py | 567 | 24.555556 | 88 | 0.52613 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
294 | test_vendors.py | internetarchive_openlibrary/openlibrary/tests/core/test_vendors.py | from dataclasses import dataclass
from unittest.mock import patch
import pytest
from openlibrary.core.vendors import (
get_amazon_metadata,
split_amazon_title,
clean_amazon_metadata_for_load,
betterworldbooks_fmt,
AmazonAPI,
is_dvd,
)
def test_clean_amazon_metadata_for_load_non_ISBN():
# results from get_amazon_metadata() -> _serialize_amazon_product()
# available from /prices?asin=B000KRRIZI
amazon = {
"publishers": ["Dutton"],
"languages": [],
"price_amt": "74.00",
"source_records": ["amazon:B000KRRIZI"],
"title": "The Man With the Crimson Box",
"url": "https://www.amazon.com/dp/B000KRRIZI/?tag=internetarchi-20",
"price": "$74.00 (used)",
"number_of_pages": None,
"cover": "https://images-na.ssl-images-amazon.com/images/I/31aTq%2BNA1EL.jpg",
"qlt": "used",
"physical_format": "hardcover",
"edition": "First Edition",
"publish_date": "1940",
"authors": [{"name": "H.S. Keeler"}],
"product_group": "Book",
"offer_summary": {
"total_used": 1,
"total_new": 0,
"total_collectible": 0,
"lowest_used": 7400,
"amazon_offers": 0,
},
}
result = clean_amazon_metadata_for_load(amazon)
# this result is passed to load() from vendors.create_edition_from_amazon_metadata()
assert isinstance(result['publishers'], list)
assert result['publishers'][0] == 'Dutton'
assert (
result['cover']
== 'https://images-na.ssl-images-amazon.com/images/I/31aTq%2BNA1EL.jpg'
)
assert result['authors'][0]['name'] == 'H.S. Keeler'
for isbn in ('isbn', 'isbn_10', 'isbn_13'):
assert result.get(isbn) is None
assert result['identifiers']['amazon'] == ['B000KRRIZI']
assert result['source_records'] == ['amazon:B000KRRIZI']
assert result['publish_date'] == '1940'
def test_clean_amazon_metadata_for_load_ISBN():
amazon = {
"publishers": ["Oxford University Press"],
"price": "$9.50 (used)",
"physical_format": "paperback",
"edition": "3",
"authors": [{"name": "Rachel Carson"}],
"isbn_13": ["9780190906764"],
"price_amt": "9.50",
"source_records": ["amazon:0190906766"],
"title": "The Sea Around Us",
"url": "https://www.amazon.com/dp/0190906766/?tag=internetarchi-20",
"offer_summary": {
"amazon_offers": 1,
"lowest_new": 1050,
"total_new": 31,
"lowest_used": 950,
"total_collectible": 0,
"total_used": 15,
},
"number_of_pages": "256",
"cover": "https://images-na.ssl-images-amazon.com/images/I/51XKo3FsUyL.jpg",
"languages": ["english"],
"isbn_10": ["0190906766"],
"publish_date": "Dec 18, 2018",
"product_group": "Book",
"qlt": "used",
}
result = clean_amazon_metadata_for_load(amazon)
# TODO: implement and test edition number
assert isinstance(result['publishers'], list)
assert (
result['cover']
== 'https://images-na.ssl-images-amazon.com/images/I/51XKo3FsUyL.jpg'
)
assert result['authors'][0]['name'] == 'Rachel Carson'
assert result.get('isbn') is None
assert result.get('isbn_13') == ['9780190906764']
assert result.get('isbn_10') == ['0190906766']
assert result.get('identifiers') is None # No Amazon id present
assert result['source_records'] == ['amazon:0190906766']
assert result['publish_date'] == 'Dec 18, 2018'
assert result['physical_format'] == 'paperback'
assert result['number_of_pages'] == '256'
assert result.get('price') is None
assert result.get('qlt') is None
assert result.get('offer_summary') is None
amazon_titles = [
# Original title, title, subtitle
['Test Title', 'Test Title', None],
[
'Killers of the Flower Moon: The Osage Murders and the Birth of the FBI',
'Killers of the Flower Moon',
'The Osage Murders and the Birth of the FBI',
],
['Pachinko (National Book Award Finalist)', 'Pachinko', None],
['Trapped in a Video Game (Book 1) (Volume 1)', 'Trapped in a Video Game', None],
[
"An American Marriage (Oprah's Book Club): A Novel",
'An American Marriage',
'A Novel',
],
['A Novel (German Edition)', 'A Novel', None],
[
'Vietnam Travel Guide 2019: Ho Chi Minh City - First Journey : 10 Tips For an Amazing Trip',
'Vietnam Travel Guide 2019 : Ho Chi Minh City - First Journey',
'10 Tips For an Amazing Trip',
],
[
'Secrets of Adobe(r) Acrobat(r) 7. 150 Best Practices and Tips (Russian Edition)',
'Secrets of Adobe Acrobat 7. 150 Best Practices and Tips',
None,
],
[
'Last Days at Hot Slit: The Radical Feminism of Andrea Dworkin (Semiotext(e) / Native Agents)',
'Last Days at Hot Slit',
'The Radical Feminism of Andrea Dworkin',
],
[
'Bloody Times: The Funeral of Abraham Lincoln and the Manhunt for Jefferson Davis',
'Bloody Times',
'The Funeral of Abraham Lincoln and the Manhunt for Jefferson Davis',
],
]
@pytest.mark.parametrize('amazon,title,subtitle', amazon_titles)
def test_split_amazon_title(amazon, title, subtitle):
assert split_amazon_title(amazon) == (title, subtitle)
def test_clean_amazon_metadata_for_load_subtitle():
amazon = {
"publishers": ["Vintage"],
"price": "$4.12 (used)",
"physical_format": "paperback",
"edition": "Reprint",
"authors": [{"name": "David Grann"}],
"isbn_13": ["9780307742483"],
"price_amt": "4.12",
"source_records": ["amazon:0307742482"],
"title": "Killers of the Flower Moon: The Osage Murders and the Birth of the FBI",
"url": "https://www.amazon.com/dp/0307742482/?tag=internetarchi-20",
"offer_summary": {
"lowest_new": 869,
"amazon_offers": 1,
"total_new": 57,
"lowest_used": 412,
"total_collectible": 2,
"total_used": 133,
"lowest_collectible": 1475,
},
"number_of_pages": "400",
"cover": "https://images-na.ssl-images-amazon.com/images/I/51PP3iTK8DL.jpg",
"languages": ["english"],
"isbn_10": ["0307742482"],
"publish_date": "Apr 03, 2018",
"product_group": "Book",
"qlt": "used",
}
result = clean_amazon_metadata_for_load(amazon)
assert result['title'] == 'Killers of the Flower Moon'
assert result.get('subtitle') == 'The Osage Murders and the Birth of the FBI'
assert (
result.get('full_title')
== 'Killers of the Flower Moon : The Osage Murders and the Birth of the FBI'
)
    # TODO: test for, and implement, languages
def test_betterworldbooks_fmt():
isbn = '9780393062274'
bad_data = betterworldbooks_fmt(isbn)
assert bad_data.get('isbn') == isbn
assert bad_data.get('price') is None
assert bad_data.get('price_amt') is None
assert bad_data.get('qlt') is None
# Test cases to add:
# Multiple authors
def test_get_amazon_metadata() -> None:
"""
Mock a reply from the Amazon Products API so we can do a basic test for
get_amazon_metadata() and cached_get_amazon_metadata().
"""
class MockRequests:
def get(self):
pass
def raise_for_status(self):
return True
def json(self):
return mock_response
mock_response = {
'status': 'success',
'hit': {
'url': 'https://www.amazon.com/dp/059035342X/?tag=internetarchi-20',
'source_records': ['amazon:059035342X'],
'isbn_10': ['059035342X'],
'isbn_13': ['9780590353427'],
'price': '$5.10',
'price_amt': 509,
'title': "Harry Potter and the Sorcerer's Stone",
'cover': 'https://m.media-amazon.com/images/I/51Wbz5GypgL._SL500_.jpg',
'authors': [{'name': 'Rowling, J.K.'}, {'name': 'GrandPr_, Mary'}],
'publishers': ['Scholastic'],
'number_of_pages': 309,
'edition_num': '1',
'publish_date': 'Sep 02, 1998',
'product_group': 'Book',
'physical_format': 'paperback',
},
}
expected = {
'url': 'https://www.amazon.com/dp/059035342X/?tag=internetarchi-20',
'source_records': ['amazon:059035342X'],
'isbn_10': ['059035342X'],
'isbn_13': ['9780590353427'],
'price': '$5.10',
'price_amt': 509,
'title': "Harry Potter and the Sorcerer's Stone",
'cover': 'https://m.media-amazon.com/images/I/51Wbz5GypgL._SL500_.jpg',
'authors': [{'name': 'Rowling, J.K.'}, {'name': 'GrandPr_, Mary'}],
'publishers': ['Scholastic'],
'number_of_pages': 309,
'edition_num': '1',
'publish_date': 'Sep 02, 1998',
'product_group': 'Book',
'physical_format': 'paperback',
}
isbn = "059035342X"
with (
patch("requests.get", return_value=MockRequests()),
patch("openlibrary.core.vendors.affiliate_server_url", new=True),
):
got = get_amazon_metadata(id_=isbn, id_type="isbn")
assert got == expected
@dataclass
class ProductGroup:
display_value: str | None
@dataclass
class Binding:
display_value: str | None
@dataclass
class Classifications:
product_group: ProductGroup | None
binding: Binding
@dataclass
class ItemInfo:
classifications: Classifications | None
content_info: str
by_line_info: str
title: str
@dataclass
class AmazonAPIReply:
item_info: ItemInfo
images: str
offers: str
asin: str
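# These dataclasses mimic just enough of the Amazon Product Advertising API item shape
# for AmazonAPI.serialize() to traverse in the DVD-filtering tests below.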
@pytest.mark.parametrize(
("product_group", "expected"),
[
('dvd', {}),
('DVD', {}),
('Dvd', {}),
],
)
def test_clean_amazon_metadata_does_not_load_DVDS_product_group(
product_group, expected
) -> None:
"""Ensure data load does not load dvds and relies on fake API response objects"""
dvd_product_group = ProductGroup(product_group)
classification = Classifications(
product_group=dvd_product_group, binding=Binding('')
)
item_info = ItemInfo(
classifications=classification, content_info='', by_line_info='', title=''
)
amazon_metadata = AmazonAPIReply(
item_info=item_info,
images='',
offers='',
asin='',
)
result = AmazonAPI.serialize(amazon_metadata)
assert result == expected
@pytest.mark.parametrize(
("physical_format", "expected"),
[
('dvd', {}),
('DVD', {}),
('Dvd', {}),
],
)
def test_clean_amazon_metadata_does_not_load_DVDS_physical_format(
physical_format, expected
) -> None:
dvd_product_group = ProductGroup('isolate_physical_format')
binding = Binding(physical_format)
classification = Classifications(product_group=dvd_product_group, binding=binding)
item_info = ItemInfo(
classifications=classification, content_info='', by_line_info='', title=''
)
amazon_metadata = AmazonAPIReply(
item_info=item_info,
images='',
offers='',
asin='',
)
result = AmazonAPI.serialize(amazon_metadata)
assert result == expected
@pytest.mark.parametrize(
("physical_format", "product_group", "expected"),
[
('dvd', 'dvd', True),
(None, None, False),
('Book', 'Book', False),
('DVD', None, True),
('Dvd', None, True),
('dvd', None, True),
('Book', 'dvd', True),
(None, 'dvd', True),
(None, 'Book', False),
('dvd', 'book', True),
],
)
def test_is_dvd(physical_format, product_group, expected):
book = {
'physical_format': physical_format,
'product_group': product_group,
}
got = is_dvd(book)
assert got is expected
| 12,035 | Python | .py | 337 | 28.593472 | 103 | 0.596913 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
295 | test_cache.py | internetarchive_openlibrary/openlibrary/tests/core/test_cache.py | import time
from openlibrary.core import cache
from openlibrary.mocks import mock_memcache
class Test_memcache_memoize:
def test_encode_args(self):
m = cache.memcache_memoize(None, key_prefix="foo")
assert m.encode_args([]) == ''
assert m.encode_args(["a"]) == '"a"'
assert m.encode_args([1]) == '1'
assert m.encode_args(["a", 1]) == '"a",1'
assert m.encode_args([{"a": 1}]) == '{"a":1}'
assert m.encode_args([["a", 1]]) == '["a",1]'
def test_generate_key_prefix(self):
def foo():
pass
m = cache.memcache_memoize(foo)
assert m.key_prefix[:4] == "foo_"
def test_random_string(self):
m = cache.memcache_memoize(None, "foo")
assert m._random_string(0) == ""
s1 = m._random_string(1)
assert isinstance(s1, str)
assert len(s1) == 1
s10 = m._random_string(10)
assert isinstance(s10, str)
assert len(s10) == 10
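    # Helper: memoized square() wired to a mock in-memory memcache client.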
def square_memoize(self):
def square(x):
return x * x
m = cache.memcache_memoize(square, key_prefix="square")
m._memcache = mock_memcache.Client([])
return m
def test_call(self):
m = self.square_memoize()
s = m.stats
assert m(10) == 100
assert [s.calls, s.hits, s.updates, s.async_updates] == [1, 0, 1, 0]
assert m(10) == 100
assert [s.calls, s.hits, s.updates, s.async_updates] == [2, 1, 1, 0]
def test_update_async(self):
m = self.square_memoize()
m.update_async(20)
m.join_threads()
assert m.memcache_get([20], {})[0] == 400
def test_timeout(self, monkeytime):
m = self.square_memoize()
m.timeout = 0.1
s = m.stats
assert m(10) == 100
time.sleep(0.1)
assert m(10) == 100
assert [s.calls, s.hits, s.updates, s.async_updates] == [2, 1, 1, 0]
time.sleep(0.01)
assert m(10) == 100
assert [s.calls, s.hits, s.updates, s.async_updates] == [3, 2, 1, 1]
def test_delete(self):
m = self.square_memoize()
m(10)
m(10)
assert m.stats.updates == 1
        # This should clear the cached entry, so the next call recomputes and updates the cache.
m(10, _cache="delete")
m(10)
assert m.stats.updates == 2
class Test_memoize:
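    # Exercises cache.memoize with the in-process "memory" engine; entries land in cache.memory_cache.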
def teardown_method(self, method):
cache.memory_cache.clear()
def get(self, key):
return cache.memory_cache.get(key)
def set(self, key, value):
cache.memory_cache.set(key, value)
def test_signatures(self):
def square(x):
"""Returns square x."""
return x * x
msquare = cache.memoize(engine="memory", key="square")(square)
assert msquare.__name__ == square.__name__
assert msquare.__doc__ == square.__doc__
def test_cache(self):
@cache.memoize(engine="memory", key="square")
def square(x):
return x * x
assert square(2) == 4
assert self.get("square-2") == 4
        # It should read from the cache instead of recomputing when an entry is already present
self.set('square-42', 43)
assert square(42) == 43
def test_cache_with_tuple_keys(self):
@cache.memoize(engine="memory", key=lambda x: (str(x), "square"))
def square(x):
return x * x
@cache.memoize(engine="memory", key=lambda x: (str(x), "double"))
def double(x):
return x + x
assert self.get("3") is None
assert square(3) == 9
assert self.get("3") == {"square": 9}
assert double(3) == 6
assert self.get("3") == {"square": 9, "double": 6}
class Test_method_memoize:
def test_handles_no_args(self):
class A:
def __init__(self):
self.result = 0
@cache.method_memoize
def foo(self):
self.result += 1
return self.result
a = A()
assert a.foo() == 1
assert a.foo() == 1
assert a.result == 1
def test_handles_args(self):
class A:
def __init__(self):
self.result = 1
@cache.method_memoize
def foo(self, multiplier):
self.result *= multiplier
return self.result
a = A()
assert a.foo(2) == 2
assert a.foo(2) == 2
assert a.result == 2
assert a.foo(3) == 6
assert a.foo(2) == 2
assert a.result == 6
| 4,564 | Python | .py | 127 | 26.685039 | 89 | 0.538812 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
296 | test_lending.py | internetarchive_openlibrary/openlibrary/tests/core/test_lending.py | from unittest.mock import Mock, patch
from openlibrary.core import lending
class TestAddAvailability:
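    # add_availability() should accept whichever identifier key a doc carries: 'ocaid', 'identifier', or 'ia'.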
def test_reads_ocaids(self, monkeypatch):
def mock_get_availability_of_ocaids(ocaids):
return {'foo': {'status': 'available'}}
monkeypatch.setattr(
lending, "get_availability_of_ocaids", mock_get_availability_of_ocaids
)
f = lending.add_availability
assert f([{'ocaid': 'foo'}]) == [
{'ocaid': 'foo', 'availability': {'status': 'available'}}
]
assert f([{'identifier': 'foo'}]) == [
{'identifier': 'foo', 'availability': {'status': 'available'}}
]
assert f([{'ia': 'foo'}]) == [
{'ia': 'foo', 'availability': {'status': 'available'}}
]
assert f([{'ia': ['foo']}]) == [
{'ia': ['foo'], 'availability': {'status': 'available'}}
]
def test_handles_ocaid_none(self):
f = lending.add_availability
assert f([{}]) == [{}]
def test_handles_availability_none(self, monkeypatch):
def mock_get_availability_of_ocaids(ocaids):
return {'foo': {'status': 'error'}}
monkeypatch.setattr(
lending, "get_availability_of_ocaids", mock_get_availability_of_ocaids
)
f = lending.add_availability
r = f([{'ocaid': 'foo'}])
print(r)
assert r[0]['availability']['status'] == 'error'
class TestGetAvailability:
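    # get_availability() caches per-identifier results, so repeat lookups should not hit the API again.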
def test_cache(self):
with patch("openlibrary.core.lending.requests.get") as mock_get:
mock_get.return_value = Mock()
mock_get.return_value.json.return_value = {
"responses": {"foo": {"status": "open"}}
}
foo_expected = {
"status": "open",
"identifier": "foo",
"is_restricted": False,
"is_browseable": False,
"__src__": 'core.models.lending.get_availability',
}
bar_expected = {
"status": "error",
"identifier": "bar",
"is_restricted": True,
"is_browseable": False,
"__src__": 'core.models.lending.get_availability',
}
r = lending.get_availability("identifier", ["foo"])
assert mock_get.call_count == 1
assert r == {"foo": foo_expected}
# Should not make a call to the API again
r2 = lending.get_availability("identifier", ["foo"])
assert mock_get.call_count == 1
assert r2 == {"foo": foo_expected}
            # Now it should make one more call, for just the new identifier
mock_get.return_value.json.return_value = {
"responses": {"bar": {"status": "error"}}
}
r3 = lending.get_availability("identifier", ["foo", "bar"])
assert mock_get.call_count == 2
assert mock_get.call_args[1]['params']['identifier'] == "bar"
assert r3 == {"foo": foo_expected, "bar": bar_expected}
| 3,084 | Python | .py | 71 | 31.915493 | 82 | 0.528019 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
297 | test_waitinglist.py | internetarchive_openlibrary/openlibrary/tests/core/test_waitinglist.py | from openlibrary.core.waitinglist import WaitingLoan
from openlibrary.core import lending
import json
import pytest
class TestWaitingLoan:
def test_new(self, monkeypatch):
user_key = '/people/user1'
identifier = 'foobar'
monkeypatch.setattr(
lending.ia_lending_api, 'join_waitinglist', lambda identifier, userid: True
)
monkeypatch.setattr(
lending.ia_lending_api, 'query', lambda **kw: [({'status': 'waiting'})]
)
        # POSTs to the API to join the waiting list, then queries ia_lending_api for the result
w = WaitingLoan.new(
user_key=user_key, identifier=identifier, itemname='@ol_foobar'
)
assert w is not None
assert w['status'] == 'waiting'
@pytest.mark.xfail(run=False)
def test_update(self):
w = WaitingLoan.new(user_key="U1", identifier="B1")
assert w['status'] == 'waiting'
w.update(status='available')
assert w['status'] == 'available'
w2 = WaitingLoan.find(user_key="U1", identifier="B1")
assert w2['status'] == 'available'
@pytest.mark.xfail(run=False)
def test_dict(self):
user_key = '/people/user1'
book_key = '/books/OL1234M'
w = WaitingLoan.new(user_key=user_key, identifier=book_key)
# ensure that w.dict() is JSON-able
json.dumps(w.dict())
def test_prune_expired(self):
        # prune_expired is currently a no-op and simply returns None
assert WaitingLoan.prune_expired() is None
| 1,526 | Python | .py | 38 | 32.368421 | 89 | 0.633603 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
298 | test_fulltext.py | internetarchive_openlibrary/openlibrary/tests/core/test_fulltext.py | from json.decoder import JSONDecodeError
from unittest.mock import Mock, patch
import requests
from infogami import config
from openlibrary.core import fulltext
class Test_fulltext_search_api:
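    # Covers the error paths: missing search config, an HTTP error from the backend, and a non-JSON response body.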
def test_no_config(self):
response = fulltext.fulltext_search_api({})
assert response == {"error": "Unable to prepare search engine"}
def test_query_exception(self):
with patch("openlibrary.core.fulltext.requests.get") as mock_get:
config.plugin_inside = {"search_endpoint": "mock"}
raiser = Mock(
side_effect=requests.exceptions.HTTPError("Unable to Connect")
)
mock_response = Mock()
mock_response.raise_for_status = raiser
mock_get.return_value = mock_response
response = fulltext.fulltext_search_api({"q": "hello"})
assert response == {"error": "Unable to query search engine"}
def test_bad_json(self):
with patch("openlibrary.core.fulltext.requests.get") as mock_get:
config.plugin_inside = {"search_endpoint": "mock"}
mock_response = Mock(
json=Mock(side_effect=JSONDecodeError('Not JSON', 'Not JSON', 0))
)
mock_get.return_value = mock_response
response = fulltext.fulltext_search_api({"q": "hello"})
assert response == {"error": "Error converting search engine data to JSON"}
| 1,426 | Python | .py | 29 | 39.586207 | 87 | 0.641523 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
299 | test_lists_engine.py | internetarchive_openlibrary/openlibrary/tests/core/test_lists_engine.py | from openlibrary.core.lists import engine
def test_reduce():
    # Each input is [works, editions, ebooks, last_modified, subject info]; reduce() sums the
    # counts, keeps the latest modification date, and tallies subjects.
    d1 = [1, 2, 1, "2010-11-11 10:20:30", {"subjects": ["Love", "Hate"]}]
    d2 = [1, 1, 0, "2009-01-02 10:20:30", {"subjects": ["Love"]}]
    assert engine.reduce([d1, d2]) == {
        "works": 2,
        "editions": 3,
        "ebooks": 1,
        "last_modified": "2010-11-11 10:20:30",
        "subjects": [
            {"name": "Love", "key": "subject:love", "count": 2},
            {"name": "Hate", "key": "subject:hate", "count": 1},
        ],
    }
| 600 | Python | .py | 15 | 29.466667 | 77 | 0.457045 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |