column              type           min - max length / classes
------------------  -------------  ---------------------------
nwo                 stringlengths  5 - 58
sha                 stringlengths  40 - 40
path                stringlengths  5 - 172
language            stringclasses  1 value
identifier          stringlengths  1 - 100
parameters          stringlengths  2 - 3.5k
argument_list       stringclasses  1 value
return_statement    stringlengths  0 - 21.5k
docstring           stringlengths  2 - 17k
docstring_summary   stringlengths  0 - 6.58k
docstring_tokens    sequence       -
function            stringlengths  35 - 55.6k
function_tokens     sequence       -
url                 stringlengths  89 - 269
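The schema above matches a CodeSearchNet-style corpus of Python functions: one row per function, with the repo slug (`nwo`, "name with owner"), commit `sha`, file `path`, the extracted `function` source and its tokenization, and a permalink `url`. A minimal sketch of iterating such rows, assuming they are stored as JSON lines keyed by the column names above (the file name `functions.jsonl` is hypothetical):

    import json

    # Hypothetical file name; assumes one JSON object per line whose keys
    # are the column names from the schema above.
    with open("functions.jsonl", encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)
            # e.g. "odoo/odoo" odoo/models.py BaseModel._export_rows
            print(row["nwo"], row["path"], row["identifier"])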
redapple0204/my-boring-python
1ab378e9d4f39ad920ff542ef3b2db68f0575a98
pythonenv3.8/lib/python3.8/site-packages/pip/_internal/utils/temp_dir.py
python
AdjacentTempDirectory._generate_names
(cls, name)
Generates a series of temporary names. The algorithm replaces the leading characters in the name with ones that are valid filesystem characters, but are not valid package names (for both Python and pip definitions of package).
Generates a series of temporary names.
[ "Generates", "a", "series", "of", "temporary", "names", "." ]
def _generate_names(cls, name):
    """Generates a series of temporary names.

    The algorithm replaces the leading characters in the name
    with ones that are valid filesystem characters, but are not
    valid package names (for both Python and pip definitions of
    package).
    """
    for i in range(1, len(name)):
        for candidate in itertools.combinations_with_replacement(
                cls.LEADING_CHARS, i - 1):
            new_name = '~' + ''.join(candidate) + name[i:]
            if new_name != name:
                yield new_name

    # If we make it this far, we will have to make a longer name
    for i in range(len(cls.LEADING_CHARS)):
        for candidate in itertools.combinations_with_replacement(
                cls.LEADING_CHARS, i):
            new_name = '~' + ''.join(candidate) + name
            if new_name != name:
                yield new_name
[ "def", "_generate_names", "(", "cls", ",", "name", ")", ":", "for", "i", "in", "range", "(", "1", ",", "len", "(", "name", ")", ")", ":", "for", "candidate", "in", "itertools", ".", "combinations_with_replacement", "(", "cls", ".", "LEADING_CHARS", ",", "i", "-", "1", ")", ":", "new_name", "=", "'~'", "+", "''", ".", "join", "(", "candidate", ")", "+", "name", "[", "i", ":", "]", "if", "new_name", "!=", "name", ":", "yield", "new_name", "# If we make it this far, we will have to make a longer name", "for", "i", "in", "range", "(", "len", "(", "cls", ".", "LEADING_CHARS", ")", ")", ":", "for", "candidate", "in", "itertools", ".", "combinations_with_replacement", "(", "cls", ".", "LEADING_CHARS", ",", "i", ")", ":", "new_name", "=", "'~'", "+", "''", ".", "join", "(", "candidate", ")", "+", "name", "if", "new_name", "!=", "name", ":", "yield", "new_name" ]
https://github.com/redapple0204/my-boring-python/blob/1ab378e9d4f39ad920ff542ef3b2db68f0575a98/pythonenv3.8/lib/python3.8/site-packages/pip/_internal/utils/temp_dir.py#L113-L134
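A runnable sketch of the generator above; the `LEADING_CHARS` value here is an assumption for illustration, not necessarily pip's real constant:

    import itertools

    class AdjacentTempDirectory:
        LEADING_CHARS = "-~.="  # assumed character set, for illustration only

        @classmethod
        def _generate_names(cls, name):
            # same body as the record above
            for i in range(1, len(name)):
                for candidate in itertools.combinations_with_replacement(
                        cls.LEADING_CHARS, i - 1):
                    new_name = '~' + ''.join(candidate) + name[i:]
                    if new_name != name:
                        yield new_name
            for i in range(len(cls.LEADING_CHARS)):
                for candidate in itertools.combinations_with_replacement(
                        cls.LEADING_CHARS, i):
                    new_name = '~' + ''.join(candidate) + name
                    if new_name != name:
                        yield new_name

    gen = AdjacentTempDirectory._generate_names("pkg")
    print(list(itertools.islice(gen, 5)))
    # ['~kg', '~-g', '~~g', '~.g', '~=g'] with the assumed LEADING_CHARS

Every name from the first loop keeps the original's length (one leading character is consumed per padding character added); only the fallback loop produces longer names.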
HaliteChallenge/Halite-II
5cf95b4aef38621a44a503f90399af598fb51214
apiserver/apiserver/web/util.py
python
hackathon_status
(start_date, end_date)
return status
Return the status of the hackathon based on its start/end dates. `end_date` may be null (for ongoing hackathons).
Return the status of the hackathon based on its start/end dates.
[ "Return", "the", "status", "of", "the", "hackathon", "based", "on", "its", "start", "/", "end", "dates", "." ]
def hackathon_status(start_date, end_date):
    """
    Return the status of the hackathon based on its start/end dates.

    `end_date` may be null (for ongoing hackathons).
    """
    status = "open"
    if end_date and end_date < datetime.datetime.now():
        status = "closed"
    elif start_date > datetime.datetime.now():
        status = "upcoming"
    return status
[ "def", "hackathon_status", "(", "start_date", ",", "end_date", ")", ":", "status", "=", "\"open\"", "if", "end_date", "and", "end_date", "<", "datetime", ".", "datetime", ".", "now", "(", ")", ":", "status", "=", "\"closed\"", "elif", "start_date", ">", "datetime", ".", "datetime", ".", "now", "(", ")", ":", "status", "=", "\"upcoming\"", "return", "status" ]
https://github.com/HaliteChallenge/Halite-II/blob/5cf95b4aef38621a44a503f90399af598fb51214/apiserver/apiserver/web/util.py#L289-L300
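A quick check of the three states (assumes `hackathon_status` from the record above is in scope):

    import datetime

    day = datetime.timedelta(days=1)
    now = datetime.datetime.now()

    print(hackathon_status(now - day, now + day))  # "open": started, not yet ended
    print(hackathon_status(now - day, now - day))  # "closed": end date in the past
    print(hackathon_status(now + day, None))       # "upcoming": starts in the future
    print(hackathon_status(now - day, None))       # "open": ongoing, no end date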
odoo/odoo
8de8c196a137f4ebbf67d7c7c83fee36f873f5c8
addons/web_editor/models/ir_translation.py
python
IrTranslation.save_html
(self, value)
return self.write({'value': value})
Convert the HTML fragment ``value`` to XML if necessary, and write it as the value of translation ``self``.
Convert the HTML fragment ``value`` to XML if necessary, and write it as the value of translation ``self``.
[ "Convert", "the", "HTML", "fragment", "value", "to", "XML", "if", "necessary", "and", "write", "it", "as", "the", "value", "of", "translation", "self", "." ]
def save_html(self, value):
    """ Convert the HTML fragment ``value`` to XML if necessary, and
        write it as the value of translation ``self``.
    """
    assert len(self) == 1 and self.type == 'model_terms'
    mname, fname = self.name.split(',')
    field = self.env[mname]._fields[fname]
    if field.translate == xml_translate:
        # wrap value inside a div and parse it as HTML
        div = "<div>%s</div>" % encode(value)
        root = etree.fromstring(div, etree.HTMLParser(encoding='utf-8'))
        # root is html > body > div
        # serialize div as XML and discard surrounding tags
        value = etree.tostring(root[0][0], encoding='utf-8')[5:-6]
    elif field.translate == html_translate:
        # wrap value inside a div and parse it as HTML
        div = "<div>%s</div>" % encode(value)
        root = etree.fromstring(div, etree.HTMLParser(encoding='utf-8'))
        # root is html > body > div
        # serialize div as HTML and discard surrounding tags
        value = etree.tostring(root[0][0], encoding='utf-8', method='html')[5:-6]
    return self.write({'value': value})
[ "def", "save_html", "(", "self", ",", "value", ")", ":", "assert", "len", "(", "self", ")", "==", "1", "and", "self", ".", "type", "==", "'model_terms'", "mname", ",", "fname", "=", "self", ".", "name", ".", "split", "(", "','", ")", "field", "=", "self", ".", "env", "[", "mname", "]", ".", "_fields", "[", "fname", "]", "if", "field", ".", "translate", "==", "xml_translate", ":", "# wrap value inside a div and parse it as HTML", "div", "=", "\"<div>%s</div>\"", "%", "encode", "(", "value", ")", "root", "=", "etree", ".", "fromstring", "(", "div", ",", "etree", ".", "HTMLParser", "(", "encoding", "=", "'utf-8'", ")", ")", "# root is html > body > div", "# serialize div as XML and discard surrounding tags", "value", "=", "etree", ".", "tostring", "(", "root", "[", "0", "]", "[", "0", "]", ",", "encoding", "=", "'utf-8'", ")", "[", "5", ":", "-", "6", "]", "elif", "field", ".", "translate", "==", "html_translate", ":", "# wrap value inside a div and parse it as HTML", "div", "=", "\"<div>%s</div>\"", "%", "encode", "(", "value", ")", "root", "=", "etree", ".", "fromstring", "(", "div", ",", "etree", ".", "HTMLParser", "(", "encoding", "=", "'utf-8'", ")", ")", "# root is html > body > div", "# serialize div as HTML and discard surrounding tags", "value", "=", "etree", ".", "tostring", "(", "root", "[", "0", "]", "[", "0", "]", ",", "encoding", "=", "'utf-8'", ",", "method", "=", "'html'", ")", "[", "5", ":", "-", "6", "]", "return", "self", ".", "write", "(", "{", "'value'", ":", "value", "}", ")" ]
https://github.com/odoo/odoo/blob/8de8c196a137f4ebbf67d7c7c83fee36f873f5c8/addons/web_editor/models/ir_translation.py#L25-L46
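The `[5:-6]` slices strip the serialized `<div>` / `</div>` wrapper (5 and 6 bytes respectively). A standalone sketch of the same wrap-parse-serialize trick, using only lxml:

    from lxml import etree

    fragment = "Hello <br> world"  # HTML that is not well-formed XML

    # Wrap in a div so the HTML parser has a single element to hold the fragment.
    div = "<div>%s</div>" % fragment
    root = etree.fromstring(div, etree.HTMLParser(encoding='utf-8'))

    # root is html > body > div; serialize the div as XML, then strip the
    # surrounding <div>...</div> tags, leaving a well-formed XML fragment.
    xml = etree.tostring(root[0][0], encoding='utf-8')[5:-6]
    print(xml)  # b'Hello <br/> world'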
tain335/tain335
21c08048e6599b5f18d7fd6acfc1e88ece226d09
Lupy-0.2.1/lupy/search/boolean.py
python
BooleanQuery.add
(self, query, required, prohibited)
Adds a clause to a boolean query. Clauses may be: C{required} which means that documents which I{do not} match this sub-query will I{not} match the boolean query; C{prohibited} which means that documents which I{do} match this sub-query will I{not} match the boolean query; or neither, in which case matched documents are neither prohibited from nor required to match the sub-query. It is an error to specify a clause as both C{required} and C{prohibited}.
Adds a clause to a boolean query. Clauses may be: C{required} which means that documents which I{do not} match this sub-query will I{not} match the boolean query; C{prohibited} which means that documents which I{do} match this sub-query will I{not} match the boolean query; or neither, in which case matched documents are neither prohibited from nor required to match the sub-query. It is an error to specify a clause as both C{required} and C{prohibited}.
[ "Adds", "a", "clause", "to", "a", "boolean", "query", ".", "Clauses", "may", "be", ":", "C", "{", "required", "}", "which", "means", "that", "documents", "which", "I", "{", "do", "not", "}", "match", "this", "sub", "-", "query", "will", "I", "{", "not", "}", "match", "the", "boolean", "query", ";", "C", "{", "prohibited", "}", "which", "means", "that", "documents", "which", "I", "{", "do", "}", "match", "this", "sub", "-", "query", "will", "I", "{", "not", "}", "match", "the", "boolean", "query", ";", "or", "neither", "in", "which", "case", "matched", "documents", "are", "neither", "prohibited", "from", "nor", "required", "to", "match", "the", "sub", "-", "query", ".", "It", "is", "an", "error", "to", "specify", "a", "clause", "as", "both", "C", "{", "required", "}", "and", "C", "{", "prohibited", "}", "." ]
def add(self, query, required, prohibited):
    """Adds a clause to a boolean query.  Clauses may be:
    C{required} which means that documents which I{do not} match this
    sub-query will I{not} match the boolean query;

    C{prohibited} which means that documents which I{do} match this
    sub-query will I{not} match the boolean query; or

    neither, in which case matched documents are neither prohibited
    from nor required to match the sub-query.

    It is an error to specify a clause as both C{required} and
    C{prohibited}."""
    self.clauses.append(BooleanClause(query, required, prohibited))
[ "def", "add", "(", "self", ",", "query", ",", "required", ",", "prohibited", ")", ":", "self", ".", "clauses", ".", "append", "(", "BooleanClause", "(", "query", ",", "required", ",", "prohibited", ")", ")" ]
https://github.com/tain335/tain335/blob/21c08048e6599b5f18d7fd6acfc1e88ece226d09/Lupy-0.2.1/lupy/search/boolean.py#L26-L40
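A toy model of the clause semantics in the docstring, with sets standing in for the documents each sub-query matches (illustrative only, not Lupy's actual matching or scoring code):

    def boolean_match(all_docs, clauses):
        """clauses: iterable of (matched_docs, required, prohibited)."""
        result = set(all_docs)
        for matched, required, prohibited in clauses:
            assert not (required and prohibited)  # invalid per the docstring
            if required:
                result &= matched   # documents that do NOT match are dropped
            elif prohibited:
                result -= matched   # documents that DO match are dropped
            # neither: optional clause; influences ranking, not membership
        return result

    docs = {1, 2, 3, 4}
    print(boolean_match(docs, [({1, 2, 3}, True, False),    # required
                               ({3}, False, True)]))        # prohibited
    # {1, 2}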
replit-archive/jsrepl
36d79b6288ca5d26208e8bade2a168c6ebcb2376
extern/python/closured/lib/python2.7/pickle.py
python
Unpickler.load
(self)
Read a pickled object representation from the open file. Return the reconstituted object hierarchy specified in the file.
Read a pickled object representation from the open file.
[ "Read", "a", "pickled", "object", "representation", "from", "the", "open", "file", "." ]
def load(self):
    """Read a pickled object representation from the open file.

    Return the reconstituted object hierarchy specified in the file.
    """
    self.mark = object() # any new unique object
    self.stack = []
    self.append = self.stack.append
    read = self.read
    dispatch = self.dispatch
    try:
        while 1:
            key = read(1)
            dispatch[key](self)
    except _Stop, stopinst:
        return stopinst.value
[ "def", "load", "(", "self", ")", ":", "self", ".", "mark", "=", "object", "(", ")", "# any new unique object", "self", ".", "stack", "=", "[", "]", "self", ".", "append", "=", "self", ".", "stack", ".", "append", "read", "=", "self", ".", "read", "dispatch", "=", "self", ".", "dispatch", "try", ":", "while", "1", ":", "key", "=", "read", "(", "1", ")", "dispatch", "[", "key", "]", "(", "self", ")", "except", "_Stop", ",", "stopinst", ":", "return", "stopinst", ".", "value" ]
https://github.com/replit-archive/jsrepl/blob/36d79b6288ca5d26208e8bade2a168c6ebcb2376/extern/python/closured/lib/python2.7/pickle.py#L845-L860
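The loop is a classic opcode dispatch table (note the Python 2 `except _Stop, stopinst:` syntax in this vendored 2.7 copy). The same pattern in miniature, in modern Python, with made-up toy opcodes:

    import io

    class _Stop(Exception):
        """Control-flow exception carrying the final value, as in pickle."""
        def __init__(self, value):
            self.value = value

    stack = []

    def push_one(stream):    # toy opcode: push the constant 1
        stack.append(1)

    def add(stream):         # toy opcode: add the two top-of-stack values
        stack.append(stack.pop() + stack.pop())

    def stop(stream):        # toy STOP opcode: return the top of the stack
        raise _Stop(stack.pop())

    dispatch = {b'1': push_one, b'+': add, b'.': stop}

    def run(stream):
        try:
            while True:
                dispatch[stream.read(1)](stream)
        except _Stop as stopinst:
            return stopinst.value

    print(run(io.BytesIO(b'11+.')))  # 2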
Nexedi/erp5
44df1959c0e21576cf5e9803d602d95efb4b695b
product/Formulator/Field.py
python
ZMIField.manage_tales
(self, REQUEST)
Change TALES expressions.
Change TALES expressions.
[ "Change", "TALES", "expressions", "." ]
def manage_tales(self, REQUEST):
    """Change TALES expressions.
    """
    try:
        # validate the form and get results
        result = self.tales_form.validate(REQUEST)
    except ValidationError, err:
        if REQUEST:
            message = "Error: %s - %s" % (err.field.get_value('title'),
                                          err.error_text)
            return self.manage_talesForm(self,REQUEST,
                                         manage_tabs_message=message)
        else:
            raise

    self._edit_tales(result)

    if REQUEST:
        message="Content changed."
        return self.manage_talesForm(self, REQUEST,
                                     manage_tabs_message=message)
[ "def", "manage_tales", "(", "self", ",", "REQUEST", ")", ":", "try", ":", "# validate the form and get results", "result", "=", "self", ".", "tales_form", ".", "validate", "(", "REQUEST", ")", "except", "ValidationError", ",", "err", ":", "if", "REQUEST", ":", "message", "=", "\"Error: %s - %s\"", "%", "(", "err", ".", "field", ".", "get_value", "(", "'title'", ")", ",", "err", ".", "error_text", ")", "return", "self", ".", "manage_talesForm", "(", "self", ",", "REQUEST", ",", "manage_tabs_message", "=", "message", ")", "else", ":", "raise", "self", ".", "_edit_tales", "(", "result", ")", "if", "REQUEST", ":", "message", "=", "\"Content changed.\"", "return", "self", ".", "manage_talesForm", "(", "self", ",", "REQUEST", ",", "manage_tabs_message", "=", "message", ")" ]
https://github.com/Nexedi/erp5/blob/44df1959c0e21576cf5e9803d602d95efb4b695b/product/Formulator/Field.py#L618-L638
alex-cory/fasthacks
72b099f11df2e5640d61e55c80706c3b234eacbe
cli_modules/preview/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/util.py
python
looks_like_xml
(text)
Check if a doctype exists or if we have some tags.
Check if a doctype exists or if we have some tags.
[ "Check", "if", "a", "doctype", "exists", "or", "if", "we", "have", "some", "tags", "." ]
def looks_like_xml(text):
    """
    Check if a doctype exists or if we have some tags.
    """
    key = hash(text)
    try:
        return _looks_like_xml_cache[key]
    except KeyError:
        m = doctype_lookup_re.match(text)
        if m is not None:
            return True
        rv = tag_re.search(text[:1000]) is not None
        _looks_like_xml_cache[key] = rv
        return rv
[ "def", "looks_like_xml", "(", "text", ")", ":", "key", "=", "hash", "(", "text", ")", "try", ":", "return", "_looks_like_xml_cache", "[", "key", "]", "except", "KeyError", ":", "m", "=", "doctype_lookup_re", ".", "match", "(", "text", ")", "if", "m", "is", "not", "None", ":", "return", "True", "rv", "=", "tag_re", ".", "search", "(", "text", "[", ":", "1000", "]", ")", "is", "not", "None", "_looks_like_xml_cache", "[", "key", "]", "=", "rv", "return", "rv" ]
https://github.com/alex-cory/fasthacks/blob/72b099f11df2e5640d61e55c80706c3b234eacbe/cli_modules/preview/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/util.py#L193-L206
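A usage sketch; the import path is where Pygments exposes this helper, and the expected results assume `doctype_lookup_re` matches a leading doctype while `tag_re` looks for an open/close tag pair within the first 1000 characters:

    from pygments.util import looks_like_xml

    print(looks_like_xml('<!DOCTYPE html>\n<html></html>'))  # expected: True (doctype)
    print(looks_like_xml('<note><to>Tove</to></note>'))      # expected: True (tag pair)
    print(looks_like_xml('no angle brackets here'))          # expected: False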
Dieterbe/anthracite
10d5b54e21a79aa0abc66d638828f0f251beacb5
bottle.py
python
_ImportRedirect.__init__
(self, name, impmask)
Create a virtual package that redirects imports (see PEP 302).
Create a virtual package that redirects imports (see PEP 302).
[ "Create", "a", "virtual", "package", "that", "redirects", "imports", "(", "see", "PEP", "302", ")", "." ]
def __init__(self, name, impmask):
    ''' Create a virtual package that redirects imports (see PEP 302). '''
    self.name = name
    self.impmask = impmask
    self.module = sys.modules.setdefault(name, imp.new_module(name))
    self.module.__dict__.update({'__file__': __file__, '__path__': [],
                                 '__all__': [], '__loader__': self})
    sys.meta_path.append(self)
[ "def", "__init__", "(", "self", ",", "name", ",", "impmask", ")", ":", "self", ".", "name", "=", "name", "self", ".", "impmask", "=", "impmask", "self", ".", "module", "=", "sys", ".", "modules", ".", "setdefault", "(", "name", ",", "imp", ".", "new_module", "(", "name", ")", ")", "self", ".", "module", ".", "__dict__", ".", "update", "(", "{", "'__file__'", ":", "__file__", ",", "'__path__'", ":", "[", "]", ",", "'__all__'", ":", "[", "]", ",", "'__loader__'", ":", "self", "}", ")", "sys", ".", "meta_path", ".", "append", "(", "self", ")" ]
https://github.com/Dieterbe/anthracite/blob/10d5b54e21a79aa0abc66d638828f0f251beacb5/bottle.py#L1651-L1658
demi6od/ChromeFuzzer
4eaf1084d5f8fd20897706edf8b67bfbbd6380fc
PocSmplify/paimei-master/pida/function.py
python
function.render_node_udraw
(self, graph)
return super(function, self).render_node_udraw(graph)
Overload the default node.render_node_udraw() routine to create a custom label. Pass control to the default node renderer and then return the merged content. @type graph: pgraph.graph @param graph: Top level graph object containing the current node @rtype: String @return: Contents of rendered node.
Overload the default node.render_node_udraw() routine to create a custom label. Pass control to the default node renderer and then return the merged content.
[ "Overload", "the", "default", "node", ".", "render_node_udraw", "()", "routine", "to", "create", "a", "custom", "label", ".", "Pass", "control", "to", "the", "default", "node", "renderer", "and", "then", "return", "the", "merged", "content", "." ]
def render_node_udraw (self, graph):
    '''
    Overload the default node.render_node_udraw() routine to create a custom
    label. Pass control to the default node renderer and then return the
    merged content.

    @type  graph: pgraph.graph
    @param graph: Top level graph object containing the current node

    @rtype:  String
    @return: Contents of rendered node.
    '''

    if self.is_import:
        self.label = "%s" % (self.name)
    else:
        self.label  = "%08x %s\\n" % (self.ea_start, self.name)
        self.label += "size: %d" % (self.ea_end - self.ea_start)

    return super(function, self).render_node_udraw(graph)
[ "def", "render_node_udraw", "(", "self", ",", "graph", ")", ":", "if", "self", ".", "is_import", ":", "self", ".", "label", "=", "\"%s\"", "%", "(", "self", ".", "name", ")", "else", ":", "self", ".", "label", "=", "\"%08x %s\\\\n\"", "%", "(", "self", ".", "ea_start", ",", "self", ".", "name", ")", "self", ".", "label", "+=", "\"size: %d\"", "%", "(", "self", ".", "ea_end", "-", "self", ".", "ea_start", ")", "return", "super", "(", "function", ",", "self", ")", ".", "render_node_udraw", "(", "graph", ")" ]
https://github.com/demi6od/ChromeFuzzer/blob/4eaf1084d5f8fd20897706edf8b67bfbbd6380fc/PocSmplify/paimei-master/pida/function.py#L438-L456
odoo/odoo
8de8c196a137f4ebbf67d7c7c83fee36f873f5c8
addons/portal/controllers/mail.py
python
PortalChatter.portal_chatter_post
(self, res_model, res_id, message, attachment_ids=None, attachment_tokens=None, **kw)
Create a new `mail.message` with the given `message` and/or `attachment_ids` and return new message values. The message will be associated to the record `res_id` of the model `res_model`. The user must have access rights on this target document or must provide valid identifiers through `kw`. See `_message_post_helper`.
Create a new `mail.message` with the given `message` and/or `attachment_ids` and return new message values.
[ "Create", "a", "new", "mail", ".", "message", "with", "the", "given", "message", "and", "/", "or", "attachment_ids", "and", "return", "new", "message", "values", "." ]
def portal_chatter_post(self, res_model, res_id, message, attachment_ids=None, attachment_tokens=None, **kw):
    """Create a new `mail.message` with the given `message` and/or
    `attachment_ids` and return new message values.

    The message will be associated to the record `res_id` of the model
    `res_model`. The user must have access rights on this target document or
    must provide valid identifiers through `kw`. See `_message_post_helper`.
    """
    res_id = int(res_id)

    self._portal_post_check_attachments(attachment_ids, attachment_tokens)

    if message or attachment_ids:
        result = {'default_message': message}
        # message is received in plaintext and saved in html
        if message:
            message = plaintext2html(message)
        post_values = {
            'res_model': res_model,
            'res_id': res_id,
            'message': message,
            'send_after_commit': False,
            'attachment_ids': False,  # will be added afterward
        }
        post_values.update((fname, kw.get(fname)) for fname in self._portal_post_filter_params())
        message = _message_post_helper(**post_values)
        result.update({'default_message_id': message.id})

        if attachment_ids:
            # sudo write the attachment to bypass the read access
            # verification in mail message
            record = request.env[res_model].browse(res_id)
            message_values = {'res_id': res_id, 'model': res_model}
            attachments = record._message_post_process_attachments([], attachment_ids, message_values)
            if attachments.get('attachment_ids'):
                message.sudo().write(attachments)

            result.update({'default_attachment_ids': message.attachment_ids.sudo().read(['id', 'name', 'mimetype', 'file_size', 'access_token'])})
        return result
[ "def", "portal_chatter_post", "(", "self", ",", "res_model", ",", "res_id", ",", "message", ",", "attachment_ids", "=", "None", ",", "attachment_tokens", "=", "None", ",", "*", "*", "kw", ")", ":", "res_id", "=", "int", "(", "res_id", ")", "self", ".", "_portal_post_check_attachments", "(", "attachment_ids", ",", "attachment_tokens", ")", "if", "message", "or", "attachment_ids", ":", "result", "=", "{", "'default_message'", ":", "message", "}", "# message is received in plaintext and saved in html", "if", "message", ":", "message", "=", "plaintext2html", "(", "message", ")", "post_values", "=", "{", "'res_model'", ":", "res_model", ",", "'res_id'", ":", "res_id", ",", "'message'", ":", "message", ",", "'send_after_commit'", ":", "False", ",", "'attachment_ids'", ":", "False", ",", "# will be added afterward", "}", "post_values", ".", "update", "(", "(", "fname", ",", "kw", ".", "get", "(", "fname", ")", ")", "for", "fname", "in", "self", ".", "_portal_post_filter_params", "(", ")", ")", "message", "=", "_message_post_helper", "(", "*", "*", "post_values", ")", "result", ".", "update", "(", "{", "'default_message_id'", ":", "message", ".", "id", "}", ")", "if", "attachment_ids", ":", "# sudo write the attachment to bypass the read access", "# verification in mail message", "record", "=", "request", ".", "env", "[", "res_model", "]", ".", "browse", "(", "res_id", ")", "message_values", "=", "{", "'res_id'", ":", "res_id", ",", "'model'", ":", "res_model", "}", "attachments", "=", "record", ".", "_message_post_process_attachments", "(", "[", "]", ",", "attachment_ids", ",", "message_values", ")", "if", "attachments", ".", "get", "(", "'attachment_ids'", ")", ":", "message", ".", "sudo", "(", ")", ".", "write", "(", "attachments", ")", "result", ".", "update", "(", "{", "'default_attachment_ids'", ":", "message", ".", "attachment_ids", ".", "sudo", "(", ")", ".", "read", "(", "[", "'id'", ",", "'name'", ",", "'mimetype'", ",", "'file_size'", ",", "'access_token'", "]", ")", "}", ")", "return", "result" ]
https://github.com/odoo/odoo/blob/8de8c196a137f4ebbf67d7c7c83fee36f873f5c8/addons/portal/controllers/mail.py#L116-L154
Southpaw-TACTIC/TACTIC
ba9b87aef0ee3b3ea51446f25b285ebbca06f62c
3rd_party/python3/site-packages/cherrypy-18.1.2/cherrypy/_cplogging.py
python
LogManager.reopen_files
(self)
Close and reopen all file handlers.
Close and reopen all file handlers.
[ "Close", "and", "reopen", "all", "file", "handlers", "." ]
def reopen_files(self):
    """Close and reopen all file handlers."""
    for log in (self.error_log, self.access_log):
        for h in log.handlers:
            if isinstance(h, logging.FileHandler):
                h.acquire()
                h.stream.close()
                h.stream = open(h.baseFilename, h.mode)
                h.release()
[ "def", "reopen_files", "(", "self", ")", ":", "for", "log", "in", "(", "self", ".", "error_log", ",", "self", ".", "access_log", ")", ":", "for", "h", "in", "log", ".", "handlers", ":", "if", "isinstance", "(", "h", ",", "logging", ".", "FileHandler", ")", ":", "h", ".", "acquire", "(", ")", "h", ".", "stream", ".", "close", "(", ")", "h", ".", "stream", "=", "open", "(", "h", ".", "baseFilename", ",", "h", ".", "mode", ")", "h", ".", "release", "(", ")" ]
https://github.com/Southpaw-TACTIC/TACTIC/blob/ba9b87aef0ee3b3ea51446f25b285ebbca06f62c/3rd_party/python3/site-packages/cherrypy-18.1.2/cherrypy/_cplogging.py#L189-L197
odoo/odoo
8de8c196a137f4ebbf67d7c7c83fee36f873f5c8
odoo/models.py
python
BaseModel._export_rows
(self, fields, *, _is_toplevel_call=True)
return lines
Export fields of the records in ``self``. :param fields: list of lists of fields to traverse :param bool _is_toplevel_call: used when recursing, avoid using when calling from outside :return: list of lists of corresponding values
Export fields of the records in ``self``.
[ "Export", "fields", "of", "the", "records", "in", "self", "." ]
def _export_rows(self, fields, *, _is_toplevel_call=True):
    """ Export fields of the records in ``self``.

        :param fields: list of lists of fields to traverse
        :param bool _is_toplevel_call: used when recursing, avoid using when calling from outside
        :return: list of lists of corresponding values
    """
    import_compatible = self.env.context.get('import_compat', True)
    lines = []

    def splittor(rs):
        """ Splits the self recordset in batches of 1000 (to avoid
            entire-recordset-prefetch-effects) & removes the previous batch
            from the cache after it's been iterated in full
        """
        for idx in range(0, len(rs), 1000):
            sub = rs[idx:idx+1000]
            for rec in sub:
                yield rec
            rs.invalidate_cache(ids=sub.ids)
    if not _is_toplevel_call:
        splittor = lambda rs: rs

    # memory stable but ends up prefetching 275 fields (???)
    for record in splittor(self):
        # main line of record, initially empty
        current = [''] * len(fields)
        lines.append(current)

        # list of primary fields followed by secondary field(s)
        primary_done = []

        # process column by column
        for i, path in enumerate(fields):
            if not path:
                continue

            name = path[0]
            if name in primary_done:
                continue

            if name == '.id':
                current[i] = str(record.id)
            elif name == 'id':
                current[i] = (record._name, record.id)
            else:
                field = record._fields[name]
                value = record[name]

                # this part could be simpler, but it has to be done this way
                # in order to reproduce the former behavior
                if not isinstance(value, BaseModel):
                    current[i] = field.convert_to_export(value, record)
                else:
                    primary_done.append(name)
                    # recursively export the fields that follow name; use
                    # 'display_name' where no subfield is exported
                    fields2 = [(p[1:] or ['display_name']
                                if p and p[0] == name else [])
                               for p in fields]

                    # in import_compat mode, m2m should always be exported as
                    # a comma-separated list of xids or names in a single cell
                    if import_compatible and field.type == 'many2many':
                        index = None
                        # find out which subfield the user wants & its
                        # location as we might not get it as the first
                        # column we encounter
                        for name in ['id', 'name', 'display_name']:
                            with contextlib.suppress(ValueError):
                                index = fields2.index([name])
                                break
                        if index is None:
                            # not found anything, assume we just want the
                            # name_get in the first column
                            name = None
                            index = i

                        if name == 'id':
                            xml_ids = [xid for _, xid in value.__ensure_xml_id()]
                            current[index] = ','.join(xml_ids)
                        else:
                            current[index] = field.convert_to_export(value, record)
                        continue

                    lines2 = value._export_rows(fields2, _is_toplevel_call=False)
                    if lines2:
                        # merge first line with record's main line
                        for j, val in enumerate(lines2[0]):
                            if val or isinstance(val, (int, float)):
                                current[j] = val
                        # append the other lines at the end
                        lines += lines2[1:]
                    else:
                        current[i] = ''

    # if any xid should be exported, only do so at toplevel
    if _is_toplevel_call and any(f[-1] == 'id' for f in fields):
        bymodels = collections.defaultdict(set)
        xidmap = collections.defaultdict(list)
        # collect all the tuples in "lines" (along with their coordinates)
        for i, line in enumerate(lines):
            for j, cell in enumerate(line):
                if type(cell) is tuple:
                    bymodels[cell[0]].add(cell[1])
                    xidmap[cell].append((i, j))
        # for each model, xid-export everything and inject in matrix
        for model, ids in bymodels.items():
            for record, xid in self.env[model].browse(ids).__ensure_xml_id():
                for i, j in xidmap.pop((record._name, record.id)):
                    lines[i][j] = xid
        assert not xidmap, "failed to export xids for %s" % ', '.join('{}:{}' % it for it in xidmap.items())

    return lines
[ "def", "_export_rows", "(", "self", ",", "fields", ",", "*", ",", "_is_toplevel_call", "=", "True", ")", ":", "import_compatible", "=", "self", ".", "env", ".", "context", ".", "get", "(", "'import_compat'", ",", "True", ")", "lines", "=", "[", "]", "def", "splittor", "(", "rs", ")", ":", "\"\"\" Splits the self recordset in batches of 1000 (to avoid\n entire-recordset-prefetch-effects) & removes the previous batch\n from the cache after it's been iterated in full\n \"\"\"", "for", "idx", "in", "range", "(", "0", ",", "len", "(", "rs", ")", ",", "1000", ")", ":", "sub", "=", "rs", "[", "idx", ":", "idx", "+", "1000", "]", "for", "rec", "in", "sub", ":", "yield", "rec", "rs", ".", "invalidate_cache", "(", "ids", "=", "sub", ".", "ids", ")", "if", "not", "_is_toplevel_call", ":", "splittor", "=", "lambda", "rs", ":", "rs", "# memory stable but ends up prefetching 275 fields (???)", "for", "record", "in", "splittor", "(", "self", ")", ":", "# main line of record, initially empty", "current", "=", "[", "''", "]", "*", "len", "(", "fields", ")", "lines", ".", "append", "(", "current", ")", "# list of primary fields followed by secondary field(s)", "primary_done", "=", "[", "]", "# process column by column", "for", "i", ",", "path", "in", "enumerate", "(", "fields", ")", ":", "if", "not", "path", ":", "continue", "name", "=", "path", "[", "0", "]", "if", "name", "in", "primary_done", ":", "continue", "if", "name", "==", "'.id'", ":", "current", "[", "i", "]", "=", "str", "(", "record", ".", "id", ")", "elif", "name", "==", "'id'", ":", "current", "[", "i", "]", "=", "(", "record", ".", "_name", ",", "record", ".", "id", ")", "else", ":", "field", "=", "record", ".", "_fields", "[", "name", "]", "value", "=", "record", "[", "name", "]", "# this part could be simpler, but it has to be done this way", "# in order to reproduce the former behavior", "if", "not", "isinstance", "(", "value", ",", "BaseModel", ")", ":", "current", "[", "i", "]", "=", "field", ".", "convert_to_export", "(", "value", ",", "record", ")", "else", ":", "primary_done", ".", "append", "(", "name", ")", "# recursively export the fields that follow name; use", "# 'display_name' where no subfield is exported", "fields2", "=", "[", "(", "p", "[", "1", ":", "]", "or", "[", "'display_name'", "]", "if", "p", "and", "p", "[", "0", "]", "==", "name", "else", "[", "]", ")", "for", "p", "in", "fields", "]", "# in import_compat mode, m2m should always be exported as", "# a comma-separated list of xids or names in a single cell", "if", "import_compatible", "and", "field", ".", "type", "==", "'many2many'", ":", "index", "=", "None", "# find out which subfield the user wants & its", "# location as we might not get it as the first", "# column we encounter", "for", "name", "in", "[", "'id'", ",", "'name'", ",", "'display_name'", "]", ":", "with", "contextlib", ".", "suppress", "(", "ValueError", ")", ":", "index", "=", "fields2", ".", "index", "(", "[", "name", "]", ")", "break", "if", "index", "is", "None", ":", "# not found anything, assume we just want the", "# name_get in the first column", "name", "=", "None", "index", "=", "i", "if", "name", "==", "'id'", ":", "xml_ids", "=", "[", "xid", "for", "_", ",", "xid", "in", "value", ".", "__ensure_xml_id", "(", ")", "]", "current", "[", "index", "]", "=", "','", ".", "join", "(", "xml_ids", ")", "else", ":", "current", "[", "index", "]", "=", "field", ".", "convert_to_export", "(", "value", ",", "record", ")", "continue", "lines2", "=", "value", ".", "_export_rows", "(", "fields2", ",", 
"_is_toplevel_call", "=", "False", ")", "if", "lines2", ":", "# merge first line with record's main line", "for", "j", ",", "val", "in", "enumerate", "(", "lines2", "[", "0", "]", ")", ":", "if", "val", "or", "isinstance", "(", "val", ",", "(", "int", ",", "float", ")", ")", ":", "current", "[", "j", "]", "=", "val", "# append the other lines at the end", "lines", "+=", "lines2", "[", "1", ":", "]", "else", ":", "current", "[", "i", "]", "=", "''", "# if any xid should be exported, only do so at toplevel", "if", "_is_toplevel_call", "and", "any", "(", "f", "[", "-", "1", "]", "==", "'id'", "for", "f", "in", "fields", ")", ":", "bymodels", "=", "collections", ".", "defaultdict", "(", "set", ")", "xidmap", "=", "collections", ".", "defaultdict", "(", "list", ")", "# collect all the tuples in \"lines\" (along with their coordinates)", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "for", "j", ",", "cell", "in", "enumerate", "(", "line", ")", ":", "if", "type", "(", "cell", ")", "is", "tuple", ":", "bymodels", "[", "cell", "[", "0", "]", "]", ".", "add", "(", "cell", "[", "1", "]", ")", "xidmap", "[", "cell", "]", ".", "append", "(", "(", "i", ",", "j", ")", ")", "# for each model, xid-export everything and inject in matrix", "for", "model", ",", "ids", "in", "bymodels", ".", "items", "(", ")", ":", "for", "record", ",", "xid", "in", "self", ".", "env", "[", "model", "]", ".", "browse", "(", "ids", ")", ".", "__ensure_xml_id", "(", ")", ":", "for", "i", ",", "j", "in", "xidmap", ".", "pop", "(", "(", "record", ".", "_name", ",", "record", ".", "id", ")", ")", ":", "lines", "[", "i", "]", "[", "j", "]", "=", "xid", "assert", "not", "xidmap", ",", "\"failed to export xids for %s\"", "%", "', '", ".", "join", "(", "'{}:{}'", "%", "it", "for", "it", "in", "xidmap", ".", "items", "(", ")", ")", "return", "lines" ]
https://github.com/odoo/odoo/blob/8de8c196a137f4ebbf67d7c7c83fee36f873f5c8/odoo/models.py#L922-L1035
hola/challenge_word_classifier
8127fa7fef8fc351b6d015ed681bcac2d5254762
submissions/5748d6a363905b3a11d97cf0/src/make_feature.py
python
WriteIntoPickle
(file, d_f, d_t)
writing into file Vars. format: d_all = [d_f, d_t] d_f: d_t: [[x1,x2,x3, ... x_fz] [y1,y2,y3, ... y_dz] [x1,x2,x3, ... x_fz] ... [x1,x2,x3, ... x_fz]]
writing into file Vars. format: d_all = [d_f, d_t] d_f: d_t: [[x1,x2,x3, ... x_fz] [y1,y2,y3, ... y_dz] [x1,x2,x3, ... x_fz] ... [x1,x2,x3, ... x_fz]]
[ "writing", "into", "file", "Vars", ".", "format", ":", "d_all", "=", "[", "d_f", "d_t", "]", "d_f", ":", "d_t", ":", "[[", "x1", "x2", "x3", "...", "x_fz", "]", "[", "y1", "y2", "y3", "...", "y_dz", "]", "[", "x1", "x2", "x3", "...", "x_fz", "]", "...", "[", "x1", "x2", "x3", "...", "x_fz", "]]" ]
def WriteIntoPickle(file, d_f, d_t):
    """writing into file Vars.
    format: d_all = [d_f, d_t]
    d_f:                     d_t:
    [[x1,x2,x3, ... x_fz]    [y1,y2,y3, ... y_dz]
     [x1,x2,x3, ... x_fz]
     ...
     [x1,x2,x3, ... x_fz]]
    """
    with open(file, 'wb') as fout:
        pck.dump([d_f, d_t], fout, True)
[ "def", "WriteIntoPickle", "(", "file", ",", "d_f", ",", "d_t", ")", ":", "with", "open", "(", "file", ",", "'wb'", ")", "as", "fout", ":", "pck", ".", "dump", "(", "[", "d_f", ",", "d_t", "]", ",", "fout", ",", "True", ")" ]
https://github.com/hola/challenge_word_classifier/blob/8127fa7fef8fc351b6d015ed681bcac2d5254762/submissions/5748d6a363905b3a11d97cf0/src/make_feature.py#L756-L767
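`pck` is presumably an alias for the standard `pickle` module, and the positional `True` selects a binary protocol (`protocol=1`). Reading the file back is symmetric; the file name below is made up for the demo (assumes `WriteIntoPickle` from the record above is defined):

    import pickle as pck

    WriteIntoPickle('features.pkl', d_f=[[1, 2], [3, 4]], d_t=[0, 1])

    with open('features.pkl', 'rb') as fin:
        d_f, d_t = pck.load(fin)
    print(d_f, d_t)  # [[1, 2], [3, 4]] [0, 1]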
ayojs/ayo
45a1c8cf6384f5bcc81d834343c3ed9d78b97df3
tools/cpplint.py
python
_IncludeState.ResetSection
(self, directive)
Reset section checking for preprocessor directive. Args: directive: preprocessor directive (e.g. "if", "else").
Reset section checking for preprocessor directive.
[ "Reset", "section", "checking", "for", "preprocessor", "directive", "." ]
def ResetSection(self, directive):
  """Reset section checking for preprocessor directive.

  Args:
    directive: preprocessor directive (e.g. "if", "else").
  """
  # The name of the current section.
  self._section = self._INITIAL_SECTION
  # The path of last found header.
  self._last_header = ''

  # Update list of includes.  Note that we never pop from the
  # include list.
  if directive in ('if', 'ifdef', 'ifndef'):
    self.include_list.append([])
  elif directive in ('else', 'elif'):
    self.include_list[-1] = []
[ "def", "ResetSection", "(", "self", ",", "directive", ")", ":", "# The name of the current section.", "self", ".", "_section", "=", "self", ".", "_INITIAL_SECTION", "# The path of last found header.", "self", ".", "_last_header", "=", "''", "# Update list of includes. Note that we never pop from the", "# include list.", "if", "directive", "in", "(", "'if'", ",", "'ifdef'", ",", "'ifndef'", ")", ":", "self", ".", "include_list", ".", "append", "(", "[", "]", ")", "elif", "directive", "in", "(", "'else'", ",", "'elif'", ")", ":", "self", ".", "include_list", "[", "-", "1", "]", "=", "[", "]" ]
https://github.com/ayojs/ayo/blob/45a1c8cf6384f5bcc81d834343c3ed9d78b97df3/tools/cpplint.py#L720-L736
facebookarchive/nuclide
2a2a0a642d136768b7d2a6d35a652dc5fb77d70a
pkg/nuclide-python-rpc/VendorLib/jedi/evaluate/sys_path.py
python
dotted_path_in_sys_path
(sys_path, module_path)
return None
Returns the dotted path inside a sys.path.
Returns the dotted path inside a sys.path.
[ "Returns", "the", "dotted", "path", "inside", "a", "sys", ".", "path", "." ]
def dotted_path_in_sys_path(sys_path, module_path):
    """
    Returns the dotted path inside a sys.path.
    """
    # First remove the suffix.
    for suffix, _, _ in imp.get_suffixes():
        if module_path.endswith(suffix):
            module_path = module_path[:-len(suffix)]
            break
    else:
        # There should always be a suffix in a valid Python file on the path.
        return None

    if module_path.startswith(os.path.sep):
        # The paths in sys.path most of the times don't end with a slash.
        module_path = module_path[1:]

    for p in sys_path:
        if module_path.startswith(p):
            rest = module_path[len(p):]
            if rest:
                split = rest.split(os.path.sep)
                for string in split:
                    if not string or '.' in string:
                        return None
                return '.'.join(split)

    return None
[ "def", "dotted_path_in_sys_path", "(", "sys_path", ",", "module_path", ")", ":", "# First remove the suffix.", "for", "suffix", ",", "_", ",", "_", "in", "imp", ".", "get_suffixes", "(", ")", ":", "if", "module_path", ".", "endswith", "(", "suffix", ")", ":", "module_path", "=", "module_path", "[", ":", "-", "len", "(", "suffix", ")", "]", "break", "else", ":", "# There should always be a suffix in a valid Python file on the path.", "return", "None", "if", "module_path", ".", "startswith", "(", "os", ".", "path", ".", "sep", ")", ":", "# The paths in sys.path most of the times don't end with a slash.", "module_path", "=", "module_path", "[", "1", ":", "]", "for", "p", "in", "sys_path", ":", "if", "module_path", ".", "startswith", "(", "p", ")", ":", "rest", "=", "module_path", "[", "len", "(", "p", ")", ":", "]", "if", "rest", ":", "split", "=", "rest", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "for", "string", "in", "split", ":", "if", "not", "string", "or", "'.'", "in", "string", ":", "return", "None", "return", "'.'", ".", "join", "(", "split", ")", "return", "None" ]
https://github.com/facebookarchive/nuclide/blob/2a2a0a642d136768b7d2a6d35a652dc5fb77d70a/pkg/nuclide-python-rpc/VendorLib/jedi/evaluate/sys_path.py#L281-L308
basnijholt/home-assistant-config
ae69265f1b9bb904e884e985b99d5d2cbaa77f82
custom_components/hacs/repositories/plugin.py
python
HacsPluginRepository.__init__
(self, full_name)
Initialize.
Initialize.
[ "Initialize", "." ]
def __init__(self, full_name):
    """Initialize."""
    super().__init__()
    self.data.full_name = full_name
    self.data.full_name_lower = full_name.lower()
    self.data.file_name = None
    self.data.category = "plugin"
    self.information.javascript_type = None
    self.content.path.local = self.localpath
[ "def", "__init__", "(", "self", ",", "full_name", ")", ":", "super", "(", ")", ".", "__init__", "(", ")", "self", ".", "data", ".", "full_name", "=", "full_name", "self", ".", "data", ".", "full_name_lower", "=", "full_name", ".", "lower", "(", ")", "self", ".", "data", ".", "file_name", "=", "None", "self", ".", "data", ".", "category", "=", "\"plugin\"", "self", ".", "information", ".", "javascript_type", "=", "None", "self", ".", "content", ".", "path", ".", "local", "=", "self", ".", "localpath" ]
https://github.com/basnijholt/home-assistant-config/blob/ae69265f1b9bb904e884e985b99d5d2cbaa77f82/custom_components/hacs/repositories/plugin.py#L12-L20
Southpaw-TACTIC/TACTIC
ba9b87aef0ee3b3ea51446f25b285ebbca06f62c
src/pyasm/prod/biz/naming.py
python
TemplateCodeNaming.get_sobject_by_filename
(self, filename)
return Template.get(filename[0:index], project_code)
extract the code from the filename
extract the code from the filename
[ "extract", "the", "code", "from", "the", "filename" ]
def get_sobject_by_filename(self, filename):
    '''extract the code from the filename'''
    index = filename.find(".")
    if index == -1:
        return None

    project_code = Project.get_project_code()
    return Template.get(filename[0:index], project_code)
[ "def", "get_sobject_by_filename", "(", "self", ",", "filename", ")", ":", "index", "=", "filename", ".", "find", "(", "\".\"", ")", "if", "index", "==", "-", "1", ":", "return", "None", "project_code", "=", "Project", ".", "get_project_code", "(", ")", "return", "Template", ".", "get", "(", "filename", "[", "0", ":", "index", "]", ",", "project_code", ")" ]
https://github.com/Southpaw-TACTIC/TACTIC/blob/ba9b87aef0ee3b3ea51446f25b285ebbca06f62c/src/pyasm/prod/biz/naming.py#L148-L154
wotermelon/toJump
3dcec5cb5d91387d415b805d015ab8d2e6ffcf5f
lib/mac/systrace/catapult/dependency_manager/dependency_manager/manager.py
python
DependencyManager._UpdateDependencies
(self, config)
Add the dependency information stored in |config| to this instance. Args: config: An instances of BaseConfig or a subclasses. Raises: UnsupportedConfigFormatError: If supported_config_types was specified and config is not in the supported config_types.
Add the dependency information stored in |config| to this instance.
[ "Add", "the", "dependency", "information", "stored", "in", "|config|", "to", "this", "instance", "." ]
def _UpdateDependencies(self, config):
  """Add the dependency information stored in |config| to this instance.

  Args:
      config: An instances of BaseConfig or a subclasses.

  Raises:
      UnsupportedConfigFormatError: If supported_config_types was
          specified and config is not in the supported config_types.
  """
  if not isinstance(config, base_config.BaseConfig):
    raise ValueError('Must use a BaseConfig or subclass instance with the '
                     'DependencyManager.')
  if (self.supported_configs and
      config.GetConfigType() not in self.supported_configs):
    raise exceptions.UnsupportedConfigFormatError(config.GetConfigType(),
                                                  config.config_path)
  for dep_info in config.IterDependencyInfo():
    dependency = dep_info.dependency
    platform = dep_info.platform
    if dependency not in self._lookup_dict:
      self._lookup_dict[dependency] = {}
    if platform not in self._lookup_dict[dependency]:
      self._lookup_dict[dependency][platform] = dep_info
    else:
      self._lookup_dict[dependency][platform].Update(dep_info)
[ "def", "_UpdateDependencies", "(", "self", ",", "config", ")", ":", "if", "not", "isinstance", "(", "config", ",", "base_config", ".", "BaseConfig", ")", ":", "raise", "ValueError", "(", "'Must use a BaseConfig or subclass instance with the '", "'DependencyManager.'", ")", "if", "(", "self", ".", "supported_configs", "and", "config", ".", "GetConfigType", "(", ")", "not", "in", "self", ".", "supported_configs", ")", ":", "raise", "exceptions", ".", "UnsupportedConfigFormatError", "(", "config", ".", "GetConfigType", "(", ")", ",", "config", ".", "config_path", ")", "for", "dep_info", "in", "config", ".", "IterDependencyInfo", "(", ")", ":", "dependency", "=", "dep_info", ".", "dependency", "platform", "=", "dep_info", ".", "platform", "if", "dependency", "not", "in", "self", ".", "_lookup_dict", ":", "self", ".", "_lookup_dict", "[", "dependency", "]", "=", "{", "}", "if", "platform", "not", "in", "self", ".", "_lookup_dict", "[", "dependency", "]", ":", "self", ".", "_lookup_dict", "[", "dependency", "]", "[", "platform", "]", "=", "dep_info", "else", ":", "self", ".", "_lookup_dict", "[", "dependency", "]", "[", "platform", "]", ".", "Update", "(", "dep_info", ")" ]
https://github.com/wotermelon/toJump/blob/3dcec5cb5d91387d415b805d015ab8d2e6ffcf5f/lib/mac/systrace/catapult/dependency_manager/dependency_manager/manager.py#L198-L223
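The method builds a two-level lookup table, dependency -> platform -> dep_info, merging rather than overwriting on repeats. The insert/merge logic mirrored with plain dicts, strings standing in for `DependencyInfo` objects (all names here are illustrative):

    lookup = {}

    def update(lookup, dependency, platform, dep_info):
        # Same shape as DependencyManager._lookup_dict.
        lookup.setdefault(dependency, {})
        if platform not in lookup[dependency]:
            lookup[dependency][platform] = dep_info
        else:
            # stands in for dep_info.Update(...)
            lookup[dependency][platform] += '+' + dep_info

    update(lookup, 'd8', 'linux_x86_64', 'info_a')
    update(lookup, 'd8', 'linux_x86_64', 'info_b')  # merged, not replaced
    update(lookup, 'd8', 'win_AMD64', 'info_c')
    print(lookup)
    # {'d8': {'linux_x86_64': 'info_a+info_b', 'win_AMD64': 'info_c'}}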
nodejs/node-convergence-archive
e11fe0c2777561827cdb7207d46b0917ef3c42a7
tools/gyp/pylib/gyp/input.py
python
DependencyGraphNode.DependenciesForLinkSettings
(self, targets)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
Returns a list of dependency targets whose link_settings should be merged into this target.
Returns a list of dependency targets whose link_settings should be merged into this target.
[ "Returns", "a", "list", "of", "dependency", "targets", "whose", "link_settings", "should", "be", "merged", "into", "this", "target", "." ]
def DependenciesForLinkSettings(self, targets):
  """
  Returns a list of dependency targets whose link_settings should be merged
  into this target.
  """

  # TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
  # link_settings are propagated. So for now, we will allow it, unless the
  # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
  # False. Once chrome is fixed, we can remove this flag.
  include_shared_libraries = \
      targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
  return self._LinkDependenciesInternal(targets, include_shared_libraries)
[ "def", "DependenciesForLinkSettings", "(", "self", ",", "targets", ")", ":", "# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'", "# link_settings are propagated. So for now, we will allow it, unless the", "# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to", "# False. Once chrome is fixed, we can remove this flag.", "include_shared_libraries", "=", "targets", "[", "self", ".", "ref", "]", ".", "get", "(", "'allow_sharedlib_linksettings_propagation'", ",", "True", ")", "return", "self", ".", "_LinkDependenciesInternal", "(", "targets", ",", "include_shared_libraries", ")" ]
https://github.com/nodejs/node-convergence-archive/blob/e11fe0c2777561827cdb7207d46b0917ef3c42a7/tools/gyp/pylib/gyp/input.py#L1755-L1767
google/closure-library
ed8d5cac16878e5fcbce4dedc8113edf295b2fd6
closure/bin/build/depswriter.py
python
_ToJsSrc
(arr)
return json.dumps(arr).replace('"', '\'')
Convert a python arr to a js source string.
Convert a python arr to a js source string.
[ "Convert", "a", "python", "arr", "to", "a", "js", "source", "string", "." ]
def _ToJsSrc(arr):
  """Convert a python arr to a js source string."""
  return json.dumps(arr).replace('"', '\'')
[ "def", "_ToJsSrc", "(", "arr", ")", ":", "return", "json", ".", "dumps", "(", "arr", ")", ".", "replace", "(", "'\"'", ",", "'\\''", ")" ]
https://github.com/google/closure-library/blob/ed8d5cac16878e5fcbce4dedc8113edf295b2fd6/closure/bin/build/depswriter.py#L81-L84
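Example output; note the blunt quote swap also rewrites double quotes inside the strings themselves (assumes `_ToJsSrc` from the record above is in scope):

    print(_ToJsSrc(['base.js', 'deps.js']))
    # ['base.js', 'deps.js']

    print(_ToJsSrc(['say "hi"']))
    # ['say \'hi\''] -- the quotes inside the data were swapped too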
xtk/X
04c1aa856664a8517d23aefd94c470d47130aead
lib/selenium/selenium/selenium.py
python
selenium.is_editable
(self,locator)
return self.get_boolean("isEditable", [locator,])
Determines whether the specified input element is editable, ie hasn't been disabled. This method will fail if the specified element isn't an input element. 'locator' is an element locator
Determines whether the specified input element is editable, ie hasn't been disabled. This method will fail if the specified element isn't an input element. 'locator' is an element locator
[ "Determines", "whether", "the", "specified", "input", "element", "is", "editable", "ie", "hasn", "t", "been", "disabled", ".", "This", "method", "will", "fail", "if", "the", "specified", "element", "isn", "t", "an", "input", "element", ".", "locator", "is", "an", "element", "locator" ]
def is_editable(self,locator):
    """
    Determines whether the specified input element is editable, ie hasn't been disabled.
    This method will fail if the specified element isn't an input element.

    'locator' is an element locator
    """
    return self.get_boolean("isEditable", [locator,])
[ "def", "is_editable", "(", "self", ",", "locator", ")", ":", "return", "self", ".", "get_boolean", "(", "\"isEditable\"", ",", "[", "locator", ",", "]", ")" ]
https://github.com/xtk/X/blob/04c1aa856664a8517d23aefd94c470d47130aead/lib/selenium/selenium/selenium.py#L1385-L1392
babybuddy/babybuddy
acde3156c6de781f90a85d021eaf086b28a7a008
reports/graphs/diaperchange_types.py
python
diaperchange_types
(changes)
return utils.split_graph_output(output)
Create a graph showing types of totals for diaper changes. :param changes: a QuerySet of Diaper Change instances. :returns: a tuple of the the graph's html and javascript.
Create a graph showing types of totals for diaper changes. :param changes: a QuerySet of Diaper Change instances. :returns: a tuple of the the graph's html and javascript.
[ "Create", "a", "graph", "showing", "types", "of", "totals", "for", "diaper", "changes", ".", ":", "param", "changes", ":", "a", "QuerySet", "of", "Diaper", "Change", "instances", ".", ":", "returns", ":", "a", "tuple", "of", "the", "the", "graph", "s", "html", "and", "javascript", "." ]
def diaperchange_types(changes):
    """
    Create a graph showing types of totals for diaper changes.
    :param changes: a QuerySet of Diaper Change instances.
    :returns: a tuple of the the graph's html and javascript.
    """
    changes = changes.annotate(date=TruncDate('time'))\
        .values('date') \
        .annotate(wet_count=Count(Case(When(wet=True, then=1)))) \
        .annotate(solid_count=Count(Case(When(solid=True, then=1)))) \
        .annotate(total=Count('id')) \
        .order_by('-date')

    solid_trace = go.Scatter(
        mode='markers',
        name=_('Solid'),
        x=list(changes.values_list('date', flat=True)),
        y=list(changes.values_list('solid_count', flat=True)),
    )
    wet_trace = go.Scatter(
        mode='markers',
        name=_('Wet'),
        x=list(changes.values_list('date', flat=True)),
        y=list(changes.values_list('wet_count', flat=True))
    )
    total_trace = go.Scatter(
        name=_('Total'),
        x=list(changes.values_list('date', flat=True)),
        y=list(changes.values_list('total', flat=True))
    )

    layout_args = utils.default_graph_layout_options()
    layout_args['barmode'] = 'stack'
    layout_args['title'] = _('<b>Diaper Change Types</b>')
    layout_args['xaxis']['title'] = _('Date')
    layout_args['xaxis']['rangeselector'] = utils.rangeselector_date()
    layout_args['yaxis']['title'] = _('Number of changes')

    fig = go.Figure({
        'data': [solid_trace, wet_trace, total_trace],
        'layout': go.Layout(**layout_args)
    })
    output = plotly.plot(
        fig,
        output_type='div',
        include_plotlyjs=False,
        config={'locale': get_language()}
    )
    return utils.split_graph_output(output)
[ "def", "diaperchange_types", "(", "changes", ")", ":", "changes", "=", "changes", ".", "annotate", "(", "date", "=", "TruncDate", "(", "'time'", ")", ")", ".", "values", "(", "'date'", ")", ".", "annotate", "(", "wet_count", "=", "Count", "(", "Case", "(", "When", "(", "wet", "=", "True", ",", "then", "=", "1", ")", ")", ")", ")", ".", "annotate", "(", "solid_count", "=", "Count", "(", "Case", "(", "When", "(", "solid", "=", "True", ",", "then", "=", "1", ")", ")", ")", ")", ".", "annotate", "(", "total", "=", "Count", "(", "'id'", ")", ")", ".", "order_by", "(", "'-date'", ")", "solid_trace", "=", "go", ".", "Scatter", "(", "mode", "=", "'markers'", ",", "name", "=", "_", "(", "'Solid'", ")", ",", "x", "=", "list", "(", "changes", ".", "values_list", "(", "'date'", ",", "flat", "=", "True", ")", ")", ",", "y", "=", "list", "(", "changes", ".", "values_list", "(", "'solid_count'", ",", "flat", "=", "True", ")", ")", ",", ")", "wet_trace", "=", "go", ".", "Scatter", "(", "mode", "=", "'markers'", ",", "name", "=", "_", "(", "'Wet'", ")", ",", "x", "=", "list", "(", "changes", ".", "values_list", "(", "'date'", ",", "flat", "=", "True", ")", ")", ",", "y", "=", "list", "(", "changes", ".", "values_list", "(", "'wet_count'", ",", "flat", "=", "True", ")", ")", ")", "total_trace", "=", "go", ".", "Scatter", "(", "name", "=", "_", "(", "'Total'", ")", ",", "x", "=", "list", "(", "changes", ".", "values_list", "(", "'date'", ",", "flat", "=", "True", ")", ")", ",", "y", "=", "list", "(", "changes", ".", "values_list", "(", "'total'", ",", "flat", "=", "True", ")", ")", ")", "layout_args", "=", "utils", ".", "default_graph_layout_options", "(", ")", "layout_args", "[", "'barmode'", "]", "=", "'stack'", "layout_args", "[", "'title'", "]", "=", "_", "(", "'<b>Diaper Change Types</b>'", ")", "layout_args", "[", "'xaxis'", "]", "[", "'title'", "]", "=", "_", "(", "'Date'", ")", "layout_args", "[", "'xaxis'", "]", "[", "'rangeselector'", "]", "=", "utils", ".", "rangeselector_date", "(", ")", "layout_args", "[", "'yaxis'", "]", "[", "'title'", "]", "=", "_", "(", "'Number of changes'", ")", "fig", "=", "go", ".", "Figure", "(", "{", "'data'", ":", "[", "solid_trace", ",", "wet_trace", ",", "total_trace", "]", ",", "'layout'", ":", "go", ".", "Layout", "(", "*", "*", "layout_args", ")", "}", ")", "output", "=", "plotly", ".", "plot", "(", "fig", ",", "output_type", "=", "'div'", ",", "include_plotlyjs", "=", "False", ",", "config", "=", "{", "'locale'", ":", "get_language", "(", ")", "}", ")", "return", "utils", ".", "split_graph_output", "(", "output", ")" ]
https://github.com/babybuddy/babybuddy/blob/acde3156c6de781f90a85d021eaf086b28a7a008/reports/graphs/diaperchange_types.py#L13-L61
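The `Count(Case(When(...)))` chain is the classic Django idiom for a per-group conditional count; since Django 2.0 the same thing can be written with `filter=`. A sketch only; `Change` is a hypothetical model standing in for the Diaper Change model, so this runs only inside a configured Django project:

    from django.db.models import Case, Count, Q, When
    from django.db.models.functions import TruncDate

    # Count(Case(When(wet=True, then=1))) counts the non-null 1s, i.e. the
    # rows where wet is True; Count('id', filter=Q(wet=True)) is equivalent.
    per_day = (Change.objects
               .annotate(date=TruncDate('time'))
               .values('date')
               .annotate(wet_count=Count(Case(When(wet=True, then=1))))
               .annotate(wet_count2=Count('id', filter=Q(wet=True))))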
TeamvisionCorp/TeamVision
aa2a57469e430ff50cce21174d8f280efa0a83a7
distribute/0.0.5/build_shell/teamvision/teamvision/project/viewmodels/vm_project_version.py
python
VM_ProjectVersion.__init__
(self,version,selected_version=0)
Constructor
Constructor
[ "Constructor" ]
def __init__(self,version,selected_version=0):
    '''
    Constructor
    '''
    self.version=version
    self.selected_versions=selected_version
[ "def", "__init__", "(", "self", ",", "version", ",", "selected_version", "=", "0", ")", ":", "self", ".", "version", "=", "version", "self", ".", "selected_versions", "=", "selected_version" ]
https://github.com/TeamvisionCorp/TeamVision/blob/aa2a57469e430ff50cce21174d8f280efa0a83a7/distribute/0.0.5/build_shell/teamvision/teamvision/project/viewmodels/vm_project_version.py#L16-L21
mozilla/chromeless
4e6c980479b0f91b76830dc7b58ff6ae9a0b3978
impl/markdown/extensions/abbr.py
python
AbbrPreprocessor._generate_pattern
(self, text)
return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
Given a string, returns an regex pattern to match that string. 'HTML' -> r'(?P<abbr>[H][T][M][L])' Note: we force each char as a literal match (in brackets) as we don't know what they will be beforehand.
Given a string, returns an regex pattern to match that string. 'HTML' -> r'(?P<abbr>[H][T][M][L])' Note: we force each char as a literal match (in brackets) as we don't know what they will be beforehand.
[ "Given", "a", "string", "returns", "an", "regex", "pattern", "to", "match", "that", "string", ".", "HTML", "-", ">", "r", "(", "?P<abbr", ">", "[", "H", "]", "[", "T", "]", "[", "M", "]", "[", "L", "]", ")", "Note", ":", "we", "force", "each", "char", "as", "a", "literal", "match", "(", "in", "brackets", ")", "as", "we", "don", "t", "know", "what", "they", "will", "be", "beforehand", "." ]
def _generate_pattern(self, text):
    '''
    Given a string, returns an regex pattern to match that string.

    'HTML' -> r'(?P<abbr>[H][T][M][L])'

    Note: we force each char as a literal match (in brackets) as we don't
    know what they will be beforehand.
    '''
    chars = list(text)
    for i in range(len(chars)):
        chars[i] = r'[%s]' % chars[i]
    return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
[ "def", "_generate_pattern", "(", "self", ",", "text", ")", ":", "chars", "=", "list", "(", "text", ")", "for", "i", "in", "range", "(", "len", "(", "chars", ")", ")", ":", "chars", "[", "i", "]", "=", "r'[%s]'", "%", "chars", "[", "i", "]", "return", "r'(?P<abbr>\\b%s\\b)'", "%", "(", "r''", ".", "join", "(", "chars", ")", ")" ]
https://github.com/mozilla/chromeless/blob/4e6c980479b0f91b76830dc7b58ff6ae9a0b3978/impl/markdown/extensions/abbr.py#L61-L74
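What the generated pattern buys over the docstring's sketch: the surrounding `\b` word boundaries keep the abbreviation from matching inside longer words. The pattern literal below is what `_generate_pattern('HTML')` produces, per the code above:

    import re

    pattern = r'(?P<abbr>\b[H][T][M][L]\b)'  # _generate_pattern('HTML')
    m = re.search(pattern, 'Write HTML, not HTMLX.')
    print(m.group('abbr'), m.start())  # matches only the standalone 'HTML'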
nprapps/app-template
54c79412d05678b7de0080832cd041a90e4d9112
fabfile/issues.py
python
bootstrap
()
Bootstraps Github issues with default configuration.
Bootstraps Github issues with default configuration.
[ "Bootstraps", "Github", "issues", "with", "default", "configuration", "." ]
def bootstrap():
    """
    Bootstraps Github issues with default configuration.
    """
    if app_config.PROJECT_SLUG == '$NEW_PROJECT_SLUG':
        logger.warn('You can\'t run the issues bootstrap until you\'ve set PROJECT_SLUG in app_config.py!')
        return

    auth = github.get_auth()
    github.delete_existing_labels(auth)
    github.create_labels(auth)
    github.create_tickets(auth)
    github.create_milestones(auth)
    github.create_hipchat_hook(auth)
[ "def", "bootstrap", "(", ")", ":", "if", "app_config", ".", "PROJECT_SLUG", "==", "'$NEW_PROJECT_SLUG'", ":", "logger", ".", "warn", "(", "'You can\\'t run the issues bootstrap until you\\'ve set PROJECT_SLUG in app_config.py!'", ")", "return", "auth", "=", "github", ".", "get_auth", "(", ")", "github", ".", "delete_existing_labels", "(", "auth", ")", "github", ".", "create_labels", "(", "auth", ")", "github", ".", "create_tickets", "(", "auth", ")", "github", ".", "create_milestones", "(", "auth", ")", "github", ".", "create_hipchat_hook", "(", "auth", ")" ]
https://github.com/nprapps/app-template/blob/54c79412d05678b7de0080832cd041a90e4d9112/fabfile/issues.py#L18-L31
Southpaw-TACTIC/TACTIC
ba9b87aef0ee3b3ea51446f25b285ebbca06f62c
src/pyasm/prod/web/asset_history_wdg.py
python
InstanceHistoryWdg.get_snapshot_contexts
(self, search_type, search_id)
return contexts
get the contexts for the snapshots
get the contexts for the snapshots
[ "get", "the", "contexts", "for", "the", "snapshots" ]
def get_snapshot_contexts(self, search_type, search_id):
    '''get the contexts for the snapshots'''
    contexts = Snapshot.get_contexts(search_type, search_id)

    # add all of the asset snapshots
    #self.instance_search_type = self.kwargs.get('search_type')
    instance = Search.get_by_id(search_type, search_id)
    if not self.asset_search_type:
        self.asset_search_type = 'prod/asset'
    #asset = instance.get_asset(search_type = self.asset_search_type)
    asset_code = instance.get_value("asset_code")
    asset = Search.get_by_code(self.asset_search_type, asset_code)
    if not asset:
        return contexts

    asset_id = asset.get_id()
    asset_search_type = asset.get_search_type()
    asset_contexts = Snapshot.get_contexts(asset_search_type, asset_id)
    contexts.extend(asset_contexts)
    contexts = Common.get_unique_list(contexts)
    contexts.sort()
    return contexts
[ "def", "get_snapshot_contexts", "(", "self", ",", "search_type", ",", "search_id", ")", ":", "contexts", "=", "Snapshot", ".", "get_contexts", "(", "search_type", ",", "search_id", ")", "# add all of the asset snapshots", "#self.instance_search_type = self.kwargs.get('search_type')", "instance", "=", "Search", ".", "get_by_id", "(", "search_type", ",", "search_id", ")", "if", "not", "self", ".", "asset_search_type", ":", "self", ".", "asset_search_type", "=", "'prod/asset'", "#asset = instance.get_asset(search_type = self.asset_search_type)", "asset_code", "=", "instance", ".", "get_value", "(", "\"asset_code\"", ")", "asset", "=", "Search", ".", "get_by_code", "(", "self", ".", "asset_search_type", ",", "asset_code", ")", "if", "not", "asset", ":", "return", "contexts", "asset_id", "=", "asset", ".", "get_id", "(", ")", "asset_search_type", "=", "asset", ".", "get_search_type", "(", ")", "asset_contexts", "=", "Snapshot", ".", "get_contexts", "(", "asset_search_type", ",", "asset_id", ")", "contexts", ".", "extend", "(", "asset_contexts", ")", "contexts", "=", "Common", ".", "get_unique_list", "(", "contexts", ")", "contexts", ".", "sort", "(", ")", "return", "contexts" ]
https://github.com/Southpaw-TACTIC/TACTIC/blob/ba9b87aef0ee3b3ea51446f25b285ebbca06f62c/src/pyasm/prod/web/asset_history_wdg.py#L407-L429
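The TACTIC-specific lookups aside, the method's final step is a plain merge, de-duplicate, and sort. A minimal sketch of that step (merge_contexts is a hypothetical stand-in for the Snapshot/Common machinery):

```python
def merge_contexts(instance_contexts, asset_contexts):
    # Extend the instance's contexts with the asset's, then drop
    # duplicates and sort -- what Common.get_unique_list plus sort()
    # achieve in the original method.
    contexts = list(instance_contexts)
    contexts.extend(asset_contexts)
    return sorted(set(contexts))

print(merge_contexts(['rig', 'model'], ['model', 'texture']))
# ['model', 'rig', 'texture']
```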
infobyte/faraday
dceeac70262c7ce146020381e3dd50a7eb81f9bb
faraday/server/api/base.py
python
CountWorkspacedMixin.count
(self, **kwargs)
return res
--- tags: [{tag_name}] summary: "Group {class_model} by the field set in the group_by GET parameter." responses: 200: description: Ok content: application/json: schema: {schema_class} 404: description: group_by is not specified
--- tags: [{tag_name}] summary: "Group {class_model} by the field set in the group_by GET parameter." responses: 200: description: Ok content: application/json: schema: {schema_class} 404: description: group_by is not specified
[ "---", "tags", ":", "[", "{", "tag_name", "}", "]", "summary", ":", "Group", "{", "class_model", "}", "by", "the", "field", "set", "in", "the", "group_by", "GET", "parameter", ".", "responses", ":", "200", ":", "description", ":", "Ok", "content", ":", "application", "/", "json", ":", "schema", ":", "{", "schema_class", "}", "404", ":", "description", ":", "group_by", "is", "not", "specified" ]
def count(self, **kwargs): """ --- tags: [{tag_name}] summary: "Group {class_model} by the field set in the group_by GET parameter." responses: 200: description: Ok content: application/json: schema: {schema_class} 404: description: group_by is not specified """ res = { 'groups': [], 'total_count': 0 } group_by = flask.request.args.get('group_by', None) sort_dir = flask.request.args.get('order', "asc").lower() # TODO migration: whitelist fields to avoid leaking a confidential # field's value. # Example: /users/count/?group_by=password # Also we should check that the field exists in the db and isn't, for # example, a relationship if not group_by or group_by not in inspect(self.model_class).attrs: flask.abort(400, {"message": "group_by is a required parameter"}) if sort_dir and sort_dir not in ('asc', 'desc'): flask.abort(400, {"message": "order must be 'desc' or 'asc'"}) workspace_name = kwargs.pop('workspace_name') # using format is not a great practice. # the user input is group_by, however it's filtered by column name. table_name = inspect(self.model_class).tables[0].name group_by = f'{table_name}.{group_by}' count = self._filter_query( db.session.query(self.model_class) .join(Workspace) .group_by(group_by) .filter(Workspace.name == workspace_name, *self.count_extra_filters)) # order order_by = group_by if sort_dir == 'desc': count = count.order_by(desc(order_by)) else: count = count.order_by(asc(order_by)) for key, count in count.values(group_by, func.count(group_by)): res['groups'].append( {'count': count, 'name': key, # To add compatibility with the web ui flask.request.args.get('group_by'): key, } ) res['total_count'] += count return res
[ "def", "count", "(", "self", ",", "*", "*", "kwargs", ")", ":", "res", "=", "{", "'groups'", ":", "[", "]", ",", "'total_count'", ":", "0", "}", "group_by", "=", "flask", ".", "request", ".", "args", ".", "get", "(", "'group_by'", ",", "None", ")", "sort_dir", "=", "flask", ".", "request", ".", "args", ".", "get", "(", "'order'", ",", "\"asc\"", ")", ".", "lower", "(", ")", "# TODO migration: whitelist fields to avoid leaking a confidential", "# field's value.", "# Example: /users/count/?group_by=password", "# Also we should check that the field exists in the db and isn't, for", "# example, a relationship", "if", "not", "group_by", "or", "group_by", "not", "in", "inspect", "(", "self", ".", "model_class", ")", ".", "attrs", ":", "flask", ".", "abort", "(", "400", ",", "{", "\"message\"", ":", "\"group_by is a required parameter\"", "}", ")", "if", "sort_dir", "and", "sort_dir", "not", "in", "(", "'asc'", ",", "'desc'", ")", ":", "flask", ".", "abort", "(", "400", ",", "{", "\"message\"", ":", "\"order must be 'desc' or 'asc'\"", "}", ")", "workspace_name", "=", "kwargs", ".", "pop", "(", "'workspace_name'", ")", "# using format is not a great practice.", "# the user input is group_by, however it's filtered by column name.", "table_name", "=", "inspect", "(", "self", ".", "model_class", ")", ".", "tables", "[", "0", "]", ".", "name", "group_by", "=", "f'{table_name}.{group_by}'", "count", "=", "self", ".", "_filter_query", "(", "db", ".", "session", ".", "query", "(", "self", ".", "model_class", ")", ".", "join", "(", "Workspace", ")", ".", "group_by", "(", "group_by", ")", ".", "filter", "(", "Workspace", ".", "name", "==", "workspace_name", ",", "*", "self", ".", "count_extra_filters", ")", ")", "# order", "order_by", "=", "group_by", "if", "sort_dir", "==", "'desc'", ":", "count", "=", "count", ".", "order_by", "(", "desc", "(", "order_by", ")", ")", "else", ":", "count", "=", "count", ".", "order_by", "(", "asc", "(", "order_by", ")", ")", "for", "key", ",", "count", "in", "count", ".", "values", "(", "group_by", ",", "func", ".", "count", "(", "group_by", ")", ")", ":", "res", "[", "'groups'", "]", ".", "append", "(", "{", "'count'", ":", "count", ",", "'name'", ":", "key", ",", "# To add compatibility with the web ui", "flask", ".", "request", ".", "args", ".", "get", "(", "'group_by'", ")", ":", "key", ",", "}", ")", "res", "[", "'total_count'", "]", "+=", "count", "return", "res" ]
https://github.com/infobyte/faraday/blob/dceeac70262c7ce146020381e3dd50a7eb81f9bb/faraday/server/api/base.py#L1564-L1625
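Stripped of the SQLAlchemy query and Flask plumbing, the endpoint's response shape is a grouped count plus a running total. A minimal sketch over plain dicts (count_grouped and the 'severity' field are hypothetical; the real code groups at the database level):

```python
from collections import Counter

def count_grouped(rows, group_by):
    # Mirrors the endpoint's response shape: one entry per distinct
    # value of `group_by`, plus a running total.  `rows` is assumed
    # to be a list of dicts rather than SQLAlchemy models.
    if not rows or group_by not in rows[0]:
        raise ValueError("group_by is a required parameter")
    counts = Counter(row[group_by] for row in rows)
    groups = [{'name': k, 'count': v} for k, v in sorted(counts.items())]
    return {'groups': groups, 'total_count': sum(counts.values())}

rows = [{'severity': 'high'}, {'severity': 'low'}, {'severity': 'high'}]
print(count_grouped(rows, 'severity'))
# {'groups': [{'name': 'high', 'count': 2}, {'name': 'low', 'count': 1}], 'total_count': 3}
```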
facebookarchive/nuclide
2a2a0a642d136768b7d2a6d35a652dc5fb77d70a
modules/atom-ide-debugger-python/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/kernel32.py
python
MemoryBasicInformation.__contains__
(self, address)
return self.BaseAddress <= address < (self.BaseAddress + self.RegionSize)
Test if the given memory address falls within this memory region. @type address: int @param address: Memory address to test. @rtype: bool @return: C{True} if the given memory address falls within this memory region, C{False} otherwise.
Test if the given memory address falls within this memory region.
[ "Test", "if", "the", "given", "memory", "address", "falls", "within", "this", "memory", "region", "." ]
def __contains__(self, address): """ Test if the given memory address falls within this memory region. @type address: int @param address: Memory address to test. @rtype: bool @return: C{True} if the given memory address falls within this memory region, C{False} otherwise. """ return self.BaseAddress <= address < (self.BaseAddress + self.RegionSize)
[ "def", "__contains__", "(", "self", ",", "address", ")", ":", "return", "self", ".", "BaseAddress", "<=", "address", "<", "(", "self", ".", "BaseAddress", "+", "self", ".", "RegionSize", ")" ]
https://github.com/facebookarchive/nuclide/blob/2a2a0a642d136768b7d2a6d35a652dc5fb77d70a/modules/atom-ide-debugger-python/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/kernel32.py#L1052-L1063
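The containment test is a half-open interval check, which is what makes Python's `in` operator work on region objects. A self-contained sketch (Region is a hypothetical stand-in for the winappdbg structure):

```python
class Region(object):
    # Hypothetical stand-in holding only the two fields the test needs.
    def __init__(self, base, size):
        self.BaseAddress = base
        self.RegionSize = size

    def __contains__(self, address):
        # Half-open interval: the byte at base+size belongs to the
        # next region, not this one.
        return self.BaseAddress <= address < (self.BaseAddress + self.RegionSize)

r = Region(0x1000, 0x2000)
print(0x1000 in r)   # True  (first byte of the region)
print(0x2FFF in r)   # True  (last byte)
print(0x3000 in r)   # False (first byte past the end)
```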
Southpaw-TACTIC/TACTIC
ba9b87aef0ee3b3ea51446f25b285ebbca06f62c
3rd_party/python2/site-packages/cheroot/server.py
python
HTTPRequest.send_headers
(self)
Assert, process, and send the HTTP response message-headers. You must set self.status, and self.outheaders before calling this.
Assert, process, and send the HTTP response message-headers.
[ "Assert", "process", "and", "send", "the", "HTTP", "response", "message", "-", "headers", "." ]
def send_headers(self): """Assert, process, and send the HTTP response message-headers. You must set self.status, and self.outheaders before calling this. """ hkeys = [key.lower() for key, value in self.outheaders] status = int(self.status[:3]) if status == 413: # Request Entity Too Large. Close conn to avoid garbage. self.close_connection = True elif b'content-length' not in hkeys: # "All 1xx (informational), 204 (no content), # and 304 (not modified) responses MUST NOT # include a message-body." So no point chunking. if status < 200 or status in (204, 205, 304): pass else: needs_chunked = ( self.response_protocol == 'HTTP/1.1' and self.method != b'HEAD' ) if needs_chunked: # Use the chunked transfer-coding self.chunked_write = True self.outheaders.append((b'Transfer-Encoding', b'chunked')) else: # Closing the conn is the only way to determine len. self.close_connection = True if b'connection' not in hkeys: if self.response_protocol == 'HTTP/1.1': # Both server and client are HTTP/1.1 or better if self.close_connection: self.outheaders.append((b'Connection', b'close')) else: # Server and/or client are HTTP/1.0 if not self.close_connection: self.outheaders.append((b'Connection', b'Keep-Alive')) if (not self.close_connection) and (not self.chunked_read): # Read any remaining request body data on the socket. # "If an origin server receives a request that does not include an # Expect request-header field with the "100-continue" expectation, # the request includes a request body, and the server responds # with a final status code before reading the entire request body # from the transport connection, then the server SHOULD NOT close # the transport connection until it has read the entire request, # or until the client closes the connection. Otherwise, the client # might not reliably receive the response message. However, this # requirement is not be construed as preventing a server from # defending itself against denial-of-service attacks, or from # badly broken client implementations." remaining = getattr(self.rfile, 'remaining', 0) if remaining > 0: self.rfile.read(remaining) if b'date' not in hkeys: self.outheaders.append(( b'Date', email.utils.formatdate(usegmt=True).encode('ISO-8859-1'), )) if b'server' not in hkeys: self.outheaders.append(( b'Server', self.server.server_name.encode('ISO-8859-1'), )) proto = self.server.protocol.encode('ascii') buf = [proto + SPACE + self.status + CRLF] for k, v in self.outheaders: buf.append(k + COLON + SPACE + v + CRLF) buf.append(CRLF) self.conn.wfile.write(EMPTY.join(buf))
[ "def", "send_headers", "(", "self", ")", ":", "hkeys", "=", "[", "key", ".", "lower", "(", ")", "for", "key", ",", "value", "in", "self", ".", "outheaders", "]", "status", "=", "int", "(", "self", ".", "status", "[", ":", "3", "]", ")", "if", "status", "==", "413", ":", "# Request Entity Too Large. Close conn to avoid garbage.", "self", ".", "close_connection", "=", "True", "elif", "b'content-length'", "not", "in", "hkeys", ":", "# \"All 1xx (informational), 204 (no content),", "# and 304 (not modified) responses MUST NOT", "# include a message-body.\" So no point chunking.", "if", "status", "<", "200", "or", "status", "in", "(", "204", ",", "205", ",", "304", ")", ":", "pass", "else", ":", "needs_chunked", "=", "(", "self", ".", "response_protocol", "==", "'HTTP/1.1'", "and", "self", ".", "method", "!=", "b'HEAD'", ")", "if", "needs_chunked", ":", "# Use the chunked transfer-coding", "self", ".", "chunked_write", "=", "True", "self", ".", "outheaders", ".", "append", "(", "(", "b'Transfer-Encoding'", ",", "b'chunked'", ")", ")", "else", ":", "# Closing the conn is the only way to determine len.", "self", ".", "close_connection", "=", "True", "if", "b'connection'", "not", "in", "hkeys", ":", "if", "self", ".", "response_protocol", "==", "'HTTP/1.1'", ":", "# Both server and client are HTTP/1.1 or better", "if", "self", ".", "close_connection", ":", "self", ".", "outheaders", ".", "append", "(", "(", "b'Connection'", ",", "b'close'", ")", ")", "else", ":", "# Server and/or client are HTTP/1.0", "if", "not", "self", ".", "close_connection", ":", "self", ".", "outheaders", ".", "append", "(", "(", "b'Connection'", ",", "b'Keep-Alive'", ")", ")", "if", "(", "not", "self", ".", "close_connection", ")", "and", "(", "not", "self", ".", "chunked_read", ")", ":", "# Read any remaining request body data on the socket.", "# \"If an origin server receives a request that does not include an", "# Expect request-header field with the \"100-continue\" expectation,", "# the request includes a request body, and the server responds", "# with a final status code before reading the entire request body", "# from the transport connection, then the server SHOULD NOT close", "# the transport connection until it has read the entire request,", "# or until the client closes the connection. Otherwise, the client", "# might not reliably receive the response message. 
However, this", "# requirement is not be construed as preventing a server from", "# defending itself against denial-of-service attacks, or from", "# badly broken client implementations.\"", "remaining", "=", "getattr", "(", "self", ".", "rfile", ",", "'remaining'", ",", "0", ")", "if", "remaining", ">", "0", ":", "self", ".", "rfile", ".", "read", "(", "remaining", ")", "if", "b'date'", "not", "in", "hkeys", ":", "self", ".", "outheaders", ".", "append", "(", "(", "b'Date'", ",", "email", ".", "utils", ".", "formatdate", "(", "usegmt", "=", "True", ")", ".", "encode", "(", "'ISO-8859-1'", ")", ",", ")", ")", "if", "b'server'", "not", "in", "hkeys", ":", "self", ".", "outheaders", ".", "append", "(", "(", "b'Server'", ",", "self", ".", "server", ".", "server_name", ".", "encode", "(", "'ISO-8859-1'", ")", ",", ")", ")", "proto", "=", "self", ".", "server", ".", "protocol", ".", "encode", "(", "'ascii'", ")", "buf", "=", "[", "proto", "+", "SPACE", "+", "self", ".", "status", "+", "CRLF", "]", "for", "k", ",", "v", "in", "self", ".", "outheaders", ":", "buf", ".", "append", "(", "k", "+", "COLON", "+", "SPACE", "+", "v", "+", "CRLF", ")", "buf", ".", "append", "(", "CRLF", ")", "self", ".", "conn", ".", "wfile", ".", "write", "(", "EMPTY", ".", "join", "(", "buf", ")", ")" ]
https://github.com/Southpaw-TACTIC/TACTIC/blob/ba9b87aef0ee3b3ea51446f25b285ebbca06f62c/3rd_party/python2/site-packages/cheroot/server.py#L1028-L1102
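The body-framing rules in send_headers can be condensed into a small decision function. The sketch below (framing_decision is hypothetical, not part of cheroot) keeps only the chunked/close logic, assuming just the status code, protocol, method, and Content-Length presence matter:

```python
def framing_decision(status, protocol, method, has_content_length):
    # Returns (use_chunked, close_connection), following the same
    # branch order as send_headers above.
    if status == 413:
        return False, True            # drop the connection to avoid garbage
    if has_content_length:
        return False, False           # explicit length, nothing to decide
    if status < 200 or status in (204, 205, 304):
        return False, False           # responses that must not carry a body
    if protocol == 'HTTP/1.1' and method != 'HEAD':
        return True, False            # chunked transfer-coding
    return False, True                # only closing can delimit the body

print(framing_decision(200, 'HTTP/1.1', 'GET', False))   # (True, False)
print(framing_decision(200, 'HTTP/1.0', 'GET', False))   # (False, True)
print(framing_decision(413, 'HTTP/1.1', 'POST', True))   # (False, True)
```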
scottrogowski/code2flow
37e45ca4340289f8ceec79b3fe5131c401387c58
code2flow/ruby.py
python
walk
(tree_el)
return ret
Given an ast element (list), walk it in a dfs to get every el (list) out of it :param tree_el ast: :rtype: list[ast]
Given an ast element (list), walk it in a dfs to get every el (list) out of it
[ "Given", "an", "ast", "element", "(", "list", ")", "walk", "it", "in", "a", "dfs", "to", "get", "every", "el", "(", "list", ")", "out", "of", "it" ]
def walk(tree_el): """ Given an ast element (list), walk it in a dfs to get every el (list) out of it :param tree_el ast: :rtype: list[ast] """ if not tree_el: return [] ret = [tree_el] for el in tree_el: if isinstance(el, list): ret += walk(el) return ret
[ "def", "walk", "(", "tree_el", ")", ":", "if", "not", "tree_el", ":", "return", "[", "]", "ret", "=", "[", "tree_el", "]", "for", "el", "in", "tree_el", ":", "if", "isinstance", "(", "el", ",", "list", ")", ":", "ret", "+=", "walk", "(", "el", ")", "return", "ret" ]
https://github.com/scottrogowski/code2flow/blob/37e45ca4340289f8ceec79b3fe5131c401387c58/code2flow/ruby.py#L58-L72
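A quick usage demonstration of the traversal, assuming the nested-list shape the Ruby parser output takes (the tree literal here is made up):

```python
def walk(tree_el):
    # Same depth-first traversal as above: yield the element itself
    # first, then every nested list, recursively.
    if not tree_el:
        return []
    ret = [tree_el]
    for el in tree_el:
        if isinstance(el, list):
            ret += walk(el)
    return ret

tree = ['send', ['lvar', 'x'], ['int', 1]]
for node in walk(tree):
    print(node)
# ['send', ['lvar', 'x'], ['int', 1]]
# ['lvar', 'x']
# ['int', 1]
```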
atom-community/ide-python
c046f9c2421713b34baa22648235541c5bb284fe
lib/debugger/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/third_party/pep8/pycodestyle.py
python
comparison_to_singleton
(logical_line, noqa)
r"""Comparison to singletons should use "is" or "is not". Comparisons to singletons like None should always be done with "is" or "is not", never the equality operators. Okay: if arg is not None: E711: if arg != None: E711: if None == arg: E712: if arg == True: E712: if False == arg: Also, beware of writing if x when you really mean if x is not None -- e.g. when testing whether a variable or argument that defaults to None was set to some other value. The other value might have a type (such as a container) that could be false in a boolean context!
r"""Comparison to singletons should use "is" or "is not".
[ "r", "Comparison", "to", "singletons", "should", "use", "is", "or", "is", "not", "." ]
def comparison_to_singleton(logical_line, noqa): r"""Comparison to singletons should use "is" or "is not". Comparisons to singletons like None should always be done with "is" or "is not", never the equality operators. Okay: if arg is not None: E711: if arg != None: E711: if None == arg: E712: if arg == True: E712: if False == arg: Also, beware of writing if x when you really mean if x is not None -- e.g. when testing whether a variable or argument that defaults to None was set to some other value. The other value might have a type (such as a container) that could be false in a boolean context! """ match = not noqa and COMPARE_SINGLETON_REGEX.search(logical_line) if match: singleton = match.group(1) or match.group(3) same = (match.group(2) == '==') msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton) if singleton in ('None',): code = 'E711' else: code = 'E712' nonzero = ((singleton == 'True' and same) or (singleton == 'False' and not same)) msg += " or 'if %scond:'" % ('' if nonzero else 'not ') yield match.start(2), ("%s comparison to %s should be %s" % (code, singleton, msg))
[ "def", "comparison_to_singleton", "(", "logical_line", ",", "noqa", ")", ":", "match", "=", "not", "noqa", "and", "COMPARE_SINGLETON_REGEX", ".", "search", "(", "logical_line", ")", "if", "match", ":", "singleton", "=", "match", ".", "group", "(", "1", ")", "or", "match", ".", "group", "(", "3", ")", "same", "=", "(", "match", ".", "group", "(", "2", ")", "==", "'=='", ")", "msg", "=", "\"'if cond is %s:'\"", "%", "(", "(", "''", "if", "same", "else", "'not '", ")", "+", "singleton", ")", "if", "singleton", "in", "(", "'None'", ",", ")", ":", "code", "=", "'E711'", "else", ":", "code", "=", "'E712'", "nonzero", "=", "(", "(", "singleton", "==", "'True'", "and", "same", ")", "or", "(", "singleton", "==", "'False'", "and", "not", "same", ")", ")", "msg", "+=", "\" or 'if %scond:'\"", "%", "(", "''", "if", "nonzero", "else", "'not '", ")", "yield", "match", ".", "start", "(", "2", ")", ",", "(", "\"%s comparison to %s should be %s\"", "%", "(", "code", ",", "singleton", ",", "msg", ")", ")" ]
https://github.com/atom-community/ide-python/blob/c046f9c2421713b34baa22648235541c5bb284fe/lib/debugger/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/third_party/pep8/pycodestyle.py#L1123-L1154
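For reference, the code patterns this check flags and their preferred forms (illustrative snippet only; running pycodestyle over it would report E711/E712 on the flagged lines):

```python
x = None

# Flagged as E711: equality comparison to None.
if x == None:
    pass

# Preferred form: identity test.
if x is None:
    pass

flag = False

# Flagged as E712; also risky, since many falsy values are not False.
if flag == False:
    pass

# Preferred: a plain truth test (or 'if flag is False:' when identity matters).
if not flag:
    pass
```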
DocSavage/bloog
ba3e32209006670fbac1beda1e3b631608c2b6cb
utils/external/BeautifulSoup.py
python
PageElement._lastRecursiveChild
(self)
return lastChild
Finds the last element beneath this object to be parsed.
Finds the last element beneath this object to be parsed.
[ "Finds", "the", "last", "element", "beneath", "this", "object", "to", "be", "parsed", "." ]
def _lastRecursiveChild(self): "Finds the last element beneath this object to be parsed." lastChild = self while hasattr(lastChild, 'contents') and lastChild.contents: lastChild = lastChild.contents[-1] return lastChild
[ "def", "_lastRecursiveChild", "(", "self", ")", ":", "lastChild", "=", "self", "while", "hasattr", "(", "lastChild", ",", "'contents'", ")", "and", "lastChild", ".", "contents", ":", "lastChild", "=", "lastChild", ".", "contents", "[", "-", "1", "]", "return", "lastChild" ]
https://github.com/DocSavage/bloog/blob/ba3e32209006670fbac1beda1e3b631608c2b6cb/utils/external/BeautifulSoup.py#L168-L173
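The descent always follows the final child at each level. A self-contained sketch (Node and last_recursive_child are hypothetical stand-ins for BeautifulSoup's PageElement):

```python
class Node(object):
    def __init__(self, contents=None):
        self.contents = contents or []

def last_recursive_child(el):
    # Descend through the last child at each level until a node has
    # no contents, mirroring _lastRecursiveChild above.
    last = el
    while hasattr(last, 'contents') and last.contents:
        last = last.contents[-1]
    return last

leaf = Node()
tree = Node([Node(), Node([Node(), leaf])])
print(last_recursive_child(tree) is leaf)   # True
```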
oldj/SwitchHosts
d0eb2321fe36780ec32c914cbc69a818fc1918d3
alfred/workflow/background.py
python
kill
(name, sig=signal.SIGTERM)
return True
Send a signal to job ``name`` via :func:`os.kill`. .. versionadded:: 1.29 Args: name (str): Name of the job sig (int, optional): Signal to send (default: SIGTERM) Returns: bool: `False` if job isn't running, `True` if signal was sent.
Send a signal to job ``name`` via :func:`os.kill`.
[ "Send", "a", "signal", "to", "job", "name", "via", ":", "func", ":", "os", ".", "kill", "." ]
def kill(name, sig=signal.SIGTERM): """Send a signal to job ``name`` via :func:`os.kill`. .. versionadded:: 1.29 Args: name (str): Name of the job sig (int, optional): Signal to send (default: SIGTERM) Returns: bool: `False` if job isn't running, `True` if signal was sent. """ pid = _job_pid(name) if pid is None: return False os.kill(pid, sig) return True
[ "def", "kill", "(", "name", ",", "sig", "=", "signal", ".", "SIGTERM", ")", ":", "pid", "=", "_job_pid", "(", "name", ")", "if", "pid", "is", "None", ":", "return", "False", "os", ".", "kill", "(", "pid", ",", "sig", ")", "return", "True" ]
https://github.com/oldj/SwitchHosts/blob/d0eb2321fe36780ec32c914cbc69a818fc1918d3/alfred/workflow/background.py#L176-L193
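What kill() boils down to once _job_pid has resolved the job name is a single os.kill call. A runnable sketch against a throwaway child process (assumes a POSIX system with a sleep binary; the Alfred-specific pid lookup is omitted):

```python
import os
import signal
import subprocess

# Spawn a throwaway child so there is a real pid to signal.
proc = subprocess.Popen(['sleep', '60'])

# The core of kill(name): deliver SIGTERM (the default) via os.kill.
os.kill(proc.pid, signal.SIGTERM)
proc.wait()
print(proc.returncode)   # -15 on POSIX: terminated by signal 15 (SIGTERM)
```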
replit-archive/jsrepl
36d79b6288ca5d26208e8bade2a168c6ebcb2376
extern/python/closured/lib/python2.7/collections.py
python
Counter.update
(self, iterable=None, **kwds)
Like dict.update() but add counts instead of replacing them. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.update('witch') # add elements from another iterable >>> d = Counter('watch') >>> c.update(d) # add elements from another counter >>> c['h'] # four 'h' in which, witch, and watch 4
Like dict.update() but add counts instead of replacing them.
[ "Like", "dict", ".", "update", "()", "but", "add", "counts", "instead", "of", "replacing", "them", "." ]
def update(self, iterable=None, **kwds): '''Like dict.update() but add counts instead of replacing them. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.update('witch') # add elements from another iterable >>> d = Counter('watch') >>> c.update(d) # add elements from another counter >>> c['h'] # four 'h' in which, witch, and watch 4 ''' # The regular dict.update() operation makes no sense here because the # replace behavior results in the some of original untouched counts # being mixed-in with all of the other counts for a mismash that # doesn't have a straight-forward interpretation in most counting # contexts. Instead, we implement straight-addition. Both the inputs # and outputs are allowed to contain zero and negative counts. if iterable is not None: if isinstance(iterable, Mapping): if self: self_get = self.get for elem, count in iterable.iteritems(): self[elem] = self_get(elem, 0) + count else: super(Counter, self).update(iterable) # fast path when counter is empty else: self_get = self.get for elem in iterable: self[elem] = self_get(elem, 0) + 1 if kwds: self.update(kwds)
[ "def", "update", "(", "self", ",", "iterable", "=", "None", ",", "*", "*", "kwds", ")", ":", "# The regular dict.update() operation makes no sense here because the", "# replace behavior results in the some of original untouched counts", "# being mixed-in with all of the other counts for a mismash that", "# doesn't have a straight-forward interpretation in most counting", "# contexts. Instead, we implement straight-addition. Both the inputs", "# and outputs are allowed to contain zero and negative counts.", "if", "iterable", "is", "not", "None", ":", "if", "isinstance", "(", "iterable", ",", "Mapping", ")", ":", "if", "self", ":", "self_get", "=", "self", ".", "get", "for", "elem", ",", "count", "in", "iterable", ".", "iteritems", "(", ")", ":", "self", "[", "elem", "]", "=", "self_get", "(", "elem", ",", "0", ")", "+", "count", "else", ":", "super", "(", "Counter", ",", "self", ")", ".", "update", "(", "iterable", ")", "# fast path when counter is empty", "else", ":", "self_get", "=", "self", ".", "get", "for", "elem", "in", "iterable", ":", "self", "[", "elem", "]", "=", "self_get", "(", "elem", ",", "0", ")", "+", "1", "if", "kwds", ":", "self", ".", "update", "(", "kwds", ")" ]
https://github.com/replit-archive/jsrepl/blob/36d79b6288ca5d26208e8bade2a168c6ebcb2376/extern/python/closured/lib/python2.7/collections.py#L469-L502
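The additive semantics are easy to see next to plain dict.update, using only the standard library:

```python
from collections import Counter

c = Counter('which')
c.update('witch')      # adds counts from an iterable
d = Counter('watch')
c.update(d)            # adds counts from another Counter
print(c['h'])          # 4 -- 'h' appears twice in 'which', once each in 'witch' and 'watch'

plain = {'h': 2}
plain.update({'h': 1})
print(plain['h'])      # 1 -- plain dict.update replaces instead of adding
```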
jupyter-widgets/pythreejs
a78cb57456948526e39ea79ac003c2cfde5ed0f4
setupbase.py
python
_get_package_data
(root, file_patterns=None)
return _get_files(file_patterns, _glob_pjoin(HERE, root))
Expand file patterns to a list of `package_data` paths. Parameters ----------- root: str The relative path to the package root from `HERE`. file_patterns: list or str, optional A list of glob patterns for the data file locations. The globs can be recursive if they include a `**`. They should be relative paths from the root or absolute paths. If not given, all files will be used. Note: Files in `node_modules` are ignored.
Expand file patterns to a list of `package_data` paths.
[ "Expand", "file", "patterns", "to", "a", "list", "of", "package_data", "paths", "." ]
def _get_package_data(root, file_patterns=None): """Expand file patterns to a list of `package_data` paths. Parameters ----------- root: str The relative path to the package root from `HERE`. file_patterns: list or str, optional A list of glob patterns for the data file locations. The globs can be recursive if they include a `**`. They should be relative paths from the root or absolute paths. If not given, all files will be used. Note: Files in `node_modules` are ignored. """ if file_patterns is None: file_patterns = ['*'] return _get_files(file_patterns, _glob_pjoin(HERE, root))
[ "def", "_get_package_data", "(", "root", ",", "file_patterns", "=", "None", ")", ":", "if", "file_patterns", "is", "None", ":", "file_patterns", "=", "[", "'*'", "]", "return", "_get_files", "(", "file_patterns", ",", "_glob_pjoin", "(", "HERE", ",", "root", ")", ")" ]
https://github.com/jupyter-widgets/pythreejs/blob/a78cb57456948526e39ea79ac003c2cfde5ed0f4/setupbase.py#L605-L623
demi6od/ChromeFuzzer
4eaf1084d5f8fd20897706edf8b67bfbbd6380fc
PocSmplify/paimei-master/pgraph/graph.py
python
graph.render_graph_udraw
(self)
return udraw
Render the uDraw graph description. @rtype: String @return: uDraw graph description.
Render the uDraw graph description.
[ "Render", "the", "uDraw", "graph", "description", "." ]
def render_graph_udraw (self): ''' Render the uDraw graph description. @rtype: String @return: uDraw graph description. ''' udraw = '[' # render each of the nodes in the graph. # the individual nodes will handle their own edge rendering. for node in self.nodes.values(): udraw += node.render_node_udraw(self) udraw += ',' # trim the extraneous comma and close the graph. udraw = udraw[0:-1] + ']' return udraw
[ "def", "render_graph_udraw", "(", "self", ")", ":", "udraw", "=", "'['", "# render each of the nodes in the graph.", "# the individual nodes will handle their own edge rendering.", "for", "node", "in", "self", ".", "nodes", ".", "values", "(", ")", ":", "udraw", "+=", "node", ".", "render_node_udraw", "(", "self", ")", "udraw", "+=", "','", "# trim the extraneous comma and close the graph.", "udraw", "=", "udraw", "[", "0", ":", "-", "1", "]", "+", "']'", "return", "udraw" ]
https://github.com/demi6od/ChromeFuzzer/blob/4eaf1084d5f8fd20897706edf8b67bfbbd6380fc/PocSmplify/paimei-master/pgraph/graph.py#L641-L660
mozilla/spidernode
aafa9e5273f954f272bb4382fc007af14674b4c2
tools/gyp/pylib/gyp/MSVSSettings.py
python
_MSBuildOnly
(tool, name, setting_type)
Defines a setting that is only found in MSBuild. Args: tool: a dictionary that gives the names of the tool for MSVS and MSBuild. name: the name of the setting. setting_type: the type of this setting.
Defines a setting that is only found in MSBuild.
[ "Defines", "a", "setting", "that", "is", "only", "found", "in", "MSBuild", "." ]
def _MSBuildOnly(tool, name, setting_type): """Defines a setting that is only found in MSBuild. Args: tool: a dictionary that gives the names of the tool for MSVS and MSBuild. name: the name of the setting. setting_type: the type of this setting. """ def _Translate(value, msbuild_settings): # Let msbuild-only properties get translated as-is from msvs_settings. tool_settings = msbuild_settings.setdefault(tool.msbuild_name, {}) tool_settings[name] = value _msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild _msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
[ "def", "_MSBuildOnly", "(", "tool", ",", "name", ",", "setting_type", ")", ":", "def", "_Translate", "(", "value", ",", "msbuild_settings", ")", ":", "# Let msbuild-only properties get translated as-is from msvs_settings.", "tool_settings", "=", "msbuild_settings", ".", "setdefault", "(", "tool", ".", "msbuild_name", ",", "{", "}", ")", "tool_settings", "[", "name", "]", "=", "value", "_msbuild_validators", "[", "tool", ".", "msbuild_name", "]", "[", "name", "]", "=", "setting_type", ".", "ValidateMSBuild", "_msvs_to_msbuild_converters", "[", "tool", ".", "msvs_name", "]", "[", "name", "]", "=", "_Translate" ]
https://github.com/mozilla/spidernode/blob/aafa9e5273f954f272bb4382fc007af14674b4c2/tools/gyp/pylib/gyp/MSVSSettings.py#L309-L324
Southpaw-TACTIC/TACTIC
ba9b87aef0ee3b3ea51446f25b285ebbca06f62c
src/pyasm/widget/widget_config.py
python
WidgetConfigView.get_configs_from_file
(cls, search_type, view)
return configs
biased towards the edit and potentially edit_definition view of the search_type
biased towards the edit and potentially edit_definition view of the search_type
[ "biased", "towards", "the", "edit", "and", "potentially", "edit_definition", "view", "of", "the", "search_type" ]
def get_configs_from_file(cls, search_type, view): '''biased towards the edit and potentially edit_definition view of the search_type''' full_search_type = search_type if isinstance(search_type, SearchType): search_type_obj = search_type else: search_type_obj = SearchType.get(search_type) base_search_type = search_type_obj.get_base_key() from pyasm.biz import Project search_type = Project.get_full_search_type(full_search_type) # build name of the files to look in sub_dir = search_type_obj.get_value('namespace') tmp = search_type_obj.get_base_key().split("/") if len(tmp) == 2: table = tmp[1] else: # ignore the schema for config files for now table = tmp[2] filename = "%s-conf.xml" % table default_filename = "DEFAULT-conf.xml" base_dir = Environment.get_install_dir() configs = [] internal_conf_path = "%s/src/config2/search_type/widget/%s/%s" \ % (base_dir, sub_dir, filename) if os.path.exists(internal_conf_path): # load in config file config = WidgetConfig.get(view, internal_conf_path) if config.has_view(): configs.append(config) # look for the default file (for definitions) default_conf_path = "%s/src/config2/search_type/widget/%s/%s" \ % (base_dir, sub_dir, default_filename) if os.path.exists(default_conf_path): # load in config file config = WidgetConfig.get(view, default_conf_path) if config.has_view(): configs.append(config) # finally, look at prod default default_conf_path = "%s/src/config2/search_type/widget/prod/%s" \ % (base_dir, default_filename) if os.path.exists(default_conf_path): config = WidgetConfig.get(view, default_conf_path) if config.has_view(): configs.append(config) return configs
[ "def", "get_configs_from_file", "(", "cls", ",", "search_type", ",", "view", ")", ":", "full_search_type", "=", "search_type", "if", "isinstance", "(", "search_type", ",", "SearchType", ")", ":", "search_type_obj", "=", "search_type", "else", ":", "search_type_obj", "=", "SearchType", ".", "get", "(", "search_type", ")", "base_search_type", "=", "search_type_obj", ".", "get_base_key", "(", ")", "from", "pyasm", ".", "biz", "import", "Project", "search_type", "=", "Project", ".", "get_full_search_type", "(", "full_search_type", ")", "# build name of the files to look in", "sub_dir", "=", "search_type_obj", ".", "get_value", "(", "'namespace'", ")", "tmp", "=", "search_type_obj", ".", "get_base_key", "(", ")", ".", "split", "(", "\"/\"", ")", "if", "len", "(", "tmp", ")", "==", "2", ":", "table", "=", "tmp", "[", "1", "]", "else", ":", "# ignore the schema for config files for now", "table", "=", "tmp", "[", "2", "]", "filename", "=", "\"%s-conf.xml\"", "%", "table", "default_filename", "=", "\"DEFAULT-conf.xml\"", "base_dir", "=", "Environment", ".", "get_install_dir", "(", ")", "configs", "=", "[", "]", "internal_conf_path", "=", "\"%s/src/config2/search_type/widget/%s/%s\"", "%", "(", "base_dir", ",", "sub_dir", ",", "filename", ")", "if", "os", ".", "path", ".", "exists", "(", "internal_conf_path", ")", ":", "# load in config file", "config", "=", "WidgetConfig", ".", "get", "(", "view", ",", "internal_conf_path", ")", "if", "config", ".", "has_view", "(", ")", ":", "configs", ".", "append", "(", "config", ")", "# look for the default file (for definitions)", "default_conf_path", "=", "\"%s/src/config2/search_type/widget/%s/%s\"", "%", "(", "base_dir", ",", "sub_dir", ",", "default_filename", ")", "if", "os", ".", "path", ".", "exists", "(", "default_conf_path", ")", ":", "# load in config file", "config", "=", "WidgetConfig", ".", "get", "(", "view", ",", "default_conf_path", ")", "if", "config", ".", "has_view", "(", ")", ":", "configs", ".", "append", "(", "config", ")", "# finally, look at prod default", "default_conf_path", "=", "\"%s/src/config2/search_type/widget/prod/%s\"", "%", "(", "base_dir", ",", "default_filename", ")", "if", "os", ".", "path", ".", "exists", "(", "default_conf_path", ")", ":", "config", "=", "WidgetConfig", ".", "get", "(", "view", ",", "default_conf_path", ")", "if", "config", ".", "has_view", "(", ")", ":", "configs", ".", "append", "(", "config", ")", "return", "configs" ]
https://github.com/Southpaw-TACTIC/TACTIC/blob/ba9b87aef0ee3b3ea51446f25b285ebbca06f62c/src/pyasm/widget/widget_config.py#L1744-L1800
defunctzombie/libuv.js
04a76a470dfdcad14ea8f19b6f215f205a9214f8
tools/gyp/pylib/gyp/msvs_emulation.py
python
MsvsSettings.AdjustLibraries
(self, libraries)
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
Strip -l from library if it's specified with that.
Strip -l from library if it's specified with that.
[ "Strip", "-", "l", "from", "library", "if", "it", "s", "specified", "with", "that", "." ]
def AdjustLibraries(self, libraries): """Strip -l from library if it's specified with that.""" libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries] return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
[ "def", "AdjustLibraries", "(", "self", ",", "libraries", ")", ":", "libs", "=", "[", "lib", "[", "2", ":", "]", "if", "lib", ".", "startswith", "(", "'-l'", ")", "else", "lib", "for", "lib", "in", "libraries", "]", "return", "[", "lib", "+", "'.lib'", "if", "not", "lib", ".", "endswith", "(", "'.lib'", ")", "else", "lib", "for", "lib", "in", "libs", "]" ]
https://github.com/defunctzombie/libuv.js/blob/04a76a470dfdcad14ea8f19b6f215f205a9214f8/tools/gyp/pylib/gyp/msvs_emulation.py#L203-L206
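A standalone copy of the two comprehensions with sample input (adjust_libraries is the same logic lifted out of the class):

```python
def adjust_libraries(libraries):
    # Strip a leading '-l' flag, then ensure every entry carries a
    # '.lib' suffix -- the same normalization as AdjustLibraries.
    libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
    return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]

print(adjust_libraries(['-lws2_32', 'kernel32.lib', 'user32']))
# ['ws2_32.lib', 'kernel32.lib', 'user32.lib']
```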
Nexedi/erp5
44df1959c0e21576cf5e9803d602d95efb4b695b
product/ERP5Type/Base.py
python
Base.makeTemplate
(self)
Make document behave as a template. A template is no longer indexable TODO: - make template read only, acquired local roles, etc. - stronger security model - prevent from changing templates or invoking workflows
Make document behave as a template. A template is no longer indexable
[ "Make", "document", "behave", "as", "a", "template", ".", "A", "template", "is", "no", "longer", "indexable" ]
def makeTemplate(self): """ Make document behave as a template. A template is no longer indexable TODO: - make template read only, acquired local roles, etc. - stronger security model - prevent from changing templates or invoking workflows """ parent = self.getParentValue() if parent.getPortalType() != "Preference" and not parent.isTemplate: raise ValueError("Template documents can not be created outside Preferences") self.isTemplate = ConstantGetter('isTemplate', value=True)
[ "def", "makeTemplate", "(", "self", ")", ":", "parent", "=", "self", ".", "getParentValue", "(", ")", "if", "parent", ".", "getPortalType", "(", ")", "!=", "\"Preference\"", "and", "not", "parent", ".", "isTemplate", ":", "raise", "ValueError", "(", "\"Template documents can not be created outside Preferences\"", ")", "self", ".", "isTemplate", "=", "ConstantGetter", "(", "'isTemplate'", ",", "value", "=", "True", ")" ]
https://github.com/Nexedi/erp5/blob/44df1959c0e21576cf5e9803d602d95efb4b695b/product/ERP5Type/Base.py#L3369-L3382
facebookarchive/planout
eee764781054abb39f003133b00b88a73c8f8982
python/planout/experiment.py
python
DefaultExperiment.get_default_params
(self)
return {}
Default experiments that are just key-value stores should override this method.
Default experiments that are just key-value stores should override this method.
[ "Default", "experiments", "that", "are", "just", "key", "-", "value", "stores", "should", "override", "this", "method", "." ]
def get_default_params(self): """ Default experiments that are just key-value stores should override this method.""" return {}
[ "def", "get_default_params", "(", "self", ")", ":", "return", "{", "}" ]
https://github.com/facebookarchive/planout/blob/eee764781054abb39f003133b00b88a73c8f8982/python/planout/experiment.py#L250-L254
GeoNode/geonode
326d70153ad79e1ed831d46a0e3b239d422757a8
geonode/groups/templatetags/groups_tags.py
python
group_profile_image
(group_profile, css_classes="", size=None)
return img_tag
Returns an HTML img tag with the input group_profile's logo. If the group profile does not have an associated logo, a stock image is used.
Returns an HTML img tag with the input group_profile's logo.
[ "Returns", "an", "HTML", "img", "tag", "with", "the", "input", "group_profile", "s", "logo", "." ]
def group_profile_image(group_profile, css_classes="", size=None): """Returns an HTML img tag with the input group_profile's logo. If the group profile does not have an associated logo, a stock image is used. """ if isinstance(css_classes, str): class_attr = f'class="{css_classes}" ' else: try: class_attr = f'class="{(" ".join(str(i) for i in css_classes))}" ' except Exception: class_attr = "" if size is not None: style_attr = f'style="width: {size}px; height: {size}px" ' else: style_attr = "" if group_profile.logo.name: url = group_profile.logo_url else: url = staticfiles_storage.url("geonode/img/default-avatar.jpg") img_tag = f'<img {class_attr}{style_attr}src="{url}" alt="{group_profile.title}">' return img_tag
[ "def", "group_profile_image", "(", "group_profile", ",", "css_classes", "=", "\"\"", ",", "size", "=", "None", ")", ":", "if", "isinstance", "(", "css_classes", ",", "str", ")", ":", "class_attr", "=", "f'class=\"{css_classes}\" '", "else", ":", "try", ":", "class_attr", "=", "f'class=\"{(\" \".join(str(i) for i in css_classes))}\" '", "except", "Exception", ":", "class_attr", "=", "\"\"", "if", "size", "is", "not", "None", ":", "style_attr", "=", "f'style=\"width: {size}px; height: {size}px\" '", "else", ":", "style_attr", "=", "\"\"", "if", "group_profile", ".", "logo", ".", "name", ":", "url", "=", "group_profile", ".", "logo_url", "else", ":", "url", "=", "staticfiles_storage", ".", "url", "(", "\"geonode/img/default-avatar.jpg\"", ")", "img_tag", "=", "f'<img {class_attr}{style_attr}src=\"{url}\" alt=\"{group_profile.title}\">'", "return", "img_tag" ]
https://github.com/GeoNode/geonode/blob/326d70153ad79e1ed831d46a0e3b239d422757a8/geonode/groups/templatetags/groups_tags.py#L26-L51
replit-archive/jsrepl
36d79b6288ca5d26208e8bade2a168c6ebcb2376
extern/python/reloop-closured/lib/python2.7/zipfile.py
python
ZipFile.extract
(self, member, path=None, pwd=None)
return self._extract_member(member, path, pwd)
Extract a member from the archive to the current working directory, using its full name. Its file information is extracted as accurately as possible. `member' may be a filename or a ZipInfo object. You can specify a different directory using `path'.
Extract a member from the archive to the current working directory, using its full name. Its file information is extracted as accurately as possible. `member' may be a filename or a ZipInfo object. You can specify a different directory using `path'.
[ "Extract", "a", "member", "from", "the", "archive", "to", "the", "current", "working", "directory", "using", "its", "full", "name", ".", "Its", "file", "information", "is", "extracted", "as", "accurately", "as", "possible", ".", "member", "may", "be", "a", "filename", "or", "a", "ZipInfo", "object", ".", "You", "can", "specify", "a", "different", "directory", "using", "path", "." ]
def extract(self, member, path=None, pwd=None): """Extract a member from the archive to the current working directory, using its full name. Its file information is extracted as accurately as possible. `member' may be a filename or a ZipInfo object. You can specify a different directory using `path'. """ if not isinstance(member, ZipInfo): member = self.getinfo(member) if path is None: path = os.getcwd() return self._extract_member(member, path, pwd)
[ "def", "extract", "(", "self", ",", "member", ",", "path", "=", "None", ",", "pwd", "=", "None", ")", ":", "if", "not", "isinstance", "(", "member", ",", "ZipInfo", ")", ":", "member", "=", "self", ".", "getinfo", "(", "member", ")", "if", "path", "is", "None", ":", "path", "=", "os", ".", "getcwd", "(", ")", "return", "self", ".", "_extract_member", "(", "member", ",", "path", ",", "pwd", ")" ]
https://github.com/replit-archive/jsrepl/blob/36d79b6288ca5d26208e8bade2a168c6ebcb2376/extern/python/reloop-closured/lib/python2.7/zipfile.py#L938-L950
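Typical stdlib usage of extract, self-contained (writes demo.zip and an out/ directory under the current working directory):

```python
import os
import zipfile

# Build a small archive so the example is self-contained.
with zipfile.ZipFile('demo.zip', 'w') as zf:
    zf.writestr('docs/readme.txt', 'hello')

with zipfile.ZipFile('demo.zip') as zf:
    # `member` may be a name or a ZipInfo; `path` defaults to os.getcwd().
    target = zf.extract('docs/readme.txt', path='out')
    print(target)                      # out/docs/readme.txt (platform separators)
    print(os.path.exists(target))      # True
```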
facebookarchive/nuclide
2a2a0a642d136768b7d2a6d35a652dc5fb77d70a
modules/atom-ide-debugger-python/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/pydevd_attach_to_process/winappdbg/crash.py
python
CrashContainer.__del__
(self)
Class destructor. Closes the database when this object is destroyed.
Class destructor. Closes the database when this object is destroyed.
[ "Class", "destructor", ".", "Closes", "the", "database", "when", "this", "object", "is", "destroyed", "." ]
def __del__(self): "Class destructor. Closes the database when this object is destroyed." try: if self.__filename: self.__db.close() except: pass
[ "def", "__del__", "(", "self", ")", ":", "try", ":", "if", "self", ".", "__filename", ":", "self", ".", "__db", ".", "close", "(", ")", "except", ":", "pass" ]
https://github.com/facebookarchive/nuclide/blob/2a2a0a642d136768b7d2a6d35a652dc5fb77d70a/modules/atom-ide-debugger-python/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/pydevd_attach_to_process/winappdbg/crash.py#L1408-L1414
Nexedi/erp5
44df1959c0e21576cf5e9803d602d95efb4b695b
product/ERP5Catalog/CatalogTool.py
python
CatalogTool.getSecurityUidDictAndRoleColumnDict
(self, sql_catalog_id=None, local_roles=None)
return security_uid_dict, role_column_dict, local_role_column_dict
Return a dict of local_roles_group_id -> security Uids and a dictionary containing available role columns. XXX: This method always uses default catalog. This should not break a site as long as security uids are considered consistent among all catalogs.
Return a dict of local_roles_group_id -> security Uids and a dictionary containing available role columns.
[ "Return", "a", "dict", "of", "local_roles_group_id", "-", ">", "security", "Uids", "and", "a", "dictionary", "containing", "available", "role", "columns", "." ]
def getSecurityUidDictAndRoleColumnDict(self, sql_catalog_id=None, local_roles=None): """ Return a dict of local_roles_group_id -> security Uids and a dictionary containing available role columns. XXX: This method always uses default catalog. This should not break a site as long as security uids are considered consistent among all catalogs. """ allowedRolesAndUsers, role_column_dict, local_role_column_dict = \ self.getAllowedRolesAndUsers( sql_catalog_id=sql_catalog_id, local_roles=local_roles, ) catalog = self.getSQLCatalog(sql_catalog_id) method = getattr(catalog, catalog.sql_search_security, None) if allowedRolesAndUsers: allowedRolesAndUsers.sort() cache_key = tuple(allowedRolesAndUsers) tv = getTransactionalVariable() try: security_uid_cache = tv['getSecurityUidDictAndRoleColumnDict'] except KeyError: security_uid_cache = tv['getSecurityUidDictAndRoleColumnDict'] = {} try: security_uid_dict = security_uid_cache[cache_key] except KeyError: if method is None: warnings.warn("The usage of allowedRolesAndUsers is "\ "deprecated. Please update your catalog "\ "business template.", DeprecationWarning) security_uid_dict = {None: [x.security_uid for x in \ self.unrestrictedSearchResults( allowedRolesAndUsers=allowedRolesAndUsers, select_list=["security_uid"], group_by=["security_uid"])] } else: # XXX: What with this string transformation ?! Souldn't it be done in # dtml instead ? ... yes, but how to be bw compatible ? allowedRolesAndUsers = [sqlquote(role) for role in allowedRolesAndUsers] security_uid_dict = defaultdict(list) for brain in method(security_roles_list=allowedRolesAndUsers): security_uid_dict[getattr(brain, 'local_roles_group_id', '') ].append(brain.uid) security_uid_cache[cache_key] = security_uid_dict else: security_uid_dict = [] return security_uid_dict, role_column_dict, local_role_column_dict
[ "def", "getSecurityUidDictAndRoleColumnDict", "(", "self", ",", "sql_catalog_id", "=", "None", ",", "local_roles", "=", "None", ")", ":", "allowedRolesAndUsers", ",", "role_column_dict", ",", "local_role_column_dict", "=", "self", ".", "getAllowedRolesAndUsers", "(", "sql_catalog_id", "=", "sql_catalog_id", ",", "local_roles", "=", "local_roles", ",", ")", "catalog", "=", "self", ".", "getSQLCatalog", "(", "sql_catalog_id", ")", "method", "=", "getattr", "(", "catalog", ",", "catalog", ".", "sql_search_security", ",", "None", ")", "if", "allowedRolesAndUsers", ":", "allowedRolesAndUsers", ".", "sort", "(", ")", "cache_key", "=", "tuple", "(", "allowedRolesAndUsers", ")", "tv", "=", "getTransactionalVariable", "(", ")", "try", ":", "security_uid_cache", "=", "tv", "[", "'getSecurityUidDictAndRoleColumnDict'", "]", "except", "KeyError", ":", "security_uid_cache", "=", "tv", "[", "'getSecurityUidDictAndRoleColumnDict'", "]", "=", "{", "}", "try", ":", "security_uid_dict", "=", "security_uid_cache", "[", "cache_key", "]", "except", "KeyError", ":", "if", "method", "is", "None", ":", "warnings", ".", "warn", "(", "\"The usage of allowedRolesAndUsers is \"", "\"deprecated. Please update your catalog \"", "\"business template.\"", ",", "DeprecationWarning", ")", "security_uid_dict", "=", "{", "None", ":", "[", "x", ".", "security_uid", "for", "x", "in", "self", ".", "unrestrictedSearchResults", "(", "allowedRolesAndUsers", "=", "allowedRolesAndUsers", ",", "select_list", "=", "[", "\"security_uid\"", "]", ",", "group_by", "=", "[", "\"security_uid\"", "]", ")", "]", "}", "else", ":", "# XXX: What with this string transformation ?! Souldn't it be done in", "# dtml instead ? ... yes, but how to be bw compatible ?", "allowedRolesAndUsers", "=", "[", "sqlquote", "(", "role", ")", "for", "role", "in", "allowedRolesAndUsers", "]", "security_uid_dict", "=", "defaultdict", "(", "list", ")", "for", "brain", "in", "method", "(", "security_roles_list", "=", "allowedRolesAndUsers", ")", ":", "security_uid_dict", "[", "getattr", "(", "brain", ",", "'local_roles_group_id'", ",", "''", ")", "]", ".", "append", "(", "brain", ".", "uid", ")", "security_uid_cache", "[", "cache_key", "]", "=", "security_uid_dict", "else", ":", "security_uid_dict", "=", "[", "]", "return", "security_uid_dict", ",", "role_column_dict", ",", "local_role_column_dict" ]
https://github.com/Nexedi/erp5/blob/44df1959c0e21576cf5e9803d602d95efb4b695b/product/ERP5Catalog/CatalogTool.py#L690-L739
KhronosGroup/Vulkan-Docs
ee155139142a2a71b56238419bf0a6859f7b0a93
scripts/spec_tools/entity_db.py
python
EntityDatabase.getGeneratedDirs
(self)
return ['basetypes', 'defines', 'enums', 'flags', 'funcpointers', 'handles', 'protos', 'structs']
Return a sequence of strings that are the subdirectories of generates API includes. Called only once during construction.
Return a sequence of strings that are the subdirectories of generates API includes.
[ "Return", "a", "sequence", "of", "strings", "that", "are", "the", "subdirectories", "of", "generates", "API", "includes", "." ]
def getGeneratedDirs(self): """Return a sequence of strings that are the subdirectories of generates API includes. Called only once during construction. """ return ['basetypes', 'defines', 'enums', 'flags', 'funcpointers', 'handles', 'protos', 'structs']
[ "def", "getGeneratedDirs", "(", "self", ")", ":", "return", "[", "'basetypes'", ",", "'defines'", ",", "'enums'", ",", "'flags'", ",", "'funcpointers'", ",", "'handles'", ",", "'protos'", ",", "'structs'", "]" ]
https://github.com/KhronosGroup/Vulkan-Docs/blob/ee155139142a2a71b56238419bf0a6859f7b0a93/scripts/spec_tools/entity_db.py#L77-L89
ayojs/ayo
45a1c8cf6384f5bcc81d834343c3ed9d78b97df3
tools/gyp/pylib/gyp/input.py
python
VerifyNoCollidingTargets
(targets)
Verify that no two targets in the same directory share the same name. Arguments: targets: A list of targets in the form 'path/to/file.gyp:target_name'.
Verify that no two targets in the same directory share the same name.
[ "Verify", "that", "no", "two", "targets", "in", "the", "same", "directory", "share", "the", "same", "name", "." ]
def VerifyNoCollidingTargets(targets): """Verify that no two targets in the same directory share the same name. Arguments: targets: A list of targets in the form 'path/to/file.gyp:target_name'. """ # Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'. used = {} for target in targets: # Separate out 'path/to/file.gyp, 'target_name' from # 'path/to/file.gyp:target_name'. path, name = target.rsplit(':', 1) # Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'. subdir, gyp = os.path.split(path) # Use '.' for the current directory '', so that the error messages make # more sense. if not subdir: subdir = '.' # Prepare a key like 'path/to:target_name'. key = subdir + ':' + name if key in used: # Complain if this target is already used. raise GypError('Duplicate target name "%s" in directory "%s" used both ' 'in "%s" and "%s".' % (name, subdir, gyp, used[key])) used[key] = gyp
[ "def", "VerifyNoCollidingTargets", "(", "targets", ")", ":", "# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.", "used", "=", "{", "}", "for", "target", "in", "targets", ":", "# Separate out 'path/to/file.gyp, 'target_name' from", "# 'path/to/file.gyp:target_name'.", "path", ",", "name", "=", "target", ".", "rsplit", "(", "':'", ",", "1", ")", "# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.", "subdir", ",", "gyp", "=", "os", ".", "path", ".", "split", "(", "path", ")", "# Use '.' for the current directory '', so that the error messages make", "# more sense.", "if", "not", "subdir", ":", "subdir", "=", "'.'", "# Prepare a key like 'path/to:target_name'.", "key", "=", "subdir", "+", "':'", "+", "name", "if", "key", "in", "used", ":", "# Complain if this target is already used.", "raise", "GypError", "(", "'Duplicate target name \"%s\" in directory \"%s\" used both '", "'in \"%s\" and \"%s\".'", "%", "(", "name", ",", "subdir", ",", "gyp", ",", "used", "[", "key", "]", ")", ")", "used", "[", "key", "]", "=", "gyp" ]
https://github.com/ayojs/ayo/blob/45a1c8cf6384f5bcc81d834343c3ed9d78b97df3/tools/gyp/pylib/gyp/input.py#L2710-L2734
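The duplicate detection keys on 'subdir:target_name', so the same target name in different directories is fine. A runnable sketch (ValueError stands in for gyp's GypError):

```python
import os

def verify_no_colliding_targets(targets):
    # Same keying scheme as above: 'subdir:target_name' -> gyp file.
    used = {}
    for target in targets:
        path, name = target.rsplit(':', 1)
        subdir, gyp = os.path.split(path)
        key = (subdir or '.') + ':' + name
        if key in used:
            raise ValueError('Duplicate target name %r in directory %r '
                             'used both in %r and %r' %
                             (name, subdir or '.', gyp, used[key]))
        used[key] = gyp

verify_no_colliding_targets(['a/x.gyp:lib', 'b/x.gyp:lib'])  # ok: different dirs
try:
    verify_no_colliding_targets(['a/x.gyp:lib', 'a/y.gyp:lib'])
except ValueError as exc:
    print(exc)   # Duplicate target name 'lib' in directory 'a' ...
```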
Nexedi/erp5
44df1959c0e21576cf5e9803d602d95efb4b695b
product/ERP5/bootstrap/erp5_core/DocumentTemplateItem/portal_components/document.erp5.ImmobilisableItem.py
python
ImmobilisableItem.getCurrentAmortisationPrice
(self, with_currency=0, **kw)
return self.getAmortisationPrice(at_date=DateTime(), with_currency=with_currency, **kw)
Returns the depreciated value of item at current time
Returns the depreciated value of item at current time
[ "Returns", "the", "depreciated", "value", "of", "item", "at", "current", "time" ]
def getCurrentAmortisationPrice(self, with_currency=0, **kw): """ Returns the depreciated value of item at current time """ return self.getAmortisationPrice(at_date=DateTime(), with_currency=with_currency, **kw)
[ "def", "getCurrentAmortisationPrice", "(", "self", ",", "with_currency", "=", "0", ",", "*", "*", "kw", ")", ":", "return", "self", ".", "getAmortisationPrice", "(", "at_date", "=", "DateTime", "(", ")", ",", "with_currency", "=", "with_currency", ",", "*", "*", "kw", ")" ]
https://github.com/Nexedi/erp5/blob/44df1959c0e21576cf5e9803d602d95efb4b695b/product/ERP5/bootstrap/erp5_core/DocumentTemplateItem/portal_components/document.erp5.ImmobilisableItem.py#L1025-L1028
webrtc/apprtc
db975e22ea07a0c11a4179d4beb2feb31cf344f4
src/third_party/apiclient/http.py
python
BatchHttpRequest._new_id
(self)
return str(self._last_auto_id)
Create a new id. Auto incrementing number that avoids conflicts with ids already used. Returns: string, a new unique id.
Create a new id.
[ "Create", "a", "new", "id", "." ]
def _new_id(self): """Create a new id. Auto incrementing number that avoids conflicts with ids already used. Returns: string, a new unique id. """ self._last_auto_id += 1 while str(self._last_auto_id) in self._requests: self._last_auto_id += 1 return str(self._last_auto_id)
[ "def", "_new_id", "(", "self", ")", ":", "self", ".", "_last_auto_id", "+=", "1", "while", "str", "(", "self", ".", "_last_auto_id", ")", "in", "self", ".", "_requests", ":", "self", ".", "_last_auto_id", "+=", "1", "return", "str", "(", "self", ".", "_last_auto_id", ")" ]
https://github.com/webrtc/apprtc/blob/db975e22ea07a0c11a4179d4beb2feb31cf344f4/src/third_party/apiclient/http.py#L1151-L1162
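The id allocator simply skips over ids the caller has already chosen. A minimal model (BatchIds is a hypothetical stand-in holding only the two attributes the method touches):

```python
class BatchIds(object):
    def __init__(self):
        # _requests maps already-used string ids to payloads.
        self._requests = {}
        self._last_auto_id = 0

    def new_id(self):
        # Keep incrementing until the candidate id is not taken.
        self._last_auto_id += 1
        while str(self._last_auto_id) in self._requests:
            self._last_auto_id += 1
        return str(self._last_auto_id)

batch = BatchIds()
batch._requests['1'] = 'caller-chosen id'
print(batch.new_id())   # '2' -- '1' is skipped because it is in use
batch._requests['2'] = 'auto id now taken'
print(batch.new_id())   # '3'
```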
sagemath/cloud
054854b87817edfa95e9044c793059bddc361e67
scripts/julia.py
python
JuliaElement._sub_
(self, right)
return P.new('%s - %s'%(self._name, right._name))
EXAMPLES:: sage: a = julia(1); b = julia(2) sage: a - b -1
EXAMPLES::
[ "EXAMPLES", "::" ]
def _sub_(self, right): """ EXAMPLES:: sage: a = julia(1); b = julia(2) sage: a - b -1 """ P = self._check_valid() return P.new('%s - %s'%(self._name, right._name))
[ "def", "_sub_", "(", "self", ",", "right", ")", ":", "P", "=", "self", ".", "_check_valid", "(", ")", "return", "P", ".", "new", "(", "'%s - %s'", "%", "(", "self", ".", "_name", ",", "right", ".", "_name", ")", ")" ]
https://github.com/sagemath/cloud/blob/054854b87817edfa95e9044c793059bddc361e67/scripts/julia.py#L375-L384
alex-cory/fasthacks
72b099f11df2e5640d61e55c80706c3b234eacbe
cli_modules/preview/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/formatters/img.py
python
ImageFormatter._get_style_font
(self, style)
return self.fonts.get_font(style['bold'], style['italic'])
Get the correct font for the style.
Get the correct font for the style.
[ "Get", "the", "correct", "font", "for", "the", "style", "." ]
def _get_style_font(self, style): """ Get the correct font for the style. """ return self.fonts.get_font(style['bold'], style['italic'])
[ "def", "_get_style_font", "(", "self", ",", "style", ")", ":", "return", "self", ".", "fonts", ".", "get_font", "(", "style", "[", "'bold'", "]", ",", "style", "[", "'italic'", "]", ")" ]
https://github.com/alex-cory/fasthacks/blob/72b099f11df2e5640d61e55c80706c3b234eacbe/cli_modules/preview/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/formatters/img.py#L395-L399
RASSec/A_Scan_Framework
4a46cf14b8c717dc0196071bbfd27e2d9c85bb17
pocscan/plugins/pocsuite/packages/requests/models.py
python
PreparedRequest.prepare
(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None)
Prepares the entire request with the given parameters.
Prepares the entire request with the given parameters.
[ "Prepares", "the", "entire", "request", "with", "the", "given", "parameters", "." ]
def prepare(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None): """Prepares the entire request with the given parameters.""" self.prepare_method(method) self.prepare_url(url, params) self.prepare_headers(headers) self.prepare_cookies(cookies) self.prepare_body(data, files) self.prepare_auth(auth, url) # Note that prepare_auth must be last to enable authentication schemes # such as OAuth to work on a fully prepared request. # This MUST go after prepare_auth. Authenticators could add a hook self.prepare_hooks(hooks)
[ "def", "prepare", "(", "self", ",", "method", "=", "None", ",", "url", "=", "None", ",", "headers", "=", "None", ",", "files", "=", "None", ",", "data", "=", "None", ",", "params", "=", "None", ",", "auth", "=", "None", ",", "cookies", "=", "None", ",", "hooks", "=", "None", ")", ":", "self", ".", "prepare_method", "(", "method", ")", "self", ".", "prepare_url", "(", "url", ",", "params", ")", "self", ".", "prepare_headers", "(", "headers", ")", "self", ".", "prepare_cookies", "(", "cookies", ")", "self", ".", "prepare_body", "(", "data", ",", "files", ")", "self", ".", "prepare_auth", "(", "auth", ",", "url", ")", "# Note that prepare_auth must be last to enable authentication schemes", "# such as OAuth to work on a fully prepared request.", "# This MUST go after prepare_auth. Authenticators could add a hook", "self", ".", "prepare_hooks", "(", "hooks", ")" ]
https://github.com/RASSec/A_Scan_Framework/blob/4a46cf14b8c717dc0196071bbfd27e2d9c85bb17/pocscan/plugins/pocsuite/packages/requests/models.py#L289-L303
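In normal use prepare() is reached through the public requests API; a small usage sketch (the httpbin URL is just an example):

import requests

req = requests.Request('POST', 'https://httpbin.org/post',
                       params={'q': 'x'}, data={'key': 'value'})
prepped = req.prepare()  # runs the prepare_* pipeline shown above
print(prepped.method, prepped.url)  # POST https://httpbin.org/post?q=x

with requests.Session() as s:
    resp = s.send(prepped)  # a Session transmits the fully prepared request
    print(resp.status_code)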
datahuborg/datahub
f066b472c2b66cc3b868bbe433aed2d4557aea32
src/apps/dbwipes/util.py
python
parse_agg
(s)
return { 'fname': d['func'], 'func': func, 'cols': cols, 'alias': d.get('alias', '') or d['func'] }
parse an aggregation SELECT clause e.g., avg(temp) as foo into dictionary of function name, column, and alias components
parse an aggregation SELECT clause e.g., avg(temp) as foo into dictionary of function name, column, and alias components
[ "parse", "an", "aggregation", "SELECT", "clause", "e", ".", "g", ".", "avg", "(", "temp", ")", "as", "foo", "into", "dictionary", "of", "function", "name", "column", "and", "alias", "components" ]
def parse_agg(s):
    """
    parse an aggregation SELECT clause
    e.g., avg(temp) as foo
    into dictionary of function name, column, and alias components
    """
    # print(s)
    p = re.compile(
        '(?P<func>\w+)\(\s*(?P<col>[\w\,\s]+)\s*\)\s*(as\s+(?P<alias>\w+))?')
    d = p.match(s).groupdict()
    klass = __agg2f__[d['func'].strip()]
    expr = str(d['col'])
    cols = [col.strip() for col in expr.split(',')]
    varlist = [Var(col) for col in cols]
    # print(klass)
    # print(cols)
    # print(varlist)
    func = klass(varlist)
    return {
        'fname': d['func'],
        'func': func,
        'cols': cols,
        'alias': d.get('alias', '') or d['func']
    }
[ "def", "parse_agg", "(", "s", ")", ":", "# print(s)", "p", "=", "re", ".", "compile", "(", "'(?P<func>\\w+)\\(\\s*(?P<col>[\\w\\,\\s]+)\\s*\\)\\s*(as\\s+(?P<alias>\\w+))?'", ")", "d", "=", "p", ".", "match", "(", "s", ")", ".", "groupdict", "(", ")", "klass", "=", "__agg2f__", "[", "d", "[", "'func'", "]", ".", "strip", "(", ")", "]", "expr", "=", "str", "(", "d", "[", "'col'", "]", ")", "cols", "=", "[", "col", ".", "strip", "(", ")", "for", "col", "in", "expr", ".", "split", "(", "','", ")", "]", "varlist", "=", "[", "Var", "(", "col", ")", "for", "col", "in", "cols", "]", "# print(klass)", "# print(cols)", "# print(varlist)", "func", "=", "klass", "(", "varlist", ")", "return", "{", "'fname'", ":", "d", "[", "'func'", "]", ",", "'func'", ":", "func", ",", "'cols'", ":", "cols", ",", "'alias'", ":", "d", ".", "get", "(", "'alias'", ",", "''", ")", "or", "d", "[", "'func'", "]", "}" ]
https://github.com/datahuborg/datahub/blob/f066b472c2b66cc3b868bbe433aed2d4557aea32/src/apps/dbwipes/util.py#L91-L113
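The regex does the heavy lifting; a self-contained sketch of the same pattern (Var and __agg2f__ from the module are omitted):

import re

p = re.compile(
    r'(?P<func>\w+)\(\s*(?P<col>[\w\,\s]+)\s*\)\s*(as\s+(?P<alias>\w+))?')

d = p.match('avg(temp) as foo').groupdict()
print(d['func'], d['col'], d['alias'])  # avg temp foo

d = p.match('corr(x, y)').groupdict()
print([c.strip() for c in d['col'].split(',')])  # ['x', 'y']
print(d['alias'])  # None -> caller falls back to the function name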
frenck/home-assistant-config
91fb77e527bc470b557b6156fd1d60515e0b0be9
custom_components/samsungtv_smart/media_player.py
python
SamsungTVDevice.channel_list
(self)
return list(self._channel_list)
List of available channels.
List of available channels.
[ "List", "of", "available", "channels", "." ]
def channel_list(self):
    """List of available channels."""
    if self._channel_list is None:
        return None
    return list(self._channel_list)
[ "def", "channel_list", "(", "self", ")", ":", "if", "self", ".", "_channel_list", "is", "None", ":", "return", "None", "return", "list", "(", "self", ".", "_channel_list", ")" ]
https://github.com/frenck/home-assistant-config/blob/91fb77e527bc470b557b6156fd1d60515e0b0be9/custom_components/samsungtv_smart/media_player.py#L978-L982
Unmanic/unmanic
655b18b5fd80c814e45ec38929d1046da93114c3
unmanic/libs/installation_link.py
python
Links.validate_remote_installation
(self, address: str)
return { 'system_configuration': system_configuration_data.get('configuration'), 'settings': settings_data.get('settings'), 'version': version_data.get('version'), 'session': { "level": session_data.get('level'), "picture_uri": session_data.get('picture_uri'), "name": session_data.get('name'), "email": session_data.get('email'), "uuid": session_data.get('uuid'), }, 'task_count': int(tasks_data.get('recordsTotal', 0)) }
Validate a remote Unmanic installation by requesting its system info and version

:param address:
:return:
Validate a remote Unmanic installation by requesting its system info and version
[ "Validate", "a", "remote", "Unmanic", "installation", "by", "requesting", "its", "system", "info", "and", "version" ]
def validate_remote_installation(self, address: str):
    """
    Validate a remote Unmanic installation by requesting its system info and version

    :param address:
    :return:
    """
    address = self.__format_address(address)

    # Fetch config
    url = "{}/unmanic/api/v2/settings/configuration".format(address)
    res = requests.get(url, timeout=2)
    if res.status_code != 200:
        return {}
    system_configuration_data = res.json()

    # Fetch settings
    url = "{}/unmanic/api/v2/settings/read".format(address)
    res = requests.get(url, timeout=2)
    if res.status_code != 200:
        return {}
    settings_data = res.json()

    # Fetch version
    url = "{}/unmanic/api/v2/version/read".format(address)
    res = requests.get(url, timeout=2)
    if res.status_code != 200:
        return {}
    version_data = res.json()

    # Fetch version
    url = "{}/unmanic/api/v2/session/state".format(address)
    res = requests.get(url, timeout=2)
    if res.status_code != 200:
        return {}
    session_data = res.json()

    # Fetch task count data
    data = {
        "start": 0,
        "length": 1
    }
    url = "{}/unmanic/api/v2/pending/tasks".format(address)
    res = requests.post(url, json=data, timeout=2)
    if res.status_code != 200:
        return {}
    tasks_data = res.json()

    return {
        'system_configuration': system_configuration_data.get('configuration'),
        'settings': settings_data.get('settings'),
        'version': version_data.get('version'),
        'session': {
            "level": session_data.get('level'),
            "picture_uri": session_data.get('picture_uri'),
            "name": session_data.get('name'),
            "email": session_data.get('email'),
            "uuid": session_data.get('uuid'),
        },
        'task_count': int(tasks_data.get('recordsTotal', 0))
    }
[ "def", "validate_remote_installation", "(", "self", ",", "address", ":", "str", ")", ":", "address", "=", "self", ".", "__format_address", "(", "address", ")", "# Fetch config", "url", "=", "\"{}/unmanic/api/v2/settings/configuration\"", ".", "format", "(", "address", ")", "res", "=", "requests", ".", "get", "(", "url", ",", "timeout", "=", "2", ")", "if", "res", ".", "status_code", "!=", "200", ":", "return", "{", "}", "system_configuration_data", "=", "res", ".", "json", "(", ")", "# Fetch settings", "url", "=", "\"{}/unmanic/api/v2/settings/read\"", ".", "format", "(", "address", ")", "res", "=", "requests", ".", "get", "(", "url", ",", "timeout", "=", "2", ")", "if", "res", ".", "status_code", "!=", "200", ":", "return", "{", "}", "settings_data", "=", "res", ".", "json", "(", ")", "# Fetch version", "url", "=", "\"{}/unmanic/api/v2/version/read\"", ".", "format", "(", "address", ")", "res", "=", "requests", ".", "get", "(", "url", ",", "timeout", "=", "2", ")", "if", "res", ".", "status_code", "!=", "200", ":", "return", "{", "}", "version_data", "=", "res", ".", "json", "(", ")", "# Fetch version", "url", "=", "\"{}/unmanic/api/v2/session/state\"", ".", "format", "(", "address", ")", "res", "=", "requests", ".", "get", "(", "url", ",", "timeout", "=", "2", ")", "if", "res", ".", "status_code", "!=", "200", ":", "return", "{", "}", "session_data", "=", "res", ".", "json", "(", ")", "# Fetch task count data", "data", "=", "{", "\"start\"", ":", "0", ",", "\"length\"", ":", "1", "}", "url", "=", "\"{}/unmanic/api/v2/pending/tasks\"", ".", "format", "(", "address", ")", "res", "=", "requests", ".", "post", "(", "url", ",", "json", "=", "data", ",", "timeout", "=", "2", ")", "if", "res", ".", "status_code", "!=", "200", ":", "return", "{", "}", "tasks_data", "=", "res", ".", "json", "(", ")", "return", "{", "'system_configuration'", ":", "system_configuration_data", ".", "get", "(", "'configuration'", ")", ",", "'settings'", ":", "settings_data", ".", "get", "(", "'settings'", ")", ",", "'version'", ":", "version_data", ".", "get", "(", "'version'", ")", ",", "'session'", ":", "{", "\"level\"", ":", "session_data", ".", "get", "(", "'level'", ")", ",", "\"picture_uri\"", ":", "session_data", ".", "get", "(", "'picture_uri'", ")", ",", "\"name\"", ":", "session_data", ".", "get", "(", "'name'", ")", ",", "\"email\"", ":", "session_data", ".", "get", "(", "'email'", ")", ",", "\"uuid\"", ":", "session_data", ".", "get", "(", "'uuid'", ")", ",", "}", ",", "'task_count'", ":", "int", "(", "tasks_data", ".", "get", "(", "'recordsTotal'", ",", "0", ")", ")", "}" ]
https://github.com/Unmanic/unmanic/blob/655b18b5fd80c814e45ec38929d1046da93114c3/unmanic/libs/installation_link.py#L202-L263
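The method is a chain of short-timeout probes that bail out with an empty dict on the first failure; a hedged sketch of that pattern (probe_json and the host below are hypothetical):

import requests

def probe_json(url):
    """Hypothetical helper: GET a JSON endpoint, None on any failure."""
    try:
        res = requests.get(url, timeout=2)
    except requests.RequestException:
        return None
    if res.status_code != 200:
        return None
    return res.json()

version = probe_json("http://unmanic-host:8888/unmanic/api/v2/version/read")
if version is None:
    print("not a reachable Unmanic installation")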
jam-py/jam-py
0821492cdff8665928e0f093a4435aa64285a45c
jam/third_party/werkzeug/wrappers.py
python
BaseRequest.access_route
(self)
return self.list_storage_class()
If a forwarded header exists this is a list of all ip addresses from the client ip to the last proxy server.
If a forwarded header exists this is a list of all ip addresses from the client ip to the last proxy server.
[ "If", "a", "forwarded", "header", "exists", "this", "is", "a", "list", "of", "all", "ip", "addresses", "from", "the", "client", "ip", "to", "the", "last", "proxy", "server", "." ]
def access_route(self):
    """If a forwarded header exists this is a list of all ip addresses
    from the client ip to the last proxy server.
    """
    if 'HTTP_X_FORWARDED_FOR' in self.environ:
        addr = self.environ['HTTP_X_FORWARDED_FOR'].split(',')
        return self.list_storage_class([x.strip() for x in addr])
    elif 'REMOTE_ADDR' in self.environ:
        return self.list_storage_class([self.environ['REMOTE_ADDR']])
    return self.list_storage_class()
[ "def", "access_route", "(", "self", ")", ":", "if", "'HTTP_X_FORWARDED_FOR'", "in", "self", ".", "environ", ":", "addr", "=", "self", ".", "environ", "[", "'HTTP_X_FORWARDED_FOR'", "]", ".", "split", "(", "','", ")", "return", "self", ".", "list_storage_class", "(", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "addr", "]", ")", "elif", "'REMOTE_ADDR'", "in", "self", ".", "environ", ":", "return", "self", ".", "list_storage_class", "(", "[", "self", ".", "environ", "[", "'REMOTE_ADDR'", "]", "]", ")", "return", "self", ".", "list_storage_class", "(", ")" ]
https://github.com/jam-py/jam-py/blob/0821492cdff8665928e0f093a4435aa64285a45c/jam/third_party/werkzeug/wrappers.py#L660-L669
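How the forwarded chain is split and trimmed, assuming a plain WSGI environ dict:

environ = {'HTTP_X_FORWARDED_FOR': '203.0.113.7, 10.0.0.2 , 10.0.0.1'}

addr = environ['HTTP_X_FORWARDED_FOR'].split(',')
route = [x.strip() for x in addr]
print(route)  # ['203.0.113.7', '10.0.0.2', '10.0.0.1'] -- client first, last proxy last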
wotermelon/toJump
3dcec5cb5d91387d415b805d015ab8d2e6ffcf5f
lib/win/systrace/catapult/devil/devil/android/perf/thermal_throttle.py
python
ThermalThrottle.IsThrottled
(self)
return self._throttled
True if currently throttled.
True if currently throttled.
[ "True", "if", "currently", "throttled", "." ]
def IsThrottled(self):
    """True if currently throttled."""
    self._ReadLog()
    return self._throttled
[ "def", "IsThrottled", "(", "self", ")", ":", "self", ".", "_ReadLog", "(", ")", "return", "self", ".", "_throttled" ]
https://github.com/wotermelon/toJump/blob/3dcec5cb5d91387d415b805d015ab8d2e6ffcf5f/lib/win/systrace/catapult/devil/devil/android/perf/thermal_throttle.py#L90-L93
frenck/home-assistant-config
91fb77e527bc470b557b6156fd1d60515e0b0be9
custom_components/hacs/repositories/base.py
python
HacsRepository.common_update_data
(self, ignore_issues: bool = False, force: bool = False)
Common update data.
Common update data.
[ "Common", "update", "data", "." ]
async def common_update_data(self, ignore_issues: bool = False, force: bool = False) -> None:
    """Common update data."""
    releases = []
    try:
        repository_object, etag = await self.async_get_legacy_repository_object(
            etag=None if force or self.data.installed else self.data.etag_repository,
        )
        self.repository_object = repository_object
        if self.data.full_name.lower() != repository_object.full_name.lower():
            self.hacs.common.renamed_repositories[
                self.data.full_name
            ] = repository_object.full_name
            raise HacsRepositoryExistException
        self.data.update_data(repository_object.attributes)
        self.data.etag_repository = etag
    except HacsNotModifiedException:
        return
    except HacsRepositoryExistException:
        raise HacsRepositoryExistException from None
    except (AIOGitHubAPIException, HacsException) as exception:
        if not self.hacs.status.startup:
            self.logger.error("%s %s", self, exception)
        if not ignore_issues:
            self.validate.errors.append("Repository does not exist.")
            raise HacsException(exception) from exception

    # Make sure the repository is not archived.
    if self.data.archived and not ignore_issues:
        self.validate.errors.append("Repository is archived.")
        if self.data.full_name not in self.hacs.common.archived_repositories:
            self.hacs.common.archived_repositories.append(self.data.full_name)
        raise HacsRepositoryArchivedException(f"{self} Repository is archived.")

    # Make sure the repository is not in the blacklist.
    if self.hacs.repositories.is_removed(self.data.full_name) and not ignore_issues:
        self.validate.errors.append("Repository has been requested to be removed.")
        raise HacsException(f"{self} Repository has been requested to be removed.")

    # Get releases.
    try:
        releases = await self.get_releases(
            prerelease=self.data.show_beta,
            returnlimit=self.hacs.configuration.release_limit,
        )
        if releases:
            self.data.releases = True
            self.releases.objects = [x for x in releases if not x.draft]
            self.data.published_tags = [x.tag_name for x in self.releases.objects]
            self.data.last_version = next(iter(self.data.published_tags))

    except (AIOGitHubAPIException, HacsException):
        self.data.releases = False

    if not self.force_branch:
        self.ref = version_to_download(self)
        if self.data.releases:
            for release in self.releases.objects or []:
                if release.tag_name == self.ref:
                    assets = release.assets
                    if assets:
                        downloads = next(iter(assets)).attributes.get("download_count")
                        self.data.downloads = downloads

    self.hacs.log.debug("%s Running checks against %s", self, self.ref.replace("tags/", ""))

    try:
        self.tree = await self.get_tree(self.ref)
        if not self.tree:
            raise HacsException("No files in tree")
        self.treefiles = []
        for treefile in self.tree:
            self.treefiles.append(treefile.full_path)
    except (AIOGitHubAPIException, HacsException) as exception:
        if not self.hacs.status.startup and not ignore_issues:
            self.logger.error("%s %s", self, exception)
        if not ignore_issues:
            raise HacsException(exception) from None
[ "async", "def", "common_update_data", "(", "self", ",", "ignore_issues", ":", "bool", "=", "False", ",", "force", ":", "bool", "=", "False", ")", "->", "None", ":", "releases", "=", "[", "]", "try", ":", "repository_object", ",", "etag", "=", "await", "self", ".", "async_get_legacy_repository_object", "(", "etag", "=", "None", "if", "force", "or", "self", ".", "data", ".", "installed", "else", "self", ".", "data", ".", "etag_repository", ",", ")", "self", ".", "repository_object", "=", "repository_object", "if", "self", ".", "data", ".", "full_name", ".", "lower", "(", ")", "!=", "repository_object", ".", "full_name", ".", "lower", "(", ")", ":", "self", ".", "hacs", ".", "common", ".", "renamed_repositories", "[", "self", ".", "data", ".", "full_name", "]", "=", "repository_object", ".", "full_name", "raise", "HacsRepositoryExistException", "self", ".", "data", ".", "update_data", "(", "repository_object", ".", "attributes", ")", "self", ".", "data", ".", "etag_repository", "=", "etag", "except", "HacsNotModifiedException", ":", "return", "except", "HacsRepositoryExistException", ":", "raise", "HacsRepositoryExistException", "from", "None", "except", "(", "AIOGitHubAPIException", ",", "HacsException", ")", "as", "exception", ":", "if", "not", "self", ".", "hacs", ".", "status", ".", "startup", ":", "self", ".", "logger", ".", "error", "(", "\"%s %s\"", ",", "self", ",", "exception", ")", "if", "not", "ignore_issues", ":", "self", ".", "validate", ".", "errors", ".", "append", "(", "\"Repository does not exist.\"", ")", "raise", "HacsException", "(", "exception", ")", "from", "exception", "# Make sure the repository is not archived.", "if", "self", ".", "data", ".", "archived", "and", "not", "ignore_issues", ":", "self", ".", "validate", ".", "errors", ".", "append", "(", "\"Repository is archived.\"", ")", "if", "self", ".", "data", ".", "full_name", "not", "in", "self", ".", "hacs", ".", "common", ".", "archived_repositories", ":", "self", ".", "hacs", ".", "common", ".", "archived_repositories", ".", "append", "(", "self", ".", "data", ".", "full_name", ")", "raise", "HacsRepositoryArchivedException", "(", "f\"{self} Repository is archived.\"", ")", "# Make sure the repository is not in the blacklist.", "if", "self", ".", "hacs", ".", "repositories", ".", "is_removed", "(", "self", ".", "data", ".", "full_name", ")", "and", "not", "ignore_issues", ":", "self", ".", "validate", ".", "errors", ".", "append", "(", "\"Repository has been requested to be removed.\"", ")", "raise", "HacsException", "(", "f\"{self} Repository has been requested to be removed.\"", ")", "# Get releases.", "try", ":", "releases", "=", "await", "self", ".", "get_releases", "(", "prerelease", "=", "self", ".", "data", ".", "show_beta", ",", "returnlimit", "=", "self", ".", "hacs", ".", "configuration", ".", "release_limit", ",", ")", "if", "releases", ":", "self", ".", "data", ".", "releases", "=", "True", "self", ".", "releases", ".", "objects", "=", "[", "x", "for", "x", "in", "releases", "if", "not", "x", ".", "draft", "]", "self", ".", "data", ".", "published_tags", "=", "[", "x", ".", "tag_name", "for", "x", "in", "self", ".", "releases", ".", "objects", "]", "self", ".", "data", ".", "last_version", "=", "next", "(", "iter", "(", "self", ".", "data", ".", "published_tags", ")", ")", "except", "(", "AIOGitHubAPIException", ",", "HacsException", ")", ":", "self", ".", "data", ".", "releases", "=", "False", "if", "not", "self", ".", "force_branch", ":", "self", ".", "ref", "=", "version_to_download", "(", 
"self", ")", "if", "self", ".", "data", ".", "releases", ":", "for", "release", "in", "self", ".", "releases", ".", "objects", "or", "[", "]", ":", "if", "release", ".", "tag_name", "==", "self", ".", "ref", ":", "assets", "=", "release", ".", "assets", "if", "assets", ":", "downloads", "=", "next", "(", "iter", "(", "assets", ")", ")", ".", "attributes", ".", "get", "(", "\"download_count\"", ")", "self", ".", "data", ".", "downloads", "=", "downloads", "self", ".", "hacs", ".", "log", ".", "debug", "(", "\"%s Running checks against %s\"", ",", "self", ",", "self", ".", "ref", ".", "replace", "(", "\"tags/\"", ",", "\"\"", ")", ")", "try", ":", "self", ".", "tree", "=", "await", "self", ".", "get_tree", "(", "self", ".", "ref", ")", "if", "not", "self", ".", "tree", ":", "raise", "HacsException", "(", "\"No files in tree\"", ")", "self", ".", "treefiles", "=", "[", "]", "for", "treefile", "in", "self", ".", "tree", ":", "self", ".", "treefiles", ".", "append", "(", "treefile", ".", "full_path", ")", "except", "(", "AIOGitHubAPIException", ",", "HacsException", ")", "as", "exception", ":", "if", "not", "self", ".", "hacs", ".", "status", ".", "startup", "and", "not", "ignore_issues", ":", "self", ".", "logger", ".", "error", "(", "\"%s %s\"", ",", "self", ",", "exception", ")", "if", "not", "ignore_issues", ":", "raise", "HacsException", "(", "exception", ")", "from", "None" ]
https://github.com/frenck/home-assistant-config/blob/91fb77e527bc470b557b6156fd1d60515e0b0be9/custom_components/hacs/repositories/base.py#L931-L1007
Southpaw-TACTIC/TACTIC
ba9b87aef0ee3b3ea51446f25b285ebbca06f62c
3rd_party/python3/site-packages/tempora/schedule.py
python
PeriodicCommandFixedDelay.daily_at
(cls, at, target)
return cls.at_time(cls._localize(when), daily, target)
Schedule a command to run at a specific time each day.
Schedule a command to run at a specific time each day.
[ "Schedule", "a", "command", "to", "run", "at", "a", "specific", "time", "each", "day", "." ]
def daily_at(cls, at, target):
    """
    Schedule a command to run at a specific time each day.
    """
    daily = datetime.timedelta(days=1)
    # convert when to the next datetime matching this time
    when = datetime.datetime.combine(datetime.date.today(), at)
    if when < now():
        when += daily
    return cls.at_time(cls._localize(when), daily, target)
[ "def", "daily_at", "(", "cls", ",", "at", ",", "target", ")", ":", "daily", "=", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "# convert when to the next datetime matching this time", "when", "=", "datetime", ".", "datetime", ".", "combine", "(", "datetime", ".", "date", ".", "today", "(", ")", ",", "at", ")", "if", "when", "<", "now", "(", ")", ":", "when", "+=", "daily", "return", "cls", ".", "at_time", "(", "cls", ".", "_localize", "(", "when", ")", ",", "daily", ",", "target", ")" ]
https://github.com/Southpaw-TACTIC/TACTIC/blob/ba9b87aef0ee3b3ea51446f25b285ebbca06f62c/3rd_party/python3/site-packages/tempora/schedule.py#L144-L153
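The date arithmetic in isolation (next_daily_occurrence is an illustrative stand-in for the class machinery):

import datetime

def next_daily_occurrence(at, current):
    """Next datetime matching a time-of-day, rolling one day forward if past."""
    daily = datetime.timedelta(days=1)
    when = datetime.datetime.combine(current.date(), at)
    if when < current:
        when += daily
    return when

now = datetime.datetime(2024, 1, 1, 15, 0)
print(next_daily_occurrence(datetime.time(9, 30), now))  # 2024-01-02 09:30:00
print(next_daily_occurrence(datetime.time(18, 0), now))  # 2024-01-01 18:00:00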
Nexedi/erp5
44df1959c0e21576cf5e9803d602d95efb4b695b
bt5/erp5_forge/ModuleComponentTemplateItem/portal_components/module.erp5.Subversion.py
python
Subversion.log
(self, path)
return self._getClient().log(os.path.join(self.working_copy, path))
return log of a file or dir
return log of a file or dir
[ "return", "log", "of", "a", "file", "or", "dir" ]
def log(self, path):
    """return log of a file or dir
    """
    return self._getClient().log(os.path.join(self.working_copy, path))
[ "def", "log", "(", "self", ",", "path", ")", ":", "return", "self", ".", "_getClient", "(", ")", ".", "log", "(", "os", ".", "path", ".", "join", "(", "self", ".", "working_copy", ",", "path", ")", ")" ]
https://github.com/Nexedi/erp5/blob/44df1959c0e21576cf5e9803d602d95efb4b695b/bt5/erp5_forge/ModuleComponentTemplateItem/portal_components/module.erp5.Subversion.py#L174-L177
Sefaria/Sefaria-Project
506752f49394fadebae283d525af8276eb2e241e
sefaria/model/notification.py
python
Notification.make_follow
(self, follower_id=None)
return self
Make this Notification for a new Follow event
Make this Notification for a new Follow event
[ "Make", "this", "Notification", "for", "a", "new", "Follow", "event" ]
def make_follow(self, follower_id=None):
    """Make this Notification for a new Follow event"""
    self.type = "follow"
    self.content["follower"] = follower_id
    return self
[ "def", "make_follow", "(", "self", ",", "follower_id", "=", "None", ")", ":", "self", ".", "type", "=", "\"follow\"", "self", ".", "content", "[", "\"follower\"", "]", "=", "follower_id", "return", "self" ]
https://github.com/Sefaria/Sefaria-Project/blob/506752f49394fadebae283d525af8276eb2e241e/sefaria/model/notification.py#L226-L230
xixiaoyao/CS224n-winter-together
f1fbcd4db284a804cb9dfc24b65481ba66e7d32c
Assignments/assignment4/geekhch/run.py
python
beam_search
(model: NMT, test_data_src: List[List[str]], beam_size: int, max_decoding_time_step: int)
return hypotheses
Run beam search to construct hypotheses for a list of src-language sentences.

@param model (NMT): NMT Model
@param test_data_src (List[List[str]]): List of sentences (words) in source language, from test set.
@param beam_size (int): beam_size (# of hypotheses to hold for a translation at every step)
@param max_decoding_time_step (int): maximum sentence length that Beam search can produce
@returns hypotheses (List[List[Hypothesis]]): List of Hypothesis translations for every source sentence.
Run beam search to construct hypotheses for a list of src-language sentences.
[ "Run", "beam", "search", "to", "construct", "hypotheses", "for", "a", "list", "of", "src", "-", "language", "sentences", "." ]
def beam_search(model: NMT, test_data_src: List[List[str]], beam_size: int, max_decoding_time_step: int) -> List[List[Hypothesis]]:
    """ Run beam search to construct hypotheses for a list of src-language sentences.
    @param model (NMT): NMT Model
    @param test_data_src (List[List[str]]): List of sentences (words) in source language, from test set.
    @param beam_size (int): beam_size (# of hypotheses to hold for a translation at every step)
    @param max_decoding_time_step (int): maximum sentence length that Beam search can produce
    @returns hypotheses (List[List[Hypothesis]]): List of Hypothesis translations for every source sentence.
    """
    was_training = model.training
    model.eval()

    hypotheses = []
    with torch.no_grad():
        for src_sent in tqdm(test_data_src, desc='Decoding', file=sys.stdout):
            example_hyps = model.beam_search(src_sent, beam_size=beam_size, max_decoding_time_step=max_decoding_time_step)
            hypotheses.append(example_hyps)

    if was_training:
        model.train(was_training)

    return hypotheses
[ "def", "beam_search", "(", "model", ":", "NMT", ",", "test_data_src", ":", "List", "[", "List", "[", "str", "]", "]", ",", "beam_size", ":", "int", ",", "max_decoding_time_step", ":", "int", ")", "->", "List", "[", "List", "[", "Hypothesis", "]", "]", ":", "was_training", "=", "model", ".", "training", "model", ".", "eval", "(", ")", "hypotheses", "=", "[", "]", "with", "torch", ".", "no_grad", "(", ")", ":", "for", "src_sent", "in", "tqdm", "(", "test_data_src", ",", "desc", "=", "'Decoding'", ",", "file", "=", "sys", ".", "stdout", ")", ":", "example_hyps", "=", "model", ".", "beam_search", "(", "src_sent", ",", "beam_size", "=", "beam_size", ",", "max_decoding_time_step", "=", "max_decoding_time_step", ")", "hypotheses", ".", "append", "(", "example_hyps", ")", "if", "was_training", ":", "model", ".", "train", "(", "was_training", ")", "return", "hypotheses" ]
https://github.com/xixiaoyao/CS224n-winter-together/blob/f1fbcd4db284a804cb9dfc24b65481ba66e7d32c/Assignments/assignment4/geekhch/run.py#L297-L317
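The eval/no_grad/restore pattern around the decoding loop, shown on a stand-in module (the NMT model itself is not needed to see it):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
model.train()

was_training = model.training
model.eval()                   # disable dropout/batch-norm updates
with torch.no_grad():          # no autograd bookkeeping while decoding
    _ = model(torch.zeros(1, 4))
if was_training:
    model.train(was_training)  # restore the mode the model was found in

print(model.training)  # True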
almonk/Bind
03e9e98fb8b30a58cb4fc2829f06289fa9958897
public/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings.py
python
ConvertVCMacrosToMSBuild
(s)
return s
Convert the MSVS macros found in the string to the MSBuild equivalent.

This list is probably not exhaustive.  Add as needed.
Convert the MSVS macros found in the string to the MSBuild equivalent.
[ "Convert", "the", "MSVS", "macros", "found", "in", "the", "string", "to", "the", "MSBuild", "equivalent", "." ]
def ConvertVCMacrosToMSBuild(s):
    """Convert the MSVS macros found in the string to the MSBuild equivalent.

    This list is probably not exhaustive.  Add as needed.
    """
    if '$' in s:
        replace_map = {
            '$(ConfigurationName)': '$(Configuration)',
            '$(InputDir)': '%(RootDir)%(Directory)',
            '$(InputExt)': '%(Extension)',
            '$(InputFileName)': '%(Filename)%(Extension)',
            '$(InputName)': '%(Filename)',
            '$(InputPath)': '%(FullPath)',
            '$(ParentName)': '$(ProjectFileName)',
            '$(PlatformName)': '$(Platform)',
            '$(SafeInputName)': '%(Filename)',
        }
        for old, new in replace_map.iteritems():
            s = s.replace(old, new)
        s = FixVCMacroSlashes(s)
    return s
[ "def", "ConvertVCMacrosToMSBuild", "(", "s", ")", ":", "if", "'$'", "in", "s", ":", "replace_map", "=", "{", "'$(ConfigurationName)'", ":", "'$(Configuration)'", ",", "'$(InputDir)'", ":", "'%(RootDir)%(Directory)'", ",", "'$(InputExt)'", ":", "'%(Extension)'", ",", "'$(InputFileName)'", ":", "'%(Filename)%(Extension)'", ",", "'$(InputName)'", ":", "'%(Filename)'", ",", "'$(InputPath)'", ":", "'%(FullPath)'", ",", "'$(ParentName)'", ":", "'$(ProjectFileName)'", ",", "'$(PlatformName)'", ":", "'$(Platform)'", ",", "'$(SafeInputName)'", ":", "'%(Filename)'", ",", "}", "for", "old", ",", "new", "in", "replace_map", ".", "iteritems", "(", ")", ":", "s", "=", "s", ".", "replace", "(", "old", ",", "new", ")", "s", "=", "FixVCMacroSlashes", "(", "s", ")", "return", "s" ]
https://github.com/almonk/Bind/blob/03e9e98fb8b30a58cb4fc2829f06289fa9958897/public/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings.py#L383-L403
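replace_map.iteritems() pins this to Python 2 (gyp's target at the time); a Python 3 rendering of the same loop, as a sketch:

def convert_macros_py3(s, replace_map):
    if '$' in s:
        for old, new in replace_map.items():  # .items() replaces .iteritems()
            s = s.replace(old, new)
    return s

m = {'$(InputName)': '%(Filename)', '$(ConfigurationName)': '$(Configuration)'}
print(convert_macros_py3('$(InputName).obj', m))  # %(Filename).obj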
GoogleCloudPlatform/PerfKitExplorer
9efa61015d50c25f6d753f0212ad3bf16876d496
third_party/py/oauth2client/appengine.py
python
StorageByKeyName._is_ndb
(self)
Determine whether the model of the instance is an NDB model.

Returns:
    Boolean indicating whether or not the model is an NDB or DB model.
Determine whether the model of the instance is an NDB model.
[ "Determine", "whether", "the", "model", "of", "the", "instance", "is", "an", "NDB", "model", "." ]
def _is_ndb(self):
    """Determine whether the model of the instance is an NDB model.

    Returns:
        Boolean indicating whether or not the model is an NDB or DB model.
    """
    # issubclass will fail if one of the arguments is not a class, only need
    # worry about new-style classes since ndb and db models are new-style
    if isinstance(self._model, type):
        if ndb is not None and issubclass(self._model, ndb.Model):
            return True
        elif issubclass(self._model, db.Model):
            return False

    raise TypeError('Model class not an NDB or DB model: %s.' % (self._model,))
[ "def", "_is_ndb", "(", "self", ")", ":", "# issubclass will fail if one of the arguments is not a class, only need", "# worry about new-style classes since ndb and db models are new-style", "if", "isinstance", "(", "self", ".", "_model", ",", "type", ")", ":", "if", "ndb", "is", "not", "None", "and", "issubclass", "(", "self", ".", "_model", ",", "ndb", ".", "Model", ")", ":", "return", "True", "elif", "issubclass", "(", "self", ".", "_model", ",", "db", ".", "Model", ")", ":", "return", "False", "raise", "TypeError", "(", "'Model class not an NDB or DB model: %s.'", "%", "(", "self", ".", "_model", ",", ")", ")" ]
https://github.com/GoogleCloudPlatform/PerfKitExplorer/blob/9efa61015d50c25f6d753f0212ad3bf16876d496/third_party/py/oauth2client/appengine.py#L395-L409
Nexedi/erp5
44df1959c0e21576cf5e9803d602d95efb4b695b
bt5/erp5_payzen_secure_payment/DocumentTemplateItem/portal_components/document.erp5.PayzenService.py
python
PayzenREST.rest_getInfo
(self, transmissionDate, transactionId)
return [data_kw, sent_data, received_data]
Returns getInfo as dict, boolean, string, string

transmissionDate is "raw" date in format YYYYMMDD, without any marks
transactionId is id of transaction for this date

Once communication has happened it does not raise.
Returns getInfo as dict, boolean, string, string
[ "Returns", "getInfo", "as", "dict", "boolean", "string", "string" ]
def rest_getInfo(self, transmissionDate, transactionId):
    """Returns getInfo as dict, boolean, string, string

    transmissionDate is "raw" date in format YYYYMMDD, without any marks
    transactionId is id of transaction for this date

    Once communication has happened it does not raise.
    """
    URL = "https://api.payzen.eu/api-payment/V4/Order/Get"
    kw = dict(
        orderId=transactionId,
    )
    sent_data = str(kw)
    data_kw, received_data = self.callPayzenApi(URL, kw)
    return [data_kw, sent_data, received_data]
[ "def", "rest_getInfo", "(", "self", ",", "transmissionDate", ",", "transactionId", ")", ":", "URL", "=", "\"https://api.payzen.eu/api-payment/V4/Order/Get\"", "kw", "=", "dict", "(", "orderId", "=", "transactionId", ",", ")", "sent_data", "=", "str", "(", "kw", ")", "data_kw", ",", "received_data", "=", "self", ".", "callPayzenApi", "(", "URL", ",", "kw", ")", "return", "[", "data_kw", ",", "sent_data", ",", "received_data", "]" ]
https://github.com/Nexedi/erp5/blob/44df1959c0e21576cf5e9803d602d95efb4b695b/bt5/erp5_payzen_secure_payment/DocumentTemplateItem/portal_components/document.erp5.PayzenService.py#L75-L89
jumpserver/coco
0b2ae31eb2221127ab81775a4f985bf00abaec45
coco/httpd/elfinder/connector.py
python
ElFinderConnector.run
(self, request)
Main entry point for running commands.

Attempts to run a command function based on info in request.GET.
The command function will complete in one of two ways. It can set
response, which will be turned into an HttpResponse and returned to
the client. Or it can set return_view, a Django View function which
will be rendered and returned to the client.
Main entry point for running commands. Attempts to run a command function based on info in request.GET.
[ "Main", "entry", "point", "for", "running", "commands", ".", "Attempts", "to", "run", "a", "command", "function", "based", "on", "info", "in", "request", ".", "GET", "." ]
def run(self, request):
    """ Main entry point for running commands.

    Attempts to run a command function based on info in request.GET.
    The command function will complete in one of two ways. It can set
    response, which will be turned into an HttpResponse and returned to
    the client. Or it can set return_view, a Django View function which
    will be rendered and returned to the client.
    """
    self.request = request
    func_name, args = self.get_request_commands()
    if not func_name:
        self.response['error'] = 'No command specified'
    else:
        self.run_command(func_name, args)
[ "def", "run", "(", "self", ",", "request", ")", ":", "self", ".", "request", "=", "request", "func_name", ",", "args", "=", "self", ".", "get_request_commands", "(", ")", "if", "not", "func_name", ":", "self", ".", "response", "[", "'error'", "]", "=", "'No command specified'", "else", ":", "self", ".", "run_command", "(", "func_name", ",", "args", ")" ]
https://github.com/jumpserver/coco/blob/0b2ae31eb2221127ab81775a4f985bf00abaec45/coco/httpd/elfinder/connector.py#L139-L155
odoo/odoo
8de8c196a137f4ebbf67d7c7c83fee36f873f5c8
addons/website/models/ir_http.py
python
Http._get_converters
(cls)
return dict( super(Http, cls)._get_converters(), model=ModelConverter, )
Get the converters list for custom url patterns werkzeug needs to match a Rule. This override adds the website ones.
Get the converters list for custom url patterns werkzeug needs to match a Rule. This override adds the website ones.
[ "Get", "the", "converters", "list", "for", "custom", "url", "patterns", "werkzeug", "needs", "to", "match", "a", "Rule", ".", "This", "override", "adds", "the", "website", "ones", "." ]
def _get_converters(cls):
    """ Get the converters list for custom url patterns werkzeug needs
    to match a Rule. This override adds the website ones.
    """
    return dict(
        super(Http, cls)._get_converters(),
        model=ModelConverter,
    )
[ "def", "_get_converters", "(", "cls", ")", ":", "return", "dict", "(", "super", "(", "Http", ",", "cls", ")", ".", "_get_converters", "(", ")", ",", "model", "=", "ModelConverter", ",", ")" ]
https://github.com/odoo/odoo/blob/8de8c196a137f4ebbf67d7c7c83fee36f873f5c8/addons/website/models/ir_http.py#L118-L125
allwefantasy/mlsql
fcbeb2444289077585cc3db65d5a23ccafbdc88f
streamingpro-mlsql/src/main/resources-local/python/worker232.py
python
chain
(f, g)
return lambda *a: g(f(*a))
chain two functions together
chain two functions together
[ "chain", "two", "functions", "together" ]
def chain(f, g):
    """chain two functions together """
    return lambda *a: g(f(*a))
[ "def", "chain", "(", "f", ",", "g", ")", ":", "return", "lambda", "*", "a", ":", "g", "(", "f", "(", "*", "a", ")", ")" ]
https://github.com/allwefantasy/mlsql/blob/fcbeb2444289077585cc3db65d5a23ccafbdc88f/streamingpro-mlsql/src/main/resources-local/python/worker232.py#L65-L67
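Composition runs left to right, g(f(*a)); a tiny check:

def chain(f, g):
    """chain two functions together"""
    return lambda *a: g(f(*a))

inc_then_double = chain(lambda x: x + 1, lambda x: x * 2)
print(inc_then_double(3))  # 8, i.e. (3 + 1) * 2, not 3 * 2 + 1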
atom-community/ide-python
c046f9c2421713b34baa22648235541c5bb284fe
lib/debugger/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/__init__.py
python
iter_project_files
(project, relative=False, **kwargs)
Yield (dirname, basename, filename) for all files in the project.
Yield (dirname, basename, filename) for all files in the project.
[ "Yield", "(", "dirname", "basename", "filename", ")", "for", "all", "files", "in", "the", "project", "." ]
def iter_project_files(project, relative=False, **kwargs):
    """Yield (dirname, basename, filename) for all files in the project."""
    if relative:
        with _util.cwd(VENDORED_ROOT):
            for result in _util.iter_all_files(project, **kwargs):
                yield result
    else:
        root = project_root(project)
        for result in _util.iter_all_files(root, **kwargs):
            yield result
[ "def", "iter_project_files", "(", "project", ",", "relative", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "relative", ":", "with", "_util", ".", "cwd", "(", "VENDORED_ROOT", ")", ":", "for", "result", "in", "_util", ".", "iter_all_files", "(", "project", ",", "*", "*", "kwargs", ")", ":", "yield", "result", "else", ":", "root", "=", "project_root", "(", "project", ")", "for", "result", "in", "_util", ".", "iter_all_files", "(", "root", ",", "*", "*", "kwargs", ")", ":", "yield", "result" ]
https://github.com/atom-community/ide-python/blob/c046f9c2421713b34baa22648235541c5bb284fe/lib/debugger/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/__init__.py#L39-L48
Southpaw-TACTIC/TACTIC
ba9b87aef0ee3b3ea51446f25b285ebbca06f62c
src/pyasm/prod/service/api_xmlrpc.py
python
ApiXMLRPC.add_config_element
(self, ticket, search_type, view, name, class_name=None, display_options={}, action_class_name=None, action_options={}, element_attrs={}, login=None, unique=True, auto_unique_name=False, auto_unique_view=False, view_as_attr=False)
return dict
Add an element into a config

@params:
search_type - the search type that this config belongs to
view - the specific view of the search type
name - the name of the element
class_name - the fully qualified class of the display
action_class_name - the fully qualified class of the action
display_options - keyword options in a dictionary to construct the specific display
action_options - keyword options in a dictionary to construct the specific action
element_attrs - element attributes in a dictionary
login - login of the user specific to this view
auto_unique_name - auto generate a unique element and display view name
auto_unique_view - auto generate a unique display view name
unique - a unique display view name is expected
view_as_attr - view should be read or created as a name attribute
Add an element into a config
[ "Add", "an", "element", "into", "a", "config" ]
def add_config_element(self, ticket, search_type, view, name, class_name=None,
                       display_options={}, action_class_name=None, action_options={},
                       element_attrs={}, login=None, unique=True, auto_unique_name=False,
                       auto_unique_view=False, view_as_attr=False):
    '''Add an element into a config

    @params:
    search_type - the search type that this config belongs to
    view - the specific view of the search type
    name - the name of the element
    class_name - the fully qualified class of the display
    action_class_name - the fully qualified class of the action
    display_options - keyword options in a dictionary to construct the specific display
    action_options - keyword options in a dictionary to construct the specific action
    element_attrs - element attributes in a dictionary
    login - login of the user specific to this view
    auto_unique_name - auto generate a unique element and display view name
    auto_unique_view - auto generate a unique display view name
    unique - a unique display view name is expected
    view_as_attr - view should be read or created as a name attribute
    '''
    assert view

    config_search_type = "config/widget_config"
    search = Search(config_search_type)
    search.add_filter("search_type", search_type)
    search.add_filter("view", view)
    search.add_filter("login", login)
    config = search.get_sobject()

    configs = []
    from tactic.ui.panel import SideBarBookmarkMenuWdg
    SideBarBookmarkMenuWdg.add_internal_config(configs, ['definition'])
    internal_names = ['search','publish','edit','insert','edit_definition','definition']
    existing_names = []
    for internal_config in configs:
        internal_names.extend(internal_config.get_element_names())
    if config:
        db_element_names = config.get_element_names()
        existing_names.extend(db_element_names)

    from pyasm.common import UserException
    # error on name starting with number
    pat = re.compile('^\d+.*')
    if pat.search(name):
        raise UserException('The name [%s] should not start with a number.'%name)

    if not view_as_attr and view.find('@') != -1:
        view_as_attr = True

    # error out on all special chars from name except .
    pat = re.compile('[\$\s,#~`\%\*\^\&\(\)\+\=\[\]\[\}\{\;\:\'\"\<\>\?\|\\\!]')
    if pat.search(name):
        raise UserException('The name [%s] contains special characters or spaces.'%name)

    if unique:
        if name in existing_names:
            raise UserException('This view name [%s] has been taken.'%name)
        if name in internal_names:
            raise UserException('This view name [%s] is reserved for internal use.'%name)

    if config and auto_unique_name:
        # remove all special chars from name except .
        pat = re.compile('[\$\s,@#~`\%\*\^\&\(\)\+\=\[\]\[\}\{\;\:\'\"\<\>\?\|\\\!]')
        name = pat.sub('', name)
        suffix = Common.randint(0, 100)
        existing_names = config.get_element_names()
        if name in existing_names:
            new_name = '%s%0.3d' %(name, suffix)
            while True:
                if new_name in existing_names:
                    suffix += 1
                    new_name = '%s%0.3d' %(name, suffix)
                else:
                    name = new_name
                    break

    # update the view only if it is a folder since the view is derived from the title
    if auto_unique_view:
        tmp_view = display_options.get('view')
        if tmp_view:
            display_options['view'] = name

    # find a default for any views but definition
    if not config and view !='definition':
        view_node = WidgetConfigView.get_default_view_node(view)
        if view_node:
            config = SearchType.create(config_search_type)
            config.set_value("search_type", search_type )
            config.set_value("view", view )
            xml = config.get_xml_value("config", "config")
            root = xml.get_root_node()
            #root.appendChild(view_node)
            xml.append_child(root, view_node)
            config.set_value("config", xml.get_xml())
            if login:
                config.set_value("login", login)
            config._init()

    if not config:
        config = SearchType.create(config_search_type)
        config.set_value("search_type", search_type )
        config.set_value("view", view )
        xml = config.get_xml_value("config", "config")
        if login:
            config.set_value("login", login)
        config._init()
        root = xml.get_root_node()
        # build a new config
        if view_as_attr:
            # personal view uses the new view attr-based xml syntax
            view_node = xml.create_element("view", attrs= {'name': view})
        else:
            view_node = xml.create_element(view)
        #root.appendChild(view_node)
        xml.append_child(root, view_node)

    # view has been set from above otherwise
    '''
    xml = config.get_xml_value("config")
    view_node = xml.get_node("config/%s" % view )
    '''
    config.append_display_element(name, cls_name=class_name, options=display_options,
                                  element_attrs=element_attrs, action_options=action_options,
                                  action_cls_name=action_class_name, view_as_attr=view_as_attr)
    config.commit_config()
    # this name could be regenerated. so we return it to client
    dict = {'element_name': name}
    return dict
[ "def", "add_config_element", "(", "self", ",", "ticket", ",", "search_type", ",", "view", ",", "name", ",", "class_name", "=", "None", ",", "display_options", "=", "{", "}", ",", "action_class_name", "=", "None", ",", "action_options", "=", "{", "}", ",", "element_attrs", "=", "{", "}", ",", "login", "=", "None", ",", "unique", "=", "True", ",", "auto_unique_name", "=", "False", ",", "auto_unique_view", "=", "False", ",", "view_as_attr", "=", "False", ")", ":", "assert", "view", "config_search_type", "=", "\"config/widget_config\"", "search", "=", "Search", "(", "config_search_type", ")", "search", ".", "add_filter", "(", "\"search_type\"", ",", "search_type", ")", "search", ".", "add_filter", "(", "\"view\"", ",", "view", ")", "search", ".", "add_filter", "(", "\"login\"", ",", "login", ")", "config", "=", "search", ".", "get_sobject", "(", ")", "configs", "=", "[", "]", "from", "tactic", ".", "ui", ".", "panel", "import", "SideBarBookmarkMenuWdg", "SideBarBookmarkMenuWdg", ".", "add_internal_config", "(", "configs", ",", "[", "'definition'", "]", ")", "internal_names", "=", "[", "'search'", ",", "'publish'", ",", "'edit'", ",", "'insert'", ",", "'edit_definition'", ",", "'definition'", "]", "existing_names", "=", "[", "]", "for", "internal_config", "in", "configs", ":", "internal_names", ".", "extend", "(", "internal_config", ".", "get_element_names", "(", ")", ")", "if", "config", ":", "db_element_names", "=", "config", ".", "get_element_names", "(", ")", "existing_names", ".", "extend", "(", "db_element_names", ")", "from", "pyasm", ".", "common", "import", "UserException", "# error on name starting with number", "pat", "=", "re", ".", "compile", "(", "'^\\d+.*'", ")", "if", "pat", ".", "search", "(", "name", ")", ":", "raise", "UserException", "(", "'The name [%s] should not start with a number.'", "%", "name", ")", "if", "not", "view_as_attr", "and", "view", ".", "find", "(", "'@'", ")", "!=", "-", "1", ":", "view_as_attr", "=", "True", "# error out on all special chars from name except .", "pat", "=", "re", ".", "compile", "(", "'[\\$\\s,#~`\\%\\*\\^\\&\\(\\)\\+\\=\\[\\]\\[\\}\\{\\;\\:\\'\\\"\\<\\>\\?\\|\\\\\\!]'", ")", "if", "pat", ".", "search", "(", "name", ")", ":", "raise", "UserException", "(", "'The name [%s] contains special characters or spaces.'", "%", "name", ")", "if", "unique", ":", "if", "name", "in", "existing_names", ":", "raise", "UserException", "(", "'This view name [%s] has been taken.'", "%", "name", ")", "if", "name", "in", "internal_names", ":", "raise", "UserException", "(", "'This view name [%s] is reserved for internal use.'", "%", "name", ")", "if", "config", "and", "auto_unique_name", ":", "# remove all special chars from name except .", "pat", "=", "re", ".", "compile", "(", "'[\\$\\s,@#~`\\%\\*\\^\\&\\(\\)\\+\\=\\[\\]\\[\\}\\{\\;\\:\\'\\\"\\<\\>\\?\\|\\\\\\!]'", ")", "name", "=", "pat", ".", "sub", "(", "''", ",", "name", ")", "suffix", "=", "Common", ".", "randint", "(", "0", ",", "100", ")", "existing_names", "=", "config", ".", "get_element_names", "(", ")", "if", "name", "in", "existing_names", ":", "new_name", "=", "'%s%0.3d'", "%", "(", "name", ",", "suffix", ")", "while", "True", ":", "if", "new_name", "in", "existing_names", ":", "suffix", "+=", "1", "new_name", "=", "'%s%0.3d'", "%", "(", "name", ",", "suffix", ")", "else", ":", "name", "=", "new_name", "break", "# update the view only if it is a folder since the view is derived from the title", "if", "auto_unique_view", ":", "tmp_view", "=", "display_options", ".", "get", "(", "'view'", ")", 
"if", "tmp_view", ":", "display_options", "[", "'view'", "]", "=", "name", "# find a default for any views but definition", "if", "not", "config", "and", "view", "!=", "'definition'", ":", "view_node", "=", "WidgetConfigView", ".", "get_default_view_node", "(", "view", ")", "if", "view_node", ":", "config", "=", "SearchType", ".", "create", "(", "config_search_type", ")", "config", ".", "set_value", "(", "\"search_type\"", ",", "search_type", ")", "config", ".", "set_value", "(", "\"view\"", ",", "view", ")", "xml", "=", "config", ".", "get_xml_value", "(", "\"config\"", ",", "\"config\"", ")", "root", "=", "xml", ".", "get_root_node", "(", ")", "#root.appendChild(view_node)", "xml", ".", "append_child", "(", "root", ",", "view_node", ")", "config", ".", "set_value", "(", "\"config\"", ",", "xml", ".", "get_xml", "(", ")", ")", "if", "login", ":", "config", ".", "set_value", "(", "\"login\"", ",", "login", ")", "config", ".", "_init", "(", ")", "if", "not", "config", ":", "config", "=", "SearchType", ".", "create", "(", "config_search_type", ")", "config", ".", "set_value", "(", "\"search_type\"", ",", "search_type", ")", "config", ".", "set_value", "(", "\"view\"", ",", "view", ")", "xml", "=", "config", ".", "get_xml_value", "(", "\"config\"", ",", "\"config\"", ")", "if", "login", ":", "config", ".", "set_value", "(", "\"login\"", ",", "login", ")", "config", ".", "_init", "(", ")", "root", "=", "xml", ".", "get_root_node", "(", ")", "# build a new config", "if", "view_as_attr", ":", "# personal view uses the new view attr-based xml syntax", "view_node", "=", "xml", ".", "create_element", "(", "\"view\"", ",", "attrs", "=", "{", "'name'", ":", "view", "}", ")", "else", ":", "view_node", "=", "xml", ".", "create_element", "(", "view", ")", "#root.appendChild(view_node)", "xml", ".", "append_child", "(", "root", ",", "view_node", ")", "# view has been set from above otherwise", "'''\n xml = config.get_xml_value(\"config\")\n view_node = xml.get_node(\"config/%s\" % view )\n '''", "config", ".", "append_display_element", "(", "name", ",", "cls_name", "=", "class_name", ",", "options", "=", "display_options", ",", "element_attrs", "=", "element_attrs", ",", "action_options", "=", "action_options", ",", "action_cls_name", "=", "action_class_name", ",", "view_as_attr", "=", "view_as_attr", ")", "config", ".", "commit_config", "(", ")", "# this name could be regenerated. so we return it to client", "dict", "=", "{", "'element_name'", ":", "name", "}", "return", "dict" ]
https://github.com/Southpaw-TACTIC/TACTIC/blob/ba9b87aef0ee3b3ea51446f25b285ebbca06f62c/src/pyasm/prod/service/api_xmlrpc.py#L6390-L6528
facebookarchive/nuclide
2a2a0a642d136768b7d2a6d35a652dc5fb77d70a
modules/atom-ide-debugger-python/VendorLib/vs-py-debugger/pythonFiles/jedi/evaluate/context/iterable.py
python
SequenceLiteralContext.py__iter__
(self)
While values returns the possible values for any array field, this function returns the value for a certain index.
While values returns the possible values for any array field, this function returns the value for a certain index.
[ "While", "values", "returns", "the", "possible", "values", "for", "any", "array", "field", "this", "function", "returns", "the", "value", "for", "a", "certain", "index", "." ]
def py__iter__(self):
    """
    While values returns the possible values for any array field, this
    function returns the value for a certain index.
    """
    if self.array_type == u'dict':
        # Get keys.
        types = ContextSet()
        for k, _ in self._items():
            types |= self._defining_context.eval_node(k)
        # We don't know which dict index comes first, therefore always
        # yield all the types.
        for _ in types:
            yield LazyKnownContexts(types)
    else:
        for node in self._items():
            yield LazyTreeContext(self._defining_context, node)

    for addition in check_array_additions(self._defining_context, self):
        yield addition
[ "def", "py__iter__", "(", "self", ")", ":", "if", "self", ".", "array_type", "==", "u'dict'", ":", "# Get keys.", "types", "=", "ContextSet", "(", ")", "for", "k", ",", "_", "in", "self", ".", "_items", "(", ")", ":", "types", "|=", "self", ".", "_defining_context", ".", "eval_node", "(", "k", ")", "# We don't know which dict index comes first, therefore always", "# yield all the types.", "for", "_", "in", "types", ":", "yield", "LazyKnownContexts", "(", "types", ")", "else", ":", "for", "node", "in", "self", ".", "_items", "(", ")", ":", "yield", "LazyTreeContext", "(", "self", ".", "_defining_context", ",", "node", ")", "for", "addition", "in", "check_array_additions", "(", "self", ".", "_defining_context", ",", "self", ")", ":", "yield", "addition" ]
https://github.com/facebookarchive/nuclide/blob/2a2a0a642d136768b7d2a6d35a652dc5fb77d70a/modules/atom-ide-debugger-python/VendorLib/vs-py-debugger/pythonFiles/jedi/evaluate/context/iterable.py#L304-L323
nodejs/http2
734ad72e3939e62bcff0f686b8ec426b8aaa22e3
deps/v8/third_party/jinja2/bccache.py
python
BytecodeCache.dump_bytecode
(self, bucket)
Subclasses have to override this method to write the bytecode from a bucket back to the cache. If it is unable to do so it must not fail silently but raise an exception.
Subclasses have to override this method to write the bytecode from a bucket back to the cache. If it is unable to do so it must not fail silently but raise an exception.
[ "Subclasses", "have", "to", "override", "this", "method", "to", "write", "the", "bytecode", "from", "a", "bucket", "back", "to", "the", "cache", ".", "If", "it", "is", "unable", "to", "do", "so", "it", "must", "not", "fail", "silently", "but", "raise", "an", "exception", "." ]
def dump_bytecode(self, bucket):
    """Subclasses have to override this method to write the bytecode
    from a bucket back to the cache.  If it is unable to do so it must not
    fail silently but raise an exception.
    """
    raise NotImplementedError()
[ "def", "dump_bytecode", "(", "self", ",", "bucket", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/nodejs/http2/blob/734ad72e3939e62bcff0f686b8ec426b8aaa22e3/deps/v8/third_party/jinja2/bccache.py#L153-L158
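A minimal concrete subclass, assuming Jinja2's documented Bucket helpers (bytecode_from_string / bytecode_to_string); an in-memory dict stands in for a real backend:

from jinja2 import BytecodeCache, Environment

class MemoryBytecodeCache(BytecodeCache):
    def __init__(self):
        self.storage = {}

    def load_bytecode(self, bucket):
        code = self.storage.get(bucket.key)
        if code is not None:
            bucket.bytecode_from_string(code)

    def dump_bytecode(self, bucket):
        # Per the contract above: raise on failure, never fail silently.
        self.storage[bucket.key] = bucket.bytecode_to_string()

env = Environment(bytecode_cache=MemoryBytecodeCache())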
mozilla/spidernode
aafa9e5273f954f272bb4382fc007af14674b4c2
deps/spidershim/spidermonkey/python/mozbuild/mozbuild/frontend/context.py
python
ContextDerivedTypedRecord
(*fields)
return _TypedRecord
Factory for objects with certain properties and dynamic type checks.

This API is extremely similar to the TypedNamedTuple API,
except that properties may be mutated. This supports syntax like:

VARIABLE_NAME.property += [
    'item1',
    'item2',
]
Factory for objects with certain properties and dynamic type checks.
[ "Factory", "for", "objects", "with", "certain", "properties", "and", "dynamic", "type", "checks", "." ]
def ContextDerivedTypedRecord(*fields):
    """Factory for objects with certain properties and dynamic
    type checks.

    This API is extremely similar to the TypedNamedTuple API,
    except that properties may be mutated. This supports syntax like:

    VARIABLE_NAME.property += [
        'item1',
        'item2',
    ]
    """
    class _TypedRecord(ContextDerivedValue):
        __slots__ = tuple([name for name, _ in fields])

        def __init__(self, context):
            for fname, ftype in self._fields.items():
                if issubclass(ftype, ContextDerivedValue):
                    setattr(self, fname, self._fields[fname](context))
                else:
                    setattr(self, fname, self._fields[fname]())

        def __setattr__(self, name, value):
            if name in self._fields and not isinstance(value, self._fields[name]):
                value = self._fields[name](value)
            object.__setattr__(self, name, value)

    _TypedRecord._fields = dict(fields)
    return _TypedRecord
[ "def", "ContextDerivedTypedRecord", "(", "*", "fields", ")", ":", "class", "_TypedRecord", "(", "ContextDerivedValue", ")", ":", "__slots__", "=", "tuple", "(", "[", "name", "for", "name", ",", "_", "in", "fields", "]", ")", "def", "__init__", "(", "self", ",", "context", ")", ":", "for", "fname", ",", "ftype", "in", "self", ".", "_fields", ".", "items", "(", ")", ":", "if", "issubclass", "(", "ftype", ",", "ContextDerivedValue", ")", ":", "setattr", "(", "self", ",", "fname", ",", "self", ".", "_fields", "[", "fname", "]", "(", "context", ")", ")", "else", ":", "setattr", "(", "self", ",", "fname", ",", "self", ".", "_fields", "[", "fname", "]", "(", ")", ")", "def", "__setattr__", "(", "self", ",", "name", ",", "value", ")", ":", "if", "name", "in", "self", ".", "_fields", "and", "not", "isinstance", "(", "value", ",", "self", ".", "_fields", "[", "name", "]", ")", ":", "value", "=", "self", ".", "_fields", "[", "name", "]", "(", "value", ")", "object", ".", "__setattr__", "(", "self", ",", "name", ",", "value", ")", "_TypedRecord", ".", "_fields", "=", "dict", "(", "fields", ")", "return", "_TypedRecord" ]
https://github.com/mozilla/spidernode/blob/aafa9e5273f954f272bb4382fc007af14674b4c2/deps/spidershim/spidermonkey/python/mozbuild/mozbuild/frontend/context.py#L530-L559
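A standalone simplification of the factory (dropping ContextDerivedValue and the context argument) to show the coerce-on-assignment behaviour:

def typed_record(*fields):
    class _TypedRecord(object):
        __slots__ = tuple(name for name, _ in fields)

        def __init__(self):
            for fname, ftype in self._fields.items():
                setattr(self, fname, ftype())

        def __setattr__(self, name, value):
            if name in self._fields and not isinstance(value, self._fields[name]):
                value = self._fields[name](value)  # dynamic type coercion
            object.__setattr__(self, name, value)

    _TypedRecord._fields = dict(fields)
    return _TypedRecord

Flags = typed_record(('cflags', list), ('defines', list))
f = Flags()
f.cflags += ['-O2', '-g']   # the mutation syntax the docstring advertises
f.defines = ('DEBUG',)      # tuple is coerced to list by __setattr__
print(f.cflags, f.defines)  # ['-O2', '-g'] ['DEBUG']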
mitotic/graphterm
193a5c925fd146d83f6d5967b2f0ee0745bf0b1c
graphterm/otrace.py
python
PickleInterface.get_child_tree
(cls, ancestor_key, entity_char=":")
return context
Returns dict tree branch below the root tree
Returns dict tree branch below the root tree
[ "Returns", "dict", "tree", "branch", "below", "the", "root", "tree" ]
def get_child_tree(cls, ancestor_key, entity_char=":"):
    """ Returns dict tree branch below the root tree """
    path = cls.path_from_key(ancestor_key)
    context = {entity_char: cls.get_entity(ancestor_key)}
    while path:
        key = path.pop()
        context = {key: context}
    return context
[ "def", "get_child_tree", "(", "cls", ",", "ancestor_key", ",", "entity_char", "=", "\":\"", ")", ":", "path", "=", "cls", ".", "path_from_key", "(", "ancestor_key", ")", "context", "=", "{", "entity_char", ":", "cls", ".", "get_entity", "(", "ancestor_key", ")", "}", "while", "path", ":", "key", "=", "path", ".", "pop", "(", ")", "context", "=", "{", "key", ":", "context", "}", "return", "context" ]
https://github.com/mitotic/graphterm/blob/193a5c925fd146d83f6d5967b2f0ee0745bf0b1c/graphterm/otrace.py#L5498-L5506
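The wrapping loop builds the nested branch from the inside out; the path and entity below are illustrative:

def child_tree(path, entity, entity_char=":"):
    context = {entity_char: entity}
    while path:
        key = path.pop()         # deepest key wraps first
        context = {key: context}
    return context

print(child_tree(["projects", "otrace", "globals"], {"x": 1}))
# {'projects': {'otrace': {'globals': {':': {'x': 1}}}}}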
nodejs/node
ac3c33c1646bf46104c15ae035982c06364da9b8
deps/v8/tools/lldb_commands.py
python
jco
(debugger, param, *args)
Print the code object at the given pc (default: current pc)
Print the code object at the given pc (default: current pc)
[ "Print", "the", "code", "object", "at", "the", "given", "pc", "(", "default", ":", "current", "pc", ")" ]
def jco(debugger, param, *args):
    """Print the code object at the given pc (default: current pc)"""
    if not param:
        param = str(current_frame(debugger).FindRegister("pc").value)
    ptr_arg_cmd(debugger, 'jco', param, "_v8_internal_Print_Code({})")
[ "def", "jco", "(", "debugger", ",", "param", ",", "*", "args", ")", ":", "if", "not", "param", ":", "param", "=", "str", "(", "current_frame", "(", "debugger", ")", ".", "FindRegister", "(", "\"pc\"", ")", ".", "value", ")", "ptr_arg_cmd", "(", "debugger", ",", "'jco'", ",", "param", ",", "\"_v8_internal_Print_Code({})\"", ")" ]
https://github.com/nodejs/node/blob/ac3c33c1646bf46104c15ae035982c06364da9b8/deps/v8/tools/lldb_commands.py#L58-L62
odoo/odoo
8de8c196a137f4ebbf67d7c7c83fee36f873f5c8
odoo/tools/mail.py
python
email_escape_char
(email_address)
return email_address.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_')
Escape problematic characters in the given email address string
Escape problematic characters in the given email address string
[ "Escape", "problematic", "characters", "in", "the", "given", "email", "address", "string" ]
def email_escape_char(email_address): """ Escape problematic characters in the given email address string""" return email_address.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_')
[ "def", "email_escape_char", "(", "email_address", ")", ":", "return", "email_address", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", ".", "replace", "(", "'%'", ",", "'\\\\%'", ")", ".", "replace", "(", "'_'", ",", "'\\\\_'", ")" ]
https://github.com/odoo/odoo/blob/8de8c196a137f4ebbf67d7c7c83fee36f873f5c8/odoo/tools/mail.py#L549-L551
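The escaping targets SQL LIKE wildcards, which a quick doctest-style check makes explicit (no assumptions beyond the function itself):

# '%' and '_' are LIKE wildcards and '\' is the escape character itself,
# so all three get a backslash prefix before interpolation into a pattern.
assert email_escape_char('jo_hn%@example.com') == 'jo\\_hn\\%@example.com'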
jupyter/notebook
26626343384195a1f4f5461ba42eb3e133655976
notebook/services/kernels/kernelmanager.py
python
MappingKernelManager.restart_kernel
(self, kernel_id, now=False)
return future
Restart a kernel by kernel_id
Restart a kernel by kernel_id
[ "Restart", "a", "kernel", "by", "kernel_id" ]
async def restart_kernel(self, kernel_id, now=False): """Restart a kernel by kernel_id""" self._check_kernel_id(kernel_id) await maybe_future(self.pinned_superclass.restart_kernel(self, kernel_id, now=now)) kernel = self.get_kernel(kernel_id) # return a Future that will resolve when the kernel has successfully restarted channel = kernel.connect_shell() future = Future() def finish(): """Common cleanup when restart finishes/fails for any reason.""" if not channel.closed(): channel.close() loop.remove_timeout(timeout) kernel.remove_restart_callback(on_restart_failed, 'dead') def on_reply(msg): self.log.debug("Kernel info reply received: %s", kernel_id) finish() if not future.done(): future.set_result(msg) def on_timeout(): self.log.warning("Timeout waiting for kernel_info_reply: %s", kernel_id) finish() if not future.done(): future.set_exception(TimeoutError("Timeout waiting for restart")) def on_restart_failed(): self.log.warning("Restarting kernel failed: %s", kernel_id) finish() if not future.done(): future.set_exception(RuntimeError("Restart failed")) kernel.add_restart_callback(on_restart_failed, 'dead') kernel.session.send(channel, "kernel_info_request") channel.on_recv(on_reply) loop = IOLoop.current() timeout = loop.add_timeout(loop.time() + self.kernel_info_timeout, on_timeout) return future
[ "async", "def", "restart_kernel", "(", "self", ",", "kernel_id", ",", "now", "=", "False", ")", ":", "self", ".", "_check_kernel_id", "(", "kernel_id", ")", "await", "maybe_future", "(", "self", ".", "pinned_superclass", ".", "restart_kernel", "(", "self", ",", "kernel_id", ",", "now", "=", "now", ")", ")", "kernel", "=", "self", ".", "get_kernel", "(", "kernel_id", ")", "# return a Future that will resolve when the kernel has successfully restarted", "channel", "=", "kernel", ".", "connect_shell", "(", ")", "future", "=", "Future", "(", ")", "def", "finish", "(", ")", ":", "\"\"\"Common cleanup when restart finishes/fails for any reason.\"\"\"", "if", "not", "channel", ".", "closed", "(", ")", ":", "channel", ".", "close", "(", ")", "loop", ".", "remove_timeout", "(", "timeout", ")", "kernel", ".", "remove_restart_callback", "(", "on_restart_failed", ",", "'dead'", ")", "def", "on_reply", "(", "msg", ")", ":", "self", ".", "log", ".", "debug", "(", "\"Kernel info reply received: %s\"", ",", "kernel_id", ")", "finish", "(", ")", "if", "not", "future", ".", "done", "(", ")", ":", "future", ".", "set_result", "(", "msg", ")", "def", "on_timeout", "(", ")", ":", "self", ".", "log", ".", "warning", "(", "\"Timeout waiting for kernel_info_reply: %s\"", ",", "kernel_id", ")", "finish", "(", ")", "if", "not", "future", ".", "done", "(", ")", ":", "future", ".", "set_exception", "(", "TimeoutError", "(", "\"Timeout waiting for restart\"", ")", ")", "def", "on_restart_failed", "(", ")", ":", "self", ".", "log", ".", "warning", "(", "\"Restarting kernel failed: %s\"", ",", "kernel_id", ")", "finish", "(", ")", "if", "not", "future", ".", "done", "(", ")", ":", "future", ".", "set_exception", "(", "RuntimeError", "(", "\"Restart failed\"", ")", ")", "kernel", ".", "add_restart_callback", "(", "on_restart_failed", ",", "'dead'", ")", "kernel", ".", "session", ".", "send", "(", "channel", ",", "\"kernel_info_request\"", ")", "channel", ".", "on_recv", "(", "on_reply", ")", "loop", "=", "IOLoop", ".", "current", "(", ")", "timeout", "=", "loop", ".", "add_timeout", "(", "loop", ".", "time", "(", ")", "+", "self", ".", "kernel_info_timeout", ",", "on_timeout", ")", "return", "future" ]
https://github.com/jupyter/notebook/blob/26626343384195a1f4f5461ba42eb3e133655976/notebook/services/kernels/kernelmanager.py#L311-L350
domogik/domogik
fefd584d354875bcb15f351cbc455abffaa6501f
src/domogik/xpl/common/xplmessage.py
python
XplMessage.set_header
(self, hop_count=None, source=None, target=None)
Set the message header. @param hop_count: hop count @type hop_count: int @param source: message source @type source: str @param target: message target @type target: str
Set the message header.
[ "Set", "the", "message", "header", "." ]
def set_header(self, hop_count=None, source=None, target=None): """ Set the message header. @param hop_count: hop count @type hop_count: int @param source: message source @type source: str @param target: message target @type target: str """ if hop_count is not None: self.set_hop_count(hop_count) if source is not None: self.set_source(source) if target is not None: self.set_target(target)
[ "def", "set_header", "(", "self", ",", "hop_count", "=", "None", ",", "source", "=", "None", ",", "target", "=", "None", ")", ":", "if", "hop_count", "is", "not", "None", ":", "self", ".", "set_hop_count", "(", "hop_count", ")", "if", "source", "is", "not", "None", ":", "self", ".", "set_source", "(", "source", ")", "if", "target", "is", "not", "None", ":", "self", ".", "set_target", "(", "target", ")" ]
https://github.com/domogik/domogik/blob/fefd584d354875bcb15f351cbc455abffaa6501f/src/domogik/xpl/common/xplmessage.py#L310-L327
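A short call sketch for the setter; the header values are invented, and omitted keywords leave the corresponding fields untouched:

msg = XplMessage()                           # assumes the default constructor is usable here
msg.set_header(hop_count=1,
               source='domogik-plugin.myhost',  # invented xPL source address
               target='*')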
sbrshk/whatever
f7ba72effd6f836ca701ed889c747db804d5ea8f
node_modules/node-gyp/gyp/tools/pretty_vcproj.py
python
FlattenFilter
(node)
return node_list
Returns a list of all the node and sub nodes.
Returns a list of all the node and sub nodes.
[ "Returns", "a", "list", "of", "all", "the", "node", "and", "sub", "nodes", "." ]
def FlattenFilter(node): """Returns a list of all the node and sub nodes.""" node_list = [] if (node.attributes and node.getAttribute('Name') == '_excluded_files'): # We don't add the "_excluded_files" filter. return [] for current in node.childNodes: if current.nodeName == 'Filter': node_list.extend(FlattenFilter(current)) else: node_list.append(current) return node_list
[ "def", "FlattenFilter", "(", "node", ")", ":", "node_list", "=", "[", "]", "if", "(", "node", ".", "attributes", "and", "node", ".", "getAttribute", "(", "'Name'", ")", "==", "'_excluded_files'", ")", ":", "# We don't add the \"_excluded_files\" filter.", "return", "[", "]", "for", "current", "in", "node", ".", "childNodes", ":", "if", "current", ".", "nodeName", "==", "'Filter'", ":", "node_list", ".", "extend", "(", "FlattenFilter", "(", "current", ")", ")", "else", ":", "node_list", ".", "append", "(", "current", ")", "return", "node_list" ]
https://github.com/sbrshk/whatever/blob/f7ba72effd6f836ca701ed889c747db804d5ea8f/node_modules/node-gyp/gyp/tools/pretty_vcproj.py#L95-L110
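Because the function only needs DOM nodes, it can be exercised with xml.dom.minidom; the vcproj-like snippet below is made up for illustration:

from xml.dom import minidom

doc = minidom.parseString(
    '<Files><Filter Name="src"><File a="1"/></Filter><File a="2"/></Files>')
flat = FlattenFilter(doc.documentElement)
# Filter wrappers dissolve; both File elements come back in document order.
assert [n.nodeName for n in flat] == ['File', 'File']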
replit-archive/jsrepl
36d79b6288ca5d26208e8bade2a168c6ebcb2376
extern/python/unclosured/lib/python2.7/lib2to3/pgen2/parse.py
python
Parser.setup
(self, start=None)
Prepare for parsing. This *must* be called before starting to parse. The optional argument is an alternative start symbol; it defaults to the grammar's start symbol. You can use a Parser instance to parse any number of programs; each time you call setup() the parser is reset to an initial state determined by the (implicit or explicit) start symbol.
Prepare for parsing.
[ "Prepare", "for", "parsing", "." ]
def setup(self, start=None): """Prepare for parsing. This *must* be called before starting to parse. The optional argument is an alternative start symbol; it defaults to the grammar's start symbol. You can use a Parser instance to parse any number of programs; each time you call setup() the parser is reset to an initial state determined by the (implicit or explicit) start symbol. """ if start is None: start = self.grammar.start # Each stack entry is a tuple: (dfa, state, node). # A node is a tuple: (type, value, context, children), # where children is a list of nodes or None, and context may be None. newnode = (start, None, None, []) stackentry = (self.grammar.dfas[start], 0, newnode) self.stack = [stackentry] self.rootnode = None self.used_names = set()
[ "def", "setup", "(", "self", ",", "start", "=", "None", ")", ":", "if", "start", "is", "None", ":", "start", "=", "self", ".", "grammar", ".", "start", "# Each stack entry is a tuple: (dfa, state, node).", "# A node is a tuple: (type, value, context, children),", "# where children is a list of nodes or None, and context may be None.", "newnode", "=", "(", "start", ",", "None", ",", "None", ",", "[", "]", ")", "stackentry", "=", "(", "self", ".", "grammar", ".", "dfas", "[", "start", "]", ",", "0", ",", "newnode", ")", "self", ".", "stack", "=", "[", "stackentry", "]", "self", ".", "rootnode", "=", "None", "self", ".", "used_names", "=", "set", "(", ")" ]
https://github.com/replit-archive/jsrepl/blob/36d79b6288ca5d26208e8bade2a168c6ebcb2376/extern/python/unclosured/lib/python2.7/lib2to3/pgen2/parse.py#L89-L111
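The setup()/addtoken() contract in driver form, as a sketch; pgen2's own Driver follows this pattern, and the token stream here is schematic:

p = Parser(grammar)
p.setup()                              # reset to the grammar's start symbol
for type_, value, context in tokens:   # schematic (type, value, context) tuples
    if p.addtoken(type_, value, context):
        break                          # grammar accepted the input
tree = p.rootnode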
jam-py/jam-py
0821492cdff8665928e0f093a4435aa64285a45c
jam/third_party/sqlalchemy/engine/interfaces.py
python
CreateEnginePlugin.__init__
(self, url, kwargs)
Construct a new :class:`.CreateEnginePlugin`. The plugin object is instantiated individually for each call to :func:`.create_engine`. A single :class:`.Engine` will be passed to the :meth:`.CreateEnginePlugin.engine_created` method corresponding to this URL. :param url: the :class:`.URL` object. The plugin should inspect what it needs here as well as remove its custom arguments from the :attr:`.URL.query` collection. The URL can be modified in-place in any other way as well. :param kwargs: The keyword arguments passed to :func:`.create_engine`. The plugin can read and modify this dictionary in-place, to affect the ultimate arguments used to create the engine. It should remove its custom arguments from the dictionary as well.
Construct a new :class:`.CreateEnginePlugin`.
[ "Construct", "a", "new", ":", "class", ":", ".", "CreateEnginePlugin", "." ]
def __init__(self, url, kwargs): """Construct a new :class:`.CreateEnginePlugin`. The plugin object is instantiated individually for each call to :func:`.create_engine`. A single :class:`.Engine` will be passed to the :meth:`.CreateEnginePlugin.engine_created` method corresponding to this URL. :param url: the :class:`.URL` object. The plugin should inspect what it needs here as well as remove its custom arguments from the :attr:`.URL.query` collection. The URL can be modified in-place in any other way as well. :param kwargs: The keyword arguments passed to :func:`.create_engine`. The plugin can read and modify this dictionary in-place, to affect the ultimate arguments used to create the engine. It should remove its custom arguments from the dictionary as well. """ self.url = url
[ "def", "__init__", "(", "self", ",", "url", ",", "kwargs", ")", ":", "self", ".", "url", "=", "url" ]
https://github.com/jam-py/jam-py/blob/0821492cdff8665928e0f093a4435aa64285a45c/jam/third_party/sqlalchemy/engine/interfaces.py#L975-L993
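A compact subclass sketch matching this version's contract, where URL.query is still a mutable dict; the plugin name and its custom argument are invented:

from sqlalchemy.engine import CreateEnginePlugin

class AuditPlugin(CreateEnginePlugin):                 # hypothetical plugin
    def __init__(self, url, kwargs):
        super(AuditPlugin, self).__init__(url, kwargs)
        # Consume the custom argument so create_engine() never sees it.
        self.audit_tag = url.query.pop('audit_tag', None)

    def engine_created(self, engine):
        engine.audit_tag = self.audit_tag              # attach state to the new Engine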
mozilla/spidernode
aafa9e5273f954f272bb4382fc007af14674b4c2
tools/gyp/pylib/gyp/MSVSNew.py
python
MSVSSolution.Write
(self, writer=gyp.common.WriteOnDiff)
Writes the solution file to disk. Raises: IndexError: An entry appears multiple times.
Writes the solution file to disk.
[ "Writes", "the", "solution", "file", "to", "disk", "." ]
def Write(self, writer=gyp.common.WriteOnDiff): """Writes the solution file to disk. Raises: IndexError: An entry appears multiple times. """ # Walk the entry tree and collect all the folders and projects. all_entries = set() entries_to_check = self.entries[:] while entries_to_check: e = entries_to_check.pop(0) # If this entry has been visited, nothing to do. if e in all_entries: continue all_entries.add(e) # If this is a folder, check its entries too. if isinstance(e, MSVSFolder): entries_to_check += e.entries all_entries = sorted(all_entries) # Open file and print header f = writer(self.path) f.write('Microsoft Visual Studio Solution File, ' 'Format Version %s\r\n' % self.version.SolutionVersion()) f.write('# %s\r\n' % self.version.Description()) # Project entries sln_root = os.path.split(self.path)[0] for e in all_entries: relative_path = gyp.common.RelativePath(e.path, sln_root) # msbuild does not accept an empty folder_name. # use '.' in case relative_path is empty. folder_name = relative_path.replace('/', '\\') or '.' f.write('Project("%s") = "%s", "%s", "%s"\r\n' % ( e.entry_type_guid, # Entry type GUID e.name, # Folder name folder_name, # Folder name (again) e.get_guid(), # Entry GUID )) # TODO(rspangler): Need a way to configure this stuff if self.websiteProperties: f.write('\tProjectSection(WebsiteProperties) = preProject\r\n' '\t\tDebug.AspNetCompiler.Debug = "True"\r\n' '\t\tRelease.AspNetCompiler.Debug = "False"\r\n' '\tEndProjectSection\r\n') if isinstance(e, MSVSFolder): if e.items: f.write('\tProjectSection(SolutionItems) = preProject\r\n') for i in e.items: f.write('\t\t%s = %s\r\n' % (i, i)) f.write('\tEndProjectSection\r\n') if isinstance(e, MSVSProject): if e.dependencies: f.write('\tProjectSection(ProjectDependencies) = postProject\r\n') for d in e.dependencies: f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid())) f.write('\tEndProjectSection\r\n') f.write('EndProject\r\n') # Global section f.write('Global\r\n') # Configurations (variants) f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n') for v in self.variants: f.write('\t\t%s = %s\r\n' % (v, v)) f.write('\tEndGlobalSection\r\n') # Sort config guids for easier diffing of solution changes. config_guids = [] config_guids_overrides = {} for e in all_entries: if isinstance(e, MSVSProject): config_guids.append(e.get_guid()) config_guids_overrides[e.get_guid()] = e.config_platform_overrides config_guids.sort() f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n') for g in config_guids: for v in self.variants: nv = config_guids_overrides[g].get(v, v) # Pick which project configuration to build for this solution # configuration. f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % ( g, # Project GUID v, # Solution build configuration nv, # Project build config for that solution config )) # Enable project in this solution configuration. 
f.write('\t\t%s.%s.Build.0 = %s\r\n' % ( g, # Project GUID v, # Solution build configuration nv, # Project build config for that solution config )) f.write('\tEndGlobalSection\r\n') # TODO(rspangler): Should be able to configure this stuff too (though I've # never seen this be any different) f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n') f.write('\t\tHideSolutionNode = FALSE\r\n') f.write('\tEndGlobalSection\r\n') # Folder mappings # Omit this section if there are no folders if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]): f.write('\tGlobalSection(NestedProjects) = preSolution\r\n') for e in all_entries: if not isinstance(e, MSVSFolder): continue # Does not apply to projects, only folders for subentry in e.entries: f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid())) f.write('\tEndGlobalSection\r\n') f.write('EndGlobal\r\n') f.close()
[ "def", "Write", "(", "self", ",", "writer", "=", "gyp", ".", "common", ".", "WriteOnDiff", ")", ":", "# Walk the entry tree and collect all the folders and projects.", "all_entries", "=", "set", "(", ")", "entries_to_check", "=", "self", ".", "entries", "[", ":", "]", "while", "entries_to_check", ":", "e", "=", "entries_to_check", ".", "pop", "(", "0", ")", "# If this entry has been visited, nothing to do.", "if", "e", "in", "all_entries", ":", "continue", "all_entries", ".", "add", "(", "e", ")", "# If this is a folder, check its entries too.", "if", "isinstance", "(", "e", ",", "MSVSFolder", ")", ":", "entries_to_check", "+=", "e", ".", "entries", "all_entries", "=", "sorted", "(", "all_entries", ")", "# Open file and print header", "f", "=", "writer", "(", "self", ".", "path", ")", "f", ".", "write", "(", "'Microsoft Visual Studio Solution File, '", "'Format Version %s\\r\\n'", "%", "self", ".", "version", ".", "SolutionVersion", "(", ")", ")", "f", ".", "write", "(", "'# %s\\r\\n'", "%", "self", ".", "version", ".", "Description", "(", ")", ")", "# Project entries", "sln_root", "=", "os", ".", "path", ".", "split", "(", "self", ".", "path", ")", "[", "0", "]", "for", "e", "in", "all_entries", ":", "relative_path", "=", "gyp", ".", "common", ".", "RelativePath", "(", "e", ".", "path", ",", "sln_root", ")", "# msbuild does not accept an empty folder_name.", "# use '.' in case relative_path is empty.", "folder_name", "=", "relative_path", ".", "replace", "(", "'/'", ",", "'\\\\'", ")", "or", "'.'", "f", ".", "write", "(", "'Project(\"%s\") = \"%s\", \"%s\", \"%s\"\\r\\n'", "%", "(", "e", ".", "entry_type_guid", ",", "# Entry type GUID", "e", ".", "name", ",", "# Folder name", "folder_name", ",", "# Folder name (again)", "e", ".", "get_guid", "(", ")", ",", "# Entry GUID", ")", ")", "# TODO(rspangler): Need a way to configure this stuff", "if", "self", ".", "websiteProperties", ":", "f", ".", "write", "(", "'\\tProjectSection(WebsiteProperties) = preProject\\r\\n'", "'\\t\\tDebug.AspNetCompiler.Debug = \"True\"\\r\\n'", "'\\t\\tRelease.AspNetCompiler.Debug = \"False\"\\r\\n'", "'\\tEndProjectSection\\r\\n'", ")", "if", "isinstance", "(", "e", ",", "MSVSFolder", ")", ":", "if", "e", ".", "items", ":", "f", ".", "write", "(", "'\\tProjectSection(SolutionItems) = preProject\\r\\n'", ")", "for", "i", "in", "e", ".", "items", ":", "f", ".", "write", "(", "'\\t\\t%s = %s\\r\\n'", "%", "(", "i", ",", "i", ")", ")", "f", ".", "write", "(", "'\\tEndProjectSection\\r\\n'", ")", "if", "isinstance", "(", "e", ",", "MSVSProject", ")", ":", "if", "e", ".", "dependencies", ":", "f", ".", "write", "(", "'\\tProjectSection(ProjectDependencies) = postProject\\r\\n'", ")", "for", "d", "in", "e", ".", "dependencies", ":", "f", ".", "write", "(", "'\\t\\t%s = %s\\r\\n'", "%", "(", "d", ".", "get_guid", "(", ")", ",", "d", ".", "get_guid", "(", ")", ")", ")", "f", ".", "write", "(", "'\\tEndProjectSection\\r\\n'", ")", "f", ".", "write", "(", "'EndProject\\r\\n'", ")", "# Global section", "f", ".", "write", "(", "'Global\\r\\n'", ")", "# Configurations (variants)", "f", ".", "write", "(", "'\\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\\r\\n'", ")", "for", "v", "in", "self", ".", "variants", ":", "f", ".", "write", "(", "'\\t\\t%s = %s\\r\\n'", "%", "(", "v", ",", "v", ")", ")", "f", ".", "write", "(", "'\\tEndGlobalSection\\r\\n'", ")", "# Sort config guids for easier diffing of solution changes.", "config_guids", "=", "[", "]", "config_guids_overrides", "=", "{", "}", "for", "e", "in", 
"all_entries", ":", "if", "isinstance", "(", "e", ",", "MSVSProject", ")", ":", "config_guids", ".", "append", "(", "e", ".", "get_guid", "(", ")", ")", "config_guids_overrides", "[", "e", ".", "get_guid", "(", ")", "]", "=", "e", ".", "config_platform_overrides", "config_guids", ".", "sort", "(", ")", "f", ".", "write", "(", "'\\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\\r\\n'", ")", "for", "g", "in", "config_guids", ":", "for", "v", "in", "self", ".", "variants", ":", "nv", "=", "config_guids_overrides", "[", "g", "]", ".", "get", "(", "v", ",", "v", ")", "# Pick which project configuration to build for this solution", "# configuration.", "f", ".", "write", "(", "'\\t\\t%s.%s.ActiveCfg = %s\\r\\n'", "%", "(", "g", ",", "# Project GUID", "v", ",", "# Solution build configuration", "nv", ",", "# Project build config for that solution config", ")", ")", "# Enable project in this solution configuration.", "f", ".", "write", "(", "'\\t\\t%s.%s.Build.0 = %s\\r\\n'", "%", "(", "g", ",", "# Project GUID", "v", ",", "# Solution build configuration", "nv", ",", "# Project build config for that solution config", ")", ")", "f", ".", "write", "(", "'\\tEndGlobalSection\\r\\n'", ")", "# TODO(rspangler): Should be able to configure this stuff too (though I've", "# never seen this be any different)", "f", ".", "write", "(", "'\\tGlobalSection(SolutionProperties) = preSolution\\r\\n'", ")", "f", ".", "write", "(", "'\\t\\tHideSolutionNode = FALSE\\r\\n'", ")", "f", ".", "write", "(", "'\\tEndGlobalSection\\r\\n'", ")", "# Folder mappings", "# Omit this section if there are no folders", "if", "any", "(", "[", "e", ".", "entries", "for", "e", "in", "all_entries", "if", "isinstance", "(", "e", ",", "MSVSFolder", ")", "]", ")", ":", "f", ".", "write", "(", "'\\tGlobalSection(NestedProjects) = preSolution\\r\\n'", ")", "for", "e", "in", "all_entries", ":", "if", "not", "isinstance", "(", "e", ",", "MSVSFolder", ")", ":", "continue", "# Does not apply to projects, only folders", "for", "subentry", "in", "e", ".", "entries", ":", "f", ".", "write", "(", "'\\t\\t%s = %s\\r\\n'", "%", "(", "subentry", ".", "get_guid", "(", ")", ",", "e", ".", "get_guid", "(", ")", ")", ")", "f", ".", "write", "(", "'\\tEndGlobalSection\\r\\n'", ")", "f", ".", "write", "(", "'EndGlobal\\r\\n'", ")", "f", ".", "close", "(", ")" ]
https://github.com/mozilla/spidernode/blob/aafa9e5273f954f272bb4382fc007af14674b4c2/tools/gyp/pylib/gyp/MSVSNew.py#L216-L340
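Call-site sketch; the constructor keywords follow the apparent shape of this class in gyp and are not guaranteed for every revision:

sln = MSVSSolution('out/all.sln', version=msvs_version,    # msvs_version: an MSVSVersion object
                   entries=[project_a, folder_b],          # MSVSProject / MSVSFolder instances
                   variants=['Debug|Win32', 'Release|Win32'])
sln.Write()   # default writer is gyp.common.WriteOnDiff, so unchanged files stay untouched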
openwisp/openwisp-controller
0bfda7a28c86092f165b177c551c07babcb40630
openwisp_controller/connection/base/models.py
python
AbstractCommand.full_clean
(self, *args, **kwargs)
return super().full_clean(*args, **kwargs)
Automatically sets the connection field if empty. Will be done before the rest of the validation process to avoid triggering validation errors.
Automatically sets the connection field if empty. Will be done before the rest of the validation process to avoid triggering validation errors.
[ "Automatically", "sets", "the", "connection", "field", "if", "empty", "Will", "be", "done", "before", "the", "rest", "of", "the", "validation", "process", "to", "avoid", "triggering", "validation", "errors", "." ]
def full_clean(self, *args, **kwargs): """ Automatically sets the connection field if empty. Will be done before the rest of the validation process to avoid triggering validation errors. """ if not self.connection: self.connection = self.device.deviceconnection_set.first() return super().full_clean(*args, **kwargs)
[ "def", "full_clean", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "connection", ":", "self", ".", "connection", "=", "self", ".", "device", ".", "deviceconnection_set", ".", "first", "(", ")", "return", "super", "(", ")", ".", "full_clean", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/openwisp/openwisp-controller/blob/0bfda7a28c86092f165b177c551c07babcb40630/openwisp_controller/connection/base/models.py#L398-L406
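Effect in practice, as a hedged sketch against openwisp-controller's Command model; the device fixture and the field values are assumptions:

cmd = Command(device=device, type='custom', input={'command': 'uptime'})
cmd.full_clean()    # connection was left empty, so it is auto-filled before validation
assert cmd.connection == device.deviceconnection_set.first()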
replit-archive/jsrepl
36d79b6288ca5d26208e8bade2a168c6ebcb2376
extern/python/reloop-closured/lib/python2.7/rexec.py
python
RExec.s_unload
(self, *args)
return self.s_apply(self.r_unload, args)
Unload the module. Removes it from the restricted environment's sys.modules dictionary. This method is implicitly called by code executing in the restricted environment. Overriding this method in a subclass is used to change the policies enforced by a restricted environment. Similar to the r_unload() method, but has access to restricted versions of the standard I/O streams sys.stdin, sys.stderr, and sys.stdout.
Unload the module.
[ "Unload", "the", "module", "." ]
def s_unload(self, *args): """Unload the module. Removes it from the restricted environment's sys.modules dictionary. This method is implicitly called by code executing in the restricted environment. Overriding this method in a subclass is used to change the policies enforced by a restricted environment. Similar to the r_unload() method, but has access to restricted versions of the standard I/O streams sys.stdin, sys.stderr, and sys.stdout. """ return self.s_apply(self.r_unload, args)
[ "def", "s_unload", "(", "self", ",", "*", "args", ")", ":", "return", "self", ".", "s_apply", "(", "self", ".", "r_unload", ",", "args", ")" ]
https://github.com/replit-archive/jsrepl/blob/36d79b6288ca5d26208e8bade2a168c6ebcb2376/extern/python/reloop-closured/lib/python2.7/rexec.py#L491-L505
alex-cory/fasthacks
72b099f11df2e5640d61e55c80706c3b234eacbe
cli_modules/preview/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/scanner.py
python
Scanner.check
(self, pattern)
return self._re_cache[pattern].match(self.data, self.pos)
Apply `pattern` on the current position and return the match object. (Doesn't touch pos). Use this for lookahead.
Apply `pattern` on the current position and return the match object. (Doesn't touch pos). Use this for lookahead.
[ "Apply", "pattern", "on", "the", "current", "position", "and", "return", "the", "match", "object", ".", "(", "Doesn", "t", "touch", "pos", ")", ".", "Use", "this", "for", "lookahead", "." ]
def check(self, pattern): """ Apply `pattern` on the current position and return the match object. (Doesn't touch pos). Use this for lookahead. """ if self.eos: raise EndOfText() if pattern not in self._re_cache: self._re_cache[pattern] = re.compile(pattern, self.flags) return self._re_cache[pattern].match(self.data, self.pos)
[ "def", "check", "(", "self", ",", "pattern", ")", ":", "if", "self", ".", "eos", ":", "raise", "EndOfText", "(", ")", "if", "pattern", "not", "in", "self", ".", "_re_cache", ":", "self", ".", "_re_cache", "[", "pattern", "]", "=", "re", ".", "compile", "(", "pattern", ",", "self", ".", "flags", ")", "return", "self", ".", "_re_cache", "[", "pattern", "]", ".", "match", "(", "self", ".", "data", ",", "self", ".", "pos", ")" ]
https://github.com/alex-cory/fasthacks/blob/72b099f11df2e5640d61e55c80706c3b234eacbe/cli_modules/preview/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/scanner.py#L55-L65
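A lookahead sketch; the Scanner(data, flags) construction is assumed from this module's surrounding API:

s = Scanner('width=42;', flags=0)
m = s.check(r'\w+')            # pure lookahead: returns a match object...
assert m.group() == 'width'
assert s.pos == 0              # ...without advancing the scan position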
tum-pbs/PhiFlow
31d9944f4f26e56358dd73fa797dde567b6334b0
phi/math/backend/_dtype.py
python
DType.itemsize
(self)
return self.bits // 8
Number of bytes used to store a single value of this type. See `DType.bits`.
Number of bytes used to store a single value of this type. See `DType.bits`.
[ "Number", "of", "bytes", "used", "to", "storea", "single", "value", "of", "this", "type", ".", "See", "DType", ".", "bits", "." ]
def itemsize(self): """ Number of bytes used to store a single value of this type. See `DType.bits`. """ assert self.bits % 8 == 0 return self.bits // 8
[ "def", "itemsize", "(", "self", ")", ":", "assert", "self", ".", "bits", "%", "8", "==", "0", "return", "self", ".", "bits", "//", "8" ]
https://github.com/tum-pbs/PhiFlow/blob/31d9944f4f26e56358dd73fa797dde567b6334b0/phi/math/backend/_dtype.py#L48-L51
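A one-line check of the bits/bytes relationship; the DType(kind, bits) constructor signature is assumed from the surrounding module:

assert DType(float, 64).itemsize == 8   # 64 bits -> 8 bytes; bit counts not divisible by 8 trip the assert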
getredash/redash
49fe29579a56e7a8e206895586eca1736a6d210d
redash/models/users.py
python
update_user_active_at
(sender, *args, **kwargs)
Used as a Flask request_started signal callback that adds the current user's details to Redis
Used as a Flask request_started signal callback that adds the current user's details to Redis
[ "Used", "as", "a", "Flask", "request_started", "signal", "callback", "that", "adds", "the", "current", "user", "s", "details", "to", "Redis" ]
def update_user_active_at(sender, *args, **kwargs): """ Used as a Flask request_started signal callback that adds the current user's details to Redis """ if current_user.is_authenticated and not current_user.is_api_user(): redis_connection.hset(LAST_ACTIVE_KEY, current_user.id, int(time.time()))
[ "def", "update_user_active_at", "(", "sender", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "current_user", ".", "is_authenticated", "and", "not", "current_user", ".", "is_api_user", "(", ")", ":", "redis_connection", ".", "hset", "(", "LAST_ACTIVE_KEY", ",", "current_user", ".", "id", ",", "int", "(", "time", ".", "time", "(", ")", ")", ")" ]
https://github.com/getredash/redash/blob/49fe29579a56e7a8e206895586eca1736a6d210d/redash/models/users.py#L48-L54
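The hookup implied by the docstring uses the standard Flask signals API:

from flask import request_started

# Fire the callback at the start of every request handled by `app`.
request_started.connect(update_user_active_at, app)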
xixiaoyao/CS224n-winter-together
f1fbcd4db284a804cb9dfc24b65481ba66e7d32c
Assignments/assignment3/BobOfRivia/parser_model.py
python
ParserModel.embedding_lookup
(self, w)
return x
Utilize `w` to select embeddings from embedding matrix `self.embeddings` @param w (Tensor): input tensor of word indices (batch_size, n_features) @return x (Tensor): tensor of embeddings for words represented in w (batch_size, n_features * embed_size)
Utilize `w` to select embeddings from embedding matrix `self.embeddings` @param w (Tensor): input tensor of word indices (batch_size, n_features)
[ "Utilize", "w", "to", "select", "embeddings", "from", "embedding", "matrix", "self", ".", "embeddings", "@param", "w", "(", "Tensor", ")", ":", "input", "tensor", "of", "word", "indices", "(", "batch_size", "n_features", ")" ]
def embedding_lookup(self, w): """ Utilize `w` to select embeddings from embedding matrix `self.embeddings` @param w (Tensor): input tensor of word indices (batch_size, n_features) @return x (Tensor): tensor of embeddings for words represented in w (batch_size, n_features * embed_size) """ ### YOUR CODE HERE (~1-3 Lines) ### TODO: ### 1) For each index `i` in `w`, select `i`th vector from self.embeddings ### 2) Reshape the tensor using `view` function if necessary ### ### Note: All embedding vectors are stacked and stored as a matrix. The model receives ### a list of indices representing a sequence of words, then it calls this lookup ### function to map indices to sequence of embeddings. ### ### This problem aims to test your understanding of embedding lookup, ### so DO NOT use any high level API like nn.Embedding ### (we are asking you to implement that!). Pay attention to tensor shapes ### and reshape if necessary. Make sure you know each tensor's shape before you run the code! ### ### Pytorch has some useful APIs for you, and you can use either one ### in this problem (except nn.Embedding). These docs might be helpful: ### Index select: https://pytorch.org/docs/stable/torch.html#torch.index_select ### Gather: https://pytorch.org/docs/stable/torch.html#torch.gather ### View: https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view # [tensor_node,tensor_node,tensor_node ...] -> tensor # tensor_node->numpy # tensor = torch.tensor(numpy) x = torch.tensor([torch.index_select(self.embeddings, 0, w[i]).view(-1).detach().numpy() for i in range(w.shape[0])]) ### END YOUR CODE return x
[ "def", "embedding_lookup", "(", "self", ",", "w", ")", ":", "### YOUR CODE HERE (~1-3 Lines)", "### TODO:", "### 1) For each index `i` in `w`, select `i`th vector from self.embeddings", "### 2) Reshape the tensor using `view` function if necessary", "###", "### Note: All embedding vectors are stacked and stored as a matrix. The model receives", "### a list of indices representing a sequence of words, then it calls this lookup", "### function to map indices to sequence of embeddings.", "###", "### This problem aims to test your understanding of embedding lookup,", "### so DO NOT use any high level API like nn.Embedding", "### (we are asking you to implement that!). Pay attention to tensor shapes", "### and reshape if necessary. Make sure you know each tensor's shape before you run the code!", "###", "### Pytorch has some useful APIs for you, and you can use either one", "### in this problem (except nn.Embedding). These docs might be helpful:", "### Index select: https://pytorch.org/docs/stable/torch.html#torch.index_select", "### Gather: https://pytorch.org/docs/stable/torch.html#torch.gather", "### View: https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view", "# [tensor_node,tensor_node,tensor_node ...] -> tensor", "# tensor_node->numpy", "# tensor = torch.tensor(numpy)", "x", "=", "torch", ".", "tensor", "(", "[", "torch", ".", "index_select", "(", "self", ".", "embeddings", ",", "0", ",", "w", "[", "i", "]", ")", ".", "view", "(", "-", "1", ")", ".", "detach", "(", ")", ".", "numpy", "(", ")", "for", "i", "in", "range", "(", "w", ".", "shape", "[", "0", "]", ")", "]", ")", "### END YOUR CODE", "return", "x" ]
https://github.com/xixiaoyao/CS224n-winter-together/blob/f1fbcd4db284a804cb9dfc24b65481ba66e7d32c/Assignments/assignment3/BobOfRivia/parser_model.py#L84-L118
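A shape check for the lookup, with toy sizes; `model` and `embed_size` are placeholders consistent with the comments above:

import torch

w = torch.tensor([[1, 2, 0], [3, 0, 0]])   # (batch_size=2, n_features=3) word indices
x = model.embedding_lookup(w)
# Each row concatenates its n_features embedding vectors.
assert x.shape == (2, 3 * embed_size)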
replit-archive/jsrepl
36d79b6288ca5d26208e8bade2a168c6ebcb2376
extern/python/unclosured/lib/python2.7/runpy.py
python
run_module
(mod_name, init_globals=None, run_name=None, alter_sys=False)
Execute a module's code without importing it Returns the resulting top level namespace dictionary
Execute a module's code without importing it
[ "Execute", "a", "module", "s", "code", "without", "importing", "it" ]
def run_module(mod_name, init_globals=None, run_name=None, alter_sys=False): """Execute a module's code without importing it Returns the resulting top level namespace dictionary """ mod_name, loader, code, fname = _get_module_details(mod_name) if run_name is None: run_name = mod_name pkg_name = mod_name.rpartition('.')[0] if alter_sys: return _run_module_code(code, init_globals, run_name, fname, loader, pkg_name) else: # Leave the sys module alone return _run_code(code, {}, init_globals, run_name, fname, loader, pkg_name)
[ "def", "run_module", "(", "mod_name", ",", "init_globals", "=", "None", ",", "run_name", "=", "None", ",", "alter_sys", "=", "False", ")", ":", "mod_name", ",", "loader", ",", "code", ",", "fname", "=", "_get_module_details", "(", "mod_name", ")", "if", "run_name", "is", "None", ":", "run_name", "=", "mod_name", "pkg_name", "=", "mod_name", ".", "rpartition", "(", "'.'", ")", "[", "0", "]", "if", "alter_sys", ":", "return", "_run_module_code", "(", "code", ",", "init_globals", ",", "run_name", ",", "fname", ",", "loader", ",", "pkg_name", ")", "else", ":", "# Leave the sys module alone", "return", "_run_code", "(", "code", ",", "{", "}", ",", "init_globals", ",", "run_name", ",", "fname", ",", "loader", ",", "pkg_name", ")" ]
https://github.com/replit-archive/jsrepl/blob/36d79b6288ca5d26208e8bade2a168c6ebcb2376/extern/python/unclosured/lib/python2.7/runpy.py#L164-L180
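Since this is the machinery behind `python -m`, it is easy to exercise directly with the standard library:

import runpy

ns = runpy.run_module('string')   # module code runs in a fresh namespace, returned as a dict
assert ns['__name__'] == 'string' and 'digits' in ns   # run_name defaulted to mod_name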
mceSystems/node-jsc
90634f3064fab8e89a85b3942f0cc5054acc86fa
deps/v8/third_party/jinja2/runtime.py
python
make_logging_undefined
(logger=None, base=None)
return LoggingUndefined
Given a logger object this returns a new undefined class that will log certain failures. It will log iterations and printing. If no logger is given a default logger is created. Example:: logger = logging.getLogger(__name__) LoggingUndefined = make_logging_undefined( logger=logger, base=Undefined ) .. versionadded:: 2.8 :param logger: the logger to use. If not provided, a default logger is created. :param base: the base class to add logging functionality to. This defaults to :class:`Undefined`.
Given a logger object this returns a new undefined class that will log certain failures. It will log iterations and printing. If no logger is given a default logger is created.
[ "Given", "a", "logger", "object", "this", "returns", "a", "new", "undefined", "class", "that", "will", "log", "certain", "failures", ".", "It", "will", "log", "iterations", "and", "printing", ".", "If", "no", "logger", "is", "given", "a", "default", "logger", "is", "created", "." ]
def make_logging_undefined(logger=None, base=None): """Given a logger object this returns a new undefined class that will log certain failures. It will log iterations and printing. If no logger is given a default logger is created. Example:: logger = logging.getLogger(__name__) LoggingUndefined = make_logging_undefined( logger=logger, base=Undefined ) .. versionadded:: 2.8 :param logger: the logger to use. If not provided, a default logger is created. :param base: the base class to add logging functionality to. This defaults to :class:`Undefined`. """ if logger is None: import logging logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler(sys.stderr)) if base is None: base = Undefined def _log_message(undef): if undef._undefined_hint is None: if undef._undefined_obj is missing: hint = '%s is undefined' % undef._undefined_name elif not isinstance(undef._undefined_name, string_types): hint = '%s has no element %s' % ( object_type_repr(undef._undefined_obj), undef._undefined_name) else: hint = '%s has no attribute %s' % ( object_type_repr(undef._undefined_obj), undef._undefined_name) else: hint = undef._undefined_hint logger.warning('Template variable warning: %s', hint) class LoggingUndefined(base): def _fail_with_undefined_error(self, *args, **kwargs): try: return base._fail_with_undefined_error(self, *args, **kwargs) except self._undefined_exception as e: logger.error('Template variable error: %s', str(e)) raise e def __str__(self): rv = base.__str__(self) _log_message(self) return rv def __iter__(self): rv = base.__iter__(self) _log_message(self) return rv if PY2: def __nonzero__(self): rv = base.__nonzero__(self) _log_message(self) return rv def __unicode__(self): rv = base.__unicode__(self) _log_message(self) return rv else: def __bool__(self): rv = base.__bool__(self) _log_message(self) return rv return LoggingUndefined
[ "def", "make_logging_undefined", "(", "logger", "=", "None", ",", "base", "=", "None", ")", ":", "if", "logger", "is", "None", ":", "import", "logging", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "addHandler", "(", "logging", ".", "StreamHandler", "(", "sys", ".", "stderr", ")", ")", "if", "base", "is", "None", ":", "base", "=", "Undefined", "def", "_log_message", "(", "undef", ")", ":", "if", "undef", ".", "_undefined_hint", "is", "None", ":", "if", "undef", ".", "_undefined_obj", "is", "missing", ":", "hint", "=", "'%s is undefined'", "%", "undef", ".", "_undefined_name", "elif", "not", "isinstance", "(", "undef", ".", "_undefined_name", ",", "string_types", ")", ":", "hint", "=", "'%s has no element %s'", "%", "(", "object_type_repr", "(", "undef", ".", "_undefined_obj", ")", ",", "undef", ".", "_undefined_name", ")", "else", ":", "hint", "=", "'%s has no attribute %s'", "%", "(", "object_type_repr", "(", "undef", ".", "_undefined_obj", ")", ",", "undef", ".", "_undefined_name", ")", "else", ":", "hint", "=", "undef", ".", "_undefined_hint", "logger", ".", "warning", "(", "'Template variable warning: %s'", ",", "hint", ")", "class", "LoggingUndefined", "(", "base", ")", ":", "def", "_fail_with_undefined_error", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "base", ".", "_fail_with_undefined_error", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "self", ".", "_undefined_exception", "as", "e", ":", "logger", ".", "error", "(", "'Template variable error: %s'", ",", "str", "(", "e", ")", ")", "raise", "e", "def", "__str__", "(", "self", ")", ":", "rv", "=", "base", ".", "__str__", "(", "self", ")", "_log_message", "(", "self", ")", "return", "rv", "def", "__iter__", "(", "self", ")", ":", "rv", "=", "base", ".", "__iter__", "(", "self", ")", "_log_message", "(", "self", ")", "return", "rv", "if", "PY2", ":", "def", "__nonzero__", "(", "self", ")", ":", "rv", "=", "base", ".", "__nonzero__", "(", "self", ")", "_log_message", "(", "self", ")", "return", "rv", "def", "__unicode__", "(", "self", ")", ":", "rv", "=", "base", ".", "__unicode__", "(", "self", ")", "_log_message", "(", "self", ")", "return", "rv", "else", ":", "def", "__bool__", "(", "self", ")", ":", "rv", "=", "base", ".", "__bool__", "(", "self", ")", "_log_message", "(", "self", ")", "return", "rv", "return", "LoggingUndefined" ]
https://github.com/mceSystems/node-jsc/blob/90634f3064fab8e89a85b3942f0cc5054acc86fa/deps/v8/third_party/jinja2/runtime.py#L531-L609
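The docstring's example extends naturally into a full round trip with the public Jinja2 API:

import logging
from jinja2 import Environment, Undefined, make_logging_undefined

logger = logging.getLogger(__name__)
LoggingUndefined = make_logging_undefined(logger=logger, base=Undefined)
env = Environment(undefined=LoggingUndefined)
env.from_string('Hello {{ missing }}!').render()
# renders 'Hello !' and logs "Template variable warning: missing is undefined"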
IITC-CE/ingress-intel-total-conversion
a154ef3186e31234e9bf20a5028dce2d513c723c
build_plugin.py
python
process_file
(source, out_dir, dist_path=None, deps_list=None)
Generate .user.js (and optionally .meta.js) from given source file. Resulting file(s) are put into out_dir (if specified, otherwise - use current). dist_path component is for adding to @downloadURL/@updateURL.
Generate .user.js (and optionally .meta.js) from given source file.
[ "Generate", ".", "user", ".", "js", "(", "and", "optionally", ".", "meta", ".", "js", ")", "from", "given", "source", "file", "." ]
def process_file(source, out_dir, dist_path=None, deps_list=None): """Generate .user.js (and optionally .meta.js) from given source file. Resulting file(s) are put into out_dir (if specified, otherwise - use current). dist_path component is for adding to @downloadURL/@updateURL. """ global log_dependency log_dependency = partial(log_dependency, deps_list=deps_list) try: meta, script = readtext(source).split('\n\n', 1) except ValueError: raise Exception(f'{source}: wrong input: empty line expected after metablock') plugin_name = source.stem meta, is_main = fill_meta(meta, plugin_name, dist_path) settings.plugin_id = plugin_name path = source.parent # used as root for all (relative) paths script = re.sub(r"'@bundle_code@';", partial(bundle_code, path=path), script) try: script_before_wrapper, script = script.split('\n/*wrapped-from-here*/\n', 1) except ValueError: script_before_wrapper = '' wrapper = get_module(settings.plugin_wrapper) template = r"'@(\w+)(?::([\w./-]+))?@'" # to find '@keyword[:path]@' patterns repl = partial(expand_template, path=path) data = [ meta, script_before_wrapper, re.sub(template, repl, wrapper.start), re.sub(template, repl, script), wrapper.setup if not is_main else '', # it's for plugins only wrapper.end, ] (out_dir / (plugin_name + '.user.js')).write_text(''.join(data), encoding='utf8') if settings.url_dist_base and settings.update_file == '.meta.js': (out_dir / (plugin_name + '.meta.js')).write_text(meta, encoding='utf8')
[ "def", "process_file", "(", "source", ",", "out_dir", ",", "dist_path", "=", "None", ",", "deps_list", "=", "None", ")", ":", "global", "log_dependency", "log_dependency", "=", "partial", "(", "log_dependency", ",", "deps_list", "=", "deps_list", ")", "try", ":", "meta", ",", "script", "=", "readtext", "(", "source", ")", ".", "split", "(", "'\\n\\n'", ",", "1", ")", "except", "ValueError", ":", "raise", "Exception", "(", "f'{source}: wrong input: empty line expected after metablock'", ")", "plugin_name", "=", "source", ".", "stem", "meta", ",", "is_main", "=", "fill_meta", "(", "meta", ",", "plugin_name", ",", "dist_path", ")", "settings", ".", "plugin_id", "=", "plugin_name", "path", "=", "source", ".", "parent", "# used as root for all (relative) paths", "script", "=", "re", ".", "sub", "(", "r\"'@bundle_code@';\"", ",", "partial", "(", "bundle_code", ",", "path", "=", "path", ")", ",", "script", ")", "try", ":", "script_before_wrapper", ",", "script", "=", "script", ".", "split", "(", "'\\n/*wrapped-from-here*/\\n'", ",", "1", ")", "except", "ValueError", ":", "script_before_wrapper", "=", "''", "wrapper", "=", "get_module", "(", "settings", ".", "plugin_wrapper", ")", "template", "=", "r\"'@(\\w+)(?::([\\w./-]+))?@'\"", "# to find '@keyword[:path]@' patterns", "repl", "=", "partial", "(", "expand_template", ",", "path", "=", "path", ")", "data", "=", "[", "meta", ",", "script_before_wrapper", ",", "re", ".", "sub", "(", "template", ",", "repl", ",", "wrapper", ".", "start", ")", ",", "re", ".", "sub", "(", "template", ",", "repl", ",", "script", ")", ",", "wrapper", ".", "setup", "if", "not", "is_main", "else", "''", ",", "# it's for plugins only", "wrapper", ".", "end", ",", "]", "(", "out_dir", "/", "(", "plugin_name", "+", "'.user.js'", ")", ")", ".", "write_text", "(", "''", ".", "join", "(", "data", ")", ",", "encoding", "=", "'utf8'", ")", "if", "settings", ".", "url_dist_base", "and", "settings", ".", "update_file", "==", "'.meta.js'", ":", "(", "out_dir", "/", "(", "plugin_name", "+", "'.meta.js'", ")", ")", ".", "write_text", "(", "meta", ",", "encoding", "=", "'utf8'", ")" ]
https://github.com/IITC-CE/ingress-intel-total-conversion/blob/a154ef3186e31234e9bf20a5028dce2d513c723c/build_plugin.py#L155-L193
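Call sketch for the build helper; the plugin path and output directory are invented, and source must be a pathlib.Path since .stem and .parent are used:

from pathlib import Path

process_file(Path('plugins/example-plugin.user.js'),   # invented plugin file
             Path('build/local'),
             dist_path='build/local')                   # feeds @downloadURL/@updateURL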
atom-community/ide-python
c046f9c2421713b34baa22648235541c5bb284fe
lib/debugger/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/kernel32.py
python
MemoryBasicInformation.is_mapped
(self)
return self.Type == MEM_MAPPED
@rtype: bool @return: C{True} if the memory in this region belongs to a mapped file.
[]
def is_mapped(self): """ @rtype: bool @return: C{True} if the memory in this region belongs to a mapped file. """ return self.Type == MEM_MAPPED
[ "def", "is_mapped", "(", "self", ")", ":", "return", "self", ".", "Type", "==", "MEM_MAPPED" ]
https://github.com/atom-community/ide-python/blob/c046f9c2421713b34baa22648235541c5bb284fe/lib/debugger/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/kernel32.py#L1094-L1099
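Typical use inside a winappdbg memory walk, as a sketch; the Process instance is assumed:

for mbi in process.get_memory_map():        # yields MemoryBasicInformation records
    if mbi.is_mapped():                     # region backed by a mapped file
        print(hex(mbi.BaseAddress), mbi.RegionSize)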
alex-cory/fasthacks
72b099f11df2e5640d61e55c80706c3b234eacbe
cli_modules/preview/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/util.py
python
doctype_matches
(text, regex)
return re.compile(regex).match(doctype.strip()) is not None
Check if the doctype matches a regular expression (if present). Note that this method only checks the first part of a DOCTYPE. eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
Check if the doctype matches a regular expression (if present). Note that this method only checks the first part of a DOCTYPE. eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
[ "Check", "if", "the", "doctype", "matches", "a", "regular", "expression", "(", "if", "present", ")", ".", "Note", "that", "this", "method", "only", "checks", "the", "first", "part", "of", "a", "DOCTYPE", ".", "eg", ":", "html", "PUBLIC", "-", "//", "W3C", "//", "DTD", "XHTML", "1", ".", "0", "Strict", "//", "EN" ]
def doctype_matches(text, regex): """ Check if the doctype matches a regular expression (if present). Note that this method only checks the first part of a DOCTYPE. eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"' """ m = doctype_lookup_re.match(text) if m is None: return False doctype = m.group(2) return re.compile(regex).match(doctype.strip()) is not None
[ "def", "doctype_matches", "(", "text", ",", "regex", ")", ":", "m", "=", "doctype_lookup_re", ".", "match", "(", "text", ")", "if", "m", "is", "None", ":", "return", "False", "doctype", "=", "m", ".", "group", "(", "2", ")", "return", "re", ".", "compile", "(", "regex", ")", ".", "match", "(", "doctype", ".", "strip", "(", ")", ")", "is", "not", "None" ]
https://github.com/alex-cory/fasthacks/blob/72b099f11df2e5640d61e55c80706c3b234eacbe/cli_modules/preview/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/util.py#L172-L182
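Calling it directly, with a regex adapted from pygments' own html_doctype_matches helper; the markup sample is illustrative:

text = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN">\n<html/>'
assert doctype_matches(text, r'html\s+PUBLIC\s+"-//W3C//DTD X?HTML.*')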
mozilla/spidernode
aafa9e5273f954f272bb4382fc007af14674b4c2
deps/spidershim/spidermonkey/python/mozbuild/mozbuild/frontend/reader.py
python
is_read_allowed
(path, config)
return False
Whether we are allowed to load a mozbuild file at the specified path. This is used as cheap security to ensure the build is isolated to known source directories. We are allowed to read from the main source directory and any defined external source directories. The latter is to allow 3rd party applications to hook into our build system.
Whether we are allowed to load a mozbuild file at the specified path.
[ "Whether", "we", "are", "allowed", "to", "load", "a", "mozbuild", "file", "at", "the", "specified", "path", "." ]
def is_read_allowed(path, config): """Whether we are allowed to load a mozbuild file at the specified path. This is used as cheap security to ensure the build is isolated to known source directories. We are allowed to read from the main source directory and any defined external source directories. The latter is to allow 3rd party applications to hook into our build system. """ assert os.path.isabs(path) assert os.path.isabs(config.topsrcdir) path = mozpath.normpath(path) topsrcdir = mozpath.normpath(config.topsrcdir) if mozpath.basedir(path, [topsrcdir]): return True if config.external_source_dir: external_dir = os.path.normcase(config.external_source_dir) norm_path = os.path.normcase(path) if mozpath.basedir(norm_path, [external_dir]): return True return False
[ "def", "is_read_allowed", "(", "path", ",", "config", ")", ":", "assert", "os", ".", "path", ".", "isabs", "(", "path", ")", "assert", "os", ".", "path", ".", "isabs", "(", "config", ".", "topsrcdir", ")", "path", "=", "mozpath", ".", "normpath", "(", "path", ")", "topsrcdir", "=", "mozpath", ".", "normpath", "(", "config", ".", "topsrcdir", ")", "if", "mozpath", ".", "basedir", "(", "path", ",", "[", "topsrcdir", "]", ")", ":", "return", "True", "if", "config", ".", "external_source_dir", ":", "external_dir", "=", "os", ".", "path", ".", "normcase", "(", "config", ".", "external_source_dir", ")", "norm_path", "=", "os", ".", "path", ".", "normcase", "(", "path", ")", "if", "mozpath", ".", "basedir", "(", "norm_path", ",", "[", "external_dir", "]", ")", ":", "return", "True", "return", "False" ]
https://github.com/mozilla/spidernode/blob/aafa9e5273f954f272bb4382fc007af14674b4c2/deps/spidershim/spidermonkey/python/mozbuild/mozbuild/frontend/reader.py#L144-L169
Nexedi/erp5
44df1959c0e21576cf5e9803d602d95efb4b695b
product/ERP5/Document/BusinessTemplate.py
python
FilesystemDocumentTemplateItem._getPath
(self, key)
return key
Magical method to extract real path
Magical method to extract real path
[ "Magical", "method", "to", "extract", "real", "path" ]
def _getPath(self, key): """Magical method to extract real path""" if '/' in key: return key.split('/')[1] return key
[ "def", "_getPath", "(", "self", ",", "key", ")", ":", "if", "'/'", "in", "key", ":", "return", "key", ".", "split", "(", "'/'", ")", "[", "1", "]", "return", "key" ]
https://github.com/Nexedi/erp5/blob/44df1959c0e21576cf5e9803d602d95efb4b695b/product/ERP5/Document/BusinessTemplate.py#L3836-L3840
Luvata/CS224N-2019
1f8baf847ea943b6fdc81e82f500ee25ed36f221
CS224N-2019/a5-v1.2/nmt_model.py
python
NMT.forward
(self, source: List[List[str]], target: List[List[str]])
return scores
Take a mini-batch of source and target sentences, compute the log-likelihood of target sentences under the language models learned by the NMT system. @param source (List[List[str]]): list of source sentence tokens @param target (List[List[str]]): list of target sentence tokens, wrapped by `<s>` and `</s>` @returns scores (Tensor): a variable/tensor of shape (b, ) representing the log-likelihood of generating the gold-standard target sentence for each example in the input batch. Here b = batch size.
Take a mini-batch of source and target sentences, compute the log-likelihood of target sentences under the language models learned by the NMT system.
[ "Take", "a", "mini", "-", "batch", "of", "source", "and", "target", "sentences", "compute", "the", "log", "-", "likelihood", "of", "target", "sentences", "under", "the", "language", "models", "learned", "by", "the", "NMT", "system", "." ]
def forward(self, source: List[List[str]], target: List[List[str]]) -> torch.Tensor: """ Take a mini-batch of source and target sentences, compute the log-likelihood of target sentences under the language models learned by the NMT system. @param source (List[List[str]]): list of source sentence tokens @param target (List[List[str]]): list of target sentence tokens, wrapped by `<s>` and `</s>` @returns scores (Tensor): a variable/tensor of shape (b, ) representing the log-likelihood of generating the gold-standard target sentence for each example in the input batch. Here b = batch size. """ # Compute sentence lengths source_lengths = [len(s) for s in source] # Convert list of lists into tensors ## A4 code # source_padded = self.vocab.src.to_input_tensor(source, device=self.device) # Tensor: (src_len, b) # target_padded = self.vocab.tgt.to_input_tensor(target, device=self.device) # Tensor: (tgt_len, b) # enc_hiddens, dec_init_state = self.encode(source_padded, source_lengths) # enc_masks = self.generate_sent_masks(enc_hiddens, source_lengths) # combined_outputs = self.decode(enc_hiddens, enc_masks, dec_init_state, target_padded) ## End A4 code ### YOUR CODE HERE for part 1k ### TODO: ### Modify the code lines above as needed to fetch the character-level tensor ### to feed into encode() and decode(). You should: ### - Keep `target_padded` from A4 code above for predictions -> It's a list of word indices ### - Add `source_padded_chars` for character level padded encodings for source -> Char padded indices ### - Add `target_padded_chars` for character level padded encodings for target ### - Modify calls to encode() and decode() to use the character level encodings source_padded_chars = self.vocab.src.to_input_tensor_char(source, device=self.device) # (src_len, b, max_w_len) target_padded_chars = self.vocab.tgt.to_input_tensor_char(target, device=self.device) # (tgt_len, b, max_w_len) target_padded = self.vocab.tgt.to_input_tensor(target, device=self.device) # (tgt_len, b) enc_hiddens, dec_init_state = self.encode(source_padded_chars, source_lengths) enc_masks = self.generate_sent_masks(enc_hiddens, source_lengths) combined_outputs = self.decode(enc_hiddens, enc_masks, dec_init_state, target_padded_chars) ### END YOUR CODE P = F.log_softmax(self.target_vocab_projection(combined_outputs), dim=-1) # Zero out, probabilities for which we have nothing in the target text target_masks = (target_padded != self.vocab.tgt['<pad>']).float() # Compute log probability of generating true target words target_gold_words_log_prob = torch.gather(P, index=target_padded[1:].unsqueeze(-1), dim=-1).squeeze(-1) * target_masks[1:] scores = target_gold_words_log_prob.sum() # mhahn2 Small modification from A4 code. if self.charDecoder is not None: max_word_len = target_padded_chars.shape[-1] # remove start of word character ? target_words = target_padded[1:].contiguous().view(-1) # view : (l, b, max_w_len) -> (l * b, max_w_len) target_chars = target_padded_chars[1:].contiguous().view(-1, max_word_len) target_outputs = combined_outputs.view(-1, 256) target_chars_oov = target_chars #torch.index_select(target_chars, dim=0, index=oovIndices) rnn_states_oov = target_outputs #torch.index_select(target_outputs, dim=0, index=oovIndices) oovs_losses = self.charDecoder.train_forward(target_chars_oov.t(), (rnn_states_oov.unsqueeze(0), rnn_states_oov.unsqueeze(0))) scores = scores - oovs_losses return scores
[ "def", "forward", "(", "self", ",", "source", ":", "List", "[", "List", "[", "str", "]", "]", ",", "target", ":", "List", "[", "List", "[", "str", "]", "]", ")", "->", "torch", ".", "Tensor", ":", "# Compute sentence lengths", "source_lengths", "=", "[", "len", "(", "s", ")", "for", "s", "in", "source", "]", "# Convert list of lists into tensors", "## A4 code", "# source_padded = self.vocab.src.to_input_tensor(source, device=self.device) # Tensor: (src_len, b)", "# target_padded = self.vocab.tgt.to_input_tensor(target, device=self.device) # Tensor: (tgt_len, b)", "# enc_hiddens, dec_init_state = self.encode(source_padded, source_lengths)", "# enc_masks = self.generate_sent_masks(enc_hiddens, source_lengths)", "# combined_outputs = self.decode(enc_hiddens, enc_masks, dec_init_state, target_padded)", "## End A4 code", "### YOUR CODE HERE for part 1k", "### TODO: ", "### Modify the code lines above as needed to fetch the character-level tensor ", "### to feed into encode() and decode(). You should:", "### - Keep `target_padded` from A4 code above for predictions -> It's a list of word indices", "### - Add `source_padded_chars` for character level padded encodings for source -> Char padded indices", "### - Add `target_padded_chars` for character level padded encodings for target", "### - Modify calls to encode() and decode() to use the character level encodings", "source_padded_chars", "=", "self", ".", "vocab", ".", "src", ".", "to_input_tensor_char", "(", "source", ",", "device", "=", "self", ".", "device", ")", "# (src_len, b, max_w_len)", "target_padded_chars", "=", "self", ".", "vocab", ".", "tgt", ".", "to_input_tensor_char", "(", "target", ",", "device", "=", "self", ".", "device", ")", "# (tgt_len, b, max_w_len)", "target_padded", "=", "self", ".", "vocab", ".", "tgt", ".", "to_input_tensor", "(", "target", ",", "device", "=", "self", ".", "device", ")", "# (tgt_len, b)", "enc_hiddens", ",", "dec_init_state", "=", "self", ".", "encode", "(", "source_padded_chars", ",", "source_lengths", ")", "enc_masks", "=", "self", ".", "generate_sent_masks", "(", "enc_hiddens", ",", "source_lengths", ")", "combined_outputs", "=", "self", ".", "decode", "(", "enc_hiddens", ",", "enc_masks", ",", "dec_init_state", ",", "target_padded_chars", ")", "### END YOUR CODE", "P", "=", "F", ".", "log_softmax", "(", "self", ".", "target_vocab_projection", "(", "combined_outputs", ")", ",", "dim", "=", "-", "1", ")", "# Zero out, probabilities for which we have nothing in the target text", "target_masks", "=", "(", "target_padded", "!=", "self", ".", "vocab", ".", "tgt", "[", "'<pad>'", "]", ")", ".", "float", "(", ")", "# Compute log probability of generating true target words", "target_gold_words_log_prob", "=", "torch", ".", "gather", "(", "P", ",", "index", "=", "target_padded", "[", "1", ":", "]", ".", "unsqueeze", "(", "-", "1", ")", ",", "dim", "=", "-", "1", ")", ".", "squeeze", "(", "-", "1", ")", "*", "target_masks", "[", "1", ":", "]", "scores", "=", "target_gold_words_log_prob", ".", "sum", "(", ")", "# mhahn2 Small modification from A4 code.", "if", "self", ".", "charDecoder", "is", "not", "None", ":", "max_word_len", "=", "target_padded_chars", ".", "shape", "[", "-", "1", "]", "# remove start of word character ?", "target_words", "=", "target_padded", "[", "1", ":", "]", ".", "contiguous", "(", ")", ".", "view", "(", "-", "1", ")", "# view : (l, b, max_w_len) -> (l * b, max_w_len)", "target_chars", "=", "target_padded_chars", "[", "1", ":", "]", ".", "contiguous", "(", ")", ".", "view", "(", 
"-", "1", ",", "max_word_len", ")", "target_outputs", "=", "combined_outputs", ".", "view", "(", "-", "1", ",", "256", ")", "target_chars_oov", "=", "target_chars", "#torch.index_select(target_chars, dim=0, index=oovIndices)", "rnn_states_oov", "=", "target_outputs", "#torch.index_select(target_outputs, dim=0, index=oovIndices)", "oovs_losses", "=", "self", ".", "charDecoder", ".", "train_forward", "(", "target_chars_oov", ".", "t", "(", ")", ",", "(", "rnn_states_oov", ".", "unsqueeze", "(", "0", ")", ",", "rnn_states_oov", ".", "unsqueeze", "(", "0", ")", ")", ")", "scores", "=", "scores", "-", "oovs_losses", "return", "scores" ]
https://github.com/Luvata/CS224N-2019/blob/1f8baf847ea943b6fdc81e82f500ee25ed36f221/CS224N-2019/a5-v1.2/nmt_model.py#L65-L133
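A minimal usage sketch for the forward() above, assuming the CS224N a5 scaffold this record points at (nmt_model.NMT, vocab.Vocab); the constructor signature, the vocab file, and the sentences are assumptions for illustration. Note that target_gold_words_log_prob.sum() takes no dim argument, so as written the returned scores is a scalar summed over the batch rather than the shape (b,) described in the docstring.

from vocab import Vocab        # assumed helper module from the same assignment
from nmt_model import NMT      # the class containing the forward() above

vocab = Vocab.load("vocab.json")                           # hypothetical vocab file
model = NMT(embed_size=256, hidden_size=256, vocab=vocab)  # assumed signature

source = [["el", "gato", "duerme"], ["hola"]]         # toy batch, longest first
target = [["<s>", "the", "cat", "sleeps", "</s>"],    # targets wrapped in <s>...</s>
          ["<s>", "hello", "</s>"]]

scores = model(source, target)    # invokes forward()
loss = -scores / len(source)      # mean negative log-likelihood per example
loss.backward()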
wotermelon/toJump
3dcec5cb5d91387d415b805d015ab8d2e6ffcf5f
lib/win/systrace/catapult/third_party/pyserial/serial/rfc2217.py
python
RFC2217Serial.read
(self, size=1)
return bytes(data)
\ Read size bytes from the serial port. If a timeout is set it may return fewer characters than requested. With no timeout it will block until the requested number of bytes is read.
\ Read size bytes from the serial port. If a timeout is set it may return fewer characters than requested. With no timeout it will block until the requested number of bytes is read.
[ "\\", "Read", "size", "bytes", "from", "the", "serial", "port", ".", "If", "a", "timeout", "is", "set", "it", "may", "return", "less", "characters", "as", "requested", ".", "With", "no", "timeout", "it", "will", "block", "until", "the", "requested", "number", "of", "bytes", "is", "read", "." ]
def read(self, size=1):
        """\
        Read size bytes from the serial port. If a timeout is set it may
        return fewer characters than requested. With no timeout it will block
        until the requested number of bytes is read.
        """
        if not self._isOpen: raise portNotOpenError
        data = bytearray()
        try:
            while len(data) < size:
                if self._thread is None:
                    raise SerialException('connection failed (reader thread died)')
                data.append(self._read_buffer.get(True, self._timeout))
        except Queue.Empty:  # -> timeout
            pass
        return bytes(data)
[ "def", "read", "(", "self", ",", "size", "=", "1", ")", ":", "if", "not", "self", ".", "_isOpen", ":", "raise", "portNotOpenError", "data", "=", "bytearray", "(", ")", "try", ":", "while", "len", "(", "data", ")", "<", "size", ":", "if", "self", ".", "_thread", "is", "None", ":", "raise", "SerialException", "(", "'connection failed (reader thread died)'", ")", "data", ".", "append", "(", "self", ".", "_read_buffer", ".", "get", "(", "True", ",", "self", ".", "_timeout", ")", ")", "except", "Queue", ".", "Empty", ":", "# -> timeout", "pass", "return", "bytes", "(", "data", ")" ]
https://github.com/wotermelon/toJump/blob/3dcec5cb5d91387d415b805d015ab8d2e6ffcf5f/lib/win/systrace/catapult/third_party/pyserial/serial/rfc2217.py#L568-L583
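A short usage sketch for read(): serial_for_url() is pyserial's documented entry point for rfc2217:// URLs and returns an instance of this class; the host and port below are placeholders for a real telnet-to-serial bridge such as ser2net.

import serial

# Placeholder endpoint; substitute a reachable RFC 2217 server.
ser = serial.serial_for_url("rfc2217://example.com:2217",
                            baudrate=9600, timeout=1.0)
chunk = ser.read(16)   # up to 16 bytes; may be shorter if the timeout expires
print(len(chunk), repr(chunk))
ser.close()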
finos/perspective
910799a5c981ab501fd907f34a21b0ef5a9a914c
python/perspective/bench/runtime/perspective_benchmark.py
python
PerspectiveBenchmark.benchmark_table
(self)
Benchmark table creation from different formats.
Benchmark table creation from different formats.
[ "Benchmark", "table", "creation", "from", "different", "formats", "." ]
def benchmark_table(self):
        """Benchmark table creation from different formats."""
        for name in ("df", "dict", "records"):
            data = getattr(self, name)
            test_meta = make_meta("table", name)
            # Bind `data` as a default argument so each lambda captures this
            # iteration's dataset instead of the loop variable's final value.
            func = Benchmark(lambda data=data: Table(data), meta=test_meta)
            setattr(self, "table_{0}".format(name), func)
[ "def", "benchmark_table", "(", "self", ")", ":", "for", "name", "in", "(", "\"df\"", ",", "\"dict\"", ",", "\"records\"", ")", ":", "data", "=", "getattr", "(", "self", ",", "name", ")", "test_meta", "=", "make_meta", "(", "\"table\"", ",", "name", ")", "func", "=", "Benchmark", "(", "lambda", ":", "Table", "(", "data", ")", ",", "meta", "=", "test_meta", ")", "setattr", "(", "self", ",", "\"table_{0}\"", ".", "format", "(", "name", ")", ",", "func", ")" ]
https://github.com/finos/perspective/blob/910799a5c981ab501fd907f34a21b0ef5a9a914c/python/perspective/bench/runtime/perspective_benchmark.py#L95-L101
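A hedged sketch of driving the benchmarks this method attaches. Only benchmark_table() is shown in this record, so the zero-argument constructor, the self.df / self.dict / self.records attributes, and the assumption that a Benchmark object is callable are all guesses about the surrounding file.

suite = PerspectiveBenchmark()     # assumed to prepare df/dict/records data
suite.benchmark_table()            # attaches table_df, table_dict, table_records

for name in ("df", "dict", "records"):
    bench = getattr(suite, "table_{0}".format(name))
    bench()                        # times Table(...) construction for this format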
replit-archive/jsrepl
36d79b6288ca5d26208e8bade2a168c6ebcb2376
extern/python/unclosured/lib/python2.7/lib2to3/refactor.py
python
get_fixers_from_package
(pkg_name)
return [pkg_name + "." + fix_name for fix_name in get_all_fix_names(pkg_name, False)]
Return the fully qualified names for fixers in the package pkg_name.
Return the fully qualified names for fixers in the package pkg_name.
[ "Return", "the", "fully", "qualified", "names", "for", "fixers", "in", "the", "package", "pkg_name", "." ]
def get_fixers_from_package(pkg_name): """ Return the fully qualified names for fixers in the package pkg_name. """ return [pkg_name + "." + fix_name for fix_name in get_all_fix_names(pkg_name, False)]
[ "def", "get_fixers_from_package", "(", "pkg_name", ")", ":", "return", "[", "pkg_name", "+", "\".\"", "+", "fix_name", "for", "fix_name", "in", "get_all_fix_names", "(", "pkg_name", ",", "False", ")", "]" ]
https://github.com/replit-archive/jsrepl/blob/36d79b6288ca5d26208e8bade2a168c6ebcb2376/extern/python/unclosured/lib/python2.7/lib2to3/refactor.py#L103-L108
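An example of the helper in use, runnable against the standard-library lib2to3 (same API as this vendored copy) on interpreters that still ship it; the exact fixer names returned depend on the Python version.

from lib2to3.refactor import get_fixers_from_package, RefactoringTool

fixers = get_fixers_from_package("lib2to3.fixes")
print(fixers[:2])   # e.g. ['lib2to3.fixes.fix_apply', 'lib2to3.fixes.fix_basestring']

# The fully qualified names are exactly what RefactoringTool expects:
tool = RefactoringTool(fixers)
print(tool.refactor_string("print 'hi'\n", "<example>"))   # -> print('hi')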
nodejs/node
ac3c33c1646bf46104c15ae035982c06364da9b8
deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/make.py
python
WriteAutoRegenerationRule
(params, root_makefile, makefile_name, build_files)
Write the target to regenerate the Makefile.
Write the target to regenerate the Makefile.
[ "Write", "the", "target", "to", "regenerate", "the", "Makefile", "." ]
def WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files): """Write the target to regenerate the Makefile.""" options = params["options"] build_files_args = [ gyp.common.RelativePath(filename, options.toplevel_dir) for filename in params["build_files_arg"] ] gyp_binary = gyp.common.FixIfRelativePath( params["gyp_binary"], options.toplevel_dir ) if not gyp_binary.startswith(os.sep): gyp_binary = os.path.join(".", gyp_binary) root_makefile.write( "quiet_cmd_regen_makefile = ACTION Regenerating $@\n" "cmd_regen_makefile = cd $(srcdir); %(cmd)s\n" "%(makefile_name)s: %(deps)s\n" "\t$(call do_cmd,regen_makefile)\n\n" % { "makefile_name": makefile_name, "deps": " ".join(SourceifyAndQuoteSpaces(bf) for bf in build_files), "cmd": gyp.common.EncodePOSIXShellList( [gyp_binary, "-fmake"] + gyp.RegenerateFlags(options) + build_files_args ), } )
[ "def", "WriteAutoRegenerationRule", "(", "params", ",", "root_makefile", ",", "makefile_name", ",", "build_files", ")", ":", "options", "=", "params", "[", "\"options\"", "]", "build_files_args", "=", "[", "gyp", ".", "common", ".", "RelativePath", "(", "filename", ",", "options", ".", "toplevel_dir", ")", "for", "filename", "in", "params", "[", "\"build_files_arg\"", "]", "]", "gyp_binary", "=", "gyp", ".", "common", ".", "FixIfRelativePath", "(", "params", "[", "\"gyp_binary\"", "]", ",", "options", ".", "toplevel_dir", ")", "if", "not", "gyp_binary", ".", "startswith", "(", "os", ".", "sep", ")", ":", "gyp_binary", "=", "os", ".", "path", ".", "join", "(", "\".\"", ",", "gyp_binary", ")", "root_makefile", ".", "write", "(", "\"quiet_cmd_regen_makefile = ACTION Regenerating $@\\n\"", "\"cmd_regen_makefile = cd $(srcdir); %(cmd)s\\n\"", "\"%(makefile_name)s: %(deps)s\\n\"", "\"\\t$(call do_cmd,regen_makefile)\\n\\n\"", "%", "{", "\"makefile_name\"", ":", "makefile_name", ",", "\"deps\"", ":", "\" \"", ".", "join", "(", "SourceifyAndQuoteSpaces", "(", "bf", ")", "for", "bf", "in", "build_files", ")", ",", "\"cmd\"", ":", "gyp", ".", "common", ".", "EncodePOSIXShellList", "(", "[", "gyp_binary", ",", "\"-fmake\"", "]", "+", "gyp", ".", "RegenerateFlags", "(", "options", ")", "+", "build_files_args", ")", ",", "}", ")" ]
https://github.com/nodejs/node/blob/ac3c33c1646bf46104c15ae035982c06364da9b8/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/make.py#L2190-L2216
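A pure-string illustration of the rule this function writes. The values below are invented stand-ins for what gyp.common.RelativePath, RegenerateFlags, and EncodePOSIXShellList would compute in a real run; only the template itself is taken from the function above.

makefile_name = "Makefile"
deps = "all.gyp common.gypi"                    # hypothetical build files
cmd = "./gyp -fmake --toplevel-dir=. all.gyp"   # hypothetical regen command

rule = (
    "quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
    "cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
    "%(makefile_name)s: %(deps)s\n"
    "\t$(call do_cmd,regen_makefile)\n\n"
    % {"makefile_name": makefile_name, "deps": deps, "cmd": cmd}
)
print(rule)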