repo (string, 1-29 chars) | path (string, 24-332 chars) | code (string, 39-579k chars)
---|---|---|
pathtemplater-1.0.0.dev7 | pathtemplater-1.0.0.dev7//pathtemplater/pathtemplater.pyclass:PathTemplater/_get_setfilesuffix_methodname | def _get_setfilesuffix_methodname(altsuffix_name):
"""
Return the name of the method that sets the file suffix to `altsuffix_name`,
e.g. `logfile()` for setting it to `log`.
"""
return altsuffix_name + 'file'
|
pyglottolog-3.2.1 | pyglottolog-3.2.1//src/pyglottolog/references/libmonster.pyfile:/src/pyglottolog/references/libmonster.py:function:rangecomplete/rangecomplete | def rangecomplete(incomplete, complete):
"""
>>> rangecomplete('2', '10')
'12'
"""
if len(complete) > len(incomplete):
return complete[:len(complete) - len(incomplete)] + incomplete
return incomplete
|
tittles | tittles//tittles.pyfile:/tittles.py:function:dicarray_find_key_value/dicarray_find_key_value | def dicarray_find_key_value(dica, key, value):
"""Find dics in dicarray with [key: value] pair."""
dics_ = []
for d_ in dica:
if isinstance(d_, dict) and d_.get(key) == value:
dics_.append(d_)
return dics_
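# Illustrative usage sketch (not part of the original package): only dicts
# carrying the requested key/value pair are returned.
assert dicarray_find_key_value(
    [{'a': 1}, {'a': 2}, {'b': 1}, 'not-a-dict'], 'a', 1) == [{'a': 1}]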
|
pycrc | pycrc//codegen.pyfile:/codegen.py:function:_use_inline_crc_finalize/_use_inline_crc_finalize | def _use_inline_crc_finalize(opt):
"""
Return True if the finalize function can be inlined.
"""
return opt.algorithm in set([opt.algo_bit_by_bit_fast, opt.algo_table_driven]) and (
    opt.width is not None and opt.reflect_in is not None and
    opt.reflect_out is not None and opt.xor_out is not None)
|
fastavro | fastavro//_validation_py.pyfile:/_validation_py.py:function:validate_boolean/validate_boolean | def validate_boolean(datum, **kwargs):
"""
Check that the data value is a bool instance
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return isinstance(datum, bool)
|
swarmops | swarmops//PSO.pyclass:PSO/parameters_list | @staticmethod
def parameters_list(num_particles, omega, phi_p, phi_g):
"""
Create a list with PSO parameters in the correct order.
:param num_particles: Number of particles for the PSO swarm.
:param omega: The omega parameter (aka. inertia weight) for the PSO.
:param phi_p: The phi_p parameter (aka. particle weight) for the PSO.
:param phi_g: The phi_g parameter (aka. social weight) for the PSO.
:return: List with PSO parameters.
"""
return [num_particles, omega, phi_p, phi_g]
|
changelogfromtags-0.4.5 | changelogfromtags-0.4.5//changelogfromtags/_version.pyfile:/changelogfromtags/_version.py:function:render_git_describe_long/render_git_describe_long | def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces['closest-tag']:
rendered = pieces['closest-tag']
rendered += '-%d-g%s' % (pieces['distance'], pieces['short'])
else:
rendered = pieces['short']
if pieces['dirty']:
rendered += '-dirty'
return rendered
|
Pympler-0.8 | Pympler-0.8//pympler/util/stringutils.pyfile:/pympler/util/stringutils.py:function:trunc/trunc | def trunc(obj, max, left=0):
"""
Convert `obj` to a string, replace newlines with ``|`` and truncate the
result to `max` characters, appending ``...`` if the string had to be
shortened. With `left=True`, the string is truncated at the beginning instead.
@note: Does not catch exceptions when converting `obj` to string with
`str`.
>>> trunc('This is a long text.', 8)
'This ...'
>>> trunc('This is a long text.', 8, left=True)
'...text.'
"""
s = str(obj)
s = s.replace('\n', '|')
if len(s) > max:
if left:
return '...' + s[len(s) - max + 3:]
else:
return s[:max - 3] + '...'
else:
return s
|
pyboto3-1.4.4 | pyboto3-1.4.4//pyboto3/iam.pyfile:/pyboto3/iam.py:function:delete_access_key/delete_access_key | def delete_access_key(UserName=None, AccessKeyId=None):
"""
Deletes the access key pair associated with the specified IAM user.
If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.
See also: AWS API Documentation
Examples
The following command deletes one access key (access key ID and secret access key) assigned to the IAM user named Bob.
Expected Output:
:example: response = client.delete_access_key(
UserName='string',
AccessKeyId='string'
)
:type UserName: string
:param UserName: The name of the user whose access key pair you want to delete.
This parameter allows (per its regex pattern ) a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-
:type AccessKeyId: string
:param AccessKeyId: [REQUIRED]
The access key ID for the access key ID and secret access key you want to delete.
This parameter allows (per its regex pattern ) a string of characters that can consist of any upper or lowercased letter or digit.
:return: response = client.delete_access_key(
AccessKeyId='AKIDPMS9RO4H3FEXAMPLE',
UserName='Bob',
)
print(response)
"""
pass
|
dns-lexicon-3.3.22 | dns-lexicon-3.3.22//lexicon/providers/rackspace.pyfile:/lexicon/providers/rackspace.py:function:provider_parser/provider_parser | def provider_parser(subparser):
"""Configure provider parser for Rackspace"""
subparser.add_argument('--auth-account', help=
'specify account number for authentication')
subparser.add_argument('--auth-username', help=
'specify username for authentication. Only used if --auth-token is empty.'
)
subparser.add_argument('--auth-api-key', help=
'specify api key for authentication. Only used if --auth-token is empty.'
)
subparser.add_argument('--auth-token', help=
'specify token for authentication. If empty, the username and api key will be used to create a token.'
)
subparser.add_argument('--sleep-time', type=float, default=1, help=
'number of seconds to wait between update requests.')
|
buildbot | buildbot//interfaces.pyclass:IStatus/getWorker | def getWorker(name):
"""Return the IWorkerStatus object for a given named worker."""
|
gepyto | gepyto//structures/genes.pyfile:/structures/genes.py:function:_parse_gene/_parse_gene | def _parse_gene(o):
"""Parse gene information from an Ensembl `overlap` query.
:param o: A Python dict representing an entry from the response.
:type o: dict
:returns: A standardised dict of gene information.
:rtype: dict
"""
assert o['feature_type'] == 'gene'
d = {'desc': o.get('description'), 'build': o.get('assembly_name'),
'chrom': o.get('seq_region_name'), 'start': o.get('start'), 'end':
o.get('end'), 'strand': o.get('strand'), 'biotype': o.get('biotype')}
return d
|
PyQtPurchasing-5.14.0 | PyQtPurchasing-5.14.0//configure.pyclass:ModuleConfiguration/get_sip_flags | @staticmethod
def get_sip_flags(target_configuration):
""" Return the list of module-specific flags to pass to SIP.
target_configuration is the target configuration.
"""
major = target_configuration.qtp_version >> 16 & 255
minor = target_configuration.qtp_version >> 8 & 255
patch = target_configuration.qtp_version & 255
if (major, minor) >= (5, 13):
patch = 0
elif (major, minor) == (5, 12):
if patch > 4:
patch = 4
version_tag = 'QtPurchasing_%d_%d_%d' % (major, minor, patch)
return ['-t', version_tag]
|
django-anger-0.1.1-20130516 | django-anger-0.1.1-20130516//django_anger/migration_utils.pyfile:/django_anger/migration_utils.py:function:forwards_contents/forwards_contents | def forwards_contents(f):
"""
Return a string containing everything in the forwards() method of a
migration.
This string may not be safe to evaluate.
"""
forwards_found = False
forwards_lines = []
for line in f:
if 'def backwards(self, orm):' in line:
break
if forwards_found:
forwards_lines.append(line)
elif 'def forwards(self, orm):' in line:
forwards_found = True
return ''.join(forwards_lines)
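# Illustrative usage sketch (not part of the original package): any iterable of
# lines works; the made-up migration below only exercises the parsing logic.
_example_lines = [
    'class Migration(SchemaMigration):\n',
    '    def forwards(self, orm):\n',
    "        db.rename_column('app_model', 'old', 'new')\n",
    '    def backwards(self, orm):\n',
    '        pass\n',
]
assert forwards_contents(_example_lines) == "        db.rename_column('app_model', 'old', 'new')\n"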
|
python-ndn-0.2b2.post1 | python-ndn-0.2b2.post1//src/ndn/contrib/cocoapy/runtime.pyfile:/src/ndn/contrib/cocoapy/runtime.py:function:parse_type_encoding/parse_type_encoding | def parse_type_encoding(encoding):
"""Takes a type encoding string and outputs a list of the separated type codes.
Currently does not handle unions or bitfields and strips out any field width
specifiers or type specifiers from the encoding. For Python 3.2+, encoding is
assumed to be a bytes object and not unicode.
Examples:
parse_type_encoding('^v16@0:8') --> ['^v', '@', ':']
parse_type_encoding('{CGSize=dd}40@0:8{CGSize=dd}16Q32') --> ['{CGSize=dd}', '@', ':', '{CGSize=dd}', 'Q']
"""
type_encodings = []
brace_count = 0
bracket_count = 0
typecode = b''
for c in encoding:
if isinstance(c, int):
c = bytes([c])
if c == b'{':
if typecode and typecode[-1:
] != b'^' and brace_count == 0 and bracket_count == 0:
type_encodings.append(typecode)
typecode = b''
typecode += c
brace_count += 1
elif c == b'}':
typecode += c
brace_count -= 1
assert brace_count >= 0
elif c == b'[':
if typecode and typecode[-1:
] != b'^' and brace_count == 0 and bracket_count == 0:
type_encodings.append(typecode)
typecode = b''
typecode += c
bracket_count += 1
elif c == b']':
typecode += c
bracket_count -= 1
assert bracket_count >= 0
elif brace_count or bracket_count:
typecode += c
elif c in b'0123456789':
pass
elif c in b'rnNoORV':
pass
elif c in b'^cislqCISLQfdBv*@#:b?':
if typecode and typecode[-1:] == b'^':
typecode += c
else:
if typecode:
type_encodings.append(typecode)
typecode = c
if typecode:
type_encodings.append(typecode)
return type_encodings
|
seglearn | seglearn//transform.pyfile:/transform.py:function:last/last | def last(y):
""" Returns the last column from 2d matrix """
return y[:, (y.shape[1] - 1)]
|
stagpy | stagpy//processing.pyfile:/processing.py:function:dtime/dtime | def dtime(sdat, tstart=None, tend=None):
"""Time increment dt.
Compute dt as a function of time.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): time at which the computation should start. Use the
beginning of the time series data if set to None.
tend (float): time at which the computation should end. Use the
end of the time series data if set to None.
Returns:
tuple of :class:`numpy.array`: dt and time arrays.
"""
tseries = sdat.tseries_between(tstart, tend)
time = tseries['t'].values
return time[1:] - time[:-1], time[:-1]
|
mailman | mailman//interfaces/mta.pyclass:IMailTransportAgentDelivery/deliver | def deliver(mlist, msg, msgdata):
"""Deliver a message to a mailing list's recipients.
Ordinarily the mailing list is consulted for delivery specifics,
however the message metadata dictionary can contain additional
directions to control delivery. Specifics are left to the
implementation, but there are a few common keys:
* envelope_sender - the email address of the RFC 2821 envelope sender;
* decorated - a flag indicating whether the message has been decorated
with headers and footers yet;
* recipients - the set of all recipients who should receive this
message, as a set of email addresses;
:param mlist: The mailing list being delivered to.
:type mlist: `IMailingList`
:param msg: The original message being delivered.
:type msg: `Message`
:param msgdata: Additional message metadata for this delivery.
:type msgdata: dictionary
:return: delivery failures as defined by `smtplib.SMTP.sendmail`
:rtype: dictionary
"""
|
twisted | twisted//internet/endpoints.pyfile:/internet/endpoints.py:function:_parseUNIX/_parseUNIX | def _parseUNIX(factory, address, mode='666', backlog=50, lockfile=True):
"""
Internal parser function for L{_parseServer} to convert the string
arguments for a UNIX (AF_UNIX/SOCK_STREAM) stream endpoint into the
structured arguments.
@param factory: the protocol factory being parsed, or L{None}. (This was a
leftover argument from when this code was in C{strports}, and is now
mostly None and unused.)
@type factory: L{IProtocolFactory} or L{None}
@param address: the pathname of the unix socket
@type address: C{str}
@param backlog: the length of the listen queue
@type backlog: C{str}
@param lockfile: A string '0' or '1', mapping to False and True
respectively. See the C{wantPID} argument to C{listenUNIX}.
@return: a 2-tuple of (args, kwargs), describing the parameters to
L{twisted.internet.interfaces.IReactorUNIX.listenUNIX} (or, modulo the
factory argument, to L{UNIXServerEndpoint}).
"""
return (address, factory), {'mode': int(mode, 8), 'backlog': int(
backlog), 'wantPID': bool(int(lockfile))}
|
wokkel-18.0.0 | wokkel-18.0.0//wokkel/iwokkel.pyclass:IMUCClient/leave | def leave(roomJID):
"""
Leave a MUC room.
See: http://xmpp.org/extensions/xep-0045.html#exit
@param roomJID: The Room JID of the room to leave.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
|
qcfractal-0.13.1 | qcfractal-0.13.1//qcfractal/storage_sockets/db_queries.pyclass:QueryBase/_raise_missing_attribute | @staticmethod
def _raise_missing_attribute(cls, query_key, missing_attribute, amend_msg=''):
"""Raises error for missing attribute in a message suitable for the REST user"""
raise AttributeError(
f'To query {cls._class_name} for {query_key} you must provide {missing_attribute}.'
)
|
fury-0.5.1 | fury-0.5.1//versioneer.pyfile:/versioneer.py:function:render_pep440_pre/render_pep440_pre | def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance']:
rendered += '.post.dev%d' % pieces['distance']
else:
rendered = '0.post.dev%d' % pieces['distance']
return rendered
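# Illustrative usage sketch (not part of the original package), using a minimal
# 'pieces' dict with only the keys this renderer reads:
assert render_pep440_pre({'closest-tag': '1.2.0', 'distance': 3}) == '1.2.0.post.dev3'
assert render_pep440_pre({'closest-tag': None, 'distance': 3}) == '0.post.dev3'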
|
beast-1.3.2 | beast-1.3.2//beast/physicsmodel/stars/simpletable.pyfile:/beast/physicsmodel/stars/simpletable.py:function:_latex_writeto/_latex_writeto | def _latex_writeto(filename, tab, comments='%'):
""" Write the data into a latex table format
Parameters
----------
filename: str
file or unit to write into
tab: SimpleTable instance
table
comments: str
string to prepend to header lines (not used by this writer)
"""
txt = '\\begin{table}\n\\begin{center}\n'
tabname = tab.header.get('NAME', None)
if tabname not in ['', None, 'None']:
txt += '\\caption{{{0:s}}}\n'.format(tabname)
txt += '\\begin{{tabular}}{{{0:s}}}\n'.format('c' * tab.ncols)
txt += tab.pprint(delim=' & ', fields='MAG*', headerChar='', endline=
'\\\\\n', all=True, ret=True)
txt += '\\end{tabular}\n'
txt += '\\end{center}\n'
if len(tab._desc) > 0:
txt += '\\% notes \n\\begin{scriptsize}\n'
for e, (k, v) in enumerate(tab._desc.items()):
if v not in (None, 'None', 'none', ''):
txt += '{0:d} {1:s}: {2:s} \\\\\n'.format(e, k, v)
txt += '\\end{scriptsize}\n'
txt += '\\end{table}\n'
if hasattr(filename, 'write'):
filename.write(txt)
else:
with open(filename, 'w') as unit:
unit.write(txt)
|
chemml | chemml//utils/utilities.pyfile:/utils/utilities.py:function:list_del_indices/list_del_indices | def list_del_indices(mylist, indices):
"""
iteratively remove elements of a list by indices
Parameters
----------
mylist : list
the list of elements of interest
indices : list
the list of indices of elements that should be removed
Returns
-------
list
the input list with the indexed elements removed
"""
for index in sorted(indices, reverse=True):
del mylist[index]
return mylist
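# Illustrative usage sketch (not part of the original package): indices refer
# to the original positions; deleting from the end keeps them valid.
assert list_del_indices(['a', 'b', 'c', 'd'], [0, 2]) == ['b', 'd']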
|
dropbox_compatiable-3.38 | dropbox_compatiable-3.38//dropbox/rest.pyclass:RESTClient/GET | @classmethod
def GET(cls, *n, **kw):
"""Perform a GET request using :meth:`RESTClient.request()`."""
return cls.IMPL.GET(*n, **kw)
|
datarobot-2.20.2 | datarobot-2.20.2//datarobot/models/model.pyclass:FrozenModel/get | @classmethod
def get(cls, project_id, model_id):
"""
Retrieve a specific frozen model.
Parameters
----------
project_id : str
The project's id.
model_id : str
The ``model_id`` of the leaderboard item to retrieve.
Returns
-------
model : FrozenModel
The queried instance.
"""
url = cls._frozen_path_template.format(project_id) + model_id + '/'
return cls.from_location(url)
|
Djamo-2.67.0-rc2 | Djamo-2.67.0-rc2//djamo/document.pyclass:Document/serialize_item | @classmethod
def serialize_item(cls, item, args=None):
"""
Serialize a query that stored in ``item`` tuple like: (key, value)
"""
fields = getattr(cls, 'fields', None)
if fields and isinstance(fields, dict):
if item[0] in fields:
return fields[item[0]].serialize(item[1], params=args)
return {item[0]: item[1]}
|
zxbasic-1.9.2 | zxbasic-1.9.2//arch/zx48k/backend/__float.pyfile:/arch/zx48k/backend/__float.py:function:_fpop/_fpop | def _fpop():
""" Returns the pop sequence of a float
"""
output = []
output.append('pop af')
output.append('pop de')
output.append('pop bc')
return output
|
pkcs1-0.9.6 | pkcs1-0.9.6//pkcs1/primitives.pyfile:/pkcs1/primitives.py:function:integer_bit_size/integer_bit_size | def integer_bit_size(n):
"""Returns the number of bits necessary to store the integer n."""
if n == 0:
return 1
s = 0
while n:
s += 1
n >>= 1
return s
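# Quick self-check (illustrative, not part of the original package):
# 255 fits in 8 bits, 256 needs 9, and 0 is special-cased to 1.
assert integer_bit_size(255) == 8 and integer_bit_size(256) == 9
assert integer_bit_size(0) == 1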
|
tengri | tengri//weather.pyfile:/weather.py:function:get_valid_place/get_valid_place | def get_valid_place(place):
""" Return valid place.
Strip spaces and check for empty value."""
place = place.strip()
if not place:
raise ValueError('empty string')
return place
|
graphtransliterator-1.1.2 | graphtransliterator-1.1.2//graphtransliterator/ambiguity.pyfile:/graphtransliterator/ambiguity.py:function:_count_of_prev/_count_of_prev | def _count_of_prev(rule):
"""Count previous tokens to be present before a match in a rule."""
return len(rule.prev_classes or []) + len(rule.prev_tokens or [])
|
botocore-1.16.14 | botocore-1.16.14//botocore/docs/utils.pyfile:/botocore/docs/utils.py:function:get_official_service_name/get_official_service_name | def get_official_service_name(service_model):
"""Generate the official name of an AWS Service
:param service_model: The service model representing the service
"""
official_name = service_model.metadata.get('serviceFullName')
short_name = service_model.metadata.get('serviceAbbreviation', '')
if short_name.startswith('Amazon'):
short_name = short_name[7:]
if short_name.startswith('AWS'):
short_name = short_name[4:]
if short_name and short_name.lower() not in official_name.lower():
official_name += ' ({0})'.format(short_name)
return official_name
|
zetalib-0.4.5 | zetalib-0.4.5//zetalib/util.pyfile:/zetalib/util.py:function:is_subpolynomial/is_subpolynomial | def is_subpolynomial(f, a):
"""Test if `a' is a sum of terms of the polynomial `f'.
"""
a = f.parent()(a)
return len(f.monomials()) == len((f - a).monomials()) + len(a.monomials())
|
neuropredict | neuropredict//compare.pyfile:/compare.py:function:check_if_better/check_if_better | def check_if_better(rank_one, rank_two, critical_dist):
"""Checks whether rank1 is greater than rank2 by at least critical dist"""
is_better = rank_one - rank_two >= critical_dist
return is_better
|
ftw.publisher.sender-2.14.0 | ftw.publisher.sender-2.14.0//ftw/publisher/sender/workflows/interfaces.pyclass:IConstraintDefinition/__init__ | def __init__(context, request):
"""Adapts context and request.
"""
|
python-debian-0.1.37 | python-debian-0.1.37//lib/debian/debian_support.pyclass:NativeVersion/_order | @classmethod
def _order(cls, x):
"""Return an integer value for character x"""
if x == '~':
return -1
if cls.re_digit.match(x):
return int(x) + 1
if cls.re_alpha.match(x):
return ord(x)
return ord(x) + 256
|
twc | twc//twutils.pyfile:/twutils.py:function:_process_sort_string/_process_sort_string | def _process_sort_string(sort_string):
"""Returns a list of tuples: (sort condition, reverse sort)"""
cmp = []
for attrname in sort_string.split(','):
attrname = attrname.strip()
rev = False
if attrname.endswith('+'):
attrname = attrname[:-1]
elif attrname.endswith('-'):
attrname = attrname[:-1]
rev = True
cmp.append((attrname, rev))
return cmp
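# Illustrative usage sketch (not part of the original package): a trailing '-'
# requests a reverse sort, a trailing '+' (or no suffix) a normal one.
assert _process_sort_string('priority-, due+, project') == [
    ('priority', True), ('due', False), ('project', False)]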
|
i18ndude-5.3.3 | i18ndude-5.3.3//src/i18ndude/untranslated.pyfile:/src/i18ndude/untranslated.py:function:_tal_replaced_content/_tal_replaced_content | def _tal_replaced_content(tag, attrs):
"""Will the data get replaced by tal?
So: is there a tal:content or tal:replace? Or is it '<tal:block
content="..."'? Problem with that last one is that the lxml html
parser strips off the 'tal:' namespace, unlike the standard lxml
parser that is tried first.
"""
if 'tal:content' in attrs or 'content' in attrs:
return True
if 'tal:replace' in attrs or 'replace' in attrs:
return True
return False
|
smartblinds_client | smartblinds_client//smartblinds.pyfile:/smartblinds.py:function:chunks/chunks | def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
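# Illustrative usage sketch (not part of the original package):
assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]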
|
libnum | libnum//stuff.pyfile:/stuff.py:function:factorial_get_prime_pow/factorial_get_prime_pow | def factorial_get_prime_pow(n, p):
"""
Return power of prime @p in @n!
"""
count = 0
ppow = p
while ppow <= n:
count += n // ppow
ppow *= p
return count
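# Worked example (illustrative, not part of the original package): by
# Legendre's formula the power of 5 in 100! is 100//5 + 100//25 = 24.
assert factorial_get_prime_pow(100, 5) == 24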
|
EMDT | EMDT//mining.pyclass:Mine/get_diff_number_from_list | @staticmethod
def get_diff_number_from_list(list):
""" 获得密度化文本中不同的行数统计号
Input:密度化文本 -- list类型
Outpt:降序排列的行数统计号序列 -- list类型
"""
number_list = []
for i in list:
if type(i) == int and i not in number_list:
number_list.append(i)
return sorted(number_list, reverse=True)
|
PyEIS | PyEIS//PyEIS_Lin_KK.pyfile:/PyEIS_Lin_KK.py:function:KK_RC44/KK_RC44 | def KK_RC44(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
Kristian B. Knudsen ([email protected] / [email protected])
"""
# Series resistance plus 44 parallel -RC- elements; same terms and
# accumulation order as the original explicit sum.
out = Rs
for k in range(44):
    out = out + R_values[k] / (1 + w * 1.0j * t_values[k])
return out
|
lightnimage-0.0.0.14 | lightnimage-0.0.0.14//lightnimage/engine.pyclass:SimpleAreaGroupingEngine/area_size | @staticmethod
def area_size(area):
"""
Returns the size of the area, by multiplying width and height.
CHANGELOG
Added 05.12.2018
:param Tuple(Tuple(int,int)) area: The tuple describing the two-dimensional area in the x-y plane
:return: int
"""
width = area[0][1] - area[0][0]
height = area[1][1] - area[1][0]
return width * height
|
spylon-0.3.0 | spylon-0.3.0//versioneer.pyfile:/versioneer.py:function:render_pep440_old/render_pep440_old | def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance'] or pieces['dirty']:
rendered += '.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
else:
rendered = '0.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
return rendered
|
mercurial-5.4 | mercurial-5.4//mercurial/interfaces/repository.pyclass:imanifestrevisionstored/readfast | def readfast(shallow=False):
"""Calls either ``read()`` or ``readdelta()``.
The faster of the two options is called.
"""
|
UP-Manager-0.0.0 | UP-Manager-0.0.0//scripts/manage_translations.pyfile:/scripts/manage_translations.py:function:_tx_resource_for_name/_tx_resource_for_name | def _tx_resource_for_name(name):
""" Return the Transifex resource name """
if name == 'core':
return 'django-core.core'
else:
return 'django-core.contrib-%s' % name
|
ib_dl-1.5.3 | ib_dl-1.5.3//versioneer.pyfile:/versioneer.py:function:plus_or_dot/plus_or_dot | def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if '+' in pieces.get('closest-tag', ''):
return '.'
return '+'
|
A3MIO-0.0.1 | A3MIO-0.0.1//A3MIO.pyfile:/A3MIO.py:function:pad/pad | def pad(x, l, pad_char='-'):
"""Pad x with pad_char characters until it has length l"""
return x + pad_char * (l - len(x))
|
hypernetx-0.3.4 | hypernetx-0.3.4//hypernetx/classes/entity.pyclass:Entity/merge_entities | @staticmethod
def merge_entities(name, ent1, ent2):
"""
Merge two entities making sure they do not conflict.
Parameters
----------
name : hashable
ent1 : Entity
First entity to have elements and properties added to new
entity
ent2 : Entity
elements of ent2 will be checked against ent1.complete_registry()
and only nonexisting elements will be added using add() method.
Properties of ent2 will update properties of ent1 in new entity.
Returns
-------
a new entity : Entity
"""
newent = ent1.clone(name)
newent.add_elements_from(ent2.elements.values())
for k, v in ent2.properties.items():
newent.__setattr__(k, v)
return newent
|
tangible | tangible//utils.pyfile:/utils.py:function:_quads_to_triangles/_quads_to_triangles | def _quads_to_triangles(quads):
"""Convert a list of quads to a list of triangles.
:param quads: The list of quads.
:type quads: list of 4-tuples
:returns: List of triangles.
:rtype: list of 3-tuples
"""
triangles = []
for quad in quads:
triangles.append((quad[0], quad[1], quad[2]))
triangles.append((quad[0], quad[2], quad[3]))
return triangles
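# Illustrative usage sketch (not part of the original package): a single quad
# splits into two triangles sharing the (0, 2) diagonal.
assert _quads_to_triangles([(0, 1, 2, 3)]) == [(0, 1, 2), (0, 2, 3)]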
|
nbsite-0.6.7 | nbsite-0.6.7//nbsite/examples/sites/holoviews/holoviews/element/raster.pyclass:RGB/load_image | @classmethod
def load_image(cls, filename, height=1, array=False, bounds=None, bare=
False, **kwargs):
"""
Returns a raster element or raw numpy array from a PNG image
file, using matplotlib.
The specified height determines the bounds of the raster
object in sheet coordinates: by default the height is 1 unit
with the width scaled appropriately by the image aspect ratio.
Note that as PNG images are encoded as RGBA, the red component
maps to the first channel, the green component maps to the
second component etc. For RGB elements, this mapping is
trivial but may be important for subclasses e.g. for HSV
elements.
Setting bare=True will apply options disabling axis labels
displaying just the bare image. Any additional keyword
arguments will be passed to the Image object.
"""
try:
from matplotlib import pyplot as plt
except ImportError:
raise ImportError('RGB.load_image requires matplotlib.')
data = plt.imread(filename)
if array:
return data
h, w, _ = data.shape
if bounds is None:
f = float(height) / h
xoffset, yoffset = w * f / 2, h * f / 2
bounds = -xoffset, -yoffset, xoffset, yoffset
rgb = cls(data, bounds=bounds, **kwargs)
if bare:
rgb = rgb(plot=dict(xaxis=None, yaxis=None))
return rgb
|
nibabel | nibabel//cifti2/cifti2.pyclass:Cifti2Image/from_image | @classmethod
def from_image(klass, img):
""" Class method to create new instance of own class from `img`
Parameters
----------
img : instance
In fact, an object with the API of :class:`DataobjImage`.
Returns
-------
cimg : instance
Image, of our own class
"""
if isinstance(img, klass):
return img
raise NotImplementedError
|
econ-ark-0.10.6 | econ-ark-0.10.6//HARK/utilities.pyfile:/HARK/utilities.py:function:CRRAutilityPPP/CRRAutilityPPP | def CRRAutilityPPP(c, gam):
"""
Evaluates constant relative risk aversion (CRRA) marginal marginal marginal
utility of consumption c given risk aversion parameter gam.
Parameters
----------
c : float
Consumption value
gam : float
Risk aversion
Returns
-------
(unnamed) : float
Marginal marginal marginal utility
"""
return (gam + 1.0) * gam * c ** (-gam - 2.0)
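# Worked example (illustrative, not part of the original package): with c = 1
# the power term is 1, so the value reduces to (gam + 1) * gam = 6 for gam = 2.
assert CRRAutilityPPP(1.0, 2.0) == 6.0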
|
co2sim-3.0.0 | co2sim-3.0.0//src/co2mpas/model/physical/engine/co2_emission.pyfile:/src/co2mpas/model/physical/engine/co2_emission.py:function:calculate_fuel_consumptions/calculate_fuel_consumptions | def calculate_fuel_consumptions(co2_emissions, fuel_carbon_content):
"""
Calculates the instantaneous fuel consumption vector [g/s].
:param co2_emissions:
CO2 instantaneous emissions vector [CO2g/s].
:type co2_emissions: numpy.array
:param fuel_carbon_content:
Fuel carbon content [CO2g/g].
:type fuel_carbon_content: float
:return:
The instantaneous fuel consumption vector [g/s].
:rtype: numpy.array
"""
return co2_emissions / fuel_carbon_content
|
oscarapi | oscarapi//utils/request.pyfile:/utils/request.py:function:get_domain/get_domain | def get_domain(request):
"""
Get the domain name parsed from a hostname:port string
>>> class FakeRequest(object):
... def __init__(self, url):
... self.url = url
... def get_host(self):
... return self.url
>>> req = FakeRequest("example.com:5984")
>>> get_domain(req)
'example.com'
"""
return request.get_host().split(':')[0]
|
bpy | bpy//ops/gpencil.pyfile:/ops/gpencil.py:function:duplicate_move/duplicate_move | def duplicate_move(GPENCIL_OT_duplicate=None, TRANSFORM_OT_translate=None):
"""Make copies of the selected Grease Pencil strokes and move them
:param GPENCIL_OT_duplicate: Duplicate Strokes, Duplicate the selected Grease Pencil strokes
:param TRANSFORM_OT_translate: Move, Move selected items
"""
pass
|
ccdproc-2.1.0 | ccdproc-2.1.0//ccdproc/core.pyfile:/ccdproc/core.py:function:setbox/setbox | def setbox(x, y, mbox, xmax, ymax):
"""
Create a box of length mbox around a position x,y. If the box will
be out of [0,len] then reset the edges of the box to be within the
boundaries.
Parameters
----------
x : int
Central x-position of box.
y : int
Central y-position of box.
mbox : int
Width of box.
xmax : int
Maximum x value.
ymax : int
Maximum y value.
Returns
-------
x1 : int
Lower x corner of box.
x2 : int
Upper x corner of box.
y1 : int
Lower y corner of box.
y2 : int
Upper y corner of box.
"""
mbox = max(int(0.5 * mbox), 1)
y1 = max(0, y - mbox)
y2 = min(y + mbox + 1, ymax - 1)
x1 = max(0, x - mbox)
x2 = min(x + mbox + 1, xmax - 1)
return x1, x2, y1, y2
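# Worked example (illustrative, not part of the original package): a width-3
# box around (5, 5) in a 10 x 10 frame has half-width 1 and stays in bounds.
assert setbox(5, 5, 3, 10, 10) == (4, 7, 4, 7)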
|
irlutils-0.1.4 | irlutils-0.1.4//url/crawl/database_utils.pyfile:/url/crawl/database_utils.py:function:list_placeholder/list_placeholder | def list_placeholder(length, is_pg=False):
"""Returns a (?,?,?,?...) string of the desired length"""
return '(' + '?,' * (length - 1) + '?)'
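# Illustrative usage sketch (not part of the original package):
assert list_placeholder(3) == '(?,?,?)'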
|
davtelepot-2.5.9 | davtelepot-2.5.9//davtelepot/utilities.pyfile:/davtelepot/utilities.py:function:get_line_by_content/get_line_by_content | def get_line_by_content(text, key):
"""Get line of `text` containing `key`."""
for line in text.split('\n'):
if key in line:
return line
return
|
PyGraphics-2.1 | PyGraphics-2.1//cpython/media.pyfile:/cpython/media.py:function:get_pixel/get_pixel | def get_pixel(pic, x, y):
"""Return the Pixel object at the coordinates (x, y) in Picture pic."""
return pic.get_pixel(x, y)
|
pyasdf-0.6.1 | pyasdf-0.6.1//pyasdf/utils.pyfile:/pyasdf/utils.py:function:_read_string_array/_read_string_array | def _read_string_array(data):
"""
Helper function taking a string data set and preparing it so it can be
read to a BytesIO object.
"""
return data[()].tostring().strip(b'\x00 ').strip()
|
pygame | pygame//midi.pyfile:/midi.py:function:midi_to_frequency/midi_to_frequency | def midi_to_frequency(midi_note):
""" Converts a midi note to a frequency.
::Examples::
>>> midi_to_frequency(21)
27.5
>>> midi_to_frequency(26)
36.7
>>> midi_to_frequency(108)
4186.0
"""
return round(440.0 * 2 ** ((midi_note - 69) * (1.0 / 12.0)), 1)
|
glean | glean//_dispatcher.pyclass:Dispatcher/reset | @classmethod
def reset(cls):
"""
Reset the dispatcher so the queue is cleared, and it is reset into
queueing mode.
"""
cls._queue_initial_tasks = True
cls._preinit_task_queue = []
cls._overflow_count = 0
|
fake-bpy-module-2.78-20200428 | fake-bpy-module-2.78-20200428//bmesh/ops.pyfile:/bmesh/ops.py:function:create_icosphere/create_icosphere | def create_icosphere(bm: 'bmesh.types.BMesh', subdivisions: int, diameter:
float, matrix: 'mathutils.Matrix', calc_uvs: bool) ->dict:
"""Creates a grid with a variable number of subdivisions
:param bm: The bmesh to operate on.
:type bm: 'bmesh.types.BMesh'
:param subdivisions: how many times to recursively subdivide the sphere
:type subdivisions: int
:param diameter: diameter
:type diameter: float
:param matrix: matrix to multiply the new geometry with
:type matrix: 'mathutils.Matrix'
:param calc_uvs: calculate default UVs
:type calc_uvs: bool
:return: verts: output vertstype list of (bmesh.types.BMVert)
"""
pass
|
yodapy-0.3.0b0 | yodapy-0.3.0b0//versioneer.pyfile:/versioneer.py:function:render_pep440_pre/render_pep440_pre | def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance']:
rendered += '.post.dev%d' % pieces['distance']
else:
rendered = '0.post.dev%d' % pieces['distance']
return rendered
|
grokcore.formlib-3.0.1 | grokcore.formlib-3.0.1//src/grokcore/formlib/formlib.pyfile:/src/grokcore/formlib/formlib.py:function:apply_data/apply_data | def apply_data(context, form_fields, data, adapters=None, update=False):
"""Save form data (``data`` dict) on a ``context`` object.
This is a beefed up version of zope.formlib.form.applyChanges().
It allows you to specify whether values should be compared with
the attributes on already existing objects or not, using the
``update`` parameter.
Unlike zope.formlib.form.applyChanges(), it will return a
dictionary of interfaces and their fields that were changed. This
is necessary to appropriately send IObjectModifiedEvents.
"""
if adapters is None:
adapters = {}
changes = {}
for form_field in form_fields:
field = form_field.field
interface = form_field.interface
adapter = adapters.get(interface)
if adapter is None:
if interface is None:
adapter = context
else:
adapter = interface(context)
adapters[interface] = adapter
name = form_field.__name__
newvalue = data.get(name, form_field)
if update:
if newvalue is not form_field and field.get(adapter) != newvalue:
field.set(adapter, newvalue)
changes.setdefault(interface, []).append(name)
elif newvalue is not form_field:
field.set(adapter, newvalue)
changes.setdefault(interface, []).append(name)
return changes
|
grmpy | grmpy//estimate/estimate_semipar.pyfile:/estimate/estimate_semipar.py:function:process_choice_data/process_choice_data | def process_choice_data(dict_, data):
"""This functions processes the inputs for the
decision equation"""
indicator = dict_['ESTIMATION']['indicator']
D = data[indicator].values
Z = data[dict_['CHOICE']['order']]
return indicator, D, Z
|
orionframework | orionframework//utils/lists.pyfile:/utils/lists.py:function:add_all/add_all | def add_all(array, other):
"""
Append all items of the second array into the first one.
:param array:
:param other:
:return:
"""
for item in other:
array.append(item)
return array
|
ndtamr | ndtamr//NDTree.pyfile:/NDTree.py:function:restrict_datafunc/restrict_datafunc | def restrict_datafunc(n):
"""Use the function in the Data class."""
return n._data_class(coords=n.coords)
|
zope.dublincore-4.2.0 | zope.dublincore-4.2.0//src/zope/dublincore/interfaces.pyclass:IWritableGeneralDublinCore/setQualifiedContributors | def setQualifiedContributors(qualified_contributors):
"""Set the qualified Contributors elements.
The argument must be a sequence of Contributor `IDublinCoreElementItem`.
"""
|
scrapy | scrapy//extensions/httpcache.pyfile:/extensions/httpcache.py:function:parse_cachecontrol/parse_cachecontrol | def parse_cachecontrol(header):
"""Parse Cache-Control header
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
>>> parse_cachecontrol(b'public, max-age=3600') == {b'public': None,
... b'max-age': b'3600'}
True
>>> parse_cachecontrol(b'') == {}
True
"""
directives = {}
for directive in header.split(b','):
key, sep, val = directive.strip().partition(b'=')
if key:
directives[key.lower()] = val if sep else None
return directives
|
sense_emu | sense_emu//common.pyfile:/common.py:function:clamp/clamp | def clamp(value, min_value, max_value):
"""
Return *value* clipped to the range *min_value* to *max_value* inclusive.
"""
return min(max_value, max(min_value, value))
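# Illustrative usage sketch (not part of the original package):
assert clamp(11.2, 0, 10) == 10 and clamp(-3, 0, 10) == 0 and clamp(5, 0, 10) == 5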
|
allmydata | allmydata//interfaces.pyclass:IEncryptedUploadable/read_encrypted | def read_encrypted(length, hash_only):
"""This behaves just like IUploadable.read(), but returns crypttext
instead of plaintext. If hash_only is True, then this discards the
data (and returns an empty list); this improves efficiency when
resuming an interrupted upload (where we need to compute the
plaintext hashes, but don't need the redundant encrypted data)."""
|
oemof | oemof//tools/economics.pyfile:/tools/economics.py:function:annuity/annuity | def annuity(capex, n, wacc, u=None, cost_decrease=0):
"""Calculates the annuity of an initial investment 'capex', considering
the cost of capital 'wacc' during a project horizon 'n'
In case of a single initial investment, the employed formula reads:
.. math::
\\text{annuity} = \\text{capex} \\cdot
\\frac{(\\text{wacc} \\cdot (1+\\text{wacc})^n)}
{((1 + \\text{wacc})^n - 1)}
In case of repeated investments (due to replacements) at fixed intervals
'u', the formula yields:
.. math::
\\text{annuity} = \\text{capex} \\cdot
\\frac{(\\text{wacc} \\cdot (1+\\text{wacc})^n)}
{((1 + \\text{wacc})^n - 1)} \\cdot \\left(
\\frac{1 - \\left( \\frac{(1-\\text{cost\\_decrease})}
{(1+\\text{wacc})} \\right)^n}
{1 - \\left(\\frac{(1-\\text{cost\\_decrease})}{(1+\\text{wacc})}
\\right)^u} \\right)
Parameters
----------
capex : float
Capital expenditure for first investment. Net Present Value (NPV) or
Net Present Cost (NPC) of investment
n : int
Horizon of the analysis, or number of years the annuity wants to be
obtained for (n>=1)
wacc : float
Weighted average cost of capital (0<wacc<1)
u : int
Lifetime of the investigated investment. Might be smaller than the
analysis horizon, 'n', meaning it will have to be replaced.
Takes value 'n' if not specified otherwise (u>=1)
cost_decrease : float
Annual rate of cost decrease (due to, e.g., price experience curve).
This only influences the result for investments corresponding to
replacements, whenever u<n.
Takes value 0, if not specified otherwise (0<cost_decrease<1)
Returns
-------
float
annuity
"""
if u is None:
u = n
if n < 1 or (wacc < 0 or wacc > 1) or u < 1 or (cost_decrease < 0 or
cost_decrease > 1):
raise ValueError("Input arguments for 'annuity' out of bounds!")
return capex * (wacc * (1 + wacc) ** n) / ((1 + wacc) ** n - 1) * ((1 -
((1 - cost_decrease) / (1 + wacc)) ** n) / (1 - ((1 - cost_decrease
) / (1 + wacc)) ** u))
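# Worked example (illustrative, not part of the original package): a single
# 1000 unit investment over n=20 years at wacc=0.05 gives
# 1000 * 0.05 * 1.05**20 / (1.05**20 - 1), roughly 80.24 per year.
assert round(annuity(1000, 20, 0.05), 2) == 80.24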
|
fake-bpy-module-2.79-20200428 | fake-bpy-module-2.79-20200428//bpy/ops/time.pyfile:/bpy/ops/time.py:function:view_all/view_all | def view_all():
"""Show the entire playable frame range
"""
pass
|
qhist-0.1.2 | qhist-0.1.2//qhist/utils.pyfile:/qhist/utils.py:function:prefer_first/prefer_first | def prefer_first(*vals):
"""
This idiom is used so often...
Pick the first value unless it's None.
Usage::
>>> prefer_first( True, 42 )
True
>>> prefer_first( None, 42 )
42
## Still pick 1st one if it's not None
>>> prefer_first( False, 42 )
False
>>> prefer_first( 1, 2, 3 )
1
>>> prefer_first( None, 2, 3 )
2
>>> prefer_first( None, None, 3 )
3
>>> prefer_first( None, None, None ) is None
True
"""
for val in vals:
if val is not None:
return val
return val
|
noseapp_selenium-1.2.3 | noseapp_selenium-1.2.3//noseapp_selenium/tools.pyfile:/noseapp_selenium/tools.py:function:set_default_to_meta/set_default_to_meta | def set_default_to_meta(meta, key, default_value):
"""
Set default value to dict of meta
"""
if callable(default_value):
default_value = default_value()
meta.setdefault(key, default_value)
|
LbNightlyTools | LbNightlyTools//Scripts/Common.pyfile:/Scripts/Common.py:function:addBuildDirOptions/addBuildDirOptions | def addBuildDirOptions(parser):
"""
Add build directory specific options to the parser.
"""
from optparse import OptionGroup
group = OptionGroup(parser, 'Build Dir Options')
group.add_option('--clean', action='store_true', help=
'purge the build directory before building')
group.add_option('--no-clean', action='store_false', dest='clean',
    help='do not purge the build directory before building')
group.add_option('--no-unpack', action='store_true', help=
'assume that the sources are already present')
parser.add_option_group(group)
parser.set_defaults(clean=False, no_unpack=False)
return parser
|
openfisca_france | openfisca_france//model/prelevements_obligatoires/impot_revenu/reductions_impot.pyclass:domlog/formula_2016_01_01 | def formula_2016_01_01(foyer_fiscal, period, parameters):
"""
Investissements OUTRE-MER dans le secteur du logement et autres secteurs d'activité
(overseas investments in housing and other business sectors)
2016
"""
# Sum of the amounts declared in the relevant tax-return boxes, in the same
# order as the original explicit expression.
cases = [
    'fhqb', 'fhqc', 'fhqd', 'fhql', 'fhqm', 'fhqt', 'fhoa', 'fhob', 'fhoc',
    'fhoh', 'fhoi', 'fhoj', 'fhok', 'fhol', 'fhom', 'fhon', 'fhoo', 'fhop',
    'fhoq', 'fhor', 'fhos', 'fhot', 'fhou', 'fhov', 'fhow', 'fhod', 'fhoe',
    'fhof', 'fhog', 'fhox', 'fhoy', 'fhoz', 'fhua', 'fhub', 'fhuc', 'fhud',
    'fhue', 'fhuf', 'fhug', 'fhuh', 'fhui', 'fhuj', 'fhuk', 'fhul', 'fhum',
    'fhun', 'fhuo', 'fhup', 'fhuq', 'fhur', 'fhus', 'fhut', 'fhuu']
return sum(foyer_fiscal(case, period) for case in cases)
|
fake-bpy-module-2.80-20200428 | fake-bpy-module-2.80-20200428//bpy/ops/clip.pyfile:/bpy/ops/clip.py:function:copy_tracks/copy_tracks | def copy_tracks():
"""Copy selected tracks to clipboard
"""
pass
|
spot_motion_monitor | spot_motion_monitor//utils/config_helpers.pyfile:/utils/config_helpers.py:function:convertValueOrNone/convertValueOrNone | def convertValueOrNone(value, convert=int):
"""Convert a value to a type unless NoneType.
Parameters
----------
value : anything
The value to possibly convert.
convert : callable, optional
The type (or other callable) used for the conversion. Defaults to int.
Returns
-------
convert type
The converted value.
"""
return value if value is None else convert(value)
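# Illustrative usage sketch (not part of the original package): None passes
# through untouched, anything else goes through the converter.
assert convertValueOrNone('42') == 42
assert convertValueOrNone(None) is None
assert convertValueOrNone('3.5', convert=float) == 3.5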
|
neuralnet_pytorch | neuralnet_pytorch//utils.pyfile:/utils.py:function:dimshuffle/dimshuffle | def dimshuffle(x, pattern):
"""
Reorders the dimensions of this variable, optionally inserting broadcasted dimensions.
Inspired by `Theano's dimshuffle`_.
.. _Theano's dimshuffle:
https://github.com/Theano/Theano/blob/d395439aec5a6ddde8ef5c266fd976412a5c5695/theano/tensor/var.py#L323-L356
:param x:
Input tensor.
:param pattern:
List/tuple of int mixed with 'x' for broadcastable dimensions.
:return:
a tensor whose shape matches `pattern`.
Examples
--------
To create a 3D view of a [2D] matrix, call ``dimshuffle(x, [0,'x',1])``.
This will create a 3D view such that the
middle dimension is an implicit broadcasted dimension. To do the same
thing on the transpose of that matrix, call ``dimshuffle(x, [1, 'x', 0])``.
See Also
--------
:func:`~neuralnet_pytorch.utils.shape_padleft`
:func:`~neuralnet_pytorch.utils.shape_padright`
"""
assert isinstance(pattern, (list, tuple)), 'pattern must be a list/tuple'
no_expand_pattern = [x for x in pattern if x != 'x']
y = x.permute(*no_expand_pattern)
shape = list(y.shape)
for idx, e in enumerate(pattern):
if e == 'x':
shape.insert(idx, 1)
return y.view(*shape)
|
pcbasic | pcbasic//basic/memory/arrays.pyclass:Arrays/_record_size | @staticmethod
def _record_size(name, dimensions):
"""Calculate size of array record in bytes."""
return 1 + max(3, len(name)) + 3 + 2 * len(dimensions)
|
pyDEA | pyDEA//core/data_processing/targets_and_slacks.pyfile:/core/data_processing/targets_and_slacks.py:function:calculate_radial_reduction/calculate_radial_reduction | def calculate_radial_reduction(dmu_code, category, data, efficiency_score,
orientation):
""" Calculates radial reduction for a given DMU and category.
Args:
dmu_code (str): DMU code.
category (str): category name.
data (InputData): object that stores input data.
efficiency_score (double): efficiency score.
orientation (str): problem orientation, can take values
input or output.
Returns:
double: radial reduction value.
"""
if orientation == 'input':
if category in data.output_categories:
return 0
objective_value = efficiency_score
elif orientation == 'output':
if category in data.input_categories:
return 0
objective_value = 1 / efficiency_score
return (objective_value - 1) * data.coefficients[dmu_code, category]
|
hdmf_docutils | hdmf_docutils//doctools/renderrst.pyclass:SpecToRST/quantity_to_string | @staticmethod
def quantity_to_string(quantity):
"""
Helper function to convert a quantity identifier from the schema to a consistent
string for use in RST documentation
:param quantity: Quantity string used in the format specification
:return: String describing the quantity
"""
qdict = {'*': '0 or more', 'zero_or_more': '0 or more', '+':
'1 or more', 'one_or_more': '1 or more', '?': '0 or 1',
'zero_or_one': '0 or 1'}
if isinstance(quantity, int):
return str(quantity)
else:
return qdict[quantity]
|
nbsite-0.6.7 | nbsite-0.6.7//nbsite/gallery/thumbnailer.pyfile:/nbsite/gallery/thumbnailer.py:function:strip_trailing_semicolons/strip_trailing_semicolons | def strip_trailing_semicolons(source, function):
"""
Given the source of a cell, strip the trailing semicolon from lines that
contain a call to the specified function and end in a semicolon.
"""
filtered = []
for line in source.splitlines():
if line.endswith(f'{function}();'):
filtered.append(line[:-1])
else:
filtered.append(line)
return '\n'.join(filtered)
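# Illustrative usage sketch (not part of the original package): only lines
# ending in 'show();' lose the trailing semicolon; other lines are kept as-is.
assert strip_trailing_semicolons('plot();\nshow();', 'show') == 'plot();\nshow()'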
|
baseband-3.1.1 | baseband-3.1.1//baseband/vlbi_base/payload.pyclass:VLBIPayloadBase/fromdata | @classmethod
def fromdata(cls, data, header=None, bps=2):
"""Encode data as a payload.
Parameters
----------
data : `~numpy.ndarray`
Data to be encoded. The last dimension is taken as the number of
channels.
header : header instance, optional
If given, used to infer the bps.
bps : int, optional
Bits per elementary sample, i.e., per channel and per real or
imaginary component, used if header is not given. Default: 2.
"""
sample_shape = data.shape[1:]
complex_data = data.dtype.kind == 'c'
if header:
bps = header.bps
try:
encoder = cls._encoders[bps]
except KeyError:
raise ValueError('{0} cannot encode data with {1} bits'.format(cls.
__name__, bps))
if complex_data:
data = data.view((data.real.dtype, (2,)))
words = encoder(data).ravel().view(cls._dtype_word)
return cls(words, sample_shape=sample_shape, bps=bps, complex_data=
complex_data)
|
websync-0.2.1 | websync-0.2.1//websync/dict.pyfile:/websync/dict.py:function:mergeDicts/mergeDicts | def mergeDicts(*dict_args):
"""
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
"""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
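# Illustrative usage sketch (not part of the original package): later dicts win.
assert mergeDicts({'a': 1, 'b': 1}, {'b': 2}) == {'a': 1, 'b': 2}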
|
pya2l-0.0.1 | pya2l-0.0.1//pya2l/parser/grammar/parser.pyclass:A2lParser/p_daq_list_can_id_type_variable | @staticmethod
def p_daq_list_can_id_type_variable(p):
"""daq_list_can_id_type_variable : VARIABLE"""
p[0] = p[1]
|
hpack-3.0.0 | hpack-3.0.0//hpack/table.pyfile:/hpack/table.py:function:table_entry_size/table_entry_size | def table_entry_size(name, value):
"""
Calculates the size of a single entry
This size is mostly irrelevant to us and defined
specifically to accommodate memory management for
lower level implementations. The 32 extra bytes are
considered the "maximum" overhead that would be
required to represent each entry in the table.
See RFC7541 Section 4.1
"""
return 32 + len(name) + len(value)
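# Worked example (illustrative, not part of the original package): the header
# field ':method: GET' costs 32 + 7 + 3 = 42 octets under this accounting.
assert table_entry_size(b':method', b'GET') == 42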
|
lava-tool-0.11.1 | lava-tool-0.11.1//lava/tool/command.pyclass:Command/get_name | @classmethod
def get_name(cls):
"""
Return the name of this command.
The default implementation strips any leading underscores and replaces
all other underscores with dashes.
"""
return cls.__name__.lstrip('_').replace('_', '-')
|
luckydonaldUtils | luckydonaldUtils//interactions.pyfile:/interactions.py:function:string_y_n/string_y_n | def string_y_n(string, default=None):
"""
Strict mapping of a given string to a boolean.
If it is `'y'` or `'Y'`, `True` is returned.
If it is `'n'` or `'N'`, `False` is returned.
If it is empty or None (evaluates to False), and `default` is set, `default` is returned.
Else a `ValueError` is raised.
:param string: The input
:type string: str
:param default: default result for empty input
:type default: None | bool
:raises ValueError: If it is not any of ['y', 'Y', 'n', 'N']
:return: result (True/False/default)
:rtype: bool
"""
if not string and default is not None:
return default
if string not in ['y', 'Y', 'n', 'N']:
raise ValueError('Please enter y or n.')
if string == 'y' or string == 'Y':
return True
if string == 'n' or string == 'N':
return False
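# Illustrative usage sketch (not part of the original package):
assert string_y_n('Y') is True
assert string_y_n('n') is False
assert string_y_n('', default=True) is True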
|
malduck | malduck//ints.pyclass:MetaIntType/invert_mask | @property
def invert_mask(cls):
"""
Mask for sign bit
"""
return 2 ** cls.bits >> 1
|
pype3 | pype3//trees.pyfile:/trees.py:function:pype_return_f_args/pype_return_f_args | def pype_return_f_args(accum, *fArgs):
"""
FArgs is a tuple, but we want it to be a list - it's just neater.
"""
return list(fArgs)
|
idtrackerai-3.0.22a0 | idtrackerai-3.0.22a0//idtrackerai/accumulation_manager.pyclass:AccumulationManager/set_fragment_temporary_id | @staticmethod
def set_fragment_temporary_id(fragment, temporary_id, P1_array,
index_individual_fragment):
"""Given a P1 array relative to a global fragment sets to 0 the row relative to fragment
which is temporarily identified with identity temporary_id
Parameters
----------
fragment : Fragment
Fragment object containing images associated with a single individual
temporary_id : int
temporary identifier associated to fragment
P1_array : nd.array
P1 vector of fragment
index_individual_fragment : int
Index of fragment with respect to a global fragment in which it is contained
Returns
-------
P1_array : nd.array
updated P1 array
"""
fragment._temporary_id = int(temporary_id)
P1_array[(index_individual_fragment), :] = 0.0
P1_array[:, (temporary_id)] = 0.0
return P1_array
|
eli5 | eli5//sklearn/utils.pyfile:/sklearn/utils.py:function:is_multitarget_regressor/is_multitarget_regressor | def is_multitarget_regressor(clf):
"""
Return True if a regressor is multitarget
or False if it predicts a single target.
"""
return len(clf.coef_.shape) > 1 and clf.coef_.shape[0] > 1
|
utterson-0.3.2 | utterson-0.3.2//bin/utterson_lib/core.pyfile:/bin/utterson_lib/core.py:function:check_key/check_key | def check_key(key, expected_chr):
"""Checks the key against the expected_chr. Specifically running ord() on the expected_chr"""
if key == ord(expected_chr.upper()) or key == ord(expected_chr.lower()):
return True
return False
|
crux-0.0.14 | crux-0.0.14//crux/_utils.pyfile:/crux/_utils.py:function:valid_chunk_size/valid_chunk_size | def valid_chunk_size(chunk_size):
"""Checks whether chunk size is multiple of 256 KiB.
Args:
chunk_size (int): Input chunk_size to be validated.
Returns:
bool: True if chunk_size is multiple of 256 KiB, False otherwise.
"""
return not bool(chunk_size % 262144)
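# Illustrative usage sketch (not part of the original package): 1 MiB is four
# 256 KiB blocks, so it is valid; 300000 bytes is not a multiple of 262144.
assert valid_chunk_size(4 * 262144) is True
assert valid_chunk_size(300000) is False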
|
mdfreader-4.0 | mdfreader-4.0//mdfreader/mdf4reader.pyfile:/mdfreader/mdf4reader.py:function:_linear_conversion/_linear_conversion | def _linear_conversion(vector, cc_val):
""" apply linear conversion to data
Parameters
----------------
vector : numpy 1D array
raw data to be converted to physical value
cc_val : mdfinfo4.info4 conversion block ('CCBlock') dict
Returns
-----------
converted data to physical value
"""
p1 = cc_val[0]
p2 = cc_val[1]
if p2 == 1.0 and p1 in (0.0, -0.0):
return vector
else:
return vector * p2 + p1
|
constclust-0.1.1 | constclust-0.1.1//versioneer.pyfile:/versioneer.py:function:plus_or_dot/plus_or_dot | def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if '+' in pieces.get('closest-tag', ''):
return '.'
return '+'
|