code (stringlengths 75–104k) | code_sememe (stringlengths 47–309k) | token_type (stringlengths 215–214k) | code_dependency (stringlengths 75–155k)
---|---|---|---
def crypto_box_seal_open(ciphertext, pk, sk):
"""
Decrypts and returns an encrypted message ``ciphertext``, using the
recipient's secret key ``sk`` and the sender's ephemeral public key
embedded in the sealed box. The box construct's nonce is derived from
the recipient's public key ``pk`` and the sender's public key.
:param ciphertext: bytes
:param pk: bytes
:param sk: bytes
:rtype: bytes
.. versionadded:: 1.2
"""
ensure(isinstance(ciphertext, bytes),
"input ciphertext must be bytes",
raising=TypeError)
ensure(isinstance(pk, bytes),
"public key must be bytes",
raising=TypeError)
ensure(isinstance(sk, bytes),
"secret key must be bytes",
raising=TypeError)
if len(pk) != crypto_box_PUBLICKEYBYTES:
raise exc.ValueError("Invalid public key")
if len(sk) != crypto_box_SECRETKEYBYTES:
raise exc.ValueError("Invalid secret key")
_clen = len(ciphertext)
ensure(_clen >= crypto_box_SEALBYTES,
("Input cyphertext must be "
"at least {} long").format(crypto_box_SEALBYTES),
raising=exc.TypeError)
_mlen = _clen - crypto_box_SEALBYTES
# zero-length malloc results are implementation-dependent
plaintext = ffi.new("unsigned char[]", max(1, _mlen))
res = lib.crypto_box_seal_open(plaintext, ciphertext, _clen, pk, sk)
ensure(res == 0, "An error occurred trying to decrypt the message",
raising=exc.CryptoError)
return ffi.buffer(plaintext, _mlen)[:] | def function[crypto_box_seal_open, parameter[ciphertext, pk, sk]]:
constant[
Decrypts and returns an encrypted message ``ciphertext``, using the
recipient's secret key ``sk`` and the sender's ephemeral public key
embedded in the sealed box. The box construct's nonce is derived from
the recipient's public key ``pk`` and the sender's public key.
:param ciphertext: bytes
:param pk: bytes
:param sk: bytes
:rtype: bytes
.. versionadded:: 1.2
]
call[name[ensure], parameter[call[name[isinstance], parameter[name[ciphertext], name[bytes]]], constant[input ciphertext must be bytes]]]
call[name[ensure], parameter[call[name[isinstance], parameter[name[pk], name[bytes]]], constant[public key must be bytes]]]
call[name[ensure], parameter[call[name[isinstance], parameter[name[sk], name[bytes]]], constant[secret key must be bytes]]]
if compare[call[name[len], parameter[name[pk]]] not_equal[!=] name[crypto_box_PUBLICKEYBYTES]] begin[:]
<ast.Raise object at 0x7da204622470>
if compare[call[name[len], parameter[name[sk]]] not_equal[!=] name[crypto_box_SECRETKEYBYTES]] begin[:]
<ast.Raise object at 0x7da18bc73df0>
variable[_clen] assign[=] call[name[len], parameter[name[ciphertext]]]
call[name[ensure], parameter[compare[name[_clen] greater_or_equal[>=] name[crypto_box_SEALBYTES]], call[constant[Input ciphertext must be at least {} long].format, parameter[name[crypto_box_SEALBYTES]]]]]
variable[_mlen] assign[=] binary_operation[name[_clen] - name[crypto_box_SEALBYTES]]
variable[plaintext] assign[=] call[name[ffi].new, parameter[constant[unsigned char[]], call[name[max], parameter[constant[1], name[_mlen]]]]]
variable[res] assign[=] call[name[lib].crypto_box_seal_open, parameter[name[plaintext], name[ciphertext], name[_clen], name[pk], name[sk]]]
call[name[ensure], parameter[compare[name[res] equal[==] constant[0]], constant[An error occurred trying to decrypt the message]]]
return[call[call[name[ffi].buffer, parameter[name[plaintext], name[_mlen]]]][<ast.Slice object at 0x7da2046229b0>]] | keyword[def] identifier[crypto_box_seal_open] ( identifier[ciphertext] , identifier[pk] , identifier[sk] ):
literal[string]
identifier[ensure] ( identifier[isinstance] ( identifier[ciphertext] , identifier[bytes] ),
literal[string] ,
identifier[raising] = identifier[TypeError] )
identifier[ensure] ( identifier[isinstance] ( identifier[pk] , identifier[bytes] ),
literal[string] ,
identifier[raising] = identifier[TypeError] )
identifier[ensure] ( identifier[isinstance] ( identifier[sk] , identifier[bytes] ),
literal[string] ,
identifier[raising] = identifier[TypeError] )
keyword[if] identifier[len] ( identifier[pk] )!= identifier[crypto_box_PUBLICKEYBYTES] :
keyword[raise] identifier[exc] . identifier[ValueError] ( literal[string] )
keyword[if] identifier[len] ( identifier[sk] )!= identifier[crypto_box_SECRETKEYBYTES] :
keyword[raise] identifier[exc] . identifier[ValueError] ( literal[string] )
identifier[_clen] = identifier[len] ( identifier[ciphertext] )
identifier[ensure] ( identifier[_clen] >= identifier[crypto_box_SEALBYTES] ,
( literal[string]
literal[string] ). identifier[format] ( identifier[crypto_box_SEALBYTES] ),
identifier[raising] = identifier[exc] . identifier[TypeError] )
identifier[_mlen] = identifier[_clen] - identifier[crypto_box_SEALBYTES]
identifier[plaintext] = identifier[ffi] . identifier[new] ( literal[string] , identifier[max] ( literal[int] , identifier[_mlen] ))
identifier[res] = identifier[lib] . identifier[crypto_box_seal_open] ( identifier[plaintext] , identifier[ciphertext] , identifier[_clen] , identifier[pk] , identifier[sk] )
identifier[ensure] ( identifier[res] == literal[int] , literal[string] ,
identifier[raising] = identifier[exc] . identifier[CryptoError] )
keyword[return] identifier[ffi] . identifier[buffer] ( identifier[plaintext] , identifier[_mlen] )[:] | def crypto_box_seal_open(ciphertext, pk, sk):
"""
Decrypts and returns an encrypted message ``ciphertext``, using the
recipient's secret key ``sk`` and the sender's ephemeral public key
embedded in the sealed box. The box construct's nonce is derived from
the recipient's public key ``pk`` and the sender's public key.
:param ciphertext: bytes
:param pk: bytes
:param sk: bytes
:rtype: bytes
.. versionadded:: 1.2
"""
ensure(isinstance(ciphertext, bytes), 'input ciphertext must be bytes', raising=TypeError)
ensure(isinstance(pk, bytes), 'public key must be bytes', raising=TypeError)
ensure(isinstance(sk, bytes), 'secret key must be bytes', raising=TypeError)
if len(pk) != crypto_box_PUBLICKEYBYTES:
raise exc.ValueError('Invalid public key') # depends on [control=['if'], data=[]]
if len(sk) != crypto_box_SECRETKEYBYTES:
raise exc.ValueError('Invalid secret key') # depends on [control=['if'], data=[]]
_clen = len(ciphertext)
ensure(_clen >= crypto_box_SEALBYTES, 'Input ciphertext must be at least {} long'.format(crypto_box_SEALBYTES), raising=exc.TypeError)
_mlen = _clen - crypto_box_SEALBYTES
# zero-length malloc results are implementation-dependent
plaintext = ffi.new('unsigned char[]', max(1, _mlen))
res = lib.crypto_box_seal_open(plaintext, ciphertext, _clen, pk, sk)
ensure(res == 0, 'An error occurred trying to decrypt the message', raising=exc.CryptoError)
return ffi.buffer(plaintext, _mlen)[:] |
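
A minimal round-trip sketch, assuming this function is PyNaCl's nacl.bindings.crypto_box_seal_open and that crypto_box_keypair and crypto_box_seal come from the same module:

# Sketch only; assumes PyNaCl's nacl.bindings sealed-box API.
from nacl.bindings import crypto_box_keypair, crypto_box_seal, crypto_box_seal_open

pk, sk = crypto_box_keypair()           # recipient's key pair
sealed = crypto_box_seal(b"hello", pk)  # anonymous sender encrypts to pk
assert crypto_box_seal_open(sealed, pk, sk) == b"hello"
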
def nt2aa(ntseq):
"""Translate a nucleotide sequence into an amino acid sequence.
Parameters
----------
ntseq : str
Nucleotide sequence composed of A, C, G, or T (uppercase or lowercase)
Returns
-------
aaseq : str
Amino acid sequence
Example
--------
>>> nt2aa('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC')
'CAWSVAPDRGGYTF'
"""
nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3, 'a': 0, 'c': 1, 'g': 2, 't': 3}
aa_dict = 'KQE*TPASRRG*ILVLNHDYTPASSRGCILVFKQE*TPASRRGWMLVLNHDYTPASSRGCILVF'
return ''.join([aa_dict[nt2num[ntseq[i]] + 4*nt2num[ntseq[i+1]] + 16*nt2num[ntseq[i+2]]] for i in range(0, len(ntseq), 3) if i+2 < len(ntseq)]) | def function[nt2aa, parameter[ntseq]]:
constant[Translate a nucleotide sequence into an amino acid sequence.
Parameters
----------
ntseq : str
Nucleotide sequence composed of A, C, G, or T (uppercase or lowercase)
Returns
-------
aaseq : str
Amino acid sequence
Example
--------
>>> nt2aa('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC')
'CAWSVAPDRGGYTF'
]
variable[nt2num] assign[=] dictionary[[<ast.Constant object at 0x7da204961de0>, <ast.Constant object at 0x7da204963610>, <ast.Constant object at 0x7da204961bd0>, <ast.Constant object at 0x7da204960100>, <ast.Constant object at 0x7da204960dc0>, <ast.Constant object at 0x7da2049630d0>, <ast.Constant object at 0x7da2049607f0>, <ast.Constant object at 0x7da204961840>], [<ast.Constant object at 0x7da204960850>, <ast.Constant object at 0x7da204960160>, <ast.Constant object at 0x7da204960ac0>, <ast.Constant object at 0x7da204962980>, <ast.Constant object at 0x7da204963550>, <ast.Constant object at 0x7da204963730>, <ast.Constant object at 0x7da204963820>, <ast.Constant object at 0x7da204962c50>]]
variable[aa_dict] assign[=] constant[KQE*TPASRRG*ILVLNHDYTPASSRGCILVFKQE*TPASRRGWMLVLNHDYTPASSRGCILVF]
return[call[constant[].join, parameter[<ast.ListComp object at 0x7da2049627d0>]]] | keyword[def] identifier[nt2aa] ( identifier[ntseq] ):
literal[string]
identifier[nt2num] ={ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }
identifier[aa_dict] = literal[string]
keyword[return] literal[string] . identifier[join] ([ identifier[aa_dict] [ identifier[nt2num] [ identifier[ntseq] [ identifier[i] ]]+ literal[int] * identifier[nt2num] [ identifier[ntseq] [ identifier[i] + literal[int] ]]+ literal[int] * identifier[nt2num] [ identifier[ntseq] [ identifier[i] + literal[int] ]]] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[ntseq] ), literal[int] ) keyword[if] identifier[i] + literal[int] < identifier[len] ( identifier[ntseq] )]) | def nt2aa(ntseq):
"""Translate a nucleotide sequence into an amino acid sequence.
Parameters
----------
ntseq : str
Nucleotide sequence composed of A, C, G, or T (uppercase or lowercase)
Returns
-------
aaseq : str
Amino acid sequence
Example
--------
>>> nt2aa('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC')
'CAWSVAPDRGGYTF'
"""
nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3, 'a': 0, 'c': 1, 'g': 2, 't': 3}
aa_dict = 'KQE*TPASRRG*ILVLNHDYTPASSRGCILVFKQE*TPASRRGWMLVLNHDYTPASSRGCILVF'
return ''.join([aa_dict[nt2num[ntseq[i]] + 4 * nt2num[ntseq[i + 1]] + 16 * nt2num[ntseq[i + 2]]] for i in range(0, len(ntseq), 3) if i + 2 < len(ntseq)]) |
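
A short usage sketch, assuming nt2aa above is in scope; lowercase input is accepted and a trailing incomplete codon is silently dropped:

# Usage sketch for nt2aa; TGT -> C (Cys), GCC -> A (Ala).
print(nt2aa('tgtgcc'))    # 'CA'
print(nt2aa('TGTGCCTG'))  # 'CA' -- the trailing 'TG' is an incomplete codon
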
def setNeutral(self, aMathObject, deltaName="origin"):
"""Set the neutral object."""
self._neutral = aMathObject
self.addDelta(Location(), aMathObject - aMathObject, deltaName, punch=False, axisOnly=True)
constant[Set the neutral object.]
name[self]._neutral assign[=] name[aMathObject]
call[name[self].addDelta, parameter[call[name[Location], parameter[]], binary_operation[name[aMathObject] - name[aMathObject]], name[deltaName]]] | keyword[def] identifier[setNeutral] ( identifier[self] , identifier[aMathObject] , identifier[deltaName] = literal[string] ):
literal[string]
identifier[self] . identifier[_neutral] = identifier[aMathObject]
identifier[self] . identifier[addDelta] ( identifier[Location] (), identifier[aMathObject] - identifier[aMathObject] , identifier[deltaName] , identifier[punch] = keyword[False] , identifier[axisOnly] = keyword[True] ) | def setNeutral(self, aMathObject, deltaName='origin'):
"""Set the neutral object."""
self._neutral = aMathObject
self.addDelta(Location(), aMathObject - aMathObject, deltaName, punch=False, axisOnly=True) |
def _collect_data(self):
"""
Returns a list of all the data gathered from the engine
iterable.
"""
all_data = []
for line in self.engine.run_engine():
logging.debug("Adding {} to all_data".format(line))
all_data.append(line.copy())
logging.debug("all_data is now {}".format(all_data))
return all_data | def function[_collect_data, parameter[self]]:
constant[
Returns a list of all the data gathered from the engine
iterable.
]
variable[all_data] assign[=] list[[]]
for taget[name[line]] in starred[call[name[self].engine.run_engine, parameter[]]] begin[:]
call[name[logging].debug, parameter[call[constant[Adding {} to all_data].format, parameter[name[line]]]]]
call[name[all_data].append, parameter[call[name[line].copy, parameter[]]]]
call[name[logging].debug, parameter[call[constant[all_data is now {}].format, parameter[name[all_data]]]]]
return[name[all_data]] | keyword[def] identifier[_collect_data] ( identifier[self] ):
literal[string]
identifier[all_data] =[]
keyword[for] identifier[line] keyword[in] identifier[self] . identifier[engine] . identifier[run_engine] ():
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[line] ))
identifier[all_data] . identifier[append] ( identifier[line] . identifier[copy] ())
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[all_data] ))
keyword[return] identifier[all_data] | def _collect_data(self):
"""
Returns a list of all the data gathered from the engine
iterable.
"""
all_data = []
for line in self.engine.run_engine():
logging.debug('Adding {} to all_data'.format(line))
all_data.append(line.copy())
logging.debug('all_data is now {}'.format(all_data)) # depends on [control=['for'], data=['line']]
return all_data |
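
A minimal sketch of the contract this method expects: self.engine must expose a run_engine() iterable yielding dict-like rows. The stub class names below are hypothetical:

import logging
# Hypothetical stubs illustrating the run_engine() contract.
class StubEngine:
    def run_engine(self):
        yield {'row': 1}
        yield {'row': 2}

class StubOwner:
    def __init__(self):
        self.engine = StubEngine()
    _collect_data = _collect_data  # reuse the function above as a method

print(StubOwner()._collect_data())  # [{'row': 1}, {'row': 2}]
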
def cdot(L, out=None):
r"""Product of a Cholesky matrix with itself transposed.
Args:
L (array_like): Cholesky matrix.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: :math:`\mathrm L\mathrm L^\intercal`.
"""
L = asarray(L, float)
layout_error = "Wrong matrix layout."
if L.ndim != 2:
raise ValueError(layout_error)
if L.shape[0] != L.shape[1]:
raise ValueError(layout_error)
if out is None:
out = empty((L.shape[0], L.shape[1]), float)
return einsum("ij,kj->ik", L, L, out=out) | def function[cdot, parameter[L, out]]:
constant[Product of a Cholesky matrix with itself transposed.
Args:
L (array_like): Cholesky matrix.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: :math:`\mathrm L\mathrm L^\intercal`.
]
variable[L] assign[=] call[name[asarray], parameter[name[L], name[float]]]
variable[layout_error] assign[=] constant[Wrong matrix layout.]
if compare[name[L].ndim not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da1b1bbbee0>
if compare[call[name[L].shape][constant[0]] not_equal[!=] call[name[L].shape][constant[1]]] begin[:]
<ast.Raise object at 0x7da1b1bba650>
if compare[name[out] is constant[None]] begin[:]
variable[out] assign[=] call[name[empty], parameter[tuple[[<ast.Subscript object at 0x7da1b1bb90f0>, <ast.Subscript object at 0x7da1b1bb8940>]], name[float]]]
return[call[name[einsum], parameter[constant[ij,kj->ik], name[L], name[L]]]] | keyword[def] identifier[cdot] ( identifier[L] , identifier[out] = keyword[None] ):
literal[string]
identifier[L] = identifier[asarray] ( identifier[L] , identifier[float] )
identifier[layout_error] = literal[string]
keyword[if] identifier[L] . identifier[ndim] != literal[int] :
keyword[raise] identifier[ValueError] ( identifier[layout_error] )
keyword[if] identifier[L] . identifier[shape] [ literal[int] ]!= identifier[L] . identifier[shape] [ literal[int] ]:
keyword[raise] identifier[ValueError] ( identifier[layout_error] )
keyword[if] identifier[out] keyword[is] keyword[None] :
identifier[out] = identifier[empty] (( identifier[L] . identifier[shape] [ literal[int] ], identifier[L] . identifier[shape] [ literal[int] ]), identifier[float] )
keyword[return] identifier[einsum] ( literal[string] , identifier[L] , identifier[L] , identifier[out] = identifier[out] ) | def cdot(L, out=None):
"""Product of a Cholesky matrix with itself transposed.
Args:
L (array_like): Cholesky matrix.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: :math:`\\mathrm L\\mathrm L^\\intercal`.
"""
L = asarray(L, float)
layout_error = 'Wrong matrix layout.'
if L.ndim != 2:
raise ValueError(layout_error) # depends on [control=['if'], data=[]]
if L.shape[0] != L.shape[1]:
raise ValueError(layout_error) # depends on [control=['if'], data=[]]
if out is None:
out = empty((L.shape[0], L.shape[1]), float) # depends on [control=['if'], data=['out']]
return einsum('ij,kj->ik', L, L, out=out) |
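
A quick sanity check, assuming cdot above sits in a module that imports asarray, empty, and einsum from NumPy: the Cholesky factor of a positive-definite A should reproduce A.

import numpy as np
# Verify L @ L.T == A for a small positive-definite matrix.
A = np.array([[4.0, 2.0], [2.0, 3.0]])
L = np.linalg.cholesky(A)  # lower-triangular factor
np.testing.assert_allclose(cdot(L), A)
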
def safe_format_sh(s, **kwargs):
"""
:type s: str
"""
to_replace = set(kwargs.keys()) & set(FORMAT_RE.findall(s))
for item in to_replace:
s = s.replace("{{" + item + "}}", kwargs[item])
return s | def function[safe_format_sh, parameter[s]]:
constant[
:type s: str
]
variable[to_replace] assign[=] binary_operation[call[name[set], parameter[call[name[kwargs].keys, parameter[]]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[set], parameter[call[name[FORMAT_RE].findall, parameter[name[s]]]]]]
for taget[name[item]] in starred[name[to_replace]] begin[:]
variable[s] assign[=] call[name[s].replace, parameter[binary_operation[binary_operation[constant[{{] + name[item]] + constant[}}]], call[name[kwargs]][name[item]]]]
return[name[s]] | keyword[def] identifier[safe_format_sh] ( identifier[s] ,** identifier[kwargs] ):
literal[string]
identifier[to_replace] = identifier[set] ( identifier[kwargs] . identifier[keys] ())& identifier[set] ( identifier[FORMAT_RE] . identifier[findall] ( identifier[s] ))
keyword[for] identifier[item] keyword[in] identifier[to_replace] :
identifier[s] = identifier[s] . identifier[replace] ( literal[string] + identifier[item] + literal[string] , identifier[kwargs] [ identifier[item] ])
keyword[return] identifier[s] | def safe_format_sh(s, **kwargs):
"""
:type s: str
"""
to_replace = set(kwargs.keys()) & set(FORMAT_RE.findall(s))
for item in to_replace:
s = s.replace('{{' + item + '}}', kwargs[item]) # depends on [control=['for'], data=['item']]
return s |
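
FORMAT_RE is a module-level pattern not shown in this row; the regex below is a plausible stand-in for demonstration, not the module's actual definition:

import re
# Hypothetical FORMAT_RE: captures the names inside {{...}} placeholders.
FORMAT_RE = re.compile(r'\{\{(\w+)\}\}')

print(safe_format_sh('echo {{greeting}}, {{name}}', greeting='hello', name='world'))
# -> echo hello, world
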
def compile_mof_file(self, mof_file, namespace=None, search_paths=None,
verbose=None):
"""
Compile the MOF definitions in the specified file (and its included
files) and add the resulting CIM objects to the specified CIM namespace
of the mock repository.
If the namespace does not exist, :exc:`~pywbem.CIMError` with status
CIM_ERR_INVALID_NAMESPACE is raised.
This method supports all MOF pragmas, and specifically the include
pragma.
If a CIM class or CIM qualifier type to be added already exists in the
target namespace with the same name (comparing case insensitively),
this method raises :exc:`~pywbem.CIMError`.
If a CIM instance to be added already exists in the target namespace
with the same keybinding values, this method raises
:exc:`~pywbem.CIMError`.
In all cases where this method raises an exception, the mock repository
remains unchanged.
Parameters:
mof_file (:term:`string`):
Path name of the file containing the MOF definitions to be compiled.
namespace (:term:`string`):
The name of the target CIM namespace in the mock repository. This
namespace is also used for lookup of any existing or dependent
CIM objects. If `None`, the default namespace of the connection is
used.
search_paths (:term:`py:iterable` of :term:`string`):
An iterable of directory path names where MOF dependent files will
be looked up.
See the description of the `search_path` init parameter of the
:class:`~pywbem.MOFCompiler` class for more information on MOF
dependent files.
verbose (:class:`py:bool`):
Controls whether to issue more detailed compiler messages.
Raises:
IOError: MOF file not found.
:exc:`~pywbem.MOFParseError`: Compile error in the MOF.
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist.
:exc:`~pywbem.CIMError`: Failure related to the CIM objects in the
mock repository.
"""
namespace = namespace or self.default_namespace
self._validate_namespace(namespace)
mofcomp = MOFCompiler(_MockMOFWBEMConnection(self),
search_paths=search_paths,
verbose=verbose)
mofcomp.compile_file(mof_file, namespace) | def function[compile_mof_file, parameter[self, mof_file, namespace, search_paths, verbose]]:
constant[
Compile the MOF definitions in the specified file (and its included
files) and add the resulting CIM objects to the specified CIM namespace
of the mock repository.
If the namespace does not exist, :exc:`~pywbem.CIMError` with status
CIM_ERR_INVALID_NAMESPACE is raised.
This method supports all MOF pragmas, and specifically the include
pragma.
If a CIM class or CIM qualifier type to be added already exists in the
target namespace with the same name (comparing case insensitively),
this method raises :exc:`~pywbem.CIMError`.
If a CIM instance to be added already exists in the target namespace
with the same keybinding values, this method raises
:exc:`~pywbem.CIMError`.
In all cases where this method raises an exception, the mock repository
remains unchanged.
Parameters:
mof_file (:term:`string`):
Path name of the file containing the MOF definitions to be compiled.
namespace (:term:`string`):
The name of the target CIM namespace in the mock repository. This
namespace is also used for lookup of any existing or dependent
CIM objects. If `None`, the default namespace of the connection is
used.
search_paths (:term:`py:iterable` of :term:`string`):
An iterable of directory path names where MOF dependent files will
be looked up.
See the description of the `search_path` init parameter of the
:class:`~pywbem.MOFCompiler` class for more information on MOF
dependent files.
verbose (:class:`py:bool`):
Controls whether to issue more detailed compiler messages.
Raises:
IOError: MOF file not found.
:exc:`~pywbem.MOFParseError`: Compile error in the MOF.
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist.
:exc:`~pywbem.CIMError`: Failure related to the CIM objects in the
mock repository.
]
variable[namespace] assign[=] <ast.BoolOp object at 0x7da20c76dc90>
call[name[self]._validate_namespace, parameter[name[namespace]]]
variable[mofcomp] assign[=] call[name[MOFCompiler], parameter[call[name[_MockMOFWBEMConnection], parameter[name[self]]]]]
call[name[mofcomp].compile_file, parameter[name[mof_file], name[namespace]]] | keyword[def] identifier[compile_mof_file] ( identifier[self] , identifier[mof_file] , identifier[namespace] = keyword[None] , identifier[search_paths] = keyword[None] ,
identifier[verbose] = keyword[None] ):
literal[string]
identifier[namespace] = identifier[namespace] keyword[or] identifier[self] . identifier[default_namespace]
identifier[self] . identifier[_validate_namespace] ( identifier[namespace] )
identifier[mofcomp] = identifier[MOFCompiler] ( identifier[_MockMOFWBEMConnection] ( identifier[self] ),
identifier[search_paths] = identifier[search_paths] ,
identifier[verbose] = identifier[verbose] )
identifier[mofcomp] . identifier[compile_file] ( identifier[mof_file] , identifier[namespace] ) | def compile_mof_file(self, mof_file, namespace=None, search_paths=None, verbose=None):
"""
Compile the MOF definitions in the specified file (and its included
files) and add the resulting CIM objects to the specified CIM namespace
of the mock repository.
If the namespace does not exist, :exc:`~pywbem.CIMError` with status
CIM_ERR_INVALID_NAMESPACE is raised.
This method supports all MOF pragmas, and specifically the include
pragma.
If a CIM class or CIM qualifier type to be added already exists in the
target namespace with the same name (comparing case insensitively),
this method raises :exc:`~pywbem.CIMError`.
If a CIM instance to be added already exists in the target namespace
with the same keybinding values, this method raises
:exc:`~pywbem.CIMError`.
In all cases where this method raises an exception, the mock repository
remains unchanged.
Parameters:
mof_file (:term:`string`):
Path name of the file containing the MOF definitions to be compiled.
namespace (:term:`string`):
The name of the target CIM namespace in the mock repository. This
namespace is also used for lookup of any existing or dependent
CIM objects. If `None`, the default namespace of the connection is
used.
search_paths (:term:`py:iterable` of :term:`string`):
An iterable of directory path names where MOF dependent files will
be looked up.
See the description of the `search_path` init parameter of the
:class:`~pywbem.MOFCompiler` class for more information on MOF
dependent files.
verbose (:class:`py:bool`):
Controls whether to issue more detailed compiler messages.
Raises:
IOError: MOF file not found.
:exc:`~pywbem.MOFParseError`: Compile error in the MOF.
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist.
:exc:`~pywbem.CIMError`: Failure related to the CIM objects in the
mock repository.
"""
namespace = namespace or self.default_namespace
self._validate_namespace(namespace)
mofcomp = MOFCompiler(_MockMOFWBEMConnection(self), search_paths=search_paths, verbose=verbose)
mofcomp.compile_file(mof_file, namespace) |
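
A usage sketch, assuming this method belongs to pywbem's mock connection class (pywbem_mock.FakedWBEMConnection); the MOF file path is a placeholder:

# Sketch; assumes pywbem's pywbem_mock.FakedWBEMConnection API.
import pywbem_mock

conn = pywbem_mock.FakedWBEMConnection(default_namespace='root/cimv2')
conn.compile_mof_file('schema.mof',  # hypothetical MOF file
                      namespace='root/cimv2',
                      search_paths=['mof/'],
                      verbose=False)
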
def leaf_bus(self, df=False):
"""
Return leaf bus idx, line idx, and the line foreign key
Returns
-------
(list, list, list) or DataFrame
"""
# leafs - leaf bus idx
# lines - line idx
# fkey - the foreign key of Line, in 'bus1' or 'bus2', linking the bus
leafs, lines, fkeys = list(), list(), list()
# convert to unique, ordered list
buses = sorted(list(set(self.bus1 + self.bus2)))
links = self.link_bus(buses)
for bus, link in zip(buses, links):
line = link[0]
fkey = link[1]
if line is None:
continue
if len(line) == 1:
leafs.append(bus)
lines.extend(line)
fkeys.extend(fkey)
# output formatting
if df is False:
return leafs, lines, fkeys
else:
_data = {'Bus idx': leafs, 'Line idx': lines, 'fkey': fkeys}
if globals()['pd'] is None:
globals()['pd'] = importlib.import_module('pandas')
return pd.DataFrame(data=_data) | def function[leaf_bus, parameter[self, df]]:
constant[
Return leaf bus idx, line idx, and the line foreign key
Returns
-------
(list, list, list) or DataFrame
]
<ast.Tuple object at 0x7da20e9b3520> assign[=] tuple[[<ast.Call object at 0x7da20e9b3b20>, <ast.Call object at 0x7da20e9b3be0>, <ast.Call object at 0x7da20e9b2860>]]
variable[buses] assign[=] call[name[sorted], parameter[call[name[list], parameter[call[name[set], parameter[binary_operation[name[self].bus1 + name[self].bus2]]]]]]]
variable[links] assign[=] call[name[self].link_bus, parameter[name[buses]]]
for taget[tuple[[<ast.Name object at 0x7da20e9b3010>, <ast.Name object at 0x7da20e9b1180>]]] in starred[call[name[zip], parameter[name[buses], name[links]]]] begin[:]
variable[line] assign[=] call[name[link]][constant[0]]
variable[fkey] assign[=] call[name[link]][constant[1]]
if compare[name[line] is constant[None]] begin[:]
continue
if compare[call[name[len], parameter[name[line]]] equal[==] constant[1]] begin[:]
call[name[leafs].append, parameter[name[bus]]]
call[name[lines].extend, parameter[name[line]]]
call[name[fkeys].extend, parameter[name[fkey]]]
if compare[name[df] is constant[False]] begin[:]
return[tuple[[<ast.Name object at 0x7da20e9b0700>, <ast.Name object at 0x7da20e9b1ba0>, <ast.Name object at 0x7da20e9b3d00>]]] | keyword[def] identifier[leaf_bus] ( identifier[self] , identifier[df] = keyword[False] ):
literal[string]
identifier[leafs] , identifier[lines] , identifier[fkeys] = identifier[list] (), identifier[list] (), identifier[list] ()
identifier[buses] = identifier[sorted] ( identifier[list] ( identifier[set] ( identifier[self] . identifier[bus1] + identifier[self] . identifier[bus2] )))
identifier[links] = identifier[self] . identifier[link_bus] ( identifier[buses] )
keyword[for] identifier[bus] , identifier[link] keyword[in] identifier[zip] ( identifier[buses] , identifier[links] ):
identifier[line] = identifier[link] [ literal[int] ]
identifier[fkey] = identifier[link] [ literal[int] ]
keyword[if] identifier[line] keyword[is] keyword[None] :
keyword[continue]
keyword[if] identifier[len] ( identifier[line] )== literal[int] :
identifier[leafs] . identifier[append] ( identifier[bus] )
identifier[lines] . identifier[extend] ( identifier[line] )
identifier[fkeys] . identifier[extend] ( identifier[fkey] )
keyword[if] identifier[df] keyword[is] keyword[False] :
keyword[return] identifier[leafs] , identifier[lines] , identifier[fkeys]
keyword[else] :
identifier[_data] ={ literal[string] : identifier[leafs] , literal[string] : identifier[lines] , literal[string] : identifier[fkeys] }
keyword[if] identifier[globals] ()[ literal[string] ] keyword[is] keyword[None] :
identifier[globals] ()[ literal[string] ]= identifier[importlib] . identifier[import_module] ( literal[string] )
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[data] = identifier[_data] ) | def leaf_bus(self, df=False):
"""
Return leaf bus idx, line idx, and the line foreign key
Returns
-------
(list, list, list) or DataFrame
"""
# leafs - leaf bus idx
# lines - line idx
# fkey - the foreign key of Line, in 'bus1' or 'bus2', linking the bus
(leafs, lines, fkeys) = (list(), list(), list())
# convert to unique, ordered list
buses = sorted(list(set(self.bus1 + self.bus2)))
links = self.link_bus(buses)
for (bus, link) in zip(buses, links):
line = link[0]
fkey = link[1]
if line is None:
continue # depends on [control=['if'], data=[]]
if len(line) == 1:
leafs.append(bus)
lines.extend(line)
fkeys.extend(fkey) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# output formatting
if df is False:
return (leafs, lines, fkeys) # depends on [control=['if'], data=[]]
else:
_data = {'Bus idx': leafs, 'Line idx': lines, 'fkey': fkeys}
if globals()['pd'] is None:
globals()['pd'] = importlib.import_module('pandas') # depends on [control=['if'], data=[]]
return pd.DataFrame(data=_data) |
async def get(self, public_key):
"""Receives public key, looking up document at storage,
sends document id to the balance server
"""
if settings.SIGNATURE_VERIFICATION:
super().verify()
response = await self.account.getnews(public_key=public_key)
# If we've got an empty list or a list with news
if isinstance(response, list):
self.write(json.dumps(response))
raise tornado.web.Finish
# If we've got an error message
elif isinstance(response, dict):
try:
error_code = response["error"]
except KeyError:
del response["account_id"]
self.write(response)
else:
self.set_status(error_code)
self.write(response)
raise tornado.web.Finish | <ast.AsyncFunctionDef object at 0x7da1b23459c0> | keyword[async] keyword[def] identifier[get] ( identifier[self] , identifier[public_key] ):
literal[string]
keyword[if] identifier[settings] . identifier[SIGNATURE_VERIFICATION] :
identifier[super] (). identifier[verify] ()
identifier[response] = keyword[await] identifier[self] . identifier[account] . identifier[getnews] ( identifier[public_key] = identifier[public_key] )
keyword[if] identifier[isinstance] ( identifier[response] , identifier[list] ):
identifier[self] . identifier[write] ( identifier[json] . identifier[dumps] ( identifier[response] ))
keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish]
keyword[elif] identifier[isinstance] ( identifier[response] , identifier[dict] ):
keyword[try] :
identifier[error_code] = identifier[response] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[del] identifier[response] [ literal[string] ]
identifier[self] . identifier[write] ( identifier[response] )
keyword[else] :
identifier[self] . identifier[set_status] ( identifier[error_code] )
identifier[self] . identifier[write] ( identifier[response] )
keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish] | async def get(self, public_key):
"""Receives public key, looking up document at storage,
sends document id to the balance server
"""
if settings.SIGNATURE_VERIFICATION:
super().verify() # depends on [control=['if'], data=[]]
response = await self.account.getnews(public_key=public_key) # If we've got an empty list or a list with news
if isinstance(response, list):
self.write(json.dumps(response))
raise tornado.web.Finish # depends on [control=['if'], data=[]] # If we've got an error message
elif isinstance(response, dict):
try:
error_code = response['error'] # depends on [control=['try'], data=[]]
except KeyError:
del response['account_id']
self.write(response) # depends on [control=['except'], data=[]]
else:
self.set_status(error_code)
self.write(response)
raise tornado.web.Finish # depends on [control=['if'], data=[]] |
def to_json(self):
"""
Returns a json-compatible object from the constraint that can be saved using the json module.
Example
--------
>>> import json
>>> with open("path_to_file.json", "w") as outfile:
...     json.dump(constraint.to_json(), outfile)
"""
if self.indicator_variable is None:
indicator = None
else:
indicator = self.indicator_variable.name
json_obj = {
"name": self.name,
"expression": expr_to_json(self.expression),
"lb": self.lb,
"ub": self.ub,
"indicator_variable": indicator,
"active_when": self.active_when
}
return json_obj | def function[to_json, parameter[self]]:
constant[
Returns a json-compatible object from the constraint that can be saved using the json module.
Example
--------
>>> import json
>>> with open("path_to_file.json", "w") as outfile:
...     json.dump(constraint.to_json(), outfile)
]
if compare[name[self].indicator_variable is constant[None]] begin[:]
variable[indicator] assign[=] constant[None]
variable[json_obj] assign[=] dictionary[[<ast.Constant object at 0x7da1b0c4ebc0>, <ast.Constant object at 0x7da1b0c4d8a0>, <ast.Constant object at 0x7da1b0c4e8c0>, <ast.Constant object at 0x7da1b0c4e1a0>, <ast.Constant object at 0x7da1b0c4f010>, <ast.Constant object at 0x7da1b0c4cfd0>], [<ast.Attribute object at 0x7da1b0c4cc70>, <ast.Call object at 0x7da1b0c4eb00>, <ast.Attribute object at 0x7da1b0c4d5d0>, <ast.Attribute object at 0x7da1b0c4e950>, <ast.Name object at 0x7da1b0c4e200>, <ast.Attribute object at 0x7da1b0c4e3e0>]]
return[name[json_obj]] | keyword[def] identifier[to_json] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[indicator_variable] keyword[is] keyword[None] :
identifier[indicator] = keyword[None]
keyword[else] :
identifier[indicator] = identifier[self] . identifier[indicator_variable] . identifier[name]
identifier[json_obj] ={
literal[string] : identifier[self] . identifier[name] ,
literal[string] : identifier[expr_to_json] ( identifier[self] . identifier[expression] ),
literal[string] : identifier[self] . identifier[lb] ,
literal[string] : identifier[self] . identifier[ub] ,
literal[string] : identifier[indicator] ,
literal[string] : identifier[self] . identifier[active_when]
}
keyword[return] identifier[json_obj] | def to_json(self):
"""
Returns a json-compatible object from the constraint that can be saved using the json module.
Example
--------
>>> import json
>>> with open("path_to_file.json", "w") as outfile:
...     json.dump(constraint.to_json(), outfile)
"""
if self.indicator_variable is None:
indicator = None # depends on [control=['if'], data=[]]
else:
indicator = self.indicator_variable.name
json_obj = {'name': self.name, 'expression': expr_to_json(self.expression), 'lb': self.lb, 'ub': self.ub, 'indicator_variable': indicator, 'active_when': self.active_when}
return json_obj |
def run_script(self, script, identifier=_DEFAULT_SCRIPT_NAME):
"""
Run a JS script within the context.\
All code is run synchronously,\
there is no event loop. It's thread-safe
:param script: utf-8 encoded or unicode string
:type script: bytes or str
:param identifier: utf-8 encoded or unicode string.\
This is used as the name of the script\
(ie: in stack-traces)
:type identifier: bytes or str
:return: Result of running the JS script
:rtype: str
:raises V8Error: if there was\
an error running the JS script
"""
assert isinstance(script, six.text_type) or _is_utf_8(script)
assert isinstance(identifier, six.text_type) or _is_utf_8(identifier)
if isinstance(script, six.text_type):
script = script.encode('utf-8')
if isinstance(identifier, six.text_type):
identifier = identifier.encode('utf-8')
with _String() as output:
with _String() as error:
code = lib.v8cffi_run_script(
self._c_context[0],
script,
len(script),
identifier,
len(identifier),
output.string_ptr,
output.len_ptr,
error.string_ptr,
error.len_ptr)
if code != lib.E_V8_OK:
raise exceptions.get_exception(code)(six.text_type(error))
return six.text_type(output) | def function[run_script, parameter[self, script, identifier]]:
constant[
Run a JS script within the context. All code is run synchronously, there is no event loop. It's thread-safe
:param script: utf-8 encoded or unicode string
:type script: bytes or str
:param identifier: utf-8 encoded or unicode string. This is used as the name of the script (ie: in stack-traces)
:type identifier: bytes or str
:return: Result of running the JS script
:rtype: str
:raises V8Error: if there was an error running the JS script
]
assert[<ast.BoolOp object at 0x7da1b265f640>]
assert[<ast.BoolOp object at 0x7da1b265f760>]
if call[name[isinstance], parameter[name[script], name[six].text_type]] begin[:]
variable[script] assign[=] call[name[script].encode, parameter[constant[utf-8]]]
if call[name[isinstance], parameter[name[identifier], name[six].text_type]] begin[:]
variable[identifier] assign[=] call[name[identifier].encode, parameter[constant[utf-8]]]
with call[name[_String], parameter[]] begin[:]
with call[name[_String], parameter[]] begin[:]
variable[code] assign[=] call[name[lib].v8cffi_run_script, parameter[call[name[self]._c_context][constant[0]], name[script], call[name[len], parameter[name[script]]], name[identifier], call[name[len], parameter[name[identifier]]], name[output].string_ptr, name[output].len_ptr, name[error].string_ptr, name[error].len_ptr]]
if compare[name[code] not_equal[!=] name[lib].E_V8_OK] begin[:]
<ast.Raise object at 0x7da1b2546740>
return[call[name[six].text_type, parameter[name[output]]]] | keyword[def] identifier[run_script] ( identifier[self] , identifier[script] , identifier[identifier] = identifier[_DEFAULT_SCRIPT_NAME] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[script] , identifier[six] . identifier[text_type] ) keyword[or] identifier[_is_utf_8] ( identifier[script] )
keyword[assert] identifier[isinstance] ( identifier[identifier] , identifier[six] . identifier[text_type] ) keyword[or] identifier[_is_utf_8] ( identifier[identifier] )
keyword[if] identifier[isinstance] ( identifier[script] , identifier[six] . identifier[text_type] ):
identifier[script] = identifier[script] . identifier[encode] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[identifier] , identifier[six] . identifier[text_type] ):
identifier[identifier] = identifier[identifier] . identifier[encode] ( literal[string] )
keyword[with] identifier[_String] () keyword[as] identifier[output] :
keyword[with] identifier[_String] () keyword[as] identifier[error] :
identifier[code] = identifier[lib] . identifier[v8cffi_run_script] (
identifier[self] . identifier[_c_context] [ literal[int] ],
identifier[script] ,
identifier[len] ( identifier[script] ),
identifier[identifier] ,
identifier[len] ( identifier[identifier] ),
identifier[output] . identifier[string_ptr] ,
identifier[output] . identifier[len_ptr] ,
identifier[error] . identifier[string_ptr] ,
identifier[error] . identifier[len_ptr] )
keyword[if] identifier[code] != identifier[lib] . identifier[E_V8_OK] :
keyword[raise] identifier[exceptions] . identifier[get_exception] ( identifier[code] )( identifier[six] . identifier[text_type] ( identifier[error] ))
keyword[return] identifier[six] . identifier[text_type] ( identifier[output] ) | def run_script(self, script, identifier=_DEFAULT_SCRIPT_NAME):
"""
Run a JS script within the context. All code is run synchronously, there is no event loop. It's thread-safe
:param script: utf-8 encoded or unicode string
:type script: bytes or str
:param identifier: utf-8 encoded or unicode string. This is used as the name of the script (ie: in stack-traces)
:type identifier: bytes or str
:return: Result of running the JS script
:rtype: str
:raises V8Error: if there was an error running the JS script
"""
assert isinstance(script, six.text_type) or _is_utf_8(script)
assert isinstance(identifier, six.text_type) or _is_utf_8(identifier)
if isinstance(script, six.text_type):
script = script.encode('utf-8') # depends on [control=['if'], data=[]]
if isinstance(identifier, six.text_type):
identifier = identifier.encode('utf-8') # depends on [control=['if'], data=[]]
with _String() as output:
with _String() as error:
code = lib.v8cffi_run_script(self._c_context[0], script, len(script), identifier, len(identifier), output.string_ptr, output.len_ptr, error.string_ptr, error.len_ptr)
if code != lib.E_V8_OK:
raise exceptions.get_exception(code)(six.text_type(error)) # depends on [control=['if'], data=['code']]
return six.text_type(output) # depends on [control=['with'], data=['error']] # depends on [control=['with'], data=['_String', 'output']] |
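
A usage sketch assuming this is v8cffi's Context.run_script; the platform/VM/context nesting below follows that library's usual pattern but is an assumption here:

# Sketch; the v8cffi setup chain is assumed, not verified here.
from v8cffi.platform import platform

with platform as p:
    with p.create_vm() as vm:
        with vm.create_context() as context:
            print(context.run_script('"Hello" + " world!"', identifier='demo.js'))
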
def _spikes_per_cluster(spike_clusters, spike_ids=None):
"""Return a dictionary {cluster: list_of_spikes}."""
if spike_clusters is None or not len(spike_clusters):
return {}
if spike_ids is None:
spike_ids = np.arange(len(spike_clusters)).astype(np.int64)
# NOTE: this sort method is stable, so spike ids are increasing
# among any cluster. Therefore we don't have to sort again down here,
# when creating the spikes_in_clusters dictionary.
rel_spikes = np.argsort(spike_clusters, kind='mergesort')
abs_spikes = spike_ids[rel_spikes]
spike_clusters = spike_clusters[rel_spikes]
diff = np.empty_like(spike_clusters)
diff[0] = 1
diff[1:] = np.diff(spike_clusters)
idx = np.nonzero(diff > 0)[0]
clusters = spike_clusters[idx]
# NOTE: we don't have to sort abs_spikes[...] here because the argsort
# using 'mergesort' above is stable.
spikes_in_clusters = {clusters[i]: abs_spikes[idx[i]:idx[i + 1]]
for i in range(len(clusters) - 1)}
spikes_in_clusters[clusters[-1]] = abs_spikes[idx[-1]:]
return spikes_in_clusters | def function[_spikes_per_cluster, parameter[spike_clusters, spike_ids]]:
constant[Return a dictionary {cluster: list_of_spikes}.]
if <ast.BoolOp object at 0x7da1b26adb70> begin[:]
return[dictionary[[], []]]
if compare[name[spike_ids] is constant[None]] begin[:]
variable[spike_ids] assign[=] call[call[name[np].arange, parameter[call[name[len], parameter[name[spike_clusters]]]]].astype, parameter[name[np].int64]]
variable[rel_spikes] assign[=] call[name[np].argsort, parameter[name[spike_clusters]]]
variable[abs_spikes] assign[=] call[name[spike_ids]][name[rel_spikes]]
variable[spike_clusters] assign[=] call[name[spike_clusters]][name[rel_spikes]]
variable[diff] assign[=] call[name[np].empty_like, parameter[name[spike_clusters]]]
call[name[diff]][constant[0]] assign[=] constant[1]
call[name[diff]][<ast.Slice object at 0x7da1b26acd30>] assign[=] call[name[np].diff, parameter[name[spike_clusters]]]
variable[idx] assign[=] call[call[name[np].nonzero, parameter[compare[name[diff] greater[>] constant[0]]]]][constant[0]]
variable[clusters] assign[=] call[name[spike_clusters]][name[idx]]
variable[spikes_in_clusters] assign[=] <ast.DictComp object at 0x7da1b13b72b0>
call[name[spikes_in_clusters]][call[name[clusters]][<ast.UnaryOp object at 0x7da1b13b7550>]] assign[=] call[name[abs_spikes]][<ast.Slice object at 0x7da1b13b6b00>]
return[name[spikes_in_clusters]] | keyword[def] identifier[_spikes_per_cluster] ( identifier[spike_clusters] , identifier[spike_ids] = keyword[None] ):
literal[string]
keyword[if] identifier[spike_clusters] keyword[is] keyword[None] keyword[or] keyword[not] identifier[len] ( identifier[spike_clusters] ):
keyword[return] {}
keyword[if] identifier[spike_ids] keyword[is] keyword[None] :
identifier[spike_ids] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[spike_clusters] )). identifier[astype] ( identifier[np] . identifier[int64] )
identifier[rel_spikes] = identifier[np] . identifier[argsort] ( identifier[spike_clusters] , identifier[kind] = literal[string] )
identifier[abs_spikes] = identifier[spike_ids] [ identifier[rel_spikes] ]
identifier[spike_clusters] = identifier[spike_clusters] [ identifier[rel_spikes] ]
identifier[diff] = identifier[np] . identifier[empty_like] ( identifier[spike_clusters] )
identifier[diff] [ literal[int] ]= literal[int]
identifier[diff] [ literal[int] :]= identifier[np] . identifier[diff] ( identifier[spike_clusters] )
identifier[idx] = identifier[np] . identifier[nonzero] ( identifier[diff] > literal[int] )[ literal[int] ]
identifier[clusters] = identifier[spike_clusters] [ identifier[idx] ]
identifier[spikes_in_clusters] ={ identifier[clusters] [ identifier[i] ]: identifier[abs_spikes] [ identifier[idx] [ identifier[i] ]: identifier[idx] [ identifier[i] + literal[int] ]]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[clusters] )- literal[int] )}
identifier[spikes_in_clusters] [ identifier[clusters] [- literal[int] ]]= identifier[abs_spikes] [ identifier[idx] [- literal[int] ]:]
keyword[return] identifier[spikes_in_clusters] | def _spikes_per_cluster(spike_clusters, spike_ids=None):
"""Return a dictionary {cluster: list_of_spikes}."""
if spike_clusters is None or not len(spike_clusters):
return {} # depends on [control=['if'], data=[]]
if spike_ids is None:
spike_ids = np.arange(len(spike_clusters)).astype(np.int64) # depends on [control=['if'], data=['spike_ids']]
# NOTE: this sort method is stable, so spike ids are increasing
# among any cluster. Therefore we don't have to sort again down here,
# when creating the spikes_in_clusters dictionary.
rel_spikes = np.argsort(spike_clusters, kind='mergesort')
abs_spikes = spike_ids[rel_spikes]
spike_clusters = spike_clusters[rel_spikes]
diff = np.empty_like(spike_clusters)
diff[0] = 1
diff[1:] = np.diff(spike_clusters)
idx = np.nonzero(diff > 0)[0]
clusters = spike_clusters[idx]
# NOTE: we don't have to sort abs_spikes[...] here because the argsort
# using 'mergesort' above is stable.
spikes_in_clusters = {clusters[i]: abs_spikes[idx[i]:idx[i + 1]] for i in range(len(clusters) - 1)}
spikes_in_clusters[clusters[-1]] = abs_spikes[idx[-1]:]
return spikes_in_clusters |
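
A worked toy example, assuming numpy is imported as np in the function's module:

import numpy as np
# Five spikes assigned to clusters 2, 0, 2, 1, 0.
spike_clusters = np.array([2, 0, 2, 1, 0])
out = _spikes_per_cluster(spike_clusters)
for cluster, spikes in sorted(out.items()):
    print(cluster, spikes.tolist())
# 0 [1, 4]
# 1 [3]
# 2 [0, 2]
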
def browser_authorize(self):
"""
Open a browser to the authorization url and spool up a CherryPy
server to accept the response
"""
url = self.authorize_url()
# Open the web browser in a new thread for command-line browser support
threading.Timer(1, webbrowser.open, args=(url,)).start()
server_config = {
'server.socket_host': '0.0.0.0',
'server.socket_port': 443,
'server.ssl_module': 'pyopenssl',
'server.ssl_certificate': 'tests/files/certificate.cert',
'server.ssl_private_key': 'tests/files/key.key',
}
cherrypy.config.update(server_config)
cherrypy.quickstart(self) | def function[browser_authorize, parameter[self]]:
constant[
Open a browser to the authorization url and spool up a CherryPy
server to accept the response
]
variable[url] assign[=] call[name[self].authorize_url, parameter[]]
call[call[name[threading].Timer, parameter[constant[1], name[webbrowser].open]].start, parameter[]]
variable[server_config] assign[=] dictionary[[<ast.Constant object at 0x7da1b13404c0>, <ast.Constant object at 0x7da1b1340c40>, <ast.Constant object at 0x7da1b13417e0>, <ast.Constant object at 0x7da1b1341bd0>, <ast.Constant object at 0x7da1b1340ca0>], [<ast.Constant object at 0x7da1b1342260>, <ast.Constant object at 0x7da1b13422c0>, <ast.Constant object at 0x7da1b1340400>, <ast.Constant object at 0x7da1b1342350>, <ast.Constant object at 0x7da1b13412d0>]]
call[name[cherrypy].config.update, parameter[name[server_config]]]
call[name[cherrypy].quickstart, parameter[name[self]]] | keyword[def] identifier[browser_authorize] ( identifier[self] ):
literal[string]
identifier[url] = identifier[self] . identifier[authorize_url] ()
identifier[threading] . identifier[Timer] ( literal[int] , identifier[webbrowser] . identifier[open] , identifier[args] =( identifier[url] ,)). identifier[start] ()
identifier[server_config] ={
literal[string] : literal[string] ,
literal[string] : literal[int] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
identifier[cherrypy] . identifier[config] . identifier[update] ( identifier[server_config] )
identifier[cherrypy] . identifier[quickstart] ( identifier[self] ) | def browser_authorize(self):
"""
Open a browser to the authorization url and spool up a CherryPy
server to accept the response
"""
url = self.authorize_url()
# Open the web browser in a new thread for command-line browser support
threading.Timer(1, webbrowser.open, args=(url,)).start()
server_config = {'server.socket_host': '0.0.0.0', 'server.socket_port': 443, 'server.ssl_module': 'pyopenssl', 'server.ssl_certificate': 'tests/files/certificate.cert', 'server.ssl_private_key': 'tests/files/key.key'}
cherrypy.config.update(server_config)
cherrypy.quickstart(self) |
def read_requirements(*parts):
"""
Given a requirements.txt (or similar style file), returns a list of requirements.
Assumes anything after a single '#' on a line is a comment, and ignores
empty lines.
"""
requirements = []
for line in read(*parts).splitlines():
new_line = re.sub(r'(\s*)?#.*$', # the space immediately before the
# hash mark, the hash mark, and
# anything that follows it
'', # replace with a blank string
line)
if new_line: # i.e. we have a non-zero-length string
requirements.append(new_line)
return requirements | def function[read_requirements, parameter[]]:
constant[
Given a requirements.txt (or similar style file), returns a list of requirements.
Assumes anything after a single '#' on a line is a comment, and ignores
empty lines.
]
variable[requirements] assign[=] list[[]]
for taget[name[line]] in starred[call[call[name[read], parameter[<ast.Starred object at 0x7da2047ea3e0>]].splitlines, parameter[]]] begin[:]
variable[new_line] assign[=] call[name[re].sub, parameter[constant[(\s*)?#.*$], constant[], name[line]]]
if name[new_line] begin[:]
call[name[requirements].append, parameter[name[new_line]]]
return[name[requirements]] | keyword[def] identifier[read_requirements] (* identifier[parts] ):
literal[string]
identifier[requirements] =[]
keyword[for] identifier[line] keyword[in] identifier[read] (* identifier[parts] ). identifier[splitlines] ():
identifier[new_line] = identifier[re] . identifier[sub] ( literal[string] ,
literal[string] ,
identifier[line] )
keyword[if] identifier[new_line] :
identifier[requirements] . identifier[append] ( identifier[new_line] )
keyword[return] identifier[requirements] | def read_requirements(*parts):
"""
Given a requirements.txt (or similar style file), returns a list of requirements.
Assumes anything after a single '#' on a line is a comment, and ignores
empty lines.
"""
requirements = []
for line in read(*parts).splitlines(): # the space immediately before the
# hash mark, the hash mark, and
# anything that follows it
# replace with a blank string
new_line = re.sub('(\\s*)?#.*$', '', line)
if new_line: # i.e. we have a non-zero-length string
requirements.append(new_line) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return requirements |
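
The comment-stripping regex can be exercised on its own; a self-contained sketch (read() is the module's file reader and is not reproduced here):

import re
# Stand-alone demo of the comment-stripping logic used above.
lines = ['requests>=2.0  # http client', '', '# dev only', 'six']
stripped = [re.sub(r'(\s*)?#.*$', '', ln) for ln in lines]
print([r for r in stripped if r])  # ['requests>=2.0', 'six']
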
def preview(self, request):
"""
Return occurrences in JSON format, up to the configured limit.
"""
recurrence_rule = request.POST.get('recurrence_rule')
limit = int(request.POST.get('limit', 10))
try:
rruleset = rrule.rrulestr(
recurrence_rule, dtstart=djtz.now(), forceset=True)
except ValueError as e:
data = {
'error': six.text_type(e),
}
else:
data = {
'occurrences': rruleset[:limit]
}
return JsonResponse(data) | def function[preview, parameter[self, request]]:
constant[
Return occurrences in JSON format, up to the configured limit.
]
variable[recurrence_rule] assign[=] call[name[request].POST.get, parameter[constant[recurrence_rule]]]
variable[limit] assign[=] call[name[int], parameter[call[name[request].POST.get, parameter[constant[limit], constant[10]]]]]
<ast.Try object at 0x7da1b0ebe530>
return[call[name[JsonResponse], parameter[name[data]]]] | keyword[def] identifier[preview] ( identifier[self] , identifier[request] ):
literal[string]
identifier[recurrence_rule] = identifier[request] . identifier[POST] . identifier[get] ( literal[string] )
identifier[limit] = identifier[int] ( identifier[request] . identifier[POST] . identifier[get] ( literal[string] , literal[int] ))
keyword[try] :
identifier[rruleset] = identifier[rrule] . identifier[rrulestr] (
identifier[recurrence_rule] , identifier[dtstart] = identifier[djtz] . identifier[now] (), identifier[forceset] = keyword[True] )
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
identifier[data] ={
literal[string] : identifier[six] . identifier[text_type] ( identifier[e] ),
}
keyword[else] :
identifier[data] ={
literal[string] : identifier[rruleset] [: identifier[limit] ]
}
keyword[return] identifier[JsonResponse] ( identifier[data] ) | def preview(self, request):
"""
Return occurrences in JSON format, up to the configured limit.
"""
recurrence_rule = request.POST.get('recurrence_rule')
limit = int(request.POST.get('limit', 10))
try:
rruleset = rrule.rrulestr(recurrence_rule, dtstart=djtz.now(), forceset=True) # depends on [control=['try'], data=[]]
except ValueError as e:
data = {'error': six.text_type(e)} # depends on [control=['except'], data=['e']]
else:
data = {'occurrences': rruleset[:limit]}
return JsonResponse(data) |
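
The dateutil call at the heart of the handler can be exercised directly; a sketch with a fixed start date in place of djtz.now():

import datetime
from dateutil import rrule
# Parse an RRULE string into a set and take the first few occurrences,
# mirroring rruleset[:limit] in the handler above.
rules = rrule.rrulestr('RRULE:FREQ=DAILY;COUNT=10',
                       dtstart=datetime.datetime(2020, 1, 1), forceset=True)
print(rules[:3])  # three datetimes starting 2020-01-01
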
def run(app, argv=sys.argv, extra_args=None):
"""
Run commands in a plain django environment
:param app: application
:param argv: arguments (defaults to sys.argv)
:param extra_args: list of extra arguments
"""
if app not in argv[:2]:
# app is automatically added if not present
argv.insert(1, app)
if len(argv) < 3 and 'test' not in argv[:2]:
# test argument is added if no argument is passed
argv.insert(2, 'test')
if extra_args:
argv.extend(extra_args)
return runner(argv) | def function[run, parameter[app, argv, extra_args]]:
constant[
Run commands in a plain django environment
:param app: application
:param argv: arguments (defaults to sys.argv)
:param extra_args: list of extra arguments
]
if compare[name[app] <ast.NotIn object at 0x7da2590d7190> call[name[argv]][<ast.Slice object at 0x7da204346170>]] begin[:]
call[name[argv].insert, parameter[constant[1], name[app]]]
if <ast.BoolOp object at 0x7da20c991f90> begin[:]
call[name[argv].insert, parameter[constant[2], constant[test]]]
if name[extra_args] begin[:]
call[name[argv].extend, parameter[name[extra_args]]]
return[call[name[runner], parameter[name[argv]]]] | keyword[def] identifier[run] ( identifier[app] , identifier[argv] = identifier[sys] . identifier[argv] , identifier[extra_args] = keyword[None] ):
literal[string]
keyword[if] identifier[app] keyword[not] keyword[in] identifier[argv] [: literal[int] ]:
identifier[argv] . identifier[insert] ( literal[int] , identifier[app] )
keyword[if] identifier[len] ( identifier[argv] )< literal[int] keyword[and] literal[string] keyword[not] keyword[in] identifier[argv] [: literal[int] ]:
identifier[argv] . identifier[insert] ( literal[int] , literal[string] )
keyword[if] identifier[extra_args] :
identifier[argv] . identifier[extend] ( identifier[extra_args] )
keyword[return] identifier[runner] ( identifier[argv] ) | def run(app, argv=sys.argv, extra_args=None):
"""
Run commands in a plain django environment
:param app: application
:param argv: arguments (default to sys.argv)
:param extra_args: list of extra arguments
"""
if app not in argv[:2]:
# app is automatically added if not present
argv.insert(1, app) # depends on [control=['if'], data=['app']]
if len(argv) < 3 and 'test' not in argv[:2]:
# test argument is added if no argument is passed
argv.insert(2, 'test') # depends on [control=['if'], data=[]]
if extra_args:
argv.extend(extra_args) # depends on [control=['if'], data=[]]
return runner(argv) |
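
A hypothetical invocation; 'myapp' and the flag are placeholders, and runner is the module's own command runner:

# Hypothetical call: becomes runner(['manage.py', 'myapp', 'test', '--verbosity', '2']).
run('myapp', argv=['manage.py'], extra_args=['--verbosity', '2'])
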
def sort_and_print_entries(entries, args):
"""Sort the entries, applying the filters first if necessary."""
# Extract the proper number type.
is_float = args.number_type in ("float", "real", "f", "r")
signed = args.signed or args.number_type in ("real", "r")
alg = (
natsort.ns.FLOAT * is_float
| natsort.ns.SIGNED * signed
| natsort.ns.NOEXP * (not args.exp)
| natsort.ns.PATH * args.paths
| natsort.ns.LOCALE * args.locale
)
# Pre-remove entries that don't pass the filtering criteria
# Make sure we use the same searching algorithm for filtering
# as for sorting.
do_filter = args.filter is not None or args.reverse_filter is not None
if do_filter or args.exclude:
inp_options = (
natsort.ns.FLOAT * is_float
| natsort.ns.SIGNED * signed
| natsort.ns.NOEXP * (not args.exp)
)
regex = regex_chooser(inp_options)
if args.filter is not None:
lows, highs = ([f[0] for f in args.filter], [f[1] for f in args.filter])
entries = [
entry
for entry in entries
if keep_entry_range(entry, lows, highs, float, regex)
]
if args.reverse_filter is not None:
lows, highs = (
[f[0] for f in args.reverse_filter],
[f[1] for f in args.reverse_filter],
)
entries = [
entry
for entry in entries
if not keep_entry_range(entry, lows, highs, float, regex)
]
if args.exclude:
exclude = set(args.exclude)
entries = [
entry
for entry in entries
if keep_entry_value(entry, exclude, float, regex)
]
# Print off the sorted results
for entry in natsort.natsorted(entries, reverse=args.reverse, alg=alg):
print(entry) | def function[sort_and_print_entries, parameter[entries, args]]:
constant[Sort the entries, applying the filters first if necessary.]
variable[is_float] assign[=] compare[name[args].number_type in tuple[[<ast.Constant object at 0x7da1b0b4bfd0>, <ast.Constant object at 0x7da1b0b497e0>, <ast.Constant object at 0x7da1b0b4a050>, <ast.Constant object at 0x7da1b0b4bf70>]]]
variable[signed] assign[=] <ast.BoolOp object at 0x7da1b0b4b370>
variable[alg] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[natsort].ns.FLOAT * name[is_float]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[name[natsort].ns.SIGNED * name[signed]]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[name[natsort].ns.NOEXP * <ast.UnaryOp object at 0x7da1b0b489a0>]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[name[natsort].ns.PATH * name[args].paths]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[name[natsort].ns.LOCALE * name[args].locale]]
variable[do_filter] assign[=] <ast.BoolOp object at 0x7da1b0b48640>
if <ast.BoolOp object at 0x7da1b0b4b550> begin[:]
variable[inp_options] assign[=] binary_operation[binary_operation[binary_operation[name[natsort].ns.FLOAT * name[is_float]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[name[natsort].ns.SIGNED * name[signed]]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[name[natsort].ns.NOEXP * <ast.UnaryOp object at 0x7da1b0b48250>]]
variable[regex] assign[=] call[name[regex_chooser], parameter[name[inp_options]]]
if compare[name[args].filter is_not constant[None]] begin[:]
<ast.Tuple object at 0x7da1b0b48310> assign[=] tuple[[<ast.ListComp object at 0x7da1b0b486a0>, <ast.ListComp object at 0x7da1b0b4a1d0>]]
variable[entries] assign[=] <ast.ListComp object at 0x7da1b0b49780>
if compare[name[args].reverse_filter is_not constant[None]] begin[:]
<ast.Tuple object at 0x7da1b0b4b580> assign[=] tuple[[<ast.ListComp object at 0x7da1b0b48280>, <ast.ListComp object at 0x7da1b0b4a710>]]
variable[entries] assign[=] <ast.ListComp object at 0x7da1b0b48370>
if name[args].exclude begin[:]
variable[exclude] assign[=] call[name[set], parameter[name[args].exclude]]
variable[entries] assign[=] <ast.ListComp object at 0x7da1b0bcbb80>
for taget[name[entry]] in starred[call[name[natsort].natsorted, parameter[name[entries]]]] begin[:]
call[name[print], parameter[name[entry]]] | keyword[def] identifier[sort_and_print_entries] ( identifier[entries] , identifier[args] ):
literal[string]
identifier[is_float] = identifier[args] . identifier[number_type] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] )
identifier[signed] = identifier[args] . identifier[signed] keyword[or] identifier[args] . identifier[number_type] keyword[in] ( literal[string] , literal[string] )
identifier[alg] =(
identifier[natsort] . identifier[ns] . identifier[FLOAT] * identifier[is_float]
| identifier[natsort] . identifier[ns] . identifier[SIGNED] * identifier[signed]
| identifier[natsort] . identifier[ns] . identifier[NOEXP] *( keyword[not] identifier[args] . identifier[exp] )
| identifier[natsort] . identifier[ns] . identifier[PATH] * identifier[args] . identifier[paths]
| identifier[natsort] . identifier[ns] . identifier[LOCALE] * identifier[args] . identifier[locale]
)
identifier[do_filter] = identifier[args] . identifier[filter] keyword[is] keyword[not] keyword[None] keyword[or] identifier[args] . identifier[reverse_filter] keyword[is] keyword[not] keyword[None]
keyword[if] identifier[do_filter] keyword[or] identifier[args] . identifier[exclude] :
identifier[inp_options] =(
identifier[natsort] . identifier[ns] . identifier[FLOAT] * identifier[is_float]
| identifier[natsort] . identifier[ns] . identifier[SIGNED] * identifier[signed]
| identifier[natsort] . identifier[ns] . identifier[NOEXP] *( keyword[not] identifier[args] . identifier[exp] )
)
identifier[regex] = identifier[regex_chooser] ( identifier[inp_options] )
keyword[if] identifier[args] . identifier[filter] keyword[is] keyword[not] keyword[None] :
identifier[lows] , identifier[highs] =([ identifier[f] [ literal[int] ] keyword[for] identifier[f] keyword[in] identifier[args] . identifier[filter] ],[ identifier[f] [ literal[int] ] keyword[for] identifier[f] keyword[in] identifier[args] . identifier[filter] ])
identifier[entries] =[
identifier[entry]
keyword[for] identifier[entry] keyword[in] identifier[entries]
keyword[if] identifier[keep_entry_range] ( identifier[entry] , identifier[lows] , identifier[highs] , identifier[float] , identifier[regex] )
]
keyword[if] identifier[args] . identifier[reverse_filter] keyword[is] keyword[not] keyword[None] :
identifier[lows] , identifier[highs] =(
[ identifier[f] [ literal[int] ] keyword[for] identifier[f] keyword[in] identifier[args] . identifier[reverse_filter] ],
[ identifier[f] [ literal[int] ] keyword[for] identifier[f] keyword[in] identifier[args] . identifier[reverse_filter] ],
)
identifier[entries] =[
identifier[entry]
keyword[for] identifier[entry] keyword[in] identifier[entries]
keyword[if] keyword[not] identifier[keep_entry_range] ( identifier[entry] , identifier[lows] , identifier[highs] , identifier[float] , identifier[regex] )
]
keyword[if] identifier[args] . identifier[exclude] :
identifier[exclude] = identifier[set] ( identifier[args] . identifier[exclude] )
identifier[entries] =[
identifier[entry]
keyword[for] identifier[entry] keyword[in] identifier[entries]
keyword[if] identifier[keep_entry_value] ( identifier[entry] , identifier[exclude] , identifier[float] , identifier[regex] )
]
keyword[for] identifier[entry] keyword[in] identifier[natsort] . identifier[natsorted] ( identifier[entries] , identifier[reverse] = identifier[args] . identifier[reverse] , identifier[alg] = identifier[alg] ):
identifier[print] ( identifier[entry] ) | def sort_and_print_entries(entries, args):
"""Sort the entries, applying the filters first if necessary."""
# Extract the proper number type.
is_float = args.number_type in ('float', 'real', 'f', 'r')
signed = args.signed or args.number_type in ('real', 'r')
alg = natsort.ns.FLOAT * is_float | natsort.ns.SIGNED * signed | natsort.ns.NOEXP * (not args.exp) | natsort.ns.PATH * args.paths | natsort.ns.LOCALE * args.locale
# Pre-remove entries that don't pass the filtering criteria
# Make sure we use the same searching algorithm for filtering
# as for sorting.
do_filter = args.filter is not None or args.reverse_filter is not None
if do_filter or args.exclude:
inp_options = natsort.ns.FLOAT * is_float | natsort.ns.SIGNED * signed | natsort.ns.NOEXP * (not args.exp)
regex = regex_chooser(inp_options)
if args.filter is not None:
(lows, highs) = ([f[0] for f in args.filter], [f[1] for f in args.filter])
entries = [entry for entry in entries if keep_entry_range(entry, lows, highs, float, regex)] # depends on [control=['if'], data=[]]
if args.reverse_filter is not None:
(lows, highs) = ([f[0] for f in args.reverse_filter], [f[1] for f in args.reverse_filter])
entries = [entry for entry in entries if not keep_entry_range(entry, lows, highs, float, regex)] # depends on [control=['if'], data=[]]
if args.exclude:
exclude = set(args.exclude)
entries = [entry for entry in entries if keep_entry_value(entry, exclude, float, regex)] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Print off the sorted results
for entry in natsort.natsorted(entries, reverse=args.reverse, alg=alg):
print(entry) # depends on [control=['for'], data=['entry']] |
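The bitmask arithmetic above composes a natsort algorithm value; multiplying a flag by a bool (e.g. natsort.ns.NOEXP * (not args.exp)) enables it conditionally. A standalone sketch of the same composition:

import natsort

data = ['a5.4', 'a5.32', 'a-5']
alg = natsort.ns.FLOAT | natsort.ns.SIGNED  # parse signed floats inside strings
print(natsort.natsorted(data, alg=alg))     # ['a-5', 'a5.32', 'a5.4']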
def handle(cls, value, **kwargs):
"""Split the supplied string on the given delimiter, providing a list.
Format of value:
<delimiter>::<value>
For example:
Subnets: ${split ,::subnet-1,subnet-2,subnet-3}
Would result in the variable `Subnets` getting a list consisting of:
["subnet-1", "subnet-2", "subnet-3"]
This is particularly useful when getting an output from another stack
that contains a list. For example, the standard vpc blueprint outputs
the list of Subnets it creates as a pair of Outputs (PublicSubnets,
PrivateSubnets) that are comma separated, so you could use this in your
config:
Subnets: ${split ,::${output vpc::PrivateSubnets}}
"""
try:
delimiter, text = value.split("::", 1)
except ValueError:
raise ValueError("Invalid value for split: %s. Must be in "
"<delimiter>::<text> format." % value)
return text.split(delimiter) | def function[handle, parameter[cls, value]]:
constant[Split the supplied string on the given delimiter, providing a list.
Format of value:
<delimiter>::<value>
For example:
Subnets: ${split ,::subnet-1,subnet-2,subnet-3}
Would result in the variable `Subnets` getting a list consisting of:
["subnet-1", "subnet-2", "subnet-3"]
This is particularly useful when getting an output from another stack
that contains a list. For example, the standard vpc blueprint outputs
the list of Subnets it creates as a pair of Outputs (PublicSubnets,
PrivateSubnets) that are comma separated, so you could use this in your
config:
Subnets: ${split ,::${output vpc::PrivateSubnets}}
]
<ast.Try object at 0x7da18eb56e00>
return[call[name[text].split, parameter[name[delimiter]]]] | keyword[def] identifier[handle] ( identifier[cls] , identifier[value] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
identifier[delimiter] , identifier[text] = identifier[value] . identifier[split] ( literal[string] , literal[int] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] % identifier[value] )
keyword[return] identifier[text] . identifier[split] ( identifier[delimiter] ) | def handle(cls, value, **kwargs):
"""Split the supplied string on the given delimiter, providing a list.
Format of value:
<delimiter>::<value>
For example:
Subnets: ${split ,::subnet-1,subnet-2,subnet-3}
Would result in the variable `Subnets` getting a list consisting of:
["subnet-1", "subnet-2", "subnet-3"]
This is particularly useful when getting an output from another stack
that contains a list. For example, the standard vpc blueprint outputs
the list of Subnets it creates as a pair of Outputs (PublicSubnets,
PrivateSubnets) that are comma separated, so you could use this in your
config:
Subnets: ${split ,::${output vpc::PrivateSubnets}}
"""
try:
(delimiter, text) = value.split('::', 1) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('Invalid value for split: %s. Must be in <delimiter>::<text> format.' % value) # depends on [control=['except'], data=[]]
return text.split(delimiter) |
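A quick standalone illustration of the <delimiter>::<text> contract documented above (the class hosting handle is not shown in this excerpt):

value = ',::subnet-1,subnet-2,subnet-3'
delimiter, text = value.split('::', 1)   # same parse as handle()
print(text.split(delimiter))             # ['subnet-1', 'subnet-2', 'subnet-3']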
def serialize_task_spec(self, spec, elem):
"""
Serializes common attributes of :meth:`SpiffWorkflow.specs.TaskSpec`.
"""
if spec.id is not None:
SubElement(elem, 'id').text = str(spec.id)
SubElement(elem, 'name').text = spec.name
if spec.description:
SubElement(elem, 'description').text = spec.description
if spec.manual:
SubElement(elem, 'manual')
if spec.internal:
SubElement(elem, 'internal')
SubElement(elem, 'lookahead').text = str(spec.lookahead)
inputs = [t.name for t in spec.inputs]
outputs = [t.name for t in spec.outputs]
self.serialize_value_list(SubElement(elem, 'inputs'), inputs)
self.serialize_value_list(SubElement(elem, 'outputs'), outputs)
self.serialize_value_map(SubElement(elem, 'data'), spec.data)
self.serialize_value_map(SubElement(elem, 'defines'), spec.defines)
self.serialize_value_list(SubElement(elem, 'pre-assign'),
spec.pre_assign)
self.serialize_value_list(SubElement(elem, 'post-assign'),
spec.post_assign)
# Note: Events are not serialized; this is documented in
# the TaskSpec API docs.
return elem | def function[serialize_task_spec, parameter[self, spec, elem]]:
constant[
Serializes common attributes of :meth:`SpiffWorkflow.specs.TaskSpec`.
]
if compare[name[spec].id is_not constant[None]] begin[:]
call[name[SubElement], parameter[name[elem], constant[id]]].text assign[=] call[name[str], parameter[name[spec].id]]
call[name[SubElement], parameter[name[elem], constant[name]]].text assign[=] name[spec].name
if name[spec].description begin[:]
call[name[SubElement], parameter[name[elem], constant[description]]].text assign[=] name[spec].description
if name[spec].manual begin[:]
call[name[SubElement], parameter[name[elem], constant[manual]]]
if name[spec].internal begin[:]
call[name[SubElement], parameter[name[elem], constant[internal]]]
call[name[SubElement], parameter[name[elem], constant[lookahead]]].text assign[=] call[name[str], parameter[name[spec].lookahead]]
variable[inputs] assign[=] <ast.ListComp object at 0x7da1b014c040>
variable[outputs] assign[=] <ast.ListComp object at 0x7da1b014fee0>
call[name[self].serialize_value_list, parameter[call[name[SubElement], parameter[name[elem], constant[inputs]]], name[inputs]]]
call[name[self].serialize_value_list, parameter[call[name[SubElement], parameter[name[elem], constant[outputs]]], name[outputs]]]
call[name[self].serialize_value_map, parameter[call[name[SubElement], parameter[name[elem], constant[data]]], name[spec].data]]
call[name[self].serialize_value_map, parameter[call[name[SubElement], parameter[name[elem], constant[defines]]], name[spec].defines]]
call[name[self].serialize_value_list, parameter[call[name[SubElement], parameter[name[elem], constant[pre-assign]]], name[spec].pre_assign]]
call[name[self].serialize_value_list, parameter[call[name[SubElement], parameter[name[elem], constant[post-assign]]], name[spec].post_assign]]
return[name[elem]] | keyword[def] identifier[serialize_task_spec] ( identifier[self] , identifier[spec] , identifier[elem] ):
literal[string]
keyword[if] identifier[spec] . identifier[id] keyword[is] keyword[not] keyword[None] :
identifier[SubElement] ( identifier[elem] , literal[string] ). identifier[text] = identifier[str] ( identifier[spec] . identifier[id] )
identifier[SubElement] ( identifier[elem] , literal[string] ). identifier[text] = identifier[spec] . identifier[name]
keyword[if] identifier[spec] . identifier[description] :
identifier[SubElement] ( identifier[elem] , literal[string] ). identifier[text] = identifier[spec] . identifier[description]
keyword[if] identifier[spec] . identifier[manual] :
identifier[SubElement] ( identifier[elem] , literal[string] )
keyword[if] identifier[spec] . identifier[internal] :
identifier[SubElement] ( identifier[elem] , literal[string] )
identifier[SubElement] ( identifier[elem] , literal[string] ). identifier[text] = identifier[str] ( identifier[spec] . identifier[lookahead] )
identifier[inputs] =[ identifier[t] . identifier[name] keyword[for] identifier[t] keyword[in] identifier[spec] . identifier[inputs] ]
identifier[outputs] =[ identifier[t] . identifier[name] keyword[for] identifier[t] keyword[in] identifier[spec] . identifier[outputs] ]
identifier[self] . identifier[serialize_value_list] ( identifier[SubElement] ( identifier[elem] , literal[string] ), identifier[inputs] )
identifier[self] . identifier[serialize_value_list] ( identifier[SubElement] ( identifier[elem] , literal[string] ), identifier[outputs] )
identifier[self] . identifier[serialize_value_map] ( identifier[SubElement] ( identifier[elem] , literal[string] ), identifier[spec] . identifier[data] )
identifier[self] . identifier[serialize_value_map] ( identifier[SubElement] ( identifier[elem] , literal[string] ), identifier[spec] . identifier[defines] )
identifier[self] . identifier[serialize_value_list] ( identifier[SubElement] ( identifier[elem] , literal[string] ),
identifier[spec] . identifier[pre_assign] )
identifier[self] . identifier[serialize_value_list] ( identifier[SubElement] ( identifier[elem] , literal[string] ),
identifier[spec] . identifier[post_assign] )
keyword[return] identifier[elem] | def serialize_task_spec(self, spec, elem):
"""
Serializes common attributes of :meth:`SpiffWorkflow.specs.TaskSpec`.
"""
if spec.id is not None:
SubElement(elem, 'id').text = str(spec.id) # depends on [control=['if'], data=[]]
SubElement(elem, 'name').text = spec.name
if spec.description:
SubElement(elem, 'description').text = spec.description # depends on [control=['if'], data=[]]
if spec.manual:
SubElement(elem, 'manual') # depends on [control=['if'], data=[]]
if spec.internal:
SubElement(elem, 'internal') # depends on [control=['if'], data=[]]
SubElement(elem, 'lookahead').text = str(spec.lookahead)
inputs = [t.name for t in spec.inputs]
outputs = [t.name for t in spec.outputs]
self.serialize_value_list(SubElement(elem, 'inputs'), inputs)
self.serialize_value_list(SubElement(elem, 'outputs'), outputs)
self.serialize_value_map(SubElement(elem, 'data'), spec.data)
self.serialize_value_map(SubElement(elem, 'defines'), spec.defines)
self.serialize_value_list(SubElement(elem, 'pre-assign'), spec.pre_assign)
self.serialize_value_list(SubElement(elem, 'post-assign'), spec.post_assign)
# Note: Events are not serialized; this is documented in
# the TaskSpec API docs.
return elem |
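The SubElement pattern used above, reproduced standalone with xml.etree; the serialize_value_* helpers belong to the surrounding serializer and are only mimicked here for a single list:

from xml.etree.ElementTree import Element, SubElement, tostring

elem = Element('task-spec')
SubElement(elem, 'name').text = 'my-task'
inputs = SubElement(elem, 'inputs')       # container node, as in the method above
for name in ('start', 'gateway'):
    SubElement(inputs, 'value').text = name
print(tostring(elem).decode())
# <task-spec><name>my-task</name><inputs><value>start</value><value>gateway</value></inputs></task-spec>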
def case_variants(*elements):
"""
For configs which take case-insensitive options, it is necessary to extend the list with
    various common case variants (covering all combinations is not practical). In the future, this should
    be removed when parser filters are made case-insensitive.
Args:
*elements (str): list of elements which need case-sensitive expansion, you should use
default case such as `Ciphers`, `MACs`, `UsePAM`, `MaxAuthTries`
Returns:
list: list of all expanded elements
"""
expanded_list = []
for element in elements:
low = element.lower()
up = element.upper()
title = element.title()
# Inner case conversion, such as `MACs` or `UsePAM` to `Macs` and `UsePam`
converted = []
for i, letter in enumerate(element):
if i == 0:
converted.append(letter)
else:
if element[i - 1].isupper():
converted.append(letter.lower())
else:
converted.append(letter)
converted = "".join(converted)
for new_element in (element, converted, low, up, title):
if new_element not in expanded_list:
expanded_list.append(new_element)
return expanded_list | def function[case_variants, parameter[]]:
constant[
For configs which take case-insensitive options, it is necessary to extend the list with
    various common case variants (covering all combinations is not practical). In the future, this should
    be removed when parser filters are made case-insensitive.
Args:
*elements (str): list of elements which need case-sensitive expansion, you should use
default case such as `Ciphers`, `MACs`, `UsePAM`, `MaxAuthTries`
Returns:
list: list of all expanded elements
]
variable[expanded_list] assign[=] list[[]]
for taget[name[element]] in starred[name[elements]] begin[:]
variable[low] assign[=] call[name[element].lower, parameter[]]
variable[up] assign[=] call[name[element].upper, parameter[]]
variable[title] assign[=] call[name[element].title, parameter[]]
variable[converted] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b184aa10>, <ast.Name object at 0x7da1b184a2c0>]]] in starred[call[name[enumerate], parameter[name[element]]]] begin[:]
if compare[name[i] equal[==] constant[0]] begin[:]
call[name[converted].append, parameter[name[letter]]]
variable[converted] assign[=] call[constant[].join, parameter[name[converted]]]
for taget[name[new_element]] in starred[tuple[[<ast.Name object at 0x7da20cabc460>, <ast.Name object at 0x7da20cabdbd0>, <ast.Name object at 0x7da20cabcfd0>, <ast.Name object at 0x7da20cabc6d0>, <ast.Name object at 0x7da20cabcc10>]]] begin[:]
if compare[name[new_element] <ast.NotIn object at 0x7da2590d7190> name[expanded_list]] begin[:]
call[name[expanded_list].append, parameter[name[new_element]]]
return[name[expanded_list]] | keyword[def] identifier[case_variants] (* identifier[elements] ):
literal[string]
identifier[expanded_list] =[]
keyword[for] identifier[element] keyword[in] identifier[elements] :
identifier[low] = identifier[element] . identifier[lower] ()
identifier[up] = identifier[element] . identifier[upper] ()
identifier[title] = identifier[element] . identifier[title] ()
identifier[converted] =[]
keyword[for] identifier[i] , identifier[letter] keyword[in] identifier[enumerate] ( identifier[element] ):
keyword[if] identifier[i] == literal[int] :
identifier[converted] . identifier[append] ( identifier[letter] )
keyword[else] :
keyword[if] identifier[element] [ identifier[i] - literal[int] ]. identifier[isupper] ():
identifier[converted] . identifier[append] ( identifier[letter] . identifier[lower] ())
keyword[else] :
identifier[converted] . identifier[append] ( identifier[letter] )
identifier[converted] = literal[string] . identifier[join] ( identifier[converted] )
keyword[for] identifier[new_element] keyword[in] ( identifier[element] , identifier[converted] , identifier[low] , identifier[up] , identifier[title] ):
keyword[if] identifier[new_element] keyword[not] keyword[in] identifier[expanded_list] :
identifier[expanded_list] . identifier[append] ( identifier[new_element] )
keyword[return] identifier[expanded_list] | def case_variants(*elements):
"""
For configs which take case-insensitive options, it is necessary to extend the list with
    various common case variants (covering all combinations is not practical). In the future, this should
    be removed when parser filters are made case-insensitive.
Args:
*elements (str): list of elements which need case-sensitive expansion, you should use
default case such as `Ciphers`, `MACs`, `UsePAM`, `MaxAuthTries`
Returns:
list: list of all expanded elements
"""
expanded_list = []
for element in elements:
low = element.lower()
up = element.upper()
title = element.title()
# Inner case conversion, such as `MACs` or `UsePAM` to `Macs` and `UsePam`
converted = []
for (i, letter) in enumerate(element):
if i == 0:
converted.append(letter) # depends on [control=['if'], data=[]]
elif element[i - 1].isupper():
converted.append(letter.lower()) # depends on [control=['if'], data=[]]
else:
converted.append(letter) # depends on [control=['for'], data=[]]
converted = ''.join(converted)
for new_element in (element, converted, low, up, title):
if new_element not in expanded_list:
expanded_list.append(new_element) # depends on [control=['if'], data=['new_element', 'expanded_list']] # depends on [control=['for'], data=['new_element']] # depends on [control=['for'], data=['element']]
return expanded_list |
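Calling the function above; the expected output is derived by tracing the code (order follows the (element, converted, low, up, title) tuple, with duplicates dropped):

print(case_variants('MACs', 'UsePAM'))
# ['MACs', 'Macs', 'macs', 'MACS',
#  'UsePAM', 'UsePam', 'usepam', 'USEPAM', 'Usepam']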
def wait_until(func, wait_for=None, sleep_for=0.5):
"""Test for a function and wait for it to return a truth value or to timeout.
Returns the value or None if a timeout is given and the function didn't return
    inside the timeout
Args:
func (callable): a function to be evaluated, use lambda if parameters are required
wait_for (float, integer, None): the maximum time to wait, or None for an infinite loop
    sleep_for (float, integer): how long to gevent.sleep between calls
Returns:
func(): result of func, if truth value, or None"""
res = func()
if res:
return res
if wait_for:
deadline = time.time() + wait_for
while not res and time.time() <= deadline:
gevent.sleep(sleep_for)
res = func()
else:
while not res:
gevent.sleep(sleep_for)
res = func()
return res | def function[wait_until, parameter[func, wait_for, sleep_for]]:
constant[Test for a function and wait for it to return a truth value or to timeout.
Returns the value or None if a timeout is given and the function didn't return
    inside the timeout
Args:
func (callable): a function to be evaluated, use lambda if parameters are required
wait_for (float, integer, None): the maximum time to wait, or None for an infinite loop
    sleep_for (float, integer): how long to gevent.sleep between calls
Returns:
func(): result of func, if truth value, or None]
variable[res] assign[=] call[name[func], parameter[]]
if name[res] begin[:]
return[name[res]]
if name[wait_for] begin[:]
variable[deadline] assign[=] binary_operation[call[name[time].time, parameter[]] + name[wait_for]]
while <ast.BoolOp object at 0x7da1b19ba740> begin[:]
call[name[gevent].sleep, parameter[name[sleep_for]]]
variable[res] assign[=] call[name[func], parameter[]]
return[name[res]] | keyword[def] identifier[wait_until] ( identifier[func] , identifier[wait_for] = keyword[None] , identifier[sleep_for] = literal[int] ):
literal[string]
identifier[res] = identifier[func] ()
keyword[if] identifier[res] :
keyword[return] identifier[res]
keyword[if] identifier[wait_for] :
identifier[deadline] = identifier[time] . identifier[time] ()+ identifier[wait_for]
keyword[while] keyword[not] identifier[res] keyword[and] identifier[time] . identifier[time] ()<= identifier[deadline] :
identifier[gevent] . identifier[sleep] ( identifier[sleep_for] )
identifier[res] = identifier[func] ()
keyword[else] :
keyword[while] keyword[not] identifier[res] :
identifier[gevent] . identifier[sleep] ( identifier[sleep_for] )
identifier[res] = identifier[func] ()
keyword[return] identifier[res] | def wait_until(func, wait_for=None, sleep_for=0.5):
"""Test for a function and wait for it to return a truth value or to timeout.
Returns the value or None if a timeout is given and the function didn't return
    inside the timeout
Args:
func (callable): a function to be evaluated, use lambda if parameters are required
wait_for (float, integer, None): the maximum time to wait, or None for an infinite loop
    sleep_for (float, integer): how long to gevent.sleep between calls
Returns:
func(): result of func, if truth value, or None"""
res = func()
if res:
return res # depends on [control=['if'], data=[]]
if wait_for:
deadline = time.time() + wait_for
while not res and time.time() <= deadline:
gevent.sleep(sleep_for)
res = func() # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
else:
while not res:
gevent.sleep(sleep_for)
res = func() # depends on [control=['while'], data=[]]
return res |
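A hedged usage sketch for wait_until; it assumes the gevent import from the host module, and the polled condition and greenlet are illustrative:

import gevent

flag = []
def setter():
    gevent.sleep(1)
    flag.append(True)

gevent.spawn(setter)
result = wait_until(lambda: len(flag) > 0, wait_for=5, sleep_for=0.1)
# result is True shortly after ~1s; None if the 5s deadline had expired first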
def _validate_interface_option(attr, value, addrfam='inet'):
'''lookup the validation function for a [addrfam][attr] and
return the results
:param attr: attribute name
:param value: raw setting value
    :param addrfam: address family (inet, inet6)
'''
valid, _value, errmsg = False, value, 'Unknown validator'
attrmaps = ATTRMAPS.get(addrfam, [])
for attrmap in attrmaps:
if attr in attrmap:
validate_func = attrmap[attr]
(valid, _value, errmsg) = validate_func(value)
break
return (valid, _value, errmsg) | def function[_validate_interface_option, parameter[attr, value, addrfam]]:
constant[lookup the validation function for a [addrfam][attr] and
return the results
:param attr: attribute name
:param value: raw setting value
    :param addrfam: address family (inet, inet6)
]
<ast.Tuple object at 0x7da1b1f9bc40> assign[=] tuple[[<ast.Constant object at 0x7da1b1f99f00>, <ast.Name object at 0x7da1b1f9bf10>, <ast.Constant object at 0x7da1b1f9aa10>]]
variable[attrmaps] assign[=] call[name[ATTRMAPS].get, parameter[name[addrfam], list[[]]]]
for taget[name[attrmap]] in starred[name[attrmaps]] begin[:]
if compare[name[attr] in name[attrmap]] begin[:]
variable[validate_func] assign[=] call[name[attrmap]][name[attr]]
<ast.Tuple object at 0x7da1b1f9b700> assign[=] call[name[validate_func], parameter[name[value]]]
break
return[tuple[[<ast.Name object at 0x7da1b1f9aa40>, <ast.Name object at 0x7da1b1f9be50>, <ast.Name object at 0x7da1b1f9ae00>]]] | keyword[def] identifier[_validate_interface_option] ( identifier[attr] , identifier[value] , identifier[addrfam] = literal[string] ):
literal[string]
identifier[valid] , identifier[_value] , identifier[errmsg] = keyword[False] , identifier[value] , literal[string]
identifier[attrmaps] = identifier[ATTRMAPS] . identifier[get] ( identifier[addrfam] ,[])
keyword[for] identifier[attrmap] keyword[in] identifier[attrmaps] :
keyword[if] identifier[attr] keyword[in] identifier[attrmap] :
identifier[validate_func] = identifier[attrmap] [ identifier[attr] ]
( identifier[valid] , identifier[_value] , identifier[errmsg] )= identifier[validate_func] ( identifier[value] )
keyword[break]
keyword[return] ( identifier[valid] , identifier[_value] , identifier[errmsg] ) | def _validate_interface_option(attr, value, addrfam='inet'):
"""lookup the validation function for a [addrfam][attr] and
return the results
:param attr: attribute name
:param value: raw setting value
    :param addrfam: address family (inet, inet6)
"""
(valid, _value, errmsg) = (False, value, 'Unknown validator')
attrmaps = ATTRMAPS.get(addrfam, [])
for attrmap in attrmaps:
if attr in attrmap:
validate_func = attrmap[attr]
(valid, _value, errmsg) = validate_func(value)
break # depends on [control=['if'], data=['attr', 'attrmap']] # depends on [control=['for'], data=['attrmap']]
return (valid, _value, errmsg) |
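ATTRMAPS is a module-level table that is not shown in this excerpt; a hypothetical shape that satisfies the lookup above (the validator name and MTU bounds are illustrative, not the real module's code):

def _validate_mtu_value(value):           # hypothetical validator
    try:
        mtu = int(value)
        return (68 <= mtu <= 65535, mtu, 'invalid MTU value')
    except (TypeError, ValueError):
        return (False, value, 'MTU must be an integer')

ATTRMAPS = {
    'inet': [{'mtu': _validate_mtu_value}],   # list of {attr: validator} maps
}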
def on_install(self, editor):
"""
Add the folding menu to the editor, on install.
    :param editor: editor instance on which the mode has been installed.
"""
super(FoldingPanel, self).on_install(editor)
self.context_menu = QtWidgets.QMenu(_('Folding'), self.editor)
action = self.action_collapse = QtWidgets.QAction(
_('Collapse'), self.context_menu)
action.setShortcut('Shift+-')
action.triggered.connect(self._on_action_toggle)
self.context_menu.addAction(action)
action = self.action_expand = QtWidgets.QAction(_('Expand'),
self.context_menu)
action.setShortcut('Shift++')
action.triggered.connect(self._on_action_toggle)
self.context_menu.addAction(action)
self.context_menu.addSeparator()
action = self.action_collapse_all = QtWidgets.QAction(
_('Collapse all'), self.context_menu)
action.setShortcut('Ctrl+Shift+-')
action.triggered.connect(self._on_action_collapse_all_triggered)
self.context_menu.addAction(action)
action = self.action_expand_all = QtWidgets.QAction(
_('Expand all'), self.context_menu)
action.setShortcut('Ctrl+Shift++')
action.triggered.connect(self._on_action_expand_all_triggered)
self.context_menu.addAction(action)
self.editor.add_menu(self.context_menu) | def function[on_install, parameter[self, editor]]:
constant[
Add the folding menu to the editor, on install.
    :param editor: editor instance on which the mode has been installed.
]
call[call[name[super], parameter[name[FoldingPanel], name[self]]].on_install, parameter[name[editor]]]
name[self].context_menu assign[=] call[name[QtWidgets].QMenu, parameter[call[name[_], parameter[constant[Folding]]], name[self].editor]]
variable[action] assign[=] call[name[QtWidgets].QAction, parameter[call[name[_], parameter[constant[Collapse]]], name[self].context_menu]]
call[name[action].setShortcut, parameter[constant[Shift+-]]]
call[name[action].triggered.connect, parameter[name[self]._on_action_toggle]]
call[name[self].context_menu.addAction, parameter[name[action]]]
variable[action] assign[=] call[name[QtWidgets].QAction, parameter[call[name[_], parameter[constant[Expand]]], name[self].context_menu]]
call[name[action].setShortcut, parameter[constant[Shift++]]]
call[name[action].triggered.connect, parameter[name[self]._on_action_toggle]]
call[name[self].context_menu.addAction, parameter[name[action]]]
call[name[self].context_menu.addSeparator, parameter[]]
variable[action] assign[=] call[name[QtWidgets].QAction, parameter[call[name[_], parameter[constant[Collapse all]]], name[self].context_menu]]
call[name[action].setShortcut, parameter[constant[Ctrl+Shift+-]]]
call[name[action].triggered.connect, parameter[name[self]._on_action_collapse_all_triggered]]
call[name[self].context_menu.addAction, parameter[name[action]]]
variable[action] assign[=] call[name[QtWidgets].QAction, parameter[call[name[_], parameter[constant[Expand all]]], name[self].context_menu]]
call[name[action].setShortcut, parameter[constant[Ctrl+Shift++]]]
call[name[action].triggered.connect, parameter[name[self]._on_action_expand_all_triggered]]
call[name[self].context_menu.addAction, parameter[name[action]]]
call[name[self].editor.add_menu, parameter[name[self].context_menu]] | keyword[def] identifier[on_install] ( identifier[self] , identifier[editor] ):
literal[string]
identifier[super] ( identifier[FoldingPanel] , identifier[self] ). identifier[on_install] ( identifier[editor] )
identifier[self] . identifier[context_menu] = identifier[QtWidgets] . identifier[QMenu] ( identifier[_] ( literal[string] ), identifier[self] . identifier[editor] )
identifier[action] = identifier[self] . identifier[action_collapse] = identifier[QtWidgets] . identifier[QAction] (
identifier[_] ( literal[string] ), identifier[self] . identifier[context_menu] )
identifier[action] . identifier[setShortcut] ( literal[string] )
identifier[action] . identifier[triggered] . identifier[connect] ( identifier[self] . identifier[_on_action_toggle] )
identifier[self] . identifier[context_menu] . identifier[addAction] ( identifier[action] )
identifier[action] = identifier[self] . identifier[action_expand] = identifier[QtWidgets] . identifier[QAction] ( identifier[_] ( literal[string] ),
identifier[self] . identifier[context_menu] )
identifier[action] . identifier[setShortcut] ( literal[string] )
identifier[action] . identifier[triggered] . identifier[connect] ( identifier[self] . identifier[_on_action_toggle] )
identifier[self] . identifier[context_menu] . identifier[addAction] ( identifier[action] )
identifier[self] . identifier[context_menu] . identifier[addSeparator] ()
identifier[action] = identifier[self] . identifier[action_collapse_all] = identifier[QtWidgets] . identifier[QAction] (
identifier[_] ( literal[string] ), identifier[self] . identifier[context_menu] )
identifier[action] . identifier[setShortcut] ( literal[string] )
identifier[action] . identifier[triggered] . identifier[connect] ( identifier[self] . identifier[_on_action_collapse_all_triggered] )
identifier[self] . identifier[context_menu] . identifier[addAction] ( identifier[action] )
identifier[action] = identifier[self] . identifier[action_expand_all] = identifier[QtWidgets] . identifier[QAction] (
identifier[_] ( literal[string] ), identifier[self] . identifier[context_menu] )
identifier[action] . identifier[setShortcut] ( literal[string] )
identifier[action] . identifier[triggered] . identifier[connect] ( identifier[self] . identifier[_on_action_expand_all_triggered] )
identifier[self] . identifier[context_menu] . identifier[addAction] ( identifier[action] )
identifier[self] . identifier[editor] . identifier[add_menu] ( identifier[self] . identifier[context_menu] ) | def on_install(self, editor):
"""
Add the folding menu to the editor, on install.
    :param editor: editor instance on which the mode has been installed.
"""
super(FoldingPanel, self).on_install(editor)
self.context_menu = QtWidgets.QMenu(_('Folding'), self.editor)
action = self.action_collapse = QtWidgets.QAction(_('Collapse'), self.context_menu)
action.setShortcut('Shift+-')
action.triggered.connect(self._on_action_toggle)
self.context_menu.addAction(action)
action = self.action_expand = QtWidgets.QAction(_('Expand'), self.context_menu)
action.setShortcut('Shift++')
action.triggered.connect(self._on_action_toggle)
self.context_menu.addAction(action)
self.context_menu.addSeparator()
action = self.action_collapse_all = QtWidgets.QAction(_('Collapse all'), self.context_menu)
action.setShortcut('Ctrl+Shift+-')
action.triggered.connect(self._on_action_collapse_all_triggered)
self.context_menu.addAction(action)
action = self.action_expand_all = QtWidgets.QAction(_('Expand all'), self.context_menu)
action.setShortcut('Ctrl+Shift++')
action.triggered.connect(self._on_action_expand_all_triggered)
self.context_menu.addAction(action)
self.editor.add_menu(self.context_menu) |
def _connected(cm, nodes, connection):
"""Test connectivity for the connectivity matrix."""
if nodes is not None:
cm = cm[np.ix_(nodes, nodes)]
num_components, _ = connected_components(cm, connection=connection)
return num_components < 2 | def function[_connected, parameter[cm, nodes, connection]]:
constant[Test connectivity for the connectivity matrix.]
if compare[name[nodes] is_not constant[None]] begin[:]
variable[cm] assign[=] call[name[cm]][call[name[np].ix_, parameter[name[nodes], name[nodes]]]]
<ast.Tuple object at 0x7da1b26af0d0> assign[=] call[name[connected_components], parameter[name[cm]]]
return[compare[name[num_components] less[<] constant[2]]] | keyword[def] identifier[_connected] ( identifier[cm] , identifier[nodes] , identifier[connection] ):
literal[string]
keyword[if] identifier[nodes] keyword[is] keyword[not] keyword[None] :
identifier[cm] = identifier[cm] [ identifier[np] . identifier[ix_] ( identifier[nodes] , identifier[nodes] )]
identifier[num_components] , identifier[_] = identifier[connected_components] ( identifier[cm] , identifier[connection] = identifier[connection] )
keyword[return] identifier[num_components] < literal[int] | def _connected(cm, nodes, connection):
"""Test connectivity for the connectivity matrix."""
if nodes is not None:
cm = cm[np.ix_(nodes, nodes)] # depends on [control=['if'], data=['nodes']]
(num_components, _) = connected_components(cm, connection=connection)
return num_components < 2 |
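connected_components here matches the signature of scipy's csgraph routine; a standalone check on a 3-node adjacency matrix where node 2 is isolated:

import numpy as np
from scipy.sparse.csgraph import connected_components

cm = np.array([[0, 1, 0],
               [1, 0, 0],
               [0, 0, 0]])
n, labels = connected_components(cm, connection='weak')
print(n)        # 2 -> _connected() would report the graph as disconnected
print(labels)   # [0 0 1]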
def getScales(self,term_i=None):
"""
Returns the Parameters
Args:
term_i: index of the term we are interested in
if term_i==None returns the whole vector of parameters
"""
if term_i==None:
RV = self.vd.getScales()
else:
        assert term_i < self.n_terms, 'Term index not valid'
RV = self.vd.getScales(term_i)
return RV | def function[getScales, parameter[self, term_i]]:
constant[
Returns the Parameters
Args:
term_i: index of the term we are interested in
if term_i==None returns the whole vector of parameters
]
if compare[name[term_i] equal[==] constant[None]] begin[:]
variable[RV] assign[=] call[name[self].vd.getScales, parameter[]]
return[name[RV]] | keyword[def] identifier[getScales] ( identifier[self] , identifier[term_i] = keyword[None] ):
literal[string]
keyword[if] identifier[term_i] == keyword[None] :
identifier[RV] = identifier[self] . identifier[vd] . identifier[getScales] ()
keyword[else] :
keyword[assert] identifier[term_i] < identifier[self] . identifier[n_terms] , literal[string]
identifier[RV] = identifier[self] . identifier[vd] . identifier[getScales] ( identifier[term_i] )
keyword[return] identifier[RV] | def getScales(self, term_i=None):
"""
Returns the Parameters
Args:
term_i: index of the term we are interested in
if term_i==None returns the whole vector of parameters
"""
if term_i == None:
RV = self.vd.getScales() # depends on [control=['if'], data=[]]
else:
        assert term_i < self.n_terms, 'Term index not valid'
RV = self.vd.getScales(term_i)
return RV |
def Carcinogen(CASRN, AvailableMethods=False, Method=None):
r'''Looks up if a chemical is listed as a carcinogen or not according to
    either a specific method or all methods.
Returns either the status as a string for a specified method, or the
status of the chemical in all available data sources, in the format
{source: status}.
Parameters
----------
CASRN : string
CASRN [-]
Returns
-------
status : str or dict
Carcinogen status information [-]
methods : list, only returned if AvailableMethods == True
List of methods which can be used to obtain carcinogen status with the
given inputs
Other Parameters
----------------
Method : string, optional
A string for the method name to use, as defined by constants in
Carcinogen_methods
AvailableMethods : bool, optional
If True, function will determine which methods can be used to obtain
if a chemical is listed as carcinogenic, and will return methods
instead of the status
Notes
-----
Supported methods are:
* **IARC**: International Agency for Research on Cancer, [1]_. As
extracted with a last update of February 22, 2016. Has listing
information of 843 chemicals with CAS numbers. Chemicals without
CAS numbers not included here. If two listings for the same CAS
were available, that closest to the CAS number was used. If two
listings were available published at different times, the latest
value was used. All else equal, the most pessimistic value was used.
* **NTP**: National Toxicology Program, [2]_. Has data on 226
chemicals.
Examples
--------
>>> Carcinogen('61-82-5')
{'National Toxicology Program 13th Report on Carcinogens': 'Reasonably Anticipated', 'International Agency for Research on Cancer': 'Not classifiable as to its carcinogenicity to humans (3)'}
References
----------
.. [1] International Agency for Research on Cancer. Agents Classified by
the IARC Monographs, Volumes 1-115. Lyon, France: IARC; 2016 Available
from: http://monographs.iarc.fr/ENG/Classification/
.. [2] NTP (National Toxicology Program). 2014. Report on Carcinogens,
Thirteenth Edition. Research Triangle Park, NC: U.S. Department of
Health and Human Services, Public Health Service.
http://ntp.niehs.nih.gov/pubhealth/roc/roc13/
'''
methods = [COMBINED, IARC, NTP]
if AvailableMethods:
return methods
if not Method:
Method = methods[0]
if Method == IARC:
if CASRN in IARC_data.index:
status = IARC_codes[IARC_data.at[CASRN, 'group']]
else:
status = UNLISTED
elif Method == NTP:
if CASRN in NTP_data.index:
status = NTP_codes[NTP_data.at[CASRN, 'Listing']]
else:
status = UNLISTED
elif Method == COMBINED:
status = {}
for method in methods[1:]:
status[method] = Carcinogen(CASRN, Method=method)
else:
        raise Exception('Failure in function')
return status | def function[Carcinogen, parameter[CASRN, AvailableMethods, Method]]:
constant[Looks up if a chemical is listed as a carcinogen or not according to
    either a specific method or all methods.
Returns either the status as a string for a specified method, or the
status of the chemical in all available data sources, in the format
{source: status}.
Parameters
----------
CASRN : string
CASRN [-]
Returns
-------
status : str or dict
Carcinogen status information [-]
methods : list, only returned if AvailableMethods == True
List of methods which can be used to obtain carcinogen status with the
given inputs
Other Parameters
----------------
Method : string, optional
A string for the method name to use, as defined by constants in
Carcinogen_methods
AvailableMethods : bool, optional
If True, function will determine which methods can be used to obtain
if a chemical is listed as carcinogenic, and will return methods
instead of the status
Notes
-----
Supported methods are:
* **IARC**: International Agency for Research on Cancer, [1]_. As
extracted with a last update of February 22, 2016. Has listing
information of 843 chemicals with CAS numbers. Chemicals without
CAS numbers not included here. If two listings for the same CAS
were available, that closest to the CAS number was used. If two
listings were available published at different times, the latest
value was used. All else equal, the most pessimistic value was used.
* **NTP**: National Toxicology Program, [2]_. Has data on 226
chemicals.
Examples
--------
>>> Carcinogen('61-82-5')
{'National Toxicology Program 13th Report on Carcinogens': 'Reasonably Anticipated', 'International Agency for Research on Cancer': 'Not classifiable as to its carcinogenicity to humans (3)'}
References
----------
.. [1] International Agency for Research on Cancer. Agents Classified by
the IARC Monographs, Volumes 1-115. Lyon, France: IARC; 2016 Available
from: http://monographs.iarc.fr/ENG/Classification/
.. [2] NTP (National Toxicology Program). 2014. Report on Carcinogens,
Thirteenth Edition. Research Triangle Park, NC: U.S. Department of
Health and Human Services, Public Health Service.
http://ntp.niehs.nih.gov/pubhealth/roc/roc13/
]
variable[methods] assign[=] list[[<ast.Name object at 0x7da20c794070>, <ast.Name object at 0x7da20c794a00>, <ast.Name object at 0x7da20c795a50>]]
if name[AvailableMethods] begin[:]
return[name[methods]]
if <ast.UnaryOp object at 0x7da20c794f10> begin[:]
variable[Method] assign[=] call[name[methods]][constant[0]]
if compare[name[Method] equal[==] name[IARC]] begin[:]
if compare[name[CASRN] in name[IARC_data].index] begin[:]
variable[status] assign[=] call[name[IARC_codes]][call[name[IARC_data].at][tuple[[<ast.Name object at 0x7da1b021eef0>, <ast.Constant object at 0x7da1b021e7d0>]]]]
return[name[status]] | keyword[def] identifier[Carcinogen] ( identifier[CASRN] , identifier[AvailableMethods] = keyword[False] , identifier[Method] = keyword[None] ):
literal[string]
identifier[methods] =[ identifier[COMBINED] , identifier[IARC] , identifier[NTP] ]
keyword[if] identifier[AvailableMethods] :
keyword[return] identifier[methods]
keyword[if] keyword[not] identifier[Method] :
identifier[Method] = identifier[methods] [ literal[int] ]
keyword[if] identifier[Method] == identifier[IARC] :
keyword[if] identifier[CASRN] keyword[in] identifier[IARC_data] . identifier[index] :
identifier[status] = identifier[IARC_codes] [ identifier[IARC_data] . identifier[at] [ identifier[CASRN] , literal[string] ]]
keyword[else] :
identifier[status] = identifier[UNLISTED]
keyword[elif] identifier[Method] == identifier[NTP] :
keyword[if] identifier[CASRN] keyword[in] identifier[NTP_data] . identifier[index] :
identifier[status] = identifier[NTP_codes] [ identifier[NTP_data] . identifier[at] [ identifier[CASRN] , literal[string] ]]
keyword[else] :
identifier[status] = identifier[UNLISTED]
keyword[elif] identifier[Method] == identifier[COMBINED] :
identifier[status] ={}
keyword[for] identifier[method] keyword[in] identifier[methods] [ literal[int] :]:
identifier[status] [ identifier[method] ]= identifier[Carcinogen] ( identifier[CASRN] , identifier[Method] = identifier[method] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] identifier[status] | def Carcinogen(CASRN, AvailableMethods=False, Method=None):
"""Looks up if a chemical is listed as a carcinogen or not according to
    either a specific method or all methods.
Returns either the status as a string for a specified method, or the
status of the chemical in all available data sources, in the format
{source: status}.
Parameters
----------
CASRN : string
CASRN [-]
Returns
-------
status : str or dict
Carcinogen status information [-]
methods : list, only returned if AvailableMethods == True
List of methods which can be used to obtain carcinogen status with the
given inputs
Other Parameters
----------------
Method : string, optional
A string for the method name to use, as defined by constants in
Carcinogen_methods
AvailableMethods : bool, optional
If True, function will determine which methods can be used to obtain
if a chemical is listed as carcinogenic, and will return methods
instead of the status
Notes
-----
Supported methods are:
* **IARC**: International Agency for Research on Cancer, [1]_. As
extracted with a last update of February 22, 2016. Has listing
information of 843 chemicals with CAS numbers. Chemicals without
CAS numbers not included here. If two listings for the same CAS
were available, that closest to the CAS number was used. If two
listings were available published at different times, the latest
value was used. All else equal, the most pessimistic value was used.
* **NTP**: National Toxicology Program, [2]_. Has data on 226
chemicals.
Examples
--------
>>> Carcinogen('61-82-5')
{'National Toxicology Program 13th Report on Carcinogens': 'Reasonably Anticipated', 'International Agency for Research on Cancer': 'Not classifiable as to its carcinogenicity to humans (3)'}
References
----------
.. [1] International Agency for Research on Cancer. Agents Classified by
the IARC Monographs, Volumes 1-115. Lyon, France: IARC; 2016 Available
from: http://monographs.iarc.fr/ENG/Classification/
.. [2] NTP (National Toxicology Program). 2014. Report on Carcinogens,
Thirteenth Edition. Research Triangle Park, NC: U.S. Department of
Health and Human Services, Public Health Service.
http://ntp.niehs.nih.gov/pubhealth/roc/roc13/
"""
methods = [COMBINED, IARC, NTP]
if AvailableMethods:
return methods # depends on [control=['if'], data=[]]
if not Method:
Method = methods[0] # depends on [control=['if'], data=[]]
if Method == IARC:
if CASRN in IARC_data.index:
status = IARC_codes[IARC_data.at[CASRN, 'group']] # depends on [control=['if'], data=['CASRN']]
else:
status = UNLISTED # depends on [control=['if'], data=[]]
elif Method == NTP:
if CASRN in NTP_data.index:
status = NTP_codes[NTP_data.at[CASRN, 'Listing']] # depends on [control=['if'], data=['CASRN']]
else:
status = UNLISTED # depends on [control=['if'], data=[]]
elif Method == COMBINED:
status = {}
for method in methods[1:]:
status[method] = Carcinogen(CASRN, Method=method) # depends on [control=['for'], data=['method']] # depends on [control=['if'], data=[]]
else:
        raise Exception('Failure in function')
return status |
def from_packed(cls, packed):
"""Unpack diploid genotypes that have been bit-packed into single
bytes.
Parameters
----------
packed : ndarray, uint8, shape (n_variants, n_samples)
Bit-packed diploid genotype array.
Returns
-------
g : GenotypeArray, shape (n_variants, n_samples, 2)
Genotype array.
Examples
--------
>>> import allel
>>> import numpy as np
>>> packed = np.array([[0, 1],
... [2, 17],
... [34, 239]], dtype='u1')
>>> allel.GenotypeArray.from_packed(packed)
<GenotypeArray shape=(3, 2, 2) dtype=int8>
0/0 0/1
0/2 1/1
2/2 ./.
"""
# check arguments
packed = np.asarray(packed)
check_ndim(packed, 2)
check_dtype(packed, 'u1')
packed = memoryview_safe(packed)
data = genotype_array_unpack_diploid(packed)
return cls(data) | def function[from_packed, parameter[cls, packed]]:
constant[Unpack diploid genotypes that have been bit-packed into single
bytes.
Parameters
----------
packed : ndarray, uint8, shape (n_variants, n_samples)
Bit-packed diploid genotype array.
Returns
-------
g : GenotypeArray, shape (n_variants, n_samples, 2)
Genotype array.
Examples
--------
>>> import allel
>>> import numpy as np
>>> packed = np.array([[0, 1],
... [2, 17],
... [34, 239]], dtype='u1')
>>> allel.GenotypeArray.from_packed(packed)
<GenotypeArray shape=(3, 2, 2) dtype=int8>
0/0 0/1
0/2 1/1
2/2 ./.
]
variable[packed] assign[=] call[name[np].asarray, parameter[name[packed]]]
call[name[check_ndim], parameter[name[packed], constant[2]]]
call[name[check_dtype], parameter[name[packed], constant[u1]]]
variable[packed] assign[=] call[name[memoryview_safe], parameter[name[packed]]]
variable[data] assign[=] call[name[genotype_array_unpack_diploid], parameter[name[packed]]]
return[call[name[cls], parameter[name[data]]]] | keyword[def] identifier[from_packed] ( identifier[cls] , identifier[packed] ):
literal[string]
identifier[packed] = identifier[np] . identifier[asarray] ( identifier[packed] )
identifier[check_ndim] ( identifier[packed] , literal[int] )
identifier[check_dtype] ( identifier[packed] , literal[string] )
identifier[packed] = identifier[memoryview_safe] ( identifier[packed] )
identifier[data] = identifier[genotype_array_unpack_diploid] ( identifier[packed] )
keyword[return] identifier[cls] ( identifier[data] ) | def from_packed(cls, packed):
"""Unpack diploid genotypes that have been bit-packed into single
bytes.
Parameters
----------
packed : ndarray, uint8, shape (n_variants, n_samples)
Bit-packed diploid genotype array.
Returns
-------
g : GenotypeArray, shape (n_variants, n_samples, 2)
Genotype array.
Examples
--------
>>> import allel
>>> import numpy as np
>>> packed = np.array([[0, 1],
... [2, 17],
... [34, 239]], dtype='u1')
>>> allel.GenotypeArray.from_packed(packed)
<GenotypeArray shape=(3, 2, 2) dtype=int8>
0/0 0/1
0/2 1/1
2/2 ./.
"""
# check arguments
packed = np.asarray(packed)
check_ndim(packed, 2)
check_dtype(packed, 'u1')
packed = memoryview_safe(packed)
data = genotype_array_unpack_diploid(packed)
return cls(data) |
def lookup(self, hostname):
"""
Find a hostkey entry for a given hostname or IP. If no entry is found,
C{None} is returned. Otherwise a dictionary of keytype to key is
returned. The keytype will be either C{"ssh-rsa"} or C{"ssh-dss"}.
@param hostname: the hostname (or IP) to lookup
@type hostname: str
@return: keys associated with this host (or C{None})
@rtype: dict(str, L{PKey})
"""
class SubDict (UserDict.DictMixin):
def __init__(self, hostname, entries, hostkeys):
self._hostname = hostname
self._entries = entries
self._hostkeys = hostkeys
def __getitem__(self, key):
for e in self._entries:
if e.key.get_name() == key:
return e.key
raise KeyError(key)
def __setitem__(self, key, val):
for e in self._entries:
if e.key is None:
continue
if e.key.get_name() == key:
# replace
e.key = val
break
else:
# add a new one
e = HostKeyEntry([hostname], val)
self._entries.append(e)
self._hostkeys._entries.append(e)
def keys(self):
return [e.key.get_name() for e in self._entries if e.key is not None]
entries = []
for e in self._entries:
for h in e.hostnames:
if (h.startswith('|1|') and (self.hash_host(hostname, h) == h)) or (h == hostname):
entries.append(e)
if len(entries) == 0:
return None
return SubDict(hostname, entries, self) | def function[lookup, parameter[self, hostname]]:
constant[
Find a hostkey entry for a given hostname or IP. If no entry is found,
C{None} is returned. Otherwise a dictionary of keytype to key is
returned. The keytype will be either C{"ssh-rsa"} or C{"ssh-dss"}.
@param hostname: the hostname (or IP) to lookup
@type hostname: str
@return: keys associated with this host (or C{None})
@rtype: dict(str, L{PKey})
]
class class[SubDict, parameter[]] begin[:]
def function[__init__, parameter[self, hostname, entries, hostkeys]]:
name[self]._hostname assign[=] name[hostname]
name[self]._entries assign[=] name[entries]
name[self]._hostkeys assign[=] name[hostkeys]
def function[__getitem__, parameter[self, key]]:
for taget[name[e]] in starred[name[self]._entries] begin[:]
if compare[call[name[e].key.get_name, parameter[]] equal[==] name[key]] begin[:]
return[name[e].key]
<ast.Raise object at 0x7da1b1043430>
def function[__setitem__, parameter[self, key, val]]:
for taget[name[e]] in starred[name[self]._entries] begin[:]
if compare[name[e].key is constant[None]] begin[:]
continue
if compare[call[name[e].key.get_name, parameter[]] equal[==] name[key]] begin[:]
name[e].key assign[=] name[val]
break
def function[keys, parameter[self]]:
return[<ast.ListComp object at 0x7da1b11be290>]
variable[entries] assign[=] list[[]]
for taget[name[e]] in starred[name[self]._entries] begin[:]
for taget[name[h]] in starred[name[e].hostnames] begin[:]
if <ast.BoolOp object at 0x7da1b11bed40> begin[:]
call[name[entries].append, parameter[name[e]]]
if compare[call[name[len], parameter[name[entries]]] equal[==] constant[0]] begin[:]
return[constant[None]]
return[call[name[SubDict], parameter[name[hostname], name[entries], name[self]]]] | keyword[def] identifier[lookup] ( identifier[self] , identifier[hostname] ):
literal[string]
keyword[class] identifier[SubDict] ( identifier[UserDict] . identifier[DictMixin] ):
keyword[def] identifier[__init__] ( identifier[self] , identifier[hostname] , identifier[entries] , identifier[hostkeys] ):
identifier[self] . identifier[_hostname] = identifier[hostname]
identifier[self] . identifier[_entries] = identifier[entries]
identifier[self] . identifier[_hostkeys] = identifier[hostkeys]
keyword[def] identifier[__getitem__] ( identifier[self] , identifier[key] ):
keyword[for] identifier[e] keyword[in] identifier[self] . identifier[_entries] :
keyword[if] identifier[e] . identifier[key] . identifier[get_name] ()== identifier[key] :
keyword[return] identifier[e] . identifier[key]
keyword[raise] identifier[KeyError] ( identifier[key] )
keyword[def] identifier[__setitem__] ( identifier[self] , identifier[key] , identifier[val] ):
keyword[for] identifier[e] keyword[in] identifier[self] . identifier[_entries] :
keyword[if] identifier[e] . identifier[key] keyword[is] keyword[None] :
keyword[continue]
keyword[if] identifier[e] . identifier[key] . identifier[get_name] ()== identifier[key] :
identifier[e] . identifier[key] = identifier[val]
keyword[break]
keyword[else] :
identifier[e] = identifier[HostKeyEntry] ([ identifier[hostname] ], identifier[val] )
identifier[self] . identifier[_entries] . identifier[append] ( identifier[e] )
identifier[self] . identifier[_hostkeys] . identifier[_entries] . identifier[append] ( identifier[e] )
keyword[def] identifier[keys] ( identifier[self] ):
keyword[return] [ identifier[e] . identifier[key] . identifier[get_name] () keyword[for] identifier[e] keyword[in] identifier[self] . identifier[_entries] keyword[if] identifier[e] . identifier[key] keyword[is] keyword[not] keyword[None] ]
identifier[entries] =[]
keyword[for] identifier[e] keyword[in] identifier[self] . identifier[_entries] :
keyword[for] identifier[h] keyword[in] identifier[e] . identifier[hostnames] :
keyword[if] ( identifier[h] . identifier[startswith] ( literal[string] ) keyword[and] ( identifier[self] . identifier[hash_host] ( identifier[hostname] , identifier[h] )== identifier[h] )) keyword[or] ( identifier[h] == identifier[hostname] ):
identifier[entries] . identifier[append] ( identifier[e] )
keyword[if] identifier[len] ( identifier[entries] )== literal[int] :
keyword[return] keyword[None]
keyword[return] identifier[SubDict] ( identifier[hostname] , identifier[entries] , identifier[self] ) | def lookup(self, hostname):
"""
Find a hostkey entry for a given hostname or IP. If no entry is found,
C{None} is returned. Otherwise a dictionary of keytype to key is
returned. The keytype will be either C{"ssh-rsa"} or C{"ssh-dss"}.
@param hostname: the hostname (or IP) to lookup
@type hostname: str
@return: keys associated with this host (or C{None})
@rtype: dict(str, L{PKey})
"""
class SubDict(UserDict.DictMixin):
def __init__(self, hostname, entries, hostkeys):
self._hostname = hostname
self._entries = entries
self._hostkeys = hostkeys
def __getitem__(self, key):
for e in self._entries:
if e.key.get_name() == key:
return e.key # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['e']]
raise KeyError(key)
def __setitem__(self, key, val):
for e in self._entries:
if e.key is None:
continue # depends on [control=['if'], data=[]]
if e.key.get_name() == key:
# replace
e.key = val
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['e']]
else:
# add a new one
e = HostKeyEntry([hostname], val)
self._entries.append(e)
self._hostkeys._entries.append(e)
def keys(self):
return [e.key.get_name() for e in self._entries if e.key is not None]
entries = []
for e in self._entries:
for h in e.hostnames:
if h.startswith('|1|') and self.hash_host(hostname, h) == h or h == hostname:
entries.append(e) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['h']] # depends on [control=['for'], data=['e']]
if len(entries) == 0:
return None # depends on [control=['if'], data=[]]
return SubDict(hostname, entries, self) |
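
A hedged usage sketch for the lookup() above, assuming it is the method of paramiko's HostKeys class that this row appears to come from; the known_hosts path and hostname are placeholders.

import os
import paramiko

# Load an OpenSSH-style known_hosts file and look up a single host.
host_keys = paramiko.HostKeys(os.path.expanduser('~/.ssh/known_hosts'))
entry = host_keys.lookup('example.com')  # SubDict or None
if entry is not None:
    # The SubDict maps key type ('ssh-rsa', 'ssh-dss', ...) to a PKey.
    for key_type in entry.keys():
        print(key_type, entry[key_type].get_base64()[:20] + '...')
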
def abbreviated_interface_name(interface, addl_name_map=None, addl_reverse_map=None):
"""Function to return an abbreviated representation of the interface name.
:param interface: The interface you are attempting to abbreviate.
    :param addl_name_map (optional): A dict containing key/value pairs that update
the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs
{"Po": "Port-Channel"}
    :param addl_reverse_map (optional): A dict containing key/value pairs that update
the reverse mapping. Used if an OS has specific differences. e.g. {"PortChannel": "Po"} vs
{"PortChannel": "po"}
"""
name_map = {}
name_map.update(base_interfaces)
interface_type, interface_number = split_interface(interface)
if isinstance(addl_name_map, dict):
name_map.update(addl_name_map)
rev_name_map = {}
rev_name_map.update(reverse_mapping)
if isinstance(addl_reverse_map, dict):
rev_name_map.update(addl_reverse_map)
# Try to ensure canonical type.
if name_map.get(interface_type):
canonical_type = name_map.get(interface_type)
else:
canonical_type = interface_type
try:
abbreviated_name = rev_name_map[canonical_type] + py23_compat.text_type(
interface_number
)
return abbreviated_name
except KeyError:
pass
# If abbreviated name lookup fails, return original name
return interface | def function[abbreviated_interface_name, parameter[interface, addl_name_map, addl_reverse_map]]:
constant[Function to return an abbreviated representation of the interface name.
:param interface: The interface you are attempting to abbreviate.
:param addl_name_map (optional): A dict containing key/value pairs that updates
the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs
{"Po": "Port-Channel"}
:param addl_reverse_map (optional): A dict containing key/value pairs that updates
the reverse mapping. Used if an OS has specific differences. e.g. {"PortChannel": "Po"} vs
{"PortChannel": "po"}
]
variable[name_map] assign[=] dictionary[[], []]
call[name[name_map].update, parameter[name[base_interfaces]]]
<ast.Tuple object at 0x7da1b1d901f0> assign[=] call[name[split_interface], parameter[name[interface]]]
if call[name[isinstance], parameter[name[addl_name_map], name[dict]]] begin[:]
call[name[name_map].update, parameter[name[addl_name_map]]]
variable[rev_name_map] assign[=] dictionary[[], []]
call[name[rev_name_map].update, parameter[name[reverse_mapping]]]
if call[name[isinstance], parameter[name[addl_reverse_map], name[dict]]] begin[:]
call[name[rev_name_map].update, parameter[name[addl_reverse_map]]]
if call[name[name_map].get, parameter[name[interface_type]]] begin[:]
variable[canonical_type] assign[=] call[name[name_map].get, parameter[name[interface_type]]]
<ast.Try object at 0x7da1b1d935e0>
return[name[interface]] | keyword[def] identifier[abbreviated_interface_name] ( identifier[interface] , identifier[addl_name_map] = keyword[None] , identifier[addl_reverse_map] = keyword[None] ):
literal[string]
identifier[name_map] ={}
identifier[name_map] . identifier[update] ( identifier[base_interfaces] )
identifier[interface_type] , identifier[interface_number] = identifier[split_interface] ( identifier[interface] )
keyword[if] identifier[isinstance] ( identifier[addl_name_map] , identifier[dict] ):
identifier[name_map] . identifier[update] ( identifier[addl_name_map] )
identifier[rev_name_map] ={}
identifier[rev_name_map] . identifier[update] ( identifier[reverse_mapping] )
keyword[if] identifier[isinstance] ( identifier[addl_reverse_map] , identifier[dict] ):
identifier[rev_name_map] . identifier[update] ( identifier[addl_reverse_map] )
keyword[if] identifier[name_map] . identifier[get] ( identifier[interface_type] ):
identifier[canonical_type] = identifier[name_map] . identifier[get] ( identifier[interface_type] )
keyword[else] :
identifier[canonical_type] = identifier[interface_type]
keyword[try] :
identifier[abbreviated_name] = identifier[rev_name_map] [ identifier[canonical_type] ]+ identifier[py23_compat] . identifier[text_type] (
identifier[interface_number]
)
keyword[return] identifier[abbreviated_name]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[return] identifier[interface] | def abbreviated_interface_name(interface, addl_name_map=None, addl_reverse_map=None):
"""Function to return an abbreviated representation of the interface name.
:param interface: The interface you are attempting to abbreviate.
    :param addl_name_map (optional): A dict containing key/value pairs that update
the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs
{"Po": "Port-Channel"}
    :param addl_reverse_map (optional): A dict containing key/value pairs that update
the reverse mapping. Used if an OS has specific differences. e.g. {"PortChannel": "Po"} vs
{"PortChannel": "po"}
"""
name_map = {}
name_map.update(base_interfaces)
(interface_type, interface_number) = split_interface(interface)
if isinstance(addl_name_map, dict):
name_map.update(addl_name_map) # depends on [control=['if'], data=[]]
rev_name_map = {}
rev_name_map.update(reverse_mapping)
if isinstance(addl_reverse_map, dict):
rev_name_map.update(addl_reverse_map) # depends on [control=['if'], data=[]]
# Try to ensure canonical type.
if name_map.get(interface_type):
canonical_type = name_map.get(interface_type) # depends on [control=['if'], data=[]]
else:
canonical_type = interface_type
try:
abbreviated_name = rev_name_map[canonical_type] + py23_compat.text_type(interface_number)
return abbreviated_name # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
# If abbreviated name lookup fails, return original name
return interface |
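
Hypothetical calls against the function above; the exact abbreviations depend on the module-level base_interfaces and reverse_mapping tables, which this row does not show, so the outputs in the comments are only plausible examples.

# Canonical names collapse to their short forms via reverse_mapping.
print(abbreviated_interface_name('GigabitEthernet0/1'))   # e.g. 'Gi0/1'

# OS-specific spellings can be patched in per call.
print(abbreviated_interface_name(
    'Port-Channel10',
    addl_name_map={'Po': 'Port-Channel'},
    addl_reverse_map={'Port-Channel': 'Po'}))             # e.g. 'Po10'

# Unknown interface types fall through and come back unchanged.
print(abbreviated_interface_name('mgmt0'))  # 'mgmt0' if the tables do not know 'mgmt'
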
def gibson(seq_list, linear=False, homology=10, tm=63.0):
'''Simulate a Gibson reaction.
:param seq_list: list of DNA sequences to Gibson
:type seq_list: list of coral.DNA
:param linear: Attempt to produce linear, rather than circular,
fragment from input fragments.
:type linear: bool
    :param homology: minimum bp of homology allowed
    :type homology: int
:param tm: Minimum tm of overlaps
:type tm: float
:returns: coral.reaction.Gibson instance.
:raises: ValueError if any input sequences are circular DNA.
'''
# FIXME: Preserve features in overlap
# TODO: set a max length?
# TODO: add 'expected' keyword argument somewhere to automate
# validation
# Remove any redundant (identical) sequences
seq_list = list(set(seq_list))
for seq in seq_list:
if seq.circular:
raise ValueError('Input sequences must be linear.')
# Copy input list
working_list = [s.copy() for s in seq_list]
# Attempt to fuse fragments together until only one is left
while len(working_list) > 1:
working_list = _find_fuse_next(working_list, homology, tm)
if not linear:
# Fuse the final fragment to itself
working_list = _fuse_last(working_list, homology, tm)
# Clear features
working_list[0].features = []
return _annotate_features(working_list[0], seq_list) | def function[gibson, parameter[seq_list, linear, homology, tm]]:
constant[Simulate a Gibson reaction.
:param seq_list: list of DNA sequences to Gibson
:type seq_list: list of coral.DNA
:param linear: Attempt to produce linear, rather than circular,
fragment from input fragments.
:type linear: bool
:param homology_min: minimum bp of homology allowed
:type homology_min: int
:param tm: Minimum tm of overlaps
:type tm: float
:returns: coral.reaction.Gibson instance.
:raises: ValueError if any input sequences are circular DNA.
]
variable[seq_list] assign[=] call[name[list], parameter[call[name[set], parameter[name[seq_list]]]]]
for taget[name[seq]] in starred[name[seq_list]] begin[:]
if name[seq].circular begin[:]
<ast.Raise object at 0x7da18f720940>
variable[working_list] assign[=] <ast.ListComp object at 0x7da18f721ae0>
while compare[call[name[len], parameter[name[working_list]]] greater[>] constant[1]] begin[:]
variable[working_list] assign[=] call[name[_find_fuse_next], parameter[name[working_list], name[homology], name[tm]]]
if <ast.UnaryOp object at 0x7da18f723670> begin[:]
variable[working_list] assign[=] call[name[_fuse_last], parameter[name[working_list], name[homology], name[tm]]]
call[name[working_list]][constant[0]].features assign[=] list[[]]
return[call[name[_annotate_features], parameter[call[name[working_list]][constant[0]], name[seq_list]]]] | keyword[def] identifier[gibson] ( identifier[seq_list] , identifier[linear] = keyword[False] , identifier[homology] = literal[int] , identifier[tm] = literal[int] ):
literal[string]
identifier[seq_list] = identifier[list] ( identifier[set] ( identifier[seq_list] ))
keyword[for] identifier[seq] keyword[in] identifier[seq_list] :
keyword[if] identifier[seq] . identifier[circular] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[working_list] =[ identifier[s] . identifier[copy] () keyword[for] identifier[s] keyword[in] identifier[seq_list] ]
keyword[while] identifier[len] ( identifier[working_list] )> literal[int] :
identifier[working_list] = identifier[_find_fuse_next] ( identifier[working_list] , identifier[homology] , identifier[tm] )
keyword[if] keyword[not] identifier[linear] :
identifier[working_list] = identifier[_fuse_last] ( identifier[working_list] , identifier[homology] , identifier[tm] )
identifier[working_list] [ literal[int] ]. identifier[features] =[]
keyword[return] identifier[_annotate_features] ( identifier[working_list] [ literal[int] ], identifier[seq_list] ) | def gibson(seq_list, linear=False, homology=10, tm=63.0):
"""Simulate a Gibson reaction.
:param seq_list: list of DNA sequences to Gibson
:type seq_list: list of coral.DNA
:param linear: Attempt to produce linear, rather than circular,
fragment from input fragments.
:type linear: bool
    :param homology: minimum bp of homology allowed
    :type homology: int
:param tm: Minimum tm of overlaps
:type tm: float
:returns: coral.reaction.Gibson instance.
:raises: ValueError if any input sequences are circular DNA.
"""
# FIXME: Preserve features in overlap
# TODO: set a max length?
# TODO: add 'expected' keyword argument somewhere to automate
# validation
# Remove any redundant (identical) sequences
seq_list = list(set(seq_list))
for seq in seq_list:
if seq.circular:
raise ValueError('Input sequences must be linear.') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['seq']]
# Copy input list
working_list = [s.copy() for s in seq_list]
# Attempt to fuse fragments together until only one is left
while len(working_list) > 1:
working_list = _find_fuse_next(working_list, homology, tm) # depends on [control=['while'], data=[]]
if not linear:
# Fuse the final fragment to itself
working_list = _fuse_last(working_list, homology, tm) # depends on [control=['if'], data=[]]
# Clear features
working_list[0].features = []
return _annotate_features(working_list[0], seq_list) |
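
A toy sketch of driving gibson(); the sequences below are fabricated and may not satisfy the melting-temperature check in a real run, so treat this as shape-of-the-API only, not a validated assembly.

import coral as cor

# Two fragments sharing a made-up 25 bp junction at their ends.
junction = 'ATGCGTACGTTAGCCGGATCCAAGT'
frag_a = cor.DNA('GATTACAGATTACAGATTACA' + junction)
frag_b = cor.DNA(junction + 'CCGGAACCGGAACCGGAA')
product = gibson([frag_a, frag_b], linear=True, homology=20, tm=50.0)
print(len(product))
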
def commit_version(self, version, msg=None):
""" Add tag, commit, and push changes """
assert version not in self.versions, 'Will not overwrite a version name.'
if not msg:
feat, targ = self.training_data
msg = 'Training set has {0} examples. '.format(len(feat))
feat, targ = self.testing_data
msg += 'Testing set has {0} examples.'.format(len(feat))
cmd = self.repo.commit(m=msg, a=True)
cmd = self.repo.tag(version)
cmd = self.repo.checkout('master')
self.update()
cmd = self.repo.merge('working')
cmd = self.repo.branch('working', d=True)
self.set_version(version)
try:
stdout = self.repo.push('origin', 'master', '--tags').stdout
logger.info(stdout)
    except Exception:
logger.info('Push not working. Remote not defined?') | def function[commit_version, parameter[self, version, msg]]:
constant[ Add tag, commit, and push changes ]
assert[compare[name[version] <ast.NotIn object at 0x7da2590d7190> name[self].versions]]
if <ast.UnaryOp object at 0x7da18bccb850> begin[:]
<ast.Tuple object at 0x7da18bccb730> assign[=] name[self].training_data
variable[msg] assign[=] call[constant[Training set has {0} examples. ].format, parameter[call[name[len], parameter[name[feat]]]]]
<ast.Tuple object at 0x7da18bcc8df0> assign[=] name[self].testing_data
<ast.AugAssign object at 0x7da18bccb520>
variable[cmd] assign[=] call[name[self].repo.commit, parameter[]]
variable[cmd] assign[=] call[name[self].repo.tag, parameter[name[version]]]
variable[cmd] assign[=] call[name[self].repo.checkout, parameter[constant[master]]]
call[name[self].update, parameter[]]
variable[cmd] assign[=] call[name[self].repo.merge, parameter[constant[working]]]
variable[cmd] assign[=] call[name[self].repo.branch, parameter[constant[working]]]
call[name[self].set_version, parameter[name[version]]]
<ast.Try object at 0x7da18bccb3a0> | keyword[def] identifier[commit_version] ( identifier[self] , identifier[version] , identifier[msg] = keyword[None] ):
literal[string]
keyword[assert] identifier[version] keyword[not] keyword[in] identifier[self] . identifier[versions] , literal[string]
keyword[if] keyword[not] identifier[msg] :
identifier[feat] , identifier[targ] = identifier[self] . identifier[training_data]
identifier[msg] = literal[string] . identifier[format] ( identifier[len] ( identifier[feat] ))
identifier[feat] , identifier[targ] = identifier[self] . identifier[testing_data]
identifier[msg] += literal[string] . identifier[format] ( identifier[len] ( identifier[feat] ))
identifier[cmd] = identifier[self] . identifier[repo] . identifier[commit] ( identifier[m] = identifier[msg] , identifier[a] = keyword[True] )
identifier[cmd] = identifier[self] . identifier[repo] . identifier[tag] ( identifier[version] )
identifier[cmd] = identifier[self] . identifier[repo] . identifier[checkout] ( literal[string] )
identifier[self] . identifier[update] ()
identifier[cmd] = identifier[self] . identifier[repo] . identifier[merge] ( literal[string] )
identifier[cmd] = identifier[self] . identifier[repo] . identifier[branch] ( literal[string] , identifier[d] = keyword[True] )
identifier[self] . identifier[set_version] ( identifier[version] )
keyword[try] :
identifier[stdout] = identifier[self] . identifier[repo] . identifier[push] ( literal[string] , literal[string] , literal[string] ). identifier[stdout]
identifier[logger] . identifier[info] ( identifier[stdout] )
keyword[except] :
identifier[logger] . identifier[info] ( literal[string] ) | def commit_version(self, version, msg=None):
""" Add tag, commit, and push changes """
assert version not in self.versions, 'Will not overwrite a version name.'
if not msg:
(feat, targ) = self.training_data
msg = 'Training set has {0} examples. '.format(len(feat))
(feat, targ) = self.testing_data
msg += 'Testing set has {0} examples.'.format(len(feat)) # depends on [control=['if'], data=[]]
cmd = self.repo.commit(m=msg, a=True)
cmd = self.repo.tag(version)
cmd = self.repo.checkout('master')
self.update()
cmd = self.repo.merge('working')
cmd = self.repo.branch('working', d=True)
self.set_version(version)
try:
stdout = self.repo.push('origin', 'master', '--tags').stdout
logger.info(stdout) # depends on [control=['try'], data=[]]
    except Exception:
logger.info('Push not working. Remote not defined?') # depends on [control=['except'], data=[]] |
def update_task(deadline, label, task_id):
"""
Executor for `globus task update`
"""
client = get_client()
task_doc = assemble_generic_doc("task", label=label, deadline=deadline)
res = client.update_task(task_id, task_doc)
formatted_print(res, simple_text="Success") | def function[update_task, parameter[deadline, label, task_id]]:
constant[
Executor for `globus task update`
]
variable[client] assign[=] call[name[get_client], parameter[]]
variable[task_doc] assign[=] call[name[assemble_generic_doc], parameter[constant[task]]]
variable[res] assign[=] call[name[client].update_task, parameter[name[task_id], name[task_doc]]]
call[name[formatted_print], parameter[name[res]]] | keyword[def] identifier[update_task] ( identifier[deadline] , identifier[label] , identifier[task_id] ):
literal[string]
identifier[client] = identifier[get_client] ()
identifier[task_doc] = identifier[assemble_generic_doc] ( literal[string] , identifier[label] = identifier[label] , identifier[deadline] = identifier[deadline] )
identifier[res] = identifier[client] . identifier[update_task] ( identifier[task_id] , identifier[task_doc] )
identifier[formatted_print] ( identifier[res] , identifier[simple_text] = literal[string] ) | def update_task(deadline, label, task_id):
"""
Executor for `globus task update`
"""
client = get_client()
task_doc = assemble_generic_doc('task', label=label, deadline=deadline)
res = client.update_task(task_id, task_doc)
formatted_print(res, simple_text='Success') |
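
For orientation, a hedged direct call to the executor above; in the real CLI these arguments arrive through click options, and all three values here are placeholders (the call also needs an authenticated Globus session).

update_task(deadline='2030-01-01 00:00:00',
            label='renamed-transfer',
            task_id='00000000-0000-0000-0000-000000000000')
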
async def set_permissions(self, target, *, overwrite=_undefined, reason=None, **permissions):
r"""|coro|
Sets the channel specific permission overwrites for a target in the
channel.
The ``target`` parameter should either be a :class:`Member` or a
:class:`Role` that belongs to guild.
The ``overwrite`` parameter, if given, must either be ``None`` or
:class:`PermissionOverwrite`. For convenience, you can pass in
keyword arguments denoting :class:`Permissions` attributes. If this is
done, then you cannot mix the keyword arguments with the ``overwrite``
parameter.
If the ``overwrite`` parameter is ``None``, then the permission
overwrites are deleted.
You must have the :attr:`~Permissions.manage_roles` permission to use this.
Examples
----------
Setting allow and deny: ::
await message.channel.set_permissions(message.author, read_messages=True,
send_messages=False)
Deleting overwrites ::
await channel.set_permissions(member, overwrite=None)
Using :class:`PermissionOverwrite` ::
overwrite = discord.PermissionOverwrite()
overwrite.send_messages = False
overwrite.read_messages = True
await channel.set_permissions(member, overwrite=overwrite)
Parameters
-----------
target
The :class:`Member` or :class:`Role` to overwrite permissions for.
overwrite: :class:`PermissionOverwrite`
The permissions to allow and deny to the target.
\*\*permissions
A keyword argument list of permissions to set for ease of use.
Cannot be mixed with ``overwrite``.
reason: Optional[:class:`str`]
The reason for doing this action. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to edit channel specific permissions.
HTTPException
Editing channel specific permissions failed.
NotFound
The role or member being edited is not part of the guild.
InvalidArgument
            The overwrite parameter was invalid or the target type was not
:class:`Role` or :class:`Member`.
"""
http = self._state.http
if isinstance(target, User):
perm_type = 'member'
elif isinstance(target, Role):
perm_type = 'role'
else:
raise InvalidArgument('target parameter must be either Member or Role')
if isinstance(overwrite, _Undefined):
if len(permissions) == 0:
raise InvalidArgument('No overwrite provided.')
try:
overwrite = PermissionOverwrite(**permissions)
except (ValueError, TypeError):
raise InvalidArgument('Invalid permissions given to keyword arguments.')
else:
if len(permissions) > 0:
raise InvalidArgument('Cannot mix overwrite and keyword arguments.')
# TODO: wait for event
if overwrite is None:
await http.delete_channel_permissions(self.id, target.id, reason=reason)
elif isinstance(overwrite, PermissionOverwrite):
(allow, deny) = overwrite.pair()
await http.edit_channel_permissions(self.id, target.id, allow.value, deny.value, perm_type, reason=reason)
else:
raise InvalidArgument('Invalid overwrite type provided.') | <ast.AsyncFunctionDef object at 0x7da1b20caef0> | keyword[async] keyword[def] identifier[set_permissions] ( identifier[self] , identifier[target] ,*, identifier[overwrite] = identifier[_undefined] , identifier[reason] = keyword[None] ,** identifier[permissions] ):
literal[string]
identifier[http] = identifier[self] . identifier[_state] . identifier[http]
keyword[if] identifier[isinstance] ( identifier[target] , identifier[User] ):
identifier[perm_type] = literal[string]
keyword[elif] identifier[isinstance] ( identifier[target] , identifier[Role] ):
identifier[perm_type] = literal[string]
keyword[else] :
keyword[raise] identifier[InvalidArgument] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[overwrite] , identifier[_Undefined] ):
keyword[if] identifier[len] ( identifier[permissions] )== literal[int] :
keyword[raise] identifier[InvalidArgument] ( literal[string] )
keyword[try] :
identifier[overwrite] = identifier[PermissionOverwrite] (** identifier[permissions] )
keyword[except] ( identifier[ValueError] , identifier[TypeError] ):
keyword[raise] identifier[InvalidArgument] ( literal[string] )
keyword[else] :
keyword[if] identifier[len] ( identifier[permissions] )> literal[int] :
keyword[raise] identifier[InvalidArgument] ( literal[string] )
keyword[if] identifier[overwrite] keyword[is] keyword[None] :
keyword[await] identifier[http] . identifier[delete_channel_permissions] ( identifier[self] . identifier[id] , identifier[target] . identifier[id] , identifier[reason] = identifier[reason] )
keyword[elif] identifier[isinstance] ( identifier[overwrite] , identifier[PermissionOverwrite] ):
( identifier[allow] , identifier[deny] )= identifier[overwrite] . identifier[pair] ()
keyword[await] identifier[http] . identifier[edit_channel_permissions] ( identifier[self] . identifier[id] , identifier[target] . identifier[id] , identifier[allow] . identifier[value] , identifier[deny] . identifier[value] , identifier[perm_type] , identifier[reason] = identifier[reason] )
keyword[else] :
keyword[raise] identifier[InvalidArgument] ( literal[string] ) | async def set_permissions(self, target, *, overwrite=_undefined, reason=None, **permissions):
"""|coro|
Sets the channel specific permission overwrites for a target in the
channel.
The ``target`` parameter should either be a :class:`Member` or a
:class:`Role` that belongs to guild.
The ``overwrite`` parameter, if given, must either be ``None`` or
:class:`PermissionOverwrite`. For convenience, you can pass in
keyword arguments denoting :class:`Permissions` attributes. If this is
done, then you cannot mix the keyword arguments with the ``overwrite``
parameter.
If the ``overwrite`` parameter is ``None``, then the permission
overwrites are deleted.
You must have the :attr:`~Permissions.manage_roles` permission to use this.
Examples
----------
Setting allow and deny: ::
await message.channel.set_permissions(message.author, read_messages=True,
send_messages=False)
Deleting overwrites ::
await channel.set_permissions(member, overwrite=None)
Using :class:`PermissionOverwrite` ::
overwrite = discord.PermissionOverwrite()
overwrite.send_messages = False
overwrite.read_messages = True
await channel.set_permissions(member, overwrite=overwrite)
Parameters
-----------
target
The :class:`Member` or :class:`Role` to overwrite permissions for.
overwrite: :class:`PermissionOverwrite`
The permissions to allow and deny to the target.
\\*\\*permissions
A keyword argument list of permissions to set for ease of use.
Cannot be mixed with ``overwrite``.
reason: Optional[:class:`str`]
The reason for doing this action. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to edit channel specific permissions.
HTTPException
Editing channel specific permissions failed.
NotFound
The role or member being edited is not part of the guild.
InvalidArgument
        The overwrite parameter was invalid or the target type was not
:class:`Role` or :class:`Member`.
"""
http = self._state.http
if isinstance(target, User):
perm_type = 'member' # depends on [control=['if'], data=[]]
elif isinstance(target, Role):
perm_type = 'role' # depends on [control=['if'], data=[]]
else:
raise InvalidArgument('target parameter must be either Member or Role')
if isinstance(overwrite, _Undefined):
if len(permissions) == 0:
raise InvalidArgument('No overwrite provided.') # depends on [control=['if'], data=[]]
try:
overwrite = PermissionOverwrite(**permissions) # depends on [control=['try'], data=[]]
except (ValueError, TypeError):
raise InvalidArgument('Invalid permissions given to keyword arguments.') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif len(permissions) > 0:
raise InvalidArgument('Cannot mix overwrite and keyword arguments.') # depends on [control=['if'], data=[]]
# TODO: wait for event
if overwrite is None:
await http.delete_channel_permissions(self.id, target.id, reason=reason) # depends on [control=['if'], data=[]]
elif isinstance(overwrite, PermissionOverwrite):
(allow, deny) = overwrite.pair()
await http.edit_channel_permissions(self.id, target.id, allow.value, deny.value, perm_type, reason=reason) # depends on [control=['if'], data=[]]
else:
raise InvalidArgument('Invalid overwrite type provided.') |
def _condition_as_text(lambda_inspection: icontract._represent.ConditionLambdaInspection) -> str:
"""Format condition lambda function as reST."""
lambda_ast_node = lambda_inspection.node
assert isinstance(lambda_ast_node, ast.Lambda)
body_node = lambda_ast_node.body
text = None # type: Optional[str]
if isinstance(body_node, ast.BoolOp) and isinstance(body_node.op, ast.Or) and len(body_node.values) == 2:
left, right = body_node.values
if isinstance(left, ast.UnaryOp) and isinstance(left.op, ast.Not):
# Handle the case: not A or B is transformed to A => B
text = ':code:`{}` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=left.operand), lambda_inspection.atok.get_text(node=right))
elif isinstance(left, (ast.UnaryOp, ast.BinOp, ast.GeneratorExp, ast.IfExp)):
text = ':code:`not ({})` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=left), lambda_inspection.atok.get_text(node=right))
elif isinstance(left, ast.Compare) and len(left.ops) == 1:
text = ':code:`{}` ⇒ :code:`{}`'.format(
_negate_compare_text(atok=lambda_inspection.atok, node=left),
lambda_inspection.atok.get_text(node=right))
elif isinstance(left, (ast.Call, ast.Attribute, ast.Name, ast.Subscript, ast.Index, ast.Slice, ast.ExtSlice,
ast.ListComp, ast.SetComp, ast.DictComp)):
text = ':code:`not {}` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=left), lambda_inspection.atok.get_text(node=right))
elif isinstance(body_node, ast.IfExp) and isinstance(body_node.orelse, ast.NameConstant) and body_node.orelse.value:
text = ':code:`{}` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=body_node.test), lambda_inspection.atok.get_text(node=body_node.body))
if text is None:
        # None of the previous reformattings worked, take the default approach.
text = ':code:`{}`'.format(lambda_inspection.atok.get_text(node=body_node))
return text | def function[_condition_as_text, parameter[lambda_inspection]]:
constant[Format condition lambda function as reST.]
variable[lambda_ast_node] assign[=] name[lambda_inspection].node
assert[call[name[isinstance], parameter[name[lambda_ast_node], name[ast].Lambda]]]
variable[body_node] assign[=] name[lambda_ast_node].body
variable[text] assign[=] constant[None]
if <ast.BoolOp object at 0x7da2054a4e20> begin[:]
<ast.Tuple object at 0x7da2054a5b70> assign[=] name[body_node].values
if <ast.BoolOp object at 0x7da2054a7b80> begin[:]
variable[text] assign[=] call[constant[:code:`{}` ⇒ :code:`{}`].format, parameter[call[name[lambda_inspection].atok.get_text, parameter[]], call[name[lambda_inspection].atok.get_text, parameter[]]]]
if compare[name[text] is constant[None]] begin[:]
variable[text] assign[=] call[constant[:code:`{}`].format, parameter[call[name[lambda_inspection].atok.get_text, parameter[]]]]
return[name[text]] | keyword[def] identifier[_condition_as_text] ( identifier[lambda_inspection] : identifier[icontract] . identifier[_represent] . identifier[ConditionLambdaInspection] )-> identifier[str] :
literal[string]
identifier[lambda_ast_node] = identifier[lambda_inspection] . identifier[node]
keyword[assert] identifier[isinstance] ( identifier[lambda_ast_node] , identifier[ast] . identifier[Lambda] )
identifier[body_node] = identifier[lambda_ast_node] . identifier[body]
identifier[text] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[body_node] , identifier[ast] . identifier[BoolOp] ) keyword[and] identifier[isinstance] ( identifier[body_node] . identifier[op] , identifier[ast] . identifier[Or] ) keyword[and] identifier[len] ( identifier[body_node] . identifier[values] )== literal[int] :
identifier[left] , identifier[right] = identifier[body_node] . identifier[values]
keyword[if] identifier[isinstance] ( identifier[left] , identifier[ast] . identifier[UnaryOp] ) keyword[and] identifier[isinstance] ( identifier[left] . identifier[op] , identifier[ast] . identifier[Not] ):
identifier[text] = literal[string] . identifier[format] (
identifier[lambda_inspection] . identifier[atok] . identifier[get_text] ( identifier[node] = identifier[left] . identifier[operand] ), identifier[lambda_inspection] . identifier[atok] . identifier[get_text] ( identifier[node] = identifier[right] ))
keyword[elif] identifier[isinstance] ( identifier[left] ,( identifier[ast] . identifier[UnaryOp] , identifier[ast] . identifier[BinOp] , identifier[ast] . identifier[GeneratorExp] , identifier[ast] . identifier[IfExp] )):
identifier[text] = literal[string] . identifier[format] (
identifier[lambda_inspection] . identifier[atok] . identifier[get_text] ( identifier[node] = identifier[left] ), identifier[lambda_inspection] . identifier[atok] . identifier[get_text] ( identifier[node] = identifier[right] ))
keyword[elif] identifier[isinstance] ( identifier[left] , identifier[ast] . identifier[Compare] ) keyword[and] identifier[len] ( identifier[left] . identifier[ops] )== literal[int] :
identifier[text] = literal[string] . identifier[format] (
identifier[_negate_compare_text] ( identifier[atok] = identifier[lambda_inspection] . identifier[atok] , identifier[node] = identifier[left] ),
identifier[lambda_inspection] . identifier[atok] . identifier[get_text] ( identifier[node] = identifier[right] ))
keyword[elif] identifier[isinstance] ( identifier[left] ,( identifier[ast] . identifier[Call] , identifier[ast] . identifier[Attribute] , identifier[ast] . identifier[Name] , identifier[ast] . identifier[Subscript] , identifier[ast] . identifier[Index] , identifier[ast] . identifier[Slice] , identifier[ast] . identifier[ExtSlice] ,
identifier[ast] . identifier[ListComp] , identifier[ast] . identifier[SetComp] , identifier[ast] . identifier[DictComp] )):
identifier[text] = literal[string] . identifier[format] (
identifier[lambda_inspection] . identifier[atok] . identifier[get_text] ( identifier[node] = identifier[left] ), identifier[lambda_inspection] . identifier[atok] . identifier[get_text] ( identifier[node] = identifier[right] ))
keyword[elif] identifier[isinstance] ( identifier[body_node] , identifier[ast] . identifier[IfExp] ) keyword[and] identifier[isinstance] ( identifier[body_node] . identifier[orelse] , identifier[ast] . identifier[NameConstant] ) keyword[and] identifier[body_node] . identifier[orelse] . identifier[value] :
identifier[text] = literal[string] . identifier[format] (
identifier[lambda_inspection] . identifier[atok] . identifier[get_text] ( identifier[node] = identifier[body_node] . identifier[test] ), identifier[lambda_inspection] . identifier[atok] . identifier[get_text] ( identifier[node] = identifier[body_node] . identifier[body] ))
keyword[if] identifier[text] keyword[is] keyword[None] :
identifier[text] = literal[string] . identifier[format] ( identifier[lambda_inspection] . identifier[atok] . identifier[get_text] ( identifier[node] = identifier[body_node] ))
keyword[return] identifier[text] | def _condition_as_text(lambda_inspection: icontract._represent.ConditionLambdaInspection) -> str:
"""Format condition lambda function as reST."""
lambda_ast_node = lambda_inspection.node
assert isinstance(lambda_ast_node, ast.Lambda)
body_node = lambda_ast_node.body
text = None # type: Optional[str]
if isinstance(body_node, ast.BoolOp) and isinstance(body_node.op, ast.Or) and (len(body_node.values) == 2):
(left, right) = body_node.values
if isinstance(left, ast.UnaryOp) and isinstance(left.op, ast.Not):
# Handle the case: not A or B is transformed to A => B
text = ':code:`{}` ⇒ :code:`{}`'.format(lambda_inspection.atok.get_text(node=left.operand), lambda_inspection.atok.get_text(node=right)) # depends on [control=['if'], data=[]]
elif isinstance(left, (ast.UnaryOp, ast.BinOp, ast.GeneratorExp, ast.IfExp)):
text = ':code:`not ({})` ⇒ :code:`{}`'.format(lambda_inspection.atok.get_text(node=left), lambda_inspection.atok.get_text(node=right)) # depends on [control=['if'], data=[]]
elif isinstance(left, ast.Compare) and len(left.ops) == 1:
text = ':code:`{}` ⇒ :code:`{}`'.format(_negate_compare_text(atok=lambda_inspection.atok, node=left), lambda_inspection.atok.get_text(node=right)) # depends on [control=['if'], data=[]]
elif isinstance(left, (ast.Call, ast.Attribute, ast.Name, ast.Subscript, ast.Index, ast.Slice, ast.ExtSlice, ast.ListComp, ast.SetComp, ast.DictComp)):
text = ':code:`not {}` ⇒ :code:`{}`'.format(lambda_inspection.atok.get_text(node=left), lambda_inspection.atok.get_text(node=right)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(body_node, ast.IfExp) and isinstance(body_node.orelse, ast.NameConstant) and body_node.orelse.value:
text = ':code:`{}` ⇒ :code:`{}`'.format(lambda_inspection.atok.get_text(node=body_node.test), lambda_inspection.atok.get_text(node=body_node.body)) # depends on [control=['if'], data=[]]
if text is None:
        # None of the previous reformattings worked, take the default approach.
text = ':code:`{}`'.format(lambda_inspection.atok.get_text(node=body_node)) # depends on [control=['if'], data=['text']]
return text |
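
The first branch above rewrites `not A or B` into an implication. A self-contained sketch of the same pattern match using only the stdlib ast module; ast.unparse (Python 3.9+) stands in for icontract's atok-based source extraction.

import ast

lam = ast.parse('lambda x: not x > 0 or x < 10', mode='eval').body
body = lam.body
if (isinstance(body, ast.BoolOp) and isinstance(body.op, ast.Or)
        and len(body.values) == 2):
    left, right = body.values
    if isinstance(left, ast.UnaryOp) and isinstance(left.op, ast.Not):
        # Prints: :code:`x > 0` ⇒ :code:`x < 10`
        print(':code:`{}` ⇒ :code:`{}`'.format(
            ast.unparse(left.operand), ast.unparse(right)))
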
def check_chain(chain):
"""Verify a merkle chain to see if the Merkle root can be reproduced.
"""
link = chain[0][0]
for i in range(1, len(chain) - 1):
if chain[i][1] == 'R':
link = hash_function(link + chain[i][0]).digest()
elif chain[i][1] == 'L':
link = hash_function(chain[i][0] + link).digest()
else:
raise MerkleError('Link %s has no side value: %s' % (str(i), str(codecs.encode(chain[i][0], 'hex_codec'))))
if link == chain[-1][0]:
return link
else:
raise MerkleError('The Merkle Chain is not valid.') | def function[check_chain, parameter[chain]]:
constant[Verify a merkle chain to see if the Merkle root can be reproduced.
]
variable[link] assign[=] call[call[name[chain]][constant[0]]][constant[0]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], binary_operation[call[name[len], parameter[name[chain]]] - constant[1]]]]] begin[:]
if compare[call[call[name[chain]][name[i]]][constant[1]] equal[==] constant[R]] begin[:]
variable[link] assign[=] call[call[name[hash_function], parameter[binary_operation[name[link] + call[call[name[chain]][name[i]]][constant[0]]]]].digest, parameter[]]
if compare[name[link] equal[==] call[call[name[chain]][<ast.UnaryOp object at 0x7da18f58f2e0>]][constant[0]]] begin[:]
return[name[link]] | keyword[def] identifier[check_chain] ( identifier[chain] ):
literal[string]
identifier[link] = identifier[chain] [ literal[int] ][ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[chain] )- literal[int] ):
keyword[if] identifier[chain] [ identifier[i] ][ literal[int] ]== literal[string] :
identifier[link] = identifier[hash_function] ( identifier[link] + identifier[chain] [ identifier[i] ][ literal[int] ]). identifier[digest] ()
keyword[elif] identifier[chain] [ identifier[i] ][ literal[int] ]== literal[string] :
identifier[link] = identifier[hash_function] ( identifier[chain] [ identifier[i] ][ literal[int] ]+ identifier[link] ). identifier[digest] ()
keyword[else] :
keyword[raise] identifier[MerkleError] ( literal[string] %( identifier[str] ( identifier[i] ), identifier[str] ( identifier[codecs] . identifier[encode] ( identifier[chain] [ identifier[i] ][ literal[int] ], literal[string] ))))
keyword[if] identifier[link] == identifier[chain] [- literal[int] ][ literal[int] ]:
keyword[return] identifier[link]
keyword[else] :
keyword[raise] identifier[MerkleError] ( literal[string] ) | def check_chain(chain):
"""Verify a merkle chain to see if the Merkle root can be reproduced.
"""
link = chain[0][0]
for i in range(1, len(chain) - 1):
if chain[i][1] == 'R':
link = hash_function(link + chain[i][0]).digest() # depends on [control=['if'], data=[]]
elif chain[i][1] == 'L':
link = hash_function(chain[i][0] + link).digest() # depends on [control=['if'], data=[]]
else:
raise MerkleError('Link %s has no side value: %s' % (str(i), str(codecs.encode(chain[i][0], 'hex_codec')))) # depends on [control=['for'], data=['i']]
if link == chain[-1][0]:
return link # depends on [control=['if'], data=['link']]
else:
raise MerkleError('The Merkle Chain is not valid.') |
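
A self-contained check of the verifier above, assuming hash_function is hashlib.sha256 and the chain layout the loop implies: the starting link first, (sibling, side) pairs in the middle, and the expected root last. The side labels on the first and last entries are never read by the loop, so the strings used for them are arbitrary.

import hashlib

hash_function = hashlib.sha256  # assumption: the module's hash is SHA-256

leaf_l = hash_function(b'alpha').digest()
leaf_r = hash_function(b'beta').digest()
root = hash_function(leaf_l + leaf_r).digest()

# Proof for leaf_l in a two-leaf tree: its sibling hangs on the right.
chain = [(leaf_l, 'SELF'), (leaf_r, 'R'), (root, 'ROOT')]
assert check_chain(chain) == root
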
def handle_debug_break(self, call_id, payload):
"""Handle responses `DebugBreakEvent`."""
line = payload['line']
config = self.launcher.config # TODO: make an attribute of client
path = os.path.relpath(payload['file'], config['root-dir'])
self.editor.raw_message(feedback['notify_break'].format(line, path))
self.debug_thread_id = payload["threadId"] | def function[handle_debug_break, parameter[self, call_id, payload]]:
constant[Handle responses `DebugBreakEvent`.]
variable[line] assign[=] call[name[payload]][constant[line]]
variable[config] assign[=] name[self].launcher.config
variable[path] assign[=] call[name[os].path.relpath, parameter[call[name[payload]][constant[file]], call[name[config]][constant[root-dir]]]]
call[name[self].editor.raw_message, parameter[call[call[name[feedback]][constant[notify_break]].format, parameter[name[line], name[path]]]]]
name[self].debug_thread_id assign[=] call[name[payload]][constant[threadId]] | keyword[def] identifier[handle_debug_break] ( identifier[self] , identifier[call_id] , identifier[payload] ):
literal[string]
identifier[line] = identifier[payload] [ literal[string] ]
identifier[config] = identifier[self] . identifier[launcher] . identifier[config]
identifier[path] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[payload] [ literal[string] ], identifier[config] [ literal[string] ])
identifier[self] . identifier[editor] . identifier[raw_message] ( identifier[feedback] [ literal[string] ]. identifier[format] ( identifier[line] , identifier[path] ))
identifier[self] . identifier[debug_thread_id] = identifier[payload] [ literal[string] ] | def handle_debug_break(self, call_id, payload):
"""Handle responses `DebugBreakEvent`."""
line = payload['line']
config = self.launcher.config # TODO: make an attribute of client
path = os.path.relpath(payload['file'], config['root-dir'])
self.editor.raw_message(feedback['notify_break'].format(line, path))
self.debug_thread_id = payload['threadId'] |
def ensure_ajax(valid_request_methods, error_response_context=None):
"""
    Intends to ensure the received request is an ajax request and that its
    method is included in the valid request methods
:param valid_request_methods: list: list of valid request methods, such as
'GET', 'POST'
:param error_response_context: None/dict: context dictionary to render, if
error occurs
:return: function
"""
def real_decorator(view_func):
def wrap_func(request, *args, **kwargs):
if not isinstance(request, HttpRequest):
# make sure the request is a django httprequest
return generate_error_json_response("Invalid request!",
error_response_context)
elif not request.is_ajax():
# ensure the request is an ajax request
return generate_error_json_response("Invalid request type!",
error_response_context)
elif request.method not in valid_request_methods:
# check if the request method is in allowed request methods
return generate_error_json_response("Invalid request method!",
error_response_context)
else:
return view_func(request, *args, **kwargs)
wrap_func.__doc__ = view_func.__doc__
wrap_func.__name__ = view_func.__name__
return wrap_func
return real_decorator | def function[ensure_ajax, parameter[valid_request_methods, error_response_context]]:
constant[
Intends to ensure the received the request is ajax request and it is
included in the valid request methods
:param valid_request_methods: list: list of valid request methods, such as
'GET', 'POST'
:param error_response_context: None/dict: context dictionary to render, if
error occurs
:return: function
]
def function[real_decorator, parameter[view_func]]:
def function[wrap_func, parameter[request]]:
if <ast.UnaryOp object at 0x7da1b24afeb0> begin[:]
return[call[name[generate_error_json_response], parameter[constant[Invalid request!], name[error_response_context]]]]
name[wrap_func].__doc__ assign[=] name[view_func].__doc__
name[wrap_func].__name__ assign[=] name[view_func].__name__
return[name[wrap_func]]
return[name[real_decorator]] | keyword[def] identifier[ensure_ajax] ( identifier[valid_request_methods] , identifier[error_response_context] = keyword[None] ):
literal[string]
keyword[def] identifier[real_decorator] ( identifier[view_func] ):
keyword[def] identifier[wrap_func] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[request] , identifier[HttpRequest] ):
keyword[return] identifier[generate_error_json_response] ( literal[string] ,
identifier[error_response_context] )
keyword[elif] keyword[not] identifier[request] . identifier[is_ajax] ():
keyword[return] identifier[generate_error_json_response] ( literal[string] ,
identifier[error_response_context] )
keyword[elif] identifier[request] . identifier[method] keyword[not] keyword[in] identifier[valid_request_methods] :
keyword[return] identifier[generate_error_json_response] ( literal[string] ,
identifier[error_response_context] )
keyword[else] :
keyword[return] identifier[view_func] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] )
identifier[wrap_func] . identifier[__doc__] = identifier[view_func] . identifier[__doc__]
identifier[wrap_func] . identifier[__name__] = identifier[view_func] . identifier[__name__]
keyword[return] identifier[wrap_func]
keyword[return] identifier[real_decorator] | def ensure_ajax(valid_request_methods, error_response_context=None):
"""
    Intends to ensure the received request is an ajax request and that its
    method is included in the valid request methods
:param valid_request_methods: list: list of valid request methods, such as
'GET', 'POST'
:param error_response_context: None/dict: context dictionary to render, if
error occurs
:return: function
"""
def real_decorator(view_func):
def wrap_func(request, *args, **kwargs):
if not isinstance(request, HttpRequest):
# make sure the request is a django httprequest
return generate_error_json_response('Invalid request!', error_response_context) # depends on [control=['if'], data=[]]
elif not request.is_ajax():
# ensure the request is an ajax request
return generate_error_json_response('Invalid request type!', error_response_context) # depends on [control=['if'], data=[]]
elif request.method not in valid_request_methods:
# check if the request method is in allowed request methods
return generate_error_json_response('Invalid request method!', error_response_context) # depends on [control=['if'], data=[]]
else:
return view_func(request, *args, **kwargs)
wrap_func.__doc__ = view_func.__doc__
wrap_func.__name__ = view_func.__name__
return wrap_func
return real_decorator |
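
A hedged Django usage sketch for the decorator above; the view is hypothetical, and request.is_ajax() only exists on Django versions before 4.0, so the decorator implicitly assumes one of those.

from django.http import JsonResponse

@ensure_ajax(['POST'], error_response_context={'source': 'notes-api'})
def create_note(request):
    # Reached only for ajax POST requests; anything else receives the
    # JSON error produced by generate_error_json_response.
    return JsonResponse({'status': 'ok'})
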
def _cli_check_data_dir(data_dir):
'''Checks that the data dir exists and contains METADATA.json'''
if data_dir is None:
return None
data_dir = os.path.expanduser(data_dir)
data_dir = os.path.expandvars(data_dir)
if not os.path.isdir(data_dir):
raise RuntimeError("Data directory '{}' does not exist or is not a directory".format(data_dir))
if not os.path.isfile(os.path.join(data_dir, 'METADATA.json')):
raise RuntimeError("Data directory '{}' does not contain a METADATA.json file".format(data_dir))
return data_dir | def function[_cli_check_data_dir, parameter[data_dir]]:
constant[Checks that the data dir exists and contains METADATA.json]
if compare[name[data_dir] is constant[None]] begin[:]
return[constant[None]]
variable[data_dir] assign[=] call[name[os].path.expanduser, parameter[name[data_dir]]]
variable[data_dir] assign[=] call[name[os].path.expandvars, parameter[name[data_dir]]]
if <ast.UnaryOp object at 0x7da2054a68c0> begin[:]
<ast.Raise object at 0x7da20c6a8550>
if <ast.UnaryOp object at 0x7da20c6aad10> begin[:]
<ast.Raise object at 0x7da20c6abf40>
return[name[data_dir]] | keyword[def] identifier[_cli_check_data_dir] ( identifier[data_dir] ):
literal[string]
keyword[if] identifier[data_dir] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[data_dir] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[data_dir] )
identifier[data_dir] = identifier[os] . identifier[path] . identifier[expandvars] ( identifier[data_dir] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[data_dir] ):
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[data_dir] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[data_dir] , literal[string] )):
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[data_dir] ))
keyword[return] identifier[data_dir] | def _cli_check_data_dir(data_dir):
"""Checks that the data dir exists and contains METADATA.json"""
if data_dir is None:
return None # depends on [control=['if'], data=[]]
data_dir = os.path.expanduser(data_dir)
data_dir = os.path.expandvars(data_dir)
if not os.path.isdir(data_dir):
raise RuntimeError("Data directory '{}' does not exist or is not a directory".format(data_dir)) # depends on [control=['if'], data=[]]
if not os.path.isfile(os.path.join(data_dir, 'METADATA.json')):
raise RuntimeError("Data directory '{}' does not contain a METADATA.json file".format(data_dir)) # depends on [control=['if'], data=[]]
return data_dir |
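
A quick self-check of the validator above using only the standard library; both the success path and the error path are exercised.

import json
import os
import tempfile

# A directory that contains METADATA.json validates and is returned as-is.
with tempfile.TemporaryDirectory() as tmp:
    with open(os.path.join(tmp, 'METADATA.json'), 'w') as fh:
        json.dump({}, fh)
    assert _cli_check_data_dir(tmp) == tmp

# A missing directory raises RuntimeError with a pointed message.
try:
    _cli_check_data_dir('/no/such/dir')
except RuntimeError as exc:
    print(exc)
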
def author_id_normalize_and_schema(uid, schema=None):
"""Detect and normalize an author UID schema.
Args:
uid (string): a UID string
schema (string): try to resolve to schema
Returns:
Tuple[string, string]: a tuple (uid, schema) where:
- uid: the UID normalized to comply with the id.json schema
- schema: a schema of the UID or *None* if not recognised
    Raises:
        UnknownUIDSchema: if the UID gives too little information to definitively guess the schema
        SchemaUIDConflict: if the specified schema does not match the given UID
"""
def _get_uid_normalized_in_schema(_uid, _schema):
regex, template = _RE_AUTHORS_UID[_schema]
match = regex.match(_uid)
if match:
return template.format(match.group('uid'))
if idutils.is_orcid(uid) and schema in (None, 'ORCID'):
return idutils.normalize_orcid(uid), 'ORCID'
if schema and schema not in _RE_AUTHORS_UID:
# Schema explicitly specified, but this function can't handle it
raise UnknownUIDSchema(uid)
if schema:
normalized_uid = _get_uid_normalized_in_schema(uid, schema)
if normalized_uid:
return normalized_uid, schema
else:
raise SchemaUIDConflict(schema, uid)
match_schema, normalized_uid = None, None
for candidate_schema in _RE_AUTHORS_UID:
candidate_uid = _get_uid_normalized_in_schema(uid, candidate_schema)
if candidate_uid:
if match_schema:
# Valid against more than one candidate schema, ambiguous
raise UnknownUIDSchema(uid)
match_schema = candidate_schema
normalized_uid = candidate_uid
if match_schema:
return normalized_uid, match_schema
    # No guesses have been found
raise UnknownUIDSchema(uid) | def function[author_id_normalize_and_schema, parameter[uid, schema]]:
constant[Detect and normalize an author UID schema.
Args:
uid (string): a UID string
schema (string): try to resolve to schema
Returns:
Tuple[string, string]: a tuple (uid, schema) where:
- uid: the UID normalized to comply with the id.json schema
- schema: a schema of the UID or *None* if not recognised
Raise:
UnknownUIDSchema: if UID is too little to definitively guess the schema
SchemaUIDConflict: if specified schema is not matching the given UID
]
def function[_get_uid_normalized_in_schema, parameter[_uid, _schema]]:
<ast.Tuple object at 0x7da1b24ad3f0> assign[=] call[name[_RE_AUTHORS_UID]][name[_schema]]
variable[match] assign[=] call[name[regex].match, parameter[name[_uid]]]
if name[match] begin[:]
return[call[name[template].format, parameter[call[name[match].group, parameter[constant[uid]]]]]]
if <ast.BoolOp object at 0x7da1b24afa30> begin[:]
return[tuple[[<ast.Call object at 0x7da1b24ad690>, <ast.Constant object at 0x7da1b24ad6c0>]]]
if <ast.BoolOp object at 0x7da1b24ac8b0> begin[:]
<ast.Raise object at 0x7da1b24ae4a0>
if name[schema] begin[:]
variable[normalized_uid] assign[=] call[name[_get_uid_normalized_in_schema], parameter[name[uid], name[schema]]]
if name[normalized_uid] begin[:]
return[tuple[[<ast.Name object at 0x7da1b25ef2e0>, <ast.Name object at 0x7da1b25eeaa0>]]]
<ast.Tuple object at 0x7da1b25ec490> assign[=] tuple[[<ast.Constant object at 0x7da1b25ef3d0>, <ast.Constant object at 0x7da1b25ef370>]]
for taget[name[candidate_schema]] in starred[name[_RE_AUTHORS_UID]] begin[:]
variable[candidate_uid] assign[=] call[name[_get_uid_normalized_in_schema], parameter[name[uid], name[candidate_schema]]]
if name[candidate_uid] begin[:]
if name[match_schema] begin[:]
<ast.Raise object at 0x7da1b25ef670>
variable[match_schema] assign[=] name[candidate_schema]
variable[normalized_uid] assign[=] name[candidate_uid]
if name[match_schema] begin[:]
return[tuple[[<ast.Name object at 0x7da1b242a860>, <ast.Name object at 0x7da1b2429b10>]]]
<ast.Raise object at 0x7da1b2429f30> | keyword[def] identifier[author_id_normalize_and_schema] ( identifier[uid] , identifier[schema] = keyword[None] ):
literal[string]
keyword[def] identifier[_get_uid_normalized_in_schema] ( identifier[_uid] , identifier[_schema] ):
identifier[regex] , identifier[template] = identifier[_RE_AUTHORS_UID] [ identifier[_schema] ]
identifier[match] = identifier[regex] . identifier[match] ( identifier[_uid] )
keyword[if] identifier[match] :
keyword[return] identifier[template] . identifier[format] ( identifier[match] . identifier[group] ( literal[string] ))
keyword[if] identifier[idutils] . identifier[is_orcid] ( identifier[uid] ) keyword[and] identifier[schema] keyword[in] ( keyword[None] , literal[string] ):
keyword[return] identifier[idutils] . identifier[normalize_orcid] ( identifier[uid] ), literal[string]
keyword[if] identifier[schema] keyword[and] identifier[schema] keyword[not] keyword[in] identifier[_RE_AUTHORS_UID] :
keyword[raise] identifier[UnknownUIDSchema] ( identifier[uid] )
keyword[if] identifier[schema] :
identifier[normalized_uid] = identifier[_get_uid_normalized_in_schema] ( identifier[uid] , identifier[schema] )
keyword[if] identifier[normalized_uid] :
keyword[return] identifier[normalized_uid] , identifier[schema]
keyword[else] :
keyword[raise] identifier[SchemaUIDConflict] ( identifier[schema] , identifier[uid] )
identifier[match_schema] , identifier[normalized_uid] = keyword[None] , keyword[None]
keyword[for] identifier[candidate_schema] keyword[in] identifier[_RE_AUTHORS_UID] :
identifier[candidate_uid] = identifier[_get_uid_normalized_in_schema] ( identifier[uid] , identifier[candidate_schema] )
keyword[if] identifier[candidate_uid] :
keyword[if] identifier[match_schema] :
keyword[raise] identifier[UnknownUIDSchema] ( identifier[uid] )
identifier[match_schema] = identifier[candidate_schema]
identifier[normalized_uid] = identifier[candidate_uid]
keyword[if] identifier[match_schema] :
keyword[return] identifier[normalized_uid] , identifier[match_schema]
keyword[raise] identifier[UnknownUIDSchema] ( identifier[uid] ) | def author_id_normalize_and_schema(uid, schema=None):
"""Detect and normalize an author UID schema.
Args:
uid (string): a UID string
schema (string): try to resolve to schema
Returns:
Tuple[string, string]: a tuple (uid, schema) where:
- uid: the UID normalized to comply with the id.json schema
- schema: a schema of the UID or *None* if not recognised
    Raises:
        UnknownUIDSchema: if the UID gives too little information to definitively guess the schema
        SchemaUIDConflict: if the specified schema does not match the given UID
"""
def _get_uid_normalized_in_schema(_uid, _schema):
(regex, template) = _RE_AUTHORS_UID[_schema]
match = regex.match(_uid)
if match:
return template.format(match.group('uid')) # depends on [control=['if'], data=[]]
if idutils.is_orcid(uid) and schema in (None, 'ORCID'):
return (idutils.normalize_orcid(uid), 'ORCID') # depends on [control=['if'], data=[]]
if schema and schema not in _RE_AUTHORS_UID:
# Schema explicitly specified, but this function can't handle it
raise UnknownUIDSchema(uid) # depends on [control=['if'], data=[]]
if schema:
normalized_uid = _get_uid_normalized_in_schema(uid, schema)
if normalized_uid:
return (normalized_uid, schema) # depends on [control=['if'], data=[]]
else:
raise SchemaUIDConflict(schema, uid) # depends on [control=['if'], data=[]]
(match_schema, normalized_uid) = (None, None)
for candidate_schema in _RE_AUTHORS_UID:
candidate_uid = _get_uid_normalized_in_schema(uid, candidate_schema)
if candidate_uid:
if match_schema:
# Valid against more than one candidate schema, ambiguous
raise UnknownUIDSchema(uid) # depends on [control=['if'], data=[]]
match_schema = candidate_schema
normalized_uid = candidate_uid # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['candidate_schema']]
if match_schema:
return (normalized_uid, match_schema) # depends on [control=['if'], data=[]]
    # No guesses have been found
raise UnknownUIDSchema(uid) |
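
A hedged usage sketch; the ORCID below is the well-known example identifier, and which non-ORCID schemas resolve depends on the module's _RE_AUTHORS_UID table, which this row does not show.

# ORCID detection goes through idutils and needs no _RE_AUTHORS_UID entry.
print(author_id_normalize_and_schema('0000-0002-1825-0097'))
# -> ('0000-0002-1825-0097', 'ORCID')

# A schema name the module does not know raises UnknownUIDSchema; a known
# schema that fails to match the UID raises SchemaUIDConflict instead.
try:
    author_id_normalize_and_schema('0000-0002-1825-0097', schema='NOSUCH')
except UnknownUIDSchema as exc:
    print('unknown schema:', exc)
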
def _stream(self):
"""
Returns a generator of lines instead of a list of lines.
"""
if self._exception:
raise self._exception
try:
if self._content:
yield self._content
else:
args = self.create_args()
with self.ctx.connect(*args, env=self.create_env(), timeout=self.timeout) as s:
yield s
except StopIteration:
raise
except Exception as ex:
self._exception = ex
raise ContentException(str(ex)) | def function[_stream, parameter[self]]:
constant[
Returns a generator of lines instead of a list of lines.
]
if name[self]._exception begin[:]
<ast.Raise object at 0x7da2046208e0>
<ast.Try object at 0x7da2046208b0> | keyword[def] identifier[_stream] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_exception] :
keyword[raise] identifier[self] . identifier[_exception]
keyword[try] :
keyword[if] identifier[self] . identifier[_content] :
keyword[yield] identifier[self] . identifier[_content]
keyword[else] :
identifier[args] = identifier[self] . identifier[create_args] ()
keyword[with] identifier[self] . identifier[ctx] . identifier[connect] (* identifier[args] , identifier[env] = identifier[self] . identifier[create_env] (), identifier[timeout] = identifier[self] . identifier[timeout] ) keyword[as] identifier[s] :
keyword[yield] identifier[s]
keyword[except] identifier[StopIteration] :
keyword[raise]
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[self] . identifier[_exception] = identifier[ex]
keyword[raise] identifier[ContentException] ( identifier[str] ( identifier[ex] )) | def _stream(self):
"""
Returns a generator of lines instead of a list of lines.
"""
if self._exception:
raise self._exception # depends on [control=['if'], data=[]]
try:
if self._content:
yield self._content # depends on [control=['if'], data=[]]
else:
args = self.create_args()
with self.ctx.connect(*args, env=self.create_env(), timeout=self.timeout) as s:
yield s # depends on [control=['with'], data=['s']] # depends on [control=['try'], data=[]]
except StopIteration:
raise # depends on [control=['except'], data=[]]
except Exception as ex:
self._exception = ex
raise ContentException(str(ex)) # depends on [control=['except'], data=['ex']] |
def get(self, value):
"""
Get an enumeration item for an enumeration value.
:param unicode value: Enumeration value.
:raise InvalidEnumItem: If ``value`` does not match any known
enumeration value.
:rtype: EnumItem
"""
_nothing = object()
item = self._values.get(value, _nothing)
if item is _nothing:
raise InvalidEnumItem(value)
return item | def function[get, parameter[self, value]]:
constant[
Get an enumeration item for an enumeration value.
:param unicode value: Enumeration value.
:raise InvalidEnumItem: If ``value`` does not match any known
enumeration value.
:rtype: EnumItem
]
variable[_nothing] assign[=] call[name[object], parameter[]]
variable[item] assign[=] call[name[self]._values.get, parameter[name[value], name[_nothing]]]
if compare[name[item] is name[_nothing]] begin[:]
<ast.Raise object at 0x7da20e9578e0>
return[name[item]] | keyword[def] identifier[get] ( identifier[self] , identifier[value] ):
literal[string]
identifier[_nothing] = identifier[object] ()
identifier[item] = identifier[self] . identifier[_values] . identifier[get] ( identifier[value] , identifier[_nothing] )
keyword[if] identifier[item] keyword[is] identifier[_nothing] :
keyword[raise] identifier[InvalidEnumItem] ( identifier[value] )
keyword[return] identifier[item] | def get(self, value):
"""
Get an enumeration item for an enumeration value.
:param unicode value: Enumeration value.
:raise InvalidEnumItem: If ``value`` does not match any known
enumeration value.
:rtype: EnumItem
"""
_nothing = object()
item = self._values.get(value, _nothing)
if item is _nothing:
raise InvalidEnumItem(value) # depends on [control=['if'], data=[]]
return item |
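The `_nothing = object()` sentinel distinguishes "key maps to None" from "key absent", which a plain `dict.get(value)` cannot. A quick illustration:

_nothing = object()
values = {'on': None}                  # None is a legitimate stored item

assert values.get('on', _nothing) is None          # present, value is None
assert values.get('off', _nothing) is _nothing     # genuinely missing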
def _has_tag(version, debug=False):
"""
    Determine whether a version is a local git tag name or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
cmd = sh.git.bake('show-ref', '--verify', '--quiet',
"refs/tags/{}".format(version))
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False | def function[_has_tag, parameter[version, debug]]:
constant[
    Determine whether a version is a local git tag name or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
]
variable[cmd] assign[=] call[name[sh].git.bake, parameter[constant[show-ref], constant[--verify], constant[--quiet], call[constant[refs/tags/{}].format, parameter[name[version]]]]]
<ast.Try object at 0x7da1b1260190> | keyword[def] identifier[_has_tag] ( identifier[version] , identifier[debug] = keyword[False] ):
literal[string]
identifier[cmd] = identifier[sh] . identifier[git] . identifier[bake] ( literal[string] , literal[string] , literal[string] ,
literal[string] . identifier[format] ( identifier[version] ))
keyword[try] :
identifier[util] . identifier[run_command] ( identifier[cmd] , identifier[debug] = identifier[debug] )
keyword[return] keyword[True]
keyword[except] identifier[sh] . identifier[ErrorReturnCode] :
keyword[return] keyword[False] | def _has_tag(version, debug=False):
"""
Determine a version is a local git tag name or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
cmd = sh.git.bake('show-ref', '--verify', '--quiet', 'refs/tags/{}'.format(version))
try:
util.run_command(cmd, debug=debug)
return True # depends on [control=['try'], data=[]]
except sh.ErrorReturnCode:
return False # depends on [control=['except'], data=[]] |
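An equivalent check without the `sh` dependency — a sketch using only the standard library, assuming Python 3.5+, `git` on PATH, and a working directory inside the repository:

import subprocess

def has_tag(version):
    """True if refs/tags/<version> exists in the local repository."""
    result = subprocess.run(
        ['git', 'show-ref', '--verify', '--quiet',
         'refs/tags/{}'.format(version)])
    return result.returncode == 0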
def map_to_adjust(self, strain, **params):
"""Map an input dictionary of sampling parameters to the
adjust_strain function by filtering the dictionary for the
calibration parameters, then calling adjust_strain.
Parameters
----------
strain : FrequencySeries
The strain to be recalibrated.
params : dict
Dictionary of sampling parameters which includes
calibration parameters.
Return
------
strain_adjusted : FrequencySeries
The recalibrated strain.
"""
# calibration param names
arg_names = ['delta_fs', 'delta_fc', 'delta_qinv', 'kappa_c',
'kappa_tst_re', 'kappa_tst_im', 'kappa_pu_re',
'kappa_pu_im']
# calibration param labels as they exist in config files
arg_labels = [''.join(['calib_', name]) for name in arg_names]
# default values for calibration params
default_values = [0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0]
# make list of calibration param values
calib_args = []
for arg, val in zip(arg_labels, default_values):
if arg in params:
calib_args.append(params[arg])
else:
calib_args.append(val)
# adjust the strain using calibration param values
strain_adjusted = self.adjust_strain(strain, delta_fs=calib_args[0],
delta_fc=calib_args[1], delta_qinv=calib_args[2],
kappa_c=calib_args[3],
kappa_tst_re=calib_args[4],
kappa_tst_im=calib_args[5],
kappa_pu_re=calib_args[6],
kappa_pu_im=calib_args[7])
return strain_adjusted | def function[map_to_adjust, parameter[self, strain]]:
constant[Map an input dictionary of sampling parameters to the
adjust_strain function by filtering the dictionary for the
calibration parameters, then calling adjust_strain.
Parameters
----------
strain : FrequencySeries
The strain to be recalibrated.
params : dict
Dictionary of sampling parameters which includes
calibration parameters.
Return
------
strain_adjusted : FrequencySeries
The recalibrated strain.
]
variable[arg_names] assign[=] list[[<ast.Constant object at 0x7da20c6e47c0>, <ast.Constant object at 0x7da20c6e6b90>, <ast.Constant object at 0x7da20c6e5d20>, <ast.Constant object at 0x7da20c6e4c40>, <ast.Constant object at 0x7da20c6e56f0>, <ast.Constant object at 0x7da20c6e40a0>, <ast.Constant object at 0x7da20c6e6e60>, <ast.Constant object at 0x7da20c6e4340>]]
variable[arg_labels] assign[=] <ast.ListComp object at 0x7da20c6e5ab0>
variable[default_values] assign[=] list[[<ast.Constant object at 0x7da20c6e4d30>, <ast.Constant object at 0x7da20c6e5420>, <ast.Constant object at 0x7da20c6e51e0>, <ast.Constant object at 0x7da20c6e50f0>, <ast.Constant object at 0x7da20c6e64d0>, <ast.Constant object at 0x7da20c6e5210>, <ast.Constant object at 0x7da20c6e4640>, <ast.Constant object at 0x7da20c6e52d0>]]
variable[calib_args] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c6e6320>, <ast.Name object at 0x7da20c6e7dc0>]]] in starred[call[name[zip], parameter[name[arg_labels], name[default_values]]]] begin[:]
if compare[name[arg] in name[params]] begin[:]
call[name[calib_args].append, parameter[call[name[params]][name[arg]]]]
variable[strain_adjusted] assign[=] call[name[self].adjust_strain, parameter[name[strain]]]
return[name[strain_adjusted]] | keyword[def] identifier[map_to_adjust] ( identifier[self] , identifier[strain] ,** identifier[params] ):
literal[string]
identifier[arg_names] =[ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] ]
identifier[arg_labels] =[ literal[string] . identifier[join] ([ literal[string] , identifier[name] ]) keyword[for] identifier[name] keyword[in] identifier[arg_names] ]
identifier[default_values] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[calib_args] =[]
keyword[for] identifier[arg] , identifier[val] keyword[in] identifier[zip] ( identifier[arg_labels] , identifier[default_values] ):
keyword[if] identifier[arg] keyword[in] identifier[params] :
identifier[calib_args] . identifier[append] ( identifier[params] [ identifier[arg] ])
keyword[else] :
identifier[calib_args] . identifier[append] ( identifier[val] )
identifier[strain_adjusted] = identifier[self] . identifier[adjust_strain] ( identifier[strain] , identifier[delta_fs] = identifier[calib_args] [ literal[int] ],
identifier[delta_fc] = identifier[calib_args] [ literal[int] ], identifier[delta_qinv] = identifier[calib_args] [ literal[int] ],
identifier[kappa_c] = identifier[calib_args] [ literal[int] ],
identifier[kappa_tst_re] = identifier[calib_args] [ literal[int] ],
identifier[kappa_tst_im] = identifier[calib_args] [ literal[int] ],
identifier[kappa_pu_re] = identifier[calib_args] [ literal[int] ],
identifier[kappa_pu_im] = identifier[calib_args] [ literal[int] ])
keyword[return] identifier[strain_adjusted] | def map_to_adjust(self, strain, **params):
"""Map an input dictionary of sampling parameters to the
adjust_strain function by filtering the dictionary for the
calibration parameters, then calling adjust_strain.
Parameters
----------
strain : FrequencySeries
The strain to be recalibrated.
params : dict
Dictionary of sampling parameters which includes
calibration parameters.
Return
------
strain_adjusted : FrequencySeries
The recalibrated strain.
"""
# calibration param names
arg_names = ['delta_fs', 'delta_fc', 'delta_qinv', 'kappa_c', 'kappa_tst_re', 'kappa_tst_im', 'kappa_pu_re', 'kappa_pu_im']
# calibration param labels as they exist in config files
arg_labels = [''.join(['calib_', name]) for name in arg_names]
# default values for calibration params
default_values = [0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0]
# make list of calibration param values
calib_args = []
for (arg, val) in zip(arg_labels, default_values):
if arg in params:
calib_args.append(params[arg]) # depends on [control=['if'], data=['arg', 'params']]
else:
calib_args.append(val) # depends on [control=['for'], data=[]]
# adjust the strain using calibration param values
strain_adjusted = self.adjust_strain(strain, delta_fs=calib_args[0], delta_fc=calib_args[1], delta_qinv=calib_args[2], kappa_c=calib_args[3], kappa_tst_re=calib_args[4], kappa_tst_im=calib_args[5], kappa_pu_re=calib_args[6], kappa_pu_im=calib_args[7])
return strain_adjusted |
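The zip-over-defaults loop can be written more directly with `dict.get`, keying each calibration parameter by name instead of position — a sketch of the same extraction, where `params` is the sampling dictionary passed to the method:

defaults = {
    'calib_delta_fs': 0.0, 'calib_delta_fc': 0.0, 'calib_delta_qinv': 0.0,
    'calib_kappa_c': 1.0, 'calib_kappa_tst_re': 1.0, 'calib_kappa_tst_im': 0.0,
    'calib_kappa_pu_re': 1.0, 'calib_kappa_pu_im': 0.0,
}
calib = {name: params.get(name, default)
         for name, default in defaults.items()}
# calib['calib_kappa_c'] etc. can then be passed on as keyword arguments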
async def get_txn(self, seq_no: int) -> str:
"""
Find a transaction on the distributed ledger by its sequence number.
:param seq_no: transaction number
        :return: transaction json for the given sequence number, null for no match
"""
LOGGER.debug('BaseAnchor.get_txn >>> seq_no: %s', seq_no)
rv_json = json.dumps({})
req_json = await ledger.build_get_txn_request(self.did, None, seq_no)
resp = json.loads(await self._submit(req_json))
rv_json = self.pool.protocol.txn2data(resp)
LOGGER.debug('BaseAnchor.get_txn <<< %s', rv_json)
return rv_json | <ast.AsyncFunctionDef object at 0x7da20c6c75b0> | keyword[async] keyword[def] identifier[get_txn] ( identifier[self] , identifier[seq_no] : identifier[int] )-> identifier[str] :
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[seq_no] )
identifier[rv_json] = identifier[json] . identifier[dumps] ({})
identifier[req_json] = keyword[await] identifier[ledger] . identifier[build_get_txn_request] ( identifier[self] . identifier[did] , keyword[None] , identifier[seq_no] )
identifier[resp] = identifier[json] . identifier[loads] ( keyword[await] identifier[self] . identifier[_submit] ( identifier[req_json] ))
identifier[rv_json] = identifier[self] . identifier[pool] . identifier[protocol] . identifier[txn2data] ( identifier[resp] )
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[rv_json] )
keyword[return] identifier[rv_json] | async def get_txn(self, seq_no: int) -> str:
"""
Find a transaction on the distributed ledger by its sequence number.
:param seq_no: transaction number
        :return: transaction json for the given sequence number, null for no match
"""
LOGGER.debug('BaseAnchor.get_txn >>> seq_no: %s', seq_no)
rv_json = json.dumps({})
req_json = await ledger.build_get_txn_request(self.did, None, seq_no)
resp = json.loads(await self._submit(req_json))
rv_json = self.pool.protocol.txn2data(resp)
LOGGER.debug('BaseAnchor.get_txn <<< %s', rv_json)
return rv_json |
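A hedged usage sketch — `anchor` stands in for any opened anchor instance with a pool behind it; wallet and pool setup are elided:

import json

async def show_txn(anchor, seq_no):
    txn_json = await anchor.get_txn(seq_no)
    txn = json.loads(txn_json)                 # json null -> None when no match
    print(txn or 'no transaction #{}'.format(seq_no))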
def make_encoder(activation, latent_size, base_depth):
"""Creates the encoder function.
Args:
activation: Activation function in hidden layers.
latent_size: The dimensionality of the encoding.
base_depth: The lowest depth for a layer.
Returns:
encoder: A `callable` mapping a `Tensor` of images to a
`tfd.Distribution` instance over encodings.
"""
conv = functools.partial(
tf.keras.layers.Conv2D, padding="SAME", activation=activation)
encoder_net = tf.keras.Sequential([
conv(base_depth, 5, 1),
conv(base_depth, 5, 2),
conv(2 * base_depth, 5, 1),
conv(2 * base_depth, 5, 2),
conv(4 * latent_size, 7, padding="VALID"),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(2 * latent_size, activation=None),
])
def encoder(images):
images = 2 * tf.cast(images, dtype=tf.float32) - 1
net = encoder_net(images)
return tfd.MultivariateNormalDiag(
loc=net[..., :latent_size],
scale_diag=tf.nn.softplus(net[..., latent_size:] +
_softplus_inverse(1.0)),
name="code")
return encoder | def function[make_encoder, parameter[activation, latent_size, base_depth]]:
constant[Creates the encoder function.
Args:
activation: Activation function in hidden layers.
latent_size: The dimensionality of the encoding.
base_depth: The lowest depth for a layer.
Returns:
encoder: A `callable` mapping a `Tensor` of images to a
`tfd.Distribution` instance over encodings.
]
variable[conv] assign[=] call[name[functools].partial, parameter[name[tf].keras.layers.Conv2D]]
variable[encoder_net] assign[=] call[name[tf].keras.Sequential, parameter[list[[<ast.Call object at 0x7da1b0229600>, <ast.Call object at 0x7da1b022b250>, <ast.Call object at 0x7da1b03570a0>, <ast.Call object at 0x7da1b0320520>, <ast.Call object at 0x7da1b0320fd0>, <ast.Call object at 0x7da1b0320e50>, <ast.Call object at 0x7da1b0228910>]]]]
def function[encoder, parameter[images]]:
variable[images] assign[=] binary_operation[binary_operation[constant[2] * call[name[tf].cast, parameter[name[images]]]] - constant[1]]
variable[net] assign[=] call[name[encoder_net], parameter[name[images]]]
return[call[name[tfd].MultivariateNormalDiag, parameter[]]]
return[name[encoder]] | keyword[def] identifier[make_encoder] ( identifier[activation] , identifier[latent_size] , identifier[base_depth] ):
literal[string]
identifier[conv] = identifier[functools] . identifier[partial] (
identifier[tf] . identifier[keras] . identifier[layers] . identifier[Conv2D] , identifier[padding] = literal[string] , identifier[activation] = identifier[activation] )
identifier[encoder_net] = identifier[tf] . identifier[keras] . identifier[Sequential] ([
identifier[conv] ( identifier[base_depth] , literal[int] , literal[int] ),
identifier[conv] ( identifier[base_depth] , literal[int] , literal[int] ),
identifier[conv] ( literal[int] * identifier[base_depth] , literal[int] , literal[int] ),
identifier[conv] ( literal[int] * identifier[base_depth] , literal[int] , literal[int] ),
identifier[conv] ( literal[int] * identifier[latent_size] , literal[int] , identifier[padding] = literal[string] ),
identifier[tf] . identifier[keras] . identifier[layers] . identifier[Flatten] (),
identifier[tf] . identifier[keras] . identifier[layers] . identifier[Dense] ( literal[int] * identifier[latent_size] , identifier[activation] = keyword[None] ),
])
keyword[def] identifier[encoder] ( identifier[images] ):
identifier[images] = literal[int] * identifier[tf] . identifier[cast] ( identifier[images] , identifier[dtype] = identifier[tf] . identifier[float32] )- literal[int]
identifier[net] = identifier[encoder_net] ( identifier[images] )
keyword[return] identifier[tfd] . identifier[MultivariateNormalDiag] (
identifier[loc] = identifier[net] [...,: identifier[latent_size] ],
identifier[scale_diag] = identifier[tf] . identifier[nn] . identifier[softplus] ( identifier[net] [..., identifier[latent_size] :]+
identifier[_softplus_inverse] ( literal[int] )),
identifier[name] = literal[string] )
keyword[return] identifier[encoder] | def make_encoder(activation, latent_size, base_depth):
"""Creates the encoder function.
Args:
activation: Activation function in hidden layers.
latent_size: The dimensionality of the encoding.
base_depth: The lowest depth for a layer.
Returns:
encoder: A `callable` mapping a `Tensor` of images to a
`tfd.Distribution` instance over encodings.
"""
conv = functools.partial(tf.keras.layers.Conv2D, padding='SAME', activation=activation)
encoder_net = tf.keras.Sequential([conv(base_depth, 5, 1), conv(base_depth, 5, 2), conv(2 * base_depth, 5, 1), conv(2 * base_depth, 5, 2), conv(4 * latent_size, 7, padding='VALID'), tf.keras.layers.Flatten(), tf.keras.layers.Dense(2 * latent_size, activation=None)])
def encoder(images):
images = 2 * tf.cast(images, dtype=tf.float32) - 1
net = encoder_net(images)
return tfd.MultivariateNormalDiag(loc=net[..., :latent_size], scale_diag=tf.nn.softplus(net[..., latent_size:] + _softplus_inverse(1.0)), name='code')
return encoder |
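A usage sketch, assuming the TensorFlow / TensorFlow Probability imports used elsewhere in this module; batch size, latent size, and the activation are illustrative. With two stride-2 convolutions followed by a 7x7 VALID convolution, 28x28 inputs reduce spatially as 28 -> 14 -> 7 -> 1, so MNIST-sized images fit:

import tensorflow as tf

encoder = make_encoder(activation=tf.nn.leaky_relu,
                       latent_size=16, base_depth=32)
images = tf.zeros([8, 28, 28, 1])         # dummy batch, values in [0, 1]
approx_posterior = encoder(images)        # a tfd.MultivariateNormalDiag
code = approx_posterior.sample()          # shape [8, 16]
log_q = approx_posterior.log_prob(code)   # term used in the ELBO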
def readout(self, *args, **kwargs):
''' Running the FIFO readout while executing other statements.
Starting and stopping of the FIFO readout is synchronized between the threads.
'''
timeout = kwargs.pop('timeout', 10.0)
self.start_readout(*args, **kwargs)
try:
yield
finally:
try:
self.stop_readout(timeout=timeout)
except Exception:
# in case something fails, call this on last resort
# if run was aborted, immediately stop readout
if self.abort_run.is_set():
with self._readout_lock:
if self.fifo_readout.is_running:
self.fifo_readout.stop(timeout=0.0) | def function[readout, parameter[self]]:
constant[ Running the FIFO readout while executing other statements.
Starting and stopping of the FIFO readout is synchronized between the threads.
]
variable[timeout] assign[=] call[name[kwargs].pop, parameter[constant[timeout], constant[10.0]]]
call[name[self].start_readout, parameter[<ast.Starred object at 0x7da1b10c5900>]]
<ast.Try object at 0x7da1b10c4370> | keyword[def] identifier[readout] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[timeout] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[int] )
identifier[self] . identifier[start_readout] (* identifier[args] ,** identifier[kwargs] )
keyword[try] :
keyword[yield]
keyword[finally] :
keyword[try] :
identifier[self] . identifier[stop_readout] ( identifier[timeout] = identifier[timeout] )
keyword[except] identifier[Exception] :
keyword[if] identifier[self] . identifier[abort_run] . identifier[is_set] ():
keyword[with] identifier[self] . identifier[_readout_lock] :
keyword[if] identifier[self] . identifier[fifo_readout] . identifier[is_running] :
identifier[self] . identifier[fifo_readout] . identifier[stop] ( identifier[timeout] = literal[int] ) | def readout(self, *args, **kwargs):
""" Running the FIFO readout while executing other statements.
Starting and stopping of the FIFO readout is synchronized between the threads.
"""
timeout = kwargs.pop('timeout', 10.0)
self.start_readout(*args, **kwargs)
try:
yield # depends on [control=['try'], data=[]]
finally:
try:
self.stop_readout(timeout=timeout) # depends on [control=['try'], data=[]]
except Exception:
# in case something fails, call this on last resort
# if run was aborted, immediately stop readout
if self.abort_run.is_set():
with self._readout_lock:
if self.fifo_readout.is_running:
self.fifo_readout.stop(timeout=0.0) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] |
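`readout` yields once and restores state on the way out, so it is presumably wrapped with `contextlib.contextmanager` (the decorator sits outside this excerpt). The intended call shape is roughly:

# inside the same class, during a scan
with self.readout(timeout=10.0):
    self.take_data()   # hypothetical work done while the FIFO is read out
# by here stop_readout(timeout=10.0) has been attempted, even on error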
def keywords(s, top=10, **kwargs):
""" Returns a sorted list of keywords in the given string.
"""
return parser.find_keywords(s, top=top, frequency=parser.frequency) | def function[keywords, parameter[s, top]]:
constant[ Returns a sorted list of keywords in the given string.
]
return[call[name[parser].find_keywords, parameter[name[s]]]] | keyword[def] identifier[keywords] ( identifier[s] , identifier[top] = literal[int] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[parser] . identifier[find_keywords] ( identifier[s] , identifier[top] = identifier[top] , identifier[frequency] = identifier[parser] . identifier[frequency] ) | def keywords(s, top=10, **kwargs):
""" Returns a sorted list of keywords in the given string.
"""
return parser.find_keywords(s, top=top, frequency=parser.frequency) |
def sentence_iterator(self, file_path: str) -> Iterator[OntonotesSentence]:
"""
        An iterator over the sentences in an individual CoNLL-formatted file.
"""
for document in self.dataset_document_iterator(file_path):
for sentence in document:
yield sentence | def function[sentence_iterator, parameter[self, file_path]]:
constant[
        An iterator over the sentences in an individual CoNLL-formatted file.
]
for taget[name[document]] in starred[call[name[self].dataset_document_iterator, parameter[name[file_path]]]] begin[:]
for taget[name[sentence]] in starred[name[document]] begin[:]
<ast.Yield object at 0x7da1b1f95f90> | keyword[def] identifier[sentence_iterator] ( identifier[self] , identifier[file_path] : identifier[str] )-> identifier[Iterator] [ identifier[OntonotesSentence] ]:
literal[string]
keyword[for] identifier[document] keyword[in] identifier[self] . identifier[dataset_document_iterator] ( identifier[file_path] ):
keyword[for] identifier[sentence] keyword[in] identifier[document] :
keyword[yield] identifier[sentence] | def sentence_iterator(self, file_path: str) -> Iterator[OntonotesSentence]:
"""
        An iterator over the sentences in an individual CoNLL-formatted file.
"""
for document in self.dataset_document_iterator(file_path):
for sentence in document:
yield sentence # depends on [control=['for'], data=['sentence']] # depends on [control=['for'], data=['document']] |
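A usage sketch with a hypothetical file path; the attribute names on the yielded sentences come from the accompanying OntonotesSentence class and are assumed here:

ontonotes = Ontonotes()
for sentence in ontonotes.sentence_iterator('train/bc/cnn_0001.gold_conll'):
    print(len(sentence.words), sentence.words[:5])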
def target(self, request, key, limit, ttl):
"""this will only run the request if the key has a value, if you want to
fail if the key doesn't have a value, then normalize_key() should raise
an exception
:param request: Request, the request instance
:param key: string, the unique key for the endpoint, this is generated using
self.normalize_key, so override that method to customize the key
:param limit: int, max requests that should be received in ttl
:param ttl: int, how many seconds the request should be throttled (eg, 3600 = 1 hour)
"""
ret = True
if key:
#backend = self.create_backend()
#method = getattr(backend, "normalize_limit", None)
#if method:
# limit = method(request, limit)
#method = getattr(backend, "normalize_ttl", None)
#if method:
# ttl = method(request, ttl)
#ret = backend.target(request, key, limit, ttl)
ret = super(RateLimitDecorator, self).target(request, key, limit, ttl)
else:
logger.warn("No ratelimit key found for {}".format(request.path))
return ret | def function[target, parameter[self, request, key, limit, ttl]]:
    constant[This will only run the request if the key has a value; if you want to
    fail when the key doesn't have a value, then normalize_key() should raise
    an exception
:param request: Request, the request instance
:param key: string, the unique key for the endpoint, this is generated using
self.normalize_key, so override that method to customize the key
:param limit: int, max requests that should be received in ttl
:param ttl: int, how many seconds the request should be throttled (eg, 3600 = 1 hour)
]
variable[ret] assign[=] constant[True]
if name[key] begin[:]
variable[ret] assign[=] call[call[name[super], parameter[name[RateLimitDecorator], name[self]]].target, parameter[name[request], name[key], name[limit], name[ttl]]]
return[name[ret]] | keyword[def] identifier[target] ( identifier[self] , identifier[request] , identifier[key] , identifier[limit] , identifier[ttl] ):
literal[string]
identifier[ret] = keyword[True]
keyword[if] identifier[key] :
identifier[ret] = identifier[super] ( identifier[RateLimitDecorator] , identifier[self] ). identifier[target] ( identifier[request] , identifier[key] , identifier[limit] , identifier[ttl] )
keyword[else] :
identifier[logger] . identifier[warn] ( literal[string] . identifier[format] ( identifier[request] . identifier[path] ))
keyword[return] identifier[ret] | def target(self, request, key, limit, ttl):
"""this will only run the request if the key has a value, if you want to
fail if the key doesn't have a value, then normalize_key() should raise
an exception
:param request: Request, the request instance
:param key: string, the unique key for the endpoint, this is generated using
self.normalize_key, so override that method to customize the key
:param limit: int, max requests that should be received in ttl
:param ttl: int, how many seconds the request should be throttled (eg, 3600 = 1 hour)
"""
ret = True
if key:
#backend = self.create_backend()
#method = getattr(backend, "normalize_limit", None)
#if method:
# limit = method(request, limit)
#method = getattr(backend, "normalize_ttl", None)
#if method:
# ttl = method(request, ttl)
#ret = backend.target(request, key, limit, ttl)
ret = super(RateLimitDecorator, self).target(request, key, limit, ttl) # depends on [control=['if'], data=[]]
else:
logger.warn('No ratelimit key found for {}'.format(request.path))
return ret |
def moment(p, v, order=1):
""" Calculates the moments of the probability distribution p with vector v """
if order == 1:
return (v*p).sum()
elif order == 2:
return np.sqrt( ((v**2)*p).sum() - (v*p).sum()**2 ) | def function[moment, parameter[p, v, order]]:
    constant[ Calculates the mean (order=1) or standard deviation (order=2) of the probability distribution p over the support vector v ]
if compare[name[order] equal[==] constant[1]] begin[:]
return[call[binary_operation[name[v] * name[p]].sum, parameter[]]] | keyword[def] identifier[moment] ( identifier[p] , identifier[v] , identifier[order] = literal[int] ):
literal[string]
keyword[if] identifier[order] == literal[int] :
keyword[return] ( identifier[v] * identifier[p] ). identifier[sum] ()
keyword[elif] identifier[order] == literal[int] :
keyword[return] identifier[np] . identifier[sqrt] ((( identifier[v] ** literal[int] )* identifier[p] ). identifier[sum] ()-( identifier[v] * identifier[p] ). identifier[sum] ()** literal[int] ) | def moment(p, v, order=1):
""" Calculates the moments of the probability distribution p with vector v """
if order == 1:
return (v * p).sum() # depends on [control=['if'], data=[]]
elif order == 2:
return np.sqrt((v ** 2 * p).sum() - (v * p).sum() ** 2) # depends on [control=['if'], data=[]] |
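A worked numeric check: for p = [0.25, 0.5, 0.25] over v = [0, 1, 2], the mean is 1.0 and the standard deviation is sqrt(E[v**2] - E[v]**2) = sqrt(1.5 - 1.0) ≈ 0.7071. Note that any order other than 1 or 2 falls through and returns None:

import numpy as np

p = np.array([0.25, 0.5, 0.25])   # probabilities, sum to 1
v = np.array([0.0, 1.0, 2.0])     # support

assert moment(p, v, order=1) == 1.0
assert abs(moment(p, v, order=2) - np.sqrt(0.5)) < 1e-12
assert moment(p, v, order=3) is None   # silently unhandled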
def fetch(self, url, open_graph=None, twitter_card=None, touch_icon=None,
favicon=None, all_images=None, parser=None, handle_file_content=None,
canonical=None):
"""Retrieves content from the specified url, parses it, and returns
a beautifully crafted dictionary of important information about that
web page.
Priority tree is as follows:
1. OEmbed
2. Open Graph
3. Twitter Card
4. Other meta content (i.e. description, keywords)
:param url: URL to send a GET request to
:param open_graph: (optional) If ``True``, filters web page content for Open Graph meta tags. The content of these properties have top priority on return values.
:type open_graph: bool
:param twitter_card: (optional) If ``True``, filters web page content for Twitter Card meta tags
:type twitter_card: bool
:param touch_icon: (optional) If ``True``, retrieves Apple touch icons and includes them in the response ``images`` array
:type touch_icon: bool
:param favicon: (optional) If ``True``, retrieves any favicon images and includes them in the response ``images`` array
:type favicon: bool
:param canonical: (optional) If ``True``, retrieves canonical url from meta tags. Default: False
:type canonical: bool
:param all_images: (optional) If ``True``, retrieves images inside web pages body and includes them in the response ``images`` array. Default: False
:type all_images: bool
:param parser: (optional) String reference for the parser that BeautifulSoup will use
:type parser: string
:param handle_file_content: (optional) If ``True``, lassie will return a generic response when a file is fetched. Default: False
:type handle_file_content: bool
"""
# Set params, method params have priority over class params
open_graph = merge_settings(open_graph, self.open_graph)
twitter_card = merge_settings(twitter_card, self.twitter_card)
touch_icon = merge_settings(touch_icon, self.touch_icon)
favicon = merge_settings(favicon, self.favicon)
canonical = merge_settings(canonical, self.canonical)
all_images = merge_settings(all_images, self.all_images)
parser = merge_settings(parser, self.parser)
handle_file_content = merge_settings(handle_file_content, self.handle_file_content)
data = {
'images': [],
'videos': [],
}
has_file_content = False
content_type = None
if handle_file_content:
headers, status_code = self._retrieve_headers(url)
content_type = headers.get('Content-Type')
has_file_content = content_type and not 'text/html' in content_type
if has_file_content and content_type:
has_image_content = content_type in IMAGE_MIMETYPES
if has_image_content:
parsed_url = urlparse(url)
data['title'] = basename(parsed_url.path.lstrip('/')) # TODO: if the url doesn't have an extension, maybe we should match it up to the mimetype and append an ext?
data['url'] = url
data['images'].append({
'type': 'body_image',
'src': url,
})
else:
try:
oembed_data, status_code = self._retrieve_oembed_data(url)
parse_oembed_data(oembed_data, data)
except LassieError:
oembed_data = None
html, status_code = self._retrieve_content(url)
if not html and not oembed_data:
raise LassieError('There was no content to parse.')
if '<html' not in html:
html = re.sub(r'(?:<!DOCTYPE(?:\s\w)?>(?:<head>)?)', '<!DOCTYPE html><html>', html)
soup = BeautifulSoup(clean_text(html), parser)
self._filter_amp_data(soup, data, url, all_images)
if open_graph:
self._filter_meta_data('open_graph', soup, data, url)
if twitter_card:
self._filter_meta_data('twitter_card', soup, data)
self._filter_meta_data('generic', soup, data)
if touch_icon:
self._filter_link_tag_data('touch_icon', soup, data, url)
if favicon:
self._filter_link_tag_data('favicon', soup, data, url)
if canonical:
self._filter_link_tag_data('canonical', soup, data, url)
if all_images:
# Maybe filter out 1x1, no "good" way to do this if image doesn't supply
# width/height.
self._find_all_images(soup, data, url)
# TODO: Find a good place for setting url, title and locale
if soup.html.get('lang'):
lang = soup.html.get('lang')
else:
lang = soup.html.get('xml:lang')
if lang and ('locale' not in data):
locale = normalize_locale(lang)
if locale:
data['locale'] = locale
data_url = data.get('url')
if not data_url or (data_url in url and len(data_url) < len(url)):
data['url'] = url
if ('title' not in data or not data.get('title')) and hasattr(soup.title, 'string'):
data['title'] = soup.title.string
data['status_code'] = status_code
return data | def function[fetch, parameter[self, url, open_graph, twitter_card, touch_icon, favicon, all_images, parser, handle_file_content, canonical]]:
constant[Retrieves content from the specified url, parses it, and returns
a beautifully crafted dictionary of important information about that
web page.
Priority tree is as follows:
1. OEmbed
2. Open Graph
3. Twitter Card
4. Other meta content (i.e. description, keywords)
:param url: URL to send a GET request to
:param open_graph: (optional) If ``True``, filters web page content for Open Graph meta tags. The content of these properties have top priority on return values.
:type open_graph: bool
:param twitter_card: (optional) If ``True``, filters web page content for Twitter Card meta tags
:type twitter_card: bool
:param touch_icon: (optional) If ``True``, retrieves Apple touch icons and includes them in the response ``images`` array
:type touch_icon: bool
:param favicon: (optional) If ``True``, retrieves any favicon images and includes them in the response ``images`` array
:type favicon: bool
:param canonical: (optional) If ``True``, retrieves canonical url from meta tags. Default: False
:type canonical: bool
:param all_images: (optional) If ``True``, retrieves images inside web pages body and includes them in the response ``images`` array. Default: False
:type all_images: bool
:param parser: (optional) String reference for the parser that BeautifulSoup will use
:type parser: string
:param handle_file_content: (optional) If ``True``, lassie will return a generic response when a file is fetched. Default: False
:type handle_file_content: bool
]
variable[open_graph] assign[=] call[name[merge_settings], parameter[name[open_graph], name[self].open_graph]]
variable[twitter_card] assign[=] call[name[merge_settings], parameter[name[twitter_card], name[self].twitter_card]]
variable[touch_icon] assign[=] call[name[merge_settings], parameter[name[touch_icon], name[self].touch_icon]]
variable[favicon] assign[=] call[name[merge_settings], parameter[name[favicon], name[self].favicon]]
variable[canonical] assign[=] call[name[merge_settings], parameter[name[canonical], name[self].canonical]]
variable[all_images] assign[=] call[name[merge_settings], parameter[name[all_images], name[self].all_images]]
variable[parser] assign[=] call[name[merge_settings], parameter[name[parser], name[self].parser]]
variable[handle_file_content] assign[=] call[name[merge_settings], parameter[name[handle_file_content], name[self].handle_file_content]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da2044c2ec0>, <ast.Constant object at 0x7da2044c39d0>], [<ast.List object at 0x7da2044c2c20>, <ast.List object at 0x7da2044c39a0>]]
variable[has_file_content] assign[=] constant[False]
variable[content_type] assign[=] constant[None]
if name[handle_file_content] begin[:]
<ast.Tuple object at 0x7da2044c1810> assign[=] call[name[self]._retrieve_headers, parameter[name[url]]]
variable[content_type] assign[=] call[name[headers].get, parameter[constant[Content-Type]]]
variable[has_file_content] assign[=] <ast.BoolOp object at 0x7da2044c2140>
if <ast.BoolOp object at 0x7da2044c18a0> begin[:]
variable[has_image_content] assign[=] compare[name[content_type] in name[IMAGE_MIMETYPES]]
if name[has_image_content] begin[:]
variable[parsed_url] assign[=] call[name[urlparse], parameter[name[url]]]
call[name[data]][constant[title]] assign[=] call[name[basename], parameter[call[name[parsed_url].path.lstrip, parameter[constant[/]]]]]
call[name[data]][constant[url]] assign[=] name[url]
call[call[name[data]][constant[images]].append, parameter[dictionary[[<ast.Constant object at 0x7da2044c0490>, <ast.Constant object at 0x7da2044c0a90>], [<ast.Constant object at 0x7da2044c0c40>, <ast.Name object at 0x7da2044c38e0>]]]]
call[name[data]][constant[status_code]] assign[=] name[status_code]
return[name[data]] | keyword[def] identifier[fetch] ( identifier[self] , identifier[url] , identifier[open_graph] = keyword[None] , identifier[twitter_card] = keyword[None] , identifier[touch_icon] = keyword[None] ,
identifier[favicon] = keyword[None] , identifier[all_images] = keyword[None] , identifier[parser] = keyword[None] , identifier[handle_file_content] = keyword[None] ,
identifier[canonical] = keyword[None] ):
literal[string]
identifier[open_graph] = identifier[merge_settings] ( identifier[open_graph] , identifier[self] . identifier[open_graph] )
identifier[twitter_card] = identifier[merge_settings] ( identifier[twitter_card] , identifier[self] . identifier[twitter_card] )
identifier[touch_icon] = identifier[merge_settings] ( identifier[touch_icon] , identifier[self] . identifier[touch_icon] )
identifier[favicon] = identifier[merge_settings] ( identifier[favicon] , identifier[self] . identifier[favicon] )
identifier[canonical] = identifier[merge_settings] ( identifier[canonical] , identifier[self] . identifier[canonical] )
identifier[all_images] = identifier[merge_settings] ( identifier[all_images] , identifier[self] . identifier[all_images] )
identifier[parser] = identifier[merge_settings] ( identifier[parser] , identifier[self] . identifier[parser] )
identifier[handle_file_content] = identifier[merge_settings] ( identifier[handle_file_content] , identifier[self] . identifier[handle_file_content] )
identifier[data] ={
literal[string] :[],
literal[string] :[],
}
identifier[has_file_content] = keyword[False]
identifier[content_type] = keyword[None]
keyword[if] identifier[handle_file_content] :
identifier[headers] , identifier[status_code] = identifier[self] . identifier[_retrieve_headers] ( identifier[url] )
identifier[content_type] = identifier[headers] . identifier[get] ( literal[string] )
identifier[has_file_content] = identifier[content_type] keyword[and] keyword[not] literal[string] keyword[in] identifier[content_type]
keyword[if] identifier[has_file_content] keyword[and] identifier[content_type] :
identifier[has_image_content] = identifier[content_type] keyword[in] identifier[IMAGE_MIMETYPES]
keyword[if] identifier[has_image_content] :
identifier[parsed_url] = identifier[urlparse] ( identifier[url] )
identifier[data] [ literal[string] ]= identifier[basename] ( identifier[parsed_url] . identifier[path] . identifier[lstrip] ( literal[string] ))
identifier[data] [ literal[string] ]= identifier[url]
identifier[data] [ literal[string] ]. identifier[append] ({
literal[string] : literal[string] ,
literal[string] : identifier[url] ,
})
keyword[else] :
keyword[try] :
identifier[oembed_data] , identifier[status_code] = identifier[self] . identifier[_retrieve_oembed_data] ( identifier[url] )
identifier[parse_oembed_data] ( identifier[oembed_data] , identifier[data] )
keyword[except] identifier[LassieError] :
identifier[oembed_data] = keyword[None]
identifier[html] , identifier[status_code] = identifier[self] . identifier[_retrieve_content] ( identifier[url] )
keyword[if] keyword[not] identifier[html] keyword[and] keyword[not] identifier[oembed_data] :
keyword[raise] identifier[LassieError] ( literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[html] :
identifier[html] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[html] )
identifier[soup] = identifier[BeautifulSoup] ( identifier[clean_text] ( identifier[html] ), identifier[parser] )
identifier[self] . identifier[_filter_amp_data] ( identifier[soup] , identifier[data] , identifier[url] , identifier[all_images] )
keyword[if] identifier[open_graph] :
identifier[self] . identifier[_filter_meta_data] ( literal[string] , identifier[soup] , identifier[data] , identifier[url] )
keyword[if] identifier[twitter_card] :
identifier[self] . identifier[_filter_meta_data] ( literal[string] , identifier[soup] , identifier[data] )
identifier[self] . identifier[_filter_meta_data] ( literal[string] , identifier[soup] , identifier[data] )
keyword[if] identifier[touch_icon] :
identifier[self] . identifier[_filter_link_tag_data] ( literal[string] , identifier[soup] , identifier[data] , identifier[url] )
keyword[if] identifier[favicon] :
identifier[self] . identifier[_filter_link_tag_data] ( literal[string] , identifier[soup] , identifier[data] , identifier[url] )
keyword[if] identifier[canonical] :
identifier[self] . identifier[_filter_link_tag_data] ( literal[string] , identifier[soup] , identifier[data] , identifier[url] )
keyword[if] identifier[all_images] :
identifier[self] . identifier[_find_all_images] ( identifier[soup] , identifier[data] , identifier[url] )
keyword[if] identifier[soup] . identifier[html] . identifier[get] ( literal[string] ):
identifier[lang] = identifier[soup] . identifier[html] . identifier[get] ( literal[string] )
keyword[else] :
identifier[lang] = identifier[soup] . identifier[html] . identifier[get] ( literal[string] )
keyword[if] identifier[lang] keyword[and] ( literal[string] keyword[not] keyword[in] identifier[data] ):
identifier[locale] = identifier[normalize_locale] ( identifier[lang] )
keyword[if] identifier[locale] :
identifier[data] [ literal[string] ]= identifier[locale]
identifier[data_url] = identifier[data] . identifier[get] ( literal[string] )
keyword[if] keyword[not] identifier[data_url] keyword[or] ( identifier[data_url] keyword[in] identifier[url] keyword[and] identifier[len] ( identifier[data_url] )< identifier[len] ( identifier[url] )):
identifier[data] [ literal[string] ]= identifier[url]
keyword[if] ( literal[string] keyword[not] keyword[in] identifier[data] keyword[or] keyword[not] identifier[data] . identifier[get] ( literal[string] )) keyword[and] identifier[hasattr] ( identifier[soup] . identifier[title] , literal[string] ):
identifier[data] [ literal[string] ]= identifier[soup] . identifier[title] . identifier[string]
identifier[data] [ literal[string] ]= identifier[status_code]
keyword[return] identifier[data] | def fetch(self, url, open_graph=None, twitter_card=None, touch_icon=None, favicon=None, all_images=None, parser=None, handle_file_content=None, canonical=None):
"""Retrieves content from the specified url, parses it, and returns
a beautifully crafted dictionary of important information about that
web page.
Priority tree is as follows:
1. OEmbed
2. Open Graph
3. Twitter Card
4. Other meta content (i.e. description, keywords)
:param url: URL to send a GET request to
:param open_graph: (optional) If ``True``, filters web page content for Open Graph meta tags. The content of these properties have top priority on return values.
:type open_graph: bool
:param twitter_card: (optional) If ``True``, filters web page content for Twitter Card meta tags
:type twitter_card: bool
:param touch_icon: (optional) If ``True``, retrieves Apple touch icons and includes them in the response ``images`` array
:type touch_icon: bool
:param favicon: (optional) If ``True``, retrieves any favicon images and includes them in the response ``images`` array
:type favicon: bool
:param canonical: (optional) If ``True``, retrieves canonical url from meta tags. Default: False
:type canonical: bool
:param all_images: (optional) If ``True``, retrieves images inside web pages body and includes them in the response ``images`` array. Default: False
:type all_images: bool
:param parser: (optional) String reference for the parser that BeautifulSoup will use
:type parser: string
:param handle_file_content: (optional) If ``True``, lassie will return a generic response when a file is fetched. Default: False
:type handle_file_content: bool
"""
# Set params, method params have priority over class params
open_graph = merge_settings(open_graph, self.open_graph)
twitter_card = merge_settings(twitter_card, self.twitter_card)
touch_icon = merge_settings(touch_icon, self.touch_icon)
favicon = merge_settings(favicon, self.favicon)
canonical = merge_settings(canonical, self.canonical)
all_images = merge_settings(all_images, self.all_images)
parser = merge_settings(parser, self.parser)
handle_file_content = merge_settings(handle_file_content, self.handle_file_content)
data = {'images': [], 'videos': []}
has_file_content = False
content_type = None
if handle_file_content:
(headers, status_code) = self._retrieve_headers(url)
content_type = headers.get('Content-Type')
has_file_content = content_type and (not 'text/html' in content_type) # depends on [control=['if'], data=[]]
if has_file_content and content_type:
has_image_content = content_type in IMAGE_MIMETYPES
if has_image_content:
parsed_url = urlparse(url)
data['title'] = basename(parsed_url.path.lstrip('/')) # TODO: if the url doesn't have an extension, maybe we should match it up to the mimetype and append an ext?
data['url'] = url
data['images'].append({'type': 'body_image', 'src': url}) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
try:
(oembed_data, status_code) = self._retrieve_oembed_data(url)
parse_oembed_data(oembed_data, data) # depends on [control=['try'], data=[]]
except LassieError:
oembed_data = None # depends on [control=['except'], data=[]]
(html, status_code) = self._retrieve_content(url)
if not html and (not oembed_data):
raise LassieError('There was no content to parse.') # depends on [control=['if'], data=[]]
if '<html' not in html:
html = re.sub('(?:<!DOCTYPE(?:\\s\\w)?>(?:<head>)?)', '<!DOCTYPE html><html>', html) # depends on [control=['if'], data=['html']]
soup = BeautifulSoup(clean_text(html), parser)
self._filter_amp_data(soup, data, url, all_images)
if open_graph:
self._filter_meta_data('open_graph', soup, data, url) # depends on [control=['if'], data=[]]
if twitter_card:
self._filter_meta_data('twitter_card', soup, data) # depends on [control=['if'], data=[]]
self._filter_meta_data('generic', soup, data)
if touch_icon:
self._filter_link_tag_data('touch_icon', soup, data, url) # depends on [control=['if'], data=[]]
if favicon:
self._filter_link_tag_data('favicon', soup, data, url) # depends on [control=['if'], data=[]]
if canonical:
self._filter_link_tag_data('canonical', soup, data, url) # depends on [control=['if'], data=[]]
if all_images:
# Maybe filter out 1x1, no "good" way to do this if image doesn't supply
# width/height.
self._find_all_images(soup, data, url) # depends on [control=['if'], data=[]]
# TODO: Find a good place for setting url, title and locale
if soup.html.get('lang'):
lang = soup.html.get('lang') # depends on [control=['if'], data=[]]
else:
lang = soup.html.get('xml:lang')
if lang and 'locale' not in data:
locale = normalize_locale(lang)
if locale:
data['locale'] = locale # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
data_url = data.get('url')
if not data_url or (data_url in url and len(data_url) < len(url)):
data['url'] = url # depends on [control=['if'], data=[]]
if ('title' not in data or not data.get('title')) and hasattr(soup.title, 'string'):
data['title'] = soup.title.string # depends on [control=['if'], data=[]]
data['status_code'] = status_code
return data |
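A usage sketch — the enclosing class is assumed to be lassie's `Lassie`, and the URL is illustrative:

l = Lassie()
data = l.fetch('http://example.com/article.html',
               all_images=True, favicon=True)
print(data['url'], data['status_code'])
print(data.get('title'), len(data['images']))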
def date_time_this_month():
"""
    Get a random time within the current month.
    :return:
    * date_this_month: (datetime) a random time in the current month
    Example::
    print('--- GetRandomTime.date_time_this_month demo ---')
    print(GetRandomTime.date_time_this_month())
    print('---')
    Output::
    --- GetRandomTime.date_time_this_month demo ---
    2018-07-01 12:47:20
    ---
"""
now = datetime.now()
this_month_start = now.replace(
day=1, hour=0, minute=0, second=0, microsecond=0)
this_month_days = calendar.monthrange(now.year, now.month)
random_seconds = random.randint(0, this_month_days[1]*A_DAY_SECONDS)
return this_month_start + timedelta(seconds=random_seconds) | def function[date_time_this_month, parameter[]]:
constant[
    Get a random time within the current month.
    :return:
    * date_this_month: (datetime) a random time in the current month
    Example::
    print('--- GetRandomTime.date_time_this_month demo ---')
    print(GetRandomTime.date_time_this_month())
    print('---')
    Output::
    --- GetRandomTime.date_time_this_month demo ---
    2018-07-01 12:47:20
    ---
]
variable[now] assign[=] call[name[datetime].now, parameter[]]
variable[this_month_start] assign[=] call[name[now].replace, parameter[]]
variable[this_month_days] assign[=] call[name[calendar].monthrange, parameter[name[now].year, name[now].month]]
variable[random_seconds] assign[=] call[name[random].randint, parameter[constant[0], binary_operation[call[name[this_month_days]][constant[1]] * name[A_DAY_SECONDS]]]]
return[binary_operation[name[this_month_start] + call[name[timedelta], parameter[]]]] | keyword[def] identifier[date_time_this_month] ():
literal[string]
identifier[now] = identifier[datetime] . identifier[now] ()
identifier[this_month_start] = identifier[now] . identifier[replace] (
identifier[day] = literal[int] , identifier[hour] = literal[int] , identifier[minute] = literal[int] , identifier[second] = literal[int] , identifier[microsecond] = literal[int] )
identifier[this_month_days] = identifier[calendar] . identifier[monthrange] ( identifier[now] . identifier[year] , identifier[now] . identifier[month] )
identifier[random_seconds] = identifier[random] . identifier[randint] ( literal[int] , identifier[this_month_days] [ literal[int] ]* identifier[A_DAY_SECONDS] )
keyword[return] identifier[this_month_start] + identifier[timedelta] ( identifier[seconds] = identifier[random_seconds] ) | def date_time_this_month():
"""
    Get a random time within the current month.
    :return:
    * date_this_month: (datetime) a random time in the current month
    Example::
    print('--- GetRandomTime.date_time_this_month demo ---')
    print(GetRandomTime.date_time_this_month())
    print('---')
    Output::
    --- GetRandomTime.date_time_this_month demo ---
    2018-07-01 12:47:20
    ---
"""
now = datetime.now()
this_month_start = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
this_month_days = calendar.monthrange(now.year, now.month)
random_seconds = random.randint(0, this_month_days[1] * A_DAY_SECONDS)
return this_month_start + timedelta(seconds=random_seconds) |
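Usage is a plain call. One boundary worth knowing: `random.randint` is inclusive at both ends, so the maximal draw of `this_month_days[1] * A_DAY_SECONDS` seconds lands exactly on the first instant of the *next* month:

t = GetRandomTime.date_time_this_month()
month_start = datetime.now().replace(day=1, hour=0, minute=0,
                                     second=0, microsecond=0)
assert t >= month_start
# the upper edge can be e.g. 2018-08-01 00:00:00 for a call made in July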
def reload_list(self):
'''Press R in home view to retrieve quiz list'''
self.leetcode.load()
if self.leetcode.quizzes and len(self.leetcode.quizzes) > 0:
self.home_view = self.make_listview(self.leetcode.quizzes)
self.view_stack = []
self.goto_view(self.home_view) | def function[reload_list, parameter[self]]:
constant[Press R in home view to retrieve quiz list]
call[name[self].leetcode.load, parameter[]]
if <ast.BoolOp object at 0x7da1b0cf7070> begin[:]
name[self].home_view assign[=] call[name[self].make_listview, parameter[name[self].leetcode.quizzes]]
name[self].view_stack assign[=] list[[]]
call[name[self].goto_view, parameter[name[self].home_view]] | keyword[def] identifier[reload_list] ( identifier[self] ):
literal[string]
identifier[self] . identifier[leetcode] . identifier[load] ()
keyword[if] identifier[self] . identifier[leetcode] . identifier[quizzes] keyword[and] identifier[len] ( identifier[self] . identifier[leetcode] . identifier[quizzes] )> literal[int] :
identifier[self] . identifier[home_view] = identifier[self] . identifier[make_listview] ( identifier[self] . identifier[leetcode] . identifier[quizzes] )
identifier[self] . identifier[view_stack] =[]
identifier[self] . identifier[goto_view] ( identifier[self] . identifier[home_view] ) | def reload_list(self):
"""Press R in home view to retrieve quiz list"""
self.leetcode.load()
if self.leetcode.quizzes and len(self.leetcode.quizzes) > 0:
self.home_view = self.make_listview(self.leetcode.quizzes)
self.view_stack = []
self.goto_view(self.home_view) # depends on [control=['if'], data=[]] |
def terminate_process(self, idf):
""" Terminate a process by id """
try:
p = self.q.pop(idf)
p.terminate()
return p
except:
return None | def function[terminate_process, parameter[self, idf]]:
constant[ Terminate a process by id ]
<ast.Try object at 0x7da207f985b0> | keyword[def] identifier[terminate_process] ( identifier[self] , identifier[idf] ):
literal[string]
keyword[try] :
identifier[p] = identifier[self] . identifier[q] . identifier[pop] ( identifier[idf] )
identifier[p] . identifier[terminate] ()
keyword[return] identifier[p]
keyword[except] :
keyword[return] keyword[None] | def terminate_process(self, idf):
""" Terminate a process by id """
try:
p = self.q.pop(idf)
p.terminate()
return p # depends on [control=['try'], data=[]]
except:
return None # depends on [control=['except'], data=[]] |
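The bare `except` above collapses "unknown id" and "terminate failed" into a single None return, so callers can only branch on that. A usage sketch, assuming `self.q` maps ids to `multiprocessing.Process` objects:

p = manager.terminate_process('worker-42')   # manager: hypothetical owner
if p is None:
    print('no such process, or it could not be terminated')
else:
    p.join()   # reap the terminated child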
def save_data(self,session, exp_id, content):
        '''save_data obtains the current subid from the session and saves the
        content, depending on the database type.'''
from expfactory.database.models import (
Participant,
Result
)
subid = session.get('subid')
bot.info('Saving data for subid %s' % subid)
# We only attempt save if there is a subject id, set at start
if subid is not None:
p = Participant.query.filter(Participant.id == subid).first() # better query here
# Preference is to save data under 'data', otherwise do all of it
if "data" in content:
content = content['data']
if isinstance(content,dict):
content = json.dumps(content)
result = Result(data=content,
exp_id=exp_id,
participant_id=p.id) # check if changes from str/int
self.session.add(result)
p.results.append(result)
self.session.commit()
bot.info("Participant: %s" %p)
bot.info("Result: %s" %result) | def function[save_data, parameter[self, session, exp_id, content]]:
    constant[save_data obtains the current subid from the session and saves the
    content, depending on the database type.]
from relative_module[expfactory.database.models] import module[Participant], module[Result]
variable[subid] assign[=] call[name[session].get, parameter[constant[subid]]]
call[name[bot].info, parameter[binary_operation[constant[Saving data for subid %s] <ast.Mod object at 0x7da2590d6920> name[subid]]]]
if compare[name[subid] is_not constant[None]] begin[:]
variable[p] assign[=] call[call[name[Participant].query.filter, parameter[compare[name[Participant].id equal[==] name[subid]]]].first, parameter[]]
if compare[constant[data] in name[content]] begin[:]
variable[content] assign[=] call[name[content]][constant[data]]
if call[name[isinstance], parameter[name[content], name[dict]]] begin[:]
variable[content] assign[=] call[name[json].dumps, parameter[name[content]]]
variable[result] assign[=] call[name[Result], parameter[]]
call[name[self].session.add, parameter[name[result]]]
call[name[p].results.append, parameter[name[result]]]
call[name[self].session.commit, parameter[]]
call[name[bot].info, parameter[binary_operation[constant[Participant: %s] <ast.Mod object at 0x7da2590d6920> name[p]]]]
call[name[bot].info, parameter[binary_operation[constant[Result: %s] <ast.Mod object at 0x7da2590d6920> name[result]]]] | keyword[def] identifier[save_data] ( identifier[self] , identifier[session] , identifier[exp_id] , identifier[content] ):
literal[string]
keyword[from] identifier[expfactory] . identifier[database] . identifier[models] keyword[import] (
identifier[Participant] ,
identifier[Result]
)
identifier[subid] = identifier[session] . identifier[get] ( literal[string] )
identifier[bot] . identifier[info] ( literal[string] % identifier[subid] )
keyword[if] identifier[subid] keyword[is] keyword[not] keyword[None] :
identifier[p] = identifier[Participant] . identifier[query] . identifier[filter] ( identifier[Participant] . identifier[id] == identifier[subid] ). identifier[first] ()
keyword[if] literal[string] keyword[in] identifier[content] :
identifier[content] = identifier[content] [ literal[string] ]
keyword[if] identifier[isinstance] ( identifier[content] , identifier[dict] ):
identifier[content] = identifier[json] . identifier[dumps] ( identifier[content] )
identifier[result] = identifier[Result] ( identifier[data] = identifier[content] ,
identifier[exp_id] = identifier[exp_id] ,
identifier[participant_id] = identifier[p] . identifier[id] )
identifier[self] . identifier[session] . identifier[add] ( identifier[result] )
identifier[p] . identifier[results] . identifier[append] ( identifier[result] )
identifier[self] . identifier[session] . identifier[commit] ()
identifier[bot] . identifier[info] ( literal[string] % identifier[p] )
identifier[bot] . identifier[info] ( literal[string] % identifier[result] ) | def save_data(self, session, exp_id, content):
"""save data will obtain the current subid from the session, and save it
depending on the database type."""
from expfactory.database.models import Participant, Result
subid = session.get('subid')
bot.info('Saving data for subid %s' % subid)
# We only attempt save if there is a subject id, set at start
if subid is not None:
p = Participant.query.filter(Participant.id == subid).first() # better query here
# Preference is to save data under 'data', otherwise do all of it
if 'data' in content:
content = content['data'] # depends on [control=['if'], data=['content']]
if isinstance(content, dict):
content = json.dumps(content) # depends on [control=['if'], data=[]]
result = Result(data=content, exp_id=exp_id, participant_id=p.id) # check if changes from str/int
self.session.add(result)
p.results.append(result)
self.session.commit()
bot.info('Participant: %s' % p)
bot.info('Result: %s' % result) # depends on [control=['if'], data=['subid']] |
def _triage_error(self, e, nofail):
""" Print a message and decide what to do about an error. """
if not nofail:
self.fail_pipeline(e)
elif self._failed:
print("This is a nofail process, but the pipeline was terminated for other reasons, so we fail.")
raise e
else:
print(e)
print("ERROR: Subprocess returned nonzero result, but pipeline is continuing because nofail=True") | def function[_triage_error, parameter[self, e, nofail]]:
constant[ Print a message and decide what to do about an error. ]
if <ast.UnaryOp object at 0x7da1b0340220> begin[:]
call[name[self].fail_pipeline, parameter[name[e]]] | keyword[def] identifier[_triage_error] ( identifier[self] , identifier[e] , identifier[nofail] ):
literal[string]
keyword[if] keyword[not] identifier[nofail] :
identifier[self] . identifier[fail_pipeline] ( identifier[e] )
keyword[elif] identifier[self] . identifier[_failed] :
identifier[print] ( literal[string] )
keyword[raise] identifier[e]
keyword[else] :
identifier[print] ( identifier[e] )
identifier[print] ( literal[string] ) | def _triage_error(self, e, nofail):
""" Print a message and decide what to do about an error. """
if not nofail:
self.fail_pipeline(e) # depends on [control=['if'], data=[]]
elif self._failed:
print('This is a nofail process, but the pipeline was terminated for other reasons, so we fail.')
raise e # depends on [control=['if'], data=[]]
else:
print(e)
print('ERROR: Subprocess returned nonzero result, but pipeline is continuing because nofail=True') |
def set_window_size(self, width, height):
"""Report the size of the window to display the image.
**Callbacks**
Will call any callbacks registered for the ``'configure'`` event.
Callbacks should have a method signature of::
(viewer, width, height, ...)
.. note::
This is called by the subclass with ``width`` and ``height``
as soon as the actual dimensions of the allocated window are known.
Parameters
----------
width : int
The width of the window in pixels.
height : int
The height of the window in pixels.
"""
self._imgwin_wd = int(width)
self._imgwin_ht = int(height)
self._ctr_x = width // 2
self._ctr_y = height // 2
self.logger.debug("widget resized to %dx%d" % (width, height))
self.make_callback('configure', width, height)
self.redraw(whence=0) | def function[set_window_size, parameter[self, width, height]]:
constant[Report the size of the window to display the image.
**Callbacks**
Will call any callbacks registered for the ``'configure'`` event.
Callbacks should have a method signature of::
(viewer, width, height, ...)
.. note::
This is called by the subclass with ``width`` and ``height``
as soon as the actual dimensions of the allocated window are known.
Parameters
----------
width : int
The width of the window in pixels.
height : int
The height of the window in pixels.
]
name[self]._imgwin_wd assign[=] call[name[int], parameter[name[width]]]
name[self]._imgwin_ht assign[=] call[name[int], parameter[name[height]]]
name[self]._ctr_x assign[=] binary_operation[name[width] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]
name[self]._ctr_y assign[=] binary_operation[name[height] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]
call[name[self].logger.debug, parameter[binary_operation[constant[widget resized to %dx%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0c25000>, <ast.Name object at 0x7da1b0c24220>]]]]]
call[name[self].make_callback, parameter[constant[configure], name[width], name[height]]]
call[name[self].redraw, parameter[]] | keyword[def] identifier[set_window_size] ( identifier[self] , identifier[width] , identifier[height] ):
literal[string]
identifier[self] . identifier[_imgwin_wd] = identifier[int] ( identifier[width] )
identifier[self] . identifier[_imgwin_ht] = identifier[int] ( identifier[height] )
identifier[self] . identifier[_ctr_x] = identifier[width] // literal[int]
identifier[self] . identifier[_ctr_y] = identifier[height] // literal[int]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] %( identifier[width] , identifier[height] ))
identifier[self] . identifier[make_callback] ( literal[string] , identifier[width] , identifier[height] )
identifier[self] . identifier[redraw] ( identifier[whence] = literal[int] ) | def set_window_size(self, width, height):
"""Report the size of the window to display the image.
**Callbacks**
Will call any callbacks registered for the ``'configure'`` event.
Callbacks should have a method signature of::
(viewer, width, height, ...)
.. note::
This is called by the subclass with ``width`` and ``height``
as soon as the actual dimensions of the allocated window are known.
Parameters
----------
width : int
The width of the window in pixels.
height : int
The height of the window in pixels.
"""
self._imgwin_wd = int(width)
self._imgwin_ht = int(height)
self._ctr_x = width // 2
self._ctr_y = height // 2
self.logger.debug('widget resized to %dx%d' % (width, height))
self.make_callback('configure', width, height)
self.redraw(whence=0) |
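# A runnable sketch of the callback contract described in the docstring,
# with a minimal stub standing in for a real viewer (logger, make_callback
# and redraw are faked here; in the original they come from the viewer's
# base classes, whose exact API is assumed).
import logging

class _StubViewer:
    logger = logging.getLogger("stub")
    _callbacks = []
    def add_callback(self, name, fn):
        self._callbacks.append(fn)
    def make_callback(self, name, *args):
        for fn in self._callbacks:
            fn(self, *args)
    def redraw(self, whence=0):
        pass
    set_window_size = set_window_size  # the method defined above

v = _StubViewer()
v.add_callback('configure', lambda viewer, w, h: print("resized to %dx%d" % (w, h)))
v.set_window_size(800, 600)  # prints: resized to 800x600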
def _get_processed_dataframe(self, dataframe):
"""Generate required dataframe for results from raw dataframe
:param pandas.DataFrame dataframe: the raw dataframe
:return: a dict containing raw, compiled, and summary dataframes from original dataframe
:rtype: dict
"""
dataframe.index = pd.to_datetime(dataframe['epoch'], unit='s', utc=True)
del dataframe['epoch']
summary = dataframe.describe(percentiles=[.80, .90, .95]).transpose().loc['scriptrun_time']
df_grp = dataframe.groupby(pd.TimeGrouper('{}S'.format(self.interval)))
df_final = df_grp.apply(lambda x: x.describe(percentiles=[.80, .90, .95])['scriptrun_time'])
return {
"raw": dataframe.round(2),
"compiled": df_final.round(2),
"summary": summary.round(2)
} | def function[_get_processed_dataframe, parameter[self, dataframe]]:
constant[Generate required dataframe for results from raw dataframe
:param pandas.DataFrame dataframe: the raw dataframe
:return: a dict containing raw, compiled, and summary dataframes from original dataframe
:rtype: dict
]
name[dataframe].index assign[=] call[name[pd].to_datetime, parameter[call[name[dataframe]][constant[epoch]]]]
<ast.Delete object at 0x7da18ede4e80>
variable[summary] assign[=] call[call[call[name[dataframe].describe, parameter[]].transpose, parameter[]].loc][constant[scriptrun_time]]
variable[df_grp] assign[=] call[name[dataframe].groupby, parameter[call[name[pd].TimeGrouper, parameter[call[constant[{}S].format, parameter[name[self].interval]]]]]]
variable[df_final] assign[=] call[name[df_grp].apply, parameter[<ast.Lambda object at 0x7da18ede5cf0>]]
return[dictionary[[<ast.Constant object at 0x7da18ede44f0>, <ast.Constant object at 0x7da18ede4100>, <ast.Constant object at 0x7da18ede7c40>], [<ast.Call object at 0x7da18ede48e0>, <ast.Call object at 0x7da18ede6950>, <ast.Call object at 0x7da18ede5c90>]]] | keyword[def] identifier[_get_processed_dataframe] ( identifier[self] , identifier[dataframe] ):
literal[string]
identifier[dataframe] . identifier[index] = identifier[pd] . identifier[to_datetime] ( identifier[dataframe] [ literal[string] ], identifier[unit] = literal[string] , identifier[utc] = keyword[True] )
keyword[del] identifier[dataframe] [ literal[string] ]
identifier[summary] = identifier[dataframe] . identifier[describe] ( identifier[percentiles] =[ literal[int] , literal[int] , literal[int] ]). identifier[transpose] (). identifier[loc] [ literal[string] ]
identifier[df_grp] = identifier[dataframe] . identifier[groupby] ( identifier[pd] . identifier[TimeGrouper] ( literal[string] . identifier[format] ( identifier[self] . identifier[interval] )))
identifier[df_final] = identifier[df_grp] . identifier[apply] ( keyword[lambda] identifier[x] : identifier[x] . identifier[describe] ( identifier[percentiles] =[ literal[int] , literal[int] , literal[int] ])[ literal[string] ])
keyword[return] {
literal[string] : identifier[dataframe] . identifier[round] ( literal[int] ),
literal[string] : identifier[df_final] . identifier[round] ( literal[int] ),
literal[string] : identifier[summary] . identifier[round] ( literal[int] )
} | def _get_processed_dataframe(self, dataframe):
"""Generate required dataframe for results from raw dataframe
:param pandas.DataFrame dataframe: the raw dataframe
:return: a dict containing raw, compiled, and summary dataframes from original dataframe
:rtype: dict
"""
dataframe.index = pd.to_datetime(dataframe['epoch'], unit='s', utc=True)
del dataframe['epoch']
summary = dataframe.describe(percentiles=[0.8, 0.9, 0.95]).transpose().loc['scriptrun_time']
df_grp = dataframe.groupby(pd.TimeGrouper('{}S'.format(self.interval)))
df_final = df_grp.apply(lambda x: x.describe(percentiles=[0.8, 0.9, 0.95])['scriptrun_time'])
return {'raw': dataframe.round(2), 'compiled': df_final.round(2), 'summary': summary.round(2)} |
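# A hedged example of driving _get_processed_dataframe: the holder class
# fakes the self.interval attribute the method reads, and the column names
# follow the method body. Note pd.TimeGrouper was removed in pandas 1.0
# (pd.Grouper(freq=...) replaced it), so this only runs on older pandas.
import numpy as np
import pandas as pd

class _Holder:
    interval = 10  # bucket width in seconds

df = pd.DataFrame({
    "epoch": np.arange(1500000000, 1500000060),         # 60 one-second samples
    "scriptrun_time": np.random.uniform(0.5, 2.0, 60),
})
out = _get_processed_dataframe(_Holder(), df)
print(out["summary"])          # overall count/mean/percentiles
print(out["compiled"].head())  # the same stats per 10-second window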
def attach_list(filepaths, notes):
'''
all the arguments are lists
    returns a list of dictionaries; each dictionary "represents" an attachment
'''
assert type(filepaths) in (list, tuple)
assert type(notes) in (list, tuple)
# this if clause means "if those lists are not of the same length"
if len(filepaths) != len(notes):
die('The number of --filepath, and --notes must be the same')
attach_list = []
for fname, note in zip(filepaths, notes):
name = os.path.basename(fname)
assert os.path.exists(fname)
mime = mimetypes.guess_type(fname)[0]
if mime is not None and '/' not in mime:
mime = None
attach_list.append({
'file': fname,
'name': name,
'mime': mime,
'note': note
})
return attach_list | def function[attach_list, parameter[filepaths, notes]]:
constant[
all the arguments are lists
    returns a list of dictionaries; each dictionary "represents" an attachment
]
assert[compare[call[name[type], parameter[name[filepaths]]] in tuple[[<ast.Name object at 0x7da1b26d4a60>, <ast.Name object at 0x7da1b26d5a80>]]]]
assert[compare[call[name[type], parameter[name[notes]]] in tuple[[<ast.Name object at 0x7da1b26d5e70>, <ast.Name object at 0x7da1b26d69e0>]]]]
if compare[call[name[len], parameter[name[filepaths]]] not_equal[!=] call[name[len], parameter[name[notes]]]] begin[:]
call[name[die], parameter[constant[The number of --filepath, and --notes must be the same]]]
variable[attach_list] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b26d78e0>, <ast.Name object at 0x7da1b26d47f0>]]] in starred[call[name[zip], parameter[name[filepaths], name[notes]]]] begin[:]
variable[name] assign[=] call[name[os].path.basename, parameter[name[fname]]]
assert[call[name[os].path.exists, parameter[name[fname]]]]
variable[mime] assign[=] call[call[name[mimetypes].guess_type, parameter[name[fname]]]][constant[0]]
if <ast.BoolOp object at 0x7da1b26d7f10> begin[:]
variable[mime] assign[=] constant[None]
call[name[attach_list].append, parameter[dictionary[[<ast.Constant object at 0x7da1b26d5360>, <ast.Constant object at 0x7da1b26d5630>, <ast.Constant object at 0x7da1b265ecb0>, <ast.Constant object at 0x7da1b265e4a0>], [<ast.Name object at 0x7da1b265fe20>, <ast.Name object at 0x7da1b265f190>, <ast.Name object at 0x7da1b265eb30>, <ast.Name object at 0x7da1b265fd30>]]]]
return[name[attach_list]] | keyword[def] identifier[attach_list] ( identifier[filepaths] , identifier[notes] ):
literal[string]
keyword[assert] identifier[type] ( identifier[filepaths] ) keyword[in] ( identifier[list] , identifier[tuple] )
keyword[assert] identifier[type] ( identifier[notes] ) keyword[in] ( identifier[list] , identifier[tuple] )
keyword[if] identifier[len] ( identifier[filepaths] )!= identifier[len] ( identifier[notes] ):
identifier[die] ( literal[string] )
identifier[attach_list] =[]
keyword[for] identifier[fname] , identifier[note] keyword[in] identifier[zip] ( identifier[filepaths] , identifier[notes] ):
identifier[name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[fname] )
keyword[assert] identifier[os] . identifier[path] . identifier[exists] ( identifier[fname] )
identifier[mime] = identifier[mimetypes] . identifier[guess_type] ( identifier[fname] )[ literal[int] ]
keyword[if] identifier[mime] keyword[is] keyword[not] keyword[None] keyword[and] literal[string] keyword[not] keyword[in] identifier[mime] :
identifier[mime] = keyword[None]
identifier[attach_list] . identifier[append] ({
literal[string] : identifier[fname] ,
literal[string] : identifier[name] ,
literal[string] : identifier[mime] ,
literal[string] : identifier[note]
})
keyword[return] identifier[attach_list] | def attach_list(filepaths, notes):
"""
all the arguments are lists
    returns a list of dictionaries; each dictionary "represents" an attachment
"""
assert type(filepaths) in (list, tuple)
assert type(notes) in (list, tuple)
# this if clause means "if those lists are not of the same length"
if len(filepaths) != len(notes):
die('The number of --filepath, and --notes must be the same') # depends on [control=['if'], data=[]]
attach_list = []
for (fname, note) in zip(filepaths, notes):
name = os.path.basename(fname)
assert os.path.exists(fname)
mime = mimetypes.guess_type(fname)[0]
if mime is not None and '/' not in mime:
mime = None # depends on [control=['if'], data=[]]
attach_list.append({'file': fname, 'name': name, 'mime': mime, 'note': note}) # depends on [control=['for'], data=[]]
return attach_list |
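# A minimal usage example for attach_list; the file names and notes are
# invented, and the temp files exist only so the os.path.exists assertion
# passes.
import os
import tempfile

tmpdir = tempfile.mkdtemp()
paths = [os.path.join(tmpdir, n) for n in ("report.pdf", "notes.txt")]
for p in paths:
    open(p, "w").close()

atts = attach_list(paths, ["quarterly report", "reviewer notes"])
# atts[0] -> {'file': '<tmpdir>/report.pdf', 'name': 'report.pdf',
#             'mime': 'application/pdf', 'note': 'quarterly report'}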
def get_hdrs_len(self):
# type: () -> int
""" get_hdrs_len computes the length of the hdrs field
To do this computation, the length of the padlen field, reserved,
    stream_id and the actual padding are subtracted from the string that was
provided to the pre_dissect fun of the pkt parameter.
@return int: the length of the hdrs field
@raise AssertionError
"""
fld, padding_len = self.getfield_and_val('padlen')
padding_len_len = fld.i2len(self, padding_len)
bit_len = self.get_field('reserved').size
bit_len += self.get_field('stream_id').size
ret = int(self.s_len -
padding_len_len -
padding_len -
(bit_len / 8)
)
assert(ret >= 0)
return ret | def function[get_hdrs_len, parameter[self]]:
constant[ get_hdrs_len computes the length of the hdrs field
To do this computation, the length of the padlen field, reserved,
    stream_id and the actual padding are subtracted from the string that was
provided to the pre_dissect fun of the pkt parameter.
@return int: the length of the hdrs field
@raise AssertionError
]
<ast.Tuple object at 0x7da2041db250> assign[=] call[name[self].getfield_and_val, parameter[constant[padlen]]]
variable[padding_len_len] assign[=] call[name[fld].i2len, parameter[name[self], name[padding_len]]]
variable[bit_len] assign[=] call[name[self].get_field, parameter[constant[reserved]]].size
<ast.AugAssign object at 0x7da1b1fc9f00>
variable[ret] assign[=] call[name[int], parameter[binary_operation[binary_operation[binary_operation[name[self].s_len - name[padding_len_len]] - name[padding_len]] - binary_operation[name[bit_len] / constant[8]]]]]
assert[compare[name[ret] greater_or_equal[>=] constant[0]]]
return[name[ret]] | keyword[def] identifier[get_hdrs_len] ( identifier[self] ):
literal[string]
identifier[fld] , identifier[padding_len] = identifier[self] . identifier[getfield_and_val] ( literal[string] )
identifier[padding_len_len] = identifier[fld] . identifier[i2len] ( identifier[self] , identifier[padding_len] )
identifier[bit_len] = identifier[self] . identifier[get_field] ( literal[string] ). identifier[size]
identifier[bit_len] += identifier[self] . identifier[get_field] ( literal[string] ). identifier[size]
identifier[ret] = identifier[int] ( identifier[self] . identifier[s_len] -
identifier[padding_len_len] -
identifier[padding_len] -
( identifier[bit_len] / literal[int] )
)
keyword[assert] ( identifier[ret] >= literal[int] )
keyword[return] identifier[ret] | def get_hdrs_len(self):
# type: () -> int
    ' get_hdrs_len computes the length of the hdrs field\n\n To do this computation, the length of the padlen field, reserved,\n stream_id and the actual padding are subtracted from the string that was\n provided to the pre_dissect fun of the pkt parameter.\n @return int: the length of the hdrs field\n @raise AssertionError\n '
(fld, padding_len) = self.getfield_and_val('padlen')
padding_len_len = fld.i2len(self, padding_len)
bit_len = self.get_field('reserved').size
bit_len += self.get_field('stream_id').size
ret = int(self.s_len - padding_len_len - padding_len - bit_len / 8)
assert ret >= 0
return ret |
def getKeplerFov(fieldnum):
"""Returns a `fov.KeplerFov` object for a given campaign.
Parameters
----------
fieldnum : int
K2 Campaign number.
Returns
-------
fovobj : `fov.KeplerFov` object
Details the footprint of the requested K2 campaign.
"""
info = getFieldInfo(fieldnum)
ra, dec, scRoll = info["ra"], info["dec"], info["roll"]
# convert from SC roll to FOV coordinates
# do not use the fovRoll coords anywhere else
# they are internal to this script only
fovRoll = fov.getFovAngleFromSpacecraftRoll(scRoll)
    # KeplerFov takes a list of broken CCD channels as an optional argument;
# these channels will be ignored during plotting and on-silicon determination.
# Modules 3 and 7 broke prior to the start of K2:
brokenChannels = [5, 6, 7, 8, 17, 18, 19, 20]
# Module 4 failed during Campaign 10
if fieldnum > 10:
brokenChannels.extend([9, 10, 11, 12])
# Hack: the Kepler field is defined as "Campaign 1000"
# and (initially) had no broken channels
if fieldnum == 1000:
brokenChannels = []
return fov.KeplerFov(ra, dec, fovRoll, brokenChannels=brokenChannels) | def function[getKeplerFov, parameter[fieldnum]]:
constant[Returns a `fov.KeplerFov` object for a given campaign.
Parameters
----------
fieldnum : int
K2 Campaign number.
Returns
-------
fovobj : `fov.KeplerFov` object
Details the footprint of the requested K2 campaign.
]
variable[info] assign[=] call[name[getFieldInfo], parameter[name[fieldnum]]]
<ast.Tuple object at 0x7da1b0a339a0> assign[=] tuple[[<ast.Subscript object at 0x7da1b0a32cb0>, <ast.Subscript object at 0x7da1b0a33ac0>, <ast.Subscript object at 0x7da1b0a32aa0>]]
variable[fovRoll] assign[=] call[name[fov].getFovAngleFromSpacecraftRoll, parameter[name[scRoll]]]
variable[brokenChannels] assign[=] list[[<ast.Constant object at 0x7da1b0a30e20>, <ast.Constant object at 0x7da1b0a30cd0>, <ast.Constant object at 0x7da1b0a30a60>, <ast.Constant object at 0x7da1b0a31d80>, <ast.Constant object at 0x7da1b0a31270>, <ast.Constant object at 0x7da1b0a33cd0>, <ast.Constant object at 0x7da1b0a30fa0>, <ast.Constant object at 0x7da1b0a314b0>]]
if compare[name[fieldnum] greater[>] constant[10]] begin[:]
call[name[brokenChannels].extend, parameter[list[[<ast.Constant object at 0x7da1b0bdbf10>, <ast.Constant object at 0x7da1b0bd89d0>, <ast.Constant object at 0x7da1b0bd99f0>, <ast.Constant object at 0x7da1b0bdad40>]]]]
if compare[name[fieldnum] equal[==] constant[1000]] begin[:]
variable[brokenChannels] assign[=] list[[]]
return[call[name[fov].KeplerFov, parameter[name[ra], name[dec], name[fovRoll]]]] | keyword[def] identifier[getKeplerFov] ( identifier[fieldnum] ):
literal[string]
identifier[info] = identifier[getFieldInfo] ( identifier[fieldnum] )
identifier[ra] , identifier[dec] , identifier[scRoll] = identifier[info] [ literal[string] ], identifier[info] [ literal[string] ], identifier[info] [ literal[string] ]
identifier[fovRoll] = identifier[fov] . identifier[getFovAngleFromSpacecraftRoll] ( identifier[scRoll] )
identifier[brokenChannels] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
keyword[if] identifier[fieldnum] > literal[int] :
identifier[brokenChannels] . identifier[extend] ([ literal[int] , literal[int] , literal[int] , literal[int] ])
keyword[if] identifier[fieldnum] == literal[int] :
identifier[brokenChannels] =[]
keyword[return] identifier[fov] . identifier[KeplerFov] ( identifier[ra] , identifier[dec] , identifier[fovRoll] , identifier[brokenChannels] = identifier[brokenChannels] ) | def getKeplerFov(fieldnum):
"""Returns a `fov.KeplerFov` object for a given campaign.
Parameters
----------
fieldnum : int
K2 Campaign number.
Returns
-------
fovobj : `fov.KeplerFov` object
Details the footprint of the requested K2 campaign.
"""
info = getFieldInfo(fieldnum)
(ra, dec, scRoll) = (info['ra'], info['dec'], info['roll'])
# convert from SC roll to FOV coordinates
# do not use the fovRoll coords anywhere else
# they are internal to this script only
fovRoll = fov.getFovAngleFromSpacecraftRoll(scRoll)
    # KeplerFov takes a list of broken CCD channels as an optional argument;
# these channels will be ignored during plotting and on-silicon determination.
# Modules 3 and 7 broke prior to the start of K2:
brokenChannels = [5, 6, 7, 8, 17, 18, 19, 20]
# Module 4 failed during Campaign 10
if fieldnum > 10:
brokenChannels.extend([9, 10, 11, 12]) # depends on [control=['if'], data=[]]
# Hack: the Kepler field is defined as "Campaign 1000"
# and (initially) had no broken channels
if fieldnum == 1000:
brokenChannels = [] # depends on [control=['if'], data=[]]
return fov.KeplerFov(ra, dec, fovRoll, brokenChannels=brokenChannels) |
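# Usage sketch, assuming the surrounding module (this looks like the K2fov
# package, with getFieldInfo and the fov module importable) is available:
fov_c5 = getKeplerFov(5)         # footprint of K2 Campaign 5
fov_c12 = getKeplerFov(12)       # campaigns > 10 also drop the module 4 channels
fov_kepler = getKeplerFov(1000)  # "Campaign 1000" is the original Kepler field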
def fetchThreadInfo(self, *thread_ids):
"""
Get threads' info from IDs, unordered
.. warning::
Sends two requests if users or pages are present, to fetch all available info!
:param thread_ids: One or more thread ID(s) to query
:return: :class:`models.Thread` objects, labeled by their ID
:rtype: dict
:raises: FBchatException if request failed
"""
queries = []
for thread_id in thread_ids:
params = {
"id": thread_id,
"message_limit": 0,
"load_messages": False,
"load_read_receipts": False,
"before": None,
}
queries.append(GraphQL(doc_id="2147762685294928", params=params))
j = self.graphql_requests(*queries)
for i, entry in enumerate(j):
if entry.get("message_thread") is None:
            # If you don't have an existing thread with this person, attempt to retrieve user data anyway
j[i]["message_thread"] = {
"thread_key": {"other_user_id": thread_ids[i]},
"thread_type": "ONE_TO_ONE",
}
pages_and_user_ids = [
k["message_thread"]["thread_key"]["other_user_id"]
for k in j
if k["message_thread"].get("thread_type") == "ONE_TO_ONE"
]
pages_and_users = {}
if len(pages_and_user_ids) != 0:
pages_and_users = self._fetchInfo(*pages_and_user_ids)
rtn = {}
for i, entry in enumerate(j):
entry = entry["message_thread"]
if entry.get("thread_type") == "GROUP":
_id = entry["thread_key"]["thread_fbid"]
rtn[_id] = Group._from_graphql(entry)
elif entry.get("thread_type") == "ONE_TO_ONE":
_id = entry["thread_key"]["other_user_id"]
if pages_and_users.get(_id) is None:
raise FBchatException("Could not fetch thread {}".format(_id))
entry.update(pages_and_users[_id])
if entry["type"] == ThreadType.USER:
rtn[_id] = User._from_graphql(entry)
else:
rtn[_id] = Page._from_graphql(entry)
else:
raise FBchatException(
"{} had an unknown thread type: {}".format(thread_ids[i], entry)
)
return rtn | def function[fetchThreadInfo, parameter[self]]:
constant[
Get threads' info from IDs, unordered
.. warning::
Sends two requests if users or pages are present, to fetch all available info!
:param thread_ids: One or more thread ID(s) to query
:return: :class:`models.Thread` objects, labeled by their ID
:rtype: dict
:raises: FBchatException if request failed
]
variable[queries] assign[=] list[[]]
for taget[name[thread_id]] in starred[name[thread_ids]] begin[:]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b184db10>, <ast.Constant object at 0x7da1b184cbb0>, <ast.Constant object at 0x7da1b184c2b0>, <ast.Constant object at 0x7da1b184d660>, <ast.Constant object at 0x7da1b184e380>], [<ast.Name object at 0x7da1b184c550>, <ast.Constant object at 0x7da1b184cd90>, <ast.Constant object at 0x7da1b184d240>, <ast.Constant object at 0x7da1b184e890>, <ast.Constant object at 0x7da1b184c6d0>]]
call[name[queries].append, parameter[call[name[GraphQL], parameter[]]]]
variable[j] assign[=] call[name[self].graphql_requests, parameter[<ast.Starred object at 0x7da1b184f2b0>]]
for taget[tuple[[<ast.Name object at 0x7da1b184c340>, <ast.Name object at 0x7da1b184e2c0>]]] in starred[call[name[enumerate], parameter[name[j]]]] begin[:]
if compare[call[name[entry].get, parameter[constant[message_thread]]] is constant[None]] begin[:]
call[call[name[j]][name[i]]][constant[message_thread]] assign[=] dictionary[[<ast.Constant object at 0x7da1b184da80>, <ast.Constant object at 0x7da1b184c520>], [<ast.Dict object at 0x7da1b184ca90>, <ast.Constant object at 0x7da1b184cd00>]]
variable[pages_and_user_ids] assign[=] <ast.ListComp object at 0x7da1b184dc90>
variable[pages_and_users] assign[=] dictionary[[], []]
if compare[call[name[len], parameter[name[pages_and_user_ids]]] not_equal[!=] constant[0]] begin[:]
variable[pages_and_users] assign[=] call[name[self]._fetchInfo, parameter[<ast.Starred object at 0x7da1b184c220>]]
variable[rtn] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b184c190>, <ast.Name object at 0x7da1b184e800>]]] in starred[call[name[enumerate], parameter[name[j]]]] begin[:]
variable[entry] assign[=] call[name[entry]][constant[message_thread]]
if compare[call[name[entry].get, parameter[constant[thread_type]]] equal[==] constant[GROUP]] begin[:]
variable[_id] assign[=] call[call[name[entry]][constant[thread_key]]][constant[thread_fbid]]
call[name[rtn]][name[_id]] assign[=] call[name[Group]._from_graphql, parameter[name[entry]]]
return[name[rtn]] | keyword[def] identifier[fetchThreadInfo] ( identifier[self] ,* identifier[thread_ids] ):
literal[string]
identifier[queries] =[]
keyword[for] identifier[thread_id] keyword[in] identifier[thread_ids] :
identifier[params] ={
literal[string] : identifier[thread_id] ,
literal[string] : literal[int] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[None] ,
}
identifier[queries] . identifier[append] ( identifier[GraphQL] ( identifier[doc_id] = literal[string] , identifier[params] = identifier[params] ))
identifier[j] = identifier[self] . identifier[graphql_requests] (* identifier[queries] )
keyword[for] identifier[i] , identifier[entry] keyword[in] identifier[enumerate] ( identifier[j] ):
keyword[if] identifier[entry] . identifier[get] ( literal[string] ) keyword[is] keyword[None] :
identifier[j] [ identifier[i] ][ literal[string] ]={
literal[string] :{ literal[string] : identifier[thread_ids] [ identifier[i] ]},
literal[string] : literal[string] ,
}
identifier[pages_and_user_ids] =[
identifier[k] [ literal[string] ][ literal[string] ][ literal[string] ]
keyword[for] identifier[k] keyword[in] identifier[j]
keyword[if] identifier[k] [ literal[string] ]. identifier[get] ( literal[string] )== literal[string]
]
identifier[pages_and_users] ={}
keyword[if] identifier[len] ( identifier[pages_and_user_ids] )!= literal[int] :
identifier[pages_and_users] = identifier[self] . identifier[_fetchInfo] (* identifier[pages_and_user_ids] )
identifier[rtn] ={}
keyword[for] identifier[i] , identifier[entry] keyword[in] identifier[enumerate] ( identifier[j] ):
identifier[entry] = identifier[entry] [ literal[string] ]
keyword[if] identifier[entry] . identifier[get] ( literal[string] )== literal[string] :
identifier[_id] = identifier[entry] [ literal[string] ][ literal[string] ]
identifier[rtn] [ identifier[_id] ]= identifier[Group] . identifier[_from_graphql] ( identifier[entry] )
keyword[elif] identifier[entry] . identifier[get] ( literal[string] )== literal[string] :
identifier[_id] = identifier[entry] [ literal[string] ][ literal[string] ]
keyword[if] identifier[pages_and_users] . identifier[get] ( identifier[_id] ) keyword[is] keyword[None] :
keyword[raise] identifier[FBchatException] ( literal[string] . identifier[format] ( identifier[_id] ))
identifier[entry] . identifier[update] ( identifier[pages_and_users] [ identifier[_id] ])
keyword[if] identifier[entry] [ literal[string] ]== identifier[ThreadType] . identifier[USER] :
identifier[rtn] [ identifier[_id] ]= identifier[User] . identifier[_from_graphql] ( identifier[entry] )
keyword[else] :
identifier[rtn] [ identifier[_id] ]= identifier[Page] . identifier[_from_graphql] ( identifier[entry] )
keyword[else] :
keyword[raise] identifier[FBchatException] (
literal[string] . identifier[format] ( identifier[thread_ids] [ identifier[i] ], identifier[entry] )
)
keyword[return] identifier[rtn] | def fetchThreadInfo(self, *thread_ids):
"""
Get threads' info from IDs, unordered
.. warning::
Sends two requests if users or pages are present, to fetch all available info!
:param thread_ids: One or more thread ID(s) to query
:return: :class:`models.Thread` objects, labeled by their ID
:rtype: dict
:raises: FBchatException if request failed
"""
queries = []
for thread_id in thread_ids:
params = {'id': thread_id, 'message_limit': 0, 'load_messages': False, 'load_read_receipts': False, 'before': None}
queries.append(GraphQL(doc_id='2147762685294928', params=params)) # depends on [control=['for'], data=['thread_id']]
j = self.graphql_requests(*queries)
for (i, entry) in enumerate(j):
if entry.get('message_thread') is None:
            # If you don't have an existing thread with this person, attempt to retrieve user data anyway
j[i]['message_thread'] = {'thread_key': {'other_user_id': thread_ids[i]}, 'thread_type': 'ONE_TO_ONE'} # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
pages_and_user_ids = [k['message_thread']['thread_key']['other_user_id'] for k in j if k['message_thread'].get('thread_type') == 'ONE_TO_ONE']
pages_and_users = {}
if len(pages_and_user_ids) != 0:
pages_and_users = self._fetchInfo(*pages_and_user_ids) # depends on [control=['if'], data=[]]
rtn = {}
for (i, entry) in enumerate(j):
entry = entry['message_thread']
if entry.get('thread_type') == 'GROUP':
_id = entry['thread_key']['thread_fbid']
rtn[_id] = Group._from_graphql(entry) # depends on [control=['if'], data=[]]
elif entry.get('thread_type') == 'ONE_TO_ONE':
_id = entry['thread_key']['other_user_id']
if pages_and_users.get(_id) is None:
raise FBchatException('Could not fetch thread {}'.format(_id)) # depends on [control=['if'], data=[]]
entry.update(pages_and_users[_id])
if entry['type'] == ThreadType.USER:
rtn[_id] = User._from_graphql(entry) # depends on [control=['if'], data=[]]
else:
rtn[_id] = Page._from_graphql(entry) # depends on [control=['if'], data=[]]
else:
raise FBchatException('{} had an unknown thread type: {}'.format(thread_ids[i], entry)) # depends on [control=['for'], data=[]]
return rtn |
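# A hedged example against the fbchat client this method belongs to; the
# credentials and thread IDs below are placeholders.
from fbchat import Client

client = Client("<email>", "<password>")
threads = client.fetchThreadInfo("100012345678901", "100098765432109")
for tid, thread in threads.items():
    print(tid, type(thread).__name__, thread.name)
client.logout()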
def expand_path(path):
    """Expands directories and globs in the given path."""
paths = []
path = os.path.expanduser(path)
path = os.path.expandvars(path)
if os.path.isdir(path):
for (dir, dirs, files) in os.walk(path):
for file in files:
paths.append(os.path.join(dir, file))
else:
paths.extend(glob(path))
return paths | def function[expand_path, parameter[path]]:
    constant[Expands directories and globs in the given path.]
variable[paths] assign[=] list[[]]
variable[path] assign[=] call[name[os].path.expanduser, parameter[name[path]]]
variable[path] assign[=] call[name[os].path.expandvars, parameter[name[path]]]
if call[name[os].path.isdir, parameter[name[path]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1d5e0e0>, <ast.Name object at 0x7da1b1d5fca0>, <ast.Name object at 0x7da1b1d5d3c0>]]] in starred[call[name[os].walk, parameter[name[path]]]] begin[:]
for taget[name[file]] in starred[name[files]] begin[:]
call[name[paths].append, parameter[call[name[os].path.join, parameter[name[dir], name[file]]]]]
return[name[paths]] | keyword[def] identifier[expand_path] ( identifier[path] ):
literal[string]
identifier[paths] =[]
identifier[path] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[path] )
identifier[path] = identifier[os] . identifier[path] . identifier[expandvars] ( identifier[path] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ):
keyword[for] ( identifier[dir] , identifier[dirs] , identifier[files] ) keyword[in] identifier[os] . identifier[walk] ( identifier[path] ):
keyword[for] identifier[file] keyword[in] identifier[files] :
identifier[paths] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dir] , identifier[file] ))
keyword[else] :
identifier[paths] . identifier[extend] ( identifier[glob] ( identifier[path] ))
keyword[return] identifier[paths] | def expand_path(path):
"""Expands directories and globs in given path."""
paths = []
path = os.path.expanduser(path)
path = os.path.expandvars(path)
if os.path.isdir(path):
for (dir, dirs, files) in os.walk(path):
for file in files:
paths.append(os.path.join(dir, file)) # depends on [control=['for'], data=['file']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
paths.extend(glob(path))
return paths |
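# Examples of the three expansions performed above (results depend on the
# local filesystem):
print(expand_path("~/logs"))       # a directory: every file under it, recursively
print(expand_path("$HOME/*.txt"))  # env-var expansion, then glob matching
print(expand_path("setup.py"))     # a plain file path passes through glob unchanged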
def knn_impute_reference(
X,
missing_mask,
k,
verbose=False,
print_interval=100):
"""
Reference implementation of kNN imputation logic.
"""
n_rows, n_cols = X.shape
X_result, D, effective_infinity = \
knn_initialize(X, missing_mask, verbose=verbose)
for i in range(n_rows):
for j in np.where(missing_mask[i, :])[0]:
distances = D[i, :].copy()
# any rows that don't have the value we're currently trying
# to impute are set to infinite distances
distances[missing_mask[:, j]] = effective_infinity
neighbor_indices = np.argsort(distances)
neighbor_distances = distances[neighbor_indices]
# get rid of any infinite distance neighbors in the top k
valid_distances = neighbor_distances < effective_infinity
neighbor_distances = neighbor_distances[valid_distances][:k]
neighbor_indices = neighbor_indices[valid_distances][:k]
weights = 1.0 / neighbor_distances
weight_sum = weights.sum()
if weight_sum > 0:
column = X[:, j]
values = column[neighbor_indices]
X_result[i, j] = np.dot(values, weights) / weight_sum
return X_result | def function[knn_impute_reference, parameter[X, missing_mask, k, verbose, print_interval]]:
constant[
Reference implementation of kNN imputation logic.
]
<ast.Tuple object at 0x7da18c4ced70> assign[=] name[X].shape
<ast.Tuple object at 0x7da18c4ce3b0> assign[=] call[name[knn_initialize], parameter[name[X], name[missing_mask]]]
for taget[name[i]] in starred[call[name[range], parameter[name[n_rows]]]] begin[:]
for taget[name[j]] in starred[call[call[name[np].where, parameter[call[name[missing_mask]][tuple[[<ast.Name object at 0x7da18c4cd9f0>, <ast.Slice object at 0x7da18c4cf850>]]]]]][constant[0]]] begin[:]
variable[distances] assign[=] call[call[name[D]][tuple[[<ast.Name object at 0x7da18c4cc5b0>, <ast.Slice object at 0x7da18c4cf0d0>]]].copy, parameter[]]
call[name[distances]][call[name[missing_mask]][tuple[[<ast.Slice object at 0x7da18c4cc280>, <ast.Name object at 0x7da18c4cfd90>]]]] assign[=] name[effective_infinity]
variable[neighbor_indices] assign[=] call[name[np].argsort, parameter[name[distances]]]
variable[neighbor_distances] assign[=] call[name[distances]][name[neighbor_indices]]
variable[valid_distances] assign[=] compare[name[neighbor_distances] less[<] name[effective_infinity]]
variable[neighbor_distances] assign[=] call[call[name[neighbor_distances]][name[valid_distances]]][<ast.Slice object at 0x7da18c4ce470>]
variable[neighbor_indices] assign[=] call[call[name[neighbor_indices]][name[valid_distances]]][<ast.Slice object at 0x7da18c4cd000>]
variable[weights] assign[=] binary_operation[constant[1.0] / name[neighbor_distances]]
variable[weight_sum] assign[=] call[name[weights].sum, parameter[]]
if compare[name[weight_sum] greater[>] constant[0]] begin[:]
variable[column] assign[=] call[name[X]][tuple[[<ast.Slice object at 0x7da18c4cd2d0>, <ast.Name object at 0x7da18c4cc130>]]]
variable[values] assign[=] call[name[column]][name[neighbor_indices]]
call[name[X_result]][tuple[[<ast.Name object at 0x7da18c4cfdc0>, <ast.Name object at 0x7da18c4cdc00>]]] assign[=] binary_operation[call[name[np].dot, parameter[name[values], name[weights]]] / name[weight_sum]]
return[name[X_result]] | keyword[def] identifier[knn_impute_reference] (
identifier[X] ,
identifier[missing_mask] ,
identifier[k] ,
identifier[verbose] = keyword[False] ,
identifier[print_interval] = literal[int] ):
literal[string]
identifier[n_rows] , identifier[n_cols] = identifier[X] . identifier[shape]
identifier[X_result] , identifier[D] , identifier[effective_infinity] = identifier[knn_initialize] ( identifier[X] , identifier[missing_mask] , identifier[verbose] = identifier[verbose] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n_rows] ):
keyword[for] identifier[j] keyword[in] identifier[np] . identifier[where] ( identifier[missing_mask] [ identifier[i] ,:])[ literal[int] ]:
identifier[distances] = identifier[D] [ identifier[i] ,:]. identifier[copy] ()
identifier[distances] [ identifier[missing_mask] [:, identifier[j] ]]= identifier[effective_infinity]
identifier[neighbor_indices] = identifier[np] . identifier[argsort] ( identifier[distances] )
identifier[neighbor_distances] = identifier[distances] [ identifier[neighbor_indices] ]
identifier[valid_distances] = identifier[neighbor_distances] < identifier[effective_infinity]
identifier[neighbor_distances] = identifier[neighbor_distances] [ identifier[valid_distances] ][: identifier[k] ]
identifier[neighbor_indices] = identifier[neighbor_indices] [ identifier[valid_distances] ][: identifier[k] ]
identifier[weights] = literal[int] / identifier[neighbor_distances]
identifier[weight_sum] = identifier[weights] . identifier[sum] ()
keyword[if] identifier[weight_sum] > literal[int] :
identifier[column] = identifier[X] [:, identifier[j] ]
identifier[values] = identifier[column] [ identifier[neighbor_indices] ]
identifier[X_result] [ identifier[i] , identifier[j] ]= identifier[np] . identifier[dot] ( identifier[values] , identifier[weights] )/ identifier[weight_sum]
keyword[return] identifier[X_result] | def knn_impute_reference(X, missing_mask, k, verbose=False, print_interval=100):
"""
Reference implementation of kNN imputation logic.
"""
(n_rows, n_cols) = X.shape
(X_result, D, effective_infinity) = knn_initialize(X, missing_mask, verbose=verbose)
for i in range(n_rows):
for j in np.where(missing_mask[i, :])[0]:
distances = D[i, :].copy()
# any rows that don't have the value we're currently trying
# to impute are set to infinite distances
distances[missing_mask[:, j]] = effective_infinity
neighbor_indices = np.argsort(distances)
neighbor_distances = distances[neighbor_indices]
# get rid of any infinite distance neighbors in the top k
valid_distances = neighbor_distances < effective_infinity
neighbor_distances = neighbor_distances[valid_distances][:k]
neighbor_indices = neighbor_indices[valid_distances][:k]
weights = 1.0 / neighbor_distances
weight_sum = weights.sum()
if weight_sum > 0:
column = X[:, j]
values = column[neighbor_indices]
X_result[i, j] = np.dot(values, weights) / weight_sum # depends on [control=['if'], data=['weight_sum']] # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
return X_result |
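# A usage sketch; knn_initialize (which pre-fills X_result and builds the
# pairwise distance matrix D) is assumed importable from the same module.
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(50, 4)
missing_mask = rng.rand(*X.shape) < 0.1  # mark ~10% of entries as missing
X[missing_mask] = np.nan
X_filled = knn_impute_reference(X, missing_mask, k=5)
# masked slots of X_filled now hold inverse-distance-weighted kNN estimates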
def cleanup(self, force=False):
"""Clean up Yadis-related services in the session and return
the most-recently-attempted service from the manager, if one
exists.
@param force: True if the manager should be deleted regardless
of whether it's a manager for self.url.
@return: current service endpoint object or None if there is
no current service
"""
manager = self.getManager(force=force)
if manager is not None:
service = manager.current()
self.destroyManager(force=force)
else:
service = None
return service | def function[cleanup, parameter[self, force]]:
constant[Clean up Yadis-related services in the session and return
the most-recently-attempted service from the manager, if one
exists.
@param force: True if the manager should be deleted regardless
of whether it's a manager for self.url.
@return: current service endpoint object or None if there is
no current service
]
variable[manager] assign[=] call[name[self].getManager, parameter[]]
if compare[name[manager] is_not constant[None]] begin[:]
variable[service] assign[=] call[name[manager].current, parameter[]]
call[name[self].destroyManager, parameter[]]
return[name[service]] | keyword[def] identifier[cleanup] ( identifier[self] , identifier[force] = keyword[False] ):
literal[string]
identifier[manager] = identifier[self] . identifier[getManager] ( identifier[force] = identifier[force] )
keyword[if] identifier[manager] keyword[is] keyword[not] keyword[None] :
identifier[service] = identifier[manager] . identifier[current] ()
identifier[self] . identifier[destroyManager] ( identifier[force] = identifier[force] )
keyword[else] :
identifier[service] = keyword[None]
keyword[return] identifier[service] | def cleanup(self, force=False):
"""Clean up Yadis-related services in the session and return
the most-recently-attempted service from the manager, if one
exists.
@param force: True if the manager should be deleted regardless
of whether it's a manager for self.url.
@return: current service endpoint object or None if there is
no current service
"""
manager = self.getManager(force=force)
if manager is not None:
service = manager.current()
self.destroyManager(force=force) # depends on [control=['if'], data=['manager']]
else:
service = None
return service |
def get_constrained_fc2(supercell,
dataset_second_atoms,
atom1,
reduced_site_sym,
symprec):
"""
dataset_second_atoms: [{'number': 7,
'displacement': [],
'delta_forces': []}, ...]
"""
lattice = supercell.get_cell().T
positions = supercell.get_scaled_positions()
num_atom = supercell.get_number_of_atoms()
fc2 = np.zeros((num_atom, num_atom, 3, 3), dtype='double')
atom_list = np.unique([x['number'] for x in dataset_second_atoms])
for atom2 in atom_list:
disps2 = []
sets_of_forces = []
for disps_second in dataset_second_atoms:
if atom2 != disps_second['number']:
continue
bond_sym = get_bond_symmetry(
reduced_site_sym,
lattice,
positions,
atom1,
atom2,
symprec)
disps2.append(disps_second['displacement'])
sets_of_forces.append(disps_second['delta_forces'])
solve_force_constants(fc2,
atom2,
disps2,
sets_of_forces,
supercell,
bond_sym,
symprec)
    # Shift positions so that atom1 sits at the origin
pos_center = positions[atom1].copy()
positions -= pos_center
rotations = np.array(reduced_site_sym, dtype='intc', order='C')
translations = np.zeros((len(reduced_site_sym), 3),
dtype='double', order='C')
permutations = compute_all_sg_permutations(positions,
rotations,
translations,
lattice,
symprec)
distribute_force_constants(fc2,
atom_list,
lattice,
rotations,
permutations)
return fc2 | def function[get_constrained_fc2, parameter[supercell, dataset_second_atoms, atom1, reduced_site_sym, symprec]]:
constant[
dataset_second_atoms: [{'number': 7,
'displacement': [],
'delta_forces': []}, ...]
]
variable[lattice] assign[=] call[name[supercell].get_cell, parameter[]].T
variable[positions] assign[=] call[name[supercell].get_scaled_positions, parameter[]]
variable[num_atom] assign[=] call[name[supercell].get_number_of_atoms, parameter[]]
variable[fc2] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da18f09d270>, <ast.Name object at 0x7da18f09ebf0>, <ast.Constant object at 0x7da18f09f070>, <ast.Constant object at 0x7da18f09feb0>]]]]
variable[atom_list] assign[=] call[name[np].unique, parameter[<ast.ListComp object at 0x7da18f09e920>]]
for taget[name[atom2]] in starred[name[atom_list]] begin[:]
variable[disps2] assign[=] list[[]]
variable[sets_of_forces] assign[=] list[[]]
for taget[name[disps_second]] in starred[name[dataset_second_atoms]] begin[:]
if compare[name[atom2] not_equal[!=] call[name[disps_second]][constant[number]]] begin[:]
continue
variable[bond_sym] assign[=] call[name[get_bond_symmetry], parameter[name[reduced_site_sym], name[lattice], name[positions], name[atom1], name[atom2], name[symprec]]]
call[name[disps2].append, parameter[call[name[disps_second]][constant[displacement]]]]
call[name[sets_of_forces].append, parameter[call[name[disps_second]][constant[delta_forces]]]]
call[name[solve_force_constants], parameter[name[fc2], name[atom2], name[disps2], name[sets_of_forces], name[supercell], name[bond_sym], name[symprec]]]
variable[pos_center] assign[=] call[call[name[positions]][name[atom1]].copy, parameter[]]
<ast.AugAssign object at 0x7da20c9939a0>
variable[rotations] assign[=] call[name[np].array, parameter[name[reduced_site_sym]]]
variable[translations] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Call object at 0x7da1b07817e0>, <ast.Constant object at 0x7da1b0780dc0>]]]]
variable[permutations] assign[=] call[name[compute_all_sg_permutations], parameter[name[positions], name[rotations], name[translations], name[lattice], name[symprec]]]
call[name[distribute_force_constants], parameter[name[fc2], name[atom_list], name[lattice], name[rotations], name[permutations]]]
return[name[fc2]] | keyword[def] identifier[get_constrained_fc2] ( identifier[supercell] ,
identifier[dataset_second_atoms] ,
identifier[atom1] ,
identifier[reduced_site_sym] ,
identifier[symprec] ):
literal[string]
identifier[lattice] = identifier[supercell] . identifier[get_cell] (). identifier[T]
identifier[positions] = identifier[supercell] . identifier[get_scaled_positions] ()
identifier[num_atom] = identifier[supercell] . identifier[get_number_of_atoms] ()
identifier[fc2] = identifier[np] . identifier[zeros] (( identifier[num_atom] , identifier[num_atom] , literal[int] , literal[int] ), identifier[dtype] = literal[string] )
identifier[atom_list] = identifier[np] . identifier[unique] ([ identifier[x] [ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[dataset_second_atoms] ])
keyword[for] identifier[atom2] keyword[in] identifier[atom_list] :
identifier[disps2] =[]
identifier[sets_of_forces] =[]
keyword[for] identifier[disps_second] keyword[in] identifier[dataset_second_atoms] :
keyword[if] identifier[atom2] != identifier[disps_second] [ literal[string] ]:
keyword[continue]
identifier[bond_sym] = identifier[get_bond_symmetry] (
identifier[reduced_site_sym] ,
identifier[lattice] ,
identifier[positions] ,
identifier[atom1] ,
identifier[atom2] ,
identifier[symprec] )
identifier[disps2] . identifier[append] ( identifier[disps_second] [ literal[string] ])
identifier[sets_of_forces] . identifier[append] ( identifier[disps_second] [ literal[string] ])
identifier[solve_force_constants] ( identifier[fc2] ,
identifier[atom2] ,
identifier[disps2] ,
identifier[sets_of_forces] ,
identifier[supercell] ,
identifier[bond_sym] ,
identifier[symprec] )
identifier[pos_center] = identifier[positions] [ identifier[atom1] ]. identifier[copy] ()
identifier[positions] -= identifier[pos_center]
identifier[rotations] = identifier[np] . identifier[array] ( identifier[reduced_site_sym] , identifier[dtype] = literal[string] , identifier[order] = literal[string] )
identifier[translations] = identifier[np] . identifier[zeros] (( identifier[len] ( identifier[reduced_site_sym] ), literal[int] ),
identifier[dtype] = literal[string] , identifier[order] = literal[string] )
identifier[permutations] = identifier[compute_all_sg_permutations] ( identifier[positions] ,
identifier[rotations] ,
identifier[translations] ,
identifier[lattice] ,
identifier[symprec] )
identifier[distribute_force_constants] ( identifier[fc2] ,
identifier[atom_list] ,
identifier[lattice] ,
identifier[rotations] ,
identifier[permutations] )
keyword[return] identifier[fc2] | def get_constrained_fc2(supercell, dataset_second_atoms, atom1, reduced_site_sym, symprec):
"""
dataset_second_atoms: [{'number': 7,
'displacement': [],
'delta_forces': []}, ...]
"""
lattice = supercell.get_cell().T
positions = supercell.get_scaled_positions()
num_atom = supercell.get_number_of_atoms()
fc2 = np.zeros((num_atom, num_atom, 3, 3), dtype='double')
atom_list = np.unique([x['number'] for x in dataset_second_atoms])
for atom2 in atom_list:
disps2 = []
sets_of_forces = []
for disps_second in dataset_second_atoms:
if atom2 != disps_second['number']:
continue # depends on [control=['if'], data=[]]
bond_sym = get_bond_symmetry(reduced_site_sym, lattice, positions, atom1, atom2, symprec)
disps2.append(disps_second['displacement'])
sets_of_forces.append(disps_second['delta_forces']) # depends on [control=['for'], data=['disps_second']]
solve_force_constants(fc2, atom2, disps2, sets_of_forces, supercell, bond_sym, symprec) # depends on [control=['for'], data=['atom2']]
    # Shift positions so that atom1 sits at the origin
pos_center = positions[atom1].copy()
positions -= pos_center
rotations = np.array(reduced_site_sym, dtype='intc', order='C')
translations = np.zeros((len(reduced_site_sym), 3), dtype='double', order='C')
permutations = compute_all_sg_permutations(positions, rotations, translations, lattice, symprec)
distribute_force_constants(fc2, atom_list, lattice, rotations, permutations)
return fc2 |
def generate_public_and_private():
"""
<Purpose>
Generate a pair of ed25519 public and private keys with PyNaCl. The public
and private keys returned conform to
    'securesystemslib.formats.ED25519PUBLIC_SCHEMA' and
'securesystemslib.formats.ED25519SEED_SCHEMA', respectively, and have the
form:
'\xa2F\x99\xe0\x86\x80%\xc8\xee\x11\xb95T\xd9\...'
An ed25519 seed key is a random 32-byte string. Public keys are also 32
bytes.
>>> public, private = generate_public_and_private()
>>> securesystemslib.formats.ED25519PUBLIC_SCHEMA.matches(public)
True
>>> securesystemslib.formats.ED25519SEED_SCHEMA.matches(private)
True
<Arguments>
None.
<Exceptions>
securesystemslib.exceptions.UnsupportedLibraryError, if the PyNaCl ('nacl')
module is unavailable.
NotImplementedError, if a randomness source is not found by 'os.urandom'.
<Side Effects>
The ed25519 keys are generated by first creating a random 32-byte seed
with os.urandom() and then calling PyNaCl's nacl.signing.SigningKey().
<Returns>
    A (public, private) tuple that conforms to
'securesystemslib.formats.ED25519PUBLIC_SCHEMA' and
'securesystemslib.formats.ED25519SEED_SCHEMA', respectively.
"""
# Generate ed25519's seed key by calling os.urandom(). The random bytes
# returned should be suitable for cryptographic use and is OS-specific.
# Raise 'NotImplementedError' if a randomness source is not found.
# ed25519 seed keys are fixed at 32 bytes (256-bit keys).
# http://blog.mozilla.org/warner/2011/11/29/ed25519-keys/
seed = os.urandom(32)
public = None
# Generate the public key. PyNaCl (i.e., 'nacl' module) performs the actual
# key generation.
try:
nacl_key = nacl.signing.SigningKey(seed)
public = nacl_key.verify_key.encode(encoder=nacl.encoding.RawEncoder())
except NameError: # pragma: no cover
raise securesystemslib.exceptions.UnsupportedLibraryError('The PyNaCl'
' library and/or its dependencies unavailable.')
return public, seed | def function[generate_public_and_private, parameter[]]:
constant[
<Purpose>
Generate a pair of ed25519 public and private keys with PyNaCl. The public
and private keys returned conform to
    'securesystemslib.formats.ED25519PUBLIC_SCHEMA' and
'securesystemslib.formats.ED25519SEED_SCHEMA', respectively, and have the
form:
    '\xa2F\x99\xe0\x86\x80%\xc8\xee\x11\xb95T\xd9\...'
An ed25519 seed key is a random 32-byte string. Public keys are also 32
bytes.
>>> public, private = generate_public_and_private()
>>> securesystemslib.formats.ED25519PUBLIC_SCHEMA.matches(public)
True
>>> securesystemslib.formats.ED25519SEED_SCHEMA.matches(private)
True
<Arguments>
None.
<Exceptions>
securesystemslib.exceptions.UnsupportedLibraryError, if the PyNaCl ('nacl')
module is unavailable.
NotImplementedError, if a randomness source is not found by 'os.urandom'.
<Side Effects>
The ed25519 keys are generated by first creating a random 32-byte seed
with os.urandom() and then calling PyNaCl's nacl.signing.SigningKey().
<Returns>
    A (public, private) tuple that conforms to
'securesystemslib.formats.ED25519PUBLIC_SCHEMA' and
'securesystemslib.formats.ED25519SEED_SCHEMA', respectively.
]
variable[seed] assign[=] call[name[os].urandom, parameter[constant[32]]]
variable[public] assign[=] constant[None]
<ast.Try object at 0x7da20c992bf0>
return[tuple[[<ast.Name object at 0x7da20c993df0>, <ast.Name object at 0x7da20c990ee0>]]] | keyword[def] identifier[generate_public_and_private] ():
literal[string]
identifier[seed] = identifier[os] . identifier[urandom] ( literal[int] )
identifier[public] = keyword[None]
keyword[try] :
identifier[nacl_key] = identifier[nacl] . identifier[signing] . identifier[SigningKey] ( identifier[seed] )
identifier[public] = identifier[nacl_key] . identifier[verify_key] . identifier[encode] ( identifier[encoder] = identifier[nacl] . identifier[encoding] . identifier[RawEncoder] ())
keyword[except] identifier[NameError] :
keyword[raise] identifier[securesystemslib] . identifier[exceptions] . identifier[UnsupportedLibraryError] ( literal[string]
literal[string] )
keyword[return] identifier[public] , identifier[seed] | def generate_public_and_private():
"""
<Purpose>
Generate a pair of ed25519 public and private keys with PyNaCl. The public
and private keys returned conform to
    'securesystemslib.formats.ED25519PUBLIC_SCHEMA' and
'securesystemslib.formats.ED25519SEED_SCHEMA', respectively, and have the
form:
    '\xa2F\x99\xe0\x86\x80%\xc8\xee\x11\xb95T\xd9\\...'
An ed25519 seed key is a random 32-byte string. Public keys are also 32
bytes.
>>> public, private = generate_public_and_private()
>>> securesystemslib.formats.ED25519PUBLIC_SCHEMA.matches(public)
True
>>> securesystemslib.formats.ED25519SEED_SCHEMA.matches(private)
True
<Arguments>
None.
<Exceptions>
securesystemslib.exceptions.UnsupportedLibraryError, if the PyNaCl ('nacl')
module is unavailable.
NotImplementedError, if a randomness source is not found by 'os.urandom'.
<Side Effects>
The ed25519 keys are generated by first creating a random 32-byte seed
with os.urandom() and then calling PyNaCl's nacl.signing.SigningKey().
<Returns>
    A (public, private) tuple that conforms to
'securesystemslib.formats.ED25519PUBLIC_SCHEMA' and
'securesystemslib.formats.ED25519SEED_SCHEMA', respectively.
"""
# Generate ed25519's seed key by calling os.urandom(). The random bytes
# returned should be suitable for cryptographic use and is OS-specific.
# Raise 'NotImplementedError' if a randomness source is not found.
# ed25519 seed keys are fixed at 32 bytes (256-bit keys).
# http://blog.mozilla.org/warner/2011/11/29/ed25519-keys/
seed = os.urandom(32)
public = None
# Generate the public key. PyNaCl (i.e., 'nacl' module) performs the actual
# key generation.
try:
nacl_key = nacl.signing.SigningKey(seed)
public = nacl_key.verify_key.encode(encoder=nacl.encoding.RawEncoder()) # depends on [control=['try'], data=[]]
except NameError: # pragma: no cover
raise securesystemslib.exceptions.UnsupportedLibraryError('The PyNaCl library and/or its dependencies unavailable.') # depends on [control=['except'], data=[]]
return (public, seed) |
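# Example: both returned values are 32-byte strings, and the seed can be fed
# straight back into PyNaCl to sign (PyNaCl assumed installed):
import nacl.signing

public, seed = generate_public_and_private()
assert len(public) == 32 and len(seed) == 32
signature = nacl.signing.SigningKey(seed).sign(b'payload').signature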
def append_index_id(id, ids):
"""
add index to id to make it unique wrt ids
"""
index = 1
mod = '%s_%s' % (id, index)
while mod in ids:
index += 1
mod = '%s_%s' % (id, index)
ids.append(mod)
return mod, ids | def function[append_index_id, parameter[id, ids]]:
constant[
add index to id to make it unique wrt ids
]
variable[index] assign[=] constant[1]
variable[mod] assign[=] binary_operation[constant[%s_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2411f90>, <ast.Name object at 0x7da1b24120b0>]]]
while compare[name[mod] in name[ids]] begin[:]
<ast.AugAssign object at 0x7da1b2410d00>
variable[mod] assign[=] binary_operation[constant[%s_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b24120e0>, <ast.Name object at 0x7da1b2411ab0>]]]
call[name[ids].append, parameter[name[mod]]]
return[tuple[[<ast.Name object at 0x7da1b2411ea0>, <ast.Name object at 0x7da20cabc0a0>]]] | keyword[def] identifier[append_index_id] ( identifier[id] , identifier[ids] ):
literal[string]
identifier[index] = literal[int]
identifier[mod] = literal[string] %( identifier[id] , identifier[index] )
keyword[while] identifier[mod] keyword[in] identifier[ids] :
identifier[index] += literal[int]
identifier[mod] = literal[string] %( identifier[id] , identifier[index] )
identifier[ids] . identifier[append] ( identifier[mod] )
keyword[return] identifier[mod] , identifier[ids] | def append_index_id(id, ids):
"""
add index to id to make it unique wrt ids
"""
index = 1
mod = '%s_%s' % (id, index)
while mod in ids:
index += 1
mod = '%s_%s' % (id, index) # depends on [control=['while'], data=['mod']]
ids.append(mod)
return (mod, ids) |
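# Example: the numeric suffix counts up until the id no longer collides.
ids = ['gene_1', 'gene_2']
mod, ids = append_index_id('gene', ids)
# mod == 'gene_3'; ids == ['gene_1', 'gene_2', 'gene_3']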
def send_file(request, filename, content_type='image/jpeg'):
"""
Send a file through Django without loading the whole file into
memory at once. The FileWrapper will turn the file object into an
iterator for chunks of 8KB.
"""
wrapper = FixedFileWrapper(file(filename, 'rb'))
response = HttpResponse(wrapper, content_type=content_type)
response['Content-Length'] = os.path.getsize(filename)
return response | def function[send_file, parameter[request, filename, content_type]]:
constant[
Send a file through Django without loading the whole file into
memory at once. The FileWrapper will turn the file object into an
iterator for chunks of 8KB.
]
variable[wrapper] assign[=] call[name[FixedFileWrapper], parameter[call[name[file], parameter[name[filename], constant[rb]]]]]
variable[response] assign[=] call[name[HttpResponse], parameter[name[wrapper]]]
call[name[response]][constant[Content-Length]] assign[=] call[name[os].path.getsize, parameter[name[filename]]]
return[name[response]] | keyword[def] identifier[send_file] ( identifier[request] , identifier[filename] , identifier[content_type] = literal[string] ):
literal[string]
identifier[wrapper] = identifier[FixedFileWrapper] ( identifier[file] ( identifier[filename] , literal[string] ))
identifier[response] = identifier[HttpResponse] ( identifier[wrapper] , identifier[content_type] = identifier[content_type] )
identifier[response] [ literal[string] ]= identifier[os] . identifier[path] . identifier[getsize] ( identifier[filename] )
keyword[return] identifier[response] | def send_file(request, filename, content_type='image/jpeg'):
"""
Send a file through Django without loading the whole file into
memory at once. The FileWrapper will turn the file object into an
iterator for chunks of 8KB.
"""
wrapper = FixedFileWrapper(file(filename, 'rb'))
response = HttpResponse(wrapper, content_type=content_type)
response['Content-Length'] = os.path.getsize(filename)
return response |
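# A hedged wiring example (Django 1.x-era, matching the Python 2 file()
# call above); the URL pattern and photo directory are invented.
from django.conf.urls import url

def photo_view(request, name):
    return send_file(request, '/srv/photos/%s.jpg' % name)

urlpatterns = [url(r'^photo/(?P<name>\w+)/$', photo_view)]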
def validate_version_pragma(version_str: str, start: ParserPosition) -> None:
"""
Validates a version pragma directive against the current compiler version.
"""
from vyper import (
__version__,
)
version_arr = version_str.split('@version')
file_version = version_arr[1].strip()
file_major, file_minor, file_patch = _parse_version_str(file_version, start)
compiler_major, compiler_minor, compiler_patch = _parse_version_str(__version__, start)
if (file_major, file_minor) != (compiler_major, compiler_minor):
raise VersionException(
f'File version "{file_version}" is not compatible '
f'with compiler version "{__version__}"',
start,
) | def function[validate_version_pragma, parameter[version_str, start]]:
constant[
Validates a version pragma directive against the current compiler version.
]
from relative_module[vyper] import module[__version__]
variable[version_arr] assign[=] call[name[version_str].split, parameter[constant[@version]]]
variable[file_version] assign[=] call[call[name[version_arr]][constant[1]].strip, parameter[]]
<ast.Tuple object at 0x7da2044c3b20> assign[=] call[name[_parse_version_str], parameter[name[file_version], name[start]]]
<ast.Tuple object at 0x7da2044c2440> assign[=] call[name[_parse_version_str], parameter[name[__version__], name[start]]]
if compare[tuple[[<ast.Name object at 0x7da18dc054e0>, <ast.Name object at 0x7da18dc04a90>]] not_equal[!=] tuple[[<ast.Name object at 0x7da18dc048e0>, <ast.Name object at 0x7da18dc06920>]]] begin[:]
<ast.Raise object at 0x7da18dc06320> | keyword[def] identifier[validate_version_pragma] ( identifier[version_str] : identifier[str] , identifier[start] : identifier[ParserPosition] )-> keyword[None] :
literal[string]
keyword[from] identifier[vyper] keyword[import] (
identifier[__version__] ,
)
identifier[version_arr] = identifier[version_str] . identifier[split] ( literal[string] )
identifier[file_version] = identifier[version_arr] [ literal[int] ]. identifier[strip] ()
identifier[file_major] , identifier[file_minor] , identifier[file_patch] = identifier[_parse_version_str] ( identifier[file_version] , identifier[start] )
identifier[compiler_major] , identifier[compiler_minor] , identifier[compiler_patch] = identifier[_parse_version_str] ( identifier[__version__] , identifier[start] )
keyword[if] ( identifier[file_major] , identifier[file_minor] )!=( identifier[compiler_major] , identifier[compiler_minor] ):
keyword[raise] identifier[VersionException] (
literal[string]
literal[string] ,
identifier[start] ,
) | def validate_version_pragma(version_str: str, start: ParserPosition) -> None:
"""
Validates a version pragma directive against the current compiler version.
"""
from vyper import __version__
version_arr = version_str.split('@version')
file_version = version_arr[1].strip()
(file_major, file_minor, file_patch) = _parse_version_str(file_version, start)
(compiler_major, compiler_minor, compiler_patch) = _parse_version_str(__version__, start)
if (file_major, file_minor) != (compiler_major, compiler_minor):
raise VersionException(f'File version "{file_version}" is not compatible with compiler version "{__version__}"', start) # depends on [control=['if'], data=[]] |
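The compatibility rule above compares only (major, minor) and ignores the patch component. A standalone sketch of that rule (the simplified _parse_version_str below is hypothetical; the real helper also carries source positions for error reporting):

def _parse_version_str(version_str):
    major, minor, patch = (int(x) for x in version_str.split('.'))
    return major, minor, patch

assert _parse_version_str('2.4.1')[:2] == _parse_version_str('2.4.9')[:2]  # patch differs: compatible
assert _parse_version_str('2.4.1')[:2] != _parse_version_str('2.5.0')[:2]  # minor differs: rejected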
def stop(cls):
"""Change back the normal stdout after the end"""
if any(cls.streams):
sys.stdout = cls.streams.pop(-1)
else:
sys.stdout = sys.__stdout__ | def function[stop, parameter[cls]]:
constant[Restore the previous stdout once capture ends.]
if call[name[any], parameter[name[cls].streams]] begin[:]
name[sys].stdout assign[=] call[name[cls].streams.pop, parameter[<ast.UnaryOp object at 0x7da207f02710>]] | keyword[def] identifier[stop] ( identifier[cls] ):
literal[string]
keyword[if] identifier[any] ( identifier[cls] . identifier[streams] ):
identifier[sys] . identifier[stdout] = identifier[cls] . identifier[streams] . identifier[pop] (- literal[int] )
keyword[else] :
identifier[sys] . identifier[stdout] = identifier[sys] . identifier[__stdout__] | def stop(cls):
"""Change back the normal stdout after the end"""
if any(cls.streams):
sys.stdout = cls.streams.pop(-1) # depends on [control=['if'], data=[]]
else:
sys.stdout = sys.__stdout__ |
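stop() above is the teardown half of a stdout-capture stack; the matching start() is not shown in the source, so the sketch below assumes one (Capture is a hypothetical class name):

import io
import sys

class Capture:
    streams = []

    @classmethod
    def start(cls):
        cls.streams.append(sys.stdout)  # remember the current stdout
        sys.stdout = io.StringIO()      # redirect prints into a buffer

    @classmethod
    def stop(cls):
        if any(cls.streams):
            sys.stdout = cls.streams.pop(-1)
        else:
            sys.stdout = sys.__stdout__  # fall back to the real stdout

Capture.start()
print('captured, not shown')
Capture.stop()
print('visible again')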
def parse_track_header(self, fp):
"""Return the size of the track chunk."""
# Check the header
try:
h = fp.read(4)
self.bytes_read += 4
except:
raise IOError("Couldn't read track header from file. Byte %d."
% self.bytes_read)
if h != 'MTrk':
raise HeaderError('Not a valid Track header. Byte %d.'
% self.bytes_read)
# Parse the size of the header
try:
chunk_size = fp.read(4)
self.bytes_read += 4
except:
raise IOError("Couldn't read track chunk size from file.")
chunk_size = self.bytes_to_int(chunk_size)
return chunk_size | def function[parse_track_header, parameter[self, fp]]:
constant[Return the size of the track chunk.]
<ast.Try object at 0x7da1b13069e0>
if compare[name[h] not_equal[!=] constant[MTrk]] begin[:]
<ast.Raise object at 0x7da1b26af3d0>
<ast.Try object at 0x7da1b26ad150>
variable[chunk_size] assign[=] call[name[self].bytes_to_int, parameter[name[chunk_size]]]
return[name[chunk_size]] | keyword[def] identifier[parse_track_header] ( identifier[self] , identifier[fp] ):
literal[string]
keyword[try] :
identifier[h] = identifier[fp] . identifier[read] ( literal[int] )
identifier[self] . identifier[bytes_read] += literal[int]
keyword[except] :
keyword[raise] identifier[IOError] ( literal[string]
% identifier[self] . identifier[bytes_read] )
keyword[if] identifier[h] != literal[string] :
keyword[raise] identifier[HeaderError] ( literal[string]
% identifier[self] . identifier[bytes_read] )
keyword[try] :
identifier[chunk_size] = identifier[fp] . identifier[read] ( literal[int] )
identifier[self] . identifier[bytes_read] += literal[int]
keyword[except] :
keyword[raise] identifier[IOError] ( literal[string] )
identifier[chunk_size] = identifier[self] . identifier[bytes_to_int] ( identifier[chunk_size] )
keyword[return] identifier[chunk_size] | def parse_track_header(self, fp):
"""Return the size of the track chunk."""
# Check the header
try:
h = fp.read(4)
self.bytes_read += 4 # depends on [control=['try'], data=[]]
except:
raise IOError("Couldn't read track header from file. Byte %d." % self.bytes_read) # depends on [control=['except'], data=[]]
if h != 'MTrk':
raise HeaderError('Not a valid Track header. Byte %d.' % self.bytes_read) # depends on [control=['if'], data=[]]
# Parse the size of the header
try:
chunk_size = fp.read(4)
self.bytes_read += 4 # depends on [control=['try'], data=[]]
except:
raise IOError("Couldn't read track chunk size from file.") # depends on [control=['except'], data=[]]
chunk_size = self.bytes_to_int(chunk_size)
return chunk_size |
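parse_track_header above reads a MIDI track chunk header: a 4-byte ASCII tag b'MTrk' followed by a 4-byte big-endian length (so bytes_to_int is presumably big-endian, as the MIDI spec requires). An equivalent sketch with the stdlib struct module:

import io
import struct

def parse_track_header_sketch(fp):
    tag, chunk_size = struct.unpack('>4sI', fp.read(8))  # 4-byte tag + big-endian uint32
    if tag != b'MTrk':
        raise ValueError('Not a valid Track header: %r' % tag)
    return chunk_size

assert parse_track_header_sketch(io.BytesIO(b'MTrk\x00\x00\x00\x0c')) == 12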
def run(self):
"""
Creates new permissions.
"""
from pyoko.lib.utils import get_object_from_path
from zengine.config import settings
model = get_object_from_path(settings.PERMISSION_MODEL)
perm_provider = get_object_from_path(settings.PERMISSION_PROVIDER)
existing_perms = []
new_perms = []
for code, name, desc in perm_provider():
code = six.text_type(code)
if self.manager.args.dry:
exists = model.objects.filter(code=code, name=name)
if exists:
perm = exists[0]
new = False
else:
new = True
perm = model(code=code, name=name)
else:
try:
perm = model.objects.get(code)
existing_perms.append(perm)
except ObjectDoesNotExist:
perm = model(description=desc, code=code, name=name)
perm.key = code
perm.save()
new_perms.append(perm)
# perm, new = model.objects.get_or_create({'description': desc}, code=code, name=name)
# if new:
# new_perms.append(perm)
# else:
# existing_perms.append(perm)
report = "\n\n%s permission(s) were found in DB. " % len(existing_perms)
if new_perms:
report += "\n%s new permission record added. " % len(new_perms)
else:
report += 'No new perms added. '
if new_perms:
if not self.manager.args.dry:
SelectBoxCache.flush(model.__name__)
report += 'Total %s perms exists.' % (len(existing_perms) + len(new_perms))
report = "\n + " + "\n + ".join([p.name or p.code for p in new_perms]) + report
if self.manager.args.dry:
print("\n~~~~~~~~~~~~~~ DRY RUN ~~~~~~~~~~~~~~\n")
print(report + "\n") | def function[run, parameter[self]]:
constant[
Creates new permissions.
]
from relative_module[pyoko.lib.utils] import module[get_object_from_path]
from relative_module[zengine.config] import module[settings]
variable[model] assign[=] call[name[get_object_from_path], parameter[name[settings].PERMISSION_MODEL]]
variable[perm_provider] assign[=] call[name[get_object_from_path], parameter[name[settings].PERMISSION_PROVIDER]]
variable[existing_perms] assign[=] list[[]]
variable[new_perms] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c990310>, <ast.Name object at 0x7da20c993700>, <ast.Name object at 0x7da20c992d10>]]] in starred[call[name[perm_provider], parameter[]]] begin[:]
variable[code] assign[=] call[name[six].text_type, parameter[name[code]]]
if name[self].manager.args.dry begin[:]
variable[exists] assign[=] call[name[model].objects.filter, parameter[]]
if name[exists] begin[:]
variable[perm] assign[=] call[name[exists]][constant[0]]
variable[new] assign[=] constant[False]
variable[report] assign[=] binary_operation[constant[
%s permission(s) were found in DB. ] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[existing_perms]]]]
if name[new_perms] begin[:]
<ast.AugAssign object at 0x7da20c990040>
if name[new_perms] begin[:]
if <ast.UnaryOp object at 0x7da20c993460> begin[:]
call[name[SelectBoxCache].flush, parameter[name[model].__name__]]
<ast.AugAssign object at 0x7da20c993eb0>
variable[report] assign[=] binary_operation[binary_operation[constant[
+ ] + call[constant[
+ ].join, parameter[<ast.ListComp object at 0x7da20c992e30>]]] + name[report]]
if name[self].manager.args.dry begin[:]
call[name[print], parameter[constant[
~~~~~~~~~~~~~~ DRY RUN ~~~~~~~~~~~~~~
]]]
call[name[print], parameter[binary_operation[name[report] + constant[
]]]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[from] identifier[pyoko] . identifier[lib] . identifier[utils] keyword[import] identifier[get_object_from_path]
keyword[from] identifier[zengine] . identifier[config] keyword[import] identifier[settings]
identifier[model] = identifier[get_object_from_path] ( identifier[settings] . identifier[PERMISSION_MODEL] )
identifier[perm_provider] = identifier[get_object_from_path] ( identifier[settings] . identifier[PERMISSION_PROVIDER] )
identifier[existing_perms] =[]
identifier[new_perms] =[]
keyword[for] identifier[code] , identifier[name] , identifier[desc] keyword[in] identifier[perm_provider] ():
identifier[code] = identifier[six] . identifier[text_type] ( identifier[code] )
keyword[if] identifier[self] . identifier[manager] . identifier[args] . identifier[dry] :
identifier[exists] = identifier[model] . identifier[objects] . identifier[filter] ( identifier[code] = identifier[code] , identifier[name] = identifier[name] )
keyword[if] identifier[exists] :
identifier[perm] = identifier[exists] [ literal[int] ]
identifier[new] = keyword[False]
keyword[else] :
identifier[new] = keyword[True]
identifier[perm] = identifier[model] ( identifier[code] = identifier[code] , identifier[name] = identifier[name] )
keyword[else] :
keyword[try] :
identifier[perm] = identifier[model] . identifier[objects] . identifier[get] ( identifier[code] )
identifier[existing_perms] . identifier[append] ( identifier[perm] )
keyword[except] identifier[ObjectDoesNotExist] :
identifier[perm] = identifier[model] ( identifier[description] = identifier[desc] , identifier[code] = identifier[code] , identifier[name] = identifier[name] )
identifier[perm] . identifier[key] = identifier[code]
identifier[perm] . identifier[save] ()
identifier[new_perms] . identifier[append] ( identifier[perm] )
identifier[report] = literal[string] % identifier[len] ( identifier[existing_perms] )
keyword[if] identifier[new_perms] :
identifier[report] += literal[string] % identifier[len] ( identifier[new_perms] )
keyword[else] :
identifier[report] += literal[string]
keyword[if] identifier[new_perms] :
keyword[if] keyword[not] identifier[self] . identifier[manager] . identifier[args] . identifier[dry] :
identifier[SelectBoxCache] . identifier[flush] ( identifier[model] . identifier[__name__] )
identifier[report] += literal[string] %( identifier[len] ( identifier[existing_perms] )+ identifier[len] ( identifier[new_perms] ))
identifier[report] = literal[string] + literal[string] . identifier[join] ([ identifier[p] . identifier[name] keyword[or] identifier[p] . identifier[code] keyword[for] identifier[p] keyword[in] identifier[new_perms] ])+ identifier[report]
keyword[if] identifier[self] . identifier[manager] . identifier[args] . identifier[dry] :
identifier[print] ( literal[string] )
identifier[print] ( identifier[report] + literal[string] ) | def run(self):
"""
Creates new permissions.
"""
from pyoko.lib.utils import get_object_from_path
from zengine.config import settings
model = get_object_from_path(settings.PERMISSION_MODEL)
perm_provider = get_object_from_path(settings.PERMISSION_PROVIDER)
existing_perms = []
new_perms = []
for (code, name, desc) in perm_provider():
code = six.text_type(code)
if self.manager.args.dry:
exists = model.objects.filter(code=code, name=name)
if exists:
perm = exists[0]
new = False # depends on [control=['if'], data=[]]
else:
new = True
perm = model(code=code, name=name) # depends on [control=['if'], data=[]]
else:
try:
perm = model.objects.get(code)
existing_perms.append(perm) # depends on [control=['try'], data=[]]
except ObjectDoesNotExist:
perm = model(description=desc, code=code, name=name)
perm.key = code
perm.save()
new_perms.append(perm) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
# perm, new = model.objects.get_or_create({'description': desc}, code=code, name=name)
# if new:
# new_perms.append(perm)
# else:
# existing_perms.append(perm)
report = '\n\n%s permission(s) were found in DB. ' % len(existing_perms)
if new_perms:
report += '\n%s new permission record added. ' % len(new_perms) # depends on [control=['if'], data=[]]
else:
report += 'No new perms added. '
if new_perms:
if not self.manager.args.dry:
SelectBoxCache.flush(model.__name__) # depends on [control=['if'], data=[]]
report += 'Total %s perms exists.' % (len(existing_perms) + len(new_perms))
report = '\n + ' + '\n + '.join([p.name or p.code for p in new_perms]) + report # depends on [control=['if'], data=[]]
if self.manager.args.dry:
print('\n~~~~~~~~~~~~~~ DRY RUN ~~~~~~~~~~~~~~\n') # depends on [control=['if'], data=[]]
print(report + '\n') |
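run() above diffs provider-declared permissions against the database, creating the missing ones unless the dry flag is set. A minimal sketch of that sync-with-dry-run pattern with a plain dict standing in for the ORM (all names below are hypothetical):

def sync_permissions(provider, db, dry=False):
    existing, created = [], []
    for code, name, desc in provider():
        if code in db:
            existing.append(code)
        else:
            created.append(code)
            if not dry:  # a dry run reports but never writes
                db[code] = {'name': name, 'description': desc}
    return existing, created

db = {'view': {'name': 'View', 'description': ''}}
provider = lambda: [('view', 'View', ''), ('edit', 'Edit', '')]
print(sync_permissions(provider, db, dry=True))  # (['view'], ['edit']), db untouched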
def domain_of_validity(self):
"""
Return the domain of validity for this CRS as:
(west, east, south, north).
For example::
>>> print(get(21781).domain_of_validity())
[5.96, 10.49, 45.82, 47.81]
"""
# TODO: Generalise interface to return a polygon? (Can we find
# something that uses a polygon instead?)
domain = self.element.find(GML_NS + 'domainOfValidity')
domain_href = domain.attrib[XLINK_NS + 'href']
url = '{prefix}{code}.gml?download'.format(prefix=EPSG_IO_URL,
code=domain_href)
xml = requests.get(url).content
gml = ET.fromstring(xml)
def extract_bound(tag):
ns = '{http://www.isotc211.org/2005/gmd}'
xpath = './/{ns}EX_GeographicBoundingBox/{ns}{tag}/'.format(
ns=ns,
tag=tag)
bound = gml.find(xpath)
return float(bound.text)
tags = ('westBoundLongitude', 'eastBoundLongitude',
'southBoundLatitude', 'northBoundLatitude')
bounds = [extract_bound(tag) for tag in tags]
return bounds | def function[domain_of_validity, parameter[self]]:
constant[
Return the domain of validity for this CRS as:
(west, east, south, north).
For example::
>>> print(get(21781).domain_of_validity())
[5.96, 10.49, 45.82, 47.81]
]
variable[domain] assign[=] call[name[self].element.find, parameter[binary_operation[name[GML_NS] + constant[domainOfValidity]]]]
variable[domain_href] assign[=] call[name[domain].attrib][binary_operation[name[XLINK_NS] + constant[href]]]
variable[url] assign[=] call[constant[{prefix}{code}.gml?download].format, parameter[]]
variable[xml] assign[=] call[name[requests].get, parameter[name[url]]].content
variable[gml] assign[=] call[name[ET].fromstring, parameter[name[xml]]]
def function[extract_bound, parameter[tag]]:
variable[ns] assign[=] constant[{http://www.isotc211.org/2005/gmd}]
variable[xpath] assign[=] call[constant[.//{ns}EX_GeographicBoundingBox/{ns}{tag}/].format, parameter[]]
variable[bound] assign[=] call[name[gml].find, parameter[name[xpath]]]
return[call[name[float], parameter[name[bound].text]]]
variable[tags] assign[=] tuple[[<ast.Constant object at 0x7da1b25ec610>, <ast.Constant object at 0x7da1b25ed7b0>, <ast.Constant object at 0x7da1b25ed210>, <ast.Constant object at 0x7da1b25ee200>]]
variable[bounds] assign[=] <ast.ListComp object at 0x7da1b25ef610>
return[name[bounds]] | keyword[def] identifier[domain_of_validity] ( identifier[self] ):
literal[string]
identifier[domain] = identifier[self] . identifier[element] . identifier[find] ( identifier[GML_NS] + literal[string] )
identifier[domain_href] = identifier[domain] . identifier[attrib] [ identifier[XLINK_NS] + literal[string] ]
identifier[url] = literal[string] . identifier[format] ( identifier[prefix] = identifier[EPSG_IO_URL] ,
identifier[code] = identifier[domain_href] )
identifier[xml] = identifier[requests] . identifier[get] ( identifier[url] ). identifier[content]
identifier[gml] = identifier[ET] . identifier[fromstring] ( identifier[xml] )
keyword[def] identifier[extract_bound] ( identifier[tag] ):
identifier[ns] = literal[string]
identifier[xpath] = literal[string] . identifier[format] (
identifier[ns] = identifier[ns] ,
identifier[tag] = identifier[tag] )
identifier[bound] = identifier[gml] . identifier[find] ( identifier[xpath] )
keyword[return] identifier[float] ( identifier[bound] . identifier[text] )
identifier[tags] =( literal[string] , literal[string] ,
literal[string] , literal[string] )
identifier[bounds] =[ identifier[extract_bound] ( identifier[tag] ) keyword[for] identifier[tag] keyword[in] identifier[tags] ]
keyword[return] identifier[bounds] | def domain_of_validity(self):
"""
Return the domain of validity for this CRS as:
(west, east, south, north).
For example::
>>> print(get(21781).domain_of_validity())
[5.96, 10.49, 45.82, 47.81]
"""
# TODO: Generalise interface to return a polygon? (Can we find
# something that uses a polygon instead?)
domain = self.element.find(GML_NS + 'domainOfValidity')
domain_href = domain.attrib[XLINK_NS + 'href']
url = '{prefix}{code}.gml?download'.format(prefix=EPSG_IO_URL, code=domain_href)
xml = requests.get(url).content
gml = ET.fromstring(xml)
def extract_bound(tag):
ns = '{http://www.isotc211.org/2005/gmd}'
xpath = './/{ns}EX_GeographicBoundingBox/{ns}{tag}/'.format(ns=ns, tag=tag)
bound = gml.find(xpath)
return float(bound.text)
tags = ('westBoundLongitude', 'eastBoundLongitude', 'southBoundLatitude', 'northBoundLatitude')
bounds = [extract_bound(tag) for tag in tags]
return bounds |
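The extract_bound closure above performs a namespaced ElementTree find into an ISO 19139 (gmd) bounding box. A sketch of that step on an inline fragment (the real document is fetched from epsg.io; the gco value namespace is an assumption about the payload):

import xml.etree.ElementTree as ET

GMD = '{http://www.isotc211.org/2005/gmd}'
GCO = '{http://www.isotc211.org/2005/gco}'
doc = ET.fromstring(
    '<root xmlns:gmd="http://www.isotc211.org/2005/gmd"'
    ' xmlns:gco="http://www.isotc211.org/2005/gco">'
    '<gmd:EX_GeographicBoundingBox>'
    '<gmd:westBoundLongitude><gco:Decimal>5.96</gco:Decimal></gmd:westBoundLongitude>'
    '</gmd:EX_GeographicBoundingBox></root>')

bound = doc.find('.//{gmd}westBoundLongitude/{gco}Decimal'.format(gmd=GMD, gco=GCO))
print(float(bound.text))  # 5.96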
def get_job(self):
"""Get the Streams job that owns this view.
Returns:
Job: Streams Job owning this view.
"""
return Job(self.rest_client.make_request(self.job), self.rest_client) | def function[get_job, parameter[self]]:
constant[Get the Streams job that owns this view.
Returns:
Job: Streams Job owning this view.
]
return[call[name[Job], parameter[call[name[self].rest_client.make_request, parameter[name[self].job]], name[self].rest_client]]] | keyword[def] identifier[get_job] ( identifier[self] ):
literal[string]
keyword[return] identifier[Job] ( identifier[self] . identifier[rest_client] . identifier[make_request] ( identifier[self] . identifier[job] ), identifier[self] . identifier[rest_client] ) | def get_job(self):
"""Get the Streams job that owns this view.
Returns:
Job: Streams Job owning this view.
"""
return Job(self.rest_client.make_request(self.job), self.rest_client) |
def M(self, t, tips=None, gaps=None):
"""See docs for method in `Model` abstract base class."""
assert isinstance(t, float) and t > 0, "Invalid t: {0}".format(t)
        with scipy.errstate(under='ignore'): # don't worry if some values underflow to 0
if ('expD', t) not in self._cached:
self._cached[('expD', t)] = scipy.exp(self.D * self.mu * t)
expD = self._cached[('expD', t)]
# swap axes to broadcast multiply D as diagonal matrix
temp = scipy.ascontiguousarray((self.A.swapaxes(0, 1)
* expD).swapaxes(1, 0), dtype=float)
M = broadcastMatrixMultiply(temp, self.Ainv)
assert M.min() > -1e-3, "Overly negative M: {0}".format(M.min())
M[M < 0] = 0.0
if tips is None:
return scipy.tile(M, (self.nsites, 1, 1))
else:
newM = scipy.zeros((len(tips), N_CODON))
for i in range(len(tips)):
                newM[i] = M[0][:, tips[i]]
if gaps is not None:
newM[gaps] = scipy.ones(N_CODON, dtype='float')
return newM | def function[M, parameter[self, t, tips, gaps]]:
constant[See docs for method in `Model` abstract base class.]
assert[<ast.BoolOp object at 0x7da1b26ad900>]
with call[name[scipy].errstate, parameter[]] begin[:]
if compare[tuple[[<ast.Constant object at 0x7da1b26afa60>, <ast.Name object at 0x7da1b26af040>]] <ast.NotIn object at 0x7da2590d7190> name[self]._cached] begin[:]
call[name[self]._cached][tuple[[<ast.Constant object at 0x7da1b26af0a0>, <ast.Name object at 0x7da1b26ac2b0>]]] assign[=] call[name[scipy].exp, parameter[binary_operation[binary_operation[name[self].D * name[self].mu] * name[t]]]]
variable[expD] assign[=] call[name[self]._cached][tuple[[<ast.Constant object at 0x7da1b26acee0>, <ast.Name object at 0x7da1b26ad9f0>]]]
variable[temp] assign[=] call[name[scipy].ascontiguousarray, parameter[call[binary_operation[call[name[self].A.swapaxes, parameter[constant[0], constant[1]]] * name[expD]].swapaxes, parameter[constant[1], constant[0]]]]]
variable[M] assign[=] call[name[broadcastMatrixMultiply], parameter[name[temp], name[self].Ainv]]
assert[compare[call[name[M].min, parameter[]] greater[>] <ast.UnaryOp object at 0x7da1b26aff70>]]
call[name[M]][compare[name[M] less[<] constant[0]]] assign[=] constant[0.0]
if compare[name[tips] is constant[None]] begin[:]
return[call[name[scipy].tile, parameter[name[M], tuple[[<ast.Attribute object at 0x7da1b26ae7a0>, <ast.Constant object at 0x7da1b26aeb60>, <ast.Constant object at 0x7da1b26ae950>]]]]] | keyword[def] identifier[M] ( identifier[self] , identifier[t] , identifier[tips] = keyword[None] , identifier[gaps] = keyword[None] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[t] , identifier[float] ) keyword[and] identifier[t] > literal[int] , literal[string] . identifier[format] ( identifier[t] )
keyword[with] identifier[scipy] . identifier[errstate] ( identifier[under] = literal[string] ):
keyword[if] ( literal[string] , identifier[t] ) keyword[not] keyword[in] identifier[self] . identifier[_cached] :
identifier[self] . identifier[_cached] [( literal[string] , identifier[t] )]= identifier[scipy] . identifier[exp] ( identifier[self] . identifier[D] * identifier[self] . identifier[mu] * identifier[t] )
identifier[expD] = identifier[self] . identifier[_cached] [( literal[string] , identifier[t] )]
identifier[temp] = identifier[scipy] . identifier[ascontiguousarray] (( identifier[self] . identifier[A] . identifier[swapaxes] ( literal[int] , literal[int] )
* identifier[expD] ). identifier[swapaxes] ( literal[int] , literal[int] ), identifier[dtype] = identifier[float] )
identifier[M] = identifier[broadcastMatrixMultiply] ( identifier[temp] , identifier[self] . identifier[Ainv] )
keyword[assert] identifier[M] . identifier[min] ()>- literal[int] , literal[string] . identifier[format] ( identifier[M] . identifier[min] ())
identifier[M] [ identifier[M] < literal[int] ]= literal[int]
keyword[if] identifier[tips] keyword[is] keyword[None] :
keyword[return] identifier[scipy] . identifier[tile] ( identifier[M] ,( identifier[self] . identifier[nsites] , literal[int] , literal[int] ))
keyword[else] :
identifier[newM] = identifier[scipy] . identifier[zeros] (( identifier[len] ( identifier[tips] ), identifier[N_CODON] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[tips] )):
identifier[newM] [ identifier[i] ]=( identifier[M] [ literal[int] ][:, identifier[tips] [ identifier[i] ]])
keyword[if] identifier[gaps] keyword[is] keyword[not] keyword[None] :
identifier[newM] [ identifier[gaps] ]= identifier[scipy] . identifier[ones] ( identifier[N_CODON] , identifier[dtype] = literal[string] )
keyword[return] identifier[newM] | def M(self, t, tips=None, gaps=None):
"""See docs for method in `Model` abstract base class."""
assert isinstance(t, float) and t > 0, 'Invalid t: {0}'.format(t)
    with scipy.errstate(under='ignore'): # don't worry if some values underflow to 0
if ('expD', t) not in self._cached:
self._cached['expD', t] = scipy.exp(self.D * self.mu * t) # depends on [control=['if'], data=[]]
expD = self._cached['expD', t]
# swap axes to broadcast multiply D as diagonal matrix
temp = scipy.ascontiguousarray((self.A.swapaxes(0, 1) * expD).swapaxes(1, 0), dtype=float)
M = broadcastMatrixMultiply(temp, self.Ainv)
assert M.min() > -0.001, 'Overly negative M: {0}'.format(M.min())
M[M < 0] = 0.0
if tips is None:
return scipy.tile(M, (self.nsites, 1, 1)) # depends on [control=['if'], data=[]]
else:
newM = scipy.zeros((len(tips), N_CODON))
for i in range(len(tips)):
newM[i] = M[0][:, tips[i]] # depends on [control=['for'], data=['i']]
if gaps is not None:
newM[gaps] = scipy.ones(N_CODON, dtype='float') # depends on [control=['if'], data=['gaps']]
return newM # depends on [control=['with'], data=[]] |
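M(t) above evaluates the matrix exponential of a rate matrix through its eigendecomposition: with P = A·diag(D)·Ainv, expm(mu·t·P) = A·diag(exp(D·mu·t))·Ainv, which is what the broadcast multiply implements per site. A minimal numpy sketch of that identity on a toy 2-state matrix:

import numpy as np
from scipy.linalg import expm

P = np.array([[-1.0, 1.0], [0.5, -0.5]])  # toy rate matrix (rows sum to 0)
D, A = np.linalg.eig(P)                   # P = A @ diag(D) @ inv(A)
Ainv = np.linalg.inv(A)
mu, t = 1.0, 0.7

M_eig = A @ np.diag(np.exp(D * mu * t)) @ Ainv
assert np.allclose(M_eig, expm(P * mu * t))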
def read_turbomole(basis_lines, fname):
'''Reads turbomole-formatted file data and converts it to a dictionary with the
usual BSE fields
Note that the turbomole format does not store all the fields we
have, so some fields are left blank
'''
skipchars = '*#$'
basis_lines = [l for l in basis_lines if l and not l[0] in skipchars]
bs_data = create_skel('component')
i = 0
while i < len(basis_lines):
line = basis_lines[i]
elementsym = line.split()[0]
element_Z = lut.element_Z_from_sym(elementsym)
element_Z = str(element_Z)
if not element_Z in bs_data['elements']:
bs_data['elements'][element_Z] = {}
element_data = bs_data['elements'][element_Z]
if "ecp" in line.lower():
if not 'ecp_potentials' in element_data:
element_data['ecp_potentials'] = []
i += 1
line = basis_lines[i]
lsplt = line.split('=')
maxam = int(lsplt[2])
n_elec = int(lsplt[1].split()[0])
amlist = [maxam]
amlist.extend(list(range(0, maxam)))
i += 1
for shell_am in amlist:
shell_am2 = lut.amchar_to_int(basis_lines[i][0])[0]
if shell_am2 != shell_am:
raise RuntimeError("AM not in expected order?")
i += 1
ecp_shell = {
'ecp_type': 'scalar_ecp',
'angular_momentum': [shell_am],
}
ecp_exponents = []
ecp_rexponents = []
ecp_coefficients = []
while i < len(basis_lines) and basis_lines[i][0].isalpha() is False:
lsplt = basis_lines[i].split()
ecp_exponents.append(lsplt[2])
ecp_rexponents.append(int(lsplt[1]))
ecp_coefficients.append(lsplt[0])
i += 1
ecp_shell['r_exponents'] = ecp_rexponents
ecp_shell['gaussian_exponents'] = ecp_exponents
ecp_shell['coefficients'] = [ecp_coefficients]
element_data['ecp_potentials'].append(ecp_shell)
element_data['ecp_electrons'] = n_elec
else:
if not 'electron_shells' in element_data:
element_data['electron_shells'] = []
i += 1
while i < len(basis_lines) and basis_lines[i][0].isalpha() == False:
lsplt = basis_lines[i].split()
shell_am = lut.amchar_to_int(lsplt[1])
nprim = int(lsplt[0])
if max(shell_am) <= 1:
func_type = 'gto'
else:
func_type = 'gto_spherical'
shell = {
'function_type': func_type,
'region': '',
'angular_momentum': shell_am
}
exponents = []
coefficients = []
i += 1
for j in range(nprim):
line = basis_lines[i].replace('D', 'E')
line = line.replace('d', 'E')
lsplt = line.split()
exponents.append(lsplt[0])
coefficients.append(lsplt[1:])
i += 1
shell['exponents'] = exponents
# We need to transpose the coefficient matrix
# (we store a matrix with primitives being the column index and
# general contraction being the row index)
shell['coefficients'] = list(map(list, zip(*coefficients)))
element_data['electron_shells'].append(shell)
return bs_data | def function[read_turbomole, parameter[basis_lines, fname]]:
constant[Reads turbomole-formatted file data and converts it to a dictionary with the
usual BSE fields
Note that the turbomole format does not store all the fields we
have, so some fields are left blank
]
variable[skipchars] assign[=] constant[*#$]
variable[basis_lines] assign[=] <ast.ListComp object at 0x7da2054a61d0>
variable[bs_data] assign[=] call[name[create_skel], parameter[constant[component]]]
variable[i] assign[=] constant[0]
while compare[name[i] less[<] call[name[len], parameter[name[basis_lines]]]] begin[:]
variable[line] assign[=] call[name[basis_lines]][name[i]]
variable[elementsym] assign[=] call[call[name[line].split, parameter[]]][constant[0]]
variable[element_Z] assign[=] call[name[lut].element_Z_from_sym, parameter[name[elementsym]]]
variable[element_Z] assign[=] call[name[str], parameter[name[element_Z]]]
if <ast.UnaryOp object at 0x7da2054a4b20> begin[:]
call[call[name[bs_data]][constant[elements]]][name[element_Z]] assign[=] dictionary[[], []]
variable[element_data] assign[=] call[call[name[bs_data]][constant[elements]]][name[element_Z]]
if compare[constant[ecp] in call[name[line].lower, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da2054a56c0> begin[:]
call[name[element_data]][constant[ecp_potentials]] assign[=] list[[]]
<ast.AugAssign object at 0x7da2054a7760>
variable[line] assign[=] call[name[basis_lines]][name[i]]
variable[lsplt] assign[=] call[name[line].split, parameter[constant[=]]]
variable[maxam] assign[=] call[name[int], parameter[call[name[lsplt]][constant[2]]]]
variable[n_elec] assign[=] call[name[int], parameter[call[call[call[name[lsplt]][constant[1]].split, parameter[]]][constant[0]]]]
variable[amlist] assign[=] list[[<ast.Name object at 0x7da2054a5a20>]]
call[name[amlist].extend, parameter[call[name[list], parameter[call[name[range], parameter[constant[0], name[maxam]]]]]]]
<ast.AugAssign object at 0x7da2054a7700>
for taget[name[shell_am]] in starred[name[amlist]] begin[:]
variable[shell_am2] assign[=] call[call[name[lut].amchar_to_int, parameter[call[call[name[basis_lines]][name[i]]][constant[0]]]]][constant[0]]
if compare[name[shell_am2] not_equal[!=] name[shell_am]] begin[:]
<ast.Raise object at 0x7da2054a42b0>
<ast.AugAssign object at 0x7da2054a6140>
variable[ecp_shell] assign[=] dictionary[[<ast.Constant object at 0x7da2054a7310>, <ast.Constant object at 0x7da2054a52d0>], [<ast.Constant object at 0x7da2054a58d0>, <ast.List object at 0x7da2054a65f0>]]
variable[ecp_exponents] assign[=] list[[]]
variable[ecp_rexponents] assign[=] list[[]]
variable[ecp_coefficients] assign[=] list[[]]
while <ast.BoolOp object at 0x7da2054a4c40> begin[:]
variable[lsplt] assign[=] call[call[name[basis_lines]][name[i]].split, parameter[]]
call[name[ecp_exponents].append, parameter[call[name[lsplt]][constant[2]]]]
call[name[ecp_rexponents].append, parameter[call[name[int], parameter[call[name[lsplt]][constant[1]]]]]]
call[name[ecp_coefficients].append, parameter[call[name[lsplt]][constant[0]]]]
<ast.AugAssign object at 0x7da2054a7910>
call[name[ecp_shell]][constant[r_exponents]] assign[=] name[ecp_rexponents]
call[name[ecp_shell]][constant[gaussian_exponents]] assign[=] name[ecp_exponents]
call[name[ecp_shell]][constant[coefficients]] assign[=] list[[<ast.Name object at 0x7da2054a6230>]]
call[call[name[element_data]][constant[ecp_potentials]].append, parameter[name[ecp_shell]]]
call[name[element_data]][constant[ecp_electrons]] assign[=] name[n_elec]
return[name[bs_data]] | keyword[def] identifier[read_turbomole] ( identifier[basis_lines] , identifier[fname] ):
literal[string]
identifier[skipchars] = literal[string]
identifier[basis_lines] =[ identifier[l] keyword[for] identifier[l] keyword[in] identifier[basis_lines] keyword[if] identifier[l] keyword[and] keyword[not] identifier[l] [ literal[int] ] keyword[in] identifier[skipchars] ]
identifier[bs_data] = identifier[create_skel] ( literal[string] )
identifier[i] = literal[int]
keyword[while] identifier[i] < identifier[len] ( identifier[basis_lines] ):
identifier[line] = identifier[basis_lines] [ identifier[i] ]
identifier[elementsym] = identifier[line] . identifier[split] ()[ literal[int] ]
identifier[element_Z] = identifier[lut] . identifier[element_Z_from_sym] ( identifier[elementsym] )
identifier[element_Z] = identifier[str] ( identifier[element_Z] )
keyword[if] keyword[not] identifier[element_Z] keyword[in] identifier[bs_data] [ literal[string] ]:
identifier[bs_data] [ literal[string] ][ identifier[element_Z] ]={}
identifier[element_data] = identifier[bs_data] [ literal[string] ][ identifier[element_Z] ]
keyword[if] literal[string] keyword[in] identifier[line] . identifier[lower] ():
keyword[if] keyword[not] literal[string] keyword[in] identifier[element_data] :
identifier[element_data] [ literal[string] ]=[]
identifier[i] += literal[int]
identifier[line] = identifier[basis_lines] [ identifier[i] ]
identifier[lsplt] = identifier[line] . identifier[split] ( literal[string] )
identifier[maxam] = identifier[int] ( identifier[lsplt] [ literal[int] ])
identifier[n_elec] = identifier[int] ( identifier[lsplt] [ literal[int] ]. identifier[split] ()[ literal[int] ])
identifier[amlist] =[ identifier[maxam] ]
identifier[amlist] . identifier[extend] ( identifier[list] ( identifier[range] ( literal[int] , identifier[maxam] )))
identifier[i] += literal[int]
keyword[for] identifier[shell_am] keyword[in] identifier[amlist] :
identifier[shell_am2] = identifier[lut] . identifier[amchar_to_int] ( identifier[basis_lines] [ identifier[i] ][ literal[int] ])[ literal[int] ]
keyword[if] identifier[shell_am2] != identifier[shell_am] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[i] += literal[int]
identifier[ecp_shell] ={
literal[string] : literal[string] ,
literal[string] :[ identifier[shell_am] ],
}
identifier[ecp_exponents] =[]
identifier[ecp_rexponents] =[]
identifier[ecp_coefficients] =[]
keyword[while] identifier[i] < identifier[len] ( identifier[basis_lines] ) keyword[and] identifier[basis_lines] [ identifier[i] ][ literal[int] ]. identifier[isalpha] () keyword[is] keyword[False] :
identifier[lsplt] = identifier[basis_lines] [ identifier[i] ]. identifier[split] ()
identifier[ecp_exponents] . identifier[append] ( identifier[lsplt] [ literal[int] ])
identifier[ecp_rexponents] . identifier[append] ( identifier[int] ( identifier[lsplt] [ literal[int] ]))
identifier[ecp_coefficients] . identifier[append] ( identifier[lsplt] [ literal[int] ])
identifier[i] += literal[int]
identifier[ecp_shell] [ literal[string] ]= identifier[ecp_rexponents]
identifier[ecp_shell] [ literal[string] ]= identifier[ecp_exponents]
identifier[ecp_shell] [ literal[string] ]=[ identifier[ecp_coefficients] ]
identifier[element_data] [ literal[string] ]. identifier[append] ( identifier[ecp_shell] )
identifier[element_data] [ literal[string] ]= identifier[n_elec]
keyword[else] :
keyword[if] keyword[not] literal[string] keyword[in] identifier[element_data] :
identifier[element_data] [ literal[string] ]=[]
identifier[i] += literal[int]
keyword[while] identifier[i] < identifier[len] ( identifier[basis_lines] ) keyword[and] identifier[basis_lines] [ identifier[i] ][ literal[int] ]. identifier[isalpha] ()== keyword[False] :
identifier[lsplt] = identifier[basis_lines] [ identifier[i] ]. identifier[split] ()
identifier[shell_am] = identifier[lut] . identifier[amchar_to_int] ( identifier[lsplt] [ literal[int] ])
identifier[nprim] = identifier[int] ( identifier[lsplt] [ literal[int] ])
keyword[if] identifier[max] ( identifier[shell_am] )<= literal[int] :
identifier[func_type] = literal[string]
keyword[else] :
identifier[func_type] = literal[string]
identifier[shell] ={
literal[string] : identifier[func_type] ,
literal[string] : literal[string] ,
literal[string] : identifier[shell_am]
}
identifier[exponents] =[]
identifier[coefficients] =[]
identifier[i] += literal[int]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[nprim] ):
identifier[line] = identifier[basis_lines] [ identifier[i] ]. identifier[replace] ( literal[string] , literal[string] )
identifier[line] = identifier[line] . identifier[replace] ( literal[string] , literal[string] )
identifier[lsplt] = identifier[line] . identifier[split] ()
identifier[exponents] . identifier[append] ( identifier[lsplt] [ literal[int] ])
identifier[coefficients] . identifier[append] ( identifier[lsplt] [ literal[int] :])
identifier[i] += literal[int]
identifier[shell] [ literal[string] ]= identifier[exponents]
identifier[shell] [ literal[string] ]= identifier[list] ( identifier[map] ( identifier[list] , identifier[zip] (* identifier[coefficients] )))
identifier[element_data] [ literal[string] ]. identifier[append] ( identifier[shell] )
keyword[return] identifier[bs_data] | def read_turbomole(basis_lines, fname):
"""Reads turbomole-formatted file data and converts it to a dictionary with the
usual BSE fields
Note that the turbomole format does not store all the fields we
have, so some fields are left blank
"""
skipchars = '*#$'
basis_lines = [l for l in basis_lines if l and (not l[0] in skipchars)]
bs_data = create_skel('component')
i = 0
while i < len(basis_lines):
line = basis_lines[i]
elementsym = line.split()[0]
element_Z = lut.element_Z_from_sym(elementsym)
element_Z = str(element_Z)
if not element_Z in bs_data['elements']:
bs_data['elements'][element_Z] = {} # depends on [control=['if'], data=[]]
element_data = bs_data['elements'][element_Z]
if 'ecp' in line.lower():
if not 'ecp_potentials' in element_data:
element_data['ecp_potentials'] = [] # depends on [control=['if'], data=[]]
i += 1
line = basis_lines[i]
lsplt = line.split('=')
maxam = int(lsplt[2])
n_elec = int(lsplt[1].split()[0])
amlist = [maxam]
amlist.extend(list(range(0, maxam)))
i += 1
for shell_am in amlist:
shell_am2 = lut.amchar_to_int(basis_lines[i][0])[0]
if shell_am2 != shell_am:
raise RuntimeError('AM not in expected order?') # depends on [control=['if'], data=[]]
i += 1
ecp_shell = {'ecp_type': 'scalar_ecp', 'angular_momentum': [shell_am]}
ecp_exponents = []
ecp_rexponents = []
ecp_coefficients = []
while i < len(basis_lines) and basis_lines[i][0].isalpha() is False:
lsplt = basis_lines[i].split()
ecp_exponents.append(lsplt[2])
ecp_rexponents.append(int(lsplt[1]))
ecp_coefficients.append(lsplt[0])
i += 1 # depends on [control=['while'], data=[]]
ecp_shell['r_exponents'] = ecp_rexponents
ecp_shell['gaussian_exponents'] = ecp_exponents
ecp_shell['coefficients'] = [ecp_coefficients]
element_data['ecp_potentials'].append(ecp_shell) # depends on [control=['for'], data=['shell_am']]
element_data['ecp_electrons'] = n_elec # depends on [control=['if'], data=[]]
else:
if not 'electron_shells' in element_data:
element_data['electron_shells'] = [] # depends on [control=['if'], data=[]]
i += 1
while i < len(basis_lines) and basis_lines[i][0].isalpha() == False:
lsplt = basis_lines[i].split()
shell_am = lut.amchar_to_int(lsplt[1])
nprim = int(lsplt[0])
if max(shell_am) <= 1:
func_type = 'gto' # depends on [control=['if'], data=[]]
else:
func_type = 'gto_spherical'
shell = {'function_type': func_type, 'region': '', 'angular_momentum': shell_am}
exponents = []
coefficients = []
i += 1
for j in range(nprim):
line = basis_lines[i].replace('D', 'E')
line = line.replace('d', 'E')
lsplt = line.split()
exponents.append(lsplt[0])
coefficients.append(lsplt[1:])
i += 1 # depends on [control=['for'], data=[]]
shell['exponents'] = exponents
# We need to transpose the coefficient matrix
# (we store a matrix with primitives being the column index and
# general contraction being the row index)
shell['coefficients'] = list(map(list, zip(*coefficients)))
element_data['electron_shells'].append(shell) # depends on [control=['while'], data=[]] # depends on [control=['while'], data=['i']]
return bs_data |
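The final step of read_turbomole transposes the coefficient matrix from one row per primitive (as read from the file) to one row per general contraction. That zip(*...) idiom in isolation:

coefficients = [['0.1', '0.9'],   # primitive 1: two contraction columns
                ['0.2', '0.8'],   # primitive 2
                ['0.3', '0.7']]   # primitive 3
transposed = list(map(list, zip(*coefficients)))
print(transposed)  # [['0.1', '0.2', '0.3'], ['0.9', '0.8', '0.7']]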
def parallel_bulk(
client,
actions,
thread_count=4,
chunk_size=500,
max_chunk_bytes=100 * 1024 * 1024,
queue_size=4,
expand_action_callback=expand_action,
*args,
**kwargs
):
"""
Parallel version of the bulk helper run in multiple threads at once.
:arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
:arg actions: iterator containing the actions
:arg thread_count: size of the threadpool to use for the bulk requests
:arg chunk_size: number of docs in one chunk sent to es (default: 500)
:arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
:arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
from the execution of the last chunk when some occur. By default we raise.
:arg raise_on_exception: if ``False`` then don't propagate exceptions from
call to ``bulk`` and just report the items that failed as failed.
:arg expand_action_callback: callback executed on each action passed in,
should return a tuple containing the action line and the data line
(`None` if data line should be omitted).
:arg queue_size: size of the task queue between the main thread (producing
chunks to send) and the processing threads.
"""
# Avoid importing multiprocessing unless parallel_bulk is used
# to avoid exceptions on restricted environments like App Engine
from multiprocessing.pool import ThreadPool
actions = map(expand_action_callback, actions)
class BlockingPool(ThreadPool):
def _setup_queues(self):
super(BlockingPool, self)._setup_queues()
# The queue must be at least the size of the number of threads to
# prevent hanging when inserting sentinel values during teardown.
self._inqueue = Queue(max(queue_size, thread_count))
self._quick_put = self._inqueue.put
pool = BlockingPool(thread_count)
try:
for result in pool.imap(
lambda bulk_chunk: list(
_process_bulk_chunk(
client, bulk_chunk[1], bulk_chunk[0], *args, **kwargs
)
),
_chunk_actions(
actions, chunk_size, max_chunk_bytes, client.transport.serializer
),
):
for item in result:
yield item
finally:
pool.close()
pool.join() | def function[parallel_bulk, parameter[client, actions, thread_count, chunk_size, max_chunk_bytes, queue_size, expand_action_callback]]:
constant[
Parallel version of the bulk helper run in multiple threads at once.
:arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
:arg actions: iterator containing the actions
:arg thread_count: size of the threadpool to use for the bulk requests
:arg chunk_size: number of docs in one chunk sent to es (default: 500)
:arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
:arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
from the execution of the last chunk when some occur. By default we raise.
:arg raise_on_exception: if ``False`` then don't propagate exceptions from
call to ``bulk`` and just report the items that failed as failed.
:arg expand_action_callback: callback executed on each action passed in,
should return a tuple containing the action line and the data line
(`None` if data line should be omitted).
:arg queue_size: size of the task queue between the main thread (producing
chunks to send) and the processing threads.
]
from relative_module[multiprocessing.pool] import module[ThreadPool]
variable[actions] assign[=] call[name[map], parameter[name[expand_action_callback], name[actions]]]
class class[BlockingPool, parameter[]] begin[:]
def function[_setup_queues, parameter[self]]:
call[call[name[super], parameter[name[BlockingPool], name[self]]]._setup_queues, parameter[]]
name[self]._inqueue assign[=] call[name[Queue], parameter[call[name[max], parameter[name[queue_size], name[thread_count]]]]]
name[self]._quick_put assign[=] name[self]._inqueue.put
variable[pool] assign[=] call[name[BlockingPool], parameter[name[thread_count]]]
<ast.Try object at 0x7da1b21e3b50> | keyword[def] identifier[parallel_bulk] (
identifier[client] ,
identifier[actions] ,
identifier[thread_count] = literal[int] ,
identifier[chunk_size] = literal[int] ,
identifier[max_chunk_bytes] = literal[int] * literal[int] * literal[int] ,
identifier[queue_size] = literal[int] ,
identifier[expand_action_callback] = identifier[expand_action] ,
* identifier[args] ,
** identifier[kwargs]
):
literal[string]
keyword[from] identifier[multiprocessing] . identifier[pool] keyword[import] identifier[ThreadPool]
identifier[actions] = identifier[map] ( identifier[expand_action_callback] , identifier[actions] )
keyword[class] identifier[BlockingPool] ( identifier[ThreadPool] ):
keyword[def] identifier[_setup_queues] ( identifier[self] ):
identifier[super] ( identifier[BlockingPool] , identifier[self] ). identifier[_setup_queues] ()
identifier[self] . identifier[_inqueue] = identifier[Queue] ( identifier[max] ( identifier[queue_size] , identifier[thread_count] ))
identifier[self] . identifier[_quick_put] = identifier[self] . identifier[_inqueue] . identifier[put]
identifier[pool] = identifier[BlockingPool] ( identifier[thread_count] )
keyword[try] :
keyword[for] identifier[result] keyword[in] identifier[pool] . identifier[imap] (
keyword[lambda] identifier[bulk_chunk] : identifier[list] (
identifier[_process_bulk_chunk] (
identifier[client] , identifier[bulk_chunk] [ literal[int] ], identifier[bulk_chunk] [ literal[int] ],* identifier[args] ,** identifier[kwargs]
)
),
identifier[_chunk_actions] (
identifier[actions] , identifier[chunk_size] , identifier[max_chunk_bytes] , identifier[client] . identifier[transport] . identifier[serializer]
),
):
keyword[for] identifier[item] keyword[in] identifier[result] :
keyword[yield] identifier[item]
keyword[finally] :
identifier[pool] . identifier[close] ()
identifier[pool] . identifier[join] () | def parallel_bulk(client, actions, thread_count=4, chunk_size=500, max_chunk_bytes=100 * 1024 * 1024, queue_size=4, expand_action_callback=expand_action, *args, **kwargs):
"""
Parallel version of the bulk helper run in multiple threads at once.
:arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
:arg actions: iterator containing the actions
:arg thread_count: size of the threadpool to use for the bulk requests
:arg chunk_size: number of docs in one chunk sent to es (default: 500)
:arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
:arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
from the execution of the last chunk when some occur. By default we raise.
:arg raise_on_exception: if ``False`` then don't propagate exceptions from
call to ``bulk`` and just report the items that failed as failed.
:arg expand_action_callback: callback executed on each action passed in,
should return a tuple containing the action line and the data line
(`None` if data line should be omitted).
:arg queue_size: size of the task queue between the main thread (producing
chunks to send) and the processing threads.
"""
# Avoid importing multiprocessing unless parallel_bulk is used
# to avoid exceptions on restricted environments like App Engine
from multiprocessing.pool import ThreadPool
actions = map(expand_action_callback, actions)
class BlockingPool(ThreadPool):
def _setup_queues(self):
super(BlockingPool, self)._setup_queues() # The queue must be at least the size of the number of threads to
# prevent hanging when inserting sentinel values during teardown.
self._inqueue = Queue(max(queue_size, thread_count))
self._quick_put = self._inqueue.put
pool = BlockingPool(thread_count)
try:
for result in pool.imap(lambda bulk_chunk: list(_process_bulk_chunk(client, bulk_chunk[1], bulk_chunk[0], *args, **kwargs)), _chunk_actions(actions, chunk_size, max_chunk_bytes, client.transport.serializer)):
for item in result:
yield item # depends on [control=['for'], data=['item']] # depends on [control=['for'], data=['result']] # depends on [control=['try'], data=[]]
finally:
pool.close()
pool.join() |
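The BlockingPool subclass above exists because ThreadPool's internal task queue is unbounded by default, so a fast producer would buffer every chunk in memory; swapping in a bounded Queue makes put() block and applies backpressure. A self-contained sketch of that trick (_setup_queues and _inqueue are CPython implementation details, so this may break on other interpreters):

from multiprocessing.pool import ThreadPool
from queue import Queue

class BlockingPool(ThreadPool):
    def _setup_queues(self):
        super()._setup_queues()
        self._inqueue = Queue(maxsize=8)  # bounded: put() blocks when full
        self._quick_put = self._inqueue.put

pool = BlockingPool(4)
print(sum(pool.imap(lambda x: x * x, range(10))))  # 285
pool.close()
pool.join()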
def add_group(self, number, name, led_type):
""" Add a group.
:param number: Group number (1-4).
:param name: Group name.
:param led_type: Either `RGBW`, `WRGB`, `RGBWW`, `WHITE`, `DIMMER` or `BRIDGE_LED`.
:returns: Added group.
"""
group = group_factory(self, number, name, led_type)
self.groups.append(group)
return group | def function[add_group, parameter[self, number, name, led_type]]:
constant[ Add a group.
:param number: Group number (1-4).
:param name: Group name.
:param led_type: Either `RGBW`, `WRGB`, `RGBWW`, `WHITE`, `DIMMER` or `BRIDGE_LED`.
:returns: Added group.
]
variable[group] assign[=] call[name[group_factory], parameter[name[self], name[number], name[name], name[led_type]]]
call[name[self].groups.append, parameter[name[group]]]
return[name[group]] | keyword[def] identifier[add_group] ( identifier[self] , identifier[number] , identifier[name] , identifier[led_type] ):
literal[string]
identifier[group] = identifier[group_factory] ( identifier[self] , identifier[number] , identifier[name] , identifier[led_type] )
identifier[self] . identifier[groups] . identifier[append] ( identifier[group] )
keyword[return] identifier[group] | def add_group(self, number, name, led_type):
""" Add a group.
:param number: Group number (1-4).
:param name: Group name.
:param led_type: Either `RGBW`, `WRGB`, `RGBWW`, `WHITE`, `DIMMER` or `BRIDGE_LED`.
:returns: Added group.
"""
group = group_factory(self, number, name, led_type)
self.groups.append(group)
return group |
def _get_current_migration_state(self, loader, apps):
"""
Extract the most recent migrations from the relevant apps.
If no migrations have been performed, return 'zero' as the most recent migration for the app.
This should only be called from list_migrations().
"""
# Only care about applied migrations for the passed-in apps.
apps = set(apps)
relevant_applied = [migration for migration in loader.applied_migrations if migration[0] in apps]
# Sort them by the most recent migration and convert to a dictionary,
# leaving apps as keys and most recent migration as values.
# NB: this is a dirty trick
most_recents = dict(sorted(relevant_applied, key=lambda m: m[1]))
# Fill in the apps with no migrations with 'zero'.
# NOTE: Unicode Django application names are unsupported.
most_recents = [[app, 'zero' if app not in most_recents else str(most_recents[app])] for app in apps]
return most_recents | def function[_get_current_migration_state, parameter[self, loader, apps]]:
constant[
Extract the most recent migrations from the relevant apps.
If no migrations have been performed, return 'zero' as the most recent migration for the app.
This should only be called from list_migrations().
]
variable[apps] assign[=] call[name[set], parameter[name[apps]]]
variable[relevant_applied] assign[=] <ast.ListComp object at 0x7da1b1800940>
variable[most_recents] assign[=] call[name[dict], parameter[call[name[sorted], parameter[name[relevant_applied]]]]]
variable[most_recents] assign[=] <ast.ListComp object at 0x7da1b1801030>
return[name[most_recents]] | keyword[def] identifier[_get_current_migration_state] ( identifier[self] , identifier[loader] , identifier[apps] ):
literal[string]
identifier[apps] = identifier[set] ( identifier[apps] )
identifier[relevant_applied] =[ identifier[migration] keyword[for] identifier[migration] keyword[in] identifier[loader] . identifier[applied_migrations] keyword[if] identifier[migration] [ literal[int] ] keyword[in] identifier[apps] ]
identifier[most_recents] = identifier[dict] ( identifier[sorted] ( identifier[relevant_applied] , identifier[key] = keyword[lambda] identifier[m] : identifier[m] [ literal[int] ]))
identifier[most_recents] =[[ identifier[app] , literal[string] keyword[if] identifier[app] keyword[not] keyword[in] identifier[most_recents] keyword[else] identifier[str] ( identifier[most_recents] [ identifier[app] ])] keyword[for] identifier[app] keyword[in] identifier[apps] ]
keyword[return] identifier[most_recents] | def _get_current_migration_state(self, loader, apps):
"""
Extract the most recent migrations from the relevant apps.
If no migrations have been performed, return 'zero' as the most recent migration for the app.
This should only be called from list_migrations().
"""
# Only care about applied migrations for the passed-in apps.
apps = set(apps)
relevant_applied = [migration for migration in loader.applied_migrations if migration[0] in apps]
# Sort them by the most recent migration and convert to a dictionary,
# leaving apps as keys and most recent migration as values.
# NB: this is a dirty trick
most_recents = dict(sorted(relevant_applied, key=lambda m: m[1]))
# Fill in the apps with no migrations with 'zero'.
# NOTE: Unicode Django application names are unsupported.
most_recents = [[app, 'zero' if app not in most_recents else str(most_recents[app])] for app in apps]
return most_recents |
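The "dirty trick" in the method above relies on dict() keeping the last value seen per key: sorting the (app, migration) pairs by migration name first leaves each app mapped to its lexicographically latest migration, which matches chronology only because Django migration names start with zero-padded numbers. In isolation:

applied = [('shop', '0002_add_sku'), ('auth', '0001_initial'),
           ('shop', '0001_initial')]
most_recent = dict(sorted(applied, key=lambda m: m[1]))
print(most_recent)  # {'auth': '0001_initial', 'shop': '0002_add_sku'}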
def disable_host_svc_checks(self, host):
"""Disable service checks for a host
Format of the line that triggers function call::
DISABLE_HOST_SVC_CHECKS;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
"""
for service_id in host.services:
if service_id in self.daemon.services:
service = self.daemon.services[service_id]
self.disable_svc_check(service)
self.send_an_element(service.get_update_status_brok()) | def function[disable_host_svc_checks, parameter[self, host]]:
constant[Disable service checks for a host
Format of the line that triggers function call::
DISABLE_HOST_SVC_CHECKS;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
]
for taget[name[service_id]] in starred[name[host].services] begin[:]
if compare[name[service_id] in name[self].daemon.services] begin[:]
variable[service] assign[=] call[name[self].daemon.services][name[service_id]]
call[name[self].disable_svc_check, parameter[name[service]]]
call[name[self].send_an_element, parameter[call[name[service].get_update_status_brok, parameter[]]]] | keyword[def] identifier[disable_host_svc_checks] ( identifier[self] , identifier[host] ):
literal[string]
keyword[for] identifier[service_id] keyword[in] identifier[host] . identifier[services] :
keyword[if] identifier[service_id] keyword[in] identifier[self] . identifier[daemon] . identifier[services] :
identifier[service] = identifier[self] . identifier[daemon] . identifier[services] [ identifier[service_id] ]
identifier[self] . identifier[disable_svc_check] ( identifier[service] )
identifier[self] . identifier[send_an_element] ( identifier[service] . identifier[get_update_status_brok] ()) | def disable_host_svc_checks(self, host):
"""Disable service checks for a host
Format of the line that triggers function call::
DISABLE_HOST_SVC_CHECKS;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
"""
for service_id in host.services:
if service_id in self.daemon.services:
service = self.daemon.services[service_id]
self.disable_svc_check(service)
self.send_an_element(service.get_update_status_brok()) # depends on [control=['if'], data=['service_id']] # depends on [control=['for'], data=['service_id']] |
def dzip(items1, items2, cls=dict):
"""
Zips elementwise pairs between items1 and items2 into a dictionary. Values
from items2 can be broadcast onto items1.
Args:
items1 (Iterable): full sequence
items2 (Iterable): can either be a sequence of one item or a sequence
of equal length to `items1`
cls (Type[dict]): dictionary type to use. Defaults to dict, but could
be ordered dict instead.
Returns:
dict: similar to dict(zip(items1, items2))
Example:
>>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([], [4]) == {}
"""
try:
len(items1)
except TypeError:
items1 = list(items1)
try:
len(items2)
except TypeError:
items2 = list(items2)
if len(items1) == 0 and len(items2) == 1:
# Corner case:
# allow the first list to be empty and the second list to broadcast a
# value. This means that the equality check wont work for the case
# where items1 and items2 are supposed to correspond, but the length of
# items2 is 1.
items2 = []
if len(items2) == 1 and len(items1) > 1:
items2 = items2 * len(items1)
if len(items1) != len(items2):
raise ValueError('out of alignment len(items1)=%r, len(items2)=%r' % (
len(items1), len(items2)))
return cls(zip(items1, items2)) | def function[dzip, parameter[items1, items2, cls]]:
constant[
Zips elementwise pairs between items1 and items2 into a dictionary. Values
from items2 can be broadcast onto items1.
Args:
items1 (Iterable): full sequence
items2 (Iterable): can either be a sequence of one item or a sequence
of equal length to `items1`
cls (Type[dict]): dictionary type to use. Defaults to dict, but could
be ordered dict instead.
Returns:
dict: similar to dict(zip(items1, items2))
Example:
>>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([], [4]) == {}
]
<ast.Try object at 0x7da1b01d9600>
<ast.Try object at 0x7da1b01d94b0>
if <ast.BoolOp object at 0x7da1b01d8e20> begin[:]
variable[items2] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b01dbb80> begin[:]
variable[items2] assign[=] binary_operation[name[items2] * call[name[len], parameter[name[items1]]]]
if compare[call[name[len], parameter[name[items1]]] not_equal[!=] call[name[len], parameter[name[items2]]]] begin[:]
<ast.Raise object at 0x7da1b01d81f0>
return[call[name[cls], parameter[call[name[zip], parameter[name[items1], name[items2]]]]]] | keyword[def] identifier[dzip] ( identifier[items1] , identifier[items2] , identifier[cls] = identifier[dict] ):
literal[string]
keyword[try] :
identifier[len] ( identifier[items1] )
keyword[except] identifier[TypeError] :
identifier[items1] = identifier[list] ( identifier[items1] )
keyword[try] :
identifier[len] ( identifier[items2] )
keyword[except] identifier[TypeError] :
identifier[items2] = identifier[list] ( identifier[items2] )
keyword[if] identifier[len] ( identifier[items1] )== literal[int] keyword[and] identifier[len] ( identifier[items2] )== literal[int] :
identifier[items2] =[]
keyword[if] identifier[len] ( identifier[items2] )== literal[int] keyword[and] identifier[len] ( identifier[items1] )> literal[int] :
identifier[items2] = identifier[items2] * identifier[len] ( identifier[items1] )
keyword[if] identifier[len] ( identifier[items1] )!= identifier[len] ( identifier[items2] ):
keyword[raise] identifier[ValueError] ( literal[string] %(
identifier[len] ( identifier[items1] ), identifier[len] ( identifier[items2] )))
keyword[return] identifier[cls] ( identifier[zip] ( identifier[items1] , identifier[items2] )) | def dzip(items1, items2, cls=dict):
"""
Zips elementwise pairs between items1 and items2 into a dictionary. Values
from items2 can be broadcast onto items1.
Args:
items1 (Iterable): full sequence
items2 (Iterable): can either be a sequence of one item or a sequence
of equal length to `items1`
cls (Type[dict]): dictionary type to use. Defaults to dict, but could
be ordered dict instead.
Returns:
dict: similar to dict(zip(items1, items2))
Example:
>>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([], [4]) == {}
"""
try:
len(items1) # depends on [control=['try'], data=[]]
except TypeError:
items1 = list(items1) # depends on [control=['except'], data=[]]
try:
len(items2) # depends on [control=['try'], data=[]]
except TypeError:
items2 = list(items2) # depends on [control=['except'], data=[]]
if len(items1) == 0 and len(items2) == 1:
# Corner case:
# allow the first list to be empty and the second list to broadcast a
        # value. This means that the equality check won't work for the case
# where items1 and items2 are supposed to correspond, but the length of
# items2 is 1.
items2 = [] # depends on [control=['if'], data=[]]
if len(items2) == 1 and len(items1) > 1:
items2 = items2 * len(items1) # depends on [control=['if'], data=[]]
if len(items1) != len(items2):
raise ValueError('out of alignment len(items1)=%r, len(items2)=%r' % (len(items1), len(items2))) # depends on [control=['if'], data=[]]
return cls(zip(items1, items2)) |
def log_pdf(self, y, mu, weights=None):
"""
computes the log of the pdf or pmf of the values under the current distribution
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
weights : array-like shape (n,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
pdf/pmf : np.array of length n
"""
if weights is None:
weights = np.ones_like(mu)
# in Poisson regression weights are proportional to the exposure
# so we want to pump up all our predictions
    # NOTE: we assume the targets are counts, not rates.
    # i.e. if observations were scaled to account for exposure, they have
# been rescaled before calling this function.
# since some samples have higher exposure,
# they also need to have higher variance,
# we do this by multiplying mu by the weight=exposure
mu = mu * weights
return sp.stats.poisson.logpmf(y, mu=mu) | def function[log_pdf, parameter[self, y, mu, weights]]:
constant[
computes the log of the pdf or pmf of the values under the current distribution
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
weights : array-like shape (n,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
pdf/pmf : np.array of length n
]
if compare[name[weights] is constant[None]] begin[:]
variable[weights] assign[=] call[name[np].ones_like, parameter[name[mu]]]
variable[mu] assign[=] binary_operation[name[mu] * name[weights]]
return[call[name[sp].stats.poisson.logpmf, parameter[name[y]]]] | keyword[def] identifier[log_pdf] ( identifier[self] , identifier[y] , identifier[mu] , identifier[weights] = keyword[None] ):
literal[string]
keyword[if] identifier[weights] keyword[is] keyword[None] :
identifier[weights] = identifier[np] . identifier[ones_like] ( identifier[mu] )
identifier[mu] = identifier[mu] * identifier[weights]
keyword[return] identifier[sp] . identifier[stats] . identifier[poisson] . identifier[logpmf] ( identifier[y] , identifier[mu] = identifier[mu] ) | def log_pdf(self, y, mu, weights=None):
"""
computes the log of the pdf or pmf of the values under the current distribution
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
weights : array-like shape (n,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
pdf/pmf : np.array of length n
"""
if weights is None:
weights = np.ones_like(mu) # depends on [control=['if'], data=['weights']]
# in Poisson regression weights are proportional to the exposure
# so we want to pump up all our predictions
    # NOTE: we assume the targets are counts, not rates.
    # i.e. if observations were scaled to account for exposure, they have
# been rescaled before calling this function.
# since some samples have higher exposure,
# they also need to have higher variance,
# we do this by multiplying mu by the weight=exposure
mu = mu * weights
return sp.stats.poisson.logpmf(y, mu=mu) |
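
A small standalone check of the exposure scaling above, using scipy directly; the class wrapper is omitted and the numbers are illustrative:

import numpy as np
import scipy as sp
import scipy.stats

# With exposure weights, each sample's Poisson mean is scaled: mu_i * w_i.
y = np.array([2.0, 10.0])
mu = np.array([2.0, 2.0])        # same underlying rate for both samples
weights = np.array([1.0, 5.0])   # second sample has 5x the exposure
print(sp.stats.poisson.logpmf(y, mu=mu * weights))
# The high-exposure sample is evaluated against mean 10, not 2.
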
def dataReceived(self, data):
"""
Translates bytes into lines, and calls lineReceived.
Copied from ``twisted.protocols.basic.LineOnlyReceiver`` but using
str.splitlines() to split on ``\r\n``, ``\n``, and ``\r``.
"""
self.resetTimeout()
lines = (self._buffer + data).splitlines()
# str.splitlines() doesn't split the string after a trailing newline
# character so we must check if there is a trailing newline and, if so,
# clear the buffer as the line is "complete". Else, the line is
# incomplete and we keep the last line in the buffer.
if data.endswith(b'\n') or data.endswith(b'\r'):
self._buffer = b''
else:
self._buffer = lines.pop(-1)
for line in lines:
if self.transport.disconnecting:
# this is necessary because the transport may be told to lose
# the connection by a line within a larger packet, and it is
# important to disregard all the lines in that packet following
# the one that told it to close.
return
if len(line) > self._max_length:
self.lineLengthExceeded(line)
return
else:
self.lineReceived(line)
if len(self._buffer) > self._max_length:
self.lineLengthExceeded(self._buffer)
return | def function[dataReceived, parameter[self, data]]:
constant[
Translates bytes into lines, and calls lineReceived.
Copied from ``twisted.protocols.basic.LineOnlyReceiver`` but using
str.splitlines() to split on ``
``, ``
``, and ``
``.
]
call[name[self].resetTimeout, parameter[]]
variable[lines] assign[=] call[binary_operation[name[self]._buffer + name[data]].splitlines, parameter[]]
if <ast.BoolOp object at 0x7da20c76c7c0> begin[:]
name[self]._buffer assign[=] constant[b'']
for taget[name[line]] in starred[name[lines]] begin[:]
if name[self].transport.disconnecting begin[:]
return[None]
if compare[call[name[len], parameter[name[line]]] greater[>] name[self]._max_length] begin[:]
call[name[self].lineLengthExceeded, parameter[name[line]]]
return[None]
if compare[call[name[len], parameter[name[self]._buffer]] greater[>] name[self]._max_length] begin[:]
call[name[self].lineLengthExceeded, parameter[name[self]._buffer]]
return[None] | keyword[def] identifier[dataReceived] ( identifier[self] , identifier[data] ):
literal[string]
identifier[self] . identifier[resetTimeout] ()
identifier[lines] =( identifier[self] . identifier[_buffer] + identifier[data] ). identifier[splitlines] ()
keyword[if] identifier[data] . identifier[endswith] ( literal[string] ) keyword[or] identifier[data] . identifier[endswith] ( literal[string] ):
identifier[self] . identifier[_buffer] = literal[string]
keyword[else] :
identifier[self] . identifier[_buffer] = identifier[lines] . identifier[pop] (- literal[int] )
keyword[for] identifier[line] keyword[in] identifier[lines] :
keyword[if] identifier[self] . identifier[transport] . identifier[disconnecting] :
keyword[return]
keyword[if] identifier[len] ( identifier[line] )> identifier[self] . identifier[_max_length] :
identifier[self] . identifier[lineLengthExceeded] ( identifier[line] )
keyword[return]
keyword[else] :
identifier[self] . identifier[lineReceived] ( identifier[line] )
keyword[if] identifier[len] ( identifier[self] . identifier[_buffer] )> identifier[self] . identifier[_max_length] :
identifier[self] . identifier[lineLengthExceeded] ( identifier[self] . identifier[_buffer] )
keyword[return] | def dataReceived(self, data):
"""
Translates bytes into lines, and calls lineReceived.
Copied from ``twisted.protocols.basic.LineOnlyReceiver`` but using
        str.splitlines() to split on ``\r\n``, ``\n``, and ``\r``.
"""
self.resetTimeout()
lines = (self._buffer + data).splitlines()
# str.splitlines() doesn't split the string after a trailing newline
# character so we must check if there is a trailing newline and, if so,
# clear the buffer as the line is "complete". Else, the line is
# incomplete and we keep the last line in the buffer.
if data.endswith(b'\n') or data.endswith(b'\r'):
self._buffer = b'' # depends on [control=['if'], data=[]]
else:
self._buffer = lines.pop(-1)
for line in lines:
if self.transport.disconnecting:
# this is necessary because the transport may be told to lose
# the connection by a line within a larger packet, and it is
# important to disregard all the lines in that packet following
# the one that told it to close.
return # depends on [control=['if'], data=[]]
if len(line) > self._max_length:
self.lineLengthExceeded(line)
return # depends on [control=['if'], data=[]]
else:
self.lineReceived(line) # depends on [control=['for'], data=['line']]
if len(self._buffer) > self._max_length:
self.lineLengthExceeded(self._buffer)
return # depends on [control=['if'], data=[]] |
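
The buffering rule can be exercised outside Twisted with a minimal stand-in; timeout handling, transport checks and the length limit are omitted:

# Sketch: accumulate chunks the same way, retaining a trailing partial line.
class LineBuffer(object):
    def __init__(self):
        self._buffer = b''

    def feed(self, data):
        lines = (self._buffer + data).splitlines()
        if data.endswith(b'\n') or data.endswith(b'\r'):
            self._buffer = b''
        else:
            self._buffer = lines.pop(-1)
        return lines

buf = LineBuffer()
print(buf.feed(b'foo\nba'))  # [b'foo'] -- b'ba' is held back
print(buf.feed(b'r\n'))      # [b'bar']
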
def two_phase_dP(m, x, rhol, D, L=1, rhog=None, mul=None, mug=None, sigma=None,
P=None, Pc=None, roughness=0, angle=0, Method=None,
AvailableMethods=False):
r'''This function handles calculation of two-phase liquid-gas pressure drop
for flow inside channels. 23 calculation methods are available, with
varying input requirements. A correlation will be automatically selected if
    none is specified. The full list of correlations can be obtained with the
`AvailableMethods` flag.
If no correlation is selected, the following rules are used, with the
earlier options attempted first:
* If rhog, mul, mug, and sigma are specified, use the Kim_Mudawar model
* If rhog, mul, and mug are specified, use the Chisholm model
* If mul, P, and Pc are specified, use the Zhang_Webb model
* If rhog and sigma are specified, use the Lombardi_Pedrocchi model
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
D : float
Diameter of pipe, [m]
L : float, optional
Length of pipe, [m]
rhog : float, optional
Gas density, [kg/m^3]
mul : float, optional
Viscosity of liquid, [Pa*s]
mug : float, optional
Viscosity of gas, [Pa*s]
sigma : float, optional
Surface tension, [N/m]
P : float, optional
Pressure of fluid, [Pa]
Pc : float, optional
Critical pressure of fluid, [Pa]
roughness : float, optional
Roughness of pipe for use in calculating friction factor, [m]
angle : float, optional
The angle of the pipe with respect to the horizontal, [degrees]
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
methods : list, only returned if AvailableMethods == True
List of methods which can be used to calculate two-phase pressure drop
with the given inputs.
Other Parameters
----------------
Method : string, optional
A string of the function name to use, as in the dictionary
two_phase_correlations.
AvailableMethods : bool, optional
If True, function will consider which methods which can be used to
calculate two-phase pressure drop with the given inputs and return
them as a list instead of performing a calculation.
Notes
-----
These functions may be integrated over, with properties recalculated as
the fluid's quality changes.
This model considers only the frictional pressure drop, not that due to
gravity or acceleration.
Examples
--------
>>> two_phase_dP(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6, mug=14E-6,
... sigma=0.0487, D=0.05, L=1)
840.4137796786074
'''
def list_methods():
usable_indices = []
if all([rhog, sigma]):
usable_indices.append(5)
if all([rhog, mul, mug, sigma]):
usable_indices.extend([4, 3, 102, 103]) # Differs only in the addition of roughness
if all([rhog, mul, mug]):
            usable_indices.extend([1, 2, 101]) # Differs only in the addition of roughness
if all([mul, P, Pc]):
usable_indices.append(0)
if all([rhog, mul, mug, sigma, P, angle]):
usable_indices.append(104)
return [key for key, value in two_phase_correlations.items() if value[1] in usable_indices]
if AvailableMethods:
return list_methods()
if not Method:
if all([rhog, mul, mug, sigma]):
Method = 'Kim_Mudawar' # Kim_Mudawar preferred; 3 or 4
elif all([rhog, mul, mug]):
Method = 'Chisholm' # Second choice, indexes 1 or 2
        elif all([mul, P, Pc]):
Method = 'Zhang_Webb' # Not a good choice
elif all([rhog, sigma]):
Method = 'Lombardi_Pedrocchi' # Last try
else:
            raise Exception('All possible methods require more information '
                            'than provided; provide more inputs!')
if Method in two_phase_correlations:
f, i = two_phase_correlations[Method]
if i == 0:
return f(m=m, x=x, rhol=rhol, mul=mul, P=P, Pc=Pc, D=D,
roughness=roughness, L=L)
elif i == 1:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug, D=D, L=L)
elif i == 2:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug, D=D,
L=L, roughness=roughness)
elif i == 3:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug,
sigma=sigma, D=D, L=L)
elif i == 4:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug,
sigma=sigma, D=D, L=L, roughness=roughness)
elif i == 5:
return f(m=m, x=x, rhol=rhol, rhog=rhog, sigma=sigma, D=D, L=L)
elif i == 101:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug, D=D,
L=L, roughness=roughness, rough_correction=True)
elif i == 102:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug,
sigma=sigma, D=D, L=L, roughness=roughness,
flowtype='adiabatic gas')
elif i == 103:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug,
sigma=sigma, D=D, L=L, roughness=roughness,
flowtype='flow boiling')
elif i == 104:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug,
sigma=sigma, P=P, D=D, angle=angle, L=L,
roughness=roughness, acceleration=False, g=g)
else:
        raise Exception('Failure in function')
constant[This function handles calculation of two-phase liquid-gas pressure drop
for flow inside channels. 23 calculation methods are available, with
varying input requirements. A correlation will be automatically selected if
    none is specified. The full list of correlations can be obtained with the
`AvailableMethods` flag.
If no correlation is selected, the following rules are used, with the
earlier options attempted first:
* If rhog, mul, mug, and sigma are specified, use the Kim_Mudawar model
* If rhog, mul, and mug are specified, use the Chisholm model
* If mul, P, and Pc are specified, use the Zhang_Webb model
* If rhog and sigma are specified, use the Lombardi_Pedrocchi model
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
D : float
Diameter of pipe, [m]
L : float, optional
Length of pipe, [m]
rhog : float, optional
Gas density, [kg/m^3]
mul : float, optional
Viscosity of liquid, [Pa*s]
mug : float, optional
Viscosity of gas, [Pa*s]
sigma : float, optional
Surface tension, [N/m]
P : float, optional
Pressure of fluid, [Pa]
Pc : float, optional
Critical pressure of fluid, [Pa]
roughness : float, optional
Roughness of pipe for use in calculating friction factor, [m]
angle : float, optional
The angle of the pipe with respect to the horizontal, [degrees]
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
methods : list, only returned if AvailableMethods == True
List of methods which can be used to calculate two-phase pressure drop
with the given inputs.
Other Parameters
----------------
Method : string, optional
A string of the function name to use, as in the dictionary
two_phase_correlations.
AvailableMethods : bool, optional
If True, function will consider which methods which can be used to
calculate two-phase pressure drop with the given inputs and return
them as a list instead of performing a calculation.
Notes
-----
These functions may be integrated over, with properties recalculated as
the fluid's quality changes.
This model considers only the frictional pressure drop, not that due to
gravity or acceleration.
Examples
--------
>>> two_phase_dP(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6, mug=14E-6,
... sigma=0.0487, D=0.05, L=1)
840.4137796786074
]
def function[list_methods, parameter[]]:
variable[usable_indices] assign[=] list[[]]
if call[name[all], parameter[list[[<ast.Name object at 0x7da1b12fc520>, <ast.Name object at 0x7da1b12fee30>]]]] begin[:]
call[name[usable_indices].append, parameter[constant[5]]]
if call[name[all], parameter[list[[<ast.Name object at 0x7da1b12fe9b0>, <ast.Name object at 0x7da1b12fe500>, <ast.Name object at 0x7da1b12ffb20>, <ast.Name object at 0x7da1b12fd570>]]]] begin[:]
call[name[usable_indices].extend, parameter[list[[<ast.Constant object at 0x7da1b12ff0a0>, <ast.Constant object at 0x7da1b12fff70>, <ast.Constant object at 0x7da1b12fc070>, <ast.Constant object at 0x7da1b12fd6c0>]]]]
if call[name[all], parameter[list[[<ast.Name object at 0x7da1b12fe770>, <ast.Name object at 0x7da1b12fc1f0>, <ast.Name object at 0x7da1b12fd450>]]]] begin[:]
call[name[usable_indices].extend, parameter[list[[<ast.Constant object at 0x7da1b12fcd30>, <ast.Constant object at 0x7da1b12fc130>, <ast.Constant object at 0x7da1b12fc1c0>]]]]
if call[name[all], parameter[list[[<ast.Name object at 0x7da1b12fd000>, <ast.Name object at 0x7da1b12ffd90>, <ast.Name object at 0x7da1b12ffa90>]]]] begin[:]
call[name[usable_indices].append, parameter[constant[0]]]
if call[name[all], parameter[list[[<ast.Name object at 0x7da1b12fcaf0>, <ast.Name object at 0x7da1b12fc700>, <ast.Name object at 0x7da1b12fcd60>, <ast.Name object at 0x7da1b12fef20>, <ast.Name object at 0x7da1b12fcb50>, <ast.Name object at 0x7da1b12fee00>]]]] begin[:]
call[name[usable_indices].append, parameter[constant[104]]]
return[<ast.ListComp object at 0x7da1b12fd8d0>]
if name[AvailableMethods] begin[:]
return[call[name[list_methods], parameter[]]]
if <ast.UnaryOp object at 0x7da1b12fd4e0> begin[:]
if call[name[all], parameter[list[[<ast.Name object at 0x7da1b12ff1f0>, <ast.Name object at 0x7da1b12fc610>, <ast.Name object at 0x7da1b12fc550>, <ast.Name object at 0x7da1b12fd0c0>]]]] begin[:]
variable[Method] assign[=] constant[Kim_Mudawar]
if compare[name[Method] in name[two_phase_correlations]] begin[:]
<ast.Tuple object at 0x7da1b12fd660> assign[=] call[name[two_phase_correlations]][name[Method]]
if compare[name[i] equal[==] constant[0]] begin[:]
return[call[name[f], parameter[]]] | keyword[def] identifier[two_phase_dP] ( identifier[m] , identifier[x] , identifier[rhol] , identifier[D] , identifier[L] = literal[int] , identifier[rhog] = keyword[None] , identifier[mul] = keyword[None] , identifier[mug] = keyword[None] , identifier[sigma] = keyword[None] ,
identifier[P] = keyword[None] , identifier[Pc] = keyword[None] , identifier[roughness] = literal[int] , identifier[angle] = literal[int] , identifier[Method] = keyword[None] ,
identifier[AvailableMethods] = keyword[False] ):
literal[string]
keyword[def] identifier[list_methods] ():
identifier[usable_indices] =[]
keyword[if] identifier[all] ([ identifier[rhog] , identifier[sigma] ]):
identifier[usable_indices] . identifier[append] ( literal[int] )
keyword[if] identifier[all] ([ identifier[rhog] , identifier[mul] , identifier[mug] , identifier[sigma] ]):
identifier[usable_indices] . identifier[extend] ([ literal[int] , literal[int] , literal[int] , literal[int] ])
keyword[if] identifier[all] ([ identifier[rhog] , identifier[mul] , identifier[mug] ]):
identifier[usable_indices] . identifier[extend] ([ literal[int] , literal[int] , literal[int] ])
keyword[if] identifier[all] ([ identifier[mul] , identifier[P] , identifier[Pc] ]):
identifier[usable_indices] . identifier[append] ( literal[int] )
keyword[if] identifier[all] ([ identifier[rhog] , identifier[mul] , identifier[mug] , identifier[sigma] , identifier[P] , identifier[angle] ]):
identifier[usable_indices] . identifier[append] ( literal[int] )
keyword[return] [ identifier[key] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[two_phase_correlations] . identifier[items] () keyword[if] identifier[value] [ literal[int] ] keyword[in] identifier[usable_indices] ]
keyword[if] identifier[AvailableMethods] :
keyword[return] identifier[list_methods] ()
keyword[if] keyword[not] identifier[Method] :
keyword[if] identifier[all] ([ identifier[rhog] , identifier[mul] , identifier[mug] , identifier[sigma] ]):
identifier[Method] = literal[string]
keyword[elif] identifier[all] ([ identifier[rhog] , identifier[mul] , identifier[mug] ]):
identifier[Method] = literal[string]
keyword[elif] identifier[all] ([ identifier[mul] , identifier[P] , identifier[Pc] ,]):
identifier[Method] = literal[string]
keyword[elif] identifier[all] ([ identifier[rhog] , identifier[sigma] ]):
identifier[Method] = literal[string]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[Method] keyword[in] identifier[two_phase_correlations] :
identifier[f] , identifier[i] = identifier[two_phase_correlations] [ identifier[Method] ]
keyword[if] identifier[i] == literal[int] :
keyword[return] identifier[f] ( identifier[m] = identifier[m] , identifier[x] = identifier[x] , identifier[rhol] = identifier[rhol] , identifier[mul] = identifier[mul] , identifier[P] = identifier[P] , identifier[Pc] = identifier[Pc] , identifier[D] = identifier[D] ,
identifier[roughness] = identifier[roughness] , identifier[L] = identifier[L] )
keyword[elif] identifier[i] == literal[int] :
keyword[return] identifier[f] ( identifier[m] = identifier[m] , identifier[x] = identifier[x] , identifier[rhol] = identifier[rhol] , identifier[rhog] = identifier[rhog] , identifier[mul] = identifier[mul] , identifier[mug] = identifier[mug] , identifier[D] = identifier[D] , identifier[L] = identifier[L] )
keyword[elif] identifier[i] == literal[int] :
keyword[return] identifier[f] ( identifier[m] = identifier[m] , identifier[x] = identifier[x] , identifier[rhol] = identifier[rhol] , identifier[rhog] = identifier[rhog] , identifier[mul] = identifier[mul] , identifier[mug] = identifier[mug] , identifier[D] = identifier[D] ,
identifier[L] = identifier[L] , identifier[roughness] = identifier[roughness] )
keyword[elif] identifier[i] == literal[int] :
keyword[return] identifier[f] ( identifier[m] = identifier[m] , identifier[x] = identifier[x] , identifier[rhol] = identifier[rhol] , identifier[rhog] = identifier[rhog] , identifier[mul] = identifier[mul] , identifier[mug] = identifier[mug] ,
identifier[sigma] = identifier[sigma] , identifier[D] = identifier[D] , identifier[L] = identifier[L] )
keyword[elif] identifier[i] == literal[int] :
keyword[return] identifier[f] ( identifier[m] = identifier[m] , identifier[x] = identifier[x] , identifier[rhol] = identifier[rhol] , identifier[rhog] = identifier[rhog] , identifier[mul] = identifier[mul] , identifier[mug] = identifier[mug] ,
identifier[sigma] = identifier[sigma] , identifier[D] = identifier[D] , identifier[L] = identifier[L] , identifier[roughness] = identifier[roughness] )
keyword[elif] identifier[i] == literal[int] :
keyword[return] identifier[f] ( identifier[m] = identifier[m] , identifier[x] = identifier[x] , identifier[rhol] = identifier[rhol] , identifier[rhog] = identifier[rhog] , identifier[sigma] = identifier[sigma] , identifier[D] = identifier[D] , identifier[L] = identifier[L] )
keyword[elif] identifier[i] == literal[int] :
keyword[return] identifier[f] ( identifier[m] = identifier[m] , identifier[x] = identifier[x] , identifier[rhol] = identifier[rhol] , identifier[rhog] = identifier[rhog] , identifier[mul] = identifier[mul] , identifier[mug] = identifier[mug] , identifier[D] = identifier[D] ,
identifier[L] = identifier[L] , identifier[roughness] = identifier[roughness] , identifier[rough_correction] = keyword[True] )
keyword[elif] identifier[i] == literal[int] :
keyword[return] identifier[f] ( identifier[m] = identifier[m] , identifier[x] = identifier[x] , identifier[rhol] = identifier[rhol] , identifier[rhog] = identifier[rhog] , identifier[mul] = identifier[mul] , identifier[mug] = identifier[mug] ,
identifier[sigma] = identifier[sigma] , identifier[D] = identifier[D] , identifier[L] = identifier[L] , identifier[roughness] = identifier[roughness] ,
identifier[flowtype] = literal[string] )
keyword[elif] identifier[i] == literal[int] :
keyword[return] identifier[f] ( identifier[m] = identifier[m] , identifier[x] = identifier[x] , identifier[rhol] = identifier[rhol] , identifier[rhog] = identifier[rhog] , identifier[mul] = identifier[mul] , identifier[mug] = identifier[mug] ,
identifier[sigma] = identifier[sigma] , identifier[D] = identifier[D] , identifier[L] = identifier[L] , identifier[roughness] = identifier[roughness] ,
identifier[flowtype] = literal[string] )
keyword[elif] identifier[i] == literal[int] :
keyword[return] identifier[f] ( identifier[m] = identifier[m] , identifier[x] = identifier[x] , identifier[rhol] = identifier[rhol] , identifier[rhog] = identifier[rhog] , identifier[mul] = identifier[mul] , identifier[mug] = identifier[mug] ,
identifier[sigma] = identifier[sigma] , identifier[P] = identifier[P] , identifier[D] = identifier[D] , identifier[angle] = identifier[angle] , identifier[L] = identifier[L] ,
identifier[roughness] = identifier[roughness] , identifier[acceleration] = keyword[False] , identifier[g] = identifier[g] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] ) | def two_phase_dP(m, x, rhol, D, L=1, rhog=None, mul=None, mug=None, sigma=None, P=None, Pc=None, roughness=0, angle=0, Method=None, AvailableMethods=False):
"""This function handles calculation of two-phase liquid-gas pressure drop
for flow inside channels. 23 calculation methods are available, with
varying input requirements. A correlation will be automatically selected if
none is specified. The full list of correlation can be obtained with the
`AvailableMethods` flag.
If no correlation is selected, the following rules are used, with the
earlier options attempted first:
* If rhog, mul, mug, and sigma are specified, use the Kim_Mudawar model
* If rhog, mul, and mug are specified, use the Chisholm model
* If mul, P, and Pc are specified, use the Zhang_Webb model
* If rhog and sigma are specified, use the Lombardi_Pedrocchi model
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
D : float
Diameter of pipe, [m]
L : float, optional
Length of pipe, [m]
rhog : float, optional
Gas density, [kg/m^3]
mul : float, optional
Viscosity of liquid, [Pa*s]
mug : float, optional
Viscosity of gas, [Pa*s]
sigma : float, optional
Surface tension, [N/m]
P : float, optional
Pressure of fluid, [Pa]
Pc : float, optional
Critical pressure of fluid, [Pa]
roughness : float, optional
Roughness of pipe for use in calculating friction factor, [m]
angle : float, optional
The angle of the pipe with respect to the horizontal, [degrees]
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
methods : list, only returned if AvailableMethods == True
List of methods which can be used to calculate two-phase pressure drop
with the given inputs.
Other Parameters
----------------
Method : string, optional
A string of the function name to use, as in the dictionary
two_phase_correlations.
AvailableMethods : bool, optional
If True, function will consider which methods which can be used to
calculate two-phase pressure drop with the given inputs and return
them as a list instead of performing a calculation.
Notes
-----
These functions may be integrated over, with properties recalculated as
the fluid's quality changes.
This model considers only the frictional pressure drop, not that due to
gravity or acceleration.
Examples
--------
>>> two_phase_dP(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6, mug=14E-6,
... sigma=0.0487, D=0.05, L=1)
840.4137796786074
"""
def list_methods():
usable_indices = []
if all([rhog, sigma]):
usable_indices.append(5) # depends on [control=['if'], data=[]]
if all([rhog, mul, mug, sigma]):
usable_indices.extend([4, 3, 102, 103]) # Differs only in the addition of roughness # depends on [control=['if'], data=[]]
if all([rhog, mul, mug]):
usable_indices.extend([1, 2, 101]) # Differs only in the addition of roughness # depends on [control=['if'], data=[]]
if all([mul, P, Pc]):
usable_indices.append(0) # depends on [control=['if'], data=[]]
if all([rhog, mul, mug, sigma, P, angle]):
usable_indices.append(104) # depends on [control=['if'], data=[]]
return [key for (key, value) in two_phase_correlations.items() if value[1] in usable_indices]
if AvailableMethods:
return list_methods() # depends on [control=['if'], data=[]]
if not Method:
if all([rhog, mul, mug, sigma]):
Method = 'Kim_Mudawar' # Kim_Mudawar preferred; 3 or 4 # depends on [control=['if'], data=[]]
elif all([rhog, mul, mug]):
Method = 'Chisholm' # Second choice, indexes 1 or 2 # depends on [control=['if'], data=[]]
elif all([mul, P, Pc]):
Method = 'Zhang_Webb' # Not a good choice # depends on [control=['if'], data=[]]
elif all([rhog, sigma]):
Method = 'Lombardi_Pedrocchi' # Last try # depends on [control=['if'], data=[]]
else:
raise Exception('All possible methods require more information than provided; provide more inputs!') # depends on [control=['if'], data=[]]
if Method in two_phase_correlations:
(f, i) = two_phase_correlations[Method]
if i == 0:
return f(m=m, x=x, rhol=rhol, mul=mul, P=P, Pc=Pc, D=D, roughness=roughness, L=L) # depends on [control=['if'], data=[]]
elif i == 1:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug, D=D, L=L) # depends on [control=['if'], data=[]]
elif i == 2:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug, D=D, L=L, roughness=roughness) # depends on [control=['if'], data=[]]
elif i == 3:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug, sigma=sigma, D=D, L=L) # depends on [control=['if'], data=[]]
elif i == 4:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug, sigma=sigma, D=D, L=L, roughness=roughness) # depends on [control=['if'], data=[]]
elif i == 5:
return f(m=m, x=x, rhol=rhol, rhog=rhog, sigma=sigma, D=D, L=L) # depends on [control=['if'], data=[]]
elif i == 101:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug, D=D, L=L, roughness=roughness, rough_correction=True) # depends on [control=['if'], data=[]]
elif i == 102:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug, sigma=sigma, D=D, L=L, roughness=roughness, flowtype='adiabatic gas') # depends on [control=['if'], data=[]]
elif i == 103:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug, sigma=sigma, D=D, L=L, roughness=roughness, flowtype='flow boiling') # depends on [control=['if'], data=[]]
elif i == 104:
return f(m=m, x=x, rhol=rhol, rhog=rhog, mul=mul, mug=mug, sigma=sigma, P=P, D=D, angle=angle, L=L, roughness=roughness, acceleration=False, g=g) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['Method', 'two_phase_correlations']]
else:
        raise Exception('Failure in function')
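
A usage sketch against the signature and docstring above; with density, viscosities and surface tension all given, the cascade auto-selects Kim_Mudawar, and AvailableMethods=True returns the usable correlation names instead of computing:

# Auto-selection: rhog, mul, mug and sigma present -> Kim_Mudawar is used.
dP = two_phase_dP(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6,
                  mug=14E-6, sigma=0.0487, D=0.05, L=1)  # ~840.41 Pa
# Asking which correlations the same inputs support:
methods = two_phase_dP(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6,
                       mug=14E-6, sigma=0.0487, D=0.05, L=1,
                       AvailableMethods=True)
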
def update_config_from_dockerfile(self, config):
"""Updates build config with values from the Dockerfile
Updates:
* set "name" from LABEL com.redhat.component (if exists)
* set "version" from LABEL version (if exists)
:param config: ConfigParser object
"""
labels = Labels(df_parser(self.workflow.builder.df_path).labels)
for config_key, label in (
('name', Labels.LABEL_TYPE_COMPONENT),
('version', Labels.LABEL_TYPE_VERSION),
):
try:
_, value = labels.get_name_and_value(label)
except KeyError:
pass
else:
config.set('image-build', config_key, value) | def function[update_config_from_dockerfile, parameter[self, config]]:
constant[Updates build config with values from the Dockerfile
Updates:
* set "name" from LABEL com.redhat.component (if exists)
* set "version" from LABEL version (if exists)
:param config: ConfigParser object
]
variable[labels] assign[=] call[name[Labels], parameter[call[name[df_parser], parameter[name[self].workflow.builder.df_path]].labels]]
for taget[tuple[[<ast.Name object at 0x7da20c76d690>, <ast.Name object at 0x7da20c76d990>]]] in starred[tuple[[<ast.Tuple object at 0x7da20c76c5b0>, <ast.Tuple object at 0x7da20c76e6b0>]]] begin[:]
<ast.Try object at 0x7da20c76dfc0> | keyword[def] identifier[update_config_from_dockerfile] ( identifier[self] , identifier[config] ):
literal[string]
identifier[labels] = identifier[Labels] ( identifier[df_parser] ( identifier[self] . identifier[workflow] . identifier[builder] . identifier[df_path] ). identifier[labels] )
keyword[for] identifier[config_key] , identifier[label] keyword[in] (
( literal[string] , identifier[Labels] . identifier[LABEL_TYPE_COMPONENT] ),
( literal[string] , identifier[Labels] . identifier[LABEL_TYPE_VERSION] ),
):
keyword[try] :
identifier[_] , identifier[value] = identifier[labels] . identifier[get_name_and_value] ( identifier[label] )
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[else] :
identifier[config] . identifier[set] ( literal[string] , identifier[config_key] , identifier[value] ) | def update_config_from_dockerfile(self, config):
"""Updates build config with values from the Dockerfile
Updates:
* set "name" from LABEL com.redhat.component (if exists)
* set "version" from LABEL version (if exists)
:param config: ConfigParser object
"""
labels = Labels(df_parser(self.workflow.builder.df_path).labels)
for (config_key, label) in (('name', Labels.LABEL_TYPE_COMPONENT), ('version', Labels.LABEL_TYPE_VERSION)):
try:
(_, value) = labels.get_name_and_value(label) # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
else:
config.set('image-build', config_key, value) # depends on [control=['for'], data=[]] |
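
Roughly, the plugin copies two Dockerfile labels into the [image-build] section. A simplified sketch with only the standard library; the Labels/df_parser helpers are project internals and are replaced here by a plain dict:

from configparser import ConfigParser  # 'ConfigParser' module on Python 2

# Assumed stand-in for the parsed Dockerfile LABEL values.
dockerfile_labels = {'com.redhat.component': 'my-component', 'version': '1.0'}

config = ConfigParser()
config.add_section('image-build')
for config_key, label in (('name', 'com.redhat.component'),
                          ('version', 'version')):
    if label in dockerfile_labels:
        config.set('image-build', config_key, dockerfile_labels[label])
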
def get_menu(self, name):
"""Return or create a menu."""
if name not in self._menus:
self._menus[name] = self.menuBar().addMenu(name)
return self._menus[name] | def function[get_menu, parameter[self, name]]:
constant[Return or create a menu.]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self]._menus] begin[:]
call[name[self]._menus][name[name]] assign[=] call[call[name[self].menuBar, parameter[]].addMenu, parameter[name[name]]]
return[call[name[self]._menus][name[name]]] | keyword[def] identifier[get_menu] ( identifier[self] , identifier[name] ):
literal[string]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[_menus] :
identifier[self] . identifier[_menus] [ identifier[name] ]= identifier[self] . identifier[menuBar] (). identifier[addMenu] ( identifier[name] )
keyword[return] identifier[self] . identifier[_menus] [ identifier[name] ] | def get_menu(self, name):
"""Return or create a menu."""
if name not in self._menus:
self._menus[name] = self.menuBar().addMenu(name) # depends on [control=['if'], data=['name']]
return self._menus[name] |
def next(self):
"""Pops and returns the first outgoing message from the list.
If message list currently has no messages, the calling thread will
        be put to sleep until we have at least one message in the list that
can be popped and returned.
"""
# We pick the first outgoing available and send it.
outgoing_msg = self.outgoing_msg_list.pop_first()
# If we do not have any outgoing msg., we wait.
if outgoing_msg is None:
self.outgoing_msg_event.clear()
self.outgoing_msg_event.wait()
outgoing_msg = self.outgoing_msg_list.pop_first()
return outgoing_msg | def function[next, parameter[self]]:
constant[Pops and returns the first outgoing message from the list.
If message list currently has no messages, the calling thread will
    be put to sleep until we have at least one message in the list that
can be popped and returned.
]
variable[outgoing_msg] assign[=] call[name[self].outgoing_msg_list.pop_first, parameter[]]
if compare[name[outgoing_msg] is constant[None]] begin[:]
call[name[self].outgoing_msg_event.clear, parameter[]]
call[name[self].outgoing_msg_event.wait, parameter[]]
variable[outgoing_msg] assign[=] call[name[self].outgoing_msg_list.pop_first, parameter[]]
return[name[outgoing_msg]] | keyword[def] identifier[next] ( identifier[self] ):
literal[string]
identifier[outgoing_msg] = identifier[self] . identifier[outgoing_msg_list] . identifier[pop_first] ()
keyword[if] identifier[outgoing_msg] keyword[is] keyword[None] :
identifier[self] . identifier[outgoing_msg_event] . identifier[clear] ()
identifier[self] . identifier[outgoing_msg_event] . identifier[wait] ()
identifier[outgoing_msg] = identifier[self] . identifier[outgoing_msg_list] . identifier[pop_first] ()
keyword[return] identifier[outgoing_msg] | def next(self):
"""Pops and returns the first outgoing message from the list.
If message list currently has no messages, the calling thread will
        be put to sleep until we have at least one message in the list that
can be popped and returned.
"""
# We pick the first outgoing available and send it.
outgoing_msg = self.outgoing_msg_list.pop_first()
# If we do not have any outgoing msg., we wait.
if outgoing_msg is None:
self.outgoing_msg_event.clear()
self.outgoing_msg_event.wait()
outgoing_msg = self.outgoing_msg_list.pop_first() # depends on [control=['if'], data=['outgoing_msg']]
return outgoing_msg |
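
The pop-or-wait pattern above relies on the producer setting the event after each append. A self-contained sketch with standard threading primitives; single consumer assumed, and the message list is replaced by a deque:

import collections
import threading

class OutgoingQueue(object):
    def __init__(self):
        self._items = collections.deque()
        self._event = threading.Event()

    def put(self, msg):
        self._items.append(msg)
        self._event.set()

    def next(self):
        while True:
            try:
                return self._items.popleft()
            except IndexError:
                # Clear first, then re-check, so a put() racing with us is
                # never lost between the empty pop and the wait.
                self._event.clear()
                if not self._items:
                    self._event.wait()
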
def iso_to_gregorian(iso_year, iso_week, iso_day):
"Gregorian calendar date for the given ISO year, week and day"
year_start = iso_year_start(iso_year)
return year_start + datetime.timedelta(days=iso_day - 1, weeks=iso_week - 1) | def function[iso_to_gregorian, parameter[iso_year, iso_week, iso_day]]:
constant[Gregorian calendar date for the given ISO year, week and day]
variable[year_start] assign[=] call[name[iso_year_start], parameter[name[iso_year]]]
return[binary_operation[name[year_start] + call[name[datetime].timedelta, parameter[]]]] | keyword[def] identifier[iso_to_gregorian] ( identifier[iso_year] , identifier[iso_week] , identifier[iso_day] ):
literal[string]
identifier[year_start] = identifier[iso_year_start] ( identifier[iso_year] )
keyword[return] identifier[year_start] + identifier[datetime] . identifier[timedelta] ( identifier[days] = identifier[iso_day] - literal[int] , identifier[weeks] = identifier[iso_week] - literal[int] ) | def iso_to_gregorian(iso_year, iso_week, iso_day):
"""Gregorian calendar date for the given ISO year, week and day"""
year_start = iso_year_start(iso_year)
return year_start + datetime.timedelta(days=iso_day - 1, weeks=iso_week - 1) |
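
A self-contained round-trip check; iso_year_start is not shown in this row, so a standard "Monday of ISO week 1" helper is assumed:

import datetime

def iso_year_start(iso_year):
    # Assumed helper: January 4 is always in ISO week 1; back up to its Monday.
    fourth_jan = datetime.date(iso_year, 1, 4)
    return fourth_jan - datetime.timedelta(days=fourth_jan.isoweekday() - 1)

def iso_to_gregorian(iso_year, iso_week, iso_day):
    year_start = iso_year_start(iso_year)
    return year_start + datetime.timedelta(days=iso_day - 1, weeks=iso_week - 1)

d = datetime.date(2016, 1, 1)         # a Friday in ISO week 53 of 2015
print(d.isocalendar())                # (2015, 53, 5)
print(iso_to_gregorian(2015, 53, 5))  # 2016-01-01
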
def _generate_base_namespace_module(self, api, namespace):
"""Creates a module for the namespace. All data types and routes are
represented as Python classes."""
self.cur_namespace = namespace
generate_module_header(self)
if namespace.doc is not None:
self.emit('"""')
self.emit_raw(namespace.doc)
self.emit('"""')
self.emit()
self.emit_raw(validators_import)
# Generate import statements for all referenced namespaces.
self._generate_imports_for_referenced_namespaces(namespace)
for annotation_type in namespace.annotation_types:
self._generate_annotation_type_class(namespace, annotation_type)
for data_type in namespace.linearize_data_types():
if isinstance(data_type, Struct):
self._generate_struct_class(namespace, data_type)
elif isinstance(data_type, Union):
self._generate_union_class(namespace, data_type)
else:
raise TypeError('Cannot handle type %r' % type(data_type))
for alias in namespace.linearize_aliases():
self._generate_alias_definition(namespace, alias)
# Generate the struct->subtype tag mapping at the end so that
# references to later-defined subtypes don't cause errors.
for data_type in namespace.linearize_data_types():
if is_struct_type(data_type):
self._generate_struct_class_reflection_attributes(
namespace, data_type)
if data_type.has_enumerated_subtypes():
self._generate_enumerated_subtypes_tag_mapping(
namespace, data_type)
elif is_union_type(data_type):
self._generate_union_class_reflection_attributes(
namespace, data_type)
self._generate_union_class_symbol_creators(data_type)
self._generate_routes(api.route_schema, namespace) | def function[_generate_base_namespace_module, parameter[self, api, namespace]]:
constant[Creates a module for the namespace. All data types and routes are
represented as Python classes.]
name[self].cur_namespace assign[=] name[namespace]
call[name[generate_module_header], parameter[name[self]]]
if compare[name[namespace].doc is_not constant[None]] begin[:]
call[name[self].emit, parameter[constant["""]]]
call[name[self].emit_raw, parameter[name[namespace].doc]]
call[name[self].emit, parameter[constant["""]]]
call[name[self].emit, parameter[]]
call[name[self].emit_raw, parameter[name[validators_import]]]
call[name[self]._generate_imports_for_referenced_namespaces, parameter[name[namespace]]]
for taget[name[annotation_type]] in starred[name[namespace].annotation_types] begin[:]
call[name[self]._generate_annotation_type_class, parameter[name[namespace], name[annotation_type]]]
for taget[name[data_type]] in starred[call[name[namespace].linearize_data_types, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[data_type], name[Struct]]] begin[:]
call[name[self]._generate_struct_class, parameter[name[namespace], name[data_type]]]
for taget[name[alias]] in starred[call[name[namespace].linearize_aliases, parameter[]]] begin[:]
call[name[self]._generate_alias_definition, parameter[name[namespace], name[alias]]]
for taget[name[data_type]] in starred[call[name[namespace].linearize_data_types, parameter[]]] begin[:]
if call[name[is_struct_type], parameter[name[data_type]]] begin[:]
call[name[self]._generate_struct_class_reflection_attributes, parameter[name[namespace], name[data_type]]]
if call[name[data_type].has_enumerated_subtypes, parameter[]] begin[:]
call[name[self]._generate_enumerated_subtypes_tag_mapping, parameter[name[namespace], name[data_type]]]
call[name[self]._generate_routes, parameter[name[api].route_schema, name[namespace]]] | keyword[def] identifier[_generate_base_namespace_module] ( identifier[self] , identifier[api] , identifier[namespace] ):
literal[string]
identifier[self] . identifier[cur_namespace] = identifier[namespace]
identifier[generate_module_header] ( identifier[self] )
keyword[if] identifier[namespace] . identifier[doc] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[emit] ( literal[string] )
identifier[self] . identifier[emit_raw] ( identifier[namespace] . identifier[doc] )
identifier[self] . identifier[emit] ( literal[string] )
identifier[self] . identifier[emit] ()
identifier[self] . identifier[emit_raw] ( identifier[validators_import] )
identifier[self] . identifier[_generate_imports_for_referenced_namespaces] ( identifier[namespace] )
keyword[for] identifier[annotation_type] keyword[in] identifier[namespace] . identifier[annotation_types] :
identifier[self] . identifier[_generate_annotation_type_class] ( identifier[namespace] , identifier[annotation_type] )
keyword[for] identifier[data_type] keyword[in] identifier[namespace] . identifier[linearize_data_types] ():
keyword[if] identifier[isinstance] ( identifier[data_type] , identifier[Struct] ):
identifier[self] . identifier[_generate_struct_class] ( identifier[namespace] , identifier[data_type] )
keyword[elif] identifier[isinstance] ( identifier[data_type] , identifier[Union] ):
identifier[self] . identifier[_generate_union_class] ( identifier[namespace] , identifier[data_type] )
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] % identifier[type] ( identifier[data_type] ))
keyword[for] identifier[alias] keyword[in] identifier[namespace] . identifier[linearize_aliases] ():
identifier[self] . identifier[_generate_alias_definition] ( identifier[namespace] , identifier[alias] )
keyword[for] identifier[data_type] keyword[in] identifier[namespace] . identifier[linearize_data_types] ():
keyword[if] identifier[is_struct_type] ( identifier[data_type] ):
identifier[self] . identifier[_generate_struct_class_reflection_attributes] (
identifier[namespace] , identifier[data_type] )
keyword[if] identifier[data_type] . identifier[has_enumerated_subtypes] ():
identifier[self] . identifier[_generate_enumerated_subtypes_tag_mapping] (
identifier[namespace] , identifier[data_type] )
keyword[elif] identifier[is_union_type] ( identifier[data_type] ):
identifier[self] . identifier[_generate_union_class_reflection_attributes] (
identifier[namespace] , identifier[data_type] )
identifier[self] . identifier[_generate_union_class_symbol_creators] ( identifier[data_type] )
identifier[self] . identifier[_generate_routes] ( identifier[api] . identifier[route_schema] , identifier[namespace] ) | def _generate_base_namespace_module(self, api, namespace):
"""Creates a module for the namespace. All data types and routes are
represented as Python classes."""
self.cur_namespace = namespace
generate_module_header(self)
if namespace.doc is not None:
self.emit('"""')
self.emit_raw(namespace.doc)
self.emit('"""')
self.emit() # depends on [control=['if'], data=[]]
self.emit_raw(validators_import)
# Generate import statements for all referenced namespaces.
self._generate_imports_for_referenced_namespaces(namespace)
for annotation_type in namespace.annotation_types:
self._generate_annotation_type_class(namespace, annotation_type) # depends on [control=['for'], data=['annotation_type']]
for data_type in namespace.linearize_data_types():
if isinstance(data_type, Struct):
self._generate_struct_class(namespace, data_type) # depends on [control=['if'], data=[]]
elif isinstance(data_type, Union):
self._generate_union_class(namespace, data_type) # depends on [control=['if'], data=[]]
else:
raise TypeError('Cannot handle type %r' % type(data_type)) # depends on [control=['for'], data=['data_type']]
for alias in namespace.linearize_aliases():
self._generate_alias_definition(namespace, alias) # depends on [control=['for'], data=['alias']]
# Generate the struct->subtype tag mapping at the end so that
# references to later-defined subtypes don't cause errors.
for data_type in namespace.linearize_data_types():
if is_struct_type(data_type):
self._generate_struct_class_reflection_attributes(namespace, data_type)
if data_type.has_enumerated_subtypes():
self._generate_enumerated_subtypes_tag_mapping(namespace, data_type) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif is_union_type(data_type):
self._generate_union_class_reflection_attributes(namespace, data_type)
self._generate_union_class_symbol_creators(data_type) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['data_type']]
self._generate_routes(api.route_schema, namespace) |
def open(self, bus):
"""Open the smbus interface on the specified bus."""
# Close the device if it's already open.
if self._device is not None:
self.close()
# Try to open the file for the specified bus. Must turn off buffering
# or else Python 3 fails (see: https://bugs.python.org/issue20074)
self._device = open('/dev/i2c-{0}'.format(bus), 'r+b', buffering=0) | def function[open, parameter[self, bus]]:
constant[Open the smbus interface on the specified bus.]
if compare[name[self]._device is_not constant[None]] begin[:]
call[name[self].close, parameter[]]
name[self]._device assign[=] call[name[open], parameter[call[constant[/dev/i2c-{0}].format, parameter[name[bus]]], constant[r+b]]] | keyword[def] identifier[open] ( identifier[self] , identifier[bus] ):
literal[string]
keyword[if] identifier[self] . identifier[_device] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[close] ()
identifier[self] . identifier[_device] = identifier[open] ( literal[string] . identifier[format] ( identifier[bus] ), literal[string] , identifier[buffering] = literal[int] ) | def open(self, bus):
"""Open the smbus interface on the specified bus."""
# Close the device if it's already open.
if self._device is not None:
self.close() # depends on [control=['if'], data=[]]
# Try to open the file for the specified bus. Must turn off buffering
# or else Python 3 fails (see: https://bugs.python.org/issue20074)
self._device = open('/dev/i2c-{0}'.format(bus), 'r+b', buffering=0) |
def coindex_around_coords(self, lat, lon, start=None, interval=None):
"""
Queries the OWM Weather API for Carbon Monoxide values sampled in the
surroundings of the provided geocoordinates and in the specified time
interval.
A *COIndex* object instance is returned, encapsulating a
*Location* object and the list of CO samples
If `start` is not provided, the latest available CO samples are
retrieved
If `start` is provided but `interval` is not, then `interval` defaults
to the maximum extent, which is: `year`
:param lat: the location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: the location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:param start: the object conveying the start value of the search time
window start (defaults to ``None``). If not provided, the latest
            available CO samples are retrieved
:type start: int, ``datetime.datetime`` or ISO8601-formatted
string
:param interval: the length of the search time window starting at
`start` (defaults to ``None``). If not provided, 'year' is used
:type interval: str among: 'minute', 'hour', 'day', 'month, 'year'
:return: a *COIndex* instance or ``None`` if data is not available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* for wrong input values
"""
geo.assert_is_lon(lon)
geo.assert_is_lat(lat)
params = {'lon': lon, 'lat': lat, 'start': start, 'interval': interval}
json_data = self._pollapi.get_coi(params)
coindex = self._parsers['coindex'].parse_JSON(json_data)
if interval is None:
interval = 'year'
coindex._interval = interval
return coindex | def function[coindex_around_coords, parameter[self, lat, lon, start, interval]]:
constant[
Queries the OWM Weather API for Carbon Monoxide values sampled in the
surroundings of the provided geocoordinates and in the specified time
interval.
A *COIndex* object instance is returned, encapsulating a
*Location* object and the list of CO samples
If `start` is not provided, the latest available CO samples are
retrieved
If `start` is provided but `interval` is not, then `interval` defaults
to the maximum extent, which is: `year`
:param lat: the location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: the location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:param start: the object conveying the start value of the search time
window start (defaults to ``None``). If not provided, the latest
available CO samples value are retrieved
:type start: int, ``datetime.datetime`` or ISO8601-formatted
string
:param interval: the length of the search time window starting at
`start` (defaults to ``None``). If not provided, 'year' is used
:type interval: str among: 'minute', 'hour', 'day', 'month, 'year'
:return: a *COIndex* instance or ``None`` if data is not available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* for wrong input values
]
call[name[geo].assert_is_lon, parameter[name[lon]]]
call[name[geo].assert_is_lat, parameter[name[lat]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da204566ef0>, <ast.Constant object at 0x7da204567610>, <ast.Constant object at 0x7da204567850>, <ast.Constant object at 0x7da204565ae0>], [<ast.Name object at 0x7da204567250>, <ast.Name object at 0x7da2045655d0>, <ast.Name object at 0x7da204567100>, <ast.Name object at 0x7da204566110>]]
variable[json_data] assign[=] call[name[self]._pollapi.get_coi, parameter[name[params]]]
variable[coindex] assign[=] call[call[name[self]._parsers][constant[coindex]].parse_JSON, parameter[name[json_data]]]
if compare[name[interval] is constant[None]] begin[:]
variable[interval] assign[=] constant[year]
name[coindex]._interval assign[=] name[interval]
return[name[coindex]] | keyword[def] identifier[coindex_around_coords] ( identifier[self] , identifier[lat] , identifier[lon] , identifier[start] = keyword[None] , identifier[interval] = keyword[None] ):
literal[string]
identifier[geo] . identifier[assert_is_lon] ( identifier[lon] )
identifier[geo] . identifier[assert_is_lat] ( identifier[lat] )
identifier[params] ={ literal[string] : identifier[lon] , literal[string] : identifier[lat] , literal[string] : identifier[start] , literal[string] : identifier[interval] }
identifier[json_data] = identifier[self] . identifier[_pollapi] . identifier[get_coi] ( identifier[params] )
identifier[coindex] = identifier[self] . identifier[_parsers] [ literal[string] ]. identifier[parse_JSON] ( identifier[json_data] )
keyword[if] identifier[interval] keyword[is] keyword[None] :
identifier[interval] = literal[string]
identifier[coindex] . identifier[_interval] = identifier[interval]
keyword[return] identifier[coindex] | def coindex_around_coords(self, lat, lon, start=None, interval=None):
"""
Queries the OWM Weather API for Carbon Monoxide values sampled in the
surroundings of the provided geocoordinates and in the specified time
interval.
A *COIndex* object instance is returned, encapsulating a
*Location* object and the list of CO samples
If `start` is not provided, the latest available CO samples are
retrieved
If `start` is provided but `interval` is not, then `interval` defaults
to the maximum extent, which is: `year`
:param lat: the location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: the location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:param start: the object conveying the start value of the search time
window start (defaults to ``None``). If not provided, the latest
            available CO samples are retrieved
:type start: int, ``datetime.datetime`` or ISO8601-formatted
string
:param interval: the length of the search time window starting at
`start` (defaults to ``None``). If not provided, 'year' is used
:type interval: str among: 'minute', 'hour', 'day', 'month, 'year'
:return: a *COIndex* instance or ``None`` if data is not available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* for wrong input values
"""
geo.assert_is_lon(lon)
geo.assert_is_lat(lat)
params = {'lon': lon, 'lat': lat, 'start': start, 'interval': interval}
json_data = self._pollapi.get_coi(params)
coindex = self._parsers['coindex'].parse_JSON(json_data)
if interval is None:
interval = 'year' # depends on [control=['if'], data=['interval']]
coindex._interval = interval
return coindex |
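
A usage sketch; 'owm' is an assumed, already-configured client object exposing this method:

co = owm.coindex_around_coords(45.07, 7.69)   # latest CO samples near Turin
co_year = owm.coindex_around_coords(45.07, 7.69,
                                    start='2016-07-01T00:00:00Z')
# With start given and interval omitted, interval defaults to 'year'.
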
def retrieve_netting_set(self, asset_manager_id, transaction_id):
"""
Returns all the transaction_ids associated with a single netting set. Pass in the ID for any transaction in
the set.
:param asset_manager_id: The asset_manager_id for the netting set owner.
:param transaction_id: A transaction_id of an entry within the netting set.
:return:
"""
self.logger.info('Retrieve Netting Set - Asset Manager: %s - Transaction ID: %s', asset_manager_id,
transaction_id)
url = '%s/netting/%s/%s' % (self.endpoint, asset_manager_id, transaction_id)
response = self.session.get(url)
if response.ok:
net_transaction_id, netting_set_json = next(iter(response.json().items()))
netting_set = [json_to_transaction(net_transaction) for net_transaction in netting_set_json]
self.logger.info('Returned %s Transactions in Netting Set.', len(netting_set))
return net_transaction_id, netting_set
else:
self.logger.error(response.text)
response.raise_for_status() | def function[retrieve_netting_set, parameter[self, asset_manager_id, transaction_id]]:
constant[
Returns all the transaction_ids associated with a single netting set. Pass in the ID for any transaction in
the set.
:param asset_manager_id: The asset_manager_id for the netting set owner.
:param transaction_id: A transaction_id of an entry within the netting set.
:return:
]
call[name[self].logger.info, parameter[constant[Retrieve Netting Set - Asset Manager: %s - Transaction ID: %s], name[asset_manager_id], name[transaction_id]]]
variable[url] assign[=] binary_operation[constant[%s/netting/%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b09eb580>, <ast.Name object at 0x7da1b09eb9a0>, <ast.Name object at 0x7da1b09e80a0>]]]
variable[response] assign[=] call[name[self].session.get, parameter[name[url]]]
if name[response].ok begin[:]
<ast.Tuple object at 0x7da1b09e81f0> assign[=] call[name[next], parameter[call[name[iter], parameter[call[call[name[response].json, parameter[]].items, parameter[]]]]]]
variable[netting_set] assign[=] <ast.ListComp object at 0x7da1b09b9150>
call[name[self].logger.info, parameter[constant[Returned %s Transactions in Netting Set.], call[name[len], parameter[name[netting_set]]]]]
return[tuple[[<ast.Name object at 0x7da1b09b93f0>, <ast.Name object at 0x7da1b09ba620>]]] | keyword[def] identifier[retrieve_netting_set] ( identifier[self] , identifier[asset_manager_id] , identifier[transaction_id] ):
literal[string]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[asset_manager_id] ,
identifier[transaction_id] )
identifier[url] = literal[string] %( identifier[self] . identifier[endpoint] , identifier[asset_manager_id] , identifier[transaction_id] )
identifier[response] = identifier[self] . identifier[session] . identifier[get] ( identifier[url] )
keyword[if] identifier[response] . identifier[ok] :
identifier[net_transaction_id] , identifier[netting_set_json] = identifier[next] ( identifier[iter] ( identifier[response] . identifier[json] (). identifier[items] ()))
identifier[netting_set] =[ identifier[json_to_transaction] ( identifier[net_transaction] ) keyword[for] identifier[net_transaction] keyword[in] identifier[netting_set_json] ]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[len] ( identifier[netting_set] ))
keyword[return] identifier[net_transaction_id] , identifier[netting_set]
keyword[else] :
identifier[self] . identifier[logger] . identifier[error] ( identifier[response] . identifier[text] )
identifier[response] . identifier[raise_for_status] () | def retrieve_netting_set(self, asset_manager_id, transaction_id):
"""
Returns all the transaction_ids associated with a single netting set. Pass in the ID for any transaction in
the set.
:param asset_manager_id: The asset_manager_id for the netting set owner.
:param transaction_id: A transaction_id of an entry within the netting set.
:return:
"""
self.logger.info('Retrieve Netting Set - Asset Manager: %s - Transaction ID: %s', asset_manager_id, transaction_id)
url = '%s/netting/%s/%s' % (self.endpoint, asset_manager_id, transaction_id)
response = self.session.get(url)
if response.ok:
(net_transaction_id, netting_set_json) = next(iter(response.json().items()))
netting_set = [json_to_transaction(net_transaction) for net_transaction in netting_set_json]
self.logger.info('Returned %s Transactions in Netting Set.', len(netting_set))
return (net_transaction_id, netting_set) # depends on [control=['if'], data=[]]
else:
self.logger.error(response.text)
response.raise_for_status() |
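A dependency-free illustration of the one-key unpacking idiom above: the endpoint is assumed to return a single-entry JSON object mapping the net transaction id to the list of member transactions, and the payload below is invented for the example.

payload = {
    'NET-123': [{'transaction_id': 'T-1'}, {'transaction_id': 'T-2'}],
}
net_transaction_id, netting_set_json = next(iter(payload.items()))
print(net_transaction_id)      # NET-123
print(len(netting_set_json))   # 2 transactions in the netting set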
def _string_to_int(self, s):
"""Read an integer in s, in Little Indian. """
base = len(self.alphabet)
return sum((self._letter_to_int(l) * base**lsb
for lsb, l in enumerate(s)
)) | def function[_string_to_int, parameter[self, s]]:
constant[Read an integer in s, in little-endian order.]
variable[base] assign[=] call[name[len], parameter[name[self].alphabet]]
return[call[name[sum], parameter[<ast.GeneratorExp object at 0x7da18dc04700>]]] | keyword[def] identifier[_string_to_int] ( identifier[self] , identifier[s] ):
literal[string]
identifier[base] = identifier[len] ( identifier[self] . identifier[alphabet] )
keyword[return] identifier[sum] (( identifier[self] . identifier[_letter_to_int] ( identifier[l] )* identifier[base] ** identifier[lsb]
keyword[for] identifier[lsb] , identifier[l] keyword[in] identifier[enumerate] ( identifier[s] )
)) | def _string_to_int(self, s):
"""Read an integer in s, in Little Indian. """
base = len(self.alphabet)
return sum((self._letter_to_int(l) * base ** lsb for (lsb, l) in enumerate(s))) |
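A standalone check of the little-endian evaluation, assuming `_letter_to_int` maps each letter to its index in the alphabet (that helper is not shown in this row):

alphabet = 'ab'

def string_to_int(s):
    # The leftmost character is the least significant digit.
    base = len(alphabet)
    return sum(alphabet.index(ch) * base ** pos for pos, ch in enumerate(s))

assert string_to_int('ba') == 1    # 1*2**0 + 0*2**1
assert string_to_int('ab') == 2    # 0*2**0 + 1*2**1
assert string_to_int('aab') == 4   # 0*2**0 + 0*2**1 + 1*2**2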
def get_custom_views(name=None):
"""
Issues a RESTful call to get the list of custom views from HPE IMC. The optional name input
will return only the specified view.
:param name: string containing the name of the desired custom view
:return: list of dictionaries containing attributes of the custom views.
"""
if auth is None or url is None: # checks to see if the imc credentials are already available
set_imc_creds()
if name is None:
get_custom_views_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&desc=false&total=false'
elif name is not None:
get_custom_views_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&name='+ name + '&desc=false&total=false'
f_url = url + get_custom_views_url
r = requests.get(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents
if r.status_code == 200:
customviewlist = (json.loads(r.text))['customView']
if type(customviewlist) is dict:
customviewlist = [customviewlist]
return customviewlist
else:
return customviewlist
else:
print(r.status_code)
print("An Error has occured") | def function[get_custom_views, parameter[name]]:
constant[
Issues a RESTful call to get the list of custom views from HPE IMC. The optional name input
will return only the specified view.
:param name: string containing the name of the desired custom view
:return: list of dictionaries containing attributes of the custom views.
]
if <ast.BoolOp object at 0x7da20c6aae60> begin[:]
call[name[set_imc_creds], parameter[]]
if compare[name[name] is constant[None]] begin[:]
variable[get_custom_views_url] assign[=] constant[/imcrs/plat/res/view/custom?resPrivilegeFilter=false&desc=false&total=false]
variable[f_url] assign[=] binary_operation[name[url] + name[get_custom_views_url]]
variable[r] assign[=] call[name[requests].get, parameter[name[f_url]]]
if compare[name[r].status_code equal[==] constant[200]] begin[:]
variable[customviewlist] assign[=] call[call[name[json].loads, parameter[name[r].text]]][constant[customView]]
if compare[call[name[type], parameter[name[customviewlist]]] is name[dict]] begin[:]
variable[customviewlist] assign[=] list[[<ast.Name object at 0x7da18f09f310>]]
return[name[customviewlist]] | keyword[def] identifier[get_custom_views] ( identifier[name] = keyword[None] ):
literal[string]
keyword[if] identifier[auth] keyword[is] keyword[None] keyword[or] identifier[url] keyword[is] keyword[None] :
identifier[set_imc_creds] ()
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[get_custom_views_url] = literal[string]
keyword[elif] identifier[name] keyword[is] keyword[not] keyword[None] :
identifier[get_custom_views_url] = literal[string] + identifier[name] + literal[string]
identifier[f_url] = identifier[url] + identifier[get_custom_views_url]
identifier[r] = identifier[requests] . identifier[get] ( identifier[f_url] , identifier[auth] = identifier[auth] , identifier[headers] = identifier[headers] )
keyword[if] identifier[r] . identifier[status_code] == literal[int] :
identifier[customviewlist] =( identifier[json] . identifier[loads] ( identifier[r] . identifier[text] ))[ literal[string] ]
keyword[if] identifier[type] ( identifier[customviewlist] ) keyword[is] identifier[dict] :
identifier[customviewlist] =[ identifier[customviewlist] ]
keyword[return] identifier[customviewlist]
keyword[else] :
keyword[return] identifier[customviewlist]
keyword[else] :
identifier[print] ( identifier[r] . identifier[status_code] )
identifier[print] ( literal[string] ) | def get_custom_views(name=None):
"""
Issues a RESTful call to get the list of custom views from HPE IMC. The optional name input
will return only the specified view.
:param name: string containing the name of the desired custom view
:return: list of dictionaries containing attributes of the custom views.
"""
if auth is None or url is None: # checks to see if the imc credentials are already available
set_imc_creds() # depends on [control=['if'], data=[]]
if name is None:
get_custom_views_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&desc=false&total=false' # depends on [control=['if'], data=[]]
elif name is not None:
get_custom_views_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&name=' + name + '&desc=false&total=false' # depends on [control=['if'], data=['name']]
f_url = url + get_custom_views_url
r = requests.get(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents
if r.status_code == 200:
customviewlist = json.loads(r.text)['customView']
if type(customviewlist) is dict:
customviewlist = [customviewlist]
return customviewlist # depends on [control=['if'], data=[]]
else:
return customviewlist # depends on [control=['if'], data=[]]
else:
print(r.status_code)
print('An error has occurred')
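The dict-versus-list branch above exists because the IMC endpoint appears to return a bare JSON object when exactly one view matches and an array otherwise; a tiny self-contained version of that normalisation:

def normalize_custom_views(custom_view):
    # Wrap a single-view dict so callers always receive a list.
    return [custom_view] if isinstance(custom_view, dict) else custom_view

assert normalize_custom_views({'name': 'view1'}) == [{'name': 'view1'}]
assert normalize_custom_views([{'name': 'a'}, {'name': 'b'}]) == [{'name': 'a'}, {'name': 'b'}]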
def subnet2block(subnet):
"""Convert a dotted-quad ip address including a netmask into a tuple
containing the network block start and end addresses.
>>> subnet2block('127.0.0.1/255.255.255.255')
('127.0.0.1', '127.0.0.1')
>>> subnet2block('127/255')
('127.0.0.0', '127.255.255.255')
>>> subnet2block('127.0.1/255.255')
('127.0.0.0', '127.0.255.255')
>>> subnet2block('127.1/255.255.255.0')
('127.1.0.0', '127.1.0.255')
>>> subnet2block('127.0.0.3/255.255.255.248')
('127.0.0.0', '127.0.0.7')
>>> subnet2block('127/0')
('0.0.0.0', '255.255.255.255')
:param subnet: dotted-quad ip address with netmask
(e.g. '127.0.0.1/255.0.0.0').
:type subnet: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
"""
if not validate_subnet(subnet):
return None
ip, netmask = subnet.split('/')
prefix = netmask2prefix(netmask)
# convert dotted-quad ip to base network number
network = ip2network(ip)
return _block_from_ip_and_prefix(network, prefix) | def function[subnet2block, parameter[subnet]]:
constant[Convert a dotted-quad ip address including a netmask into a tuple
containing the network block start and end addresses.
>>> subnet2block('127.0.0.1/255.255.255.255')
('127.0.0.1', '127.0.0.1')
>>> subnet2block('127/255')
('127.0.0.0', '127.255.255.255')
>>> subnet2block('127.0.1/255.255')
('127.0.0.0', '127.0.255.255')
>>> subnet2block('127.1/255.255.255.0')
('127.1.0.0', '127.1.0.255')
>>> subnet2block('127.0.0.3/255.255.255.248')
('127.0.0.0', '127.0.0.7')
>>> subnet2block('127/0')
('0.0.0.0', '255.255.255.255')
:param subnet: dotted-quad ip address with netmask
(e.g. '127.0.0.1/255.0.0.0').
:type subnet: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
]
if <ast.UnaryOp object at 0x7da20c6c7430> begin[:]
return[constant[None]]
<ast.Tuple object at 0x7da20c6c5900> assign[=] call[name[subnet].split, parameter[constant[/]]]
variable[prefix] assign[=] call[name[netmask2prefix], parameter[name[netmask]]]
variable[network] assign[=] call[name[ip2network], parameter[name[ip]]]
return[call[name[_block_from_ip_and_prefix], parameter[name[network], name[prefix]]]] | keyword[def] identifier[subnet2block] ( identifier[subnet] ):
literal[string]
keyword[if] keyword[not] identifier[validate_subnet] ( identifier[subnet] ):
keyword[return] keyword[None]
identifier[ip] , identifier[netmask] = identifier[subnet] . identifier[split] ( literal[string] )
identifier[prefix] = identifier[netmask2prefix] ( identifier[netmask] )
identifier[network] = identifier[ip2network] ( identifier[ip] )
keyword[return] identifier[_block_from_ip_and_prefix] ( identifier[network] , identifier[prefix] ) | def subnet2block(subnet):
"""Convert a dotted-quad ip address including a netmask into a tuple
containing the network block start and end addresses.
>>> subnet2block('127.0.0.1/255.255.255.255')
('127.0.0.1', '127.0.0.1')
>>> subnet2block('127/255')
('127.0.0.0', '127.255.255.255')
>>> subnet2block('127.0.1/255.255')
('127.0.0.0', '127.0.255.255')
>>> subnet2block('127.1/255.255.255.0')
('127.1.0.0', '127.1.0.255')
>>> subnet2block('127.0.0.3/255.255.255.248')
('127.0.0.0', '127.0.0.7')
>>> subnet2block('127/0')
('0.0.0.0', '255.255.255.255')
:param subnet: dotted-quad ip address with netmask
(e.g. '127.0.0.1/255.0.0.0').
:type subnet: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
"""
if not validate_subnet(subnet):
return None # depends on [control=['if'], data=[]]
(ip, netmask) = subnet.split('/')
prefix = netmask2prefix(netmask)
# convert dotted-quad ip to base network number
network = ip2network(ip)
return _block_from_ip_and_prefix(network, prefix) |
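The helpers `validate_subnet`, `netmask2prefix`, `ip2network`, and `_block_from_ip_and_prefix` are defined elsewhere in the module, but the doctest results can be cross-checked independently with the standard library:

import ipaddress

# strict=False allows host bits to be set, mirroring subnet2block's behaviour.
net = ipaddress.ip_network('127.0.0.3/255.255.255.248', strict=False)
print(net.network_address, net.broadcast_address)   # 127.0.0.0 127.0.0.7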
def memorized_datetime(seconds):
'''Create only one instance of each distinct datetime'''
try:
return _datetime_cache[seconds]
except KeyError:
# NB. We can't just do datetime.utcfromtimestamp(seconds) as this
# fails with negative values under Windows (Bug #90096)
dt = _epoch + timedelta(seconds=seconds)
_datetime_cache[seconds] = dt
return dt | def function[memorized_datetime, parameter[seconds]]:
constant[Create only one instance of each distinct datetime]
<ast.Try object at 0x7da18f812bf0> | keyword[def] identifier[memorized_datetime] ( identifier[seconds] ):
literal[string]
keyword[try] :
keyword[return] identifier[_datetime_cache] [ identifier[seconds] ]
keyword[except] identifier[KeyError] :
identifier[dt] = identifier[_epoch] + identifier[timedelta] ( identifier[seconds] = identifier[seconds] )
identifier[_datetime_cache] [ identifier[seconds] ]= identifier[dt]
keyword[return] identifier[dt] | def memorized_datetime(seconds):
"""Create only one instance of each distinct datetime"""
try:
return _datetime_cache[seconds] # depends on [control=['try'], data=[]]
except KeyError:
# NB. We can't just do datetime.utcfromtimestamp(seconds) as this
# fails with negative values under Windows (Bug #90096)
dt = _epoch + timedelta(seconds=seconds)
_datetime_cache[seconds] = dt
return dt # depends on [control=['except'], data=[]] |
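`_epoch` is defined elsewhere in the module and is assumed here to be `datetime(1970, 1, 1)`. A quick demonstration of why the epoch-plus-timedelta arithmetic is used instead of `utcfromtimestamp`: it also handles pre-1970 (negative) offsets, which the latter rejected on Windows per the bug cited in the comment.

from datetime import datetime, timedelta

_epoch = datetime(1970, 1, 1)
print(_epoch + timedelta(seconds=86400))    # 1970-01-02 00:00:00
print(_epoch + timedelta(seconds=-86400))   # 1969-12-31 00:00:00 (negative works)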
def get_data_csv(file_name, encoding='utf-8', file_contents=None, on_demand=False):
'''
Gets good old csv data from a file.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
encoding: Loads the file with the specified cell encoding.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy.
'''
def yield_csv(csv_contents, csv_file):
try:
for line in csv_contents:
yield line
finally:
try:
csv_file.close()
except:
pass
def process_csv(csv_contents, csv_file):
return [line for line in yield_csv(csv_contents, csv_file)]
if file_contents:
csv_file = BytesIO(file_contents)
else:
# Don't use 'open as' format, as on_demand loads shouldn't close the file early
csv_file = open(file_name, 'rb')
reader = csv.reader(csv_file, dialect=csv.excel, encoding=encoding)
if on_demand:
table = yield_csv(reader, csv_file)
else:
table = process_csv(reader, csv_file)
return [table] | def function[get_data_csv, parameter[file_name, encoding, file_contents, on_demand]]:
constant[
Gets good old csv data from a file.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
encoding: Loads the file with the specified cell encoding.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy.
]
def function[yield_csv, parameter[csv_contents, csv_file]]:
<ast.Try object at 0x7da18f813700>
def function[process_csv, parameter[csv_contents, csv_file]]:
return[<ast.ListComp object at 0x7da18f811b10>]
if name[file_contents] begin[:]
variable[csv_file] assign[=] call[name[BytesIO], parameter[name[file_contents]]]
variable[reader] assign[=] call[name[csv].reader, parameter[name[csv_file]]]
if name[on_demand] begin[:]
variable[table] assign[=] call[name[yield_csv], parameter[name[reader], name[csv_file]]]
return[list[[<ast.Name object at 0x7da18f813010>]]] | keyword[def] identifier[get_data_csv] ( identifier[file_name] , identifier[encoding] = literal[string] , identifier[file_contents] = keyword[None] , identifier[on_demand] = keyword[False] ):
literal[string]
keyword[def] identifier[yield_csv] ( identifier[csv_contents] , identifier[csv_file] ):
keyword[try] :
keyword[for] identifier[line] keyword[in] identifier[csv_contents] :
keyword[yield] identifier[line]
keyword[finally] :
keyword[try] :
identifier[csv_file] . identifier[close] ()
keyword[except] :
keyword[pass]
keyword[def] identifier[process_csv] ( identifier[csv_contents] , identifier[csv_file] ):
keyword[return] [ identifier[line] keyword[for] identifier[line] keyword[in] identifier[yield_csv] ( identifier[csv_contents] , identifier[csv_file] )]
keyword[if] identifier[file_contents] :
identifier[csv_file] = identifier[BytesIO] ( identifier[file_contents] )
keyword[else] :
identifier[csv_file] = identifier[open] ( identifier[file_name] , literal[string] )
identifier[reader] = identifier[csv] . identifier[reader] ( identifier[csv_file] , identifier[dialect] = identifier[csv] . identifier[excel] , identifier[encoding] = identifier[encoding] )
keyword[if] identifier[on_demand] :
identifier[table] = identifier[yield_csv] ( identifier[reader] , identifier[csv_file] )
keyword[else] :
identifier[table] = identifier[process_csv] ( identifier[reader] , identifier[csv_file] )
keyword[return] [ identifier[table] ] | def get_data_csv(file_name, encoding='utf-8', file_contents=None, on_demand=False):
"""
Gets good old csv data from a file.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
encoding: Loads the file with the specified cell encoding.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy.
"""
def yield_csv(csv_contents, csv_file):
try:
for line in csv_contents:
yield line # depends on [control=['for'], data=['line']] # depends on [control=['try'], data=[]]
finally:
try:
csv_file.close() # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
def process_csv(csv_contents, csv_file):
return [line for line in yield_csv(csv_contents, csv_file)]
if file_contents:
csv_file = BytesIO(file_contents) # depends on [control=['if'], data=[]]
else:
# Don't use 'open as' format, as on_demand loads shouldn't close the file early
csv_file = open(file_name, 'rb')
reader = csv.reader(csv_file, dialect=csv.excel, encoding=encoding)
if on_demand:
table = yield_csv(reader, csv_file) # depends on [control=['if'], data=[]]
else:
table = process_csv(reader, csv_file)
return [table] |
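A behavioural sketch of the two load paths using only the standard library; the original passes `encoding=` to the reader, which suggests a drop-in replacement such as the unicodecsv package rather than the stdlib csv module, so that argument is omitted here.

import csv
import io

data = io.StringIO('a,b\n1,2\n')
eager = [row for row in csv.reader(data, dialect=csv.excel)]   # process_csv path
print(eager)                                                   # [['a', 'b'], ['1', '2']]

data.seek(0)
lazy = csv.reader(data, dialect=csv.excel)                     # on_demand path
print(next(lazy))                                              # ['a', 'b'] -- rows stream one at a time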
def process(self, pq):
"""
:arg PlaceQuery pq: PlaceQuery instance
:returns: modified PlaceQuery, or ``False`` if country is not acceptable.
"""
# Map country, but don't let map overwrite
if pq.country not in self.acceptable_countries and pq.country in self.country_map:
pq.country = self.country_map[pq.country]
if pq.country != '' and \
self.acceptable_countries != [] and \
pq.country not in self.acceptable_countries:
return False
return pq | def function[process, parameter[self, pq]]:
constant[
:arg PlaceQuery pq: PlaceQuery instance
:returns: modified PlaceQuery, or ``False`` if country is not acceptable.
]
if <ast.BoolOp object at 0x7da20c794490> begin[:]
name[pq].country assign[=] call[name[self].country_map][name[pq].country]
if <ast.BoolOp object at 0x7da20c795780> begin[:]
return[constant[False]]
return[name[pq]] | keyword[def] identifier[process] ( identifier[self] , identifier[pq] ):
literal[string]
keyword[if] identifier[pq] . identifier[country] keyword[not] keyword[in] identifier[self] . identifier[acceptable_countries] keyword[and] identifier[pq] . identifier[country] keyword[in] identifier[self] . identifier[country_map] :
identifier[pq] . identifier[country] = identifier[self] . identifier[country_map] [ identifier[pq] . identifier[country] ]
keyword[if] identifier[pq] . identifier[country] != literal[string] keyword[and] identifier[self] . identifier[acceptable_countries] !=[] keyword[and] identifier[pq] . identifier[country] keyword[not] keyword[in] identifier[self] . identifier[acceptable_countries] :
keyword[return] keyword[False]
keyword[return] identifier[pq] | def process(self, pq):
"""
:arg PlaceQuery pq: PlaceQuery instance
:returns: modified PlaceQuery, or ``False`` if country is not acceptable.
"""
# Map country, but don't let map overwrite
if pq.country not in self.acceptable_countries and pq.country in self.country_map:
pq.country = self.country_map[pq.country] # depends on [control=['if'], data=[]]
if pq.country != '' and self.acceptable_countries != [] and (pq.country not in self.acceptable_countries):
return False # depends on [control=['if'], data=[]]
return pq |
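A dependency-free walk-through of the same filtering logic; `PlaceQuery` is stood in for by a simple namespace, and the country map and acceptable list are invented values:

from types import SimpleNamespace

country_map = {'UK': 'GB'}
acceptable_countries = ['GB', 'US']

pq = SimpleNamespace(country='UK')
# Map the alias first, exactly as process() does, without overwriting
# countries that are already acceptable.
if pq.country not in acceptable_countries and pq.country in country_map:
    pq.country = country_map[pq.country]
accepted = not (pq.country != '' and acceptable_countries != []
                and pq.country not in acceptable_countries)
print(pq.country, accepted)   # GB True -- the query passes the filter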
def analyze(self, file, filename):
"""
:param file: The File object itself.
:param filename: string; filename of File object, used for creating
PotentialSecret objects
:returns: dictionary representation of set (for random access by hash)
{ detect_secrets.core.potential_secret.__hash__:
detect_secrets.core.potential_secret }
"""
potential_secrets = {}
for line_num, line in enumerate(file.readlines(), start=1):
secrets = self.analyze_string(line, line_num, filename)
potential_secrets.update(secrets)
return potential_secrets | def function[analyze, parameter[self, file, filename]]:
constant[
:param file: The File object itself.
:param filename: string; filename of File object, used for creating
PotentialSecret objects
:returns: dictionary representation of set (for random access by hash)
{ detect_secrets.core.potential_secret.__hash__:
detect_secrets.core.potential_secret }
]
variable[potential_secrets] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c6c51b0>, <ast.Name object at 0x7da20c6c4460>]]] in starred[call[name[enumerate], parameter[call[name[file].readlines, parameter[]]]]] begin[:]
variable[secrets] assign[=] call[name[self].analyze_string, parameter[name[line], name[line_num], name[filename]]]
call[name[potential_secrets].update, parameter[name[secrets]]]
return[name[potential_secrets]] | keyword[def] identifier[analyze] ( identifier[self] , identifier[file] , identifier[filename] ):
literal[string]
identifier[potential_secrets] ={}
keyword[for] identifier[line_num] , identifier[line] keyword[in] identifier[enumerate] ( identifier[file] . identifier[readlines] (), identifier[start] = literal[int] ):
identifier[secrets] = identifier[self] . identifier[analyze_string] ( identifier[line] , identifier[line_num] , identifier[filename] )
identifier[potential_secrets] . identifier[update] ( identifier[secrets] )
keyword[return] identifier[potential_secrets] | def analyze(self, file, filename):
"""
:param file: The File object itself.
:param filename: string; filename of File object, used for creating
PotentialSecret objects
:returns: dictionary representation of set (for random access by hash)
{ detect_secrets.core.potential_secret.__hash__:
detect_secrets.core.potential_secret }
"""
potential_secrets = {}
for (line_num, line) in enumerate(file.readlines(), start=1):
secrets = self.analyze_string(line, line_num, filename)
potential_secrets.update(secrets) # depends on [control=['for'], data=[]]
return potential_secrets |
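The per-line scan above can be exercised with a stub: the real `analyze_string` comes from a secret-detection plugin and returns PotentialSecret objects keyed by hash, which a plain tuple key approximates here.

import io

def analyze_string(line, line_num, filename):
    # Hypothetical stand-in: flag any line that mentions 'password'.
    return {(filename, line_num): line.strip()} if 'password' in line else {}

fake_file = io.StringIO('user=alice\npassword=hunter2\n')
potential_secrets = {}
for line_num, line in enumerate(fake_file.readlines(), start=1):
    potential_secrets.update(analyze_string(line, line_num, 'config.txt'))
print(potential_secrets)   # {('config.txt', 2): 'password=hunter2'}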