code | code_sememe | token_type | code_dependency
---|---|---|---|
def _add_dependency(self, p, template, inlane, outlane, pid):
"""Automatically Adds a dependency of a process.
This method adds a template to the process list attribute as a
dependency. It will adapt the input lane, output lane and process
id of the process that depends on it.
Parameters
----------
p : Process
Process class that contains the dependency.
template : str
Template name of the dependency.
inlane : int
Input lane.
outlane : int
Output lane.
pid : int
Process ID.
"""
dependency_proc = self.process_map[template](template=template)
if dependency_proc.input_type != p.input_type:
logger.error("Cannot automatically add dependency with different"
" input type. Input type of process '{}' is '{}."
" Input type of dependency '{}' is '{}'".format(
p.template, p.input_type, template,
dependency_proc.input_type))
input_suf = "{}_{}_dep".format(inlane, pid)
output_suf = "{}_{}_dep".format(outlane, pid)
dependency_proc.set_main_channel_names(input_suf, output_suf, outlane)
# To insert the dependency process before the current process, we'll
# need to move the input channel name of the later to the former, and
# set a new connection between the dependency and the process.
dependency_proc.input_channel = p.input_channel
p.input_channel = dependency_proc.output_channel
# If the current process was the first in the pipeline, change the
# lanes so that the dependency becomes the first process
if not p.parent_lane:
p.parent_lane = outlane
dependency_proc.parent_lane = None
else:
dependency_proc.parent_lane = inlane
p.parent_lane = outlane
self.processes.append(dependency_proc) | def function[_add_dependency, parameter[self, p, template, inlane, outlane, pid]]:
constant[Automatically Adds a dependency of a process.
This method adds a template to the process list attribute as a
dependency. It will adapt the input lane, output lane and process
id of the process that depends on it.
Parameters
----------
p : Process
Process class that contains the dependency.
template : str
Template name of the dependency.
inlane : int
Input lane.
outlane : int
Output lane.
pid : int
Process ID.
]
variable[dependency_proc] assign[=] call[call[name[self].process_map][name[template]], parameter[]]
if compare[name[dependency_proc].input_type not_equal[!=] name[p].input_type] begin[:]
call[name[logger].error, parameter[call[constant[Cannot automatically add dependency with different input type. Input type of process '{}' is '{}'. Input type of dependency '{}' is '{}'].format, parameter[name[p].template, name[p].input_type, name[template], name[dependency_proc].input_type]]]]
variable[input_suf] assign[=] call[constant[{}_{}_dep].format, parameter[name[inlane], name[pid]]]
variable[output_suf] assign[=] call[constant[{}_{}_dep].format, parameter[name[outlane], name[pid]]]
call[name[dependency_proc].set_main_channel_names, parameter[name[input_suf], name[output_suf], name[outlane]]]
name[dependency_proc].input_channel assign[=] name[p].input_channel
name[p].input_channel assign[=] name[dependency_proc].output_channel
if <ast.UnaryOp object at 0x7da1b023d060> begin[:]
name[p].parent_lane assign[=] name[outlane]
name[dependency_proc].parent_lane assign[=] constant[None]
call[name[self].processes.append, parameter[name[dependency_proc]]] | keyword[def] identifier[_add_dependency] ( identifier[self] , identifier[p] , identifier[template] , identifier[inlane] , identifier[outlane] , identifier[pid] ):
literal[string]
identifier[dependency_proc] = identifier[self] . identifier[process_map] [ identifier[template] ]( identifier[template] = identifier[template] )
keyword[if] identifier[dependency_proc] . identifier[input_type] != identifier[p] . identifier[input_type] :
identifier[logger] . identifier[error] ( literal[string]
literal[string]
literal[string] . identifier[format] (
identifier[p] . identifier[template] , identifier[p] . identifier[input_type] , identifier[template] ,
identifier[dependency_proc] . identifier[input_type] ))
identifier[input_suf] = literal[string] . identifier[format] ( identifier[inlane] , identifier[pid] )
identifier[output_suf] = literal[string] . identifier[format] ( identifier[outlane] , identifier[pid] )
identifier[dependency_proc] . identifier[set_main_channel_names] ( identifier[input_suf] , identifier[output_suf] , identifier[outlane] )
identifier[dependency_proc] . identifier[input_channel] = identifier[p] . identifier[input_channel]
identifier[p] . identifier[input_channel] = identifier[dependency_proc] . identifier[output_channel]
keyword[if] keyword[not] identifier[p] . identifier[parent_lane] :
identifier[p] . identifier[parent_lane] = identifier[outlane]
identifier[dependency_proc] . identifier[parent_lane] = keyword[None]
keyword[else] :
identifier[dependency_proc] . identifier[parent_lane] = identifier[inlane]
identifier[p] . identifier[parent_lane] = identifier[outlane]
identifier[self] . identifier[processes] . identifier[append] ( identifier[dependency_proc] ) | def _add_dependency(self, p, template, inlane, outlane, pid):
"""Automatically Adds a dependency of a process.
This method adds a template to the process list attribute as a
dependency. It will adapt the input lane, output lane and process
id of the process that depends on it.
Parameters
----------
p : Process
Process class that contains the dependency.
template : str
Template name of the dependency.
inlane : int
Input lane.
outlane : int
Output lane.
pid : int
Process ID.
"""
dependency_proc = self.process_map[template](template=template)
if dependency_proc.input_type != p.input_type:
logger.error("Cannot automatically add dependency with different input type. Input type of process '{}' is '{}. Input type of dependency '{}' is '{}'".format(p.template, p.input_type, template, dependency_proc.input_type)) # depends on [control=['if'], data=[]]
input_suf = '{}_{}_dep'.format(inlane, pid)
output_suf = '{}_{}_dep'.format(outlane, pid)
dependency_proc.set_main_channel_names(input_suf, output_suf, outlane)
# To insert the dependency process before the current process, we'll
# need to move the input channel name of the later to the former, and
# set a new connection between the dependency and the process.
dependency_proc.input_channel = p.input_channel
p.input_channel = dependency_proc.output_channel
# If the current process was the first in the pipeline, change the
# lanes so that the dependency becomes the first process
if not p.parent_lane:
p.parent_lane = outlane
dependency_proc.parent_lane = None # depends on [control=['if'], data=[]]
else:
dependency_proc.parent_lane = inlane
p.parent_lane = outlane
self.processes.append(dependency_proc) |
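
The `_add_dependency` row above splices a dependency process in front of an existing process by swapping channel names and lanes. A minimal standalone sketch of that rewiring, using a simplified stand-in for the pipeline's process objects (the `SimpleProcess` class, its attribute names and the example template names are illustrative, not the library's API):

```python
# Sketch of the channel/lane rewiring performed by _add_dependency above.
# SimpleProcess is a hypothetical stand-in; only the attributes touched
# by the method are modelled.
class SimpleProcess:
    def __init__(self, template, input_channel, output_channel, parent_lane):
        self.template = template
        self.input_channel = input_channel
        self.output_channel = output_channel
        self.parent_lane = parent_lane

def splice_dependency(dep, proc, inlane, outlane):
    """Insert dep before proc: dep inherits proc's input channel and
    proc now consumes dep's output channel."""
    dep.input_channel = proc.input_channel
    proc.input_channel = dep.output_channel
    if not proc.parent_lane:
        # proc was first in the pipeline: the dependency becomes the new head
        proc.parent_lane = outlane
        dep.parent_lane = None
    else:
        dep.parent_lane = inlane
        proc.parent_lane = outlane
    return dep, proc

proc = SimpleProcess("integrity_coverage", "ch_in_1", "ch_out_1", parent_lane=None)
dep = SimpleProcess("dep_proc", "ch_in_dep", "ch_out_dep", parent_lane=None)
splice_dependency(dep, proc, inlane=1, outlane=2)
print(proc.input_channel)                 # ch_out_dep -> proc reads from the dependency
print(dep.input_channel)                  # ch_in_1    -> dependency took proc's old input
print(dep.parent_lane, proc.parent_lane)  # None 2     -> dependency is the new head
```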
def SendUnicodeChar(char: str) -> int:
"""
Type a single unicode char.
char: str, len(char) must equal to 1.
Return int, the number of events that it successfully inserted into the keyboard or mouse input stream.
If the function returns zero, the input was already blocked by another thread.
"""
return SendInput(KeyboardInput(0, ord(char), KeyboardEventFlag.KeyUnicode | KeyboardEventFlag.KeyDown),
KeyboardInput(0, ord(char), KeyboardEventFlag.KeyUnicode | KeyboardEventFlag.KeyUp)) | def function[SendUnicodeChar, parameter[char]]:
constant[
Type a single unicode char.
char: str, len(char) must equal to 1.
Return int, the number of events that it successfully inserted into the keyboard or mouse input stream.
If the function returns zero, the input was already blocked by another thread.
]
return[call[name[SendInput], parameter[call[name[KeyboardInput], parameter[constant[0], call[name[ord], parameter[name[char]]], binary_operation[name[KeyboardEventFlag].KeyUnicode <ast.BitOr object at 0x7da2590d6aa0> name[KeyboardEventFlag].KeyDown]]], call[name[KeyboardInput], parameter[constant[0], call[name[ord], parameter[name[char]]], binary_operation[name[KeyboardEventFlag].KeyUnicode <ast.BitOr object at 0x7da2590d6aa0> name[KeyboardEventFlag].KeyUp]]]]]] | keyword[def] identifier[SendUnicodeChar] ( identifier[char] : identifier[str] )-> identifier[int] :
literal[string]
keyword[return] identifier[SendInput] ( identifier[KeyboardInput] ( literal[int] , identifier[ord] ( identifier[char] ), identifier[KeyboardEventFlag] . identifier[KeyUnicode] | identifier[KeyboardEventFlag] . identifier[KeyDown] ),
identifier[KeyboardInput] ( literal[int] , identifier[ord] ( identifier[char] ), identifier[KeyboardEventFlag] . identifier[KeyUnicode] | identifier[KeyboardEventFlag] . identifier[KeyUp] )) | def SendUnicodeChar(char: str) -> int:
"""
Type a single unicode char.
char: str, len(char) must equal to 1.
Return int, the number of events that it successfully inserted into the keyboard or mouse input stream.
If the function returns zero, the input was already blocked by another thread.
"""
return SendInput(KeyboardInput(0, ord(char), KeyboardEventFlag.KeyUnicode | KeyboardEventFlag.KeyDown), KeyboardInput(0, ord(char), KeyboardEventFlag.KeyUnicode | KeyboardEventFlag.KeyUp)) |
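
`SendUnicodeChar` above types exactly one character per call (a key-down/key-up pair with the unicode flag) and returns 0 if the input stream was blocked. A hedged sketch of typing a whole string on top of such a function; the `send_char` callable is injected so the snippet stays platform independent, and `fake_send_char` only simulates the return-value contract described in the docstring:

```python
# Sketch: type a whole string one character at a time with a
# SendUnicodeChar-style function (same contract as above: non-zero event
# count on success, 0 when the input stream is blocked).
def type_text(send_char, text: str) -> bool:
    for ch in text:
        if send_char(ch) == 0:
            return False  # input was blocked, stop typing
    return True

# Stand-in for demonstration; on Windows this would be the real function.
def fake_send_char(ch: str) -> int:
    assert len(ch) == 1
    print("typed", repr(ch))
    return 2  # one key-down + one key-up event inserted

type_text(fake_send_char, "héllo")
```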
def put(self, pid, record, **kwargs):
"""Replace a record.
Permissions: ``update_permission_factory``
The body should be a JSON object, which will fully replace the current
record metadata.
Procedure description:
#. The ETag is checked.
#. The record is updated by calling the record API `clear()`,
`update()` and then `commit()`.
#. The HTTP response is built with the help of the link factory.
:param pid: Persistent identifier for record.
:param record: Record object.
:returns: The modified record.
"""
if request.mimetype not in self.loaders:
raise UnsupportedMediaRESTError(request.mimetype)
data = self.loaders[request.mimetype]()
if data is None:
raise InvalidDataRESTError()
self.check_etag(str(record.revision_id))
record.clear()
record.update(data)
record.commit()
db.session.commit()
if self.indexer_class:
self.indexer_class().index(record)
return self.make_response(
pid, record, links_factory=self.links_factory) | def function[put, parameter[self, pid, record]]:
constant[Replace a record.
Permissions: ``update_permission_factory``
The body should be a JSON object, which will fully replace the current
record metadata.
Procedure description:
#. The ETag is checked.
#. The record is updated by calling the record API `clear()`,
`update()` and then `commit()`.
#. The HTTP response is built with the help of the link factory.
:param pid: Persistent identifier for record.
:param record: Record object.
:returns: The modified record.
]
if compare[name[request].mimetype <ast.NotIn object at 0x7da2590d7190> name[self].loaders] begin[:]
<ast.Raise object at 0x7da1b0341c30>
variable[data] assign[=] call[call[name[self].loaders][name[request].mimetype], parameter[]]
if compare[name[data] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0342710>
call[name[self].check_etag, parameter[call[name[str], parameter[name[record].revision_id]]]]
call[name[record].clear, parameter[]]
call[name[record].update, parameter[name[data]]]
call[name[record].commit, parameter[]]
call[name[db].session.commit, parameter[]]
if name[self].indexer_class begin[:]
call[call[name[self].indexer_class, parameter[]].index, parameter[name[record]]]
return[call[name[self].make_response, parameter[name[pid], name[record]]]] | keyword[def] identifier[put] ( identifier[self] , identifier[pid] , identifier[record] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[request] . identifier[mimetype] keyword[not] keyword[in] identifier[self] . identifier[loaders] :
keyword[raise] identifier[UnsupportedMediaRESTError] ( identifier[request] . identifier[mimetype] )
identifier[data] = identifier[self] . identifier[loaders] [ identifier[request] . identifier[mimetype] ]()
keyword[if] identifier[data] keyword[is] keyword[None] :
keyword[raise] identifier[InvalidDataRESTError] ()
identifier[self] . identifier[check_etag] ( identifier[str] ( identifier[record] . identifier[revision_id] ))
identifier[record] . identifier[clear] ()
identifier[record] . identifier[update] ( identifier[data] )
identifier[record] . identifier[commit] ()
identifier[db] . identifier[session] . identifier[commit] ()
keyword[if] identifier[self] . identifier[indexer_class] :
identifier[self] . identifier[indexer_class] (). identifier[index] ( identifier[record] )
keyword[return] identifier[self] . identifier[make_response] (
identifier[pid] , identifier[record] , identifier[links_factory] = identifier[self] . identifier[links_factory] ) | def put(self, pid, record, **kwargs):
"""Replace a record.
Permissions: ``update_permission_factory``
The body should be a JSON object, which will fully replace the current
record metadata.
Procedure description:
#. The ETag is checked.
#. The record is updated by calling the record API `clear()`,
`update()` and then `commit()`.
#. The HTTP response is built with the help of the link factory.
:param pid: Persistent identifier for record.
:param record: Record object.
:returns: The modified record.
"""
if request.mimetype not in self.loaders:
raise UnsupportedMediaRESTError(request.mimetype) # depends on [control=['if'], data=[]]
data = self.loaders[request.mimetype]()
if data is None:
raise InvalidDataRESTError() # depends on [control=['if'], data=[]]
self.check_etag(str(record.revision_id))
record.clear()
record.update(data)
record.commit()
db.session.commit()
if self.indexer_class:
self.indexer_class().index(record) # depends on [control=['if'], data=[]]
return self.make_response(pid, record, links_factory=self.links_factory) |
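
The `put` row above implements a full replace: verify the ETag against the record revision, then `clear()`, `update()` and `commit()`. A minimal sketch of that flow with a dict-backed record instead of Invenio's record and database objects (the `FakeRecord` and `PreconditionFailed` names are made up for illustration):

```python
# Sketch of the ETag-checked full-replace flow from the put() handler above.
class PreconditionFailed(Exception):
    pass

class FakeRecord(dict):
    def __init__(self, data, revision_id=0):
        super().__init__(data)
        self.revision_id = revision_id
    def commit(self):
        self.revision_id += 1  # a real record would also persist itself

def replace_record(record, new_data, if_match_etag):
    # 1. ETag check: the client must reference the revision it last saw.
    if if_match_etag != str(record.revision_id):
        raise PreconditionFailed("stale ETag")
    # 2. Full replace: clear the old metadata, then apply the new body.
    record.clear()
    record.update(new_data)
    # 3. Commit bumps the revision (and would trigger re-indexing).
    record.commit()
    return record

rec = FakeRecord({"title": "old"}, revision_id=3)
replace_record(rec, {"title": "new", "year": 2020}, if_match_etag="3")
print(dict(rec), rec.revision_id)  # {'title': 'new', 'year': 2020} 4
```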
def load(self, dbfile, password=None, keyfile=None, readonly=False):
"""
Load the database from file/stream.
:param dbfile: The database file path/stream.
:type dbfile: str or file-like object
:param password: The password for the database.
:type password: str
:param keyfile: Path to a keyfile (or a stream) that can be used instead of or in conjunction with password for database.
:type keyfile: str or file-like object
:param readonly: Whether to open the database read-only.
:type readonly: bool
"""
self._clear()
buf = None
is_stream = hasattr(dbfile, 'read')
if is_stream:
buf = dbfile.read()
else:
if not os.path.exists(dbfile):
raise IOError("File does not exist: {0}".format(dbfile))
with open(dbfile, 'rb') as fp:
buf = fp.read()
self.load_from_buffer(buf, password=password, keyfile=keyfile, readonly=readonly)
# One we have successfully loaded the file, go ahead and set the internal attribute
# (in the LockingDatabase subclass, this will effectivley take out the lock on the file)
if not is_stream:
self.filepath = dbfile | def function[load, parameter[self, dbfile, password, keyfile, readonly]]:
constant[
Load the database from file/stream.
:param dbfile: The database file path/stream.
:type dbfile: str or file-like object
:param password: The password for the database.
:type password: str
:param keyfile: Path to a keyfile (or a stream) that can be used instead of or in conjunction with password for database.
:type keyfile: str or file-like object
:param readonly: Whether to open the database read-only.
:type readonly: bool
]
call[name[self]._clear, parameter[]]
variable[buf] assign[=] constant[None]
variable[is_stream] assign[=] call[name[hasattr], parameter[name[dbfile], constant[read]]]
if name[is_stream] begin[:]
variable[buf] assign[=] call[name[dbfile].read, parameter[]]
call[name[self].load_from_buffer, parameter[name[buf]]]
if <ast.UnaryOp object at 0x7da18fe92920> begin[:]
name[self].filepath assign[=] name[dbfile] | keyword[def] identifier[load] ( identifier[self] , identifier[dbfile] , identifier[password] = keyword[None] , identifier[keyfile] = keyword[None] , identifier[readonly] = keyword[False] ):
literal[string]
identifier[self] . identifier[_clear] ()
identifier[buf] = keyword[None]
identifier[is_stream] = identifier[hasattr] ( identifier[dbfile] , literal[string] )
keyword[if] identifier[is_stream] :
identifier[buf] = identifier[dbfile] . identifier[read] ()
keyword[else] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[dbfile] ):
keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[dbfile] ))
keyword[with] identifier[open] ( identifier[dbfile] , literal[string] ) keyword[as] identifier[fp] :
identifier[buf] = identifier[fp] . identifier[read] ()
identifier[self] . identifier[load_from_buffer] ( identifier[buf] , identifier[password] = identifier[password] , identifier[keyfile] = identifier[keyfile] , identifier[readonly] = identifier[readonly] )
keyword[if] keyword[not] identifier[is_stream] :
identifier[self] . identifier[filepath] = identifier[dbfile] | def load(self, dbfile, password=None, keyfile=None, readonly=False):
"""
Load the database from file/stream.
:param dbfile: The database file path/stream.
:type dbfile: str or file-like object
:param password: The password for the database.
:type password: str
:param keyfile: Path to a keyfile (or a stream) that can be used instead of or in conjunction with password for database.
:type keyfile: str or file-like object
:param readonly: Whether to open the database read-only.
:type readonly: bool
"""
self._clear()
buf = None
is_stream = hasattr(dbfile, 'read')
if is_stream:
buf = dbfile.read() # depends on [control=['if'], data=[]]
else:
if not os.path.exists(dbfile):
raise IOError('File does not exist: {0}'.format(dbfile)) # depends on [control=['if'], data=[]]
with open(dbfile, 'rb') as fp:
buf = fp.read() # depends on [control=['with'], data=['fp']]
self.load_from_buffer(buf, password=password, keyfile=keyfile, readonly=readonly)
# One we have successfully loaded the file, go ahead and set the internal attribute
# (in the LockingDatabase subclass, this will effectivley take out the lock on the file)
if not is_stream:
self.filepath = dbfile # depends on [control=['if'], data=[]] |
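
The `load` row above accepts either a filesystem path or a file-like object and reads the raw bytes the same way in both cases. That path-or-stream dispatch is a reusable pattern on its own; a small standalone version (the function name is chosen for illustration):

```python
# Sketch of the "path or stream" dispatch used by load() above.
import io
import os

def read_bytes(source) -> bytes:
    """Return the raw bytes of `source`, which may be a path or a
    file-like object (anything with a .read() method)."""
    if hasattr(source, "read"):
        return source.read()
    if not os.path.exists(source):
        raise IOError("File does not exist: {0}".format(source))
    with open(source, "rb") as fp:
        return fp.read()

print(read_bytes(io.BytesIO(b"\x03\xd9\xa2\x9a")))  # stream input works
# read_bytes("db.kdbx") would read from disk instead.
```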
def neutron(*arg):
"""
Neutron annotation for adding function to process neutron notification.
if event_type include wildcard, will put {pattern: function} into process_wildcard dict
else will put {event_type: function} into process dict
:param arg: event_type of notification
"""
check_event_type(Openstack.Neutron, *arg)
event_type = arg[0]
def decorator(func):
if event_type.find("*") != -1:
event_type_pattern = pre_compile(event_type)
neutron_customer_process_wildcard[event_type_pattern] = func
else:
neutron_customer_process[event_type] = func
log.info("add function {0} to process event_type:{1}".format(func.__name__, event_type))
@functools.wraps(func)
def wrapper(*args, **kwargs):
func(*args, **kwargs)
return wrapper
return decorator | def function[neutron, parameter[]]:
constant[
Neutron annotation for adding function to process neutron notification.
if event_type include wildcard, will put {pattern: function} into process_wildcard dict
else will put {event_type: function} into process dict
:param arg: event_type of notification
]
call[name[check_event_type], parameter[name[Openstack].Neutron, <ast.Starred object at 0x7da1b021e050>]]
variable[event_type] assign[=] call[name[arg]][constant[0]]
def function[decorator, parameter[func]]:
if compare[call[name[event_type].find, parameter[constant[*]]] not_equal[!=] <ast.UnaryOp object at 0x7da1b021dea0>] begin[:]
variable[event_type_pattern] assign[=] call[name[pre_compile], parameter[name[event_type]]]
call[name[neutron_customer_process_wildcard]][name[event_type_pattern]] assign[=] name[func]
call[name[log].info, parameter[call[constant[add function {0} to process event_type:{1}].format, parameter[name[func].__name__, name[event_type]]]]]
def function[wrapper, parameter[]]:
call[name[func], parameter[<ast.Starred object at 0x7da1b021fd90>]]
return[name[wrapper]]
return[name[decorator]] | keyword[def] identifier[neutron] (* identifier[arg] ):
literal[string]
identifier[check_event_type] ( identifier[Openstack] . identifier[Neutron] ,* identifier[arg] )
identifier[event_type] = identifier[arg] [ literal[int] ]
keyword[def] identifier[decorator] ( identifier[func] ):
keyword[if] identifier[event_type] . identifier[find] ( literal[string] )!=- literal[int] :
identifier[event_type_pattern] = identifier[pre_compile] ( identifier[event_type] )
identifier[neutron_customer_process_wildcard] [ identifier[event_type_pattern] ]= identifier[func]
keyword[else] :
identifier[neutron_customer_process] [ identifier[event_type] ]= identifier[func]
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[func] . identifier[__name__] , identifier[event_type] ))
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper]
keyword[return] identifier[decorator] | def neutron(*arg):
"""
Neutron annotation for adding function to process neutron notification.
if event_type include wildcard, will put {pattern: function} into process_wildcard dict
else will put {event_type: function} into process dict
:param arg: event_type of notification
"""
check_event_type(Openstack.Neutron, *arg)
event_type = arg[0]
def decorator(func):
if event_type.find('*') != -1:
event_type_pattern = pre_compile(event_type)
neutron_customer_process_wildcard[event_type_pattern] = func # depends on [control=['if'], data=[]]
else:
neutron_customer_process[event_type] = func
log.info('add function {0} to process event_type:{1}'.format(func.__name__, event_type))
@functools.wraps(func)
def wrapper(*args, **kwargs):
func(*args, **kwargs)
return wrapper
return decorator |
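
The `neutron` decorator above registers handlers by event type, keeping exact matches and wildcard patterns in separate dictionaries. A self-contained sketch of that registry idea, using `fnmatch.translate` in place of the project's `pre_compile` helper (the dictionary and function names below are assumptions, not the original code):

```python
# Sketch of an event-type handler registry with wildcard support,
# mirroring the exact-match / wildcard split in the decorator above.
import fnmatch
import functools
import re

exact_handlers = {}     # event_type -> function
wildcard_handlers = {}  # compiled pattern -> function

def on_event(event_type):
    def decorator(func):
        if "*" in event_type:
            wildcard_handlers[re.compile(fnmatch.translate(event_type))] = func
        else:
            exact_handlers[event_type] = func
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper
    return decorator

@on_event("port.create.end")
def on_port_create(payload):
    print("port created", payload)

@on_event("router.*")
def on_router_anything(payload):
    print("router event", payload)

def dispatch(event_type, payload):
    if event_type in exact_handlers:
        return exact_handlers[event_type](payload)
    for pattern, func in wildcard_handlers.items():
        if pattern.match(event_type):
            return func(payload)

dispatch("port.create.end", {"id": 1})
dispatch("router.interface.delete", {"id": 2})
```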
def marginalize(self, variables, inplace=True):
u"""
Modifies the factor with marginalized values.
Let C(X,Y ; K, h, g) be some canonical form over X,Y where,
k = [[K_XX, K_XY], ; h = [[h_X],
[K_YX, K_YY]] [h_Y]]
In this case, the result of the integration operation is a canonical
from C (K', h', g') given by,
.. math:: K' = K_{XX} - K_{XY} * {K^{-1}}_{YY} * K_YX
.. math:: h' = h_X - K_{XY} * {K^{-1}}_{YY} * h_Y
.. math:: g' = g + 0.5 * (|Y| * log(2*pi) - log(|K_{YY}|) + {h^T}_Y * K_{YY} * h_Y)
Parameters
----------
variables: list or array-like
List of variables over which to marginalize.
inplace: boolean
If inplace=True it will modify the distribution itself,
else would return a new distribution.
Returns
-------
CanonicalDistribution or None :
if inplace=True (default) returns None
if inplace=False return a new CanonicalDistribution instance
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.continuous import CanonicalDistribution
>>> phi = CanonicalDistribution(['X1', 'X2', 'X3'],
... np.array([[1, -1, 0], [-1, 4, -2], [0, -2, 4]]),
... np.array([[1], [4], [-1]]), -2)
>>> phi.K
array([[ 1, -1, 0],
[-1, 4, -2],
[ 0, -2, 4]])
>>> phi.h
array([[ 1],
[ 4],
[-1]])
>>> phi.g
-2
>>> phi.marginalize(['X3'])
>>> phi.K
array([[ 1., -1.],
[-1., 3.]])
>>> phi.h
array([[ 1. ],
[ 3.5]])
>>> phi.g
0.22579135
"""
if not isinstance(variables, (list, tuple, np.ndarray)):
raise TypeError("variables: Expected type list or array-like, "
"got type {var_type}".format(var_type=type(variables)))
if not all([var in self.variables for var in variables]):
raise ValueError("Variable not in scope.")
phi = self if inplace else self.copy()
# index_to_keep -> i vector
index_to_keep = [self.variables.index(var) for var in self.variables
if var not in variables]
# index_to_marginalize -> j vector
index_to_marginalize = [self.variables.index(var) for var in variables]
K_i_i = self.K[np.ix_(index_to_keep, index_to_keep)]
K_i_j = self.K[np.ix_(index_to_keep, index_to_marginalize)]
K_j_i = self.K[np.ix_(index_to_marginalize, index_to_keep)]
K_j_j = self.K[np.ix_(index_to_marginalize, index_to_marginalize)]
K_j_j_inv = np.linalg.inv(K_j_j)
h_i = self.h[index_to_keep]
h_j = self.h[index_to_marginalize]
phi.variables = [self.variables[index] for index in index_to_keep]
phi.K = K_i_i - np.dot(np.dot(K_i_j, K_j_j_inv), K_j_i)
phi.h = h_i - np.dot(np.dot(K_i_j, K_j_j_inv), h_j)
phi.g = self.g + 0.5 * (len(variables) * np.log(2 * np.pi) -
np.log(abs(np.linalg.det(K_j_j))) + np.dot(np.dot(h_j.T, K_j_j), h_j))[0][0]
if not inplace:
return phi | def function[marginalize, parameter[self, variables, inplace]]:
constant[
Modifies the factor with marginalized values.
Let C(X,Y ; K, h, g) be some canonical form over X,Y where,
k = [[K_XX, K_XY], ; h = [[h_X],
[K_YX, K_YY]] [h_Y]]
In this case, the result of the integration operation is a canonical
from C (K', h', g') given by,
.. math:: K' = K_{XX} - K_{XY} * {K^{-1}}_{YY} * K_YX
.. math:: h' = h_X - K_{XY} * {K^{-1}}_{YY} * h_Y
.. math:: g' = g + 0.5 * (|Y| * log(2*pi) - log(|K_{YY}|) + {h^T}_Y * K_{YY} * h_Y)
Parameters
----------
variables: list or array-like
List of variables over which to marginalize.
inplace: boolean
If inplace=True it will modify the distribution itself,
else would return a new distribution.
Returns
-------
CanonicalDistribution or None :
if inplace=True (default) returns None
if inplace=False return a new CanonicalDistribution instance
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.continuous import CanonicalDistribution
>>> phi = CanonicalDistribution(['X1', 'X2', 'X3'],
... np.array([[1, -1, 0], [-1, 4, -2], [0, -2, 4]]),
... np.array([[1], [4], [-1]]), -2)
>>> phi.K
array([[ 1, -1, 0],
[-1, 4, -2],
[ 0, -2, 4]])
>>> phi.h
array([[ 1],
[ 4],
[-1]])
>>> phi.g
-2
>>> phi.marginalize(['X3'])
>>> phi.K
array([[ 1., -1.],
[-1., 3.]])
>>> phi.h
array([[ 1. ],
[ 3.5]])
>>> phi.g
0.22579135
]
if <ast.UnaryOp object at 0x7da20c6aaa70> begin[:]
<ast.Raise object at 0x7da20c6a9120>
if <ast.UnaryOp object at 0x7da20c6a8070> begin[:]
<ast.Raise object at 0x7da20c6abe50>
variable[phi] assign[=] <ast.IfExp object at 0x7da20c6a8400>
variable[index_to_keep] assign[=] <ast.ListComp object at 0x7da20c6aaad0>
variable[index_to_marginalize] assign[=] <ast.ListComp object at 0x7da20c6ab2e0>
variable[K_i_i] assign[=] call[name[self].K][call[name[np].ix_, parameter[name[index_to_keep], name[index_to_keep]]]]
variable[K_i_j] assign[=] call[name[self].K][call[name[np].ix_, parameter[name[index_to_keep], name[index_to_marginalize]]]]
variable[K_j_i] assign[=] call[name[self].K][call[name[np].ix_, parameter[name[index_to_marginalize], name[index_to_keep]]]]
variable[K_j_j] assign[=] call[name[self].K][call[name[np].ix_, parameter[name[index_to_marginalize], name[index_to_marginalize]]]]
variable[K_j_j_inv] assign[=] call[name[np].linalg.inv, parameter[name[K_j_j]]]
variable[h_i] assign[=] call[name[self].h][name[index_to_keep]]
variable[h_j] assign[=] call[name[self].h][name[index_to_marginalize]]
name[phi].variables assign[=] <ast.ListComp object at 0x7da20c6aa770>
name[phi].K assign[=] binary_operation[name[K_i_i] - call[name[np].dot, parameter[call[name[np].dot, parameter[name[K_i_j], name[K_j_j_inv]]], name[K_j_i]]]]
name[phi].h assign[=] binary_operation[name[h_i] - call[name[np].dot, parameter[call[name[np].dot, parameter[name[K_i_j], name[K_j_j_inv]]], name[h_j]]]]
name[phi].g assign[=] binary_operation[name[self].g + binary_operation[constant[0.5] * call[call[binary_operation[binary_operation[binary_operation[call[name[len], parameter[name[variables]]] * call[name[np].log, parameter[binary_operation[constant[2] * name[np].pi]]]] - call[name[np].log, parameter[call[name[abs], parameter[call[name[np].linalg.det, parameter[name[K_j_j]]]]]]]] + call[name[np].dot, parameter[call[name[np].dot, parameter[name[h_j].T, name[K_j_j]]], name[h_j]]]]][constant[0]]][constant[0]]]]
if <ast.UnaryOp object at 0x7da18ede6aa0> begin[:]
return[name[phi]] | keyword[def] identifier[marginalize] ( identifier[self] , identifier[variables] , identifier[inplace] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[variables] ,( identifier[list] , identifier[tuple] , identifier[np] . identifier[ndarray] )):
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] . identifier[format] ( identifier[var_type] = identifier[type] ( identifier[variables] )))
keyword[if] keyword[not] identifier[all] ([ identifier[var] keyword[in] identifier[self] . identifier[variables] keyword[for] identifier[var] keyword[in] identifier[variables] ]):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[phi] = identifier[self] keyword[if] identifier[inplace] keyword[else] identifier[self] . identifier[copy] ()
identifier[index_to_keep] =[ identifier[self] . identifier[variables] . identifier[index] ( identifier[var] ) keyword[for] identifier[var] keyword[in] identifier[self] . identifier[variables]
keyword[if] identifier[var] keyword[not] keyword[in] identifier[variables] ]
identifier[index_to_marginalize] =[ identifier[self] . identifier[variables] . identifier[index] ( identifier[var] ) keyword[for] identifier[var] keyword[in] identifier[variables] ]
identifier[K_i_i] = identifier[self] . identifier[K] [ identifier[np] . identifier[ix_] ( identifier[index_to_keep] , identifier[index_to_keep] )]
identifier[K_i_j] = identifier[self] . identifier[K] [ identifier[np] . identifier[ix_] ( identifier[index_to_keep] , identifier[index_to_marginalize] )]
identifier[K_j_i] = identifier[self] . identifier[K] [ identifier[np] . identifier[ix_] ( identifier[index_to_marginalize] , identifier[index_to_keep] )]
identifier[K_j_j] = identifier[self] . identifier[K] [ identifier[np] . identifier[ix_] ( identifier[index_to_marginalize] , identifier[index_to_marginalize] )]
identifier[K_j_j_inv] = identifier[np] . identifier[linalg] . identifier[inv] ( identifier[K_j_j] )
identifier[h_i] = identifier[self] . identifier[h] [ identifier[index_to_keep] ]
identifier[h_j] = identifier[self] . identifier[h] [ identifier[index_to_marginalize] ]
identifier[phi] . identifier[variables] =[ identifier[self] . identifier[variables] [ identifier[index] ] keyword[for] identifier[index] keyword[in] identifier[index_to_keep] ]
identifier[phi] . identifier[K] = identifier[K_i_i] - identifier[np] . identifier[dot] ( identifier[np] . identifier[dot] ( identifier[K_i_j] , identifier[K_j_j_inv] ), identifier[K_j_i] )
identifier[phi] . identifier[h] = identifier[h_i] - identifier[np] . identifier[dot] ( identifier[np] . identifier[dot] ( identifier[K_i_j] , identifier[K_j_j_inv] ), identifier[h_j] )
identifier[phi] . identifier[g] = identifier[self] . identifier[g] + literal[int] *( identifier[len] ( identifier[variables] )* identifier[np] . identifier[log] ( literal[int] * identifier[np] . identifier[pi] )-
identifier[np] . identifier[log] ( identifier[abs] ( identifier[np] . identifier[linalg] . identifier[det] ( identifier[K_j_j] )))+ identifier[np] . identifier[dot] ( identifier[np] . identifier[dot] ( identifier[h_j] . identifier[T] , identifier[K_j_j] ), identifier[h_j] ))[ literal[int] ][ literal[int] ]
keyword[if] keyword[not] identifier[inplace] :
keyword[return] identifier[phi] | def marginalize(self, variables, inplace=True):
u"""
Modifies the factor with marginalized values.
Let C(X,Y ; K, h, g) be some canonical form over X,Y where,
k = [[K_XX, K_XY], ; h = [[h_X],
[K_YX, K_YY]] [h_Y]]
In this case, the result of the integration operation is a canonical
from C (K', h', g') given by,
.. math:: K' = K_{XX} - K_{XY} * {K^{-1}}_{YY} * K_YX
.. math:: h' = h_X - K_{XY} * {K^{-1}}_{YY} * h_Y
.. math:: g' = g + 0.5 * (|Y| * log(2*pi) - log(|K_{YY}|) + {h^T}_Y * K_{YY} * h_Y)
Parameters
----------
variables: list or array-like
List of variables over which to marginalize.
inplace: boolean
If inplace=True it will modify the distribution itself,
else would return a new distribution.
Returns
-------
CanonicalDistribution or None :
if inplace=True (default) returns None
if inplace=False return a new CanonicalDistribution instance
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.continuous import CanonicalDistribution
>>> phi = CanonicalDistribution(['X1', 'X2', 'X3'],
... np.array([[1, -1, 0], [-1, 4, -2], [0, -2, 4]]),
... np.array([[1], [4], [-1]]), -2)
>>> phi.K
array([[ 1, -1, 0],
[-1, 4, -2],
[ 0, -2, 4]])
>>> phi.h
array([[ 1],
[ 4],
[-1]])
>>> phi.g
-2
>>> phi.marginalize(['X3'])
>>> phi.K
array([[ 1., -1.],
[-1., 3.]])
>>> phi.h
array([[ 1. ],
[ 3.5]])
>>> phi.g
0.22579135
"""
if not isinstance(variables, (list, tuple, np.ndarray)):
raise TypeError('variables: Expected type list or array-like, got type {var_type}'.format(var_type=type(variables))) # depends on [control=['if'], data=[]]
if not all([var in self.variables for var in variables]):
raise ValueError('Variable not in scope.') # depends on [control=['if'], data=[]]
phi = self if inplace else self.copy()
# index_to_keep -> i vector
index_to_keep = [self.variables.index(var) for var in self.variables if var not in variables]
# index_to_marginalize -> j vector
index_to_marginalize = [self.variables.index(var) for var in variables]
K_i_i = self.K[np.ix_(index_to_keep, index_to_keep)]
K_i_j = self.K[np.ix_(index_to_keep, index_to_marginalize)]
K_j_i = self.K[np.ix_(index_to_marginalize, index_to_keep)]
K_j_j = self.K[np.ix_(index_to_marginalize, index_to_marginalize)]
K_j_j_inv = np.linalg.inv(K_j_j)
h_i = self.h[index_to_keep]
h_j = self.h[index_to_marginalize]
phi.variables = [self.variables[index] for index in index_to_keep]
phi.K = K_i_i - np.dot(np.dot(K_i_j, K_j_j_inv), K_j_i)
phi.h = h_i - np.dot(np.dot(K_i_j, K_j_j_inv), h_j)
phi.g = self.g + 0.5 * (len(variables) * np.log(2 * np.pi) - np.log(abs(np.linalg.det(K_j_j))) + np.dot(np.dot(h_j.T, K_j_j), h_j))[0][0]
if not inplace:
return phi # depends on [control=['if'], data=[]] |
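
The `marginalize` row above applies the canonical-form integration formulas quoted in its docstring. A standalone numpy check that reproduces the docstring's own example, using the same K', h', g' expressions as the code (no pgmpy class required):

```python
# Reproduce the marginalization example from the docstring above with plain
# numpy: marginalize X3 (index 2) out of C(X1, X2, X3; K, h, g).
import numpy as np

K = np.array([[1., -1., 0.], [-1., 4., -2.], [0., -2., 4.]])
h = np.array([[1.], [4.], [-1.]])
g = -2.0

keep, marg = [0, 1], [2]            # i / j index vectors
K_ii = K[np.ix_(keep, keep)]
K_ij = K[np.ix_(keep, marg)]
K_ji = K[np.ix_(marg, keep)]
K_jj = K[np.ix_(marg, marg)]
K_jj_inv = np.linalg.inv(K_jj)
h_i, h_j = h[keep], h[marg]

K_new = K_ii - K_ij @ K_jj_inv @ K_ji
h_new = h_i - K_ij @ K_jj_inv @ h_j
g_new = g + 0.5 * (len(marg) * np.log(2 * np.pi)
                   - np.log(abs(np.linalg.det(K_jj)))
                   + (h_j.T @ K_jj @ h_j))[0][0]

print(K_new)  # [[ 1. -1.]
              #  [-1.  3.]]
print(h_new)  # [[1. ]
              #  [3.5]]
print(g_new)  # ~0.22579135
```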
def Size(self):
"""
Get the total size in bytes of the object.
Returns:
int: size.
"""
# Items should be an array of type CoinState, not of ints!
corrected_items = list(map(lambda i: CoinState(i), self.Items))
return super(UnspentCoinState, self).Size() + GetVarSize(corrected_items) | def function[Size, parameter[self]]:
constant[
Get the total size in bytes of the object.
Returns:
int: size.
]
variable[corrected_items] assign[=] call[name[list], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da20e9b0220>, name[self].Items]]]]
return[binary_operation[call[call[name[super], parameter[name[UnspentCoinState], name[self]]].Size, parameter[]] + call[name[GetVarSize], parameter[name[corrected_items]]]]] | keyword[def] identifier[Size] ( identifier[self] ):
literal[string]
identifier[corrected_items] = identifier[list] ( identifier[map] ( keyword[lambda] identifier[i] : identifier[CoinState] ( identifier[i] ), identifier[self] . identifier[Items] ))
keyword[return] identifier[super] ( identifier[UnspentCoinState] , identifier[self] ). identifier[Size] ()+ identifier[GetVarSize] ( identifier[corrected_items] ) | def Size(self):
"""
Get the total size in bytes of the object.
Returns:
int: size.
"""
# Items should be an array of type CoinState, not of ints!
corrected_items = list(map(lambda i: CoinState(i), self.Items))
return super(UnspentCoinState, self).Size() + GetVarSize(corrected_items) |
def enable_autozoom(self, option):
"""Set ``autozoom`` behavior.
Parameters
----------
option : {'on', 'override', 'once', 'off'}
Option for zoom behavior. A list of acceptable options can
also be obtained by :meth:`get_autozoom_options`.
Raises
------
ginga.ImageView.ImageViewError
Invalid option.
"""
option = option.lower()
assert(option in self.autozoom_options), \
ImageViewError("Bad autozoom option '%s': must be one of %s" % (
option, str(self.autozoom_options)))
self.t_.set(autozoom=option) | def function[enable_autozoom, parameter[self, option]]:
constant[Set ``autozoom`` behavior.
Parameters
----------
option : {'on', 'override', 'once', 'off'}
Option for zoom behavior. A list of acceptable options can
also be obtained by :meth:`get_autozoom_options`.
Raises
------
ginga.ImageView.ImageViewError
Invalid option.
]
variable[option] assign[=] call[name[option].lower, parameter[]]
assert[compare[name[option] in name[self].autozoom_options]]
call[name[self].t_.set, parameter[]] | keyword[def] identifier[enable_autozoom] ( identifier[self] , identifier[option] ):
literal[string]
identifier[option] = identifier[option] . identifier[lower] ()
keyword[assert] ( identifier[option] keyword[in] identifier[self] . identifier[autozoom_options] ), identifier[ImageViewError] ( literal[string] %(
identifier[option] , identifier[str] ( identifier[self] . identifier[autozoom_options] )))
identifier[self] . identifier[t_] . identifier[set] ( identifier[autozoom] = identifier[option] ) | def enable_autozoom(self, option):
"""Set ``autozoom`` behavior.
Parameters
----------
option : {'on', 'override', 'once', 'off'}
Option for zoom behavior. A list of acceptable options can
also be obtained by :meth:`get_autozoom_options`.
Raises
------
ginga.ImageView.ImageViewError
Invalid option.
"""
option = option.lower()
assert option in self.autozoom_options, ImageViewError("Bad autozoom option '%s': must be one of %s" % (option, str(self.autozoom_options)))
self.t_.set(autozoom=option) |
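
`enable_autozoom` above normalises the option to lowercase and checks it against the set of allowed values before storing it. A simplified standalone version of that validation step; unlike the assertion in the code above, this sketch raises a plain ValueError, and the option tuple is taken from the docstring:

```python
# Sketch of the option validation pattern used by enable_autozoom above:
# normalise to lowercase, then check membership in the allowed set.
AUTOZOOM_OPTIONS = ("on", "override", "once", "off")  # from the docstring

def validate_autozoom(option: str) -> str:
    option = option.lower()
    if option not in AUTOZOOM_OPTIONS:
        raise ValueError("Bad autozoom option '%s': must be one of %s"
                         % (option, AUTOZOOM_OPTIONS))
    return option

print(validate_autozoom("Override"))  # 'override'
# validate_autozoom("sometimes") -> ValueError
```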
def extract_module(self, path: str, freeze: bool = True) -> Module:
"""
This method can be used to load a module from the pretrained model archive.
It is also used implicitly in FromParams based construction. So instead of using standard
params to construct a module, you can instead load a pretrained module from the model
archive directly. For eg, instead of using params like {"type": "module_type", ...}, you
can use the following template::
{
"_pretrained": {
"archive_file": "../path/to/model.tar.gz",
"path": "path.to.module.in.model",
"freeze": False
}
}
If you use this feature with FromParams, take care of the following caveat: Call to
initializer(self) at end of model initializer can potentially wipe the transferred parameters
by reinitializing them. This can happen if you have setup initializer regex that also
matches parameters of the transferred module. To safe-guard against this, you can either
update your initializer regex to prevent conflicting match or add extra initializer::
[
[".*transferred_module_name.*", "prevent"]]
]
Parameters
----------
path : ``str``, required
Path of target module to be loaded from the model.
Eg. "_textfield_embedder.token_embedder_tokens"
freeze : ``bool``, optional (default=True)
Whether to freeze the module parameters or not.
"""
modules_dict = {path: module for path, module in self.model.named_modules()}
module = modules_dict.get(path, None)
if not module:
raise ConfigurationError(f"You asked to transfer module at path {path} from "
f"the model {type(self.model)}. But it's not present.")
if not isinstance(module, Module):
raise ConfigurationError(f"The transferred object from model {type(self.model)} at path "
f"{path} is not a PyTorch Module.")
for parameter in module.parameters(): # type: ignore
parameter.requires_grad_(not freeze)
return module | def function[extract_module, parameter[self, path, freeze]]:
constant[
This method can be used to load a module from the pretrained model archive.
It is also used implicitly in FromParams based construction. So instead of using standard
params to construct a module, you can instead load a pretrained module from the model
archive directly. For eg, instead of using params like {"type": "module_type", ...}, you
can use the following template::
{
"_pretrained": {
"archive_file": "../path/to/model.tar.gz",
"path": "path.to.module.in.model",
"freeze": False
}
}
If you use this feature with FromParams, take care of the following caveat: Call to
initializer(self) at end of model initializer can potentially wipe the transferred parameters
by reinitializing them. This can happen if you have setup initializer regex that also
matches parameters of the transferred module. To safe-guard against this, you can either
update your initializer regex to prevent conflicting match or add extra initializer::
[
[".*transferred_module_name.*", "prevent"]]
]
Parameters
----------
path : ``str``, required
Path of target module to be loaded from the model.
Eg. "_textfield_embedder.token_embedder_tokens"
freeze : ``bool``, optional (default=True)
Whether to freeze the module parameters or not.
]
variable[modules_dict] assign[=] <ast.DictComp object at 0x7da18f8117b0>
variable[module] assign[=] call[name[modules_dict].get, parameter[name[path], constant[None]]]
if <ast.UnaryOp object at 0x7da18f812cb0> begin[:]
<ast.Raise object at 0x7da18f812980>
if <ast.UnaryOp object at 0x7da18f812ec0> begin[:]
<ast.Raise object at 0x7da18f8121d0>
for taget[name[parameter]] in starred[call[name[module].parameters, parameter[]]] begin[:]
call[name[parameter].requires_grad_, parameter[<ast.UnaryOp object at 0x7da18f8107c0>]]
return[name[module]] | keyword[def] identifier[extract_module] ( identifier[self] , identifier[path] : identifier[str] , identifier[freeze] : identifier[bool] = keyword[True] )-> identifier[Module] :
literal[string]
identifier[modules_dict] ={ identifier[path] : identifier[module] keyword[for] identifier[path] , identifier[module] keyword[in] identifier[self] . identifier[model] . identifier[named_modules] ()}
identifier[module] = identifier[modules_dict] . identifier[get] ( identifier[path] , keyword[None] )
keyword[if] keyword[not] identifier[module] :
keyword[raise] identifier[ConfigurationError] ( literal[string]
literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[module] , identifier[Module] ):
keyword[raise] identifier[ConfigurationError] ( literal[string]
literal[string] )
keyword[for] identifier[parameter] keyword[in] identifier[module] . identifier[parameters] ():
identifier[parameter] . identifier[requires_grad_] ( keyword[not] identifier[freeze] )
keyword[return] identifier[module] | def extract_module(self, path: str, freeze: bool=True) -> Module:
"""
This method can be used to load a module from the pretrained model archive.
It is also used implicitly in FromParams based construction. So instead of using standard
params to construct a module, you can instead load a pretrained module from the model
archive directly. For eg, instead of using params like {"type": "module_type", ...}, you
can use the following template::
{
"_pretrained": {
"archive_file": "../path/to/model.tar.gz",
"path": "path.to.module.in.model",
"freeze": False
}
}
If you use this feature with FromParams, take care of the following caveat: Call to
initializer(self) at end of model initializer can potentially wipe the transferred parameters
by reinitializing them. This can happen if you have setup initializer regex that also
matches parameters of the transferred module. To safe-guard against this, you can either
update your initializer regex to prevent conflicting match or add extra initializer::
[
[".*transferred_module_name.*", "prevent"]]
]
Parameters
----------
path : ``str``, required
Path of target module to be loaded from the model.
Eg. "_textfield_embedder.token_embedder_tokens"
freeze : ``bool``, optional (default=True)
Whether to freeze the module parameters or not.
"""
modules_dict = {path: module for (path, module) in self.model.named_modules()}
module = modules_dict.get(path, None)
if not module:
raise ConfigurationError(f"You asked to transfer module at path {path} from the model {type(self.model)}. But it's not present.") # depends on [control=['if'], data=[]]
if not isinstance(module, Module):
raise ConfigurationError(f'The transferred object from model {type(self.model)} at path {path} is not a PyTorch Module.') # depends on [control=['if'], data=[]]
for parameter in module.parameters(): # type: ignore
parameter.requires_grad_(not freeze) # depends on [control=['for'], data=['parameter']]
return module |
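
The `extract_module` row above looks a submodule up by its dotted path in `named_modules()` and optionally freezes its parameters. A minimal PyTorch sketch of that lookup-and-freeze step, assuming `torch` is installed; the throwaway `nn.Sequential` model and the function name are for illustration only:

```python
# Sketch: fetch a submodule by its dotted name and freeze its parameters,
# as extract_module() does when transferring from a pretrained archive.
import torch.nn as nn

model = nn.Sequential()
model.add_module("encoder", nn.Linear(4, 8))
model.add_module("decoder", nn.Linear(8, 4))

def extract_submodule(model: nn.Module, path: str, freeze: bool = True) -> nn.Module:
    modules = {name: module for name, module in model.named_modules()}
    if path not in modules:
        raise KeyError("no module at path %r" % path)
    module = modules[path]
    for parameter in module.parameters():
        parameter.requires_grad_(not freeze)
    return module

encoder = extract_submodule(model, "encoder", freeze=True)
print(all(not p.requires_grad for p in encoder.parameters()))   # True, frozen
print(all(p.requires_grad for p in model.decoder.parameters()))  # True, untouched
```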
def token_from_fragment(self, authorization_response):
"""Parse token from the URI fragment, used by MobileApplicationClients.
:param authorization_response: The full URL of the redirect back to you
:return: A token dict
"""
self._client.parse_request_uri_response(
authorization_response, state=self._state
)
self.token = self._client.token
return self.token | def function[token_from_fragment, parameter[self, authorization_response]]:
constant[Parse token from the URI fragment, used by MobileApplicationClients.
:param authorization_response: The full URL of the redirect back to you
:return: A token dict
]
call[name[self]._client.parse_request_uri_response, parameter[name[authorization_response]]]
name[self].token assign[=] name[self]._client.token
return[name[self].token] | keyword[def] identifier[token_from_fragment] ( identifier[self] , identifier[authorization_response] ):
literal[string]
identifier[self] . identifier[_client] . identifier[parse_request_uri_response] (
identifier[authorization_response] , identifier[state] = identifier[self] . identifier[_state]
)
identifier[self] . identifier[token] = identifier[self] . identifier[_client] . identifier[token]
keyword[return] identifier[self] . identifier[token] | def token_from_fragment(self, authorization_response):
"""Parse token from the URI fragment, used by MobileApplicationClients.
:param authorization_response: The full URL of the redirect back to you
:return: A token dict
"""
self._client.parse_request_uri_response(authorization_response, state=self._state)
self.token = self._client.token
return self.token |
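
`token_from_fragment` above extracts an OAuth2 token from the URI fragment of the redirect, as used by the implicit (MobileApplicationClient) flow. A hedged illustration of what that parsing amounts to, using only the standard library rather than oauthlib; the redirect URL, token value and state below are made-up example data:

```python
# Sketch: pull the token parameters out of a redirect URI fragment, which
# is what the MobileApplicationClient parsing above boils down to.
from urllib.parse import urlsplit, parse_qs

redirect = ("https://app.example.com/callback"
            "#access_token=2YotnFZFEjr1zCsicMWpAA&token_type=bearer"
            "&expires_in=3600&state=xyz")

def token_from_fragment(url: str, expected_state: str) -> dict:
    fragment = urlsplit(url).fragment
    params = {k: v[0] for k, v in parse_qs(fragment).items()}
    if params.get("state") != expected_state:
        raise ValueError("state mismatch; possible CSRF")
    return params

print(token_from_fragment(redirect, expected_state="xyz"))
# {'access_token': '2YotnFZFEjr1zCsicMWpAA', 'token_type': 'bearer',
#  'expires_in': '3600', 'state': 'xyz'}
```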
def get_static_url():
"""Return a base static url, always ending with a /"""
path = getattr(settings, 'STATIC_URL', None)
if not path:
path = getattr(settings, 'MEDIA_URL', None)
if not path:
path = '/'
return path | def function[get_static_url, parameter[]]:
constant[Return a base static url, always ending with a /]
variable[path] assign[=] call[name[getattr], parameter[name[settings], constant[STATIC_URL], constant[None]]]
if <ast.UnaryOp object at 0x7da1b20b4310> begin[:]
variable[path] assign[=] call[name[getattr], parameter[name[settings], constant[MEDIA_URL], constant[None]]]
if <ast.UnaryOp object at 0x7da1b20b6230> begin[:]
variable[path] assign[=] constant[/]
return[name[path]] | keyword[def] identifier[get_static_url] ():
literal[string]
identifier[path] = identifier[getattr] ( identifier[settings] , literal[string] , keyword[None] )
keyword[if] keyword[not] identifier[path] :
identifier[path] = identifier[getattr] ( identifier[settings] , literal[string] , keyword[None] )
keyword[if] keyword[not] identifier[path] :
identifier[path] = literal[string]
keyword[return] identifier[path] | def get_static_url():
"""Return a base static url, always ending with a /"""
path = getattr(settings, 'STATIC_URL', None)
if not path:
path = getattr(settings, 'MEDIA_URL', None) # depends on [control=['if'], data=[]]
if not path:
path = '/' # depends on [control=['if'], data=[]]
return path |
def _gam(self):
""" Lorentz factor array
"""
log10gmin = np.log10(self.Eemin / mec2).value
log10gmax = np.log10(self.Eemax / mec2).value
return np.logspace(
log10gmin, log10gmax, int(self.nEed * (log10gmax - log10gmin))
) | def function[_gam, parameter[self]]:
constant[ Lorentz factor array
]
variable[log10gmin] assign[=] call[name[np].log10, parameter[binary_operation[name[self].Eemin / name[mec2]]]].value
variable[log10gmax] assign[=] call[name[np].log10, parameter[binary_operation[name[self].Eemax / name[mec2]]]].value
return[call[name[np].logspace, parameter[name[log10gmin], name[log10gmax], call[name[int], parameter[binary_operation[name[self].nEed * binary_operation[name[log10gmax] - name[log10gmin]]]]]]]] | keyword[def] identifier[_gam] ( identifier[self] ):
literal[string]
identifier[log10gmin] = identifier[np] . identifier[log10] ( identifier[self] . identifier[Eemin] / identifier[mec2] ). identifier[value]
identifier[log10gmax] = identifier[np] . identifier[log10] ( identifier[self] . identifier[Eemax] / identifier[mec2] ). identifier[value]
keyword[return] identifier[np] . identifier[logspace] (
identifier[log10gmin] , identifier[log10gmax] , identifier[int] ( identifier[self] . identifier[nEed] *( identifier[log10gmax] - identifier[log10gmin] ))
) | def _gam(self):
""" Lorentz factor array
"""
log10gmin = np.log10(self.Eemin / mec2).value
log10gmax = np.log10(self.Eemax / mec2).value
return np.logspace(log10gmin, log10gmax, int(self.nEed * (log10gmax - log10gmin))) |
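
The `_gam` row above builds a log-spaced Lorentz-factor grid between Eemin/mec2 and Eemax/mec2, with roughly `nEed` points per decade. A plain numpy illustration of the same grid construction; the energy bounds, the `nEed` value and the rounded physical constants are example numbers, not the library's defaults:

```python
# Sketch of the log-spaced Lorentz factor grid built by _gam above:
# roughly nEed points per decade between Eemin/mec2 and Eemax/mec2.
import numpy as np

MEC2_ERG = 8.187e-7      # electron rest energy m_e c^2 in erg (approx.)
ERG_PER_GEV = 1.602e-3   # 1 GeV in erg (approx.)

Eemin = 1.0 * ERG_PER_GEV      # 1 GeV, expressed in erg (example value)
Eemax = 1.0e4 * ERG_PER_GEV    # 10 TeV
nEed = 300                     # points per decade (example value)

log10gmin = np.log10(Eemin / MEC2_ERG)
log10gmax = np.log10(Eemax / MEC2_ERG)
gam = np.logspace(log10gmin, log10gmax, int(nEed * (log10gmax - log10gmin)))

print(gam[0], gam[-1], gam.size)  # ~1957, ~1.96e7, 1200 points
```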
def _synthesize_multiple_c_extension(self, text_file, output_file_path, quit_after=None, backwards=False):
"""
Synthesize multiple text fragments, using the cew extension.
Return a tuple (anchors, total_time, num_chars).
:rtype: (bool, (list, :class:`~aeneas.exacttiming.TimeValue`, int))
"""
self.log(u"Synthesizing using C extension...")
# convert parameters from Python values to C values
try:
c_quit_after = float(quit_after)
except TypeError:
c_quit_after = 0.0
c_backwards = 0
if backwards:
c_backwards = 1
self.log([u"output_file_path: %s", output_file_path])
self.log([u"c_quit_after: %.3f", c_quit_after])
self.log([u"c_backwards: %d", c_backwards])
self.log(u"Preparing u_text...")
u_text = []
fragments = text_file.fragments
for fragment in fragments:
f_lang = fragment.language
f_text = fragment.filtered_text
if f_lang is None:
f_lang = self.DEFAULT_LANGUAGE
f_voice_code = self._language_to_voice_code(f_lang)
if f_text is None:
f_text = u""
u_text.append((f_voice_code, f_text))
self.log(u"Preparing u_text... done")
# call C extension
sr = None
sf = None
intervals = None
if self.rconf[RuntimeConfiguration.CEW_SUBPROCESS_ENABLED]:
self.log(u"Using cewsubprocess to call aeneas.cew")
try:
self.log(u"Importing aeneas.cewsubprocess...")
from aeneas.cewsubprocess import CEWSubprocess
self.log(u"Importing aeneas.cewsubprocess... done")
self.log(u"Calling aeneas.cewsubprocess...")
cewsub = CEWSubprocess(rconf=self.rconf, logger=self.logger)
sr, sf, intervals = cewsub.synthesize_multiple(output_file_path, c_quit_after, c_backwards, u_text)
self.log(u"Calling aeneas.cewsubprocess... done")
except Exception as exc:
self.log_exc(u"An unexpected error occurred while running cewsubprocess", exc, False, None)
# NOTE not critical, try calling aeneas.cew directly
# COMMENTED return (False, None)
if sr is None:
self.log(u"Preparing c_text...")
if gf.PY2:
# Python 2 => pass byte strings
c_text = [(gf.safe_bytes(t[0]), gf.safe_bytes(t[1])) for t in u_text]
else:
# Python 3 => pass Unicode strings
c_text = [(gf.safe_unicode(t[0]), gf.safe_unicode(t[1])) for t in u_text]
self.log(u"Preparing c_text... done")
self.log(u"Calling aeneas.cew directly")
try:
self.log(u"Importing aeneas.cew...")
import aeneas.cew.cew
self.log(u"Importing aeneas.cew... done")
self.log(u"Calling aeneas.cew...")
sr, sf, intervals = aeneas.cew.cew.synthesize_multiple(
output_file_path,
c_quit_after,
c_backwards,
c_text
)
self.log(u"Calling aeneas.cew... done")
except Exception as exc:
self.log_exc(u"An unexpected error occurred while running cew", exc, False, None)
return (False, None)
self.log([u"sr: %d", sr])
self.log([u"sf: %d", sf])
# create output
anchors = []
current_time = TimeValue("0.000")
num_chars = 0
if backwards:
fragments = fragments[::-1]
for i in range(sf):
# get the correct fragment
fragment = fragments[i]
# store for later output
anchors.append([
TimeValue(intervals[i][0]),
fragment.identifier,
fragment.filtered_text
])
# increase the character counter
num_chars += fragment.characters
# update current_time
current_time = TimeValue(intervals[i][1])
# return output
# NOTE anchors do not make sense if backwards == True
self.log([u"Returning %d time anchors", len(anchors)])
self.log([u"Current time %.3f", current_time])
self.log([u"Synthesized %d characters", num_chars])
self.log(u"Synthesizing using C extension... done")
return (True, (anchors, current_time, num_chars)) | def function[_synthesize_multiple_c_extension, parameter[self, text_file, output_file_path, quit_after, backwards]]:
constant[
Synthesize multiple text fragments, using the cew extension.
Return a tuple (anchors, total_time, num_chars).
:rtype: (bool, (list, :class:`~aeneas.exacttiming.TimeValue`, int))
]
call[name[self].log, parameter[constant[Synthesizing using C extension...]]]
<ast.Try object at 0x7da1b18a1e40>
variable[c_backwards] assign[=] constant[0]
if name[backwards] begin[:]
variable[c_backwards] assign[=] constant[1]
call[name[self].log, parameter[list[[<ast.Constant object at 0x7da1b18a17b0>, <ast.Name object at 0x7da1b18a1780>]]]]
call[name[self].log, parameter[list[[<ast.Constant object at 0x7da1b18a0c40>, <ast.Name object at 0x7da1b18a09a0>]]]]
call[name[self].log, parameter[list[[<ast.Constant object at 0x7da1b18a0850>, <ast.Name object at 0x7da1b18a0820>]]]]
call[name[self].log, parameter[constant[Preparing u_text...]]]
variable[u_text] assign[=] list[[]]
variable[fragments] assign[=] name[text_file].fragments
for taget[name[fragment]] in starred[name[fragments]] begin[:]
variable[f_lang] assign[=] name[fragment].language
variable[f_text] assign[=] name[fragment].filtered_text
if compare[name[f_lang] is constant[None]] begin[:]
variable[f_lang] assign[=] name[self].DEFAULT_LANGUAGE
variable[f_voice_code] assign[=] call[name[self]._language_to_voice_code, parameter[name[f_lang]]]
if compare[name[f_text] is constant[None]] begin[:]
variable[f_text] assign[=] constant[]
call[name[u_text].append, parameter[tuple[[<ast.Name object at 0x7da1b18a32b0>, <ast.Name object at 0x7da1b18a32e0>]]]]
call[name[self].log, parameter[constant[Preparing u_text... done]]]
variable[sr] assign[=] constant[None]
variable[sf] assign[=] constant[None]
variable[intervals] assign[=] constant[None]
if call[name[self].rconf][name[RuntimeConfiguration].CEW_SUBPROCESS_ENABLED] begin[:]
call[name[self].log, parameter[constant[Using cewsubprocess to call aeneas.cew]]]
<ast.Try object at 0x7da1b18a3850>
if compare[name[sr] is constant[None]] begin[:]
call[name[self].log, parameter[constant[Preparing c_text...]]]
if name[gf].PY2 begin[:]
variable[c_text] assign[=] <ast.ListComp object at 0x7da1b18a2350>
call[name[self].log, parameter[constant[Preparing c_text... done]]]
call[name[self].log, parameter[constant[Calling aeneas.cew directly]]]
<ast.Try object at 0x7da1b18a1000>
call[name[self].log, parameter[list[[<ast.Constant object at 0x7da2054a44c0>, <ast.Name object at 0x7da2054a68f0>]]]]
call[name[self].log, parameter[list[[<ast.Constant object at 0x7da2054a5f00>, <ast.Name object at 0x7da2054a6980>]]]]
variable[anchors] assign[=] list[[]]
variable[current_time] assign[=] call[name[TimeValue], parameter[constant[0.000]]]
variable[num_chars] assign[=] constant[0]
if name[backwards] begin[:]
variable[fragments] assign[=] call[name[fragments]][<ast.Slice object at 0x7da2054a5c00>]
for taget[name[i]] in starred[call[name[range], parameter[name[sf]]]] begin[:]
variable[fragment] assign[=] call[name[fragments]][name[i]]
call[name[anchors].append, parameter[list[[<ast.Call object at 0x7da2054a7790>, <ast.Attribute object at 0x7da2054a5420>, <ast.Attribute object at 0x7da2054a7a60>]]]]
<ast.AugAssign object at 0x7da2054a5ea0>
variable[current_time] assign[=] call[name[TimeValue], parameter[call[call[name[intervals]][name[i]]][constant[1]]]]
call[name[self].log, parameter[list[[<ast.Constant object at 0x7da2054a6a70>, <ast.Call object at 0x7da2054a62c0>]]]]
call[name[self].log, parameter[list[[<ast.Constant object at 0x7da2054a6020>, <ast.Name object at 0x7da2054a6590>]]]]
call[name[self].log, parameter[list[[<ast.Constant object at 0x7da2054a71c0>, <ast.Name object at 0x7da2054a5b70>]]]]
call[name[self].log, parameter[constant[Synthesizing using C extension... done]]]
return[tuple[[<ast.Constant object at 0x7da2054a78b0>, <ast.Tuple object at 0x7da2054a7f10>]]] | keyword[def] identifier[_synthesize_multiple_c_extension] ( identifier[self] , identifier[text_file] , identifier[output_file_path] , identifier[quit_after] = keyword[None] , identifier[backwards] = keyword[False] ):
literal[string]
identifier[self] . identifier[log] ( literal[string] )
keyword[try] :
identifier[c_quit_after] = identifier[float] ( identifier[quit_after] )
keyword[except] identifier[TypeError] :
identifier[c_quit_after] = literal[int]
identifier[c_backwards] = literal[int]
keyword[if] identifier[backwards] :
identifier[c_backwards] = literal[int]
identifier[self] . identifier[log] ([ literal[string] , identifier[output_file_path] ])
identifier[self] . identifier[log] ([ literal[string] , identifier[c_quit_after] ])
identifier[self] . identifier[log] ([ literal[string] , identifier[c_backwards] ])
identifier[self] . identifier[log] ( literal[string] )
identifier[u_text] =[]
identifier[fragments] = identifier[text_file] . identifier[fragments]
keyword[for] identifier[fragment] keyword[in] identifier[fragments] :
identifier[f_lang] = identifier[fragment] . identifier[language]
identifier[f_text] = identifier[fragment] . identifier[filtered_text]
keyword[if] identifier[f_lang] keyword[is] keyword[None] :
identifier[f_lang] = identifier[self] . identifier[DEFAULT_LANGUAGE]
identifier[f_voice_code] = identifier[self] . identifier[_language_to_voice_code] ( identifier[f_lang] )
keyword[if] identifier[f_text] keyword[is] keyword[None] :
identifier[f_text] = literal[string]
identifier[u_text] . identifier[append] (( identifier[f_voice_code] , identifier[f_text] ))
identifier[self] . identifier[log] ( literal[string] )
identifier[sr] = keyword[None]
identifier[sf] = keyword[None]
identifier[intervals] = keyword[None]
keyword[if] identifier[self] . identifier[rconf] [ identifier[RuntimeConfiguration] . identifier[CEW_SUBPROCESS_ENABLED] ]:
identifier[self] . identifier[log] ( literal[string] )
keyword[try] :
identifier[self] . identifier[log] ( literal[string] )
keyword[from] identifier[aeneas] . identifier[cewsubprocess] keyword[import] identifier[CEWSubprocess]
identifier[self] . identifier[log] ( literal[string] )
identifier[self] . identifier[log] ( literal[string] )
identifier[cewsub] = identifier[CEWSubprocess] ( identifier[rconf] = identifier[self] . identifier[rconf] , identifier[logger] = identifier[self] . identifier[logger] )
identifier[sr] , identifier[sf] , identifier[intervals] = identifier[cewsub] . identifier[synthesize_multiple] ( identifier[output_file_path] , identifier[c_quit_after] , identifier[c_backwards] , identifier[u_text] )
identifier[self] . identifier[log] ( literal[string] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[self] . identifier[log_exc] ( literal[string] , identifier[exc] , keyword[False] , keyword[None] )
keyword[if] identifier[sr] keyword[is] keyword[None] :
identifier[self] . identifier[log] ( literal[string] )
keyword[if] identifier[gf] . identifier[PY2] :
identifier[c_text] =[( identifier[gf] . identifier[safe_bytes] ( identifier[t] [ literal[int] ]), identifier[gf] . identifier[safe_bytes] ( identifier[t] [ literal[int] ])) keyword[for] identifier[t] keyword[in] identifier[u_text] ]
keyword[else] :
identifier[c_text] =[( identifier[gf] . identifier[safe_unicode] ( identifier[t] [ literal[int] ]), identifier[gf] . identifier[safe_unicode] ( identifier[t] [ literal[int] ])) keyword[for] identifier[t] keyword[in] identifier[u_text] ]
identifier[self] . identifier[log] ( literal[string] )
identifier[self] . identifier[log] ( literal[string] )
keyword[try] :
identifier[self] . identifier[log] ( literal[string] )
keyword[import] identifier[aeneas] . identifier[cew] . identifier[cew]
identifier[self] . identifier[log] ( literal[string] )
identifier[self] . identifier[log] ( literal[string] )
identifier[sr] , identifier[sf] , identifier[intervals] = identifier[aeneas] . identifier[cew] . identifier[cew] . identifier[synthesize_multiple] (
identifier[output_file_path] ,
identifier[c_quit_after] ,
identifier[c_backwards] ,
identifier[c_text]
)
identifier[self] . identifier[log] ( literal[string] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[self] . identifier[log_exc] ( literal[string] , identifier[exc] , keyword[False] , keyword[None] )
keyword[return] ( keyword[False] , keyword[None] )
identifier[self] . identifier[log] ([ literal[string] , identifier[sr] ])
identifier[self] . identifier[log] ([ literal[string] , identifier[sf] ])
identifier[anchors] =[]
identifier[current_time] = identifier[TimeValue] ( literal[string] )
identifier[num_chars] = literal[int]
keyword[if] identifier[backwards] :
identifier[fragments] = identifier[fragments] [::- literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[sf] ):
identifier[fragment] = identifier[fragments] [ identifier[i] ]
identifier[anchors] . identifier[append] ([
identifier[TimeValue] ( identifier[intervals] [ identifier[i] ][ literal[int] ]),
identifier[fragment] . identifier[identifier] ,
identifier[fragment] . identifier[filtered_text]
])
identifier[num_chars] += identifier[fragment] . identifier[characters]
identifier[current_time] = identifier[TimeValue] ( identifier[intervals] [ identifier[i] ][ literal[int] ])
identifier[self] . identifier[log] ([ literal[string] , identifier[len] ( identifier[anchors] )])
identifier[self] . identifier[log] ([ literal[string] , identifier[current_time] ])
identifier[self] . identifier[log] ([ literal[string] , identifier[num_chars] ])
identifier[self] . identifier[log] ( literal[string] )
keyword[return] ( keyword[True] ,( identifier[anchors] , identifier[current_time] , identifier[num_chars] )) | def _synthesize_multiple_c_extension(self, text_file, output_file_path, quit_after=None, backwards=False):
"""
Synthesize multiple text fragments, using the cew extension.
Return a tuple (anchors, total_time, num_chars).
:rtype: (bool, (list, :class:`~aeneas.exacttiming.TimeValue`, int))
"""
self.log(u'Synthesizing using C extension...')
# convert parameters from Python values to C values
try:
c_quit_after = float(quit_after) # depends on [control=['try'], data=[]]
except TypeError:
c_quit_after = 0.0 # depends on [control=['except'], data=[]]
c_backwards = 0
if backwards:
c_backwards = 1 # depends on [control=['if'], data=[]]
self.log([u'output_file_path: %s', output_file_path])
self.log([u'c_quit_after: %.3f', c_quit_after])
self.log([u'c_backwards: %d', c_backwards])
self.log(u'Preparing u_text...')
u_text = []
fragments = text_file.fragments
for fragment in fragments:
f_lang = fragment.language
f_text = fragment.filtered_text
if f_lang is None:
f_lang = self.DEFAULT_LANGUAGE # depends on [control=['if'], data=['f_lang']]
f_voice_code = self._language_to_voice_code(f_lang)
if f_text is None:
f_text = u'' # depends on [control=['if'], data=['f_text']]
u_text.append((f_voice_code, f_text)) # depends on [control=['for'], data=['fragment']]
self.log(u'Preparing u_text... done')
# call C extension
sr = None
sf = None
intervals = None
if self.rconf[RuntimeConfiguration.CEW_SUBPROCESS_ENABLED]:
self.log(u'Using cewsubprocess to call aeneas.cew')
try:
self.log(u'Importing aeneas.cewsubprocess...')
from aeneas.cewsubprocess import CEWSubprocess
self.log(u'Importing aeneas.cewsubprocess... done')
self.log(u'Calling aeneas.cewsubprocess...')
cewsub = CEWSubprocess(rconf=self.rconf, logger=self.logger)
(sr, sf, intervals) = cewsub.synthesize_multiple(output_file_path, c_quit_after, c_backwards, u_text)
self.log(u'Calling aeneas.cewsubprocess... done') # depends on [control=['try'], data=[]]
except Exception as exc:
self.log_exc(u'An unexpected error occurred while running cewsubprocess', exc, False, None) # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=[]]
# NOTE not critical, try calling aeneas.cew directly
# COMMENTED return (False, None)
if sr is None:
self.log(u'Preparing c_text...')
if gf.PY2:
# Python 2 => pass byte strings
c_text = [(gf.safe_bytes(t[0]), gf.safe_bytes(t[1])) for t in u_text] # depends on [control=['if'], data=[]]
else:
# Python 3 => pass Unicode strings
c_text = [(gf.safe_unicode(t[0]), gf.safe_unicode(t[1])) for t in u_text]
self.log(u'Preparing c_text... done')
self.log(u'Calling aeneas.cew directly')
try:
self.log(u'Importing aeneas.cew...')
import aeneas.cew.cew
self.log(u'Importing aeneas.cew... done')
self.log(u'Calling aeneas.cew...')
(sr, sf, intervals) = aeneas.cew.cew.synthesize_multiple(output_file_path, c_quit_after, c_backwards, c_text)
self.log(u'Calling aeneas.cew... done') # depends on [control=['try'], data=[]]
except Exception as exc:
self.log_exc(u'An unexpected error occurred while running cew', exc, False, None)
return (False, None) # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=['sr']]
self.log([u'sr: %d', sr])
self.log([u'sf: %d', sf])
# create output
anchors = []
current_time = TimeValue('0.000')
num_chars = 0
if backwards:
fragments = fragments[::-1] # depends on [control=['if'], data=[]]
for i in range(sf):
# get the correct fragment
fragment = fragments[i]
# store for later output
anchors.append([TimeValue(intervals[i][0]), fragment.identifier, fragment.filtered_text])
# increase the character counter
num_chars += fragment.characters
# update current_time
current_time = TimeValue(intervals[i][1]) # depends on [control=['for'], data=['i']]
# return output
# NOTE anchors do not make sense if backwards == True
self.log([u'Returning %d time anchors', len(anchors)])
self.log([u'Current time %.3f', current_time])
self.log([u'Synthesized %d characters', num_chars])
self.log(u'Synthesizing using C extension... done')
return (True, (anchors, current_time, num_chars)) |
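A minimal, self-contained sketch of the anchor-assembly loop above, with plain floats standing in for TimeValue objects and a namedtuple standing in for aeneas text fragments (both stand-ins are assumptions for illustration only):
from collections import namedtuple

Fragment = namedtuple("Fragment", "identifier filtered_text characters")
fragments = [Fragment("f000001", "Hello", 5), Fragment("f000002", "World", 5)]
intervals = [(0.0, 1.2), (1.2, 2.5)]  # (begin, end) in seconds per synthesized fragment

anchors, current_time, num_chars = [], 0.0, 0
for i, fragment in enumerate(fragments):
    # each anchor records where the fragment starts, plus its identifier and text
    anchors.append([intervals[i][0], fragment.identifier, fragment.filtered_text])
    num_chars += fragment.characters
    current_time = intervals[i][1]

assert anchors == [[0.0, "f000001", "Hello"], [1.2, "f000002", "World"]]
assert current_time == 2.5 and num_chars == 10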
def _get_pandasindex():
"""
>>> from hydpy import pub
>>> pub.timegrids = '2004.01.01', '2005.01.01', '1d'
>>> from hydpy.core.devicetools import _get_pandasindex
>>> _get_pandasindex() # doctest: +ELLIPSIS
DatetimeIndex(['2004-01-01 12:00:00', '2004-01-02 12:00:00',
...
'2004-12-30 12:00:00', '2004-12-31 12:00:00'],
dtype='datetime64[ns]', length=366, freq=None)
"""
tg = hydpy.pub.timegrids.init
shift = tg.stepsize / 2
index = pandas.date_range(
(tg.firstdate + shift).datetime,
(tg.lastdate - shift).datetime,
(tg.lastdate - tg.firstdate - tg.stepsize) / tg.stepsize + 1)
return index | def function[_get_pandasindex, parameter[]]:
constant[
>>> from hydpy import pub
>>> pub.timegrids = '2004.01.01', '2005.01.01', '1d'
>>> from hydpy.core.devicetools import _get_pandasindex
>>> _get_pandasindex() # doctest: +ELLIPSIS
DatetimeIndex(['2004-01-01 12:00:00', '2004-01-02 12:00:00',
...
'2004-12-30 12:00:00', '2004-12-31 12:00:00'],
dtype='datetime64[ns]', length=366, freq=None)
]
variable[tg] assign[=] name[hydpy].pub.timegrids.init
variable[shift] assign[=] binary_operation[name[tg].stepsize / constant[2]]
variable[index] assign[=] call[name[pandas].date_range, parameter[binary_operation[name[tg].firstdate + name[shift]].datetime, binary_operation[name[tg].lastdate - name[shift]].datetime, binary_operation[binary_operation[binary_operation[binary_operation[name[tg].lastdate - name[tg].firstdate] - name[tg].stepsize] / name[tg].stepsize] + constant[1]]]]
return[name[index]] | keyword[def] identifier[_get_pandasindex] ():
literal[string]
identifier[tg] = identifier[hydpy] . identifier[pub] . identifier[timegrids] . identifier[init]
identifier[shift] = identifier[tg] . identifier[stepsize] / literal[int]
identifier[index] = identifier[pandas] . identifier[date_range] (
( identifier[tg] . identifier[firstdate] + identifier[shift] ). identifier[datetime] ,
( identifier[tg] . identifier[lastdate] - identifier[shift] ). identifier[datetime] ,
( identifier[tg] . identifier[lastdate] - identifier[tg] . identifier[firstdate] - identifier[tg] . identifier[stepsize] )/ identifier[tg] . identifier[stepsize] + literal[int] )
keyword[return] identifier[index] | def _get_pandasindex():
"""
>>> from hydpy import pub
>>> pub.timegrids = '2004.01.01', '2005.01.01', '1d'
>>> from hydpy.core.devicetools import _get_pandasindex
>>> _get_pandasindex() # doctest: +ELLIPSIS
DatetimeIndex(['2004-01-01 12:00:00', '2004-01-02 12:00:00',
...
'2004-12-30 12:00:00', '2004-12-31 12:00:00'],
dtype='datetime64[ns]', length=366, freq=None)
"""
tg = hydpy.pub.timegrids.init
shift = tg.stepsize / 2
index = pandas.date_range((tg.firstdate + shift).datetime, (tg.lastdate - shift).datetime, (tg.lastdate - tg.firstdate - tg.stepsize) / tg.stepsize + 1)
return index |
def memoize_methodcalls(func, pickle=False, dumps=pickle.dumps):
'''Cache the results of the function for each input it gets called with.
'''
cache = func._memoize_cache = {}
@functools.wraps(func)
def memoizer(self, *args, **kwargs):
if pickle:
key = dumps((args, kwargs))
else:
key = args
if key not in cache:
cache[key] = func(self, *args, **kwargs)
return cache[key]
return memoizer | def function[memoize_methodcalls, parameter[func, pickle, dumps]]:
constant[Cache the results of the function for each input it gets called with.
]
variable[cache] assign[=] dictionary[[], []]
def function[memoizer, parameter[self]]:
if name[pickle] begin[:]
variable[key] assign[=] call[name[dumps], parameter[tuple[[<ast.Name object at 0x7da1b1471de0>, <ast.Name object at 0x7da1b1471510>]]]]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[cache]] begin[:]
call[name[cache]][name[key]] assign[=] call[name[func], parameter[name[self], <ast.Starred object at 0x7da1b1472e00>]]
return[call[name[cache]][name[key]]]
return[name[memoizer]] | keyword[def] identifier[memoize_methodcalls] ( identifier[func] , identifier[pickle] = keyword[False] , identifier[dumps] = identifier[pickle] . identifier[dumps] ):
literal[string]
identifier[cache] = identifier[func] . identifier[_memoize_cache] ={}
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[memoizer] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[pickle] :
identifier[key] = identifier[dumps] (( identifier[args] , identifier[kwargs] ))
keyword[else] :
identifier[key] = identifier[args]
keyword[if] identifier[key] keyword[not] keyword[in] identifier[cache] :
identifier[cache] [ identifier[key] ]= identifier[func] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[cache] [ identifier[key] ]
keyword[return] identifier[memoizer] | def memoize_methodcalls(func, pickle=False, dumps=pickle.dumps):
"""Cache the results of the function for each input it gets called with.
"""
cache = func._memoize_cache = {}
@functools.wraps(func)
def memoizer(self, *args, **kwargs):
if pickle:
key = dumps((args, kwargs)) # depends on [control=['if'], data=[]]
else:
key = args
if key not in cache:
cache[key] = func(self, *args, **kwargs) # depends on [control=['if'], data=['key', 'cache']]
return cache[key]
return memoizer |
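A hedged usage sketch for the decorator above (it assumes memoize_methodcalls, together with its functools and pickle imports, is already defined as in this row):
class Fib(object):
    @memoize_methodcalls
    def fib(self, n):
        return n if n < 2 else self.fib(n - 1) + self.fib(n - 2)

f = Fib()
assert f.fib(20) == 6765   # computed once per distinct n ...
assert f.fib(20) == 6765   # ... then served from the closed-over cache (keyed on args, not on self)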
def nodes(self, type=None, failed=False):
"""Get nodes associated with this participant.
Return a list of nodes associated with the participant. If specified,
``type`` filters by class. By default failed nodes are excluded, to
include only failed nodes use ``failed=True``, for all nodes use
    ``failed="all"``.
"""
if type is None:
type = Node
if not issubclass(type, Node):
raise(TypeError("{} is not a valid node type.".format(type)))
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid node failed".format(failed))
if failed == "all":
return type\
.query\
.filter_by(participant_id=self.id)\
.all()
else:
return type\
.query\
.filter_by(failed=failed, participant_id=self.id)\
.all() | def function[nodes, parameter[self, type, failed]]:
constant[Get nodes associated with this participant.
Return a list of nodes associated with the participant. If specified,
``type`` filters by class. By default failed nodes are excluded, to
include only failed nodes use ``failed=True``, for all nodes use
    ``failed="all"``.
]
if compare[name[type] is constant[None]] begin[:]
variable[type] assign[=] name[Node]
if <ast.UnaryOp object at 0x7da1b228e410> begin[:]
<ast.Raise object at 0x7da1b228d7b0>
if compare[name[failed] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b236bbb0>, <ast.Constant object at 0x7da1b236a770>, <ast.Constant object at 0x7da1b236ae00>]]] begin[:]
<ast.Raise object at 0x7da1b236b7f0>
if compare[name[failed] equal[==] constant[all]] begin[:]
return[call[call[name[type].query.filter_by, parameter[]].all, parameter[]]] | keyword[def] identifier[nodes] ( identifier[self] , identifier[type] = keyword[None] , identifier[failed] = keyword[False] ):
literal[string]
keyword[if] identifier[type] keyword[is] keyword[None] :
identifier[type] = identifier[Node]
keyword[if] keyword[not] identifier[issubclass] ( identifier[type] , identifier[Node] ):
keyword[raise] ( identifier[TypeError] ( literal[string] . identifier[format] ( identifier[type] )))
keyword[if] identifier[failed] keyword[not] keyword[in] [ literal[string] , keyword[False] , keyword[True] ]:
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[failed] ))
keyword[if] identifier[failed] == literal[string] :
keyword[return] identifier[type] . identifier[query] . identifier[filter_by] ( identifier[participant_id] = identifier[self] . identifier[id] ). identifier[all] ()
keyword[else] :
keyword[return] identifier[type] . identifier[query] . identifier[filter_by] ( identifier[failed] = identifier[failed] , identifier[participant_id] = identifier[self] . identifier[id] ). identifier[all] () | def nodes(self, type=None, failed=False):
"""Get nodes associated with this participant.
Return a list of nodes associated with the participant. If specified,
``type`` filters by class. By default failed nodes are excluded, to
include only failed nodes use ``failed=True``, for all nodes use
    ``failed="all"``.
"""
if type is None:
type = Node # depends on [control=['if'], data=['type']]
if not issubclass(type, Node):
raise TypeError('{} is not a valid node type.'.format(type)) # depends on [control=['if'], data=[]]
if failed not in ['all', False, True]:
raise ValueError('{} is not a valid node failed'.format(failed)) # depends on [control=['if'], data=['failed']]
if failed == 'all':
return type.query.filter_by(participant_id=self.id).all() # depends on [control=['if'], data=[]]
else:
return type.query.filter_by(failed=failed, participant_id=self.id).all() |
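An illustrative usage sketch of the three failed modes (not runnable on its own: it assumes an existing participant instance and, for the type filter, a Node subclass such as Agent from the surrounding codebase):
participant.nodes()               # non-failed nodes of any type
participant.nodes(type=Agent)     # non-failed Agent nodes only
participant.nodes(failed=True)    # failed nodes only
participant.nodes(failed="all")   # failed and non-failed nodes together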
def _get_cron_info():
'''
Returns the proper group owner and path to the cron directory
'''
owner = 'root'
if __grains__['os'] == 'FreeBSD':
group = 'wheel'
crontab_dir = '/var/cron/tabs'
elif __grains__['os'] == 'OpenBSD':
group = 'crontab'
crontab_dir = '/var/cron/tabs'
elif __grains__['os_family'] == 'Solaris':
group = 'root'
crontab_dir = '/var/spool/cron/crontabs'
elif __grains__['os'] == 'MacOS':
group = 'wheel'
crontab_dir = '/usr/lib/cron/tabs'
else:
group = 'root'
crontab_dir = '/var/spool/cron'
return owner, group, crontab_dir | def function[_get_cron_info, parameter[]]:
constant[
Returns the proper group owner and path to the cron directory
]
variable[owner] assign[=] constant[root]
if compare[call[name[__grains__]][constant[os]] equal[==] constant[FreeBSD]] begin[:]
variable[group] assign[=] constant[wheel]
variable[crontab_dir] assign[=] constant[/var/cron/tabs]
return[tuple[[<ast.Name object at 0x7da1b1c168c0>, <ast.Name object at 0x7da1b1c16e00>, <ast.Name object at 0x7da1b1c16620>]]] | keyword[def] identifier[_get_cron_info] ():
literal[string]
identifier[owner] = literal[string]
keyword[if] identifier[__grains__] [ literal[string] ]== literal[string] :
identifier[group] = literal[string]
identifier[crontab_dir] = literal[string]
keyword[elif] identifier[__grains__] [ literal[string] ]== literal[string] :
identifier[group] = literal[string]
identifier[crontab_dir] = literal[string]
keyword[elif] identifier[__grains__] [ literal[string] ]== literal[string] :
identifier[group] = literal[string]
identifier[crontab_dir] = literal[string]
keyword[elif] identifier[__grains__] [ literal[string] ]== literal[string] :
identifier[group] = literal[string]
identifier[crontab_dir] = literal[string]
keyword[else] :
identifier[group] = literal[string]
identifier[crontab_dir] = literal[string]
keyword[return] identifier[owner] , identifier[group] , identifier[crontab_dir] | def _get_cron_info():
"""
Returns the proper group owner and path to the cron directory
"""
owner = 'root'
if __grains__['os'] == 'FreeBSD':
group = 'wheel'
crontab_dir = '/var/cron/tabs' # depends on [control=['if'], data=[]]
elif __grains__['os'] == 'OpenBSD':
group = 'crontab'
crontab_dir = '/var/cron/tabs' # depends on [control=['if'], data=[]]
elif __grains__['os_family'] == 'Solaris':
group = 'root'
crontab_dir = '/var/spool/cron/crontabs' # depends on [control=['if'], data=[]]
elif __grains__['os'] == 'MacOS':
group = 'wheel'
crontab_dir = '/usr/lib/cron/tabs' # depends on [control=['if'], data=[]]
else:
group = 'root'
crontab_dir = '/var/spool/cron'
return (owner, group, crontab_dir) |
def set_stim(self, signal, fs, attenuation=0):
"""Sets any vector as the next stimulus to be output. Does not call write to hardware"""
self.tone_lock.acquire()
self.stim = signal
self.fs = fs
self.atten = attenuation
self.stim_changed = True
self.tone_lock.release() | def function[set_stim, parameter[self, signal, fs, attenuation]]:
constant[Sets any vector as the next stimulus to be output. Does not call write to hardware]
call[name[self].tone_lock.acquire, parameter[]]
name[self].stim assign[=] name[signal]
name[self].fs assign[=] name[fs]
name[self].atten assign[=] name[attenuation]
name[self].stim_changed assign[=] constant[True]
call[name[self].tone_lock.release, parameter[]] | keyword[def] identifier[set_stim] ( identifier[self] , identifier[signal] , identifier[fs] , identifier[attenuation] = literal[int] ):
literal[string]
identifier[self] . identifier[tone_lock] . identifier[acquire] ()
identifier[self] . identifier[stim] = identifier[signal]
identifier[self] . identifier[fs] = identifier[fs]
identifier[self] . identifier[atten] = identifier[attenuation]
identifier[self] . identifier[stim_changed] = keyword[True]
identifier[self] . identifier[tone_lock] . identifier[release] () | def set_stim(self, signal, fs, attenuation=0):
"""Sets any vector as the next stimulus to be output. Does not call write to hardware"""
self.tone_lock.acquire()
self.stim = signal
self.fs = fs
self.atten = attenuation
self.stim_changed = True
self.tone_lock.release() |
def read_backup(self, p_todolist=None, p_timestamp=None):
"""
Retrieves a backup for p_timestamp or p_todolist (if p_timestamp is not
specified) from backup file and sets timestamp, todolist, archive and
label attributes to appropriate data from it.
"""
if not p_timestamp:
change_hash = hash_todolist(p_todolist)
index = self._get_index()
self.timestamp = index[[change[1] for change in index].index(change_hash)][0]
else:
self.timestamp = p_timestamp
d = self.backup_dict[self.timestamp]
self.todolist = TodoList(d[0])
self.archive = TodoList(d[1])
self.label = d[2] | def function[read_backup, parameter[self, p_todolist, p_timestamp]]:
constant[
Retrieves a backup for p_timestamp or p_todolist (if p_timestamp is not
specified) from backup file and sets timestamp, todolist, archive and
label attributes to appropriate data from it.
]
if <ast.UnaryOp object at 0x7da20c6e7ca0> begin[:]
variable[change_hash] assign[=] call[name[hash_todolist], parameter[name[p_todolist]]]
variable[index] assign[=] call[name[self]._get_index, parameter[]]
name[self].timestamp assign[=] call[call[name[index]][call[<ast.ListComp object at 0x7da20e961450>.index, parameter[name[change_hash]]]]][constant[0]]
variable[d] assign[=] call[name[self].backup_dict][name[self].timestamp]
name[self].todolist assign[=] call[name[TodoList], parameter[call[name[d]][constant[0]]]]
name[self].archive assign[=] call[name[TodoList], parameter[call[name[d]][constant[1]]]]
name[self].label assign[=] call[name[d]][constant[2]] | keyword[def] identifier[read_backup] ( identifier[self] , identifier[p_todolist] = keyword[None] , identifier[p_timestamp] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[p_timestamp] :
identifier[change_hash] = identifier[hash_todolist] ( identifier[p_todolist] )
identifier[index] = identifier[self] . identifier[_get_index] ()
identifier[self] . identifier[timestamp] = identifier[index] [[ identifier[change] [ literal[int] ] keyword[for] identifier[change] keyword[in] identifier[index] ]. identifier[index] ( identifier[change_hash] )][ literal[int] ]
keyword[else] :
identifier[self] . identifier[timestamp] = identifier[p_timestamp]
identifier[d] = identifier[self] . identifier[backup_dict] [ identifier[self] . identifier[timestamp] ]
identifier[self] . identifier[todolist] = identifier[TodoList] ( identifier[d] [ literal[int] ])
identifier[self] . identifier[archive] = identifier[TodoList] ( identifier[d] [ literal[int] ])
identifier[self] . identifier[label] = identifier[d] [ literal[int] ] | def read_backup(self, p_todolist=None, p_timestamp=None):
"""
Retrieves a backup for p_timestamp or p_todolist (if p_timestamp is not
specified) from backup file and sets timestamp, todolist, archive and
label attributes to appropriate data from it.
"""
if not p_timestamp:
change_hash = hash_todolist(p_todolist)
index = self._get_index()
self.timestamp = index[[change[1] for change in index].index(change_hash)][0] # depends on [control=['if'], data=[]]
else:
self.timestamp = p_timestamp
d = self.backup_dict[self.timestamp]
self.todolist = TodoList(d[0])
self.archive = TodoList(d[1])
self.label = d[2] |
def _get_xml(self, metric):
"""Returns the channel element of the RSS feed"""
self._opener = urllib2.build_opener()
self._opener.addheaders = [('User-agent', self.user_agent)]
if metric:
url = self.base_url + '?w={0}&u=c'.format(self.woeid)
else:
url = self.base_url + '?w={0}'.format(self.woeid)
return etree.parse(
self._opener.open(url)
).getroot()[0] | def function[_get_xml, parameter[self, metric]]:
constant[Returns the channel element of the RSS feed]
name[self]._opener assign[=] call[name[urllib2].build_opener, parameter[]]
name[self]._opener.addheaders assign[=] list[[<ast.Tuple object at 0x7da207f00f10>]]
if name[metric] begin[:]
variable[url] assign[=] binary_operation[name[self].base_url + call[constant[?w={0}&u=c].format, parameter[name[self].woeid]]]
return[call[call[call[name[etree].parse, parameter[call[name[self]._opener.open, parameter[name[url]]]]].getroot, parameter[]]][constant[0]]] | keyword[def] identifier[_get_xml] ( identifier[self] , identifier[metric] ):
literal[string]
identifier[self] . identifier[_opener] = identifier[urllib2] . identifier[build_opener] ()
identifier[self] . identifier[_opener] . identifier[addheaders] =[( literal[string] , identifier[self] . identifier[user_agent] )]
keyword[if] identifier[metric] :
identifier[url] = identifier[self] . identifier[base_url] + literal[string] . identifier[format] ( identifier[self] . identifier[woeid] )
keyword[else] :
identifier[url] = identifier[self] . identifier[base_url] + literal[string] . identifier[format] ( identifier[self] . identifier[woeid] )
keyword[return] identifier[etree] . identifier[parse] (
identifier[self] . identifier[_opener] . identifier[open] ( identifier[url] )
). identifier[getroot] ()[ literal[int] ] | def _get_xml(self, metric):
"""Returns the channel element of the RSS feed"""
self._opener = urllib2.build_opener()
self._opener.addheaders = [('User-agent', self.user_agent)]
if metric:
url = self.base_url + '?w={0}&u=c'.format(self.woeid) # depends on [control=['if'], data=[]]
else:
url = self.base_url + '?w={0}'.format(self.woeid)
return etree.parse(self._opener.open(url)).getroot()[0] |
def reverse_formats(format_string, resolved_strings):
"""
Reverse the string method format for a list of strings.
Given format_string and resolved_strings, for each resolved string
find arguments that would give
``format_string.format(**arguments) == resolved_string``.
Each item in the output corresponds to a new column with the key setting
the name and the values representing a mapping from list of resolved_strings
to the related value.
Parameters
----------
format_strings : str
Format template string as used with str.format method
resolved_strings : list
List of strings with same pattern as format_string but with fields
filled out.
Returns
-------
args : dict
Dict of the form ``{field: [value_0, ..., value_n], ...}`` where values are in
the same order as resolved_strings, so:
        ``format_string.format(**{f: v[0] for f, v in args.items()}) == resolved_strings[0]``
Examples
--------
>>> paths = ['data_2014_01_03.csv', 'data_2014_02_03.csv', 'data_2015_12_03.csv']
>>> reverse_formats('data_{year}_{month}_{day}.csv', paths)
{'year': ['2014', '2014', '2015'],
'month': ['01', '02', '12'],
'day': ['03', '03', '03']}
>>> reverse_formats('data_{year:d}_{month:d}_{day:d}.csv', paths)
{'year': [2014, 2014, 2015], 'month': [1, 2, 12], 'day': [3, 3, 3]}
>>> reverse_formats('data_{date:%Y_%m_%d}.csv', paths)
{'date': [datetime.datetime(2014, 1, 3, 0, 0),
datetime.datetime(2014, 2, 3, 0, 0),
datetime.datetime(2015, 12, 3, 0, 0)]}
>>> reverse_formats('{state:2}{zip:5}', ['PA19104', 'PA19143', 'MA02534'])
{'state': ['PA', 'PA', 'MA'], 'zip': ['19104', '19143', '02534']}
See also
--------
str.format : method that this reverses
reverse_format : method for reversing just one string using a pattern
"""
from string import Formatter
fmt = Formatter()
# get the fields from the format_string
field_names = [i[1] for i in fmt.parse(format_string) if i[1]]
    # initialize the args dict with an empty list for each field
args = {field_name: [] for field_name in field_names}
for resolved_string in resolved_strings:
for field, value in reverse_format(format_string, resolved_string).items():
args[field].append(value)
return args | def function[reverse_formats, parameter[format_string, resolved_strings]]:
constant[
Reverse the string method format for a list of strings.
Given format_string and resolved_strings, for each resolved string
find arguments that would give
``format_string.format(**arguments) == resolved_string``.
Each item in the output corresponds to a new column with the key setting
the name and the values representing a mapping from list of resolved_strings
to the related value.
Parameters
----------
format_strings : str
Format template string as used with str.format method
resolved_strings : list
List of strings with same pattern as format_string but with fields
filled out.
Returns
-------
args : dict
Dict of the form ``{field: [value_0, ..., value_n], ...}`` where values are in
the same order as resolved_strings, so:
        ``format_string.format(**{f: v[0] for f, v in args.items()}) == resolved_strings[0]``
Examples
--------
>>> paths = ['data_2014_01_03.csv', 'data_2014_02_03.csv', 'data_2015_12_03.csv']
>>> reverse_formats('data_{year}_{month}_{day}.csv', paths)
{'year': ['2014', '2014', '2015'],
'month': ['01', '02', '12'],
'day': ['03', '03', '03']}
>>> reverse_formats('data_{year:d}_{month:d}_{day:d}.csv', paths)
{'year': [2014, 2014, 2015], 'month': [1, 2, 12], 'day': [3, 3, 3]}
>>> reverse_formats('data_{date:%Y_%m_%d}.csv', paths)
{'date': [datetime.datetime(2014, 1, 3, 0, 0),
datetime.datetime(2014, 2, 3, 0, 0),
datetime.datetime(2015, 12, 3, 0, 0)]}
>>> reverse_formats('{state:2}{zip:5}', ['PA19104', 'PA19143', 'MA02534'])
{'state': ['PA', 'PA', 'MA'], 'zip': ['19104', '19143', '02534']}
See also
--------
str.format : method that this reverses
reverse_format : method for reversing just one string using a pattern
]
from relative_module[string] import module[Formatter]
variable[fmt] assign[=] call[name[Formatter], parameter[]]
variable[field_names] assign[=] <ast.ListComp object at 0x7da1b17f66e0>
variable[args] assign[=] <ast.DictComp object at 0x7da1b17f6950>
for taget[name[resolved_string]] in starred[name[resolved_strings]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b17f59c0>, <ast.Name object at 0x7da1b17f49a0>]]] in starred[call[call[name[reverse_format], parameter[name[format_string], name[resolved_string]]].items, parameter[]]] begin[:]
call[call[name[args]][name[field]].append, parameter[name[value]]]
return[name[args]] | keyword[def] identifier[reverse_formats] ( identifier[format_string] , identifier[resolved_strings] ):
literal[string]
keyword[from] identifier[string] keyword[import] identifier[Formatter]
identifier[fmt] = identifier[Formatter] ()
identifier[field_names] =[ identifier[i] [ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[fmt] . identifier[parse] ( identifier[format_string] ) keyword[if] identifier[i] [ literal[int] ]]
identifier[args] ={ identifier[field_name] :[] keyword[for] identifier[field_name] keyword[in] identifier[field_names] }
keyword[for] identifier[resolved_string] keyword[in] identifier[resolved_strings] :
keyword[for] identifier[field] , identifier[value] keyword[in] identifier[reverse_format] ( identifier[format_string] , identifier[resolved_string] ). identifier[items] ():
identifier[args] [ identifier[field] ]. identifier[append] ( identifier[value] )
keyword[return] identifier[args] | def reverse_formats(format_string, resolved_strings):
"""
Reverse the string method format for a list of strings.
Given format_string and resolved_strings, for each resolved string
find arguments that would give
``format_string.format(**arguments) == resolved_string``.
Each item in the output corresponds to a new column with the key setting
the name and the values representing a mapping from list of resolved_strings
to the related value.
Parameters
----------
format_strings : str
Format template string as used with str.format method
resolved_strings : list
List of strings with same pattern as format_string but with fields
filled out.
Returns
-------
args : dict
Dict of the form ``{field: [value_0, ..., value_n], ...}`` where values are in
the same order as resolved_strings, so:
``format_sting.format(**{f: v[0] for f, v in args.items()}) == resolved_strings[0]``
Examples
--------
>>> paths = ['data_2014_01_03.csv', 'data_2014_02_03.csv', 'data_2015_12_03.csv']
>>> reverse_formats('data_{year}_{month}_{day}.csv', paths)
{'year': ['2014', '2014', '2015'],
'month': ['01', '02', '12'],
'day': ['03', '03', '03']}
>>> reverse_formats('data_{year:d}_{month:d}_{day:d}.csv', paths)
{'year': [2014, 2014, 2015], 'month': [1, 2, 12], 'day': [3, 3, 3]}
>>> reverse_formats('data_{date:%Y_%m_%d}.csv', paths)
{'date': [datetime.datetime(2014, 1, 3, 0, 0),
datetime.datetime(2014, 2, 3, 0, 0),
datetime.datetime(2015, 12, 3, 0, 0)]}
>>> reverse_formats('{state:2}{zip:5}', ['PA19104', 'PA19143', 'MA02534'])
{'state': ['PA', 'PA', 'MA'], 'zip': ['19104', '19143', '02534']}
See also
--------
str.format : method that this reverses
reverse_format : method for reversing just one string using a pattern
"""
from string import Formatter
fmt = Formatter()
# get the fields from the format_string
field_names = [i[1] for i in fmt.parse(format_string) if i[1]]
    # initialize the args dict with an empty list for each field
args = {field_name: [] for field_name in field_names}
for resolved_string in resolved_strings:
for (field, value) in reverse_format(format_string, resolved_string).items():
args[field].append(value) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['resolved_string']]
return args |
def number_of_changes(slots, events, original_schedule, X, **kwargs):
"""
A function that counts the number of changes between a given schedule
    and an array (either a numpy array or an lp array).
"""
changes = 0
original_array = schedule_to_array(original_schedule, events=events,
slots=slots)
for row, event in enumerate(original_array):
for col, slot in enumerate(event):
if slot == 0:
changes += X[row, col]
else:
changes += 1 - X[row, col]
return changes | def function[number_of_changes, parameter[slots, events, original_schedule, X]]:
constant[
A function that counts the number of changes between a given schedule
    and an array (either a numpy array or an lp array).
]
variable[changes] assign[=] constant[0]
variable[original_array] assign[=] call[name[schedule_to_array], parameter[name[original_schedule]]]
for taget[tuple[[<ast.Name object at 0x7da1b0403970>, <ast.Name object at 0x7da1b04015a0>]]] in starred[call[name[enumerate], parameter[name[original_array]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0401ed0>, <ast.Name object at 0x7da1b0400f70>]]] in starred[call[name[enumerate], parameter[name[event]]]] begin[:]
if compare[name[slot] equal[==] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b04033d0>
return[name[changes]] | keyword[def] identifier[number_of_changes] ( identifier[slots] , identifier[events] , identifier[original_schedule] , identifier[X] ,** identifier[kwargs] ):
literal[string]
identifier[changes] = literal[int]
identifier[original_array] = identifier[schedule_to_array] ( identifier[original_schedule] , identifier[events] = identifier[events] ,
identifier[slots] = identifier[slots] )
keyword[for] identifier[row] , identifier[event] keyword[in] identifier[enumerate] ( identifier[original_array] ):
keyword[for] identifier[col] , identifier[slot] keyword[in] identifier[enumerate] ( identifier[event] ):
keyword[if] identifier[slot] == literal[int] :
identifier[changes] += identifier[X] [ identifier[row] , identifier[col] ]
keyword[else] :
identifier[changes] += literal[int] - identifier[X] [ identifier[row] , identifier[col] ]
keyword[return] identifier[changes] | def number_of_changes(slots, events, original_schedule, X, **kwargs):
"""
A function that counts the number of changes between a given schedule
    and an array (either a numpy array or an lp array).
"""
changes = 0
original_array = schedule_to_array(original_schedule, events=events, slots=slots)
for (row, event) in enumerate(original_array):
for (col, slot) in enumerate(event):
if slot == 0:
changes += X[row, col] # depends on [control=['if'], data=[]]
else:
changes += 1 - X[row, col] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return changes |
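The double loop above amounts to a Hamming-style count between the original schedule array and X; a small NumPy sketch of the same arithmetic, assuming X is a plain 0/1 array rather than LP variables:
import numpy as np

original = np.array([[1, 0],    # event 0 originally scheduled in slot 0
                     [0, 1]])   # event 1 originally scheduled in slot 1
X = np.array([[0, 0],           # event 0 moved out of slot 0
              [0, 1]])          # event 1 unchanged

changes = np.where(original == 0, X, 1 - X).sum()
assert changes == 1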
def _get_suggestions(self, filter_word=None):
"""
    This only gets called internally from the get_suggestion method.
"""
keys = self.manifest.keys()
words = []
for key in keys:
if isinstance(self.manifest[key], Manifest):
# if this key is another manifest, append a slash to the
            # suggestion so the user knows there are more items under this key
words.append(key + '/')
else:
words.append(key)
if filter_word:
words = [x for x in words if x.startswith(filter_word)]
return words | def function[_get_suggestions, parameter[self, filter_word]]:
constant[
    This only gets called internally from the get_suggestion method.
]
variable[keys] assign[=] call[name[self].manifest.keys, parameter[]]
variable[words] assign[=] list[[]]
for taget[name[key]] in starred[name[keys]] begin[:]
if call[name[isinstance], parameter[call[name[self].manifest][name[key]], name[Manifest]]] begin[:]
call[name[words].append, parameter[binary_operation[name[key] + constant[/]]]]
if name[filter_word] begin[:]
variable[words] assign[=] <ast.ListComp object at 0x7da1b25857b0>
return[name[words]] | keyword[def] identifier[_get_suggestions] ( identifier[self] , identifier[filter_word] = keyword[None] ):
literal[string]
identifier[keys] = identifier[self] . identifier[manifest] . identifier[keys] ()
identifier[words] =[]
keyword[for] identifier[key] keyword[in] identifier[keys] :
keyword[if] identifier[isinstance] ( identifier[self] . identifier[manifest] [ identifier[key] ], identifier[Manifest] ):
identifier[words] . identifier[append] ( identifier[key] + literal[string] )
keyword[else] :
identifier[words] . identifier[append] ( identifier[key] )
keyword[if] identifier[filter_word] :
identifier[words] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[words] keyword[if] identifier[x] . identifier[startswith] ( identifier[filter_word] )]
keyword[return] identifier[words] | def _get_suggestions(self, filter_word=None):
"""
    This only gets called internally from the get_suggestion method.
"""
keys = self.manifest.keys()
words = []
for key in keys:
if isinstance(self.manifest[key], Manifest): # if this key is another manifest, append a slash to the
            # suggestion so the user knows there are more items under this key
words.append(key + '/') # depends on [control=['if'], data=[]]
else:
words.append(key) # depends on [control=['for'], data=['key']]
if filter_word:
words = [x for x in words if x.startswith(filter_word)] # depends on [control=['if'], data=[]]
return words |
def MessageToJson(message,
including_default_value_fields=False,
preserving_proto_field_name=False):
"""Converts protobuf message to JSON format.
Args:
message: The protocol buffers message instance to serialize.
including_default_value_fields: If True, singular primitive fields,
repeated fields, and map fields will always be serialized. If
False, only serialize non-empty fields. Singular message fields
and oneof fields are not affected by this option.
preserving_proto_field_name: If True, use the original proto field
names as defined in the .proto file. If False, convert the field
names to lowerCamelCase.
Returns:
A string containing the JSON formatted protocol buffer message.
"""
printer = _Printer(including_default_value_fields,
preserving_proto_field_name)
return printer.ToJsonString(message) | def function[MessageToJson, parameter[message, including_default_value_fields, preserving_proto_field_name]]:
constant[Converts protobuf message to JSON format.
Args:
message: The protocol buffers message instance to serialize.
including_default_value_fields: If True, singular primitive fields,
repeated fields, and map fields will always be serialized. If
False, only serialize non-empty fields. Singular message fields
and oneof fields are not affected by this option.
preserving_proto_field_name: If True, use the original proto field
names as defined in the .proto file. If False, convert the field
names to lowerCamelCase.
Returns:
A string containing the JSON formatted protocol buffer message.
]
variable[printer] assign[=] call[name[_Printer], parameter[name[including_default_value_fields], name[preserving_proto_field_name]]]
return[call[name[printer].ToJsonString, parameter[name[message]]]] | keyword[def] identifier[MessageToJson] ( identifier[message] ,
identifier[including_default_value_fields] = keyword[False] ,
identifier[preserving_proto_field_name] = keyword[False] ):
literal[string]
identifier[printer] = identifier[_Printer] ( identifier[including_default_value_fields] ,
identifier[preserving_proto_field_name] )
keyword[return] identifier[printer] . identifier[ToJsonString] ( identifier[message] ) | def MessageToJson(message, including_default_value_fields=False, preserving_proto_field_name=False):
"""Converts protobuf message to JSON format.
Args:
message: The protocol buffers message instance to serialize.
including_default_value_fields: If True, singular primitive fields,
repeated fields, and map fields will always be serialized. If
False, only serialize non-empty fields. Singular message fields
and oneof fields are not affected by this option.
preserving_proto_field_name: If True, use the original proto field
names as defined in the .proto file. If False, convert the field
names to lowerCamelCase.
Returns:
A string containing the JSON formatted protocol buffer message.
"""
printer = _Printer(including_default_value_fields, preserving_proto_field_name)
return printer.ToJsonString(message) |
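A small usage sketch with the well-known Struct message type; the exact whitespace and field order of the JSON output may differ between protobuf versions:
from google.protobuf import struct_pb2

msg = struct_pb2.Struct()
msg["name"] = "example"
msg["count"] = 3

json_text = MessageToJson(msg)
# json_text is roughly '{\n  "name": "example",\n  "count": 3.0\n}'
# (Struct stores every number as a double, hence 3.0)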
def _createFromObject(obj, *args, **kwargs):
""" Creates an RTI given an object. Auto-detects which RTI class to return.
The *args and **kwargs parameters are passed to the RTI constructor.
        It is therefore important that all memory RTIs accept the same parameters in the
constructor (with exception of the FieldRti which is not auto-detected).
"""
if is_a_sequence(obj):
return SequenceRti(obj, *args, **kwargs)
elif is_a_mapping(obj):
return MappingRti(obj, *args, **kwargs)
elif is_an_array(obj):
return ArrayRti(obj, *args, **kwargs)
elif isinstance(obj, bytearray):
return ArrayRti(np.array(obj), *args, **kwargs)
else:
return ScalarRti(obj, *args, **kwargs) | def function[_createFromObject, parameter[obj]]:
constant[ Creates an RTI given an object. Auto-detects which RTI class to return.
The *args and **kwargs parameters are passed to the RTI constructor.
        It is therefore important that all memory RTIs accept the same parameters in the
constructor (with exception of the FieldRti which is not auto-detected).
]
if call[name[is_a_sequence], parameter[name[obj]]] begin[:]
return[call[name[SequenceRti], parameter[name[obj], <ast.Starred object at 0x7da1b0477820>]]] | keyword[def] identifier[_createFromObject] ( identifier[obj] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[is_a_sequence] ( identifier[obj] ):
keyword[return] identifier[SequenceRti] ( identifier[obj] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[is_a_mapping] ( identifier[obj] ):
keyword[return] identifier[MappingRti] ( identifier[obj] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[is_an_array] ( identifier[obj] ):
keyword[return] identifier[ArrayRti] ( identifier[obj] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[bytearray] ):
keyword[return] identifier[ArrayRti] ( identifier[np] . identifier[array] ( identifier[obj] ),* identifier[args] ,** identifier[kwargs] )
keyword[else] :
keyword[return] identifier[ScalarRti] ( identifier[obj] ,* identifier[args] ,** identifier[kwargs] ) | def _createFromObject(obj, *args, **kwargs):
""" Creates an RTI given an object. Auto-detects which RTI class to return.
The *args and **kwargs parameters are passed to the RTI constructor.
        It is therefore important that all memory RTIs accept the same parameters in the
constructor (with exception of the FieldRti which is not auto-detected).
"""
if is_a_sequence(obj):
return SequenceRti(obj, *args, **kwargs) # depends on [control=['if'], data=[]]
elif is_a_mapping(obj):
return MappingRti(obj, *args, **kwargs) # depends on [control=['if'], data=[]]
elif is_an_array(obj):
return ArrayRti(obj, *args, **kwargs) # depends on [control=['if'], data=[]]
elif isinstance(obj, bytearray):
return ArrayRti(np.array(obj), *args, **kwargs) # depends on [control=['if'], data=[]]
else:
return ScalarRti(obj, *args, **kwargs) |
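A short dispatch summary of the auto-detection above (a sketch only; the RTI classes and the is_a_* helpers come from the surrounding module):
# [1, 2, 3] or (1, 2)   -> SequenceRti(obj, ...)          via is_a_sequence
# {'a': 1}              -> MappingRti(obj, ...)           via is_a_mapping
# np.array([1, 2, 3])   -> ArrayRti(obj, ...)             via is_an_array
# bytearray(b'abc')     -> ArrayRti(np.array(obj), ...)   wrapped in an ndarray first
# 42, 'text', 3.14      -> ScalarRti(obj, ...)            fallback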
def cache_func(prefix, method=False):
"""
Cache result of function execution into the django cache backend.
Calculate cache key based on `prefix`, `args` and `kwargs` of the function.
    To use it as an object method, set `method=True`.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
cache_args = args
if method:
cache_args = args[1:]
cache_key = get_cache_key(prefix, *cache_args, **kwargs)
cached_value = cache.get(cache_key)
if cached_value is None:
cached_value = func(*args, **kwargs)
cache.set(cache_key, cached_value)
return cached_value
return wrapper
return decorator | def function[cache_func, parameter[prefix, method]]:
constant[
Cache result of function execution into the django cache backend.
Calculate cache key based on `prefix`, `args` and `kwargs` of the function.
    To use it as an object method, set `method=True`.
]
def function[decorator, parameter[func]]:
def function[wrapper, parameter[]]:
variable[cache_args] assign[=] name[args]
if name[method] begin[:]
variable[cache_args] assign[=] call[name[args]][<ast.Slice object at 0x7da1affedcf0>]
variable[cache_key] assign[=] call[name[get_cache_key], parameter[name[prefix], <ast.Starred object at 0x7da1affedb10>]]
variable[cached_value] assign[=] call[name[cache].get, parameter[name[cache_key]]]
if compare[name[cached_value] is constant[None]] begin[:]
variable[cached_value] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da1affed780>]]
call[name[cache].set, parameter[name[cache_key], name[cached_value]]]
return[name[cached_value]]
return[name[wrapper]]
return[name[decorator]] | keyword[def] identifier[cache_func] ( identifier[prefix] , identifier[method] = keyword[False] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[func] ):
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[cache_args] = identifier[args]
keyword[if] identifier[method] :
identifier[cache_args] = identifier[args] [ literal[int] :]
identifier[cache_key] = identifier[get_cache_key] ( identifier[prefix] ,* identifier[cache_args] ,** identifier[kwargs] )
identifier[cached_value] = identifier[cache] . identifier[get] ( identifier[cache_key] )
keyword[if] identifier[cached_value] keyword[is] keyword[None] :
identifier[cached_value] = identifier[func] (* identifier[args] ,** identifier[kwargs] )
identifier[cache] . identifier[set] ( identifier[cache_key] , identifier[cached_value] )
keyword[return] identifier[cached_value]
keyword[return] identifier[wrapper]
keyword[return] identifier[decorator] | def cache_func(prefix, method=False):
"""
Cache result of function execution into the django cache backend.
Calculate cache key based on `prefix`, `args` and `kwargs` of the function.
    To use it as an object method, set `method=True`.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
cache_args = args
if method:
cache_args = args[1:] # depends on [control=['if'], data=[]]
cache_key = get_cache_key(prefix, *cache_args, **kwargs)
cached_value = cache.get(cache_key)
if cached_value is None:
cached_value = func(*args, **kwargs)
cache.set(cache_key, cached_value) # depends on [control=['if'], data=['cached_value']]
return cached_value
return wrapper
return decorator |
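A hedged usage sketch; it assumes a configured Django cache backend plus the module's own get_cache_key and cache objects, and expensive_lookup is a hypothetical helper:
@cache_func('user_profile')
def get_profile(user_id):
    # executed only on a cache miss for this (prefix, user_id) key
    return expensive_lookup(user_id)

class ProfileService(object):
    @cache_func('service_profile', method=True)
    def get(self, user_id):
        # method=True drops `self` from the args used to build the cache key
        return expensive_lookup(user_id)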
def send(self, message_type, task_id, message):
""" Sends a message to the UDP receiver
Parameter
---------
message_type: monitoring.MessageType (enum)
In this case message type is RESOURCE_INFO most often
task_id: int
Task identifier of the task for which resource monitoring is being reported
message: object
Arbitrary pickle-able object that is to be sent
Returns:
# bytes sent
"""
x = 0
try:
buffer = pickle.dumps((self.source_id, # Identifier for manager
int(time.time()), # epoch timestamp
message_type,
message))
except Exception as e:
print("Exception during pickling {}".format(e))
return
try:
x = self.sock.sendto(buffer, (self.ip, self.port))
except socket.timeout:
print("Could not send message within timeout limit")
return False
return x | def function[send, parameter[self, message_type, task_id, message]]:
constant[ Sends a message to the UDP receiver
Parameter
---------
message_type: monitoring.MessageType (enum)
In this case message type is RESOURCE_INFO most often
task_id: int
Task identifier of the task for which resource monitoring is being reported
message: object
Arbitrary pickle-able object that is to be sent
Returns:
# bytes sent
]
variable[x] assign[=] constant[0]
<ast.Try object at 0x7da1b01b14e0>
<ast.Try object at 0x7da1b01b3310>
return[name[x]] | keyword[def] identifier[send] ( identifier[self] , identifier[message_type] , identifier[task_id] , identifier[message] ):
literal[string]
identifier[x] = literal[int]
keyword[try] :
identifier[buffer] = identifier[pickle] . identifier[dumps] (( identifier[self] . identifier[source_id] ,
identifier[int] ( identifier[time] . identifier[time] ()),
identifier[message_type] ,
identifier[message] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[print] ( literal[string] . identifier[format] ( identifier[e] ))
keyword[return]
keyword[try] :
identifier[x] = identifier[self] . identifier[sock] . identifier[sendto] ( identifier[buffer] ,( identifier[self] . identifier[ip] , identifier[self] . identifier[port] ))
keyword[except] identifier[socket] . identifier[timeout] :
identifier[print] ( literal[string] )
keyword[return] keyword[False]
keyword[return] identifier[x] | def send(self, message_type, task_id, message):
""" Sends a message to the UDP receiver
Parameter
---------
message_type: monitoring.MessageType (enum)
In this case message type is RESOURCE_INFO most often
task_id: int
Task identifier of the task for which resource monitoring is being reported
message: object
Arbitrary pickle-able object that is to be sent
Returns:
# bytes sent
"""
x = 0
try: # Identifier for manager
# epoch timestamp
buffer = pickle.dumps((self.source_id, int(time.time()), message_type, message)) # depends on [control=['try'], data=[]]
except Exception as e:
print('Exception during pickling {}'.format(e))
return # depends on [control=['except'], data=['e']]
try:
x = self.sock.sendto(buffer, (self.ip, self.port)) # depends on [control=['try'], data=[]]
except socket.timeout:
print('Could not send message within timeout limit')
return False # depends on [control=['except'], data=[]]
return x |
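A self-contained sketch of the wire format used above: the payload is just a pickled 4-tuple, so a receiver can unpack it symmetrically (the concrete values are made up for illustration):
import pickle
import time

source_id = "manager-0"                       # hypothetical manager identifier
message_type = "RESOURCE_INFO"                # stands in for monitoring.MessageType
message = {"psutil_process_memory_percent": 1.5}

buffer = pickle.dumps((source_id, int(time.time()), message_type, message))

# receiver side, e.g. after sock.recvfrom(...)
src, timestamp, mtype, payload = pickle.loads(buffer)
assert src == source_id and mtype == message_type and payload == message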
def fit(self, data, debug=False):
"""
Fit each of the models in the group.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true (default false) will pass the debug parameter
to model estimation.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names.
"""
with log_start_finish(
'fitting models in group {}'.format(self.name), logger):
return {name: self.models[name].fit(df, debug=debug)
for name, df in self._iter_groups(data)} | def function[fit, parameter[self, data, debug]]:
constant[
Fit each of the models in the group.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true (default false) will pass the debug parameter
to model estimation.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names.
]
with call[name[log_start_finish], parameter[call[constant[fitting models in group {}].format, parameter[name[self].name]], name[logger]]] begin[:]
return[<ast.DictComp object at 0x7da20c76dd80>] | keyword[def] identifier[fit] ( identifier[self] , identifier[data] , identifier[debug] = keyword[False] ):
literal[string]
keyword[with] identifier[log_start_finish] (
literal[string] . identifier[format] ( identifier[self] . identifier[name] ), identifier[logger] ):
keyword[return] { identifier[name] : identifier[self] . identifier[models] [ identifier[name] ]. identifier[fit] ( identifier[df] , identifier[debug] = identifier[debug] )
keyword[for] identifier[name] , identifier[df] keyword[in] identifier[self] . identifier[_iter_groups] ( identifier[data] )} | def fit(self, data, debug=False):
"""
Fit each of the models in the group.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true (default false) will pass the debug parameter
to model estimation.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names.
"""
with log_start_finish('fitting models in group {}'.format(self.name), logger):
return {name: self.models[name].fit(df, debug=debug) for (name, df) in self._iter_groups(data)} # depends on [control=['with'], data=[]] |
def pull(self):
"""
Returns an iterable that can be used to iterate over incoming messages,
that were pushed by a push socket. Note that the iterable returns as
many parts as sent by pushers.
:rtype: generator
"""
sock = self.__sock(zmq.PULL)
return self.__recv_generator(sock) | def function[pull, parameter[self]]:
constant[
Returns an iterable that can be used to iterate over incoming messages,
that were pushed by a push socket. Note that the iterable returns as
many parts as sent by pushers.
:rtype: generator
]
variable[sock] assign[=] call[name[self].__sock, parameter[name[zmq].PULL]]
return[call[name[self].__recv_generator, parameter[name[sock]]]] | keyword[def] identifier[pull] ( identifier[self] ):
literal[string]
identifier[sock] = identifier[self] . identifier[__sock] ( identifier[zmq] . identifier[PULL] )
keyword[return] identifier[self] . identifier[__recv_generator] ( identifier[sock] ) | def pull(self):
"""
Returns an iterable that can be used to iterate over incoming messages,
that were pushed by a push socket. Note that the iterable returns as
many parts as sent by pushers.
:rtype: generator
"""
sock = self.__sock(zmq.PULL)
return self.__recv_generator(sock) |
def SetPosition(self, track_id, position):
"""Sets the current track position in microseconds.
:param str track_id: The currently playing track's identifier.
:param int position: Track position in microseconds.
This must be between 0 and <track_length>.
If the Position argument is less than 0, do nothing.
If the Position argument is greater than the track length, do nothing.
If the CanSeek property is false, this has no effect.
"""
self.iface.SetPosition(convert2dbus(track_id, 'o'),
convert2dbus(position, 'x')) | def function[SetPosition, parameter[self, track_id, position]]:
constant[Sets the current track position in microseconds.
:param str track_id: The currently playing track's identifier.
:param int position: Track position in microseconds.
This must be between 0 and <track_length>.
If the Position argument is less than 0, do nothing.
If the Position argument is greater than the track length, do nothing.
If the CanSeek property is false, this has no effect.
]
call[name[self].iface.SetPosition, parameter[call[name[convert2dbus], parameter[name[track_id], constant[o]]], call[name[convert2dbus], parameter[name[position], constant[x]]]]] | keyword[def] identifier[SetPosition] ( identifier[self] , identifier[track_id] , identifier[position] ):
literal[string]
identifier[self] . identifier[iface] . identifier[SetPosition] ( identifier[convert2dbus] ( identifier[track_id] , literal[string] ),
identifier[convert2dbus] ( identifier[position] , literal[string] )) | def SetPosition(self, track_id, position):
"""Sets the current track position in microseconds.
:param str track_id: The currently playing track's identifier.
:param int position: Track position in microseconds.
This must be between 0 and <track_length>.
If the Position argument is less than 0, do nothing.
If the Position argument is greater than the track length, do nothing.
If the CanSeek property is false, this has no effect.
"""
self.iface.SetPosition(convert2dbus(track_id, 'o'), convert2dbus(position, 'x')) |
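A hedged usage sketch showing the seconds-to-microseconds conversion this interface expects; the track id path is hypothetical and must match the currently playing track:
track_id = "/org/mpris/MediaPlayer2/Track/42"      # hypothetical track object path
seconds = 95
player.SetPosition(track_id, seconds * 1000000)    # position is given in microseconds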
def BitVecVal(
value: int, size: int, annotations: Annotations = None
) -> z3.BitVecRef:
"""Creates a new bit vector with a concrete value."""
return z3.BitVecVal(value, size) | def function[BitVecVal, parameter[value, size, annotations]]:
constant[Creates a new bit vector with a concrete value.]
return[call[name[z3].BitVecVal, parameter[name[value], name[size]]]] | keyword[def] identifier[BitVecVal] (
identifier[value] : identifier[int] , identifier[size] : identifier[int] , identifier[annotations] : identifier[Annotations] = keyword[None]
)-> identifier[z3] . identifier[BitVecRef] :
literal[string]
keyword[return] identifier[z3] . identifier[BitVecVal] ( identifier[value] , identifier[size] ) | def BitVecVal(value: int, size: int, annotations: Annotations=None) -> z3.BitVecRef:
"""Creates a new bit vector with a concrete value."""
return z3.BitVecVal(value, size) |
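A small self-contained z3 example of what the helper returns: a concrete bit vector that mixes freely with symbolic ones, with arithmetic wrapping at the vector width.

```python
import z3

x = z3.BitVecVal(0xFF, 8)   # concrete 8-bit value 255
y = z3.BitVec("y", 8)       # symbolic 8-bit value

s = z3.Solver()
s.add(x + y == 0)           # addition wraps modulo 2**8
print(s.check())            # sat
print(s.model()[y])         # 1, because 255 + 1 == 256 == 0 (mod 256)
```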
def cmd_tracker_position(self, args):
'''tracker manual positioning commands'''
connection = self.find_connection()
if not connection:
print("No antenna tracker found")
return
positions = [0, 0, 0, 0, 0] # x, y, z, r, buttons. only position[0] (yaw) and position[1] (pitch) are currently used
for i in range(0, 4):
if len(args) > i:
positions[i] = int(args[i]) # default values are 0
connection.mav.manual_control_send(connection.target_system,
positions[0], positions[1],
positions[2], positions[3],
positions[4]) | def function[cmd_tracker_position, parameter[self, args]]:
constant[tracker manual positioning commands]
variable[connection] assign[=] call[name[self].find_connection, parameter[]]
if <ast.UnaryOp object at 0x7da1b1720430> begin[:]
call[name[print], parameter[constant[No antenna tracker found]]]
return[None]
variable[positions] assign[=] list[[<ast.Constant object at 0x7da1b1723310>, <ast.Constant object at 0x7da1b1723a00>, <ast.Constant object at 0x7da1b1723940>, <ast.Constant object at 0x7da1b1720820>, <ast.Constant object at 0x7da1b1722620>]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], constant[4]]]] begin[:]
if compare[call[name[len], parameter[name[args]]] greater[>] name[i]] begin[:]
call[name[positions]][name[i]] assign[=] call[name[int], parameter[call[name[args]][name[i]]]]
call[name[connection].mav.manual_control_send, parameter[name[connection].target_system, call[name[positions]][constant[0]], call[name[positions]][constant[1]], call[name[positions]][constant[2]], call[name[positions]][constant[3]], call[name[positions]][constant[4]]]] | keyword[def] identifier[cmd_tracker_position] ( identifier[self] , identifier[args] ):
literal[string]
identifier[connection] = identifier[self] . identifier[find_connection] ()
keyword[if] keyword[not] identifier[connection] :
identifier[print] ( literal[string] )
keyword[return]
identifier[positions] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] ):
keyword[if] identifier[len] ( identifier[args] )> identifier[i] :
identifier[positions] [ identifier[i] ]= identifier[int] ( identifier[args] [ identifier[i] ])
identifier[connection] . identifier[mav] . identifier[manual_control_send] ( identifier[connection] . identifier[target_system] ,
identifier[positions] [ literal[int] ], identifier[positions] [ literal[int] ],
identifier[positions] [ literal[int] ], identifier[positions] [ literal[int] ],
identifier[positions] [ literal[int] ]) | def cmd_tracker_position(self, args):
"""tracker manual positioning commands"""
connection = self.find_connection()
if not connection:
print('No antenna tracker found')
return # depends on [control=['if'], data=[]]
positions = [0, 0, 0, 0, 0] # x, y, z, r, buttons. only position[0] (yaw) and position[1] (pitch) are currently used
for i in range(0, 4):
if len(args) > i:
positions[i] = int(args[i]) # default values are 0 # depends on [control=['if'], data=['i']] # depends on [control=['for'], data=['i']]
connection.mav.manual_control_send(connection.target_system, positions[0], positions[1], positions[2], positions[3], positions[4]) |
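A tiny sketch of how the command arguments fall onto the MANUAL_CONTROL axes; the argument values are invented and only the first two axes (yaw, pitch) are meaningful for the tracker.

```python
# e.g. the command "tracker position 1500 1200"
args = ["1500", "1200"]
positions = [0, 0, 0, 0, 0]        # x, y, z, r, buttons
for i in range(0, 4):
    if len(args) > i:
        positions[i] = int(args[i])
print(positions)                    # [1500, 1200, 0, 0, 0]
```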
def delete_hook(self, id):
"""
:calls: `DELETE /orgs/:owner/hooks/:id <http://developer.github.com/v3/orgs/hooks>`_
:param id: integer
:rtype: None
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url + "/hooks/" + str(id)
) | def function[delete_hook, parameter[self, id]]:
constant[
:calls: `DELETE /orgs/:owner/hooks/:id <http://developer.github.com/v3/orgs/hooks>`_
:param id: integer
:rtype: None
]
assert[call[name[isinstance], parameter[name[id], tuple[[<ast.Name object at 0x7da1b21ec0d0>, <ast.Name object at 0x7da1b21eccd0>]]]]]
<ast.Tuple object at 0x7da1b21ecac0> assign[=] call[name[self]._requester.requestJsonAndCheck, parameter[constant[DELETE], binary_operation[binary_operation[name[self].url + constant[/hooks/]] + call[name[str], parameter[name[id]]]]]] | keyword[def] identifier[delete_hook] ( identifier[self] , identifier[id] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[id] ,( identifier[int] , identifier[long] )), identifier[id]
identifier[headers] , identifier[data] = identifier[self] . identifier[_requester] . identifier[requestJsonAndCheck] (
literal[string] ,
identifier[self] . identifier[url] + literal[string] + identifier[str] ( identifier[id] )
) | def delete_hook(self, id):
"""
:calls: `DELETE /orgs/:owner/hooks/:id <http://developer.github.com/v3/orgs/hooks>`_
:param id: integer
:rtype: None
"""
assert isinstance(id, (int, long)), id
(headers, data) = self._requester.requestJsonAndCheck('DELETE', self.url + '/hooks/' + str(id)) |
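A short usage sketch, assuming this is the PyGithub organization wrapper it resembles; the token, organization name, and hook id are placeholders.

```python
from github import Github

gh = Github("<access-token>")                  # placeholder credentials
org = gh.get_organization("my-organization")   # placeholder org login
org.delete_hook(123456)                        # DELETE /orgs/my-organization/hooks/123456
```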
def __filter_past_mappings(self,
past_mappings,
long_inclusion_array):
"""
Parameters
----------
past_mappings : dict.
All elements should be None or compressed sparse row matrices from
scipy.sparse. The following keys should be in past_mappings:
- "rows_to_obs",
- "rows_to_alts",
- "chosen_rows_to_obs",
- "rows_to_nests",
- "rows_to_mixers"
The values that are not None should be 'mapping' matrices that
denote which rows of the past long-format design matrix belong to
which unique object such as unique observations, unique
alternatives, unique nests, unique 'mixing' units etc.
long_inclusion_array : 1D ndarray.
Should denote, via a `1`, the rows of the past mapping matrices
that are to be included in the filtered mapping matrices.
Returns
-------
new_mappings : dict.
The returned dictionary will be the same as `past_mappings` except
that all the mapping matrices will have been filtered according to
`long_inclusion_array`.
"""
new_mappings = {}
for key in past_mappings:
if past_mappings[key] is None:
new_mappings[key] = None
else:
mask_array = long_inclusion_array[:, None]
orig_map = past_mappings[key]
# Initialize the resultant array that is desired
new_map = orig_map.multiply(np.tile(mask_array,
(1, orig_map.shape[1]))).A
# Perform the desired filtering
current_filter = (new_map.sum(axis=1) != 0)
if current_filter.shape[0] > 0:
current_filter = current_filter.ravel()
new_map = new_map[current_filter, :]
# Do the second filtering
current_filter = (new_map.sum(axis=0) != 0)
if current_filter.shape[0] > 0:
current_filter = current_filter.ravel()
new_map = new_map[:, current_filter]
new_mappings[key] = csr_matrix(new_map)
return new_mappings | def function[__filter_past_mappings, parameter[self, past_mappings, long_inclusion_array]]:
constant[
Parameters
----------
past_mappings : dict.
All elements should be None or compressed sparse row matrices from
scipy.sparse. The following keys should be in past_mappings:
- "rows_to_obs",
- "rows_to_alts",
- "chosen_rows_to_obs",
- "rows_to_nests",
- "rows_to_mixers"
The values that are not None should be 'mapping' matrices that
denote which rows of the past long-format design matrix belong to
which unique object such as unique observations, unique
alternatives, unique nests, unique 'mixing' units etc.
long_inclusion_array : 1D ndarray.
Should denote, via a `1`, the rows of the past mapping matrices
that are to be included in the filtered mapping matrices.
Returns
-------
new_mappings : dict.
The returned dictionary will be the same as `past_mappings` except
that all the mapping matrices will have been filtered according to
`long_inclusion_array`.
]
variable[new_mappings] assign[=] dictionary[[], []]
for taget[name[key]] in starred[name[past_mappings]] begin[:]
if compare[call[name[past_mappings]][name[key]] is constant[None]] begin[:]
call[name[new_mappings]][name[key]] assign[=] constant[None]
return[name[new_mappings]] | keyword[def] identifier[__filter_past_mappings] ( identifier[self] ,
identifier[past_mappings] ,
identifier[long_inclusion_array] ):
literal[string]
identifier[new_mappings] ={}
keyword[for] identifier[key] keyword[in] identifier[past_mappings] :
keyword[if] identifier[past_mappings] [ identifier[key] ] keyword[is] keyword[None] :
identifier[new_mappings] [ identifier[key] ]= keyword[None]
keyword[else] :
identifier[mask_array] = identifier[long_inclusion_array] [:, keyword[None] ]
identifier[orig_map] = identifier[past_mappings] [ identifier[key] ]
identifier[new_map] = identifier[orig_map] . identifier[multiply] ( identifier[np] . identifier[tile] ( identifier[mask_array] ,
( literal[int] , identifier[orig_map] . identifier[shape] [ literal[int] ]))). identifier[A]
identifier[current_filter] =( identifier[new_map] . identifier[sum] ( identifier[axis] = literal[int] )!= literal[int] )
keyword[if] identifier[current_filter] . identifier[shape] [ literal[int] ]> literal[int] :
identifier[current_filter] = identifier[current_filter] . identifier[ravel] ()
identifier[new_map] = identifier[new_map] [ identifier[current_filter] ,:]
identifier[current_filter] =( identifier[new_map] . identifier[sum] ( identifier[axis] = literal[int] )!= literal[int] )
keyword[if] identifier[current_filter] . identifier[shape] [ literal[int] ]> literal[int] :
identifier[current_filter] = identifier[current_filter] . identifier[ravel] ()
identifier[new_map] = identifier[new_map] [:, identifier[current_filter] ]
identifier[new_mappings] [ identifier[key] ]= identifier[csr_matrix] ( identifier[new_map] )
keyword[return] identifier[new_mappings] | def __filter_past_mappings(self, past_mappings, long_inclusion_array):
"""
Parameters
----------
past_mappings : dict.
All elements should be None or compressed sparse row matrices from
scipy.sparse. The following keys should be in past_mappings:
- "rows_to_obs",
- "rows_to_alts",
- "chosen_rows_to_obs",
- "rows_to_nests",
- "rows_to_mixers"
The values that are not None should be 'mapping' matrices that
denote which rows of the past long-format design matrix belong to
which unique object such as unique observations, unique
alternatives, unique nests, unique 'mixing' units etc.
long_inclusion_array : 1D ndarray.
Should denote, via a `1`, the rows of the past mapping matrices
that are to be included in the filtered mapping matrices.
Returns
-------
new_mappings : dict.
The returned dictionary will be the same as `past_mappings` except
that all the mapping matrices will have been filtered according to
`long_inclusion_array`.
"""
new_mappings = {}
for key in past_mappings:
if past_mappings[key] is None:
new_mappings[key] = None # depends on [control=['if'], data=[]]
else:
mask_array = long_inclusion_array[:, None]
orig_map = past_mappings[key]
# Initialize the resultant array that is desired
new_map = orig_map.multiply(np.tile(mask_array, (1, orig_map.shape[1]))).A
# Perform the desired filtering
current_filter = new_map.sum(axis=1) != 0
if current_filter.shape[0] > 0:
current_filter = current_filter.ravel()
new_map = new_map[current_filter, :] # depends on [control=['if'], data=[]]
# Do the second filtering
current_filter = new_map.sum(axis=0) != 0
if current_filter.shape[0] > 0:
current_filter = current_filter.ravel()
new_map = new_map[:, current_filter] # depends on [control=['if'], data=[]]
new_mappings[key] = csr_matrix(new_map) # depends on [control=['for'], data=['key']]
return new_mappings |
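The filtering step is easiest to see on a toy mapping matrix: rows flagged out by the inclusion array are zeroed, then empty rows and empty columns are dropped. The data below are invented for illustration.

```python
import numpy as np
from scipy.sparse import csr_matrix

# Toy rows-to-observations mapping: 4 long-format rows, 3 observations.
rows_to_obs = csr_matrix(np.array([[1, 0, 0],
                                   [1, 0, 0],
                                   [0, 1, 0],
                                   [0, 0, 1]]))
long_inclusion_array = np.array([1, 1, 0, 1])      # keep rows 0, 1, 3

mask = np.tile(long_inclusion_array[:, None], (1, rows_to_obs.shape[1]))
new_map = rows_to_obs.multiply(mask).A             # dense array, excluded rows zeroed
new_map = new_map[new_map.sum(axis=1) != 0, :]     # drop excluded rows
new_map = new_map[:, new_map.sum(axis=0) != 0]     # drop now-empty observations
filtered = csr_matrix(new_map)
print(filtered.toarray())
# [[1 0]
#  [1 0]
#  [0 1]]
```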
def handle_cursor(cls, cursor, query, session):
"""Updates progress information"""
from pyhive import hive # pylint: disable=no-name-in-module
unfinished_states = (
hive.ttypes.TOperationState.INITIALIZED_STATE,
hive.ttypes.TOperationState.RUNNING_STATE,
)
polled = cursor.poll()
last_log_line = 0
tracking_url = None
job_id = None
while polled.operationState in unfinished_states:
query = session.query(type(query)).filter_by(id=query.id).one()
if query.status == QueryStatus.STOPPED:
cursor.cancel()
break
log = cursor.fetch_logs() or ''
if log:
log_lines = log.splitlines()
progress = cls.progress(log_lines)
logging.info('Progress total: {}'.format(progress))
needs_commit = False
if progress > query.progress:
query.progress = progress
needs_commit = True
if not tracking_url:
tracking_url = cls.get_tracking_url(log_lines)
if tracking_url:
job_id = tracking_url.split('/')[-2]
logging.info(
'Found the tracking url: {}'.format(tracking_url))
tracking_url = tracking_url_trans(tracking_url)
logging.info(
'Transformation applied: {}'.format(tracking_url))
query.tracking_url = tracking_url
logging.info('Job id: {}'.format(job_id))
needs_commit = True
if job_id and len(log_lines) > last_log_line:
# Wait for job id before logging things out
# this allows for prefixing all log lines and becoming
# searchable in something like Kibana
for l in log_lines[last_log_line:]:
logging.info('[{}] {}'.format(job_id, l))
last_log_line = len(log_lines)
if needs_commit:
session.commit()
time.sleep(hive_poll_interval)
polled = cursor.poll() | def function[handle_cursor, parameter[cls, cursor, query, session]]:
constant[Updates progress information]
from relative_module[pyhive] import module[hive]
variable[unfinished_states] assign[=] tuple[[<ast.Attribute object at 0x7da1b209b9d0>, <ast.Attribute object at 0x7da1b2098850>]]
variable[polled] assign[=] call[name[cursor].poll, parameter[]]
variable[last_log_line] assign[=] constant[0]
variable[tracking_url] assign[=] constant[None]
variable[job_id] assign[=] constant[None]
while compare[name[polled].operationState in name[unfinished_states]] begin[:]
variable[query] assign[=] call[call[call[name[session].query, parameter[call[name[type], parameter[name[query]]]]].filter_by, parameter[]].one, parameter[]]
if compare[name[query].status equal[==] name[QueryStatus].STOPPED] begin[:]
call[name[cursor].cancel, parameter[]]
break
variable[log] assign[=] <ast.BoolOp object at 0x7da1b209a800>
if name[log] begin[:]
variable[log_lines] assign[=] call[name[log].splitlines, parameter[]]
variable[progress] assign[=] call[name[cls].progress, parameter[name[log_lines]]]
call[name[logging].info, parameter[call[constant[Progress total: {}].format, parameter[name[progress]]]]]
variable[needs_commit] assign[=] constant[False]
if compare[name[progress] greater[>] name[query].progress] begin[:]
name[query].progress assign[=] name[progress]
variable[needs_commit] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da1b209a9e0> begin[:]
variable[tracking_url] assign[=] call[name[cls].get_tracking_url, parameter[name[log_lines]]]
if name[tracking_url] begin[:]
variable[job_id] assign[=] call[call[name[tracking_url].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da1b209ae90>]
call[name[logging].info, parameter[call[constant[Found the tracking url: {}].format, parameter[name[tracking_url]]]]]
variable[tracking_url] assign[=] call[name[tracking_url_trans], parameter[name[tracking_url]]]
call[name[logging].info, parameter[call[constant[Transformation applied: {}].format, parameter[name[tracking_url]]]]]
name[query].tracking_url assign[=] name[tracking_url]
call[name[logging].info, parameter[call[constant[Job id: {}].format, parameter[name[job_id]]]]]
variable[needs_commit] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b20983a0> begin[:]
for taget[name[l]] in starred[call[name[log_lines]][<ast.Slice object at 0x7da1b209b220>]] begin[:]
call[name[logging].info, parameter[call[constant[[{}] {}].format, parameter[name[job_id], name[l]]]]]
variable[last_log_line] assign[=] call[name[len], parameter[name[log_lines]]]
if name[needs_commit] begin[:]
call[name[session].commit, parameter[]]
call[name[time].sleep, parameter[name[hive_poll_interval]]]
variable[polled] assign[=] call[name[cursor].poll, parameter[]] | keyword[def] identifier[handle_cursor] ( identifier[cls] , identifier[cursor] , identifier[query] , identifier[session] ):
literal[string]
keyword[from] identifier[pyhive] keyword[import] identifier[hive]
identifier[unfinished_states] =(
identifier[hive] . identifier[ttypes] . identifier[TOperationState] . identifier[INITIALIZED_STATE] ,
identifier[hive] . identifier[ttypes] . identifier[TOperationState] . identifier[RUNNING_STATE] ,
)
identifier[polled] = identifier[cursor] . identifier[poll] ()
identifier[last_log_line] = literal[int]
identifier[tracking_url] = keyword[None]
identifier[job_id] = keyword[None]
keyword[while] identifier[polled] . identifier[operationState] keyword[in] identifier[unfinished_states] :
identifier[query] = identifier[session] . identifier[query] ( identifier[type] ( identifier[query] )). identifier[filter_by] ( identifier[id] = identifier[query] . identifier[id] ). identifier[one] ()
keyword[if] identifier[query] . identifier[status] == identifier[QueryStatus] . identifier[STOPPED] :
identifier[cursor] . identifier[cancel] ()
keyword[break]
identifier[log] = identifier[cursor] . identifier[fetch_logs] () keyword[or] literal[string]
keyword[if] identifier[log] :
identifier[log_lines] = identifier[log] . identifier[splitlines] ()
identifier[progress] = identifier[cls] . identifier[progress] ( identifier[log_lines] )
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[progress] ))
identifier[needs_commit] = keyword[False]
keyword[if] identifier[progress] > identifier[query] . identifier[progress] :
identifier[query] . identifier[progress] = identifier[progress]
identifier[needs_commit] = keyword[True]
keyword[if] keyword[not] identifier[tracking_url] :
identifier[tracking_url] = identifier[cls] . identifier[get_tracking_url] ( identifier[log_lines] )
keyword[if] identifier[tracking_url] :
identifier[job_id] = identifier[tracking_url] . identifier[split] ( literal[string] )[- literal[int] ]
identifier[logging] . identifier[info] (
literal[string] . identifier[format] ( identifier[tracking_url] ))
identifier[tracking_url] = identifier[tracking_url_trans] ( identifier[tracking_url] )
identifier[logging] . identifier[info] (
literal[string] . identifier[format] ( identifier[tracking_url] ))
identifier[query] . identifier[tracking_url] = identifier[tracking_url]
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[job_id] ))
identifier[needs_commit] = keyword[True]
keyword[if] identifier[job_id] keyword[and] identifier[len] ( identifier[log_lines] )> identifier[last_log_line] :
keyword[for] identifier[l] keyword[in] identifier[log_lines] [ identifier[last_log_line] :]:
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[job_id] , identifier[l] ))
identifier[last_log_line] = identifier[len] ( identifier[log_lines] )
keyword[if] identifier[needs_commit] :
identifier[session] . identifier[commit] ()
identifier[time] . identifier[sleep] ( identifier[hive_poll_interval] )
identifier[polled] = identifier[cursor] . identifier[poll] () | def handle_cursor(cls, cursor, query, session):
"""Updates progress information"""
from pyhive import hive # pylint: disable=no-name-in-module
unfinished_states = (hive.ttypes.TOperationState.INITIALIZED_STATE, hive.ttypes.TOperationState.RUNNING_STATE)
polled = cursor.poll()
last_log_line = 0
tracking_url = None
job_id = None
while polled.operationState in unfinished_states:
query = session.query(type(query)).filter_by(id=query.id).one()
if query.status == QueryStatus.STOPPED:
cursor.cancel()
break # depends on [control=['if'], data=[]]
log = cursor.fetch_logs() or ''
if log:
log_lines = log.splitlines()
progress = cls.progress(log_lines)
logging.info('Progress total: {}'.format(progress))
needs_commit = False
if progress > query.progress:
query.progress = progress
needs_commit = True # depends on [control=['if'], data=['progress']]
if not tracking_url:
tracking_url = cls.get_tracking_url(log_lines)
if tracking_url:
job_id = tracking_url.split('/')[-2]
logging.info('Found the tracking url: {}'.format(tracking_url))
tracking_url = tracking_url_trans(tracking_url)
logging.info('Transformation applied: {}'.format(tracking_url))
query.tracking_url = tracking_url
logging.info('Job id: {}'.format(job_id))
needs_commit = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if job_id and len(log_lines) > last_log_line:
# Wait for job id before logging things out
# this allows for prefixing all log lines and becoming
# searchable in something like Kibana
for l in log_lines[last_log_line:]:
logging.info('[{}] {}'.format(job_id, l)) # depends on [control=['for'], data=['l']]
last_log_line = len(log_lines) # depends on [control=['if'], data=[]]
if needs_commit:
session.commit() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
time.sleep(hive_poll_interval)
polled = cursor.poll() # depends on [control=['while'], data=[]] |
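One concrete detail worth pinning down is how the job id is recovered: it is the second-to-last path segment of the tracking URL, which matches the trailing-slash form of the URLs the loop expects. The URL below is invented.

```python
tracking_url = "http://resourcemanager:8088/proxy/application_1700000000000_0042/"
job_id = tracking_url.split("/")[-2]
print(job_id)   # application_1700000000000_0042
```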
def wcxf2arrays(d):
"""Convert a dictionary with a Wilson coefficient
name followed by underscore and numeric indices as keys and numbers as
values to a dictionary with Wilson coefficient names as keys and
numbers or numpy arrays as values. This is needed for the parsing
of input in WCxf format."""
C = {}
for k, v in d.items():
name = k.split('_')[0]
s = C_keys_shape[name]
if s == 1:
C[k] = v
else:
ind = k.split('_')[-1]
if name not in C:
C[name] = np.zeros(s, dtype=complex)
C[name][tuple([int(i) - 1 for i in ind])] = v
return C | def function[wcxf2arrays, parameter[d]]:
constant[Convert a dictionary with a Wilson coefficient
name followed by underscore and numeric indices as keys and numbers as
values to a dictionary with Wilson coefficient names as keys and
numbers or numpy arrays as values. This is needed for the parsing
of input in WCxf format.]
variable[C] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1907fa0>, <ast.Name object at 0x7da1b1905240>]]] in starred[call[name[d].items, parameter[]]] begin[:]
variable[name] assign[=] call[call[name[k].split, parameter[constant[_]]]][constant[0]]
variable[s] assign[=] call[name[C_keys_shape]][name[name]]
if compare[name[s] equal[==] constant[1]] begin[:]
call[name[C]][name[k]] assign[=] name[v]
return[name[C]] | keyword[def] identifier[wcxf2arrays] ( identifier[d] ):
literal[string]
identifier[C] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[d] . identifier[items] ():
identifier[name] = identifier[k] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[s] = identifier[C_keys_shape] [ identifier[name] ]
keyword[if] identifier[s] == literal[int] :
identifier[C] [ identifier[k] ]= identifier[v]
keyword[else] :
identifier[ind] = identifier[k] . identifier[split] ( literal[string] )[- literal[int] ]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[C] :
identifier[C] [ identifier[name] ]= identifier[np] . identifier[zeros] ( identifier[s] , identifier[dtype] = identifier[complex] )
identifier[C] [ identifier[name] ][ identifier[tuple] ([ identifier[int] ( identifier[i] )- literal[int] keyword[for] identifier[i] keyword[in] identifier[ind] ])]= identifier[v]
keyword[return] identifier[C] | def wcxf2arrays(d):
"""Convert a dictionary with a Wilson coefficient
name followed by underscore and numeric indices as keys and numbers as
values to a dictionary with Wilson coefficient names as keys and
numbers or numpy arrays as values. This is needed for the parsing
of input in WCxf format."""
C = {}
for (k, v) in d.items():
name = k.split('_')[0]
s = C_keys_shape[name]
if s == 1:
C[k] = v # depends on [control=['if'], data=[]]
else:
ind = k.split('_')[-1]
if name not in C:
C[name] = np.zeros(s, dtype=complex) # depends on [control=['if'], data=['name', 'C']]
C[name][tuple([int(i) - 1 for i in ind])] = v # depends on [control=['for'], data=[]]
return C |
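A self-contained sketch of the key convention being parsed: scalar coefficients keep their bare name, while tensor coefficients carry 1-based flavour indices after the underscore. The coefficient names and the `C_keys_shape` entries below are invented stand-ins for the module-level table the real function reads.

```python
import numpy as np

C_keys_shape = {"CG": 1, "Cqu1": (3, 3, 3, 3)}     # illustrative shapes only

def wcxf2arrays_demo(d):
    C = {}
    for k, v in d.items():
        name = k.split("_")[0]
        if C_keys_shape[name] == 1:
            C[k] = v                                # scalar coefficient
        else:
            ind = k.split("_")[-1]                  # e.g. "1123" -> indices (0, 0, 1, 2)
            C.setdefault(name, np.zeros(C_keys_shape[name], dtype=complex))
            C[name][tuple(int(i) - 1 for i in ind)] = v
    return C

out = wcxf2arrays_demo({"CG": 1e-7, "Cqu1_1123": 0.5 + 0.1j})
print(out["CG"])                # 1e-07
print(out["Cqu1"][0, 0, 1, 2])  # (0.5+0.1j)
```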
def hmget(self, name, keys, *args):
"Returns a list of values ordered identically to ``keys``"
args = list_or_args(keys, args)
return self.execute_command('HMGET', name, *args) | def function[hmget, parameter[self, name, keys]]:
constant[Returns a list of values ordered identically to ``keys``]
variable[args] assign[=] call[name[list_or_args], parameter[name[keys], name[args]]]
return[call[name[self].execute_command, parameter[constant[HMGET], name[name], <ast.Starred object at 0x7da18f00cd30>]]] | keyword[def] identifier[hmget] ( identifier[self] , identifier[name] , identifier[keys] ,* identifier[args] ):
literal[string]
identifier[args] = identifier[list_or_args] ( identifier[keys] , identifier[args] )
keyword[return] identifier[self] . identifier[execute_command] ( literal[string] , identifier[name] ,* identifier[args] ) | def hmget(self, name, keys, *args):
"""Returns a list of values ordered identically to ``keys``"""
args = list_or_args(keys, args)
return self.execute_command('HMGET', name, *args) |
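A quick usage sketch, assuming this is the redis-py command wrapper it resembles; connection details are the library defaults and the hash contents are invented.

```python
import redis

r = redis.Redis(host="localhost", port=6379, db=0)
r.hset("user:1", mapping={"name": "Ada", "email": "ada@example.com"})

# Values come back in the same order as the requested keys;
# fields that do not exist are returned as None.
print(r.hmget("user:1", ["name", "email", "missing"]))
# [b'Ada', b'ada@example.com', None]
```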
def polyfit2dGrid(arr, mask=None, order=3, replace_all=False,
copy=True, outgrid=None):
'''
replace all masked values with polynomial fitted ones
'''
s0,s1 = arr.shape
if mask is None:
if outgrid is None:
y,x = np.mgrid[:float(s0),:float(s1)]
p = polyfit2d(x.flatten(),y.flatten(),arr.flatten(),order)
return polyval2d(x,y, p, dtype=arr.dtype)
mask = np.zeros_like(arr, dtype=bool)
elif mask.sum() == 0 and not replace_all and outgrid is None:
return arr
valid = ~mask
y,x = np.where(valid)
z = arr[valid]
p = polyfit2d(x,y,z,order)
if outgrid is not None:
yy,xx = outgrid
else:
if replace_all:
yy,xx = np.mgrid[:float(s0),:float(s1)]
else:
yy,xx = np.where(mask)
new = polyval2d(xx,yy, p, dtype=arr.dtype)
if outgrid is not None or replace_all:
return new
if copy:
arr = arr.copy()
arr[mask] = new
return arr | def function[polyfit2dGrid, parameter[arr, mask, order, replace_all, copy, outgrid]]:
constant[
replace all masked values with polynomial fitted ones
]
<ast.Tuple object at 0x7da1b11d1570> assign[=] name[arr].shape
if compare[name[mask] is constant[None]] begin[:]
if compare[name[outgrid] is constant[None]] begin[:]
<ast.Tuple object at 0x7da1b11d1810> assign[=] call[name[np].mgrid][tuple[[<ast.Slice object at 0x7da1b11d1960>, <ast.Slice object at 0x7da1b11d2d40>]]]
variable[p] assign[=] call[name[polyfit2d], parameter[call[name[x].flatten, parameter[]], call[name[y].flatten, parameter[]], call[name[arr].flatten, parameter[]], name[order]]]
return[call[name[polyval2d], parameter[name[x], name[y], name[p]]]]
variable[mask] assign[=] call[name[np].zeros_like, parameter[name[arr]]]
variable[valid] assign[=] <ast.UnaryOp object at 0x7da1b11d2080>
<ast.Tuple object at 0x7da1b11d2f20> assign[=] call[name[np].where, parameter[name[valid]]]
variable[z] assign[=] call[name[arr]][name[valid]]
variable[p] assign[=] call[name[polyfit2d], parameter[name[x], name[y], name[z], name[order]]]
if compare[name[outgrid] is_not constant[None]] begin[:]
<ast.Tuple object at 0x7da1b11d3520> assign[=] name[outgrid]
variable[new] assign[=] call[name[polyval2d], parameter[name[xx], name[yy], name[p]]]
if <ast.BoolOp object at 0x7da1b11d0220> begin[:]
return[name[new]]
if name[copy] begin[:]
variable[arr] assign[=] call[name[arr].copy, parameter[]]
call[name[arr]][name[mask]] assign[=] name[new]
return[name[arr]] | keyword[def] identifier[polyfit2dGrid] ( identifier[arr] , identifier[mask] = keyword[None] , identifier[order] = literal[int] , identifier[replace_all] = keyword[False] ,
identifier[copy] = keyword[True] , identifier[outgrid] = keyword[None] ):
literal[string]
identifier[s0] , identifier[s1] = identifier[arr] . identifier[shape]
keyword[if] identifier[mask] keyword[is] keyword[None] :
keyword[if] identifier[outgrid] keyword[is] keyword[None] :
identifier[y] , identifier[x] = identifier[np] . identifier[mgrid] [: identifier[float] ( identifier[s0] ),: identifier[float] ( identifier[s1] )]
identifier[p] = identifier[polyfit2d] ( identifier[x] . identifier[flatten] (), identifier[y] . identifier[flatten] (), identifier[arr] . identifier[flatten] (), identifier[order] )
keyword[return] identifier[polyval2d] ( identifier[x] , identifier[y] , identifier[p] , identifier[dtype] = identifier[arr] . identifier[dtype] )
identifier[mask] = identifier[np] . identifier[zeros_like] ( identifier[arr] , identifier[dtype] = identifier[bool] )
keyword[elif] identifier[mask] . identifier[sum] ()== literal[int] keyword[and] keyword[not] identifier[replace_all] keyword[and] identifier[outgrid] keyword[is] keyword[None] :
keyword[return] identifier[arr]
identifier[valid] =~ identifier[mask]
identifier[y] , identifier[x] = identifier[np] . identifier[where] ( identifier[valid] )
identifier[z] = identifier[arr] [ identifier[valid] ]
identifier[p] = identifier[polyfit2d] ( identifier[x] , identifier[y] , identifier[z] , identifier[order] )
keyword[if] identifier[outgrid] keyword[is] keyword[not] keyword[None] :
identifier[yy] , identifier[xx] = identifier[outgrid]
keyword[else] :
keyword[if] identifier[replace_all] :
identifier[yy] , identifier[xx] = identifier[np] . identifier[mgrid] [: identifier[float] ( identifier[s0] ),: identifier[float] ( identifier[s1] )]
keyword[else] :
identifier[yy] , identifier[xx] = identifier[np] . identifier[where] ( identifier[mask] )
identifier[new] = identifier[polyval2d] ( identifier[xx] , identifier[yy] , identifier[p] , identifier[dtype] = identifier[arr] . identifier[dtype] )
keyword[if] identifier[outgrid] keyword[is] keyword[not] keyword[None] keyword[or] identifier[replace_all] :
keyword[return] identifier[new]
keyword[if] identifier[copy] :
identifier[arr] = identifier[arr] . identifier[copy] ()
identifier[arr] [ identifier[mask] ]= identifier[new]
keyword[return] identifier[arr] | def polyfit2dGrid(arr, mask=None, order=3, replace_all=False, copy=True, outgrid=None):
"""
replace all masked values with polynomial fitted ones
"""
(s0, s1) = arr.shape
if mask is None:
if outgrid is None:
(y, x) = np.mgrid[:float(s0), :float(s1)]
p = polyfit2d(x.flatten(), y.flatten(), arr.flatten(), order)
return polyval2d(x, y, p, dtype=arr.dtype) # depends on [control=['if'], data=[]]
mask = np.zeros_like(arr, dtype=bool) # depends on [control=['if'], data=['mask']]
elif mask.sum() == 0 and (not replace_all) and (outgrid is None):
return arr # depends on [control=['if'], data=[]]
valid = ~mask
(y, x) = np.where(valid)
z = arr[valid]
p = polyfit2d(x, y, z, order)
if outgrid is not None:
(yy, xx) = outgrid # depends on [control=['if'], data=['outgrid']]
elif replace_all:
(yy, xx) = np.mgrid[:float(s0), :float(s1)] # depends on [control=['if'], data=[]]
else:
(yy, xx) = np.where(mask)
new = polyval2d(xx, yy, p, dtype=arr.dtype)
if outgrid is not None or replace_all:
return new # depends on [control=['if'], data=[]]
if copy:
arr = arr.copy() # depends on [control=['if'], data=[]]
arr[mask] = new
return arr |
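A hedged usage sketch: given a smooth surface with a block of bad pixels, the masked region is rebuilt from the polynomial fitted to the remaining data. It assumes `polyfit2dGrid` and its `polyfit2d`/`polyval2d` helpers are importable from the same module.

```python
import numpy as np

yy, xx = np.mgrid[:50., :50.]
truth = 2 + 0.5 * xx - 0.2 * yy + 0.01 * xx * yy    # smooth synthetic surface
arr = truth.copy()

mask = np.zeros_like(arr, dtype=bool)
mask[20:30, 20:30] = True                           # pretend this block is bad
arr[mask] = 0.0                                     # corrupt the masked block

filled = polyfit2dGrid(arr, mask=mask, order=3)     # fit uses only ~mask pixels
print(np.abs(filled[mask] - truth[mask]).max())     # small: the hole is rebuilt
```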
def srm_id_auto(self, srms_used=['NIST610', 'NIST612', 'NIST614'], n_min=10, reload_srm_database=False):
"""
Function for automatically identifying SRMs
Parameters
----------
srms_used : iterable
Which SRMs have been used. Must match SRM names
in SRM database *exactly* (case sensitive!).
n_min : int
The minimum number of data points a SRM measurement
must contain to be included.
"""
if isinstance(srms_used, str):
srms_used = [srms_used]
# get mean and standard deviations of measured standards
self.srm_compile_measured(n_min)
stdtab = self.stdtab.copy()
# load corresponding SRM database
self.srm_load_database(srms_used, reload_srm_database)
# create blank srm table
srm_tab = self.srmdat.loc[:, ['mol_ratio', 'element']].reset_index().pivot(index='SRM', columns='element', values='mol_ratio')
# Auto - ID STDs
# 1. identify elements in measured SRMS with biggest range of values
meas_tab = stdtab.loc[:, (slice(None), 'mean')] # isolate means of standards
meas_tab.columns = meas_tab.columns.droplevel(1) # drop 'mean' column names
meas_tab.columns = [re.findall('[A-Za-z]+', a)[0] for a in meas_tab.columns] # rename to element names
meas_tab = meas_tab.T.groupby(level=0).first().T # remove duplicate columns
ranges = nominal_values(meas_tab.apply(lambda a: np.ptp(a) / np.nanmean(a), 0)) # calculate relative ranges of all elements
# (used as weights later)
# 2. Work out which standard is which
# normalise all elements between 0-1
def normalise(a):
a = nominal_values(a)
if np.nanmin(a) < np.nanmax(a):
return (a - np.nanmin(a)) / np.nanmax(a - np.nanmin(a))
else:
return np.ones(a.shape)
nmeas = meas_tab.apply(normalise, 0)
nmeas.dropna(1, inplace=True) # remove elements with NaN values
# nmeas.replace(np.nan, 1, inplace=True)
nsrm_tab = srm_tab.apply(normalise, 0)
nsrm_tab.dropna(1, inplace=True)
# nsrm_tab.replace(np.nan, 1, inplace=True)
for uT, r in nmeas.iterrows(): # for each standard...
idx = np.nansum(((nsrm_tab - r) * ranges)**2, 1)
idx = abs((nsrm_tab - r) * ranges).sum(1)
# calculate the absolute difference between the normalised elemental
# values for each measured SRM and the SRM table. Each element is
# multiplied by the relative range seen in that element (i.e. range / mean
# measured value), so that elements with a large difference are given
# more importance in identifying the SRM.
# This produces a table, where each row contains the difference between
# a known vs. measured SRM. The measured SRM is identified as the SRM that
# has the smallest weighted sum value.
stdtab.loc[uT, 'SRM'] = srm_tab.index[idx == min(idx)].values[0]
# calculate mean time for each SRM
# reset index and sort
stdtab.reset_index(inplace=True)
stdtab.sort_index(1, inplace=True)
# isolate STD and uTime
uT = stdtab.loc[:, ['gTime', 'STD']].set_index('STD')
uT.sort_index(inplace=True)
uTm = uT.groupby(level=0).mean() # mean uTime for each SRM
# replace uTime values with means
stdtab.set_index(['STD'], inplace=True)
stdtab.loc[:, 'gTime'] = uTm
# reset index
stdtab.reset_index(inplace=True)
stdtab.set_index(['STD', 'SRM', 'gTime'], inplace=True)
# combine to make SRM reference tables
srmtabs = Bunch()
for a in self.analytes:
el = re.findall('[A-Za-z]+', a)[0]
sub = stdtab.loc[:, a]
srmsub = self.srmdat.loc[self.srmdat.element == el, ['mol_ratio', 'mol_ratio_err']]
srmtab = sub.join(srmsub)
srmtab.columns = ['meas_err', 'meas_mean', 'srm_mean', 'srm_err']
srmtabs[a] = srmtab
self.srmtabs = pd.concat(srmtabs).apply(nominal_values).sort_index()
self.srmtabs.dropna(subset=['srm_mean'], inplace=True)
# replace any nan error values with zeros - nans cause problems later.
self.srmtabs.loc[:, ['meas_err', 'srm_err']] = self.srmtabs.loc[:, ['meas_err', 'srm_err']].replace(np.nan, 0)
# remove internal standard from calibration elements
self.srmtabs.drop(self.internal_standard, inplace=True)
self.srms_ided = True
return | def function[srm_id_auto, parameter[self, srms_used, n_min, reload_srm_database]]:
constant[
Function for automatically identifying SRMs
Parameters
----------
srms_used : iterable
Which SRMs have been used. Must match SRM names
in SRM database *exactly* (case sensitive!).
n_min : int
The minimum number of data points a SRM measurement
must contain to be included.
]
if call[name[isinstance], parameter[name[srms_used], name[str]]] begin[:]
variable[srms_used] assign[=] list[[<ast.Name object at 0x7da1b01abb50>]]
call[name[self].srm_compile_measured, parameter[name[n_min]]]
variable[stdtab] assign[=] call[name[self].stdtab.copy, parameter[]]
call[name[self].srm_load_database, parameter[name[srms_used], name[reload_srm_database]]]
variable[srm_tab] assign[=] call[call[call[name[self].srmdat.loc][tuple[[<ast.Slice object at 0x7da1b01b8b80>, <ast.List object at 0x7da1b01b8be0>]]].reset_index, parameter[]].pivot, parameter[]]
variable[meas_tab] assign[=] call[name[stdtab].loc][tuple[[<ast.Slice object at 0x7da1b01b9540>, <ast.Tuple object at 0x7da1b01bb7f0>]]]
name[meas_tab].columns assign[=] call[name[meas_tab].columns.droplevel, parameter[constant[1]]]
name[meas_tab].columns assign[=] <ast.ListComp object at 0x7da1b01bb610>
variable[meas_tab] assign[=] call[call[name[meas_tab].T.groupby, parameter[]].first, parameter[]].T
variable[ranges] assign[=] call[name[nominal_values], parameter[call[name[meas_tab].apply, parameter[<ast.Lambda object at 0x7da1b01bb2b0>, constant[0]]]]]
def function[normalise, parameter[a]]:
variable[a] assign[=] call[name[nominal_values], parameter[name[a]]]
if compare[call[name[np].nanmin, parameter[name[a]]] less[<] call[name[np].nanmax, parameter[name[a]]]] begin[:]
return[binary_operation[binary_operation[name[a] - call[name[np].nanmin, parameter[name[a]]]] / call[name[np].nanmax, parameter[binary_operation[name[a] - call[name[np].nanmin, parameter[name[a]]]]]]]]
variable[nmeas] assign[=] call[name[meas_tab].apply, parameter[name[normalise], constant[0]]]
call[name[nmeas].dropna, parameter[constant[1]]]
variable[nsrm_tab] assign[=] call[name[srm_tab].apply, parameter[name[normalise], constant[0]]]
call[name[nsrm_tab].dropna, parameter[constant[1]]]
for taget[tuple[[<ast.Name object at 0x7da1b01ff100>, <ast.Name object at 0x7da1b01fefe0>]]] in starred[call[name[nmeas].iterrows, parameter[]]] begin[:]
variable[idx] assign[=] call[name[np].nansum, parameter[binary_operation[binary_operation[binary_operation[name[nsrm_tab] - name[r]] * name[ranges]] ** constant[2]], constant[1]]]
variable[idx] assign[=] call[call[name[abs], parameter[binary_operation[binary_operation[name[nsrm_tab] - name[r]] * name[ranges]]]].sum, parameter[constant[1]]]
call[name[stdtab].loc][tuple[[<ast.Name object at 0x7da1b01ff6d0>, <ast.Constant object at 0x7da1b01ff700>]]] assign[=] call[call[name[srm_tab].index][compare[name[idx] equal[==] call[name[min], parameter[name[idx]]]]].values][constant[0]]
call[name[stdtab].reset_index, parameter[]]
call[name[stdtab].sort_index, parameter[constant[1]]]
variable[uT] assign[=] call[call[name[stdtab].loc][tuple[[<ast.Slice object at 0x7da1b01fdfc0>, <ast.List object at 0x7da1b01fe3e0>]]].set_index, parameter[constant[STD]]]
call[name[uT].sort_index, parameter[]]
variable[uTm] assign[=] call[call[name[uT].groupby, parameter[]].mean, parameter[]]
call[name[stdtab].set_index, parameter[list[[<ast.Constant object at 0x7da1b01fc340>]]]]
call[name[stdtab].loc][tuple[[<ast.Slice object at 0x7da1b01fc550>, <ast.Constant object at 0x7da1b01fc6d0>]]] assign[=] name[uTm]
call[name[stdtab].reset_index, parameter[]]
call[name[stdtab].set_index, parameter[list[[<ast.Constant object at 0x7da1b01ff8b0>, <ast.Constant object at 0x7da1b01ff850>, <ast.Constant object at 0x7da1b01ff8e0>]]]]
variable[srmtabs] assign[=] call[name[Bunch], parameter[]]
for taget[name[a]] in starred[name[self].analytes] begin[:]
variable[el] assign[=] call[call[name[re].findall, parameter[constant[[A-Za-z]+], name[a]]]][constant[0]]
variable[sub] assign[=] call[name[stdtab].loc][tuple[[<ast.Slice object at 0x7da1b01bfca0>, <ast.Name object at 0x7da1b01bfbb0>]]]
variable[srmsub] assign[=] call[name[self].srmdat.loc][tuple[[<ast.Compare object at 0x7da1b01beaa0>, <ast.List object at 0x7da1b01bfc70>]]]
variable[srmtab] assign[=] call[name[sub].join, parameter[name[srmsub]]]
name[srmtab].columns assign[=] list[[<ast.Constant object at 0x7da1b01be680>, <ast.Constant object at 0x7da1b01be710>, <ast.Constant object at 0x7da1b01be6e0>, <ast.Constant object at 0x7da1b01be740>]]
call[name[srmtabs]][name[a]] assign[=] name[srmtab]
name[self].srmtabs assign[=] call[call[call[name[pd].concat, parameter[name[srmtabs]]].apply, parameter[name[nominal_values]]].sort_index, parameter[]]
call[name[self].srmtabs.dropna, parameter[]]
call[name[self].srmtabs.loc][tuple[[<ast.Slice object at 0x7da1b01bde70>, <ast.List object at 0x7da1b01bdde0>]]] assign[=] call[call[name[self].srmtabs.loc][tuple[[<ast.Slice object at 0x7da1b01bdf60>, <ast.List object at 0x7da1b01bdf30>]]].replace, parameter[name[np].nan, constant[0]]]
call[name[self].srmtabs.drop, parameter[name[self].internal_standard]]
name[self].srms_ided assign[=] constant[True]
return[None] | keyword[def] identifier[srm_id_auto] ( identifier[self] , identifier[srms_used] =[ literal[string] , literal[string] , literal[string] ], identifier[n_min] = literal[int] , identifier[reload_srm_database] = keyword[False] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[srms_used] , identifier[str] ):
identifier[srms_used] =[ identifier[srms_used] ]
identifier[self] . identifier[srm_compile_measured] ( identifier[n_min] )
identifier[stdtab] = identifier[self] . identifier[stdtab] . identifier[copy] ()
identifier[self] . identifier[srm_load_database] ( identifier[srms_used] , identifier[reload_srm_database] )
identifier[srm_tab] = identifier[self] . identifier[srmdat] . identifier[loc] [:,[ literal[string] , literal[string] ]]. identifier[reset_index] (). identifier[pivot] ( identifier[index] = literal[string] , identifier[columns] = literal[string] , identifier[values] = literal[string] )
identifier[meas_tab] = identifier[stdtab] . identifier[loc] [:,( identifier[slice] ( keyword[None] ), literal[string] )]
identifier[meas_tab] . identifier[columns] = identifier[meas_tab] . identifier[columns] . identifier[droplevel] ( literal[int] )
identifier[meas_tab] . identifier[columns] =[ identifier[re] . identifier[findall] ( literal[string] , identifier[a] )[ literal[int] ] keyword[for] identifier[a] keyword[in] identifier[meas_tab] . identifier[columns] ]
identifier[meas_tab] = identifier[meas_tab] . identifier[T] . identifier[groupby] ( identifier[level] = literal[int] ). identifier[first] (). identifier[T]
identifier[ranges] = identifier[nominal_values] ( identifier[meas_tab] . identifier[apply] ( keyword[lambda] identifier[a] : identifier[np] . identifier[ptp] ( identifier[a] )/ identifier[np] . identifier[nanmean] ( identifier[a] ), literal[int] ))
keyword[def] identifier[normalise] ( identifier[a] ):
identifier[a] = identifier[nominal_values] ( identifier[a] )
keyword[if] identifier[np] . identifier[nanmin] ( identifier[a] )< identifier[np] . identifier[nanmax] ( identifier[a] ):
keyword[return] ( identifier[a] - identifier[np] . identifier[nanmin] ( identifier[a] ))/ identifier[np] . identifier[nanmax] ( identifier[a] - identifier[np] . identifier[nanmin] ( identifier[a] ))
keyword[else] :
keyword[return] identifier[np] . identifier[ones] ( identifier[a] . identifier[shape] )
identifier[nmeas] = identifier[meas_tab] . identifier[apply] ( identifier[normalise] , literal[int] )
identifier[nmeas] . identifier[dropna] ( literal[int] , identifier[inplace] = keyword[True] )
identifier[nsrm_tab] = identifier[srm_tab] . identifier[apply] ( identifier[normalise] , literal[int] )
identifier[nsrm_tab] . identifier[dropna] ( literal[int] , identifier[inplace] = keyword[True] )
keyword[for] identifier[uT] , identifier[r] keyword[in] identifier[nmeas] . identifier[iterrows] ():
identifier[idx] = identifier[np] . identifier[nansum] ((( identifier[nsrm_tab] - identifier[r] )* identifier[ranges] )** literal[int] , literal[int] )
identifier[idx] = identifier[abs] (( identifier[nsrm_tab] - identifier[r] )* identifier[ranges] ). identifier[sum] ( literal[int] )
identifier[stdtab] . identifier[loc] [ identifier[uT] , literal[string] ]= identifier[srm_tab] . identifier[index] [ identifier[idx] == identifier[min] ( identifier[idx] )]. identifier[values] [ literal[int] ]
identifier[stdtab] . identifier[reset_index] ( identifier[inplace] = keyword[True] )
identifier[stdtab] . identifier[sort_index] ( literal[int] , identifier[inplace] = keyword[True] )
identifier[uT] = identifier[stdtab] . identifier[loc] [:,[ literal[string] , literal[string] ]]. identifier[set_index] ( literal[string] )
identifier[uT] . identifier[sort_index] ( identifier[inplace] = keyword[True] )
identifier[uTm] = identifier[uT] . identifier[groupby] ( identifier[level] = literal[int] ). identifier[mean] ()
identifier[stdtab] . identifier[set_index] ([ literal[string] ], identifier[inplace] = keyword[True] )
identifier[stdtab] . identifier[loc] [:, literal[string] ]= identifier[uTm]
identifier[stdtab] . identifier[reset_index] ( identifier[inplace] = keyword[True] )
identifier[stdtab] . identifier[set_index] ([ literal[string] , literal[string] , literal[string] ], identifier[inplace] = keyword[True] )
identifier[srmtabs] = identifier[Bunch] ()
keyword[for] identifier[a] keyword[in] identifier[self] . identifier[analytes] :
identifier[el] = identifier[re] . identifier[findall] ( literal[string] , identifier[a] )[ literal[int] ]
identifier[sub] = identifier[stdtab] . identifier[loc] [:, identifier[a] ]
identifier[srmsub] = identifier[self] . identifier[srmdat] . identifier[loc] [ identifier[self] . identifier[srmdat] . identifier[element] == identifier[el] ,[ literal[string] , literal[string] ]]
identifier[srmtab] = identifier[sub] . identifier[join] ( identifier[srmsub] )
identifier[srmtab] . identifier[columns] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[srmtabs] [ identifier[a] ]= identifier[srmtab]
identifier[self] . identifier[srmtabs] = identifier[pd] . identifier[concat] ( identifier[srmtabs] ). identifier[apply] ( identifier[nominal_values] ). identifier[sort_index] ()
identifier[self] . identifier[srmtabs] . identifier[dropna] ( identifier[subset] =[ literal[string] ], identifier[inplace] = keyword[True] )
identifier[self] . identifier[srmtabs] . identifier[loc] [:,[ literal[string] , literal[string] ]]= identifier[self] . identifier[srmtabs] . identifier[loc] [:,[ literal[string] , literal[string] ]]. identifier[replace] ( identifier[np] . identifier[nan] , literal[int] )
identifier[self] . identifier[srmtabs] . identifier[drop] ( identifier[self] . identifier[internal_standard] , identifier[inplace] = keyword[True] )
identifier[self] . identifier[srms_ided] = keyword[True]
keyword[return] | def srm_id_auto(self, srms_used=['NIST610', 'NIST612', 'NIST614'], n_min=10, reload_srm_database=False):
"""
Function for automatically identifying SRMs
Parameters
----------
srms_used : iterable
Which SRMs have been used. Must match SRM names
in SRM database *exactly* (case sensitive!).
n_min : int
The minimum number of data points a SRM measurement
must contain to be included.
"""
if isinstance(srms_used, str):
srms_used = [srms_used] # depends on [control=['if'], data=[]]
# get mean and standard deviations of measured standards
self.srm_compile_measured(n_min)
stdtab = self.stdtab.copy()
# load corresponding SRM database
self.srm_load_database(srms_used, reload_srm_database)
# create blank srm table
srm_tab = self.srmdat.loc[:, ['mol_ratio', 'element']].reset_index().pivot(index='SRM', columns='element', values='mol_ratio')
# Auto - ID STDs
# 1. identify elements in measured SRMS with biggest range of values
meas_tab = stdtab.loc[:, (slice(None), 'mean')] # isolate means of standards
meas_tab.columns = meas_tab.columns.droplevel(1) # drop 'mean' column names
meas_tab.columns = [re.findall('[A-Za-z]+', a)[0] for a in meas_tab.columns] # rename to element names
meas_tab = meas_tab.T.groupby(level=0).first().T # remove duplicate columns
ranges = nominal_values(meas_tab.apply(lambda a: np.ptp(a) / np.nanmean(a), 0)) # calculate relative ranges of all elements
# (used as weights later)
# 2. Work out which standard is which
# normalise all elements between 0-1
def normalise(a):
a = nominal_values(a)
if np.nanmin(a) < np.nanmax(a):
return (a - np.nanmin(a)) / np.nanmax(a - np.nanmin(a)) # depends on [control=['if'], data=[]]
else:
return np.ones(a.shape)
nmeas = meas_tab.apply(normalise, 0)
nmeas.dropna(1, inplace=True) # remove elements with NaN values
# nmeas.replace(np.nan, 1, inplace=True)
nsrm_tab = srm_tab.apply(normalise, 0)
nsrm_tab.dropna(1, inplace=True)
# nsrm_tab.replace(np.nan, 1, inplace=True)
for (uT, r) in nmeas.iterrows(): # for each standard...
idx = np.nansum(((nsrm_tab - r) * ranges) ** 2, 1)
idx = abs((nsrm_tab - r) * ranges).sum(1)
# calculate the absolute difference between the normalised elemental
# values for each measured SRM and the SRM table. Each element is
# multiplied by the relative range seen in that element (i.e. range / mean
# measured value), so that elements with a large difference are given
# more importance in identifying the SRM.
# This produces a table, where each row contains the difference between
# a known vs. measured SRM. The measured SRM is identified as the SRM that
# has the smallest weighted sum value.
stdtab.loc[uT, 'SRM'] = srm_tab.index[idx == min(idx)].values[0] # depends on [control=['for'], data=[]]
# calculate mean time for each SRM
# reset index and sort
stdtab.reset_index(inplace=True)
stdtab.sort_index(1, inplace=True)
# isolate STD and uTime
uT = stdtab.loc[:, ['gTime', 'STD']].set_index('STD')
uT.sort_index(inplace=True)
uTm = uT.groupby(level=0).mean() # mean uTime for each SRM
# replace uTime values with means
stdtab.set_index(['STD'], inplace=True)
stdtab.loc[:, 'gTime'] = uTm
# reset index
stdtab.reset_index(inplace=True)
stdtab.set_index(['STD', 'SRM', 'gTime'], inplace=True)
# combine to make SRM reference tables
srmtabs = Bunch()
for a in self.analytes:
el = re.findall('[A-Za-z]+', a)[0]
sub = stdtab.loc[:, a]
srmsub = self.srmdat.loc[self.srmdat.element == el, ['mol_ratio', 'mol_ratio_err']]
srmtab = sub.join(srmsub)
srmtab.columns = ['meas_err', 'meas_mean', 'srm_mean', 'srm_err']
srmtabs[a] = srmtab # depends on [control=['for'], data=['a']]
self.srmtabs = pd.concat(srmtabs).apply(nominal_values).sort_index()
self.srmtabs.dropna(subset=['srm_mean'], inplace=True)
# replace any nan error values with zeros - nans cause problems later.
self.srmtabs.loc[:, ['meas_err', 'srm_err']] = self.srmtabs.loc[:, ['meas_err', 'srm_err']].replace(np.nan, 0)
# remove internal standard from calibration elements
self.srmtabs.drop(self.internal_standard, inplace=True)
self.srms_ided = True
return |
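The identification logic boils down to a weighted nearest-neighbour match, sketched below on invented compositions: normalise each element to [0, 1], weight by how discriminating that element is, and pick the reference SRM with the smallest summed absolute difference.

```python
import numpy as np
import pandas as pd

srm_tab = pd.DataFrame({"Ca": [1.0, 0.5, 0.1], "Mg": [0.2, 0.6, 1.0]},
                       index=["NIST610", "NIST612", "NIST614"])  # invented values
measured = pd.Series({"Ca": 0.48, "Mg": 0.61})  # one measured standard, pre-scaled

def normalise(col):
    rng = col.max() - col.min()
    return (col - col.min()) / rng if rng > 0 else col * 0.0 + 1.0

nsrm_tab = srm_tab.apply(normalise, axis=0)
ranges = (srm_tab.max() - srm_tab.min()) / srm_tab.mean()  # discriminating power

dist = (nsrm_tab - measured).abs().mul(ranges).sum(axis=1)
print(dist.idxmin())   # NIST612, the closest reference composition
```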
def _score(self, state, score_movement=True):
"""Score a state based on how balanced it is. A higher score represents
a more balanced state.
:param state: The state to score.
"""
score = 0
max_score = 0
if state.total_weight:
# Coefficient of variance is a value between 0 and the sqrt(n)
# where n is the length of the series (the number of brokers)
# so those parameters are scaled by (1 / sqrt(# or brokers)) to
# get a value between 0 and 1.
#
# Since smaller imbalance values are preferred use 1 - x so that
# higher scores correspond to more balanced states.
score += self.args.partition_weight_cv_score_weight * \
(1 - state.broker_weight_cv / sqrt(len(state.brokers)))
score += self.args.leader_weight_cv_score_weight * \
(1 - state.broker_leader_weight_cv / sqrt(len(state.brokers)))
score += self.args.topic_broker_imbalance_score_weight * \
(1 - state.weighted_topic_broker_imbalance)
score += self.args.broker_partition_count_score_weight * \
(1 - state.broker_partition_count_cv / sqrt(len(state.brokers)))
score += self.args.broker_leader_count_score_weight * \
(1 - state.broker_leader_count_cv / sqrt(len(state.brokers)))
max_score += self.args.partition_weight_cv_score_weight
max_score += self.args.leader_weight_cv_score_weight
max_score += self.args.topic_broker_imbalance_score_weight
max_score += self.args.broker_partition_count_score_weight
max_score += self.args.broker_leader_count_score_weight
if self.args.max_movement_size is not None and score_movement:
# Avoid potential divide-by-zero error
max_movement = max(self.args.max_movement_size, 1)
score += self.args.movement_size_score_weight * \
(1 - state.movement_size / max_movement)
max_score += self.args.movement_size_score_weight
if self.args.max_leader_changes is not None and score_movement:
# Avoid potential divide-by-zero error
max_leader = max(self.args.max_leader_changes, 1)
score += self.args.leader_change_score_weight * \
(1 - state.leader_movement_count / max_leader)
max_score += self.args.leader_change_score_weight
return score / max_score | def function[_score, parameter[self, state, score_movement]]:
constant[Score a state based on how balanced it is. A higher score represents
a more balanced state.
:param state: The state to score.
]
variable[score] assign[=] constant[0]
variable[max_score] assign[=] constant[0]
if name[state].total_weight begin[:]
<ast.AugAssign object at 0x7da1b07ac2e0>
<ast.AugAssign object at 0x7da1b07adba0>
<ast.AugAssign object at 0x7da1b07ac5b0>
<ast.AugAssign object at 0x7da1b07ad420>
<ast.AugAssign object at 0x7da1b07acf40>
<ast.AugAssign object at 0x7da1b07aef80>
<ast.AugAssign object at 0x7da1b07ac9a0>
<ast.AugAssign object at 0x7da1b07ae6b0>
<ast.AugAssign object at 0x7da1b07adc90>
<ast.AugAssign object at 0x7da1b07af3a0>
if <ast.BoolOp object at 0x7da1b07af7f0> begin[:]
variable[max_movement] assign[=] call[name[max], parameter[name[self].args.max_movement_size, constant[1]]]
<ast.AugAssign object at 0x7da1b07ae2c0>
<ast.AugAssign object at 0x7da1b07adbd0>
if <ast.BoolOp object at 0x7da1b07aca00> begin[:]
variable[max_leader] assign[=] call[name[max], parameter[name[self].args.max_leader_changes, constant[1]]]
<ast.AugAssign object at 0x7da1b07afc40>
<ast.AugAssign object at 0x7da1b07cf0d0>
return[binary_operation[name[score] / name[max_score]]] | keyword[def] identifier[_score] ( identifier[self] , identifier[state] , identifier[score_movement] = keyword[True] ):
literal[string]
identifier[score] = literal[int]
identifier[max_score] = literal[int]
keyword[if] identifier[state] . identifier[total_weight] :
identifier[score] += identifier[self] . identifier[args] . identifier[partition_weight_cv_score_weight] *( literal[int] - identifier[state] . identifier[broker_weight_cv] / identifier[sqrt] ( identifier[len] ( identifier[state] . identifier[brokers] )))
identifier[score] += identifier[self] . identifier[args] . identifier[leader_weight_cv_score_weight] *( literal[int] - identifier[state] . identifier[broker_leader_weight_cv] / identifier[sqrt] ( identifier[len] ( identifier[state] . identifier[brokers] )))
identifier[score] += identifier[self] . identifier[args] . identifier[topic_broker_imbalance_score_weight] *( literal[int] - identifier[state] . identifier[weighted_topic_broker_imbalance] )
identifier[score] += identifier[self] . identifier[args] . identifier[broker_partition_count_score_weight] *( literal[int] - identifier[state] . identifier[broker_partition_count_cv] / identifier[sqrt] ( identifier[len] ( identifier[state] . identifier[brokers] )))
identifier[score] += identifier[self] . identifier[args] . identifier[broker_leader_count_score_weight] *( literal[int] - identifier[state] . identifier[broker_leader_count_cv] / identifier[sqrt] ( identifier[len] ( identifier[state] . identifier[brokers] )))
identifier[max_score] += identifier[self] . identifier[args] . identifier[partition_weight_cv_score_weight]
identifier[max_score] += identifier[self] . identifier[args] . identifier[leader_weight_cv_score_weight]
identifier[max_score] += identifier[self] . identifier[args] . identifier[topic_broker_imbalance_score_weight]
identifier[max_score] += identifier[self] . identifier[args] . identifier[broker_partition_count_score_weight]
identifier[max_score] += identifier[self] . identifier[args] . identifier[broker_leader_count_score_weight]
keyword[if] identifier[self] . identifier[args] . identifier[max_movement_size] keyword[is] keyword[not] keyword[None] keyword[and] identifier[score_movement] :
identifier[max_movement] = identifier[max] ( identifier[self] . identifier[args] . identifier[max_movement_size] , literal[int] )
identifier[score] += identifier[self] . identifier[args] . identifier[movement_size_score_weight] *( literal[int] - identifier[state] . identifier[movement_size] / identifier[max_movement] )
identifier[max_score] += identifier[self] . identifier[args] . identifier[movement_size_score_weight]
keyword[if] identifier[self] . identifier[args] . identifier[max_leader_changes] keyword[is] keyword[not] keyword[None] keyword[and] identifier[score_movement] :
identifier[max_leader] = identifier[max] ( identifier[self] . identifier[args] . identifier[max_leader_changes] , literal[int] )
identifier[score] += identifier[self] . identifier[args] . identifier[leader_change_score_weight] *( literal[int] - identifier[state] . identifier[leader_movement_count] / identifier[max_leader] )
identifier[max_score] += identifier[self] . identifier[args] . identifier[leader_change_score_weight]
keyword[return] identifier[score] / identifier[max_score] | def _score(self, state, score_movement=True):
"""Score a state based on how balanced it is. A higher score represents
a more balanced state.
:param state: The state to score.
"""
score = 0
max_score = 0
if state.total_weight:
# Coefficient of variance is a value between 0 and the sqrt(n)
# where n is the length of the series (the number of brokers)
            # so those parameters are scaled by (1 / sqrt(# of brokers)) to
# get a value between 0 and 1.
#
# Since smaller imbalance values are preferred use 1 - x so that
# higher scores correspond to more balanced states.
score += self.args.partition_weight_cv_score_weight * (1 - state.broker_weight_cv / sqrt(len(state.brokers)))
score += self.args.leader_weight_cv_score_weight * (1 - state.broker_leader_weight_cv / sqrt(len(state.brokers)))
score += self.args.topic_broker_imbalance_score_weight * (1 - state.weighted_topic_broker_imbalance)
score += self.args.broker_partition_count_score_weight * (1 - state.broker_partition_count_cv / sqrt(len(state.brokers)))
score += self.args.broker_leader_count_score_weight * (1 - state.broker_leader_count_cv / sqrt(len(state.brokers)))
max_score += self.args.partition_weight_cv_score_weight
max_score += self.args.leader_weight_cv_score_weight
max_score += self.args.topic_broker_imbalance_score_weight
max_score += self.args.broker_partition_count_score_weight
max_score += self.args.broker_leader_count_score_weight # depends on [control=['if'], data=[]]
if self.args.max_movement_size is not None and score_movement:
# Avoid potential divide-by-zero error
max_movement = max(self.args.max_movement_size, 1)
score += self.args.movement_size_score_weight * (1 - state.movement_size / max_movement)
max_score += self.args.movement_size_score_weight # depends on [control=['if'], data=[]]
if self.args.max_leader_changes is not None and score_movement:
# Avoid potential divide-by-zero error
max_leader = max(self.args.max_leader_changes, 1)
score += self.args.leader_change_score_weight * (1 - state.leader_movement_count / max_leader)
max_score += self.args.leader_change_score_weight # depends on [control=['if'], data=[]]
return score / max_score |
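A quick numeric sketch of how one balance term above maps into [0, 1]; the numbers are hypothetical, not taken from any real cluster:

from math import sqrt

partition_weight_cv_score_weight = 1.0   # hypothetical weight
broker_weight_cv = 0.6                   # hypothetical coefficient of variation
n_brokers = 9

# The CV lies between 0 and sqrt(n), so dividing by sqrt(n) rescales it into [0, 1];
# subtracting from 1 makes larger scores mean a more balanced cluster.
term = partition_weight_cv_score_weight * (1 - broker_weight_cv / sqrt(n_brokers))
print(term)  # 0.8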
def GetName(self, number):
"""Retrieves the name of an enumeration value by number.
Args:
number (int): number.
Returns:
str: name of the enumeration value or None if no corresponding
enumeration value was found.
"""
value = self._data_type_definition.values_per_number.get(number, None)
if not value:
return None
return value.name | def function[GetName, parameter[self, number]]:
constant[Retrieves the name of an enumeration value by number.
Args:
number (int): number.
Returns:
str: name of the enumeration value or None if no corresponding
enumeration value was found.
]
variable[value] assign[=] call[name[self]._data_type_definition.values_per_number.get, parameter[name[number], constant[None]]]
if <ast.UnaryOp object at 0x7da1b0d5ba00> begin[:]
return[constant[None]]
return[name[value].name] | keyword[def] identifier[GetName] ( identifier[self] , identifier[number] ):
literal[string]
identifier[value] = identifier[self] . identifier[_data_type_definition] . identifier[values_per_number] . identifier[get] ( identifier[number] , keyword[None] )
keyword[if] keyword[not] identifier[value] :
keyword[return] keyword[None]
keyword[return] identifier[value] . identifier[name] | def GetName(self, number):
"""Retrieves the name of an enumeration value by number.
Args:
number (int): number.
Returns:
str: name of the enumeration value or None if no corresponding
enumeration value was found.
"""
value = self._data_type_definition.values_per_number.get(number, None)
if not value:
return None # depends on [control=['if'], data=[]]
return value.name |
def recall_score(gold, pred, pos_label=1, ignore_in_gold=[], ignore_in_pred=[]):
"""
Calculate recall for a single class.
Args:
gold: A 1d array-like of gold labels
pred: A 1d array-like of predicted labels (assuming abstain = 0)
ignore_in_gold: A list of labels for which elements having that gold
label will be ignored.
ignore_in_pred: A list of labels for which elements having that pred
label will be ignored.
pos_label: The class label to treat as positive for recall
Returns:
rec: The (float) recall score
"""
gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)
positives = np.where(pred == pos_label, 1, 0).astype(bool)
trues = np.where(gold == pos_label, 1, 0).astype(bool)
TP = np.sum(positives * trues)
FN = np.sum(np.logical_not(positives) * trues)
if TP or FN:
rec = TP / (TP + FN)
else:
rec = 0
return rec | def function[recall_score, parameter[gold, pred, pos_label, ignore_in_gold, ignore_in_pred]]:
constant[
Calculate recall for a single class.
Args:
gold: A 1d array-like of gold labels
pred: A 1d array-like of predicted labels (assuming abstain = 0)
ignore_in_gold: A list of labels for which elements having that gold
label will be ignored.
ignore_in_pred: A list of labels for which elements having that pred
label will be ignored.
pos_label: The class label to treat as positive for recall
Returns:
rec: The (float) recall score
]
<ast.Tuple object at 0x7da1b1b45120> assign[=] call[name[_preprocess], parameter[name[gold], name[pred], name[ignore_in_gold], name[ignore_in_pred]]]
variable[positives] assign[=] call[call[name[np].where, parameter[compare[name[pred] equal[==] name[pos_label]], constant[1], constant[0]]].astype, parameter[name[bool]]]
variable[trues] assign[=] call[call[name[np].where, parameter[compare[name[gold] equal[==] name[pos_label]], constant[1], constant[0]]].astype, parameter[name[bool]]]
variable[TP] assign[=] call[name[np].sum, parameter[binary_operation[name[positives] * name[trues]]]]
variable[FN] assign[=] call[name[np].sum, parameter[binary_operation[call[name[np].logical_not, parameter[name[positives]]] * name[trues]]]]
if <ast.BoolOp object at 0x7da1b1b68bb0> begin[:]
variable[rec] assign[=] binary_operation[name[TP] / binary_operation[name[TP] + name[FN]]]
return[name[rec]] | keyword[def] identifier[recall_score] ( identifier[gold] , identifier[pred] , identifier[pos_label] = literal[int] , identifier[ignore_in_gold] =[], identifier[ignore_in_pred] =[]):
literal[string]
identifier[gold] , identifier[pred] = identifier[_preprocess] ( identifier[gold] , identifier[pred] , identifier[ignore_in_gold] , identifier[ignore_in_pred] )
identifier[positives] = identifier[np] . identifier[where] ( identifier[pred] == identifier[pos_label] , literal[int] , literal[int] ). identifier[astype] ( identifier[bool] )
identifier[trues] = identifier[np] . identifier[where] ( identifier[gold] == identifier[pos_label] , literal[int] , literal[int] ). identifier[astype] ( identifier[bool] )
identifier[TP] = identifier[np] . identifier[sum] ( identifier[positives] * identifier[trues] )
identifier[FN] = identifier[np] . identifier[sum] ( identifier[np] . identifier[logical_not] ( identifier[positives] )* identifier[trues] )
keyword[if] identifier[TP] keyword[or] identifier[FN] :
identifier[rec] = identifier[TP] /( identifier[TP] + identifier[FN] )
keyword[else] :
identifier[rec] = literal[int]
keyword[return] identifier[rec] | def recall_score(gold, pred, pos_label=1, ignore_in_gold=[], ignore_in_pred=[]):
"""
Calculate recall for a single class.
Args:
gold: A 1d array-like of gold labels
pred: A 1d array-like of predicted labels (assuming abstain = 0)
ignore_in_gold: A list of labels for which elements having that gold
label will be ignored.
ignore_in_pred: A list of labels for which elements having that pred
label will be ignored.
pos_label: The class label to treat as positive for recall
Returns:
rec: The (float) recall score
"""
(gold, pred) = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)
positives = np.where(pred == pos_label, 1, 0).astype(bool)
trues = np.where(gold == pos_label, 1, 0).astype(bool)
TP = np.sum(positives * trues)
FN = np.sum(np.logical_not(positives) * trues)
if TP or FN:
rec = TP / (TP + FN) # depends on [control=['if'], data=[]]
else:
rec = 0
return rec |
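For reference, a tiny self-contained sketch of the TP/FN arithmetic behind recall_score; the labels are invented, and _preprocess is skipped since it is defined elsewhere:

import numpy as np

gold = np.array([1, 1, 0, 1, 0])   # hypothetical gold labels
pred = np.array([1, 0, 0, 1, 1])   # hypothetical predictions
positives = (pred == 1)
trues = (gold == 1)
TP = np.sum(positives & trues)     # 2 (indices 0 and 3)
FN = np.sum(~positives & trues)    # 1 (index 1)
recall = TP / (TP + FN)            # 2 / 3 ≈ 0.667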
def update_last_wm_layers(self, service_id, num_layers=10):
"""
Update and index the last added and deleted layers (num_layers) in WorldMap service.
"""
from hypermap.aggregator.models import Service
LOGGER.debug(
'Updating the index the last %s added and %s deleted layers in WorldMap service'
% (num_layers, num_layers)
)
service = Service.objects.get(id=service_id)
# TODO raise error if service type is not WM type
if service.type == 'Hypermap:WorldMapLegacy':
from hypermap.aggregator.models import update_layers_wm_legacy as update_layers_wm
if service.type == 'Hypermap:WorldMap':
from hypermap.aggregator.models import update_layers_geonode_wm as update_layers_wm
update_layers_wm(service, num_layers)
# Remove in search engine last num_layers that were deleted
LOGGER.debug('Removing the index for the last %s deleted layers' % num_layers)
layer_to_unindex = service.layer_set.filter(was_deleted=True).order_by('-last_updated')[0:num_layers]
for layer in layer_to_unindex:
if not settings.REGISTRY_SKIP_CELERY:
unindex_layer(layer.id, use_cache=True)
else:
unindex_layer(layer.id)
# Add/Update in search engine last num_layers that were added
LOGGER.debug('Adding/Updating the index for the last %s added layers' % num_layers)
layer_to_index = service.layer_set.filter(was_deleted=False).order_by('-last_updated')[0:num_layers]
for layer in layer_to_index:
if not settings.REGISTRY_SKIP_CELERY:
index_layer(layer.id, use_cache=True)
else:
index_layer(layer.id) | def function[update_last_wm_layers, parameter[self, service_id, num_layers]]:
constant[
Update and index the last added and deleted layers (num_layers) in WorldMap service.
]
from relative_module[hypermap.aggregator.models] import module[Service]
call[name[LOGGER].debug, parameter[binary_operation[constant[Updating the index the last %s added and %s deleted layers in WorldMap service] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0fce0b0>, <ast.Name object at 0x7da1b0fce2f0>]]]]]
variable[service] assign[=] call[name[Service].objects.get, parameter[]]
if compare[name[service].type equal[==] constant[Hypermap:WorldMapLegacy]] begin[:]
from relative_module[hypermap.aggregator.models] import module[update_layers_wm_legacy]
if compare[name[service].type equal[==] constant[Hypermap:WorldMap]] begin[:]
from relative_module[hypermap.aggregator.models] import module[update_layers_geonode_wm]
call[name[update_layers_wm], parameter[name[service], name[num_layers]]]
call[name[LOGGER].debug, parameter[binary_operation[constant[Removing the index for the last %s deleted layers] <ast.Mod object at 0x7da2590d6920> name[num_layers]]]]
variable[layer_to_unindex] assign[=] call[call[call[name[service].layer_set.filter, parameter[]].order_by, parameter[constant[-last_updated]]]][<ast.Slice object at 0x7da1b0f9cfa0>]
for taget[name[layer]] in starred[name[layer_to_unindex]] begin[:]
if <ast.UnaryOp object at 0x7da1b0f9d540> begin[:]
call[name[unindex_layer], parameter[name[layer].id]]
call[name[LOGGER].debug, parameter[binary_operation[constant[Adding/Updating the index for the last %s added layers] <ast.Mod object at 0x7da2590d6920> name[num_layers]]]]
variable[layer_to_index] assign[=] call[call[call[name[service].layer_set.filter, parameter[]].order_by, parameter[constant[-last_updated]]]][<ast.Slice object at 0x7da1b0f9de40>]
for taget[name[layer]] in starred[name[layer_to_index]] begin[:]
if <ast.UnaryOp object at 0x7da1b0f9eaa0> begin[:]
call[name[index_layer], parameter[name[layer].id]] | keyword[def] identifier[update_last_wm_layers] ( identifier[self] , identifier[service_id] , identifier[num_layers] = literal[int] ):
literal[string]
keyword[from] identifier[hypermap] . identifier[aggregator] . identifier[models] keyword[import] identifier[Service]
identifier[LOGGER] . identifier[debug] (
literal[string]
%( identifier[num_layers] , identifier[num_layers] )
)
identifier[service] = identifier[Service] . identifier[objects] . identifier[get] ( identifier[id] = identifier[service_id] )
keyword[if] identifier[service] . identifier[type] == literal[string] :
keyword[from] identifier[hypermap] . identifier[aggregator] . identifier[models] keyword[import] identifier[update_layers_wm_legacy] keyword[as] identifier[update_layers_wm]
keyword[if] identifier[service] . identifier[type] == literal[string] :
keyword[from] identifier[hypermap] . identifier[aggregator] . identifier[models] keyword[import] identifier[update_layers_geonode_wm] keyword[as] identifier[update_layers_wm]
identifier[update_layers_wm] ( identifier[service] , identifier[num_layers] )
identifier[LOGGER] . identifier[debug] ( literal[string] % identifier[num_layers] )
identifier[layer_to_unindex] = identifier[service] . identifier[layer_set] . identifier[filter] ( identifier[was_deleted] = keyword[True] ). identifier[order_by] ( literal[string] )[ literal[int] : identifier[num_layers] ]
keyword[for] identifier[layer] keyword[in] identifier[layer_to_unindex] :
keyword[if] keyword[not] identifier[settings] . identifier[REGISTRY_SKIP_CELERY] :
identifier[unindex_layer] ( identifier[layer] . identifier[id] , identifier[use_cache] = keyword[True] )
keyword[else] :
identifier[unindex_layer] ( identifier[layer] . identifier[id] )
identifier[LOGGER] . identifier[debug] ( literal[string] % identifier[num_layers] )
identifier[layer_to_index] = identifier[service] . identifier[layer_set] . identifier[filter] ( identifier[was_deleted] = keyword[False] ). identifier[order_by] ( literal[string] )[ literal[int] : identifier[num_layers] ]
keyword[for] identifier[layer] keyword[in] identifier[layer_to_index] :
keyword[if] keyword[not] identifier[settings] . identifier[REGISTRY_SKIP_CELERY] :
identifier[index_layer] ( identifier[layer] . identifier[id] , identifier[use_cache] = keyword[True] )
keyword[else] :
identifier[index_layer] ( identifier[layer] . identifier[id] ) | def update_last_wm_layers(self, service_id, num_layers=10):
"""
Update and index the last added and deleted layers (num_layers) in WorldMap service.
"""
from hypermap.aggregator.models import Service
LOGGER.debug('Updating the index the last %s added and %s deleted layers in WorldMap service' % (num_layers, num_layers))
service = Service.objects.get(id=service_id)
# TODO raise error if service type is not WM type
if service.type == 'Hypermap:WorldMapLegacy':
from hypermap.aggregator.models import update_layers_wm_legacy as update_layers_wm # depends on [control=['if'], data=[]]
if service.type == 'Hypermap:WorldMap':
from hypermap.aggregator.models import update_layers_geonode_wm as update_layers_wm # depends on [control=['if'], data=[]]
update_layers_wm(service, num_layers)
# Remove in search engine last num_layers that were deleted
LOGGER.debug('Removing the index for the last %s deleted layers' % num_layers)
layer_to_unindex = service.layer_set.filter(was_deleted=True).order_by('-last_updated')[0:num_layers]
for layer in layer_to_unindex:
if not settings.REGISTRY_SKIP_CELERY:
unindex_layer(layer.id, use_cache=True) # depends on [control=['if'], data=[]]
else:
unindex_layer(layer.id) # depends on [control=['for'], data=['layer']]
# Add/Update in search engine last num_layers that were added
LOGGER.debug('Adding/Updating the index for the last %s added layers' % num_layers)
layer_to_index = service.layer_set.filter(was_deleted=False).order_by('-last_updated')[0:num_layers]
for layer in layer_to_index:
if not settings.REGISTRY_SKIP_CELERY:
index_layer(layer.id, use_cache=True) # depends on [control=['if'], data=[]]
else:
index_layer(layer.id) # depends on [control=['for'], data=['layer']] |
def get_right_geo_fhs(self, dsid, fhs):
"""Find the right geographical file handlers for given dataset ID *dsid*."""
ds_info = self.ids[dsid]
req_geo, rem_geo = self._get_req_rem_geo(ds_info)
desired, other = split_desired_other(fhs, req_geo, rem_geo)
if desired:
try:
ds_info['dataset_groups'].remove(rem_geo)
except ValueError:
pass
return desired
else:
return other | def function[get_right_geo_fhs, parameter[self, dsid, fhs]]:
constant[Find the right geographical file handlers for given dataset ID *dsid*.]
variable[ds_info] assign[=] call[name[self].ids][name[dsid]]
<ast.Tuple object at 0x7da1b22ae1d0> assign[=] call[name[self]._get_req_rem_geo, parameter[name[ds_info]]]
<ast.Tuple object at 0x7da1b22ac610> assign[=] call[name[split_desired_other], parameter[name[fhs], name[req_geo], name[rem_geo]]]
if name[desired] begin[:]
<ast.Try object at 0x7da1b22ad4b0>
return[name[desired]] | keyword[def] identifier[get_right_geo_fhs] ( identifier[self] , identifier[dsid] , identifier[fhs] ):
literal[string]
identifier[ds_info] = identifier[self] . identifier[ids] [ identifier[dsid] ]
identifier[req_geo] , identifier[rem_geo] = identifier[self] . identifier[_get_req_rem_geo] ( identifier[ds_info] )
identifier[desired] , identifier[other] = identifier[split_desired_other] ( identifier[fhs] , identifier[req_geo] , identifier[rem_geo] )
keyword[if] identifier[desired] :
keyword[try] :
identifier[ds_info] [ literal[string] ]. identifier[remove] ( identifier[rem_geo] )
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[return] identifier[desired]
keyword[else] :
keyword[return] identifier[other] | def get_right_geo_fhs(self, dsid, fhs):
"""Find the right geographical file handlers for given dataset ID *dsid*."""
ds_info = self.ids[dsid]
(req_geo, rem_geo) = self._get_req_rem_geo(ds_info)
(desired, other) = split_desired_other(fhs, req_geo, rem_geo)
if desired:
try:
ds_info['dataset_groups'].remove(rem_geo) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]]
return desired # depends on [control=['if'], data=[]]
else:
return other |
def report(self, obj, message, linenum, char_offset=0):
"""Report an error or warning"""
self.controller.report(linenumber=linenum, filename=obj.path,
severity=self.severity, message=message,
rulename = self.__class__.__name__,
char=char_offset) | def function[report, parameter[self, obj, message, linenum, char_offset]]:
constant[Report an error or warning]
call[name[self].controller.report, parameter[]] | keyword[def] identifier[report] ( identifier[self] , identifier[obj] , identifier[message] , identifier[linenum] , identifier[char_offset] = literal[int] ):
literal[string]
identifier[self] . identifier[controller] . identifier[report] ( identifier[linenumber] = identifier[linenum] , identifier[filename] = identifier[obj] . identifier[path] ,
identifier[severity] = identifier[self] . identifier[severity] , identifier[message] = identifier[message] ,
identifier[rulename] = identifier[self] . identifier[__class__] . identifier[__name__] ,
identifier[char] = identifier[char_offset] ) | def report(self, obj, message, linenum, char_offset=0):
"""Report an error or warning"""
self.controller.report(linenumber=linenum, filename=obj.path, severity=self.severity, message=message, rulename=self.__class__.__name__, char=char_offset) |
def _smooth_the_residuals(self):
"""
Apply the MID_SPAN to the residuals of the primary smooths.
"For stability reasons, it turns out to be a little better to smooth
|r_{i}(J)| against xi" - [1]
"""
for primary_smooth in self._primary_smooths:
smooth = smoother.perform_smooth(self.x,
primary_smooth.cross_validated_residual,
MID_SPAN)
self._residual_smooths.append(smooth.smooth_result) | def function[_smooth_the_residuals, parameter[self]]:
constant[
Apply the MID_SPAN to the residuals of the primary smooths.
"For stability reasons, it turns out to be a little better to smooth
|r_{i}(J)| against xi" - [1]
]
for taget[name[primary_smooth]] in starred[name[self]._primary_smooths] begin[:]
variable[smooth] assign[=] call[name[smoother].perform_smooth, parameter[name[self].x, name[primary_smooth].cross_validated_residual, name[MID_SPAN]]]
call[name[self]._residual_smooths.append, parameter[name[smooth].smooth_result]] | keyword[def] identifier[_smooth_the_residuals] ( identifier[self] ):
literal[string]
keyword[for] identifier[primary_smooth] keyword[in] identifier[self] . identifier[_primary_smooths] :
identifier[smooth] = identifier[smoother] . identifier[perform_smooth] ( identifier[self] . identifier[x] ,
identifier[primary_smooth] . identifier[cross_validated_residual] ,
identifier[MID_SPAN] )
identifier[self] . identifier[_residual_smooths] . identifier[append] ( identifier[smooth] . identifier[smooth_result] ) | def _smooth_the_residuals(self):
"""
Apply the MID_SPAN to the residuals of the primary smooths.
"For stability reasons, it turns out to be a little better to smooth
|r_{i}(J)| against xi" - [1]
"""
for primary_smooth in self._primary_smooths:
smooth = smoother.perform_smooth(self.x, primary_smooth.cross_validated_residual, MID_SPAN)
self._residual_smooths.append(smooth.smooth_result) # depends on [control=['for'], data=['primary_smooth']] |
def read_exactly(self, num_bytes):
"""
Reads exactly the specified number of bytes from the socket
:param num_bytes:
An integer - the exact number of bytes to read
:return:
A byte string of the data that was read
"""
output = b''
remaining = num_bytes
while remaining > 0:
output += self.read(remaining)
remaining = num_bytes - len(output)
return output | def function[read_exactly, parameter[self, num_bytes]]:
constant[
Reads exactly the specified number of bytes from the socket
:param num_bytes:
An integer - the exact number of bytes to read
:return:
A byte string of the data that was read
]
variable[output] assign[=] constant[b'']
variable[remaining] assign[=] name[num_bytes]
while compare[name[remaining] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1aff20be0>
variable[remaining] assign[=] binary_operation[name[num_bytes] - call[name[len], parameter[name[output]]]]
return[name[output]] | keyword[def] identifier[read_exactly] ( identifier[self] , identifier[num_bytes] ):
literal[string]
identifier[output] = literal[string]
identifier[remaining] = identifier[num_bytes]
keyword[while] identifier[remaining] > literal[int] :
identifier[output] += identifier[self] . identifier[read] ( identifier[remaining] )
identifier[remaining] = identifier[num_bytes] - identifier[len] ( identifier[output] )
keyword[return] identifier[output] | def read_exactly(self, num_bytes):
"""
Reads exactly the specified number of bytes from the socket
:param num_bytes:
An integer - the exact number of bytes to read
:return:
A byte string of the data that was read
"""
output = b''
remaining = num_bytes
while remaining > 0:
output += self.read(remaining)
remaining = num_bytes - len(output) # depends on [control=['while'], data=['remaining']]
return output |
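A minimal stand-alone sketch of why the loop in read_exactly is needed: a stream may hand back fewer bytes than requested, so the caller keeps reading until the exact count is reached. The reader class is a made-up stand-in for a socket:

class ChunkedReader:
    """Toy reader that returns at most 2 bytes per call."""
    def __init__(self, data):
        self._data = data

    def read(self, n):
        n = min(n, 2)
        chunk, self._data = self._data[:n], self._data[n:]
        return chunk

def read_exactly(reader, num_bytes):
    output = b''
    while len(output) < num_bytes:
        output += reader.read(num_bytes - len(output))
    return output

print(read_exactly(ChunkedReader(b'abcdefgh'), 5))  # b'abcde'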
def pre_populate_buyer_email(self, pre_populate_buyer_email):
"""
Sets the pre_populate_buyer_email of this CreateCheckoutRequest.
If provided, the buyer's email is pre-populated on the checkout page as an editable text field. Default: none; only exists if explicitly set.
:param pre_populate_buyer_email: The pre_populate_buyer_email of this CreateCheckoutRequest.
:type: str
"""
if pre_populate_buyer_email is None:
raise ValueError("Invalid value for `pre_populate_buyer_email`, must not be `None`")
if len(pre_populate_buyer_email) > 254:
raise ValueError("Invalid value for `pre_populate_buyer_email`, length must be less than `254`")
self._pre_populate_buyer_email = pre_populate_buyer_email | def function[pre_populate_buyer_email, parameter[self, pre_populate_buyer_email]]:
constant[
Sets the pre_populate_buyer_email of this CreateCheckoutRequest.
If provided, the buyer's email is pre-populated on the checkout page as an editable text field. Default: none; only exists if explicitly set.
:param pre_populate_buyer_email: The pre_populate_buyer_email of this CreateCheckoutRequest.
:type: str
]
if compare[name[pre_populate_buyer_email] is constant[None]] begin[:]
<ast.Raise object at 0x7da18eb54b50>
if compare[call[name[len], parameter[name[pre_populate_buyer_email]]] greater[>] constant[254]] begin[:]
<ast.Raise object at 0x7da18eb545e0>
name[self]._pre_populate_buyer_email assign[=] name[pre_populate_buyer_email] | keyword[def] identifier[pre_populate_buyer_email] ( identifier[self] , identifier[pre_populate_buyer_email] ):
literal[string]
keyword[if] identifier[pre_populate_buyer_email] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[len] ( identifier[pre_populate_buyer_email] )> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[_pre_populate_buyer_email] = identifier[pre_populate_buyer_email] | def pre_populate_buyer_email(self, pre_populate_buyer_email):
"""
Sets the pre_populate_buyer_email of this CreateCheckoutRequest.
If provided, the buyer's email is pre-populated on the checkout page as an editable text field. Default: none; only exists if explicitly set.
:param pre_populate_buyer_email: The pre_populate_buyer_email of this CreateCheckoutRequest.
:type: str
"""
if pre_populate_buyer_email is None:
raise ValueError('Invalid value for `pre_populate_buyer_email`, must not be `None`') # depends on [control=['if'], data=[]]
if len(pre_populate_buyer_email) > 254:
raise ValueError('Invalid value for `pre_populate_buyer_email`, length must be less than `254`') # depends on [control=['if'], data=[]]
self._pre_populate_buyer_email = pre_populate_buyer_email |
def runGetReadGroupSet(self, id_):
"""
Returns a readGroupSet with the given id_
"""
compoundId = datamodel.ReadGroupSetCompoundId.parse(id_)
dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
readGroupSet = dataset.getReadGroupSet(id_)
return self.runGetRequest(readGroupSet) | def function[runGetReadGroupSet, parameter[self, id_]]:
constant[
Returns a readGroupSet with the given id_
]
variable[compoundId] assign[=] call[name[datamodel].ReadGroupSetCompoundId.parse, parameter[name[id_]]]
variable[dataset] assign[=] call[call[name[self].getDataRepository, parameter[]].getDataset, parameter[name[compoundId].dataset_id]]
variable[readGroupSet] assign[=] call[name[dataset].getReadGroupSet, parameter[name[id_]]]
return[call[name[self].runGetRequest, parameter[name[readGroupSet]]]] | keyword[def] identifier[runGetReadGroupSet] ( identifier[self] , identifier[id_] ):
literal[string]
identifier[compoundId] = identifier[datamodel] . identifier[ReadGroupSetCompoundId] . identifier[parse] ( identifier[id_] )
identifier[dataset] = identifier[self] . identifier[getDataRepository] (). identifier[getDataset] ( identifier[compoundId] . identifier[dataset_id] )
identifier[readGroupSet] = identifier[dataset] . identifier[getReadGroupSet] ( identifier[id_] )
keyword[return] identifier[self] . identifier[runGetRequest] ( identifier[readGroupSet] ) | def runGetReadGroupSet(self, id_):
"""
Returns a readGroupSet with the given id_
"""
compoundId = datamodel.ReadGroupSetCompoundId.parse(id_)
dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
readGroupSet = dataset.getReadGroupSet(id_)
return self.runGetRequest(readGroupSet) |
def trim_nonpercolating_paths(im, inlet_axis=0, outlet_axis=0):
r"""
Removes all nonpercolating paths between specified edges
This function is essential when performing transport simulations on an
image, since image regions that do not span between the desired inlet and
outlet do not contribute to the transport.
Parameters
----------
im : ND-array
        The image of the porous material with ``True`` values indicating the
phase of interest
inlet_axis : int
Inlet axis of boundary condition. For three dimensional image the
number ranges from 0 to 2. For two dimensional image the range is
between 0 to 1.
outlet_axis : int
Outlet axis of boundary condition. For three dimensional image the
number ranges from 0 to 2. For two dimensional image the range is
between 0 to 1.
Returns
-------
image : ND-array
A copy of ``im`` with all the nonpercolating paths removed
See Also
--------
find_disconnected_voxels
trim_floating_solid
trim_blind_pores
"""
if im.ndim != im.squeeze().ndim:
        warnings.warn('Input image contains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
im = trim_floating_solid(~im)
labels = spim.label(~im)[0]
inlet = sp.zeros_like(im, dtype=int)
outlet = sp.zeros_like(im, dtype=int)
if im.ndim == 3:
if inlet_axis == 0:
inlet[0, :, :] = 1
elif inlet_axis == 1:
inlet[:, 0, :] = 1
elif inlet_axis == 2:
inlet[:, :, 0] = 1
if outlet_axis == 0:
outlet[-1, :, :] = 1
elif outlet_axis == 1:
outlet[:, -1, :] = 1
elif outlet_axis == 2:
outlet[:, :, -1] = 1
if im.ndim == 2:
if inlet_axis == 0:
inlet[0, :] = 1
elif inlet_axis == 1:
inlet[:, 0] = 1
if outlet_axis == 0:
outlet[-1, :] = 1
elif outlet_axis == 1:
outlet[:, -1] = 1
IN = sp.unique(labels*inlet)
OUT = sp.unique(labels*outlet)
new_im = sp.isin(labels, list(set(IN) ^ set(OUT)), invert=True)
im[new_im == 0] = True
return ~im | def function[trim_nonpercolating_paths, parameter[im, inlet_axis, outlet_axis]]:
constant[
Removes all nonpercolating paths between specified edges
This function is essential when performing transport simulations on an
image, since image regions that do not span between the desired inlet and
outlet do not contribute to the transport.
Parameters
----------
im : ND-array
        The image of the porous material with ``True`` values indicating the
phase of interest
inlet_axis : int
Inlet axis of boundary condition. For three dimensional image the
number ranges from 0 to 2. For two dimensional image the range is
between 0 to 1.
outlet_axis : int
Outlet axis of boundary condition. For three dimensional image the
number ranges from 0 to 2. For two dimensional image the range is
between 0 to 1.
Returns
-------
image : ND-array
A copy of ``im`` with all the nonpercolating paths removed
See Also
--------
find_disconnected_voxels
trim_floating_solid
trim_blind_pores
]
if compare[name[im].ndim not_equal[!=] call[name[im].squeeze, parameter[]].ndim] begin[:]
    call[name[warnings].warn, parameter[binary_operation[binary_operation[binary_operation[constant[Input image contains a singleton axis:] + call[name[str], parameter[name[im].shape]]] + constant[ Reduce dimensionality with np.squeeze(im) to avoid]] + constant[ unexpected behavior.]]]]
variable[im] assign[=] call[name[trim_floating_solid], parameter[<ast.UnaryOp object at 0x7da1b0714be0>]]
variable[labels] assign[=] call[call[name[spim].label, parameter[<ast.UnaryOp object at 0x7da1b064e290>]]][constant[0]]
variable[inlet] assign[=] call[name[sp].zeros_like, parameter[name[im]]]
variable[outlet] assign[=] call[name[sp].zeros_like, parameter[name[im]]]
if compare[name[im].ndim equal[==] constant[3]] begin[:]
if compare[name[inlet_axis] equal[==] constant[0]] begin[:]
call[name[inlet]][tuple[[<ast.Constant object at 0x7da1b064dae0>, <ast.Slice object at 0x7da1b064e440>, <ast.Slice object at 0x7da1b064e530>]]] assign[=] constant[1]
if compare[name[outlet_axis] equal[==] constant[0]] begin[:]
call[name[outlet]][tuple[[<ast.UnaryOp object at 0x7da1b064d780>, <ast.Slice object at 0x7da1b064e0b0>, <ast.Slice object at 0x7da1b064dd20>]]] assign[=] constant[1]
if compare[name[im].ndim equal[==] constant[2]] begin[:]
if compare[name[inlet_axis] equal[==] constant[0]] begin[:]
call[name[inlet]][tuple[[<ast.Constant object at 0x7da1b0677640>, <ast.Slice object at 0x7da1b0677970>]]] assign[=] constant[1]
if compare[name[outlet_axis] equal[==] constant[0]] begin[:]
call[name[outlet]][tuple[[<ast.UnaryOp object at 0x7da1b0677f40>, <ast.Slice object at 0x7da1b0677400>]]] assign[=] constant[1]
variable[IN] assign[=] call[name[sp].unique, parameter[binary_operation[name[labels] * name[inlet]]]]
variable[OUT] assign[=] call[name[sp].unique, parameter[binary_operation[name[labels] * name[outlet]]]]
variable[new_im] assign[=] call[name[sp].isin, parameter[name[labels], call[name[list], parameter[binary_operation[call[name[set], parameter[name[IN]]] <ast.BitXor object at 0x7da2590d6b00> call[name[set], parameter[name[OUT]]]]]]]]
call[name[im]][compare[name[new_im] equal[==] constant[0]]] assign[=] constant[True]
return[<ast.UnaryOp object at 0x7da1b072c6a0>] | keyword[def] identifier[trim_nonpercolating_paths] ( identifier[im] , identifier[inlet_axis] = literal[int] , identifier[outlet_axis] = literal[int] ):
literal[string]
keyword[if] identifier[im] . identifier[ndim] != identifier[im] . identifier[squeeze] (). identifier[ndim] :
identifier[warnings] . identifier[warn] ( literal[string] + identifier[str] ( identifier[im] . identifier[shape] )+
literal[string] +
literal[string] )
identifier[im] = identifier[trim_floating_solid] (~ identifier[im] )
identifier[labels] = identifier[spim] . identifier[label] (~ identifier[im] )[ literal[int] ]
identifier[inlet] = identifier[sp] . identifier[zeros_like] ( identifier[im] , identifier[dtype] = identifier[int] )
identifier[outlet] = identifier[sp] . identifier[zeros_like] ( identifier[im] , identifier[dtype] = identifier[int] )
keyword[if] identifier[im] . identifier[ndim] == literal[int] :
keyword[if] identifier[inlet_axis] == literal[int] :
identifier[inlet] [ literal[int] ,:,:]= literal[int]
keyword[elif] identifier[inlet_axis] == literal[int] :
identifier[inlet] [:, literal[int] ,:]= literal[int]
keyword[elif] identifier[inlet_axis] == literal[int] :
identifier[inlet] [:,:, literal[int] ]= literal[int]
keyword[if] identifier[outlet_axis] == literal[int] :
identifier[outlet] [- literal[int] ,:,:]= literal[int]
keyword[elif] identifier[outlet_axis] == literal[int] :
identifier[outlet] [:,- literal[int] ,:]= literal[int]
keyword[elif] identifier[outlet_axis] == literal[int] :
identifier[outlet] [:,:,- literal[int] ]= literal[int]
keyword[if] identifier[im] . identifier[ndim] == literal[int] :
keyword[if] identifier[inlet_axis] == literal[int] :
identifier[inlet] [ literal[int] ,:]= literal[int]
keyword[elif] identifier[inlet_axis] == literal[int] :
identifier[inlet] [:, literal[int] ]= literal[int]
keyword[if] identifier[outlet_axis] == literal[int] :
identifier[outlet] [- literal[int] ,:]= literal[int]
keyword[elif] identifier[outlet_axis] == literal[int] :
identifier[outlet] [:,- literal[int] ]= literal[int]
identifier[IN] = identifier[sp] . identifier[unique] ( identifier[labels] * identifier[inlet] )
identifier[OUT] = identifier[sp] . identifier[unique] ( identifier[labels] * identifier[outlet] )
identifier[new_im] = identifier[sp] . identifier[isin] ( identifier[labels] , identifier[list] ( identifier[set] ( identifier[IN] )^ identifier[set] ( identifier[OUT] )), identifier[invert] = keyword[True] )
identifier[im] [ identifier[new_im] == literal[int] ]= keyword[True]
keyword[return] ~ identifier[im] | def trim_nonpercolating_paths(im, inlet_axis=0, outlet_axis=0):
"""
Removes all nonpercolating paths between specified edges
This function is essential when performing transport simulations on an
image, since image regions that do not span between the desired inlet and
outlet do not contribute to the transport.
Parameters
----------
im : ND-array
        The image of the porous material with ``True`` values indicating the
phase of interest
inlet_axis : int
Inlet axis of boundary condition. For three dimensional image the
number ranges from 0 to 2. For two dimensional image the range is
between 0 to 1.
outlet_axis : int
Outlet axis of boundary condition. For three dimensional image the
number ranges from 0 to 2. For two dimensional image the range is
between 0 to 1.
Returns
-------
image : ND-array
A copy of ``im`` with all the nonpercolating paths removed
See Also
--------
find_disconnected_voxels
trim_floating_solid
trim_blind_pores
"""
if im.ndim != im.squeeze().ndim:
        warnings.warn('Input image contains a singleton axis:' + str(im.shape) + ' Reduce dimensionality with np.squeeze(im) to avoid' + ' unexpected behavior.') # depends on [control=['if'], data=[]]
im = trim_floating_solid(~im)
labels = spim.label(~im)[0]
inlet = sp.zeros_like(im, dtype=int)
outlet = sp.zeros_like(im, dtype=int)
if im.ndim == 3:
if inlet_axis == 0:
inlet[0, :, :] = 1 # depends on [control=['if'], data=[]]
elif inlet_axis == 1:
inlet[:, 0, :] = 1 # depends on [control=['if'], data=[]]
elif inlet_axis == 2:
inlet[:, :, 0] = 1 # depends on [control=['if'], data=[]]
if outlet_axis == 0:
outlet[-1, :, :] = 1 # depends on [control=['if'], data=[]]
elif outlet_axis == 1:
outlet[:, -1, :] = 1 # depends on [control=['if'], data=[]]
elif outlet_axis == 2:
outlet[:, :, -1] = 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if im.ndim == 2:
if inlet_axis == 0:
inlet[0, :] = 1 # depends on [control=['if'], data=[]]
elif inlet_axis == 1:
inlet[:, 0] = 1 # depends on [control=['if'], data=[]]
if outlet_axis == 0:
outlet[-1, :] = 1 # depends on [control=['if'], data=[]]
elif outlet_axis == 1:
outlet[:, -1] = 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
IN = sp.unique(labels * inlet)
OUT = sp.unique(labels * outlet)
new_im = sp.isin(labels, list(set(IN) ^ set(OUT)), invert=True)
im[new_im == 0] = True
return ~im |
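A simplified illustration of the percolation bookkeeping (keep only labelled regions that touch both the inlet and the outlet face); this is not the implementation above, just the core idea on a small hypothetical 2D array:

import numpy as np
from scipy import ndimage as spim

im = np.array([[1, 1, 0, 1],
               [0, 1, 0, 1],
               [0, 1, 0, 0],
               [0, 1, 1, 0]], dtype=bool)
labels = spim.label(im)[0]
inlet_labels = set(np.unique(labels[0, :])) - {0}    # regions touching the top face
outlet_labels = set(np.unique(labels[-1, :])) - {0}  # regions touching the bottom face
percolating = inlet_labels & outlet_labels           # regions spanning both faces
result = np.isin(labels, list(percolating))          # True only on spanning paths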
def pmll(self,*args,**kwargs):
"""
NAME:
pmll
PURPOSE:
return proper motion in Galactic longitude (in mas/yr)
INPUT:
t - (optional) time at which to get pmll
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
(in kpc and km/s) (default=Object-wide default)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= distance in kpc corresponding to R=1. (default=Object-wide default)
vo= velocity in km/s corresponding to v=1. (default=Object-wide default)
OUTPUT:
pm_l(t) in mas/yr
HISTORY:
2011-02-24 - Written - Bovy (NYU)
"""
_check_roSet(self,kwargs,'pmll')
_check_voSet(self,kwargs,'pmll')
lbdvrpmllpmbb= self._lbdvrpmllpmbb(*args,**kwargs)
return lbdvrpmllpmbb[:,4] | def function[pmll, parameter[self]]:
constant[
NAME:
pmll
PURPOSE:
return proper motion in Galactic longitude (in mas/yr)
INPUT:
t - (optional) time at which to get pmll
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
(in kpc and km/s) (default=Object-wide default)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= distance in kpc corresponding to R=1. (default=Object-wide default)
vo= velocity in km/s corresponding to v=1. (default=Object-wide default)
OUTPUT:
pm_l(t) in mas/yr
HISTORY:
2011-02-24 - Written - Bovy (NYU)
]
call[name[_check_roSet], parameter[name[self], name[kwargs], constant[pmll]]]
call[name[_check_voSet], parameter[name[self], name[kwargs], constant[pmll]]]
variable[lbdvrpmllpmbb] assign[=] call[name[self]._lbdvrpmllpmbb, parameter[<ast.Starred object at 0x7da1b0ec01c0>]]
return[call[name[lbdvrpmllpmbb]][tuple[[<ast.Slice object at 0x7da1b0c901f0>, <ast.Constant object at 0x7da1b0c915d0>]]]] | keyword[def] identifier[pmll] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[_check_roSet] ( identifier[self] , identifier[kwargs] , literal[string] )
identifier[_check_voSet] ( identifier[self] , identifier[kwargs] , literal[string] )
identifier[lbdvrpmllpmbb] = identifier[self] . identifier[_lbdvrpmllpmbb] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[lbdvrpmllpmbb] [:, literal[int] ] | def pmll(self, *args, **kwargs):
"""
NAME:
pmll
PURPOSE:
return proper motion in Galactic longitude (in mas/yr)
INPUT:
t - (optional) time at which to get pmll
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
(in kpc and km/s) (default=Object-wide default)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= distance in kpc corresponding to R=1. (default=Object-wide default)
vo= velocity in km/s corresponding to v=1. (default=Object-wide default)
OUTPUT:
pm_l(t) in mas/yr
HISTORY:
2011-02-24 - Written - Bovy (NYU)
"""
_check_roSet(self, kwargs, 'pmll')
_check_voSet(self, kwargs, 'pmll')
lbdvrpmllpmbb = self._lbdvrpmllpmbb(*args, **kwargs)
return lbdvrpmllpmbb[:, 4] |
def remove_raw_jobs(self, params_list):
""" Remove jobs from a raw queue with their raw params. """
if len(params_list) == 0:
return
# ZSET
if self.is_sorted:
context.connections.redis.zrem(self.redis_key, *iter(params_list))
# SET
elif self.is_set:
context.connections.redis.srem(self.redis_key, *params_list)
else:
# O(n)! Use with caution.
for k in params_list:
context.connections.redis.lrem(self.redis_key, 1, k)
context.metric("queues.%s.removed" % self.id, len(params_list))
context.metric("queues.all.removed", len(params_list)) | def function[remove_raw_jobs, parameter[self, params_list]]:
constant[ Remove jobs from a raw queue with their raw params. ]
if compare[call[name[len], parameter[name[params_list]]] equal[==] constant[0]] begin[:]
return[None]
if name[self].is_sorted begin[:]
call[name[context].connections.redis.zrem, parameter[name[self].redis_key, <ast.Starred object at 0x7da1b07ba590>]]
call[name[context].metric, parameter[binary_operation[constant[queues.%s.removed] <ast.Mod object at 0x7da2590d6920> name[self].id], call[name[len], parameter[name[params_list]]]]]
call[name[context].metric, parameter[constant[queues.all.removed], call[name[len], parameter[name[params_list]]]]] | keyword[def] identifier[remove_raw_jobs] ( identifier[self] , identifier[params_list] ):
literal[string]
keyword[if] identifier[len] ( identifier[params_list] )== literal[int] :
keyword[return]
keyword[if] identifier[self] . identifier[is_sorted] :
identifier[context] . identifier[connections] . identifier[redis] . identifier[zrem] ( identifier[self] . identifier[redis_key] ,* identifier[iter] ( identifier[params_list] ))
keyword[elif] identifier[self] . identifier[is_set] :
identifier[context] . identifier[connections] . identifier[redis] . identifier[srem] ( identifier[self] . identifier[redis_key] ,* identifier[params_list] )
keyword[else] :
keyword[for] identifier[k] keyword[in] identifier[params_list] :
identifier[context] . identifier[connections] . identifier[redis] . identifier[lrem] ( identifier[self] . identifier[redis_key] , literal[int] , identifier[k] )
identifier[context] . identifier[metric] ( literal[string] % identifier[self] . identifier[id] , identifier[len] ( identifier[params_list] ))
identifier[context] . identifier[metric] ( literal[string] , identifier[len] ( identifier[params_list] )) | def remove_raw_jobs(self, params_list):
""" Remove jobs from a raw queue with their raw params. """
if len(params_list) == 0:
return # depends on [control=['if'], data=[]]
# ZSET
if self.is_sorted:
context.connections.redis.zrem(self.redis_key, *iter(params_list)) # depends on [control=['if'], data=[]]
# SET
elif self.is_set:
context.connections.redis.srem(self.redis_key, *params_list) # depends on [control=['if'], data=[]]
else:
# O(n)! Use with caution.
for k in params_list:
context.connections.redis.lrem(self.redis_key, 1, k) # depends on [control=['for'], data=['k']]
context.metric('queues.%s.removed' % self.id, len(params_list))
context.metric('queues.all.removed', len(params_list)) |
def renumber_block_keys(blocks):
"""Renumber a block map's indices so that tehy match the blocks' block
switch statements.
:param blocks a block map to renumber
:rtype: a renumbered copy of the block map
"""
# There is an implicit block switch to the 0th block at the start of the
# file
byte_switch_keys = [0]
block_keys = list(blocks.keys())
# Scan the blocks, recording every block switch statement
for block in list(blocks.values()):
i = 0
while i < len(block.data) - 1:
current_byte = block.data[i]
next_byte = block.data[i + 1]
if current_byte == RLE_BYTE:
if next_byte == RLE_BYTE:
i += 2
else:
i += 3
elif current_byte == SPECIAL_BYTE:
if next_byte in SPECIAL_DEFAULTS:
i += 3
elif next_byte == SPECIAL_BYTE:
i += 2
else:
if next_byte != EOF_BYTE:
byte_switch_keys.append(next_byte)
break
else:
i += 1
byte_switch_keys.sort()
block_keys.sort()
assert len(byte_switch_keys) == len(block_keys), (
"Number of blocks that are target of block switches (%d) "
% (len(byte_switch_keys)) +
"does not equal number of blocks in the song (%d)"
% (len(block_keys)) +
"; possible corruption")
if byte_switch_keys == block_keys:
# No remapping necessary
return blocks
new_block_map = {}
for block_key, byte_switch_key in zip(
block_keys, byte_switch_keys):
new_block_map[byte_switch_key] = blocks[block_key]
return new_block_map | def function[renumber_block_keys, parameter[blocks]]:
constant[Renumber a block map's indices so that tehy match the blocks' block
switch statements.
:param blocks a block map to renumber
:rtype: a renumbered copy of the block map
]
variable[byte_switch_keys] assign[=] list[[<ast.Constant object at 0x7da1b0f38a60>]]
variable[block_keys] assign[=] call[name[list], parameter[call[name[blocks].keys, parameter[]]]]
for taget[name[block]] in starred[call[name[list], parameter[call[name[blocks].values, parameter[]]]]] begin[:]
variable[i] assign[=] constant[0]
while compare[name[i] less[<] binary_operation[call[name[len], parameter[name[block].data]] - constant[1]]] begin[:]
variable[current_byte] assign[=] call[name[block].data][name[i]]
variable[next_byte] assign[=] call[name[block].data][binary_operation[name[i] + constant[1]]]
if compare[name[current_byte] equal[==] name[RLE_BYTE]] begin[:]
if compare[name[next_byte] equal[==] name[RLE_BYTE]] begin[:]
<ast.AugAssign object at 0x7da1b0e306d0>
call[name[byte_switch_keys].sort, parameter[]]
call[name[block_keys].sort, parameter[]]
assert[compare[call[name[len], parameter[name[byte_switch_keys]]] equal[==] call[name[len], parameter[name[block_keys]]]]]
if compare[name[byte_switch_keys] equal[==] name[block_keys]] begin[:]
return[name[blocks]]
variable[new_block_map] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b0e63c40>, <ast.Name object at 0x7da1b0e61d20>]]] in starred[call[name[zip], parameter[name[block_keys], name[byte_switch_keys]]]] begin[:]
call[name[new_block_map]][name[byte_switch_key]] assign[=] call[name[blocks]][name[block_key]]
return[name[new_block_map]] | keyword[def] identifier[renumber_block_keys] ( identifier[blocks] ):
literal[string]
identifier[byte_switch_keys] =[ literal[int] ]
identifier[block_keys] = identifier[list] ( identifier[blocks] . identifier[keys] ())
keyword[for] identifier[block] keyword[in] identifier[list] ( identifier[blocks] . identifier[values] ()):
identifier[i] = literal[int]
keyword[while] identifier[i] < identifier[len] ( identifier[block] . identifier[data] )- literal[int] :
identifier[current_byte] = identifier[block] . identifier[data] [ identifier[i] ]
identifier[next_byte] = identifier[block] . identifier[data] [ identifier[i] + literal[int] ]
keyword[if] identifier[current_byte] == identifier[RLE_BYTE] :
keyword[if] identifier[next_byte] == identifier[RLE_BYTE] :
identifier[i] += literal[int]
keyword[else] :
identifier[i] += literal[int]
keyword[elif] identifier[current_byte] == identifier[SPECIAL_BYTE] :
keyword[if] identifier[next_byte] keyword[in] identifier[SPECIAL_DEFAULTS] :
identifier[i] += literal[int]
keyword[elif] identifier[next_byte] == identifier[SPECIAL_BYTE] :
identifier[i] += literal[int]
keyword[else] :
keyword[if] identifier[next_byte] != identifier[EOF_BYTE] :
identifier[byte_switch_keys] . identifier[append] ( identifier[next_byte] )
keyword[break]
keyword[else] :
identifier[i] += literal[int]
identifier[byte_switch_keys] . identifier[sort] ()
identifier[block_keys] . identifier[sort] ()
keyword[assert] identifier[len] ( identifier[byte_switch_keys] )== identifier[len] ( identifier[block_keys] ),(
literal[string]
%( identifier[len] ( identifier[byte_switch_keys] ))+
literal[string]
%( identifier[len] ( identifier[block_keys] ))+
literal[string] )
keyword[if] identifier[byte_switch_keys] == identifier[block_keys] :
keyword[return] identifier[blocks]
identifier[new_block_map] ={}
keyword[for] identifier[block_key] , identifier[byte_switch_key] keyword[in] identifier[zip] (
identifier[block_keys] , identifier[byte_switch_keys] ):
identifier[new_block_map] [ identifier[byte_switch_key] ]= identifier[blocks] [ identifier[block_key] ]
keyword[return] identifier[new_block_map] | def renumber_block_keys(blocks):
"""Renumber a block map's indices so that tehy match the blocks' block
switch statements.
:param blocks a block map to renumber
:rtype: a renumbered copy of the block map
"""
# There is an implicit block switch to the 0th block at the start of the
# file
byte_switch_keys = [0]
block_keys = list(blocks.keys())
# Scan the blocks, recording every block switch statement
for block in list(blocks.values()):
i = 0
while i < len(block.data) - 1:
current_byte = block.data[i]
next_byte = block.data[i + 1]
if current_byte == RLE_BYTE:
if next_byte == RLE_BYTE:
i += 2 # depends on [control=['if'], data=[]]
else:
i += 3 # depends on [control=['if'], data=['RLE_BYTE']]
elif current_byte == SPECIAL_BYTE:
if next_byte in SPECIAL_DEFAULTS:
i += 3 # depends on [control=['if'], data=[]]
elif next_byte == SPECIAL_BYTE:
i += 2 # depends on [control=['if'], data=[]]
else:
if next_byte != EOF_BYTE:
byte_switch_keys.append(next_byte) # depends on [control=['if'], data=['next_byte']]
break # depends on [control=['if'], data=['SPECIAL_BYTE']]
else:
i += 1 # depends on [control=['while'], data=['i']] # depends on [control=['for'], data=['block']]
byte_switch_keys.sort()
block_keys.sort()
assert len(byte_switch_keys) == len(block_keys), 'Number of blocks that are target of block switches (%d) ' % len(byte_switch_keys) + 'does not equal number of blocks in the song (%d)' % len(block_keys) + '; possible corruption'
if byte_switch_keys == block_keys:
# No remapping necessary
return blocks # depends on [control=['if'], data=[]]
new_block_map = {}
for (block_key, byte_switch_key) in zip(block_keys, byte_switch_keys):
new_block_map[byte_switch_key] = blocks[block_key] # depends on [control=['for'], data=[]]
return new_block_map |
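The remapping at the end of renumber_block_keys reduces to pairing the two sorted key lists; a tiny hypothetical example with invented keys and payloads:

block_keys = [0, 1, 2]           # indices the blocks were stored under
byte_switch_keys = [0, 5, 9]     # indices actually referenced by block switches
blocks = {0: 'blk-a', 1: 'blk-b', 2: 'blk-c'}

new_block_map = {new: blocks[old]
                 for old, new in zip(sorted(block_keys), sorted(byte_switch_keys))}
# new_block_map == {0: 'blk-a', 5: 'blk-b', 9: 'blk-c'}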
def cli(env, identifier):
"""Edit firewall rules."""
mgr = SoftLayer.FirewallManager(env.client)
firewall_type, firewall_id = firewall.parse_id(identifier)
if firewall_type == 'vlan':
orig_rules = mgr.get_dedicated_fwl_rules(firewall_id)
else:
orig_rules = mgr.get_standard_fwl_rules(firewall_id)
# open an editor for the user to enter their rules
edited_rules = open_editor(rules=orig_rules)
env.out(edited_rules)
if formatting.confirm("Would you like to submit the rules. "
"Continue?"):
while True:
try:
rules = parse_rules(edited_rules)
if firewall_type == 'vlan':
rules = mgr.edit_dedicated_fwl_rules(firewall_id,
rules)
else:
rules = mgr.edit_standard_fwl_rules(firewall_id,
rules)
break
except (SoftLayer.SoftLayerError, ValueError) as error:
env.out("Unexpected error({%s})" % (error))
if formatting.confirm("Would you like to continue editing "
"the rules. Continue?"):
edited_rules = open_editor(content=edited_rules)
env.out(edited_rules)
if formatting.confirm("Would you like to submit the "
"rules. Continue?"):
continue
else:
raise exceptions.CLIAbort('Aborted.')
else:
raise exceptions.CLIAbort('Aborted.')
env.fout('Firewall updated!')
else:
raise exceptions.CLIAbort('Aborted.') | def function[cli, parameter[env, identifier]]:
constant[Edit firewall rules.]
variable[mgr] assign[=] call[name[SoftLayer].FirewallManager, parameter[name[env].client]]
<ast.Tuple object at 0x7da18ede7e80> assign[=] call[name[firewall].parse_id, parameter[name[identifier]]]
if compare[name[firewall_type] equal[==] constant[vlan]] begin[:]
variable[orig_rules] assign[=] call[name[mgr].get_dedicated_fwl_rules, parameter[name[firewall_id]]]
variable[edited_rules] assign[=] call[name[open_editor], parameter[]]
call[name[env].out, parameter[name[edited_rules]]]
if call[name[formatting].confirm, parameter[constant[Would you like to submit the rules. Continue?]]] begin[:]
while constant[True] begin[:]
<ast.Try object at 0x7da18ede69e0> | keyword[def] identifier[cli] ( identifier[env] , identifier[identifier] ):
literal[string]
identifier[mgr] = identifier[SoftLayer] . identifier[FirewallManager] ( identifier[env] . identifier[client] )
identifier[firewall_type] , identifier[firewall_id] = identifier[firewall] . identifier[parse_id] ( identifier[identifier] )
keyword[if] identifier[firewall_type] == literal[string] :
identifier[orig_rules] = identifier[mgr] . identifier[get_dedicated_fwl_rules] ( identifier[firewall_id] )
keyword[else] :
identifier[orig_rules] = identifier[mgr] . identifier[get_standard_fwl_rules] ( identifier[firewall_id] )
identifier[edited_rules] = identifier[open_editor] ( identifier[rules] = identifier[orig_rules] )
identifier[env] . identifier[out] ( identifier[edited_rules] )
keyword[if] identifier[formatting] . identifier[confirm] ( literal[string]
literal[string] ):
keyword[while] keyword[True] :
keyword[try] :
identifier[rules] = identifier[parse_rules] ( identifier[edited_rules] )
keyword[if] identifier[firewall_type] == literal[string] :
identifier[rules] = identifier[mgr] . identifier[edit_dedicated_fwl_rules] ( identifier[firewall_id] ,
identifier[rules] )
keyword[else] :
identifier[rules] = identifier[mgr] . identifier[edit_standard_fwl_rules] ( identifier[firewall_id] ,
identifier[rules] )
keyword[break]
keyword[except] ( identifier[SoftLayer] . identifier[SoftLayerError] , identifier[ValueError] ) keyword[as] identifier[error] :
identifier[env] . identifier[out] ( literal[string] %( identifier[error] ))
keyword[if] identifier[formatting] . identifier[confirm] ( literal[string]
literal[string] ):
identifier[edited_rules] = identifier[open_editor] ( identifier[content] = identifier[edited_rules] )
identifier[env] . identifier[out] ( identifier[edited_rules] )
keyword[if] identifier[formatting] . identifier[confirm] ( literal[string]
literal[string] ):
keyword[continue]
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[CLIAbort] ( literal[string] )
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[CLIAbort] ( literal[string] )
identifier[env] . identifier[fout] ( literal[string] )
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[CLIAbort] ( literal[string] ) | def cli(env, identifier):
"""Edit firewall rules."""
mgr = SoftLayer.FirewallManager(env.client)
(firewall_type, firewall_id) = firewall.parse_id(identifier)
if firewall_type == 'vlan':
orig_rules = mgr.get_dedicated_fwl_rules(firewall_id) # depends on [control=['if'], data=[]]
else:
orig_rules = mgr.get_standard_fwl_rules(firewall_id)
# open an editor for the user to enter their rules
edited_rules = open_editor(rules=orig_rules)
env.out(edited_rules)
if formatting.confirm('Would you like to submit the rules. Continue?'):
while True:
try:
rules = parse_rules(edited_rules)
if firewall_type == 'vlan':
rules = mgr.edit_dedicated_fwl_rules(firewall_id, rules) # depends on [control=['if'], data=[]]
else:
rules = mgr.edit_standard_fwl_rules(firewall_id, rules)
break # depends on [control=['try'], data=[]]
except (SoftLayer.SoftLayerError, ValueError) as error:
env.out('Unexpected error({%s})' % error)
if formatting.confirm('Would you like to continue editing the rules. Continue?'):
edited_rules = open_editor(content=edited_rules)
env.out(edited_rules)
if formatting.confirm('Would you like to submit the rules. Continue?'):
continue # depends on [control=['if'], data=[]]
else:
raise exceptions.CLIAbort('Aborted.') # depends on [control=['if'], data=[]]
else:
raise exceptions.CLIAbort('Aborted.')
env.fout('Firewall updated!') # depends on [control=['except'], data=['error']] # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
else:
raise exceptions.CLIAbort('Aborted.') |
def SetColumns( self, columns, sortOrder=None ):
"""Set columns to a set of values other than the originals and recreates column controls"""
self.columns = columns
self.sortOrder = [(x.defaultOrder,x) for x in self.columns if x.sortDefault]
self.CreateColumns() | def function[SetColumns, parameter[self, columns, sortOrder]]:
constant[Set columns to a set of values other than the originals and recreates column controls]
name[self].columns assign[=] name[columns]
name[self].sortOrder assign[=] <ast.ListComp object at 0x7da18f00c6a0>
call[name[self].CreateColumns, parameter[]] | keyword[def] identifier[SetColumns] ( identifier[self] , identifier[columns] , identifier[sortOrder] = keyword[None] ):
literal[string]
identifier[self] . identifier[columns] = identifier[columns]
identifier[self] . identifier[sortOrder] =[( identifier[x] . identifier[defaultOrder] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[columns] keyword[if] identifier[x] . identifier[sortDefault] ]
identifier[self] . identifier[CreateColumns] () | def SetColumns(self, columns, sortOrder=None):
"""Set columns to a set of values other than the originals and recreates column controls"""
self.columns = columns
self.sortOrder = [(x.defaultOrder, x) for x in self.columns if x.sortDefault]
self.CreateColumns() |
def from_stream(cls, stream):
"""
Return a |_JfifMarkers| instance containing a |_JfifMarker| subclass
instance for each marker in *stream*.
"""
marker_parser = _MarkerParser.from_stream(stream)
markers = []
for marker in marker_parser.iter_markers():
markers.append(marker)
if marker.marker_code == JPEG_MARKER_CODE.SOS:
break
return cls(markers) | def function[from_stream, parameter[cls, stream]]:
constant[
Return a |_JfifMarkers| instance containing a |_JfifMarker| subclass
instance for each marker in *stream*.
]
variable[marker_parser] assign[=] call[name[_MarkerParser].from_stream, parameter[name[stream]]]
variable[markers] assign[=] list[[]]
for taget[name[marker]] in starred[call[name[marker_parser].iter_markers, parameter[]]] begin[:]
call[name[markers].append, parameter[name[marker]]]
if compare[name[marker].marker_code equal[==] name[JPEG_MARKER_CODE].SOS] begin[:]
break
return[call[name[cls], parameter[name[markers]]]] | keyword[def] identifier[from_stream] ( identifier[cls] , identifier[stream] ):
literal[string]
identifier[marker_parser] = identifier[_MarkerParser] . identifier[from_stream] ( identifier[stream] )
identifier[markers] =[]
keyword[for] identifier[marker] keyword[in] identifier[marker_parser] . identifier[iter_markers] ():
identifier[markers] . identifier[append] ( identifier[marker] )
keyword[if] identifier[marker] . identifier[marker_code] == identifier[JPEG_MARKER_CODE] . identifier[SOS] :
keyword[break]
keyword[return] identifier[cls] ( identifier[markers] ) | def from_stream(cls, stream):
"""
Return a |_JfifMarkers| instance containing a |_JfifMarker| subclass
instance for each marker in *stream*.
"""
marker_parser = _MarkerParser.from_stream(stream)
markers = []
for marker in marker_parser.iter_markers():
markers.append(marker)
if marker.marker_code == JPEG_MARKER_CODE.SOS:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['marker']]
return cls(markers) |
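A hedged usage sketch for the classmethod above: open a JPEG, wrap it in a seekable stream, and collect its markers up to SOS. The file name is a placeholder, and the import location of _JfifMarkers is assumed (in python-docx it is an internal helper of the image subpackage).

from io import BytesIO

with open('photo.jpg', 'rb') as f:                # 'photo.jpg' is a placeholder path
    stream = BytesIO(f.read())

markers = _JfifMarkers.from_stream(stream)        # stops collecting once the SOS marker is seen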
def erract(op, lenout, action=None):
"""
Retrieve or set the default error action.
spiceypy sets the default error action to "report" on init.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/erract_c.html
:param op: operation, "GET" or "SET".
:type op: str
:param lenout: Length of list for output.
:type lenout: int
:param action: Error response action.
:type action: str
:return: Error response action.
:rtype: str
"""
if action is None:
action = ""
lenout = ctypes.c_int(lenout)
op = stypes.stringToCharP(op)
action = ctypes.create_string_buffer(str.encode(action), lenout.value)
actionptr = ctypes.c_char_p(ctypes.addressof(action))
libspice.erract_c(op, lenout, actionptr)
return stypes.toPythonString(actionptr) | def function[erract, parameter[op, lenout, action]]:
constant[
Retrieve or set the default error action.
spiceypy sets the default error action to "report" on init.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/erract_c.html
:param op: operation, "GET" or "SET".
:type op: str
:param lenout: Length of list for output.
:type lenout: int
:param action: Error response action.
:type action: str
:return: Error response action.
:rtype: str
]
if compare[name[action] is constant[None]] begin[:]
variable[action] assign[=] constant[]
variable[lenout] assign[=] call[name[ctypes].c_int, parameter[name[lenout]]]
variable[op] assign[=] call[name[stypes].stringToCharP, parameter[name[op]]]
variable[action] assign[=] call[name[ctypes].create_string_buffer, parameter[call[name[str].encode, parameter[name[action]]], name[lenout].value]]
variable[actionptr] assign[=] call[name[ctypes].c_char_p, parameter[call[name[ctypes].addressof, parameter[name[action]]]]]
call[name[libspice].erract_c, parameter[name[op], name[lenout], name[actionptr]]]
return[call[name[stypes].toPythonString, parameter[name[actionptr]]]] | keyword[def] identifier[erract] ( identifier[op] , identifier[lenout] , identifier[action] = keyword[None] ):
literal[string]
keyword[if] identifier[action] keyword[is] keyword[None] :
identifier[action] = literal[string]
identifier[lenout] = identifier[ctypes] . identifier[c_int] ( identifier[lenout] )
identifier[op] = identifier[stypes] . identifier[stringToCharP] ( identifier[op] )
identifier[action] = identifier[ctypes] . identifier[create_string_buffer] ( identifier[str] . identifier[encode] ( identifier[action] ), identifier[lenout] . identifier[value] )
identifier[actionptr] = identifier[ctypes] . identifier[c_char_p] ( identifier[ctypes] . identifier[addressof] ( identifier[action] ))
identifier[libspice] . identifier[erract_c] ( identifier[op] , identifier[lenout] , identifier[actionptr] )
keyword[return] identifier[stypes] . identifier[toPythonString] ( identifier[actionptr] ) | def erract(op, lenout, action=None):
"""
Retrieve or set the default error action.
spiceypy sets the default error action to "report" on init.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/erract_c.html
:param op: operation, "GET" or "SET".
:type op: str
:param lenout: Length of list for output.
:type lenout: int
:param action: Error response action.
:type action: str
:return: Error response action.
:rtype: str
"""
if action is None:
action = '' # depends on [control=['if'], data=['action']]
lenout = ctypes.c_int(lenout)
op = stypes.stringToCharP(op)
action = ctypes.create_string_buffer(str.encode(action), lenout.value)
actionptr = ctypes.c_char_p(ctypes.addressof(action))
libspice.erract_c(op, lenout, actionptr)
return stypes.toPythonString(actionptr) |
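A typical round trip with the wrapper above; a lenout of 10 comfortably holds any of the documented action strings ('ABORT', 'REPORT', 'RETURN', 'IGNORE', 'DEFAULT').

import spiceypy

previous = spiceypy.erract('GET', 10)        # query the current action (action defaults to "")
spiceypy.erract('SET', 10, 'RETURN')         # make CSPICE return instead of aborting
spiceypy.erract('SET', 10, previous)         # restore the earlier setting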
def diginorm(args):
"""
%prog diginorm fastqfile
Run K-mer based normalization. Based on tutorial:
<http://ged.msu.edu/angus/diginorm-2012/tutorial.html>
Assume input is either an interleaved pairs file, or two separate files.
To set up khmer:
$ git clone git://github.com/ged-lab/screed.git
$ git clone git://github.com/ged-lab/khmer.git
$ cd screed
$ python setup.py install
$ cd ../khmer
$ make test
$ export PYTHONPATH=~/export/khmer
"""
from jcvi.formats.fastq import shuffle, pairinplace, split
from jcvi.apps.base import getfilesize
p = OptionParser(diginorm.__doc__)
p.add_option("--single", default=False, action="store_true",
help="Single end reads")
p.add_option("--tablesize", help="Memory size")
p.add_option("--npass", default="1", choices=("1", "2"),
help="How many passes of normalization")
p.set_depth(depth=50)
p.set_home("khmer", default="/usr/local/bin/")
opts, args = p.parse_args(args)
if len(args) not in (1, 2):
sys.exit(not p.print_help())
if len(args) == 2:
fastq = shuffle(args + ["--tag"])
else:
fastq, = args
kh = opts.khmer_home
depth = opts.depth
PE = not opts.single
sys.path.insert(0, op.join(kh, "python"))
pf = fastq.rsplit(".", 1)[0]
keepfile = fastq + ".keep"
hashfile = pf + ".kh"
mints = 10000000
ts = opts.tablesize or ((getfilesize(fastq) / 16 / mints + 1) * mints)
norm_cmd = op.join(kh, "normalize-by-median.py")
filt_cmd = op.join(kh, "filter-abund.py")
if need_update(fastq, (hashfile, keepfile)):
cmd = norm_cmd
cmd += " -C {0} -k 20 -N 4 -x {1}".format(depth, ts)
if PE:
cmd += " -p"
cmd += " -s {0} {1}".format(hashfile, fastq)
sh(cmd)
abundfiltfile = keepfile + ".abundfilt"
if need_update((hashfile, keepfile), abundfiltfile):
cmd = filt_cmd
cmd += " {0} {1}".format(hashfile, keepfile)
sh(cmd)
if opts.npass == "1":
seckeepfile = abundfiltfile
else:
seckeepfile = abundfiltfile + ".keep"
if need_update(abundfiltfile, seckeepfile):
cmd = norm_cmd
cmd += " -C {0} -k 20 -N 4 -x {1}".format(depth - 10, ts / 2)
cmd += " {0}".format(abundfiltfile)
sh(cmd)
if PE:
pairsfile = pairinplace([seckeepfile,
"--base={0}".format(pf + "_norm"), "--rclip=2"])
split([pairsfile]) | def function[diginorm, parameter[args]]:
constant[
%prog diginorm fastqfile
Run K-mer based normalization. Based on tutorial:
<http://ged.msu.edu/angus/diginorm-2012/tutorial.html>
Assume input is either an interleaved pairs file, or two separate files.
To set up khmer:
$ git clone git://github.com/ged-lab/screed.git
$ git clone git://github.com/ged-lab/khmer.git
$ cd screed
$ python setup.py install
$ cd ../khmer
$ make test
$ export PYTHONPATH=~/export/khmer
]
from relative_module[jcvi.formats.fastq] import module[shuffle], module[pairinplace], module[split]
from relative_module[jcvi.apps.base] import module[getfilesize]
variable[p] assign[=] call[name[OptionParser], parameter[name[diginorm].__doc__]]
call[name[p].add_option, parameter[constant[--single]]]
call[name[p].add_option, parameter[constant[--tablesize]]]
call[name[p].add_option, parameter[constant[--npass]]]
call[name[p].set_depth, parameter[]]
call[name[p].set_home, parameter[constant[khmer]]]
<ast.Tuple object at 0x7da1b07af160> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b07ac910>, <ast.Constant object at 0x7da1b07ac8b0>]]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b08a9cf0>]]
if compare[call[name[len], parameter[name[args]]] equal[==] constant[2]] begin[:]
variable[fastq] assign[=] call[name[shuffle], parameter[binary_operation[name[args] + list[[<ast.Constant object at 0x7da1b08ab9a0>]]]]]
variable[kh] assign[=] name[opts].khmer_home
variable[depth] assign[=] name[opts].depth
variable[PE] assign[=] <ast.UnaryOp object at 0x7da1b08aa7a0>
call[name[sys].path.insert, parameter[constant[0], call[name[op].join, parameter[name[kh], constant[python]]]]]
variable[pf] assign[=] call[call[name[fastq].rsplit, parameter[constant[.], constant[1]]]][constant[0]]
variable[keepfile] assign[=] binary_operation[name[fastq] + constant[.keep]]
variable[hashfile] assign[=] binary_operation[name[pf] + constant[.kh]]
variable[mints] assign[=] constant[10000000]
variable[ts] assign[=] <ast.BoolOp object at 0x7da1b076bd30>
variable[norm_cmd] assign[=] call[name[op].join, parameter[name[kh], constant[normalize-by-median.py]]]
variable[filt_cmd] assign[=] call[name[op].join, parameter[name[kh], constant[filter-abund.py]]]
if call[name[need_update], parameter[name[fastq], tuple[[<ast.Name object at 0x7da1b076ab00>, <ast.Name object at 0x7da1b0768190>]]]] begin[:]
variable[cmd] assign[=] name[norm_cmd]
<ast.AugAssign object at 0x7da1b076bbb0>
if name[PE] begin[:]
<ast.AugAssign object at 0x7da1b0900160>
<ast.AugAssign object at 0x7da1b0796b00>
call[name[sh], parameter[name[cmd]]]
variable[abundfiltfile] assign[=] binary_operation[name[keepfile] + constant[.abundfilt]]
if call[name[need_update], parameter[tuple[[<ast.Name object at 0x7da1b088f100>, <ast.Name object at 0x7da1b088ca00>]], name[abundfiltfile]]] begin[:]
variable[cmd] assign[=] name[filt_cmd]
<ast.AugAssign object at 0x7da1b088e0e0>
call[name[sh], parameter[name[cmd]]]
if compare[name[opts].npass equal[==] constant[1]] begin[:]
variable[seckeepfile] assign[=] name[abundfiltfile]
if name[PE] begin[:]
variable[pairsfile] assign[=] call[name[pairinplace], parameter[list[[<ast.Name object at 0x7da1b077b850>, <ast.Call object at 0x7da1b077b880>, <ast.Constant object at 0x7da1b077b970>]]]]
call[name[split], parameter[list[[<ast.Name object at 0x7da1b077b100>]]]] | keyword[def] identifier[diginorm] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[formats] . identifier[fastq] keyword[import] identifier[shuffle] , identifier[pairinplace] , identifier[split]
keyword[from] identifier[jcvi] . identifier[apps] . identifier[base] keyword[import] identifier[getfilesize]
identifier[p] = identifier[OptionParser] ( identifier[diginorm] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[string] , identifier[choices] =( literal[string] , literal[string] ),
identifier[help] = literal[string] )
identifier[p] . identifier[set_depth] ( identifier[depth] = literal[int] )
identifier[p] . identifier[set_home] ( literal[string] , identifier[default] = literal[string] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] ) keyword[not] keyword[in] ( literal[int] , literal[int] ):
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
identifier[fastq] = identifier[shuffle] ( identifier[args] +[ literal[string] ])
keyword[else] :
identifier[fastq] ,= identifier[args]
identifier[kh] = identifier[opts] . identifier[khmer_home]
identifier[depth] = identifier[opts] . identifier[depth]
identifier[PE] = keyword[not] identifier[opts] . identifier[single]
identifier[sys] . identifier[path] . identifier[insert] ( literal[int] , identifier[op] . identifier[join] ( identifier[kh] , literal[string] ))
identifier[pf] = identifier[fastq] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[keepfile] = identifier[fastq] + literal[string]
identifier[hashfile] = identifier[pf] + literal[string]
identifier[mints] = literal[int]
identifier[ts] = identifier[opts] . identifier[tablesize] keyword[or] (( identifier[getfilesize] ( identifier[fastq] )/ literal[int] / identifier[mints] + literal[int] )* identifier[mints] )
identifier[norm_cmd] = identifier[op] . identifier[join] ( identifier[kh] , literal[string] )
identifier[filt_cmd] = identifier[op] . identifier[join] ( identifier[kh] , literal[string] )
keyword[if] identifier[need_update] ( identifier[fastq] ,( identifier[hashfile] , identifier[keepfile] )):
identifier[cmd] = identifier[norm_cmd]
identifier[cmd] += literal[string] . identifier[format] ( identifier[depth] , identifier[ts] )
keyword[if] identifier[PE] :
identifier[cmd] += literal[string]
identifier[cmd] += literal[string] . identifier[format] ( identifier[hashfile] , identifier[fastq] )
identifier[sh] ( identifier[cmd] )
identifier[abundfiltfile] = identifier[keepfile] + literal[string]
keyword[if] identifier[need_update] (( identifier[hashfile] , identifier[keepfile] ), identifier[abundfiltfile] ):
identifier[cmd] = identifier[filt_cmd]
identifier[cmd] += literal[string] . identifier[format] ( identifier[hashfile] , identifier[keepfile] )
identifier[sh] ( identifier[cmd] )
keyword[if] identifier[opts] . identifier[npass] == literal[string] :
identifier[seckeepfile] = identifier[abundfiltfile]
keyword[else] :
identifier[seckeepfile] = identifier[abundfiltfile] + literal[string]
keyword[if] identifier[need_update] ( identifier[abundfiltfile] , identifier[seckeepfile] ):
identifier[cmd] = identifier[norm_cmd]
identifier[cmd] += literal[string] . identifier[format] ( identifier[depth] - literal[int] , identifier[ts] / literal[int] )
identifier[cmd] += literal[string] . identifier[format] ( identifier[abundfiltfile] )
identifier[sh] ( identifier[cmd] )
keyword[if] identifier[PE] :
identifier[pairsfile] = identifier[pairinplace] ([ identifier[seckeepfile] ,
literal[string] . identifier[format] ( identifier[pf] + literal[string] ), literal[string] ])
identifier[split] ([ identifier[pairsfile] ]) | def diginorm(args):
"""
%prog diginorm fastqfile
Run K-mer based normalization. Based on tutorial:
<http://ged.msu.edu/angus/diginorm-2012/tutorial.html>
Assume input is either an interleaved pairs file, or two separate files.
To set up khmer:
$ git clone git://github.com/ged-lab/screed.git
$ git clone git://github.com/ged-lab/khmer.git
$ cd screed
$ python setup.py install
$ cd ../khmer
$ make test
$ export PYTHONPATH=~/export/khmer
"""
from jcvi.formats.fastq import shuffle, pairinplace, split
from jcvi.apps.base import getfilesize
p = OptionParser(diginorm.__doc__)
p.add_option('--single', default=False, action='store_true', help='Single end reads')
p.add_option('--tablesize', help='Memory size')
p.add_option('--npass', default='1', choices=('1', '2'), help='How many passes of normalization')
p.set_depth(depth=50)
p.set_home('khmer', default='/usr/local/bin/')
(opts, args) = p.parse_args(args)
if len(args) not in (1, 2):
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
if len(args) == 2:
fastq = shuffle(args + ['--tag']) # depends on [control=['if'], data=[]]
else:
(fastq,) = args
kh = opts.khmer_home
depth = opts.depth
PE = not opts.single
sys.path.insert(0, op.join(kh, 'python'))
pf = fastq.rsplit('.', 1)[0]
keepfile = fastq + '.keep'
hashfile = pf + '.kh'
mints = 10000000
ts = opts.tablesize or (getfilesize(fastq) / 16 / mints + 1) * mints
norm_cmd = op.join(kh, 'normalize-by-median.py')
filt_cmd = op.join(kh, 'filter-abund.py')
if need_update(fastq, (hashfile, keepfile)):
cmd = norm_cmd
cmd += ' -C {0} -k 20 -N 4 -x {1}'.format(depth, ts)
if PE:
cmd += ' -p' # depends on [control=['if'], data=[]]
cmd += ' -s {0} {1}'.format(hashfile, fastq)
sh(cmd) # depends on [control=['if'], data=[]]
abundfiltfile = keepfile + '.abundfilt'
if need_update((hashfile, keepfile), abundfiltfile):
cmd = filt_cmd
cmd += ' {0} {1}'.format(hashfile, keepfile)
sh(cmd) # depends on [control=['if'], data=[]]
if opts.npass == '1':
seckeepfile = abundfiltfile # depends on [control=['if'], data=[]]
else:
seckeepfile = abundfiltfile + '.keep'
if need_update(abundfiltfile, seckeepfile):
cmd = norm_cmd
cmd += ' -C {0} -k 20 -N 4 -x {1}'.format(depth - 10, ts / 2)
cmd += ' {0}'.format(abundfiltfile)
sh(cmd) # depends on [control=['if'], data=[]]
if PE:
pairsfile = pairinplace([seckeepfile, '--base={0}'.format(pf + '_norm'), '--rclip=2'])
split([pairsfile]) # depends on [control=['if'], data=[]] |
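As a jcvi subcommand the function receives its argument list directly, so it can also be called from Python; a sketch of the paired-end and single-end forms with placeholder file names (--depth comes from set_depth above, default 50).

# paired-end reads: the two files are interleaved via shuffle() first
diginorm(['sample_1.fastq', 'sample_2.fastq', '--depth=50'])

# single-end reads, two normalization passes
diginorm(['sample.fastq', '--single', '--npass=2'])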
def add_dimension(self, name, data=None):
"""Add a named dimension to this entity."""
self.dimensions.add(name)
if data is None:
valobj = self.__dimtype__()
else:
valobj = make_object(self.__dimtype__, data)
self._data[name] = valobj
setattr(self, name, valobj)
return valobj | def function[add_dimension, parameter[self, name, data]]:
constant[Add a named dimension to this entity.]
call[name[self].dimensions.add, parameter[name[name]]]
if compare[name[data] is constant[None]] begin[:]
variable[valobj] assign[=] call[name[self].__dimtype__, parameter[]]
call[name[self]._data][name[name]] assign[=] name[valobj]
call[name[setattr], parameter[name[self], name[name], name[valobj]]]
return[name[valobj]] | keyword[def] identifier[add_dimension] ( identifier[self] , identifier[name] , identifier[data] = keyword[None] ):
literal[string]
identifier[self] . identifier[dimensions] . identifier[add] ( identifier[name] )
keyword[if] identifier[data] keyword[is] keyword[None] :
identifier[valobj] = identifier[self] . identifier[__dimtype__] ()
keyword[else] :
identifier[valobj] = identifier[make_object] ( identifier[self] . identifier[__dimtype__] , identifier[data] )
identifier[self] . identifier[_data] [ identifier[name] ]= identifier[valobj]
identifier[setattr] ( identifier[self] , identifier[name] , identifier[valobj] )
keyword[return] identifier[valobj] | def add_dimension(self, name, data=None):
"""Add a named dimension to this entity."""
self.dimensions.add(name)
if data is None:
valobj = self.__dimtype__() # depends on [control=['if'], data=[]]
else:
valobj = make_object(self.__dimtype__, data)
self._data[name] = valobj
setattr(self, name, valobj)
return valobj |
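A small, self-contained stand-in that mirrors the contract of the method above (the host class and make_object are not shown in this record, so the constructor is used in place of make_object); it only illustrates the two branches and the attribute/registry bookkeeping.

class Entity:
    """Hypothetical host class; dimensions default to plain dicts."""
    __dimtype__ = dict

    def __init__(self):
        self.dimensions = set()
        self._data = {}

    def add_dimension(self, name, data=None):
        self.dimensions.add(name)
        valobj = self.__dimtype__() if data is None else self.__dimtype__(data)
        self._data[name] = valobj
        setattr(self, name, valobj)            # the dimension is reachable as an attribute
        return valobj

e = Entity()
e.add_dimension('labels')                      # data=None -> fresh dict()
e.add_dimension('counts', {'errors': 3})       # data -> built from the given mapping
print(e.counts['errors'], sorted(e.dimensions))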
def str_lower(x):
"""Converts string samples to lower case.
:returns: an expression containing the converted strings.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.lower()
Expression = str_lower(text)
Length: 5 dtype: str (expression)
---------------------------------
0 something
1 very pretty
2 is coming
3 our
4 way.
"""
sl = _to_string_sequence(x).lower()
return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl) | def function[str_lower, parameter[x]]:
constant[Converts string samples to lower case.
:returns: an expression containing the converted strings.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.lower()
Expression = str_lower(text)
Length: 5 dtype: str (expression)
---------------------------------
0 something
1 very pretty
2 is coming
3 our
4 way.
]
variable[sl] assign[=] call[call[name[_to_string_sequence], parameter[name[x]]].lower, parameter[]]
return[call[name[column].ColumnStringArrow, parameter[name[sl].bytes, name[sl].indices, name[sl].length, name[sl].offset]]] | keyword[def] identifier[str_lower] ( identifier[x] ):
literal[string]
identifier[sl] = identifier[_to_string_sequence] ( identifier[x] ). identifier[lower] ()
keyword[return] identifier[column] . identifier[ColumnStringArrow] ( identifier[sl] . identifier[bytes] , identifier[sl] . identifier[indices] , identifier[sl] . identifier[length] , identifier[sl] . identifier[offset] , identifier[string_sequence] = identifier[sl] ) | def str_lower(x):
"""Converts string samples to lower case.
:returns: an expression containing the converted strings.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.lower()
Expression = str_lower(text)
Length: 5 dtype: str (expression)
---------------------------------
0 something
1 very pretty
2 is coming
3 our
4 way.
"""
sl = _to_string_sequence(x).lower()
return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl) |
def make_regression(
n_samples=100,
n_features=100,
n_informative=10,
n_targets=1,
bias=0.0,
effective_rank=None,
tail_strength=0.5,
noise=0.0,
shuffle=True,
coef=False,
random_state=None,
chunks=None,
):
"""
Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See
:func:`sklearn.datasets.make_low_rank_matrix` for more details.
This can be used to generate very large Dask arrays on a cluster of
machines. When using Dask in distributed mode, the client machine
only needs to allocate a single block's worth of data.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
chunks : int, tuple
How to chunk the array. Must be one of the following forms:
- A blocksize like 1000.
- A blockshape like (1000, 1000).
- Explicit sizes of all blocks along all dimensions like
((1000, 1000, 500), (400, 400)).
Returns
-------
X : Dask array of shape [n_samples, n_features]
The input samples.
y : Dask array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
chunks = da.core.normalize_chunks(chunks, (n_samples, n_features))
_check_axis_partitioning(chunks, n_features)
rng = sklearn.utils.check_random_state(random_state)
return_coef = coef is True
if chunks[1][0] != n_features:
raise ValueError(
"Can only generate arrays partitioned along the "
"first axis. Specifying a larger chunksize for "
"the second axis."
)
_, _, coef = sklearn.datasets.make_regression(
n_samples=chunks[0][0],
n_features=n_features,
n_informative=n_informative,
n_targets=n_targets,
bias=bias,
effective_rank=effective_rank,
tail_strength=tail_strength,
noise=noise,
shuffle=shuffle,
coef=True, # hardcode here
random_state=rng,
)
seed = da.random.random_state_data(1, random_state=rng)
da_rng = da.random.RandomState(seed[0])
X_big = da_rng.normal(size=(n_samples, n_features), chunks=(chunks[0], n_features))
y_big = da.dot(X_big, coef) + bias
if noise > 0:
y_big = y_big + da_rng.normal(
scale=noise, size=y_big.shape, chunks=y_big.chunks
)
y_big = y_big.squeeze()
if return_coef:
return X_big, y_big, coef
else:
return X_big, y_big | def function[make_regression, parameter[n_samples, n_features, n_informative, n_targets, bias, effective_rank, tail_strength, noise, shuffle, coef, random_state, chunks]]:
constant[
Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See
:func:`sklearn.datasets.make_low_rank_matrix` for more details.
This can be used to generate very large Dask arrays on a cluster of
machines. When using Dask in distributed mode, the client machine
only needs to allocate a single block's worth of data.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
chunks : int, tuple
How to chunk the array. Must be one of the following forms:
- A blocksize like 1000.
- A blockshape like (1000, 1000).
- Explicit sizes of all blocks along all dimensions like
((1000, 1000, 500), (400, 400)).
Returns
-------
X : Dask array of shape [n_samples, n_features]
The input samples.
y : Dask array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
]
variable[chunks] assign[=] call[name[da].core.normalize_chunks, parameter[name[chunks], tuple[[<ast.Name object at 0x7da1b1950340>, <ast.Name object at 0x7da1b1950d60>]]]]
call[name[_check_axis_partitioning], parameter[name[chunks], name[n_features]]]
variable[rng] assign[=] call[name[sklearn].utils.check_random_state, parameter[name[random_state]]]
variable[return_coef] assign[=] compare[name[coef] is constant[True]]
if compare[call[call[name[chunks]][constant[1]]][constant[0]] not_equal[!=] name[n_features]] begin[:]
<ast.Raise object at 0x7da1b1950880>
<ast.Tuple object at 0x7da1b1950b20> assign[=] call[name[sklearn].datasets.make_regression, parameter[]]
variable[seed] assign[=] call[name[da].random.random_state_data, parameter[constant[1]]]
variable[da_rng] assign[=] call[name[da].random.RandomState, parameter[call[name[seed]][constant[0]]]]
variable[X_big] assign[=] call[name[da_rng].normal, parameter[]]
variable[y_big] assign[=] binary_operation[call[name[da].dot, parameter[name[X_big], name[coef]]] + name[bias]]
if compare[name[noise] greater[>] constant[0]] begin[:]
variable[y_big] assign[=] binary_operation[name[y_big] + call[name[da_rng].normal, parameter[]]]
variable[y_big] assign[=] call[name[y_big].squeeze, parameter[]]
if name[return_coef] begin[:]
return[tuple[[<ast.Name object at 0x7da1b1981000>, <ast.Name object at 0x7da1b1983eb0>, <ast.Name object at 0x7da1b19810c0>]]] | keyword[def] identifier[make_regression] (
identifier[n_samples] = literal[int] ,
identifier[n_features] = literal[int] ,
identifier[n_informative] = literal[int] ,
identifier[n_targets] = literal[int] ,
identifier[bias] = literal[int] ,
identifier[effective_rank] = keyword[None] ,
identifier[tail_strength] = literal[int] ,
identifier[noise] = literal[int] ,
identifier[shuffle] = keyword[True] ,
identifier[coef] = keyword[False] ,
identifier[random_state] = keyword[None] ,
identifier[chunks] = keyword[None] ,
):
literal[string]
identifier[chunks] = identifier[da] . identifier[core] . identifier[normalize_chunks] ( identifier[chunks] ,( identifier[n_samples] , identifier[n_features] ))
identifier[_check_axis_partitioning] ( identifier[chunks] , identifier[n_features] )
identifier[rng] = identifier[sklearn] . identifier[utils] . identifier[check_random_state] ( identifier[random_state] )
identifier[return_coef] = identifier[coef] keyword[is] keyword[True]
keyword[if] identifier[chunks] [ literal[int] ][ literal[int] ]!= identifier[n_features] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
literal[string]
)
identifier[_] , identifier[_] , identifier[coef] = identifier[sklearn] . identifier[datasets] . identifier[make_regression] (
identifier[n_samples] = identifier[chunks] [ literal[int] ][ literal[int] ],
identifier[n_features] = identifier[n_features] ,
identifier[n_informative] = identifier[n_informative] ,
identifier[n_targets] = identifier[n_targets] ,
identifier[bias] = identifier[bias] ,
identifier[effective_rank] = identifier[effective_rank] ,
identifier[tail_strength] = identifier[tail_strength] ,
identifier[noise] = identifier[noise] ,
identifier[shuffle] = identifier[shuffle] ,
identifier[coef] = keyword[True] ,
identifier[random_state] = identifier[rng] ,
)
identifier[seed] = identifier[da] . identifier[random] . identifier[random_state_data] ( literal[int] , identifier[random_state] = identifier[rng] )
identifier[da_rng] = identifier[da] . identifier[random] . identifier[RandomState] ( identifier[seed] [ literal[int] ])
identifier[X_big] = identifier[da_rng] . identifier[normal] ( identifier[size] =( identifier[n_samples] , identifier[n_features] ), identifier[chunks] =( identifier[chunks] [ literal[int] ], identifier[n_features] ))
identifier[y_big] = identifier[da] . identifier[dot] ( identifier[X_big] , identifier[coef] )+ identifier[bias]
keyword[if] identifier[noise] > literal[int] :
identifier[y_big] = identifier[y_big] + identifier[da_rng] . identifier[normal] (
identifier[scale] = identifier[noise] , identifier[size] = identifier[y_big] . identifier[shape] , identifier[chunks] = identifier[y_big] . identifier[chunks]
)
identifier[y_big] = identifier[y_big] . identifier[squeeze] ()
keyword[if] identifier[return_coef] :
keyword[return] identifier[X_big] , identifier[y_big] , identifier[coef]
keyword[else] :
keyword[return] identifier[X_big] , identifier[y_big] | def make_regression(n_samples=100, n_features=100, n_informative=10, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None, chunks=None):
"""
Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See
:func:`sklearn.datasets.make_low_rank_matrix` for more details.
This can be used to generate very large Dask arrays on a cluster of
machines. When using Dask in distributed mode, the client machine
only needs to allocate a single block's worth of data.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
chunks : int, tuple
How to chunk the array. Must be one of the following forms:
- A blocksize like 1000.
- A blockshape like (1000, 1000).
- Explicit sizes of all blocks along all dimensions like
((1000, 1000, 500), (400, 400)).
Returns
-------
X : Dask array of shape [n_samples, n_features]
The input samples.
y : Dask array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
chunks = da.core.normalize_chunks(chunks, (n_samples, n_features))
_check_axis_partitioning(chunks, n_features)
rng = sklearn.utils.check_random_state(random_state)
return_coef = coef is True
if chunks[1][0] != n_features:
raise ValueError('Can only generate arrays partitioned along the first axis. Specifying a larger chunksize for the second axis.') # depends on [control=['if'], data=[]] # hardcode here
(_, _, coef) = sklearn.datasets.make_regression(n_samples=chunks[0][0], n_features=n_features, n_informative=n_informative, n_targets=n_targets, bias=bias, effective_rank=effective_rank, tail_strength=tail_strength, noise=noise, shuffle=shuffle, coef=True, random_state=rng)
seed = da.random.random_state_data(1, random_state=rng)
da_rng = da.random.RandomState(seed[0])
X_big = da_rng.normal(size=(n_samples, n_features), chunks=(chunks[0], n_features))
y_big = da.dot(X_big, coef) + bias
if noise > 0:
y_big = y_big + da_rng.normal(scale=noise, size=y_big.shape, chunks=y_big.chunks) # depends on [control=['if'], data=['noise']]
y_big = y_big.squeeze()
if return_coef:
return (X_big, y_big, coef) # depends on [control=['if'], data=[]]
else:
return (X_big, y_big) |
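Typical use, assuming this is the make_regression shipped in dask-ml's datasets module: the call mirrors scikit-learn's, with chunks controlling how the row axis is partitioned; the 100_000 x 20 shape is arbitrary.

from dask_ml.datasets import make_regression

# 100 blocks of 1_000 rows each; the feature axis is never split (see the check above).
X, y = make_regression(n_samples=100_000, n_features=20,
                       chunks=(1_000, 20), random_state=0)
print(X.chunks, y.chunks)          # lazy dask arrays; nothing is materialised yet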
def TIF_to_jpg_all(path):
"""run TIF_to_jpg() on every TIF of a folder."""
for fname in sorted(glob.glob(path+"/*.tif")):
print(fname)
TIF_to_jpg(fname) | def function[TIF_to_jpg_all, parameter[path]]:
constant[Run TIF_to_jpg() on every TIF in a folder.]
for taget[name[fname]] in starred[call[name[sorted], parameter[call[name[glob].glob, parameter[binary_operation[name[path] + constant[/*.tif]]]]]]] begin[:]
call[name[print], parameter[name[fname]]]
call[name[TIF_to_jpg], parameter[name[fname]]] | keyword[def] identifier[TIF_to_jpg_all] ( identifier[path] ):
literal[string]
keyword[for] identifier[fname] keyword[in] identifier[sorted] ( identifier[glob] . identifier[glob] ( identifier[path] + literal[string] )):
identifier[print] ( identifier[fname] )
identifier[TIF_to_jpg] ( identifier[fname] ) | def TIF_to_jpg_all(path):
"""run TIF_to_jpg() on every TIF of a folder."""
for fname in sorted(glob.glob(path + '/*.tif')):
print(fname)
TIF_to_jpg(fname) # depends on [control=['for'], data=['fname']] |
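Usage is a single directory path; note the glob pattern is lowercase-only, so on case-sensitive filesystems files ending in .TIF are skipped.

TIF_to_jpg_all('/data/scans')      # converts /data/scans/*.tif in sorted order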
def GetFileContents(filename, binary=False, encoding=None, newline=None):
'''
Reads a file and returns its contents. Works for both local and remote files.
:param unicode filename:
:param bool binary:
If True returns the file as is, ignore any EOL conversion.
:param unicode encoding:
File's encoding. If not None, contents obtained from file will be decoded using this
`encoding`.
:param None|''|'\n'|'\r'|'\r\n' newline:
Controls universal newlines.
See 'io.open' newline parameter documentation for more details.
:returns str|unicode:
The file's contents.
Returns unicode string when `encoding` is not None.
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
source_file = OpenFile(filename, binary=binary, encoding=encoding, newline=newline)
try:
contents = source_file.read()
finally:
source_file.close()
return contents | def function[GetFileContents, parameter[filename, binary, encoding, newline]]:
constant[
Reads a file and returns its contents. Works for both local and remote files.
:param unicode filename:
:param bool binary:
If True returns the file as is, ignore any EOL conversion.
:param unicode encoding:
File's encoding. If not None, contents obtained from file will be decoded using this
`encoding`.
:param None|''|'\n'|'\r'|'\r\n' newline:
Controls universal newlines.
See 'io.open' newline parameter documentation for more details.
:returns str|unicode:
The file's contents.
Returns unicode string when `encoding` is not None.
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
]
variable[source_file] assign[=] call[name[OpenFile], parameter[name[filename]]]
<ast.Try object at 0x7da1affeff10>
return[name[contents]] | keyword[def] identifier[GetFileContents] ( identifier[filename] , identifier[binary] = keyword[False] , identifier[encoding] = keyword[None] , identifier[newline] = keyword[None] ):
literal[string]
identifier[source_file] = identifier[OpenFile] ( identifier[filename] , identifier[binary] = identifier[binary] , identifier[encoding] = identifier[encoding] , identifier[newline] = identifier[newline] )
keyword[try] :
identifier[contents] = identifier[source_file] . identifier[read] ()
keyword[finally] :
identifier[source_file] . identifier[close] ()
keyword[return] identifier[contents] | def GetFileContents(filename, binary=False, encoding=None, newline=None):
"""
Reads a file and returns its contents. Works for both local and remote files.
:param unicode filename:
:param bool binary:
If True returns the file as is, ignore any EOL conversion.
:param unicode encoding:
File's encoding. If not None, contents obtained from file will be decoded using this
`encoding`.
:param None|''|'\n'|'\r'|'\r\n' newline:
Controls universal newlines.
See 'io.open' newline parameter documentation for more details.
:returns str|unicode:
The file's contents.
Returns unicode string when `encoding` is not None.
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
"""
source_file = OpenFile(filename, binary=binary, encoding=encoding, newline=newline)
try:
contents = source_file.read() # depends on [control=['try'], data=[]]
finally:
source_file.close()
return contents |
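Two hedged call patterns, with placeholder paths; the remote form relies on the FTP support the docstring alludes to.

text = GetFileContents('notes.txt', encoding='utf-8')              # decoded unicode contents

raw = GetFileContents('ftp://host/path/archive.bin', binary=True)  # raw bytes, no EOL conversion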
def tile_decode(tile, tileindex, tileshape, tiledshape,
lsb2msb, decompress, unpack, unpredict, out):
"""Decode tile segment bytes into 5D output array."""
_, imagedepth, imagelength, imagewidth, _ = out.shape
tileddepth, tiledlength, tiledwidth = tiledshape
tiledepth, tilelength, tilewidth, samples = tileshape
tilesize = tiledepth * tilelength * tilewidth * samples
pl = tileindex // (tiledwidth * tiledlength * tileddepth)
td = (tileindex // (tiledwidth * tiledlength)) % tileddepth * tiledepth
tl = (tileindex // tiledwidth) % tiledlength * tilelength
tw = tileindex % tiledwidth * tilewidth
if tile:
if lsb2msb:
tile = bitorder_decode(tile, out=tile)
tile = decompress(tile)
tile = unpack(tile)
# decompression / unpacking might return too many bytes
tile = tile[:tilesize]
try:
# complete tile according to TIFF specification
tile.shape = tileshape
except ValueError:
# tile fills remaining space; found in some JPEG compressed slides
s = (min(imagedepth - td, tiledepth),
min(imagelength - tl, tilelength),
min(imagewidth - tw, tilewidth),
samples)
try:
tile.shape = s
except ValueError:
# incomplete tile; see gdal issue #1179
log.warning('tile_decode: incomplete tile %s %s',
tile.shape, tileshape)
t = numpy.zeros(tilesize, tile.dtype)
s = min(tile.size, tilesize)
t[:s] = tile[:s]
tile = t.reshape(tileshape)
tile = unpredict(tile, axis=-2, out=tile)
out[pl, td:td+tiledepth, tl:tl+tilelength, tw:tw+tilewidth] = (
tile[:imagedepth-td, :imagelength-tl, :imagewidth-tw])
else:
out[pl, td:td+tiledepth, tl:tl+tilelength, tw:tw+tilewidth] = 0 | def function[tile_decode, parameter[tile, tileindex, tileshape, tiledshape, lsb2msb, decompress, unpack, unpredict, out]]:
constant[Decode tile segment bytes into 5D output array.]
<ast.Tuple object at 0x7da1b19703a0> assign[=] name[out].shape
<ast.Tuple object at 0x7da1b1971ab0> assign[=] name[tiledshape]
<ast.Tuple object at 0x7da1b19704c0> assign[=] name[tileshape]
variable[tilesize] assign[=] binary_operation[binary_operation[binary_operation[name[tiledepth] * name[tilelength]] * name[tilewidth]] * name[samples]]
variable[pl] assign[=] binary_operation[name[tileindex] <ast.FloorDiv object at 0x7da2590d6bc0> binary_operation[binary_operation[name[tiledwidth] * name[tiledlength]] * name[tileddepth]]]
variable[td] assign[=] binary_operation[binary_operation[binary_operation[name[tileindex] <ast.FloorDiv object at 0x7da2590d6bc0> binary_operation[name[tiledwidth] * name[tiledlength]]] <ast.Mod object at 0x7da2590d6920> name[tileddepth]] * name[tiledepth]]
variable[tl] assign[=] binary_operation[binary_operation[binary_operation[name[tileindex] <ast.FloorDiv object at 0x7da2590d6bc0> name[tiledwidth]] <ast.Mod object at 0x7da2590d6920> name[tiledlength]] * name[tilelength]]
variable[tw] assign[=] binary_operation[binary_operation[name[tileindex] <ast.Mod object at 0x7da2590d6920> name[tiledwidth]] * name[tilewidth]]
if name[tile] begin[:]
if name[lsb2msb] begin[:]
variable[tile] assign[=] call[name[bitorder_decode], parameter[name[tile]]]
variable[tile] assign[=] call[name[decompress], parameter[name[tile]]]
variable[tile] assign[=] call[name[unpack], parameter[name[tile]]]
variable[tile] assign[=] call[name[tile]][<ast.Slice object at 0x7da1b1972410>]
<ast.Try object at 0x7da1b1970550>
variable[tile] assign[=] call[name[unpredict], parameter[name[tile]]]
call[name[out]][tuple[[<ast.Name object at 0x7da1b19d3fa0>, <ast.Slice object at 0x7da1b19d3f70>, <ast.Slice object at 0x7da1b19d3dc0>, <ast.Slice object at 0x7da1b19d3c70>]]] assign[=] call[name[tile]][tuple[[<ast.Slice object at 0x7da1b19d3490>, <ast.Slice object at 0x7da1b19d0490>, <ast.Slice object at 0x7da1b19d0eb0>]]] | keyword[def] identifier[tile_decode] ( identifier[tile] , identifier[tileindex] , identifier[tileshape] , identifier[tiledshape] ,
identifier[lsb2msb] , identifier[decompress] , identifier[unpack] , identifier[unpredict] , identifier[out] ):
literal[string]
identifier[_] , identifier[imagedepth] , identifier[imagelength] , identifier[imagewidth] , identifier[_] = identifier[out] . identifier[shape]
identifier[tileddepth] , identifier[tiledlength] , identifier[tiledwidth] = identifier[tiledshape]
identifier[tiledepth] , identifier[tilelength] , identifier[tilewidth] , identifier[samples] = identifier[tileshape]
identifier[tilesize] = identifier[tiledepth] * identifier[tilelength] * identifier[tilewidth] * identifier[samples]
identifier[pl] = identifier[tileindex] //( identifier[tiledwidth] * identifier[tiledlength] * identifier[tileddepth] )
identifier[td] =( identifier[tileindex] //( identifier[tiledwidth] * identifier[tiledlength] ))% identifier[tileddepth] * identifier[tiledepth]
identifier[tl] =( identifier[tileindex] // identifier[tiledwidth] )% identifier[tiledlength] * identifier[tilelength]
identifier[tw] = identifier[tileindex] % identifier[tiledwidth] * identifier[tilewidth]
keyword[if] identifier[tile] :
keyword[if] identifier[lsb2msb] :
identifier[tile] = identifier[bitorder_decode] ( identifier[tile] , identifier[out] = identifier[tile] )
identifier[tile] = identifier[decompress] ( identifier[tile] )
identifier[tile] = identifier[unpack] ( identifier[tile] )
identifier[tile] = identifier[tile] [: identifier[tilesize] ]
keyword[try] :
identifier[tile] . identifier[shape] = identifier[tileshape]
keyword[except] identifier[ValueError] :
identifier[s] =( identifier[min] ( identifier[imagedepth] - identifier[td] , identifier[tiledepth] ),
identifier[min] ( identifier[imagelength] - identifier[tl] , identifier[tilelength] ),
identifier[min] ( identifier[imagewidth] - identifier[tw] , identifier[tilewidth] ),
identifier[samples] )
keyword[try] :
identifier[tile] . identifier[shape] = identifier[s]
keyword[except] identifier[ValueError] :
identifier[log] . identifier[warning] ( literal[string] ,
identifier[tile] . identifier[shape] , identifier[tileshape] )
identifier[t] = identifier[numpy] . identifier[zeros] ( identifier[tilesize] , identifier[tile] . identifier[dtype] )
identifier[s] = identifier[min] ( identifier[tile] . identifier[size] , identifier[tilesize] )
identifier[t] [: identifier[s] ]= identifier[tile] [: identifier[s] ]
identifier[tile] = identifier[t] . identifier[reshape] ( identifier[tileshape] )
identifier[tile] = identifier[unpredict] ( identifier[tile] , identifier[axis] =- literal[int] , identifier[out] = identifier[tile] )
identifier[out] [ identifier[pl] , identifier[td] : identifier[td] + identifier[tiledepth] , identifier[tl] : identifier[tl] + identifier[tilelength] , identifier[tw] : identifier[tw] + identifier[tilewidth] ]=(
identifier[tile] [: identifier[imagedepth] - identifier[td] ,: identifier[imagelength] - identifier[tl] ,: identifier[imagewidth] - identifier[tw] ])
keyword[else] :
identifier[out] [ identifier[pl] , identifier[td] : identifier[td] + identifier[tiledepth] , identifier[tl] : identifier[tl] + identifier[tilelength] , identifier[tw] : identifier[tw] + identifier[tilewidth] ]= literal[int] | def tile_decode(tile, tileindex, tileshape, tiledshape, lsb2msb, decompress, unpack, unpredict, out):
"""Decode tile segment bytes into 5D output array."""
(_, imagedepth, imagelength, imagewidth, _) = out.shape
(tileddepth, tiledlength, tiledwidth) = tiledshape
(tiledepth, tilelength, tilewidth, samples) = tileshape
tilesize = tiledepth * tilelength * tilewidth * samples
pl = tileindex // (tiledwidth * tiledlength * tileddepth)
td = tileindex // (tiledwidth * tiledlength) % tileddepth * tiledepth
tl = tileindex // tiledwidth % tiledlength * tilelength
tw = tileindex % tiledwidth * tilewidth
if tile:
if lsb2msb:
tile = bitorder_decode(tile, out=tile) # depends on [control=['if'], data=[]]
tile = decompress(tile)
tile = unpack(tile)
# decompression / unpacking might return too many bytes
tile = tile[:tilesize]
try:
# complete tile according to TIFF specification
tile.shape = tileshape # depends on [control=['try'], data=[]]
except ValueError:
# tile fills remaining space; found in some JPEG compressed slides
s = (min(imagedepth - td, tiledepth), min(imagelength - tl, tilelength), min(imagewidth - tw, tilewidth), samples)
try:
tile.shape = s # depends on [control=['try'], data=[]]
except ValueError:
# incomplete tile; see gdal issue #1179
log.warning('tile_decode: incomplete tile %s %s', tile.shape, tileshape)
t = numpy.zeros(tilesize, tile.dtype)
s = min(tile.size, tilesize)
t[:s] = tile[:s]
tile = t.reshape(tileshape) # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
tile = unpredict(tile, axis=-2, out=tile)
out[pl, td:td + tiledepth, tl:tl + tilelength, tw:tw + tilewidth] = tile[:imagedepth - td, :imagelength - tl, :imagewidth - tw] # depends on [control=['if'], data=[]]
else:
out[pl, td:td + tiledepth, tl:tl + tilelength, tw:tw + tilewidth] = 0 |
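The divmod chain at the top of the function maps a flat tile index onto (plane, depth, row, column) offsets; a worked example on a made-up 1 x 2 x 3 grid of 64 x 64 tiles shows the pattern.

tileddepth, tiledlength, tiledwidth = 1, 2, 3        # tiles per axis
tiledepth, tilelength, tilewidth = 1, 64, 64         # pixels per tile

for tileindex in range(tileddepth * tiledlength * tiledwidth):
    pl = tileindex // (tiledwidth * tiledlength * tileddepth)            # sample plane
    td = (tileindex // (tiledwidth * tiledlength)) % tileddepth * tiledepth
    tl = (tileindex // tiledwidth) % tiledlength * tilelength            # row offset
    tw = tileindex % tiledwidth * tilewidth                              # column offset
    print(tileindex, (pl, td, tl, tw))
# tile 0 -> (0, 0, 0, 0), tile 1 -> (0, 0, 0, 64), ..., tile 5 -> (0, 0, 64, 128)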
def _get_wmi_sampler(self, instance_key, wmi_class, properties, tag_by="", **kwargs):
"""
Create and cache a WMISampler for the given (class, properties)
"""
properties = list(properties) + [tag_by] if tag_by else list(properties)
if instance_key not in self.wmi_samplers:
wmi_sampler = WMISampler(self.log, wmi_class, properties, **kwargs)
self.wmi_samplers[instance_key] = wmi_sampler
return self.wmi_samplers[instance_key] | def function[_get_wmi_sampler, parameter[self, instance_key, wmi_class, properties, tag_by]]:
constant[
Create and cache a WMISampler for the given (class, properties)
]
variable[properties] assign[=] <ast.IfExp object at 0x7da18f810940>
if compare[name[instance_key] <ast.NotIn object at 0x7da2590d7190> name[self].wmi_samplers] begin[:]
variable[wmi_sampler] assign[=] call[name[WMISampler], parameter[name[self].log, name[wmi_class], name[properties]]]
call[name[self].wmi_samplers][name[instance_key]] assign[=] name[wmi_sampler]
return[call[name[self].wmi_samplers][name[instance_key]]] | keyword[def] identifier[_get_wmi_sampler] ( identifier[self] , identifier[instance_key] , identifier[wmi_class] , identifier[properties] , identifier[tag_by] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[properties] = identifier[list] ( identifier[properties] )+[ identifier[tag_by] ] keyword[if] identifier[tag_by] keyword[else] identifier[list] ( identifier[properties] )
keyword[if] identifier[instance_key] keyword[not] keyword[in] identifier[self] . identifier[wmi_samplers] :
identifier[wmi_sampler] = identifier[WMISampler] ( identifier[self] . identifier[log] , identifier[wmi_class] , identifier[properties] ,** identifier[kwargs] )
identifier[self] . identifier[wmi_samplers] [ identifier[instance_key] ]= identifier[wmi_sampler]
keyword[return] identifier[self] . identifier[wmi_samplers] [ identifier[instance_key] ] | def _get_wmi_sampler(self, instance_key, wmi_class, properties, tag_by='', **kwargs):
"""
Create and cache a WMISampler for the given (class, properties)
"""
properties = list(properties) + [tag_by] if tag_by else list(properties)
if instance_key not in self.wmi_samplers:
wmi_sampler = WMISampler(self.log, wmi_class, properties, **kwargs)
self.wmi_samplers[instance_key] = wmi_sampler # depends on [control=['if'], data=['instance_key']]
return self.wmi_samplers[instance_key] |
def output(self):
"""
Returns the next available output token.
:return: the next token, None if none available
:rtype: Token
"""
if (self._output is None) or (len(self._output) == 0):
result = None
else:
result = self._output.pop(0)
return result | def function[output, parameter[self]]:
constant[
Returns the next available output token.
:return: the next token, None if none available
:rtype: Token
]
if <ast.BoolOp object at 0x7da1b06bfb80> begin[:]
variable[result] assign[=] constant[None]
return[name[result]] | keyword[def] identifier[output] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[_output] keyword[is] keyword[None] ) keyword[or] ( identifier[len] ( identifier[self] . identifier[_output] )== literal[int] ):
identifier[result] = keyword[None]
keyword[else] :
identifier[result] = identifier[self] . identifier[_output] . identifier[pop] ( literal[int] )
keyword[return] identifier[result] | def output(self):
"""
Returns the next available output token.
:return: the next token, None if none available
:rtype: Token
"""
if self._output is None or len(self._output) == 0:
result = None # depends on [control=['if'], data=[]]
else:
result = self._output.pop(0)
return result |
def set_public_domain(self, public_domain):
"""Sets the public domain flag.
arg: public_domain (boolean): the public domain status
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_group_template
if self.get_public_domain_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_boolean(public_domain):
raise errors.InvalidArgument()
self._my_map['publicDomain'] = public_domain | def function[set_public_domain, parameter[self, public_domain]]:
constant[Sets the public domain flag.
arg: public_domain (boolean): the public domain status
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
]
if call[call[name[self].get_public_domain_metadata, parameter[]].is_read_only, parameter[]] begin[:]
<ast.Raise object at 0x7da18c4cde40>
if <ast.UnaryOp object at 0x7da18c4cdf00> begin[:]
<ast.Raise object at 0x7da18c4cc4f0>
call[name[self]._my_map][constant[publicDomain]] assign[=] name[public_domain] | keyword[def] identifier[set_public_domain] ( identifier[self] , identifier[public_domain] ):
literal[string]
keyword[if] identifier[self] . identifier[get_public_domain_metadata] (). identifier[is_read_only] ():
keyword[raise] identifier[errors] . identifier[NoAccess] ()
keyword[if] keyword[not] identifier[self] . identifier[_is_valid_boolean] ( identifier[public_domain] ):
keyword[raise] identifier[errors] . identifier[InvalidArgument] ()
identifier[self] . identifier[_my_map] [ literal[string] ]= identifier[public_domain] | def set_public_domain(self, public_domain):
"""Sets the public domain flag.
arg: public_domain (boolean): the public domain status
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_group_template
if self.get_public_domain_metadata().is_read_only():
raise errors.NoAccess() # depends on [control=['if'], data=[]]
if not self._is_valid_boolean(public_domain):
raise errors.InvalidArgument() # depends on [control=['if'], data=[]]
self._my_map['publicDomain'] = public_domain |
def removeNullPadding(str, blocksize=AES_blocksize):
'Remove padding with null bytes'
pad_len = 0
for char in str[::-1]: # str[::-1] reverses string
if char == '\0':
pad_len += 1
else:
break
str = str[:-pad_len]
return str | def function[removeNullPadding, parameter[str, blocksize]]:
constant[Remove padding with null bytes]
variable[pad_len] assign[=] constant[0]
for taget[name[char]] in starred[call[name[str]][<ast.Slice object at 0x7da1b04a4a30>]] begin[:]
if compare[name[char] equal[==] constant[ ]] begin[:]
<ast.AugAssign object at 0x7da1b04a4940>
variable[str] assign[=] call[name[str]][<ast.Slice object at 0x7da1b04a6290>]
return[name[str]] | keyword[def] identifier[removeNullPadding] ( identifier[str] , identifier[blocksize] = identifier[AES_blocksize] ):
literal[string]
identifier[pad_len] = literal[int]
keyword[for] identifier[char] keyword[in] identifier[str] [::- literal[int] ]:
keyword[if] identifier[char] == literal[string] :
identifier[pad_len] += literal[int]
keyword[else] :
keyword[break]
identifier[str] = identifier[str] [:- identifier[pad_len] ]
keyword[return] identifier[str] | def removeNullPadding(str, blocksize=AES_blocksize):
"""Remove padding with null bytes"""
pad_len = 0
for char in str[::-1]: # str[::-1] reverses string
if char == '\x00':
pad_len += 1 # depends on [control=['if'], data=[]]
else:
break # depends on [control=['for'], data=['char']]
str = str[:-pad_len]
return str |
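A quick check of the behaviour; be aware that when the input has no trailing null bytes at all, pad_len stays 0 and str[:-0] evaluates to str[:0], so the function returns an empty string in that case.

print(removeNullPadding('secret message\x00\x00'))    # -> 'secret message'
print(repr(removeNullPadding('no padding here')))     # -> '' because str[:-0] == str[:0]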
def parse_operand(string, location, tokens):
"""Parse instruction operand.
"""
sizes = {
"dqword": 128,
"pointer": 72,
"qword": 64,
"pointer": 40,
"dword": 32,
"word": 16,
"byte": 8,
"bit": 1,
}
if "immediate" in tokens:
imm_str = "".join(tokens["immediate"])
base = 16 if imm_str.startswith("0x") or imm_str.startswith("-0x") else 10
imm = int(imm_str, base)
oprnd = ReilImmediateOperand(imm)
if "register" in tokens:
if tokens["register"] in ["e", "empty"]:
oprnd = ReilEmptyOperand()
oprnd.size = 0
else:
name = tokens["register"]
oprnd = ReilRegisterOperand(name)
if "size" in tokens:
oprnd.size = int(sizes[tokens["size"]])
return [oprnd] | def function[parse_operand, parameter[string, location, tokens]]:
constant[Parse instruction operand.
]
variable[sizes] assign[=] dictionary[[<ast.Constant object at 0x7da18f09fca0>, <ast.Constant object at 0x7da18f09d030>, <ast.Constant object at 0x7da18f09fc10>, <ast.Constant object at 0x7da18f09d840>, <ast.Constant object at 0x7da18f09ed70>, <ast.Constant object at 0x7da18f09f640>, <ast.Constant object at 0x7da18f09d4e0>, <ast.Constant object at 0x7da18f09e080>], [<ast.Constant object at 0x7da18f09e2c0>, <ast.Constant object at 0x7da18f09dc00>, <ast.Constant object at 0x7da18f09d630>, <ast.Constant object at 0x7da18f09faf0>, <ast.Constant object at 0x7da18f09ff70>, <ast.Constant object at 0x7da18f09dbd0>, <ast.Constant object at 0x7da18f09cbe0>, <ast.Constant object at 0x7da18f09d540>]]
if compare[constant[immediate] in name[tokens]] begin[:]
variable[imm_str] assign[=] call[constant[].join, parameter[call[name[tokens]][constant[immediate]]]]
variable[base] assign[=] <ast.IfExp object at 0x7da1b09800a0>
variable[imm] assign[=] call[name[int], parameter[name[imm_str], name[base]]]
variable[oprnd] assign[=] call[name[ReilImmediateOperand], parameter[name[imm]]]
if compare[constant[register] in name[tokens]] begin[:]
if compare[call[name[tokens]][constant[register]] in list[[<ast.Constant object at 0x7da1b086d660>, <ast.Constant object at 0x7da1b086cb20>]]] begin[:]
variable[oprnd] assign[=] call[name[ReilEmptyOperand], parameter[]]
name[oprnd].size assign[=] constant[0]
if compare[constant[size] in name[tokens]] begin[:]
name[oprnd].size assign[=] call[name[int], parameter[call[name[sizes]][call[name[tokens]][constant[size]]]]]
return[list[[<ast.Name object at 0x7da1b086c640>]]] | keyword[def] identifier[parse_operand] ( identifier[string] , identifier[location] , identifier[tokens] ):
literal[string]
identifier[sizes] ={
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
}
keyword[if] literal[string] keyword[in] identifier[tokens] :
identifier[imm_str] = literal[string] . identifier[join] ( identifier[tokens] [ literal[string] ])
identifier[base] = literal[int] keyword[if] identifier[imm_str] . identifier[startswith] ( literal[string] ) keyword[or] identifier[imm_str] . identifier[startswith] ( literal[string] ) keyword[else] literal[int]
identifier[imm] = identifier[int] ( identifier[imm_str] , identifier[base] )
identifier[oprnd] = identifier[ReilImmediateOperand] ( identifier[imm] )
keyword[if] literal[string] keyword[in] identifier[tokens] :
keyword[if] identifier[tokens] [ literal[string] ] keyword[in] [ literal[string] , literal[string] ]:
identifier[oprnd] = identifier[ReilEmptyOperand] ()
identifier[oprnd] . identifier[size] = literal[int]
keyword[else] :
identifier[name] = identifier[tokens] [ literal[string] ]
identifier[oprnd] = identifier[ReilRegisterOperand] ( identifier[name] )
keyword[if] literal[string] keyword[in] identifier[tokens] :
identifier[oprnd] . identifier[size] = identifier[int] ( identifier[sizes] [ identifier[tokens] [ literal[string] ]])
keyword[return] [ identifier[oprnd] ] | def parse_operand(string, location, tokens):
"""Parse instruction operand.
"""
sizes = {'dqword': 128, 'pointer': 72, 'qword': 64, 'pointer': 40, 'dword': 32, 'word': 16, 'byte': 8, 'bit': 1}
if 'immediate' in tokens:
imm_str = ''.join(tokens['immediate'])
base = 16 if imm_str.startswith('0x') or imm_str.startswith('-0x') else 10
imm = int(imm_str, base)
oprnd = ReilImmediateOperand(imm) # depends on [control=['if'], data=['tokens']]
if 'register' in tokens:
if tokens['register'] in ['e', 'empty']:
oprnd = ReilEmptyOperand()
oprnd.size = 0 # depends on [control=['if'], data=[]]
else:
name = tokens['register']
oprnd = ReilRegisterOperand(name) # depends on [control=['if'], data=['tokens']]
if 'size' in tokens:
oprnd.size = int(sizes[tokens['size']]) # depends on [control=['if'], data=['tokens']]
return [oprnd] |
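# Illustrative sketch (not part of the original source): the immediate-operand
# branch above reduces to picking base 16 for "0x"/"-0x" prefixes and base 10
# otherwise before handing the string to int().
def parse_immediate(imm_str):
    base = 16 if imm_str.startswith("0x") or imm_str.startswith("-0x") else 10
    return int(imm_str, base)

assert parse_immediate("0xff") == 255
assert parse_immediate("-0x10") == -16
assert parse_immediate("42") == 42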
def use_plenary_catalog_view(self):
"""Pass through to provider CatalogLookupSession.use_plenary_catalog_view"""
self._catalog_view = PLENARY
# self._get_provider_session('catalog_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_catalog_view()
except AttributeError:
pass | def function[use_plenary_catalog_view, parameter[self]]:
constant[Pass through to provider CatalogLookupSession.use_plenary_catalog_view]
name[self]._catalog_view assign[=] name[PLENARY]
for taget[name[session]] in starred[call[name[self]._get_provider_sessions, parameter[]]] begin[:]
<ast.Try object at 0x7da1b0ab7250> | keyword[def] identifier[use_plenary_catalog_view] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_catalog_view] = identifier[PLENARY]
keyword[for] identifier[session] keyword[in] identifier[self] . identifier[_get_provider_sessions] ():
keyword[try] :
identifier[session] . identifier[use_plenary_catalog_view] ()
keyword[except] identifier[AttributeError] :
keyword[pass] | def use_plenary_catalog_view(self):
"""Pass through to provider CatalogLookupSession.use_plenary_catalog_view"""
self._catalog_view = PLENARY
# self._get_provider_session('catalog_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_catalog_view() # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['session']] |
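# Illustrative sketch (not part of the original source): the pass-through above
# fans one call out to every provider session and ignores sessions lacking the
# method; getattr with a no-op default expresses the same idea.
class ViewSession(object):
    def __init__(self):
        self.view = None

    def use_plenary_catalog_view(self):
        self.view = 'PLENARY'

sessions = [ViewSession(), object()]  # the second session lacks the method
for session in sessions:
    getattr(session, 'use_plenary_catalog_view', lambda: None)()
assert sessions[0].view == 'PLENARY'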
def ensure_hist_size(self):
"""
Shrink the history of updates for
        an `index/doc_type` combination down to `self.marker_index_hist_size`.
"""
if self.marker_index_hist_size == 0:
return
result = self.es.search(index=self.marker_index,
doc_type=self.marker_doc_type,
body={'query': {
'term': {'target_index': self.index}}},
sort=('date:desc',))
for i, hit in enumerate(result.get('hits').get('hits'), start=1):
if i > self.marker_index_hist_size:
marker_document_id = hit.get('_id')
self.es.delete(id=marker_document_id, index=self.marker_index,
doc_type=self.marker_doc_type)
self.es.indices.flush(index=self.marker_index) | def function[ensure_hist_size, parameter[self]]:
constant[
Shrink the history of updates for
    an `index/doc_type` combination down to `self.marker_index_hist_size`.
]
if compare[name[self].marker_index_hist_size equal[==] constant[0]] begin[:]
return[None]
variable[result] assign[=] call[name[self].es.search, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18c4cca60>, <ast.Name object at 0x7da18c4cf7c0>]]] in starred[call[name[enumerate], parameter[call[call[name[result].get, parameter[constant[hits]]].get, parameter[constant[hits]]]]]] begin[:]
if compare[name[i] greater[>] name[self].marker_index_hist_size] begin[:]
variable[marker_document_id] assign[=] call[name[hit].get, parameter[constant[_id]]]
call[name[self].es.delete, parameter[]]
call[name[self].es.indices.flush, parameter[]] | keyword[def] identifier[ensure_hist_size] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[marker_index_hist_size] == literal[int] :
keyword[return]
identifier[result] = identifier[self] . identifier[es] . identifier[search] ( identifier[index] = identifier[self] . identifier[marker_index] ,
identifier[doc_type] = identifier[self] . identifier[marker_doc_type] ,
identifier[body] ={ literal[string] :{
literal[string] :{ literal[string] : identifier[self] . identifier[index] }}},
identifier[sort] =( literal[string] ,))
keyword[for] identifier[i] , identifier[hit] keyword[in] identifier[enumerate] ( identifier[result] . identifier[get] ( literal[string] ). identifier[get] ( literal[string] ), identifier[start] = literal[int] ):
keyword[if] identifier[i] > identifier[self] . identifier[marker_index_hist_size] :
identifier[marker_document_id] = identifier[hit] . identifier[get] ( literal[string] )
identifier[self] . identifier[es] . identifier[delete] ( identifier[id] = identifier[marker_document_id] , identifier[index] = identifier[self] . identifier[marker_index] ,
identifier[doc_type] = identifier[self] . identifier[marker_doc_type] )
identifier[self] . identifier[es] . identifier[indices] . identifier[flush] ( identifier[index] = identifier[self] . identifier[marker_index] ) | def ensure_hist_size(self):
"""
Shrink the history of updates for
    an `index/doc_type` combination down to `self.marker_index_hist_size`.
"""
if self.marker_index_hist_size == 0:
return # depends on [control=['if'], data=[]]
result = self.es.search(index=self.marker_index, doc_type=self.marker_doc_type, body={'query': {'term': {'target_index': self.index}}}, sort=('date:desc',))
for (i, hit) in enumerate(result.get('hits').get('hits'), start=1):
if i > self.marker_index_hist_size:
marker_document_id = hit.get('_id')
self.es.delete(id=marker_document_id, index=self.marker_index, doc_type=self.marker_doc_type) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
self.es.indices.flush(index=self.marker_index) |
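# Illustrative sketch (not part of the original source): trimming the marker
# history keeps only the newest marker_index_hist_size hits of a date-sorted
# result; with plain dicts that is just enumerate() past the size limit.
hist_size = 2
hits = [{'_id': 'd'}, {'_id': 'c'}, {'_id': 'b'}, {'_id': 'a'}]  # newest first
to_delete = [hit['_id'] for i, hit in enumerate(hits, start=1) if i > hist_size]
assert to_delete == ['b', 'a']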
def insert_rows(self, row, no_rows=1):
"""Adds no_rows rows before row, appends if row > maxrows
and marks grid as changed
"""
# Mark content as changed
post_command_event(self.main_window, self.ContentChangedMsg)
tab = self.grid.current_table
self.code_array.insert(row, no_rows, axis=0, tab=tab) | def function[insert_rows, parameter[self, row, no_rows]]:
constant[Adds no_rows rows before row, appends if row > maxrows
and marks grid as changed
]
call[name[post_command_event], parameter[name[self].main_window, name[self].ContentChangedMsg]]
variable[tab] assign[=] name[self].grid.current_table
call[name[self].code_array.insert, parameter[name[row], name[no_rows]]] | keyword[def] identifier[insert_rows] ( identifier[self] , identifier[row] , identifier[no_rows] = literal[int] ):
literal[string]
identifier[post_command_event] ( identifier[self] . identifier[main_window] , identifier[self] . identifier[ContentChangedMsg] )
identifier[tab] = identifier[self] . identifier[grid] . identifier[current_table]
identifier[self] . identifier[code_array] . identifier[insert] ( identifier[row] , identifier[no_rows] , identifier[axis] = literal[int] , identifier[tab] = identifier[tab] ) | def insert_rows(self, row, no_rows=1):
"""Adds no_rows rows before row, appends if row > maxrows
and marks grid as changed
"""
# Mark content as changed
post_command_event(self.main_window, self.ContentChangedMsg)
tab = self.grid.current_table
self.code_array.insert(row, no_rows, axis=0, tab=tab) |
def fix_facets(self):
"""
        This function converts date_histogram facets to datetime
"""
facets = self.facets
for key in list(facets.keys()):
_type = facets[key].get("_type", "unknown")
if _type == "date_histogram":
for entry in facets[key].get("entries", []):
for k, v in list(entry.items()):
if k in ["count", "max", "min", "total_count", "mean", "total"]:
continue
if not isinstance(entry[k], datetime):
entry[k] = datetime.utcfromtimestamp(v / 1e3) | def function[fix_facets, parameter[self]]:
constant[
    This function converts date_histogram facets to datetime
]
variable[facets] assign[=] name[self].facets
for taget[name[key]] in starred[call[name[list], parameter[call[name[facets].keys, parameter[]]]]] begin[:]
variable[_type] assign[=] call[call[name[facets]][name[key]].get, parameter[constant[_type], constant[unknown]]]
if compare[name[_type] equal[==] constant[date_histogram]] begin[:]
for taget[name[entry]] in starred[call[call[name[facets]][name[key]].get, parameter[constant[entries], list[[]]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0cb3160>, <ast.Name object at 0x7da1b0cb3340>]]] in starred[call[name[list], parameter[call[name[entry].items, parameter[]]]]] begin[:]
if compare[name[k] in list[[<ast.Constant object at 0x7da1b0cb3d00>, <ast.Constant object at 0x7da1b0cb18a0>, <ast.Constant object at 0x7da1b0cb3fa0>, <ast.Constant object at 0x7da1b0cb05e0>, <ast.Constant object at 0x7da1b0cb3c70>, <ast.Constant object at 0x7da1b0cb03a0>]]] begin[:]
continue
if <ast.UnaryOp object at 0x7da1b0cb1fc0> begin[:]
call[name[entry]][name[k]] assign[=] call[name[datetime].utcfromtimestamp, parameter[binary_operation[name[v] / constant[1000.0]]]] | keyword[def] identifier[fix_facets] ( identifier[self] ):
literal[string]
identifier[facets] = identifier[self] . identifier[facets]
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[facets] . identifier[keys] ()):
identifier[_type] = identifier[facets] [ identifier[key] ]. identifier[get] ( literal[string] , literal[string] )
keyword[if] identifier[_type] == literal[string] :
keyword[for] identifier[entry] keyword[in] identifier[facets] [ identifier[key] ]. identifier[get] ( literal[string] ,[]):
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[list] ( identifier[entry] . identifier[items] ()):
keyword[if] identifier[k] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
keyword[continue]
keyword[if] keyword[not] identifier[isinstance] ( identifier[entry] [ identifier[k] ], identifier[datetime] ):
identifier[entry] [ identifier[k] ]= identifier[datetime] . identifier[utcfromtimestamp] ( identifier[v] / literal[int] ) | def fix_facets(self):
"""
    This function converts date_histogram facets to datetime
"""
facets = self.facets
for key in list(facets.keys()):
_type = facets[key].get('_type', 'unknown')
if _type == 'date_histogram':
for entry in facets[key].get('entries', []):
for (k, v) in list(entry.items()):
if k in ['count', 'max', 'min', 'total_count', 'mean', 'total']:
continue # depends on [control=['if'], data=[]]
if not isinstance(entry[k], datetime):
entry[k] = datetime.utcfromtimestamp(v / 1000.0) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['entry']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] |
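# Illustrative sketch (not part of the original source): Elasticsearch
# date_histogram entries report epoch timestamps in milliseconds, which is why
# the value is divided by 1e3 before datetime.utcfromtimestamp above.
from datetime import datetime

entry = {'time': 1500000000000, 'count': 3}
converted = datetime.utcfromtimestamp(entry['time'] / 1e3)
assert converted.year == 2017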
def getServiceDependenciesUIDs(self):
"""
        This method returns a list with the service dependency UIDs
:return: a list of uids
"""
deps = self.getServiceDependencies()
deps_uids = [service.UID() for service in deps]
return deps_uids | def function[getServiceDependenciesUIDs, parameter[self]]:
constant[
    This method returns a list with the service dependency UIDs
:return: a list of uids
]
variable[deps] assign[=] call[name[self].getServiceDependencies, parameter[]]
variable[deps_uids] assign[=] <ast.ListComp object at 0x7da1b2315360>
return[name[deps_uids]] | keyword[def] identifier[getServiceDependenciesUIDs] ( identifier[self] ):
literal[string]
identifier[deps] = identifier[self] . identifier[getServiceDependencies] ()
identifier[deps_uids] =[ identifier[service] . identifier[UID] () keyword[for] identifier[service] keyword[in] identifier[deps] ]
keyword[return] identifier[deps_uids] | def getServiceDependenciesUIDs(self):
"""
    This method returns a list with the service dependency UIDs
:return: a list of uids
"""
deps = self.getServiceDependencies()
deps_uids = [service.UID() for service in deps]
return deps_uids |
def get_profile_model():
"""
Return the model class for the currently-active user profile
model, as defined by the ``AUTH_PROFILE_MODULE`` setting.
:return: The model that is used as profile.
"""
if (not hasattr(settings, 'AUTH_PROFILE_MODULE')) or \
(not settings.AUTH_PROFILE_MODULE):
raise SiteProfileNotAvailable
try:
profile_mod = apps.get_model(*settings.AUTH_PROFILE_MODULE.rsplit('.', 1))
except LookupError:
profile_mod = None
if profile_mod is None:
raise SiteProfileNotAvailable
return profile_mod | def function[get_profile_model, parameter[]]:
constant[
Return the model class for the currently-active user profile
model, as defined by the ``AUTH_PROFILE_MODULE`` setting.
:return: The model that is used as profile.
]
if <ast.BoolOp object at 0x7da18f723520> begin[:]
<ast.Raise object at 0x7da18f723b80>
<ast.Try object at 0x7da18f721d20>
if compare[name[profile_mod] is constant[None]] begin[:]
<ast.Raise object at 0x7da18f720bb0>
return[name[profile_mod]] | keyword[def] identifier[get_profile_model] ():
literal[string]
keyword[if] ( keyword[not] identifier[hasattr] ( identifier[settings] , literal[string] )) keyword[or] ( keyword[not] identifier[settings] . identifier[AUTH_PROFILE_MODULE] ):
keyword[raise] identifier[SiteProfileNotAvailable]
keyword[try] :
identifier[profile_mod] = identifier[apps] . identifier[get_model] (* identifier[settings] . identifier[AUTH_PROFILE_MODULE] . identifier[rsplit] ( literal[string] , literal[int] ))
keyword[except] identifier[LookupError] :
identifier[profile_mod] = keyword[None]
keyword[if] identifier[profile_mod] keyword[is] keyword[None] :
keyword[raise] identifier[SiteProfileNotAvailable]
keyword[return] identifier[profile_mod] | def get_profile_model():
"""
Return the model class for the currently-active user profile
model, as defined by the ``AUTH_PROFILE_MODULE`` setting.
:return: The model that is used as profile.
"""
if not hasattr(settings, 'AUTH_PROFILE_MODULE') or not settings.AUTH_PROFILE_MODULE:
raise SiteProfileNotAvailable # depends on [control=['if'], data=[]]
try:
profile_mod = apps.get_model(*settings.AUTH_PROFILE_MODULE.rsplit('.', 1)) # depends on [control=['try'], data=[]]
except LookupError:
profile_mod = None # depends on [control=['except'], data=[]]
if profile_mod is None:
raise SiteProfileNotAvailable # depends on [control=['if'], data=[]]
return profile_mod |
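# Illustrative sketch (not part of the original source): AUTH_PROFILE_MODULE is
# a dotted "app_label.ModelName" string, and rsplit('.', 1) yields exactly the
# two arguments apps.get_model expects; the setting value below is hypothetical.
profile_module = 'accounts.UserProfile'
app_label, model_name = profile_module.rsplit('.', 1)
assert (app_label, model_name) == ('accounts', 'UserProfile')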
def list_cron_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_cron_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V2alpha1CronJobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_cron_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_cron_job_for_all_namespaces_with_http_info(**kwargs)
return data | def function[list_cron_job_for_all_namespaces, parameter[self]]:
constant[
list or watch objects of kind CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_cron_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V2alpha1CronJobList
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].list_cron_job_for_all_namespaces_with_http_info, parameter[]]] | keyword[def] identifier[list_cron_job_for_all_namespaces] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[list_cron_job_for_all_namespaces_with_http_info] (** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[list_cron_job_for_all_namespaces_with_http_info] (** identifier[kwargs] )
keyword[return] identifier[data] | def list_cron_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_cron_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V2alpha1CronJobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_cron_job_for_all_namespaces_with_http_info(**kwargs) # depends on [control=['if'], data=[]]
else:
data = self.list_cron_job_for_all_namespaces_with_http_info(**kwargs)
return data |
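# Illustrative sketch (not part of the original source): the generated client
# methods above all share one dispatch -- force _return_http_data_only, then
# return either a thread-like handle (async_req=True) or the unwrapped data.
def call_api(**kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return 'async-handle'      # stands in for the request thread
    return {'items': []}           # stands in for the deserialized response

assert call_api() == {'items': []}
assert call_api(async_req=True) == 'async-handle'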
def get_agent_rule_str(agent):
"""Construct a string from an Agent as part of a PySB rule name."""
rule_str_list = [_n(agent.name)]
# If it's a molecular agent
if isinstance(agent, ist.Agent):
for mod in agent.mods:
mstr = abbrevs[mod.mod_type]
if mod.residue is not None:
mstr += mod.residue
if mod.position is not None:
mstr += mod.position
rule_str_list.append('%s' % mstr)
for mut in agent.mutations:
res_from = mut.residue_from if mut.residue_from else 'mut'
res_to = mut.residue_to if mut.residue_to else 'X'
if mut.position is None:
mut_site_name = res_from
else:
mut_site_name = res_from + mut.position
mstr = mut_site_name + res_to
rule_str_list.append(mstr)
if agent.bound_conditions:
for b in agent.bound_conditions:
if b.is_bound:
rule_str_list.append(_n(b.agent.name))
else:
rule_str_list.append('n' + _n(b.agent.name))
if agent.location is not None:
rule_str_list.append(_n(agent.location))
if agent.activity is not None:
if agent.activity.is_active:
rule_str_list.append(agent.activity.activity_type[:3])
else:
rule_str_list.append(agent.activity.activity_type[:3] + '_inact')
rule_str = '_'.join(rule_str_list)
return rule_str | def function[get_agent_rule_str, parameter[agent]]:
constant[Construct a string from an Agent as part of a PySB rule name.]
variable[rule_str_list] assign[=] list[[<ast.Call object at 0x7da18c4cee60>]]
if call[name[isinstance], parameter[name[agent], name[ist].Agent]] begin[:]
for taget[name[mod]] in starred[name[agent].mods] begin[:]
variable[mstr] assign[=] call[name[abbrevs]][name[mod].mod_type]
if compare[name[mod].residue is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da18c4ce4d0>
if compare[name[mod].position is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da18c4cc490>
call[name[rule_str_list].append, parameter[binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> name[mstr]]]]
for taget[name[mut]] in starred[name[agent].mutations] begin[:]
variable[res_from] assign[=] <ast.IfExp object at 0x7da18c4cd2a0>
variable[res_to] assign[=] <ast.IfExp object at 0x7da18c4cf3a0>
if compare[name[mut].position is constant[None]] begin[:]
variable[mut_site_name] assign[=] name[res_from]
variable[mstr] assign[=] binary_operation[name[mut_site_name] + name[res_to]]
call[name[rule_str_list].append, parameter[name[mstr]]]
if name[agent].bound_conditions begin[:]
for taget[name[b]] in starred[name[agent].bound_conditions] begin[:]
if name[b].is_bound begin[:]
call[name[rule_str_list].append, parameter[call[name[_n], parameter[name[b].agent.name]]]]
if compare[name[agent].location is_not constant[None]] begin[:]
call[name[rule_str_list].append, parameter[call[name[_n], parameter[name[agent].location]]]]
if compare[name[agent].activity is_not constant[None]] begin[:]
if name[agent].activity.is_active begin[:]
call[name[rule_str_list].append, parameter[call[name[agent].activity.activity_type][<ast.Slice object at 0x7da18c4cc9a0>]]]
variable[rule_str] assign[=] call[constant[_].join, parameter[name[rule_str_list]]]
return[name[rule_str]] | keyword[def] identifier[get_agent_rule_str] ( identifier[agent] ):
literal[string]
identifier[rule_str_list] =[ identifier[_n] ( identifier[agent] . identifier[name] )]
keyword[if] identifier[isinstance] ( identifier[agent] , identifier[ist] . identifier[Agent] ):
keyword[for] identifier[mod] keyword[in] identifier[agent] . identifier[mods] :
identifier[mstr] = identifier[abbrevs] [ identifier[mod] . identifier[mod_type] ]
keyword[if] identifier[mod] . identifier[residue] keyword[is] keyword[not] keyword[None] :
identifier[mstr] += identifier[mod] . identifier[residue]
keyword[if] identifier[mod] . identifier[position] keyword[is] keyword[not] keyword[None] :
identifier[mstr] += identifier[mod] . identifier[position]
identifier[rule_str_list] . identifier[append] ( literal[string] % identifier[mstr] )
keyword[for] identifier[mut] keyword[in] identifier[agent] . identifier[mutations] :
identifier[res_from] = identifier[mut] . identifier[residue_from] keyword[if] identifier[mut] . identifier[residue_from] keyword[else] literal[string]
identifier[res_to] = identifier[mut] . identifier[residue_to] keyword[if] identifier[mut] . identifier[residue_to] keyword[else] literal[string]
keyword[if] identifier[mut] . identifier[position] keyword[is] keyword[None] :
identifier[mut_site_name] = identifier[res_from]
keyword[else] :
identifier[mut_site_name] = identifier[res_from] + identifier[mut] . identifier[position]
identifier[mstr] = identifier[mut_site_name] + identifier[res_to]
identifier[rule_str_list] . identifier[append] ( identifier[mstr] )
keyword[if] identifier[agent] . identifier[bound_conditions] :
keyword[for] identifier[b] keyword[in] identifier[agent] . identifier[bound_conditions] :
keyword[if] identifier[b] . identifier[is_bound] :
identifier[rule_str_list] . identifier[append] ( identifier[_n] ( identifier[b] . identifier[agent] . identifier[name] ))
keyword[else] :
identifier[rule_str_list] . identifier[append] ( literal[string] + identifier[_n] ( identifier[b] . identifier[agent] . identifier[name] ))
keyword[if] identifier[agent] . identifier[location] keyword[is] keyword[not] keyword[None] :
identifier[rule_str_list] . identifier[append] ( identifier[_n] ( identifier[agent] . identifier[location] ))
keyword[if] identifier[agent] . identifier[activity] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[agent] . identifier[activity] . identifier[is_active] :
identifier[rule_str_list] . identifier[append] ( identifier[agent] . identifier[activity] . identifier[activity_type] [: literal[int] ])
keyword[else] :
identifier[rule_str_list] . identifier[append] ( identifier[agent] . identifier[activity] . identifier[activity_type] [: literal[int] ]+ literal[string] )
identifier[rule_str] = literal[string] . identifier[join] ( identifier[rule_str_list] )
keyword[return] identifier[rule_str] | def get_agent_rule_str(agent):
"""Construct a string from an Agent as part of a PySB rule name."""
rule_str_list = [_n(agent.name)]
# If it's a molecular agent
if isinstance(agent, ist.Agent):
for mod in agent.mods:
mstr = abbrevs[mod.mod_type]
if mod.residue is not None:
mstr += mod.residue # depends on [control=['if'], data=[]]
if mod.position is not None:
mstr += mod.position # depends on [control=['if'], data=[]]
rule_str_list.append('%s' % mstr) # depends on [control=['for'], data=['mod']]
for mut in agent.mutations:
res_from = mut.residue_from if mut.residue_from else 'mut'
res_to = mut.residue_to if mut.residue_to else 'X'
if mut.position is None:
mut_site_name = res_from # depends on [control=['if'], data=[]]
else:
mut_site_name = res_from + mut.position
mstr = mut_site_name + res_to
rule_str_list.append(mstr) # depends on [control=['for'], data=['mut']]
if agent.bound_conditions:
for b in agent.bound_conditions:
if b.is_bound:
rule_str_list.append(_n(b.agent.name)) # depends on [control=['if'], data=[]]
else:
rule_str_list.append('n' + _n(b.agent.name)) # depends on [control=['for'], data=['b']] # depends on [control=['if'], data=[]]
if agent.location is not None:
rule_str_list.append(_n(agent.location)) # depends on [control=['if'], data=[]]
if agent.activity is not None:
if agent.activity.is_active:
rule_str_list.append(agent.activity.activity_type[:3]) # depends on [control=['if'], data=[]]
else:
rule_str_list.append(agent.activity.activity_type[:3] + '_inact') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
rule_str = '_'.join(rule_str_list)
return rule_str |
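# Illustrative sketch (not part of the original source): the rule name is built
# by collecting short tokens (agent name, modification and mutation strings,
# bound partners, location, activity) and joining them with underscores; the
# tokens below are made up for illustration.
parts = ['Kinase1', 'pS222', 'Partner1', 'act']
assert '_'.join(parts) == 'Kinase1_pS222_Partner1_act'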
def name_globals(s, remove_params=None):
"""
Returns a list of the global parameter names.
Parameters
----------
s : :class:`peri.states.ImageState`
The state to name the globals of.
remove_params : Set or None
A set of unique additional parameters to remove from the globals
list.
Returns
-------
all_params : list
The list of the global parameter names, with each of
remove_params removed.
"""
all_params = s.params
for p in s.param_particle(np.arange(s.obj_get_positions().shape[0])):
all_params.remove(p)
if remove_params is not None:
for p in set(remove_params):
all_params.remove(p)
return all_params | def function[name_globals, parameter[s, remove_params]]:
constant[
Returns a list of the global parameter names.
Parameters
----------
s : :class:`peri.states.ImageState`
The state to name the globals of.
remove_params : Set or None
A set of unique additional parameters to remove from the globals
list.
Returns
-------
all_params : list
The list of the global parameter names, with each of
remove_params removed.
]
variable[all_params] assign[=] name[s].params
for taget[name[p]] in starred[call[name[s].param_particle, parameter[call[name[np].arange, parameter[call[call[name[s].obj_get_positions, parameter[]].shape][constant[0]]]]]]] begin[:]
call[name[all_params].remove, parameter[name[p]]]
if compare[name[remove_params] is_not constant[None]] begin[:]
for taget[name[p]] in starred[call[name[set], parameter[name[remove_params]]]] begin[:]
call[name[all_params].remove, parameter[name[p]]]
return[name[all_params]] | keyword[def] identifier[name_globals] ( identifier[s] , identifier[remove_params] = keyword[None] ):
literal[string]
identifier[all_params] = identifier[s] . identifier[params]
keyword[for] identifier[p] keyword[in] identifier[s] . identifier[param_particle] ( identifier[np] . identifier[arange] ( identifier[s] . identifier[obj_get_positions] (). identifier[shape] [ literal[int] ])):
identifier[all_params] . identifier[remove] ( identifier[p] )
keyword[if] identifier[remove_params] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[p] keyword[in] identifier[set] ( identifier[remove_params] ):
identifier[all_params] . identifier[remove] ( identifier[p] )
keyword[return] identifier[all_params] | def name_globals(s, remove_params=None):
"""
Returns a list of the global parameter names.
Parameters
----------
s : :class:`peri.states.ImageState`
The state to name the globals of.
remove_params : Set or None
A set of unique additional parameters to remove from the globals
list.
Returns
-------
all_params : list
The list of the global parameter names, with each of
remove_params removed.
"""
all_params = s.params
for p in s.param_particle(np.arange(s.obj_get_positions().shape[0])):
all_params.remove(p) # depends on [control=['for'], data=['p']]
if remove_params is not None:
for p in set(remove_params):
all_params.remove(p) # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=['remove_params']]
return all_params |
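# Illustrative sketch (not part of the original source): conceptually the
# function above starts from the full parameter list and strips every
# per-particle name plus any extra names requested, leaving only the globals;
# the parameter names below are made up.
all_params = ['psf-a', 'ilm-b', 'x-0', 'y-0', 'x-1', 'y-1']
particle_params = {'x-0', 'y-0', 'x-1', 'y-1'}
global_params = [p for p in all_params if p not in particle_params]
assert global_params == ['psf-a', 'ilm-b']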
def predict(self, X, exposure=None):
"""
    predict expected value of target given model and input X
often this is done via expected value of GAM given input X
Parameters
---------
X : array-like of shape (n_samples, m_features), default: None
containing the input dataset
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
Returns
-------
y : np.array of shape (n_samples,)
containing predicted values under the model
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
if exposure is not None:
exposure = np.array(exposure).astype('f')
else:
exposure = np.ones(X.shape[0]).astype('f')
check_lengths(X, exposure)
return self.predict_mu(X) * exposure | def function[predict, parameter[self, X, exposure]]:
constant[
    predict expected value of target given model and input X
often this is done via expected value of GAM given input X
Parameters
---------
X : array-like of shape (n_samples, m_features), default: None
containing the input dataset
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
Returns
-------
y : np.array of shape (n_samples,)
containing predicted values under the model
]
if <ast.UnaryOp object at 0x7da18f00c550> begin[:]
<ast.Raise object at 0x7da18f00ed40>
variable[X] assign[=] call[name[check_X], parameter[name[X]]]
if compare[name[exposure] is_not constant[None]] begin[:]
variable[exposure] assign[=] call[call[name[np].array, parameter[name[exposure]]].astype, parameter[constant[f]]]
call[name[check_lengths], parameter[name[X], name[exposure]]]
return[binary_operation[call[name[self].predict_mu, parameter[name[X]]] * name[exposure]]] | keyword[def] identifier[predict] ( identifier[self] , identifier[X] , identifier[exposure] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_is_fitted] :
keyword[raise] identifier[AttributeError] ( literal[string] )
identifier[X] = identifier[check_X] ( identifier[X] , identifier[n_feats] = identifier[self] . identifier[statistics_] [ literal[string] ],
identifier[edge_knots] = identifier[self] . identifier[edge_knots_] , identifier[dtypes] = identifier[self] . identifier[dtype] ,
identifier[features] = identifier[self] . identifier[feature] , identifier[verbose] = identifier[self] . identifier[verbose] )
keyword[if] identifier[exposure] keyword[is] keyword[not] keyword[None] :
identifier[exposure] = identifier[np] . identifier[array] ( identifier[exposure] ). identifier[astype] ( literal[string] )
keyword[else] :
identifier[exposure] = identifier[np] . identifier[ones] ( identifier[X] . identifier[shape] [ literal[int] ]). identifier[astype] ( literal[string] )
identifier[check_lengths] ( identifier[X] , identifier[exposure] )
keyword[return] identifier[self] . identifier[predict_mu] ( identifier[X] )* identifier[exposure] | def predict(self, X, exposure=None):
"""
    predict expected value of target given model and input X
often this is done via expected value of GAM given input X
Parameters
---------
X : array-like of shape (n_samples, m_features), default: None
containing the input dataset
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
Returns
-------
y : np.array of shape (n_samples,)
containing predicted values under the model
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.') # depends on [control=['if'], data=[]]
X = check_X(X, n_feats=self.statistics_['m_features'], edge_knots=self.edge_knots_, dtypes=self.dtype, features=self.feature, verbose=self.verbose)
if exposure is not None:
exposure = np.array(exposure).astype('f') # depends on [control=['if'], data=['exposure']]
else:
exposure = np.ones(X.shape[0]).astype('f')
check_lengths(X, exposure)
return self.predict_mu(X) * exposure |
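# Illustrative sketch (not part of the original source): with no exposure the
# prediction above is scaled by a vector of ones, so passing an exposure simply
# rescales the expected value element-wise.
import numpy as np

mu = np.array([1.0, 2.0, 3.0])                 # stands in for predict_mu(X)
default_exposure = np.ones(mu.shape[0]).astype('f')
assert np.allclose(mu * default_exposure, mu)
assert np.allclose(mu * np.array([2.0, 2.0, 2.0]), [2.0, 4.0, 6.0])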
def explain_prediction(estimator, doc, **kwargs):
"""
Return an explanation of an estimator prediction.
:func:`explain_prediction` is not doing any work itself, it dispatches
to a concrete implementation based on estimator type.
Parameters
----------
estimator : object
Estimator instance. This argument must be positional.
doc : object
Example to run estimator on. Estimator makes a prediction for this
example, and :func:`explain_prediction` tries to show information
about this prediction. Pass a single element, not a one-element array:
if you fitted your estimator on ``X``, that would be ``X[i]`` for
most containers, and ``X.iloc[i]`` for ``pandas.DataFrame``.
top : int or (int, int) tuple, optional
Number of features to show. When ``top`` is int, ``top`` features with
a highest absolute values are shown. When it is (pos, neg) tuple,
no more than ``pos`` positive features and no more than ``neg``
negative features is shown. ``None`` value means no limit (default).
This argument may be supported or not, depending on estimator type.
top_targets : int, optional
Number of targets to show. When ``top_targets`` is provided,
only specified number of targets with highest scores are shown.
Negative value means targets with lowest scores are shown.
Must not be given with ``targets`` argument.
``None`` value means no limit: all targets are shown (default).
This argument may be supported or not, depending on estimator type.
target_names : list[str] or {'old_name': 'new_name'} dict, optional
Names of targets or classes. This argument can be used to provide
human-readable class/target names for estimators which don't expose
        class names themselves. It can also be used to rename estimator-provided
classes before displaying them.
This argument may be supported or not, depending on estimator type.
targets : list, optional
Order of class/target names to show. This argument can be also used
to show information only for a subset of classes. It should be a list
of class / target names which match either names provided by
an estimator or names defined in ``target_names`` parameter.
Must not be given with ``top_targets`` argument.
In case of binary classification you can use this argument to
set the class which probability or score should be displayed, with
an appropriate explanation. By default a result for predicted class
is shown. For example, you can use ``targets=[True]`` to always show
result for a positive class, even if the predicted label is False.
This argument may be supported or not, depending on estimator type.
feature_names : list, optional
A list of feature names. It allows to specify feature
names when they are not provided by an estimator object.
This argument may be supported or not, depending on estimator type.
feature_re : str, optional
Only feature names which match ``feature_re`` regex are returned
(more precisely, ``re.search(feature_re, x)`` is checked).
feature_filter : Callable[[str, float], bool], optional
Only feature names for which ``feature_filter`` function returns True
are returned. It must accept feature name and feature value.
Missing features always have a NaN value.
**kwargs: dict
Keyword arguments. All keyword arguments are passed to
concrete explain_prediction... implementations.
Returns
-------
Explanation
:class:`~.Explanation` result. Use one of the formatting functions from
:mod:`eli5.formatters` to print it in a human-readable form.
Explanation instances have repr which works well with
IPython notebook, but it can be a better idea to use
:func:`eli5.show_prediction` instead of :func:`eli5.explain_prediction`
if you work with IPython: :func:`eli5.show_prediction` allows to
customize formatting without a need to import :mod:`eli5.formatters`
functions.
"""
return Explanation(
estimator=repr(estimator),
error="estimator %r is not supported" % estimator,
) | def function[explain_prediction, parameter[estimator, doc]]:
constant[
Return an explanation of an estimator prediction.
:func:`explain_prediction` is not doing any work itself, it dispatches
to a concrete implementation based on estimator type.
Parameters
----------
estimator : object
Estimator instance. This argument must be positional.
doc : object
Example to run estimator on. Estimator makes a prediction for this
example, and :func:`explain_prediction` tries to show information
about this prediction. Pass a single element, not a one-element array:
if you fitted your estimator on ``X``, that would be ``X[i]`` for
most containers, and ``X.iloc[i]`` for ``pandas.DataFrame``.
top : int or (int, int) tuple, optional
Number of features to show. When ``top`` is int, ``top`` features with
a highest absolute values are shown. When it is (pos, neg) tuple,
no more than ``pos`` positive features and no more than ``neg``
negative features is shown. ``None`` value means no limit (default).
This argument may be supported or not, depending on estimator type.
top_targets : int, optional
Number of targets to show. When ``top_targets`` is provided,
only specified number of targets with highest scores are shown.
Negative value means targets with lowest scores are shown.
Must not be given with ``targets`` argument.
``None`` value means no limit: all targets are shown (default).
This argument may be supported or not, depending on estimator type.
target_names : list[str] or {'old_name': 'new_name'} dict, optional
Names of targets or classes. This argument can be used to provide
human-readable class/target names for estimators which don't expose
    class names themselves. It can also be used to rename estimator-provided
classes before displaying them.
This argument may be supported or not, depending on estimator type.
targets : list, optional
Order of class/target names to show. This argument can be also used
to show information only for a subset of classes. It should be a list
of class / target names which match either names provided by
an estimator or names defined in ``target_names`` parameter.
Must not be given with ``top_targets`` argument.
In case of binary classification you can use this argument to
set the class which probability or score should be displayed, with
an appropriate explanation. By default a result for predicted class
is shown. For example, you can use ``targets=[True]`` to always show
result for a positive class, even if the predicted label is False.
This argument may be supported or not, depending on estimator type.
feature_names : list, optional
A list of feature names. It allows to specify feature
names when they are not provided by an estimator object.
This argument may be supported or not, depending on estimator type.
feature_re : str, optional
Only feature names which match ``feature_re`` regex are returned
(more precisely, ``re.search(feature_re, x)`` is checked).
feature_filter : Callable[[str, float], bool], optional
Only feature names for which ``feature_filter`` function returns True
are returned. It must accept feature name and feature value.
Missing features always have a NaN value.
**kwargs: dict
Keyword arguments. All keyword arguments are passed to
concrete explain_prediction... implementations.
Returns
-------
Explanation
:class:`~.Explanation` result. Use one of the formatting functions from
:mod:`eli5.formatters` to print it in a human-readable form.
Explanation instances have repr which works well with
IPython notebook, but it can be a better idea to use
:func:`eli5.show_prediction` instead of :func:`eli5.explain_prediction`
if you work with IPython: :func:`eli5.show_prediction` allows to
customize formatting without a need to import :mod:`eli5.formatters`
functions.
]
return[call[name[Explanation], parameter[]]] | keyword[def] identifier[explain_prediction] ( identifier[estimator] , identifier[doc] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[Explanation] (
identifier[estimator] = identifier[repr] ( identifier[estimator] ),
identifier[error] = literal[string] % identifier[estimator] ,
) | def explain_prediction(estimator, doc, **kwargs):
"""
Return an explanation of an estimator prediction.
:func:`explain_prediction` is not doing any work itself, it dispatches
to a concrete implementation based on estimator type.
Parameters
----------
estimator : object
Estimator instance. This argument must be positional.
doc : object
Example to run estimator on. Estimator makes a prediction for this
example, and :func:`explain_prediction` tries to show information
about this prediction. Pass a single element, not a one-element array:
if you fitted your estimator on ``X``, that would be ``X[i]`` for
most containers, and ``X.iloc[i]`` for ``pandas.DataFrame``.
top : int or (int, int) tuple, optional
Number of features to show. When ``top`` is int, ``top`` features with
a highest absolute values are shown. When it is (pos, neg) tuple,
no more than ``pos`` positive features and no more than ``neg``
negative features is shown. ``None`` value means no limit (default).
This argument may be supported or not, depending on estimator type.
top_targets : int, optional
Number of targets to show. When ``top_targets`` is provided,
only specified number of targets with highest scores are shown.
Negative value means targets with lowest scores are shown.
Must not be given with ``targets`` argument.
``None`` value means no limit: all targets are shown (default).
This argument may be supported or not, depending on estimator type.
target_names : list[str] or {'old_name': 'new_name'} dict, optional
Names of targets or classes. This argument can be used to provide
human-readable class/target names for estimators which don't expose
    class names themselves. It can also be used to rename estimator-provided
classes before displaying them.
This argument may be supported or not, depending on estimator type.
targets : list, optional
Order of class/target names to show. This argument can be also used
to show information only for a subset of classes. It should be a list
of class / target names which match either names provided by
an estimator or names defined in ``target_names`` parameter.
Must not be given with ``top_targets`` argument.
In case of binary classification you can use this argument to
set the class which probability or score should be displayed, with
an appropriate explanation. By default a result for predicted class
is shown. For example, you can use ``targets=[True]`` to always show
result for a positive class, even if the predicted label is False.
This argument may be supported or not, depending on estimator type.
feature_names : list, optional
A list of feature names. It allows to specify feature
names when they are not provided by an estimator object.
This argument may be supported or not, depending on estimator type.
feature_re : str, optional
Only feature names which match ``feature_re`` regex are returned
(more precisely, ``re.search(feature_re, x)`` is checked).
feature_filter : Callable[[str, float], bool], optional
Only feature names for which ``feature_filter`` function returns True
are returned. It must accept feature name and feature value.
Missing features always have a NaN value.
**kwargs: dict
Keyword arguments. All keyword arguments are passed to
concrete explain_prediction... implementations.
Returns
-------
Explanation
:class:`~.Explanation` result. Use one of the formatting functions from
:mod:`eli5.formatters` to print it in a human-readable form.
Explanation instances have repr which works well with
IPython notebook, but it can be a better idea to use
:func:`eli5.show_prediction` instead of :func:`eli5.explain_prediction`
if you work with IPython: :func:`eli5.show_prediction` allows to
customize formatting without a need to import :mod:`eli5.formatters`
functions.
"""
return Explanation(estimator=repr(estimator), error='estimator %r is not supported' % estimator) |
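A usage sketch for the explain_prediction API documented above, assuming eli5 and scikit-learn are installed; the iris data, the max_iter value, and the printed formatting choice are only illustrative.

import eli5
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

# Fit any supported estimator; a linear model keeps the explanation simple.
data = load_iris()
clf = LogisticRegression(max_iter=1000).fit(data.data, data.target)

# Explain one example (note: pass X[i], not X[i:i+1]).
expl = eli5.explain_prediction(
    clf,
    data.data[0],
    feature_names=list(data.feature_names),
    target_names=list(data.target_names),
)
print(eli5.format_as_text(expl))  # render the Explanation object as plain text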
def as_ndarray(arr, copy=False, dtype=None, order='K'):
"""Convert an arbitrary array to numpy.ndarray.
In the case of a memmap array, a copy is automatically made to break the
link with the underlying file (whatever the value of the "copy" keyword).
The purpose of this function is mainly to get rid of memmap objects, but
it can be used for other purposes. In particular, combining copying and
casting can lead to performance improvements in some cases, by avoiding
unnecessary copies.
If not specified, input array order is preserved, in all cases, even when
a copy is requested.
Caveat: this function does not copy during bool to/from 1-byte dtype
conversions. This can lead to some surprising results in some rare cases.
Example:
a = numpy.asarray([0, 1, 2], dtype=numpy.int8)
b = as_ndarray(a, dtype=bool) # array([False, True, True], dtype=bool)
c = as_ndarray(b, dtype=numpy.int8) # array([0, 1, 2], dtype=numpy.int8)
The usually expected result for the last line would be array([0, 1, 1])
because True evaluates to 1. Since there is no copy made here, the original
array is recovered.
Parameters
----------
arr: array-like
input array. Any value accepted by numpy.asarray is valid.
copy: bool
if True, force a copy of the array. Always True when arr is a memmap.
dtype: any numpy dtype
dtype of the returned array. Performing copy and type conversion at the
same time can in some cases avoid an additional copy.
order: string
gives the order of the returned array.
Valid values are: "C", "F", "A", "K", None.
default is "K". See ndarray.copy() for more information.
Returns
-------
ret: np.ndarray
Numpy array containing the same data as arr, always of class
numpy.ndarray, and with no link to any underlying file.
"""
if order not in ('C', 'F', 'A', 'K', None):
raise ValueError("Invalid value for 'order': {}".format(str(order)))
if isinstance(arr, np.memmap):
if dtype is None:
if order in ('K', 'A', None):
ret = np.array(np.asarray(arr), copy=True)
else:
ret = np.array(np.asarray(arr), copy=True, order=order)
else:
if order in ('K', 'A', None):
# always copy (even when dtype does not change)
ret = np.asarray(arr).astype(dtype)
else:
# load data from disk without changing order
# Changing order while reading through a memmap is incredibly
# inefficient.
ret = _asarray(np.array(arr, copy=True), dtype=dtype, order=order)
elif isinstance(arr, np.ndarray):
ret = _asarray(arr, dtype=dtype, order=order)
# In the present case, np.may_share_memory result is always reliable.
if np.may_share_memory(ret, arr) and copy:
# order-preserving copy
ret = ret.T.copy().T if ret.flags['F_CONTIGUOUS'] else ret.copy()
elif isinstance(arr, (list, tuple)):
if order in ("A", "K"):
ret = np.asarray(arr, dtype=dtype)
else:
ret = np.asarray(arr, dtype=dtype, order=order)
else:
raise ValueError("Type not handled: {}".format(arr.__class__))
return ret | def function[as_ndarray, parameter[arr, copy, dtype, order]]:
constant[Convert an arbitrary array to numpy.ndarray.
In the case of a memmap array, a copy is automatically made to break the
link with the underlying file (whatever the value of the "copy" keyword).
The purpose of this function is mainly to get rid of memmap objects, but
it can be used for other purposes. In particular, combining copying and
casting can lead to performance improvements in some cases, by avoiding
unnecessary copies.
If not specified, input array order is preserved, in all cases, even when
a copy is requested.
Caveat: this function does not copy during bool to/from 1-byte dtype
conversions. This can lead to some surprising results in some rare cases.
Example:
a = numpy.asarray([0, 1, 2], dtype=numpy.int8)
b = as_ndarray(a, dtype=bool) # array([False, True, True], dtype=bool)
c = as_ndarray(b, dtype=numpy.int8) # array([0, 1, 2], dtype=numpy.int8)
The usually expected result for the last line would be array([0, 1, 1])
because True evaluates to 1. Since there is no copy made here, the original
array is recovered.
Parameters
----------
arr: array-like
input array. Any value accepted by numpy.asarray is valid.
copy: bool
if True, force a copy of the array. Always True when arr is a memmap.
dtype: any numpy dtype
dtype of the returned array. Performing copy and type conversion at the
same time can in some cases avoid an additional copy.
order: string
gives the order of the returned array.
Valid values are: "C", "F", "A", "K", None.
default is "K". See ndarray.copy() for more information.
Returns
-------
ret: np.ndarray
Numpy array containing the same data as arr, always of class
numpy.ndarray, and with no link to any underlying file.
]
if compare[name[order] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1afe7bd00>, <ast.Constant object at 0x7da1afe7b9d0>, <ast.Constant object at 0x7da1afe794e0>, <ast.Constant object at 0x7da1afe79a80>, <ast.Constant object at 0x7da1afe78910>]]] begin[:]
<ast.Raise object at 0x7da1afe78b20>
if call[name[isinstance], parameter[name[arr], name[np].memmap]] begin[:]
if compare[name[dtype] is constant[None]] begin[:]
if compare[name[order] in tuple[[<ast.Constant object at 0x7da1afe7a830>, <ast.Constant object at 0x7da1afe79330>, <ast.Constant object at 0x7da1afe78b50>]]] begin[:]
variable[ret] assign[=] call[name[np].array, parameter[call[name[np].asarray, parameter[name[arr]]]]]
return[name[ret]] | keyword[def] identifier[as_ndarray] ( identifier[arr] , identifier[copy] = keyword[False] , identifier[dtype] = keyword[None] , identifier[order] = literal[string] ):
literal[string]
keyword[if] identifier[order] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , keyword[None] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[str] ( identifier[order] )))
keyword[if] identifier[isinstance] ( identifier[arr] , identifier[np] . identifier[memmap] ):
keyword[if] identifier[dtype] keyword[is] keyword[None] :
keyword[if] identifier[order] keyword[in] ( literal[string] , literal[string] , keyword[None] ):
identifier[ret] = identifier[np] . identifier[array] ( identifier[np] . identifier[asarray] ( identifier[arr] ), identifier[copy] = keyword[True] )
keyword[else] :
identifier[ret] = identifier[np] . identifier[array] ( identifier[np] . identifier[asarray] ( identifier[arr] ), identifier[copy] = keyword[True] , identifier[order] = identifier[order] )
keyword[else] :
keyword[if] identifier[order] keyword[in] ( literal[string] , literal[string] , keyword[None] ):
identifier[ret] = identifier[np] . identifier[asarray] ( identifier[arr] ). identifier[astype] ( identifier[dtype] )
keyword[else] :
identifier[ret] = identifier[_asarray] ( identifier[np] . identifier[array] ( identifier[arr] , identifier[copy] = keyword[True] ), identifier[dtype] = identifier[dtype] , identifier[order] = identifier[order] )
keyword[elif] identifier[isinstance] ( identifier[arr] , identifier[np] . identifier[ndarray] ):
identifier[ret] = identifier[_asarray] ( identifier[arr] , identifier[dtype] = identifier[dtype] , identifier[order] = identifier[order] )
keyword[if] identifier[np] . identifier[may_share_memory] ( identifier[ret] , identifier[arr] ) keyword[and] identifier[copy] :
identifier[ret] = identifier[ret] . identifier[T] . identifier[copy] (). identifier[T] keyword[if] identifier[ret] . identifier[flags] [ literal[string] ] keyword[else] identifier[ret] . identifier[copy] ()
keyword[elif] identifier[isinstance] ( identifier[arr] ,( identifier[list] , identifier[tuple] )):
keyword[if] identifier[order] keyword[in] ( literal[string] , literal[string] ):
identifier[ret] = identifier[np] . identifier[asarray] ( identifier[arr] , identifier[dtype] = identifier[dtype] )
keyword[else] :
identifier[ret] = identifier[np] . identifier[asarray] ( identifier[arr] , identifier[dtype] = identifier[dtype] , identifier[order] = identifier[order] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[arr] . identifier[__class__] ))
keyword[return] identifier[ret] | def as_ndarray(arr, copy=False, dtype=None, order='K'):
"""Convert an arbitrary array to numpy.ndarray.
In the case of a memmap array, a copy is automatically made to break the
link with the underlying file (whatever the value of the "copy" keyword).
The purpose of this function is mainly to get rid of memmap objects, but
it can be used for other purposes. In particular, combining copying and
casting can lead to performance improvements in some cases, by avoiding
unnecessary copies.
If not specified, input array order is preserved, in all cases, even when
a copy is requested.
Caveat: this function does not copy during bool to/from 1-byte dtype
conversions. This can lead to some surprising results in some rare cases.
Example:
a = numpy.asarray([0, 1, 2], dtype=numpy.int8)
b = as_ndarray(a, dtype=bool) # array([False, True, True], dtype=bool)
c = as_ndarray(b, dtype=numpy.int8) # array([0, 1, 2], dtype=numpy.int8)
The usually expected result for the last line would be array([0, 1, 1])
because True evaluates to 1. Since there is no copy made here, the original
array is recovered.
Parameters
----------
arr: array-like
input array. Any value accepted by numpy.asarray is valid.
copy: bool
if True, force a copy of the array. Always True when arr is a memmap.
dtype: any numpy dtype
dtype of the returned array. Performing copy and type conversion at the
same time can in some cases avoid an additional copy.
order: string
gives the order of the returned array.
Valid values are: "C", "F", "A", "K", None.
default is "K". See ndarray.copy() for more information.
Returns
-------
ret: np.ndarray
Numpy array containing the same data as arr, always of class
numpy.ndarray, and with no link to any underlying file.
"""
if order not in ('C', 'F', 'A', 'K', None):
raise ValueError("Invalid value for 'order': {}".format(str(order))) # depends on [control=['if'], data=['order']]
if isinstance(arr, np.memmap):
if dtype is None:
if order in ('K', 'A', None):
ret = np.array(np.asarray(arr), copy=True) # depends on [control=['if'], data=[]]
else:
ret = np.array(np.asarray(arr), copy=True, order=order) # depends on [control=['if'], data=[]]
elif order in ('K', 'A', None):
# always copy (even when dtype does not change)
ret = np.asarray(arr).astype(dtype) # depends on [control=['if'], data=[]]
else:
# load data from disk without changing order
# Changing order while reading through a memmap is incredibly
# inefficient.
ret = _asarray(np.array(arr, copy=True), dtype=dtype, order=order) # depends on [control=['if'], data=[]]
elif isinstance(arr, np.ndarray):
ret = _asarray(arr, dtype=dtype, order=order)
# In the present case, np.may_share_memory result is always reliable.
if np.may_share_memory(ret, arr) and copy:
# order-preserving copy
ret = ret.T.copy().T if ret.flags['F_CONTIGUOUS'] else ret.copy() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(arr, (list, tuple)):
if order in ('A', 'K'):
ret = np.asarray(arr, dtype=dtype) # depends on [control=['if'], data=[]]
else:
ret = np.asarray(arr, dtype=dtype, order=order) # depends on [control=['if'], data=[]]
else:
raise ValueError('Type not handled: {}'.format(arr.__class__))
return ret |
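A brief sketch of how as_ndarray behaves on the branches that do not touch the private _asarray helper (which is not shown in this row); the scratch file name below is just a placeholder.

import numpy as np

# A small memmap backed by a scratch file.
mm = np.memmap('demo.dat', dtype=np.float32, mode='w+', shape=(3,))
mm[:] = [1.0, 2.0, 3.0]

arr = as_ndarray(mm)                     # plain ndarray, no link to the file
print(type(arr), arr.flags['OWNDATA'])   # <class 'numpy.ndarray'> True

# Lists and tuples are accepted too, with optional dtype/order.
print(as_ndarray([0, 1, 2], dtype=np.int8, order='C'))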
def rebuild_proxies(self, prepared_request, proxies):
"""This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
:rtype: dict
"""
proxies = proxies if proxies is not None else {}
headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = proxies.copy()
no_proxy = proxies.get('no_proxy')
bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy)
if self.trust_env and not bypass_proxy:
environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
proxy = environ_proxies.get(scheme, environ_proxies.get('all'))
if proxy:
new_proxies.setdefault(scheme, proxy)
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization']
try:
username, password = get_auth_from_url(new_proxies[scheme])
except KeyError:
username, password = None, None
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
return new_proxies | def function[rebuild_proxies, parameter[self, prepared_request, proxies]]:
constant[This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
:rtype: dict
]
variable[proxies] assign[=] <ast.IfExp object at 0x7da18fe90130>
variable[headers] assign[=] name[prepared_request].headers
variable[url] assign[=] name[prepared_request].url
variable[scheme] assign[=] call[name[urlparse], parameter[name[url]]].scheme
variable[new_proxies] assign[=] call[name[proxies].copy, parameter[]]
variable[no_proxy] assign[=] call[name[proxies].get, parameter[constant[no_proxy]]]
variable[bypass_proxy] assign[=] call[name[should_bypass_proxies], parameter[name[url]]]
if <ast.BoolOp object at 0x7da18fe903d0> begin[:]
variable[environ_proxies] assign[=] call[name[get_environ_proxies], parameter[name[url]]]
variable[proxy] assign[=] call[name[environ_proxies].get, parameter[name[scheme], call[name[environ_proxies].get, parameter[constant[all]]]]]
if name[proxy] begin[:]
call[name[new_proxies].setdefault, parameter[name[scheme], name[proxy]]]
if compare[constant[Proxy-Authorization] in name[headers]] begin[:]
<ast.Delete object at 0x7da18fe927a0>
<ast.Try object at 0x7da18fe90b20>
if <ast.BoolOp object at 0x7da18fe92980> begin[:]
call[name[headers]][constant[Proxy-Authorization]] assign[=] call[name[_basic_auth_str], parameter[name[username], name[password]]]
return[name[new_proxies]] | keyword[def] identifier[rebuild_proxies] ( identifier[self] , identifier[prepared_request] , identifier[proxies] ):
literal[string]
identifier[proxies] = identifier[proxies] keyword[if] identifier[proxies] keyword[is] keyword[not] keyword[None] keyword[else] {}
identifier[headers] = identifier[prepared_request] . identifier[headers]
identifier[url] = identifier[prepared_request] . identifier[url]
identifier[scheme] = identifier[urlparse] ( identifier[url] ). identifier[scheme]
identifier[new_proxies] = identifier[proxies] . identifier[copy] ()
identifier[no_proxy] = identifier[proxies] . identifier[get] ( literal[string] )
identifier[bypass_proxy] = identifier[should_bypass_proxies] ( identifier[url] , identifier[no_proxy] = identifier[no_proxy] )
keyword[if] identifier[self] . identifier[trust_env] keyword[and] keyword[not] identifier[bypass_proxy] :
identifier[environ_proxies] = identifier[get_environ_proxies] ( identifier[url] , identifier[no_proxy] = identifier[no_proxy] )
identifier[proxy] = identifier[environ_proxies] . identifier[get] ( identifier[scheme] , identifier[environ_proxies] . identifier[get] ( literal[string] ))
keyword[if] identifier[proxy] :
identifier[new_proxies] . identifier[setdefault] ( identifier[scheme] , identifier[proxy] )
keyword[if] literal[string] keyword[in] identifier[headers] :
keyword[del] identifier[headers] [ literal[string] ]
keyword[try] :
identifier[username] , identifier[password] = identifier[get_auth_from_url] ( identifier[new_proxies] [ identifier[scheme] ])
keyword[except] identifier[KeyError] :
identifier[username] , identifier[password] = keyword[None] , keyword[None]
keyword[if] identifier[username] keyword[and] identifier[password] :
identifier[headers] [ literal[string] ]= identifier[_basic_auth_str] ( identifier[username] , identifier[password] )
keyword[return] identifier[new_proxies] | def rebuild_proxies(self, prepared_request, proxies):
"""This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
:rtype: dict
"""
proxies = proxies if proxies is not None else {}
headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = proxies.copy()
no_proxy = proxies.get('no_proxy')
bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy)
if self.trust_env and (not bypass_proxy):
environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
proxy = environ_proxies.get(scheme, environ_proxies.get('all'))
if proxy:
new_proxies.setdefault(scheme, proxy) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization'] # depends on [control=['if'], data=['headers']]
try:
(username, password) = get_auth_from_url(new_proxies[scheme]) # depends on [control=['try'], data=[]]
except KeyError:
(username, password) = (None, None) # depends on [control=['except'], data=[]]
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password) # depends on [control=['if'], data=[]]
return new_proxies |
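A rough illustration of calling this method through requests.Session (which mixes it in); the proxy URL is a made-up placeholder and nothing is sent over the network.

import requests

session = requests.Session()
prepared = requests.Request('GET', 'http://example.com/').prepare()

# Re-evaluate proxies the way the redirect handler would after a hop.
proxies = {'http': 'http://proxy.local:3128'}
print(session.rebuild_proxies(prepared, proxies))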
def fetch(version='bayestar2017'):
"""
Downloads the specified version of the Bayestar dust map.
Args:
version (Optional[:obj:`str`]): The map version to download. Valid versions are
:obj:`'bayestar2017'` (Green, Schlafly, Finkbeiner et al. 2018) and
:obj:`'bayestar2015'` (Green, Schlafly, Finkbeiner et al. 2015). Defaults
to :obj:`'bayestar2017'`.
Raises:
:obj:`ValueError`: The requested version of the map does not exist.
:obj:`DownloadError`: Either no matching file was found under the given DOI, or
the MD5 sum of the file was not as expected.
:obj:`requests.exceptions.HTTPError`: The given DOI does not exist, or there
was a problem connecting to the Dataverse.
"""
doi = {
'bayestar2015': '10.7910/DVN/40C44C',
'bayestar2017': '10.7910/DVN/LCYHJG'
}
# Raise an error if the specified version of the map does not exist
try:
doi = doi[version]
except KeyError as err:
raise ValueError('Version "{}" does not exist. Valid versions are: {}'.format(
version,
', '.join(['"{}"'.format(k) for k in doi.keys()])
))
requirements = {
'bayestar2015': {'contentType': 'application/x-hdf'},
'bayestar2017': {'filename': 'bayestar2017.h5'}
}[version]
local_fname = os.path.join(data_dir(), 'bayestar', '{}.h5'.format(version))
# Download the data
fetch_utils.dataverse_download_doi(
doi,
local_fname,
file_requirements=requirements) | def function[fetch, parameter[version]]:
constant[
Downloads the specified version of the Bayestar dust map.
Args:
version (Optional[:obj:`str`]): The map version to download. Valid versions are
:obj:`'bayestar2017'` (Green, Schlafly, Finkbeiner et al. 2018) and
:obj:`'bayestar2015'` (Green, Schlafly, Finkbeiner et al. 2015). Defaults
to :obj:`'bayestar2017'`.
Raises:
:obj:`ValueError`: The requested version of the map does not exist.
:obj:`DownloadError`: Either no matching file was found under the given DOI, or
the MD5 sum of the file was not as expected.
:obj:`requests.exceptions.HTTPError`: The given DOI does not exist, or there
was a problem connecting to the Dataverse.
]
variable[doi] assign[=] dictionary[[<ast.Constant object at 0x7da18f58cc10>, <ast.Constant object at 0x7da18f58fb20>], [<ast.Constant object at 0x7da18f58cd00>, <ast.Constant object at 0x7da18f58cb50>]]
<ast.Try object at 0x7da18f58d870>
variable[requirements] assign[=] call[dictionary[[<ast.Constant object at 0x7da18f00d3c0>, <ast.Constant object at 0x7da18f00c2b0>], [<ast.Dict object at 0x7da18f00f6d0>, <ast.Dict object at 0x7da18f00d960>]]][name[version]]
variable[local_fname] assign[=] call[name[os].path.join, parameter[call[name[data_dir], parameter[]], constant[bayestar], call[constant[{}.h5].format, parameter[name[version]]]]]
call[name[fetch_utils].dataverse_download_doi, parameter[name[doi], name[local_fname]]] | keyword[def] identifier[fetch] ( identifier[version] = literal[string] ):
literal[string]
identifier[doi] ={
literal[string] : literal[string] ,
literal[string] : literal[string]
}
keyword[try] :
identifier[doi] = identifier[doi] [ identifier[version] ]
keyword[except] identifier[KeyError] keyword[as] identifier[err] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[version] ,
literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[k] ) keyword[for] identifier[k] keyword[in] identifier[doi] . identifier[keys] ()])
))
identifier[requirements] ={
literal[string] :{ literal[string] : literal[string] },
literal[string] :{ literal[string] : literal[string] }
}[ identifier[version] ]
identifier[local_fname] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_dir] (), literal[string] , literal[string] . identifier[format] ( identifier[version] ))
identifier[fetch_utils] . identifier[dataverse_download_doi] (
identifier[doi] ,
identifier[local_fname] ,
identifier[file_requirements] = identifier[requirements] ) | def fetch(version='bayestar2017'):
"""
Downloads the specified version of the Bayestar dust map.
Args:
version (Optional[:obj:`str`]): The map version to download. Valid versions are
:obj:`'bayestar2017'` (Green, Schlafly, Finkbeiner et al. 2018) and
:obj:`'bayestar2015'` (Green, Schlafly, Finkbeiner et al. 2015). Defaults
to :obj:`'bayestar2017'`.
Raises:
:obj:`ValueError`: The requested version of the map does not exist.
:obj:`DownloadError`: Either no matching file was found under the given DOI, or
the MD5 sum of the file was not as expected.
:obj:`requests.exceptions.HTTPError`: The given DOI does not exist, or there
was a problem connecting to the Dataverse.
"""
doi = {'bayestar2015': '10.7910/DVN/40C44C', 'bayestar2017': '10.7910/DVN/LCYHJG'}
# Raise an error if the specified version of the map does not exist
try:
doi = doi[version] # depends on [control=['try'], data=[]]
except KeyError as err:
raise ValueError('Version "{}" does not exist. Valid versions are: {}'.format(version, ', '.join(['"{}"'.format(k) for k in doi.keys()]))) # depends on [control=['except'], data=[]]
requirements = {'bayestar2015': {'contentType': 'application/x-hdf'}, 'bayestar2017': {'filename': 'bayestar2017.h5'}}[version]
local_fname = os.path.join(data_dir(), 'bayestar', '{}.h5'.format(version))
# Download the data
fetch_utils.dataverse_download_doi(doi, local_fname, file_requirements=requirements) |
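For context, this is roughly how the helper is invoked in the dustmaps package; the module paths are assumed from the surrounding code, the data directory is a placeholder, and the call downloads a large file.

import dustmaps.bayestar
from dustmaps.config import config

# Choose where the downloaded map should live before fetching.
config['data_dir'] = '/tmp/dustmaps_data'

# Grab the 2015 map instead of the default 'bayestar2017'.
dustmaps.bayestar.fetch(version='bayestar2015')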
def nonlinear_odr(x, y, dx, dy, func, params_init, **kwargs):
"""Perform a non-linear orthogonal distance regression, return the results as
ErrorValue() instances.
Inputs:
x: one-dimensional numpy array of the independent variable
y: one-dimensional numpy array of the dependent variable
dx: absolute error (square root of the variance) of the independent
variable. Either a one-dimensional numpy array or None. If None,
weighting is disabled. Non-finite (NaN or inf) elements signify
that the corresponding element in x is to be treated as fixed by
ODRPACK.
dy: absolute error (square root of the variance) of the dependent
variable. Either a one-dimensional numpy array or None. If None,
weighting is disabled.
func: a callable with the signature
func(x,par1,par2,par3,...)
params_init: list or tuple of the first estimates of the
parameters par1, par2, par3 etc. to be fitted
other optional keyword arguments will be passed to leastsq().
Outputs: par1, par2, par3, ... , statdict
par1, par2, par3, ...: fitted values of par1, par2, par3 etc
as instances of ErrorValue.
statdict: dictionary of various statistical parameters:
'DoF': Degrees of freedom
'Chi2': Chi-squared
'Chi2_reduced': Reduced Chi-squared
'num_func_eval': number of function evaluations during fit.
'func_value': the function evaluated in the best fitting parameters
'message': status message from leastsq()
'error_flag': integer status flag from leastsq() ('ier')
'Covariance': covariance matrix (variances in the diagonal)
'Correlation_coeffs': Pearson's correlation coefficients (usually
denoted by 'r') in a matrix. The diagonal is unity.
Notes:
for the actual fitting, the module scipy.odr is used.
"""
odrmodel=odr.Model(lambda pars, x: func(x,*pars))
if dx is not None:
# treat non-finite values as fixed
xfixed=np.isfinite(dx)
else:
xfixed=None
odrdata=odr.RealData(x, y, sx=dx,sy=dy, fix=xfixed)
odrodr=odr.ODR(odrdata,odrmodel,params_init,ifixb=[not(isinstance(p,FixedParameter)) for p in params_init],
**kwargs)
odroutput=odrodr.run()
statdict=odroutput.__dict__.copy()
statdict['Covariance']=odroutput.cov_beta
statdict['Correlation_coeffs']=odroutput.cov_beta/np.outer(odroutput.sd_beta,odroutput.sd_beta)
statdict['DoF']=len(x)-len(odroutput.beta)
statdict['Chi2_reduced']=statdict['res_var']
statdict['func_value']=statdict['y']
statdict['Chi2']=statdict['sum_square']
def convert(p_, dp_, pi):
if isinstance(pi, FixedParameter):
return FixedParameter(p_)
else:
return ErrorValue(p_, dp_)
return tuple([convert(p_, dp_, pi) for (p_, dp_, pi) in zip(odroutput.beta, odroutput.sd_beta, params_init)] + [statdict]) | def function[nonlinear_odr, parameter[x, y, dx, dy, func, params_init]]:
constant[Perform a non-linear orthogonal distance regression, return the results as
ErrorValue() instances.
Inputs:
x: one-dimensional numpy array of the independent variable
y: one-dimensional numpy array of the dependent variable
dx: absolute error (square root of the variance) of the independent
variable. Either a one-dimensional numpy array or None. If None,
weighting is disabled. Non-finite (NaN or inf) elements signify
that the corresponding element in x is to be treated as fixed by
ODRPACK.
dy: absolute error (square root of the variance) of the dependent
variable. Either a one-dimensional numpy array or None. If None,
weighting is disabled.
func: a callable with the signature
func(x,par1,par2,par3,...)
params_init: list or tuple of the first estimates of the
parameters par1, par2, par3 etc. to be fitted
other optional keyword arguments will be passed to leastsq().
Outputs: par1, par2, par3, ... , statdict
par1, par2, par3, ...: fitted values of par1, par2, par3 etc
as instances of ErrorValue.
statdict: dictionary of various statistical parameters:
'DoF': Degrees of freedom
'Chi2': Chi-squared
'Chi2_reduced': Reduced Chi-squared
'num_func_eval': number of function evaluations during fit.
'func_value': the function evaluated in the best fitting parameters
'message': status message from leastsq()
'error_flag': integer status flag from leastsq() ('ier')
'Covariance': covariance matrix (variances in the diagonal)
'Correlation_coeffs': Pearson's correlation coefficients (usually
denoted by 'r') in a matrix. The diagonal is unity.
Notes:
for the actual fitting, the module scipy.odr is used.
]
variable[odrmodel] assign[=] call[name[odr].Model, parameter[<ast.Lambda object at 0x7da1b106fc40>]]
if compare[name[dx] is_not constant[None]] begin[:]
variable[xfixed] assign[=] call[name[np].isfinite, parameter[name[dx]]]
variable[odrdata] assign[=] call[name[odr].RealData, parameter[name[x], name[y]]]
variable[odrodr] assign[=] call[name[odr].ODR, parameter[name[odrdata], name[odrmodel], name[params_init]]]
variable[odroutput] assign[=] call[name[odrodr].run, parameter[]]
variable[statdict] assign[=] call[name[odroutput].__dict__.copy, parameter[]]
call[name[statdict]][constant[Covariance]] assign[=] name[odroutput].cov_beta
call[name[statdict]][constant[Correlation_coeffs]] assign[=] binary_operation[name[odroutput].cov_beta / call[name[np].outer, parameter[name[odroutput].sd_beta, name[odroutput].sd_beta]]]
call[name[statdict]][constant[DoF]] assign[=] binary_operation[call[name[len], parameter[name[x]]] - call[name[len], parameter[name[odroutput].beta]]]
call[name[statdict]][constant[Chi2_reduced]] assign[=] call[name[statdict]][constant[res_var]]
call[name[statdict]][constant[func_value]] assign[=] call[name[statdict]][constant[y]]
call[name[statdict]][constant[Chi2]] assign[=] call[name[statdict]][constant[sum_square]]
def function[convert, parameter[p_, dp_, pi]]:
if call[name[isinstance], parameter[name[pi], name[FixedParameter]]] begin[:]
return[call[name[FixedParameter], parameter[name[p_]]]]
return[call[name[tuple], parameter[binary_operation[<ast.ListComp object at 0x7da1b106e110> + list[[<ast.Name object at 0x7da1b106dd50>]]]]]] | keyword[def] identifier[nonlinear_odr] ( identifier[x] , identifier[y] , identifier[dx] , identifier[dy] , identifier[func] , identifier[params_init] ,** identifier[kwargs] ):
literal[string]
identifier[odrmodel] = identifier[odr] . identifier[Model] ( keyword[lambda] identifier[pars] , identifier[x] : identifier[func] ( identifier[x] ,* identifier[pars] ))
keyword[if] identifier[dx] keyword[is] keyword[not] keyword[None] :
identifier[xfixed] = identifier[np] . identifier[isfinite] ( identifier[dx] )
keyword[else] :
identifier[xfixed] = keyword[None]
identifier[odrdata] = identifier[odr] . identifier[RealData] ( identifier[x] , identifier[y] , identifier[sx] = identifier[dx] , identifier[sy] = identifier[dy] , identifier[fix] = identifier[xfixed] )
identifier[odrodr] = identifier[odr] . identifier[ODR] ( identifier[odrdata] , identifier[odrmodel] , identifier[params_init] , identifier[ifixb] =[ keyword[not] ( identifier[isinstance] ( identifier[p] , identifier[FixedParameter] )) keyword[for] identifier[p] keyword[in] identifier[params_init] ],
** identifier[kwargs] )
identifier[odroutput] = identifier[odrodr] . identifier[run] ()
identifier[statdict] = identifier[odroutput] . identifier[__dict__] . identifier[copy] ()
identifier[statdict] [ literal[string] ]= identifier[odroutput] . identifier[cov_beta]
identifier[statdict] [ literal[string] ]= identifier[odroutput] . identifier[cov_beta] / identifier[np] . identifier[outer] ( identifier[odroutput] . identifier[sd_beta] , identifier[odroutput] . identifier[sd_beta] )
identifier[statdict] [ literal[string] ]= identifier[len] ( identifier[x] )- identifier[len] ( identifier[odroutput] . identifier[beta] )
identifier[statdict] [ literal[string] ]= identifier[statdict] [ literal[string] ]
identifier[statdict] [ literal[string] ]= identifier[statdict] [ literal[string] ]
identifier[statdict] [ literal[string] ]= identifier[statdict] [ literal[string] ]
keyword[def] identifier[convert] ( identifier[p_] , identifier[dp_] , identifier[pi] ):
keyword[if] identifier[isinstance] ( identifier[pi] , identifier[FixedParameter] ):
keyword[return] identifier[FixedParameter] ( identifier[p_] )
keyword[else] :
keyword[return] identifier[ErrorValue] ( identifier[p_] , identifier[dp_] )
keyword[return] identifier[tuple] ([ identifier[convert] ( identifier[p_] , identifier[dp_] , identifier[pi] ) keyword[for] ( identifier[p_] , identifier[dp_] , identifier[pi] ) keyword[in] identifier[zip] ( identifier[odroutput] . identifier[beta] , identifier[odroutput] . identifier[sd_beta] , identifier[params_init] )]+[ identifier[statdict] ]) | def nonlinear_odr(x, y, dx, dy, func, params_init, **kwargs):
"""Perform a non-linear orthogonal distance regression, return the results as
ErrorValue() instances.
Inputs:
x: one-dimensional numpy array of the independent variable
y: one-dimensional numpy array of the dependent variable
dx: absolute error (square root of the variance) of the independent
variable. Either a one-dimensional numpy array or None. If None,
weighting is disabled. Non-finite (NaN or inf) elements signify
that the corresponding element in x is to be treated as fixed by
ODRPACK.
dy: absolute error (square root of the variance) of the dependent
variable. Either a one-dimensional numpy array or None. If None,
weighting is disabled.
func: a callable with the signature
func(x,par1,par2,par3,...)
params_init: list or tuple of the first estimates of the
parameters par1, par2, par3 etc. to be fitted
other optional keyword arguments will be passed to leastsq().
Outputs: par1, par2, par3, ... , statdict
par1, par2, par3, ...: fitted values of par1, par2, par3 etc
as instances of ErrorValue.
statdict: dictionary of various statistical parameters:
'DoF': Degrees of freedom
'Chi2': Chi-squared
'Chi2_reduced': Reduced Chi-squared
'num_func_eval': number of function evaluations during fit.
'func_value': the function evaluated in the best fitting parameters
'message': status message from leastsq()
'error_flag': integer status flag from leastsq() ('ier')
'Covariance': covariance matrix (variances in the diagonal)
'Correlation_coeffs': Pearson's correlation coefficients (usually
denoted by 'r') in a matrix. The diagonal is unity.
Notes:
for the actual fitting, the module scipy.odr is used.
"""
odrmodel = odr.Model(lambda pars, x: func(x, *pars))
if dx is not None:
# treat non-finite values as fixed
xfixed = np.isfinite(dx) # depends on [control=['if'], data=['dx']]
else:
xfixed = None
odrdata = odr.RealData(x, y, sx=dx, sy=dy, fix=xfixed)
odrodr = odr.ODR(odrdata, odrmodel, params_init, ifixb=[not isinstance(p, FixedParameter) for p in params_init], **kwargs)
odroutput = odrodr.run()
statdict = odroutput.__dict__.copy()
statdict['Covariance'] = odroutput.cov_beta
statdict['Correlation_coeffs'] = odroutput.cov_beta / np.outer(odroutput.sd_beta, odroutput.sd_beta)
statdict['DoF'] = len(x) - len(odroutput.beta)
statdict['Chi2_reduced'] = statdict['res_var']
statdict['func_value'] = statdict['y']
statdict['Chi2'] = statdict['sum_square']
def convert(p_, dp_, pi):
if isinstance(pi, FixedParameter):
return FixedParameter(p_) # depends on [control=['if'], data=[]]
else:
return ErrorValue(p_, dp_)
return tuple([convert(p_, dp_, pi) for (p_, dp_, pi) in zip(odroutput.beta, odroutput.sd_beta, params_init)] + [statdict]) |
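A minimal fitting sketch for nonlinear_odr, assuming it runs in the same module so that scipy.odr and the ErrorValue/FixedParameter helpers referenced above are available; the model and noise levels are arbitrary.

import numpy as np

# Noisy samples of y = 2.5 * exp(-0.8 x) with errors on both axes.
x = np.linspace(0.0, 5.0, 40)
y = 2.5 * np.exp(-0.8 * x) + np.random.normal(0.0, 0.02, x.size)
dx = np.full_like(x, 0.01)
dy = np.full_like(x, 0.02)

def model(x, amplitude, rate):
    return amplitude * np.exp(-rate * x)

amplitude, rate, stats = nonlinear_odr(x, y, dx, dy, model, [1.0, 1.0])
print(amplitude, rate)            # ErrorValue instances (value +/- uncertainty)
print(stats['Chi2_reduced'])      # reduced chi-squared of the fit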
def create_topic(
self, topic_name,
default_message_time_to_live=None,
max_size_in_megabytes=None, requires_duplicate_detection=None,
duplicate_detection_history_time_window=None,
enable_batched_operations=None):
"""Create a topic entity.
:param topic_name: The name of the new topic.
:type topic_name: str
:param max_size_in_megabytes: The max size to allow the topic to grow to.
:type max_size_in_megabytes: int
:param requires_duplicate_detection: Whether the topic will require every message within
a specified time frame to have a unique ID. Non-unique messages will be discarded.
Default value is False.
:type requires_duplicate_detection: bool
:param default_message_time_to_live: The length of time a message will remain in the topic
before it is either discarded or moved to the dead letter queue.
:type default_message_time_to_live: ~datetime.timedelta
:param duplicate_detection_history_time_window: The period within which all incoming messages
must have a unique message ID.
:type duplicate_detection_history_time_window: ~datetime.timedelta
:param enable_batched_operations:
:type: enable_batched_operations: bool
:raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found.
:raises: ~azure.common.AzureConflictHttpError if a topic of the same name already exists.
"""
topic_properties = Topic(
max_size_in_megabytes=max_size_in_megabytes,
requires_duplicate_detection=requires_duplicate_detection,
default_message_time_to_live=default_message_time_to_live,
duplicate_detection_history_time_window=duplicate_detection_history_time_window,
enable_batched_operations=enable_batched_operations)
try:
return self.mgmt_client.create_topic(topic_name, topic=topic_properties, fail_on_exist=True)
except requests.exceptions.ConnectionError as e:
raise ServiceBusConnectionError("Namespace: {} not found".format(self.service_namespace), e) | def function[create_topic, parameter[self, topic_name, default_message_time_to_live, max_size_in_megabytes, requires_duplicate_detection, duplicate_detection_history_time_window, enable_batched_operations]]:
constant[Create a topic entity.
:param topic_name: The name of the new topic.
:type topic_name: str
:param max_size_in_megabytes: The max size to allow the topic to grow to.
:type max_size_in_megabytes: int
:param requires_duplicate_detection: Whether the topic will require every message within
a specified time frame to have a unique ID. Non-unique messages will be discarded.
Default value is False.
:type requires_duplicate_detection: bool
:param default_message_time_to_live: The length of time a message will remain in the topic
before it is either discarded or moved to the dead letter queue.
:type default_message_time_to_live: ~datetime.timedelta
:param duplicate_detection_history_time_window: The period within which all incoming messages
must have a unique message ID.
:type duplicate_detection_history_time_window: ~datetime.timedelta
:param enable_batched_operations:
:type: enable_batched_operations: bool
:raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found.
:raises: ~azure.common.AzureConflictHttpError if a topic of the same name already exists.
]
variable[topic_properties] assign[=] call[name[Topic], parameter[]]
<ast.Try object at 0x7da207f01570> | keyword[def] identifier[create_topic] (
identifier[self] , identifier[topic_name] ,
identifier[default_message_time_to_live] = keyword[None] ,
identifier[max_size_in_megabytes] = keyword[None] , identifier[requires_duplicate_detection] = keyword[None] ,
identifier[duplicate_detection_history_time_window] = keyword[None] ,
identifier[enable_batched_operations] = keyword[None] ):
literal[string]
identifier[topic_properties] = identifier[Topic] (
identifier[max_size_in_megabytes] = identifier[max_size_in_megabytes] ,
identifier[requires_duplicate_detection] = identifier[requires_duplicate_detection] ,
identifier[default_message_time_to_live] = identifier[default_message_time_to_live] ,
identifier[duplicate_detection_history_time_window] = identifier[duplicate_detection_history_time_window] ,
identifier[enable_batched_operations] = identifier[enable_batched_operations] )
keyword[try] :
keyword[return] identifier[self] . identifier[mgmt_client] . identifier[create_topic] ( identifier[topic_name] , identifier[topic] = identifier[topic_properties] , identifier[fail_on_exist] = keyword[True] )
keyword[except] identifier[requests] . identifier[exceptions] . identifier[ConnectionError] keyword[as] identifier[e] :
keyword[raise] identifier[ServiceBusConnectionError] ( literal[string] . identifier[format] ( identifier[self] . identifier[service_namespace] ), identifier[e] ) | def create_topic(self, topic_name, default_message_time_to_live=None, max_size_in_megabytes=None, requires_duplicate_detection=None, duplicate_detection_history_time_window=None, enable_batched_operations=None):
"""Create a topic entity.
:param topic_name: The name of the new topic.
:type topic_name: str
:param max_size_in_megabytes: The max size to allow the topic to grow to.
:type max_size_in_megabytes: int
:param requires_duplicate_detection: Whether the topic will require every message within
a specified time frame to have a unique ID. Non-unique messages will be discarded.
Default value is False.
:type requires_duplicate_detection: bool
:param default_message_time_to_live: The length of time a message will remain in the topic
before it is either discarded or moved to the dead letter queue.
:type default_message_time_to_live: ~datetime.timedelta
:param duplicate_detection_history_time_window: The period within which all incoming messages
must have a unique message ID.
:type duplicate_detection_history_time_window: ~datetime.timedelta
:param enable_batched_operations:
:type: enable_batched_operations: bool
:raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found.
:raises: ~azure.common.AzureConflictHttpError if a topic of the same name already exists.
"""
topic_properties = Topic(max_size_in_megabytes=max_size_in_megabytes, requires_duplicate_detection=requires_duplicate_detection, default_message_time_to_live=default_message_time_to_live, duplicate_detection_history_time_window=duplicate_detection_history_time_window, enable_batched_operations=enable_batched_operations)
try:
return self.mgmt_client.create_topic(topic_name, topic=topic_properties, fail_on_exist=True) # depends on [control=['try'], data=[]]
except requests.exceptions.ConnectionError as e:
raise ServiceBusConnectionError('Namespace: {} not found'.format(self.service_namespace), e) # depends on [control=['except'], data=['e']] |
def choices(tree):
"""
Get the 'address' of each leaf node in terms of internal
node choices
"""
n = len(leaves(tree))
addr = np.nan * np.ones((n, n-1))
def _addresses(node, index, choices):
# index is the index of the current internal node
# choices is a list of (index, 0/1) choices made
if np.isscalar(node):
for i, choice in choices:
addr[node, i] = choice
return index
elif isinstance(node, tuple) and len(node) == 2:
newindex = _addresses(node[0], index+1, choices + [(index, 0)])
newindex = _addresses(node[1], newindex, choices + [(index, 1)])
return newindex
else:
raise Exception("Not a tree!")
_addresses(tree, 0, [])
return addr | def function[choices, parameter[tree]]:
constant[
Get the 'address' of each leaf node in terms of internal
node choices
]
variable[n] assign[=] call[name[len], parameter[call[name[leaves], parameter[name[tree]]]]]
variable[addr] assign[=] binary_operation[name[np].nan * call[name[np].ones, parameter[tuple[[<ast.Name object at 0x7da1b1b02500>, <ast.BinOp object at 0x7da1b1b03df0>]]]]]
def function[_addresses, parameter[node, index, choices]]:
if call[name[np].isscalar, parameter[name[node]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1b00f10>, <ast.Name object at 0x7da1b1b006a0>]]] in starred[name[choices]] begin[:]
call[name[addr]][tuple[[<ast.Name object at 0x7da1b1b025f0>, <ast.Name object at 0x7da1b1b03340>]]] assign[=] name[choice]
return[name[index]]
call[name[_addresses], parameter[name[tree], constant[0], list[[]]]]
return[name[addr]] | keyword[def] identifier[choices] ( identifier[tree] ):
literal[string]
identifier[n] = identifier[len] ( identifier[leaves] ( identifier[tree] ))
identifier[addr] = identifier[np] . identifier[nan] * identifier[np] . identifier[ones] (( identifier[n] , identifier[n] - literal[int] ))
keyword[def] identifier[_addresses] ( identifier[node] , identifier[index] , identifier[choices] ):
keyword[if] identifier[np] . identifier[isscalar] ( identifier[node] ):
keyword[for] identifier[i] , identifier[choice] keyword[in] identifier[choices] :
identifier[addr] [ identifier[node] , identifier[i] ]= identifier[choice]
keyword[return] identifier[index]
keyword[elif] identifier[isinstance] ( identifier[node] , identifier[tuple] ) keyword[and] identifier[len] ( identifier[node] )== literal[int] :
identifier[newindex] = identifier[_addresses] ( identifier[node] [ literal[int] ], identifier[index] + literal[int] , identifier[choices] +[( identifier[index] , literal[int] )])
identifier[newindex] = identifier[_addresses] ( identifier[node] [ literal[int] ], identifier[newindex] , identifier[choices] +[( identifier[index] , literal[int] )])
keyword[return] identifier[newindex]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[_addresses] ( identifier[tree] , literal[int] ,[])
keyword[return] identifier[addr] | def choices(tree):
"""
Get the 'address' of each leaf node in terms of internal
node choices
"""
n = len(leaves(tree))
addr = np.nan * np.ones((n, n - 1))
def _addresses(node, index, choices):
# index is the index of the current internal node
# choices is a list of (index, 0/1) choices made
if np.isscalar(node):
for (i, choice) in choices:
addr[node, i] = choice # depends on [control=['for'], data=[]]
return index # depends on [control=['if'], data=[]]
elif isinstance(node, tuple) and len(node) == 2:
newindex = _addresses(node[0], index + 1, choices + [(index, 0)])
newindex = _addresses(node[1], newindex, choices + [(index, 1)])
return newindex # depends on [control=['if'], data=[]]
else:
raise Exception('Not a tree!')
_addresses(tree, 0, [])
return addr |
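A worked example of choices() on a two-level tree; the leaves() helper it relies on is not part of this row, so a matching stand-in is defined here.

import numpy as np

def leaves(tree):
    # Stand-in for the helper assumed by choices(): collect leaf labels.
    if np.isscalar(tree):
        return [tree]
    return leaves(tree[0]) + leaves(tree[1])

tree = ((0, 1), (2, 3))   # leaves 0..3, internal nodes numbered in prefix order
print(choices(tree))
# Row i holds the 0/1 turns from the root to leaf i, with NaN where an
# internal node does not lie on that leaf's path.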
def wait_for_completion(self, response, timeout=3600, initial_wait=5, scaleup=10):
"""
Poll resource request status until resource is provisioned.
:param response: A response dict, which needs to have a 'requestId' item.
:type response: ``dict``
:param timeout: Maximum waiting time in seconds. None means infinite waiting time.
:type timeout: ``int``
:param initial_wait: Initial polling interval in seconds.
:type initial_wait: ``int``
:param scaleup: Number of polling intervals to wait before the polling interval is doubled; both the interval and this factor are doubled each time.
:type scaleup: ``int``
"""
if not response:
return
logger = logging.getLogger(__name__)
wait_period = initial_wait
next_increase = time.time() + wait_period * scaleup
if timeout:
timeout = time.time() + timeout
while True:
request = self.get_request(request_id=response['requestId'], status=True)
if request['metadata']['status'] == 'DONE':
break
elif request['metadata']['status'] == 'FAILED':
raise PBFailedRequest(
'Request {0} failed to complete: {1}'.format(
response['requestId'], request['metadata']['message']),
response['requestId']
)
current_time = time.time()
if timeout and current_time > timeout:
raise PBTimeoutError('Timed out waiting for request {0}.'.format(
response['requestId']), response['requestId'])
if current_time > next_increase:
wait_period *= 2
next_increase = time.time() + wait_period * scaleup
scaleup *= 2
logger.info("Request %s is in state '%s'. Sleeping for %i seconds...",
response['requestId'], request['metadata']['status'], wait_period)
time.sleep(wait_period) | def function[wait_for_completion, parameter[self, response, timeout, initial_wait, scaleup]]:
constant[
Poll resource request status until resource is provisioned.
:param response: A response dict, which needs to have a 'requestId' item.
:type response: ``dict``
:param timeout: Maximum waiting time in seconds. None means infinite waiting time.
:type timeout: ``int``
:param initial_wait: Initial polling interval in seconds.
:type initial_wait: ``int``
:param scaleup: Number of polling intervals to wait before the polling interval is doubled; both the interval and this factor are doubled each time.
:type scaleup: ``int``
]
if <ast.UnaryOp object at 0x7da1b26acf40> begin[:]
return[None]
variable[logger] assign[=] call[name[logging].getLogger, parameter[name[__name__]]]
variable[wait_period] assign[=] name[initial_wait]
variable[next_increase] assign[=] binary_operation[call[name[time].time, parameter[]] + binary_operation[name[wait_period] * name[scaleup]]]
if name[timeout] begin[:]
variable[timeout] assign[=] binary_operation[call[name[time].time, parameter[]] + name[timeout]]
while constant[True] begin[:]
variable[request] assign[=] call[name[self].get_request, parameter[]]
if compare[call[call[name[request]][constant[metadata]]][constant[status]] equal[==] constant[DONE]] begin[:]
break
variable[current_time] assign[=] call[name[time].time, parameter[]]
if <ast.BoolOp object at 0x7da1b26ae470> begin[:]
<ast.Raise object at 0x7da1b26ac880>
if compare[name[current_time] greater[>] name[next_increase]] begin[:]
<ast.AugAssign object at 0x7da1b26af8b0>
variable[next_increase] assign[=] binary_operation[call[name[time].time, parameter[]] + binary_operation[name[wait_period] * name[scaleup]]]
<ast.AugAssign object at 0x7da1b26ac7c0>
call[name[logger].info, parameter[constant[Request %s is in state '%s'. Sleeping for %i seconds...], call[name[response]][constant[requestId]], call[call[name[request]][constant[metadata]]][constant[status]], name[wait_period]]]
call[name[time].sleep, parameter[name[wait_period]]] | keyword[def] identifier[wait_for_completion] ( identifier[self] , identifier[response] , identifier[timeout] = literal[int] , identifier[initial_wait] = literal[int] , identifier[scaleup] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[response] :
keyword[return]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[wait_period] = identifier[initial_wait]
identifier[next_increase] = identifier[time] . identifier[time] ()+ identifier[wait_period] * identifier[scaleup]
keyword[if] identifier[timeout] :
identifier[timeout] = identifier[time] . identifier[time] ()+ identifier[timeout]
keyword[while] keyword[True] :
identifier[request] = identifier[self] . identifier[get_request] ( identifier[request_id] = identifier[response] [ literal[string] ], identifier[status] = keyword[True] )
keyword[if] identifier[request] [ literal[string] ][ literal[string] ]== literal[string] :
keyword[break]
keyword[elif] identifier[request] [ literal[string] ][ literal[string] ]== literal[string] :
keyword[raise] identifier[PBFailedRequest] (
literal[string] . identifier[format] (
identifier[response] [ literal[string] ], identifier[request] [ literal[string] ][ literal[string] ]),
identifier[response] [ literal[string] ]
)
identifier[current_time] = identifier[time] . identifier[time] ()
keyword[if] identifier[timeout] keyword[and] identifier[current_time] > identifier[timeout] :
keyword[raise] identifier[PBTimeoutError] ( literal[string] . identifier[format] (
identifier[response] [ literal[string] ]), identifier[response] [ literal[string] ])
keyword[if] identifier[current_time] > identifier[next_increase] :
identifier[wait_period] *= literal[int]
identifier[next_increase] = identifier[time] . identifier[time] ()+ identifier[wait_period] * identifier[scaleup]
identifier[scaleup] *= literal[int]
identifier[logger] . identifier[info] ( literal[string] ,
identifier[response] [ literal[string] ], identifier[request] [ literal[string] ][ literal[string] ], identifier[wait_period] )
identifier[time] . identifier[sleep] ( identifier[wait_period] ) | def wait_for_completion(self, response, timeout=3600, initial_wait=5, scaleup=10):
"""
Poll resource request status until resource is provisioned.
:param response: A response dict, which needs to have a 'requestId' item.
:type response: ``dict``
:param timeout: Maximum waiting time in seconds. None means infinite waiting time.
:type timeout: ``int``
:param initial_wait: Initial polling interval in seconds.
:type initial_wait: ``int``
:param scaleup: Number of polling intervals to wait before the polling interval is doubled; both the interval and this factor are doubled each time.
:type scaleup: ``int``
"""
if not response:
return # depends on [control=['if'], data=[]]
logger = logging.getLogger(__name__)
wait_period = initial_wait
next_increase = time.time() + wait_period * scaleup
if timeout:
timeout = time.time() + timeout # depends on [control=['if'], data=[]]
while True:
request = self.get_request(request_id=response['requestId'], status=True)
if request['metadata']['status'] == 'DONE':
break # depends on [control=['if'], data=[]]
elif request['metadata']['status'] == 'FAILED':
raise PBFailedRequest('Request {0} failed to complete: {1}'.format(response['requestId'], request['metadata']['message']), response['requestId']) # depends on [control=['if'], data=[]]
current_time = time.time()
if timeout and current_time > timeout:
raise PBTimeoutError('Timed out waiting for request {0}.'.format(response['requestId']), response['requestId']) # depends on [control=['if'], data=[]]
if current_time > next_increase:
wait_period *= 2
next_increase = time.time() + wait_period * scaleup
scaleup *= 2 # depends on [control=['if'], data=['next_increase']]
logger.info("Request %s is in state '%s'. Sleeping for %i seconds...", response['requestId'], request['metadata']['status'], wait_period)
time.sleep(wait_period) # depends on [control=['while'], data=[]] |
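A sketch of the intended call pattern for wait_for_completion, assuming the surrounding ProfitBricks client class; the credentials, data-center fields, and timeout are placeholders.

from profitbricks.client import Datacenter, ProfitBricksService

client = ProfitBricksService(username='user@example.com', password='secret')

# Any provisioning call that returns a dict with a 'requestId' works here.
response = client.create_datacenter(Datacenter(name='demo-dc', location='us/las'))

# Block until the request reaches DONE, raising on FAILED or timeout.
client.wait_for_completion(response, timeout=1800)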
def get_plugin_actions(self):
"""Return a list of actions related to plugin"""
return [self.rich_text_action, self.plain_text_action,
self.show_source_action, MENU_SEPARATOR,
self.auto_import_action] | def function[get_plugin_actions, parameter[self]]:
constant[Return a list of actions related to plugin]
return[list[[<ast.Attribute object at 0x7da20c6c6e00>, <ast.Attribute object at 0x7da20c6c6bc0>, <ast.Attribute object at 0x7da20c6c7f70>, <ast.Name object at 0x7da20c6c7a60>, <ast.Attribute object at 0x7da20c6c77f0>]]] | keyword[def] identifier[get_plugin_actions] ( identifier[self] ):
literal[string]
keyword[return] [ identifier[self] . identifier[rich_text_action] , identifier[self] . identifier[plain_text_action] ,
identifier[self] . identifier[show_source_action] , identifier[MENU_SEPARATOR] ,
identifier[self] . identifier[auto_import_action] ] | def get_plugin_actions(self):
"""Return a list of actions related to plugin"""
return [self.rich_text_action, self.plain_text_action, self.show_source_action, MENU_SEPARATOR, self.auto_import_action] |
def MTF(self, px_per_mm):
'''
px_per_mm = cam_resolution / image_size
'''
res = 100 #numeric resolution
r = 4 #range +-r*std
#size of 1 px:
px_size = 1 / px_per_mm
#standard deviation of the point-spread-function (PSF) as normal distributed:
std = self.std*px_size #transform standard deviation from [px] to [mm]
x = np.linspace(-r*std,r*std, res)
#line spread function:
lsf = self.gaussian1d(x, 1, 0, std)
#MTF defined as Fourier transform of the line spread function:
#abs() because result is complex
y = abs(np.fft.fft(lsf))
#normalize fft so that max = 1
y /= np.max(y)
#step length between xn and xn+1
dstep = r*std/res
# Fourier frequencies - here: line pairs(cycles) per mm
freq = np.fft.fftfreq(lsf.size, dstep)
#limit mtf between [0-px_per_mm]:
i = np.argmax(freq>px_per_mm)
self.mtf_x = freq[:i]
self.mtf_y = y[:i]
return self.mtf_x, self.mtf_y | def function[MTF, parameter[self, px_per_mm]]:
constant[
px_per_mm = cam_resolution / image_size
]
variable[res] assign[=] constant[100]
variable[r] assign[=] constant[4]
variable[px_size] assign[=] binary_operation[constant[1] / name[px_per_mm]]
variable[std] assign[=] binary_operation[name[self].std * name[px_size]]
variable[x] assign[=] call[name[np].linspace, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b23446a0> * name[std]], binary_operation[name[r] * name[std]], name[res]]]
variable[lsf] assign[=] call[name[self].gaussian1d, parameter[name[x], constant[1], constant[0], name[std]]]
variable[y] assign[=] call[name[abs], parameter[call[name[np].fft.fft, parameter[name[lsf]]]]]
<ast.AugAssign object at 0x7da1b2344f10>
variable[dstep] assign[=] binary_operation[binary_operation[name[r] * name[std]] / name[res]]
variable[freq] assign[=] call[name[np].fft.fftfreq, parameter[name[lsf].size, name[dstep]]]
variable[i] assign[=] call[name[np].argmax, parameter[compare[name[freq] greater[>] name[px_per_mm]]]]
name[self].mtf_x assign[=] call[name[freq]][<ast.Slice object at 0x7da204621ed0>]
name[self].mtf_y assign[=] call[name[y]][<ast.Slice object at 0x7da204620730>]
return[tuple[[<ast.Attribute object at 0x7da2046227a0>, <ast.Attribute object at 0x7da204620280>]]] | keyword[def] identifier[MTF] ( identifier[self] , identifier[px_per_mm] ):
literal[string]
identifier[res] = literal[int]
identifier[r] = literal[int]
identifier[px_size] = literal[int] / identifier[px_per_mm]
identifier[std] = identifier[self] . identifier[std] * identifier[px_size]
identifier[x] = identifier[np] . identifier[linspace] (- identifier[r] * identifier[std] , identifier[r] * identifier[std] , identifier[res] )
identifier[lsf] = identifier[self] . identifier[gaussian1d] ( identifier[x] , literal[int] , literal[int] , identifier[std] )
identifier[y] = identifier[abs] ( identifier[np] . identifier[fft] . identifier[fft] ( identifier[lsf] ))
identifier[y] /= identifier[np] . identifier[max] ( identifier[y] )
identifier[dstep] = identifier[r] * identifier[std] / identifier[res]
identifier[freq] = identifier[np] . identifier[fft] . identifier[fftfreq] ( identifier[lsf] . identifier[size] , identifier[dstep] )
identifier[i] = identifier[np] . identifier[argmax] ( identifier[freq] > identifier[px_per_mm] )
identifier[self] . identifier[mtf_x] = identifier[freq] [: identifier[i] ]
identifier[self] . identifier[mtf_y] = identifier[y] [: identifier[i] ]
keyword[return] identifier[self] . identifier[mtf_x] , identifier[self] . identifier[mtf_y] | def MTF(self, px_per_mm):
"""
px_per_mm = cam_resolution / image_size
"""
res = 100 #numeric resolution
r = 4 #range +-r*std
#size of 1 px:
px_size = 1 / px_per_mm #standard deviation of the point-spread-function (PSF) as normal distributed:
std = self.std * px_size #transform standard deviation from [px] to [mm]
x = np.linspace(-r * std, r * std, res) #line spread function:
lsf = self.gaussian1d(x, 1, 0, std) #MTF defined as Fourier transform of the line spread function:
#abs() because result is complex
y = abs(np.fft.fft(lsf)) #normalize fft so that max = 1
y /= np.max(y) #step length between xn and xn+1
dstep = r * std / res # Fourier frequencies - here: line pairs(cycles) per mm
freq = np.fft.fftfreq(lsf.size, dstep) #limit mtf between [0-px_per_mm]:
i = np.argmax(freq > px_per_mm)
self.mtf_x = freq[:i]
self.mtf_y = y[:i]
return (self.mtf_x, self.mtf_y) |
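A minimal standalone sketch of the MTF-from-LSF computation used in the MTF method above, written in plain numpy; the std value is an illustrative assumption, and the frequency step simply mirrors the snippet's dstep rather than asserting it is the only correct choice.
import numpy as np

std, r, res = 0.05, 4, 100                        # assumed PSF std in mm, range +-r*std, numeric resolution
x = np.linspace(-r * std, r * std, res)
lsf = np.exp(-x ** 2 / (2 * std ** 2))            # amplitude-1 gaussian line spread function
mtf = np.abs(np.fft.fft(lsf))                     # MTF as |FFT| of the LSF
mtf /= mtf.max()                                  # normalize so the zero-frequency value is 1
freq = np.fft.fftfreq(lsf.size, d=r * std / res)  # line pairs per mm, same spacing as the snippet's dstep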
def kron(*matrices: np.ndarray) -> np.ndarray:
"""Computes the kronecker product of a sequence of matrices.
A *args version of lambda args: functools.reduce(np.kron, args).
Args:
*matrices: The matrices and controls to combine with the kronecker
product.
Returns:
The resulting matrix.
"""
product = np.eye(1)
for m in matrices:
product = np.kron(product, m)
return np.array(product) | def function[kron, parameter[]]:
constant[Computes the kronecker product of a sequence of matrices.
A *args version of lambda args: functools.reduce(np.kron, args).
Args:
*matrices: The matrices and controls to combine with the kronecker
product.
Returns:
The resulting matrix.
]
variable[product] assign[=] call[name[np].eye, parameter[constant[1]]]
for taget[name[m]] in starred[name[matrices]] begin[:]
variable[product] assign[=] call[name[np].kron, parameter[name[product], name[m]]]
return[call[name[np].array, parameter[name[product]]]] | keyword[def] identifier[kron] (* identifier[matrices] : identifier[np] . identifier[ndarray] )-> identifier[np] . identifier[ndarray] :
literal[string]
identifier[product] = identifier[np] . identifier[eye] ( literal[int] )
keyword[for] identifier[m] keyword[in] identifier[matrices] :
identifier[product] = identifier[np] . identifier[kron] ( identifier[product] , identifier[m] )
keyword[return] identifier[np] . identifier[array] ( identifier[product] ) | def kron(*matrices: np.ndarray) -> np.ndarray:
"""Computes the kronecker product of a sequence of matrices.
A *args version of lambda args: functools.reduce(np.kron, args).
Args:
*matrices: The matrices and controls to combine with the kronecker
product.
Returns:
The resulting matrix.
"""
product = np.eye(1)
for m in matrices:
product = np.kron(product, m) # depends on [control=['for'], data=['m']]
return np.array(product) |
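A brief usage sketch for the kron helper above, assuming numpy is imported as np; the example matrices are illustrative and not taken from the source.
import numpy as np

X = np.array([[0, 1], [1, 0]])      # example 2x2 matrix (Pauli-X)
I2 = np.eye(2)                      # 2x2 identity

combined = kron(I2, X)              # same result as np.kron(I2, X), shape (4, 4)
assert combined.shape == (4, 4)
assert np.allclose(combined, np.kron(I2, X))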
def post_license_request(request):
"""Submission to create a license acceptance request."""
uuid_ = request.matchdict['uuid']
posted_data = request.json
license_url = posted_data.get('license_url')
licensors = posted_data.get('licensors', [])
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT l.url
FROM document_controls AS dc
LEFT JOIN licenses AS l ON (dc.licenseid = l.licenseid)
WHERE uuid = %s::UUID""", (uuid_,))
try:
# Check that the license exists
existing_license_url = cursor.fetchone()[0]
except TypeError: # NoneType
if request.has_permission('publish.create-identifier'):
cursor.execute("""\
INSERT INTO document_controls (uuid) VALUES (%s)""", (uuid_,))
existing_license_url = None
else:
raise httpexceptions.HTTPNotFound()
if existing_license_url is None and license_url is None:
raise httpexceptions.HTTPBadRequest("license_url is required")
elif (license_url != existing_license_url or
existing_license_url is None):
cursor.execute("""\
UPDATE document_controls AS dc
SET licenseid = l.licenseid FROM licenses AS l
WHERE url = %s and is_valid_for_publication = 't'
RETURNING dc.licenseid""",
(license_url,))
try:
# Check that it is a valid license id
cursor.fetchone()[0]
except TypeError: # None returned
raise httpexceptions.HTTPBadRequest("invalid license_url")
upsert_license_requests(cursor, uuid_, licensors)
resp = request.response
resp.status_int = 202
return resp | def function[post_license_request, parameter[request]]:
constant[Submission to create a license acceptance request.]
variable[uuid_] assign[=] call[name[request].matchdict][constant[uuid]]
variable[posted_data] assign[=] name[request].json
variable[license_url] assign[=] call[name[posted_data].get, parameter[constant[license_url]]]
variable[licensors] assign[=] call[name[posted_data].get, parameter[constant[licensors], list[[]]]]
with call[name[db_connect], parameter[]] begin[:]
with call[name[db_conn].cursor, parameter[]] begin[:]
call[name[cursor].execute, parameter[constant[SELECT l.url
FROM document_controls AS dc
LEFT JOIN licenses AS l ON (dc.licenseid = l.licenseid)
WHERE uuid = %s::UUID], tuple[[<ast.Name object at 0x7da1b00dba60>]]]]
<ast.Try object at 0x7da1b00db9d0>
if <ast.BoolOp object at 0x7da1b00dbc40> begin[:]
<ast.Raise object at 0x7da18f00e020>
call[name[upsert_license_requests], parameter[name[cursor], name[uuid_], name[licensors]]]
variable[resp] assign[=] name[request].response
name[resp].status_int assign[=] constant[202]
return[name[resp]] | keyword[def] identifier[post_license_request] ( identifier[request] ):
literal[string]
identifier[uuid_] = identifier[request] . identifier[matchdict] [ literal[string] ]
identifier[posted_data] = identifier[request] . identifier[json]
identifier[license_url] = identifier[posted_data] . identifier[get] ( literal[string] )
identifier[licensors] = identifier[posted_data] . identifier[get] ( literal[string] ,[])
keyword[with] identifier[db_connect] () keyword[as] identifier[db_conn] :
keyword[with] identifier[db_conn] . identifier[cursor] () keyword[as] identifier[cursor] :
identifier[cursor] . identifier[execute] ( literal[string] ,( identifier[uuid_] ,))
keyword[try] :
identifier[existing_license_url] = identifier[cursor] . identifier[fetchone] ()[ literal[int] ]
keyword[except] identifier[TypeError] :
keyword[if] identifier[request] . identifier[has_permission] ( literal[string] ):
identifier[cursor] . identifier[execute] ( literal[string] ,( identifier[uuid_] ,))
identifier[existing_license_url] = keyword[None]
keyword[else] :
keyword[raise] identifier[httpexceptions] . identifier[HTTPNotFound] ()
keyword[if] identifier[existing_license_url] keyword[is] keyword[None] keyword[and] identifier[license_url] keyword[is] keyword[None] :
keyword[raise] identifier[httpexceptions] . identifier[HTTPBadRequest] ( literal[string] )
keyword[elif] ( identifier[license_url] != identifier[existing_license_url] keyword[or]
identifier[existing_license_url] keyword[is] keyword[None] ):
identifier[cursor] . identifier[execute] ( literal[string] ,
( identifier[license_url] ,))
keyword[try] :
identifier[cursor] . identifier[fetchone] ()[ literal[int] ]
keyword[except] identifier[TypeError] :
keyword[raise] identifier[httpexceptions] . identifier[HTTPBadRequest] ( literal[string] )
identifier[upsert_license_requests] ( identifier[cursor] , identifier[uuid_] , identifier[licensors] )
identifier[resp] = identifier[request] . identifier[response]
identifier[resp] . identifier[status_int] = literal[int]
keyword[return] identifier[resp] | def post_license_request(request):
"""Submission to create a license acceptance request."""
uuid_ = request.matchdict['uuid']
posted_data = request.json
license_url = posted_data.get('license_url')
licensors = posted_data.get('licensors', [])
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute('SELECT l.url\nFROM document_controls AS dc\nLEFT JOIN licenses AS l ON (dc.licenseid = l.licenseid)\nWHERE uuid = %s::UUID', (uuid_,))
try:
# Check that the license exists
existing_license_url = cursor.fetchone()[0] # depends on [control=['try'], data=[]]
except TypeError: # NoneType
if request.has_permission('publish.create-identifier'):
cursor.execute('INSERT INTO document_controls (uuid) VALUES (%s)', (uuid_,))
existing_license_url = None # depends on [control=['if'], data=[]]
else:
raise httpexceptions.HTTPNotFound() # depends on [control=['except'], data=[]]
if existing_license_url is None and license_url is None:
raise httpexceptions.HTTPBadRequest('license_url is required') # depends on [control=['if'], data=[]]
elif license_url != existing_license_url or existing_license_url is None:
cursor.execute("UPDATE document_controls AS dc\nSET licenseid = l.licenseid FROM licenses AS l\nWHERE url = %s and is_valid_for_publication = 't'\nRETURNING dc.licenseid", (license_url,))
try:
# Check that it is a valid license id
cursor.fetchone()[0] # depends on [control=['try'], data=[]]
except TypeError: # None returned
raise httpexceptions.HTTPBadRequest('invalid license_url') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
upsert_license_requests(cursor, uuid_, licensors) # depends on [control=['with'], data=['cursor']] # depends on [control=['with'], data=['db_conn']]
resp = request.response
resp.status_int = 202
return resp |
def parse_config(h5path):
"""Parse the RT-DC configuration of an hdf5 file"""
with h5py.File(h5path, mode="r") as fh5:
h5attrs = dict(fh5.attrs)
# Convert byte strings to unicode strings
# https://github.com/h5py/h5py/issues/379
for key in h5attrs:
if isinstance(h5attrs[key], bytes):
h5attrs[key] = h5attrs[key].decode("utf-8")
config = Configuration()
for key in h5attrs:
section, pname = key.split(":")
if pname not in dfn.config_funcs[section]:
# Add the value as a string but issue a warning
config[section][pname] = h5attrs[key]
msg = "Unknown key '{}' in section [{}]!".format(
pname, section)
warnings.warn(msg, UnknownKeyWarning)
else:
typ = dfn.config_funcs[section][pname]
config[section][pname] = typ(h5attrs[key])
return config | def function[parse_config, parameter[h5path]]:
constant[Parse the RT-DC configuration of an hdf5 file]
with call[name[h5py].File, parameter[name[h5path]]] begin[:]
variable[h5attrs] assign[=] call[name[dict], parameter[name[fh5].attrs]]
for taget[name[key]] in starred[name[h5attrs]] begin[:]
if call[name[isinstance], parameter[call[name[h5attrs]][name[key]], name[bytes]]] begin[:]
call[name[h5attrs]][name[key]] assign[=] call[call[name[h5attrs]][name[key]].decode, parameter[constant[utf-8]]]
variable[config] assign[=] call[name[Configuration], parameter[]]
for taget[name[key]] in starred[name[h5attrs]] begin[:]
<ast.Tuple object at 0x7da1b1931ab0> assign[=] call[name[key].split, parameter[constant[:]]]
if compare[name[pname] <ast.NotIn object at 0x7da2590d7190> call[name[dfn].config_funcs][name[section]]] begin[:]
call[call[name[config]][name[section]]][name[pname]] assign[=] call[name[h5attrs]][name[key]]
variable[msg] assign[=] call[constant[Unknown key '{}' in section [{}]!].format, parameter[name[pname], name[section]]]
call[name[warnings].warn, parameter[name[msg], name[UnknownKeyWarning]]]
return[name[config]] | keyword[def] identifier[parse_config] ( identifier[h5path] ):
literal[string]
keyword[with] identifier[h5py] . identifier[File] ( identifier[h5path] , identifier[mode] = literal[string] ) keyword[as] identifier[fh5] :
identifier[h5attrs] = identifier[dict] ( identifier[fh5] . identifier[attrs] )
keyword[for] identifier[key] keyword[in] identifier[h5attrs] :
keyword[if] identifier[isinstance] ( identifier[h5attrs] [ identifier[key] ], identifier[bytes] ):
identifier[h5attrs] [ identifier[key] ]= identifier[h5attrs] [ identifier[key] ]. identifier[decode] ( literal[string] )
identifier[config] = identifier[Configuration] ()
keyword[for] identifier[key] keyword[in] identifier[h5attrs] :
identifier[section] , identifier[pname] = identifier[key] . identifier[split] ( literal[string] )
keyword[if] identifier[pname] keyword[not] keyword[in] identifier[dfn] . identifier[config_funcs] [ identifier[section] ]:
identifier[config] [ identifier[section] ][ identifier[pname] ]= identifier[h5attrs] [ identifier[key] ]
identifier[msg] = literal[string] . identifier[format] (
identifier[pname] , identifier[section] )
identifier[warnings] . identifier[warn] ( identifier[msg] , identifier[UnknownKeyWarning] )
keyword[else] :
identifier[typ] = identifier[dfn] . identifier[config_funcs] [ identifier[section] ][ identifier[pname] ]
identifier[config] [ identifier[section] ][ identifier[pname] ]= identifier[typ] ( identifier[h5attrs] [ identifier[key] ])
keyword[return] identifier[config] | def parse_config(h5path):
"""Parse the RT-DC configuration of an hdf5 file"""
with h5py.File(h5path, mode='r') as fh5:
h5attrs = dict(fh5.attrs) # depends on [control=['with'], data=['fh5']]
# Convert byte strings to unicode strings
# https://github.com/h5py/h5py/issues/379
for key in h5attrs:
if isinstance(h5attrs[key], bytes):
h5attrs[key] = h5attrs[key].decode('utf-8') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
config = Configuration()
for key in h5attrs:
(section, pname) = key.split(':')
if pname not in dfn.config_funcs[section]:
# Add the value as a string but issue a warning
config[section][pname] = h5attrs[key]
msg = "Unknown key '{}' in section [{}]!".format(pname, section)
warnings.warn(msg, UnknownKeyWarning) # depends on [control=['if'], data=['pname']]
else:
typ = dfn.config_funcs[section][pname]
config[section][pname] = typ(h5attrs[key]) # depends on [control=['for'], data=['key']]
return config |
def get_samples(self, n_samples, log_p_function, burn_in_steps=50):
"""
Generates samples.
Parameters:
n_samples - number of samples to generate
log_p_function - a function that returns log density for a specific sample
burn_in_steps - number of burn-in steps for sampling
Returns a tuple of two arrays: (samples, log_p_function values for samples)
"""
restarts = initial_design('random', self.space, n_samples)
sampler = emcee.EnsembleSampler(n_samples, self.space.input_dim(), log_p_function)
samples, samples_log, _ = sampler.run_mcmc(restarts, burn_in_steps)
# make sure we have an array of shape (n samples, space input dim)
if len(samples.shape) == 1:
samples = samples.reshape(-1, 1)
samples_log = samples_log.reshape(-1, 1)
return samples, samples_log | def function[get_samples, parameter[self, n_samples, log_p_function, burn_in_steps]]:
constant[
Generates samples.
Parameters:
n_samples - number of samples to generate
log_p_function - a function that returns log density for a specific sample
burn_in_steps - number of burn-in steps for sampling
Returns a tuple of two arrays: (samples, log_p_function values for samples)
]
variable[restarts] assign[=] call[name[initial_design], parameter[constant[random], name[self].space, name[n_samples]]]
variable[sampler] assign[=] call[name[emcee].EnsembleSampler, parameter[name[n_samples], call[name[self].space.input_dim, parameter[]], name[log_p_function]]]
<ast.Tuple object at 0x7da1b26adb70> assign[=] call[name[sampler].run_mcmc, parameter[name[restarts], name[burn_in_steps]]]
if compare[call[name[len], parameter[name[samples].shape]] equal[==] constant[1]] begin[:]
variable[samples] assign[=] call[name[samples].reshape, parameter[<ast.UnaryOp object at 0x7da1b26aed70>, constant[1]]]
variable[samples_log] assign[=] call[name[samples_log].reshape, parameter[<ast.UnaryOp object at 0x7da18bcc98a0>, constant[1]]]
return[tuple[[<ast.Name object at 0x7da18bcc8af0>, <ast.Name object at 0x7da18bcca470>]]] | keyword[def] identifier[get_samples] ( identifier[self] , identifier[n_samples] , identifier[log_p_function] , identifier[burn_in_steps] = literal[int] ):
literal[string]
identifier[restarts] = identifier[initial_design] ( literal[string] , identifier[self] . identifier[space] , identifier[n_samples] )
identifier[sampler] = identifier[emcee] . identifier[EnsembleSampler] ( identifier[n_samples] , identifier[self] . identifier[space] . identifier[input_dim] (), identifier[log_p_function] )
identifier[samples] , identifier[samples_log] , identifier[_] = identifier[sampler] . identifier[run_mcmc] ( identifier[restarts] , identifier[burn_in_steps] )
keyword[if] identifier[len] ( identifier[samples] . identifier[shape] )== literal[int] :
identifier[samples] = identifier[samples] . identifier[reshape] (- literal[int] , literal[int] )
identifier[samples_log] = identifier[samples_log] . identifier[reshape] (- literal[int] , literal[int] )
keyword[return] identifier[samples] , identifier[samples_log] | def get_samples(self, n_samples, log_p_function, burn_in_steps=50):
"""
Generates samples.
Parameters:
n_samples - number of samples to generate
log_p_function - a function that returns log density for a specific sample
burn_in_steps - number of burn-in steps for sampling
Returns a tuple of two arrays: (samples, log_p_function values for samples)
"""
restarts = initial_design('random', self.space, n_samples)
sampler = emcee.EnsembleSampler(n_samples, self.space.input_dim(), log_p_function)
(samples, samples_log, _) = sampler.run_mcmc(restarts, burn_in_steps)
# make sure we have an array of shape (n samples, space input dim)
if len(samples.shape) == 1:
samples = samples.reshape(-1, 1) # depends on [control=['if'], data=[]]
samples_log = samples_log.reshape(-1, 1)
return (samples, samples_log) |
def _getel(key, value):
"""Returns an element given a key and value."""
if key in ['HorizontalRule', 'Null']:
return elt(key, 0)()
elif key in ['Plain', 'Para', 'BlockQuote', 'BulletList',
'DefinitionList', 'HorizontalRule', 'Null']:
return elt(key, 1)(value)
return elt(key, len(value))(*value) | def function[_getel, parameter[key, value]]:
constant[Returns an element given a key and value.]
if compare[name[key] in list[[<ast.Constant object at 0x7da20c6abbe0>, <ast.Constant object at 0x7da20c6a9780>]]] begin[:]
return[call[call[name[elt], parameter[name[key], constant[0]]], parameter[]]]
return[call[call[name[elt], parameter[name[key], call[name[len], parameter[name[value]]]]], parameter[<ast.Starred object at 0x7da18bc71c60>]]] | keyword[def] identifier[_getel] ( identifier[key] , identifier[value] ):
literal[string]
keyword[if] identifier[key] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] identifier[elt] ( identifier[key] , literal[int] )()
keyword[elif] identifier[key] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ]:
keyword[return] identifier[elt] ( identifier[key] , literal[int] )( identifier[value] )
keyword[return] identifier[elt] ( identifier[key] , identifier[len] ( identifier[value] ))(* identifier[value] ) | def _getel(key, value):
"""Returns an element given a key and value."""
if key in ['HorizontalRule', 'Null']:
return elt(key, 0)() # depends on [control=['if'], data=['key']]
elif key in ['Plain', 'Para', 'BlockQuote', 'BulletList', 'DefinitionList', 'HorizontalRule', 'Null']:
return elt(key, 1)(value) # depends on [control=['if'], data=['key']]
return elt(key, len(value))(*value) |
def _create_client_impl(self, api_version):
"""
Creates the client implementation corresponding to the specified api_version.
:param api_version:
:return:
"""
if api_version == v7_0_VERSION:
from azure.keyvault.v7_0 import KeyVaultClient as ImplClient
elif api_version == v2016_10_01_VERSION:
from azure.keyvault.v2016_10_01 import KeyVaultClient as ImplClient
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
impl = ImplClient(credentials=self._credentials)
impl.config = self.config
# if __enter__ has previously been called and the impl client has __enter__ defined we need to call it
if self._entered and hasattr(impl, '__enter__'):
impl.__enter__()
self._client_impls[api_version] = impl
return impl | def function[_create_client_impl, parameter[self, api_version]]:
constant[
Creates the client implementation corresponding to the specified api_version.
:param api_version:
:return:
]
if compare[name[api_version] equal[==] name[v7_0_VERSION]] begin[:]
from relative_module[azure.keyvault.v7_0] import module[KeyVaultClient]
variable[impl] assign[=] call[name[ImplClient], parameter[]]
name[impl].config assign[=] name[self].config
if <ast.BoolOp object at 0x7da204621e10> begin[:]
call[name[impl].__enter__, parameter[]]
call[name[self]._client_impls][name[api_version]] assign[=] name[impl]
return[name[impl]] | keyword[def] identifier[_create_client_impl] ( identifier[self] , identifier[api_version] ):
literal[string]
keyword[if] identifier[api_version] == identifier[v7_0_VERSION] :
keyword[from] identifier[azure] . identifier[keyvault] . identifier[v7_0] keyword[import] identifier[KeyVaultClient] keyword[as] identifier[ImplClient]
keyword[elif] identifier[api_version] == identifier[v2016_10_01_VERSION] :
keyword[from] identifier[azure] . identifier[keyvault] . identifier[v2016_10_01] keyword[import] identifier[KeyVaultClient] keyword[as] identifier[ImplClient]
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] ( identifier[api_version] ))
identifier[impl] = identifier[ImplClient] ( identifier[credentials] = identifier[self] . identifier[_credentials] )
identifier[impl] . identifier[config] = identifier[self] . identifier[config]
keyword[if] identifier[self] . identifier[_entered] keyword[and] identifier[hasattr] ( identifier[impl] , literal[string] ):
identifier[impl] . identifier[__enter__] ()
identifier[self] . identifier[_client_impls] [ identifier[api_version] ]= identifier[impl]
keyword[return] identifier[impl] | def _create_client_impl(self, api_version):
"""
Creates the client implementation corresponding to the specified api_version.
:param api_version:
:return:
"""
if api_version == v7_0_VERSION:
from azure.keyvault.v7_0 import KeyVaultClient as ImplClient # depends on [control=['if'], data=[]]
elif api_version == v2016_10_01_VERSION:
from azure.keyvault.v2016_10_01 import KeyVaultClient as ImplClient # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('APIVersion {} is not available'.format(api_version))
impl = ImplClient(credentials=self._credentials)
impl.config = self.config
# if __enter__ has previously been called and the impl client has __enter__ defined we need to call it
if self._entered and hasattr(impl, '__enter__'):
impl.__enter__() # depends on [control=['if'], data=[]]
self._client_impls[api_version] = impl
return impl |
def njsd_geneset(network, ref, query, gene_set, file, verbose=True):
"""Compute gene set-specified nJSD between reference and query expression profiles.
Attributes:
network (str): File path to a network file.
ref (str): File path to a reference expression file.
query (str): File path to a query expression file.
geneset (str): File path to a gene set file.
"""
graph, gene_set_total = util.parse_network(network)
ref_gene_expression_dict = util.parse_gene_expression(ref, mean=True)
query_gene_expression_dict = util.parse_gene_expression(query, mean=False)
group_gene_set_dict = util.parse_gene_set(gene_set)
maximally_ambiguous_gene_experession_dict = util.get_maximally_ambiguous_network(query_gene_expression_dict)
gene_set_present = set(query_gene_expression_dict.keys())
with open(file, 'w') as outFile:
print('Gene_set_ID', 'nJSD_NT', 'nJSD_TA', 'tITH', sep='\t', file=outFile)
for group, gene_set in group_gene_set_dict.items():
gene_set_to_be_analyzed = gene_set.intersection(gene_set_present)
# If no genes are available for the group, just ignore it.
if len(gene_set_to_be_analyzed) == 0:
logger.warning('%s has no genes available for analysis. Ignoring the group.' % group)
continue
# If every gene has a single neighbor, just ignore it.
if all([graph.degree(gene) == 1 for gene in gene_set_to_be_analyzed]):
logger.warning('%s has no genes with enough neighbors. Ignoring the group.' % group)
continue
normal_to_tumor_njsd = entropy.njsd(network=graph,
ref_gene_expression_dict=ref_gene_expression_dict,
query_gene_expression_dict=query_gene_expression_dict,
gene_set=gene_set)
tumor_to_ambiguous_njsd = entropy.njsd(network=graph,
ref_gene_expression_dict=maximally_ambiguous_gene_experession_dict,
query_gene_expression_dict=query_gene_expression_dict,
gene_set=gene_set)
tITH = normal_to_tumor_njsd / (normal_to_tumor_njsd + tumor_to_ambiguous_njsd)
with open(file, 'a') as outFile:
print(group, normal_to_tumor_njsd, tumor_to_ambiguous_njsd, tITH, sep='\t', file=outFile) | def function[njsd_geneset, parameter[network, ref, query, gene_set, file, verbose]]:
constant[Compute gene set-specified nJSD between reference and query expression profiles.
Attributes:
network (str): File path to a network file.
ref (str): File path to a reference expression file.
query (str): File path to a query expression file.
geneset (str): File path to a gene set file.
]
<ast.Tuple object at 0x7da1b271c670> assign[=] call[name[util].parse_network, parameter[name[network]]]
variable[ref_gene_expression_dict] assign[=] call[name[util].parse_gene_expression, parameter[name[ref]]]
variable[query_gene_expression_dict] assign[=] call[name[util].parse_gene_expression, parameter[name[query]]]
variable[group_gene_set_dict] assign[=] call[name[util].parse_gene_set, parameter[name[gene_set]]]
variable[maximally_ambiguous_gene_experession_dict] assign[=] call[name[util].get_maximally_ambiguous_network, parameter[name[query_gene_expression_dict]]]
variable[gene_set_present] assign[=] call[name[set], parameter[call[name[query_gene_expression_dict].keys, parameter[]]]]
with call[name[open], parameter[name[file], constant[w]]] begin[:]
call[name[print], parameter[constant[Gene_set_ID], constant[nJSD_NT], constant[nJSD_TA], constant[tITH]]]
for taget[tuple[[<ast.Name object at 0x7da1b2879b40>, <ast.Name object at 0x7da1b287a4a0>]]] in starred[call[name[group_gene_set_dict].items, parameter[]]] begin[:]
variable[gene_set_to_be_analyzed] assign[=] call[name[gene_set].intersection, parameter[name[gene_set_present]]]
if compare[call[name[len], parameter[name[gene_set_to_be_analyzed]]] equal[==] constant[0]] begin[:]
call[name[logger].warning, parameter[binary_operation[constant[%s has no genes available for analysis. Ignoring the group.] <ast.Mod object at 0x7da2590d6920> name[group]]]]
continue
if call[name[all], parameter[<ast.ListComp object at 0x7da1b287aec0>]] begin[:]
call[name[logger].warning, parameter[binary_operation[constant[%s has no genes with enough neighbors. Ignoring the group.] <ast.Mod object at 0x7da2590d6920> name[group]]]]
continue
variable[normal_to_tumor_njsd] assign[=] call[name[entropy].njsd, parameter[]]
variable[tumor_to_ambiguous_njsd] assign[=] call[name[entropy].njsd, parameter[]]
variable[tITH] assign[=] binary_operation[name[normal_to_tumor_njsd] / binary_operation[name[normal_to_tumor_njsd] + name[tumor_to_ambiguous_njsd]]]
with call[name[open], parameter[name[file], constant[a]]] begin[:]
call[name[print], parameter[name[group], name[normal_to_tumor_njsd], name[tumor_to_ambiguous_njsd], name[tITH]]] | keyword[def] identifier[njsd_geneset] ( identifier[network] , identifier[ref] , identifier[query] , identifier[gene_set] , identifier[file] , identifier[verbose] = keyword[True] ):
literal[string]
identifier[graph] , identifier[gene_set_total] = identifier[util] . identifier[parse_network] ( identifier[network] )
identifier[ref_gene_expression_dict] = identifier[util] . identifier[parse_gene_expression] ( identifier[ref] , identifier[mean] = keyword[True] )
identifier[query_gene_expression_dict] = identifier[util] . identifier[parse_gene_expression] ( identifier[query] , identifier[mean] = keyword[False] )
identifier[group_gene_set_dict] = identifier[util] . identifier[parse_gene_set] ( identifier[gene_set] )
identifier[maximally_ambiguous_gene_experession_dict] = identifier[util] . identifier[get_maximally_ambiguous_network] ( identifier[query_gene_expression_dict] )
identifier[gene_set_present] = identifier[set] ( identifier[query_gene_expression_dict] . identifier[keys] ())
keyword[with] identifier[open] ( identifier[file] , literal[string] ) keyword[as] identifier[outFile] :
identifier[print] ( literal[string] , literal[string] , literal[string] , literal[string] , identifier[sep] = literal[string] , identifier[file] = identifier[outFile] )
keyword[for] identifier[group] , identifier[gene_set] keyword[in] identifier[group_gene_set_dict] . identifier[items] ():
identifier[gene_set_to_be_analyzed] = identifier[gene_set] . identifier[intersection] ( identifier[gene_set_present] )
keyword[if] identifier[len] ( identifier[gene_set_to_be_analyzed] )== literal[int] :
identifier[logger] . identifier[warning] ( literal[string] % identifier[group] )
keyword[continue]
keyword[if] identifier[all] ([ identifier[graph] . identifier[degree] ( identifier[gene] )== literal[int] keyword[for] identifier[gene] keyword[in] identifier[gene_set_to_be_analyzed] ]):
identifier[logger] . identifier[warning] ( literal[string] % identifier[group] )
keyword[continue]
identifier[normal_to_tumor_njsd] = identifier[entropy] . identifier[njsd] ( identifier[network] = identifier[graph] ,
identifier[ref_gene_expression_dict] = identifier[ref_gene_expression_dict] ,
identifier[query_gene_expression_dict] = identifier[query_gene_expression_dict] ,
identifier[gene_set] = identifier[gene_set] )
identifier[tumor_to_ambiguous_njsd] = identifier[entropy] . identifier[njsd] ( identifier[network] = identifier[graph] ,
identifier[ref_gene_expression_dict] = identifier[maximally_ambiguous_gene_experession_dict] ,
identifier[query_gene_expression_dict] = identifier[query_gene_expression_dict] ,
identifier[gene_set] = identifier[gene_set] )
identifier[tITH] = identifier[normal_to_tumor_njsd] /( identifier[normal_to_tumor_njsd] + identifier[tumor_to_ambiguous_njsd] )
keyword[with] identifier[open] ( identifier[file] , literal[string] ) keyword[as] identifier[outFile] :
identifier[print] ( identifier[group] , identifier[normal_to_tumor_njsd] , identifier[tumor_to_ambiguous_njsd] , identifier[tITH] , identifier[sep] = literal[string] , identifier[file] = identifier[outFile] ) | def njsd_geneset(network, ref, query, gene_set, file, verbose=True):
"""Compute gene set-specified nJSD between reference and query expression profiles.
Attributes:
network (str): File path to a network file.
ref (str): File path to a reference expression file.
query (str): File path to a query expression file.
geneset (str): File path to a gene set file.
"""
(graph, gene_set_total) = util.parse_network(network)
ref_gene_expression_dict = util.parse_gene_expression(ref, mean=True)
query_gene_expression_dict = util.parse_gene_expression(query, mean=False)
group_gene_set_dict = util.parse_gene_set(gene_set)
maximally_ambiguous_gene_experession_dict = util.get_maximally_ambiguous_network(query_gene_expression_dict)
gene_set_present = set(query_gene_expression_dict.keys())
with open(file, 'w') as outFile:
print('Gene_set_ID', 'nJSD_NT', 'nJSD_TA', 'tITH', sep='\t', file=outFile) # depends on [control=['with'], data=['outFile']]
for (group, gene_set) in group_gene_set_dict.items():
gene_set_to_be_analyzed = gene_set.intersection(gene_set_present)
# If no genes are available for the group, just ignore it.
if len(gene_set_to_be_analyzed) == 0:
logger.warning('%s has no genes available for analysis. Ignoring the group.' % group)
continue # depends on [control=['if'], data=[]]
# If every gene has a single neighbor, just ignore it.
if all([graph.degree(gene) == 1 for gene in gene_set_to_be_analyzed]):
logger.warning('%s has no genes with enough neighbors. Ignoring the group.' % group)
continue # depends on [control=['if'], data=[]]
normal_to_tumor_njsd = entropy.njsd(network=graph, ref_gene_expression_dict=ref_gene_expression_dict, query_gene_expression_dict=query_gene_expression_dict, gene_set=gene_set)
tumor_to_ambiguous_njsd = entropy.njsd(network=graph, ref_gene_expression_dict=maximally_ambiguous_gene_experession_dict, query_gene_expression_dict=query_gene_expression_dict, gene_set=gene_set)
tITH = normal_to_tumor_njsd / (normal_to_tumor_njsd + tumor_to_ambiguous_njsd)
with open(file, 'a') as outFile:
print(group, normal_to_tumor_njsd, tumor_to_ambiguous_njsd, tITH, sep='\t', file=outFile) # depends on [control=['with'], data=['outFile']] # depends on [control=['for'], data=[]] |
def setSpecialPrice(self, product, special_price=None,
from_date=None, to_date=None, store_view=None,
identifierType=None):
"""
Update product's special price
:param product: ID or SKU of product
:param special_price: Special Price
:param from_date: From date
:param to_date: To Date
:param store_view: ID or Code of Store View
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: Boolean
"""
return bool(self.call(
'catalog_product.setSpecialPrice', [
product, special_price, from_date, to_date, store_view,
identifierType
]
)) | def function[setSpecialPrice, parameter[self, product, special_price, from_date, to_date, store_view, identifierType]]:
constant[
Update product's special price
:param product: ID or SKU of product
:param special_price: Special Price
:param from_date: From date
:param to_date: To Date
:param store_view: ID or Code of Store View
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: Boolean
]
return[call[name[bool], parameter[call[name[self].call, parameter[constant[catalog_product.setSpecialPrice], list[[<ast.Name object at 0x7da1b04f5a50>, <ast.Name object at 0x7da1b04f4b80>, <ast.Name object at 0x7da1b04f4310>, <ast.Name object at 0x7da1b04f6110>, <ast.Name object at 0x7da1b04f5db0>, <ast.Name object at 0x7da1b04f4760>]]]]]]] | keyword[def] identifier[setSpecialPrice] ( identifier[self] , identifier[product] , identifier[special_price] = keyword[None] ,
identifier[from_date] = keyword[None] , identifier[to_date] = keyword[None] , identifier[store_view] = keyword[None] ,
identifier[identifierType] = keyword[None] ):
literal[string]
keyword[return] identifier[bool] ( identifier[self] . identifier[call] (
literal[string] ,[
identifier[product] , identifier[special_price] , identifier[from_date] , identifier[to_date] , identifier[store_view] ,
identifier[identifierType]
]
)) | def setSpecialPrice(self, product, special_price=None, from_date=None, to_date=None, store_view=None, identifierType=None):
"""
Update product's special price
:param product: ID or SKU of product
:param special_price: Special Price
:param from_date: From date
:param to_date: To Date
:param store_view: ID or Code of Store View
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: Boolean
"""
return bool(self.call('catalog_product.setSpecialPrice', [product, special_price, from_date, to_date, store_view, identifierType])) |
def delete_user_by_email(self, id, email):
"""Deletes a specified connection user by its email.
Args:
id (str): The id of the connection (must be a database connection).
email (str): The email of the user to delete.
See: https://auth0.com/docs/api/management/v2#!/Connections/delete_users_by_email
Returns:
An empty dict.
"""
return self.client.delete(self._url(id) + '/users', params={'email': email}) | def function[delete_user_by_email, parameter[self, id, email]]:
constant[Deletes a specified connection user by its email.
Args:
id (str): The id of the connection (must be a database connection).
email (str): The email of the user to delete.
See: https://auth0.com/docs/api/management/v2#!/Connections/delete_users_by_email
Returns:
An empty dict.
]
return[call[name[self].client.delete, parameter[binary_operation[call[name[self]._url, parameter[name[id]]] + constant[/users]]]]] | keyword[def] identifier[delete_user_by_email] ( identifier[self] , identifier[id] , identifier[email] ):
literal[string]
keyword[return] identifier[self] . identifier[client] . identifier[delete] ( identifier[self] . identifier[_url] ( identifier[id] )+ literal[string] , identifier[params] ={ literal[string] : identifier[email] }) | def delete_user_by_email(self, id, email):
"""Deletes a specified connection user by its email.
Args:
id (str): The id of the connection (must be a database connection).
email (str): The email of the user to delete.
See: https://auth0.com/docs/api/management/v2#!/Connections/delete_users_by_email
Returns:
An empty dict.
"""
return self.client.delete(self._url(id) + '/users', params={'email': email}) |
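If this method is the one exposed by the auth0-python management SDK (an assumption based on the endpoint it calls), a usage sketch might look like the following; the domain, token, and connection id are placeholders, not values from the source.
from auth0.v3.management import Auth0

auth0 = Auth0('my-tenant.auth0.com', 'MGMT_API_TOKEN')          # hypothetical domain and management token
auth0.connections.delete_user_by_email('con_0000000000000001',  # hypothetical database connection id
                                       'user@example.com')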
def _fix_outgoing(self, son, collection):
"""Apply manipulators to a SON object as it comes out of the database.
:Parameters:
- `son`: the son object coming out of the database
- `collection`: the collection the son object was saved in
"""
for manipulator in reversed(self.__outgoing_manipulators):
son = manipulator.transform_outgoing(son, collection)
for manipulator in reversed(self.__outgoing_copying_manipulators):
son = manipulator.transform_outgoing(son, collection)
return son | def function[_fix_outgoing, parameter[self, son, collection]]:
constant[Apply manipulators to a SON object as it comes out of the database.
:Parameters:
- `son`: the son object coming out of the database
- `collection`: the collection the son object was saved in
]
for taget[name[manipulator]] in starred[call[name[reversed], parameter[name[self].__outgoing_manipulators]]] begin[:]
variable[son] assign[=] call[name[manipulator].transform_outgoing, parameter[name[son], name[collection]]]
for taget[name[manipulator]] in starred[call[name[reversed], parameter[name[self].__outgoing_copying_manipulators]]] begin[:]
variable[son] assign[=] call[name[manipulator].transform_outgoing, parameter[name[son], name[collection]]]
return[name[son]] | keyword[def] identifier[_fix_outgoing] ( identifier[self] , identifier[son] , identifier[collection] ):
literal[string]
keyword[for] identifier[manipulator] keyword[in] identifier[reversed] ( identifier[self] . identifier[__outgoing_manipulators] ):
identifier[son] = identifier[manipulator] . identifier[transform_outgoing] ( identifier[son] , identifier[collection] )
keyword[for] identifier[manipulator] keyword[in] identifier[reversed] ( identifier[self] . identifier[__outgoing_copying_manipulators] ):
identifier[son] = identifier[manipulator] . identifier[transform_outgoing] ( identifier[son] , identifier[collection] )
keyword[return] identifier[son] | def _fix_outgoing(self, son, collection):
"""Apply manipulators to a SON object as it comes out of the database.
:Parameters:
- `son`: the son object coming out of the database
- `collection`: the collection the son object was saved in
"""
for manipulator in reversed(self.__outgoing_manipulators):
son = manipulator.transform_outgoing(son, collection) # depends on [control=['for'], data=['manipulator']]
for manipulator in reversed(self.__outgoing_copying_manipulators):
son = manipulator.transform_outgoing(son, collection) # depends on [control=['for'], data=['manipulator']]
return son |
def update_persistent_boot(self, device_type=[]):
"""Changes the persistent boot device order for the host
:param device_type: ordered list of boot devices
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
# Check if the input is valid
for item in device_type:
if item.upper() not in DEVICE_COMMON_TO_RIS:
raise exception.IloInvalidInputError("Invalid input. Valid "
"devices: NETWORK, HDD,"
" ISCSI or CDROM.")
self._update_persistent_boot(device_type, persistent=True) | def function[update_persistent_boot, parameter[self, device_type]]:
constant[Changes the persistent boot device order for the host
:param device_type: ordered list of boot devices
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
]
for taget[name[item]] in starred[name[device_type]] begin[:]
if compare[call[name[item].upper, parameter[]] <ast.NotIn object at 0x7da2590d7190> name[DEVICE_COMMON_TO_RIS]] begin[:]
<ast.Raise object at 0x7da20c993ca0>
call[name[self]._update_persistent_boot, parameter[name[device_type]]] | keyword[def] identifier[update_persistent_boot] ( identifier[self] , identifier[device_type] =[]):
literal[string]
keyword[for] identifier[item] keyword[in] identifier[device_type] :
keyword[if] identifier[item] . identifier[upper] () keyword[not] keyword[in] identifier[DEVICE_COMMON_TO_RIS] :
keyword[raise] identifier[exception] . identifier[IloInvalidInputError] ( literal[string]
literal[string]
literal[string] )
identifier[self] . identifier[_update_persistent_boot] ( identifier[device_type] , identifier[persistent] = keyword[True] ) | def update_persistent_boot(self, device_type=[]):
"""Changes the persistent boot device order for the host
:param device_type: ordered list of boot devices
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
# Check if the input is valid
for item in device_type:
if item.upper() not in DEVICE_COMMON_TO_RIS:
raise exception.IloInvalidInputError('Invalid input. Valid devices: NETWORK, HDD, ISCSI or CDROM.') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
self._update_persistent_boot(device_type, persistent=True) |