code | code_sememe | token_type | code_dependency
---|---|---|---
def getLTime():
"""Returns a formatted string with the current local time."""
_ltime = _time.localtime(_time.time())
tlm_str = _time.strftime('%H:%M:%S (%d/%m/%Y)', _ltime)
return tlm_str | def function[getLTime, parameter[]]:
constant[Returns a formatted string with the current local time.]
variable[_ltime] assign[=] call[name[_time].localtime, parameter[call[name[_time].time, parameter[]]]]
variable[tlm_str] assign[=] call[name[_time].strftime, parameter[constant[%H:%M:%S (%d/%m/%Y)], name[_ltime]]]
return[name[tlm_str]] | keyword[def] identifier[getLTime] ():
literal[string]
identifier[_ltime] = identifier[_time] . identifier[localtime] ( identifier[_time] . identifier[time] ())
identifier[tlm_str] = identifier[_time] . identifier[strftime] ( literal[string] , identifier[_ltime] )
keyword[return] identifier[tlm_str] | def getLTime():
"""Returns a formatted string with the current local time."""
_ltime = _time.localtime(_time.time())
tlm_str = _time.strftime('%H:%M:%S (%d/%m/%Y)', _ltime)
return tlm_str |
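
A quick illustration of the output format, assuming `getLTime` from the row above is in scope and that `_time` is simply the standard-library `time` module imported under that alias:

```python
import time as _time  # assumption: the source module imports time under this alias

print(getLTime())  # e.g. '14:05:33 (27/03/2024)' -- time first, then day/month/year
```
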
def _next_id(self):
"""
Return the next available slide ID as an int. Valid slide IDs start
at 256. The next integer value greater than the max value in use is
    chosen, which minimizes the chance of reusing the id of a deleted
slide.
"""
id_str_lst = self.xpath('./p:sldId/@id')
return max([255]+[int(id_str) for id_str in id_str_lst])+1 | def function[_next_id, parameter[self]]:
constant[
Return the next available slide ID as an int. Valid slide IDs start
at 256. The next integer value greater than the max value in use is
    chosen, which minimizes the chance of reusing the id of a deleted
slide.
]
variable[id_str_lst] assign[=] call[name[self].xpath, parameter[constant[./p:sldId/@id]]]
return[binary_operation[call[name[max], parameter[binary_operation[list[[<ast.Constant object at 0x7da20c76fc10>]] + <ast.ListComp object at 0x7da20c76f5e0>]]] + constant[1]]] | keyword[def] identifier[_next_id] ( identifier[self] ):
literal[string]
identifier[id_str_lst] = identifier[self] . identifier[xpath] ( literal[string] )
keyword[return] identifier[max] ([ literal[int] ]+[ identifier[int] ( identifier[id_str] ) keyword[for] identifier[id_str] keyword[in] identifier[id_str_lst] ])+ literal[int] | def _next_id(self):
"""
Return the next available slide ID as an int. Valid slide IDs start
at 256. The next integer value greater than the max value in use is
    chosen, which minimizes the chance of reusing the id of a deleted
slide.
"""
id_str_lst = self.xpath('./p:sldId/@id')
return max([255] + [int(id_str) for id_str in id_str_lst]) + 1 |
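
The seeding trick inside the `max()` call is worth spelling out; here is a minimal standalone sketch of the same allocation rule, with the xpath result replaced by a plain list of ID strings:

```python
def next_slide_id(ids_in_use):
    # Seeding max() with 255 means an empty list still yields 255 + 1 == 256,
    # the smallest valid slide ID; otherwise the result is one past the max.
    return max([255] + [int(id_str) for id_str in ids_in_use]) + 1

assert next_slide_id([]) == 256
assert next_slide_id(['256', '257']) == 258
assert next_slide_id(['256', '300']) == 301  # IDs of deleted slides stay retired
```
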
def _execute(self,
command, # type: str
args, # type: List[str]
env_vars=None, # type: EnvVars
shim=None # type: OptStr
):
# type: (...) -> Tuple[int, bytes, bytes]
"""Execute a pip command with the given arguments."""
main_args = [command] + args
logger.debug("calling pip %s", ' '.join(main_args))
rc, out, err = self._wrapped_pip.main(main_args, env_vars=env_vars,
shim=shim)
return rc, out, err | def function[_execute, parameter[self, command, args, env_vars, shim]]:
constant[Execute a pip command with the given arguments.]
variable[main_args] assign[=] binary_operation[list[[<ast.Name object at 0x7da2054a4d90>]] + name[args]]
call[name[logger].debug, parameter[constant[calling pip %s], call[constant[ ].join, parameter[name[main_args]]]]]
<ast.Tuple object at 0x7da2054a6140> assign[=] call[name[self]._wrapped_pip.main, parameter[name[main_args]]]
return[tuple[[<ast.Name object at 0x7da2054a4280>, <ast.Name object at 0x7da2054a5ed0>, <ast.Name object at 0x7da2054a5de0>]]] | keyword[def] identifier[_execute] ( identifier[self] ,
identifier[command] ,
identifier[args] ,
identifier[env_vars] = keyword[None] ,
identifier[shim] = keyword[None]
):
literal[string]
identifier[main_args] =[ identifier[command] ]+ identifier[args]
identifier[logger] . identifier[debug] ( literal[string] , literal[string] . identifier[join] ( identifier[main_args] ))
identifier[rc] , identifier[out] , identifier[err] = identifier[self] . identifier[_wrapped_pip] . identifier[main] ( identifier[main_args] , identifier[env_vars] = identifier[env_vars] ,
identifier[shim] = identifier[shim] )
keyword[return] identifier[rc] , identifier[out] , identifier[err] | def _execute(self, command, args, env_vars=None, shim=None): # type: str
# type: List[str]
# type: EnvVars
# type: OptStr
# type: (...) -> Tuple[int, bytes, bytes]
'Execute a pip command with the given arguments.'
main_args = [command] + args
logger.debug('calling pip %s', ' '.join(main_args))
(rc, out, err) = self._wrapped_pip.main(main_args, env_vars=env_vars, shim=shim)
return (rc, out, err) |
def del_stream(self, bucket, label):
'''Delete a bitstream. This needs more testing - file deletion in a zipfile
    is problematic. An alternative method is to create a second zipfile without the files
in question, which is not a nice method for large zip archives.
'''
if self.exists(bucket, label):
name = self._zf(bucket, label)
#z = ZipFile(self.zipfile, self.mode, self.compression, self.allowZip64)
self._del_stream(name) | def function[del_stream, parameter[self, bucket, label]]:
constant[Delete a bitstream. This needs more testing - file deletion in a zipfile
    is problematic. An alternative method is to create a second zipfile without the files
in question, which is not a nice method for large zip archives.
]
if call[name[self].exists, parameter[name[bucket], name[label]]] begin[:]
variable[name] assign[=] call[name[self]._zf, parameter[name[bucket], name[label]]]
call[name[self]._del_stream, parameter[name[name]]] | keyword[def] identifier[del_stream] ( identifier[self] , identifier[bucket] , identifier[label] ):
literal[string]
keyword[if] identifier[self] . identifier[exists] ( identifier[bucket] , identifier[label] ):
identifier[name] = identifier[self] . identifier[_zf] ( identifier[bucket] , identifier[label] )
identifier[self] . identifier[_del_stream] ( identifier[name] ) | def del_stream(self, bucket, label):
"""Delete a bitstream. This needs more testing - file deletion in a zipfile
    is problematic. An alternative method is to create a second zipfile without the files
in question, which is not a nice method for large zip archives.
"""
if self.exists(bucket, label):
name = self._zf(bucket, label)
#z = ZipFile(self.zipfile, self.mode, self.compression, self.allowZip64)
self._del_stream(name) # depends on [control=['if'], data=[]] |
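
The "second zipfile" workaround that the docstring alludes to can be sketched as a standalone helper; this is an illustration of that alternative, not the class's actual `_del_stream`:

```python
import os
import shutil
import tempfile
from zipfile import ZipFile

def rewrite_zip_without(path, names_to_drop):
    """Copy every entry except names_to_drop into a fresh archive and
    swap it in place -- O(archive size), hence costly for large zips."""
    fd, tmp_path = tempfile.mkstemp(suffix='.zip')
    os.close(fd)
    with ZipFile(path) as src, ZipFile(tmp_path, 'w') as dst:
        for item in src.infolist():
            if item.filename not in names_to_drop:
                # writestr with a ZipInfo preserves metadata like compress_type
                dst.writestr(item, src.read(item.filename))
    shutil.move(tmp_path, path)
```
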
def calc_mdl(yx_dist, y_dist):
"""
Function calculates mdl with given label distributions.
yx_dist: list of dictionaries - for every split it contains a dictionary with label distributions
y_dist: dictionary - all label distributions
Reference:
Igor Kononenko. On biases in estimating multi-valued attributes. In IJCAI, volume 95, pages 1034-1040, 1995.
"""
prior = multinomLog2(y_dist.values())
prior += multinomLog2([len(y_dist.keys()) - 1, sum(y_dist.values())])
post = 0
for x_val in yx_dist:
post += multinomLog2([x_val.get(c, 0) for c in y_dist.keys()])
post += multinomLog2([len(y_dist.keys()) - 1, sum(x_val.values())])
return (prior - post) / float(sum(y_dist.values())) | def function[calc_mdl, parameter[yx_dist, y_dist]]:
constant[
Function calculates mdl with given label distributions.
yx_dist: list of dictionaries - for every split it contains a dictionary with label distributions
y_dist: dictionary - all label distributions
Reference:
Igor Kononenko. On biases in estimating multi-valued attributes. In IJCAI, volume 95, pages 1034-1040, 1995.
]
variable[prior] assign[=] call[name[multinomLog2], parameter[call[name[y_dist].values, parameter[]]]]
<ast.AugAssign object at 0x7da1b207eb90>
variable[post] assign[=] constant[0]
for taget[name[x_val]] in starred[name[yx_dist]] begin[:]
<ast.AugAssign object at 0x7da1b207c6a0>
<ast.AugAssign object at 0x7da1b207c9a0>
return[binary_operation[binary_operation[name[prior] - name[post]] / call[name[float], parameter[call[name[sum], parameter[call[name[y_dist].values, parameter[]]]]]]]] | keyword[def] identifier[calc_mdl] ( identifier[yx_dist] , identifier[y_dist] ):
literal[string]
identifier[prior] = identifier[multinomLog2] ( identifier[y_dist] . identifier[values] ())
identifier[prior] += identifier[multinomLog2] ([ identifier[len] ( identifier[y_dist] . identifier[keys] ())- literal[int] , identifier[sum] ( identifier[y_dist] . identifier[values] ())])
identifier[post] = literal[int]
keyword[for] identifier[x_val] keyword[in] identifier[yx_dist] :
identifier[post] += identifier[multinomLog2] ([ identifier[x_val] . identifier[get] ( identifier[c] , literal[int] ) keyword[for] identifier[c] keyword[in] identifier[y_dist] . identifier[keys] ()])
identifier[post] += identifier[multinomLog2] ([ identifier[len] ( identifier[y_dist] . identifier[keys] ())- literal[int] , identifier[sum] ( identifier[x_val] . identifier[values] ())])
keyword[return] ( identifier[prior] - identifier[post] )/ identifier[float] ( identifier[sum] ( identifier[y_dist] . identifier[values] ())) | def calc_mdl(yx_dist, y_dist):
"""
Function calculates mdl with given label distributions.
yx_dist: list of dictionaries - for every split it contains a dictionary with label distributions
y_dist: dictionary - all label distributions
Reference:
Igor Kononenko. On biases in estimating multi-valued attributes. In IJCAI, volume 95, pages 1034-1040, 1995.
"""
prior = multinomLog2(y_dist.values())
prior += multinomLog2([len(y_dist.keys()) - 1, sum(y_dist.values())])
post = 0
for x_val in yx_dist:
post += multinomLog2([x_val.get(c, 0) for c in y_dist.keys()])
post += multinomLog2([len(y_dist.keys()) - 1, sum(x_val.values())]) # depends on [control=['for'], data=['x_val']]
return (prior - post) / float(sum(y_dist.values())) |
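
The function leans on a `multinomLog2` helper that is not shown in this row. Below is a self-contained sketch under the assumption that it computes log2 of the multinomial coefficient over the given counts:

```python
from math import lgamma, log

def multinomLog2(selectors):
    # log2 of (sum n_i)! / prod(n_i!), via lgamma to avoid huge integers:
    # lgamma(n + 1) == ln(n!)
    ln2 = log(2)
    counts = list(selectors)
    result = lgamma(sum(counts) + 1) / ln2
    for n in counts:
        result -= lgamma(n + 1) / ln2
    return result

# With calc_mdl from above in scope: two splits of a binary-labelled sample.
print(calc_mdl([{'a': 3, 'b': 1}, {'a': 1, 'b': 3}], {'a': 4, 'b': 4}))
```
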
def record_modify_subfield(rec, tag, subfield_code, value, subfield_position,
field_position_global=None,
field_position_local=None):
"""Modify subfield at specified position.
Specify the subfield by tag, field number and subfield position.
"""
subfields = record_get_subfields(
rec, tag,
field_position_global=field_position_global,
field_position_local=field_position_local)
try:
subfields[subfield_position] = (subfield_code, value)
except IndexError:
raise InvenioBibRecordFieldError(
"There is no subfield with position '%d'." % subfield_position) | def function[record_modify_subfield, parameter[rec, tag, subfield_code, value, subfield_position, field_position_global, field_position_local]]:
constant[Modify subfield at specified position.
Specify the subfield by tag, field number and subfield position.
]
variable[subfields] assign[=] call[name[record_get_subfields], parameter[name[rec], name[tag]]]
<ast.Try object at 0x7da2054a57e0> | keyword[def] identifier[record_modify_subfield] ( identifier[rec] , identifier[tag] , identifier[subfield_code] , identifier[value] , identifier[subfield_position] ,
identifier[field_position_global] = keyword[None] ,
identifier[field_position_local] = keyword[None] ):
literal[string]
identifier[subfields] = identifier[record_get_subfields] (
identifier[rec] , identifier[tag] ,
identifier[field_position_global] = identifier[field_position_global] ,
identifier[field_position_local] = identifier[field_position_local] )
keyword[try] :
identifier[subfields] [ identifier[subfield_position] ]=( identifier[subfield_code] , identifier[value] )
keyword[except] identifier[IndexError] :
keyword[raise] identifier[InvenioBibRecordFieldError] (
literal[string] % identifier[subfield_position] ) | def record_modify_subfield(rec, tag, subfield_code, value, subfield_position, field_position_global=None, field_position_local=None):
"""Modify subfield at specified position.
Specify the subfield by tag, field number and subfield position.
"""
subfields = record_get_subfields(rec, tag, field_position_global=field_position_global, field_position_local=field_position_local)
try:
subfields[subfield_position] = (subfield_code, value) # depends on [control=['try'], data=[]]
except IndexError:
raise InvenioBibRecordFieldError("There is no subfield with position '%d'." % subfield_position) # depends on [control=['except'], data=[]] |
def get_program_python(cmd):
"""Get the full path to a python version linked to the command.
Allows finding python based programs in python 2 versus python 3
environments.
"""
full_cmd = os.path.realpath(which(cmd))
cmd_python = os.path.join(os.path.dirname(full_cmd), "python")
env_python = None
if "envs" in cmd_python:
parts = cmd_python.split(os.sep)
env_python = os.path.join(os.sep.join(parts[:parts.index("envs") + 2]), "bin", "python")
if os.path.exists(cmd_python):
return cmd_python
elif env_python and os.path.exists(env_python):
return env_python
else:
return os.path.realpath(sys.executable) | def function[get_program_python, parameter[cmd]]:
constant[Get the full path to a python version linked to the command.
Allows finding python based programs in python 2 versus python 3
environments.
]
variable[full_cmd] assign[=] call[name[os].path.realpath, parameter[call[name[which], parameter[name[cmd]]]]]
variable[cmd_python] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[full_cmd]]], constant[python]]]
variable[env_python] assign[=] constant[None]
if compare[constant[envs] in name[cmd_python]] begin[:]
variable[parts] assign[=] call[name[cmd_python].split, parameter[name[os].sep]]
variable[env_python] assign[=] call[name[os].path.join, parameter[call[name[os].sep.join, parameter[call[name[parts]][<ast.Slice object at 0x7da1b19bbd00>]]], constant[bin], constant[python]]]
if call[name[os].path.exists, parameter[name[cmd_python]]] begin[:]
return[name[cmd_python]] | keyword[def] identifier[get_program_python] ( identifier[cmd] ):
literal[string]
identifier[full_cmd] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[which] ( identifier[cmd] ))
identifier[cmd_python] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[full_cmd] ), literal[string] )
identifier[env_python] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[cmd_python] :
identifier[parts] = identifier[cmd_python] . identifier[split] ( identifier[os] . identifier[sep] )
identifier[env_python] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[sep] . identifier[join] ( identifier[parts] [: identifier[parts] . identifier[index] ( literal[string] )+ literal[int] ]), literal[string] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[cmd_python] ):
keyword[return] identifier[cmd_python]
keyword[elif] identifier[env_python] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[env_python] ):
keyword[return] identifier[env_python]
keyword[else] :
keyword[return] identifier[os] . identifier[path] . identifier[realpath] ( identifier[sys] . identifier[executable] ) | def get_program_python(cmd):
"""Get the full path to a python version linked to the command.
Allows finding python based programs in python 2 versus python 3
environments.
"""
full_cmd = os.path.realpath(which(cmd))
cmd_python = os.path.join(os.path.dirname(full_cmd), 'python')
env_python = None
if 'envs' in cmd_python:
parts = cmd_python.split(os.sep)
env_python = os.path.join(os.sep.join(parts[:parts.index('envs') + 2]), 'bin', 'python') # depends on [control=['if'], data=['cmd_python']]
if os.path.exists(cmd_python):
return cmd_python # depends on [control=['if'], data=[]]
elif env_python and os.path.exists(env_python):
return env_python # depends on [control=['if'], data=[]]
else:
return os.path.realpath(sys.executable) |
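
The slicing in the `envs` branch is the subtle part; unpacked on a concrete conda-style path (POSIX separators assumed):

```python
import os

cmd_python = '/opt/miniconda3/envs/bio/share/tool/python'
parts = cmd_python.split(os.sep)
# parts[:index('envs') + 2] keeps everything up to and including the env name
env_python = os.path.join(os.sep.join(parts[:parts.index('envs') + 2]),
                          'bin', 'python')
print(env_python)  # /opt/miniconda3/envs/bio/bin/python
```
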
def fso_readlink(self, path):
'overlays os.readlink()'
path = self.deref(path, to_parent=True)
st = self.fso_lstat(path)
if not stat.S_ISLNK(st.st_mode):
raise OSError(22, 'Invalid argument', path)
if st.st_overlay:
return self.entries[path].content
return self.originals['os:readlink'](path) | def function[fso_readlink, parameter[self, path]]:
constant[overlays os.readlink()]
variable[path] assign[=] call[name[self].deref, parameter[name[path]]]
variable[st] assign[=] call[name[self].fso_lstat, parameter[name[path]]]
if <ast.UnaryOp object at 0x7da1b004be80> begin[:]
<ast.Raise object at 0x7da1b00491b0>
if name[st].st_overlay begin[:]
return[call[name[self].entries][name[path]].content]
return[call[call[name[self].originals][constant[os:readlink]], parameter[name[path]]]] | keyword[def] identifier[fso_readlink] ( identifier[self] , identifier[path] ):
literal[string]
identifier[path] = identifier[self] . identifier[deref] ( identifier[path] , identifier[to_parent] = keyword[True] )
identifier[st] = identifier[self] . identifier[fso_lstat] ( identifier[path] )
keyword[if] keyword[not] identifier[stat] . identifier[S_ISLNK] ( identifier[st] . identifier[st_mode] ):
keyword[raise] identifier[OSError] ( literal[int] , literal[string] , identifier[path] )
keyword[if] identifier[st] . identifier[st_overlay] :
keyword[return] identifier[self] . identifier[entries] [ identifier[path] ]. identifier[content]
keyword[return] identifier[self] . identifier[originals] [ literal[string] ]( identifier[path] ) | def fso_readlink(self, path):
"""overlays os.readlink()"""
path = self.deref(path, to_parent=True)
st = self.fso_lstat(path)
if not stat.S_ISLNK(st.st_mode):
raise OSError(22, 'Invalid argument', path) # depends on [control=['if'], data=[]]
if st.st_overlay:
return self.entries[path].content # depends on [control=['if'], data=[]]
return self.originals['os:readlink'](path) |
def high(data, test=None, queue=False, **kwargs):
'''
Execute the compound calls stored in a single set of high data
This function is mostly intended for testing the state system and is not
likely to be needed in everyday usage.
CLI Example:
.. code-block:: bash
salt '*' state.high '{"vim": {"pkg": ["installed"]}}'
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts['test'] = _get_test_value(test, **kwargs)
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
try:
st_ = salt.state.State(opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
context=__context__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.State(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
ret = st_.call_high(data)
_set_retcode(ret, highstate=data)
return ret | def function[high, parameter[data, test, queue]]:
constant[
Execute the compound calls stored in a single set of high data
This function is mostly intended for testing the state system and is not
likely to be needed in everyday usage.
CLI Example:
.. code-block:: bash
salt '*' state.high '{"vim": {"pkg": ["installed"]}}'
]
variable[conflict] assign[=] call[name[_check_queue], parameter[name[queue], name[kwargs]]]
if compare[name[conflict] is_not constant[None]] begin[:]
return[name[conflict]]
variable[opts] assign[=] call[name[salt].utils.state.get_sls_opts, parameter[name[__opts__]]]
call[name[opts]][constant[test]] assign[=] call[name[_get_test_value], parameter[name[test]]]
variable[pillar_override] assign[=] call[name[kwargs].get, parameter[constant[pillar]]]
variable[pillar_enc] assign[=] call[name[kwargs].get, parameter[constant[pillar_enc]]]
if <ast.BoolOp object at 0x7da1b210be80> begin[:]
<ast.Raise object at 0x7da1b210a1a0>
<ast.Try object at 0x7da1b21096f0>
variable[ret] assign[=] call[name[st_].call_high, parameter[name[data]]]
call[name[_set_retcode], parameter[name[ret]]]
return[name[ret]] | keyword[def] identifier[high] ( identifier[data] , identifier[test] = keyword[None] , identifier[queue] = keyword[False] ,** identifier[kwargs] ):
literal[string]
identifier[conflict] = identifier[_check_queue] ( identifier[queue] , identifier[kwargs] )
keyword[if] identifier[conflict] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[conflict]
identifier[opts] = identifier[salt] . identifier[utils] . identifier[state] . identifier[get_sls_opts] ( identifier[__opts__] ,** identifier[kwargs] )
identifier[opts] [ literal[string] ]= identifier[_get_test_value] ( identifier[test] ,** identifier[kwargs] )
identifier[pillar_override] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[pillar_enc] = identifier[kwargs] . identifier[get] ( literal[string] )
keyword[if] identifier[pillar_enc] keyword[is] keyword[None] keyword[and] identifier[pillar_override] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[pillar_override] , identifier[dict] ):
keyword[raise] identifier[SaltInvocationError] (
literal[string]
literal[string]
)
keyword[try] :
identifier[st_] = identifier[salt] . identifier[state] . identifier[State] ( identifier[opts] ,
identifier[pillar_override] ,
identifier[pillar_enc] = identifier[pillar_enc] ,
identifier[proxy] = identifier[__proxy__] ,
identifier[context] = identifier[__context__] ,
identifier[initial_pillar] = identifier[_get_initial_pillar] ( identifier[opts] ))
keyword[except] identifier[NameError] :
identifier[st_] = identifier[salt] . identifier[state] . identifier[State] ( identifier[opts] ,
identifier[pillar_override] ,
identifier[pillar_enc] = identifier[pillar_enc] ,
identifier[initial_pillar] = identifier[_get_initial_pillar] ( identifier[opts] ))
identifier[ret] = identifier[st_] . identifier[call_high] ( identifier[data] )
identifier[_set_retcode] ( identifier[ret] , identifier[highstate] = identifier[data] )
keyword[return] identifier[ret] | def high(data, test=None, queue=False, **kwargs):
"""
Execute the compound calls stored in a single set of high data
This function is mostly intended for testing the state system and is not
likely to be needed in everyday usage.
CLI Example:
.. code-block:: bash
salt '*' state.high '{"vim": {"pkg": ["installed"]}}'
"""
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict # depends on [control=['if'], data=['conflict']]
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts['test'] = _get_test_value(test, **kwargs)
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None and pillar_override is not None and (not isinstance(pillar_override, dict)):
raise SaltInvocationError('Pillar data must be formatted as a dictionary, unless pillar_enc is specified.') # depends on [control=['if'], data=[]]
try:
st_ = salt.state.State(opts, pillar_override, pillar_enc=pillar_enc, proxy=__proxy__, context=__context__, initial_pillar=_get_initial_pillar(opts)) # depends on [control=['try'], data=[]]
except NameError:
st_ = salt.state.State(opts, pillar_override, pillar_enc=pillar_enc, initial_pillar=_get_initial_pillar(opts)) # depends on [control=['except'], data=[]]
ret = st_.call_high(data)
_set_retcode(ret, highstate=data)
return ret |
def class_url(cls):
"""Returns a versioned URI string for this class"""
base = 'v{0}'.format(getattr(cls, 'RESOURCE_VERSION', '1'))
return "/{0}/{1}".format(base, class_to_api_name(cls.class_name())) | def function[class_url, parameter[cls]]:
constant[Returns a versioned URI string for this class]
variable[base] assign[=] call[constant[v{0}].format, parameter[call[name[getattr], parameter[name[cls], constant[RESOURCE_VERSION], constant[1]]]]]
return[call[constant[/{0}/{1}].format, parameter[name[base], call[name[class_to_api_name], parameter[call[name[cls].class_name, parameter[]]]]]]] | keyword[def] identifier[class_url] ( identifier[cls] ):
literal[string]
identifier[base] = literal[string] . identifier[format] ( identifier[getattr] ( identifier[cls] , literal[string] , literal[string] ))
keyword[return] literal[string] . identifier[format] ( identifier[base] , identifier[class_to_api_name] ( identifier[cls] . identifier[class_name] ())) | def class_url(cls):
"""Returns a versioned URI string for this class"""
base = 'v{0}'.format(getattr(cls, 'RESOURCE_VERSION', '1'))
return '/{0}/{1}'.format(base, class_to_api_name(cls.class_name())) |
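
A small sketch of the resulting URI, using a hypothetical resource class and a stand-in `class_to_api_name` (assumed here to lower-case and pluralize; the real helper may differ):

```python
def class_to_api_name(name):
    return name.lower() + 's'  # stand-in only

class Sample:
    RESOURCE_VERSION = '2'

    @classmethod
    def class_name(cls):
        return cls.__name__

    @classmethod
    def class_url(cls):
        base = 'v{0}'.format(getattr(cls, 'RESOURCE_VERSION', '1'))
        return '/{0}/{1}'.format(base, class_to_api_name(cls.class_name()))

print(Sample.class_url())  # /v2/samples  (falls back to v1 when RESOURCE_VERSION is absent)
```
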
def hide_node(self, node):
"""
Hides a node from the graph. The incoming and outgoing edges of the
node will also be hidden. The node may be unhidden at some later time.
"""
try:
all_edges = self.all_edges(node)
self.hidden_nodes[node] = (self.nodes[node], all_edges)
for edge in all_edges:
self.hide_edge(edge)
del self.nodes[node]
except KeyError:
raise GraphError('Invalid node %s' % node) | def function[hide_node, parameter[self, node]]:
constant[
Hides a node from the graph. The incoming and outgoing edges of the
node will also be hidden. The node may be unhidden at some later time.
]
<ast.Try object at 0x7da1b0c53640> | keyword[def] identifier[hide_node] ( identifier[self] , identifier[node] ):
literal[string]
keyword[try] :
identifier[all_edges] = identifier[self] . identifier[all_edges] ( identifier[node] )
identifier[self] . identifier[hidden_nodes] [ identifier[node] ]=( identifier[self] . identifier[nodes] [ identifier[node] ], identifier[all_edges] )
keyword[for] identifier[edge] keyword[in] identifier[all_edges] :
identifier[self] . identifier[hide_edge] ( identifier[edge] )
keyword[del] identifier[self] . identifier[nodes] [ identifier[node] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[GraphError] ( literal[string] % identifier[node] ) | def hide_node(self, node):
"""
Hides a node from the graph. The incoming and outgoing edges of the
node will also be hidden. The node may be unhidden at some later time.
"""
try:
all_edges = self.all_edges(node)
self.hidden_nodes[node] = (self.nodes[node], all_edges)
for edge in all_edges:
self.hide_edge(edge) # depends on [control=['for'], data=['edge']]
del self.nodes[node] # depends on [control=['try'], data=[]]
except KeyError:
raise GraphError('Invalid node %s' % node) # depends on [control=['except'], data=[]] |
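
The stash-and-restore pattern behind `hidden_nodes` in miniature -- a toy dict-based graph, not the real class, showing why the node data and its incident edges are saved together:

```python
class TinyGraph:
    def __init__(self):
        self.nodes = {'a': 1, 'b': 2}
        self.edges = {('a', 'b'): 'e1'}
        self.hidden_nodes = {}

    def hide_node(self, node):
        incident = [e for e in self.edges if node in e]
        # Keep (node data, incident edges) so the node can be unhidden later.
        self.hidden_nodes[node] = (self.nodes[node], incident)
        for e in incident:
            del self.edges[e]
        del self.nodes[node]

g = TinyGraph()
g.hide_node('a')
assert 'a' not in g.nodes and ('a', 'b') not in g.edges
assert g.hidden_nodes['a'] == (1, [('a', 'b')])
```
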
def translate_markers(pipfile_entry):
"""Take a pipfile entry and normalize its markers
Provide a pipfile entry which may have 'markers' as a key or it may have
any valid key from `packaging.markers.marker_context.keys()` and standardize
the format into {'markers': 'key == "some_value"'}.
    :param pipfile_entry: A dictionary of keys and values representing a pipfile entry
:type pipfile_entry: dict
:returns: A normalized dictionary with cleaned marker entries
"""
if not isinstance(pipfile_entry, Mapping):
raise TypeError("Entry is not a pipfile formatted mapping.")
from .vendor.distlib.markers import DEFAULT_CONTEXT as marker_context
from .vendor.packaging.markers import Marker
from .vendor.vistir.misc import dedup
allowed_marker_keys = ["markers"] + [k for k in marker_context.keys()]
provided_keys = list(pipfile_entry.keys()) if hasattr(pipfile_entry, "keys") else []
pipfile_markers = [k for k in provided_keys if k in allowed_marker_keys]
new_pipfile = dict(pipfile_entry).copy()
marker_set = set()
if "markers" in new_pipfile:
marker = str(Marker(new_pipfile.pop("markers")))
if 'extra' not in marker:
marker_set.add(marker)
for m in pipfile_markers:
entry = "{0}".format(pipfile_entry[m])
if m != "markers":
marker_set.add(str(Marker("{0}{1}".format(m, entry))))
new_pipfile.pop(m)
if marker_set:
new_pipfile["markers"] = str(Marker(" or ".join(
"{0}".format(s) if " and " in s else s
for s in sorted(dedup(marker_set))
))).replace('"', "'")
return new_pipfile | def function[translate_markers, parameter[pipfile_entry]]:
constant[Take a pipfile entry and normalize its markers
Provide a pipfile entry which may have 'markers' as a key or it may have
any valid key from `packaging.markers.marker_context.keys()` and standardize
the format into {'markers': 'key == "some_value"'}.
    :param pipfile_entry: A dictionary of keys and values representing a pipfile entry
:type pipfile_entry: dict
:returns: A normalized dictionary with cleaned marker entries
]
if <ast.UnaryOp object at 0x7da18ede6bf0> begin[:]
<ast.Raise object at 0x7da18ede6c50>
from relative_module[vendor.distlib.markers] import module[DEFAULT_CONTEXT]
from relative_module[vendor.packaging.markers] import module[Marker]
from relative_module[vendor.vistir.misc] import module[dedup]
variable[allowed_marker_keys] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18ede4af0>]] + <ast.ListComp object at 0x7da18ede42b0>]
variable[provided_keys] assign[=] <ast.IfExp object at 0x7da18ede6050>
variable[pipfile_markers] assign[=] <ast.ListComp object at 0x7da18ede6e60>
variable[new_pipfile] assign[=] call[call[name[dict], parameter[name[pipfile_entry]]].copy, parameter[]]
variable[marker_set] assign[=] call[name[set], parameter[]]
if compare[constant[markers] in name[new_pipfile]] begin[:]
variable[marker] assign[=] call[name[str], parameter[call[name[Marker], parameter[call[name[new_pipfile].pop, parameter[constant[markers]]]]]]]
if compare[constant[extra] <ast.NotIn object at 0x7da2590d7190> name[marker]] begin[:]
call[name[marker_set].add, parameter[name[marker]]]
for taget[name[m]] in starred[name[pipfile_markers]] begin[:]
variable[entry] assign[=] call[constant[{0}].format, parameter[call[name[pipfile_entry]][name[m]]]]
if compare[name[m] not_equal[!=] constant[markers]] begin[:]
call[name[marker_set].add, parameter[call[name[str], parameter[call[name[Marker], parameter[call[constant[{0}{1}].format, parameter[name[m], name[entry]]]]]]]]]
call[name[new_pipfile].pop, parameter[name[m]]]
if name[marker_set] begin[:]
call[name[new_pipfile]][constant[markers]] assign[=] call[call[name[str], parameter[call[name[Marker], parameter[call[constant[ or ].join, parameter[<ast.GeneratorExp object at 0x7da18ede6890>]]]]]].replace, parameter[constant["], constant[']]]
return[name[new_pipfile]] | keyword[def] identifier[translate_markers] ( identifier[pipfile_entry] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[pipfile_entry] , identifier[Mapping] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[from] . identifier[vendor] . identifier[distlib] . identifier[markers] keyword[import] identifier[DEFAULT_CONTEXT] keyword[as] identifier[marker_context]
keyword[from] . identifier[vendor] . identifier[packaging] . identifier[markers] keyword[import] identifier[Marker]
keyword[from] . identifier[vendor] . identifier[vistir] . identifier[misc] keyword[import] identifier[dedup]
identifier[allowed_marker_keys] =[ literal[string] ]+[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[marker_context] . identifier[keys] ()]
identifier[provided_keys] = identifier[list] ( identifier[pipfile_entry] . identifier[keys] ()) keyword[if] identifier[hasattr] ( identifier[pipfile_entry] , literal[string] ) keyword[else] []
identifier[pipfile_markers] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[provided_keys] keyword[if] identifier[k] keyword[in] identifier[allowed_marker_keys] ]
identifier[new_pipfile] = identifier[dict] ( identifier[pipfile_entry] ). identifier[copy] ()
identifier[marker_set] = identifier[set] ()
keyword[if] literal[string] keyword[in] identifier[new_pipfile] :
identifier[marker] = identifier[str] ( identifier[Marker] ( identifier[new_pipfile] . identifier[pop] ( literal[string] )))
keyword[if] literal[string] keyword[not] keyword[in] identifier[marker] :
identifier[marker_set] . identifier[add] ( identifier[marker] )
keyword[for] identifier[m] keyword[in] identifier[pipfile_markers] :
identifier[entry] = literal[string] . identifier[format] ( identifier[pipfile_entry] [ identifier[m] ])
keyword[if] identifier[m] != literal[string] :
identifier[marker_set] . identifier[add] ( identifier[str] ( identifier[Marker] ( literal[string] . identifier[format] ( identifier[m] , identifier[entry] ))))
identifier[new_pipfile] . identifier[pop] ( identifier[m] )
keyword[if] identifier[marker_set] :
identifier[new_pipfile] [ literal[string] ]= identifier[str] ( identifier[Marker] ( literal[string] . identifier[join] (
literal[string] . identifier[format] ( identifier[s] ) keyword[if] literal[string] keyword[in] identifier[s] keyword[else] identifier[s]
keyword[for] identifier[s] keyword[in] identifier[sorted] ( identifier[dedup] ( identifier[marker_set] ))
))). identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[new_pipfile] | def translate_markers(pipfile_entry):
"""Take a pipfile entry and normalize its markers
Provide a pipfile entry which may have 'markers' as a key or it may have
any valid key from `packaging.markers.marker_context.keys()` and standardize
the format into {'markers': 'key == "some_value"'}.
    :param pipfile_entry: A dictionary of keys and values representing a pipfile entry
:type pipfile_entry: dict
:returns: A normalized dictionary with cleaned marker entries
"""
if not isinstance(pipfile_entry, Mapping):
raise TypeError('Entry is not a pipfile formatted mapping.') # depends on [control=['if'], data=[]]
from .vendor.distlib.markers import DEFAULT_CONTEXT as marker_context
from .vendor.packaging.markers import Marker
from .vendor.vistir.misc import dedup
allowed_marker_keys = ['markers'] + [k for k in marker_context.keys()]
provided_keys = list(pipfile_entry.keys()) if hasattr(pipfile_entry, 'keys') else []
pipfile_markers = [k for k in provided_keys if k in allowed_marker_keys]
new_pipfile = dict(pipfile_entry).copy()
marker_set = set()
if 'markers' in new_pipfile:
marker = str(Marker(new_pipfile.pop('markers')))
if 'extra' not in marker:
marker_set.add(marker) # depends on [control=['if'], data=['marker']] # depends on [control=['if'], data=['new_pipfile']]
for m in pipfile_markers:
entry = '{0}'.format(pipfile_entry[m])
if m != 'markers':
marker_set.add(str(Marker('{0}{1}'.format(m, entry))))
new_pipfile.pop(m) # depends on [control=['if'], data=['m']] # depends on [control=['for'], data=['m']]
if marker_set:
new_pipfile['markers'] = str(Marker(' or '.join(('{0}'.format(s) if ' and ' in s else s for s in sorted(dedup(marker_set)))))).replace('"', "'") # depends on [control=['if'], data=[]]
return new_pipfile |
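
A before/after sketch of the normalization, with the expected result under the vendored `Marker` semantics (note that `os_name` is a valid marker-context key, so it gets folded into `markers`):

```python
entry = {
    'version': '*',
    'os_name': "== 'nt'",
    'markers': 'python_version >= "3.5"',
}
print(translate_markers(entry))
# {'version': '*',
#  'markers': "os_name == 'nt' or python_version >= '3.5'"}
```
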
def save(self, output_path=None, title=None):
"""Saves the current figure with carpet visualization to disk.
Parameters
----------
output_path : str
Path to where the figure needs to be saved to.
title : str
text to overlay and annotate the visualization (done via plt.suptitle())
"""
try:
save_figure(self.fig, output_path=output_path, annot=title)
except:
print('Unable to save the figure to disk! \nException: ')
traceback.print_exc() | def function[save, parameter[self, output_path, title]]:
constant[Saves the current figure with carpet visualization to disk.
Parameters
----------
output_path : str
Path to where the figure needs to be saved to.
title : str
text to overlay and annotate the visualization (done via plt.suptitle())
]
<ast.Try object at 0x7da1b26af7f0> | keyword[def] identifier[save] ( identifier[self] , identifier[output_path] = keyword[None] , identifier[title] = keyword[None] ):
literal[string]
keyword[try] :
identifier[save_figure] ( identifier[self] . identifier[fig] , identifier[output_path] = identifier[output_path] , identifier[annot] = identifier[title] )
keyword[except] :
identifier[print] ( literal[string] )
identifier[traceback] . identifier[print_exc] () | def save(self, output_path=None, title=None):
"""Saves the current figure with carpet visualization to disk.
Parameters
----------
output_path : str
Path to where the figure needs to be saved to.
title : str
text to overlay and annotate the visualization (done via plt.suptitle())
"""
try:
save_figure(self.fig, output_path=output_path, annot=title) # depends on [control=['try'], data=[]]
except:
print('Unable to save the figure to disk! \nException: ')
traceback.print_exc() # depends on [control=['except'], data=[]] |
def add_watermark(pdf_file_in, pdf_file_mark, pdf_file_out):
"""添加水印
"""
pdf_output = PdfFileWriter()
input_stream = open(pdf_file_in, 'rb')
pdf_input = PdfFileReader(input_stream)
# PDF文件被加密了
if pdf_input.getIsEncrypted():
print('该PDF文件被加密了.')
# 尝试用空密码解密
try:
pdf_input.decrypt('')
except Exception as e:
print('尝试用空密码解密失败.')
return False
else:
print('用空密码解密成功.')
# 获取PDF文件的页数
page_num = pdf_input.getNumPages()
# 读入水印pdf文件
pdf_watermark_input_stream = open(pdf_file_mark, 'rb')
pdf_watermark = PdfFileReader(pdf_watermark_input_stream)
# 给每一页打水印
for i in range(page_num):
page = pdf_input.getPage(i)
page.mergePage(pdf_watermark.getPage(0))
page.compressContentStreams() # 压缩内容
pdf_output.addPage(page)
output_stream = open(pdf_file_out, "wb")
pdf_output.write(output_stream)
input_stream.close()
pdf_watermark_input_stream.close()
output_stream.close() | def function[add_watermark, parameter[pdf_file_in, pdf_file_mark, pdf_file_out]]:
constant[Add a watermark to a PDF.
]
variable[pdf_output] assign[=] call[name[PdfFileWriter], parameter[]]
variable[input_stream] assign[=] call[name[open], parameter[name[pdf_file_in], constant[rb]]]
variable[pdf_input] assign[=] call[name[PdfFileReader], parameter[name[input_stream]]]
if call[name[pdf_input].getIsEncrypted, parameter[]] begin[:]
call[name[print], parameter[constant[This PDF file is encrypted.]]]
<ast.Try object at 0x7da18dc06a70>
variable[page_num] assign[=] call[name[pdf_input].getNumPages, parameter[]]
variable[pdf_watermark_input_stream] assign[=] call[name[open], parameter[name[pdf_file_mark], constant[rb]]]
variable[pdf_watermark] assign[=] call[name[PdfFileReader], parameter[name[pdf_watermark_input_stream]]]
for taget[name[i]] in starred[call[name[range], parameter[name[page_num]]]] begin[:]
variable[page] assign[=] call[name[pdf_input].getPage, parameter[name[i]]]
call[name[page].mergePage, parameter[call[name[pdf_watermark].getPage, parameter[constant[0]]]]]
call[name[page].compressContentStreams, parameter[]]
call[name[pdf_output].addPage, parameter[name[page]]]
variable[output_stream] assign[=] call[name[open], parameter[name[pdf_file_out], constant[wb]]]
call[name[pdf_output].write, parameter[name[output_stream]]]
call[name[input_stream].close, parameter[]]
call[name[pdf_watermark_input_stream].close, parameter[]]
call[name[output_stream].close, parameter[]] | keyword[def] identifier[add_watermark] ( identifier[pdf_file_in] , identifier[pdf_file_mark] , identifier[pdf_file_out] ):
literal[string]
identifier[pdf_output] = identifier[PdfFileWriter] ()
identifier[input_stream] = identifier[open] ( identifier[pdf_file_in] , literal[string] )
identifier[pdf_input] = identifier[PdfFileReader] ( identifier[input_stream] )
keyword[if] identifier[pdf_input] . identifier[getIsEncrypted] ():
identifier[print] ( literal[string] )
keyword[try] :
identifier[pdf_input] . identifier[decrypt] ( literal[string] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[print] ( literal[string] )
keyword[return] keyword[False]
keyword[else] :
identifier[print] ( literal[string] )
identifier[page_num] = identifier[pdf_input] . identifier[getNumPages] ()
identifier[pdf_watermark_input_stream] = identifier[open] ( identifier[pdf_file_mark] , literal[string] )
identifier[pdf_watermark] = identifier[PdfFileReader] ( identifier[pdf_watermark_input_stream] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[page_num] ):
identifier[page] = identifier[pdf_input] . identifier[getPage] ( identifier[i] )
identifier[page] . identifier[mergePage] ( identifier[pdf_watermark] . identifier[getPage] ( literal[int] ))
identifier[page] . identifier[compressContentStreams] ()
identifier[pdf_output] . identifier[addPage] ( identifier[page] )
identifier[output_stream] = identifier[open] ( identifier[pdf_file_out] , literal[string] )
identifier[pdf_output] . identifier[write] ( identifier[output_stream] )
identifier[input_stream] . identifier[close] ()
identifier[pdf_watermark_input_stream] . identifier[close] ()
identifier[output_stream] . identifier[close] () | def add_watermark(pdf_file_in, pdf_file_mark, pdf_file_out):
"""添加水印
"""
pdf_output = PdfFileWriter()
input_stream = open(pdf_file_in, 'rb')
pdf_input = PdfFileReader(input_stream)
# PDF文件被加密了
if pdf_input.getIsEncrypted():
print('该PDF文件被加密了.')
# 尝试用空密码解密
try:
pdf_input.decrypt('') # depends on [control=['try'], data=[]]
except Exception as e:
print('尝试用空密码解密失败.')
return False # depends on [control=['except'], data=[]]
else:
print('用空密码解密成功.') # depends on [control=['if'], data=[]]
# 获取PDF文件的页数
page_num = pdf_input.getNumPages()
# 读入水印pdf文件
pdf_watermark_input_stream = open(pdf_file_mark, 'rb')
pdf_watermark = PdfFileReader(pdf_watermark_input_stream)
# 给每一页打水印
for i in range(page_num):
page = pdf_input.getPage(i)
page.mergePage(pdf_watermark.getPage(0))
page.compressContentStreams() # 压缩内容
pdf_output.addPage(page) # depends on [control=['for'], data=['i']]
output_stream = open(pdf_file_out, 'wb')
pdf_output.write(output_stream)
input_stream.close()
pdf_watermark_input_stream.close()
output_stream.close() |
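
Typical use, assuming the PyPDF2 1.x API that the body is written against (`PdfFileReader`/`PdfFileWriter` were renamed in later releases). The watermark file should be a single page, since only `getPage(0)` is merged onto every page:

```python
from PyPDF2 import PdfFileReader, PdfFileWriter  # PyPDF2 1.x names

add_watermark('report.pdf', 'draft_stamp.pdf', 'report_stamped.pdf')
```
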
def _duration(start, end):
""" Return time delta.
"""
if start and end:
if start > end:
return None
else:
return end - start
elif start:
return time.time() - start
else:
return None | def function[_duration, parameter[start, end]]:
constant[ Return time delta.
]
if <ast.BoolOp object at 0x7da2044c3040> begin[:]
if compare[name[start] greater[>] name[end]] begin[:]
return[constant[None]] | keyword[def] identifier[_duration] ( identifier[start] , identifier[end] ):
literal[string]
keyword[if] identifier[start] keyword[and] identifier[end] :
keyword[if] identifier[start] > identifier[end] :
keyword[return] keyword[None]
keyword[else] :
keyword[return] identifier[end] - identifier[start]
keyword[elif] identifier[start] :
keyword[return] identifier[time] . identifier[time] ()- identifier[start]
keyword[else] :
keyword[return] keyword[None] | def _duration(start, end):
""" Return time delta.
"""
if start and end:
if start > end:
return None # depends on [control=['if'], data=[]]
else:
return end - start # depends on [control=['if'], data=[]]
elif start:
return time.time() - start # depends on [control=['if'], data=[]]
else:
return None |
async def emitters(self, key, value):
"""
Single-channel emitter
"""
while True:
await asyncio.sleep(value['schedule'].total_seconds())
await self.channel_layer.send(key, {
"type": value['type'],
"message": value['message']
}) | <ast.AsyncFunctionDef object at 0x7da1b1080e50> | keyword[async] keyword[def] identifier[emitters] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
keyword[while] keyword[True] :
keyword[await] identifier[asyncio] . identifier[sleep] ( identifier[value] [ literal[string] ]. identifier[total_seconds] ())
keyword[await] identifier[self] . identifier[channel_layer] . identifier[send] ( identifier[key] ,{
literal[string] : identifier[value] [ literal[string] ],
literal[string] : identifier[value] [ literal[string] ]
}) | async def emitters(self, key, value):
"""
Single-channel emitter
"""
while True:
await asyncio.sleep(value['schedule'].total_seconds())
await self.channel_layer.send(key, {'type': value['type'], 'message': value['message']}) # depends on [control=['while'], data=[]] |
def load_fw(path):
"""Open firmware file and return a binary string."""
fname = os.path.realpath(path)
exists = os.path.isfile(fname)
if not exists or not os.access(fname, os.R_OK):
_LOGGER.error(
'Firmware path %s does not exist or is not readable',
path)
return None
try:
intel_hex = IntelHex()
with open(path, 'r') as file_handle:
intel_hex.fromfile(file_handle, format='hex')
return intel_hex.tobinstr()
except (IntelHexError, TypeError, ValueError) as exc:
_LOGGER.error(
'Firmware not valid, check the hex file at %s: %s', path, exc)
return None | def function[load_fw, parameter[path]]:
constant[Open firmware file and return a binary string.]
variable[fname] assign[=] call[name[os].path.realpath, parameter[name[path]]]
variable[exists] assign[=] call[name[os].path.isfile, parameter[name[fname]]]
if <ast.BoolOp object at 0x7da20cabf400> begin[:]
call[name[_LOGGER].error, parameter[constant[Firmware path %s does not exist or is not readable], name[path]]]
return[constant[None]]
<ast.Try object at 0x7da20cabc7c0> | keyword[def] identifier[load_fw] ( identifier[path] ):
literal[string]
identifier[fname] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[path] )
identifier[exists] = identifier[os] . identifier[path] . identifier[isfile] ( identifier[fname] )
keyword[if] keyword[not] identifier[exists] keyword[or] keyword[not] identifier[os] . identifier[access] ( identifier[fname] , identifier[os] . identifier[R_OK] ):
identifier[_LOGGER] . identifier[error] (
literal[string] ,
identifier[path] )
keyword[return] keyword[None]
keyword[try] :
identifier[intel_hex] = identifier[IntelHex] ()
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[file_handle] :
identifier[intel_hex] . identifier[fromfile] ( identifier[file_handle] , identifier[format] = literal[string] )
keyword[return] identifier[intel_hex] . identifier[tobinstr] ()
keyword[except] ( identifier[IntelHexError] , identifier[TypeError] , identifier[ValueError] ) keyword[as] identifier[exc] :
identifier[_LOGGER] . identifier[error] (
literal[string] , identifier[path] , identifier[exc] )
keyword[return] keyword[None] | def load_fw(path):
"""Open firmware file and return a binary string."""
fname = os.path.realpath(path)
exists = os.path.isfile(fname)
if not exists or not os.access(fname, os.R_OK):
_LOGGER.error('Firmware path %s does not exist or is not readable', path)
return None # depends on [control=['if'], data=[]]
try:
intel_hex = IntelHex()
with open(path, 'r') as file_handle:
intel_hex.fromfile(file_handle, format='hex') # depends on [control=['with'], data=['file_handle']]
return intel_hex.tobinstr() # depends on [control=['try'], data=[]]
except (IntelHexError, TypeError, ValueError) as exc:
_LOGGER.error('Firmware not valid, check the hex file at %s: %s', path, exc)
return None # depends on [control=['except'], data=['exc']] |
def expand_tpm(tpm):
"""Broadcast a state-by-node TPM so that singleton dimensions are expanded
over the full network.
"""
unconstrained = np.ones([2] * (tpm.ndim - 1) + [tpm.shape[-1]])
return tpm * unconstrained | def function[expand_tpm, parameter[tpm]]:
constant[Broadcast a state-by-node TPM so that singleton dimensions are expanded
over the full network.
]
variable[unconstrained] assign[=] call[name[np].ones, parameter[binary_operation[binary_operation[list[[<ast.Constant object at 0x7da207f022f0>]] * binary_operation[name[tpm].ndim - constant[1]]] + list[[<ast.Subscript object at 0x7da207f02da0>]]]]]
return[binary_operation[name[tpm] * name[unconstrained]]] | keyword[def] identifier[expand_tpm] ( identifier[tpm] ):
literal[string]
identifier[unconstrained] = identifier[np] . identifier[ones] ([ literal[int] ]*( identifier[tpm] . identifier[ndim] - literal[int] )+[ identifier[tpm] . identifier[shape] [- literal[int] ]])
keyword[return] identifier[tpm] * identifier[unconstrained] | def expand_tpm(tpm):
"""Broadcast a state-by-node TPM so that singleton dimensions are expanded
over the full network.
"""
unconstrained = np.ones([2] * (tpm.ndim - 1) + [tpm.shape[-1]])
return tpm * unconstrained |
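
Shapes make the broadcast concrete; a minimal check with `expand_tpm` from above in scope, for a TPM that is singleton along the first node's dimension:

```python
import numpy as np

tpm = np.arange(4, dtype=float).reshape(1, 2, 2)  # singleton over node 0
expanded = expand_tpm(tpm)
print(expanded.shape)                            # (2, 2, 2)
assert np.array_equal(expanded[0], expanded[1])  # the singleton slice is duplicated
```
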
def load_plume_package(package, plume_dir, accept_defaults):
"""Loads a canari package into Plume."""
from canari.commands.load_plume_package import load_plume_package
load_plume_package(package, plume_dir, accept_defaults) | def function[load_plume_package, parameter[package, plume_dir, accept_defaults]]:
constant[Loads a canari package into Plume.]
from relative_module[canari.commands.load_plume_package] import module[load_plume_package]
call[name[load_plume_package], parameter[name[package], name[plume_dir], name[accept_defaults]]] | keyword[def] identifier[load_plume_package] ( identifier[package] , identifier[plume_dir] , identifier[accept_defaults] ):
literal[string]
keyword[from] identifier[canari] . identifier[commands] . identifier[load_plume_package] keyword[import] identifier[load_plume_package]
identifier[load_plume_package] ( identifier[package] , identifier[plume_dir] , identifier[accept_defaults] ) | def load_plume_package(package, plume_dir, accept_defaults):
"""Loads a canari package into Plume."""
from canari.commands.load_plume_package import load_plume_package
load_plume_package(package, plume_dir, accept_defaults) |
def deriv1(x,y,i,n):
"""
    Alternative way to smooth the derivative of a noisy signal
    using a least-squares fit.
    x=array of x axis
    y=array of y axis
    n=smoothing factor
    i=position
    In this method the slope at position i is calculated by a least-squares
    fit of the n points starting at position i.
"""
m_,x_,y_,xy_,x_2=0.,0.,0.,0.,0.
for ix in range(i,i+n,1):
x_=x_+x[ix]
y_=y_+y[ix]
xy_=xy_+x[ix]*y[ix]
x_2=x_2+x[ix]**2
m= old_div(( (n*xy_) - (x_*y_) ), ( n*x_2-(x_)**2))
return(m) | def function[deriv1, parameter[x, y, i, n]]:
constant[
    Alternative way to smooth the derivative of a noisy signal
    using a least-squares fit.
    x=array of x axis
    y=array of y axis
    n=smoothing factor
    i=position
    In this method the slope at position i is calculated by a least-squares
    fit of the n points starting at position i.
]
<ast.Tuple object at 0x7da1b03025f0> assign[=] tuple[[<ast.Constant object at 0x7da1b0301840>, <ast.Constant object at 0x7da1b0301810>, <ast.Constant object at 0x7da1b03017b0>, <ast.Constant object at 0x7da1b0302890>, <ast.Constant object at 0x7da1b03028c0>]]
for taget[name[ix]] in starred[call[name[range], parameter[name[i], binary_operation[name[i] + name[n]], constant[1]]]] begin[:]
variable[x_] assign[=] binary_operation[name[x_] + call[name[x]][name[ix]]]
variable[y_] assign[=] binary_operation[name[y_] + call[name[y]][name[ix]]]
variable[xy_] assign[=] binary_operation[name[xy_] + binary_operation[call[name[x]][name[ix]] * call[name[y]][name[ix]]]]
variable[x_2] assign[=] binary_operation[name[x_2] + binary_operation[call[name[x]][name[ix]] ** constant[2]]]
variable[m] assign[=] call[name[old_div], parameter[binary_operation[binary_operation[name[n] * name[xy_]] - binary_operation[name[x_] * name[y_]]], binary_operation[binary_operation[name[n] * name[x_2]] - binary_operation[name[x_] ** constant[2]]]]]
return[name[m]] | keyword[def] identifier[deriv1] ( identifier[x] , identifier[y] , identifier[i] , identifier[n] ):
literal[string]
identifier[m_] , identifier[x_] , identifier[y_] , identifier[xy_] , identifier[x_2] = literal[int] , literal[int] , literal[int] , literal[int] , literal[int]
keyword[for] identifier[ix] keyword[in] identifier[range] ( identifier[i] , identifier[i] + identifier[n] , literal[int] ):
identifier[x_] = identifier[x_] + identifier[x] [ identifier[ix] ]
identifier[y_] = identifier[y_] + identifier[y] [ identifier[ix] ]
identifier[xy_] = identifier[xy_] + identifier[x] [ identifier[ix] ]* identifier[y] [ identifier[ix] ]
identifier[x_2] = identifier[x_2] + identifier[x] [ identifier[ix] ]** literal[int]
identifier[m] = identifier[old_div] ((( identifier[n] * identifier[xy_] )-( identifier[x_] * identifier[y_] )),( identifier[n] * identifier[x_2] -( identifier[x_] )** literal[int] ))
keyword[return] ( identifier[m] ) | def deriv1(x, y, i, n):
"""
    Alternative way to smooth the derivative of a noisy signal
    using a least-squares fit.
    x=array of x axis
    y=array of y axis
    n=smoothing factor
    i=position
    In this method the slope at position i is calculated by a least-squares
    fit of the n points starting at position i.
"""
(m_, x_, y_, xy_, x_2) = (0.0, 0.0, 0.0, 0.0, 0.0)
for ix in range(i, i + n, 1):
x_ = x_ + x[ix]
y_ = y_ + y[ix]
xy_ = xy_ + x[ix] * y[ix]
x_2 = x_2 + x[ix] ** 2 # depends on [control=['for'], data=['ix']]
m = old_div(n * xy_ - x_ * y_, n * x_2 - x_ ** 2)
return m |
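
On noiseless linear data the fit should recover the slope exactly; a quick check with `deriv1` from above in scope, and `old_div` (the past.utils Python 2/3 division shim) reduced to plain division:

```python
def old_div(a, b):
    return a / b  # the past.utils shim behaves like true division here

x = [0, 1, 2, 3, 4, 5]
y = [2 * xi + 1 for xi in x]   # exact slope 2 everywhere
print(deriv1(x, y, 0, 4))      # 2.0 -- least-squares fit over points 0..3
```
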
def add_distinguished_name(list_name, item_name):
'''
Adds a distinguished name to a distinguished name list.
list_name(str): The name of the specific policy distinguished name list to append to.
item_name(str): The distinguished name to append.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.add_distinguished_name MyDistinguishedList cn=foo.bar.com
'''
payload = {"jsonrpc": "2.0",
"id": "ID0",
"method": "add_policy_distinguished_names",
"params": [list_name, {"item_name": item_name}]}
response = __proxy__['bluecoat_sslv.call'](payload, True)
return _validate_change_result(response) | def function[add_distinguished_name, parameter[list_name, item_name]]:
constant[
Adds a distinguished name to a distinguished name list.
list_name(str): The name of the specific policy distinguished name list to append to.
item_name(str): The distinguished name to append.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.add_distinguished_name MyDistinguishedList cn=foo.bar.com
]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da2045640d0>, <ast.Constant object at 0x7da204567790>, <ast.Constant object at 0x7da2045647c0>, <ast.Constant object at 0x7da204565600>], [<ast.Constant object at 0x7da204564c70>, <ast.Constant object at 0x7da2045666e0>, <ast.Constant object at 0x7da204564d30>, <ast.List object at 0x7da2045661a0>]]
variable[response] assign[=] call[call[name[__proxy__]][constant[bluecoat_sslv.call]], parameter[name[payload], constant[True]]]
return[call[name[_validate_change_result], parameter[name[response]]]] | keyword[def] identifier[add_distinguished_name] ( identifier[list_name] , identifier[item_name] ):
literal[string]
identifier[payload] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] :[ identifier[list_name] ,{ literal[string] : identifier[item_name] }]}
identifier[response] = identifier[__proxy__] [ literal[string] ]( identifier[payload] , keyword[True] )
keyword[return] identifier[_validate_change_result] ( identifier[response] ) | def add_distinguished_name(list_name, item_name):
"""
Adds a distinguished name to a distinguished name list.
list_name(str): The name of the specific policy distinguished name list to append to.
item_name(str): The distinguished name to append.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.add_distinguished_name MyDistinguishedList cn=foo.bar.com
"""
payload = {'jsonrpc': '2.0', 'id': 'ID0', 'method': 'add_policy_distinguished_names', 'params': [list_name, {'item_name': item_name}]}
response = __proxy__['bluecoat_sslv.call'](payload, True)
return _validate_change_result(response) |
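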
def get_other_gene_names(cls, entry):
"""
get list of `models.OtherGeneName` objects from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.models.OtherGeneName` objects
"""
alternative_gene_names = []
for alternative_gene_name in entry.iterfind("./gene/name"):
if alternative_gene_name.attrib['type'] != 'primary':
alternative_gene_name_dict = {
'type_': alternative_gene_name.attrib['type'],
'name': alternative_gene_name.text
}
alternative_gene_names.append(models.OtherGeneName(**alternative_gene_name_dict))
return alternative_gene_names | def function[get_other_gene_names, parameter[cls, entry]]:
constant[
get list of `models.OtherGeneName` objects from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.models.OtherGeneName` objects
]
variable[alternative_gene_names] assign[=] list[[]]
for taget[name[alternative_gene_name]] in starred[call[name[entry].iterfind, parameter[constant[./gene/name]]]] begin[:]
if compare[call[name[alternative_gene_name].attrib][constant[type]] not_equal[!=] constant[primary]] begin[:]
variable[alternative_gene_name_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b0c64130>, <ast.Constant object at 0x7da1b0c64a90>], [<ast.Subscript object at 0x7da1b0c64d60>, <ast.Attribute object at 0x7da1b0c979a0>]]
call[name[alternative_gene_names].append, parameter[call[name[models].OtherGeneName, parameter[]]]]
return[name[alternative_gene_names]] | keyword[def] identifier[get_other_gene_names] ( identifier[cls] , identifier[entry] ):
literal[string]
identifier[alternative_gene_names] =[]
keyword[for] identifier[alternative_gene_name] keyword[in] identifier[entry] . identifier[iterfind] ( literal[string] ):
keyword[if] identifier[alternative_gene_name] . identifier[attrib] [ literal[string] ]!= literal[string] :
identifier[alternative_gene_name_dict] ={
literal[string] : identifier[alternative_gene_name] . identifier[attrib] [ literal[string] ],
literal[string] : identifier[alternative_gene_name] . identifier[text]
}
identifier[alternative_gene_names] . identifier[append] ( identifier[models] . identifier[OtherGeneName] (** identifier[alternative_gene_name_dict] ))
keyword[return] identifier[alternative_gene_names] | def get_other_gene_names(cls, entry):
"""
get list of `models.OtherGeneName` objects from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.models.OtherGeneName` objects
"""
alternative_gene_names = []
for alternative_gene_name in entry.iterfind('./gene/name'):
if alternative_gene_name.attrib['type'] != 'primary':
alternative_gene_name_dict = {'type_': alternative_gene_name.attrib['type'], 'name': alternative_gene_name.text}
alternative_gene_names.append(models.OtherGeneName(**alternative_gene_name_dict)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['alternative_gene_name']]
return alternative_gene_names |
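A hedged usage sketch for the get_other_gene_names record above, run against a hand-written XML fragment instead of a real UniProt entry; the models.OtherGeneName class is replaced by a plain dict here, which is an assumption for illustration.

import xml.etree.ElementTree as ET

# Minimal UniProt-like entry with one primary name and one synonym.
entry = ET.fromstring(
    "<entry><gene>"
    "<name type='primary'>TP53</name>"
    "<name type='synonym'>P53</name>"
    "</gene></entry>"
)

# Same ./gene/name walk as above, skipping the primary name.
synonyms = [
    {'type_': name.attrib['type'], 'name': name.text}
    for name in entry.iterfind('./gene/name')
    if name.attrib['type'] != 'primary'
]
print(synonyms)  # [{'type_': 'synonym', 'name': 'P53'}]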
def get_unresolved_variables(f):
"""
    Gets unresolved variables from a file.
"""
reporter = RReporter()
checkPath(f, reporter=reporter)
return dict(reporter.messages) | def function[get_unresolved_variables, parameter[f]]:
constant[
    Gets unresolved variables from a file.
]
variable[reporter] assign[=] call[name[RReporter], parameter[]]
call[name[checkPath], parameter[name[f]]]
return[call[name[dict], parameter[name[reporter].messages]]] | keyword[def] identifier[get_unresolved_variables] ( identifier[f] ):
literal[string]
identifier[reporter] = identifier[RReporter] ()
identifier[checkPath] ( identifier[f] , identifier[reporter] = identifier[reporter] )
keyword[return] identifier[dict] ( identifier[reporter] . identifier[messages] ) | def get_unresolved_variables(f):
"""
    Gets unresolved variables from a file.
"""
reporter = RReporter()
checkPath(f, reporter=reporter)
return dict(reporter.messages) |
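The RReporter class used by get_unresolved_variables is not shown in this record; the sketch below assumes it is a pyflakes reporter that collects (name, message) pairs, so its exact shape is a guess.

from pyflakes.api import checkPath

class CollectingReporter:
    """Pyflakes reporter that records each warning as (name, message)."""
    def __init__(self):
        self.messages = []
    def flake(self, message):
        # pyflakes calls flake() once per warning, e.g. an undefined name;
        # message_args[0] is the offending identifier for those warnings.
        self.messages.append((message.message_args[0], str(message)))
    def unexpectedError(self, filename, msg):
        pass
    def syntaxError(self, filename, msg, lineno, offset, text):
        pass

reporter = CollectingReporter()
checkPath('some_module.py', reporter=reporter)  # hypothetical file name
print(dict(reporter.messages))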
def replace_keywords(self, sentence):
"""Searches in the string for all keywords present in corpus.
Keywords present are replaced by the clean name and a new string is returned.
Args:
sentence (str): Line of text where we will replace keywords
Returns:
new_sentence (str): Line of text with replaced keywords
Examples:
>>> from flashtext import KeywordProcessor
>>> keyword_processor = KeywordProcessor()
>>> keyword_processor.add_keyword('Big Apple', 'New York')
>>> keyword_processor.add_keyword('Bay Area')
>>> new_sentence = keyword_processor.replace_keywords('I love Big Apple and bay area.')
>>> new_sentence
>>> 'I love New York and Bay Area.'
"""
if not sentence:
# if sentence is empty or none just return the same.
return sentence
new_sentence = []
orig_sentence = sentence
if not self.case_sensitive:
sentence = sentence.lower()
current_word = ''
current_dict = self.keyword_trie_dict
current_white_space = ''
sequence_end_pos = 0
idx = 0
sentence_len = len(sentence)
while idx < sentence_len:
char = sentence[idx]
current_word += orig_sentence[idx]
# when we reach whitespace
if char not in self.non_word_boundaries:
current_white_space = char
# if end is present in current_dict
if self._keyword in current_dict or char in current_dict:
# update longest sequence found
sequence_found = None
longest_sequence_found = None
is_longer_seq_found = False
if self._keyword in current_dict:
sequence_found = current_dict[self._keyword]
longest_sequence_found = current_dict[self._keyword]
sequence_end_pos = idx
# re look for longest_sequence from this position
if char in current_dict:
current_dict_continued = current_dict[char]
current_word_continued = current_word
idy = idx + 1
while idy < sentence_len:
inner_char = sentence[idy]
current_word_continued += orig_sentence[idy]
if inner_char not in self.non_word_boundaries and self._keyword in current_dict_continued:
# update longest sequence found
current_white_space = inner_char
longest_sequence_found = current_dict_continued[self._keyword]
sequence_end_pos = idy
is_longer_seq_found = True
if inner_char in current_dict_continued:
current_dict_continued = current_dict_continued[inner_char]
else:
break
idy += 1
else:
# end of sentence reached.
if self._keyword in current_dict_continued:
# update longest sequence found
current_white_space = ''
longest_sequence_found = current_dict_continued[self._keyword]
sequence_end_pos = idy
is_longer_seq_found = True
if is_longer_seq_found:
idx = sequence_end_pos
current_word = current_word_continued
current_dict = self.keyword_trie_dict
if longest_sequence_found:
new_sentence.append(longest_sequence_found + current_white_space)
current_word = ''
current_white_space = ''
else:
new_sentence.append(current_word)
current_word = ''
current_white_space = ''
else:
# we reset current_dict
current_dict = self.keyword_trie_dict
new_sentence.append(current_word)
current_word = ''
current_white_space = ''
elif char in current_dict:
# we can continue from this char
current_dict = current_dict[char]
else:
# we reset current_dict
current_dict = self.keyword_trie_dict
# skip to end of word
idy = idx + 1
while idy < sentence_len:
char = sentence[idy]
current_word += orig_sentence[idy]
if char not in self.non_word_boundaries:
break
idy += 1
idx = idy
new_sentence.append(current_word)
current_word = ''
current_white_space = ''
# if we are end of sentence and have a sequence discovered
if idx + 1 >= sentence_len:
if self._keyword in current_dict:
sequence_found = current_dict[self._keyword]
new_sentence.append(sequence_found)
else:
new_sentence.append(current_word)
idx += 1
return "".join(new_sentence) | def function[replace_keywords, parameter[self, sentence]]:
constant[Searches in the string for all keywords present in corpus.
Keywords present are replaced by the clean name and a new string is returned.
Args:
sentence (str): Line of text where we will replace keywords
Returns:
new_sentence (str): Line of text with replaced keywords
Examples:
>>> from flashtext import KeywordProcessor
>>> keyword_processor = KeywordProcessor()
>>> keyword_processor.add_keyword('Big Apple', 'New York')
>>> keyword_processor.add_keyword('Bay Area')
>>> new_sentence = keyword_processor.replace_keywords('I love Big Apple and bay area.')
>>> new_sentence
>>> 'I love New York and Bay Area.'
]
if <ast.UnaryOp object at 0x7da2047ea1d0> begin[:]
return[name[sentence]]
variable[new_sentence] assign[=] list[[]]
variable[orig_sentence] assign[=] name[sentence]
if <ast.UnaryOp object at 0x7da2047ea0b0> begin[:]
variable[sentence] assign[=] call[name[sentence].lower, parameter[]]
variable[current_word] assign[=] constant[]
variable[current_dict] assign[=] name[self].keyword_trie_dict
variable[current_white_space] assign[=] constant[]
variable[sequence_end_pos] assign[=] constant[0]
variable[idx] assign[=] constant[0]
variable[sentence_len] assign[=] call[name[len], parameter[name[sentence]]]
while compare[name[idx] less[<] name[sentence_len]] begin[:]
variable[char] assign[=] call[name[sentence]][name[idx]]
<ast.AugAssign object at 0x7da2047e9450>
if compare[name[char] <ast.NotIn object at 0x7da2590d7190> name[self].non_word_boundaries] begin[:]
variable[current_white_space] assign[=] name[char]
if <ast.BoolOp object at 0x7da2047e81c0> begin[:]
variable[sequence_found] assign[=] constant[None]
variable[longest_sequence_found] assign[=] constant[None]
variable[is_longer_seq_found] assign[=] constant[False]
if compare[name[self]._keyword in name[current_dict]] begin[:]
variable[sequence_found] assign[=] call[name[current_dict]][name[self]._keyword]
variable[longest_sequence_found] assign[=] call[name[current_dict]][name[self]._keyword]
variable[sequence_end_pos] assign[=] name[idx]
if compare[name[char] in name[current_dict]] begin[:]
variable[current_dict_continued] assign[=] call[name[current_dict]][name[char]]
variable[current_word_continued] assign[=] name[current_word]
variable[idy] assign[=] binary_operation[name[idx] + constant[1]]
while compare[name[idy] less[<] name[sentence_len]] begin[:]
variable[inner_char] assign[=] call[name[sentence]][name[idy]]
<ast.AugAssign object at 0x7da20c7c9150>
if <ast.BoolOp object at 0x7da20c7ca980> begin[:]
variable[current_white_space] assign[=] name[inner_char]
variable[longest_sequence_found] assign[=] call[name[current_dict_continued]][name[self]._keyword]
variable[sequence_end_pos] assign[=] name[idy]
variable[is_longer_seq_found] assign[=] constant[True]
if compare[name[inner_char] in name[current_dict_continued]] begin[:]
variable[current_dict_continued] assign[=] call[name[current_dict_continued]][name[inner_char]]
<ast.AugAssign object at 0x7da20c7c86a0>
if name[is_longer_seq_found] begin[:]
variable[idx] assign[=] name[sequence_end_pos]
variable[current_word] assign[=] name[current_word_continued]
variable[current_dict] assign[=] name[self].keyword_trie_dict
if name[longest_sequence_found] begin[:]
call[name[new_sentence].append, parameter[binary_operation[name[longest_sequence_found] + name[current_white_space]]]]
variable[current_word] assign[=] constant[]
variable[current_white_space] assign[=] constant[]
if compare[binary_operation[name[idx] + constant[1]] greater_or_equal[>=] name[sentence_len]] begin[:]
if compare[name[self]._keyword in name[current_dict]] begin[:]
variable[sequence_found] assign[=] call[name[current_dict]][name[self]._keyword]
call[name[new_sentence].append, parameter[name[sequence_found]]]
<ast.AugAssign object at 0x7da20c795f90>
return[call[constant[].join, parameter[name[new_sentence]]]] | keyword[def] identifier[replace_keywords] ( identifier[self] , identifier[sentence] ):
literal[string]
keyword[if] keyword[not] identifier[sentence] :
keyword[return] identifier[sentence]
identifier[new_sentence] =[]
identifier[orig_sentence] = identifier[sentence]
keyword[if] keyword[not] identifier[self] . identifier[case_sensitive] :
identifier[sentence] = identifier[sentence] . identifier[lower] ()
identifier[current_word] = literal[string]
identifier[current_dict] = identifier[self] . identifier[keyword_trie_dict]
identifier[current_white_space] = literal[string]
identifier[sequence_end_pos] = literal[int]
identifier[idx] = literal[int]
identifier[sentence_len] = identifier[len] ( identifier[sentence] )
keyword[while] identifier[idx] < identifier[sentence_len] :
identifier[char] = identifier[sentence] [ identifier[idx] ]
identifier[current_word] += identifier[orig_sentence] [ identifier[idx] ]
keyword[if] identifier[char] keyword[not] keyword[in] identifier[self] . identifier[non_word_boundaries] :
identifier[current_white_space] = identifier[char]
keyword[if] identifier[self] . identifier[_keyword] keyword[in] identifier[current_dict] keyword[or] identifier[char] keyword[in] identifier[current_dict] :
identifier[sequence_found] = keyword[None]
identifier[longest_sequence_found] = keyword[None]
identifier[is_longer_seq_found] = keyword[False]
keyword[if] identifier[self] . identifier[_keyword] keyword[in] identifier[current_dict] :
identifier[sequence_found] = identifier[current_dict] [ identifier[self] . identifier[_keyword] ]
identifier[longest_sequence_found] = identifier[current_dict] [ identifier[self] . identifier[_keyword] ]
identifier[sequence_end_pos] = identifier[idx]
keyword[if] identifier[char] keyword[in] identifier[current_dict] :
identifier[current_dict_continued] = identifier[current_dict] [ identifier[char] ]
identifier[current_word_continued] = identifier[current_word]
identifier[idy] = identifier[idx] + literal[int]
keyword[while] identifier[idy] < identifier[sentence_len] :
identifier[inner_char] = identifier[sentence] [ identifier[idy] ]
identifier[current_word_continued] += identifier[orig_sentence] [ identifier[idy] ]
keyword[if] identifier[inner_char] keyword[not] keyword[in] identifier[self] . identifier[non_word_boundaries] keyword[and] identifier[self] . identifier[_keyword] keyword[in] identifier[current_dict_continued] :
identifier[current_white_space] = identifier[inner_char]
identifier[longest_sequence_found] = identifier[current_dict_continued] [ identifier[self] . identifier[_keyword] ]
identifier[sequence_end_pos] = identifier[idy]
identifier[is_longer_seq_found] = keyword[True]
keyword[if] identifier[inner_char] keyword[in] identifier[current_dict_continued] :
identifier[current_dict_continued] = identifier[current_dict_continued] [ identifier[inner_char] ]
keyword[else] :
keyword[break]
identifier[idy] += literal[int]
keyword[else] :
keyword[if] identifier[self] . identifier[_keyword] keyword[in] identifier[current_dict_continued] :
identifier[current_white_space] = literal[string]
identifier[longest_sequence_found] = identifier[current_dict_continued] [ identifier[self] . identifier[_keyword] ]
identifier[sequence_end_pos] = identifier[idy]
identifier[is_longer_seq_found] = keyword[True]
keyword[if] identifier[is_longer_seq_found] :
identifier[idx] = identifier[sequence_end_pos]
identifier[current_word] = identifier[current_word_continued]
identifier[current_dict] = identifier[self] . identifier[keyword_trie_dict]
keyword[if] identifier[longest_sequence_found] :
identifier[new_sentence] . identifier[append] ( identifier[longest_sequence_found] + identifier[current_white_space] )
identifier[current_word] = literal[string]
identifier[current_white_space] = literal[string]
keyword[else] :
identifier[new_sentence] . identifier[append] ( identifier[current_word] )
identifier[current_word] = literal[string]
identifier[current_white_space] = literal[string]
keyword[else] :
identifier[current_dict] = identifier[self] . identifier[keyword_trie_dict]
identifier[new_sentence] . identifier[append] ( identifier[current_word] )
identifier[current_word] = literal[string]
identifier[current_white_space] = literal[string]
keyword[elif] identifier[char] keyword[in] identifier[current_dict] :
identifier[current_dict] = identifier[current_dict] [ identifier[char] ]
keyword[else] :
identifier[current_dict] = identifier[self] . identifier[keyword_trie_dict]
identifier[idy] = identifier[idx] + literal[int]
keyword[while] identifier[idy] < identifier[sentence_len] :
identifier[char] = identifier[sentence] [ identifier[idy] ]
identifier[current_word] += identifier[orig_sentence] [ identifier[idy] ]
keyword[if] identifier[char] keyword[not] keyword[in] identifier[self] . identifier[non_word_boundaries] :
keyword[break]
identifier[idy] += literal[int]
identifier[idx] = identifier[idy]
identifier[new_sentence] . identifier[append] ( identifier[current_word] )
identifier[current_word] = literal[string]
identifier[current_white_space] = literal[string]
keyword[if] identifier[idx] + literal[int] >= identifier[sentence_len] :
keyword[if] identifier[self] . identifier[_keyword] keyword[in] identifier[current_dict] :
identifier[sequence_found] = identifier[current_dict] [ identifier[self] . identifier[_keyword] ]
identifier[new_sentence] . identifier[append] ( identifier[sequence_found] )
keyword[else] :
identifier[new_sentence] . identifier[append] ( identifier[current_word] )
identifier[idx] += literal[int]
keyword[return] literal[string] . identifier[join] ( identifier[new_sentence] ) | def replace_keywords(self, sentence):
"""Searches in the string for all keywords present in corpus.
Keywords present are replaced by the clean name and a new string is returned.
Args:
sentence (str): Line of text where we will replace keywords
Returns:
new_sentence (str): Line of text with replaced keywords
Examples:
>>> from flashtext import KeywordProcessor
>>> keyword_processor = KeywordProcessor()
>>> keyword_processor.add_keyword('Big Apple', 'New York')
>>> keyword_processor.add_keyword('Bay Area')
>>> new_sentence = keyword_processor.replace_keywords('I love Big Apple and bay area.')
>>> new_sentence
>>> 'I love New York and Bay Area.'
"""
if not sentence:
# if sentence is empty or none just return the same.
return sentence # depends on [control=['if'], data=[]]
new_sentence = []
orig_sentence = sentence
if not self.case_sensitive:
sentence = sentence.lower() # depends on [control=['if'], data=[]]
current_word = ''
current_dict = self.keyword_trie_dict
current_white_space = ''
sequence_end_pos = 0
idx = 0
sentence_len = len(sentence)
while idx < sentence_len:
char = sentence[idx]
current_word += orig_sentence[idx]
# when we reach whitespace
if char not in self.non_word_boundaries:
current_white_space = char
# if end is present in current_dict
if self._keyword in current_dict or char in current_dict:
# update longest sequence found
sequence_found = None
longest_sequence_found = None
is_longer_seq_found = False
if self._keyword in current_dict:
sequence_found = current_dict[self._keyword]
longest_sequence_found = current_dict[self._keyword]
sequence_end_pos = idx # depends on [control=['if'], data=['current_dict']]
# re look for longest_sequence from this position
if char in current_dict:
current_dict_continued = current_dict[char]
current_word_continued = current_word
idy = idx + 1
while idy < sentence_len:
inner_char = sentence[idy]
current_word_continued += orig_sentence[idy]
if inner_char not in self.non_word_boundaries and self._keyword in current_dict_continued:
# update longest sequence found
current_white_space = inner_char
longest_sequence_found = current_dict_continued[self._keyword]
sequence_end_pos = idy
is_longer_seq_found = True # depends on [control=['if'], data=[]]
if inner_char in current_dict_continued:
current_dict_continued = current_dict_continued[inner_char] # depends on [control=['if'], data=['inner_char', 'current_dict_continued']]
else:
break
idy += 1 # depends on [control=['while'], data=['idy']]
else:
# end of sentence reached.
if self._keyword in current_dict_continued:
# update longest sequence found
current_white_space = ''
longest_sequence_found = current_dict_continued[self._keyword]
sequence_end_pos = idy
is_longer_seq_found = True # depends on [control=['if'], data=['current_dict_continued']]
if is_longer_seq_found:
idx = sequence_end_pos
current_word = current_word_continued # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['char', 'current_dict']]
current_dict = self.keyword_trie_dict
if longest_sequence_found:
new_sentence.append(longest_sequence_found + current_white_space)
current_word = ''
current_white_space = '' # depends on [control=['if'], data=[]]
else:
new_sentence.append(current_word)
current_word = ''
current_white_space = '' # depends on [control=['if'], data=[]]
else:
# we reset current_dict
current_dict = self.keyword_trie_dict
new_sentence.append(current_word)
current_word = ''
current_white_space = '' # depends on [control=['if'], data=['char']]
elif char in current_dict:
# we can continue from this char
current_dict = current_dict[char] # depends on [control=['if'], data=['char', 'current_dict']]
else:
# we reset current_dict
current_dict = self.keyword_trie_dict
# skip to end of word
idy = idx + 1
while idy < sentence_len:
char = sentence[idy]
current_word += orig_sentence[idy]
if char not in self.non_word_boundaries:
break # depends on [control=['if'], data=[]]
idy += 1 # depends on [control=['while'], data=['idy']]
idx = idy
new_sentence.append(current_word)
current_word = ''
current_white_space = ''
# if we are end of sentence and have a sequence discovered
if idx + 1 >= sentence_len:
if self._keyword in current_dict:
sequence_found = current_dict[self._keyword]
new_sentence.append(sequence_found) # depends on [control=['if'], data=['current_dict']]
else:
new_sentence.append(current_word) # depends on [control=['if'], data=[]]
idx += 1 # depends on [control=['while'], data=['idx', 'sentence_len']]
return ''.join(new_sentence) |
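A short usage sketch for the replace_keywords record above (requires the flashtext package); it demonstrates the longest-match behaviour that the inner idy loop implements.

from flashtext import KeywordProcessor

kp = KeywordProcessor()
kp.add_keyword('New Delhi', 'NCR')
kp.add_keyword('New Delhi Railway Station', 'NDLS')

# The longer keyword wins even though 'New Delhi' also matches here.
print(kp.replace_keywords('I am at New Delhi Railway Station.'))
# -> 'I am at NDLS.'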
def send_scp(self, *args, **kwargs):
"""Transmit an SCP Packet to a specific board.
Automatically determines the appropriate connection to use.
See the arguments for
:py:meth:`~rig.machine_control.scp_connection.SCPConnection` for
details.
Parameters
----------
cabinet : int
frame : int
board : int
"""
# Retrieve contextual arguments from the keyword arguments. The
# context system ensures that these values are present.
cabinet = kwargs.pop("cabinet")
frame = kwargs.pop("frame")
board = kwargs.pop("board")
return self._send_scp(cabinet, frame, board, *args, **kwargs) | def function[send_scp, parameter[self]]:
constant[Transmit an SCP Packet to a specific board.
Automatically determines the appropriate connection to use.
See the arguments for
:py:meth:`~rig.machine_control.scp_connection.SCPConnection` for
details.
Parameters
----------
cabinet : int
frame : int
board : int
]
variable[cabinet] assign[=] call[name[kwargs].pop, parameter[constant[cabinet]]]
variable[frame] assign[=] call[name[kwargs].pop, parameter[constant[frame]]]
variable[board] assign[=] call[name[kwargs].pop, parameter[constant[board]]]
return[call[name[self]._send_scp, parameter[name[cabinet], name[frame], name[board], <ast.Starred object at 0x7da1b180c790>]]] | keyword[def] identifier[send_scp] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[cabinet] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[frame] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[board] = identifier[kwargs] . identifier[pop] ( literal[string] )
keyword[return] identifier[self] . identifier[_send_scp] ( identifier[cabinet] , identifier[frame] , identifier[board] ,* identifier[args] ,** identifier[kwargs] ) | def send_scp(self, *args, **kwargs):
"""Transmit an SCP Packet to a specific board.
Automatically determines the appropriate connection to use.
See the arguments for
:py:meth:`~rig.machine_control.scp_connection.SCPConnection` for
details.
Parameters
----------
cabinet : int
frame : int
board : int
"""
# Retrieve contextual arguments from the keyword arguments. The
# context system ensures that these values are present.
cabinet = kwargs.pop('cabinet')
frame = kwargs.pop('frame')
board = kwargs.pop('board')
return self._send_scp(cabinet, frame, board, *args, **kwargs) |
def log_info(msg, logger="TaskLogger"):
"""Log an INFO message
Convenience function to log a message to the default Logger
Parameters
----------
msg : str
Message to be logged
logger : str, optional (default: "TaskLogger")
Unique name of the logger to retrieve
Returns
-------
logger : TaskLogger
"""
tasklogger = get_tasklogger(logger)
tasklogger.info(msg)
return tasklogger | def function[log_info, parameter[msg, logger]]:
constant[Log an INFO message
Convenience function to log a message to the default Logger
Parameters
----------
msg : str
Message to be logged
logger : str, optional (default: "TaskLogger")
Unique name of the logger to retrieve
Returns
-------
logger : TaskLogger
]
variable[tasklogger] assign[=] call[name[get_tasklogger], parameter[name[logger]]]
call[name[tasklogger].info, parameter[name[msg]]]
return[name[tasklogger]] | keyword[def] identifier[log_info] ( identifier[msg] , identifier[logger] = literal[string] ):
literal[string]
identifier[tasklogger] = identifier[get_tasklogger] ( identifier[logger] )
identifier[tasklogger] . identifier[info] ( identifier[msg] )
keyword[return] identifier[tasklogger] | def log_info(msg, logger='TaskLogger'):
"""Log an INFO message
Convenience function to log a message to the default Logger
Parameters
----------
msg : str
Message to be logged
logger : str, optional (default: "TaskLogger")
Unique name of the logger to retrieve
Returns
-------
logger : TaskLogger
"""
tasklogger = get_tasklogger(logger)
tasklogger.info(msg)
return tasklogger |
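A minimal usage sketch for the log_info record above, assuming the tasklogger package exports this convenience function at module level.

import tasklogger

tasklogger.log_info("loading data")                   # default "TaskLogger"
logger = tasklogger.log_info("done", logger="MyLog")  # named logger, returned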
def p_import(self, p):
'import : IMPORT ID NL'
p[0] = AstImport(self.path, p.lineno(1), p.lexpos(1), p[2]) | def function[p_import, parameter[self, p]]:
constant[import : IMPORT ID NL]
call[name[p]][constant[0]] assign[=] call[name[AstImport], parameter[name[self].path, call[name[p].lineno, parameter[constant[1]]], call[name[p].lexpos, parameter[constant[1]]], call[name[p]][constant[2]]]] | keyword[def] identifier[p_import] ( identifier[self] , identifier[p] ):
literal[string]
identifier[p] [ literal[int] ]= identifier[AstImport] ( identifier[self] . identifier[path] , identifier[p] . identifier[lineno] ( literal[int] ), identifier[p] . identifier[lexpos] ( literal[int] ), identifier[p] [ literal[int] ]) | def p_import(self, p):
"""import : IMPORT ID NL"""
p[0] = AstImport(self.path, p.lineno(1), p.lexpos(1), p[2]) |
def irr(values, guess = None):
"""
Function to calculate the internal rate of return (IRR) using payments and periodic dates. It resembles the
    Excel function IRR().
Excel reference: https://support.office.com/en-us/article/IRR-function-64925eaa-9988-495b-b290-3ad0c163c1bc
:param values: the payments of which at least one has to be negative.
:param guess: an initial guess which is required by Excel but isn't used by this function.
:return: a float being the IRR.
"""
if isinstance(values, Range):
values = values.values
if guess is not None and guess != 0:
raise ValueError('guess value for excellib.irr() is %s and not 0' % guess)
else:
try:
return np.irr(values)
except Exception as e:
return ExcelError('#NUM!', e) | def function[irr, parameter[values, guess]]:
constant[
Function to calculate the internal rate of return (IRR) using payments and periodic dates. It resembles the
    Excel function IRR().
Excel reference: https://support.office.com/en-us/article/IRR-function-64925eaa-9988-495b-b290-3ad0c163c1bc
:param values: the payments of which at least one has to be negative.
:param guess: an initial guess which is required by Excel but isn't used by this function.
:return: a float being the IRR.
]
if call[name[isinstance], parameter[name[values], name[Range]]] begin[:]
variable[values] assign[=] name[values].values
if <ast.BoolOp object at 0x7da1b07331c0> begin[:]
<ast.Raise object at 0x7da1b07336d0> | keyword[def] identifier[irr] ( identifier[values] , identifier[guess] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[values] , identifier[Range] ):
identifier[values] = identifier[values] . identifier[values]
keyword[if] identifier[guess] keyword[is] keyword[not] keyword[None] keyword[and] identifier[guess] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[guess] )
keyword[else] :
keyword[try] :
keyword[return] identifier[np] . identifier[irr] ( identifier[values] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[return] identifier[ExcelError] ( literal[string] , identifier[e] ) | def irr(values, guess=None):
"""
Function to calculate the internal rate of return (IRR) using payments and periodic dates. It resembles the
    Excel function IRR().
Excel reference: https://support.office.com/en-us/article/IRR-function-64925eaa-9988-495b-b290-3ad0c163c1bc
:param values: the payments of which at least one has to be negative.
:param guess: an initial guess which is required by Excel but isn't used by this function.
:return: a float being the IRR.
"""
if isinstance(values, Range):
values = values.values # depends on [control=['if'], data=[]]
if guess is not None and guess != 0:
raise ValueError('guess value for excellib.irr() is %s and not 0' % guess) # depends on [control=['if'], data=[]]
else:
try:
return np.irr(values) # depends on [control=['try'], data=[]]
except Exception as e:
return ExcelError('#NUM!', e) # depends on [control=['except'], data=['e']] |
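A worked example of the IRR computation the record above delegates to. np.irr was deprecated in NumPy 1.18 and later moved to the separate numpy_financial package, so the import below assumes an older NumPy.

import numpy as np

cashflows = [-100, 39, 59, 55, 20]  # at least one payment must be negative
rate = np.irr(cashflows)
print(round(rate, 5))               # 0.28095, i.e. an IRR of about 28.1%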
def underlying_order_book_id(self):
"""
        [str] Underlying instrument code. Currently, for all futures contracts other than stock index futures (IH, IF, IC), this field is 'null' (futures only).
"""
try:
return self.__dict__["underlying_order_book_id"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'underlying_order_book_id' ".format(self.order_book_id)
) | def function[underlying_order_book_id, parameter[self]]:
constant[
        [str] Underlying instrument code. Currently, for all futures contracts other than stock index futures (IH, IF, IC), this field is 'null' (futures only).
]
<ast.Try object at 0x7da1b2185db0> | keyword[def] identifier[underlying_order_book_id] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[__dict__] [ literal[string] ]
keyword[except] ( identifier[KeyError] , identifier[ValueError] ):
keyword[raise] identifier[AttributeError] (
literal[string] . identifier[format] ( identifier[self] . identifier[order_book_id] )
) | def underlying_order_book_id(self):
"""
        [str] Underlying instrument code. Currently, for all futures contracts other than stock index futures (IH, IF, IC), this field is 'null' (futures only).
"""
try:
return self.__dict__['underlying_order_book_id'] # depends on [control=['try'], data=[]]
except (KeyError, ValueError):
raise AttributeError("Instrument(order_book_id={}) has no attribute 'underlying_order_book_id' ".format(self.order_book_id)) # depends on [control=['except'], data=[]] |
def perc_fltr(dem, perc=(1.0, 99.0)):
"""Percentile filter
"""
rangelim = malib.calcperc(dem, perc)
print('Excluding values outside of percentile range: {0:0.2f} to {1:0.2f}'.format(*perc))
out = range_fltr(dem, rangelim)
return out | def function[perc_fltr, parameter[dem, perc]]:
constant[Percentile filter
]
variable[rangelim] assign[=] call[name[malib].calcperc, parameter[name[dem], name[perc]]]
call[name[print], parameter[call[constant[Excluding values outside of percentile range: {0:0.2f} to {1:0.2f}].format, parameter[<ast.Starred object at 0x7da1b07ad000>]]]]
variable[out] assign[=] call[name[range_fltr], parameter[name[dem], name[rangelim]]]
return[name[out]] | keyword[def] identifier[perc_fltr] ( identifier[dem] , identifier[perc] =( literal[int] , literal[int] )):
literal[string]
identifier[rangelim] = identifier[malib] . identifier[calcperc] ( identifier[dem] , identifier[perc] )
identifier[print] ( literal[string] . identifier[format] (* identifier[perc] ))
identifier[out] = identifier[range_fltr] ( identifier[dem] , identifier[rangelim] )
keyword[return] identifier[out] | def perc_fltr(dem, perc=(1.0, 99.0)):
"""Percentile filter
"""
rangelim = malib.calcperc(dem, perc)
print('Excluding values outside of percentile range: {0:0.2f} to {1:0.2f}'.format(*perc))
out = range_fltr(dem, rangelim)
return out |
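A sketch of the percentile clamp behind perc_fltr, assuming malib.calcperc is roughly np.percentile and range_fltr masks values outside the range; both helpers are external and not shown here.

import numpy as np

dem = np.array([[1., 2., 3.], [4., 5., 500.]])  # 500 is an outlier spike
lo, hi = np.percentile(dem, (1.0, 99.0))
filtered = np.ma.masked_outside(dem, lo, hi)
print(round(lo, 2), round(hi, 2))               # 1.05 475.25
print(filtered)                                  # the 500 cell is masked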
def threadpooled(
func: None = None,
*,
loop_getter: typing.Union[None, typing.Callable[..., asyncio.AbstractEventLoop], asyncio.AbstractEventLoop] = None,
loop_getter_need_context: bool = False,
) -> ThreadPooled:
"""Overload: No function.""" | def function[threadpooled, parameter[func]]:
constant[Overload: No function.] | keyword[def] identifier[threadpooled] (
identifier[func] : keyword[None] = keyword[None] ,
*,
identifier[loop_getter] : identifier[typing] . identifier[Union] [ keyword[None] , identifier[typing] . identifier[Callable] [..., identifier[asyncio] . identifier[AbstractEventLoop] ], identifier[asyncio] . identifier[AbstractEventLoop] ]= keyword[None] ,
identifier[loop_getter_need_context] : identifier[bool] = keyword[False] ,
)-> identifier[ThreadPooled] :
literal[string] | def threadpooled(func: None=None, *, loop_getter: typing.Union[None, typing.Callable[..., asyncio.AbstractEventLoop], asyncio.AbstractEventLoop]=None, loop_getter_need_context: bool=False) -> ThreadPooled:
"""Overload: No function.""" |
def getlibversion():
"""Get the library version info.
Args:
no argument
Returns:
4-element tuple with the following components:
-major version number (int)
-minor version number (int)
-complete library version number (int)
-additional information (string)
C library equivalent : Hgetlibversion
"""
status, major_v, minor_v, release, info = _C.Hgetlibversion()
_checkErr('getlibversion', status, "cannot get lib version")
return major_v, minor_v, release, info | def function[getlibversion, parameter[]]:
constant[Get the library version info.
Args:
no argument
Returns:
4-element tuple with the following components:
-major version number (int)
-minor version number (int)
-complete library version number (int)
-additional information (string)
C library equivalent : Hgetlibversion
]
<ast.Tuple object at 0x7da1b15eaef0> assign[=] call[name[_C].Hgetlibversion, parameter[]]
call[name[_checkErr], parameter[constant[getlibversion], name[status], constant[cannot get lib version]]]
return[tuple[[<ast.Name object at 0x7da1b12650f0>, <ast.Name object at 0x7da1b1264e80>, <ast.Name object at 0x7da1b1264be0>, <ast.Name object at 0x7da1b1264a90>]]] | keyword[def] identifier[getlibversion] ():
literal[string]
identifier[status] , identifier[major_v] , identifier[minor_v] , identifier[release] , identifier[info] = identifier[_C] . identifier[Hgetlibversion] ()
identifier[_checkErr] ( literal[string] , identifier[status] , literal[string] )
keyword[return] identifier[major_v] , identifier[minor_v] , identifier[release] , identifier[info] | def getlibversion():
"""Get the library version info.
Args:
no argument
Returns:
4-element tuple with the following components:
-major version number (int)
-minor version number (int)
-complete library version number (int)
-additional information (string)
C library equivalent : Hgetlibversion
"""
(status, major_v, minor_v, release, info) = _C.Hgetlibversion()
_checkErr('getlibversion', status, 'cannot get lib version')
return (major_v, minor_v, release, info) |
def known_remotes(self):
"""The names of the configured remote repositories (a list of :class:`.Remote` objects)."""
objects = []
for line in self.context.capture('hg', 'paths').splitlines():
name, _, location = line.partition('=')
if name and location:
name = name.strip()
objects.append(Remote(
default=(name in ('default', 'default-push')),
location=location.strip(), name=name, repository=self,
# We give the `default-push' remote the `push' role only,
# while allowing both roles for other remotes. This isn't
# strictly speaking correct but it will prevent
# Repository.pull() from considering the `default-push'
# remote as a suitable default to pull from (which is not
# what Mercurial does when you run `hg pull').
roles=(['push'] if name == 'default-push' else ['push', 'pull']),
))
return objects | def function[known_remotes, parameter[self]]:
    constant[The configured remote repositories (a list of :class:`.Remote` objects).]
variable[objects] assign[=] list[[]]
for taget[name[line]] in starred[call[call[name[self].context.capture, parameter[constant[hg], constant[paths]]].splitlines, parameter[]]] begin[:]
<ast.Tuple object at 0x7da1b0ac9d80> assign[=] call[name[line].partition, parameter[constant[=]]]
if <ast.BoolOp object at 0x7da1b0ac8a90> begin[:]
variable[name] assign[=] call[name[name].strip, parameter[]]
call[name[objects].append, parameter[call[name[Remote], parameter[]]]]
return[name[objects]] | keyword[def] identifier[known_remotes] ( identifier[self] ):
literal[string]
identifier[objects] =[]
keyword[for] identifier[line] keyword[in] identifier[self] . identifier[context] . identifier[capture] ( literal[string] , literal[string] ). identifier[splitlines] ():
identifier[name] , identifier[_] , identifier[location] = identifier[line] . identifier[partition] ( literal[string] )
keyword[if] identifier[name] keyword[and] identifier[location] :
identifier[name] = identifier[name] . identifier[strip] ()
identifier[objects] . identifier[append] ( identifier[Remote] (
identifier[default] =( identifier[name] keyword[in] ( literal[string] , literal[string] )),
identifier[location] = identifier[location] . identifier[strip] (), identifier[name] = identifier[name] , identifier[repository] = identifier[self] ,
identifier[roles] =([ literal[string] ] keyword[if] identifier[name] == literal[string] keyword[else] [ literal[string] , literal[string] ]),
))
keyword[return] identifier[objects] | def known_remotes(self):
"""The names of the configured remote repositories (a list of :class:`.Remote` objects)."""
objects = []
for line in self.context.capture('hg', 'paths').splitlines():
(name, _, location) = line.partition('=')
if name and location:
name = name.strip()
# We give the `default-push' remote the `push' role only,
# while allowing both roles for other remotes. This isn't
# strictly speaking correct but it will prevent
# Repository.pull() from considering the `default-push'
# remote as a suitable default to pull from (which is not
# what Mercurial does when you run `hg pull').
objects.append(Remote(default=name in ('default', 'default-push'), location=location.strip(), name=name, repository=self, roles=['push'] if name == 'default-push' else ['push', 'pull'])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return objects |
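An illustration of the parsing step in known_remotes: `hg paths` prints `name = URL` lines, and str.partition('=') splits on the first equals sign.

line = "default = https://example.org/repo"
name, _, location = line.partition('=')
print(name.strip(), '->', location.strip())
# default -> https://example.org/repo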
def rels_xml_for(self, source_uri):
"""
Return rels item XML for source with *source_uri*, or None if the
item has no rels item.
"""
try:
rels_xml = self.blob_for(source_uri.rels_uri)
except IOError:
rels_xml = None
return rels_xml | def function[rels_xml_for, parameter[self, source_uri]]:
constant[
Return rels item XML for source with *source_uri*, or None if the
item has no rels item.
]
<ast.Try object at 0x7da1b21788e0>
return[name[rels_xml]] | keyword[def] identifier[rels_xml_for] ( identifier[self] , identifier[source_uri] ):
literal[string]
keyword[try] :
identifier[rels_xml] = identifier[self] . identifier[blob_for] ( identifier[source_uri] . identifier[rels_uri] )
keyword[except] identifier[IOError] :
identifier[rels_xml] = keyword[None]
keyword[return] identifier[rels_xml] | def rels_xml_for(self, source_uri):
"""
Return rels item XML for source with *source_uri*, or None if the
item has no rels item.
"""
try:
rels_xml = self.blob_for(source_uri.rels_uri) # depends on [control=['try'], data=[]]
except IOError:
rels_xml = None # depends on [control=['except'], data=[]]
return rels_xml |
def is_valid_catalog(self, catalog=None):
"""Valida que un archivo `data.json` cumpla con el schema definido.
Chequea que el data.json tiene todos los campos obligatorios y que
tanto los campos obligatorios como los opcionales siguen la estructura
definida en el schema.
Args:
catalog (str o dict): Catálogo (dict, JSON o XLSX) a ser validado.
Si no se pasa, valida este catálogo.
Returns:
bool: True si el data.json cumple con el schema, sino False.
"""
catalog = catalog or self
return validation.is_valid_catalog(catalog, validator=self.validator) | def function[is_valid_catalog, parameter[self, catalog]]:
    constant[Validates that a `data.json` file complies with the defined schema.
Checks that the data.json contains all the required fields and that
both the required and the optional fields follow the structure
defined in the schema.
Args:
    catalog (str or dict): Catalog (dict, JSON, or XLSX) to be validated.
        If not passed, this catalog is validated.
Returns:
    bool: True if the data.json complies with the schema, otherwise False.
    ]
variable[catalog] assign[=] <ast.BoolOp object at 0x7da1b02e5090>
return[call[name[validation].is_valid_catalog, parameter[name[catalog]]]] | keyword[def] identifier[is_valid_catalog] ( identifier[self] , identifier[catalog] = keyword[None] ):
literal[string]
identifier[catalog] = identifier[catalog] keyword[or] identifier[self]
keyword[return] identifier[validation] . identifier[is_valid_catalog] ( identifier[catalog] , identifier[validator] = identifier[self] . identifier[validator] ) | def is_valid_catalog(self, catalog=None):
"""Valida que un archivo `data.json` cumpla con el schema definido.
Chequea que el data.json tiene todos los campos obligatorios y que
tanto los campos obligatorios como los opcionales siguen la estructura
definida en el schema.
Args:
catalog (str o dict): Catálogo (dict, JSON o XLSX) a ser validado.
Si no se pasa, valida este catálogo.
Returns:
bool: True si el data.json cumple con el schema, sino False.
"""
catalog = catalog or self
return validation.is_valid_catalog(catalog, validator=self.validator) |
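The pydatajson validator used above is not shown in this record; the snippet below is only a hedged analogue of "is this document valid against a schema", built on the generic jsonschema package.

import jsonschema

schema = {"type": "object", "required": ["title", "dataset"]}
catalog = {"title": "Datos Abiertos", "dataset": []}

validator = jsonschema.Draft7Validator(schema)
print(validator.is_valid(catalog))  # True: both required keys are present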
def _line_start_indexes(self):
"""
Array pointing to the start indexes of all the lines.
"""
# Cache, because this is often reused. (If it is used, it's often used
# many times. And this has to be fast for editing big documents!)
if self._cache.line_indexes is None:
# Create list of line lengths.
line_lengths = map(len, self.lines)
# Calculate cumulative sums.
indexes = [0]
append = indexes.append
pos = 0
for line_length in line_lengths:
pos += line_length + 1
append(pos)
# Remove the last item. (This is not a new line.)
if len(indexes) > 1:
indexes.pop()
self._cache.line_indexes = indexes
return self._cache.line_indexes | def function[_line_start_indexes, parameter[self]]:
constant[
Array pointing to the start indexes of all the lines.
]
if compare[name[self]._cache.line_indexes is constant[None]] begin[:]
variable[line_lengths] assign[=] call[name[map], parameter[name[len], name[self].lines]]
variable[indexes] assign[=] list[[<ast.Constant object at 0x7da18f58fdc0>]]
variable[append] assign[=] name[indexes].append
variable[pos] assign[=] constant[0]
for taget[name[line_length]] in starred[name[line_lengths]] begin[:]
<ast.AugAssign object at 0x7da18f58cd00>
call[name[append], parameter[name[pos]]]
if compare[call[name[len], parameter[name[indexes]]] greater[>] constant[1]] begin[:]
call[name[indexes].pop, parameter[]]
name[self]._cache.line_indexes assign[=] name[indexes]
return[name[self]._cache.line_indexes] | keyword[def] identifier[_line_start_indexes] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_cache] . identifier[line_indexes] keyword[is] keyword[None] :
identifier[line_lengths] = identifier[map] ( identifier[len] , identifier[self] . identifier[lines] )
identifier[indexes] =[ literal[int] ]
identifier[append] = identifier[indexes] . identifier[append]
identifier[pos] = literal[int]
keyword[for] identifier[line_length] keyword[in] identifier[line_lengths] :
identifier[pos] += identifier[line_length] + literal[int]
identifier[append] ( identifier[pos] )
keyword[if] identifier[len] ( identifier[indexes] )> literal[int] :
identifier[indexes] . identifier[pop] ()
identifier[self] . identifier[_cache] . identifier[line_indexes] = identifier[indexes]
keyword[return] identifier[self] . identifier[_cache] . identifier[line_indexes] | def _line_start_indexes(self):
"""
Array pointing to the start indexes of all the lines.
"""
# Cache, because this is often reused. (If it is used, it's often used
# many times. And this has to be fast for editing big documents!)
if self._cache.line_indexes is None:
# Create list of line lengths.
line_lengths = map(len, self.lines)
# Calculate cumulative sums.
indexes = [0]
append = indexes.append
pos = 0
for line_length in line_lengths:
pos += line_length + 1
append(pos) # depends on [control=['for'], data=['line_length']]
# Remove the last item. (This is not a new line.)
if len(indexes) > 1:
indexes.pop() # depends on [control=['if'], data=[]]
self._cache.line_indexes = indexes # depends on [control=['if'], data=[]]
return self._cache.line_indexes |
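The cached index built above is a cumulative sum of line lengths plus one newline each; itertools.accumulate expresses the same computation in a couple of lines.

from itertools import accumulate

lines = ["abc", "de", ""]
starts = [0] + list(accumulate(len(l) + 1 for l in lines))[:-1]
print(starts)  # [0, 4, 7] -- start offsets of each line in "abc\nde\n"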
def replace_constraint(self,name,selectfrac_skip=False,distribution_skip=False):
"""
Re-apply constraint that had been removed
:param name:
Name of constraint to replace
:param selectfrac_skip,distribution_skip: (optional)
Same as :func:`StarPopulation.apply_constraint`
"""
hidden_constraints = self.hidden_constraints
if name in hidden_constraints:
c = hidden_constraints[name]
self.apply_constraint(c,selectfrac_skip=selectfrac_skip,
distribution_skip=distribution_skip)
del hidden_constraints[name]
else:
logging.warning('Constraint {} not available for replacement.'.format(name))
self.hidden_constraints = hidden_constraints | def function[replace_constraint, parameter[self, name, selectfrac_skip, distribution_skip]]:
constant[
Re-apply constraint that had been removed
:param name:
Name of constraint to replace
:param selectfrac_skip,distribution_skip: (optional)
Same as :func:`StarPopulation.apply_constraint`
]
variable[hidden_constraints] assign[=] name[self].hidden_constraints
if compare[name[name] in name[hidden_constraints]] begin[:]
variable[c] assign[=] call[name[hidden_constraints]][name[name]]
call[name[self].apply_constraint, parameter[name[c]]]
<ast.Delete object at 0x7da20c6c4d90>
name[self].hidden_constraints assign[=] name[hidden_constraints] | keyword[def] identifier[replace_constraint] ( identifier[self] , identifier[name] , identifier[selectfrac_skip] = keyword[False] , identifier[distribution_skip] = keyword[False] ):
literal[string]
identifier[hidden_constraints] = identifier[self] . identifier[hidden_constraints]
keyword[if] identifier[name] keyword[in] identifier[hidden_constraints] :
identifier[c] = identifier[hidden_constraints] [ identifier[name] ]
identifier[self] . identifier[apply_constraint] ( identifier[c] , identifier[selectfrac_skip] = identifier[selectfrac_skip] ,
identifier[distribution_skip] = identifier[distribution_skip] )
keyword[del] identifier[hidden_constraints] [ identifier[name] ]
keyword[else] :
identifier[logging] . identifier[warning] ( literal[string] . identifier[format] ( identifier[name] ))
identifier[self] . identifier[hidden_constraints] = identifier[hidden_constraints] | def replace_constraint(self, name, selectfrac_skip=False, distribution_skip=False):
"""
Re-apply constraint that had been removed
:param name:
Name of constraint to replace
:param selectfrac_skip,distribution_skip: (optional)
Same as :func:`StarPopulation.apply_constraint`
"""
hidden_constraints = self.hidden_constraints
if name in hidden_constraints:
c = hidden_constraints[name]
self.apply_constraint(c, selectfrac_skip=selectfrac_skip, distribution_skip=distribution_skip)
del hidden_constraints[name] # depends on [control=['if'], data=['name', 'hidden_constraints']]
else:
logging.warning('Constraint {} not available for replacement.'.format(name))
self.hidden_constraints = hidden_constraints |
def to_ts(s):
"""Parses an NGINX timestamp from "30/Apr/2014:07:32:09 +0000" and returns it as ISO 8601" """
# Strip TZ portion if present
m = Nginx.DATE_FMT.match(s)
if m:
s = m.group(1)
delta = timedelta(seconds=int(m.group(3)) * (-1 if m.group(2) == '-' else 1)) # Offset from GMT
else:
delta = timedelta(seconds=0)
dt = datetime.strptime(s, "%d/%b/%Y:%H:%M:%S")
dt += delta
return dt.strftime('%Y-%m-%dT%H:%M:%SZ') | def function[to_ts, parameter[s]]:
    constant[Parses an NGINX timestamp from "30/Apr/2014:07:32:09 +0000" and returns it as ISO 8601.]
variable[m] assign[=] call[name[Nginx].DATE_FMT.match, parameter[name[s]]]
if name[m] begin[:]
variable[s] assign[=] call[name[m].group, parameter[constant[1]]]
variable[delta] assign[=] call[name[timedelta], parameter[]]
variable[dt] assign[=] call[name[datetime].strptime, parameter[name[s], constant[%d/%b/%Y:%H:%M:%S]]]
<ast.AugAssign object at 0x7da207f9a4d0>
return[call[name[dt].strftime, parameter[constant[%Y-%m-%dT%H:%M:%SZ]]]] | keyword[def] identifier[to_ts] ( identifier[s] ):
literal[string]
identifier[m] = identifier[Nginx] . identifier[DATE_FMT] . identifier[match] ( identifier[s] )
keyword[if] identifier[m] :
identifier[s] = identifier[m] . identifier[group] ( literal[int] )
identifier[delta] = identifier[timedelta] ( identifier[seconds] = identifier[int] ( identifier[m] . identifier[group] ( literal[int] ))*(- literal[int] keyword[if] identifier[m] . identifier[group] ( literal[int] )== literal[string] keyword[else] literal[int] ))
keyword[else] :
identifier[delta] = identifier[timedelta] ( identifier[seconds] = literal[int] )
identifier[dt] = identifier[datetime] . identifier[strptime] ( identifier[s] , literal[string] )
identifier[dt] += identifier[delta]
keyword[return] identifier[dt] . identifier[strftime] ( literal[string] ) | def to_ts(s):
"""Parses an NGINX timestamp from "30/Apr/2014:07:32:09 +0000" and returns it as ISO 8601" """
# Strip TZ portion if present
m = Nginx.DATE_FMT.match(s)
if m:
s = m.group(1)
delta = timedelta(seconds=int(m.group(3)) * (-1 if m.group(2) == '-' else 1)) # Offset from GMT # depends on [control=['if'], data=[]]
else:
delta = timedelta(seconds=0)
dt = datetime.strptime(s, '%d/%b/%Y:%H:%M:%S')
dt += delta
return dt.strftime('%Y-%m-%dT%H:%M:%SZ') |
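A standalone re-run of the conversion above, assuming Nginx.DATE_FMT (not shown) captures (timestamp)(sign)(digits) from the " +0000" suffix as the group numbers imply. Note that the original treats the captured offset digits as seconds, which is only exact for a +0000 offset.

import re
from datetime import datetime, timedelta

DATE_FMT = re.compile(r'(.+) ([-+])(\d+)')  # assumed shape of Nginx.DATE_FMT
m = DATE_FMT.match('30/Apr/2014:07:32:09 +0000')
dt = datetime.strptime(m.group(1), '%d/%b/%Y:%H:%M:%S')
dt += timedelta(seconds=int(m.group(3)) * (-1 if m.group(2) == '-' else 1))
print(dt.strftime('%Y-%m-%dT%H:%M:%SZ'))  # 2014-04-30T07:32:09Z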
def _to_patches(self, X):
"""
Reshapes input to patches of the size of classifier's receptive field.
For example:
input X shape: [n_samples, n_pixels_y, n_pixels_x, n_bands]
output: [n_samples * n_pixels_y/receptive_field_y * n_pixels_x/receptive_field_x,
receptive_field_y, receptive_field_x, n_bands]
"""
window = self.receptive_field
asteps = self.receptive_field
if len(X.shape) == 4:
window += (0,)
asteps += (1,)
image_view = rolling_window(X, window, asteps)
new_shape = image_view.shape
# this makes a copy of the array? can we do without reshaping?
image_view = image_view.reshape((new_shape[0] * new_shape[1] * new_shape[2],) + new_shape[3:])
if len(X.shape) == 4:
image_view = np.moveaxis(image_view, 1, -1)
return image_view, new_shape | def function[_to_patches, parameter[self, X]]:
constant[
Reshapes input to patches of the size of classifier's receptive field.
For example:
input X shape: [n_samples, n_pixels_y, n_pixels_x, n_bands]
output: [n_samples * n_pixels_y/receptive_field_y * n_pixels_x/receptive_field_x,
receptive_field_y, receptive_field_x, n_bands]
]
variable[window] assign[=] name[self].receptive_field
variable[asteps] assign[=] name[self].receptive_field
if compare[call[name[len], parameter[name[X].shape]] equal[==] constant[4]] begin[:]
<ast.AugAssign object at 0x7da18f812c50>
<ast.AugAssign object at 0x7da18f813a90>
variable[image_view] assign[=] call[name[rolling_window], parameter[name[X], name[window], name[asteps]]]
variable[new_shape] assign[=] name[image_view].shape
variable[image_view] assign[=] call[name[image_view].reshape, parameter[binary_operation[tuple[[<ast.BinOp object at 0x7da18f810df0>]] + call[name[new_shape]][<ast.Slice object at 0x7da18f8125c0>]]]]
if compare[call[name[len], parameter[name[X].shape]] equal[==] constant[4]] begin[:]
variable[image_view] assign[=] call[name[np].moveaxis, parameter[name[image_view], constant[1], <ast.UnaryOp object at 0x7da18f812f50>]]
return[tuple[[<ast.Name object at 0x7da18f810850>, <ast.Name object at 0x7da18f8101f0>]]] | keyword[def] identifier[_to_patches] ( identifier[self] , identifier[X] ):
literal[string]
identifier[window] = identifier[self] . identifier[receptive_field]
identifier[asteps] = identifier[self] . identifier[receptive_field]
keyword[if] identifier[len] ( identifier[X] . identifier[shape] )== literal[int] :
identifier[window] +=( literal[int] ,)
identifier[asteps] +=( literal[int] ,)
identifier[image_view] = identifier[rolling_window] ( identifier[X] , identifier[window] , identifier[asteps] )
identifier[new_shape] = identifier[image_view] . identifier[shape]
identifier[image_view] = identifier[image_view] . identifier[reshape] (( identifier[new_shape] [ literal[int] ]* identifier[new_shape] [ literal[int] ]* identifier[new_shape] [ literal[int] ],)+ identifier[new_shape] [ literal[int] :])
keyword[if] identifier[len] ( identifier[X] . identifier[shape] )== literal[int] :
identifier[image_view] = identifier[np] . identifier[moveaxis] ( identifier[image_view] , literal[int] ,- literal[int] )
keyword[return] identifier[image_view] , identifier[new_shape] | def _to_patches(self, X):
"""
Reshapes input to patches of the size of classifier's receptive field.
For example:
input X shape: [n_samples, n_pixels_y, n_pixels_x, n_bands]
output: [n_samples * n_pixels_y/receptive_field_y * n_pixels_x/receptive_field_x,
receptive_field_y, receptive_field_x, n_bands]
"""
window = self.receptive_field
asteps = self.receptive_field
if len(X.shape) == 4:
window += (0,)
asteps += (1,) # depends on [control=['if'], data=[]]
image_view = rolling_window(X, window, asteps)
new_shape = image_view.shape # this makes a copy of the array? can we do without reshaping?
image_view = image_view.reshape((new_shape[0] * new_shape[1] * new_shape[2],) + new_shape[3:])
if len(X.shape) == 4:
image_view = np.moveaxis(image_view, 1, -1) # depends on [control=['if'], data=[]]
return (image_view, new_shape) |
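A toy version of the reshape-to-patches idea in _to_patches for non-overlapping 2x2 windows; rolling_window itself is an external helper not shown here, so NumPy's sliding_window_view (available from NumPy 1.20) stands in for it.

import numpy as np

X = np.arange(16).reshape(1, 4, 4)  # [n_samples, n_pixels_y, n_pixels_x]
view = np.lib.stride_tricks.sliding_window_view(X, (1, 2, 2))[:, ::2, ::2]
patches = view.reshape(-1, 2, 2)    # flatten sample and grid axes together
print(patches.shape)                # (4, 2, 2): four 2x2 receptive fields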
def report_events(self, start_date, end_date, type="system"):
"""
Create a report for all client events or all system events.
Uses GET to /reports/events/{clients,system} interface
:Args:
* *start_date*: (datetime) Start time for report generation
* *end_date*: (datetime) End time for report generation
:Kwargs:
* *type*: (str) Type of event report to create. "system" or "clients"
:Returns: (list) List of events in the input range
"""
start_str, end_str = self._format_input_dates(start_date, end_date)
params = {
"start_date": start_str,
"end_date": end_str
}
endpoint = url.reports_events_clients if type == "clients" else url.reports_events_system
response = self._get(endpoint, params=params)
self._check_response(response, 200)
return self._create_response(response).get("events") | def function[report_events, parameter[self, start_date, end_date, type]]:
constant[
Create a report for all client events or all system events.
Uses GET to /reports/events/{clients,system} interface
:Args:
* *start_date*: (datetime) Start time for report generation
* *end_date*: (datetime) End time for report generation
:Kwargs:
* *type*: (str) Type of event report to create. "system" or "clients"
:Returns: (list) List of events in the input range
]
<ast.Tuple object at 0x7da1b0a49300> assign[=] call[name[self]._format_input_dates, parameter[name[start_date], name[end_date]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0a4a590>, <ast.Constant object at 0x7da1b0a48df0>], [<ast.Name object at 0x7da1b0a4aef0>, <ast.Name object at 0x7da1b0a48280>]]
variable[endpoint] assign[=] <ast.IfExp object at 0x7da1b0a495a0>
variable[response] assign[=] call[name[self]._get, parameter[name[endpoint]]]
call[name[self]._check_response, parameter[name[response], constant[200]]]
return[call[call[name[self]._create_response, parameter[name[response]]].get, parameter[constant[events]]]] | keyword[def] identifier[report_events] ( identifier[self] , identifier[start_date] , identifier[end_date] , identifier[type] = literal[string] ):
literal[string]
identifier[start_str] , identifier[end_str] = identifier[self] . identifier[_format_input_dates] ( identifier[start_date] , identifier[end_date] )
identifier[params] ={
literal[string] : identifier[start_str] ,
literal[string] : identifier[end_str]
}
identifier[endpoint] = identifier[url] . identifier[reports_events_clients] keyword[if] identifier[type] == literal[string] keyword[else] identifier[url] . identifier[reports_events_system]
identifier[response] = identifier[self] . identifier[_get] ( identifier[endpoint] , identifier[params] = identifier[params] )
identifier[self] . identifier[_check_response] ( identifier[response] , literal[int] )
keyword[return] identifier[self] . identifier[_create_response] ( identifier[response] ). identifier[get] ( literal[string] ) | def report_events(self, start_date, end_date, type='system'):
"""
Create a report for all client events or all system events.
Uses GET to /reports/events/{clients,system} interface
:Args:
* *start_date*: (datetime) Start time for report generation
* *end_date*: (datetime) End time for report generation
:Kwargs:
* *type*: (str) Type of event report to create. "system" or "clients"
:Returns: (list) List of events in the input range
"""
(start_str, end_str) = self._format_input_dates(start_date, end_date)
params = {'start_date': start_str, 'end_date': end_str}
endpoint = url.reports_events_clients if type == 'clients' else url.reports_events_system
response = self._get(endpoint, params=params)
self._check_response(response, 200)
return self._create_response(response).get('events') |
def _set_virtual_mac(self, v, load=False):
"""
Setter method for virtual_mac, mapped from YANG variable /rbridge_id/interface/ve/ipv6/vrrpv3e/virtual_mac (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_virtual_mac is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_virtual_mac() directly.
YANG Description: Virtual MAC
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=virtual_mac.virtual_mac, is_container='container', presence=False, yang_name="virtual-mac", rest_name="virtual-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual MAC'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """virtual_mac must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=virtual_mac.virtual_mac, is_container='container', presence=False, yang_name="virtual-mac", rest_name="virtual-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual MAC'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='container', is_config=True)""",
})
self.__virtual_mac = t
if hasattr(self, '_set'):
self._set() | def function[_set_virtual_mac, parameter[self, v, load]]:
constant[
Setter method for virtual_mac, mapped from YANG variable /rbridge_id/interface/ve/ipv6/vrrpv3e/virtual_mac (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_virtual_mac is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_virtual_mac() directly.
YANG Description: Virtual MAC
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da20c794f70>
name[self].__virtual_mac assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_virtual_mac] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[virtual_mac] . identifier[virtual_mac] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__virtual_mac] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_virtual_mac(self, v, load=False):
"""
Setter method for virtual_mac, mapped from YANG variable /rbridge_id/interface/ve/ipv6/vrrpv3e/virtual_mac (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_virtual_mac is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_virtual_mac() directly.
YANG Description: Virtual MAC
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=virtual_mac.virtual_mac, is_container='container', presence=False, yang_name='virtual-mac', rest_name='virtual-mac', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual MAC'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'virtual_mac must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=virtual_mac.virtual_mac, is_container=\'container\', presence=False, yang_name="virtual-mac", rest_name="virtual-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Virtual MAC\'}}, namespace=\'urn:brocade.com:mgmt:brocade-vrrpv3\', defining_module=\'brocade-vrrpv3\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__virtual_mac = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def MakeDestinationKey(directory, filename):
"""Creates a name that identifies a database file."""
return utils.SmartStr(utils.JoinPath(directory, filename)).lstrip("/") | def function[MakeDestinationKey, parameter[directory, filename]]:
constant[Creates a name that identifies a database file.]
return[call[call[name[utils].SmartStr, parameter[call[name[utils].JoinPath, parameter[name[directory], name[filename]]]]].lstrip, parameter[constant[/]]]] | keyword[def] identifier[MakeDestinationKey] ( identifier[directory] , identifier[filename] ):
literal[string]
keyword[return] identifier[utils] . identifier[SmartStr] ( identifier[utils] . identifier[JoinPath] ( identifier[directory] , identifier[filename] )). identifier[lstrip] ( literal[string] ) | def MakeDestinationKey(directory, filename):
"""Creates a name that identifies a database file."""
return utils.SmartStr(utils.JoinPath(directory, filename)).lstrip('/') |
def _getIndxChop(self, indx):
'''
A helper method for Type subclasses to use for a simple way to truncate
indx bytes.
'''
# cut down an index value to 256 bytes...
if len(indx) <= 256:
return indx
base = indx[:248]
sufx = xxhash.xxh64(indx).digest()
return base + sufx | def function[_getIndxChop, parameter[self, indx]]:
constant[
A helper method for Type subclasses to use for a simple way to truncate
indx bytes.
]
if compare[call[name[len], parameter[name[indx]]] less_or_equal[<=] constant[256]] begin[:]
return[name[indx]]
variable[base] assign[=] call[name[indx]][<ast.Slice object at 0x7da20c76fc70>]
variable[sufx] assign[=] call[call[name[xxhash].xxh64, parameter[name[indx]]].digest, parameter[]]
return[binary_operation[name[base] + name[sufx]]] | keyword[def] identifier[_getIndxChop] ( identifier[self] , identifier[indx] ):
literal[string]
keyword[if] identifier[len] ( identifier[indx] )<= literal[int] :
keyword[return] identifier[indx]
identifier[base] = identifier[indx] [: literal[int] ]
identifier[sufx] = identifier[xxhash] . identifier[xxh64] ( identifier[indx] ). identifier[digest] ()
keyword[return] identifier[base] + identifier[sufx] | def _getIndxChop(self, indx):
"""
A helper method for Type subclasses to use for a simple way to truncate
indx bytes.
"""
# cut down an index value to 256 bytes...
if len(indx) <= 256:
return indx # depends on [control=['if'], data=[]]
base = indx[:248]
sufx = xxhash.xxh64(indx).digest()
return base + sufx |
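
The same truncation scheme as a self-contained sketch: values up to 256 bytes pass through untouched, while longer ones keep a 248-byte prefix plus the 8-byte xxh64 digest of the full value, so results are always at most 256 bytes and two long values sharing a prefix still get distinct keys:

import xxhash

def chop_indx(indx: bytes, limit: int = 256) -> bytes:
    # Short values are already valid index keys.
    if len(indx) <= limit:
        return indx
    # Hash the *whole* value so the suffix distinguishes long values
    # that happen to share the kept prefix.
    return indx[:limit - 8] + xxhash.xxh64(indx).digest()

assert len(chop_indx(b'x' * 1000)) == 256
assert chop_indx(b'a' * 300) != chop_indx(b'a' * 301)
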
def add_reporting_args(parser):
"""Add reporting arguments to an argument parser.
Parameters
----------
parser: `argparse.ArgumentParser`
Returns
-------
`argparse.ArgumentGroup`
The argument group created.
"""
g = parser.add_argument_group('Reporting options')
g.add_argument(
'-l', '--log-file', default=None,
type=str, metavar=file_mv,
        help='Path of log file (if specified, report to stdout AND file).'
)
g.add_argument('-q', '--quiet', action='store_true',
help='Only output errors and warnings.')
g.add_argument(
'-v', '--verbose', action='store_true',
help='Enable verbose output. Ignored if --quiet is specified.'
)
return g | def function[add_reporting_args, parameter[parser]]:
constant[Add reporting arguments to an argument parser.
Parameters
----------
parser: `argparse.ArgumentParser`
Returns
-------
`argparse.ArgumentGroup`
The argument group created.
]
variable[g] assign[=] call[name[parser].add_argument_group, parameter[constant[Reporting options]]]
call[name[g].add_argument, parameter[constant[-l], constant[--log-file]]]
call[name[g].add_argument, parameter[constant[-q], constant[--quiet]]]
call[name[g].add_argument, parameter[constant[-v], constant[--verbose]]]
return[name[g]] | keyword[def] identifier[add_reporting_args] ( identifier[parser] ):
literal[string]
identifier[g] = identifier[parser] . identifier[add_argument_group] ( literal[string] )
identifier[g] . identifier[add_argument] (
literal[string] , literal[string] , identifier[default] = keyword[None] ,
identifier[type] = identifier[str] , identifier[metavar] = identifier[file_mv] ,
identifier[help] = literal[string]
)
identifier[g] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[g] . identifier[add_argument] (
literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
)
keyword[return] identifier[g] | def add_reporting_args(parser):
"""Add reporting arguments to an argument parser.
Parameters
----------
parser: `argparse.ArgumentParser`
Returns
-------
`argparse.ArgumentGroup`
The argument group created.
"""
g = parser.add_argument_group('Reporting options')
    g.add_argument('-l', '--log-file', default=None, type=str, metavar=file_mv, help='Path of log file (if specified, report to stdout AND file).')
g.add_argument('-q', '--quiet', action='store_true', help='Only output errors and warnings.')
g.add_argument('-v', '--verbose', action='store_true', help='Enable verbose output. Ignored if --quiet is specified.')
return g |
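
Typical wiring for the group above; `file_mv` is defined elsewhere in the module, so a stand-in is used here:

import argparse

file_mv = '<file>'  # stand-in for the module-level metavar

parser = argparse.ArgumentParser()
add_reporting_args(parser)
args = parser.parse_args(['-v', '--log-file', 'run.log'])
print(args.verbose, args.quiet, args.log_file)  # True False run.log
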
def summarize_provenance(self):
"""Utility function to summarize provenance files for cached items used by a Cohort.
At the moment, most PROVENANCE files contain details about packages used to
generate files. However, this function is generic & so it summarizes the contents
of those files irrespective of their contents.
Returns
----------
Dict containing summary of provenance items, among all cache dirs used by the Cohort.
    That is, if all provenances are identical across all cache dirs, then a single set of
provenances is returned. Otherwise, if all provenances are not identical, the provenance
items per cache_dir are returned.
See also
----------
`?cohorts.Cohort.summarize_provenance_per_cache` which is used to summarize provenance
for each existing cache_dir.
"""
provenance_per_cache = self.summarize_provenance_per_cache()
summary_provenance = None
num_discrepant = 0
for cache in provenance_per_cache:
if not(summary_provenance):
## pick arbitrary provenance & call this the "summary" (for now)
summary_provenance = provenance_per_cache[cache]
summary_provenance_name = cache
## for each cache, check equivalence with summary_provenance
num_discrepant += compare_provenance(
provenance_per_cache[cache],
summary_provenance,
left_outer_diff = "In %s but not in %s" % (cache, summary_provenance_name),
right_outer_diff = "In %s but not in %s" % (summary_provenance_name, cache)
)
## compare provenance across cached items
if num_discrepant == 0:
prov = summary_provenance ## report summary provenance if exists
else:
prov = provenance_per_cache ## otherwise, return provenance per cache
return(prov) | def function[summarize_provenance, parameter[self]]:
constant[Utility function to summarize provenance files for cached items used by a Cohort.
At the moment, most PROVENANCE files contain details about packages used to
generate files. However, this function is generic & so it summarizes the contents
of those files irrespective of their contents.
Returns
----------
Dict containing summary of provenance items, among all cache dirs used by the Cohort.
    That is, if all provenances are identical across all cache dirs, then a single set of
provenances is returned. Otherwise, if all provenances are not identical, the provenance
items per cache_dir are returned.
See also
----------
`?cohorts.Cohort.summarize_provenance_per_cache` which is used to summarize provenance
for each existing cache_dir.
]
variable[provenance_per_cache] assign[=] call[name[self].summarize_provenance_per_cache, parameter[]]
variable[summary_provenance] assign[=] constant[None]
variable[num_discrepant] assign[=] constant[0]
for taget[name[cache]] in starred[name[provenance_per_cache]] begin[:]
if <ast.UnaryOp object at 0x7da1b26ad3f0> begin[:]
variable[summary_provenance] assign[=] call[name[provenance_per_cache]][name[cache]]
variable[summary_provenance_name] assign[=] name[cache]
<ast.AugAssign object at 0x7da1b26ac610>
if compare[name[num_discrepant] equal[==] constant[0]] begin[:]
variable[prov] assign[=] name[summary_provenance]
return[name[prov]] | keyword[def] identifier[summarize_provenance] ( identifier[self] ):
literal[string]
identifier[provenance_per_cache] = identifier[self] . identifier[summarize_provenance_per_cache] ()
identifier[summary_provenance] = keyword[None]
identifier[num_discrepant] = literal[int]
keyword[for] identifier[cache] keyword[in] identifier[provenance_per_cache] :
keyword[if] keyword[not] ( identifier[summary_provenance] ):
identifier[summary_provenance] = identifier[provenance_per_cache] [ identifier[cache] ]
identifier[summary_provenance_name] = identifier[cache]
identifier[num_discrepant] += identifier[compare_provenance] (
identifier[provenance_per_cache] [ identifier[cache] ],
identifier[summary_provenance] ,
identifier[left_outer_diff] = literal[string] %( identifier[cache] , identifier[summary_provenance_name] ),
identifier[right_outer_diff] = literal[string] %( identifier[summary_provenance_name] , identifier[cache] )
)
keyword[if] identifier[num_discrepant] == literal[int] :
identifier[prov] = identifier[summary_provenance]
keyword[else] :
identifier[prov] = identifier[provenance_per_cache]
keyword[return] ( identifier[prov] ) | def summarize_provenance(self):
"""Utility function to summarize provenance files for cached items used by a Cohort.
At the moment, most PROVENANCE files contain details about packages used to
generate files. However, this function is generic & so it summarizes the contents
of those files irrespective of their contents.
Returns
----------
Dict containing summary of provenance items, among all cache dirs used by the Cohort.
    That is, if all provenances are identical across all cache dirs, then a single set of
provenances is returned. Otherwise, if all provenances are not identical, the provenance
items per cache_dir are returned.
See also
----------
`?cohorts.Cohort.summarize_provenance_per_cache` which is used to summarize provenance
for each existing cache_dir.
"""
provenance_per_cache = self.summarize_provenance_per_cache()
summary_provenance = None
num_discrepant = 0
for cache in provenance_per_cache:
if not summary_provenance:
## pick arbitrary provenance & call this the "summary" (for now)
summary_provenance = provenance_per_cache[cache]
summary_provenance_name = cache # depends on [control=['if'], data=[]]
## for each cache, check equivalence with summary_provenance
num_discrepant += compare_provenance(provenance_per_cache[cache], summary_provenance, left_outer_diff='In %s but not in %s' % (cache, summary_provenance_name), right_outer_diff='In %s but not in %s' % (summary_provenance_name, cache)) # depends on [control=['for'], data=['cache']]
## compare provenance across cached items
if num_discrepant == 0:
prov = summary_provenance ## report summary provenance if exists # depends on [control=['if'], data=[]]
else:
prov = provenance_per_cache ## otherwise, return provenance per cache
return prov |
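
A hedged sketch of the `compare_provenance` contract this method relies on (the real helper lives elsewhere in the package): report one-sided and changed keys, and return the number of discrepancies so the caller can sum them:

def compare_provenance(this_prov, other_prov,
                       left_outer_diff='in left only',
                       right_outer_diff='in right only'):
    left, right = set(this_prov), set(other_prov)
    for key in sorted(left - right):
        print('%s: %s' % (left_outer_diff, key))
    for key in sorted(right - left):
        print('%s: %s' % (right_outer_diff, key))
    changed = {k for k in left & right if this_prov[k] != other_prov[k]}
    for key in sorted(changed):
        print('changed: %s (%r vs %r)' % (key, this_prov[key], other_prov[key]))
    # 0 means the two provenance dicts agree exactly.
    return len(left ^ right) + len(changed)
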
def parse_options(settings):
'''Parse command line options'''
optlist, args = getopt(sys.argv, 'x', [])
settings['configfile'] = args[1]
return settings | def function[parse_options, parameter[settings]]:
constant[Parse command line options]
<ast.Tuple object at 0x7da1b09bb250> assign[=] call[name[getopt], parameter[name[sys].argv, constant[x], list[[]]]]
call[name[settings]][constant[configfile]] assign[=] call[name[args]][constant[1]]
return[name[settings]] | keyword[def] identifier[parse_options] ( identifier[settings] ):
literal[string]
identifier[optlist] , identifier[args] = identifier[getopt] ( identifier[sys] . identifier[argv] , literal[string] ,[])
identifier[settings] [ literal[string] ]= identifier[args] [ literal[int] ]
keyword[return] identifier[settings] | def parse_options(settings):
"""Parse command line options"""
(optlist, args) = getopt(sys.argv, 'x', [])
settings['configfile'] = args[1]
return settings |
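
Note the quirk: the whole of sys.argv is passed, and getopt stops scanning at the first non-option, so args[0] stays the script name and args[1] is the config file:

from getopt import getopt

optlist, args = getopt(['prog.py', 'settings.ini'], 'x', [])
print(optlist, args[1])  # [] settings.ini
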
def on_configuration_changed(self, config):
""" Handles a screen configuration change.
"""
self.width = config['width']
self.height = config['height']
self.orientation = ('square', 'portrait', 'landscape')[
config['orientation']] | def function[on_configuration_changed, parameter[self, config]]:
constant[ Handles a screen configuration change.
]
name[self].width assign[=] call[name[config]][constant[width]]
name[self].height assign[=] call[name[config]][constant[height]]
name[self].orientation assign[=] call[tuple[[<ast.Constant object at 0x7da1b1c608e0>, <ast.Constant object at 0x7da1b1c61390>, <ast.Constant object at 0x7da1b1c61de0>]]][call[name[config]][constant[orientation]]] | keyword[def] identifier[on_configuration_changed] ( identifier[self] , identifier[config] ):
literal[string]
identifier[self] . identifier[width] = identifier[config] [ literal[string] ]
identifier[self] . identifier[height] = identifier[config] [ literal[string] ]
identifier[self] . identifier[orientation] =( literal[string] , literal[string] , literal[string] )[
identifier[config] [ literal[string] ]] | def on_configuration_changed(self, config):
""" Handles a screen configuration change.
"""
self.width = config['width']
self.height = config['height']
self.orientation = ('square', 'portrait', 'landscape')[config['orientation']] |
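
The orientation field is an integer code, decoded by tuple indexing; the same idiom in isolation:

ORIENTATIONS = ('square', 'portrait', 'landscape')
config = {'width': 1080, 'height': 1920, 'orientation': 1}
print(ORIENTATIONS[config['orientation']])  # portrait
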
def numberOfConnectedDistalSynapses(self, cells=None):
"""
Returns the number of connected distal synapses on these cells.
Parameters:
----------------------------
@param cells (iterable)
Indices of the cells. If None return count for all cells.
"""
if cells is None:
cells = xrange(self.numberOfCells())
n = _countWhereGreaterEqualInRows(self.internalDistalPermanences, cells,
self.connectedPermanenceDistal)
for permanences in self.distalPermanences:
n += _countWhereGreaterEqualInRows(permanences, cells,
self.connectedPermanenceDistal)
return n | def function[numberOfConnectedDistalSynapses, parameter[self, cells]]:
constant[
Returns the number of connected distal synapses on these cells.
Parameters:
----------------------------
@param cells (iterable)
Indices of the cells. If None return count for all cells.
]
if compare[name[cells] is constant[None]] begin[:]
variable[cells] assign[=] call[name[xrange], parameter[call[name[self].numberOfCells, parameter[]]]]
variable[n] assign[=] call[name[_countWhereGreaterEqualInRows], parameter[name[self].internalDistalPermanences, name[cells], name[self].connectedPermanenceDistal]]
for taget[name[permanences]] in starred[name[self].distalPermanences] begin[:]
<ast.AugAssign object at 0x7da1b09019c0>
return[name[n]] | keyword[def] identifier[numberOfConnectedDistalSynapses] ( identifier[self] , identifier[cells] = keyword[None] ):
literal[string]
keyword[if] identifier[cells] keyword[is] keyword[None] :
identifier[cells] = identifier[xrange] ( identifier[self] . identifier[numberOfCells] ())
identifier[n] = identifier[_countWhereGreaterEqualInRows] ( identifier[self] . identifier[internalDistalPermanences] , identifier[cells] ,
identifier[self] . identifier[connectedPermanenceDistal] )
keyword[for] identifier[permanences] keyword[in] identifier[self] . identifier[distalPermanences] :
identifier[n] += identifier[_countWhereGreaterEqualInRows] ( identifier[permanences] , identifier[cells] ,
identifier[self] . identifier[connectedPermanenceDistal] )
keyword[return] identifier[n] | def numberOfConnectedDistalSynapses(self, cells=None):
"""
Returns the number of connected distal synapses on these cells.
Parameters:
----------------------------
@param cells (iterable)
Indices of the cells. If None return count for all cells.
"""
if cells is None:
cells = xrange(self.numberOfCells()) # depends on [control=['if'], data=['cells']]
n = _countWhereGreaterEqualInRows(self.internalDistalPermanences, cells, self.connectedPermanenceDistal)
for permanences in self.distalPermanences:
n += _countWhereGreaterEqualInRows(permanences, cells, self.connectedPermanenceDistal) # depends on [control=['for'], data=['permanences']]
return n |
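
`_countWhereGreaterEqualInRows` is assumed here; for a scipy sparse permanence matrix a minimal version could look like this sketch (implicit zeros never reach the connected threshold, so only stored entries are counted):

from scipy.sparse import csr_matrix

def _countWhereGreaterEqualInRows(permanences, rows, threshold):
    # Restrict to the requested rows, then count stored values >= threshold.
    sub = csr_matrix(permanences)[list(rows)]
    return int((sub.data >= threshold).sum())
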
def _setup_logging(args):
"""
Set up logging for the script, based on the configuration
specified by the 'logging' attribute of the command line
arguments.
:param args: A Namespace object containing a 'logging' attribute
specifying the name of a logging configuration file
to use. If not present or not given, a basic logging
configuration will be set.
"""
log_conf = getattr(args, 'logging', None)
if log_conf:
logging.config.fileConfig(log_conf)
else:
logging.basicConfig() | def function[_setup_logging, parameter[args]]:
constant[
Set up logging for the script, based on the configuration
specified by the 'logging' attribute of the command line
arguments.
:param args: A Namespace object containing a 'logging' attribute
specifying the name of a logging configuration file
to use. If not present or not given, a basic logging
configuration will be set.
]
variable[log_conf] assign[=] call[name[getattr], parameter[name[args], constant[logging], constant[None]]]
if name[log_conf] begin[:]
call[name[logging].config.fileConfig, parameter[name[log_conf]]] | keyword[def] identifier[_setup_logging] ( identifier[args] ):
literal[string]
identifier[log_conf] = identifier[getattr] ( identifier[args] , literal[string] , keyword[None] )
keyword[if] identifier[log_conf] :
identifier[logging] . identifier[config] . identifier[fileConfig] ( identifier[log_conf] )
keyword[else] :
identifier[logging] . identifier[basicConfig] () | def _setup_logging(args):
"""
Set up logging for the script, based on the configuration
specified by the 'logging' attribute of the command line
arguments.
:param args: A Namespace object containing a 'logging' attribute
specifying the name of a logging configuration file
to use. If not present or not given, a basic logging
configuration will be set.
"""
log_conf = getattr(args, 'logging', None)
if log_conf:
logging.config.fileConfig(log_conf) # depends on [control=['if'], data=[]]
else:
logging.basicConfig() |
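
Exercising both branches with an argparse-style namespace:

import argparse
import logging

_setup_logging(argparse.Namespace())          # no 'logging' attribute -> basicConfig()
logging.getLogger(__name__).warning('basic config active')

# With a path, logging.config.fileConfig() takes over (the file must exist):
# _setup_logging(argparse.Namespace(logging='logging.ini'))
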
def index_buses(self, buses=None, start=0):
""" Updates the indices of all buses.
@param start: Starting index, typically 0 or 1.
@type start: int
"""
bs = self.connected_buses if buses is None else buses
for i, b in enumerate(bs):
b._i = start + i | def function[index_buses, parameter[self, buses, start]]:
constant[ Updates the indices of all buses.
@param start: Starting index, typically 0 or 1.
@type start: int
]
variable[bs] assign[=] <ast.IfExp object at 0x7da1b25d1030>
for taget[tuple[[<ast.Name object at 0x7da1b25d1e70>, <ast.Name object at 0x7da1b25d1fc0>]]] in starred[call[name[enumerate], parameter[name[bs]]]] begin[:]
name[b]._i assign[=] binary_operation[name[start] + name[i]] | keyword[def] identifier[index_buses] ( identifier[self] , identifier[buses] = keyword[None] , identifier[start] = literal[int] ):
literal[string]
identifier[bs] = identifier[self] . identifier[connected_buses] keyword[if] identifier[buses] keyword[is] keyword[None] keyword[else] identifier[buses]
keyword[for] identifier[i] , identifier[b] keyword[in] identifier[enumerate] ( identifier[bs] ):
identifier[b] . identifier[_i] = identifier[start] + identifier[i] | def index_buses(self, buses=None, start=0):
""" Updates the indices of all buses.
@param start: Starting index, typically 0 or 1.
@type start: int
"""
bs = self.connected_buses if buses is None else buses
for (i, b) in enumerate(bs):
b._i = start + i # depends on [control=['for'], data=[]] |
def apply(self, collection, ops, resource=None, **kwargs):
"""Filter given collection."""
mfield = self.mfield or resource.meta.model._meta.fields.get(self.field.attribute)
if mfield:
collection = collection.where(*[op(mfield, val) for op, val in ops])
return collection | def function[apply, parameter[self, collection, ops, resource]]:
constant[Filter given collection.]
variable[mfield] assign[=] <ast.BoolOp object at 0x7da1b0a9d2d0>
if name[mfield] begin[:]
variable[collection] assign[=] call[name[collection].where, parameter[<ast.Starred object at 0x7da1b0b384c0>]]
return[name[collection]] | keyword[def] identifier[apply] ( identifier[self] , identifier[collection] , identifier[ops] , identifier[resource] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[mfield] = identifier[self] . identifier[mfield] keyword[or] identifier[resource] . identifier[meta] . identifier[model] . identifier[_meta] . identifier[fields] . identifier[get] ( identifier[self] . identifier[field] . identifier[attribute] )
keyword[if] identifier[mfield] :
identifier[collection] = identifier[collection] . identifier[where] (*[ identifier[op] ( identifier[mfield] , identifier[val] ) keyword[for] identifier[op] , identifier[val] keyword[in] identifier[ops] ])
keyword[return] identifier[collection] | def apply(self, collection, ops, resource=None, **kwargs):
"""Filter given collection."""
mfield = self.mfield or resource.meta.model._meta.fields.get(self.field.attribute)
if mfield:
collection = collection.where(*[op(mfield, val) for (op, val) in ops]) # depends on [control=['if'], data=[]]
return collection |
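
The method ANDs one clause per (operator, value) pair onto the query; here is the clause-building core in isolation, assuming a peewee-style field where `op(field, value)` builds an SQL expression node:

import operator

def build_clauses(mfield, ops):
    # One clause per (op, value) pair, exactly as in apply() above.
    return [op(mfield, val) for op, val in ops]

# With plain ints this just evaluates the comparisons:
print(build_clauses(21, [(operator.ge, 18), (operator.lt, 65)]))  # [True, True]
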
def parse_response(self, response):
"""
Evaluates the action-call response from a FritzBox.
The response is a xml byte-string.
Returns a dictionary with the received arguments-value pairs.
The values are converted according to the given data_types.
TODO: boolean and signed integers data-types from tr64 responses
"""
result = {}
root = etree.fromstring(response)
for argument in self.arguments.values():
try:
value = root.find('.//%s' % argument.name).text
except AttributeError:
# will happen by searching for in-parameters and by
# parsing responses with status_code != 200
continue
if argument.data_type.startswith('ui'):
try:
value = int(value)
except ValueError:
# should not happen
value = None
result[argument.name] = value
return result | def function[parse_response, parameter[self, response]]:
constant[
Evaluates the action-call response from a FritzBox.
The response is a xml byte-string.
Returns a dictionary with the received arguments-value pairs.
The values are converted according to the given data_types.
TODO: boolean and signed integers data-types from tr64 responses
]
variable[result] assign[=] dictionary[[], []]
variable[root] assign[=] call[name[etree].fromstring, parameter[name[response]]]
for taget[name[argument]] in starred[call[name[self].arguments.values, parameter[]]] begin[:]
<ast.Try object at 0x7da1b11d8400>
if call[name[argument].data_type.startswith, parameter[constant[ui]]] begin[:]
<ast.Try object at 0x7da1b11da4d0>
call[name[result]][name[argument].name] assign[=] name[value]
return[name[result]] | keyword[def] identifier[parse_response] ( identifier[self] , identifier[response] ):
literal[string]
identifier[result] ={}
identifier[root] = identifier[etree] . identifier[fromstring] ( identifier[response] )
keyword[for] identifier[argument] keyword[in] identifier[self] . identifier[arguments] . identifier[values] ():
keyword[try] :
identifier[value] = identifier[root] . identifier[find] ( literal[string] % identifier[argument] . identifier[name] ). identifier[text]
keyword[except] identifier[AttributeError] :
keyword[continue]
keyword[if] identifier[argument] . identifier[data_type] . identifier[startswith] ( literal[string] ):
keyword[try] :
identifier[value] = identifier[int] ( identifier[value] )
keyword[except] identifier[ValueError] :
identifier[value] = keyword[None]
identifier[result] [ identifier[argument] . identifier[name] ]= identifier[value]
keyword[return] identifier[result] | def parse_response(self, response):
"""
Evaluates the action-call response from a FritzBox.
The response is a xml byte-string.
Returns a dictionary with the received arguments-value pairs.
The values are converted according to the given data_types.
TODO: boolean and signed integers data-types from tr64 responses
"""
result = {}
root = etree.fromstring(response)
for argument in self.arguments.values():
try:
value = root.find('.//%s' % argument.name).text # depends on [control=['try'], data=[]]
except AttributeError:
# will happen by searching for in-parameters and by
# parsing responses with status_code != 200
continue # depends on [control=['except'], data=[]]
if argument.data_type.startswith('ui'):
try:
value = int(value) # depends on [control=['try'], data=[]]
except ValueError:
# should not happen
value = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
result[argument.name] = value # depends on [control=['for'], data=['argument']]
return result |
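
A small round trip with an illustrative out-argument named NewUptime of data_type 'ui4' (both names are hypothetical); the XPath search and the unsigned-integer conversion mirror the method above:

import xml.etree.ElementTree as etree

response = b'<root><body><NewUptime>86400</NewUptime></body></root>'
root = etree.fromstring(response)
value = root.find('.//%s' % 'NewUptime').text
print(int(value))  # 86400
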
def _algo_check_for_section_problems(self, ro_rw_zi):
"""! @brief Return a string describing any errors with the layout or None if good"""
s_ro, s_rw, s_zi = ro_rw_zi
if s_ro is None:
return "RO section is missing"
if s_rw is None:
return "RW section is missing"
if s_zi is None:
return "ZI section is missing"
if s_ro.start != 0:
return "RO section does not start at address 0"
if s_ro.start + s_ro.length != s_rw.start:
return "RW section does not follow RO section"
if s_rw.start + s_rw.length != s_zi.start:
return "ZI section does not follow RW section"
return None | def function[_algo_check_for_section_problems, parameter[self, ro_rw_zi]]:
constant[! @brief Return a string describing any errors with the layout or None if good]
<ast.Tuple object at 0x7da18f723640> assign[=] name[ro_rw_zi]
if compare[name[s_ro] is constant[None]] begin[:]
return[constant[RO section is missing]]
if compare[name[s_rw] is constant[None]] begin[:]
return[constant[RW section is missing]]
if compare[name[s_zi] is constant[None]] begin[:]
return[constant[ZI section is missing]]
if compare[name[s_ro].start not_equal[!=] constant[0]] begin[:]
return[constant[RO section does not start at address 0]]
if compare[binary_operation[name[s_ro].start + name[s_ro].length] not_equal[!=] name[s_rw].start] begin[:]
return[constant[RW section does not follow RO section]]
if compare[binary_operation[name[s_rw].start + name[s_rw].length] not_equal[!=] name[s_zi].start] begin[:]
return[constant[ZI section does not follow RW section]]
return[constant[None]] | keyword[def] identifier[_algo_check_for_section_problems] ( identifier[self] , identifier[ro_rw_zi] ):
literal[string]
identifier[s_ro] , identifier[s_rw] , identifier[s_zi] = identifier[ro_rw_zi]
keyword[if] identifier[s_ro] keyword[is] keyword[None] :
keyword[return] literal[string]
keyword[if] identifier[s_rw] keyword[is] keyword[None] :
keyword[return] literal[string]
keyword[if] identifier[s_zi] keyword[is] keyword[None] :
keyword[return] literal[string]
keyword[if] identifier[s_ro] . identifier[start] != literal[int] :
keyword[return] literal[string]
keyword[if] identifier[s_ro] . identifier[start] + identifier[s_ro] . identifier[length] != identifier[s_rw] . identifier[start] :
keyword[return] literal[string]
keyword[if] identifier[s_rw] . identifier[start] + identifier[s_rw] . identifier[length] != identifier[s_zi] . identifier[start] :
keyword[return] literal[string]
keyword[return] keyword[None] | def _algo_check_for_section_problems(self, ro_rw_zi):
"""! @brief Return a string describing any errors with the layout or None if good"""
(s_ro, s_rw, s_zi) = ro_rw_zi
if s_ro is None:
return 'RO section is missing' # depends on [control=['if'], data=[]]
if s_rw is None:
return 'RW section is missing' # depends on [control=['if'], data=[]]
if s_zi is None:
return 'ZI section is missing' # depends on [control=['if'], data=[]]
if s_ro.start != 0:
return 'RO section does not start at address 0' # depends on [control=['if'], data=[]]
if s_ro.start + s_ro.length != s_rw.start:
return 'RW section does not follow RO section' # depends on [control=['if'], data=[]]
if s_rw.start + s_rw.length != s_zi.start:
return 'ZI section does not follow RW section' # depends on [control=['if'], data=[]]
return None |
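
A sanity check with a stand-in section type; any object exposing .start and .length works, and since the method never reads self, None serves as the instance:

from collections import namedtuple

Section = namedtuple('Section', 'start length')

ro = Section(start=0x000, length=0x100)
rw = Section(start=0x100, length=0x020)
zi = Section(start=0x120, length=0x040)
assert _algo_check_for_section_problems(None, (ro, rw, zi)) is None

gap_rw = Section(start=0x180, length=0x020)   # leaves a gap after RO
print(_algo_check_for_section_problems(None, (ro, gap_rw, zi)))
# -> RW section does not follow RO section
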
def _prepare_by_column_dtype(self, X):
"""Get distance functions for each column's dtype"""
if not isinstance(X, pandas.DataFrame):
raise TypeError('X must be a pandas DataFrame')
numeric_columns = []
nominal_columns = []
numeric_ranges = []
fit_data = numpy.empty_like(X)
for i, dt in enumerate(X.dtypes):
col = X.iloc[:, i]
if is_categorical_dtype(dt):
if col.cat.ordered:
numeric_ranges.append(col.cat.codes.max() - col.cat.codes.min())
numeric_columns.append(i)
else:
nominal_columns.append(i)
col = col.cat.codes
elif is_numeric_dtype(dt):
numeric_ranges.append(col.max() - col.min())
numeric_columns.append(i)
else:
raise TypeError('unsupported dtype: %r' % dt)
fit_data[:, i] = col.values
self._numeric_columns = numpy.asarray(numeric_columns)
self._nominal_columns = numpy.asarray(nominal_columns)
self._numeric_ranges = numpy.asarray(numeric_ranges, dtype=float)
self.X_fit_ = fit_data | def function[_prepare_by_column_dtype, parameter[self, X]]:
constant[Get distance functions for each column's dtype]
if <ast.UnaryOp object at 0x7da1b17e3130> begin[:]
<ast.Raise object at 0x7da1b17e0610>
variable[numeric_columns] assign[=] list[[]]
variable[nominal_columns] assign[=] list[[]]
variable[numeric_ranges] assign[=] list[[]]
variable[fit_data] assign[=] call[name[numpy].empty_like, parameter[name[X]]]
for taget[tuple[[<ast.Name object at 0x7da1b17e0c10>, <ast.Name object at 0x7da1b17e1960>]]] in starred[call[name[enumerate], parameter[name[X].dtypes]]] begin[:]
variable[col] assign[=] call[name[X].iloc][tuple[[<ast.Slice object at 0x7da1b17e1fc0>, <ast.Name object at 0x7da1b17e19f0>]]]
if call[name[is_categorical_dtype], parameter[name[dt]]] begin[:]
if name[col].cat.ordered begin[:]
call[name[numeric_ranges].append, parameter[binary_operation[call[name[col].cat.codes.max, parameter[]] - call[name[col].cat.codes.min, parameter[]]]]]
call[name[numeric_columns].append, parameter[name[i]]]
variable[col] assign[=] name[col].cat.codes
call[name[fit_data]][tuple[[<ast.Slice object at 0x7da207f00760>, <ast.Name object at 0x7da207f00fa0>]]] assign[=] name[col].values
name[self]._numeric_columns assign[=] call[name[numpy].asarray, parameter[name[numeric_columns]]]
name[self]._nominal_columns assign[=] call[name[numpy].asarray, parameter[name[nominal_columns]]]
name[self]._numeric_ranges assign[=] call[name[numpy].asarray, parameter[name[numeric_ranges]]]
name[self].X_fit_ assign[=] name[fit_data] | keyword[def] identifier[_prepare_by_column_dtype] ( identifier[self] , identifier[X] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[X] , identifier[pandas] . identifier[DataFrame] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[numeric_columns] =[]
identifier[nominal_columns] =[]
identifier[numeric_ranges] =[]
identifier[fit_data] = identifier[numpy] . identifier[empty_like] ( identifier[X] )
keyword[for] identifier[i] , identifier[dt] keyword[in] identifier[enumerate] ( identifier[X] . identifier[dtypes] ):
identifier[col] = identifier[X] . identifier[iloc] [:, identifier[i] ]
keyword[if] identifier[is_categorical_dtype] ( identifier[dt] ):
keyword[if] identifier[col] . identifier[cat] . identifier[ordered] :
identifier[numeric_ranges] . identifier[append] ( identifier[col] . identifier[cat] . identifier[codes] . identifier[max] ()- identifier[col] . identifier[cat] . identifier[codes] . identifier[min] ())
identifier[numeric_columns] . identifier[append] ( identifier[i] )
keyword[else] :
identifier[nominal_columns] . identifier[append] ( identifier[i] )
identifier[col] = identifier[col] . identifier[cat] . identifier[codes]
keyword[elif] identifier[is_numeric_dtype] ( identifier[dt] ):
identifier[numeric_ranges] . identifier[append] ( identifier[col] . identifier[max] ()- identifier[col] . identifier[min] ())
identifier[numeric_columns] . identifier[append] ( identifier[i] )
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] % identifier[dt] )
identifier[fit_data] [:, identifier[i] ]= identifier[col] . identifier[values]
identifier[self] . identifier[_numeric_columns] = identifier[numpy] . identifier[asarray] ( identifier[numeric_columns] )
identifier[self] . identifier[_nominal_columns] = identifier[numpy] . identifier[asarray] ( identifier[nominal_columns] )
identifier[self] . identifier[_numeric_ranges] = identifier[numpy] . identifier[asarray] ( identifier[numeric_ranges] , identifier[dtype] = identifier[float] )
identifier[self] . identifier[X_fit_] = identifier[fit_data] | def _prepare_by_column_dtype(self, X):
"""Get distance functions for each column's dtype"""
if not isinstance(X, pandas.DataFrame):
raise TypeError('X must be a pandas DataFrame') # depends on [control=['if'], data=[]]
numeric_columns = []
nominal_columns = []
numeric_ranges = []
fit_data = numpy.empty_like(X)
for (i, dt) in enumerate(X.dtypes):
col = X.iloc[:, i]
if is_categorical_dtype(dt):
if col.cat.ordered:
numeric_ranges.append(col.cat.codes.max() - col.cat.codes.min())
numeric_columns.append(i) # depends on [control=['if'], data=[]]
else:
nominal_columns.append(i)
col = col.cat.codes # depends on [control=['if'], data=[]]
elif is_numeric_dtype(dt):
numeric_ranges.append(col.max() - col.min())
numeric_columns.append(i) # depends on [control=['if'], data=[]]
else:
raise TypeError('unsupported dtype: %r' % dt)
fit_data[:, i] = col.values # depends on [control=['for'], data=[]]
self._numeric_columns = numpy.asarray(numeric_columns)
self._nominal_columns = numpy.asarray(nominal_columns)
self._numeric_ranges = numpy.asarray(numeric_ranges, dtype=float)
self.X_fit_ = fit_data |
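
An input that exercises all three branches (ordered categorical, nominal categorical, numeric); a SimpleNamespace stands in for the estimator instance, and the two dtype predicates the method expects in scope come from pandas:

import types
import numpy
import pandas
from pandas.api.types import is_categorical_dtype, is_numeric_dtype

est = types.SimpleNamespace()  # stand-in for the estimator instance
X = pandas.DataFrame({
    'size': pandas.Categorical(['S', 'M', 'L'], ordered=True),  # ordered -> numeric via codes
    'color': pandas.Categorical(['red', 'blue', 'red']),        # nominal -> codes only
    'age': [10.0, 20.0, 40.0],                                  # numeric -> range 30.0
})
_prepare_by_column_dtype(est, X)
print(est._numeric_columns, est._nominal_columns, est._numeric_ranges)
# roughly: [0 2] [1] [ 2. 30.]  (cat-code span for 'size', max-min for 'age')
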
def __lists_to_str(self):
"""
There are some data lists that we collected across the dataset that need to be concatenated into a single
string before writing to the text file.
:return none:
"""
# ["archive_type", "sensor_genus", "sensor_species", "investigator"]
if self.lsts_tmp["archive"]:
self.noaa_data_sorted["Top"]["Archive"] = ",".join(self.lsts_tmp["archive"])
if self.lsts_tmp["species"]:
self.noaa_data_sorted["Species"]["Species_Name"] = ",".join(self.lsts_tmp["species"])
if self.lsts_tmp["genus"]:
self.noaa_data_sorted["Species"]["Species_Code"] = ",".join(self.lsts_tmp["genus"])
if self.lsts_tmp["qc"]:
if self.__is_notes():
self.noaa_data_sorted["Description_Notes_and_Keywords"]["Description"] = ";".join(self.lsts_tmp["qc"])
return | def function[__lists_to_str, parameter[self]]:
constant[
There are some data lists that we collected across the dataset that need to be concatenated into a single
string before writing to the text file.
:return none:
]
if call[name[self].lsts_tmp][constant[archive]] begin[:]
call[call[name[self].noaa_data_sorted][constant[Top]]][constant[Archive]] assign[=] call[constant[,].join, parameter[call[name[self].lsts_tmp][constant[archive]]]]
if call[name[self].lsts_tmp][constant[species]] begin[:]
call[call[name[self].noaa_data_sorted][constant[Species]]][constant[Species_Name]] assign[=] call[constant[,].join, parameter[call[name[self].lsts_tmp][constant[species]]]]
if call[name[self].lsts_tmp][constant[genus]] begin[:]
call[call[name[self].noaa_data_sorted][constant[Species]]][constant[Species_Code]] assign[=] call[constant[,].join, parameter[call[name[self].lsts_tmp][constant[genus]]]]
if call[name[self].lsts_tmp][constant[qc]] begin[:]
if call[name[self].__is_notes, parameter[]] begin[:]
call[call[name[self].noaa_data_sorted][constant[Description_Notes_and_Keywords]]][constant[Description]] assign[=] call[constant[;].join, parameter[call[name[self].lsts_tmp][constant[qc]]]]
return[None] | keyword[def] identifier[__lists_to_str] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[lsts_tmp] [ literal[string] ]:
identifier[self] . identifier[noaa_data_sorted] [ literal[string] ][ literal[string] ]= literal[string] . identifier[join] ( identifier[self] . identifier[lsts_tmp] [ literal[string] ])
keyword[if] identifier[self] . identifier[lsts_tmp] [ literal[string] ]:
identifier[self] . identifier[noaa_data_sorted] [ literal[string] ][ literal[string] ]= literal[string] . identifier[join] ( identifier[self] . identifier[lsts_tmp] [ literal[string] ])
keyword[if] identifier[self] . identifier[lsts_tmp] [ literal[string] ]:
identifier[self] . identifier[noaa_data_sorted] [ literal[string] ][ literal[string] ]= literal[string] . identifier[join] ( identifier[self] . identifier[lsts_tmp] [ literal[string] ])
keyword[if] identifier[self] . identifier[lsts_tmp] [ literal[string] ]:
keyword[if] identifier[self] . identifier[__is_notes] ():
identifier[self] . identifier[noaa_data_sorted] [ literal[string] ][ literal[string] ]= literal[string] . identifier[join] ( identifier[self] . identifier[lsts_tmp] [ literal[string] ])
keyword[return] | def __lists_to_str(self):
"""
There are some data lists that we collected across the dataset that need to be concatenated into a single
string before writing to the text file.
:return none:
"""
# ["archive_type", "sensor_genus", "sensor_species", "investigator"]
if self.lsts_tmp['archive']:
self.noaa_data_sorted['Top']['Archive'] = ','.join(self.lsts_tmp['archive']) # depends on [control=['if'], data=[]]
if self.lsts_tmp['species']:
self.noaa_data_sorted['Species']['Species_Name'] = ','.join(self.lsts_tmp['species']) # depends on [control=['if'], data=[]]
if self.lsts_tmp['genus']:
self.noaa_data_sorted['Species']['Species_Code'] = ','.join(self.lsts_tmp['genus']) # depends on [control=['if'], data=[]]
if self.lsts_tmp['qc']:
if self.__is_notes():
self.noaa_data_sorted['Description_Notes_and_Keywords']['Description'] = ';'.join(self.lsts_tmp['qc']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return |
def str_replace(x, pat, repl, n=-1, flags=0, regex=False):
"""Replace occurences of a pattern/regex in a column with some other string.
:param str pattern: string or a regex pattern
:param str replace: a replacement string
:param int n: number of replacements to be made from the start. If -1 make all replacements.
:param int flags: ??
:param bool regex: If True, ...?
:returns: an expression containing the string replacements.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.replace(pat='et', repl='__')
Expression = str_replace(text, pat='et', repl='__')
Length: 5 dtype: str (expression)
---------------------------------
0 Som__hing
1 very pr__ty
2 is coming
3 our
4 way.
"""
sl = _to_string_sequence(x).replace(pat, repl, n, flags, regex)
return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl) | def function[str_replace, parameter[x, pat, repl, n, flags, regex]]:
    constant[Replace occurrences of a pattern/regex in a column with some other string.
    :param str pat: string literal or regex pattern to search for
    :param str repl: the replacement string
    :param int n: number of replacements to be made from the start. If -1 make all replacements.
    :param int flags: regex flags (e.g. re.IGNORECASE), used when regex=True
    :param bool regex: if True, interpret ``pat`` as a regular expression instead of a literal string
:returns: an expression containing the string replacements.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.replace(pat='et', repl='__')
Expression = str_replace(text, pat='et', repl='__')
Length: 5 dtype: str (expression)
---------------------------------
0 Som__hing
1 very pr__ty
2 is coming
3 our
4 way.
]
variable[sl] assign[=] call[call[name[_to_string_sequence], parameter[name[x]]].replace, parameter[name[pat], name[repl], name[n], name[flags], name[regex]]]
return[call[name[column].ColumnStringArrow, parameter[name[sl].bytes, name[sl].indices, name[sl].length, name[sl].offset]]] | keyword[def] identifier[str_replace] ( identifier[x] , identifier[pat] , identifier[repl] , identifier[n] =- literal[int] , identifier[flags] = literal[int] , identifier[regex] = keyword[False] ):
literal[string]
identifier[sl] = identifier[_to_string_sequence] ( identifier[x] ). identifier[replace] ( identifier[pat] , identifier[repl] , identifier[n] , identifier[flags] , identifier[regex] )
keyword[return] identifier[column] . identifier[ColumnStringArrow] ( identifier[sl] . identifier[bytes] , identifier[sl] . identifier[indices] , identifier[sl] . identifier[length] , identifier[sl] . identifier[offset] , identifier[string_sequence] = identifier[sl] ) | def str_replace(x, pat, repl, n=-1, flags=0, regex=False):
"""Replace occurences of a pattern/regex in a column with some other string.
:param str pattern: string or a regex pattern
:param str replace: a replacement string
:param int n: number of replacements to be made from the start. If -1 make all replacements.
:param int flags: ??
:param bool regex: If True, ...?
:returns: an expression containing the string replacements.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.replace(pat='et', repl='__')
Expression = str_replace(text, pat='et', repl='__')
Length: 5 dtype: str (expression)
---------------------------------
0 Som__hing
1 very pr__ty
2 is coming
3 our
4 way.
"""
sl = _to_string_sequence(x).replace(pat, repl, n, flags, regex)
return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl) |
def data_uuids(self, uuids, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given list of UUIDs, retrieves all RAW data between the 2 given timestamps
Arguments:
[uuids]: list of UUIDs
    [start, end]: start and end time references for the query range
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if not isinstance(uuids, list):
uuids = [uuids]
where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query("select data in ({0}, {1}) where {2}".format(start, end, where), archiver, timeout).get('timeseries',{}) | def function[data_uuids, parameter[self, uuids, start, end, archiver, timeout]]:
constant[
With the given list of UUIDs, retrieves all RAW data between the 2 given timestamps
Arguments:
[uuids]: list of UUIDs
    [start, end]: start and end time references for the query range
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
]
if <ast.UnaryOp object at 0x7da18f00d120> begin[:]
variable[uuids] assign[=] list[[<ast.Name object at 0x7da18f00e8c0>]]
variable[where] assign[=] call[constant[ or ].join, parameter[<ast.ListComp object at 0x7da18f00d5d0>]]
return[call[call[name[self].query, parameter[call[constant[select data in ({0}, {1}) where {2}].format, parameter[name[start], name[end], name[where]]], name[archiver], name[timeout]]].get, parameter[constant[timeseries], dictionary[[], []]]]] | keyword[def] identifier[data_uuids] ( identifier[self] , identifier[uuids] , identifier[start] , identifier[end] , identifier[archiver] = literal[string] , identifier[timeout] = identifier[DEFAULT_TIMEOUT] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[uuids] , identifier[list] ):
identifier[uuids] =[ identifier[uuids] ]
identifier[where] = literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[uuid] ) keyword[for] identifier[uuid] keyword[in] identifier[uuids] ])
keyword[return] identifier[self] . identifier[query] ( literal[string] . identifier[format] ( identifier[start] , identifier[end] , identifier[where] ), identifier[archiver] , identifier[timeout] ). identifier[get] ( literal[string] ,{}) | def data_uuids(self, uuids, start, end, archiver='', timeout=DEFAULT_TIMEOUT):
"""
With the given list of UUIDs, retrieves all RAW data between the 2 given timestamps
Arguments:
[uuids]: list of UUIDs
    [start, end]: start and end time references for the query range
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if not isinstance(uuids, list):
uuids = [uuids] # depends on [control=['if'], data=[]]
where = ' or '.join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query('select data in ({0}, {1}) where {2}'.format(start, end, where), archiver, timeout).get('timeseries', {}) |
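
The method only stitches a where-clause out of the UUID list before delegating to query(); for two placeholder UUIDs and a relative range the generated query looks like this:

uuids = ['uuid-one', 'uuid-two']
where = ' or '.join('uuid = "{0}"'.format(u) for u in uuids)
print('select data in ({0}, {1}) where {2}'.format('now -1h', 'now', where))
# select data in (now -1h, now) where uuid = "uuid-one" or uuid = "uuid-two"
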
def _ParseArgs(self, args, known_only):
"""Helper function to do the main argument parsing.
This function goes through args and does the bulk of the flag parsing.
It will find the corresponding flag in our flag dictionary, and call its
.parse() method on the flag value.
Args:
args: List of strings with the arguments to parse.
known_only: parse and remove known flags, return rest in unparsed_args
Returns:
A tuple with the following:
unknown_flags: List of (flag name, arg) for flags we don't know about.
unparsed_args: List of arguments we did not parse.
undefok: Set of flags that were given via --undefok.
Raises:
Error: on any parsing error.
ValueError: on flag value parsing error.
"""
unknown_flags, unparsed_args, undefok = [], [], set()
flag_dict = self.FlagDict()
args = iter(args)
for arg in args:
value = None
def GetValue():
# pylint: disable=cell-var-from-loop
try:
return next(args) if value is None else value
except StopIteration:
raise exceptions.Error('Missing value for flag ' + arg)
if not arg.startswith('-'):
# A non-argument: default is break, GNU is skip.
unparsed_args.append(arg)
if self.IsGnuGetOpt():
continue
else:
break
if arg == '--':
if known_only:
unparsed_args.append(arg)
break
if '=' in arg:
name, value = arg.lstrip('-').split('=', 1)
else:
name, value = arg.lstrip('-'), None
if not name:
# The argument is all dashes (including one dash).
unparsed_args.append(arg)
if self.IsGnuGetOpt():
continue
else:
break
# --undefok is a special case.
if name == 'undefok':
if known_only:
unparsed_args.append(arg)
value = GetValue()
undefok.update(v.strip() for v in value.split(','))
undefok.update('no' + v.strip() for v in value.split(','))
continue
flag = flag_dict.get(name)
if flag:
value = (flag.boolean and value is None) or GetValue()
elif name.startswith('no') and len(name) > 2:
# Boolean flags can take the form of --noflag, with no value.
noflag = flag_dict.get(name[2:])
if noflag and noflag.boolean:
if value is not None:
raise ValueError(arg + ' does not take an argument')
flag = noflag
value = False
if flag:
flag.parse(value)
flag.using_default_value = False
elif known_only:
unparsed_args.append(arg)
else:
unknown_flags.append((name, arg))
unparsed_args.extend(args)
return unknown_flags, unparsed_args, undefok | def function[_ParseArgs, parameter[self, args, known_only]]:
constant[Helper function to do the main argument parsing.
This function goes through args and does the bulk of the flag parsing.
It will find the corresponding flag in our flag dictionary, and call its
.parse() method on the flag value.
Args:
args: List of strings with the arguments to parse.
known_only: parse and remove known flags, return rest in unparsed_args
Returns:
A tuple with the following:
unknown_flags: List of (flag name, arg) for flags we don't know about.
unparsed_args: List of arguments we did not parse.
undefok: Set of flags that were given via --undefok.
Raises:
Error: on any parsing error.
ValueError: on flag value parsing error.
]
<ast.Tuple object at 0x7da20e9b1c60> assign[=] tuple[[<ast.List object at 0x7da20e9b0e20>, <ast.List object at 0x7da20e9b0b50>, <ast.Call object at 0x7da20e9b2710>]]
variable[flag_dict] assign[=] call[name[self].FlagDict, parameter[]]
variable[args] assign[=] call[name[iter], parameter[name[args]]]
for taget[name[arg]] in starred[name[args]] begin[:]
variable[value] assign[=] constant[None]
def function[GetValue, parameter[]]:
<ast.Try object at 0x7da20e9b0dc0>
if <ast.UnaryOp object at 0x7da20e9b2830> begin[:]
call[name[unparsed_args].append, parameter[name[arg]]]
if call[name[self].IsGnuGetOpt, parameter[]] begin[:]
continue
if compare[name[arg] equal[==] constant[--]] begin[:]
if name[known_only] begin[:]
call[name[unparsed_args].append, parameter[name[arg]]]
break
if compare[constant[=] in name[arg]] begin[:]
<ast.Tuple object at 0x7da2054a7640> assign[=] call[call[name[arg].lstrip, parameter[constant[-]]].split, parameter[constant[=], constant[1]]]
if <ast.UnaryOp object at 0x7da2054a4310> begin[:]
call[name[unparsed_args].append, parameter[name[arg]]]
if call[name[self].IsGnuGetOpt, parameter[]] begin[:]
continue
if compare[name[name] equal[==] constant[undefok]] begin[:]
if name[known_only] begin[:]
call[name[unparsed_args].append, parameter[name[arg]]]
variable[value] assign[=] call[name[GetValue], parameter[]]
call[name[undefok].update, parameter[<ast.GeneratorExp object at 0x7da2054a5f30>]]
call[name[undefok].update, parameter[<ast.GeneratorExp object at 0x7da2054a5390>]]
continue
variable[flag] assign[=] call[name[flag_dict].get, parameter[name[name]]]
if name[flag] begin[:]
variable[value] assign[=] <ast.BoolOp object at 0x7da18bcc97e0>
if name[flag] begin[:]
call[name[flag].parse, parameter[name[value]]]
name[flag].using_default_value assign[=] constant[False]
call[name[unparsed_args].extend, parameter[name[args]]]
return[tuple[[<ast.Name object at 0x7da207f00be0>, <ast.Name object at 0x7da207f000d0>, <ast.Name object at 0x7da207f03ee0>]]] | keyword[def] identifier[_ParseArgs] ( identifier[self] , identifier[args] , identifier[known_only] ):
literal[string]
identifier[unknown_flags] , identifier[unparsed_args] , identifier[undefok] =[],[], identifier[set] ()
identifier[flag_dict] = identifier[self] . identifier[FlagDict] ()
identifier[args] = identifier[iter] ( identifier[args] )
keyword[for] identifier[arg] keyword[in] identifier[args] :
identifier[value] = keyword[None]
keyword[def] identifier[GetValue] ():
keyword[try] :
keyword[return] identifier[next] ( identifier[args] ) keyword[if] identifier[value] keyword[is] keyword[None] keyword[else] identifier[value]
keyword[except] identifier[StopIteration] :
keyword[raise] identifier[exceptions] . identifier[Error] ( literal[string] + identifier[arg] )
keyword[if] keyword[not] identifier[arg] . identifier[startswith] ( literal[string] ):
identifier[unparsed_args] . identifier[append] ( identifier[arg] )
keyword[if] identifier[self] . identifier[IsGnuGetOpt] ():
keyword[continue]
keyword[else] :
keyword[break]
keyword[if] identifier[arg] == literal[string] :
keyword[if] identifier[known_only] :
identifier[unparsed_args] . identifier[append] ( identifier[arg] )
keyword[break]
keyword[if] literal[string] keyword[in] identifier[arg] :
identifier[name] , identifier[value] = identifier[arg] . identifier[lstrip] ( literal[string] ). identifier[split] ( literal[string] , literal[int] )
keyword[else] :
identifier[name] , identifier[value] = identifier[arg] . identifier[lstrip] ( literal[string] ), keyword[None]
keyword[if] keyword[not] identifier[name] :
identifier[unparsed_args] . identifier[append] ( identifier[arg] )
keyword[if] identifier[self] . identifier[IsGnuGetOpt] ():
keyword[continue]
keyword[else] :
keyword[break]
keyword[if] identifier[name] == literal[string] :
keyword[if] identifier[known_only] :
identifier[unparsed_args] . identifier[append] ( identifier[arg] )
identifier[value] = identifier[GetValue] ()
identifier[undefok] . identifier[update] ( identifier[v] . identifier[strip] () keyword[for] identifier[v] keyword[in] identifier[value] . identifier[split] ( literal[string] ))
identifier[undefok] . identifier[update] ( literal[string] + identifier[v] . identifier[strip] () keyword[for] identifier[v] keyword[in] identifier[value] . identifier[split] ( literal[string] ))
keyword[continue]
identifier[flag] = identifier[flag_dict] . identifier[get] ( identifier[name] )
keyword[if] identifier[flag] :
identifier[value] =( identifier[flag] . identifier[boolean] keyword[and] identifier[value] keyword[is] keyword[None] ) keyword[or] identifier[GetValue] ()
keyword[elif] identifier[name] . identifier[startswith] ( literal[string] ) keyword[and] identifier[len] ( identifier[name] )> literal[int] :
identifier[noflag] = identifier[flag_dict] . identifier[get] ( identifier[name] [ literal[int] :])
keyword[if] identifier[noflag] keyword[and] identifier[noflag] . identifier[boolean] :
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[ValueError] ( identifier[arg] + literal[string] )
identifier[flag] = identifier[noflag]
identifier[value] = keyword[False]
keyword[if] identifier[flag] :
identifier[flag] . identifier[parse] ( identifier[value] )
identifier[flag] . identifier[using_default_value] = keyword[False]
keyword[elif] identifier[known_only] :
identifier[unparsed_args] . identifier[append] ( identifier[arg] )
keyword[else] :
identifier[unknown_flags] . identifier[append] (( identifier[name] , identifier[arg] ))
identifier[unparsed_args] . identifier[extend] ( identifier[args] )
keyword[return] identifier[unknown_flags] , identifier[unparsed_args] , identifier[undefok] | def _ParseArgs(self, args, known_only):
"""Helper function to do the main argument parsing.
This function goes through args and does the bulk of the flag parsing.
It will find the corresponding flag in our flag dictionary, and call its
.parse() method on the flag value.
Args:
args: List of strings with the arguments to parse.
known_only: parse and remove known flags, return rest in unparsed_args
Returns:
A tuple with the following:
unknown_flags: List of (flag name, arg) for flags we don't know about.
unparsed_args: List of arguments we did not parse.
undefok: Set of flags that were given via --undefok.
Raises:
Error: on any parsing error.
ValueError: on flag value parsing error.
"""
(unknown_flags, unparsed_args, undefok) = ([], [], set())
flag_dict = self.FlagDict()
args = iter(args)
for arg in args:
value = None
def GetValue():
# pylint: disable=cell-var-from-loop
try:
return next(args) if value is None else value # depends on [control=['try'], data=[]]
except StopIteration:
raise exceptions.Error('Missing value for flag ' + arg) # depends on [control=['except'], data=[]]
if not arg.startswith('-'):
# A non-argument: default is break, GNU is skip.
unparsed_args.append(arg)
if self.IsGnuGetOpt():
continue # depends on [control=['if'], data=[]]
else:
break # depends on [control=['if'], data=[]]
if arg == '--':
if known_only:
unparsed_args.append(arg) # depends on [control=['if'], data=[]]
break # depends on [control=['if'], data=['arg']]
if '=' in arg:
(name, value) = arg.lstrip('-').split('=', 1) # depends on [control=['if'], data=['arg']]
else:
(name, value) = (arg.lstrip('-'), None)
if not name:
# The argument is all dashes (including one dash).
unparsed_args.append(arg)
if self.IsGnuGetOpt():
continue # depends on [control=['if'], data=[]]
else:
break # depends on [control=['if'], data=[]]
# --undefok is a special case.
if name == 'undefok':
if known_only:
unparsed_args.append(arg) # depends on [control=['if'], data=[]]
value = GetValue()
undefok.update((v.strip() for v in value.split(',')))
undefok.update(('no' + v.strip() for v in value.split(',')))
continue # depends on [control=['if'], data=[]]
flag = flag_dict.get(name)
if flag:
value = flag.boolean and value is None or GetValue() # depends on [control=['if'], data=[]]
elif name.startswith('no') and len(name) > 2:
# Boolean flags can take the form of --noflag, with no value.
noflag = flag_dict.get(name[2:])
if noflag and noflag.boolean:
if value is not None:
raise ValueError(arg + ' does not take an argument') # depends on [control=['if'], data=[]]
flag = noflag
value = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if flag:
flag.parse(value)
flag.using_default_value = False # depends on [control=['if'], data=[]]
elif known_only:
unparsed_args.append(arg) # depends on [control=['if'], data=[]]
else:
unknown_flags.append((name, arg)) # depends on [control=['for'], data=['arg']]
unparsed_args.extend(args)
return (unknown_flags, unparsed_args, undefok) |
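A minimal standalone sketch of the name/value split performed above; the helper name below is illustrative and not part of the original class:

def split_flag_arg(arg):
    # Mirrors _ParseArgs' handling of '--name=value' vs. '--name'.
    if '=' in arg:
        name, value = arg.lstrip('-').split('=', 1)
        return name, value
    return arg.lstrip('-'), None

assert split_flag_arg('--undefok=foo,bar') == ('undefok', 'foo,bar')
assert split_flag_arg('--verbose') == ('verbose', None)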
def get_child_work_units(self, worker_id):
'''Get work units assigned to a worker's children.
Returns a dictionary mapping worker ID to :class:`WorkUnit`.
If a child exists but is idle, that worker ID will map to
:const:`None`. The work unit may already be expired or
assigned to a different worker; this will be reflected in
the returned :class:`WorkUnit`.
This may write back to the underlying data store to clean up
stale children that have not unregistered themselves but
no longer exist in any form.
'''
result = {}
with self.registry.lock(identifier=worker_id) as session:
all_children = session.pull(WORKER_CHILDREN_ + worker_id)
# The data stored in Redis isn't actually conducive to
# this specific query; we will need to scan each work spec
# for each work unit
work_specs = session.pull(WORK_SPECS)
for child in all_children.iterkeys():
work_spec_name = None
for spec in work_specs.iterkeys():
work_unit_key = session.get(
WORK_UNITS_ + spec + '_locks', child)
if work_unit_key:
work_spec_name = spec
break
if work_spec_name:
assigned = session.get(
WORK_UNITS_ + work_spec_name + '_locks',
work_unit_key)
(data, expires) = session.get(
WORK_UNITS_ + work_spec_name, work_unit_key,
include_priority=True)
if data is None:
# The work unit is probably already finished
result[child] = None
else:
result[child] = WorkUnit(
self.registry, work_spec_name, work_unit_key,
data, expires=expires, worker_id=assigned)
else:
# The child isn't doing anything. Does it still
# exist?
heartbeat = session.get(WORKER_OBSERVED_MODE, child)
if heartbeat:
result[child] = None
else:
session.popmany(WORKER_CHILDREN_ + worker_id,
child)
return result | def function[get_child_work_units, parameter[self, worker_id]]:
constant[Get work units assigned to a worker's children.
Returns a dictionary mapping worker ID to :class:`WorkUnit`.
If a child exists but is idle, that worker ID will map to
:const:`None`. The work unit may already be expired or
assigned to a different worker; this will be reflected in
the returned :class:`WorkUnit`.
This may write back to the underlying data store to clean up
stale children that have not unregistered themselves but
no longer exist in any form.
]
variable[result] assign[=] dictionary[[], []]
with call[name[self].registry.lock, parameter[]] begin[:]
variable[all_children] assign[=] call[name[session].pull, parameter[binary_operation[name[WORKER_CHILDREN_] + name[worker_id]]]]
variable[work_specs] assign[=] call[name[session].pull, parameter[name[WORK_SPECS]]]
for taget[name[child]] in starred[call[name[all_children].iterkeys, parameter[]]] begin[:]
variable[work_spec_name] assign[=] constant[None]
for taget[name[spec]] in starred[call[name[work_specs].iterkeys, parameter[]]] begin[:]
variable[work_unit_key] assign[=] call[name[session].get, parameter[binary_operation[binary_operation[name[WORK_UNITS_] + name[spec]] + constant[_locks]], name[child]]]
if name[work_unit_key] begin[:]
variable[work_spec_name] assign[=] name[spec]
break
if name[work_spec_name] begin[:]
variable[assigned] assign[=] call[name[session].get, parameter[binary_operation[binary_operation[name[WORK_UNITS_] + name[work_spec_name]] + constant[_locks]], name[work_unit_key]]]
<ast.Tuple object at 0x7da1b14d0af0> assign[=] call[name[session].get, parameter[binary_operation[name[WORK_UNITS_] + name[work_spec_name]], name[work_unit_key]]]
if compare[name[data] is constant[None]] begin[:]
call[name[result]][name[child]] assign[=] constant[None]
return[name[result]] | keyword[def] identifier[get_child_work_units] ( identifier[self] , identifier[worker_id] ):
literal[string]
identifier[result] ={}
keyword[with] identifier[self] . identifier[registry] . identifier[lock] ( identifier[identifier] = identifier[worker_id] ) keyword[as] identifier[session] :
identifier[all_children] = identifier[session] . identifier[pull] ( identifier[WORKER_CHILDREN_] + identifier[worker_id] )
identifier[work_specs] = identifier[session] . identifier[pull] ( identifier[WORK_SPECS] )
keyword[for] identifier[child] keyword[in] identifier[all_children] . identifier[iterkeys] ():
identifier[work_spec_name] = keyword[None]
keyword[for] identifier[spec] keyword[in] identifier[work_specs] . identifier[iterkeys] ():
identifier[work_unit_key] = identifier[session] . identifier[get] (
identifier[WORK_UNITS_] + identifier[spec] + literal[string] , identifier[child] )
keyword[if] identifier[work_unit_key] :
identifier[work_spec_name] = identifier[spec]
keyword[break]
keyword[if] identifier[work_spec_name] :
identifier[assigned] = identifier[session] . identifier[get] (
identifier[WORK_UNITS_] + identifier[work_spec_name] + literal[string] ,
identifier[work_unit_key] )
( identifier[data] , identifier[expires] )= identifier[session] . identifier[get] (
identifier[WORK_UNITS_] + identifier[work_spec_name] , identifier[work_unit_key] ,
identifier[include_priority] = keyword[True] )
keyword[if] identifier[data] keyword[is] keyword[None] :
identifier[result] [ identifier[child] ]= keyword[None]
keyword[else] :
identifier[result] [ identifier[child] ]= identifier[WorkUnit] (
identifier[self] . identifier[registry] , identifier[work_spec_name] , identifier[work_unit_key] ,
identifier[data] , identifier[expires] = identifier[expires] , identifier[worker_id] = identifier[assigned] )
keyword[else] :
identifier[heartbeat] = identifier[session] . identifier[get] ( identifier[WORKER_OBSERVED_MODE] , identifier[child] )
keyword[if] identifier[heartbeat] :
identifier[result] [ identifier[child] ]= keyword[None]
keyword[else] :
identifier[session] . identifier[popmany] ( identifier[WORKER_CHILDREN_] + identifier[worker_id] ,
identifier[child] )
keyword[return] identifier[result] | def get_child_work_units(self, worker_id):
"""Get work units assigned to a worker's children.
Returns a dictionary mapping worker ID to :class:`WorkUnit`.
If a child exists but is idle, that worker ID will map to
:const:`None`. The work unit may already be expired or
assigned to a different worker; this will be reflected in
the returned :class:`WorkUnit`.
This may write back to the underlying data store to clean up
stale children that have not unregistered themselves but
no longer exist in any form.
"""
result = {}
with self.registry.lock(identifier=worker_id) as session:
all_children = session.pull(WORKER_CHILDREN_ + worker_id)
# The data stored in Redis isn't actually conducive to
# this specific query; we will need to scan each work spec
# for each work unit
work_specs = session.pull(WORK_SPECS)
for child in all_children.iterkeys():
work_spec_name = None
for spec in work_specs.iterkeys():
work_unit_key = session.get(WORK_UNITS_ + spec + '_locks', child)
if work_unit_key:
work_spec_name = spec
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['spec']]
if work_spec_name:
assigned = session.get(WORK_UNITS_ + work_spec_name + '_locks', work_unit_key)
(data, expires) = session.get(WORK_UNITS_ + work_spec_name, work_unit_key, include_priority=True)
if data is None:
# The work unit is probably already finished
result[child] = None # depends on [control=['if'], data=[]]
else:
result[child] = WorkUnit(self.registry, work_spec_name, work_unit_key, data, expires=expires, worker_id=assigned) # depends on [control=['if'], data=[]]
else:
# The child isn't doing anything. Does it still
# exist?
heartbeat = session.get(WORKER_OBSERVED_MODE, child)
if heartbeat:
result[child] = None # depends on [control=['if'], data=[]]
else:
session.popmany(WORKER_CHILDREN_ + worker_id, child) # depends on [control=['for'], data=['child']] # depends on [control=['with'], data=['session']]
return result |
def get_provider():
'''Get the current provider from config'''
name = get_config('provider')
available = entrypoints.get_all('udata.avatars')
if name not in available:
raise ValueError('Unknown avatar provider: {0}'.format(name))
return available[name] | def function[get_provider, parameter[]]:
constant[Get the current provider from config]
variable[name] assign[=] call[name[get_config], parameter[constant[provider]]]
variable[available] assign[=] call[name[entrypoints].get_all, parameter[constant[udata.avatars]]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[available]] begin[:]
<ast.Raise object at 0x7da1b1191600>
return[call[name[available]][name[name]]] | keyword[def] identifier[get_provider] ():
literal[string]
identifier[name] = identifier[get_config] ( literal[string] )
identifier[available] = identifier[entrypoints] . identifier[get_all] ( literal[string] )
keyword[if] identifier[name] keyword[not] keyword[in] identifier[available] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[return] identifier[available] [ identifier[name] ] | def get_provider():
"""Get the current provider from config"""
name = get_config('provider')
available = entrypoints.get_all('udata.avatars')
if name not in available:
raise ValueError('Unknown avatar provider: {0}'.format(name)) # depends on [control=['if'], data=['name']]
return available[name] |
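A hedged call-site sketch; it assumes the config names a provider that may or may not be registered under the 'udata.avatars' entry point group:

try:
    provider = get_provider()
except ValueError as exc:
    # Raised when the configured name has no matching entry point.
    print(exc)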
def clear(self):
"""Executes an HTTP request to clear all contents of a queue."""
url = "queues/%s/messages" % self.name
result = self.client.delete(url = url,
body = json.dumps({}),
headers={'Content-Type': 'application/json'})
return result['body'] | def function[clear, parameter[self]]:
constant[Executes an HTTP request to clear all contents of a queue.]
variable[url] assign[=] binary_operation[constant[queues/%s/messages] <ast.Mod object at 0x7da2590d6920> name[self].name]
variable[result] assign[=] call[name[self].client.delete, parameter[]]
return[call[name[result]][constant[body]]] | keyword[def] identifier[clear] ( identifier[self] ):
literal[string]
identifier[url] = literal[string] % identifier[self] . identifier[name]
identifier[result] = identifier[self] . identifier[client] . identifier[delete] ( identifier[url] = identifier[url] ,
identifier[body] = identifier[json] . identifier[dumps] ({}),
identifier[headers] ={ literal[string] : literal[string] })
keyword[return] identifier[result] [ literal[string] ] | def clear(self):
"""Executes an HTTP request to clear all contents of a queue."""
url = 'queues/%s/messages' % self.name
result = self.client.delete(url=url, body=json.dumps({}), headers={'Content-Type': 'application/json'})
return result['body'] |
def google_cloud_to_local(self, file_name):
"""
Checks whether the file specified by file_name is stored in Google Cloud
Storage (GCS); if so, it downloads the file and saves it locally. The full
path of the saved file will be returned. Otherwise the local file_name
will be returned immediately.
:param file_name: The full path of input file.
:type file_name: str
:return: The full path of local file.
:rtype: str
"""
if not file_name.startswith('gs://'):
return file_name
# Extracts bucket_id and object_id by first removing 'gs://' prefix and
# then split the remaining by path delimiter '/'.
path_components = file_name[self.GCS_PREFIX_LENGTH:].split('/')
if len(path_components) < 2:
raise Exception(
'Invalid Google Cloud Storage (GCS) object path: {}'
.format(file_name))
bucket_id = path_components[0]
object_id = '/'.join(path_components[1:])
local_file = '/tmp/dataflow{}-{}'.format(str(uuid.uuid4())[:8],
path_components[-1])
self._gcs_hook.download(bucket_id, object_id, local_file)
if os.stat(local_file).st_size > 0:
return local_file
raise Exception(
'Failed to download Google Cloud Storage (GCS) object: {}'
.format(file_name)) | def function[google_cloud_to_local, parameter[self, file_name]]:
constant[
Checks whether the file specified by file_name is stored in Google Cloud
Storage (GCS); if so, it downloads the file and saves it locally. The full
path of the saved file will be returned. Otherwise the local file_name
will be returned immediately.
:param file_name: The full path of input file.
:type file_name: str
:return: The full path of local file.
:rtype: str
]
if <ast.UnaryOp object at 0x7da1b0348ac0> begin[:]
return[name[file_name]]
variable[path_components] assign[=] call[call[name[file_name]][<ast.Slice object at 0x7da1b03480a0>].split, parameter[constant[/]]]
if compare[call[name[len], parameter[name[path_components]]] less[<] constant[2]] begin[:]
<ast.Raise object at 0x7da1b034be80>
variable[bucket_id] assign[=] call[name[path_components]][constant[0]]
variable[object_id] assign[=] call[constant[/].join, parameter[call[name[path_components]][<ast.Slice object at 0x7da1b03494e0>]]]
variable[local_file] assign[=] call[constant[/tmp/dataflow{}-{}].format, parameter[call[call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]]][<ast.Slice object at 0x7da1b034a2f0>], call[name[path_components]][<ast.UnaryOp object at 0x7da1b0348550>]]]
call[name[self]._gcs_hook.download, parameter[name[bucket_id], name[object_id], name[local_file]]]
if compare[call[name[os].stat, parameter[name[local_file]]].st_size greater[>] constant[0]] begin[:]
return[name[local_file]]
<ast.Raise object at 0x7da1b034a5c0> | keyword[def] identifier[google_cloud_to_local] ( identifier[self] , identifier[file_name] ):
literal[string]
keyword[if] keyword[not] identifier[file_name] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[file_name]
identifier[path_components] = identifier[file_name] [ identifier[self] . identifier[GCS_PREFIX_LENGTH] :]. identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[path_components] )< literal[int] :
keyword[raise] identifier[Exception] (
literal[string]
. identifier[format] ( identifier[file_name] ))
identifier[bucket_id] = identifier[path_components] [ literal[int] ]
identifier[object_id] = literal[string] . identifier[join] ( identifier[path_components] [ literal[int] :])
identifier[local_file] = literal[string] . identifier[format] ( identifier[str] ( identifier[uuid] . identifier[uuid4] ())[: literal[int] ],
identifier[path_components] [- literal[int] ])
identifier[self] . identifier[_gcs_hook] . identifier[download] ( identifier[bucket_id] , identifier[object_id] , identifier[local_file] )
keyword[if] identifier[os] . identifier[stat] ( identifier[local_file] ). identifier[st_size] > literal[int] :
keyword[return] identifier[local_file]
keyword[raise] identifier[Exception] (
literal[string]
. identifier[format] ( identifier[file_name] )) | def google_cloud_to_local(self, file_name):
"""
Checks whether the file specified by file_name is stored in Google Cloud
Storage (GCS), if so, downloads the file and saves it locally. The full
path of the saved file will be returned. Otherwise the local file_name
will be returned immediately.
:param file_name: The full path of input file.
:type file_name: str
:return: The full path of local file.
:rtype: str
"""
if not file_name.startswith('gs://'):
return file_name # depends on [control=['if'], data=[]]
# Extracts bucket_id and object_id by first removing 'gs://' prefix and
# then split the remaining by path delimiter '/'.
path_components = file_name[self.GCS_PREFIX_LENGTH:].split('/')
if len(path_components) < 2:
raise Exception('Invalid Google Cloud Storage (GCS) object path: {}'.format(file_name)) # depends on [control=['if'], data=[]]
bucket_id = path_components[0]
object_id = '/'.join(path_components[1:])
local_file = '/tmp/dataflow{}-{}'.format(str(uuid.uuid4())[:8], path_components[-1])
self._gcs_hook.download(bucket_id, object_id, local_file)
if os.stat(local_file).st_size > 0:
return local_file # depends on [control=['if'], data=[]]
raise Exception('Failed to download Google Cloud Storage (GCS) object: {}'.format(file_name)) |
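For orientation, a standalone sketch of the gs:// decomposition used above; GCS_PREFIX_LENGTH is assumed to equal len('gs://'):

GCS_PREFIX_LENGTH = len('gs://')  # assumed value of the class constant

def split_gcs_path(file_name):
    # 'gs://bucket/a/b.txt' -> ('bucket', 'a/b.txt')
    parts = file_name[GCS_PREFIX_LENGTH:].split('/')
    if len(parts) < 2:
        raise ValueError('Invalid GCS object path: {}'.format(file_name))
    return parts[0], '/'.join(parts[1:])

assert split_gcs_path('gs://bucket/a/b.txt') == ('bucket', 'a/b.txt')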
def track_change(self, instance, resolution_level=0):
"""
Change tracking options for the already tracked object 'instance'.
If instance is not tracked, a KeyError will be raised.
"""
tobj = self.objects[id(instance)]
tobj.set_resolution_level(resolution_level) | def function[track_change, parameter[self, instance, resolution_level]]:
constant[
Change tracking options for the already tracked object 'instance'.
If instance is not tracked, a KeyError will be raised.
]
variable[tobj] assign[=] call[name[self].objects][call[name[id], parameter[name[instance]]]]
call[name[tobj].set_resolution_level, parameter[name[resolution_level]]] | keyword[def] identifier[track_change] ( identifier[self] , identifier[instance] , identifier[resolution_level] = literal[int] ):
literal[string]
identifier[tobj] = identifier[self] . identifier[objects] [ identifier[id] ( identifier[instance] )]
identifier[tobj] . identifier[set_resolution_level] ( identifier[resolution_level] ) | def track_change(self, instance, resolution_level=0):
"""
Change tracking options for the already tracked object 'instance'.
If instance is not tracked, a KeyError will be raised.
"""
tobj = self.objects[id(instance)]
tobj.set_resolution_level(resolution_level) |
def call_api(event, app_name, app_swagger_path, logger, strict_validation=True,
validate_responses=True, cache_app=True):
"""Wire up the incoming Lambda/API Gateway request to an application.
:param dict event:
Dictionary containing the entire request template. This can vary wildly
depending on the template structure and contents.
:param str app_name:
Name of the API application.
:param str app_swagger_path:
Local path to the Swagger API YAML file.
:param logging.Logger logger:
A Logger instance returned by `fleece.log.get_logger()` to be used for
capturing details about errors.
:param bool strict_validation:
Toggle to enable/disable Connexion's parameter validation.
:param bool validate_responses:
Toggle to enable/disable Connexion's response validation.
:param bool cache_app:
Toggle to enable/disable the caching of the Connexion/Flask app
instance. It's on by default, because it provides a significant
performance improvement in the Lambda runtime environment.
"""
app = get_connexion_app(
app_name=app_name,
app_swagger_path=app_swagger_path,
strict_validation=strict_validation,
validate_responses=validate_responses,
cache_app=cache_app,
)
return app.call_api(event) | def function[call_api, parameter[event, app_name, app_swagger_path, logger, strict_validation, validate_responses, cache_app]]:
constant[Wire up the incoming Lambda/API Gateway request to an application.
:param dict event:
Dictionary containing the entire request template. This can vary wildly
depending on the template structure and contents.
:param str app_name:
Name of the API application.
:param str app_swagger_path:
Local path to the Swagger API YAML file.
:param logging.Logger logger:
A Logger instance returned by `fleece.log.get_logger()` to be used for
capturing details about errors.
:param bool strict_validation:
Toggle to enable/disable Connexion's parameter validation.
:param bool validate_responses:
Toggle to enable/disable Connexion's response validation.
:param bool cache_app:
Toggle to enable/disable the caching of the Connexion/Flask app
instance. It's on by default, because it provides a significant
performance improvement in the Lambda runtime environment.
]
variable[app] assign[=] call[name[get_connexion_app], parameter[]]
return[call[name[app].call_api, parameter[name[event]]]] | keyword[def] identifier[call_api] ( identifier[event] , identifier[app_name] , identifier[app_swagger_path] , identifier[logger] , identifier[strict_validation] = keyword[True] ,
identifier[validate_responses] = keyword[True] , identifier[cache_app] = keyword[True] ):
literal[string]
identifier[app] = identifier[get_connexion_app] (
identifier[app_name] = identifier[app_name] ,
identifier[app_swagger_path] = identifier[app_swagger_path] ,
identifier[strict_validation] = identifier[strict_validation] ,
identifier[validate_responses] = identifier[validate_responses] ,
identifier[cache_app] = identifier[cache_app] ,
)
keyword[return] identifier[app] . identifier[call_api] ( identifier[event] ) | def call_api(event, app_name, app_swagger_path, logger, strict_validation=True, validate_responses=True, cache_app=True):
"""Wire up the incoming Lambda/API Gateway request to an application.
:param dict event:
Dictionary containing the entire request template. This can vary wildly
depending on the template structure and contents.
:param str app_name:
Name of the API application.
:param str app_swagger_path:
Local path to the Swagger API YAML file.
:param logging.Logger logger:
A Logger instance returned by `fleece.log.get_logger()` to be used for
capturing details about errors.
:param bool strict_validation:
Toggle to enable/disable Connexion's parameter validation.
:param bool validate_responses:
Toggle to enable/disable Connexion's response validation.
:param bool cache_app:
Toggle to enable/disable the caching of the Connexion/Flask app
instance. It's on by default, because it provides a significant
performance improvement in the Lambda runtime environment.
"""
app = get_connexion_app(app_name=app_name, app_swagger_path=app_swagger_path, strict_validation=strict_validation, validate_responses=validate_responses, cache_app=cache_app)
return app.call_api(event) |
async def close_interface(self, conn_id, interface):
"""Close an interface on this IOTile device.
See :meth:`AbstractDeviceAdapter.close_interface`.
"""
adapter_id = self._get_property(conn_id, 'adapter')
await self.adapters[adapter_id].close_interface(conn_id, interface) | <ast.AsyncFunctionDef object at 0x7da2046232b0> | keyword[async] keyword[def] identifier[close_interface] ( identifier[self] , identifier[conn_id] , identifier[interface] ):
literal[string]
identifier[adapter_id] = identifier[self] . identifier[_get_property] ( identifier[conn_id] , literal[string] )
keyword[await] identifier[self] . identifier[adapters] [ identifier[adapter_id] ]. identifier[close_interface] ( identifier[conn_id] , identifier[interface] ) | async def close_interface(self, conn_id, interface):
"""Close an interface on this IOTile device.
See :meth:`AbstractDeviceAdapter.close_interface`.
"""
adapter_id = self._get_property(conn_id, 'adapter')
await self.adapters[adapter_id].close_interface(conn_id, interface) |
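Being a coroutine, the method must be awaited; a hedged call-site sketch in which the adapter instance, connection id, and interface name are all illustrative:

import asyncio

async def disconnect(device_adapter, conn_id):
    # 'rpc' is an example interface name, not taken from the original.
    await device_adapter.close_interface(conn_id, 'rpc')

# asyncio.run(disconnect(adapter, 1))  # given a concrete adapter instance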
def set_release_actions(self, actions):
"""Set the widget that gives users options about the release, e.g. importing references
:param actions: Release actions that define the sanity checks and cleanup actions
:type actions: :class:`jukeboxcore.release.ReleaseActions`
:returns: None
:rtype: None
:raises: None
"""
self.release_actions = actions
self.option_widget = self.release_actions.option_widget()
if self.option_widget:
self.option_vbox.addWidget(self.option_widget)
self.option_gb.setVisible(True) | def function[set_release_actions, parameter[self, actions]]:
constant[Set the widget that gives users options about the release, e.g. importing references
:param actions: Release actions that define the sanity checks and cleanup actions
:type actions: :class:`jukeboxcore.release.ReleaseActions`
:returns: None
:rtype: None
:raises: None
]
name[self].release_actions assign[=] name[actions]
name[self].option_widget assign[=] call[name[self].release_actions.option_widget, parameter[]]
if name[self].option_widget begin[:]
call[name[self].option_vbox.addWidget, parameter[name[self].option_widget]]
call[name[self].option_gb.setVisible, parameter[constant[True]]] | keyword[def] identifier[set_release_actions] ( identifier[self] , identifier[actions] ):
literal[string]
identifier[self] . identifier[release_actions] = identifier[actions]
identifier[self] . identifier[option_widget] = identifier[self] . identifier[release_actions] . identifier[option_widget] ()
keyword[if] identifier[self] . identifier[option_widget] :
identifier[self] . identifier[option_vbox] . identifier[addWidget] ( identifier[self] . identifier[option_widget] )
identifier[self] . identifier[option_gb] . identifier[setVisible] ( keyword[True] ) | def set_release_actions(self, actions):
"""Set the widget that gives users options about the release, e.g. importing references
:param actions: Release actions that define the sanity checks and cleanup actions
:type actions: :class:`jukeboxcore.release.ReleaseActions`
:returns: None
:rtype: None
:raises: None
"""
self.release_actions = actions
self.option_widget = self.release_actions.option_widget()
if self.option_widget:
self.option_vbox.addWidget(self.option_widget)
self.option_gb.setVisible(True) # depends on [control=['if'], data=[]] |
def ListChildren(self, limit=None, age=NEWEST_TIME):
"""Yields RDFURNs of all the children of this object.
Args:
limit: Total number of items we will attempt to retrieve.
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range in microseconds.
Yields:
RDFURN instances of each child.
"""
# Just grab all the children from the index.
for predicate, timestamp in data_store.DB.AFF4FetchChildren(
self.urn, timestamp=Factory.ParseAgeSpecification(age), limit=limit):
urn = self.urn.Add(predicate)
urn.age = rdfvalue.RDFDatetime(timestamp)
yield urn | def function[ListChildren, parameter[self, limit, age]]:
constant[Yields RDFURNs of all the children of this object.
Args:
limit: Total number of items we will attempt to retrieve.
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range in microseconds.
Yields:
RDFURN instances of each child.
]
for taget[tuple[[<ast.Name object at 0x7da1b1b87640>, <ast.Name object at 0x7da1b1b84c40>]]] in starred[call[name[data_store].DB.AFF4FetchChildren, parameter[name[self].urn]]] begin[:]
variable[urn] assign[=] call[name[self].urn.Add, parameter[name[predicate]]]
name[urn].age assign[=] call[name[rdfvalue].RDFDatetime, parameter[name[timestamp]]]
<ast.Yield object at 0x7da1b1b85990> | keyword[def] identifier[ListChildren] ( identifier[self] , identifier[limit] = keyword[None] , identifier[age] = identifier[NEWEST_TIME] ):
literal[string]
keyword[for] identifier[predicate] , identifier[timestamp] keyword[in] identifier[data_store] . identifier[DB] . identifier[AFF4FetchChildren] (
identifier[self] . identifier[urn] , identifier[timestamp] = identifier[Factory] . identifier[ParseAgeSpecification] ( identifier[age] ), identifier[limit] = identifier[limit] ):
identifier[urn] = identifier[self] . identifier[urn] . identifier[Add] ( identifier[predicate] )
identifier[urn] . identifier[age] = identifier[rdfvalue] . identifier[RDFDatetime] ( identifier[timestamp] )
keyword[yield] identifier[urn] | def ListChildren(self, limit=None, age=NEWEST_TIME):
"""Yields RDFURNs of all the children of this object.
Args:
limit: Total number of items we will attempt to retrieve.
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range in microseconds.
Yields:
RDFURN instances of each child.
"""
# Just grab all the children from the index.
for (predicate, timestamp) in data_store.DB.AFF4FetchChildren(self.urn, timestamp=Factory.ParseAgeSpecification(age), limit=limit):
urn = self.urn.Add(predicate)
urn.age = rdfvalue.RDFDatetime(timestamp)
yield urn # depends on [control=['for'], data=[]] |
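A hedged call-site sketch; fd is assumed to be an opened AFF4 object exposing this method, and each yielded RDFURN carries its timestamp on .age:

def print_children(fd, limit=100):
    # Prints each child URN together with the age set on it above.
    for child_urn in fd.ListChildren(limit=limit):
        print(child_urn, child_urn.age)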
def locate_module(module_id: str, module_type: str = None):
"""
Locate module by module ID
Args:
module_id: Module ID
module_type: Type of module, one of ``'master'``, ``'slave'`` and ``'middleware'``
"""
entry_point = None
if module_type:
entry_point = 'ehforwarderbot.%s' % module_type
module_id = module_id.split('#', 1)[0]
if entry_point:
for i in pkg_resources.iter_entry_points(entry_point):
if i.name == module_id:
return i.load()
return pydoc.locate(module_id) | def function[locate_module, parameter[module_id, module_type]]:
constant[
Locate module by module ID
Args:
module_id: Module ID
module_type: Type of module, one of ``'master'``, ``'slave'`` and ``'middleware'``
]
variable[entry_point] assign[=] constant[None]
if name[module_type] begin[:]
variable[entry_point] assign[=] binary_operation[constant[ehforwarderbot.%s] <ast.Mod object at 0x7da2590d6920> name[module_type]]
variable[module_id] assign[=] call[call[name[module_id].split, parameter[constant[#], constant[1]]]][constant[0]]
if name[entry_point] begin[:]
for taget[name[i]] in starred[call[name[pkg_resources].iter_entry_points, parameter[name[entry_point]]]] begin[:]
if compare[name[i].name equal[==] name[module_id]] begin[:]
return[call[name[i].load, parameter[]]]
return[call[name[pydoc].locate, parameter[name[module_id]]]] | keyword[def] identifier[locate_module] ( identifier[module_id] : identifier[str] , identifier[module_type] : identifier[str] = keyword[None] ):
literal[string]
identifier[entry_point] = keyword[None]
keyword[if] identifier[module_type] :
identifier[entry_point] = literal[string] % identifier[module_type]
identifier[module_id] = identifier[module_id] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ]
keyword[if] identifier[entry_point] :
keyword[for] identifier[i] keyword[in] identifier[pkg_resources] . identifier[iter_entry_points] ( identifier[entry_point] ):
keyword[if] identifier[i] . identifier[name] == identifier[module_id] :
keyword[return] identifier[i] . identifier[load] ()
keyword[return] identifier[pydoc] . identifier[locate] ( identifier[module_id] ) | def locate_module(module_id: str, module_type: str=None):
"""
Locate module by module ID
Args:
module_id: Module ID
module_type: Type of module, one of ``'master'``, ``'slave'`` and ``'middleware'``
"""
entry_point = None
if module_type:
entry_point = 'ehforwarderbot.%s' % module_type # depends on [control=['if'], data=[]]
module_id = module_id.split('#', 1)[0]
if entry_point:
for i in pkg_resources.iter_entry_points(entry_point):
if i.name == module_id:
return i.load() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
return pydoc.locate(module_id) |
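Two hedged lookups, one searched among entry points by type and one falling back to a plain dotted path; the first module id is illustrative:

# The fragment after '#' is ignored when matching entry point names.
channel_cls = locate_module('example.channel#instance1', 'slave')
# Without a type, the id is resolved via pydoc.locate, e.g. stdlib json.
json_module = locate_module('json')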
def traverse_tree(cls, static_dir):
"""traverse the static folders an look for at least one file ending in .scss/.sass"""
for root, dirs, files in os.walk(static_dir):
for filename in files:
if cls._pattern.match(filename):
APPS_INCLUDE_DIRS.append(static_dir)
return | def function[traverse_tree, parameter[cls, static_dir]]:
constant[traverse the static folders and look for at least one file ending in .scss/.sass]
for taget[tuple[[<ast.Name object at 0x7da20c7cb610>, <ast.Name object at 0x7da20c7c9bd0>, <ast.Name object at 0x7da20c7c8b80>]]] in starred[call[name[os].walk, parameter[name[static_dir]]]] begin[:]
for taget[name[filename]] in starred[name[files]] begin[:]
if call[name[cls]._pattern.match, parameter[name[filename]]] begin[:]
call[name[APPS_INCLUDE_DIRS].append, parameter[name[static_dir]]]
return[None] | keyword[def] identifier[traverse_tree] ( identifier[cls] , identifier[static_dir] ):
literal[string]
keyword[for] identifier[root] , identifier[dirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[static_dir] ):
keyword[for] identifier[filename] keyword[in] identifier[files] :
keyword[if] identifier[cls] . identifier[_pattern] . identifier[match] ( identifier[filename] ):
identifier[APPS_INCLUDE_DIRS] . identifier[append] ( identifier[static_dir] )
keyword[return] | def traverse_tree(cls, static_dir):
"""traverse the static folders an look for at least one file ending in .scss/.sass"""
for (root, dirs, files) in os.walk(static_dir):
for filename in files:
if cls._pattern.match(filename):
APPS_INCLUDE_DIRS.append(static_dir)
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filename']] # depends on [control=['for'], data=[]] |
def stretch(self, factor, window=20):
'''Change the audio duration (but not its pitch).
**Unless factor is close to 1, use the tempo effect instead.**
This effect is broadly equivalent to the tempo effect with search set
to zero, so in general, its results are comparatively poor; it is
retained as it can sometimes out-perform tempo for small factors.
Parameters
----------
factor : float
The ratio of the new tempo to the old tempo.
For ex. 1.1 speeds up the tempo by 10%; 0.9 slows it down by 10%.
Note - this argument is the inverse of what is passed to the sox
stretch effect for consistency with tempo.
window : float, default=20
Window size in milliseconds
See Also
--------
tempo, speed, pitch
'''
if not is_number(factor) or factor <= 0:
raise ValueError("factor must be a positive number")
if factor < 0.5 or factor > 2:
logger.warning(
"Using an extreme time stretching factor. "
"Quality of results will be poor"
)
if abs(factor - 1.0) > 0.1:
logger.warning(
"For this stretch factor, "
"the tempo effect has better performance."
)
if not is_number(window) or window <= 0:
raise ValueError(
"window must be a positive number."
)
effect_args = ['stretch', '{:f}'.format(factor), '{:f}'.format(window)]
self.effects.extend(effect_args)
self.effects_log.append('stretch')
return self | def function[stretch, parameter[self, factor, window]]:
constant[Change the audio duration (but not its pitch).
**Unless factor is close to 1, use the tempo effect instead.**
This effect is broadly equivalent to the tempo effect with search set
to zero, so in general, its results are comparatively poor; it is
retained as it can sometimes out-perform tempo for small factors.
Parameters
----------
factor : float
The ratio of the new tempo to the old tempo.
For ex. 1.1 speeds up the tempo by 10%; 0.9 slows it down by 10%.
Note - this argument is the inverse of what is passed to the sox
stretch effect for consistency with tempo.
window : float, default=20
Window size in milliseconds
See Also
--------
tempo, speed, pitch
]
if <ast.BoolOp object at 0x7da1b01e5960> begin[:]
<ast.Raise object at 0x7da1b01e5660>
if <ast.BoolOp object at 0x7da1b01e7010> begin[:]
call[name[logger].warning, parameter[constant[Using an extreme time stretching factor. Quality of results will be poor]]]
if compare[call[name[abs], parameter[binary_operation[name[factor] - constant[1.0]]]] greater[>] constant[0.1]] begin[:]
call[name[logger].warning, parameter[constant[For this stretch factor, the tempo effect has better performance.]]]
if <ast.BoolOp object at 0x7da1b01e78b0> begin[:]
<ast.Raise object at 0x7da1b01e74f0>
variable[effect_args] assign[=] list[[<ast.Constant object at 0x7da1b01e4f10>, <ast.Call object at 0x7da1b01e5360>, <ast.Call object at 0x7da1b01e5b10>]]
call[name[self].effects.extend, parameter[name[effect_args]]]
call[name[self].effects_log.append, parameter[constant[stretch]]]
return[name[self]] | keyword[def] identifier[stretch] ( identifier[self] , identifier[factor] , identifier[window] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[is_number] ( identifier[factor] ) keyword[or] identifier[factor] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[factor] < literal[int] keyword[or] identifier[factor] > literal[int] :
identifier[logger] . identifier[warning] (
literal[string]
literal[string]
)
keyword[if] identifier[abs] ( identifier[factor] - literal[int] )> literal[int] :
identifier[logger] . identifier[warning] (
literal[string]
literal[string]
)
keyword[if] keyword[not] identifier[is_number] ( identifier[window] ) keyword[or] identifier[window] <= literal[int] :
keyword[raise] identifier[ValueError] (
literal[string]
)
identifier[effect_args] =[ literal[string] , literal[string] . identifier[format] ( identifier[factor] ), literal[string] . identifier[format] ( identifier[window] )]
identifier[self] . identifier[effects] . identifier[extend] ( identifier[effect_args] )
identifier[self] . identifier[effects_log] . identifier[append] ( literal[string] )
keyword[return] identifier[self] | def stretch(self, factor, window=20):
"""Change the audio duration (but not its pitch).
**Unless factor is close to 1, use the tempo effect instead.**
This effect is broadly equivalent to the tempo effect with search set
to zero, so in general, its results are comparatively poor; it is
retained as it can sometimes out-perform tempo for small factors.
Parameters
----------
factor : float
The ratio of the new tempo to the old tempo.
For ex. 1.1 speeds up the tempo by 10%; 0.9 slows it down by 10%.
Note - this argument is the inverse of what is passed to the sox
stretch effect for consistency with tempo.
window : float, default=20
Window size in milliseconds
See Also
--------
tempo, speed, pitch
"""
if not is_number(factor) or factor <= 0:
raise ValueError('factor must be a positive number') # depends on [control=['if'], data=[]]
if factor < 0.5 or factor > 2:
logger.warning('Using an extreme time stretching factor. Quality of results will be poor') # depends on [control=['if'], data=[]]
if abs(factor - 1.0) > 0.1:
logger.warning('For this stretch factor, the tempo effect has better performance.') # depends on [control=['if'], data=[]]
if not is_number(window) or window <= 0:
raise ValueError('window must be a positive number.') # depends on [control=['if'], data=[]]
effect_args = ['stretch', '{:f}'.format(factor), '{:f}'.format(window)]
self.effects.extend(effect_args)
self.effects_log.append('stretch')
return self |
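A hedged usage sketch, assuming this method lives on a pysox-style Transformer whose chained effects are rendered by build():

import sox  # assumed: the pysox package

tfm = sox.Transformer()
tfm.stretch(1.1)  # ~10% longer; for larger factors prefer tempo()
tfm.build('input.wav', 'output.wav')  # file paths are illustrative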
def get_allowed_geometries(layer_purpose_key):
"""Helper function to get all possible geometry
:param layer_purpose_key: A layer purpose key.
:type layer_purpose_key: str
:returns: List of all allowed geometries.
:rtype: list
"""
preferred_order = [
'point',
'line',
'polygon',
'raster'
]
allowed_geometries = set()
all_layer_type = []
if layer_purpose_key == layer_purpose_hazard['key']:
all_layer_type = hazard_all
elif layer_purpose_key == layer_purpose_exposure['key']:
all_layer_type = exposure_all
for layer in all_layer_type:
for allowed_geometry in layer['allowed_geometries']:
allowed_geometries.add(allowed_geometry)
allowed_geometries = list(allowed_geometries)
allowed_geometries_definition = []
for allowed_geometry in allowed_geometries:
allowed_geometries_definition.append(definition(allowed_geometry))
# Adapted from http://stackoverflow.com/a/15650556/1198772
order_dict = {color: index for index, color in enumerate(preferred_order)}
allowed_geometries_definition.sort(key=lambda x: order_dict[x["key"]])
return allowed_geometries_definition | def function[get_allowed_geometries, parameter[layer_purpose_key]]:
constant[Helper function to get all possible geometry
:param layer_purpose_key: A layer purpose key.
:type layer_purpose_key: str
:returns: List of all allowed geometries.
:rtype: list
]
variable[preferred_order] assign[=] list[[<ast.Constant object at 0x7da1b0c3e9e0>, <ast.Constant object at 0x7da1b0c3f5e0>, <ast.Constant object at 0x7da1b0c3c850>, <ast.Constant object at 0x7da1b0c3fbb0>]]
variable[allowed_geometries] assign[=] call[name[set], parameter[]]
variable[all_layer_type] assign[=] list[[]]
if compare[name[layer_purpose_key] equal[==] call[name[layer_purpose_hazard]][constant[key]]] begin[:]
variable[all_layer_type] assign[=] name[hazard_all]
for taget[name[layer]] in starred[name[all_layer_type]] begin[:]
for taget[name[allowed_geometry]] in starred[call[name[layer]][constant[allowed_geometries]]] begin[:]
call[name[allowed_geometries].add, parameter[name[allowed_geometry]]]
variable[allowed_geometries] assign[=] call[name[list], parameter[name[allowed_geometries]]]
variable[allowed_geometries_definition] assign[=] list[[]]
for taget[name[allowed_geometry]] in starred[name[allowed_geometries]] begin[:]
call[name[allowed_geometries_definition].append, parameter[call[name[definition], parameter[name[allowed_geometry]]]]]
variable[order_dict] assign[=] <ast.DictComp object at 0x7da1b0c3f460>
call[name[allowed_geometries_definition].sort, parameter[]]
return[name[allowed_geometries_definition]] | keyword[def] identifier[get_allowed_geometries] ( identifier[layer_purpose_key] ):
literal[string]
identifier[preferred_order] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string]
]
identifier[allowed_geometries] = identifier[set] ()
identifier[all_layer_type] =[]
keyword[if] identifier[layer_purpose_key] == identifier[layer_purpose_hazard] [ literal[string] ]:
identifier[all_layer_type] = identifier[hazard_all]
keyword[elif] identifier[layer_purpose_key] == identifier[layer_purpose_exposure] [ literal[string] ]:
identifier[all_layer_type] = identifier[exposure_all]
keyword[for] identifier[layer] keyword[in] identifier[all_layer_type] :
keyword[for] identifier[allowed_geometry] keyword[in] identifier[layer] [ literal[string] ]:
identifier[allowed_geometries] . identifier[add] ( identifier[allowed_geometry] )
identifier[allowed_geometries] = identifier[list] ( identifier[allowed_geometries] )
identifier[allowed_geometries_definition] =[]
keyword[for] identifier[allowed_geometry] keyword[in] identifier[allowed_geometries] :
identifier[allowed_geometries_definition] . identifier[append] ( identifier[definition] ( identifier[allowed_geometry] ))
identifier[order_dict] ={ identifier[color] : identifier[index] keyword[for] identifier[index] , identifier[color] keyword[in] identifier[enumerate] ( identifier[preferred_order] )}
identifier[allowed_geometries_definition] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[x] : identifier[order_dict] [ identifier[x] [ literal[string] ]])
keyword[return] identifier[allowed_geometries_definition] | def get_allowed_geometries(layer_purpose_key):
"""Helper function to get all possible geometry
:param layer_purpose_key: A layer purpose key.
:type layer_purpose_key: str
:returns: List of all allowed geometries.
:rtype: list
"""
preferred_order = ['point', 'line', 'polygon', 'raster']
allowed_geometries = set()
all_layer_type = []
if layer_purpose_key == layer_purpose_hazard['key']:
all_layer_type = hazard_all # depends on [control=['if'], data=[]]
elif layer_purpose_key == layer_purpose_exposure['key']:
all_layer_type = exposure_all # depends on [control=['if'], data=[]]
for layer in all_layer_type:
for allowed_geometry in layer['allowed_geometries']:
allowed_geometries.add(allowed_geometry) # depends on [control=['for'], data=['allowed_geometry']] # depends on [control=['for'], data=['layer']]
allowed_geometries = list(allowed_geometries)
allowed_geometries_definition = []
for allowed_geometry in allowed_geometries:
allowed_geometries_definition.append(definition(allowed_geometry)) # depends on [control=['for'], data=['allowed_geometry']]
# Adapted from http://stackoverflow.com/a/15650556/1198772
order_dict = {color: index for (index, color) in enumerate(preferred_order)}
allowed_geometries_definition.sort(key=lambda x: order_dict[x['key']])
return allowed_geometries_definition |
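The closing sort is a reusable idiom for ordering records by an external preference list; a minimal standalone sketch:

preferred = ['point', 'line', 'polygon', 'raster']
rank = {key: index for index, key in enumerate(preferred)}
records = [{'key': 'raster'}, {'key': 'point'}]
records.sort(key=lambda record: rank[record['key']])
assert [record['key'] for record in records] == ['point', 'raster']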
def invokeCmd(rh):
"""
Invoke the command in the virtual machine's operating system.
Input:
Request Handle with the following properties:
function - 'CMDVM'
subfunction - 'CMD'
userid - userid of the virtual machine
parms['cmd'] - Command to send
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
"""
rh.printSysLog("Enter cmdVM.invokeCmd, userid: " + rh.userid)
results = execCmdThruIUCV(rh, rh.userid, rh.parms['cmd'])
if results['overallRC'] == 0:
rh.printLn("N", results['response'])
else:
rh.printLn("ES", results['response'])
rh.updateResults(results)
rh.printSysLog("Exit cmdVM.invokeCmd, rc: " + str(results['overallRC']))
return results['overallRC'] | def function[invokeCmd, parameter[rh]]:
constant[
Invoke the command in the virtual machine's operating system.
Input:
Request Handle with the following properties:
function - 'CMDVM'
subfunction - 'CMD'
userid - userid of the virtual machine
parms['cmd'] - Command to send
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
]
call[name[rh].printSysLog, parameter[binary_operation[constant[Enter cmdVM.invokeCmd, userid: ] + name[rh].userid]]]
variable[results] assign[=] call[name[execCmdThruIUCV], parameter[name[rh], name[rh].userid, call[name[rh].parms][constant[cmd]]]]
if compare[call[name[results]][constant[overallRC]] equal[==] constant[0]] begin[:]
call[name[rh].printLn, parameter[constant[N], call[name[results]][constant[response]]]]
call[name[rh].printSysLog, parameter[binary_operation[constant[Exit cmdVM.invokeCmd, rc: ] + call[name[str], parameter[call[name[results]][constant[overallRC]]]]]]]
return[call[name[results]][constant[overallRC]]] | keyword[def] identifier[invokeCmd] ( identifier[rh] ):
literal[string]
identifier[rh] . identifier[printSysLog] ( literal[string] + identifier[rh] . identifier[userid] )
identifier[results] = identifier[execCmdThruIUCV] ( identifier[rh] , identifier[rh] . identifier[userid] , identifier[rh] . identifier[parms] [ literal[string] ])
keyword[if] identifier[results] [ literal[string] ]== literal[int] :
identifier[rh] . identifier[printLn] ( literal[string] , identifier[results] [ literal[string] ])
keyword[else] :
identifier[rh] . identifier[printLn] ( literal[string] , identifier[results] [ literal[string] ])
identifier[rh] . identifier[updateResults] ( identifier[results] )
identifier[rh] . identifier[printSysLog] ( literal[string] + identifier[str] ( identifier[results] [ literal[string] ]))
keyword[return] identifier[results] [ literal[string] ] | def invokeCmd(rh):
"""
Invoke the command in the virtual machine's operating system.
Input:
Request Handle with the following properties:
function - 'CMDVM'
subfunction - 'CMD'
userid - userid of the virtual machine
parms['cmd'] - Command to send
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
"""
rh.printSysLog('Enter cmdVM.invokeCmd, userid: ' + rh.userid)
results = execCmdThruIUCV(rh, rh.userid, rh.parms['cmd'])
if results['overallRC'] == 0:
rh.printLn('N', results['response']) # depends on [control=['if'], data=[]]
else:
rh.printLn('ES', results['response'])
rh.updateResults(results)
rh.printSysLog('Exit cmdVM.invokeCmd, rc: ' + str(results['overallRC']))
return results['overallRC'] |
def _pre_install():
'''Initialize the parse table at install time'''
# Generate the parsetab.dat file at setup time
dat = join(setup_dir, 'src', 'hcl', 'parsetab.dat')
if exists(dat):
os.unlink(dat)
sys.path.insert(0, join(setup_dir, 'src'))
import hcl
from hcl.parser import HclParser
parser = HclParser() | def function[_pre_install, parameter[]]:
constant[Initialize the parse table at install time]
variable[dat] assign[=] call[name[join], parameter[name[setup_dir], constant[src], constant[hcl], constant[parsetab.dat]]]
if call[name[exists], parameter[name[dat]]] begin[:]
call[name[os].unlink, parameter[name[dat]]]
call[name[sys].path.insert, parameter[constant[0], call[name[join], parameter[name[setup_dir], constant[src]]]]]
import module[hcl]
from relative_module[hcl.parser] import module[HclParser]
variable[parser] assign[=] call[name[HclParser], parameter[]] | keyword[def] identifier[_pre_install] ():
literal[string]
identifier[dat] = identifier[join] ( identifier[setup_dir] , literal[string] , literal[string] , literal[string] )
keyword[if] identifier[exists] ( identifier[dat] ):
identifier[os] . identifier[unlink] ( identifier[dat] )
identifier[sys] . identifier[path] . identifier[insert] ( literal[int] , identifier[join] ( identifier[setup_dir] , literal[string] ))
keyword[import] identifier[hcl]
keyword[from] identifier[hcl] . identifier[parser] keyword[import] identifier[HclParser]
identifier[parser] = identifier[HclParser] () | def _pre_install():
"""Initialize the parse table at install time"""
# Generate the parsetab.dat file at setup time
dat = join(setup_dir, 'src', 'hcl', 'parsetab.dat')
if exists(dat):
os.unlink(dat) # depends on [control=['if'], data=[]]
sys.path.insert(0, join(setup_dir, 'src'))
import hcl
from hcl.parser import HclParser
parser = HclParser() |
def Glide(self):
"""Return the snapshot in glide.lock form
"""
dict = {
"hash": "???",
"updated": str(datetime.datetime.now(tz=pytz.utc).isoformat()),
"imports": [],
}
decomposer = ImportPathsDecomposerBuilder().buildLocalDecomposer()
decomposer.decompose(self._packages.keys())
classes = decomposer.classes()
for ipp in classes:
dep = {
"name": ipp,
"version": str(self._packages[classes[ipp][0]])
}
if len(classes[ipp]) > 1 or classes[ipp][0] != ipp:
dep["subpackages"] = map(lambda l: l[len(ipp)+1:], classes[ipp])
dict["imports"].append(dep)
return yaml.dump(dict, default_flow_style=False) | def function[Glide, parameter[self]]:
constant[Return the snapshot in glide.lock form
]
variable[dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b2392200>, <ast.Constant object at 0x7da1b2390040>, <ast.Constant object at 0x7da1b23900d0>], [<ast.Constant object at 0x7da1b2390f10>, <ast.Call object at 0x7da1b2390e20>, <ast.List object at 0x7da1b2393a00>]]
variable[decomposer] assign[=] call[call[name[ImportPathsDecomposerBuilder], parameter[]].buildLocalDecomposer, parameter[]]
call[name[decomposer].decompose, parameter[call[name[self]._packages.keys, parameter[]]]]
variable[classes] assign[=] call[name[decomposer].classes, parameter[]]
for taget[name[ipp]] in starred[name[classes]] begin[:]
variable[dep] assign[=] dictionary[[<ast.Constant object at 0x7da1b2391cc0>, <ast.Constant object at 0x7da1b2392350>], [<ast.Name object at 0x7da1b2391600>, <ast.Call object at 0x7da1b2391210>]]
if <ast.BoolOp object at 0x7da1b23922c0> begin[:]
call[name[dep]][constant[subpackages]] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da1b23d4d60>, call[name[classes]][name[ipp]]]]
call[call[name[dict]][constant[imports]].append, parameter[name[dep]]]
return[call[name[yaml].dump, parameter[name[dict]]]] | keyword[def] identifier[Glide] ( identifier[self] ):
literal[string]
identifier[dict] ={
literal[string] : literal[string] ,
literal[string] : identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ( identifier[tz] = identifier[pytz] . identifier[utc] ). identifier[isoformat] ()),
literal[string] :[],
}
identifier[decomposer] = identifier[ImportPathsDecomposerBuilder] (). identifier[buildLocalDecomposer] ()
identifier[decomposer] . identifier[decompose] ( identifier[self] . identifier[_packages] . identifier[keys] ())
identifier[classes] = identifier[decomposer] . identifier[classes] ()
keyword[for] identifier[ipp] keyword[in] identifier[classes] :
identifier[dep] ={
literal[string] : identifier[ipp] ,
literal[string] : identifier[str] ( identifier[self] . identifier[_packages] [ identifier[classes] [ identifier[ipp] ][ literal[int] ]])
}
keyword[if] identifier[len] ( identifier[classes] [ identifier[ipp] ])> literal[int] keyword[or] identifier[classes] [ identifier[ipp] ][ literal[int] ]!= identifier[ipp] :
identifier[dep] [ literal[string] ]= identifier[map] ( keyword[lambda] identifier[l] : identifier[l] [ identifier[len] ( identifier[ipp] )+ literal[int] :], identifier[classes] [ identifier[ipp] ])
identifier[dict] [ literal[string] ]. identifier[append] ( identifier[dep] )
keyword[return] identifier[yaml] . identifier[dump] ( identifier[dict] , identifier[default_flow_style] = keyword[False] ) | def Glide(self):
"""Return the snapshot in glide.lock form
"""
dict = {'hash': '???', 'updated': str(datetime.datetime.now(tz=pytz.utc).isoformat()), 'imports': []}
decomposer = ImportPathsDecomposerBuilder().buildLocalDecomposer()
decomposer.decompose(self._packages.keys())
classes = decomposer.classes()
for ipp in classes:
dep = {'name': ipp, 'version': str(self._packages[classes[ipp][0]])}
if len(classes[ipp]) > 1 or classes[ipp][0] != ipp:
dep['subpackages'] = map(lambda l: l[len(ipp) + 1:], classes[ipp]) # depends on [control=['if'], data=[]]
dict['imports'].append(dep) # depends on [control=['for'], data=['ipp']]
return yaml.dump(dict, default_flow_style=False) |
def inasafe_place_value_name(number, feature, parent):
"""Given a number, it will return the place value name.
For instance:
* inasafe_place_value_name(10) -> Ten
* inasafe_place_value_name(1700) -> Thousand
It needs to be used with inasafe_place_value_coefficient.
"""
_ = feature, parent # NOQA
if number is None:
return None
rounded_number = round_affected_number(
number,
use_rounding=True,
use_population_rounding=True
)
value, unit = denomination(rounded_number, 1000)
if not unit:
return None
else:
return unit['name'] | def function[inasafe_place_value_name, parameter[number, feature, parent]]:
constant[Given a number, it will return the place value name.
For instance:
* inasafe_place_value_name(10) -> Ten
* inasafe_place_value_name(1700) -> Thousand
It needs to be used with inasafe_place_value_coefficient.
]
variable[_] assign[=] tuple[[<ast.Name object at 0x7da1b0c363b0>, <ast.Name object at 0x7da1b0c35810>]]
if compare[name[number] is constant[None]] begin[:]
return[constant[None]]
variable[rounded_number] assign[=] call[name[round_affected_number], parameter[name[number]]]
<ast.Tuple object at 0x7da1b0c0d780> assign[=] call[name[denomination], parameter[name[rounded_number], constant[1000]]]
if <ast.UnaryOp object at 0x7da1b0c0fd90> begin[:]
return[constant[None]] | keyword[def] identifier[inasafe_place_value_name] ( identifier[number] , identifier[feature] , identifier[parent] ):
literal[string]
identifier[_] = identifier[feature] , identifier[parent]
keyword[if] identifier[number] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[rounded_number] = identifier[round_affected_number] (
identifier[number] ,
identifier[use_rounding] = keyword[True] ,
identifier[use_population_rounding] = keyword[True]
)
identifier[value] , identifier[unit] = identifier[denomination] ( identifier[rounded_number] , literal[int] )
keyword[if] keyword[not] identifier[unit] :
keyword[return] keyword[None]
keyword[else] :
keyword[return] identifier[unit] [ literal[string] ] | def inasafe_place_value_name(number, feature, parent):
"""Given a number, it will return the place value name.
For instance:
* inasafe_place_value_name(10) -> Ten
* inasafe_place_value_name(1700) -> Thousand
It needs to be used with inasafe_place_value_coefficient.
"""
_ = (feature, parent) # NOQA
if number is None:
return None # depends on [control=['if'], data=[]]
rounded_number = round_affected_number(number, use_rounding=True, use_population_rounding=True)
(value, unit) = denomination(rounded_number, 1000)
if not unit:
return None # depends on [control=['if'], data=[]]
else:
return unit['name'] |
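A minimal standalone sketch of the place-value naming idea, independent of the QGIS expression machinery; the thresholds and names below stand in for what denomination() provides:

def place_value_name(number):
    if number is None:
        return None
    for threshold, name in [(10 ** 12, 'Trillion'), (10 ** 9, 'Billion'),
                            (10 ** 6, 'Million'), (10 ** 3, 'Thousand'),
                            (10 ** 2, 'Hundred'), (10, 'Ten')]:
        if number >= threshold:
            return name
    return None

assert place_value_name(1700) == 'Thousand'
assert place_value_name(10) == 'Ten'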
def get_collection(self, service_name, collection_name, base_class=None):
"""
Returns a ``Collection`` **class** for a given service.
:param service_name: A string that specifies the name of the desired
service. Ex. ``sqs``, ``sns``, ``dynamodb``, etc.
:type service_name: string
:param collection_name: A string that specifies the name of the desired
class. Ex. ``QueueCollection``, ``NotificationCollection``,
``TableCollection``, etc.
:type collection_name: string
:param base_class: (Optional) The base class of the object. Prevents
"magically" loading the wrong class (one with a different base).
:type base_class: class
:rtype: <kotocore.collections.Collection subclass>
"""
try:
return self.cache.get_collection(
service_name,
collection_name,
base_class=base_class
)
except NotCached:
pass
# We didn't find it. Construct it.
new_class = self.collection_factory.construct_for(
service_name,
collection_name,
base_class=base_class
)
self.cache.set_collection(service_name, collection_name, new_class)
return new_class | def function[get_collection, parameter[self, service_name, collection_name, base_class]]:
constant[
Returns a ``Collection`` **class** for a given service.
:param service_name: A string that specifies the name of the desired
service. Ex. ``sqs``, ``sns``, ``dynamodb``, etc.
:type service_name: string
:param collection_name: A string that specifies the name of the desired
class. Ex. ``QueueCollection``, ``NotificationCollection``,
``TableCollection``, etc.
:type collection_name: string
:param base_class: (Optional) The base class of the object. Prevents
"magically" loading the wrong class (one with a different base).
:type base_class: class
:rtype: <kotocore.collections.Collection subclass>
]
<ast.Try object at 0x7da18f09f9d0>
variable[new_class] assign[=] call[name[self].collection_factory.construct_for, parameter[name[service_name], name[collection_name]]]
call[name[self].cache.set_collection, parameter[name[service_name], name[collection_name], name[new_class]]]
return[name[new_class]] | keyword[def] identifier[get_collection] ( identifier[self] , identifier[service_name] , identifier[collection_name] , identifier[base_class] = keyword[None] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[cache] . identifier[get_collection] (
identifier[service_name] ,
identifier[collection_name] ,
identifier[base_class] = identifier[base_class]
)
keyword[except] identifier[NotCached] :
keyword[pass]
identifier[new_class] = identifier[self] . identifier[collection_factory] . identifier[construct_for] (
identifier[service_name] ,
identifier[collection_name] ,
identifier[base_class] = identifier[base_class]
)
identifier[self] . identifier[cache] . identifier[set_collection] ( identifier[service_name] , identifier[collection_name] , identifier[new_class] )
keyword[return] identifier[new_class] | def get_collection(self, service_name, collection_name, base_class=None):
"""
Returns a ``Collection`` **class** for a given service.
:param service_name: A string that specifies the name of the desired
service. Ex. ``sqs``, ``sns``, ``dynamodb``, etc.
:type service_name: string
:param collection_name: A string that specifies the name of the desired
class. Ex. ``QueueCollection``, ``NotificationCollection``,
``TableCollection``, etc.
:type collection_name: string
:param base_class: (Optional) The base class of the object. Prevents
"magically" loading the wrong class (one with a different base).
:type base_class: class
:rtype: <kotocore.collections.Collection subclass>
"""
try:
return self.cache.get_collection(service_name, collection_name, base_class=base_class) # depends on [control=['try'], data=[]]
except NotCached:
pass # depends on [control=['except'], data=[]]
# We didn't find it. Construct it.
new_class = self.collection_factory.construct_for(service_name, collection_name, base_class=base_class)
self.cache.set_collection(service_name, collection_name, new_class)
return new_class |
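
get_collection is a cache-then-construct factory. A minimal sketch of the same pattern, with a plain dict standing in for kotocore's cache (the names below are illustrative, not kotocore's API):

_collection_cache = {}

def cached_collection(service_name, collection_name, construct):
    key = (service_name, collection_name)
    if key in _collection_cache:
        return _collection_cache[key]
    new_class = construct(service_name, collection_name)
    _collection_cache[key] = new_class
    return new_class

# Usage: the constructor runs once per (service, collection) pair.
QueueCollection = cached_collection('sqs', 'QueueCollection',
                                    lambda s, c: type(c, (), {}))
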
def IsWalletTransaction(self, tx):
"""
Verifies if a transaction belongs to the wallet.
Args:
tx (TransactionOutput):an instance of type neo.Core.TX.Transaction.TransactionOutput to verify.
Returns:
bool: True, if transaction belongs to wallet. False, if not.
"""
for key, contract in self._contracts.items():
for output in tx.outputs:
if output.ScriptHash.ToBytes() == contract.ScriptHash.ToBytes():
return True
for script in tx.scripts:
if script.VerificationScript:
if bytes(contract.Script) == script.VerificationScript:
return True
for watch_script_hash in self._watch_only:
for output in tx.outputs:
if output.ScriptHash == watch_script_hash:
return True
for script in tx.scripts:
if Crypto.ToScriptHash(script.VerificationScript, unhex=False) == watch_script_hash:
return True
return False | def function[IsWalletTransaction, parameter[self, tx]]:
constant[
Verifies if a transaction belongs to the wallet.
Args:
tx (TransactionOutput):an instance of type neo.Core.TX.Transaction.TransactionOutput to verify.
Returns:
bool: True, if transaction belongs to wallet. False, if not.
]
for taget[tuple[[<ast.Name object at 0x7da18bcc8c10>, <ast.Name object at 0x7da18bcca9b0>]]] in starred[call[name[self]._contracts.items, parameter[]]] begin[:]
for taget[name[output]] in starred[name[tx].outputs] begin[:]
if compare[call[name[output].ScriptHash.ToBytes, parameter[]] equal[==] call[name[contract].ScriptHash.ToBytes, parameter[]]] begin[:]
return[constant[True]]
for taget[name[script]] in starred[name[tx].scripts] begin[:]
if name[script].VerificationScript begin[:]
if compare[call[name[bytes], parameter[name[contract].Script]] equal[==] name[script].VerificationScript] begin[:]
return[constant[True]]
for taget[name[watch_script_hash]] in starred[name[self]._watch_only] begin[:]
for taget[name[output]] in starred[name[tx].outputs] begin[:]
if compare[name[output].ScriptHash equal[==] name[watch_script_hash]] begin[:]
return[constant[True]]
for taget[name[script]] in starred[name[tx].scripts] begin[:]
if compare[call[name[Crypto].ToScriptHash, parameter[name[script].VerificationScript]] equal[==] name[watch_script_hash]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[IsWalletTransaction] ( identifier[self] , identifier[tx] ):
literal[string]
keyword[for] identifier[key] , identifier[contract] keyword[in] identifier[self] . identifier[_contracts] . identifier[items] ():
keyword[for] identifier[output] keyword[in] identifier[tx] . identifier[outputs] :
keyword[if] identifier[output] . identifier[ScriptHash] . identifier[ToBytes] ()== identifier[contract] . identifier[ScriptHash] . identifier[ToBytes] ():
keyword[return] keyword[True]
keyword[for] identifier[script] keyword[in] identifier[tx] . identifier[scripts] :
keyword[if] identifier[script] . identifier[VerificationScript] :
keyword[if] identifier[bytes] ( identifier[contract] . identifier[Script] )== identifier[script] . identifier[VerificationScript] :
keyword[return] keyword[True]
keyword[for] identifier[watch_script_hash] keyword[in] identifier[self] . identifier[_watch_only] :
keyword[for] identifier[output] keyword[in] identifier[tx] . identifier[outputs] :
keyword[if] identifier[output] . identifier[ScriptHash] == identifier[watch_script_hash] :
keyword[return] keyword[True]
keyword[for] identifier[script] keyword[in] identifier[tx] . identifier[scripts] :
keyword[if] identifier[Crypto] . identifier[ToScriptHash] ( identifier[script] . identifier[VerificationScript] , identifier[unhex] = keyword[False] )== identifier[watch_script_hash] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def IsWalletTransaction(self, tx):
"""
Verifies if a transaction belongs to the wallet.
Args:
tx (TransactionOutput):an instance of type neo.Core.TX.Transaction.TransactionOutput to verify.
Returns:
bool: True, if transaction belongs to wallet. False, if not.
"""
for (key, contract) in self._contracts.items():
for output in tx.outputs:
if output.ScriptHash.ToBytes() == contract.ScriptHash.ToBytes():
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['output']]
for script in tx.scripts:
if script.VerificationScript:
if bytes(contract.Script) == script.VerificationScript:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['script']] # depends on [control=['for'], data=[]]
for watch_script_hash in self._watch_only:
for output in tx.outputs:
if output.ScriptHash == watch_script_hash:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['output']]
for script in tx.scripts:
if Crypto.ToScriptHash(script.VerificationScript, unhex=False) == watch_script_hash:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['script']] # depends on [control=['for'], data=['watch_script_hash']]
return False |
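
The scan above is linear in contracts times outputs. A hedged sketch of just the output checks, with the contract script hashes precomputed into a set; it assumes, as the code above does, that contract hashes expose ToBytes() and that watch-only hashes compare directly against output.ScriptHash:

def touches_wallet_outputs(tx, contracts, watch_only):
    # Covers only the output checks, not the verification scripts.
    contract_hashes = {c.ScriptHash.ToBytes() for c in contracts}
    return any(o.ScriptHash.ToBytes() in contract_hashes
               or o.ScriptHash in watch_only
               for o in tx.outputs)
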
def _iterate(self, delay=None, fromqt=False):
"""See twisted.internet.interfaces.IReactorCore.iterate.
"""
self.runUntilCurrent()
self.doIteration(delay, fromqt) | def function[_iterate, parameter[self, delay, fromqt]]:
constant[See twisted.internet.interfaces.IReactorCore.iterate.
]
call[name[self].runUntilCurrent, parameter[]]
call[name[self].doIteration, parameter[name[delay], name[fromqt]]] | keyword[def] identifier[_iterate] ( identifier[self] , identifier[delay] = keyword[None] , identifier[fromqt] = keyword[False] ):
literal[string]
identifier[self] . identifier[runUntilCurrent] ()
identifier[self] . identifier[doIteration] ( identifier[delay] , identifier[fromqt] ) | def _iterate(self, delay=None, fromqt=False):
"""See twisted.internet.interfaces.IReactorCore.iterate.
"""
self.runUntilCurrent()
self.doIteration(delay, fromqt) |
def get_record(self, xml_file):
""" Reads a xml file in JATS format and returns
a xml string in marc format """
self.document = parse(xml_file)
if get_value_in_tag(self.document, "meta"):
raise ApsPackageXMLError("The XML format of %s is not correct"
% (xml_file,))
page_count = self._get_page_count()
rec = create_record()
if page_count:
record_add_field(rec, '300', subfields=[('a', page_count)])
pacscodes = self._get_pacscodes()
for pacscode in pacscodes:
record_add_field(rec, '084', subfields=[('2', 'PACS'),
('a', pacscode)])
subject = self._get_subject()
if subject:
record_add_field(rec, '650', ind1='1', ind2='7', subfields=[('2', 'APS'),
('a', subject)])
keywords = self._get_keywords()
if keywords:
record_add_field(rec, '653', ind1='1', subfields=[('a', ', '.join(keywords)),
('9', 'author')])
title, subtitle, _ = self._get_title()
subfields = []
if subtitle:
subfields.append(('b', subtitle))
if title:
subfields.append(('a', title))
record_add_field(rec, '245', subfields=subfields)
journal, volume, issue, year, start_date, doi,\
article_id, _, _ = self._get_publication_information()
if start_date:
record_add_field(rec, '260', subfields=[('c', start_date),
('t', 'published')])
if doi:
record_add_field(rec, '024', ind1='7', subfields=[('a', doi),
('2', 'DOI')])
abstract = self._get_abstract()
if abstract:
record_add_field(rec, '520', subfields=[('a', abstract),
('9', 'APS')])
license, license_type, license_url = self._get_license()
subfields = []
if license:
subfields.append(('a', license))
if license_url:
subfields.append(('u', license_url))
if subfields:
record_add_field(rec, '540', subfields=subfields)
    c_holder, c_year, c_statement = self._get_copyright()
if c_holder and c_year:
record_add_field(rec, '542', subfields=[('d', c_holder),
('g', c_year),
('e', 'Article')])
elif c_statement:
record_add_field(rec, '542', subfields=[('f', c_statement),
('e', 'Article')])
record_add_field(rec, '773', subfields=[('p', journal),
('v', volume),
('n', issue),
('y', year),
('c', article_id)])
record_add_field(rec, '980', subfields=[('a', 'HEP')])
record_add_field(rec, '980', subfields=[('a', 'Citeable')])
record_add_field(rec, '980', subfields=[('a', 'Published')])
self._add_authors(rec)
self._add_references(rec)
try:
return record_xml_output(rec)
except UnicodeDecodeError:
sys.stderr.write("""Found a bad char in the file
for the article """ + doi)
return "" | def function[get_record, parameter[self, xml_file]]:
    constant[ Reads an XML file in JATS format and returns
    an XML string in MARC format ]
name[self].document assign[=] call[name[parse], parameter[name[xml_file]]]
if call[name[get_value_in_tag], parameter[name[self].document, constant[meta]]] begin[:]
<ast.Raise object at 0x7da1afedb5e0>
variable[page_count] assign[=] call[name[self]._get_page_count, parameter[]]
variable[rec] assign[=] call[name[create_record], parameter[]]
if name[page_count] begin[:]
call[name[record_add_field], parameter[name[rec], constant[300]]]
variable[pacscodes] assign[=] call[name[self]._get_pacscodes, parameter[]]
for taget[name[pacscode]] in starred[name[pacscodes]] begin[:]
call[name[record_add_field], parameter[name[rec], constant[084]]]
variable[subject] assign[=] call[name[self]._get_subject, parameter[]]
if name[subject] begin[:]
call[name[record_add_field], parameter[name[rec], constant[650]]]
variable[keywords] assign[=] call[name[self]._get_keywords, parameter[]]
if name[keywords] begin[:]
call[name[record_add_field], parameter[name[rec], constant[653]]]
<ast.Tuple object at 0x7da1afeda290> assign[=] call[name[self]._get_title, parameter[]]
variable[subfields] assign[=] list[[]]
if name[subtitle] begin[:]
call[name[subfields].append, parameter[tuple[[<ast.Constant object at 0x7da1afed9f60>, <ast.Name object at 0x7da1afed9f30>]]]]
if name[title] begin[:]
call[name[subfields].append, parameter[tuple[[<ast.Constant object at 0x7da1afed9d80>, <ast.Name object at 0x7da1afed9d50>]]]]
call[name[record_add_field], parameter[name[rec], constant[245]]]
<ast.Tuple object at 0x7da1afed9b40> assign[=] call[name[self]._get_publication_information, parameter[]]
if name[start_date] begin[:]
call[name[record_add_field], parameter[name[rec], constant[260]]]
if name[doi] begin[:]
call[name[record_add_field], parameter[name[rec], constant[024]]]
variable[abstract] assign[=] call[name[self]._get_abstract, parameter[]]
if name[abstract] begin[:]
call[name[record_add_field], parameter[name[rec], constant[520]]]
<ast.Tuple object at 0x7da1afed8e50> assign[=] call[name[self]._get_license, parameter[]]
variable[subfields] assign[=] list[[]]
if name[license] begin[:]
call[name[subfields].append, parameter[tuple[[<ast.Constant object at 0x7da1afed8b20>, <ast.Name object at 0x7da1afed8af0>]]]]
if name[license_url] begin[:]
call[name[subfields].append, parameter[tuple[[<ast.Constant object at 0x7da1afed8940>, <ast.Name object at 0x7da1afed8910>]]]]
if name[subfields] begin[:]
call[name[record_add_field], parameter[name[rec], constant[540]]]
    <ast.Tuple object at 0x7da1afed86a0> assign[=] call[name[self]._get_copyright, parameter[]]
if <ast.BoolOp object at 0x7da1afed83a0> begin[:]
call[name[record_add_field], parameter[name[rec], constant[542]]]
call[name[record_add_field], parameter[name[rec], constant[773]]]
call[name[record_add_field], parameter[name[rec], constant[980]]]
call[name[record_add_field], parameter[name[rec], constant[980]]]
call[name[record_add_field], parameter[name[rec], constant[980]]]
call[name[self]._add_authors, parameter[name[rec]]]
call[name[self]._add_references, parameter[name[rec]]]
<ast.Try object at 0x7da1afea95a0> | keyword[def] identifier[get_record] ( identifier[self] , identifier[xml_file] ):
literal[string]
identifier[self] . identifier[document] = identifier[parse] ( identifier[xml_file] )
keyword[if] identifier[get_value_in_tag] ( identifier[self] . identifier[document] , literal[string] ):
keyword[raise] identifier[ApsPackageXMLError] ( literal[string]
%( identifier[xml_file] ,))
identifier[page_count] = identifier[self] . identifier[_get_page_count] ()
identifier[rec] = identifier[create_record] ()
keyword[if] identifier[page_count] :
identifier[record_add_field] ( identifier[rec] , literal[string] , identifier[subfields] =[( literal[string] , identifier[page_count] )])
identifier[pacscodes] = identifier[self] . identifier[_get_pacscodes] ()
keyword[for] identifier[pacscode] keyword[in] identifier[pacscodes] :
identifier[record_add_field] ( identifier[rec] , literal[string] , identifier[subfields] =[( literal[string] , literal[string] ),
( literal[string] , identifier[pacscode] )])
identifier[subject] = identifier[self] . identifier[_get_subject] ()
keyword[if] identifier[subject] :
identifier[record_add_field] ( identifier[rec] , literal[string] , identifier[ind1] = literal[string] , identifier[ind2] = literal[string] , identifier[subfields] =[( literal[string] , literal[string] ),
( literal[string] , identifier[subject] )])
identifier[keywords] = identifier[self] . identifier[_get_keywords] ()
keyword[if] identifier[keywords] :
identifier[record_add_field] ( identifier[rec] , literal[string] , identifier[ind1] = literal[string] , identifier[subfields] =[( literal[string] , literal[string] . identifier[join] ( identifier[keywords] )),
( literal[string] , literal[string] )])
identifier[title] , identifier[subtitle] , identifier[_] = identifier[self] . identifier[_get_title] ()
identifier[subfields] =[]
keyword[if] identifier[subtitle] :
identifier[subfields] . identifier[append] (( literal[string] , identifier[subtitle] ))
keyword[if] identifier[title] :
identifier[subfields] . identifier[append] (( literal[string] , identifier[title] ))
identifier[record_add_field] ( identifier[rec] , literal[string] , identifier[subfields] = identifier[subfields] )
identifier[journal] , identifier[volume] , identifier[issue] , identifier[year] , identifier[start_date] , identifier[doi] , identifier[article_id] , identifier[_] , identifier[_] = identifier[self] . identifier[_get_publication_information] ()
keyword[if] identifier[start_date] :
identifier[record_add_field] ( identifier[rec] , literal[string] , identifier[subfields] =[( literal[string] , identifier[start_date] ),
( literal[string] , literal[string] )])
keyword[if] identifier[doi] :
identifier[record_add_field] ( identifier[rec] , literal[string] , identifier[ind1] = literal[string] , identifier[subfields] =[( literal[string] , identifier[doi] ),
( literal[string] , literal[string] )])
identifier[abstract] = identifier[self] . identifier[_get_abstract] ()
keyword[if] identifier[abstract] :
identifier[record_add_field] ( identifier[rec] , literal[string] , identifier[subfields] =[( literal[string] , identifier[abstract] ),
( literal[string] , literal[string] )])
identifier[license] , identifier[license_type] , identifier[license_url] = identifier[self] . identifier[_get_license] ()
identifier[subfields] =[]
keyword[if] identifier[license] :
identifier[subfields] . identifier[append] (( literal[string] , identifier[license] ))
keyword[if] identifier[license_url] :
identifier[subfields] . identifier[append] (( literal[string] , identifier[license_url] ))
keyword[if] identifier[subfields] :
identifier[record_add_field] ( identifier[rec] , literal[string] , identifier[subfields] = identifier[subfields] )
    identifier[c_holder] , identifier[c_year] , identifier[c_statement] = identifier[self] . identifier[_get_copyright] ()
keyword[if] identifier[c_holder] keyword[and] identifier[c_year] :
identifier[record_add_field] ( identifier[rec] , literal[string] , identifier[subfields] =[( literal[string] , identifier[c_holder] ),
( literal[string] , identifier[c_year] ),
( literal[string] , literal[string] )])
keyword[elif] identifier[c_statement] :
identifier[record_add_field] ( identifier[rec] , literal[string] , identifier[subfields] =[( literal[string] , identifier[c_statement] ),
( literal[string] , literal[string] )])
identifier[record_add_field] ( identifier[rec] , literal[string] , identifier[subfields] =[( literal[string] , identifier[journal] ),
( literal[string] , identifier[volume] ),
( literal[string] , identifier[issue] ),
( literal[string] , identifier[year] ),
( literal[string] , identifier[article_id] )])
identifier[record_add_field] ( identifier[rec] , literal[string] , identifier[subfields] =[( literal[string] , literal[string] )])
identifier[record_add_field] ( identifier[rec] , literal[string] , identifier[subfields] =[( literal[string] , literal[string] )])
identifier[record_add_field] ( identifier[rec] , literal[string] , identifier[subfields] =[( literal[string] , literal[string] )])
identifier[self] . identifier[_add_authors] ( identifier[rec] )
identifier[self] . identifier[_add_references] ( identifier[rec] )
keyword[try] :
keyword[return] identifier[record_xml_output] ( identifier[rec] )
keyword[except] identifier[UnicodeDecodeError] :
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] + identifier[doi] )
keyword[return] literal[string] | def get_record(self, xml_file):
""" Reads a xml file in JATS format and returns
a xml string in marc format """
self.document = parse(xml_file)
if get_value_in_tag(self.document, 'meta'):
raise ApsPackageXMLError('The XML format of %s is not correct' % (xml_file,)) # depends on [control=['if'], data=[]]
page_count = self._get_page_count()
rec = create_record()
if page_count:
record_add_field(rec, '300', subfields=[('a', page_count)]) # depends on [control=['if'], data=[]]
pacscodes = self._get_pacscodes()
for pacscode in pacscodes:
record_add_field(rec, '084', subfields=[('2', 'PACS'), ('a', pacscode)]) # depends on [control=['for'], data=['pacscode']]
subject = self._get_subject()
if subject:
record_add_field(rec, '650', ind1='1', ind2='7', subfields=[('2', 'APS'), ('a', subject)]) # depends on [control=['if'], data=[]]
keywords = self._get_keywords()
if keywords:
record_add_field(rec, '653', ind1='1', subfields=[('a', ', '.join(keywords)), ('9', 'author')]) # depends on [control=['if'], data=[]]
(title, subtitle, _) = self._get_title()
subfields = []
if subtitle:
subfields.append(('b', subtitle)) # depends on [control=['if'], data=[]]
if title:
subfields.append(('a', title))
record_add_field(rec, '245', subfields=subfields) # depends on [control=['if'], data=[]]
(journal, volume, issue, year, start_date, doi, article_id, _, _) = self._get_publication_information()
if start_date:
record_add_field(rec, '260', subfields=[('c', start_date), ('t', 'published')]) # depends on [control=['if'], data=[]]
if doi:
record_add_field(rec, '024', ind1='7', subfields=[('a', doi), ('2', 'DOI')]) # depends on [control=['if'], data=[]]
abstract = self._get_abstract()
if abstract:
record_add_field(rec, '520', subfields=[('a', abstract), ('9', 'APS')]) # depends on [control=['if'], data=[]]
(license, license_type, license_url) = self._get_license()
subfields = []
if license:
subfields.append(('a', license)) # depends on [control=['if'], data=[]]
if license_url:
subfields.append(('u', license_url)) # depends on [control=['if'], data=[]]
if subfields:
record_add_field(rec, '540', subfields=subfields) # depends on [control=['if'], data=[]]
    (c_holder, c_year, c_statement) = self._get_copyright()
if c_holder and c_year:
record_add_field(rec, '542', subfields=[('d', c_holder), ('g', c_year), ('e', 'Article')]) # depends on [control=['if'], data=[]]
elif c_statement:
record_add_field(rec, '542', subfields=[('f', c_statement), ('e', 'Article')]) # depends on [control=['if'], data=[]]
record_add_field(rec, '773', subfields=[('p', journal), ('v', volume), ('n', issue), ('y', year), ('c', article_id)])
record_add_field(rec, '980', subfields=[('a', 'HEP')])
record_add_field(rec, '980', subfields=[('a', 'Citeable')])
record_add_field(rec, '980', subfields=[('a', 'Published')])
self._add_authors(rec)
self._add_references(rec)
try:
return record_xml_output(rec) # depends on [control=['try'], data=[]]
except UnicodeDecodeError:
sys.stderr.write('Found a bad char in the file\n for the article ' + doi)
return '' # depends on [control=['except'], data=[]] |
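
A short usage sketch that reuses only the bibrecord calls already exercised above, to show the shape of the records being built (the import path assumes Invenio 1.x):

from invenio.bibrecord import create_record, record_add_field, record_xml_output

rec = create_record()
record_add_field(rec, '245', subfields=[('a', 'A minimal title')])
record_add_field(rec, '980', subfields=[('a', 'HEP')])
print(record_xml_output(rec))
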
def create_userpass(self, username, password, policies, mount_point='userpass', **kwargs):
"""POST /auth/<mount point>/users/<username>
:param username:
:type username:
:param password:
:type password:
:param policies:
:type policies:
:param mount_point:
:type mount_point:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
# Users can have more than 1 policy. It is easier for the user to pass in the
# policies as a list so if they do, we need to convert to a , delimited string.
if isinstance(policies, (list, set, tuple)):
policies = ','.join(policies)
params = {
'password': password,
'policies': policies
}
params.update(kwargs)
return self._adapter.post('/v1/auth/{}/users/{}'.format(mount_point, username), json=params) | def function[create_userpass, parameter[self, username, password, policies, mount_point]]:
constant[POST /auth/<mount point>/users/<username>
:param username:
:type username:
:param password:
:type password:
:param policies:
:type policies:
:param mount_point:
:type mount_point:
:param kwargs:
:type kwargs:
:return:
:rtype:
]
if call[name[isinstance], parameter[name[policies], tuple[[<ast.Name object at 0x7da1b23592a0>, <ast.Name object at 0x7da1b2358df0>, <ast.Name object at 0x7da1b235a620>]]]] begin[:]
variable[policies] assign[=] call[constant[,].join, parameter[name[policies]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b23597e0>, <ast.Constant object at 0x7da1b235a350>], [<ast.Name object at 0x7da1b2359cc0>, <ast.Name object at 0x7da1b2358c40>]]
call[name[params].update, parameter[name[kwargs]]]
return[call[name[self]._adapter.post, parameter[call[constant[/v1/auth/{}/users/{}].format, parameter[name[mount_point], name[username]]]]]] | keyword[def] identifier[create_userpass] ( identifier[self] , identifier[username] , identifier[password] , identifier[policies] , identifier[mount_point] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[policies] ,( identifier[list] , identifier[set] , identifier[tuple] )):
identifier[policies] = literal[string] . identifier[join] ( identifier[policies] )
identifier[params] ={
literal[string] : identifier[password] ,
literal[string] : identifier[policies]
}
identifier[params] . identifier[update] ( identifier[kwargs] )
keyword[return] identifier[self] . identifier[_adapter] . identifier[post] ( literal[string] . identifier[format] ( identifier[mount_point] , identifier[username] ), identifier[json] = identifier[params] ) | def create_userpass(self, username, password, policies, mount_point='userpass', **kwargs):
"""POST /auth/<mount point>/users/<username>
:param username:
:type username:
:param password:
:type password:
:param policies:
:type policies:
:param mount_point:
:type mount_point:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
# Users can have more than 1 policy. It is easier for the user to pass in the
# policies as a list so if they do, we need to convert to a , delimited string.
if isinstance(policies, (list, set, tuple)):
policies = ','.join(policies) # depends on [control=['if'], data=[]]
params = {'password': password, 'policies': policies}
params.update(kwargs)
return self._adapter.post('/v1/auth/{}/users/{}'.format(mount_point, username), json=params) |
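
The only non-obvious step above is the policy normalisation; a self-contained sketch of it:

def normalise_policies(policies):
    # Vault expects a comma-delimited string; accept any common container.
    if isinstance(policies, (list, set, tuple)):
        return ','.join(policies)
    return policies

assert normalise_policies(['default', 'dev']) == 'default,dev'
assert normalise_policies('default') == 'default'
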
def _parse_value_pb(value_pb, field_type):
"""Convert a Value protobuf to cell data.
:type value_pb: :class:`~google.protobuf.struct_pb2.Value`
:param value_pb: protobuf to convert
:type field_type: :class:`~google.cloud.spanner_v1.proto.type_pb2.Type`
:param field_type: type code for the value
:rtype: varies on field_type
:returns: value extracted from value_pb
:raises ValueError: if unknown type is passed
"""
if value_pb.HasField("null_value"):
return None
if field_type.code == type_pb2.STRING:
result = value_pb.string_value
elif field_type.code == type_pb2.BYTES:
result = value_pb.string_value.encode("utf8")
elif field_type.code == type_pb2.BOOL:
result = value_pb.bool_value
elif field_type.code == type_pb2.INT64:
result = int(value_pb.string_value)
elif field_type.code == type_pb2.FLOAT64:
if value_pb.HasField("string_value"):
result = float(value_pb.string_value)
else:
result = value_pb.number_value
elif field_type.code == type_pb2.DATE:
result = _date_from_iso8601_date(value_pb.string_value)
elif field_type.code == type_pb2.TIMESTAMP:
DatetimeWithNanoseconds = datetime_helpers.DatetimeWithNanoseconds
result = DatetimeWithNanoseconds.from_rfc3339(value_pb.string_value)
elif field_type.code == type_pb2.ARRAY:
result = [
_parse_value_pb(item_pb, field_type.array_element_type)
for item_pb in value_pb.list_value.values
]
elif field_type.code == type_pb2.STRUCT:
result = [
_parse_value_pb(item_pb, field_type.struct_type.fields[i].type)
for (i, item_pb) in enumerate(value_pb.list_value.values)
]
else:
raise ValueError("Unknown type: %s" % (field_type,))
return result | def function[_parse_value_pb, parameter[value_pb, field_type]]:
constant[Convert a Value protobuf to cell data.
:type value_pb: :class:`~google.protobuf.struct_pb2.Value`
:param value_pb: protobuf to convert
:type field_type: :class:`~google.cloud.spanner_v1.proto.type_pb2.Type`
:param field_type: type code for the value
:rtype: varies on field_type
:returns: value extracted from value_pb
:raises ValueError: if unknown type is passed
]
if call[name[value_pb].HasField, parameter[constant[null_value]]] begin[:]
return[constant[None]]
if compare[name[field_type].code equal[==] name[type_pb2].STRING] begin[:]
variable[result] assign[=] name[value_pb].string_value
return[name[result]] | keyword[def] identifier[_parse_value_pb] ( identifier[value_pb] , identifier[field_type] ):
literal[string]
keyword[if] identifier[value_pb] . identifier[HasField] ( literal[string] ):
keyword[return] keyword[None]
keyword[if] identifier[field_type] . identifier[code] == identifier[type_pb2] . identifier[STRING] :
identifier[result] = identifier[value_pb] . identifier[string_value]
keyword[elif] identifier[field_type] . identifier[code] == identifier[type_pb2] . identifier[BYTES] :
identifier[result] = identifier[value_pb] . identifier[string_value] . identifier[encode] ( literal[string] )
keyword[elif] identifier[field_type] . identifier[code] == identifier[type_pb2] . identifier[BOOL] :
identifier[result] = identifier[value_pb] . identifier[bool_value]
keyword[elif] identifier[field_type] . identifier[code] == identifier[type_pb2] . identifier[INT64] :
identifier[result] = identifier[int] ( identifier[value_pb] . identifier[string_value] )
keyword[elif] identifier[field_type] . identifier[code] == identifier[type_pb2] . identifier[FLOAT64] :
keyword[if] identifier[value_pb] . identifier[HasField] ( literal[string] ):
identifier[result] = identifier[float] ( identifier[value_pb] . identifier[string_value] )
keyword[else] :
identifier[result] = identifier[value_pb] . identifier[number_value]
keyword[elif] identifier[field_type] . identifier[code] == identifier[type_pb2] . identifier[DATE] :
identifier[result] = identifier[_date_from_iso8601_date] ( identifier[value_pb] . identifier[string_value] )
keyword[elif] identifier[field_type] . identifier[code] == identifier[type_pb2] . identifier[TIMESTAMP] :
identifier[DatetimeWithNanoseconds] = identifier[datetime_helpers] . identifier[DatetimeWithNanoseconds]
identifier[result] = identifier[DatetimeWithNanoseconds] . identifier[from_rfc3339] ( identifier[value_pb] . identifier[string_value] )
keyword[elif] identifier[field_type] . identifier[code] == identifier[type_pb2] . identifier[ARRAY] :
identifier[result] =[
identifier[_parse_value_pb] ( identifier[item_pb] , identifier[field_type] . identifier[array_element_type] )
keyword[for] identifier[item_pb] keyword[in] identifier[value_pb] . identifier[list_value] . identifier[values]
]
keyword[elif] identifier[field_type] . identifier[code] == identifier[type_pb2] . identifier[STRUCT] :
identifier[result] =[
identifier[_parse_value_pb] ( identifier[item_pb] , identifier[field_type] . identifier[struct_type] . identifier[fields] [ identifier[i] ]. identifier[type] )
keyword[for] ( identifier[i] , identifier[item_pb] ) keyword[in] identifier[enumerate] ( identifier[value_pb] . identifier[list_value] . identifier[values] )
]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[field_type] ,))
keyword[return] identifier[result] | def _parse_value_pb(value_pb, field_type):
"""Convert a Value protobuf to cell data.
:type value_pb: :class:`~google.protobuf.struct_pb2.Value`
:param value_pb: protobuf to convert
:type field_type: :class:`~google.cloud.spanner_v1.proto.type_pb2.Type`
:param field_type: type code for the value
:rtype: varies on field_type
:returns: value extracted from value_pb
:raises ValueError: if unknown type is passed
"""
if value_pb.HasField('null_value'):
return None # depends on [control=['if'], data=[]]
if field_type.code == type_pb2.STRING:
result = value_pb.string_value # depends on [control=['if'], data=[]]
elif field_type.code == type_pb2.BYTES:
result = value_pb.string_value.encode('utf8') # depends on [control=['if'], data=[]]
elif field_type.code == type_pb2.BOOL:
result = value_pb.bool_value # depends on [control=['if'], data=[]]
elif field_type.code == type_pb2.INT64:
result = int(value_pb.string_value) # depends on [control=['if'], data=[]]
elif field_type.code == type_pb2.FLOAT64:
if value_pb.HasField('string_value'):
result = float(value_pb.string_value) # depends on [control=['if'], data=[]]
else:
result = value_pb.number_value # depends on [control=['if'], data=[]]
elif field_type.code == type_pb2.DATE:
result = _date_from_iso8601_date(value_pb.string_value) # depends on [control=['if'], data=[]]
elif field_type.code == type_pb2.TIMESTAMP:
DatetimeWithNanoseconds = datetime_helpers.DatetimeWithNanoseconds
result = DatetimeWithNanoseconds.from_rfc3339(value_pb.string_value) # depends on [control=['if'], data=[]]
elif field_type.code == type_pb2.ARRAY:
result = [_parse_value_pb(item_pb, field_type.array_element_type) for item_pb in value_pb.list_value.values] # depends on [control=['if'], data=[]]
elif field_type.code == type_pb2.STRUCT:
result = [_parse_value_pb(item_pb, field_type.struct_type.fields[i].type) for (i, item_pb) in enumerate(value_pb.list_value.values)] # depends on [control=['if'], data=[]]
else:
raise ValueError('Unknown type: %s' % (field_type,))
return result |
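
A hedged sketch of the same type-directed decoding, with plain Python values standing in for the protobuf Value/Type messages; only a few of the type codes handled above are mirrored:

def parse_value(value, type_code, element_type=None):
    if value is None:
        return None
    if type_code == 'INT64':
        return int(value)   # Spanner encodes INT64 as a string, as above
    if type_code == 'FLOAT64':
        return float(value)
    if type_code == 'ARRAY':
        return [parse_value(v, element_type) for v in value]
    raise ValueError('Unknown type: %s' % (type_code,))

assert parse_value('42', 'INT64') == 42
assert parse_value(['1.5', '2.5'], 'ARRAY', 'FLOAT64') == [1.5, 2.5]
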
def _connect(self, context):
"""Initialize the database connection."""
if __debug__:
log.info("Connecting " + self.engine.partition(':')[0] + " database layer.", extra=dict(
uri = redact_uri(self.uri, self.protect),
config = self.config,
alias = self.alias,
))
self.connection = context.db[self.alias] = self._connector(self.uri, **self.config) | def function[_connect, parameter[self, context]]:
constant[Initialize the database connection.]
if name[__debug__] begin[:]
call[name[log].info, parameter[binary_operation[binary_operation[constant[Connecting ] + call[call[name[self].engine.partition, parameter[constant[:]]]][constant[0]]] + constant[ database layer.]]]]
name[self].connection assign[=] call[name[self]._connector, parameter[name[self].uri]] | keyword[def] identifier[_connect] ( identifier[self] , identifier[context] ):
literal[string]
keyword[if] identifier[__debug__] :
identifier[log] . identifier[info] ( literal[string] + identifier[self] . identifier[engine] . identifier[partition] ( literal[string] )[ literal[int] ]+ literal[string] , identifier[extra] = identifier[dict] (
identifier[uri] = identifier[redact_uri] ( identifier[self] . identifier[uri] , identifier[self] . identifier[protect] ),
identifier[config] = identifier[self] . identifier[config] ,
identifier[alias] = identifier[self] . identifier[alias] ,
))
identifier[self] . identifier[connection] = identifier[context] . identifier[db] [ identifier[self] . identifier[alias] ]= identifier[self] . identifier[_connector] ( identifier[self] . identifier[uri] ,** identifier[self] . identifier[config] ) | def _connect(self, context):
"""Initialize the database connection."""
if __debug__:
log.info('Connecting ' + self.engine.partition(':')[0] + ' database layer.', extra=dict(uri=redact_uri(self.uri, self.protect), config=self.config, alias=self.alias)) # depends on [control=['if'], data=[]]
self.connection = context.db[self.alias] = self._connector(self.uri, **self.config) |
def __experimental_range(start, stop, var, cond, loc={}):
'''Utility function made to reproduce range() with unit integer step
but with the added possibility of specifying a condition
on the looping variable (e.g. var % 2 == 0)
'''
locals().update(loc)
if start < stop:
for __ in range(start, stop):
locals()[var] = __
if eval(cond, globals(), locals()):
yield __
else:
for __ in range(start, stop, -1):
locals()[var] = __
if eval(cond, globals(), locals()):
yield __ | def function[__experimental_range, parameter[start, stop, var, cond, loc]]:
constant[Utility function made to reproduce range() with unit integer step
but with the added possibility of specifying a condition
on the looping variable (e.g. var % 2 == 0)
]
call[call[name[locals], parameter[]].update, parameter[name[loc]]]
if compare[name[start] less[<] name[stop]] begin[:]
for taget[name[__]] in starred[call[name[range], parameter[name[start], name[stop]]]] begin[:]
call[call[name[locals], parameter[]]][name[var]] assign[=] name[__]
if call[name[eval], parameter[name[cond], call[name[globals], parameter[]], call[name[locals], parameter[]]]] begin[:]
<ast.Yield object at 0x7da18f09cc70> | keyword[def] identifier[__experimental_range] ( identifier[start] , identifier[stop] , identifier[var] , identifier[cond] , identifier[loc] ={}):
literal[string]
identifier[locals] (). identifier[update] ( identifier[loc] )
keyword[if] identifier[start] < identifier[stop] :
keyword[for] identifier[__] keyword[in] identifier[range] ( identifier[start] , identifier[stop] ):
identifier[locals] ()[ identifier[var] ]= identifier[__]
keyword[if] identifier[eval] ( identifier[cond] , identifier[globals] (), identifier[locals] ()):
keyword[yield] identifier[__]
keyword[else] :
keyword[for] identifier[__] keyword[in] identifier[range] ( identifier[start] , identifier[stop] ,- literal[int] ):
identifier[locals] ()[ identifier[var] ]= identifier[__]
keyword[if] identifier[eval] ( identifier[cond] , identifier[globals] (), identifier[locals] ()):
keyword[yield] identifier[__] | def __experimental_range(start, stop, var, cond, loc={}):
"""Utility function made to reproduce range() with unit integer step
but with the added possibility of specifying a condition
on the looping variable (e.g. var % 2 == 0)
"""
locals().update(loc)
if start < stop:
for __ in range(start, stop):
locals()[var] = __
if eval(cond, globals(), locals()):
yield __ # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['__']] # depends on [control=['if'], data=['start', 'stop']]
else:
for __ in range(start, stop, -1):
locals()[var] = __
if eval(cond, globals(), locals()):
yield __ # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['__']] |
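
A usage sketch for the generator above. Note that it leans on CPython's locals() semantics (keys added to the locals() dict survive later refreshes), so it is CPython-specific:

evens = list(__experimental_range(0, 10, 'i', 'i % 2 == 0'))
assert evens == [0, 2, 4, 6, 8]

countdown = list(__experimental_range(5, 0, 'n', 'n != 3'))
assert countdown == [5, 4, 2, 1]
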
def getpaths(struct):
"""
Maps all Tasks in a structured data object to their .output().
"""
if isinstance(struct, Task):
return struct.output()
elif isinstance(struct, dict):
return struct.__class__((k, getpaths(v)) for k, v in six.iteritems(struct))
elif isinstance(struct, (list, tuple)):
return struct.__class__(getpaths(r) for r in struct)
else:
# Remaining case: assume struct is iterable...
try:
return [getpaths(r) for r in struct]
except TypeError:
raise Exception('Cannot map %s to Task/dict/list' % str(struct)) | def function[getpaths, parameter[struct]]:
constant[
Maps all Tasks in a structured data object to their .output().
]
if call[name[isinstance], parameter[name[struct], name[Task]]] begin[:]
return[call[name[struct].output, parameter[]]] | keyword[def] identifier[getpaths] ( identifier[struct] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[struct] , identifier[Task] ):
keyword[return] identifier[struct] . identifier[output] ()
keyword[elif] identifier[isinstance] ( identifier[struct] , identifier[dict] ):
keyword[return] identifier[struct] . identifier[__class__] (( identifier[k] , identifier[getpaths] ( identifier[v] )) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[struct] ))
keyword[elif] identifier[isinstance] ( identifier[struct] ,( identifier[list] , identifier[tuple] )):
keyword[return] identifier[struct] . identifier[__class__] ( identifier[getpaths] ( identifier[r] ) keyword[for] identifier[r] keyword[in] identifier[struct] )
keyword[else] :
keyword[try] :
keyword[return] [ identifier[getpaths] ( identifier[r] ) keyword[for] identifier[r] keyword[in] identifier[struct] ]
keyword[except] identifier[TypeError] :
keyword[raise] identifier[Exception] ( literal[string] % identifier[str] ( identifier[struct] )) | def getpaths(struct):
"""
Maps all Tasks in a structured data object to their .output().
"""
if isinstance(struct, Task):
return struct.output() # depends on [control=['if'], data=[]]
elif isinstance(struct, dict):
return struct.__class__(((k, getpaths(v)) for (k, v) in six.iteritems(struct))) # depends on [control=['if'], data=[]]
elif isinstance(struct, (list, tuple)):
return struct.__class__((getpaths(r) for r in struct)) # depends on [control=['if'], data=[]]
else:
# Remaining case: assume struct is iterable...
try:
return [getpaths(r) for r in struct] # depends on [control=['try'], data=[]]
except TypeError:
raise Exception('Cannot map %s to Task/dict/list' % str(struct)) # depends on [control=['except'], data=[]] |
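
A usage sketch with a minimal Task stand-in (real luigi Tasks carry parameter machinery, so the stub is illustrative only). One design note: the iterable fallback recurses forever on plain strings, since each character is itself an iterable string, so keep bare strings out of the structure:

class StubTask(Task):
    def __init__(self, path):
        self.path = path

    def output(self):
        return self.path

paths = getpaths({'train': StubTask('/data/train'),
                  'folds': (StubTask('/data/f0'), StubTask('/data/f1'))})
# -> {'train': '/data/train', 'folds': ('/data/f0', '/data/f1')}
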
def tickEFP(self, tickerId, tickType, basisPoints, formattedBasisPoints, totalDividends, holdDays, futureExpiry, dividendImpact, dividendsToExpiry):
"""tickEFP(EWrapper self, TickerId tickerId, TickType tickType, double basisPoints, IBString const & formattedBasisPoints, double totalDividends, int holdDays, IBString const & futureExpiry, double dividendImpact, double dividendsToExpiry)"""
return _swigibpy.EWrapper_tickEFP(self, tickerId, tickType, basisPoints, formattedBasisPoints, totalDividends, holdDays, futureExpiry, dividendImpact, dividendsToExpiry) | def function[tickEFP, parameter[self, tickerId, tickType, basisPoints, formattedBasisPoints, totalDividends, holdDays, futureExpiry, dividendImpact, dividendsToExpiry]]:
constant[tickEFP(EWrapper self, TickerId tickerId, TickType tickType, double basisPoints, IBString const & formattedBasisPoints, double totalDividends, int holdDays, IBString const & futureExpiry, double dividendImpact, double dividendsToExpiry)]
return[call[name[_swigibpy].EWrapper_tickEFP, parameter[name[self], name[tickerId], name[tickType], name[basisPoints], name[formattedBasisPoints], name[totalDividends], name[holdDays], name[futureExpiry], name[dividendImpact], name[dividendsToExpiry]]]] | keyword[def] identifier[tickEFP] ( identifier[self] , identifier[tickerId] , identifier[tickType] , identifier[basisPoints] , identifier[formattedBasisPoints] , identifier[totalDividends] , identifier[holdDays] , identifier[futureExpiry] , identifier[dividendImpact] , identifier[dividendsToExpiry] ):
literal[string]
keyword[return] identifier[_swigibpy] . identifier[EWrapper_tickEFP] ( identifier[self] , identifier[tickerId] , identifier[tickType] , identifier[basisPoints] , identifier[formattedBasisPoints] , identifier[totalDividends] , identifier[holdDays] , identifier[futureExpiry] , identifier[dividendImpact] , identifier[dividendsToExpiry] ) | def tickEFP(self, tickerId, tickType, basisPoints, formattedBasisPoints, totalDividends, holdDays, futureExpiry, dividendImpact, dividendsToExpiry):
"""tickEFP(EWrapper self, TickerId tickerId, TickType tickType, double basisPoints, IBString const & formattedBasisPoints, double totalDividends, int holdDays, IBString const & futureExpiry, double dividendImpact, double dividendsToExpiry)"""
return _swigibpy.EWrapper_tickEFP(self, tickerId, tickType, basisPoints, formattedBasisPoints, totalDividends, holdDays, futureExpiry, dividendImpact, dividendsToExpiry) |
def _generate_struct_class_custom_annotations(self, ns, data_type):
"""
The _process_custom_annotations function allows client code to access
custom annotations defined in the spec.
"""
self.emit('def _process_custom_annotations(self, annotation_type, field_path, processor):')
with self.indent(), emit_pass_if_nothing_emitted(self):
self.emit(
(
'super({}, self)._process_custom_annotations(annotation_type, field_path, '
'processor)'
).format(class_name_for_data_type(data_type))
)
self.emit()
for field in data_type.fields:
field_name = fmt_var(field.name, check_reserved=True)
for annotation_type, processor in self._generate_custom_annotation_processors(
ns, field.data_type, field.custom_annotations):
annotation_class = class_name_for_annotation_type(annotation_type, ns)
self.emit('if annotation_type is {}:'.format(annotation_class))
with self.indent():
self.emit('self.{} = {}'.format(
field_name,
generate_func_call(
processor,
args=[
"'{{}}.{}'.format(field_path)".format(field_name),
'self.{}'.format(field_name),
])
))
self.emit() | def function[_generate_struct_class_custom_annotations, parameter[self, ns, data_type]]:
constant[
The _process_custom_annotations function allows client code to access
custom annotations defined in the spec.
]
call[name[self].emit, parameter[constant[def _process_custom_annotations(self, annotation_type, field_path, processor):]]]
with call[name[self].indent, parameter[]] begin[:]
call[name[self].emit, parameter[call[constant[super({}, self)._process_custom_annotations(annotation_type, field_path, processor)].format, parameter[call[name[class_name_for_data_type], parameter[name[data_type]]]]]]]
call[name[self].emit, parameter[]]
for taget[name[field]] in starred[name[data_type].fields] begin[:]
variable[field_name] assign[=] call[name[fmt_var], parameter[name[field].name]]
for taget[tuple[[<ast.Name object at 0x7da20c7c8970>, <ast.Name object at 0x7da20c7caec0>]]] in starred[call[name[self]._generate_custom_annotation_processors, parameter[name[ns], name[field].data_type, name[field].custom_annotations]]] begin[:]
variable[annotation_class] assign[=] call[name[class_name_for_annotation_type], parameter[name[annotation_type], name[ns]]]
call[name[self].emit, parameter[call[constant[if annotation_type is {}:].format, parameter[name[annotation_class]]]]]
with call[name[self].indent, parameter[]] begin[:]
call[name[self].emit, parameter[call[constant[self.{} = {}].format, parameter[name[field_name], call[name[generate_func_call], parameter[name[processor]]]]]]]
call[name[self].emit, parameter[]] | keyword[def] identifier[_generate_struct_class_custom_annotations] ( identifier[self] , identifier[ns] , identifier[data_type] ):
literal[string]
identifier[self] . identifier[emit] ( literal[string] )
keyword[with] identifier[self] . identifier[indent] (), identifier[emit_pass_if_nothing_emitted] ( identifier[self] ):
identifier[self] . identifier[emit] (
(
literal[string]
literal[string]
). identifier[format] ( identifier[class_name_for_data_type] ( identifier[data_type] ))
)
identifier[self] . identifier[emit] ()
keyword[for] identifier[field] keyword[in] identifier[data_type] . identifier[fields] :
identifier[field_name] = identifier[fmt_var] ( identifier[field] . identifier[name] , identifier[check_reserved] = keyword[True] )
keyword[for] identifier[annotation_type] , identifier[processor] keyword[in] identifier[self] . identifier[_generate_custom_annotation_processors] (
identifier[ns] , identifier[field] . identifier[data_type] , identifier[field] . identifier[custom_annotations] ):
identifier[annotation_class] = identifier[class_name_for_annotation_type] ( identifier[annotation_type] , identifier[ns] )
identifier[self] . identifier[emit] ( literal[string] . identifier[format] ( identifier[annotation_class] ))
keyword[with] identifier[self] . identifier[indent] ():
identifier[self] . identifier[emit] ( literal[string] . identifier[format] (
identifier[field_name] ,
identifier[generate_func_call] (
identifier[processor] ,
identifier[args] =[
literal[string] . identifier[format] ( identifier[field_name] ),
literal[string] . identifier[format] ( identifier[field_name] ),
])
))
identifier[self] . identifier[emit] () | def _generate_struct_class_custom_annotations(self, ns, data_type):
"""
The _process_custom_annotations function allows client code to access
custom annotations defined in the spec.
"""
self.emit('def _process_custom_annotations(self, annotation_type, field_path, processor):')
with self.indent(), emit_pass_if_nothing_emitted(self):
self.emit('super({}, self)._process_custom_annotations(annotation_type, field_path, processor)'.format(class_name_for_data_type(data_type)))
self.emit()
for field in data_type.fields:
field_name = fmt_var(field.name, check_reserved=True)
for (annotation_type, processor) in self._generate_custom_annotation_processors(ns, field.data_type, field.custom_annotations):
annotation_class = class_name_for_annotation_type(annotation_type, ns)
self.emit('if annotation_type is {}:'.format(annotation_class))
with self.indent():
self.emit('self.{} = {}'.format(field_name, generate_func_call(processor, args=["'{{}}.{}'.format(field_path)".format(field_name), 'self.{}'.format(field_name)]))) # depends on [control=['with'], data=[]]
self.emit() # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['field']] # depends on [control=['with'], data=[]] |
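
The generator above leans entirely on an emit/indent pair. A tiny standalone emitter showing the shape of that pattern (Stone's backend provides the real one; this sketch is not its API):

import contextlib

class Emitter(object):
    def __init__(self):
        self.lines = []
        self.level = 0

    def emit(self, text=''):
        # Indent by the current nesting level; a bare emit() adds a blank line.
        self.lines.append('    ' * self.level + text if text else '')

    @contextlib.contextmanager
    def indent(self):
        self.level += 1
        try:
            yield
        finally:
            self.level -= 1

e = Emitter()
e.emit('def _process_custom_annotations(self, annotation_type, field_path, processor):')
with e.indent():
    e.emit('pass')
print('\n'.join(e.lines))
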
def instruction_LSL_memory(self, opcode, ea, m):
"""
Logical shift left memory location / Arithmetic shift of memory left
"""
r = self.LSL(m)
# log.debug("$%x LSL memory value $%x << 1 = $%x and write it to $%x \t| %s" % (
# self.program_counter,
# m, r, ea,
# self.cfg.mem_info.get_shortest(ea)
# ))
return ea, r & 0xff | def function[instruction_LSL_memory, parameter[self, opcode, ea, m]]:
constant[
Logical shift left memory location / Arithmetic shift of memory left
]
variable[r] assign[=] call[name[self].LSL, parameter[name[m]]]
return[tuple[[<ast.Name object at 0x7da2054a63e0>, <ast.BinOp object at 0x7da2054a7550>]]] | keyword[def] identifier[instruction_LSL_memory] ( identifier[self] , identifier[opcode] , identifier[ea] , identifier[m] ):
literal[string]
identifier[r] = identifier[self] . identifier[LSL] ( identifier[m] )
keyword[return] identifier[ea] , identifier[r] & literal[int] | def instruction_LSL_memory(self, opcode, ea, m):
"""
Logical shift left memory location / Arithmetic shift of memory left
"""
r = self.LSL(m)
# log.debug("$%x LSL memory value $%x << 1 = $%x and write it to $%x \t| %s" % (
# self.program_counter,
# m, r, ea,
# self.cfg.mem_info.get_shortest(ea)
# ))
return (ea, r & 255) |
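
A hedged sketch of an 8-bit logical shift left with carry-out, which is what self.LSL presumably computes (the emulator's LSL and its condition-code updates are not shown above):

def lsl8(m):
    carry = (m >> 7) & 1          # bit 7 shifts out into carry
    return (m << 1) & 0xff, carry

assert lsl8(0x81) == (0x02, 1)
assert lsl8(0x01) == (0x02, 0)
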
def _add_channel(self, chn, color_min, color_max):
"""Adds a channel to the image object
"""
if isinstance(chn, np.ma.core.MaskedArray):
chn_data = chn.data
chn_mask = chn.mask
else:
chn_data = np.array(chn)
chn_mask = False
scaled = ((chn_data - color_min) *
1.0 / (color_max - color_min))
self.channels.append(np.ma.array(scaled, mask=chn_mask)) | def function[_add_channel, parameter[self, chn, color_min, color_max]]:
constant[Adds a channel to the image object
]
if call[name[isinstance], parameter[name[chn], name[np].ma.core.MaskedArray]] begin[:]
variable[chn_data] assign[=] name[chn].data
variable[chn_mask] assign[=] name[chn].mask
variable[scaled] assign[=] binary_operation[binary_operation[binary_operation[name[chn_data] - name[color_min]] * constant[1.0]] / binary_operation[name[color_max] - name[color_min]]]
call[name[self].channels.append, parameter[call[name[np].ma.array, parameter[name[scaled]]]]] | keyword[def] identifier[_add_channel] ( identifier[self] , identifier[chn] , identifier[color_min] , identifier[color_max] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[chn] , identifier[np] . identifier[ma] . identifier[core] . identifier[MaskedArray] ):
identifier[chn_data] = identifier[chn] . identifier[data]
identifier[chn_mask] = identifier[chn] . identifier[mask]
keyword[else] :
identifier[chn_data] = identifier[np] . identifier[array] ( identifier[chn] )
identifier[chn_mask] = keyword[False]
identifier[scaled] =(( identifier[chn_data] - identifier[color_min] )*
literal[int] /( identifier[color_max] - identifier[color_min] ))
identifier[self] . identifier[channels] . identifier[append] ( identifier[np] . identifier[ma] . identifier[array] ( identifier[scaled] , identifier[mask] = identifier[chn_mask] )) | def _add_channel(self, chn, color_min, color_max):
"""Adds a channel to the image object
"""
if isinstance(chn, np.ma.core.MaskedArray):
chn_data = chn.data
chn_mask = chn.mask # depends on [control=['if'], data=[]]
else:
chn_data = np.array(chn)
chn_mask = False
scaled = (chn_data - color_min) * 1.0 / (color_max - color_min)
self.channels.append(np.ma.array(scaled, mask=chn_mask)) |
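
A hedged sketch of the same min-max scaling on a bare array, masking invalid values the way the masked-array branch above preserves the mask:

import numpy as np

def scale_channel(chn, color_min, color_max):
    chn = np.ma.masked_invalid(np.asarray(chn, dtype=float))
    return (chn - color_min) * 1.0 / (color_max - color_min)

print(scale_channel([0, 128, 255], 0, 255))   # ~ [0.0, 0.502, 1.0]
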
def Initialize(api_key, api_secret, api_host="localhost", api_port=443, api_ssl=True, asyncblock=False, timeout=10, req_method="get"):
""" Initializes the Cloudstack API
Accepts arguments:
api_host (localhost)
api_port (443)
api_ssl (True)
api_key
        api_secret
        asyncblock (False)
        timeout (10)
        req_method (get)
"""
config = Config()
if api_ssl:
proto = "https"
else:
proto = "http"
api_url = "%s://%s:%s/client/api" % (proto, api_host, api_port)
    if os.access(os.path.expanduser("~"), os.W_OK):
d = os.path.expanduser("~")
else:
d = tempfile.gettempdir()
cache_file = os.getenv('MOLNCTRL_CACHE') or '.molnctrl_cache'
if os.path.exists(os.path.join(d, cache_file)):
apicache = pickle.load(open( os.path.join(d, cache_file), "rb" ))
else:
method = {'description': u'lists all available apis on the server, provided by the Api Discovery plugin',
'isasync': False,
'name': u'listApis',
'params': [{'description': u'API name',
'length': 255,
'name': u'name',
'related': [],
'required': False,
'type': u'string'}],
'related': [],
'requiredparams': []}
_create_api_method(CSApi, "list_apis", method)
c = CSApi(api_url, api_key, api_secret, asyncblock)
apicache = cachemaker.monkeycache(c.list_apis())
pickle.dump(apicache, open(os.path.join(d, cache_file), "wb"))
for verb, methods in six.iteritems(apicache):
if isinstance(methods, dict):
for method in six.iterkeys(methods):
_create_api_method(CSApi, "%s_%s" % (verb, method), methods[method])
return CSApi(api_url, api_key, api_secret, asyncblock, timeout, req_method) | def function[Initialize, parameter[api_key, api_secret, api_host, api_port, api_ssl, asyncblock, timeout, req_method]]:
constant[ Initializes the Cloudstack API
Accepts arguments:
api_host (localhost)
api_port (443)
api_ssl (True)
api_key
    api_secret
    asyncblock (False)
    timeout (10)
    req_method (get)
]
variable[config] assign[=] call[name[Config], parameter[]]
if name[api_ssl] begin[:]
variable[proto] assign[=] constant[https]
variable[api_url] assign[=] binary_operation[constant[%s://%s:%s/client/api] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c76e050>, <ast.Name object at 0x7da20c76f3d0>, <ast.Name object at 0x7da20c76d9c0>]]]
if call[name[os].access, parameter[call[name[os].path.expanduser, parameter[constant[~]]], name[os].W_OK]] begin[:]
variable[d] assign[=] call[name[os].path.expanduser, parameter[constant[~]]]
variable[cache_file] assign[=] <ast.BoolOp object at 0x7da204621de0>
if call[name[os].path.exists, parameter[call[name[os].path.join, parameter[name[d], name[cache_file]]]]] begin[:]
variable[apicache] assign[=] call[name[pickle].load, parameter[call[name[open], parameter[call[name[os].path.join, parameter[name[d], name[cache_file]]], constant[rb]]]]]
for taget[tuple[[<ast.Name object at 0x7da204620160>, <ast.Name object at 0x7da204623940>]]] in starred[call[name[six].iteritems, parameter[name[apicache]]]] begin[:]
if call[name[isinstance], parameter[name[methods], name[dict]]] begin[:]
for taget[name[method]] in starred[call[name[six].iterkeys, parameter[name[methods]]]] begin[:]
call[name[_create_api_method], parameter[name[CSApi], binary_operation[constant[%s_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204621480>, <ast.Name object at 0x7da2046238b0>]]], call[name[methods]][name[method]]]]
return[call[name[CSApi], parameter[name[api_url], name[api_key], name[api_secret], name[asyncblock], name[timeout], name[req_method]]]] | keyword[def] identifier[Initialize] ( identifier[api_key] , identifier[api_secret] , identifier[api_host] = literal[string] , identifier[api_port] = literal[int] , identifier[api_ssl] = keyword[True] , identifier[asyncblock] = keyword[False] , identifier[timeout] = literal[int] , identifier[req_method] = literal[string] ):
literal[string]
identifier[config] = identifier[Config] ()
keyword[if] identifier[api_ssl] :
identifier[proto] = literal[string]
keyword[else] :
identifier[proto] = literal[string]
identifier[api_url] = literal[string] %( identifier[proto] , identifier[api_host] , identifier[api_port] )
keyword[if] identifier[os] . identifier[access] ( identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] ), identifier[os] . identifier[W_OK] ):
identifier[d] = identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] )
keyword[else] :
identifier[d] = identifier[tempfile] . identifier[gettempdir] ()
identifier[cache_file] = identifier[os] . identifier[getenv] ( literal[string] ) keyword[or] literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[d] , identifier[cache_file] )):
identifier[apicache] = identifier[pickle] . identifier[load] ( identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[d] , identifier[cache_file] ), literal[string] ))
keyword[else] :
identifier[method] ={ literal[string] : literal[string] ,
literal[string] : keyword[False] ,
literal[string] : literal[string] ,
literal[string] :[{ literal[string] : literal[string] ,
literal[string] : literal[int] ,
literal[string] : literal[string] ,
literal[string] :[],
literal[string] : keyword[False] ,
literal[string] : literal[string] }],
literal[string] :[],
literal[string] :[]}
identifier[_create_api_method] ( identifier[CSApi] , literal[string] , identifier[method] )
identifier[c] = identifier[CSApi] ( identifier[api_url] , identifier[api_key] , identifier[api_secret] , identifier[asyncblock] )
identifier[apicache] = identifier[cachemaker] . identifier[monkeycache] ( identifier[c] . identifier[list_apis] ())
identifier[pickle] . identifier[dump] ( identifier[apicache] , identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[d] , identifier[cache_file] ), literal[string] ))
keyword[for] identifier[verb] , identifier[methods] keyword[in] identifier[six] . identifier[iteritems] ( identifier[apicache] ):
keyword[if] identifier[isinstance] ( identifier[methods] , identifier[dict] ):
keyword[for] identifier[method] keyword[in] identifier[six] . identifier[iterkeys] ( identifier[methods] ):
identifier[_create_api_method] ( identifier[CSApi] , literal[string] %( identifier[verb] , identifier[method] ), identifier[methods] [ identifier[method] ])
keyword[return] identifier[CSApi] ( identifier[api_url] , identifier[api_key] , identifier[api_secret] , identifier[asyncblock] , identifier[timeout] , identifier[req_method] ) | def Initialize(api_key, api_secret, api_host='localhost', api_port=443, api_ssl=True, asyncblock=False, timeout=10, req_method='get'):
""" Initializes the Cloudstack API
Accepts arguments:
api_host (localhost)
api_port (443)
api_ssl (True)
api_key
api_secret
"""
config = Config()
if api_ssl:
proto = 'https' # depends on [control=['if'], data=[]]
else:
proto = 'http'
api_url = '%s://%s:%s/client/api' % (proto, api_host, api_port)
if os.access(os.path.expanduser('~'), os.W_OK):
d = os.path.expanduser('~') # depends on [control=['if'], data=[]]
else:
d = tempfile.gettempdir()
cache_file = os.getenv('MOLNCTRL_CACHE') or '.molnctrl_cache'
if os.path.exists(os.path.join(d, cache_file)):
apicache = pickle.load(open(os.path.join(d, cache_file), 'rb')) # depends on [control=['if'], data=[]]
else:
method = {'description': u'lists all available apis on the server, provided by the Api Discovery plugin', 'isasync': False, 'name': u'listApis', 'params': [{'description': u'API name', 'length': 255, 'name': u'name', 'related': [], 'required': False, 'type': u'string'}], 'related': [], 'requiredparams': []}
_create_api_method(CSApi, 'list_apis', method)
c = CSApi(api_url, api_key, api_secret, asyncblock)
apicache = cachemaker.monkeycache(c.list_apis())
pickle.dump(apicache, open(os.path.join(d, cache_file), 'wb'))
for (verb, methods) in six.iteritems(apicache):
if isinstance(methods, dict):
for method in six.iterkeys(methods):
_create_api_method(CSApi, '%s_%s' % (verb, method), methods[method]) # depends on [control=['for'], data=['method']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return CSApi(api_url, api_key, api_secret, asyncblock, timeout, req_method) |
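A minimal usage sketch for the initializer above; the endpoint and keys are fabricated placeholders, and the dynamically attached call shown in the comment is illustrative since the available verbs depend on the server's API discovery listing:
# Illustrative only -- host and credentials are placeholders.
api = Initialize(api_key='PLACEHOLDER_KEY',
                 api_secret='PLACEHOLDER_SECRET',
                 api_host='cloud.example.com',
                 api_port=443,
                 api_ssl=True)
# Methods are attached at runtime from the cached discovery data, e.g.:
# vms = api.list_virtualmachines()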
def create_partition(analysis_request, request, analyses, sample_type=None,
container=None, preservation=None, skip_fields=None,
remove_primary_analyses=True):
"""
Creates a partition for the analysis_request (primary) passed in
:param analysis_request: uid/brain/object of IAnalysisRequest type
:param request: the current request object
:param analyses: uids/brains/objects of IAnalysis type
    :param sample_type: uid/brain/object of SampleType
:param container: uid/brain/object of Container
:param preservation: uid/brain/object of Preservation
:param skip_fields: names of fields to be skipped on copy from primary
:param remove_primary_analyses: removes the analyses from the parent
:return: the new partition
"""
partition_skip_fields = [
"Analyses",
"Attachment",
"Client",
"Profile",
"Profiles",
"RejectionReasons",
"Remarks",
"ResultsInterpretation",
"ResultsInterpretationDepts",
"Sample",
"Template",
"creation_date",
"id",
"modification_date",
"ParentAnalysisRequest",
"PrimaryAnalysisRequest",
]
if skip_fields:
partition_skip_fields.extend(skip_fields)
partition_skip_fields = list(set(partition_skip_fields))
# Copy field values from the primary analysis request
ar = api.get_object(analysis_request)
record = fields_to_dict(ar, partition_skip_fields)
# Update with values that are partition-specific
record.update({
"InternalUse": True,
"ParentAnalysisRequest": api.get_uid(ar),
})
if sample_type is not None:
record["SampleType"] = sample_type and api.get_uid(sample_type) or ""
if container is not None:
record["Container"] = container and api.get_uid(container) or ""
if preservation is not None:
record["Preservation"] = preservation and api.get_uid(preservation) or ""
# Create the Partition
client = ar.getClient()
analyses = list(set(map(api.get_object, analyses)))
services = map(lambda an: an.getAnalysisService(), analyses)
specs = ar.getSpecification()
specs = specs and specs.getResultsRange() or []
partition = create_analysisrequest(client, request=request, values=record,
analyses=services, specifications=specs)
# Remove analyses from the primary
if remove_primary_analyses:
analyses_ids = map(api.get_id, analyses)
ar.manage_delObjects(analyses_ids)
# Reindex Parent Analysis Request
ar.reindexObject(idxs=["isRootAncestor"])
    # Manually set the Date Received to match its parent's. This is
    # necessary because create_analysisrequest calls processForm, so
    # DateReceived is not set (the partition has not been received yet)
partition.setDateReceived(ar.getDateReceived())
partition.reindexObject(idxs="getDateReceived")
# Force partition to same status as the primary
status = api.get_workflow_status_of(ar)
changeWorkflowState(partition, "bika_ar_workflow", status)
if IReceived.providedBy(ar):
alsoProvides(partition, IReceived)
# And initialize the analyses the partition contains. This is required
# here because the transition "initialize" of analyses rely on a guard,
# so the initialization can only be performed when the sample has been
# received (DateReceived is set)
ActionHandlerPool.get_instance().queue_pool()
for analysis in partition.getAnalyses(full_objects=True):
doActionFor(analysis, "initialize")
ActionHandlerPool.get_instance().resume()
return partition | def function[create_partition, parameter[analysis_request, request, analyses, sample_type, container, preservation, skip_fields, remove_primary_analyses]]:
constant[
Creates a partition for the analysis_request (primary) passed in
:param analysis_request: uid/brain/object of IAnalysisRequest type
:param request: the current request object
:param analyses: uids/brains/objects of IAnalysis type
    :param sample_type: uid/brain/object of SampleType
:param container: uid/brain/object of Container
:param preservation: uid/brain/object of Preservation
:param skip_fields: names of fields to be skipped on copy from primary
:param remove_primary_analyses: removes the analyses from the parent
:return: the new partition
]
variable[partition_skip_fields] assign[=] list[[<ast.Constant object at 0x7da2054a46a0>, <ast.Constant object at 0x7da2054a4100>, <ast.Constant object at 0x7da2054a6ec0>, <ast.Constant object at 0x7da2054a6110>, <ast.Constant object at 0x7da2054a5e10>, <ast.Constant object at 0x7da1b1d649d0>, <ast.Constant object at 0x7da1b1d67520>, <ast.Constant object at 0x7da2054a5bd0>, <ast.Constant object at 0x7da2054a67d0>, <ast.Constant object at 0x7da2054a6a10>, <ast.Constant object at 0x7da2054a6ef0>, <ast.Constant object at 0x7da2054a4370>, <ast.Constant object at 0x7da2054a4c40>, <ast.Constant object at 0x7da2054a4df0>, <ast.Constant object at 0x7da2054a5b70>, <ast.Constant object at 0x7da2054a70d0>]]
if name[skip_fields] begin[:]
call[name[partition_skip_fields].extend, parameter[name[skip_fields]]]
variable[partition_skip_fields] assign[=] call[name[list], parameter[call[name[set], parameter[name[partition_skip_fields]]]]]
variable[ar] assign[=] call[name[api].get_object, parameter[name[analysis_request]]]
variable[record] assign[=] call[name[fields_to_dict], parameter[name[ar], name[partition_skip_fields]]]
call[name[record].update, parameter[dictionary[[<ast.Constant object at 0x7da1b1d4ba00>, <ast.Constant object at 0x7da1b1d4aa70>], [<ast.Constant object at 0x7da1b1d49f00>, <ast.Call object at 0x7da1b1d4ae00>]]]]
if compare[name[sample_type] is_not constant[None]] begin[:]
call[name[record]][constant[SampleType]] assign[=] <ast.BoolOp object at 0x7da1b1d4b040>
if compare[name[container] is_not constant[None]] begin[:]
call[name[record]][constant[Container]] assign[=] <ast.BoolOp object at 0x7da1b1d495a0>
if compare[name[preservation] is_not constant[None]] begin[:]
call[name[record]][constant[Preservation]] assign[=] <ast.BoolOp object at 0x7da1b1d4a170>
variable[client] assign[=] call[name[ar].getClient, parameter[]]
variable[analyses] assign[=] call[name[list], parameter[call[name[set], parameter[call[name[map], parameter[name[api].get_object, name[analyses]]]]]]]
variable[services] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da1b1d481f0>, name[analyses]]]
variable[specs] assign[=] call[name[ar].getSpecification, parameter[]]
variable[specs] assign[=] <ast.BoolOp object at 0x7da1b1d4af20>
variable[partition] assign[=] call[name[create_analysisrequest], parameter[name[client]]]
if name[remove_primary_analyses] begin[:]
variable[analyses_ids] assign[=] call[name[map], parameter[name[api].get_id, name[analyses]]]
call[name[ar].manage_delObjects, parameter[name[analyses_ids]]]
call[name[ar].reindexObject, parameter[]]
call[name[partition].setDateReceived, parameter[call[name[ar].getDateReceived, parameter[]]]]
call[name[partition].reindexObject, parameter[]]
variable[status] assign[=] call[name[api].get_workflow_status_of, parameter[name[ar]]]
call[name[changeWorkflowState], parameter[name[partition], constant[bika_ar_workflow], name[status]]]
if call[name[IReceived].providedBy, parameter[name[ar]]] begin[:]
call[name[alsoProvides], parameter[name[partition], name[IReceived]]]
call[call[name[ActionHandlerPool].get_instance, parameter[]].queue_pool, parameter[]]
for taget[name[analysis]] in starred[call[name[partition].getAnalyses, parameter[]]] begin[:]
call[name[doActionFor], parameter[name[analysis], constant[initialize]]]
call[call[name[ActionHandlerPool].get_instance, parameter[]].resume, parameter[]]
return[name[partition]] | keyword[def] identifier[create_partition] ( identifier[analysis_request] , identifier[request] , identifier[analyses] , identifier[sample_type] = keyword[None] ,
identifier[container] = keyword[None] , identifier[preservation] = keyword[None] , identifier[skip_fields] = keyword[None] ,
identifier[remove_primary_analyses] = keyword[True] ):
literal[string]
identifier[partition_skip_fields] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
]
keyword[if] identifier[skip_fields] :
identifier[partition_skip_fields] . identifier[extend] ( identifier[skip_fields] )
identifier[partition_skip_fields] = identifier[list] ( identifier[set] ( identifier[partition_skip_fields] ))
identifier[ar] = identifier[api] . identifier[get_object] ( identifier[analysis_request] )
identifier[record] = identifier[fields_to_dict] ( identifier[ar] , identifier[partition_skip_fields] )
identifier[record] . identifier[update] ({
literal[string] : keyword[True] ,
literal[string] : identifier[api] . identifier[get_uid] ( identifier[ar] ),
})
keyword[if] identifier[sample_type] keyword[is] keyword[not] keyword[None] :
identifier[record] [ literal[string] ]= identifier[sample_type] keyword[and] identifier[api] . identifier[get_uid] ( identifier[sample_type] ) keyword[or] literal[string]
keyword[if] identifier[container] keyword[is] keyword[not] keyword[None] :
identifier[record] [ literal[string] ]= identifier[container] keyword[and] identifier[api] . identifier[get_uid] ( identifier[container] ) keyword[or] literal[string]
keyword[if] identifier[preservation] keyword[is] keyword[not] keyword[None] :
identifier[record] [ literal[string] ]= identifier[preservation] keyword[and] identifier[api] . identifier[get_uid] ( identifier[preservation] ) keyword[or] literal[string]
identifier[client] = identifier[ar] . identifier[getClient] ()
identifier[analyses] = identifier[list] ( identifier[set] ( identifier[map] ( identifier[api] . identifier[get_object] , identifier[analyses] )))
identifier[services] = identifier[map] ( keyword[lambda] identifier[an] : identifier[an] . identifier[getAnalysisService] (), identifier[analyses] )
identifier[specs] = identifier[ar] . identifier[getSpecification] ()
identifier[specs] = identifier[specs] keyword[and] identifier[specs] . identifier[getResultsRange] () keyword[or] []
identifier[partition] = identifier[create_analysisrequest] ( identifier[client] , identifier[request] = identifier[request] , identifier[values] = identifier[record] ,
identifier[analyses] = identifier[services] , identifier[specifications] = identifier[specs] )
keyword[if] identifier[remove_primary_analyses] :
identifier[analyses_ids] = identifier[map] ( identifier[api] . identifier[get_id] , identifier[analyses] )
identifier[ar] . identifier[manage_delObjects] ( identifier[analyses_ids] )
identifier[ar] . identifier[reindexObject] ( identifier[idxs] =[ literal[string] ])
identifier[partition] . identifier[setDateReceived] ( identifier[ar] . identifier[getDateReceived] ())
identifier[partition] . identifier[reindexObject] ( identifier[idxs] = literal[string] )
identifier[status] = identifier[api] . identifier[get_workflow_status_of] ( identifier[ar] )
identifier[changeWorkflowState] ( identifier[partition] , literal[string] , identifier[status] )
keyword[if] identifier[IReceived] . identifier[providedBy] ( identifier[ar] ):
identifier[alsoProvides] ( identifier[partition] , identifier[IReceived] )
identifier[ActionHandlerPool] . identifier[get_instance] (). identifier[queue_pool] ()
keyword[for] identifier[analysis] keyword[in] identifier[partition] . identifier[getAnalyses] ( identifier[full_objects] = keyword[True] ):
identifier[doActionFor] ( identifier[analysis] , literal[string] )
identifier[ActionHandlerPool] . identifier[get_instance] (). identifier[resume] ()
keyword[return] identifier[partition] | def create_partition(analysis_request, request, analyses, sample_type=None, container=None, preservation=None, skip_fields=None, remove_primary_analyses=True):
"""
Creates a partition for the analysis_request (primary) passed in
:param analysis_request: uid/brain/object of IAnalysisRequest type
:param request: the current request object
:param analyses: uids/brains/objects of IAnalysis type
    :param sample_type: uid/brain/object of SampleType
:param container: uid/brain/object of Container
:param preservation: uid/brain/object of Preservation
:param skip_fields: names of fields to be skipped on copy from primary
:param remove_primary_analyses: removes the analyses from the parent
:return: the new partition
"""
partition_skip_fields = ['Analyses', 'Attachment', 'Client', 'Profile', 'Profiles', 'RejectionReasons', 'Remarks', 'ResultsInterpretation', 'ResultsInterpretationDepts', 'Sample', 'Template', 'creation_date', 'id', 'modification_date', 'ParentAnalysisRequest', 'PrimaryAnalysisRequest']
if skip_fields:
partition_skip_fields.extend(skip_fields)
partition_skip_fields = list(set(partition_skip_fields)) # depends on [control=['if'], data=[]]
# Copy field values from the primary analysis request
ar = api.get_object(analysis_request)
record = fields_to_dict(ar, partition_skip_fields)
# Update with values that are partition-specific
record.update({'InternalUse': True, 'ParentAnalysisRequest': api.get_uid(ar)})
if sample_type is not None:
record['SampleType'] = sample_type and api.get_uid(sample_type) or '' # depends on [control=['if'], data=['sample_type']]
if container is not None:
record['Container'] = container and api.get_uid(container) or '' # depends on [control=['if'], data=['container']]
if preservation is not None:
record['Preservation'] = preservation and api.get_uid(preservation) or '' # depends on [control=['if'], data=['preservation']]
# Create the Partition
client = ar.getClient()
analyses = list(set(map(api.get_object, analyses)))
services = map(lambda an: an.getAnalysisService(), analyses)
specs = ar.getSpecification()
specs = specs and specs.getResultsRange() or []
partition = create_analysisrequest(client, request=request, values=record, analyses=services, specifications=specs)
# Remove analyses from the primary
if remove_primary_analyses:
analyses_ids = map(api.get_id, analyses)
ar.manage_delObjects(analyses_ids) # depends on [control=['if'], data=[]]
# Reindex Parent Analysis Request
ar.reindexObject(idxs=['isRootAncestor'])
    # Manually set the Date Received to match its parent's. This is
    # necessary because create_analysisrequest calls processForm, so
    # DateReceived is not set (the partition has not been received yet)
partition.setDateReceived(ar.getDateReceived())
partition.reindexObject(idxs='getDateReceived')
# Force partition to same status as the primary
status = api.get_workflow_status_of(ar)
changeWorkflowState(partition, 'bika_ar_workflow', status)
if IReceived.providedBy(ar):
alsoProvides(partition, IReceived) # depends on [control=['if'], data=[]]
# And initialize the analyses the partition contains. This is required
# here because the transition "initialize" of analyses rely on a guard,
# so the initialization can only be performed when the sample has been
# received (DateReceived is set)
ActionHandlerPool.get_instance().queue_pool()
for analysis in partition.getAnalyses(full_objects=True):
doActionFor(analysis, 'initialize') # depends on [control=['for'], data=['analysis']]
ActionHandlerPool.get_instance().resume()
return partition |
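A hedged usage sketch of the partitioning helper above; `sample`, `http_request`, and `analyses` are assumed to come from an existing LIMS context and are not defined here:
# Illustrative only: move two analyses from a received sample into a partition.
partition = create_partition(
    analysis_request=sample,       # primary sample (IAnalysisRequest)
    request=http_request,          # current request object
    analyses=analyses[:2],         # analyses to carry over
    remove_primary_analyses=True,  # detach them from the primary
)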
def set_field(self, field, rate, approach='linear', mode='persistent',
wait_for_stability=True, delay=1):
"""Sets the magnetic field.
:param field: The target field in Oersted.
.. note:: The conversion is 1 Oe = 0.1 mT.
:param rate: The field rate in Oersted per minute.
:param approach: The approach mode, either 'linear', 'no overshoot' or
'oscillate'.
:param mode: The state of the magnet at the end of the charging
process, either 'persistent' or 'driven'.
:param wait_for_stability: If `True`, the function call blocks until
the target field is reached and stable.
    :param delay: Specifies, in seconds, how often the magnet status is
        checked. (This has no effect if wait_for_stability is `False`.)
"""
self.target_field = field, rate, approach, mode
if wait_for_stability and self.system_status['magnet'].startswith('persist'):
# Wait until the persistent switch heats up.
time.sleep(self.magnet_config[5])
while wait_for_stability:
status = self.system_status['magnet']
if status in ('persistent, stable', 'driven, stable'):
break
time.sleep(delay) | def function[set_field, parameter[self, field, rate, approach, mode, wait_for_stability, delay]]:
constant[Sets the magnetic field.
:param field: The target field in Oersted.
.. note:: The conversion is 1 Oe = 0.1 mT.
:param rate: The field rate in Oersted per minute.
:param approach: The approach mode, either 'linear', 'no overshoot' or
'oscillate'.
:param mode: The state of the magnet at the end of the charging
process, either 'persistent' or 'driven'.
:param wait_for_stability: If `True`, the function call blocks until
the target field is reached and stable.
    :param delay: Specifies, in seconds, how often the magnet status is
        checked. (This has no effect if wait_for_stability is `False`.)
]
name[self].target_field assign[=] tuple[[<ast.Name object at 0x7da1b0bda1d0>, <ast.Name object at 0x7da1b0bd8970>, <ast.Name object at 0x7da1b0bd9c90>, <ast.Name object at 0x7da1b0bdb3d0>]]
if <ast.BoolOp object at 0x7da1b0bdae90> begin[:]
call[name[time].sleep, parameter[call[name[self].magnet_config][constant[5]]]]
while name[wait_for_stability] begin[:]
variable[status] assign[=] call[name[self].system_status][constant[magnet]]
if compare[name[status] in tuple[[<ast.Constant object at 0x7da1b0bd83d0>, <ast.Constant object at 0x7da1b0bd94e0>]]] begin[:]
break
call[name[time].sleep, parameter[name[delay]]] | keyword[def] identifier[set_field] ( identifier[self] , identifier[field] , identifier[rate] , identifier[approach] = literal[string] , identifier[mode] = literal[string] ,
identifier[wait_for_stability] = keyword[True] , identifier[delay] = literal[int] ):
literal[string]
identifier[self] . identifier[target_field] = identifier[field] , identifier[rate] , identifier[approach] , identifier[mode]
keyword[if] identifier[wait_for_stability] keyword[and] identifier[self] . identifier[system_status] [ literal[string] ]. identifier[startswith] ( literal[string] ):
identifier[time] . identifier[sleep] ( identifier[self] . identifier[magnet_config] [ literal[int] ])
keyword[while] identifier[wait_for_stability] :
identifier[status] = identifier[self] . identifier[system_status] [ literal[string] ]
keyword[if] identifier[status] keyword[in] ( literal[string] , literal[string] ):
keyword[break]
identifier[time] . identifier[sleep] ( identifier[delay] ) | def set_field(self, field, rate, approach='linear', mode='persistent', wait_for_stability=True, delay=1):
"""Sets the magnetic field.
:param field: The target field in Oersted.
.. note:: The conversion is 1 Oe = 0.1 mT.
:param rate: The field rate in Oersted per minute.
:param approach: The approach mode, either 'linear', 'no overshoot' or
'oscillate'.
:param mode: The state of the magnet at the end of the charging
process, either 'persistent' or 'driven'.
:param wait_for_stability: If `True`, the function call blocks until
the target field is reached and stable.
    :param delay: Specifies, in seconds, how often the magnet status is
        checked. (This has no effect if wait_for_stability is `False`.)
"""
self.target_field = (field, rate, approach, mode)
if wait_for_stability and self.system_status['magnet'].startswith('persist'):
# Wait until the persistent switch heats up.
time.sleep(self.magnet_config[5]) # depends on [control=['if'], data=[]]
while wait_for_stability:
status = self.system_status['magnet']
if status in ('persistent, stable', 'driven, stable'):
break # depends on [control=['if'], data=[]]
time.sleep(delay) # depends on [control=['while'], data=[]] |
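A minimal usage sketch, assuming `magnet` is an instance of the driver class that defines set_field above:
# Illustrative only: ramp to 10000 Oe (1 T) at 100 Oe/min, end persistent,
# and block until the magnet reports a stable field.
magnet.set_field(10000, 100, approach='linear', mode='persistent',
                 wait_for_stability=True, delay=1)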
def dict_filter_update(base, updates):
# type: (dict, dict) -> None
"""
Update dict with None values filtered out.
"""
base.update((k, v) for k, v in updates.items() if v is not None) | def function[dict_filter_update, parameter[base, updates]]:
constant[
Update dict with None values filtered out.
]
call[name[base].update, parameter[<ast.GeneratorExp object at 0x7da1b0948bb0>]] | keyword[def] identifier[dict_filter_update] ( identifier[base] , identifier[updates] ):
literal[string]
identifier[base] . identifier[update] (( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[updates] . identifier[items] () keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] ) | def dict_filter_update(base, updates):
# type: (dict, dict) -> None
'\n Update dict with None values filtered out.\n '
base.update(((k, v) for (k, v) in updates.items() if v is not None)) |
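A tiny self-contained example of the helper above:
# Only non-None values overwrite entries in the base dict.
cfg = {'host': 'localhost', 'port': 8080}
dict_filter_update(cfg, {'host': 'db.example.com', 'port': None})
assert cfg == {'host': 'db.example.com', 'port': 8080}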
def _get_hanging_wall_term(self, C, dists, rup):
"""
Compute and return hanging wall model term, see page 1038.
"""
if rup.dip == 90.0:
return np.zeros_like(dists.rx)
else:
Fhw = np.zeros_like(dists.rx)
Fhw[dists.rx > 0] = 1.
# Compute taper t1
T1 = np.ones_like(dists.rx)
T1 *= 60./45. if rup.dip <= 30. else (90.-rup.dip)/45.0
# Compute taper t2 (eq 12 at page 1039) - a2hw set to 0.2 as
# indicated at page 1041
T2 = np.zeros_like(dists.rx)
a2hw = 0.2
if rup.mag > 6.5:
T2 += (1. + a2hw * (rup.mag - 6.5))
elif rup.mag > 5.5:
T2 += (1. + a2hw * (rup.mag - 6.5) - (1. - a2hw) *
(rup.mag - 6.5)**2)
else:
T2 *= 0.
# Compute taper t3 (eq. 13 at page 1039) - r1 and r2 specified at
# page 1040
T3 = np.zeros_like(dists.rx)
r1 = rup.width * np.cos(np.radians(rup.dip))
r2 = 3. * r1
#
idx = dists.rx < r1
T3[idx] = (np.ones_like(dists.rx)[idx] * self.CONSTS['h1'] +
self.CONSTS['h2'] * (dists.rx[idx] / r1) +
self.CONSTS['h3'] * (dists.rx[idx] / r1)**2)
#
idx = ((dists.rx >= r1) & (dists.rx <= r2))
T3[idx] = 1. - (dists.rx[idx] - r1) / (r2 - r1)
# Compute taper t4 (eq. 14 at page 1040)
T4 = np.zeros_like(dists.rx)
#
if rup.ztor <= 10.:
T4 += (1. - rup.ztor**2. / 100.)
# Compute T5 (eq 15a at page 1040) - ry1 computed according to
# suggestions provided at page 1040
T5 = np.zeros_like(dists.rx)
ry1 = dists.rx * np.tan(np.radians(20.))
#
idx = (dists.ry0 - ry1) <= 0.0
T5[idx] = 1.
#
idx = (((dists.ry0 - ry1) > 0.0) & ((dists.ry0 - ry1) < 5.0))
T5[idx] = 1. - (dists.ry0[idx] - ry1[idx]) / 5.0
# Finally, compute the hanging wall term
return Fhw*C['a13']*T1*T2*T3*T4*T5 | def function[_get_hanging_wall_term, parameter[self, C, dists, rup]]:
constant[
Compute and return hanging wall model term, see page 1038.
]
if compare[name[rup].dip equal[==] constant[90.0]] begin[:]
return[call[name[np].zeros_like, parameter[name[dists].rx]]] | keyword[def] identifier[_get_hanging_wall_term] ( identifier[self] , identifier[C] , identifier[dists] , identifier[rup] ):
literal[string]
keyword[if] identifier[rup] . identifier[dip] == literal[int] :
keyword[return] identifier[np] . identifier[zeros_like] ( identifier[dists] . identifier[rx] )
keyword[else] :
identifier[Fhw] = identifier[np] . identifier[zeros_like] ( identifier[dists] . identifier[rx] )
identifier[Fhw] [ identifier[dists] . identifier[rx] > literal[int] ]= literal[int]
identifier[T1] = identifier[np] . identifier[ones_like] ( identifier[dists] . identifier[rx] )
identifier[T1] *= literal[int] / literal[int] keyword[if] identifier[rup] . identifier[dip] <= literal[int] keyword[else] ( literal[int] - identifier[rup] . identifier[dip] )/ literal[int]
identifier[T2] = identifier[np] . identifier[zeros_like] ( identifier[dists] . identifier[rx] )
identifier[a2hw] = literal[int]
keyword[if] identifier[rup] . identifier[mag] > literal[int] :
identifier[T2] +=( literal[int] + identifier[a2hw] *( identifier[rup] . identifier[mag] - literal[int] ))
keyword[elif] identifier[rup] . identifier[mag] > literal[int] :
identifier[T2] +=( literal[int] + identifier[a2hw] *( identifier[rup] . identifier[mag] - literal[int] )-( literal[int] - identifier[a2hw] )*
( identifier[rup] . identifier[mag] - literal[int] )** literal[int] )
keyword[else] :
identifier[T2] *= literal[int]
identifier[T3] = identifier[np] . identifier[zeros_like] ( identifier[dists] . identifier[rx] )
identifier[r1] = identifier[rup] . identifier[width] * identifier[np] . identifier[cos] ( identifier[np] . identifier[radians] ( identifier[rup] . identifier[dip] ))
identifier[r2] = literal[int] * identifier[r1]
identifier[idx] = identifier[dists] . identifier[rx] < identifier[r1]
identifier[T3] [ identifier[idx] ]=( identifier[np] . identifier[ones_like] ( identifier[dists] . identifier[rx] )[ identifier[idx] ]* identifier[self] . identifier[CONSTS] [ literal[string] ]+
identifier[self] . identifier[CONSTS] [ literal[string] ]*( identifier[dists] . identifier[rx] [ identifier[idx] ]/ identifier[r1] )+
identifier[self] . identifier[CONSTS] [ literal[string] ]*( identifier[dists] . identifier[rx] [ identifier[idx] ]/ identifier[r1] )** literal[int] )
identifier[idx] =(( identifier[dists] . identifier[rx] >= identifier[r1] )&( identifier[dists] . identifier[rx] <= identifier[r2] ))
identifier[T3] [ identifier[idx] ]= literal[int] -( identifier[dists] . identifier[rx] [ identifier[idx] ]- identifier[r1] )/( identifier[r2] - identifier[r1] )
identifier[T4] = identifier[np] . identifier[zeros_like] ( identifier[dists] . identifier[rx] )
keyword[if] identifier[rup] . identifier[ztor] <= literal[int] :
identifier[T4] +=( literal[int] - identifier[rup] . identifier[ztor] ** literal[int] / literal[int] )
identifier[T5] = identifier[np] . identifier[zeros_like] ( identifier[dists] . identifier[rx] )
identifier[ry1] = identifier[dists] . identifier[rx] * identifier[np] . identifier[tan] ( identifier[np] . identifier[radians] ( literal[int] ))
identifier[idx] =( identifier[dists] . identifier[ry0] - identifier[ry1] )<= literal[int]
identifier[T5] [ identifier[idx] ]= literal[int]
identifier[idx] =((( identifier[dists] . identifier[ry0] - identifier[ry1] )> literal[int] )&(( identifier[dists] . identifier[ry0] - identifier[ry1] )< literal[int] ))
identifier[T5] [ identifier[idx] ]= literal[int] -( identifier[dists] . identifier[ry0] [ identifier[idx] ]- identifier[ry1] [ identifier[idx] ])/ literal[int]
keyword[return] identifier[Fhw] * identifier[C] [ literal[string] ]* identifier[T1] * identifier[T2] * identifier[T3] * identifier[T4] * identifier[T5] | def _get_hanging_wall_term(self, C, dists, rup):
"""
Compute and return hanging wall model term, see page 1038.
"""
if rup.dip == 90.0:
return np.zeros_like(dists.rx) # depends on [control=['if'], data=[]]
else:
Fhw = np.zeros_like(dists.rx)
Fhw[dists.rx > 0] = 1.0
# Compute taper t1
T1 = np.ones_like(dists.rx)
T1 *= 60.0 / 45.0 if rup.dip <= 30.0 else (90.0 - rup.dip) / 45.0
# Compute taper t2 (eq 12 at page 1039) - a2hw set to 0.2 as
# indicated at page 1041
T2 = np.zeros_like(dists.rx)
a2hw = 0.2
if rup.mag > 6.5:
T2 += 1.0 + a2hw * (rup.mag - 6.5) # depends on [control=['if'], data=[]]
elif rup.mag > 5.5:
T2 += 1.0 + a2hw * (rup.mag - 6.5) - (1.0 - a2hw) * (rup.mag - 6.5) ** 2 # depends on [control=['if'], data=[]]
else:
T2 *= 0.0
# Compute taper t3 (eq. 13 at page 1039) - r1 and r2 specified at
# page 1040
T3 = np.zeros_like(dists.rx)
r1 = rup.width * np.cos(np.radians(rup.dip))
r2 = 3.0 * r1
#
idx = dists.rx < r1
T3[idx] = np.ones_like(dists.rx)[idx] * self.CONSTS['h1'] + self.CONSTS['h2'] * (dists.rx[idx] / r1) + self.CONSTS['h3'] * (dists.rx[idx] / r1) ** 2
#
idx = (dists.rx >= r1) & (dists.rx <= r2)
T3[idx] = 1.0 - (dists.rx[idx] - r1) / (r2 - r1)
# Compute taper t4 (eq. 14 at page 1040)
T4 = np.zeros_like(dists.rx)
#
if rup.ztor <= 10.0:
T4 += 1.0 - rup.ztor ** 2.0 / 100.0 # depends on [control=['if'], data=[]]
# Compute T5 (eq 15a at page 1040) - ry1 computed according to
# suggestions provided at page 1040
T5 = np.zeros_like(dists.rx)
ry1 = dists.rx * np.tan(np.radians(20.0))
#
idx = dists.ry0 - ry1 <= 0.0
T5[idx] = 1.0
#
idx = (dists.ry0 - ry1 > 0.0) & (dists.ry0 - ry1 < 5.0)
T5[idx] = 1.0 - (dists.ry0[idx] - ry1[idx]) / 5.0
# Finally, compute the hanging wall term
return Fhw * C['a13'] * T1 * T2 * T3 * T4 * T5 |
def min_volatility(self):
"""
Minimise volatility.
:return: asset weights for the volatility-minimising portfolio
:rtype: dict
"""
args = (self.cov_matrix, self.gamma)
result = sco.minimize(
objective_functions.volatility,
x0=self.initial_guess,
args=args,
method="SLSQP",
bounds=self.bounds,
constraints=self.constraints,
)
self.weights = result["x"]
return dict(zip(self.tickers, self.weights)) | def function[min_volatility, parameter[self]]:
constant[
Minimise volatility.
:return: asset weights for the volatility-minimising portfolio
:rtype: dict
]
variable[args] assign[=] tuple[[<ast.Attribute object at 0x7da204565870>, <ast.Attribute object at 0x7da204566650>]]
variable[result] assign[=] call[name[sco].minimize, parameter[name[objective_functions].volatility]]
name[self].weights assign[=] call[name[result]][constant[x]]
return[call[name[dict], parameter[call[name[zip], parameter[name[self].tickers, name[self].weights]]]]] | keyword[def] identifier[min_volatility] ( identifier[self] ):
literal[string]
identifier[args] =( identifier[self] . identifier[cov_matrix] , identifier[self] . identifier[gamma] )
identifier[result] = identifier[sco] . identifier[minimize] (
identifier[objective_functions] . identifier[volatility] ,
identifier[x0] = identifier[self] . identifier[initial_guess] ,
identifier[args] = identifier[args] ,
identifier[method] = literal[string] ,
identifier[bounds] = identifier[self] . identifier[bounds] ,
identifier[constraints] = identifier[self] . identifier[constraints] ,
)
identifier[self] . identifier[weights] = identifier[result] [ literal[string] ]
keyword[return] identifier[dict] ( identifier[zip] ( identifier[self] . identifier[tickers] , identifier[self] . identifier[weights] )) | def min_volatility(self):
"""
Minimise volatility.
:return: asset weights for the volatility-minimising portfolio
:rtype: dict
"""
args = (self.cov_matrix, self.gamma)
result = sco.minimize(objective_functions.volatility, x0=self.initial_guess, args=args, method='SLSQP', bounds=self.bounds, constraints=self.constraints)
self.weights = result['x']
return dict(zip(self.tickers, self.weights)) |
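A hedged usage sketch; `ef` is assumed to be an already-constructed optimizer instance (with tickers, cov_matrix, gamma, bounds, and constraints set) that exposes the method above:
# Illustrative only: the resulting weights depend entirely on the
# instance's covariance matrix and constraints.
weights = ef.min_volatility()
print(weights)  # e.g. {'AAA': 0.71, 'BBB': 0.29}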
def idle_task(self):
'''handle missing log data'''
if self.download_last_timestamp is not None and time.time() - self.download_last_timestamp > 0.7:
self.download_last_timestamp = time.time()
self.handle_log_data_missing() | def function[idle_task, parameter[self]]:
constant[handle missing log data]
if <ast.BoolOp object at 0x7da18f09f6a0> begin[:]
name[self].download_last_timestamp assign[=] call[name[time].time, parameter[]]
call[name[self].handle_log_data_missing, parameter[]] | keyword[def] identifier[idle_task] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[download_last_timestamp] keyword[is] keyword[not] keyword[None] keyword[and] identifier[time] . identifier[time] ()- identifier[self] . identifier[download_last_timestamp] > literal[int] :
identifier[self] . identifier[download_last_timestamp] = identifier[time] . identifier[time] ()
identifier[self] . identifier[handle_log_data_missing] () | def idle_task(self):
"""handle missing log data"""
if self.download_last_timestamp is not None and time.time() - self.download_last_timestamp > 0.7:
self.download_last_timestamp = time.time()
self.handle_log_data_missing() # depends on [control=['if'], data=[]] |
def search_conf_item(start_path, item_type, item_name):
""" search expected function or variable recursive upward
@param
start_path: search start path
item_type: "function" or "variable"
item_name: function name or variable name
e.g.
search_conf_item('C:/Users/RockFeng/Desktop/s/preference.py','function','test_func')
"""
dir_path = os.path.dirname(os.path.abspath(start_path))
target_file = os.path.join(dir_path, "preference.py")
if os.path.isfile(target_file):
imported_module = ModuleUtils.get_imported_module_from_file(target_file)
items_dict = ModuleUtils.filter_module(imported_module, item_type)
if item_name in items_dict:
return items_dict[item_name]
else:
return ModuleUtils.search_conf_item(dir_path, item_type, item_name)
if dir_path == start_path:
# system root path
err_msg = "'{}' not found in recursive upward path!".format(item_name)
if item_type == "function":
raise p_exception.FunctionNotFound(err_msg)
else:
raise p_exception.VariableNotFound(err_msg)
return ModuleUtils.search_conf_item(dir_path, item_type, item_name) | def function[search_conf_item, parameter[start_path, item_type, item_name]]:
    constant[ search expected function or variable recursively upward
@param
start_path: search start path
item_type: "function" or "variable"
item_name: function name or variable name
e.g.
search_conf_item('C:/Users/RockFeng/Desktop/s/preference.py','function','test_func')
]
variable[dir_path] assign[=] call[name[os].path.dirname, parameter[call[name[os].path.abspath, parameter[name[start_path]]]]]
variable[target_file] assign[=] call[name[os].path.join, parameter[name[dir_path], constant[preference.py]]]
if call[name[os].path.isfile, parameter[name[target_file]]] begin[:]
variable[imported_module] assign[=] call[name[ModuleUtils].get_imported_module_from_file, parameter[name[target_file]]]
variable[items_dict] assign[=] call[name[ModuleUtils].filter_module, parameter[name[imported_module], name[item_type]]]
if compare[name[item_name] in name[items_dict]] begin[:]
return[call[name[items_dict]][name[item_name]]]
if compare[name[dir_path] equal[==] name[start_path]] begin[:]
variable[err_msg] assign[=] call[constant['{}' not found in recursive upward path!].format, parameter[name[item_name]]]
if compare[name[item_type] equal[==] constant[function]] begin[:]
<ast.Raise object at 0x7da1b1021810>
return[call[name[ModuleUtils].search_conf_item, parameter[name[dir_path], name[item_type], name[item_name]]]] | keyword[def] identifier[search_conf_item] ( identifier[start_path] , identifier[item_type] , identifier[item_name] ):
literal[string]
identifier[dir_path] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[start_path] ))
identifier[target_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[dir_path] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[target_file] ):
identifier[imported_module] = identifier[ModuleUtils] . identifier[get_imported_module_from_file] ( identifier[target_file] )
identifier[items_dict] = identifier[ModuleUtils] . identifier[filter_module] ( identifier[imported_module] , identifier[item_type] )
keyword[if] identifier[item_name] keyword[in] identifier[items_dict] :
keyword[return] identifier[items_dict] [ identifier[item_name] ]
keyword[else] :
keyword[return] identifier[ModuleUtils] . identifier[search_conf_item] ( identifier[dir_path] , identifier[item_type] , identifier[item_name] )
keyword[if] identifier[dir_path] == identifier[start_path] :
identifier[err_msg] = literal[string] . identifier[format] ( identifier[item_name] )
keyword[if] identifier[item_type] == literal[string] :
keyword[raise] identifier[p_exception] . identifier[FunctionNotFound] ( identifier[err_msg] )
keyword[else] :
keyword[raise] identifier[p_exception] . identifier[VariableNotFound] ( identifier[err_msg] )
keyword[return] identifier[ModuleUtils] . identifier[search_conf_item] ( identifier[dir_path] , identifier[item_type] , identifier[item_name] ) | def search_conf_item(start_path, item_type, item_name):
""" search expected function or variable recursive upward
@param
start_path: search start path
item_type: "function" or "variable"
item_name: function name or variable name
e.g.
search_conf_item('C:/Users/RockFeng/Desktop/s/preference.py','function','test_func')
"""
dir_path = os.path.dirname(os.path.abspath(start_path))
target_file = os.path.join(dir_path, 'preference.py')
if os.path.isfile(target_file):
imported_module = ModuleUtils.get_imported_module_from_file(target_file)
items_dict = ModuleUtils.filter_module(imported_module, item_type)
if item_name in items_dict:
return items_dict[item_name] # depends on [control=['if'], data=['item_name', 'items_dict']]
else:
return ModuleUtils.search_conf_item(dir_path, item_type, item_name) # depends on [control=['if'], data=[]]
if dir_path == start_path: # system root path
err_msg = "'{}' not found in recursive upward path!".format(item_name)
if item_type == 'function':
raise p_exception.FunctionNotFound(err_msg) # depends on [control=['if'], data=[]]
else:
raise p_exception.VariableNotFound(err_msg) # depends on [control=['if'], data=[]]
return ModuleUtils.search_conf_item(dir_path, item_type, item_name) |
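A usage sketch mirroring the docstring example; the path and names are illustrative:
# Walks parent directories looking for a preference.py that defines test_func;
# raises FunctionNotFound if the filesystem root is reached without a match.
func = ModuleUtils.search_conf_item(
    'C:/Users/RockFeng/Desktop/s/preference.py', 'function', 'test_func')
result = func()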
def addToCache(self, localFilePath, jobStoreFileID, callingFunc, mutable=False):
"""
Used to process the caching of a file. This depends on whether a file is being written
to file store, or read from it.
WRITING
The file is in localTempDir. It needs to be linked into cache if possible.
READING
    The file is already in the cache dir. Depending on whether it is mutable, it either
    needs to be linked to the required location or copied. If it is copied, the file may
    still be retained in the cache.
:param str localFilePath: Path to the Source file
:param jobStoreFileID: jobStoreID for the file
:param str callingFunc: Who called this function, 'write' or 'read'
:param bool mutable: See modifiable in readGlobalFile
"""
assert callingFunc in ('read', 'write')
with self.cacheLock() as lockFileHandle:
cachedFile = self.encodedFileID(jobStoreFileID)
# The file to be cached MUST originate in the environment of the TOIL temp directory
if (os.stat(self.localCacheDir).st_dev !=
os.stat(os.path.dirname(localFilePath)).st_dev):
raise InvalidSourceCacheError('Attempting to cache a file across file systems '
'cachedir = %s, file = %s.' % (self.localCacheDir,
localFilePath))
if not localFilePath.startswith(self.localTempDir):
raise InvalidSourceCacheError('Attempting a cache operation on a non-local file '
'%s.' % localFilePath)
if callingFunc == 'read' and mutable:
shutil.copyfile(cachedFile, localFilePath)
fileSize = os.stat(cachedFile).st_size
cacheInfo = self._CacheState._load(self.cacheStateFile)
cacheInfo.cached += fileSize if cacheInfo.nlink != 2 else 0
if not cacheInfo.isBalanced():
os.remove(cachedFile)
cacheInfo.cached -= fileSize if cacheInfo.nlink != 2 else 0
                logger.debug('Could not both download ' +
                             '%s as mutable and add it to the ' % os.path.basename(localFilePath) +
                             'cache. Hence only the mutable copy was retained.')
else:
logger.debug('CACHE: Added file with ID \'%s\' to the cache.' %
jobStoreFileID)
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobState.addToJobSpecFiles(jobStoreFileID, localFilePath, -1, False)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
else:
# There are two possibilities, read and immutable, and write. both cases do
# almost the same thing except for the direction of the os.link hence we're
# writing them together.
if callingFunc == 'read': # and mutable is inherently False
src = cachedFile
dest = localFilePath
# To mirror behaviour of shutil.copyfile
if os.path.exists(dest):
os.remove(dest)
else: # write
src = localFilePath
dest = cachedFile
try:
os.link(src, dest)
except OSError as err:
if err.errno != errno.EEXIST:
raise
# If we get the EEXIST error, it can only be from write since in read we are
# explicitly deleting the file. This shouldn't happen with the .partial
# logic hence we raise a cache error.
raise CacheError('Attempting to recache a file %s.' % src)
else:
# Chmod the cached file. Cached files can never be modified.
os.chmod(cachedFile, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
# Return the filesize of cachedFile to the job and increase the cached size
# The values passed here don't matter since rFS looks at the file only for
# the stat
self.returnFileSize(jobStoreFileID, localFilePath, lockFileHandle,
fileAlreadyCached=False)
if callingFunc == 'read':
logger.debug('CACHE: Read file with ID \'%s\' from the cache.' %
jobStoreFileID)
else:
logger.debug('CACHE: Added file with ID \'%s\' to the cache.' %
jobStoreFileID) | def function[addToCache, parameter[self, localFilePath, jobStoreFileID, callingFunc, mutable]]:
constant[
Used to process the caching of a file. This depends on whether a file is being written
to file store, or read from it.
WRITING
The file is in localTempDir. It needs to be linked into cache if possible.
READING
    The file is already in the cache dir. Depending on whether it is mutable, it either
    needs to be linked to the required location or copied. If it is copied, the file may
    still be retained in the cache.
:param str localFilePath: Path to the Source file
:param jobStoreFileID: jobStoreID for the file
:param str callingFunc: Who called this function, 'write' or 'read'
:param bool mutable: See modifiable in readGlobalFile
]
assert[compare[name[callingFunc] in tuple[[<ast.Constant object at 0x7da212db4cd0>, <ast.Constant object at 0x7da212db4c70>]]]]
with call[name[self].cacheLock, parameter[]] begin[:]
variable[cachedFile] assign[=] call[name[self].encodedFileID, parameter[name[jobStoreFileID]]]
if compare[call[name[os].stat, parameter[name[self].localCacheDir]].st_dev not_equal[!=] call[name[os].stat, parameter[call[name[os].path.dirname, parameter[name[localFilePath]]]]].st_dev] begin[:]
<ast.Raise object at 0x7da18f58cc70>
if <ast.UnaryOp object at 0x7da20eb294b0> begin[:]
<ast.Raise object at 0x7da20eb28c10>
if <ast.BoolOp object at 0x7da20c76d0f0> begin[:]
call[name[shutil].copyfile, parameter[name[cachedFile], name[localFilePath]]]
variable[fileSize] assign[=] call[name[os].stat, parameter[name[cachedFile]]].st_size
variable[cacheInfo] assign[=] call[name[self]._CacheState._load, parameter[name[self].cacheStateFile]]
<ast.AugAssign object at 0x7da20c76d090>
if <ast.UnaryOp object at 0x7da20c76e410> begin[:]
call[name[os].remove, parameter[name[cachedFile]]]
<ast.AugAssign object at 0x7da20c76c670>
        call[name[logger].debug, parameter[binary_operation[binary_operation[constant[Could not both download ] + binary_operation[constant[%s as mutable and add it to the ] <ast.Mod object at 0x7da2590d6920> call[name[os].path.basename, parameter[name[localFilePath]]]]] + constant[cache. Hence only the mutable copy was retained.]]]]
variable[jobState] assign[=] call[name[self]._JobState, parameter[call[name[cacheInfo].jobState][name[self].jobID]]]
call[name[jobState].addToJobSpecFiles, parameter[name[jobStoreFileID], name[localFilePath], <ast.UnaryOp object at 0x7da1b1eede10>, constant[False]]]
call[name[cacheInfo].jobState][name[self].jobID] assign[=] name[jobState].__dict__
call[name[cacheInfo].write, parameter[name[self].cacheStateFile]] | keyword[def] identifier[addToCache] ( identifier[self] , identifier[localFilePath] , identifier[jobStoreFileID] , identifier[callingFunc] , identifier[mutable] = keyword[False] ):
literal[string]
keyword[assert] identifier[callingFunc] keyword[in] ( literal[string] , literal[string] )
keyword[with] identifier[self] . identifier[cacheLock] () keyword[as] identifier[lockFileHandle] :
identifier[cachedFile] = identifier[self] . identifier[encodedFileID] ( identifier[jobStoreFileID] )
keyword[if] ( identifier[os] . identifier[stat] ( identifier[self] . identifier[localCacheDir] ). identifier[st_dev] !=
identifier[os] . identifier[stat] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[localFilePath] )). identifier[st_dev] ):
keyword[raise] identifier[InvalidSourceCacheError] ( literal[string]
literal[string] %( identifier[self] . identifier[localCacheDir] ,
identifier[localFilePath] ))
keyword[if] keyword[not] identifier[localFilePath] . identifier[startswith] ( identifier[self] . identifier[localTempDir] ):
keyword[raise] identifier[InvalidSourceCacheError] ( literal[string]
literal[string] % identifier[localFilePath] )
keyword[if] identifier[callingFunc] == literal[string] keyword[and] identifier[mutable] :
identifier[shutil] . identifier[copyfile] ( identifier[cachedFile] , identifier[localFilePath] )
identifier[fileSize] = identifier[os] . identifier[stat] ( identifier[cachedFile] ). identifier[st_size]
identifier[cacheInfo] = identifier[self] . identifier[_CacheState] . identifier[_load] ( identifier[self] . identifier[cacheStateFile] )
identifier[cacheInfo] . identifier[cached] += identifier[fileSize] keyword[if] identifier[cacheInfo] . identifier[nlink] != literal[int] keyword[else] literal[int]
keyword[if] keyword[not] identifier[cacheInfo] . identifier[isBalanced] ():
identifier[os] . identifier[remove] ( identifier[cachedFile] )
identifier[cacheInfo] . identifier[cached] -= identifier[fileSize] keyword[if] identifier[cacheInfo] . identifier[nlink] != literal[int] keyword[else] literal[int]
identifier[logger] . identifier[debug] ( literal[string] +
literal[string] % identifier[os] . identifier[path] . identifier[basename] ( identifier[localFilePath] )+
literal[string] )
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] %
identifier[jobStoreFileID] )
identifier[jobState] = identifier[self] . identifier[_JobState] ( identifier[cacheInfo] . identifier[jobState] [ identifier[self] . identifier[jobID] ])
identifier[jobState] . identifier[addToJobSpecFiles] ( identifier[jobStoreFileID] , identifier[localFilePath] ,- literal[int] , keyword[False] )
identifier[cacheInfo] . identifier[jobState] [ identifier[self] . identifier[jobID] ]= identifier[jobState] . identifier[__dict__]
identifier[cacheInfo] . identifier[write] ( identifier[self] . identifier[cacheStateFile] )
keyword[else] :
keyword[if] identifier[callingFunc] == literal[string] :
identifier[src] = identifier[cachedFile]
identifier[dest] = identifier[localFilePath]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[dest] ):
identifier[os] . identifier[remove] ( identifier[dest] )
keyword[else] :
identifier[src] = identifier[localFilePath]
identifier[dest] = identifier[cachedFile]
keyword[try] :
identifier[os] . identifier[link] ( identifier[src] , identifier[dest] )
keyword[except] identifier[OSError] keyword[as] identifier[err] :
keyword[if] identifier[err] . identifier[errno] != identifier[errno] . identifier[EEXIST] :
keyword[raise]
keyword[raise] identifier[CacheError] ( literal[string] % identifier[src] )
keyword[else] :
identifier[os] . identifier[chmod] ( identifier[cachedFile] , identifier[stat] . identifier[S_IRUSR] | identifier[stat] . identifier[S_IRGRP] | identifier[stat] . identifier[S_IROTH] )
identifier[self] . identifier[returnFileSize] ( identifier[jobStoreFileID] , identifier[localFilePath] , identifier[lockFileHandle] ,
identifier[fileAlreadyCached] = keyword[False] )
keyword[if] identifier[callingFunc] == literal[string] :
identifier[logger] . identifier[debug] ( literal[string] %
identifier[jobStoreFileID] )
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] %
identifier[jobStoreFileID] ) | def addToCache(self, localFilePath, jobStoreFileID, callingFunc, mutable=False):
"""
Used to process the caching of a file. This depends on whether a file is being written
to file store, or read from it.
WRITING
The file is in localTempDir. It needs to be linked into cache if possible.
READING
    The file is already in the cache dir. Depending on whether it is mutable, it either
    needs to be linked to the required location or copied. If it is copied, the file may
    still be retained in the cache.
:param str localFilePath: Path to the Source file
:param jobStoreFileID: jobStoreID for the file
:param str callingFunc: Who called this function, 'write' or 'read'
:param bool mutable: See modifiable in readGlobalFile
"""
assert callingFunc in ('read', 'write')
with self.cacheLock() as lockFileHandle:
cachedFile = self.encodedFileID(jobStoreFileID)
# The file to be cached MUST originate in the environment of the TOIL temp directory
if os.stat(self.localCacheDir).st_dev != os.stat(os.path.dirname(localFilePath)).st_dev:
raise InvalidSourceCacheError('Attempting to cache a file across file systems cachedir = %s, file = %s.' % (self.localCacheDir, localFilePath)) # depends on [control=['if'], data=[]]
if not localFilePath.startswith(self.localTempDir):
raise InvalidSourceCacheError('Attempting a cache operation on a non-local file %s.' % localFilePath) # depends on [control=['if'], data=[]]
if callingFunc == 'read' and mutable:
shutil.copyfile(cachedFile, localFilePath)
fileSize = os.stat(cachedFile).st_size
cacheInfo = self._CacheState._load(self.cacheStateFile)
cacheInfo.cached += fileSize if cacheInfo.nlink != 2 else 0
if not cacheInfo.isBalanced():
os.remove(cachedFile)
cacheInfo.cached -= fileSize if cacheInfo.nlink != 2 else 0
                logger.debug('Could not both download ' + '%s as mutable and add it to the ' % os.path.basename(localFilePath) + 'cache. Hence only the mutable copy was retained.') # depends on [control=['if'], data=[]]
else:
logger.debug("CACHE: Added file with ID '%s' to the cache." % jobStoreFileID)
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobState.addToJobSpecFiles(jobStoreFileID, localFilePath, -1, False)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile) # depends on [control=['if'], data=[]]
else:
# There are two possibilities, read and immutable, and write. both cases do
# almost the same thing except for the direction of the os.link hence we're
# writing them together.
if callingFunc == 'read': # and mutable is inherently False
src = cachedFile
dest = localFilePath
# To mirror behaviour of shutil.copyfile
if os.path.exists(dest):
os.remove(dest) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else: # write
src = localFilePath
dest = cachedFile
try:
os.link(src, dest) # depends on [control=['try'], data=[]]
except OSError as err:
if err.errno != errno.EEXIST:
raise # depends on [control=['if'], data=[]]
# If we get the EEXIST error, it can only be from write since in read we are
# explicitly deleting the file. This shouldn't happen with the .partial
# logic hence we raise a cache error.
raise CacheError('Attempting to recache a file %s.' % src) # depends on [control=['except'], data=['err']]
else:
# Chmod the cached file. Cached files can never be modified.
os.chmod(cachedFile, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
# Return the filesize of cachedFile to the job and increase the cached size
# The values passed here don't matter since rFS looks at the file only for
# the stat
self.returnFileSize(jobStoreFileID, localFilePath, lockFileHandle, fileAlreadyCached=False)
if callingFunc == 'read':
logger.debug("CACHE: Read file with ID '%s' from the cache." % jobStoreFileID) # depends on [control=['if'], data=[]]
else:
logger.debug("CACHE: Added file with ID '%s' to the cache." % jobStoreFileID) # depends on [control=['with'], data=['lockFileHandle']] |
def get_rubric(self):
"""Gets the rubric.
return: (osid.assessment.AssessmentTaken) - the assessment taken
raise: IllegalState - ``has_rubric()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not bool(self._my_map['rubricId']):
raise errors.IllegalState('this AssessmentTaken has no rubric')
mgr = self._get_provider_manager('ASSESSMENT')
if not mgr.supports_assessment_taken_lookup():
raise errors.OperationFailed('Assessment does not support AssessmentTaken lookup')
lookup_session = mgr.get_assessment_taken_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_bank_view()
osid_object = lookup_session.get_assessment_taken(self.get_rubric_id())
return osid_object | def function[get_rubric, parameter[self]]:
constant[Gets the rubric.
return: (osid.assessment.AssessmentTaken) - the assessment taken
raise: IllegalState - ``has_rubric()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
]
if <ast.UnaryOp object at 0x7da20c7cbca0> begin[:]
<ast.Raise object at 0x7da20c7c9450>
variable[mgr] assign[=] call[name[self]._get_provider_manager, parameter[constant[ASSESSMENT]]]
if <ast.UnaryOp object at 0x7da20c7caaa0> begin[:]
<ast.Raise object at 0x7da2046224d0>
variable[lookup_session] assign[=] call[name[mgr].get_assessment_taken_lookup_session, parameter[]]
call[name[lookup_session].use_federated_bank_view, parameter[]]
variable[osid_object] assign[=] call[name[lookup_session].get_assessment_taken, parameter[call[name[self].get_rubric_id, parameter[]]]]
return[name[osid_object]] | keyword[def] identifier[get_rubric] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[bool] ( identifier[self] . identifier[_my_map] [ literal[string] ]):
keyword[raise] identifier[errors] . identifier[IllegalState] ( literal[string] )
identifier[mgr] = identifier[self] . identifier[_get_provider_manager] ( literal[string] )
keyword[if] keyword[not] identifier[mgr] . identifier[supports_assessment_taken_lookup] ():
keyword[raise] identifier[errors] . identifier[OperationFailed] ( literal[string] )
identifier[lookup_session] = identifier[mgr] . identifier[get_assessment_taken_lookup_session] ( identifier[proxy] = identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ))
identifier[lookup_session] . identifier[use_federated_bank_view] ()
identifier[osid_object] = identifier[lookup_session] . identifier[get_assessment_taken] ( identifier[self] . identifier[get_rubric_id] ())
keyword[return] identifier[osid_object] | def get_rubric(self):
"""Gets the rubric.
return: (osid.assessment.AssessmentTaken) - the assessment taken
raise: IllegalState - ``has_rubric()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not bool(self._my_map['rubricId']):
raise errors.IllegalState('this AssessmentTaken has no rubric') # depends on [control=['if'], data=[]]
mgr = self._get_provider_manager('ASSESSMENT')
if not mgr.supports_assessment_taken_lookup():
raise errors.OperationFailed('Assessment does not support AssessmentTaken lookup') # depends on [control=['if'], data=[]]
lookup_session = mgr.get_assessment_taken_lookup_session(proxy=getattr(self, '_proxy', None))
lookup_session.use_federated_bank_view()
osid_object = lookup_session.get_assessment_taken(self.get_rubric_id())
return osid_object |
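
A hedged usage sketch for the method above -- `taken` is assumed to be an `AssessmentTaken` obtained from a lookup session elsewhere, not something the sample provides. Guarding with `has_rubric()`, the accessor named in the docstring's `IllegalState` clause, sidesteps the error path:

def rubric_or_none(taken):
    # When has_rubric() is false, get_rubric() would raise IllegalState,
    # so check first and fall back to None.
    return taken.get_rubric() if taken.has_rubric() else None
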
def psit(t, xp, x):
""" score of the model (gradient of log-likelihood at theta=theta_0)
"""
if t == 0:
return -0.5 / sigma0**2 + \
(0.5 * (1. - phi0**2) / sigma0**4) * (x - mu0)**2
else:
return -0.5 / sigma0**2 + (0.5 / sigma0**4) * \
((x - mu0) - phi0 * (xp - mu0))**2 | def function[psit, parameter[t, xp, x]]:
constant[ score of the model (gradient of log-likelihood at theta=theta_0)
]
if compare[name[t] equal[==] constant[0]] begin[:]
return[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da20e957dc0> / binary_operation[name[sigma0] ** constant[2]]] + binary_operation[binary_operation[binary_operation[constant[0.5] * binary_operation[constant[1.0] - binary_operation[name[phi0] ** constant[2]]]] / binary_operation[name[sigma0] ** constant[4]]] * binary_operation[binary_operation[name[x] - name[mu0]] ** constant[2]]]]] | keyword[def] identifier[psit] ( identifier[t] , identifier[xp] , identifier[x] ):
literal[string]
keyword[if] identifier[t] == literal[int] :
keyword[return] - literal[int] / identifier[sigma0] ** literal[int] +( literal[int] *( literal[int] - identifier[phi0] ** literal[int] )/ identifier[sigma0] ** literal[int] )*( identifier[x] - identifier[mu0] )** literal[int]
keyword[else] :
keyword[return] - literal[int] / identifier[sigma0] ** literal[int] +( literal[int] / identifier[sigma0] ** literal[int] )*(( identifier[x] - identifier[mu0] )- identifier[phi0] *( identifier[xp] - identifier[mu0] ))** literal[int] | def psit(t, xp, x):
""" score of the model (gradient of log-likelihood at theta=theta_0)
"""
if t == 0:
return -0.5 / sigma0 ** 2 + 0.5 * (1.0 - phi0 ** 2) / sigma0 ** 4 * (x - mu0) ** 2 # depends on [control=['if'], data=[]]
else:
return -0.5 / sigma0 ** 2 + 0.5 / sigma0 ** 4 * (x - mu0 - phi0 * (xp - mu0)) ** 2 |
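
Reading off the constants, `psit` appears to be the score with respect to the innovation variance sigma^2 of a stationary Gaussian AR(1) model, evaluated at (mu0, phi0, sigma0^2) -- an interpretation inferred from the symbols, not stated in the sample. Under that assumption, with x_0 ~ N(mu0, sigma0^2/(1-phi0^2)) and x_t | x_{t-1} ~ N(mu0 + phi0 (x_{t-1} - mu0), sigma0^2), differentiating the log-densities reproduces both branches of the function:

\[
\frac{\partial}{\partial \sigma^2}\log p_\theta(x_0)\Big|_{\theta_0}
= -\frac{1}{2\sigma_0^2} + \frac{(1-\phi_0^2)\,(x_0-\mu_0)^2}{2\sigma_0^4},
\]
\[
\frac{\partial}{\partial \sigma^2}\log p_\theta(x_t \mid x_{t-1})\Big|_{\theta_0}
= -\frac{1}{2\sigma_0^2} + \frac{\bigl((x_t-\mu_0)-\phi_0(x_{t-1}-\mu_0)\bigr)^2}{2\sigma_0^4}.
\]
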
def _image_gradients(self, input_csvlines, label, image_column_name):
"""Compute gradients from prob of label to image. Used by integrated gradients (probe)."""
with tf.Graph().as_default() as g, tf.Session() as sess:
logging_level = tf.logging.get_verbosity()
try:
tf.logging.set_verbosity(tf.logging.ERROR)
meta_graph_pb = tf.saved_model.loader.load(
sess=sess,
tags=[tf.saved_model.tag_constants.SERVING],
export_dir=self._model_dir)
finally:
tf.logging.set_verbosity(logging_level)
signature = meta_graph_pb.signature_def['serving_default']
input_alias_map = {name: tensor_info_proto.name
for (name, tensor_info_proto) in signature.inputs.items()}
output_alias_map = {name: tensor_info_proto.name
for (name, tensor_info_proto) in signature.outputs.items()}
csv_tensor_name = list(input_alias_map.values())[0]
      # The image tensor is already built into the ML Workbench graph.
float_image = g.get_tensor_by_name("import/gradients_%s:0" % image_column_name)
if label not in output_alias_map:
raise ValueError('The label "%s" does not exist in output map.' % label)
prob = g.get_tensor_by_name(output_alias_map[label])
grads = tf.gradients(prob, float_image)[0]
grads_values = sess.run(fetches=grads, feed_dict={csv_tensor_name: input_csvlines})
return grads_values | def function[_image_gradients, parameter[self, input_csvlines, label, image_column_name]]:
    constant[Compute gradients of the probability of label with respect to the image. Used by integrated gradients (probe).]
with call[call[name[tf].Graph, parameter[]].as_default, parameter[]] begin[:]
variable[logging_level] assign[=] call[name[tf].logging.get_verbosity, parameter[]]
<ast.Try object at 0x7da18f00c910>
variable[signature] assign[=] call[name[meta_graph_pb].signature_def][constant[serving_default]]
variable[input_alias_map] assign[=] <ast.DictComp object at 0x7da18f00e530>
variable[output_alias_map] assign[=] <ast.DictComp object at 0x7da18f00f040>
variable[csv_tensor_name] assign[=] call[call[name[list], parameter[call[name[input_alias_map].values, parameter[]]]]][constant[0]]
variable[float_image] assign[=] call[name[g].get_tensor_by_name, parameter[binary_operation[constant[import/gradients_%s:0] <ast.Mod object at 0x7da2590d6920> name[image_column_name]]]]
if compare[name[label] <ast.NotIn object at 0x7da2590d7190> name[output_alias_map]] begin[:]
<ast.Raise object at 0x7da18f00eef0>
variable[prob] assign[=] call[name[g].get_tensor_by_name, parameter[call[name[output_alias_map]][name[label]]]]
variable[grads] assign[=] call[call[name[tf].gradients, parameter[name[prob], name[float_image]]]][constant[0]]
variable[grads_values] assign[=] call[name[sess].run, parameter[]]
return[name[grads_values]] | keyword[def] identifier[_image_gradients] ( identifier[self] , identifier[input_csvlines] , identifier[label] , identifier[image_column_name] ):
literal[string]
keyword[with] identifier[tf] . identifier[Graph] (). identifier[as_default] () keyword[as] identifier[g] , identifier[tf] . identifier[Session] () keyword[as] identifier[sess] :
identifier[logging_level] = identifier[tf] . identifier[logging] . identifier[get_verbosity] ()
keyword[try] :
identifier[tf] . identifier[logging] . identifier[set_verbosity] ( identifier[tf] . identifier[logging] . identifier[ERROR] )
identifier[meta_graph_pb] = identifier[tf] . identifier[saved_model] . identifier[loader] . identifier[load] (
identifier[sess] = identifier[sess] ,
identifier[tags] =[ identifier[tf] . identifier[saved_model] . identifier[tag_constants] . identifier[SERVING] ],
identifier[export_dir] = identifier[self] . identifier[_model_dir] )
keyword[finally] :
identifier[tf] . identifier[logging] . identifier[set_verbosity] ( identifier[logging_level] )
identifier[signature] = identifier[meta_graph_pb] . identifier[signature_def] [ literal[string] ]
identifier[input_alias_map] ={ identifier[name] : identifier[tensor_info_proto] . identifier[name]
keyword[for] ( identifier[name] , identifier[tensor_info_proto] ) keyword[in] identifier[signature] . identifier[inputs] . identifier[items] ()}
identifier[output_alias_map] ={ identifier[name] : identifier[tensor_info_proto] . identifier[name]
keyword[for] ( identifier[name] , identifier[tensor_info_proto] ) keyword[in] identifier[signature] . identifier[outputs] . identifier[items] ()}
identifier[csv_tensor_name] = identifier[list] ( identifier[input_alias_map] . identifier[values] ())[ literal[int] ]
identifier[float_image] = identifier[g] . identifier[get_tensor_by_name] ( literal[string] % identifier[image_column_name] )
keyword[if] identifier[label] keyword[not] keyword[in] identifier[output_alias_map] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[label] )
identifier[prob] = identifier[g] . identifier[get_tensor_by_name] ( identifier[output_alias_map] [ identifier[label] ])
identifier[grads] = identifier[tf] . identifier[gradients] ( identifier[prob] , identifier[float_image] )[ literal[int] ]
identifier[grads_values] = identifier[sess] . identifier[run] ( identifier[fetches] = identifier[grads] , identifier[feed_dict] ={ identifier[csv_tensor_name] : identifier[input_csvlines] })
keyword[return] identifier[grads_values] | def _image_gradients(self, input_csvlines, label, image_column_name):
"""Compute gradients from prob of label to image. Used by integrated gradients (probe)."""
with tf.Graph().as_default() as g, tf.Session() as sess:
logging_level = tf.logging.get_verbosity()
try:
tf.logging.set_verbosity(tf.logging.ERROR)
meta_graph_pb = tf.saved_model.loader.load(sess=sess, tags=[tf.saved_model.tag_constants.SERVING], export_dir=self._model_dir) # depends on [control=['try'], data=[]]
finally:
tf.logging.set_verbosity(logging_level)
signature = meta_graph_pb.signature_def['serving_default']
input_alias_map = {name: tensor_info_proto.name for (name, tensor_info_proto) in signature.inputs.items()}
output_alias_map = {name: tensor_info_proto.name for (name, tensor_info_proto) in signature.outputs.items()}
csv_tensor_name = list(input_alias_map.values())[0]
        # The image tensor is already built into the ML Workbench graph.
float_image = g.get_tensor_by_name('import/gradients_%s:0' % image_column_name)
if label not in output_alias_map:
raise ValueError('The label "%s" does not exist in output map.' % label) # depends on [control=['if'], data=['label']]
prob = g.get_tensor_by_name(output_alias_map[label])
grads = tf.gradients(prob, float_image)[0]
grads_values = sess.run(fetches=grads, feed_dict={csv_tensor_name: input_csvlines}) # depends on [control=['with'], data=['g']]
return grads_values |
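
The docstring says these gradients feed an integrated-gradients probe. A hedged sketch of that downstream computation -- `grad_fn` stands in for a wrapper around `_image_gradients` and is an assumption here, as is the zero baseline:

import numpy as np

def integrated_gradients(grad_fn, image, baseline=None, steps=50):
    # Riemann-sum approximation of the path integral from
    # Sundararajan et al. (2017): average the gradients along the
    # straight line from baseline to image, then scale by the
    # input difference.
    if baseline is None:
        baseline = np.zeros_like(image)
    alphas = np.linspace(0.0, 1.0, steps)
    batch = np.stack([baseline + a * (image - baseline) for a in alphas])
    avg_grads = grad_fn(batch).mean(axis=0)
    return (image - baseline) * avg_grads

Increasing `steps` tightens the approximation at the cost of more forward/backward passes; 20-300 steps is the range the original paper reports as adequate.
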