code stringlengths 75-104k | code_sememe stringlengths 47-309k | token_type stringlengths 215-214k | code_dependency stringlengths 75-155k
---|---|---|---
def qasm(self):
"""Return OpenQASM string."""
string_temp = self.header + "\n"
string_temp += self.extension_lib + "\n"
for register in self.qregs:
string_temp += register.qasm() + "\n"
for register in self.cregs:
string_temp += register.qasm() + "\n"
for instruction, qargs, cargs in self.data:
if instruction.name == 'measure':
qubit = qargs[0]
clbit = cargs[0]
string_temp += "%s %s[%d] -> %s[%d];\n" % (instruction.qasm(),
qubit[0].name, qubit[1],
clbit[0].name, clbit[1])
else:
string_temp += "%s %s;\n" % (instruction.qasm(),
",".join(["%s[%d]" % (j[0].name, j[1])
for j in qargs + cargs]))
return string_temp | def function[qasm, parameter[self]]:
constant[Return OpenQASM string.]
variable[string_temp] assign[=] binary_operation[name[self].header + constant[
]]
<ast.AugAssign object at 0x7da1b0536bf0>
for taget[name[register]] in starred[name[self].qregs] begin[:]
<ast.AugAssign object at 0x7da1b0535840>
for taget[name[register]] in starred[name[self].cregs] begin[:]
<ast.AugAssign object at 0x7da1b0536ad0>
for taget[tuple[[<ast.Name object at 0x7da1b0536740>, <ast.Name object at 0x7da1b05343a0>, <ast.Name object at 0x7da1b0535d80>]]] in starred[name[self].data] begin[:]
if compare[name[instruction].name equal[==] constant[measure]] begin[:]
variable[qubit] assign[=] call[name[qargs]][constant[0]]
variable[clbit] assign[=] call[name[cargs]][constant[0]]
<ast.AugAssign object at 0x7da1b0534ee0>
return[name[string_temp]] | keyword[def] identifier[qasm] ( identifier[self] ):
literal[string]
identifier[string_temp] = identifier[self] . identifier[header] + literal[string]
identifier[string_temp] += identifier[self] . identifier[extension_lib] + literal[string]
keyword[for] identifier[register] keyword[in] identifier[self] . identifier[qregs] :
identifier[string_temp] += identifier[register] . identifier[qasm] ()+ literal[string]
keyword[for] identifier[register] keyword[in] identifier[self] . identifier[cregs] :
identifier[string_temp] += identifier[register] . identifier[qasm] ()+ literal[string]
keyword[for] identifier[instruction] , identifier[qargs] , identifier[cargs] keyword[in] identifier[self] . identifier[data] :
keyword[if] identifier[instruction] . identifier[name] == literal[string] :
identifier[qubit] = identifier[qargs] [ literal[int] ]
identifier[clbit] = identifier[cargs] [ literal[int] ]
identifier[string_temp] += literal[string] %( identifier[instruction] . identifier[qasm] (),
identifier[qubit] [ literal[int] ]. identifier[name] , identifier[qubit] [ literal[int] ],
identifier[clbit] [ literal[int] ]. identifier[name] , identifier[clbit] [ literal[int] ])
keyword[else] :
identifier[string_temp] += literal[string] %( identifier[instruction] . identifier[qasm] (),
literal[string] . identifier[join] ([ literal[string] %( identifier[j] [ literal[int] ]. identifier[name] , identifier[j] [ literal[int] ])
keyword[for] identifier[j] keyword[in] identifier[qargs] + identifier[cargs] ]))
keyword[return] identifier[string_temp] | def qasm(self):
"""Return OpenQASM string."""
string_temp = self.header + '\n'
string_temp += self.extension_lib + '\n'
for register in self.qregs:
string_temp += register.qasm() + '\n' # depends on [control=['for'], data=['register']]
for register in self.cregs:
string_temp += register.qasm() + '\n' # depends on [control=['for'], data=['register']]
for (instruction, qargs, cargs) in self.data:
if instruction.name == 'measure':
qubit = qargs[0]
clbit = cargs[0]
string_temp += '%s %s[%d] -> %s[%d];\n' % (instruction.qasm(), qubit[0].name, qubit[1], clbit[0].name, clbit[1]) # depends on [control=['if'], data=[]]
else:
string_temp += '%s %s;\n' % (instruction.qasm(), ','.join(['%s[%d]' % (j[0].name, j[1]) for j in qargs + cargs])) # depends on [control=['for'], data=[]]
return string_temp |
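
A minimal sketch of the OpenQASM 2.0 text this method assembles, for a hypothetical two-qubit Bell circuit; the header and include line are assumptions about self.header and self.extension_lib, not taken from the snippet:

# Hypothetical result of calling qasm() on a Bell circuit (format illustrative):
expected = (
    "OPENQASM 2.0;\n"            # self.header (assumed)
    'include "qelib1.inc";\n'    # self.extension_lib (assumed)
    "qreg q[2];\n"               # one line per register in self.qregs
    "creg c[2];\n"               # one line per register in self.cregs
    "h q[0];\n"                  # generic-instruction branch
    "cx q[0],q[1];\n"
    "measure q[0] -> c[0];\n"    # measure branch: qubit -> clbit
    "measure q[1] -> c[1];\n"
)
print(expected)
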
def return_type(rettype):
"""
Decorate a function to automatically convert its return type to a string
using a custom function.
Web-based service functions must return text to the client. Tangelo
contains default logic to convert many kinds of values into string, but this
decorator allows the service writer to specify custom behavior falling
outside of the default. If the conversion fails, an appropriate server
error will be raised.
"""
def wrap(f):
@functools.wraps(f)
def converter(*pargs, **kwargs):
# Run the function to capture the output.
result = f(*pargs, **kwargs)
# Convert the result using the return type function.
try:
result = rettype(result)
except ValueError as e:
http_status(500, "Return Value Conversion Failed")
content_type("application/json")
return {"error": str(e)}
return result
return converter
return wrap | def function[return_type, parameter[rettype]]:
constant[
Decorate a function to automatically convert its return type to a string
using a custom function.
Web-based service functions must return text to the client. Tangelo
contains default logic to convert many kinds of values into string, but this
decorator allows the service writer to specify custom behavior falling
outside of the default. If the conversion fails, an appropriate server
error will be raised.
]
def function[wrap, parameter[f]]:
def function[converter, parameter[]]:
variable[result] assign[=] call[name[f], parameter[<ast.Starred object at 0x7da18bc71a80>]]
<ast.Try object at 0x7da18bc73be0>
return[name[result]]
return[name[converter]]
return[name[wrap]] | keyword[def] identifier[return_type] ( identifier[rettype] ):
literal[string]
keyword[def] identifier[wrap] ( identifier[f] ):
@ identifier[functools] . identifier[wraps] ( identifier[f] )
keyword[def] identifier[converter] (* identifier[pargs] ,** identifier[kwargs] ):
identifier[result] = identifier[f] (* identifier[pargs] ,** identifier[kwargs] )
keyword[try] :
identifier[result] = identifier[rettype] ( identifier[result] )
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
identifier[http_status] ( literal[int] , literal[string] )
identifier[content_type] ( literal[string] )
keyword[return] { literal[string] : identifier[str] ( identifier[e] )}
keyword[return] identifier[result]
keyword[return] identifier[converter]
keyword[return] identifier[wrap] | def return_type(rettype):
"""
Decorate a function to automatically convert its return type to a string
using a custom function.
Web-based service functions must return text to the client. Tangelo
contains default logic to convert many kinds of values into string, but this
decorator allows the service writer to specify custom behavior falling
outside of the default. If the conversion fails, an appropriate server
error will be raised.
"""
def wrap(f):
@functools.wraps(f)
def converter(*pargs, **kwargs):
# Run the function to capture the output.
result = f(*pargs, **kwargs)
# Convert the result using the return type function.
try:
result = rettype(result) # depends on [control=['try'], data=[]]
except ValueError as e:
http_status(500, 'Return Value Conversion Failed')
content_type('application/json')
return {'error': str(e)} # depends on [control=['except'], data=['e']]
return result
return converter
return wrap |
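
A hedged usage sketch for the decorator above. It assumes return_type and its functools import are in scope, along with Tangelo's http_status and content_type helpers for the error path; the service function itself is made up:

import json

@return_type(json.dumps)               # serialize the returned dict to JSON text
def sum_service(a=0, b=0):
    return {"sum": int(a) + int(b)}

print(sum_service(1, 2))               # -> '{"sum": 3}'
# If the rettype call raised ValueError, the decorator would answer with a
# 500 status and a JSON error body instead of the converted result.
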
def frequency(self, data_frame):
"""
    This method returns the number of taps divided by the test duration
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return frequency: frequency
:rtype frequency: float
"""
freq = sum(data_frame.action_type == 1) / data_frame.td[-1]
duration = math.ceil(data_frame.td[-1])
return freq, duration | def function[frequency, parameter[self, data_frame]]:
constant[
    This method returns the number of taps divided by the test duration
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return frequency: frequency
:rtype frequency: float
]
variable[freq] assign[=] binary_operation[call[name[sum], parameter[compare[name[data_frame].action_type equal[==] constant[1]]]] / call[name[data_frame].td][<ast.UnaryOp object at 0x7da1b1b7ea10>]]
variable[duration] assign[=] call[name[math].ceil, parameter[call[name[data_frame].td][<ast.UnaryOp object at 0x7da1b1b7db10>]]]
return[tuple[[<ast.Name object at 0x7da1b1b7ecb0>, <ast.Name object at 0x7da1b1b7ca30>]]] | keyword[def] identifier[frequency] ( identifier[self] , identifier[data_frame] ):
literal[string]
identifier[freq] = identifier[sum] ( identifier[data_frame] . identifier[action_type] == literal[int] )/ identifier[data_frame] . identifier[td] [- literal[int] ]
identifier[duration] = identifier[math] . identifier[ceil] ( identifier[data_frame] . identifier[td] [- literal[int] ])
keyword[return] identifier[freq] , identifier[duration] | def frequency(self, data_frame):
"""
    This method returns the number of taps divided by the test duration
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return frequency: frequency
:rtype frequency: float
"""
freq = sum(data_frame.action_type == 1) / data_frame.td[-1]
duration = math.ceil(data_frame.td[-1])
return (freq, duration) |
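
A self-contained sketch of the same computation on invented tapping data; .iloc[-1] is used here for positional access to the last td value, whereas the method above relies on the frame's index supporting td[-1]:

import math
import pandas as pd

# Hypothetical tap log: action_type 1 marks a tap, td is elapsed seconds.
df = pd.DataFrame({"action_type": [1, 2, 1, 2, 1],
                   "td": [0.0, 0.5, 1.1, 1.6, 2.4]})
freq = sum(df.action_type == 1) / df.td.iloc[-1]   # 3 taps / 2.4 s = 1.25 Hz
duration = math.ceil(df.td.iloc[-1])               # 3
print(freq, duration)
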
def _parse_alfred_vis(self, data):
"""
    Converts an alfred-vis JSON object
    to a NetworkX Graph object, which is then returned.
    Additionally checks for "source_version" to determine the batman-adv version.
"""
# initialize graph and list of aggregated nodes
graph = self._init_graph()
if 'source_version' in data:
self.version = data['source_version']
if 'vis' not in data:
raise ParserError('Parse error, "vis" key not found')
node_list = self._get_aggregated_node_list(data['vis'])
# loop over topology section and create networkx graph
for node in data["vis"]:
for neigh in node["neighbors"]:
graph.add_node(node['primary'], **{
'local_addresses': node.get('secondary', []),
'clients': node.get('clients', [])
})
primary_neigh = self._get_primary_address(neigh['neighbor'],
node_list)
# networkx automatically ignores duplicated edges
graph.add_edge(node['primary'],
primary_neigh,
weight=float(neigh['metric']))
return graph | def function[_parse_alfred_vis, parameter[self, data]]:
constant[
    Converts an alfred-vis JSON object
    to a NetworkX Graph object, which is then returned.
    Additionally checks for "source_version" to determine the batman-adv version.
]
variable[graph] assign[=] call[name[self]._init_graph, parameter[]]
if compare[constant[source_version] in name[data]] begin[:]
name[self].version assign[=] call[name[data]][constant[source_version]]
if compare[constant[vis] <ast.NotIn object at 0x7da2590d7190> name[data]] begin[:]
<ast.Raise object at 0x7da1b272f190>
variable[node_list] assign[=] call[name[self]._get_aggregated_node_list, parameter[call[name[data]][constant[vis]]]]
for taget[name[node]] in starred[call[name[data]][constant[vis]]] begin[:]
for taget[name[neigh]] in starred[call[name[node]][constant[neighbors]]] begin[:]
call[name[graph].add_node, parameter[call[name[node]][constant[primary]]]]
variable[primary_neigh] assign[=] call[name[self]._get_primary_address, parameter[call[name[neigh]][constant[neighbor]], name[node_list]]]
call[name[graph].add_edge, parameter[call[name[node]][constant[primary]], name[primary_neigh]]]
return[name[graph]] | keyword[def] identifier[_parse_alfred_vis] ( identifier[self] , identifier[data] ):
literal[string]
identifier[graph] = identifier[self] . identifier[_init_graph] ()
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[self] . identifier[version] = identifier[data] [ literal[string] ]
keyword[if] literal[string] keyword[not] keyword[in] identifier[data] :
keyword[raise] identifier[ParserError] ( literal[string] )
identifier[node_list] = identifier[self] . identifier[_get_aggregated_node_list] ( identifier[data] [ literal[string] ])
keyword[for] identifier[node] keyword[in] identifier[data] [ literal[string] ]:
keyword[for] identifier[neigh] keyword[in] identifier[node] [ literal[string] ]:
identifier[graph] . identifier[add_node] ( identifier[node] [ literal[string] ],**{
literal[string] : identifier[node] . identifier[get] ( literal[string] ,[]),
literal[string] : identifier[node] . identifier[get] ( literal[string] ,[])
})
identifier[primary_neigh] = identifier[self] . identifier[_get_primary_address] ( identifier[neigh] [ literal[string] ],
identifier[node_list] )
identifier[graph] . identifier[add_edge] ( identifier[node] [ literal[string] ],
identifier[primary_neigh] ,
identifier[weight] = identifier[float] ( identifier[neigh] [ literal[string] ]))
keyword[return] identifier[graph] | def _parse_alfred_vis(self, data):
"""
    Converts an alfred-vis JSON object
    to a NetworkX Graph object, which is then returned.
    Additionally checks for "source_version" to determine the batman-adv version.
"""
# initialize graph and list of aggregated nodes
graph = self._init_graph()
if 'source_version' in data:
self.version = data['source_version'] # depends on [control=['if'], data=['data']]
if 'vis' not in data:
raise ParserError('Parse error, "vis" key not found') # depends on [control=['if'], data=[]]
node_list = self._get_aggregated_node_list(data['vis'])
# loop over topology section and create networkx graph
for node in data['vis']:
for neigh in node['neighbors']:
graph.add_node(node['primary'], **{'local_addresses': node.get('secondary', []), 'clients': node.get('clients', [])})
primary_neigh = self._get_primary_address(neigh['neighbor'], node_list)
# networkx automatically ignores duplicated edges
graph.add_edge(node['primary'], primary_neigh, weight=float(neigh['metric'])) # depends on [control=['for'], data=['neigh']] # depends on [control=['for'], data=['node']]
return graph |
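
For reference, a hypothetical alfred-vis payload in the shape this parser expects (addresses and metrics invented):

data = {
    "source_version": "2018.1",
    "vis": [
        {"primary": "a0:f3:c1:00:00:01",
         "secondary": ["a2:f3:c1:00:00:01"],
         "clients": ["00:11:22:33:44:55"],
         "neighbors": [{"neighbor": "a0:f3:c1:00:00:02", "metric": "1.016"}]},
        {"primary": "a0:f3:c1:00:00:02",
         "neighbors": [{"neighbor": "a2:f3:c1:00:00:01", "metric": "1.016"}]},
    ],
}
# parser._parse_alfred_vis(data) would return a two-node graph with one
# weighted edge; the secondary address in the second neighbor entry is
# resolved back to the first node's primary address.
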
def DiffPrimitiveArrays(self, oldObj, newObj):
"""Diff two primitive arrays"""
if len(oldObj) != len(newObj):
__Log__.debug('DiffDoArrays: Array lengths do not match %d != %d'
% (len(oldObj), len(newObj)))
return False
match = True
if self._ignoreArrayOrder:
oldSet = oldObj and frozenset(oldObj) or frozenset()
newSet = newObj and frozenset(newObj) or frozenset()
match = (oldSet == newSet)
else:
for i, j in zip(oldObj, newObj):
if i != j:
match = False
break
if not match:
__Log__.debug(
'DiffPrimitiveArrays: One of the elements do not match.')
return False
return True | def function[DiffPrimitiveArrays, parameter[self, oldObj, newObj]]:
constant[Diff two primitive arrays]
if compare[call[name[len], parameter[name[oldObj]]] not_equal[!=] call[name[len], parameter[name[newObj]]]] begin[:]
call[name[__Log__].debug, parameter[binary_operation[constant[DiffDoArrays: Array lengths do not match %d != %d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da20c9918a0>, <ast.Call object at 0x7da20c990fd0>]]]]]
return[constant[False]]
variable[match] assign[=] constant[True]
if name[self]._ignoreArrayOrder begin[:]
variable[oldSet] assign[=] <ast.BoolOp object at 0x7da20c991630>
variable[newSet] assign[=] <ast.BoolOp object at 0x7da20c993850>
variable[match] assign[=] compare[name[oldSet] equal[==] name[newSet]]
if <ast.UnaryOp object at 0x7da20c992cb0> begin[:]
call[name[__Log__].debug, parameter[constant[DiffPrimitiveArrays: One of the elements do not match.]]]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[DiffPrimitiveArrays] ( identifier[self] , identifier[oldObj] , identifier[newObj] ):
literal[string]
keyword[if] identifier[len] ( identifier[oldObj] )!= identifier[len] ( identifier[newObj] ):
identifier[__Log__] . identifier[debug] ( literal[string]
%( identifier[len] ( identifier[oldObj] ), identifier[len] ( identifier[newObj] )))
keyword[return] keyword[False]
identifier[match] = keyword[True]
keyword[if] identifier[self] . identifier[_ignoreArrayOrder] :
identifier[oldSet] = identifier[oldObj] keyword[and] identifier[frozenset] ( identifier[oldObj] ) keyword[or] identifier[frozenset] ()
identifier[newSet] = identifier[newObj] keyword[and] identifier[frozenset] ( identifier[newObj] ) keyword[or] identifier[frozenset] ()
identifier[match] =( identifier[oldSet] == identifier[newSet] )
keyword[else] :
keyword[for] identifier[i] , identifier[j] keyword[in] identifier[zip] ( identifier[oldObj] , identifier[newObj] ):
keyword[if] identifier[i] != identifier[j] :
identifier[match] = keyword[False]
keyword[break]
keyword[if] keyword[not] identifier[match] :
identifier[__Log__] . identifier[debug] (
literal[string] )
keyword[return] keyword[False]
keyword[return] keyword[True] | def DiffPrimitiveArrays(self, oldObj, newObj):
"""Diff two primitive arrays"""
if len(oldObj) != len(newObj):
__Log__.debug('DiffDoArrays: Array lengths do not match %d != %d' % (len(oldObj), len(newObj)))
return False # depends on [control=['if'], data=[]]
match = True
if self._ignoreArrayOrder:
oldSet = oldObj and frozenset(oldObj) or frozenset()
newSet = newObj and frozenset(newObj) or frozenset()
match = oldSet == newSet # depends on [control=['if'], data=[]]
else:
for (i, j) in zip(oldObj, newObj):
if i != j:
match = False
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if not match:
__Log__.debug('DiffPrimitiveArrays: One of the elements do not match.')
return False # depends on [control=['if'], data=[]]
return True |
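
A standalone sketch of the comparison logic with the instance flag turned into a parameter; note the frozenset path ignores duplicate counts as well as order (for example [1, 1, 2] and [1, 2, 2] compare equal):

def diff_primitive_arrays(old, new, ignore_order=False):
    if len(old) != len(new):
        return False
    if ignore_order:
        return frozenset(old or ()) == frozenset(new or ())
    return all(i == j for i, j in zip(old, new))

print(diff_primitive_arrays([1, 2, 3], [3, 2, 1]))                     # False
print(diff_primitive_arrays([1, 2, 3], [3, 2, 1], ignore_order=True))  # True
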
def _get_samtools0_path(self):
"""Retrieve PATH to the samtools version specific for eriscript.
"""
samtools_path = os.path.realpath(os.path.join(self._get_ericscript_path(),"..", "..", "bin"))
return samtools_path | def function[_get_samtools0_path, parameter[self]]:
    constant[Retrieve PATH to the samtools version specific for ericscript.
]
variable[samtools_path] assign[=] call[name[os].path.realpath, parameter[call[name[os].path.join, parameter[call[name[self]._get_ericscript_path, parameter[]], constant[..], constant[..], constant[bin]]]]]
return[name[samtools_path]] | keyword[def] identifier[_get_samtools0_path] ( identifier[self] ):
literal[string]
identifier[samtools_path] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_get_ericscript_path] (), literal[string] , literal[string] , literal[string] ))
keyword[return] identifier[samtools_path] | def _get_samtools0_path(self):
"""Retrieve PATH to the samtools version specific for eriscript.
"""
samtools_path = os.path.realpath(os.path.join(self._get_ericscript_path(), '..', '..', 'bin'))
return samtools_path |
def from_optional_dicts_by_key(cls, ds: Optional[dict],
force_snake_case=True, force_cast: bool=False, restrict: bool=True) -> TOption[TDict[T]]:
"""From dict of dict to optional dict of instance.
:param ds: Dict of dict
    :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Dict of instance
Usage:
>>> from owlmixin.samples import Human
>>> Human.from_optional_dicts_by_key(None).is_none()
True
>>> Human.from_optional_dicts_by_key({}).get()
{}
"""
return TOption(cls.from_dicts_by_key(ds,
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict) if ds is not None else None) | def function[from_optional_dicts_by_key, parameter[cls, ds, force_snake_case, force_cast, restrict]]:
constant[From dict of dict to optional dict of instance.
:param ds: Dict of dict
    :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Dict of instance
Usage:
>>> from owlmixin.samples import Human
>>> Human.from_optional_dicts_by_key(None).is_none()
True
>>> Human.from_optional_dicts_by_key({}).get()
{}
]
return[call[name[TOption], parameter[<ast.IfExp object at 0x7da20cabcbe0>]]] | keyword[def] identifier[from_optional_dicts_by_key] ( identifier[cls] , identifier[ds] : identifier[Optional] [ identifier[dict] ],
identifier[force_snake_case] = keyword[True] , identifier[force_cast] : identifier[bool] = keyword[False] , identifier[restrict] : identifier[bool] = keyword[True] )-> identifier[TOption] [ identifier[TDict] [ identifier[T] ]]:
literal[string]
keyword[return] identifier[TOption] ( identifier[cls] . identifier[from_dicts_by_key] ( identifier[ds] ,
identifier[force_snake_case] = identifier[force_snake_case] ,
identifier[force_cast] = identifier[force_cast] ,
identifier[restrict] = identifier[restrict] ) keyword[if] identifier[ds] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None] ) | def from_optional_dicts_by_key(cls, ds: Optional[dict], force_snake_case=True, force_cast: bool=False, restrict: bool=True) -> TOption[TDict[T]]:
"""From dict of dict to optional dict of instance.
:param ds: Dict of dict
    :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Dict of instance
Usage:
>>> from owlmixin.samples import Human
>>> Human.from_optional_dicts_by_key(None).is_none()
True
>>> Human.from_optional_dicts_by_key({}).get()
{}
"""
return TOption(cls.from_dicts_by_key(ds, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict) if ds is not None else None) |
def add_view_info(self, view_info: ViewInfo):
'''Adds view information to error message'''
try:
next(info for info in self._view_infos if info.view == view_info.view)
except StopIteration:
indent = len(self._view_infos) * '\t'
self._view_infos.append(view_info)
info = 'Line {0} in "{1}"'.format(view_info.line, view_info.view)
self.add_info(indent + 'View info', info) | def function[add_view_info, parameter[self, view_info]]:
constant[Adds view information to error message]
<ast.Try object at 0x7da20e955960> | keyword[def] identifier[add_view_info] ( identifier[self] , identifier[view_info] : identifier[ViewInfo] ):
literal[string]
keyword[try] :
identifier[next] ( identifier[info] keyword[for] identifier[info] keyword[in] identifier[self] . identifier[_view_infos] keyword[if] identifier[info] . identifier[view] == identifier[view_info] . identifier[view] )
keyword[except] identifier[StopIteration] :
identifier[indent] = identifier[len] ( identifier[self] . identifier[_view_infos] )* literal[string]
identifier[self] . identifier[_view_infos] . identifier[append] ( identifier[view_info] )
identifier[info] = literal[string] . identifier[format] ( identifier[view_info] . identifier[line] , identifier[view_info] . identifier[view] )
identifier[self] . identifier[add_info] ( identifier[indent] + literal[string] , identifier[info] ) | def add_view_info(self, view_info: ViewInfo):
"""Adds view information to error message"""
try:
next((info for info in self._view_infos if info.view == view_info.view)) # depends on [control=['try'], data=[]]
except StopIteration:
indent = len(self._view_infos) * '\t'
self._view_infos.append(view_info)
info = 'Line {0} in "{1}"'.format(view_info.line, view_info.view)
self.add_info(indent + 'View info', info) # depends on [control=['except'], data=[]] |
def _get_char_pixels(self, s):
"""
Internal. Safeguards the character indexed dictionary for the
show_message function below
"""
if len(s) == 1 and s in self._text_dict.keys():
return list(self._text_dict[s])
else:
return list(self._text_dict['?']) | def function[_get_char_pixels, parameter[self, s]]:
constant[
Internal. Safeguards the character indexed dictionary for the
show_message function below
]
if <ast.BoolOp object at 0x7da1b08a15a0> begin[:]
return[call[name[list], parameter[call[name[self]._text_dict][name[s]]]]] | keyword[def] identifier[_get_char_pixels] ( identifier[self] , identifier[s] ):
literal[string]
keyword[if] identifier[len] ( identifier[s] )== literal[int] keyword[and] identifier[s] keyword[in] identifier[self] . identifier[_text_dict] . identifier[keys] ():
keyword[return] identifier[list] ( identifier[self] . identifier[_text_dict] [ identifier[s] ])
keyword[else] :
keyword[return] identifier[list] ( identifier[self] . identifier[_text_dict] [ literal[string] ]) | def _get_char_pixels(self, s):
"""
Internal. Safeguards the character indexed dictionary for the
show_message function below
"""
if len(s) == 1 and s in self._text_dict.keys():
return list(self._text_dict[s]) # depends on [control=['if'], data=[]]
else:
return list(self._text_dict['?']) |
def __parse_inchi():
'''Gets and parses file'''
filename = get_file('chebiId_inchi.tsv')
with io.open(filename, 'r', encoding='cp1252') as textfile:
next(textfile)
for line in textfile:
tokens = line.strip().split('\t')
__INCHIS[int(tokens[0])] = tokens[1] | def function[__parse_inchi, parameter[]]:
constant[Gets and parses file]
variable[filename] assign[=] call[name[get_file], parameter[constant[chebiId_inchi.tsv]]]
with call[name[io].open, parameter[name[filename], constant[r]]] begin[:]
call[name[next], parameter[name[textfile]]]
for taget[name[line]] in starred[name[textfile]] begin[:]
variable[tokens] assign[=] call[call[name[line].strip, parameter[]].split, parameter[constant[ ]]]
call[name[__INCHIS]][call[name[int], parameter[call[name[tokens]][constant[0]]]]] assign[=] call[name[tokens]][constant[1]] | keyword[def] identifier[__parse_inchi] ():
literal[string]
identifier[filename] = identifier[get_file] ( literal[string] )
keyword[with] identifier[io] . identifier[open] ( identifier[filename] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[textfile] :
identifier[next] ( identifier[textfile] )
keyword[for] identifier[line] keyword[in] identifier[textfile] :
identifier[tokens] = identifier[line] . identifier[strip] (). identifier[split] ( literal[string] )
identifier[__INCHIS] [ identifier[int] ( identifier[tokens] [ literal[int] ])]= identifier[tokens] [ literal[int] ] | def __parse_inchi():
"""Gets and parses file"""
filename = get_file('chebiId_inchi.tsv')
with io.open(filename, 'r', encoding='cp1252') as textfile:
next(textfile)
for line in textfile:
tokens = line.strip().split('\t')
__INCHIS[int(tokens[0])] = tokens[1] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['textfile']] |
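
The same parsing logic exercised on an in-memory sample of the expected chebiId_inchi.tsv layout: a header line, then tab-separated ChEBI ID and InChI pairs (the row is invented):

import io

sample = "CHEBI_ID\tInChI\n15377\tInChI=1S/H2O/h1H2\n"
inchis = {}
textfile = io.StringIO(sample)
next(textfile)                            # skip the header line
for line in textfile:
    tokens = line.strip().split("\t")
    inchis[int(tokens[0])] = tokens[1]
print(inchis)                             # {15377: 'InChI=1S/H2O/h1H2'}
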
def get_structure_from_prev_run(vasprun, outcar=None, sym_prec=0.1,
international_monoclinic=True):
"""
Process structure from previous run.
Args:
vasprun (Vasprun): Vasprun that contains the final structure
from previous run.
outcar (Outcar): Outcar that contains the magnetization info from
previous run.
sym_prec (float): Tolerance for symmetry finding for standardization. If
            no standardization is desired, set to 0 or False.
international_monoclinic (bool): Whether to use international
convention (vs Curtarolo) for monoclinic. Defaults True.
Returns:
Returns the magmom-decorated structure that can be passed to get
Vasp input files, e.g. get_kpoints.
"""
structure = vasprun.final_structure
site_properties = {}
# magmom
if vasprun.is_spin:
if outcar and outcar.magnetization:
site_properties.update({"magmom": [i['tot']
for i in outcar.magnetization]})
else:
site_properties.update({"magmom": vasprun.parameters['MAGMOM']})
# ldau
if vasprun.parameters.get("LDAU", False):
for k in ("LDAUU", "LDAUJ", "LDAUL"):
vals = vasprun.incar[k]
m = {}
l = []
s = 0
for site in structure:
if site.specie.symbol not in m:
m[site.specie.symbol] = vals[s]
s += 1
l.append(m[site.specie.symbol])
if len(l) == len(structure):
site_properties.update({k.lower(): l})
else:
                raise ValueError("length of list {} not the same as "
                                 "structure".format(l))
structure = structure.copy(site_properties=site_properties)
if sym_prec:
sym_finder = SpacegroupAnalyzer(structure, symprec=sym_prec)
new_structure = sym_finder.get_primitive_standard_structure(
international_monoclinic=international_monoclinic)
# the primitive structure finding has had several bugs in the past
# defend through validation
vpa_old = structure.volume / structure.num_sites
vpa_new = new_structure.volume / new_structure.num_sites
if abs(vpa_old - vpa_new) / vpa_old > 0.02:
raise ValueError(
"Standardizing cell failed! VPA old: {}, VPA new: {}".format(
vpa_old, vpa_new))
sm = StructureMatcher()
if not sm.fit(structure, new_structure):
raise ValueError(
"Standardizing cell failed! Old structure doesn't match new.")
structure = new_structure
return structure | def function[get_structure_from_prev_run, parameter[vasprun, outcar, sym_prec, international_monoclinic]]:
constant[
Process structure from previous run.
Args:
vasprun (Vasprun): Vasprun that contains the final structure
from previous run.
outcar (Outcar): Outcar that contains the magnetization info from
previous run.
sym_prec (float): Tolerance for symmetry finding for standardization. If
            no standardization is desired, set to 0 or False.
international_monoclinic (bool): Whether to use international
convention (vs Curtarolo) for monoclinic. Defaults True.
Returns:
Returns the magmom-decorated structure that can be passed to get
Vasp input files, e.g. get_kpoints.
]
variable[structure] assign[=] name[vasprun].final_structure
variable[site_properties] assign[=] dictionary[[], []]
if name[vasprun].is_spin begin[:]
if <ast.BoolOp object at 0x7da18eb56710> begin[:]
call[name[site_properties].update, parameter[dictionary[[<ast.Constant object at 0x7da18eb56b90>], [<ast.ListComp object at 0x7da18eb55570>]]]]
if call[name[vasprun].parameters.get, parameter[constant[LDAU], constant[False]]] begin[:]
for taget[name[k]] in starred[tuple[[<ast.Constant object at 0x7da18eb569b0>, <ast.Constant object at 0x7da18eb575e0>, <ast.Constant object at 0x7da18eb55360>]]] begin[:]
variable[vals] assign[=] call[name[vasprun].incar][name[k]]
variable[m] assign[=] dictionary[[], []]
variable[l] assign[=] list[[]]
variable[s] assign[=] constant[0]
for taget[name[site]] in starred[name[structure]] begin[:]
if compare[name[site].specie.symbol <ast.NotIn object at 0x7da2590d7190> name[m]] begin[:]
call[name[m]][name[site].specie.symbol] assign[=] call[name[vals]][name[s]]
<ast.AugAssign object at 0x7da18eb54ac0>
call[name[l].append, parameter[call[name[m]][name[site].specie.symbol]]]
if compare[call[name[len], parameter[name[l]]] equal[==] call[name[len], parameter[name[structure]]]] begin[:]
call[name[site_properties].update, parameter[dictionary[[<ast.Call object at 0x7da18eb550f0>], [<ast.Name object at 0x7da18eb55db0>]]]]
variable[structure] assign[=] call[name[structure].copy, parameter[]]
if name[sym_prec] begin[:]
variable[sym_finder] assign[=] call[name[SpacegroupAnalyzer], parameter[name[structure]]]
variable[new_structure] assign[=] call[name[sym_finder].get_primitive_standard_structure, parameter[]]
variable[vpa_old] assign[=] binary_operation[name[structure].volume / name[structure].num_sites]
variable[vpa_new] assign[=] binary_operation[name[new_structure].volume / name[new_structure].num_sites]
if compare[binary_operation[call[name[abs], parameter[binary_operation[name[vpa_old] - name[vpa_new]]]] / name[vpa_old]] greater[>] constant[0.02]] begin[:]
<ast.Raise object at 0x7da18eb56680>
variable[sm] assign[=] call[name[StructureMatcher], parameter[]]
if <ast.UnaryOp object at 0x7da18eb553f0> begin[:]
<ast.Raise object at 0x7da18eb578e0>
variable[structure] assign[=] name[new_structure]
return[name[structure]] | keyword[def] identifier[get_structure_from_prev_run] ( identifier[vasprun] , identifier[outcar] = keyword[None] , identifier[sym_prec] = literal[int] ,
identifier[international_monoclinic] = keyword[True] ):
literal[string]
identifier[structure] = identifier[vasprun] . identifier[final_structure]
identifier[site_properties] ={}
keyword[if] identifier[vasprun] . identifier[is_spin] :
keyword[if] identifier[outcar] keyword[and] identifier[outcar] . identifier[magnetization] :
identifier[site_properties] . identifier[update] ({ literal[string] :[ identifier[i] [ literal[string] ]
keyword[for] identifier[i] keyword[in] identifier[outcar] . identifier[magnetization] ]})
keyword[else] :
identifier[site_properties] . identifier[update] ({ literal[string] : identifier[vasprun] . identifier[parameters] [ literal[string] ]})
keyword[if] identifier[vasprun] . identifier[parameters] . identifier[get] ( literal[string] , keyword[False] ):
keyword[for] identifier[k] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[vals] = identifier[vasprun] . identifier[incar] [ identifier[k] ]
identifier[m] ={}
identifier[l] =[]
identifier[s] = literal[int]
keyword[for] identifier[site] keyword[in] identifier[structure] :
keyword[if] identifier[site] . identifier[specie] . identifier[symbol] keyword[not] keyword[in] identifier[m] :
identifier[m] [ identifier[site] . identifier[specie] . identifier[symbol] ]= identifier[vals] [ identifier[s] ]
identifier[s] += literal[int]
identifier[l] . identifier[append] ( identifier[m] [ identifier[site] . identifier[specie] . identifier[symbol] ])
keyword[if] identifier[len] ( identifier[l] )== identifier[len] ( identifier[structure] ):
identifier[site_properties] . identifier[update] ({ identifier[k] . identifier[lower] (): identifier[l] })
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[l] ))
identifier[structure] = identifier[structure] . identifier[copy] ( identifier[site_properties] = identifier[site_properties] )
keyword[if] identifier[sym_prec] :
identifier[sym_finder] = identifier[SpacegroupAnalyzer] ( identifier[structure] , identifier[symprec] = identifier[sym_prec] )
identifier[new_structure] = identifier[sym_finder] . identifier[get_primitive_standard_structure] (
identifier[international_monoclinic] = identifier[international_monoclinic] )
identifier[vpa_old] = identifier[structure] . identifier[volume] / identifier[structure] . identifier[num_sites]
identifier[vpa_new] = identifier[new_structure] . identifier[volume] / identifier[new_structure] . identifier[num_sites]
keyword[if] identifier[abs] ( identifier[vpa_old] - identifier[vpa_new] )/ identifier[vpa_old] > literal[int] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] (
identifier[vpa_old] , identifier[vpa_new] ))
identifier[sm] = identifier[StructureMatcher] ()
keyword[if] keyword[not] identifier[sm] . identifier[fit] ( identifier[structure] , identifier[new_structure] ):
keyword[raise] identifier[ValueError] (
literal[string] )
identifier[structure] = identifier[new_structure]
keyword[return] identifier[structure] | def get_structure_from_prev_run(vasprun, outcar=None, sym_prec=0.1, international_monoclinic=True):
"""
Process structure from previous run.
Args:
vasprun (Vasprun): Vasprun that contains the final structure
from previous run.
outcar (Outcar): Outcar that contains the magnetization info from
previous run.
sym_prec (float): Tolerance for symmetry finding for standardization. If
            no standardization is desired, set to 0 or False.
international_monoclinic (bool): Whether to use international
convention (vs Curtarolo) for monoclinic. Defaults True.
Returns:
Returns the magmom-decorated structure that can be passed to get
Vasp input files, e.g. get_kpoints.
"""
structure = vasprun.final_structure
site_properties = {}
# magmom
if vasprun.is_spin:
if outcar and outcar.magnetization:
site_properties.update({'magmom': [i['tot'] for i in outcar.magnetization]}) # depends on [control=['if'], data=[]]
else:
site_properties.update({'magmom': vasprun.parameters['MAGMOM']}) # depends on [control=['if'], data=[]]
# ldau
if vasprun.parameters.get('LDAU', False):
for k in ('LDAUU', 'LDAUJ', 'LDAUL'):
vals = vasprun.incar[k]
m = {}
l = []
s = 0
for site in structure:
if site.specie.symbol not in m:
m[site.specie.symbol] = vals[s]
s += 1 # depends on [control=['if'], data=['m']]
l.append(m[site.specie.symbol]) # depends on [control=['for'], data=['site']]
if len(l) == len(structure):
site_properties.update({k.lower(): l}) # depends on [control=['if'], data=[]]
else:
                raise ValueError('length of list {} not the same as structure'.format(l)) # depends on [control=['for'], data=['k']] # depends on [control=['if'], data=[]]
structure = structure.copy(site_properties=site_properties)
if sym_prec:
sym_finder = SpacegroupAnalyzer(structure, symprec=sym_prec)
new_structure = sym_finder.get_primitive_standard_structure(international_monoclinic=international_monoclinic)
# the primitive structure finding has had several bugs in the past
# defend through validation
vpa_old = structure.volume / structure.num_sites
vpa_new = new_structure.volume / new_structure.num_sites
if abs(vpa_old - vpa_new) / vpa_old > 0.02:
raise ValueError('Standardizing cell failed! VPA old: {}, VPA new: {}'.format(vpa_old, vpa_new)) # depends on [control=['if'], data=[]]
sm = StructureMatcher()
if not sm.fit(structure, new_structure):
raise ValueError("Standardizing cell failed! Old structure doesn't match new.") # depends on [control=['if'], data=[]]
structure = new_structure # depends on [control=['if'], data=[]]
return structure |
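
A hedged usage sketch assuming pymatgen's VASP output parsers and the files of a finished spin-polarized run in the working directory:

from pymatgen.io.vasp.outputs import Outcar, Vasprun

vrun = Vasprun("vasprun.xml")
out = Outcar("OUTCAR")
structure = get_structure_from_prev_run(vrun, outcar=out, sym_prec=0.1)
print(structure.site_properties.get("magmom"))   # per-site moments, if spin-polarized
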
def raise_307(instance, location):
"""Abort the current request with a 307 (Temporary Redirect) response
code. Sets the Location header correctly. If the location does not start
with a slash, the path of the current request is prepended.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 307
"""
_set_location(instance, location)
instance.response.status = 307
raise ResponseException(instance.response) | def function[raise_307, parameter[instance, location]]:
constant[Abort the current request with a 307 (Temporary Redirect) response
code. Sets the Location header correctly. If the location does not start
with a slash, the path of the current request is prepended.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 307
]
call[name[_set_location], parameter[name[instance], name[location]]]
name[instance].response.status assign[=] constant[307]
<ast.Raise object at 0x7da18bcca500> | keyword[def] identifier[raise_307] ( identifier[instance] , identifier[location] ):
literal[string]
identifier[_set_location] ( identifier[instance] , identifier[location] )
identifier[instance] . identifier[response] . identifier[status] = literal[int]
keyword[raise] identifier[ResponseException] ( identifier[instance] . identifier[response] ) | def raise_307(instance, location):
"""Abort the current request with a 307 (Temporary Redirect) response
code. Sets the Location header correctly. If the location does not start
with a slash, the path of the current request is prepended.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 307
"""
_set_location(instance, location)
instance.response.status = 307
raise ResponseException(instance.response) |
def _configure_sockets(self, config, with_streamer=False, with_forwarder=False):
"""Configure sockets for HQ
:param dict config: test configuration
:param bool with_streamer: tell if we need to connect to streamer or simply bind
:param bool with_forwarder: tell if we need to connect to forwarder or simply bind
"""
rc_port = config.get('rc_port', 5001)
self.result_collector.set_hwm(0)
self.result_collector.bind("tcp://*:{}".format(rc_port))
self.poller.register(self.result_collector, zmq.POLLIN) | def function[_configure_sockets, parameter[self, config, with_streamer, with_forwarder]]:
constant[Configure sockets for HQ
:param dict config: test configuration
:param bool with_streamer: tell if we need to connect to streamer or simply bind
:param bool with_forwarder: tell if we need to connect to forwarder or simply bind
]
variable[rc_port] assign[=] call[name[config].get, parameter[constant[rc_port], constant[5001]]]
call[name[self].result_collector.set_hwm, parameter[constant[0]]]
call[name[self].result_collector.bind, parameter[call[constant[tcp://*:{}].format, parameter[name[rc_port]]]]]
call[name[self].poller.register, parameter[name[self].result_collector, name[zmq].POLLIN]] | keyword[def] identifier[_configure_sockets] ( identifier[self] , identifier[config] , identifier[with_streamer] = keyword[False] , identifier[with_forwarder] = keyword[False] ):
literal[string]
identifier[rc_port] = identifier[config] . identifier[get] ( literal[string] , literal[int] )
identifier[self] . identifier[result_collector] . identifier[set_hwm] ( literal[int] )
identifier[self] . identifier[result_collector] . identifier[bind] ( literal[string] . identifier[format] ( identifier[rc_port] ))
identifier[self] . identifier[poller] . identifier[register] ( identifier[self] . identifier[result_collector] , identifier[zmq] . identifier[POLLIN] ) | def _configure_sockets(self, config, with_streamer=False, with_forwarder=False):
"""Configure sockets for HQ
:param dict config: test configuration
:param bool with_streamer: tell if we need to connect to streamer or simply bind
:param bool with_forwarder: tell if we need to connect to forwarder or simply bind
"""
rc_port = config.get('rc_port', 5001)
self.result_collector.set_hwm(0)
self.result_collector.bind('tcp://*:{}'.format(rc_port))
self.poller.register(self.result_collector, zmq.POLLIN) |
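
A hedged sketch of a worker pushing one result to the collector socket bound above; it assumes the default rc_port of 5001 and that the collector is the receiving end of a PUSH/PULL pairing, which this snippet does not confirm:

import zmq

ctx = zmq.Context.instance()
sock = ctx.socket(zmq.PUSH)                          # pairing with the collector is assumed
sock.connect("tcp://127.0.0.1:5001")                 # default rc_port
sock.send_json({"worker": "w1", "status": "done"})   # hypothetical payload
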
def _sample_template(sample, out_dir):
"""R code to get QC for one sample"""
bam_fn = dd.get_work_bam(sample)
genome = dd.get_genome_build(sample)
if genome in supported:
        peaks = sample.get("peaks_files", {}).get("main")  # dict default keeps .get("main") safe
if peaks:
r_code = ("library(ChIPQC);\n"
"sample = ChIPQCsample(\"{bam_fn}\","
"\"{peaks}\", "
"annotation = \"{genome}\","
");\n"
"ChIPQCreport(sample);\n")
r_code_fn = os.path.join(out_dir, "chipqc.r")
with open(r_code_fn, 'w') as inh:
inh.write(r_code.format(**locals()))
return r_code_fn | def function[_sample_template, parameter[sample, out_dir]]:
constant[R code to get QC for one sample]
variable[bam_fn] assign[=] call[name[dd].get_work_bam, parameter[name[sample]]]
variable[genome] assign[=] call[name[dd].get_genome_build, parameter[name[sample]]]
if compare[name[genome] in name[supported]] begin[:]
        variable[peaks] assign[=] call[call[name[sample].get, parameter[constant[peaks_files], dictionary[[], []]]].get, parameter[constant[main]]]
if name[peaks] begin[:]
variable[r_code] assign[=] constant[library(ChIPQC);
sample = ChIPQCsample("{bam_fn}","{peaks}", annotation = "{genome}",);
ChIPQCreport(sample);
]
variable[r_code_fn] assign[=] call[name[os].path.join, parameter[name[out_dir], constant[chipqc.r]]]
with call[name[open], parameter[name[r_code_fn], constant[w]]] begin[:]
call[name[inh].write, parameter[call[name[r_code].format, parameter[]]]]
return[name[r_code_fn]] | keyword[def] identifier[_sample_template] ( identifier[sample] , identifier[out_dir] ):
literal[string]
identifier[bam_fn] = identifier[dd] . identifier[get_work_bam] ( identifier[sample] )
identifier[genome] = identifier[dd] . identifier[get_genome_build] ( identifier[sample] )
keyword[if] identifier[genome] keyword[in] identifier[supported] :
identifier[peaks] = identifier[sample] . identifier[get] ( literal[string] ,[]). identifier[get] ( literal[string] )
keyword[if] identifier[peaks] :
identifier[r_code] =( literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[r_code_fn] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , literal[string] )
keyword[with] identifier[open] ( identifier[r_code_fn] , literal[string] ) keyword[as] identifier[inh] :
identifier[inh] . identifier[write] ( identifier[r_code] . identifier[format] (** identifier[locals] ()))
keyword[return] identifier[r_code_fn] | def _sample_template(sample, out_dir):
"""R code to get QC for one sample"""
bam_fn = dd.get_work_bam(sample)
genome = dd.get_genome_build(sample)
if genome in supported:
        peaks = sample.get('peaks_files', {}).get('main')
if peaks:
r_code = 'library(ChIPQC);\nsample = ChIPQCsample("{bam_fn}","{peaks}", annotation = "{genome}",);\nChIPQCreport(sample);\n'
r_code_fn = os.path.join(out_dir, 'chipqc.r')
with open(r_code_fn, 'w') as inh:
inh.write(r_code.format(**locals())) # depends on [control=['with'], data=['inh']]
return r_code_fn # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
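
For illustration, rendering the same template with invented inputs shows what the generated chipqc.r contains (note the template as written leaves a trailing comma inside the ChIPQCsample call):

bam_fn = "sample1.bam"                        # invented inputs
peaks = "sample1_peaks.narrowPeak"
genome = "hg38"
r_code = ("library(ChIPQC);\n"
          "sample = ChIPQCsample(\"{bam_fn}\","
          "\"{peaks}\", "
          "annotation = \"{genome}\","
          ");\n"
          "ChIPQCreport(sample);\n")
print(r_code.format(**locals()))
# library(ChIPQC);
# sample = ChIPQCsample("sample1.bam","sample1_peaks.narrowPeak", annotation = "hg38",);
# ChIPQCreport(sample);
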
def get_revocation_time(self):
"""Get the revocation time as naive datetime.
Note that this method is only used by cryptography>=2.4.
"""
if self.revoked is False:
return
if timezone.is_aware(self.revoked_date):
# convert datetime object to UTC and make it naive
return timezone.make_naive(self.revoked_date, pytz.utc)
return self.revoked_date | def function[get_revocation_time, parameter[self]]:
constant[Get the revocation time as naive datetime.
Note that this method is only used by cryptography>=2.4.
]
if compare[name[self].revoked is constant[False]] begin[:]
return[None]
if call[name[timezone].is_aware, parameter[name[self].revoked_date]] begin[:]
return[call[name[timezone].make_naive, parameter[name[self].revoked_date, name[pytz].utc]]]
return[name[self].revoked_date] | keyword[def] identifier[get_revocation_time] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[revoked] keyword[is] keyword[False] :
keyword[return]
keyword[if] identifier[timezone] . identifier[is_aware] ( identifier[self] . identifier[revoked_date] ):
keyword[return] identifier[timezone] . identifier[make_naive] ( identifier[self] . identifier[revoked_date] , identifier[pytz] . identifier[utc] )
keyword[return] identifier[self] . identifier[revoked_date] | def get_revocation_time(self):
"""Get the revocation time as naive datetime.
Note that this method is only used by cryptography>=2.4.
"""
if self.revoked is False:
return # depends on [control=['if'], data=[]]
if timezone.is_aware(self.revoked_date):
# convert datetime object to UTC and make it naive
return timezone.make_naive(self.revoked_date, pytz.utc) # depends on [control=['if'], data=[]]
return self.revoked_date |
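
A self-contained sketch of the aware-to-naive conversion above, without Django; astimezone(pytz.utc).replace(tzinfo=None) reproduces the effect of timezone.make_naive(value, pytz.utc):

import datetime
import pytz

berlin = pytz.timezone("Europe/Berlin")
aware = berlin.localize(datetime.datetime(2019, 1, 1, 12, 0))   # tz-aware value
naive_utc = aware.astimezone(pytz.utc).replace(tzinfo=None)     # convert, then strip tzinfo
print(naive_utc)   # 2019-01-01 11:00:00
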
def lock(self, key, value, *, flags=None, session):
"""Locks the Key with the given Session
Parameters:
key (str): Key to set
        value (Payload): Value to set; it will be encoded by flags
session (ObjectID): Session ID
The Key will only be set if its current modify index matches the
supplied Index
"""
self.append({
"Verb": "lock",
"Key": key,
"Value": encode_value(value, flags, base64=True).decode("utf-8"),
"Flags": flags,
"Session": extract_attr(session, keys=["ID"])
})
return self | def function[lock, parameter[self, key, value]]:
constant[Locks the Key with the given Session
Parameters:
key (str): Key to set
        value (Payload): Value to set; it will be encoded by flags
session (ObjectID): Session ID
The Key will only be set if its current modify index matches the
supplied Index
]
call[name[self].append, parameter[dictionary[[<ast.Constant object at 0x7da18f09dde0>, <ast.Constant object at 0x7da18f09c430>, <ast.Constant object at 0x7da18f09fa30>, <ast.Constant object at 0x7da18f09e9b0>, <ast.Constant object at 0x7da18f09d840>], [<ast.Constant object at 0x7da18f09f250>, <ast.Name object at 0x7da18f09c100>, <ast.Call object at 0x7da18f09da50>, <ast.Name object at 0x7da18f09f910>, <ast.Call object at 0x7da18f09e620>]]]]
return[name[self]] | keyword[def] identifier[lock] ( identifier[self] , identifier[key] , identifier[value] ,*, identifier[flags] = keyword[None] , identifier[session] ):
literal[string]
identifier[self] . identifier[append] ({
literal[string] : literal[string] ,
literal[string] : identifier[key] ,
literal[string] : identifier[encode_value] ( identifier[value] , identifier[flags] , identifier[base64] = keyword[True] ). identifier[decode] ( literal[string] ),
literal[string] : identifier[flags] ,
literal[string] : identifier[extract_attr] ( identifier[session] , identifier[keys] =[ literal[string] ])
})
keyword[return] identifier[self] | def lock(self, key, value, *, flags=None, session):
"""Locks the Key with the given Session
Parameters:
key (str): Key to set
        value (Payload): Value to set; it will be encoded by flags
session (ObjectID): Session ID
The Key will only be set if its current modify index matches the
supplied Index
"""
self.append({'Verb': 'lock', 'Key': key, 'Value': encode_value(value, flags, base64=True).decode('utf-8'), 'Flags': flags, 'Session': extract_attr(session, keys=['ID'])})
return self |
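
A hedged sketch of the transaction entry this method appends, with encode_value approximated by plain base64 and a literal session ID standing in for extract_attr(session, keys=["ID"]):

import base64
import json

op = {
    "Verb": "lock",
    "Key": "service/leader",
    "Value": base64.b64encode(b"node-1").decode("utf-8"),
    "Flags": None,
    "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e",   # hypothetical session ID
}
print(json.dumps(op, indent=2))
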
def run(self, ds, skip_checks, *checker_names):
"""
Runs this CheckSuite on the dataset with all the passed Checker instances.
Returns a dictionary mapping checker names to a 2-tuple of their grouped scores and errors/exceptions while running checks.
"""
ret_val = {}
checkers = self._get_valid_checkers(ds, checker_names)
if skip_checks is not None:
skip_check_dict = CheckSuite._process_skip_checks(skip_checks)
else:
skip_check_dict = defaultdict(lambda: None)
if len(checkers) == 0:
print("No valid checkers found for tests '{}'".format(",".join(checker_names)))
for checker_name, checker_class in checkers:
checker = checker_class() # instantiate a Checker object
checker.setup(ds) # setup method to prep
checks = self._get_checks(checker, skip_check_dict)
vals = []
errs = {} # check method name -> (exc, traceback)
for c, max_level in checks:
try:
vals.extend(self._run_check(c, ds, max_level))
except Exception as e:
errs[c.__func__.__name__] = (e, sys.exc_info()[2])
# score the results we got back
groups = self.scores(vals)
ret_val[checker_name] = groups, errs
return ret_val | def function[run, parameter[self, ds, skip_checks]]:
constant[
Runs this CheckSuite on the dataset with all the passed Checker instances.
Returns a dictionary mapping checker names to a 2-tuple of their grouped scores and errors/exceptions while running checks.
]
variable[ret_val] assign[=] dictionary[[], []]
variable[checkers] assign[=] call[name[self]._get_valid_checkers, parameter[name[ds], name[checker_names]]]
if compare[name[skip_checks] is_not constant[None]] begin[:]
variable[skip_check_dict] assign[=] call[name[CheckSuite]._process_skip_checks, parameter[name[skip_checks]]]
if compare[call[name[len], parameter[name[checkers]]] equal[==] constant[0]] begin[:]
call[name[print], parameter[call[constant[No valid checkers found for tests '{}'].format, parameter[call[constant[,].join, parameter[name[checker_names]]]]]]]
for taget[tuple[[<ast.Name object at 0x7da2041dac20>, <ast.Name object at 0x7da2041db610>]]] in starred[name[checkers]] begin[:]
variable[checker] assign[=] call[name[checker_class], parameter[]]
call[name[checker].setup, parameter[name[ds]]]
variable[checks] assign[=] call[name[self]._get_checks, parameter[name[checker], name[skip_check_dict]]]
variable[vals] assign[=] list[[]]
variable[errs] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da2041db820>, <ast.Name object at 0x7da2041db850>]]] in starred[name[checks]] begin[:]
<ast.Try object at 0x7da2041d9840>
variable[groups] assign[=] call[name[self].scores, parameter[name[vals]]]
call[name[ret_val]][name[checker_name]] assign[=] tuple[[<ast.Name object at 0x7da2041d96c0>, <ast.Name object at 0x7da2041d8520>]]
return[name[ret_val]] | keyword[def] identifier[run] ( identifier[self] , identifier[ds] , identifier[skip_checks] ,* identifier[checker_names] ):
literal[string]
identifier[ret_val] ={}
identifier[checkers] = identifier[self] . identifier[_get_valid_checkers] ( identifier[ds] , identifier[checker_names] )
keyword[if] identifier[skip_checks] keyword[is] keyword[not] keyword[None] :
identifier[skip_check_dict] = identifier[CheckSuite] . identifier[_process_skip_checks] ( identifier[skip_checks] )
keyword[else] :
identifier[skip_check_dict] = identifier[defaultdict] ( keyword[lambda] : keyword[None] )
keyword[if] identifier[len] ( identifier[checkers] )== literal[int] :
identifier[print] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[checker_names] )))
keyword[for] identifier[checker_name] , identifier[checker_class] keyword[in] identifier[checkers] :
identifier[checker] = identifier[checker_class] ()
identifier[checker] . identifier[setup] ( identifier[ds] )
identifier[checks] = identifier[self] . identifier[_get_checks] ( identifier[checker] , identifier[skip_check_dict] )
identifier[vals] =[]
identifier[errs] ={}
keyword[for] identifier[c] , identifier[max_level] keyword[in] identifier[checks] :
keyword[try] :
identifier[vals] . identifier[extend] ( identifier[self] . identifier[_run_check] ( identifier[c] , identifier[ds] , identifier[max_level] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[errs] [ identifier[c] . identifier[__func__] . identifier[__name__] ]=( identifier[e] , identifier[sys] . identifier[exc_info] ()[ literal[int] ])
identifier[groups] = identifier[self] . identifier[scores] ( identifier[vals] )
identifier[ret_val] [ identifier[checker_name] ]= identifier[groups] , identifier[errs]
keyword[return] identifier[ret_val] | def run(self, ds, skip_checks, *checker_names):
"""
Runs this CheckSuite on the dataset with all the passed Checker instances.
Returns a dictionary mapping checker names to a 2-tuple of their grouped scores and errors/exceptions while running checks.
"""
ret_val = {}
checkers = self._get_valid_checkers(ds, checker_names)
if skip_checks is not None:
skip_check_dict = CheckSuite._process_skip_checks(skip_checks) # depends on [control=['if'], data=['skip_checks']]
else:
skip_check_dict = defaultdict(lambda : None)
if len(checkers) == 0:
print("No valid checkers found for tests '{}'".format(','.join(checker_names))) # depends on [control=['if'], data=[]]
for (checker_name, checker_class) in checkers:
checker = checker_class() # instantiate a Checker object
checker.setup(ds) # setup method to prep
checks = self._get_checks(checker, skip_check_dict)
vals = []
errs = {} # check method name -> (exc, traceback)
for (c, max_level) in checks:
try:
vals.extend(self._run_check(c, ds, max_level)) # depends on [control=['try'], data=[]]
except Exception as e:
errs[c.__func__.__name__] = (e, sys.exc_info()[2]) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=[]]
# score the results we got back
groups = self.scores(vals)
ret_val[checker_name] = (groups, errs) # depends on [control=['for'], data=[]]
return ret_val |
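
A hedged usage sketch in the style of the IOOS compliance checker; the discovery and loader calls are assumptions about the surrounding class rather than part of this snippet:

cs = CheckSuite()
cs.load_all_available_checkers()       # assumed checker discovery step
ds = cs.load_dataset("test.nc")        # assumed dataset loader
results = cs.run(ds, None, "cf")       # skip_checks=None, run only the "cf" checker
groups, errors = results["cf"]
print(len(groups), "score groups;", len(errors), "failed checks")
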
def from_schemafile(cls, schemafile):
"""Create a Flatson instance from a schemafile
"""
with open(schemafile) as f:
return cls(json.load(f)) | def function[from_schemafile, parameter[cls, schemafile]]:
constant[Create a Flatson instance from a schemafile
]
with call[name[open], parameter[name[schemafile]]] begin[:]
return[call[name[cls], parameter[call[name[json].load, parameter[name[f]]]]]] | keyword[def] identifier[from_schemafile] ( identifier[cls] , identifier[schemafile] ):
literal[string]
keyword[with] identifier[open] ( identifier[schemafile] ) keyword[as] identifier[f] :
keyword[return] identifier[cls] ( identifier[json] . identifier[load] ( identifier[f] )) | def from_schemafile(cls, schemafile):
"""Create a Flatson instance from a schemafile
"""
with open(schemafile) as f:
return cls(json.load(f)) # depends on [control=['with'], data=['f']] |
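
A hedged usage sketch: write a minimal JSON schema to disk and load it through the classmethod (the schema content is illustrative):

import json

schema = {"$schema": "http://json-schema.org/draft-04/schema",
          "type": "object",
          "properties": {"name": {"type": "string"}}}
with open("item_schema.json", "w") as f:
    json.dump(schema, f)

flatson = Flatson.from_schemafile("item_schema.json")   # equivalent to Flatson(schema)
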
def calculate_size(name, id):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += calculate_size_str(id)
return data_size | def function[calculate_size, parameter[name, id]]:
constant[ Calculates the request payload size]
variable[data_size] assign[=] constant[0]
<ast.AugAssign object at 0x7da1b16a40d0>
<ast.AugAssign object at 0x7da1b16a71c0>
return[name[data_size]] | keyword[def] identifier[calculate_size] ( identifier[name] , identifier[id] ):
literal[string]
identifier[data_size] = literal[int]
identifier[data_size] += identifier[calculate_size_str] ( identifier[name] )
identifier[data_size] += identifier[calculate_size_str] ( identifier[id] )
keyword[return] identifier[data_size] | def calculate_size(name, id):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += calculate_size_str(id)
return data_size |
def extractRuntime(runtime_dirs):
"""
Used to find the correct static lib name to pass to gcc
"""
names = [str(item) for name in runtime_dirs for item in os.listdir(name)]
string = '\n'.join(names)
result = extract(RUNTIME_PATTERN, string, condense=True)
return result | def function[extractRuntime, parameter[runtime_dirs]]:
constant[
Used to find the correct static lib name to pass to gcc
]
variable[names] assign[=] <ast.ListComp object at 0x7da1b2373190>
variable[string] assign[=] call[constant[
].join, parameter[name[names]]]
variable[result] assign[=] call[name[extract], parameter[name[RUNTIME_PATTERN], name[string]]]
return[name[result]] | keyword[def] identifier[extractRuntime] ( identifier[runtime_dirs] ):
literal[string]
identifier[names] =[ identifier[str] ( identifier[item] ) keyword[for] identifier[name] keyword[in] identifier[runtime_dirs] keyword[for] identifier[item] keyword[in] identifier[os] . identifier[listdir] ( identifier[name] )]
identifier[string] = literal[string] . identifier[join] ( identifier[names] )
identifier[result] = identifier[extract] ( identifier[RUNTIME_PATTERN] , identifier[string] , identifier[condense] = keyword[True] )
keyword[return] identifier[result] | def extractRuntime(runtime_dirs):
"""
Used to find the correct static lib name to pass to gcc
"""
names = [str(item) for name in runtime_dirs for item in os.listdir(name)]
string = '\n'.join(names)
result = extract(RUNTIME_PATTERN, string, condense=True)
return result |
def arango_id_to_key(_id):
"""Remove illegal chars from potential arangodb _key (id)
Args:
_id (str): id to be used as arangodb _key
Returns:
(str): _key value with illegal chars removed
"""
key = re.sub(r"[^a-zA-Z0-9\_\-\:\.\@\(\)\+\,\=\;\$\!\*\%]+", r"_", _id)
if len(key) > 254:
log.error(
f"Arango _key cannot be longer than 254 chars: Len={len(key)} Key: {key}"
)
elif len(key) < 1:
log.error(f"Arango _key cannot be an empty string: Len={len(key)} Key: {key}")
return key | def function[arango_id_to_key, parameter[_id]]:
constant[Remove illegal chars from potential arangodb _key (id)
Args:
_id (str): id to be used as arangodb _key
Returns:
(str): _key value with illegal chars removed
]
variable[key] assign[=] call[name[re].sub, parameter[constant[[^a-zA-Z0-9\_\-\:\.\@\(\)\+\,\=\;\$\!\*\%]+], constant[_], name[_id]]]
if compare[call[name[len], parameter[name[key]]] greater[>] constant[254]] begin[:]
call[name[log].error, parameter[<ast.JoinedStr object at 0x7da18f58db10>]]
return[name[key]] | keyword[def] identifier[arango_id_to_key] ( identifier[_id] ):
literal[string]
identifier[key] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[_id] )
keyword[if] identifier[len] ( identifier[key] )> literal[int] :
identifier[log] . identifier[error] (
literal[string]
)
keyword[elif] identifier[len] ( identifier[key] )< literal[int] :
identifier[log] . identifier[error] ( literal[string] )
keyword[return] identifier[key] | def arango_id_to_key(_id):
"""Remove illegal chars from potential arangodb _key (id)
Args:
_id (str): id to be used as arangodb _key
Returns:
(str): _key value with illegal chars removed
"""
key = re.sub('[^a-zA-Z0-9\\_\\-\\:\\.\\@\\(\\)\\+\\,\\=\\;\\$\\!\\*\\%]+', '_', _id)
if len(key) > 254:
log.error(f'Arango _key cannot be longer than 254 chars: Len={len(key)} Key: {key}') # depends on [control=['if'], data=[]]
elif len(key) < 1:
log.error(f'Arango _key cannot be an empty string: Len={len(key)} Key: {key}') # depends on [control=['if'], data=[]]
return key |
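# Usage sketch for arango_id_to_key above, standard library only; the raw
# id is a made-up example, not taken from the source.
import re
raw_id = "gene/BRCA1 variant#42"  # '/', ' ' and '#' are illegal in an ArangoDB _key
key = re.sub(r"[^a-zA-Z0-9\_\-\:\.\@\(\)\+\,\=\;\$\!\*\%]+", r"_", raw_id)
assert key == "gene_BRCA1_variant_42"  # each run of illegal chars collapses to one '_'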
def no_sleep():
"""
Context that prevents the computer from going to sleep.
"""
mode = power.ES.continuous | power.ES.system_required
handle_nonzero_success(power.SetThreadExecutionState(mode))
try:
yield
finally:
handle_nonzero_success(power.SetThreadExecutionState(power.ES.continuous)) | def function[no_sleep, parameter[]]:
constant[
Context that prevents the computer from going to sleep.
]
variable[mode] assign[=] binary_operation[name[power].ES.continuous <ast.BitOr object at 0x7da2590d6aa0> name[power].ES.system_required]
call[name[handle_nonzero_success], parameter[call[name[power].SetThreadExecutionState, parameter[name[mode]]]]]
<ast.Try object at 0x7da18dc992a0> | keyword[def] identifier[no_sleep] ():
literal[string]
identifier[mode] = identifier[power] . identifier[ES] . identifier[continuous] | identifier[power] . identifier[ES] . identifier[system_required]
identifier[handle_nonzero_success] ( identifier[power] . identifier[SetThreadExecutionState] ( identifier[mode] ))
keyword[try] :
keyword[yield]
keyword[finally] :
identifier[handle_nonzero_success] ( identifier[power] . identifier[SetThreadExecutionState] ( identifier[power] . identifier[ES] . identifier[continuous] )) | def no_sleep():
"""
Context that prevents the computer from going to sleep.
"""
mode = power.ES.continuous | power.ES.system_required
handle_nonzero_success(power.SetThreadExecutionState(mode))
try:
yield # depends on [control=['try'], data=[]]
finally:
handle_nonzero_success(power.SetThreadExecutionState(power.ES.continuous)) |
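# no_sleep above is a generator written for use as a context manager; the
# original source presumably wraps it with contextlib.contextmanager (the
# decorator is not shown here). A minimal, platform-independent sketch of
# the same acquire/yield/finally-release pattern:
import contextlib
@contextlib.contextmanager
def hold_state(acquire, release):
    acquire()      # set the state on entry
    try:
        yield      # run the body of the with-block
    finally:
        release()  # always restore, even if the body raises
with hold_state(lambda: print("stay awake"), lambda: print("may sleep")):
    pass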
def _match_service(self, line_with_color):
"""Return line if line matches this service's name, return None otherwise."""
line = re.compile("(\x1b\[\d+m)+").sub("", line_with_color) # Strip color codes
regexp = re.compile(r"^\[(.*?)\]\s(.*?)$")
if regexp.match(line):
title = regexp.match(line).group(1).strip()
if title in self.titles:
return (title, regexp.match(line).group(2))
return None | def function[_match_service, parameter[self, line_with_color]]:
    constant[Return a (title, message) tuple if the line matches one of this service's titles, return None otherwise.]
variable[line] assign[=] call[call[name[re].compile, parameter[constant[(\[\d+m)+]]].sub, parameter[constant[], name[line_with_color]]]
variable[regexp] assign[=] call[name[re].compile, parameter[constant[^\[(.*?)\]\s(.*?)$]]]
if call[name[regexp].match, parameter[name[line]]] begin[:]
variable[title] assign[=] call[call[call[name[regexp].match, parameter[name[line]]].group, parameter[constant[1]]].strip, parameter[]]
if compare[name[title] in name[self].titles] begin[:]
return[tuple[[<ast.Name object at 0x7da1b15d7820>, <ast.Call object at 0x7da1b15d7010>]]]
return[constant[None]] | keyword[def] identifier[_match_service] ( identifier[self] , identifier[line_with_color] ):
literal[string]
identifier[line] = identifier[re] . identifier[compile] ( literal[string] ). identifier[sub] ( literal[string] , identifier[line_with_color] )
identifier[regexp] = identifier[re] . identifier[compile] ( literal[string] )
keyword[if] identifier[regexp] . identifier[match] ( identifier[line] ):
identifier[title] = identifier[regexp] . identifier[match] ( identifier[line] ). identifier[group] ( literal[int] ). identifier[strip] ()
keyword[if] identifier[title] keyword[in] identifier[self] . identifier[titles] :
keyword[return] ( identifier[title] , identifier[regexp] . identifier[match] ( identifier[line] ). identifier[group] ( literal[int] ))
keyword[return] keyword[None] | def _match_service(self, line_with_color):
"""Return line if line matches this service's name, return None otherwise."""
line = re.compile('(\x1b\\[\\d+m)+').sub('', line_with_color) # Strip color codes
regexp = re.compile('^\\[(.*?)\\]\\s(.*?)$')
if regexp.match(line):
title = regexp.match(line).group(1).strip()
if title in self.titles:
return (title, regexp.match(line).group(2)) # depends on [control=['if'], data=['title']] # depends on [control=['if'], data=[]]
return None |
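# Quick check of the title-extraction regex used by _match_service above;
# the log line is a made-up example.
import re
line = "[web.1] Listening on port 5000"
m = re.compile(r"^\[(.*?)\]\s(.*?)$").match(line)
assert (m.group(1).strip(), m.group(2)) == ("web.1", "Listening on port 5000")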
def EventIvorn(ivorn, cite_type):
"""
Used to cite earlier VOEvents.
Use in conjunction with :func:`.add_citations`
Args:
ivorn(str): It is assumed this will be copied verbatim from elsewhere,
            and so it should have any prefix (e.g. 'ivo://', 'http://')
already in place - the function will not alter the value.
cite_type (:class:`.definitions.cite_types`): String conforming to one
of the standard citation types.
"""
# This is an ugly hack around the limitations of the lxml.objectify API:
c = objectify.StringElement(cite=cite_type)
c._setText(ivorn)
c.tag = "EventIVORN"
return c | def function[EventIvorn, parameter[ivorn, cite_type]]:
constant[
Used to cite earlier VOEvents.
Use in conjunction with :func:`.add_citations`
Args:
ivorn(str): It is assumed this will be copied verbatim from elsewhere,
        and so it should have any prefix (e.g. 'ivo://', 'http://')
already in place - the function will not alter the value.
cite_type (:class:`.definitions.cite_types`): String conforming to one
of the standard citation types.
]
variable[c] assign[=] call[name[objectify].StringElement, parameter[]]
call[name[c]._setText, parameter[name[ivorn]]]
name[c].tag assign[=] constant[EventIVORN]
return[name[c]] | keyword[def] identifier[EventIvorn] ( identifier[ivorn] , identifier[cite_type] ):
literal[string]
identifier[c] = identifier[objectify] . identifier[StringElement] ( identifier[cite] = identifier[cite_type] )
identifier[c] . identifier[_setText] ( identifier[ivorn] )
identifier[c] . identifier[tag] = literal[string]
keyword[return] identifier[c] | def EventIvorn(ivorn, cite_type):
"""
Used to cite earlier VOEvents.
Use in conjunction with :func:`.add_citations`
Args:
ivorn(str): It is assumed this will be copied verbatim from elsewhere,
        and so it should have any prefix (e.g. 'ivo://', 'http://')
already in place - the function will not alter the value.
cite_type (:class:`.definitions.cite_types`): String conforming to one
of the standard citation types.
"""
# This is an ugly hack around the limitations of the lxml.objectify API:
c = objectify.StringElement(cite=cite_type)
c._setText(ivorn)
c.tag = 'EventIVORN'
return c |
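# Sketch of what EventIvorn above builds (requires lxml; the ivorn value is
# a hypothetical example). The element serializes roughly as
# <EventIVORN cite="followup">ivo://example.org/alerts#123</EventIVORN>.
from lxml import etree, objectify
c = objectify.StringElement(cite="followup")
c._setText("ivo://example.org/alerts#123")
c.tag = "EventIVORN"
print(etree.tostring(c))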
def get_polygons(self, by_spec=False, depth=None):
"""
Returns a list of polygons created by this reference.
Parameters
----------
by_spec : bool
If ``True``, the return value is a dictionary with the
polygons of each individual pair (layer, datatype).
depth : integer or ``None``
If not ``None``, defines from how many reference levels to
retrieve polygons. References below this level will result
in a bounding box. If ``by_spec`` is ``True`` the key will
            be the name of the referenced cell.
Returns
-------
out : list of array-like[N][2] or dictionary
List containing the coordinates of the vertices of each
polygon, or dictionary with the list of polygons (if
``by_spec`` is ``True``).
"""
if not isinstance(self.ref_cell, Cell):
return dict() if by_spec else []
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0)
st = numpy.array([-st, st])
if self.magnification is not None:
mag = numpy.array([self.magnification, self.magnification])
if self.origin is not None:
orgn = numpy.array(self.origin)
if self.x_reflection:
xrefl = numpy.array([1, -1], dtype='int')
if by_spec:
cell_polygons = self.ref_cell.get_polygons(True, depth)
polygons = {}
for kk in cell_polygons.keys():
polygons[kk] = []
for ii in range(self.columns):
for jj in range(self.rows):
spc = numpy.array(
[self.spacing[0] * ii, self.spacing[1] * jj])
for points in cell_polygons[kk]:
if self.magnification:
polygons[kk].append(points * mag + spc)
else:
polygons[kk].append(points + spc)
if self.x_reflection:
polygons[kk][-1] = polygons[kk][-1] * xrefl
if self.rotation is not None:
polygons[kk][-1] = (
polygons[kk][-1] * ct +
polygons[kk][-1][:, ::-1] * st)
if self.origin is not None:
polygons[kk][-1] = polygons[kk][-1] + orgn
else:
cell_polygons = self.ref_cell.get_polygons(depth=depth)
polygons = []
for ii in range(self.columns):
for jj in range(self.rows):
spc = numpy.array(
[self.spacing[0] * ii, self.spacing[1] * jj])
for points in cell_polygons:
if self.magnification:
polygons.append(points * mag + spc)
else:
polygons.append(points + spc)
if self.x_reflection:
polygons[-1] = polygons[-1] * xrefl
if self.rotation is not None:
polygons[-1] = (
polygons[-1] * ct + polygons[-1][:, ::-1] * st)
if self.origin is not None:
polygons[-1] = polygons[-1] + orgn
return polygons | def function[get_polygons, parameter[self, by_spec, depth]]:
constant[
Returns a list of polygons created by this reference.
Parameters
----------
by_spec : bool
If ``True``, the return value is a dictionary with the
polygons of each individual pair (layer, datatype).
depth : integer or ``None``
If not ``None``, defines from how many reference levels to
retrieve polygons. References below this level will result
in a bounding box. If ``by_spec`` is ``True`` the key will
        be the name of the referenced cell.
Returns
-------
out : list of array-like[N][2] or dictionary
List containing the coordinates of the vertices of each
polygon, or dictionary with the list of polygons (if
``by_spec`` is ``True``).
]
if <ast.UnaryOp object at 0x7da2047e9ba0> begin[:]
return[<ast.IfExp object at 0x7da2047e8910>]
if compare[name[self].rotation is_not constant[None]] begin[:]
variable[ct] assign[=] call[name[numpy].cos, parameter[binary_operation[binary_operation[name[self].rotation * name[numpy].pi] / constant[180.0]]]]
variable[st] assign[=] call[name[numpy].sin, parameter[binary_operation[binary_operation[name[self].rotation * name[numpy].pi] / constant[180.0]]]]
variable[st] assign[=] call[name[numpy].array, parameter[list[[<ast.UnaryOp object at 0x7da2047e9c90>, <ast.Name object at 0x7da2047e95a0>]]]]
if compare[name[self].magnification is_not constant[None]] begin[:]
variable[mag] assign[=] call[name[numpy].array, parameter[list[[<ast.Attribute object at 0x7da2047e9540>, <ast.Attribute object at 0x7da2047e8a30>]]]]
if compare[name[self].origin is_not constant[None]] begin[:]
variable[orgn] assign[=] call[name[numpy].array, parameter[name[self].origin]]
if name[self].x_reflection begin[:]
variable[xrefl] assign[=] call[name[numpy].array, parameter[list[[<ast.Constant object at 0x7da2047e8130>, <ast.UnaryOp object at 0x7da2047e89a0>]]]]
if name[by_spec] begin[:]
variable[cell_polygons] assign[=] call[name[self].ref_cell.get_polygons, parameter[constant[True], name[depth]]]
variable[polygons] assign[=] dictionary[[], []]
for taget[name[kk]] in starred[call[name[cell_polygons].keys, parameter[]]] begin[:]
call[name[polygons]][name[kk]] assign[=] list[[]]
for taget[name[ii]] in starred[call[name[range], parameter[name[self].columns]]] begin[:]
for taget[name[jj]] in starred[call[name[range], parameter[name[self].rows]]] begin[:]
variable[spc] assign[=] call[name[numpy].array, parameter[list[[<ast.BinOp object at 0x7da20ec062c0>, <ast.BinOp object at 0x7da20c7961a0>]]]]
for taget[name[points]] in starred[call[name[cell_polygons]][name[kk]]] begin[:]
if name[self].magnification begin[:]
call[call[name[polygons]][name[kk]].append, parameter[binary_operation[binary_operation[name[points] * name[mag]] + name[spc]]]]
if name[self].x_reflection begin[:]
call[call[name[polygons]][name[kk]]][<ast.UnaryOp object at 0x7da1b065d360>] assign[=] binary_operation[call[call[name[polygons]][name[kk]]][<ast.UnaryOp object at 0x7da1b065eb00>] * name[xrefl]]
if compare[name[self].rotation is_not constant[None]] begin[:]
call[call[name[polygons]][name[kk]]][<ast.UnaryOp object at 0x7da1b065e260>] assign[=] binary_operation[binary_operation[call[call[name[polygons]][name[kk]]][<ast.UnaryOp object at 0x7da1b065ea70>] * name[ct]] + binary_operation[call[call[call[name[polygons]][name[kk]]][<ast.UnaryOp object at 0x7da207f010c0>]][tuple[[<ast.Slice object at 0x7da207f03910>, <ast.Slice object at 0x7da207f00e50>]]] * name[st]]]
if compare[name[self].origin is_not constant[None]] begin[:]
call[call[name[polygons]][name[kk]]][<ast.UnaryOp object at 0x7da207f01360>] assign[=] binary_operation[call[call[name[polygons]][name[kk]]][<ast.UnaryOp object at 0x7da207f03b80>] + name[orgn]]
return[name[polygons]] | keyword[def] identifier[get_polygons] ( identifier[self] , identifier[by_spec] = keyword[False] , identifier[depth] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[ref_cell] , identifier[Cell] ):
keyword[return] identifier[dict] () keyword[if] identifier[by_spec] keyword[else] []
keyword[if] identifier[self] . identifier[rotation] keyword[is] keyword[not] keyword[None] :
identifier[ct] = identifier[numpy] . identifier[cos] ( identifier[self] . identifier[rotation] * identifier[numpy] . identifier[pi] / literal[int] )
identifier[st] = identifier[numpy] . identifier[sin] ( identifier[self] . identifier[rotation] * identifier[numpy] . identifier[pi] / literal[int] )
identifier[st] = identifier[numpy] . identifier[array] ([- identifier[st] , identifier[st] ])
keyword[if] identifier[self] . identifier[magnification] keyword[is] keyword[not] keyword[None] :
identifier[mag] = identifier[numpy] . identifier[array] ([ identifier[self] . identifier[magnification] , identifier[self] . identifier[magnification] ])
keyword[if] identifier[self] . identifier[origin] keyword[is] keyword[not] keyword[None] :
identifier[orgn] = identifier[numpy] . identifier[array] ( identifier[self] . identifier[origin] )
keyword[if] identifier[self] . identifier[x_reflection] :
identifier[xrefl] = identifier[numpy] . identifier[array] ([ literal[int] ,- literal[int] ], identifier[dtype] = literal[string] )
keyword[if] identifier[by_spec] :
identifier[cell_polygons] = identifier[self] . identifier[ref_cell] . identifier[get_polygons] ( keyword[True] , identifier[depth] )
identifier[polygons] ={}
keyword[for] identifier[kk] keyword[in] identifier[cell_polygons] . identifier[keys] ():
identifier[polygons] [ identifier[kk] ]=[]
keyword[for] identifier[ii] keyword[in] identifier[range] ( identifier[self] . identifier[columns] ):
keyword[for] identifier[jj] keyword[in] identifier[range] ( identifier[self] . identifier[rows] ):
identifier[spc] = identifier[numpy] . identifier[array] (
[ identifier[self] . identifier[spacing] [ literal[int] ]* identifier[ii] , identifier[self] . identifier[spacing] [ literal[int] ]* identifier[jj] ])
keyword[for] identifier[points] keyword[in] identifier[cell_polygons] [ identifier[kk] ]:
keyword[if] identifier[self] . identifier[magnification] :
identifier[polygons] [ identifier[kk] ]. identifier[append] ( identifier[points] * identifier[mag] + identifier[spc] )
keyword[else] :
identifier[polygons] [ identifier[kk] ]. identifier[append] ( identifier[points] + identifier[spc] )
keyword[if] identifier[self] . identifier[x_reflection] :
identifier[polygons] [ identifier[kk] ][- literal[int] ]= identifier[polygons] [ identifier[kk] ][- literal[int] ]* identifier[xrefl]
keyword[if] identifier[self] . identifier[rotation] keyword[is] keyword[not] keyword[None] :
identifier[polygons] [ identifier[kk] ][- literal[int] ]=(
identifier[polygons] [ identifier[kk] ][- literal[int] ]* identifier[ct] +
identifier[polygons] [ identifier[kk] ][- literal[int] ][:,::- literal[int] ]* identifier[st] )
keyword[if] identifier[self] . identifier[origin] keyword[is] keyword[not] keyword[None] :
identifier[polygons] [ identifier[kk] ][- literal[int] ]= identifier[polygons] [ identifier[kk] ][- literal[int] ]+ identifier[orgn]
keyword[else] :
identifier[cell_polygons] = identifier[self] . identifier[ref_cell] . identifier[get_polygons] ( identifier[depth] = identifier[depth] )
identifier[polygons] =[]
keyword[for] identifier[ii] keyword[in] identifier[range] ( identifier[self] . identifier[columns] ):
keyword[for] identifier[jj] keyword[in] identifier[range] ( identifier[self] . identifier[rows] ):
identifier[spc] = identifier[numpy] . identifier[array] (
[ identifier[self] . identifier[spacing] [ literal[int] ]* identifier[ii] , identifier[self] . identifier[spacing] [ literal[int] ]* identifier[jj] ])
keyword[for] identifier[points] keyword[in] identifier[cell_polygons] :
keyword[if] identifier[self] . identifier[magnification] :
identifier[polygons] . identifier[append] ( identifier[points] * identifier[mag] + identifier[spc] )
keyword[else] :
identifier[polygons] . identifier[append] ( identifier[points] + identifier[spc] )
keyword[if] identifier[self] . identifier[x_reflection] :
identifier[polygons] [- literal[int] ]= identifier[polygons] [- literal[int] ]* identifier[xrefl]
keyword[if] identifier[self] . identifier[rotation] keyword[is] keyword[not] keyword[None] :
identifier[polygons] [- literal[int] ]=(
identifier[polygons] [- literal[int] ]* identifier[ct] + identifier[polygons] [- literal[int] ][:,::- literal[int] ]* identifier[st] )
keyword[if] identifier[self] . identifier[origin] keyword[is] keyword[not] keyword[None] :
identifier[polygons] [- literal[int] ]= identifier[polygons] [- literal[int] ]+ identifier[orgn]
keyword[return] identifier[polygons] | def get_polygons(self, by_spec=False, depth=None):
"""
Returns a list of polygons created by this reference.
Parameters
----------
by_spec : bool
If ``True``, the return value is a dictionary with the
polygons of each individual pair (layer, datatype).
depth : integer or ``None``
If not ``None``, defines from how many reference levels to
retrieve polygons. References below this level will result
in a bounding box. If ``by_spec`` is ``True`` the key will
        be the name of the referenced cell.
Returns
-------
out : list of array-like[N][2] or dictionary
List containing the coordinates of the vertices of each
polygon, or dictionary with the list of polygons (if
``by_spec`` is ``True``).
"""
if not isinstance(self.ref_cell, Cell):
return dict() if by_spec else [] # depends on [control=['if'], data=[]]
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0)
st = numpy.array([-st, st]) # depends on [control=['if'], data=[]]
if self.magnification is not None:
mag = numpy.array([self.magnification, self.magnification]) # depends on [control=['if'], data=[]]
if self.origin is not None:
orgn = numpy.array(self.origin) # depends on [control=['if'], data=[]]
if self.x_reflection:
xrefl = numpy.array([1, -1], dtype='int') # depends on [control=['if'], data=[]]
if by_spec:
cell_polygons = self.ref_cell.get_polygons(True, depth)
polygons = {}
for kk in cell_polygons.keys():
polygons[kk] = []
for ii in range(self.columns):
for jj in range(self.rows):
spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])
for points in cell_polygons[kk]:
if self.magnification:
polygons[kk].append(points * mag + spc) # depends on [control=['if'], data=[]]
else:
polygons[kk].append(points + spc)
if self.x_reflection:
polygons[kk][-1] = polygons[kk][-1] * xrefl # depends on [control=['if'], data=[]]
if self.rotation is not None:
polygons[kk][-1] = polygons[kk][-1] * ct + polygons[kk][-1][:, ::-1] * st # depends on [control=['if'], data=[]]
if self.origin is not None:
polygons[kk][-1] = polygons[kk][-1] + orgn # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['points']] # depends on [control=['for'], data=['jj']] # depends on [control=['for'], data=['ii']] # depends on [control=['for'], data=['kk']] # depends on [control=['if'], data=[]]
else:
cell_polygons = self.ref_cell.get_polygons(depth=depth)
polygons = []
for ii in range(self.columns):
for jj in range(self.rows):
spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])
for points in cell_polygons:
if self.magnification:
polygons.append(points * mag + spc) # depends on [control=['if'], data=[]]
else:
polygons.append(points + spc)
if self.x_reflection:
polygons[-1] = polygons[-1] * xrefl # depends on [control=['if'], data=[]]
if self.rotation is not None:
polygons[-1] = polygons[-1] * ct + polygons[-1][:, ::-1] * st # depends on [control=['if'], data=[]]
if self.origin is not None:
polygons[-1] = polygons[-1] + orgn # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['points']] # depends on [control=['for'], data=['jj']] # depends on [control=['for'], data=['ii']]
return polygons |
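# The rotation step in get_polygons above relies on the identity
# points * ct + points[:, ::-1] * st, with st = [-sin, sin], being the
# standard 2-D rotation applied row-wise. A quick numpy check:
import numpy
theta = numpy.pi / 6
ct = numpy.cos(theta)
st = numpy.array([-numpy.sin(theta), numpy.sin(theta)])
pts = numpy.array([[1.0, 2.0], [3.0, -4.0]])
R = numpy.array([[numpy.cos(theta), numpy.sin(theta)],
                 [-numpy.sin(theta), numpy.cos(theta)]])  # row-vector rotation
assert numpy.allclose(pts * ct + pts[:, ::-1] * st, pts @ R)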
def folder2db(folder_name, debug, energy_limit, skip_folders,
goto_reaction):
"""Read folder and collect data in local sqlite3 database"""
folder_name = folder_name.rstrip('/')
skip = []
for s in skip_folders.split(', '):
for sk in s.split(','):
skip.append(sk)
pub_id = _folder2db.main(folder_name, debug, energy_limit,
skip, goto_reaction)
if pub_id:
print('')
print('')
print('Ready to release the data?')
print(
" Send it to the Catalysis-Hub server with 'cathub db2server {folder_name}/{pub_id}.db'.".format(**locals()))
print(" Then log in at www.catalysis-hub.org/upload/ to verify and release. ") | def function[folder2db, parameter[folder_name, debug, energy_limit, skip_folders, goto_reaction]]:
constant[Read folder and collect data in local sqlite3 database]
variable[folder_name] assign[=] call[name[folder_name].rstrip, parameter[constant[/]]]
variable[skip] assign[=] list[[]]
for taget[name[s]] in starred[call[name[skip_folders].split, parameter[constant[, ]]]] begin[:]
for taget[name[sk]] in starred[call[name[s].split, parameter[constant[,]]]] begin[:]
call[name[skip].append, parameter[name[sk]]]
variable[pub_id] assign[=] call[name[_folder2db].main, parameter[name[folder_name], name[debug], name[energy_limit], name[skip], name[goto_reaction]]]
if name[pub_id] begin[:]
call[name[print], parameter[constant[]]]
call[name[print], parameter[constant[]]]
call[name[print], parameter[constant[Ready to release the data?]]]
call[name[print], parameter[call[constant[ Send it to the Catalysis-Hub server with 'cathub db2server {folder_name}/{pub_id}.db'.].format, parameter[]]]]
call[name[print], parameter[constant[ Then log in at www.catalysis-hub.org/upload/ to verify and release. ]]] | keyword[def] identifier[folder2db] ( identifier[folder_name] , identifier[debug] , identifier[energy_limit] , identifier[skip_folders] ,
identifier[goto_reaction] ):
literal[string]
identifier[folder_name] = identifier[folder_name] . identifier[rstrip] ( literal[string] )
identifier[skip] =[]
keyword[for] identifier[s] keyword[in] identifier[skip_folders] . identifier[split] ( literal[string] ):
keyword[for] identifier[sk] keyword[in] identifier[s] . identifier[split] ( literal[string] ):
identifier[skip] . identifier[append] ( identifier[sk] )
identifier[pub_id] = identifier[_folder2db] . identifier[main] ( identifier[folder_name] , identifier[debug] , identifier[energy_limit] ,
identifier[skip] , identifier[goto_reaction] )
keyword[if] identifier[pub_id] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] (
literal[string] . identifier[format] (** identifier[locals] ()))
identifier[print] ( literal[string] ) | def folder2db(folder_name, debug, energy_limit, skip_folders, goto_reaction):
"""Read folder and collect data in local sqlite3 database"""
folder_name = folder_name.rstrip('/')
skip = []
for s in skip_folders.split(', '):
for sk in s.split(','):
skip.append(sk) # depends on [control=['for'], data=['sk']] # depends on [control=['for'], data=['s']]
pub_id = _folder2db.main(folder_name, debug, energy_limit, skip, goto_reaction)
if pub_id:
print('')
print('')
print('Ready to release the data?')
print(" Send it to the Catalysis-Hub server with 'cathub db2server {folder_name}/{pub_id}.db'.".format(**locals()))
print(' Then log in at www.catalysis-hub.org/upload/ to verify and release. ') # depends on [control=['if'], data=[]] |
def make_frog_fresco(text, width, padding=8):
"""\
Formats your lovely text into a speech bubble spouted by this adorable
little frog.
"""
stem = r' /'
frog = r"""
{text}
{stem}
@..@
(----)
( >__< )
^^ ~~ ^^"""
offset = len(stem) - 1
formatted_indent = ' ' * offset
formatted_text = textwrap.fill(text, width=width-padding,
initial_indent=formatted_indent, subsequent_indent=formatted_indent)
return frog.format(stem=stem, text=formatted_text) | def function[make_frog_fresco, parameter[text, width, padding]]:
constant[ Formats your lovely text into a speech bubble spouted by this adorable
little frog.
]
variable[stem] assign[=] constant[ /]
variable[frog] assign[=] constant[
{text}
{stem}
@..@
(----)
( >__< )
^^ ~~ ^^]
variable[offset] assign[=] binary_operation[call[name[len], parameter[name[stem]]] - constant[1]]
variable[formatted_indent] assign[=] binary_operation[constant[ ] * name[offset]]
variable[formatted_text] assign[=] call[name[textwrap].fill, parameter[name[text]]]
return[call[name[frog].format, parameter[]]] | keyword[def] identifier[make_frog_fresco] ( identifier[text] , identifier[width] , identifier[padding] = literal[int] ):
literal[string]
identifier[stem] = literal[string]
identifier[frog] = literal[string]
identifier[offset] = identifier[len] ( identifier[stem] )- literal[int]
identifier[formatted_indent] = literal[string] * identifier[offset]
identifier[formatted_text] = identifier[textwrap] . identifier[fill] ( identifier[text] , identifier[width] = identifier[width] - identifier[padding] ,
identifier[initial_indent] = identifier[formatted_indent] , identifier[subsequent_indent] = identifier[formatted_indent] )
keyword[return] identifier[frog] . identifier[format] ( identifier[stem] = identifier[stem] , identifier[text] = identifier[formatted_text] ) | def make_frog_fresco(text, width, padding=8):
""" Formats your lovely text into a speech bubble spouted by this adorable
little frog.
"""
stem = ' /'
frog = '\n{text}\n{stem}\n @..@\n (----)\n( >__< )\n^^ ~~ ^^'
offset = len(stem) - 1
formatted_indent = ' ' * offset
formatted_text = textwrap.fill(text, width=width - padding, initial_indent=formatted_indent, subsequent_indent=formatted_indent)
return frog.format(stem=stem, text=formatted_text) |
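# Usage sketch for make_frog_fresco above (assumes the function and its
# textwrap import are in scope). The text is wrapped to width - padding
# columns, indented to sit over the speech-bubble stem, then dropped into
# the frog template.
print(make_frog_fresco("ribbit ribbit ribbit ribbit", width=24))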
def _unpack_obs(obs, space, tensorlib=tf):
"""Unpack a flattened Dict or Tuple observation array/tensor.
Arguments:
obs: The flattened observation tensor
space: The original space prior to flattening
tensorlib: The library used to unflatten (reshape) the array/tensor
"""
if (isinstance(space, gym.spaces.Dict)
or isinstance(space, gym.spaces.Tuple)):
prep = get_preprocessor(space)(space)
if len(obs.shape) != 2 or obs.shape[1] != prep.shape[0]:
raise ValueError(
"Expected flattened obs shape of [None, {}], got {}".format(
prep.shape[0], obs.shape))
assert len(prep.preprocessors) == len(space.spaces), \
(len(prep.preprocessors) == len(space.spaces))
offset = 0
if isinstance(space, gym.spaces.Tuple):
u = []
for p, v in zip(prep.preprocessors, space.spaces):
obs_slice = obs[:, offset:offset + p.size]
offset += p.size
u.append(
_unpack_obs(
tensorlib.reshape(obs_slice, [-1] + list(p.shape)),
v,
tensorlib=tensorlib))
else:
u = OrderedDict()
for p, (k, v) in zip(prep.preprocessors, space.spaces.items()):
obs_slice = obs[:, offset:offset + p.size]
offset += p.size
u[k] = _unpack_obs(
tensorlib.reshape(obs_slice, [-1] + list(p.shape)),
v,
tensorlib=tensorlib)
return u
else:
return obs | def function[_unpack_obs, parameter[obs, space, tensorlib]]:
constant[Unpack a flattened Dict or Tuple observation array/tensor.
Arguments:
obs: The flattened observation tensor
space: The original space prior to flattening
tensorlib: The library used to unflatten (reshape) the array/tensor
]
if <ast.BoolOp object at 0x7da207f99a80> begin[:]
variable[prep] assign[=] call[call[name[get_preprocessor], parameter[name[space]]], parameter[name[space]]]
if <ast.BoolOp object at 0x7da207f98a00> begin[:]
<ast.Raise object at 0x7da1b2344520>
assert[compare[call[name[len], parameter[name[prep].preprocessors]] equal[==] call[name[len], parameter[name[space].spaces]]]]
variable[offset] assign[=] constant[0]
if call[name[isinstance], parameter[name[space], name[gym].spaces.Tuple]] begin[:]
variable[u] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b23479d0>, <ast.Name object at 0x7da1b2346f80>]]] in starred[call[name[zip], parameter[name[prep].preprocessors, name[space].spaces]]] begin[:]
variable[obs_slice] assign[=] call[name[obs]][tuple[[<ast.Slice object at 0x7da1b2347a90>, <ast.Slice object at 0x7da1b2347cd0>]]]
<ast.AugAssign object at 0x7da1b2345060>
call[name[u].append, parameter[call[name[_unpack_obs], parameter[call[name[tensorlib].reshape, parameter[name[obs_slice], binary_operation[list[[<ast.UnaryOp object at 0x7da1b2345540>]] + call[name[list], parameter[name[p].shape]]]]], name[v]]]]]
return[name[u]] | keyword[def] identifier[_unpack_obs] ( identifier[obs] , identifier[space] , identifier[tensorlib] = identifier[tf] ):
literal[string]
keyword[if] ( identifier[isinstance] ( identifier[space] , identifier[gym] . identifier[spaces] . identifier[Dict] )
keyword[or] identifier[isinstance] ( identifier[space] , identifier[gym] . identifier[spaces] . identifier[Tuple] )):
identifier[prep] = identifier[get_preprocessor] ( identifier[space] )( identifier[space] )
keyword[if] identifier[len] ( identifier[obs] . identifier[shape] )!= literal[int] keyword[or] identifier[obs] . identifier[shape] [ literal[int] ]!= identifier[prep] . identifier[shape] [ literal[int] ]:
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] (
identifier[prep] . identifier[shape] [ literal[int] ], identifier[obs] . identifier[shape] ))
keyword[assert] identifier[len] ( identifier[prep] . identifier[preprocessors] )== identifier[len] ( identifier[space] . identifier[spaces] ),( identifier[len] ( identifier[prep] . identifier[preprocessors] )== identifier[len] ( identifier[space] . identifier[spaces] ))
identifier[offset] = literal[int]
keyword[if] identifier[isinstance] ( identifier[space] , identifier[gym] . identifier[spaces] . identifier[Tuple] ):
identifier[u] =[]
keyword[for] identifier[p] , identifier[v] keyword[in] identifier[zip] ( identifier[prep] . identifier[preprocessors] , identifier[space] . identifier[spaces] ):
identifier[obs_slice] = identifier[obs] [:, identifier[offset] : identifier[offset] + identifier[p] . identifier[size] ]
identifier[offset] += identifier[p] . identifier[size]
identifier[u] . identifier[append] (
identifier[_unpack_obs] (
identifier[tensorlib] . identifier[reshape] ( identifier[obs_slice] ,[- literal[int] ]+ identifier[list] ( identifier[p] . identifier[shape] )),
identifier[v] ,
identifier[tensorlib] = identifier[tensorlib] ))
keyword[else] :
identifier[u] = identifier[OrderedDict] ()
keyword[for] identifier[p] ,( identifier[k] , identifier[v] ) keyword[in] identifier[zip] ( identifier[prep] . identifier[preprocessors] , identifier[space] . identifier[spaces] . identifier[items] ()):
identifier[obs_slice] = identifier[obs] [:, identifier[offset] : identifier[offset] + identifier[p] . identifier[size] ]
identifier[offset] += identifier[p] . identifier[size]
identifier[u] [ identifier[k] ]= identifier[_unpack_obs] (
identifier[tensorlib] . identifier[reshape] ( identifier[obs_slice] ,[- literal[int] ]+ identifier[list] ( identifier[p] . identifier[shape] )),
identifier[v] ,
identifier[tensorlib] = identifier[tensorlib] )
keyword[return] identifier[u]
keyword[else] :
keyword[return] identifier[obs] | def _unpack_obs(obs, space, tensorlib=tf):
"""Unpack a flattened Dict or Tuple observation array/tensor.
Arguments:
obs: The flattened observation tensor
space: The original space prior to flattening
tensorlib: The library used to unflatten (reshape) the array/tensor
"""
if isinstance(space, gym.spaces.Dict) or isinstance(space, gym.spaces.Tuple):
prep = get_preprocessor(space)(space)
if len(obs.shape) != 2 or obs.shape[1] != prep.shape[0]:
raise ValueError('Expected flattened obs shape of [None, {}], got {}'.format(prep.shape[0], obs.shape)) # depends on [control=['if'], data=[]]
assert len(prep.preprocessors) == len(space.spaces), len(prep.preprocessors) == len(space.spaces)
offset = 0
if isinstance(space, gym.spaces.Tuple):
u = []
for (p, v) in zip(prep.preprocessors, space.spaces):
obs_slice = obs[:, offset:offset + p.size]
offset += p.size
u.append(_unpack_obs(tensorlib.reshape(obs_slice, [-1] + list(p.shape)), v, tensorlib=tensorlib)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
u = OrderedDict()
for (p, (k, v)) in zip(prep.preprocessors, space.spaces.items()):
obs_slice = obs[:, offset:offset + p.size]
offset += p.size
u[k] = _unpack_obs(tensorlib.reshape(obs_slice, [-1] + list(p.shape)), v, tensorlib=tensorlib) # depends on [control=['for'], data=[]]
return u # depends on [control=['if'], data=[]]
else:
return obs |
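# numpy-only sketch of the column-slicing logic in _unpack_obs above: a
# flattened batch of, say, Tuple(Box(2), Box(3)) observations is split back
# into components by walking an offset over the per-component sizes.
import numpy as np
obs = np.arange(10.0).reshape(2, 5)  # batch of 2, flat size 2 + 3
sizes = [2, 3]                       # stand-ins for each preprocessor's p.size
offset, parts = 0, []
for size in sizes:
    parts.append(obs[:, offset:offset + size])
    offset += size
assert parts[0].shape == (2, 2) and parts[1].shape == (2, 3)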
def ds_geom(ds, t_srs=None):
"""Return dataset bbox envelope as geom
"""
gt = ds.GetGeoTransform()
ds_srs = get_ds_srs(ds)
if t_srs is None:
t_srs = ds_srs
ns = ds.RasterXSize
nl = ds.RasterYSize
x = np.array([0, ns, ns, 0, 0], dtype=float)
y = np.array([0, 0, nl, nl, 0], dtype=float)
#Note: pixelToMap adds 0.5 to input coords, need to account for this here
x -= 0.5
y -= 0.5
mx, my = pixelToMap(x, y, gt)
geom_wkt = 'POLYGON(({0}))'.format(', '.join(['{0} {1}'.format(*a) for a in zip(mx,my)]))
geom = ogr.CreateGeometryFromWkt(geom_wkt)
geom.AssignSpatialReference(ds_srs)
if not ds_srs.IsSame(t_srs):
geom_transform(geom, t_srs)
return geom | def function[ds_geom, parameter[ds, t_srs]]:
constant[Return dataset bbox envelope as geom
]
variable[gt] assign[=] call[name[ds].GetGeoTransform, parameter[]]
variable[ds_srs] assign[=] call[name[get_ds_srs], parameter[name[ds]]]
if compare[name[t_srs] is constant[None]] begin[:]
variable[t_srs] assign[=] name[ds_srs]
variable[ns] assign[=] name[ds].RasterXSize
variable[nl] assign[=] name[ds].RasterYSize
variable[x] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b0677eb0>, <ast.Name object at 0x7da1b0677f40>, <ast.Name object at 0x7da1b0677d60>, <ast.Constant object at 0x7da1b0677f10>, <ast.Constant object at 0x7da1b0677d00>]]]]
variable[y] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b0677cd0>, <ast.Constant object at 0x7da1b0677010>, <ast.Name object at 0x7da1b0676f80>, <ast.Name object at 0x7da1b06776a0>, <ast.Constant object at 0x7da1b0677bb0>]]]]
<ast.AugAssign object at 0x7da1b0677fa0>
<ast.AugAssign object at 0x7da1b0677a60>
<ast.Tuple object at 0x7da1b06770d0> assign[=] call[name[pixelToMap], parameter[name[x], name[y], name[gt]]]
variable[geom_wkt] assign[=] call[constant[POLYGON(({0}))].format, parameter[call[constant[, ].join, parameter[<ast.ListComp object at 0x7da1b0516680>]]]]
variable[geom] assign[=] call[name[ogr].CreateGeometryFromWkt, parameter[name[geom_wkt]]]
call[name[geom].AssignSpatialReference, parameter[name[ds_srs]]]
if <ast.UnaryOp object at 0x7da1b0517010> begin[:]
call[name[geom_transform], parameter[name[geom], name[t_srs]]]
return[name[geom]] | keyword[def] identifier[ds_geom] ( identifier[ds] , identifier[t_srs] = keyword[None] ):
literal[string]
identifier[gt] = identifier[ds] . identifier[GetGeoTransform] ()
identifier[ds_srs] = identifier[get_ds_srs] ( identifier[ds] )
keyword[if] identifier[t_srs] keyword[is] keyword[None] :
identifier[t_srs] = identifier[ds_srs]
identifier[ns] = identifier[ds] . identifier[RasterXSize]
identifier[nl] = identifier[ds] . identifier[RasterYSize]
identifier[x] = identifier[np] . identifier[array] ([ literal[int] , identifier[ns] , identifier[ns] , literal[int] , literal[int] ], identifier[dtype] = identifier[float] )
identifier[y] = identifier[np] . identifier[array] ([ literal[int] , literal[int] , identifier[nl] , identifier[nl] , literal[int] ], identifier[dtype] = identifier[float] )
identifier[x] -= literal[int]
identifier[y] -= literal[int]
identifier[mx] , identifier[my] = identifier[pixelToMap] ( identifier[x] , identifier[y] , identifier[gt] )
identifier[geom_wkt] = literal[string] . identifier[format] ( literal[string] . identifier[join] ([ literal[string] . identifier[format] (* identifier[a] ) keyword[for] identifier[a] keyword[in] identifier[zip] ( identifier[mx] , identifier[my] )]))
identifier[geom] = identifier[ogr] . identifier[CreateGeometryFromWkt] ( identifier[geom_wkt] )
identifier[geom] . identifier[AssignSpatialReference] ( identifier[ds_srs] )
keyword[if] keyword[not] identifier[ds_srs] . identifier[IsSame] ( identifier[t_srs] ):
identifier[geom_transform] ( identifier[geom] , identifier[t_srs] )
keyword[return] identifier[geom] | def ds_geom(ds, t_srs=None):
"""Return dataset bbox envelope as geom
"""
gt = ds.GetGeoTransform()
ds_srs = get_ds_srs(ds)
if t_srs is None:
t_srs = ds_srs # depends on [control=['if'], data=['t_srs']]
ns = ds.RasterXSize
nl = ds.RasterYSize
x = np.array([0, ns, ns, 0, 0], dtype=float)
y = np.array([0, 0, nl, nl, 0], dtype=float)
#Note: pixelToMap adds 0.5 to input coords, need to account for this here
x -= 0.5
y -= 0.5
(mx, my) = pixelToMap(x, y, gt)
geom_wkt = 'POLYGON(({0}))'.format(', '.join(['{0} {1}'.format(*a) for a in zip(mx, my)]))
geom = ogr.CreateGeometryFromWkt(geom_wkt)
geom.AssignSpatialReference(ds_srs)
if not ds_srs.IsSame(t_srs):
geom_transform(geom, t_srs) # depends on [control=['if'], data=[]]
return geom |
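# pixelToMap above presumably applies the GDAL affine geotransform; a
# minimal sketch under that assumption (gt is a made-up example). The -0.5
# in ds_geom compensates for pixelToMap's pixel-center +0.5 shift.
import numpy as np
gt = (444720.0, 30.0, 0.0, 3751320.0, 0.0, -30.0)
px, py = np.array([0.0, 100.0]), np.array([0.0, 100.0])
mx = gt[0] + px * gt[1] + py * gt[2]  # pixel x=100 -> map x = 447720.0
my = gt[3] + px * gt[4] + py * gt[5]  # pixel y=100 -> map y = 3748320.0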
def c(self):
"""Caching client for not repeapting checks"""
if self._client is None:
self._parse_settings()
self._client = Rumetr(**self.settings)
return self._client | def function[c, parameter[self]]:
    constant[Caching client for not repeating checks]
if compare[name[self]._client is constant[None]] begin[:]
call[name[self]._parse_settings, parameter[]]
name[self]._client assign[=] call[name[Rumetr], parameter[]]
return[name[self]._client] | keyword[def] identifier[c] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_client] keyword[is] keyword[None] :
identifier[self] . identifier[_parse_settings] ()
identifier[self] . identifier[_client] = identifier[Rumetr] (** identifier[self] . identifier[settings] )
keyword[return] identifier[self] . identifier[_client] | def c(self):
"""Caching client for not repeapting checks"""
if self._client is None:
self._parse_settings()
self._client = Rumetr(**self.settings) # depends on [control=['if'], data=[]]
return self._client |
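# The property above is the usual lazy-initialization pattern: build the
# expensive client once, on first access, and reuse it afterwards. Generic
# sketch (Expensive stands in for Rumetr(**self.settings)):
class Expensive:
    pass
class Holder:
    def __init__(self):
        self._client = None
    @property
    def client(self):
        if self._client is None:        # first access only
            self._client = Expensive()
        return self._client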
def summary(self, raw):
"""Parse, format and return scan summary."""
taxonomies = []
level = "info"
namespace = "Patrowl"
# getreport service
if self.service == 'getreport':
if 'risk_level' in raw and raw['risk_level']:
risk_level = raw['risk_level']
# Grade
if risk_level['grade'] in ["A", "B"]:
level = "safe"
else:
level = "suspicious"
taxonomies.append(self.build_taxonomy(level, namespace, "Grade", risk_level['grade']))
# Findings
if risk_level['high'] > 0:
level = "malicious"
elif risk_level['medium'] > 0 or risk_level['low'] > 0:
level = "suspicious"
else:
level = "info"
taxonomies.append(self.build_taxonomy(
level, namespace, "Findings", "{}/{}/{}/{}".format(
risk_level['high'],
risk_level['medium'],
risk_level['low'],
risk_level['info']
)))
#todo: add_asset service
return {"taxonomies": taxonomies} | def function[summary, parameter[self, raw]]:
constant[Parse, format and return scan summary.]
variable[taxonomies] assign[=] list[[]]
variable[level] assign[=] constant[info]
variable[namespace] assign[=] constant[Patrowl]
if compare[name[self].service equal[==] constant[getreport]] begin[:]
if <ast.BoolOp object at 0x7da1b17d70a0> begin[:]
variable[risk_level] assign[=] call[name[raw]][constant[risk_level]]
if compare[call[name[risk_level]][constant[grade]] in list[[<ast.Constant object at 0x7da1b17d74f0>, <ast.Constant object at 0x7da1b17d7070>]]] begin[:]
variable[level] assign[=] constant[safe]
call[name[taxonomies].append, parameter[call[name[self].build_taxonomy, parameter[name[level], name[namespace], constant[Grade], call[name[risk_level]][constant[grade]]]]]]
if compare[call[name[risk_level]][constant[high]] greater[>] constant[0]] begin[:]
variable[level] assign[=] constant[malicious]
call[name[taxonomies].append, parameter[call[name[self].build_taxonomy, parameter[name[level], name[namespace], constant[Findings], call[constant[{}/{}/{}/{}].format, parameter[call[name[risk_level]][constant[high]], call[name[risk_level]][constant[medium]], call[name[risk_level]][constant[low]], call[name[risk_level]][constant[info]]]]]]]]
return[dictionary[[<ast.Constant object at 0x7da18f09f940>], [<ast.Name object at 0x7da18f09e050>]]] | keyword[def] identifier[summary] ( identifier[self] , identifier[raw] ):
literal[string]
identifier[taxonomies] =[]
identifier[level] = literal[string]
identifier[namespace] = literal[string]
keyword[if] identifier[self] . identifier[service] == literal[string] :
keyword[if] literal[string] keyword[in] identifier[raw] keyword[and] identifier[raw] [ literal[string] ]:
identifier[risk_level] = identifier[raw] [ literal[string] ]
keyword[if] identifier[risk_level] [ literal[string] ] keyword[in] [ literal[string] , literal[string] ]:
identifier[level] = literal[string]
keyword[else] :
identifier[level] = literal[string]
identifier[taxonomies] . identifier[append] ( identifier[self] . identifier[build_taxonomy] ( identifier[level] , identifier[namespace] , literal[string] , identifier[risk_level] [ literal[string] ]))
keyword[if] identifier[risk_level] [ literal[string] ]> literal[int] :
identifier[level] = literal[string]
keyword[elif] identifier[risk_level] [ literal[string] ]> literal[int] keyword[or] identifier[risk_level] [ literal[string] ]> literal[int] :
identifier[level] = literal[string]
keyword[else] :
identifier[level] = literal[string]
identifier[taxonomies] . identifier[append] ( identifier[self] . identifier[build_taxonomy] (
identifier[level] , identifier[namespace] , literal[string] , literal[string] . identifier[format] (
identifier[risk_level] [ literal[string] ],
identifier[risk_level] [ literal[string] ],
identifier[risk_level] [ literal[string] ],
identifier[risk_level] [ literal[string] ]
)))
keyword[return] { literal[string] : identifier[taxonomies] } | def summary(self, raw):
"""Parse, format and return scan summary."""
taxonomies = []
level = 'info'
namespace = 'Patrowl'
# getreport service
if self.service == 'getreport':
if 'risk_level' in raw and raw['risk_level']:
risk_level = raw['risk_level']
# Grade
if risk_level['grade'] in ['A', 'B']:
level = 'safe' # depends on [control=['if'], data=[]]
else:
level = 'suspicious'
taxonomies.append(self.build_taxonomy(level, namespace, 'Grade', risk_level['grade']))
# Findings
if risk_level['high'] > 0:
level = 'malicious' # depends on [control=['if'], data=[]]
elif risk_level['medium'] > 0 or risk_level['low'] > 0:
level = 'suspicious' # depends on [control=['if'], data=[]]
else:
level = 'info'
taxonomies.append(self.build_taxonomy(level, namespace, 'Findings', '{}/{}/{}/{}'.format(risk_level['high'], risk_level['medium'], risk_level['low'], risk_level['info']))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
#todo: add_asset service
return {'taxonomies': taxonomies} |
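# summary() above presumably relies on cortexutils' build_taxonomy, in
# which case each taxonomy entry is a small dict and the return value has
# this shape (values illustrative):
example = {"taxonomies": [
    {"level": "safe", "namespace": "Patrowl", "predicate": "Grade", "value": "A"},
    {"level": "suspicious", "namespace": "Patrowl", "predicate": "Findings", "value": "0/1/2/4"},
]}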
def doctemplate(*args):
"""Return a decorator putting ``args`` into the docstring of the decorated ``func``.
>>> @doctemplate('spam', 'spam')
... def spam():
... '''Returns %s, lovely %s.'''
... return 'Spam'
>>> spam.__doc__
'Returns spam, lovely spam.'
"""
def decorator(func):
func.__doc__ = func.__doc__ % tuple(args)
return func
return decorator | def function[doctemplate, parameter[]]:
constant[Return a decorator putting ``args`` into the docstring of the decorated ``func``.
>>> @doctemplate('spam', 'spam')
... def spam():
... '''Returns %s, lovely %s.'''
... return 'Spam'
>>> spam.__doc__
'Returns spam, lovely spam.'
]
def function[decorator, parameter[func]]:
name[func].__doc__ assign[=] binary_operation[name[func].__doc__ <ast.Mod object at 0x7da2590d6920> call[name[tuple], parameter[name[args]]]]
return[name[func]]
return[name[decorator]] | keyword[def] identifier[doctemplate] (* identifier[args] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[func] ):
identifier[func] . identifier[__doc__] = identifier[func] . identifier[__doc__] % identifier[tuple] ( identifier[args] )
keyword[return] identifier[func]
keyword[return] identifier[decorator] | def doctemplate(*args):
"""Return a decorator putting ``args`` into the docstring of the decorated ``func``.
>>> @doctemplate('spam', 'spam')
... def spam():
... '''Returns %s, lovely %s.'''
... return 'Spam'
>>> spam.__doc__
'Returns spam, lovely spam.'
"""
def decorator(func):
func.__doc__ = func.__doc__ % tuple(args)
return func
return decorator |
def _set_element_property(parent_to_parse, element_path, prop_name, value):
""" Assigns the value to the parsed parent element and then returns it """
element = get_element(parent_to_parse)
if element is None:
return None
if element_path and not element_exists(element, element_path):
element = insert_element(element, 0, element_path)
if not isinstance(value, string_types):
value = u''
setattr(element, prop_name, value)
return element | def function[_set_element_property, parameter[parent_to_parse, element_path, prop_name, value]]:
constant[ Assigns the value to the parsed parent element and then returns it ]
variable[element] assign[=] call[name[get_element], parameter[name[parent_to_parse]]]
if compare[name[element] is constant[None]] begin[:]
return[constant[None]]
if <ast.BoolOp object at 0x7da1b277c370> begin[:]
variable[element] assign[=] call[name[insert_element], parameter[name[element], constant[0], name[element_path]]]
if <ast.UnaryOp object at 0x7da1b277d390> begin[:]
variable[value] assign[=] constant[]
call[name[setattr], parameter[name[element], name[prop_name], name[value]]]
return[name[element]] | keyword[def] identifier[_set_element_property] ( identifier[parent_to_parse] , identifier[element_path] , identifier[prop_name] , identifier[value] ):
literal[string]
identifier[element] = identifier[get_element] ( identifier[parent_to_parse] )
keyword[if] identifier[element] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] identifier[element_path] keyword[and] keyword[not] identifier[element_exists] ( identifier[element] , identifier[element_path] ):
identifier[element] = identifier[insert_element] ( identifier[element] , literal[int] , identifier[element_path] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[string_types] ):
identifier[value] = literal[string]
identifier[setattr] ( identifier[element] , identifier[prop_name] , identifier[value] )
keyword[return] identifier[element] | def _set_element_property(parent_to_parse, element_path, prop_name, value):
""" Assigns the value to the parsed parent element and then returns it """
element = get_element(parent_to_parse)
if element is None:
return None # depends on [control=['if'], data=[]]
if element_path and (not element_exists(element, element_path)):
element = insert_element(element, 0, element_path) # depends on [control=['if'], data=[]]
if not isinstance(value, string_types):
value = u'' # depends on [control=['if'], data=[]]
setattr(element, prop_name, value)
return element |
def account(self):
"""
:returns: Account provided as the authenticating account
:rtype: AccountContext
"""
if self._account is None:
self._account = AccountContext(self, self.domain.twilio.account_sid)
return self._account | def function[account, parameter[self]]:
constant[
:returns: Account provided as the authenticating account
:rtype: AccountContext
]
if compare[name[self]._account is constant[None]] begin[:]
name[self]._account assign[=] call[name[AccountContext], parameter[name[self], name[self].domain.twilio.account_sid]]
return[name[self]._account] | keyword[def] identifier[account] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_account] keyword[is] keyword[None] :
identifier[self] . identifier[_account] = identifier[AccountContext] ( identifier[self] , identifier[self] . identifier[domain] . identifier[twilio] . identifier[account_sid] )
keyword[return] identifier[self] . identifier[_account] | def account(self):
"""
:returns: Account provided as the authenticating account
:rtype: AccountContext
"""
if self._account is None:
self._account = AccountContext(self, self.domain.twilio.account_sid) # depends on [control=['if'], data=[]]
return self._account |
def transform(self, sequences):
"""Apply the dimensionality reduction on X.
Parameters
----------
sequences: list of array-like, each of shape (n_samples_i, n_features)
Training data, where n_samples_i in the number of samples
in sequence i and n_features is the number of features.
Returns
-------
sequence_new : list of array-like, each of shape (n_samples_i, n_components)
"""
check_iter_of_sequences(sequences, max_iter=3) # we might be lazy-loading
sequences_new = []
for X in sequences:
X = array2d(X)
if self.means_ is not None:
X = X - self.means_
X_transformed = np.dot(X, self.components_.T)
if self.kinetic_mapping:
X_transformed *= self.eigenvalues_
if self.commute_mapping:
# thanks to @maxentile and @jchodera for providing/directing to a
# reference implementation in pyemma
#(markovmodel/PyEMMA#963)
                # dampening smaller timescales based on recommendation of [7]
#
# some timescales are NaNs and regularized timescales will
# be negative when they are less than the lag time; all these
# are set to zero using nan_to_num before returning
                regularized_timescales = 0.5 * self.timescales_ * \
                    np.tanh(np.pi * ((self.timescales_ - self.lag_time)
                                     / self.lag_time) + 1)
X_transformed *= np.sqrt(regularized_timescales / 2)
X_transformed = np.nan_to_num(X_transformed)
sequences_new.append(X_transformed)
return sequences_new | def function[transform, parameter[self, sequences]]:
constant[Apply the dimensionality reduction on X.
Parameters
----------
sequences: list of array-like, each of shape (n_samples_i, n_features)
Training data, where n_samples_i in the number of samples
in sequence i and n_features is the number of features.
Returns
-------
sequence_new : list of array-like, each of shape (n_samples_i, n_components)
]
call[name[check_iter_of_sequences], parameter[name[sequences]]]
variable[sequences_new] assign[=] list[[]]
for taget[name[X]] in starred[name[sequences]] begin[:]
variable[X] assign[=] call[name[array2d], parameter[name[X]]]
if compare[name[self].means_ is_not constant[None]] begin[:]
variable[X] assign[=] binary_operation[name[X] - name[self].means_]
variable[X_transformed] assign[=] call[name[np].dot, parameter[name[X], name[self].components_.T]]
if name[self].kinetic_mapping begin[:]
<ast.AugAssign object at 0x7da1b063cdc0>
if name[self].commute_mapping begin[:]
variable[regularized_timescales] assign[=] binary_operation[binary_operation[constant[0.5] * name[self].timescales_] * call[name[np].tanh, parameter[binary_operation[binary_operation[name[np].pi * binary_operation[binary_operation[name[self].timescales_ - name[self].lag_time] / name[self].lag_time]] + constant[1]]]]]
<ast.AugAssign object at 0x7da1b07ae7a0>
variable[X_transformed] assign[=] call[name[np].nan_to_num, parameter[name[X_transformed]]]
call[name[sequences_new].append, parameter[name[X_transformed]]]
return[name[sequences_new]] | keyword[def] identifier[transform] ( identifier[self] , identifier[sequences] ):
literal[string]
identifier[check_iter_of_sequences] ( identifier[sequences] , identifier[max_iter] = literal[int] )
identifier[sequences_new] =[]
keyword[for] identifier[X] keyword[in] identifier[sequences] :
identifier[X] = identifier[array2d] ( identifier[X] )
keyword[if] identifier[self] . identifier[means_] keyword[is] keyword[not] keyword[None] :
identifier[X] = identifier[X] - identifier[self] . identifier[means_]
identifier[X_transformed] = identifier[np] . identifier[dot] ( identifier[X] , identifier[self] . identifier[components_] . identifier[T] )
keyword[if] identifier[self] . identifier[kinetic_mapping] :
identifier[X_transformed] *= identifier[self] . identifier[eigenvalues_]
keyword[if] identifier[self] . identifier[commute_mapping] :
identifier[regularized_timescales] = literal[int] * identifier[self] . identifier[timescales_] * identifier[np] . identifier[tanh] ( identifier[np] . identifier[pi] *(( identifier[self] . identifier[timescales_] - identifier[self] . identifier[lag_time] )
/ identifier[self] . identifier[lag_time] )+ literal[int] )
identifier[X_transformed] *= identifier[np] . identifier[sqrt] ( identifier[regularized_timescales] / literal[int] )
identifier[X_transformed] = identifier[np] . identifier[nan_to_num] ( identifier[X_transformed] )
identifier[sequences_new] . identifier[append] ( identifier[X_transformed] )
keyword[return] identifier[sequences_new] | def transform(self, sequences):
"""Apply the dimensionality reduction on X.
Parameters
----------
sequences: list of array-like, each of shape (n_samples_i, n_features)
Training data, where n_samples_i in the number of samples
in sequence i and n_features is the number of features.
Returns
-------
sequence_new : list of array-like, each of shape (n_samples_i, n_components)
"""
check_iter_of_sequences(sequences, max_iter=3) # we might be lazy-loading
sequences_new = []
for X in sequences:
X = array2d(X)
if self.means_ is not None:
X = X - self.means_ # depends on [control=['if'], data=[]]
X_transformed = np.dot(X, self.components_.T)
if self.kinetic_mapping:
X_transformed *= self.eigenvalues_ # depends on [control=['if'], data=[]]
if self.commute_mapping:
# thanks to @maxentile and @jchodera for providing/directing to a
# reference implementation in pyemma
#(markovmodel/PyEMMA#963)
            # dampening smaller timescales based on recommendation of [7]
#
# some timescales are NaNs and regularized timescales will
# be negative when they are less than the lag time; all these
# are set to zero using nan_to_num before returning
regularized_timescales = 0.5 * self.timescales_ * np.tanh(np.pi * ((self.timescales_ - self.lag_time) / self.lag_time) + 1)
X_transformed *= np.sqrt(regularized_timescales / 2)
X_transformed = np.nan_to_num(X_transformed) # depends on [control=['if'], data=[]]
sequences_new.append(X_transformed) # depends on [control=['for'], data=['X']]
return sequences_new |
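# The commute-mapping branch above damps components whose implied
# timescales fall at or below the lag time. A standalone numeric check
# (lag and timescales are made-up values):
import numpy as np
lag = 10.0
timescales = np.array([5.0, 10.0, 100.0])
reg = 0.5 * timescales * np.tanh(np.pi * (timescales - lag) / lag + 1)
with np.errstate(invalid="ignore"):          # sqrt of a negative reg gives NaN
    scale = np.nan_to_num(np.sqrt(reg / 2))  # ...which is zeroed, as in transform()
print(scale)  # approx [0.   1.38 5.  ]: short timescales get no weight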
def _query_mysql(self):
"""
Queries mysql and returns a cursor to the results.
"""
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
conn = mysql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
return cursor | def function[_query_mysql, parameter[self]]:
constant[
        Queries MySQL and returns a cursor to the results.
]
variable[mysql] assign[=] call[name[MySqlHook], parameter[]]
variable[conn] assign[=] call[name[mysql].get_conn, parameter[]]
variable[cursor] assign[=] call[name[conn].cursor, parameter[]]
call[name[cursor].execute, parameter[name[self].sql]]
return[name[cursor]] | keyword[def] identifier[_query_mysql] ( identifier[self] ):
literal[string]
identifier[mysql] = identifier[MySqlHook] ( identifier[mysql_conn_id] = identifier[self] . identifier[mysql_conn_id] )
identifier[conn] = identifier[mysql] . identifier[get_conn] ()
identifier[cursor] = identifier[conn] . identifier[cursor] ()
identifier[cursor] . identifier[execute] ( identifier[self] . identifier[sql] )
keyword[return] identifier[cursor] | def _query_mysql(self):
"""
    Queries MySQL and returns a cursor to the results.
"""
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
conn = mysql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
return cursor |
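A minimal standalone sketch of the same hook pattern, usable outside the operator; the connection id and query are placeholders, and the import path varies across Airflow versions:

from airflow.hooks.mysql_hook import MySqlHook  # path for older Airflow releases

def fetch_rows(sql, conn_id="mysql_default"):
    # Open a connection through the hook, run the query, return all rows.
    hook = MySqlHook(mysql_conn_id=conn_id)
    cursor = hook.get_conn().cursor()
    cursor.execute(sql)
    return cursor.fetchall()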
def invalidate_files(self, direct_filenames):
"""Invalidates the given filenames in an internal product Graph instance."""
invalidated = self._scheduler.invalidate_files(direct_filenames)
self._maybe_visualize()
return invalidated | def function[invalidate_files, parameter[self, direct_filenames]]:
constant[Invalidates the given filenames in an internal product Graph instance.]
variable[invalidated] assign[=] call[name[self]._scheduler.invalidate_files, parameter[name[direct_filenames]]]
call[name[self]._maybe_visualize, parameter[]]
return[name[invalidated]] | keyword[def] identifier[invalidate_files] ( identifier[self] , identifier[direct_filenames] ):
literal[string]
identifier[invalidated] = identifier[self] . identifier[_scheduler] . identifier[invalidate_files] ( identifier[direct_filenames] )
identifier[self] . identifier[_maybe_visualize] ()
keyword[return] identifier[invalidated] | def invalidate_files(self, direct_filenames):
"""Invalidates the given filenames in an internal product Graph instance."""
invalidated = self._scheduler.invalidate_files(direct_filenames)
self._maybe_visualize()
return invalidated |
def _longest_val_in_column(self, col):
"""
        Get the size of the longest value in a specific column.
        :param col: str, column name
        :return: int
"""
try:
# +2 is for implicit separator
return max([len(x[col]) for x in self.table if x[col]]) + 2
except KeyError:
logger.error("there is no column %r", col)
raise | def function[_longest_val_in_column, parameter[self, col]]:
constant[
        Get the size of the longest value in a specific column.
        :param col: str, column name
        :return: int
]
<ast.Try object at 0x7da1b0fdb730> | keyword[def] identifier[_longest_val_in_column] ( identifier[self] , identifier[col] ):
literal[string]
keyword[try] :
keyword[return] identifier[max] ([ identifier[len] ( identifier[x] [ identifier[col] ]) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[table] keyword[if] identifier[x] [ identifier[col] ]])+ literal[int]
keyword[except] identifier[KeyError] :
identifier[logger] . identifier[error] ( literal[string] , identifier[col] )
keyword[raise] | def _longest_val_in_column(self, col):
"""
    Get the size of the longest value in a specific column.
    :param col: str, column name
    :return: int
"""
try:
# +2 is for implicit separator
return max([len(x[col]) for x in self.table if x[col]]) + 2 # depends on [control=['try'], data=[]]
except KeyError:
logger.error('there is no column %r', col)
raise # depends on [control=['except'], data=[]] |
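For reference, the same width rule applied to a toy table (rows as dicts, as the method assumes); the data is illustrative only:

# Longest truthy value in the column, plus a two-character implicit separator.
table = [{"name": "alice"}, {"name": "bob"}, {"name": ""}]
width = max(len(row["name"]) for row in table if row["name"]) + 2
print(width)  # 7 == len("alice") + 2; empty values are skipped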
def download():
"""
Download all files from an FTP share
"""
ftp = ftplib.FTP(SITE)
ftp.set_debuglevel(DEBUG)
ftp.login(USER, PASSWD)
ftp.cwd(DIR)
filelist = ftp.nlst()
filecounter = MANAGER.counter(total=len(filelist), desc='Downloading',
unit='files')
for filename in filelist:
with Writer(filename, ftp.size(filename), DEST) as writer:
ftp.retrbinary('RETR %s' % filename, writer.write)
print(filename)
filecounter.update()
ftp.close() | def function[download, parameter[]]:
constant[
Download all files from an FTP share
]
variable[ftp] assign[=] call[name[ftplib].FTP, parameter[name[SITE]]]
call[name[ftp].set_debuglevel, parameter[name[DEBUG]]]
call[name[ftp].login, parameter[name[USER], name[PASSWD]]]
call[name[ftp].cwd, parameter[name[DIR]]]
variable[filelist] assign[=] call[name[ftp].nlst, parameter[]]
variable[filecounter] assign[=] call[name[MANAGER].counter, parameter[]]
for taget[name[filename]] in starred[name[filelist]] begin[:]
with call[name[Writer], parameter[name[filename], call[name[ftp].size, parameter[name[filename]]], name[DEST]]] begin[:]
call[name[ftp].retrbinary, parameter[binary_operation[constant[RETR %s] <ast.Mod object at 0x7da2590d6920> name[filename]], name[writer].write]]
call[name[print], parameter[name[filename]]]
call[name[filecounter].update, parameter[]]
call[name[ftp].close, parameter[]] | keyword[def] identifier[download] ():
literal[string]
identifier[ftp] = identifier[ftplib] . identifier[FTP] ( identifier[SITE] )
identifier[ftp] . identifier[set_debuglevel] ( identifier[DEBUG] )
identifier[ftp] . identifier[login] ( identifier[USER] , identifier[PASSWD] )
identifier[ftp] . identifier[cwd] ( identifier[DIR] )
identifier[filelist] = identifier[ftp] . identifier[nlst] ()
identifier[filecounter] = identifier[MANAGER] . identifier[counter] ( identifier[total] = identifier[len] ( identifier[filelist] ), identifier[desc] = literal[string] ,
identifier[unit] = literal[string] )
keyword[for] identifier[filename] keyword[in] identifier[filelist] :
keyword[with] identifier[Writer] ( identifier[filename] , identifier[ftp] . identifier[size] ( identifier[filename] ), identifier[DEST] ) keyword[as] identifier[writer] :
identifier[ftp] . identifier[retrbinary] ( literal[string] % identifier[filename] , identifier[writer] . identifier[write] )
identifier[print] ( identifier[filename] )
identifier[filecounter] . identifier[update] ()
identifier[ftp] . identifier[close] () | def download():
"""
Download all files from an FTP share
"""
ftp = ftplib.FTP(SITE)
ftp.set_debuglevel(DEBUG)
ftp.login(USER, PASSWD)
ftp.cwd(DIR)
filelist = ftp.nlst()
filecounter = MANAGER.counter(total=len(filelist), desc='Downloading', unit='files')
for filename in filelist:
with Writer(filename, ftp.size(filename), DEST) as writer:
ftp.retrbinary('RETR %s' % filename, writer.write) # depends on [control=['with'], data=['writer']]
print(filename)
filecounter.update() # depends on [control=['for'], data=['filename']]
ftp.close() |
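The function above leans on module-level constants (SITE, USER, PASSWD, DIR, DEST, DEBUG, MANAGER, Writer); a parameterised sketch of the same loop, with the progress counter left out, could look like this:

import ftplib

def download_all(site, user, passwd, remote_dir, make_writer):
    # make_writer(filename, size) must return a context manager exposing
    # a .write(chunk) method, mirroring the Writer class assumed above.
    ftp = ftplib.FTP(site)
    ftp.login(user, passwd)
    ftp.cwd(remote_dir)
    for filename in ftp.nlst():
        with make_writer(filename, ftp.size(filename)) as writer:
            ftp.retrbinary("RETR %s" % filename, writer.write)
    ftp.close()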
def _get_video(edx_video_id):
"""
Get a Video instance, prefetching encoded video and course information.
Raises ValVideoNotFoundError if the video cannot be retrieved.
"""
try:
return Video.objects.prefetch_related("encoded_videos", "courses").get(edx_video_id=edx_video_id)
except Video.DoesNotExist:
error_message = u"Video not found for edx_video_id: {0}".format(edx_video_id)
raise ValVideoNotFoundError(error_message)
except Exception:
error_message = u"Could not get edx_video_id: {0}".format(edx_video_id)
logger.exception(error_message)
raise ValInternalError(error_message) | def function[_get_video, parameter[edx_video_id]]:
constant[
Get a Video instance, prefetching encoded video and course information.
Raises ValVideoNotFoundError if the video cannot be retrieved.
]
<ast.Try object at 0x7da20c795210> | keyword[def] identifier[_get_video] ( identifier[edx_video_id] ):
literal[string]
keyword[try] :
keyword[return] identifier[Video] . identifier[objects] . identifier[prefetch_related] ( literal[string] , literal[string] ). identifier[get] ( identifier[edx_video_id] = identifier[edx_video_id] )
keyword[except] identifier[Video] . identifier[DoesNotExist] :
identifier[error_message] = literal[string] . identifier[format] ( identifier[edx_video_id] )
keyword[raise] identifier[ValVideoNotFoundError] ( identifier[error_message] )
keyword[except] identifier[Exception] :
identifier[error_message] = literal[string] . identifier[format] ( identifier[edx_video_id] )
identifier[logger] . identifier[exception] ( identifier[error_message] )
keyword[raise] identifier[ValInternalError] ( identifier[error_message] ) | def _get_video(edx_video_id):
"""
Get a Video instance, prefetching encoded video and course information.
Raises ValVideoNotFoundError if the video cannot be retrieved.
"""
try:
return Video.objects.prefetch_related('encoded_videos', 'courses').get(edx_video_id=edx_video_id) # depends on [control=['try'], data=[]]
except Video.DoesNotExist:
error_message = u'Video not found for edx_video_id: {0}'.format(edx_video_id)
raise ValVideoNotFoundError(error_message) # depends on [control=['except'], data=[]]
except Exception:
error_message = u'Could not get edx_video_id: {0}'.format(edx_video_id)
logger.exception(error_message)
raise ValInternalError(error_message) # depends on [control=['except'], data=[]] |
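A hypothetical caller showing how the two exception classes separate "missing" from "internal" failures; the video id is a placeholder:

try:
    video = _get_video("example-edx-video-id")
except ValVideoNotFoundError:
    video = None   # expected case: the id simply does not exist
except ValInternalError:
    raise          # unexpected case: let it surface to monitoring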
def _generic_convert_string(v, from_type, to_type, encoding):
"""
Generic method to convert any argument type (string type, list, set, tuple, dict) to an equivalent,
with string values converted to given 'to_type' (str or unicode).
This method must be used with Python 2 interpreter only.
:param v: The value to convert
:param from_type: The original string type to convert
:param to_type: The target string type to convert to
    :param encoding: The character encoding to use when converting between str and unicode
    :return: the input value with any contained strings converted to `to_type`
"""
assert six.PY2, "This function should be used with Python 2 only"
assert from_type != to_type
if from_type == six.binary_type and isinstance(v, six.binary_type):
return six.text_type(v, encoding)
elif from_type == six.text_type and isinstance(v, six.text_type):
return v.encode(encoding)
elif isinstance(v, (list, tuple, set)):
return type(v)([_generic_convert_string(element, from_type, to_type, encoding) for element in v])
elif isinstance(v, dict):
return {k: _generic_convert_string(v, from_type, to_type, encoding) for k, v in v.iteritems()}
return v | def function[_generic_convert_string, parameter[v, from_type, to_type, encoding]]:
constant[
Generic method to convert any argument type (string type, list, set, tuple, dict) to an equivalent,
with string values converted to given 'to_type' (str or unicode).
This method must be used with Python 2 interpreter only.
:param v: The value to convert
:param from_type: The original string type to convert
:param to_type: The target string type to convert to
    :param encoding: The character encoding to use when converting between str and unicode
    :return: the input value with any contained strings converted to `to_type`
]
assert[name[six].PY2]
assert[compare[name[from_type] not_equal[!=] name[to_type]]]
if <ast.BoolOp object at 0x7da1b04d12a0> begin[:]
return[call[name[six].text_type, parameter[name[v], name[encoding]]]]
return[name[v]] | keyword[def] identifier[_generic_convert_string] ( identifier[v] , identifier[from_type] , identifier[to_type] , identifier[encoding] ):
literal[string]
keyword[assert] identifier[six] . identifier[PY2] , literal[string]
keyword[assert] identifier[from_type] != identifier[to_type]
keyword[if] identifier[from_type] == identifier[six] . identifier[binary_type] keyword[and] identifier[isinstance] ( identifier[v] , identifier[six] . identifier[binary_type] ):
keyword[return] identifier[six] . identifier[text_type] ( identifier[v] , identifier[encoding] )
keyword[elif] identifier[from_type] == identifier[six] . identifier[text_type] keyword[and] identifier[isinstance] ( identifier[v] , identifier[six] . identifier[text_type] ):
keyword[return] identifier[v] . identifier[encode] ( identifier[encoding] )
keyword[elif] identifier[isinstance] ( identifier[v] ,( identifier[list] , identifier[tuple] , identifier[set] )):
keyword[return] identifier[type] ( identifier[v] )([ identifier[_generic_convert_string] ( identifier[element] , identifier[from_type] , identifier[to_type] , identifier[encoding] ) keyword[for] identifier[element] keyword[in] identifier[v] ])
keyword[elif] identifier[isinstance] ( identifier[v] , identifier[dict] ):
keyword[return] { identifier[k] : identifier[_generic_convert_string] ( identifier[v] , identifier[from_type] , identifier[to_type] , identifier[encoding] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[v] . identifier[iteritems] ()}
keyword[return] identifier[v] | def _generic_convert_string(v, from_type, to_type, encoding):
"""
Generic method to convert any argument type (string type, list, set, tuple, dict) to an equivalent,
with string values converted to given 'to_type' (str or unicode).
This method must be used with Python 2 interpreter only.
:param v: The value to convert
:param from_type: The original string type to convert
:param to_type: The target string type to convert to
    :param encoding: The character encoding to use when converting between str and unicode
    :return: the input value with any contained strings converted to `to_type`
"""
assert six.PY2, 'This function should be used with Python 2 only'
assert from_type != to_type
if from_type == six.binary_type and isinstance(v, six.binary_type):
return six.text_type(v, encoding) # depends on [control=['if'], data=[]]
elif from_type == six.text_type and isinstance(v, six.text_type):
return v.encode(encoding) # depends on [control=['if'], data=[]]
elif isinstance(v, (list, tuple, set)):
return type(v)([_generic_convert_string(element, from_type, to_type, encoding) for element in v]) # depends on [control=['if'], data=[]]
elif isinstance(v, dict):
return {k: _generic_convert_string(v, from_type, to_type, encoding) for (k, v) in v.iteritems()} # depends on [control=['if'], data=[]]
return v |
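Under a Python 2 interpreter, decoding a nested structure of byte strings would look like the sketch below (illustrative data only); note that dict keys are left untouched, since only the values recurse:

# Python 2 only, per the assert above: str is six.binary_type, unicode is six.text_type.
data = {b"name": [b"caf\xc3\xa9", (b"a", b"b")]}
decoded = _generic_convert_string(data, str, unicode, "utf-8")
# -> {b"name": [u"caf\xe9", (u"a", u"b")]}: values decoded, keys unchanged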
def set_column_sizes(self, values):
"""Sets the size value for each column
Args:
            values (iterable of int or str): values are treated as percentages.
"""
self.style['grid-template-columns'] = ' '.join(map(lambda value: (str(value) if str(value).endswith('%') else str(value) + '%') , values)) | def function[set_column_sizes, parameter[self, values]]:
constant[Sets the size value for each column
Args:
            values (iterable of int or str): values are treated as percentages.
]
call[name[self].style][constant[grid-template-columns]] assign[=] call[constant[ ].join, parameter[call[name[map], parameter[<ast.Lambda object at 0x7da18dc9aad0>, name[values]]]]] | keyword[def] identifier[set_column_sizes] ( identifier[self] , identifier[values] ):
literal[string]
identifier[self] . identifier[style] [ literal[string] ]= literal[string] . identifier[join] ( identifier[map] ( keyword[lambda] identifier[value] :( identifier[str] ( identifier[value] ) keyword[if] identifier[str] ( identifier[value] ). identifier[endswith] ( literal[string] ) keyword[else] identifier[str] ( identifier[value] )+ literal[string] ), identifier[values] )) | def set_column_sizes(self, values):
"""Sets the size value for each column
Args:
            values (iterable of int or str): values are treated as percentages.
"""
self.style['grid-template-columns'] = ' '.join(map(lambda value: str(value) if str(value).endswith('%') else str(value) + '%', values)) |
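Assuming `grid` is an instance of the containing widget class, both plain numbers and explicit percentage strings are accepted:

grid.set_column_sizes([30, "50%", 20])
# grid.style["grid-template-columns"] is now "30% 50% 20%"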
def start(self):
"""
        This method defines the workflow of SirMordred. It will:
- initialize the databases
- execute the different phases for the first iteration
(collection, identities, enrichment)
- start the collection and enrichment in parallel by data source
- start also the Sorting Hat merge
"""
# logger.debug("Starting SirMordred engine ...")
logger.info("")
logger.info("----------------------------")
logger.info("Starting SirMordred engine ...")
logger.info("- - - - - - - - - - - - - - ")
# check we have access to the needed ES
if not self.check_es_access():
print('Can not access Elasticsearch service. Exiting sirmordred ...')
sys.exit(1)
# If arthur is configured check that it is working
if self.conf['es_collection']['arthur']:
if not self.check_redis_access():
print('Can not access redis service. Exiting sirmordred ...')
sys.exit(1)
if not self.check_arthur_access():
print('Can not access arthur service. Exiting sirmordred ...')
sys.exit(1)
# If bestiary is configured check that it is working
if self.conf['projects']['projects_url']:
if not self.check_bestiary_access():
print('Can not access bestiary service. Exiting sirmordred ...')
sys.exit(1)
# Initial round: panels and projects loading
self.__execute_initial_load()
# Tasks to be executed during updating process
all_tasks_cls = []
all_tasks_cls.append(TaskProjects) # projects update is always needed
if self.conf['phases']['collection']:
if not self.conf['es_collection']['arthur']:
all_tasks_cls.append(TaskRawDataCollection)
else:
all_tasks_cls.append(TaskRawDataArthurCollection)
if self.conf['phases']['identities']:
# load identities and orgs periodically for updates
all_tasks_cls.append(TaskIdentitiesLoad)
all_tasks_cls.append(TaskIdentitiesMerge)
all_tasks_cls.append(TaskIdentitiesExport)
        # This is done in enrichment before doing the enrich
# if self.conf['phases']['collection']:
# all_tasks_cls.append(TaskIdentitiesCollection)
if self.conf['phases']['enrichment']:
all_tasks_cls.append(TaskEnrich)
if self.conf['phases']['track_items']:
all_tasks_cls.append(TaskTrackItems)
if self.conf['phases']['report']:
all_tasks_cls.append(TaskReport)
# this is the main loop, where the execution should spend
# most of its time
while True:
if not all_tasks_cls:
logger.warning("No tasks to execute.")
break
try:
if not self.conf['general']['update']:
self.execute_batch_tasks(all_tasks_cls,
self.conf['sortinghat']['sleep_for'],
self.conf['general']['min_update_delay'])
self.execute_batch_tasks(all_tasks_cls,
self.conf['sortinghat']['sleep_for'],
self.conf['general']['min_update_delay'])
break
else:
self.execute_nonstop_tasks(all_tasks_cls)
            # FIXME this point is never reached, so although the exception is
# handled and the error is shown, the traceback is not printed
except DataCollectionError as e:
logger.error(str(e))
var = traceback.format_exc()
logger.error(var)
except DataEnrichmentError as e:
logger.error(str(e))
var = traceback.format_exc()
logger.error(var)
logger.info("Finished SirMordred engine ...") | def function[start, parameter[self]]:
constant[
        This method defines the workflow of SirMordred. It will:
- initialize the databases
- execute the different phases for the first iteration
(collection, identities, enrichment)
- start the collection and enrichment in parallel by data source
- start also the Sorting Hat merge
]
call[name[logger].info, parameter[constant[]]]
call[name[logger].info, parameter[constant[----------------------------]]]
call[name[logger].info, parameter[constant[Starting SirMordred engine ...]]]
call[name[logger].info, parameter[constant[- - - - - - - - - - - - - - ]]]
if <ast.UnaryOp object at 0x7da1b0108d00> begin[:]
call[name[print], parameter[constant[Can not access Elasticsearch service. Exiting sirmordred ...]]]
call[name[sys].exit, parameter[constant[1]]]
if call[call[name[self].conf][constant[es_collection]]][constant[arthur]] begin[:]
if <ast.UnaryOp object at 0x7da1b0109720> begin[:]
call[name[print], parameter[constant[Can not access redis service. Exiting sirmordred ...]]]
call[name[sys].exit, parameter[constant[1]]]
if <ast.UnaryOp object at 0x7da1b0109570> begin[:]
call[name[print], parameter[constant[Can not access arthur service. Exiting sirmordred ...]]]
call[name[sys].exit, parameter[constant[1]]]
if call[call[name[self].conf][constant[projects]]][constant[projects_url]] begin[:]
if <ast.UnaryOp object at 0x7da1b0140e20> begin[:]
call[name[print], parameter[constant[Can not access bestiary service. Exiting sirmordred ...]]]
call[name[sys].exit, parameter[constant[1]]]
call[name[self].__execute_initial_load, parameter[]]
variable[all_tasks_cls] assign[=] list[[]]
call[name[all_tasks_cls].append, parameter[name[TaskProjects]]]
if call[call[name[self].conf][constant[phases]]][constant[collection]] begin[:]
if <ast.UnaryOp object at 0x7da1b0143970> begin[:]
call[name[all_tasks_cls].append, parameter[name[TaskRawDataCollection]]]
if call[call[name[self].conf][constant[phases]]][constant[identities]] begin[:]
call[name[all_tasks_cls].append, parameter[name[TaskIdentitiesLoad]]]
call[name[all_tasks_cls].append, parameter[name[TaskIdentitiesMerge]]]
call[name[all_tasks_cls].append, parameter[name[TaskIdentitiesExport]]]
if call[call[name[self].conf][constant[phases]]][constant[enrichment]] begin[:]
call[name[all_tasks_cls].append, parameter[name[TaskEnrich]]]
if call[call[name[self].conf][constant[phases]]][constant[track_items]] begin[:]
call[name[all_tasks_cls].append, parameter[name[TaskTrackItems]]]
if call[call[name[self].conf][constant[phases]]][constant[report]] begin[:]
call[name[all_tasks_cls].append, parameter[name[TaskReport]]]
while constant[True] begin[:]
if <ast.UnaryOp object at 0x7da1b0141420> begin[:]
call[name[logger].warning, parameter[constant[No tasks to execute.]]]
break
<ast.Try object at 0x7da1b01403a0>
call[name[logger].info, parameter[constant[Finished SirMordred engine ...]]] | keyword[def] identifier[start] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[check_es_access] ():
identifier[print] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] identifier[self] . identifier[conf] [ literal[string] ][ literal[string] ]:
keyword[if] keyword[not] identifier[self] . identifier[check_redis_access] ():
identifier[print] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] keyword[not] identifier[self] . identifier[check_arthur_access] ():
identifier[print] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] identifier[self] . identifier[conf] [ literal[string] ][ literal[string] ]:
keyword[if] keyword[not] identifier[self] . identifier[check_bestiary_access] ():
identifier[print] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
identifier[self] . identifier[__execute_initial_load] ()
identifier[all_tasks_cls] =[]
identifier[all_tasks_cls] . identifier[append] ( identifier[TaskProjects] )
keyword[if] identifier[self] . identifier[conf] [ literal[string] ][ literal[string] ]:
keyword[if] keyword[not] identifier[self] . identifier[conf] [ literal[string] ][ literal[string] ]:
identifier[all_tasks_cls] . identifier[append] ( identifier[TaskRawDataCollection] )
keyword[else] :
identifier[all_tasks_cls] . identifier[append] ( identifier[TaskRawDataArthurCollection] )
keyword[if] identifier[self] . identifier[conf] [ literal[string] ][ literal[string] ]:
identifier[all_tasks_cls] . identifier[append] ( identifier[TaskIdentitiesLoad] )
identifier[all_tasks_cls] . identifier[append] ( identifier[TaskIdentitiesMerge] )
identifier[all_tasks_cls] . identifier[append] ( identifier[TaskIdentitiesExport] )
keyword[if] identifier[self] . identifier[conf] [ literal[string] ][ literal[string] ]:
identifier[all_tasks_cls] . identifier[append] ( identifier[TaskEnrich] )
keyword[if] identifier[self] . identifier[conf] [ literal[string] ][ literal[string] ]:
identifier[all_tasks_cls] . identifier[append] ( identifier[TaskTrackItems] )
keyword[if] identifier[self] . identifier[conf] [ literal[string] ][ literal[string] ]:
identifier[all_tasks_cls] . identifier[append] ( identifier[TaskReport] )
keyword[while] keyword[True] :
keyword[if] keyword[not] identifier[all_tasks_cls] :
identifier[logger] . identifier[warning] ( literal[string] )
keyword[break]
keyword[try] :
keyword[if] keyword[not] identifier[self] . identifier[conf] [ literal[string] ][ literal[string] ]:
identifier[self] . identifier[execute_batch_tasks] ( identifier[all_tasks_cls] ,
identifier[self] . identifier[conf] [ literal[string] ][ literal[string] ],
identifier[self] . identifier[conf] [ literal[string] ][ literal[string] ])
identifier[self] . identifier[execute_batch_tasks] ( identifier[all_tasks_cls] ,
identifier[self] . identifier[conf] [ literal[string] ][ literal[string] ],
identifier[self] . identifier[conf] [ literal[string] ][ literal[string] ])
keyword[break]
keyword[else] :
identifier[self] . identifier[execute_nonstop_tasks] ( identifier[all_tasks_cls] )
keyword[except] identifier[DataCollectionError] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( identifier[str] ( identifier[e] ))
identifier[var] = identifier[traceback] . identifier[format_exc] ()
identifier[logger] . identifier[error] ( identifier[var] )
keyword[except] identifier[DataEnrichmentError] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( identifier[str] ( identifier[e] ))
identifier[var] = identifier[traceback] . identifier[format_exc] ()
identifier[logger] . identifier[error] ( identifier[var] )
identifier[logger] . identifier[info] ( literal[string] ) | def start(self):
"""
    This method defines the workflow of SirMordred. It will:
- initialize the databases
- execute the different phases for the first iteration
(collection, identities, enrichment)
- start the collection and enrichment in parallel by data source
- start also the Sorting Hat merge
"""
# logger.debug("Starting SirMordred engine ...")
logger.info('')
logger.info('----------------------------')
logger.info('Starting SirMordred engine ...')
logger.info('- - - - - - - - - - - - - - ')
# check we have access to the needed ES
if not self.check_es_access():
print('Can not access Elasticsearch service. Exiting sirmordred ...')
sys.exit(1) # depends on [control=['if'], data=[]]
# If arthur is configured check that it is working
if self.conf['es_collection']['arthur']:
if not self.check_redis_access():
print('Can not access redis service. Exiting sirmordred ...')
sys.exit(1) # depends on [control=['if'], data=[]]
if not self.check_arthur_access():
print('Can not access arthur service. Exiting sirmordred ...')
sys.exit(1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# If bestiary is configured check that it is working
if self.conf['projects']['projects_url']:
if not self.check_bestiary_access():
print('Can not access bestiary service. Exiting sirmordred ...')
sys.exit(1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Initial round: panels and projects loading
self.__execute_initial_load()
# Tasks to be executed during updating process
all_tasks_cls = []
all_tasks_cls.append(TaskProjects) # projects update is always needed
if self.conf['phases']['collection']:
if not self.conf['es_collection']['arthur']:
all_tasks_cls.append(TaskRawDataCollection) # depends on [control=['if'], data=[]]
else:
all_tasks_cls.append(TaskRawDataArthurCollection) # depends on [control=['if'], data=[]]
if self.conf['phases']['identities']:
# load identities and orgs periodically for updates
all_tasks_cls.append(TaskIdentitiesLoad)
all_tasks_cls.append(TaskIdentitiesMerge)
all_tasks_cls.append(TaskIdentitiesExport) # depends on [control=['if'], data=[]]
    # This is done in enrichment before doing the enrich
# if self.conf['phases']['collection']:
# all_tasks_cls.append(TaskIdentitiesCollection)
if self.conf['phases']['enrichment']:
all_tasks_cls.append(TaskEnrich) # depends on [control=['if'], data=[]]
if self.conf['phases']['track_items']:
all_tasks_cls.append(TaskTrackItems) # depends on [control=['if'], data=[]]
if self.conf['phases']['report']:
all_tasks_cls.append(TaskReport) # depends on [control=['if'], data=[]]
# this is the main loop, where the execution should spend
# most of its time
while True:
if not all_tasks_cls:
logger.warning('No tasks to execute.')
break # depends on [control=['if'], data=[]]
try:
if not self.conf['general']['update']:
self.execute_batch_tasks(all_tasks_cls, self.conf['sortinghat']['sleep_for'], self.conf['general']['min_update_delay'])
self.execute_batch_tasks(all_tasks_cls, self.conf['sortinghat']['sleep_for'], self.conf['general']['min_update_delay'])
break # depends on [control=['if'], data=[]]
else:
self.execute_nonstop_tasks(all_tasks_cls) # depends on [control=['try'], data=[]]
        # FIXME this point is never reached, so although the exception is
# handled and the error is shown, the traceback is not printed
except DataCollectionError as e:
logger.error(str(e))
var = traceback.format_exc()
logger.error(var) # depends on [control=['except'], data=['e']]
except DataEnrichmentError as e:
logger.error(str(e))
var = traceback.format_exc()
logger.error(var) # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=[]]
logger.info('Finished SirMordred engine ...') |
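For orientation, this is the shape of the configuration dict that start() reads, inferred from the lookups above; every key appears in the method, but the values shown are illustrative defaults, not real settings:

conf = {
    'general': {'update': False, 'min_update_delay': 60},
    'es_collection': {'arthur': False},
    'projects': {'projects_url': None},
    'phases': {'collection': True, 'identities': True, 'enrichment': True,
               'track_items': False, 'report': False},
    'sortinghat': {'sleep_for': 3600},
}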
def azureContainers(self, *args, **kwargs):
"""
List containers in an Account Managed by Auth
Retrieve a list of all containers in an account.
This method gives output: ``v1/azure-container-list-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureContainers"], *args, **kwargs) | def function[azureContainers, parameter[self]]:
constant[
List containers in an Account Managed by Auth
Retrieve a list of all containers in an account.
This method gives output: ``v1/azure-container-list-response.json#``
This method is ``stable``
]
return[call[name[self]._makeApiCall, parameter[call[name[self].funcinfo][constant[azureContainers]], <ast.Starred object at 0x7da20c6e73a0>]]] | keyword[def] identifier[azureContainers] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[_makeApiCall] ( identifier[self] . identifier[funcinfo] [ literal[string] ],* identifier[args] ,** identifier[kwargs] ) | def azureContainers(self, *args, **kwargs):
"""
List containers in an Account Managed by Auth
Retrieve a list of all containers in an account.
This method gives output: ``v1/azure-container-list-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo['azureContainers'], *args, **kwargs) |
def get_par_change_limits(self):
        """ Calculate the various parameter change limits used in PEST.
Works in control file values space (not log transformed space). Also
adds columns for effective upper and lower which account for par bounds and the
value of parchglim
Returns
-------
df : pandas.DataFrame
a copy of self.parameter_data with columns for relative and factor change limits
Note
----
does not yet support absolute parameter change limits!
"""
par = self.parameter_data
fpars = par.loc[par.parchglim=="factor","parnme"]
rpars = par.loc[par.parchglim == "relative", "parnme"]
apars = par.loc[par.parchglim == "absolute", "parnme"]
change_df = par.copy()
fpm = self.control_data.facparmax
rpm = self.control_data.relparmax
facorig = self.control_data.facorig
base_vals = par.parval1.copy()
# apply zero value correction
base_vals[base_vals==0] = par.loc[base_vals==0,"parubnd"] / 4.0
# apply facorig
replace_pars = base_vals.index.map(lambda x: par.loc[x,"partrans"]!="log" and np.abs(base_vals.loc[x]) < facorig*np.abs(base_vals.loc[x]))
#print(facorig,replace_pars)
base_vals.loc[replace_pars] = base_vals.loc[replace_pars] * facorig
# negative fac pars
nfpars = par.loc[base_vals.apply(lambda x: x < 0)].index
change_df.loc[nfpars, "fac_upper"] = base_vals / fpm
change_df.loc[nfpars, "fac_lower"] = base_vals * fpm
        # positive fac pars
pfpars = par.loc[base_vals.apply(lambda x: x > 0)].index
change_df.loc[pfpars, "fac_upper"] = base_vals * fpm
change_df.loc[pfpars, "fac_lower"] = base_vals / fpm
# relative
rdelta = base_vals.apply(np.abs) * rpm
change_df.loc[:,"rel_upper"] = base_vals + rdelta
change_df.loc[:,"rel_lower"] = base_vals - rdelta
change_df.loc[:,"chg_upper"] = np.NaN
change_df.loc[fpars,"chg_upper"] = change_df.fac_upper[fpars]
change_df.loc[rpars, "chg_upper"] = change_df.rel_upper[rpars]
change_df.loc[:, "chg_lower"] = np.NaN
change_df.loc[fpars, "chg_lower"] = change_df.fac_lower[fpars]
change_df.loc[rpars, "chg_lower"] = change_df.rel_lower[rpars]
# effective limits
change_df.loc[:,"eff_upper"] = change_df.loc[:,["parubnd","chg_upper"]].min(axis=1)
change_df.loc[:,"eff_lower"] = change_df.loc[:, ["parlbnd", "chg_lower"]].max(axis=1)
return change_df | def function[get_par_change_limits, parameter[self]]:
constant[ Calculate the various parameter change limits used in PEST.
Works in control file values space (not log transformed space). Also
adds columns for effective upper and lower which account for par bounds and the
value of parchglim
Returns
-------
df : pandas.DataFrame
a copy of self.parameter_data with columns for relative and factor change limits
Note
----
does not yet support absolute parameter change limits!
]
variable[par] assign[=] name[self].parameter_data
variable[fpars] assign[=] call[name[par].loc][tuple[[<ast.Compare object at 0x7da1b1d68760>, <ast.Constant object at 0x7da1b1d68820>]]]
variable[rpars] assign[=] call[name[par].loc][tuple[[<ast.Compare object at 0x7da1b1d69210>, <ast.Constant object at 0x7da1b1d699f0>]]]
variable[apars] assign[=] call[name[par].loc][tuple[[<ast.Compare object at 0x7da1b1d692d0>, <ast.Constant object at 0x7da1b1d68430>]]]
variable[change_df] assign[=] call[name[par].copy, parameter[]]
variable[fpm] assign[=] name[self].control_data.facparmax
variable[rpm] assign[=] name[self].control_data.relparmax
variable[facorig] assign[=] name[self].control_data.facorig
variable[base_vals] assign[=] call[name[par].parval1.copy, parameter[]]
call[name[base_vals]][compare[name[base_vals] equal[==] constant[0]]] assign[=] binary_operation[call[name[par].loc][tuple[[<ast.Compare object at 0x7da1b1d6b7c0>, <ast.Constant object at 0x7da1b1d6a9e0>]]] / constant[4.0]]
variable[replace_pars] assign[=] call[name[base_vals].index.map, parameter[<ast.Lambda object at 0x7da1b1d6b2e0>]]
call[name[base_vals].loc][name[replace_pars]] assign[=] binary_operation[call[name[base_vals].loc][name[replace_pars]] * name[facorig]]
variable[nfpars] assign[=] call[name[par].loc][call[name[base_vals].apply, parameter[<ast.Lambda object at 0x7da1b1d28070>]]].index
call[name[change_df].loc][tuple[[<ast.Name object at 0x7da1b1d28040>, <ast.Constant object at 0x7da1b1d283d0>]]] assign[=] binary_operation[name[base_vals] / name[fpm]]
call[name[change_df].loc][tuple[[<ast.Name object at 0x7da1b1d296f0>, <ast.Constant object at 0x7da1b1d29690>]]] assign[=] binary_operation[name[base_vals] * name[fpm]]
variable[pfpars] assign[=] call[name[par].loc][call[name[base_vals].apply, parameter[<ast.Lambda object at 0x7da1b1d29600>]]].index
call[name[change_df].loc][tuple[[<ast.Name object at 0x7da1b1d29e40>, <ast.Constant object at 0x7da1b1d29ab0>]]] assign[=] binary_operation[name[base_vals] * name[fpm]]
call[name[change_df].loc][tuple[[<ast.Name object at 0x7da1b1d28850>, <ast.Constant object at 0x7da1b1d29e70>]]] assign[=] binary_operation[name[base_vals] / name[fpm]]
variable[rdelta] assign[=] binary_operation[call[name[base_vals].apply, parameter[name[np].abs]] * name[rpm]]
call[name[change_df].loc][tuple[[<ast.Slice object at 0x7da1b1d293c0>, <ast.Constant object at 0x7da1b1d29b10>]]] assign[=] binary_operation[name[base_vals] + name[rdelta]]
call[name[change_df].loc][tuple[[<ast.Slice object at 0x7da1b1d28d90>, <ast.Constant object at 0x7da1b1d28220>]]] assign[=] binary_operation[name[base_vals] - name[rdelta]]
call[name[change_df].loc][tuple[[<ast.Slice object at 0x7da1b1d29a20>, <ast.Constant object at 0x7da1b1d289d0>]]] assign[=] name[np].NaN
call[name[change_df].loc][tuple[[<ast.Name object at 0x7da1b1d288e0>, <ast.Constant object at 0x7da1b1d5c430>]]] assign[=] call[name[change_df].fac_upper][name[fpars]]
call[name[change_df].loc][tuple[[<ast.Name object at 0x7da1b1d5cb50>, <ast.Constant object at 0x7da1b1d5caf0>]]] assign[=] call[name[change_df].rel_upper][name[rpars]]
call[name[change_df].loc][tuple[[<ast.Slice object at 0x7da1b1d5c9a0>, <ast.Constant object at 0x7da1b1d5cbb0>]]] assign[=] name[np].NaN
call[name[change_df].loc][tuple[[<ast.Name object at 0x7da1b1d5c3a0>, <ast.Constant object at 0x7da1b1d5c400>]]] assign[=] call[name[change_df].fac_lower][name[fpars]]
call[name[change_df].loc][tuple[[<ast.Name object at 0x7da1b1d5c490>, <ast.Constant object at 0x7da1b1d5c550>]]] assign[=] call[name[change_df].rel_lower][name[rpars]]
call[name[change_df].loc][tuple[[<ast.Slice object at 0x7da1b1d5c460>, <ast.Constant object at 0x7da1b1d5c6d0>]]] assign[=] call[call[name[change_df].loc][tuple[[<ast.Slice object at 0x7da1b1d5c730>, <ast.List object at 0x7da1b1d5c760>]]].min, parameter[]]
call[name[change_df].loc][tuple[[<ast.Slice object at 0x7da1b1d2d840>, <ast.Constant object at 0x7da1b1d2d930>]]] assign[=] call[call[name[change_df].loc][tuple[[<ast.Slice object at 0x7da1b1d2d690>, <ast.List object at 0x7da1b1d2d180>]]].max, parameter[]]
return[name[change_df]] | keyword[def] identifier[get_par_change_limits] ( identifier[self] ):
literal[string]
identifier[par] = identifier[self] . identifier[parameter_data]
identifier[fpars] = identifier[par] . identifier[loc] [ identifier[par] . identifier[parchglim] == literal[string] , literal[string] ]
identifier[rpars] = identifier[par] . identifier[loc] [ identifier[par] . identifier[parchglim] == literal[string] , literal[string] ]
identifier[apars] = identifier[par] . identifier[loc] [ identifier[par] . identifier[parchglim] == literal[string] , literal[string] ]
identifier[change_df] = identifier[par] . identifier[copy] ()
identifier[fpm] = identifier[self] . identifier[control_data] . identifier[facparmax]
identifier[rpm] = identifier[self] . identifier[control_data] . identifier[relparmax]
identifier[facorig] = identifier[self] . identifier[control_data] . identifier[facorig]
identifier[base_vals] = identifier[par] . identifier[parval1] . identifier[copy] ()
identifier[base_vals] [ identifier[base_vals] == literal[int] ]= identifier[par] . identifier[loc] [ identifier[base_vals] == literal[int] , literal[string] ]/ literal[int]
identifier[replace_pars] = identifier[base_vals] . identifier[index] . identifier[map] ( keyword[lambda] identifier[x] : identifier[par] . identifier[loc] [ identifier[x] , literal[string] ]!= literal[string] keyword[and] identifier[np] . identifier[abs] ( identifier[base_vals] . identifier[loc] [ identifier[x] ])< identifier[facorig] * identifier[np] . identifier[abs] ( identifier[base_vals] . identifier[loc] [ identifier[x] ]))
identifier[base_vals] . identifier[loc] [ identifier[replace_pars] ]= identifier[base_vals] . identifier[loc] [ identifier[replace_pars] ]* identifier[facorig]
identifier[nfpars] = identifier[par] . identifier[loc] [ identifier[base_vals] . identifier[apply] ( keyword[lambda] identifier[x] : identifier[x] < literal[int] )]. identifier[index]
identifier[change_df] . identifier[loc] [ identifier[nfpars] , literal[string] ]= identifier[base_vals] / identifier[fpm]
identifier[change_df] . identifier[loc] [ identifier[nfpars] , literal[string] ]= identifier[base_vals] * identifier[fpm]
identifier[pfpars] = identifier[par] . identifier[loc] [ identifier[base_vals] . identifier[apply] ( keyword[lambda] identifier[x] : identifier[x] > literal[int] )]. identifier[index]
identifier[change_df] . identifier[loc] [ identifier[pfpars] , literal[string] ]= identifier[base_vals] * identifier[fpm]
identifier[change_df] . identifier[loc] [ identifier[pfpars] , literal[string] ]= identifier[base_vals] / identifier[fpm]
identifier[rdelta] = identifier[base_vals] . identifier[apply] ( identifier[np] . identifier[abs] )* identifier[rpm]
identifier[change_df] . identifier[loc] [:, literal[string] ]= identifier[base_vals] + identifier[rdelta]
identifier[change_df] . identifier[loc] [:, literal[string] ]= identifier[base_vals] - identifier[rdelta]
identifier[change_df] . identifier[loc] [:, literal[string] ]= identifier[np] . identifier[NaN]
identifier[change_df] . identifier[loc] [ identifier[fpars] , literal[string] ]= identifier[change_df] . identifier[fac_upper] [ identifier[fpars] ]
identifier[change_df] . identifier[loc] [ identifier[rpars] , literal[string] ]= identifier[change_df] . identifier[rel_upper] [ identifier[rpars] ]
identifier[change_df] . identifier[loc] [:, literal[string] ]= identifier[np] . identifier[NaN]
identifier[change_df] . identifier[loc] [ identifier[fpars] , literal[string] ]= identifier[change_df] . identifier[fac_lower] [ identifier[fpars] ]
identifier[change_df] . identifier[loc] [ identifier[rpars] , literal[string] ]= identifier[change_df] . identifier[rel_lower] [ identifier[rpars] ]
identifier[change_df] . identifier[loc] [:, literal[string] ]= identifier[change_df] . identifier[loc] [:,[ literal[string] , literal[string] ]]. identifier[min] ( identifier[axis] = literal[int] )
identifier[change_df] . identifier[loc] [:, literal[string] ]= identifier[change_df] . identifier[loc] [:,[ literal[string] , literal[string] ]]. identifier[max] ( identifier[axis] = literal[int] )
keyword[return] identifier[change_df] | def get_par_change_limits(self):
    """ Calculate the various parameter change limits used in PEST.
Works in control file values space (not log transformed space). Also
adds columns for effective upper and lower which account for par bounds and the
value of parchglim
Returns
-------
df : pandas.DataFrame
a copy of self.parameter_data with columns for relative and factor change limits
Note
----
does not yet support absolute parameter change limits!
"""
par = self.parameter_data
fpars = par.loc[par.parchglim == 'factor', 'parnme']
rpars = par.loc[par.parchglim == 'relative', 'parnme']
apars = par.loc[par.parchglim == 'absolute', 'parnme']
change_df = par.copy()
fpm = self.control_data.facparmax
rpm = self.control_data.relparmax
facorig = self.control_data.facorig
base_vals = par.parval1.copy()
# apply zero value correction
base_vals[base_vals == 0] = par.loc[base_vals == 0, 'parubnd'] / 4.0
# apply facorig
replace_pars = base_vals.index.map(lambda x: par.loc[x, 'partrans'] != 'log' and np.abs(base_vals.loc[x]) < facorig * np.abs(base_vals.loc[x]))
#print(facorig,replace_pars)
base_vals.loc[replace_pars] = base_vals.loc[replace_pars] * facorig
# negative fac pars
nfpars = par.loc[base_vals.apply(lambda x: x < 0)].index
change_df.loc[nfpars, 'fac_upper'] = base_vals / fpm
change_df.loc[nfpars, 'fac_lower'] = base_vals * fpm
    # positive fac pars
pfpars = par.loc[base_vals.apply(lambda x: x > 0)].index
change_df.loc[pfpars, 'fac_upper'] = base_vals * fpm
change_df.loc[pfpars, 'fac_lower'] = base_vals / fpm
# relative
rdelta = base_vals.apply(np.abs) * rpm
change_df.loc[:, 'rel_upper'] = base_vals + rdelta
change_df.loc[:, 'rel_lower'] = base_vals - rdelta
change_df.loc[:, 'chg_upper'] = np.NaN
change_df.loc[fpars, 'chg_upper'] = change_df.fac_upper[fpars]
change_df.loc[rpars, 'chg_upper'] = change_df.rel_upper[rpars]
change_df.loc[:, 'chg_lower'] = np.NaN
change_df.loc[fpars, 'chg_lower'] = change_df.fac_lower[fpars]
change_df.loc[rpars, 'chg_lower'] = change_df.rel_lower[rpars]
# effective limits
change_df.loc[:, 'eff_upper'] = change_df.loc[:, ['parubnd', 'chg_upper']].min(axis=1)
change_df.loc[:, 'eff_lower'] = change_df.loc[:, ['parlbnd', 'chg_lower']].max(axis=1)
return change_df |
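A worked example of the two limit rules for a single positive parameter; the numbers are toy values, not taken from any real PEST control file:

# parval1 = 10, facparmax = 5, relparmax = 3
val, fpm, rpm = 10.0, 5.0, 3.0
fac_upper, fac_lower = val * fpm, val / fpm                        # 50.0 and 2.0
rel_upper, rel_lower = val + abs(val) * rpm, val - abs(val) * rpm  # 40.0 and -20.0
# For a negative parameter the factor bounds swap, which is why the
# code splits parameters on sign before filling fac_upper/fac_lower.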
def apply(self, node):
        """ Apply the transformation and return whether an update happened. """
new_node = self.run(node)
return self.update, new_node | def function[apply, parameter[self, node]]:
constant[ Apply the transformation and return whether an update happened. ]
variable[new_node] assign[=] call[name[self].run, parameter[name[node]]]
return[tuple[[<ast.Attribute object at 0x7da20c76c0a0>, <ast.Name object at 0x7da20c76f1f0>]]] | keyword[def] identifier[apply] ( identifier[self] , identifier[node] ):
literal[string]
identifier[new_node] = identifier[self] . identifier[run] ( identifier[node] )
keyword[return] identifier[self] . identifier[update] , identifier[new_node] | def apply(self, node):
    """ Apply the transformation and return whether an update happened. """
new_node = self.run(node)
return (self.update, new_node) |
def delete(self):
"""
Remove the document and all of its bundles from ProvStore.
.. warning::
Cannot be undone.
"""
if self.abstract:
raise AbstractDocumentException()
self._api.delete_document(self.id)
self._id = None
return True | def function[delete, parameter[self]]:
constant[
Remove the document and all of its bundles from ProvStore.
.. warning::
Cannot be undone.
]
if name[self].abstract begin[:]
<ast.Raise object at 0x7da20e9b0220>
call[name[self]._api.delete_document, parameter[name[self].id]]
name[self]._id assign[=] constant[None]
return[constant[True]] | keyword[def] identifier[delete] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[abstract] :
keyword[raise] identifier[AbstractDocumentException] ()
identifier[self] . identifier[_api] . identifier[delete_document] ( identifier[self] . identifier[id] )
identifier[self] . identifier[_id] = keyword[None]
keyword[return] keyword[True] | def delete(self):
"""
Remove the document and all of its bundles from ProvStore.
.. warning::
Cannot be undone.
"""
if self.abstract:
raise AbstractDocumentException() # depends on [control=['if'], data=[]]
self._api.delete_document(self.id)
self._id = None
return True |
def get_time(self) -> float:
"""
Get the current time in seconds
Returns:
The current time in seconds
"""
if self.pause_time is not None:
curr_time = self.pause_time - self.offset - self.start_time
return curr_time
curr_time = time.time()
return curr_time - self.start_time - self.offset | def function[get_time, parameter[self]]:
constant[
Get the current time in seconds
Returns:
The current time in seconds
]
if compare[name[self].pause_time is_not constant[None]] begin[:]
variable[curr_time] assign[=] binary_operation[binary_operation[name[self].pause_time - name[self].offset] - name[self].start_time]
return[name[curr_time]]
variable[curr_time] assign[=] call[name[time].time, parameter[]]
return[binary_operation[binary_operation[name[curr_time] - name[self].start_time] - name[self].offset]] | keyword[def] identifier[get_time] ( identifier[self] )-> identifier[float] :
literal[string]
keyword[if] identifier[self] . identifier[pause_time] keyword[is] keyword[not] keyword[None] :
identifier[curr_time] = identifier[self] . identifier[pause_time] - identifier[self] . identifier[offset] - identifier[self] . identifier[start_time]
keyword[return] identifier[curr_time]
identifier[curr_time] = identifier[time] . identifier[time] ()
keyword[return] identifier[curr_time] - identifier[self] . identifier[start_time] - identifier[self] . identifier[offset] | def get_time(self) -> float:
"""
Get the current time in seconds
Returns:
The current time in seconds
"""
if self.pause_time is not None:
curr_time = self.pause_time - self.offset - self.start_time
return curr_time # depends on [control=['if'], data=[]]
curr_time = time.time()
return curr_time - self.start_time - self.offset |
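A numeric walkthrough of the pause branch with made-up timestamps; the resume behaviour is inferred, since the method that grows `offset` is not shown here:

start_time, offset = 100.0, 0.0
pause_time = 107.5                         # clock paused 7.5 s after start
frozen = pause_time - offset - start_time
print(frozen)                              # 7.5, the value returned while paused
# On resume, offset is presumably increased by the paused span so that
# time.time() - start_time - offset continues from 7.5.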
def _create_service(self, parameters={}, **kwargs):
"""
Create a Cloud Foundry service that has custom parameters.
"""
logging.debug("_create_service()")
logging.debug(str.join(',', [self.service_name, self.plan_name,
self.name, str(parameters)]))
return self.service.create_service(self.service_name, self.plan_name,
self.name, parameters, **kwargs) | def function[_create_service, parameter[self, parameters]]:
constant[
Create a Cloud Foundry service that has custom parameters.
]
call[name[logging].debug, parameter[constant[_create_service()]]]
call[name[logging].debug, parameter[call[name[str].join, parameter[constant[,], list[[<ast.Attribute object at 0x7da2041d8a30>, <ast.Attribute object at 0x7da2041d8f70>, <ast.Attribute object at 0x7da2041d98a0>, <ast.Call object at 0x7da2041db3a0>]]]]]]
return[call[name[self].service.create_service, parameter[name[self].service_name, name[self].plan_name, name[self].name, name[parameters]]]] | keyword[def] identifier[_create_service] ( identifier[self] , identifier[parameters] ={},** identifier[kwargs] ):
literal[string]
identifier[logging] . identifier[debug] ( literal[string] )
identifier[logging] . identifier[debug] ( identifier[str] . identifier[join] ( literal[string] ,[ identifier[self] . identifier[service_name] , identifier[self] . identifier[plan_name] ,
identifier[self] . identifier[name] , identifier[str] ( identifier[parameters] )]))
keyword[return] identifier[self] . identifier[service] . identifier[create_service] ( identifier[self] . identifier[service_name] , identifier[self] . identifier[plan_name] ,
identifier[self] . identifier[name] , identifier[parameters] ,** identifier[kwargs] ) | def _create_service(self, parameters={}, **kwargs):
"""
Create a Cloud Foundry service that has custom parameters.
"""
logging.debug('_create_service()')
logging.debug(str.join(',', [self.service_name, self.plan_name, self.name, str(parameters)]))
return self.service.create_service(self.service_name, self.plan_name, self.name, parameters, **kwargs) |
def gradient_n_pal(colors, values=None, name='gradientn'):
"""
    Create an n-color gradient palette
Parameters
----------
colors : list
list of colors
values : list, optional
list of points in the range [0, 1] at which to
place each color. Must be the same size as
`colors`. Default to evenly space the colors
name : str
Name to call the resultant MPL colormap
Returns
-------
out : function
Continuous color palette that takes a single
parameter either a :class:`float` or a sequence
of floats maps those value(s) onto the palette
and returns color(s). The float(s) must be
in the range [0, 1].
Examples
--------
>>> palette = gradient_n_pal(['red', 'blue'])
>>> palette([0, .25, .5, .75, 1])
['#ff0000', '#bf0040', '#7f0080', '#3f00c0', '#0000ff']
"""
# Note: For better results across devices and media types,
# it would be better to do the interpolation in
# Lab color space.
if values is None:
colormap = mcolors.LinearSegmentedColormap.from_list(
name, colors)
else:
colormap = mcolors.LinearSegmentedColormap.from_list(
name, list(zip(values, colors)))
def _gradient_n_pal(vals):
return ratios_to_colors(vals, colormap)
return _gradient_n_pal | def function[gradient_n_pal, parameter[colors, values, name]]:
constant[
    Create an n-color gradient palette
Parameters
----------
colors : list
list of colors
values : list, optional
list of points in the range [0, 1] at which to
place each color. Must be the same size as
`colors`. Default to evenly space the colors
name : str
Name to call the resultant MPL colormap
Returns
-------
out : function
Continuous color palette that takes a single
parameter either a :class:`float` or a sequence
of floats maps those value(s) onto the palette
and returns color(s). The float(s) must be
in the range [0, 1].
Examples
--------
>>> palette = gradient_n_pal(['red', 'blue'])
>>> palette([0, .25, .5, .75, 1])
['#ff0000', '#bf0040', '#7f0080', '#3f00c0', '#0000ff']
]
if compare[name[values] is constant[None]] begin[:]
variable[colormap] assign[=] call[name[mcolors].LinearSegmentedColormap.from_list, parameter[name[name], name[colors]]]
def function[_gradient_n_pal, parameter[vals]]:
return[call[name[ratios_to_colors], parameter[name[vals], name[colormap]]]]
return[name[_gradient_n_pal]] | keyword[def] identifier[gradient_n_pal] ( identifier[colors] , identifier[values] = keyword[None] , identifier[name] = literal[string] ):
literal[string]
keyword[if] identifier[values] keyword[is] keyword[None] :
identifier[colormap] = identifier[mcolors] . identifier[LinearSegmentedColormap] . identifier[from_list] (
identifier[name] , identifier[colors] )
keyword[else] :
identifier[colormap] = identifier[mcolors] . identifier[LinearSegmentedColormap] . identifier[from_list] (
identifier[name] , identifier[list] ( identifier[zip] ( identifier[values] , identifier[colors] )))
keyword[def] identifier[_gradient_n_pal] ( identifier[vals] ):
keyword[return] identifier[ratios_to_colors] ( identifier[vals] , identifier[colormap] )
keyword[return] identifier[_gradient_n_pal] | def gradient_n_pal(colors, values=None, name='gradientn'):
"""
    Create an n-color gradient palette
Parameters
----------
colors : list
list of colors
values : list, optional
list of points in the range [0, 1] at which to
place each color. Must be the same size as
`colors`. Default to evenly space the colors
name : str
Name to call the resultant MPL colormap
Returns
-------
out : function
Continuous color palette that takes a single
parameter either a :class:`float` or a sequence
of floats maps those value(s) onto the palette
and returns color(s). The float(s) must be
in the range [0, 1].
Examples
--------
>>> palette = gradient_n_pal(['red', 'blue'])
>>> palette([0, .25, .5, .75, 1])
['#ff0000', '#bf0040', '#7f0080', '#3f00c0', '#0000ff']
"""
# Note: For better results across devices and media types,
# it would be better to do the interpolation in
# Lab color space.
if values is None:
colormap = mcolors.LinearSegmentedColormap.from_list(name, colors) # depends on [control=['if'], data=[]]
else:
colormap = mcolors.LinearSegmentedColormap.from_list(name, list(zip(values, colors)))
def _gradient_n_pal(vals):
return ratios_to_colors(vals, colormap)
return _gradient_n_pal |
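Beyond the evenly spaced case in the docstring, explicit stops can skew the gradient; the stop list must match the color list in length, and the output shown is inferred from the evenly spaced example rather than taken from a real run:

palette = gradient_n_pal(['red', 'white', 'blue'], values=[0, 0.8, 1])
palette([0.0, 0.8, 1.0])
# expected: ['#ff0000', '#ffffff', '#0000ff'] -- white sits at 0.8, not 0.5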
def cancel_spot_instance_requests(self, request_ids):
"""
Cancel the specified Spot Instance Requests.
:type request_ids: list
:param request_ids: A list of strings of the Request IDs to terminate
:rtype: list
:return: A list of the instances terminated
"""
params = {}
if request_ids:
self.build_list_params(params, request_ids, 'SpotInstanceRequestId')
return self.get_list('CancelSpotInstanceRequests', params,
[('item', Instance)], verb='POST') | def function[cancel_spot_instance_requests, parameter[self, request_ids]]:
constant[
Cancel the specified Spot Instance Requests.
:type request_ids: list
:param request_ids: A list of strings of the Request IDs to terminate
:rtype: list
:return: A list of the instances terminated
]
variable[params] assign[=] dictionary[[], []]
if name[request_ids] begin[:]
call[name[self].build_list_params, parameter[name[params], name[request_ids], constant[SpotInstanceRequestId]]]
return[call[name[self].get_list, parameter[constant[CancelSpotInstanceRequests], name[params], list[[<ast.Tuple object at 0x7da1b253f730>]]]]] | keyword[def] identifier[cancel_spot_instance_requests] ( identifier[self] , identifier[request_ids] ):
literal[string]
identifier[params] ={}
keyword[if] identifier[request_ids] :
identifier[self] . identifier[build_list_params] ( identifier[params] , identifier[request_ids] , literal[string] )
keyword[return] identifier[self] . identifier[get_list] ( literal[string] , identifier[params] ,
[( literal[string] , identifier[Instance] )], identifier[verb] = literal[string] ) | def cancel_spot_instance_requests(self, request_ids):
"""
Cancel the specified Spot Instance Requests.
:type request_ids: list
:param request_ids: A list of strings of the Request IDs to terminate
:rtype: list
:return: A list of the instances terminated
"""
params = {}
if request_ids:
self.build_list_params(params, request_ids, 'SpotInstanceRequestId') # depends on [control=['if'], data=[]]
return self.get_list('CancelSpotInstanceRequests', params, [('item', Instance)], verb='POST') |
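A hypothetical call against an EC2 connection object; the request ids are placeholders:

terminated = conn.cancel_spot_instance_requests(['sir-abc123', 'sir-def456'])
for instance in terminated:
    print(instance.id)   # Instance objects, per the docstring above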
def loadItems(self, excludeRead=False, loadLimit=20, since=None, until=None):
"""
        Load items and call itemsLoadedDone to transform data into objects
"""
self.clearItems()
self.loadtLoadOk = False
self.lastLoadLength = 0
self._itemsLoadedDone(self._getContent(excludeRead, None, loadLimit, since, until)) | def function[loadItems, parameter[self, excludeRead, loadLimit, since, until]]:
constant[
        Load items and call itemsLoadedDone to transform data into objects
]
call[name[self].clearItems, parameter[]]
name[self].loadtLoadOk assign[=] constant[False]
name[self].lastLoadLength assign[=] constant[0]
call[name[self]._itemsLoadedDone, parameter[call[name[self]._getContent, parameter[name[excludeRead], constant[None], name[loadLimit], name[since], name[until]]]]] | keyword[def] identifier[loadItems] ( identifier[self] , identifier[excludeRead] = keyword[False] , identifier[loadLimit] = literal[int] , identifier[since] = keyword[None] , identifier[until] = keyword[None] ):
literal[string]
identifier[self] . identifier[clearItems] ()
identifier[self] . identifier[loadtLoadOk] = keyword[False]
identifier[self] . identifier[lastLoadLength] = literal[int]
identifier[self] . identifier[_itemsLoadedDone] ( identifier[self] . identifier[_getContent] ( identifier[excludeRead] , keyword[None] , identifier[loadLimit] , identifier[since] , identifier[until] )) | def loadItems(self, excludeRead=False, loadLimit=20, since=None, until=None):
"""
    Load items and call itemsLoadedDone to transform data into objects
"""
self.clearItems()
self.loadtLoadOk = False
self.lastLoadLength = 0
self._itemsLoadedDone(self._getContent(excludeRead, None, loadLimit, since, until)) |
def read_var_bytes(self, max_size=sys.maxsize) -> bytes:
"""
Read a variable length of bytes from the stream.
Args:
max_size (int): (Optional) maximum number of bytes to read.
Returns:
bytes:
"""
length = self.read_var_int(max_size)
return self.read_bytes(length) | def function[read_var_bytes, parameter[self, max_size]]:
constant[
Read a variable length of bytes from the stream.
Args:
max_size (int): (Optional) maximum number of bytes to read.
Returns:
bytes:
]
variable[length] assign[=] call[name[self].read_var_int, parameter[name[max_size]]]
return[call[name[self].read_bytes, parameter[name[length]]]] | keyword[def] identifier[read_var_bytes] ( identifier[self] , identifier[max_size] = identifier[sys] . identifier[maxsize] )-> identifier[bytes] :
literal[string]
identifier[length] = identifier[self] . identifier[read_var_int] ( identifier[max_size] )
keyword[return] identifier[self] . identifier[read_bytes] ( identifier[length] ) | def read_var_bytes(self, max_size=sys.maxsize) -> bytes:
"""
Read a variable length of bytes from the stream.
Args:
max_size (int): (Optional) maximum number of bytes to read.
Returns:
bytes:
"""
length = self.read_var_int(max_size)
return self.read_bytes(length) |
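The same length-prefixed read over a plain BytesIO stream; only the single-byte var-int case (length < 0xFD) is sketched here, while the real read_var_int also decodes the 2/4/8-byte prefixes.

import io

def read_var_bytes(stream):
    # var-int length prefix, single-byte case only
    length = stream.read(1)[0]
    assert length < 0xFD, 'multi-byte var-int not handled in this sketch'
    return stream.read(length)

buf = io.BytesIO(b'\x05hello')
print(read_var_bytes(buf))  # b'hello'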
def add_user(
self,
name,
**attrs
):
"""Add a user to config."""
if self.user_exists(name):
raise KubeConfError("user with the given name already exists.")
users = self.get_users()
# Add parameters.
new_user = {'name': name, 'user':{}}
attrs_ = new_user['user']
attrs_.update(attrs)
users.append(new_user) | def function[add_user, parameter[self, name]]:
constant[Add a user to config.]
if call[name[self].user_exists, parameter[name[name]]] begin[:]
<ast.Raise object at 0x7da18c4ceaa0>
variable[users] assign[=] call[name[self].get_users, parameter[]]
variable[new_user] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cf550>, <ast.Constant object at 0x7da18c4cd930>], [<ast.Name object at 0x7da18c4cecb0>, <ast.Dict object at 0x7da18c4ceb60>]]
variable[attrs_] assign[=] call[name[new_user]][constant[user]]
call[name[attrs_].update, parameter[name[attrs]]]
call[name[users].append, parameter[name[new_user]]] | keyword[def] identifier[add_user] (
identifier[self] ,
identifier[name] ,
** identifier[attrs]
):
literal[string]
keyword[if] identifier[self] . identifier[user_exists] ( identifier[name] ):
keyword[raise] identifier[KubeConfError] ( literal[string] )
identifier[users] = identifier[self] . identifier[get_users] ()
identifier[new_user] ={ literal[string] : identifier[name] , literal[string] :{}}
identifier[attrs_] = identifier[new_user] [ literal[string] ]
identifier[attrs_] . identifier[update] ( identifier[attrs] )
identifier[users] . identifier[append] ( identifier[new_user] ) | def add_user(self, name, **attrs):
"""Add a user to config."""
if self.user_exists(name):
raise KubeConfError('user with the given name already exists.') # depends on [control=['if'], data=[]]
users = self.get_users()
# Add parameters.
new_user = {'name': name, 'user': {}}
attrs_ = new_user['user']
attrs_.update(attrs)
users.append(new_user) |
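A toy version showing the structure the method appends to the kubeconfig's users list; the module-level list stands in for get_users() and the rest of the KubeConf machinery.

users = []

def add_user(name, **attrs):
    if any(u['name'] == name for u in users):
        raise ValueError('user with the given name already exists.')
    users.append({'name': name, 'user': dict(attrs)})

add_user('alice', token='abc123')
print(users)  # [{'name': 'alice', 'user': {'token': 'abc123'}}]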
def _get_environment(self):
"""Defines any necessary environment variables for the pod executor"""
env = {}
for env_var_name, env_var_val in six.iteritems(self.kube_config.kube_env_vars):
env[env_var_name] = env_var_val
env["AIRFLOW__CORE__EXECUTOR"] = "LocalExecutor"
if self.kube_config.airflow_configmap:
env['AIRFLOW_HOME'] = self.worker_airflow_home
env['AIRFLOW__CORE__DAGS_FOLDER'] = self.worker_airflow_dags
if (not self.kube_config.airflow_configmap and
'AIRFLOW__CORE__SQL_ALCHEMY_CONN' not in self.kube_config.kube_secrets):
env['AIRFLOW__CORE__SQL_ALCHEMY_CONN'] = conf.get("core", "SQL_ALCHEMY_CONN")
if self.kube_config.git_dags_folder_mount_point:
# /root/airflow/dags/repo/dags
dag_volume_mount_path = os.path.join(
self.kube_config.git_dags_folder_mount_point,
self.kube_config.git_sync_dest, # repo
self.kube_config.git_subpath # dags
)
env['AIRFLOW__CORE__DAGS_FOLDER'] = dag_volume_mount_path
return env | def function[_get_environment, parameter[self]]:
constant[Defines any necessary environment variables for the pod executor]
variable[env] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18bcc8b80>, <ast.Name object at 0x7da18bccace0>]]] in starred[call[name[six].iteritems, parameter[name[self].kube_config.kube_env_vars]]] begin[:]
call[name[env]][name[env_var_name]] assign[=] name[env_var_val]
call[name[env]][constant[AIRFLOW__CORE__EXECUTOR]] assign[=] constant[LocalExecutor]
if name[self].kube_config.airflow_configmap begin[:]
call[name[env]][constant[AIRFLOW_HOME]] assign[=] name[self].worker_airflow_home
call[name[env]][constant[AIRFLOW__CORE__DAGS_FOLDER]] assign[=] name[self].worker_airflow_dags
if <ast.BoolOp object at 0x7da1b056d780> begin[:]
call[name[env]][constant[AIRFLOW__CORE__SQL_ALCHEMY_CONN]] assign[=] call[name[conf].get, parameter[constant[core], constant[SQL_ALCHEMY_CONN]]]
if name[self].kube_config.git_dags_folder_mount_point begin[:]
variable[dag_volume_mount_path] assign[=] call[name[os].path.join, parameter[name[self].kube_config.git_dags_folder_mount_point, name[self].kube_config.git_sync_dest, name[self].kube_config.git_subpath]]
call[name[env]][constant[AIRFLOW__CORE__DAGS_FOLDER]] assign[=] name[dag_volume_mount_path]
return[name[env]] | keyword[def] identifier[_get_environment] ( identifier[self] ):
literal[string]
identifier[env] ={}
keyword[for] identifier[env_var_name] , identifier[env_var_val] keyword[in] identifier[six] . identifier[iteritems] ( identifier[self] . identifier[kube_config] . identifier[kube_env_vars] ):
identifier[env] [ identifier[env_var_name] ]= identifier[env_var_val]
identifier[env] [ literal[string] ]= literal[string]
keyword[if] identifier[self] . identifier[kube_config] . identifier[airflow_configmap] :
identifier[env] [ literal[string] ]= identifier[self] . identifier[worker_airflow_home]
identifier[env] [ literal[string] ]= identifier[self] . identifier[worker_airflow_dags]
keyword[if] ( keyword[not] identifier[self] . identifier[kube_config] . identifier[airflow_configmap] keyword[and]
literal[string] keyword[not] keyword[in] identifier[self] . identifier[kube_config] . identifier[kube_secrets] ):
identifier[env] [ literal[string] ]= identifier[conf] . identifier[get] ( literal[string] , literal[string] )
keyword[if] identifier[self] . identifier[kube_config] . identifier[git_dags_folder_mount_point] :
identifier[dag_volume_mount_path] = identifier[os] . identifier[path] . identifier[join] (
identifier[self] . identifier[kube_config] . identifier[git_dags_folder_mount_point] ,
identifier[self] . identifier[kube_config] . identifier[git_sync_dest] ,
identifier[self] . identifier[kube_config] . identifier[git_subpath]
)
identifier[env] [ literal[string] ]= identifier[dag_volume_mount_path]
keyword[return] identifier[env] | def _get_environment(self):
"""Defines any necessary environment variables for the pod executor"""
env = {}
for (env_var_name, env_var_val) in six.iteritems(self.kube_config.kube_env_vars):
env[env_var_name] = env_var_val # depends on [control=['for'], data=[]]
env['AIRFLOW__CORE__EXECUTOR'] = 'LocalExecutor'
if self.kube_config.airflow_configmap:
env['AIRFLOW_HOME'] = self.worker_airflow_home
env['AIRFLOW__CORE__DAGS_FOLDER'] = self.worker_airflow_dags # depends on [control=['if'], data=[]]
if not self.kube_config.airflow_configmap and 'AIRFLOW__CORE__SQL_ALCHEMY_CONN' not in self.kube_config.kube_secrets:
env['AIRFLOW__CORE__SQL_ALCHEMY_CONN'] = conf.get('core', 'SQL_ALCHEMY_CONN') # depends on [control=['if'], data=[]]
if self.kube_config.git_dags_folder_mount_point:
# /root/airflow/dags/repo/dags
# repo
# dags
dag_volume_mount_path = os.path.join(self.kube_config.git_dags_folder_mount_point, self.kube_config.git_sync_dest, self.kube_config.git_subpath)
env['AIRFLOW__CORE__DAGS_FOLDER'] = dag_volume_mount_path # depends on [control=['if'], data=[]]
return env |
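A dependency-free sketch of the precedence above: user-supplied kube_env_vars first, then the executor override, the SQL connection fallback, and finally the git-sync DAG path; the plain dict stands in for self.kube_config and the conf lookup.

import os

def get_environment(config, sql_alchemy_conn='postgresql://airflow:pw@db/airflow'):
    env = dict(config.get('kube_env_vars', {}))
    env['AIRFLOW__CORE__EXECUTOR'] = 'LocalExecutor'
    if not config.get('airflow_configmap'):
        env['AIRFLOW__CORE__SQL_ALCHEMY_CONN'] = sql_alchemy_conn
    if config.get('git_dags_folder_mount_point'):
        # e.g. /root/airflow/dags/repo/dags
        env['AIRFLOW__CORE__DAGS_FOLDER'] = os.path.join(
            config['git_dags_folder_mount_point'],
            config.get('git_sync_dest', 'repo'),
            config.get('git_subpath', 'dags'))
    return env

print(get_environment({'git_dags_folder_mount_point': '/root/airflow/dags'}))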
def star(self) -> snug.Query[bool]:
"""star this repo"""
req = snug.PUT(BASE + f'/user/starred/{self.owner}/{self.name}')
return (yield req).status_code == 204 | def function[star, parameter[self]]:
constant[star this repo]
variable[req] assign[=] call[name[snug].PUT, parameter[binary_operation[name[BASE] + <ast.JoinedStr object at 0x7da1b2448cd0>]]]
return[compare[<ast.Yield object at 0x7da1b244aaa0>.status_code equal[==] constant[204]]] | keyword[def] identifier[star] ( identifier[self] )-> identifier[snug] . identifier[Query] [ identifier[bool] ]:
literal[string]
identifier[req] = identifier[snug] . identifier[PUT] ( identifier[BASE] + literal[string] )
keyword[return] ( keyword[yield] identifier[req] ). identifier[status_code] == literal[int] | def star(self) -> snug.Query[bool]:
"""star this repo"""
req = snug.PUT(BASE + f'/user/starred/{self.owner}/{self.name}')
return (yield req).status_code == 204 |
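The query above is a generator that yields a single request and receives the response back; a minimal executor for that protocol, with a fake transport standing in for snug's real HTTP layer.

class FakeResponse(object):
    status_code = 204

def execute(query, send):
    request = next(query)            # the yielded PUT request
    try:
        query.send(send(request))    # feed the response back into the query
    except StopIteration as stop:
        return stop.value            # the query's return value

def star(owner, name):
    response = yield ('PUT', 'https://api.github.com/user/starred/%s/%s' % (owner, name))
    return response.status_code == 204

print(execute(star('octocat', 'hello-world'), lambda request: FakeResponse()))  # True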
def _parse_key(self, indent):
"""Parse a series of key-value pairs."""
data = {}
new_indent = indent
while not self._finished and new_indent == indent:
self._skip_whitespace()
cur_token = self._cur_token
if cur_token['type'] is TT.id:
key = cur_token['value']
next_token = self._nth_token()
if next_token['type'] is TT.colon:
self._increment(2) # move past the ':'
# whitespace before a newline is not important
# whitespace after a newline is important
self._skip_whitespace()
self._skip_newlines()
data[key] = self._parse_value()
else:
raise ParseError("':'", next_token)
else:
if cur_token['type'] is TT.hyphen:
return data
else:
raise ParseError("identifier or '-'", cur_token)
if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
# skip whitespace at the end of the line
self._skip_whitespace()
self._skip_newlines()
# find next indentation level without incrementing
new_indent = 0
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] is TT.ws
):
temp_position += 1
new_indent += 1
if indent == 0 or new_indent < indent:
return data
else:
raise Exception(
"Parser screwed up, increase of indent on line {} should "
"have been caught by _parse_value().".format(
cur_token['line']
)
) | def function[_parse_key, parameter[self, indent]]:
constant[Parse a series of key-value pairs.]
variable[data] assign[=] dictionary[[], []]
variable[new_indent] assign[=] name[indent]
while <ast.BoolOp object at 0x7da2047eb5e0> begin[:]
call[name[self]._skip_whitespace, parameter[]]
variable[cur_token] assign[=] name[self]._cur_token
if compare[call[name[cur_token]][constant[type]] is name[TT].id] begin[:]
variable[key] assign[=] call[name[cur_token]][constant[value]]
variable[next_token] assign[=] call[name[self]._nth_token, parameter[]]
if compare[call[name[next_token]][constant[type]] is name[TT].colon] begin[:]
call[name[self]._increment, parameter[constant[2]]]
call[name[self]._skip_whitespace, parameter[]]
call[name[self]._skip_newlines, parameter[]]
call[name[data]][name[key]] assign[=] call[name[self]._parse_value, parameter[]]
if compare[call[call[name[self].tokens][binary_operation[name[self]._cur_position - constant[1]]]][constant[type]] is_not name[TT].lbreak] begin[:]
call[name[self]._skip_whitespace, parameter[]]
call[name[self]._skip_newlines, parameter[]]
variable[new_indent] assign[=] constant[0]
variable[temp_position] assign[=] name[self]._cur_position
while <ast.BoolOp object at 0x7da20c6aae90> begin[:]
<ast.AugAssign object at 0x7da20c6a8970>
<ast.AugAssign object at 0x7da20c6aa8c0>
if <ast.BoolOp object at 0x7da20c6a91b0> begin[:]
return[name[data]] | keyword[def] identifier[_parse_key] ( identifier[self] , identifier[indent] ):
literal[string]
identifier[data] ={}
identifier[new_indent] = identifier[indent]
keyword[while] keyword[not] identifier[self] . identifier[_finished] keyword[and] identifier[new_indent] == identifier[indent] :
identifier[self] . identifier[_skip_whitespace] ()
identifier[cur_token] = identifier[self] . identifier[_cur_token]
keyword[if] identifier[cur_token] [ literal[string] ] keyword[is] identifier[TT] . identifier[id] :
identifier[key] = identifier[cur_token] [ literal[string] ]
identifier[next_token] = identifier[self] . identifier[_nth_token] ()
keyword[if] identifier[next_token] [ literal[string] ] keyword[is] identifier[TT] . identifier[colon] :
identifier[self] . identifier[_increment] ( literal[int] )
identifier[self] . identifier[_skip_whitespace] ()
identifier[self] . identifier[_skip_newlines] ()
identifier[data] [ identifier[key] ]= identifier[self] . identifier[_parse_value] ()
keyword[else] :
keyword[raise] identifier[ParseError] ( literal[string] , identifier[next_token] )
keyword[else] :
keyword[if] identifier[cur_token] [ literal[string] ] keyword[is] identifier[TT] . identifier[hyphen] :
keyword[return] identifier[data]
keyword[else] :
keyword[raise] identifier[ParseError] ( literal[string] , identifier[cur_token] )
keyword[if] identifier[self] . identifier[tokens] [ identifier[self] . identifier[_cur_position] - literal[int] ][ literal[string] ] keyword[is] keyword[not] identifier[TT] . identifier[lbreak] :
identifier[self] . identifier[_skip_whitespace] ()
identifier[self] . identifier[_skip_newlines] ()
identifier[new_indent] = literal[int]
identifier[temp_position] = identifier[self] . identifier[_cur_position]
keyword[while] (
identifier[temp_position] < identifier[self] . identifier[num_tokens] - literal[int] keyword[and]
identifier[self] . identifier[tokens] [ identifier[temp_position] ][ literal[string] ] keyword[is] identifier[TT] . identifier[ws]
):
identifier[temp_position] += literal[int]
identifier[new_indent] += literal[int]
keyword[if] identifier[indent] == literal[int] keyword[or] identifier[new_indent] < identifier[indent] :
keyword[return] identifier[data]
keyword[else] :
keyword[raise] identifier[Exception] (
literal[string]
literal[string] . identifier[format] (
identifier[cur_token] [ literal[string] ]
)
) | def _parse_key(self, indent):
"""Parse a series of key-value pairs."""
data = {}
new_indent = indent
while not self._finished and new_indent == indent:
self._skip_whitespace()
cur_token = self._cur_token
if cur_token['type'] is TT.id:
key = cur_token['value']
next_token = self._nth_token()
if next_token['type'] is TT.colon:
self._increment(2) # move past the ':'
# whitespace before a newline is not important
# whitespace after a newline is important
self._skip_whitespace()
self._skip_newlines()
data[key] = self._parse_value() # depends on [control=['if'], data=[]]
else:
raise ParseError("':'", next_token) # depends on [control=['if'], data=[]]
elif cur_token['type'] is TT.hyphen:
return data # depends on [control=['if'], data=[]]
else:
raise ParseError("identifier or '-'", cur_token)
if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
# skip whitespace at the end of the line
self._skip_whitespace()
self._skip_newlines() # depends on [control=['if'], data=[]]
# find next indentation level without incrementing
new_indent = 0
temp_position = self._cur_position
while temp_position < self.num_tokens - 1 and self.tokens[temp_position]['type'] is TT.ws:
temp_position += 1
new_indent += 1 # depends on [control=['while'], data=[]] # depends on [control=['while'], data=[]]
if indent == 0 or new_indent < indent:
return data # depends on [control=['if'], data=[]]
else:
raise Exception('Parser screwed up, increase of indent on line {} should have been caught by _parse_value().'.format(cur_token['line'])) |
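The indentation lookahead at the end of _parse_key() in isolation: it counts upcoming whitespace tokens without consuming them, which is what lets the caller decide whether the block continues.

def peek_indent(tokens, position):
    indent = 0
    while position < len(tokens) - 1 and tokens[position]['type'] == 'ws':
        position += 1
        indent += 1
    return indent

tokens = [{'type': 'ws'}, {'type': 'ws'}, {'type': 'id'}, {'type': 'colon'}]
print(peek_indent(tokens, 0))  # 2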
def zscale(image, nsamples=1000, contrast=0.25):
"""Implement IRAF zscale algorithm
nsamples=1000 and contrast=0.25 are the IRAF display task defaults
image is a 2-d numpy array
returns (z1, z2)
"""
# Sample the image
samples = zsc_sample(image, nsamples)
return zscale_samples(samples, contrast=contrast) | def function[zscale, parameter[image, nsamples, contrast]]:
constant[Implement IRAF zscale algorithm
nsamples=1000 and contrast=0.25 are the IRAF display task defaults
image is a 2-d numpy array
returns (z1, z2)
]
variable[samples] assign[=] call[name[zsc_sample], parameter[name[image], name[nsamples]]]
return[call[name[zscale_samples], parameter[name[samples]]]] | keyword[def] identifier[zscale] ( identifier[image] , identifier[nsamples] = literal[int] , identifier[contrast] = literal[int] ):
literal[string]
identifier[samples] = identifier[zsc_sample] ( identifier[image] , identifier[nsamples] )
keyword[return] identifier[zscale_samples] ( identifier[samples] , identifier[contrast] = identifier[contrast] ) | def zscale(image, nsamples=1000, contrast=0.25):
"""Implement IRAF zscale algorithm
nsamples=1000 and contrast=0.25 are the IRAF display task defaults
image is a 2-d numpy array
returns (z1, z2)
"""
# Sample the image
samples = zsc_sample(image, nsamples)
return zscale_samples(samples, contrast=contrast) |
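Hypothetical usage on a synthetic sky image, assuming zsc_sample() and zscale_samples() are available from the same IRAF port this function belongs to:

import numpy as np

image = np.random.default_rng(0).normal(loc=100.0, scale=5.0, size=(64, 64))
z1, z2 = zscale(image, nsamples=600, contrast=0.25)
print(z1, z2)  # display cut levels that bracket the sky level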
def list_buckets(self, offset=0, limit=100):
"""Limit breaks above 100"""
# TODO: If limit > 100, do multiple fetches
if limit > 100:
raise Exception("Zenobase can't handle limits over 100")
return self._get("/users/{}/buckets/?order=label&offset={}&limit={}".format(self.client_id, offset, limit)) | def function[list_buckets, parameter[self, offset, limit]]:
    constant[List buckets. The Zenobase API can't handle limits above 100.]
if compare[name[limit] greater[>] constant[100]] begin[:]
<ast.Raise object at 0x7da20e961270>
return[call[name[self]._get, parameter[call[constant[/users/{}/buckets/?order=label&offset={}&limit={}].format, parameter[name[self].client_id, name[offset], name[limit]]]]]] | keyword[def] identifier[list_buckets] ( identifier[self] , identifier[offset] = literal[int] , identifier[limit] = literal[int] ):
literal[string]
keyword[if] identifier[limit] > literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] identifier[self] . identifier[_get] ( literal[string] . identifier[format] ( identifier[self] . identifier[client_id] , identifier[offset] , identifier[limit] )) | def list_buckets(self, offset=0, limit=100):
"""Limit breaks above 100"""
# TODO: If limit > 100, do multiple fetches
if limit > 100:
raise Exception("Zenobase can't handle limits over 100") # depends on [control=['if'], data=[]]
return self._get('/users/{}/buckets/?order=label&offset={}&limit={}'.format(self.client_id, offset, limit)) |
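A sketch of the multi-fetch loop the TODO above describes, assuming each page response carries a 'buckets' list (the exact response shape is not shown in the snippet):

def list_all_buckets(client, total):
    buckets = []
    for offset in range(0, total, 100):
        page = client.list_buckets(offset=offset, limit=min(100, total - offset))
        buckets.extend(page['buckets'])  # assumed response key
    return buckets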
def __add_delayed_assert_failure(self):
""" Add a delayed_assert failure into a list for future processing. """
current_url = self.driver.current_url
message = self.__get_exception_message()
self.__delayed_assert_failures.append(
"CHECK #%s: (%s)\n %s" % (
self.__delayed_assert_count, current_url, message)) | def function[__add_delayed_assert_failure, parameter[self]]:
constant[ Add a delayed_assert failure into a list for future processing. ]
variable[current_url] assign[=] name[self].driver.current_url
variable[message] assign[=] call[name[self].__get_exception_message, parameter[]]
call[name[self].__delayed_assert_failures.append, parameter[binary_operation[constant[CHECK #%s: (%s)
%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1bbae30>, <ast.Name object at 0x7da1b1bbbb80>, <ast.Name object at 0x7da1b1bbb4c0>]]]]] | keyword[def] identifier[__add_delayed_assert_failure] ( identifier[self] ):
literal[string]
identifier[current_url] = identifier[self] . identifier[driver] . identifier[current_url]
identifier[message] = identifier[self] . identifier[__get_exception_message] ()
identifier[self] . identifier[__delayed_assert_failures] . identifier[append] (
literal[string] %(
identifier[self] . identifier[__delayed_assert_count] , identifier[current_url] , identifier[message] )) | def __add_delayed_assert_failure(self):
""" Add a delayed_assert failure into a list for future processing. """
current_url = self.driver.current_url
message = self.__get_exception_message()
self.__delayed_assert_failures.append('CHECK #%s: (%s)\n %s' % (self.__delayed_assert_count, current_url, message)) |
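The delayed-assert pattern without Selenium: record a formatted failure per check instead of raising, then report everything at the end of the test.

failures = []

def delayed_assert(condition, message, check_number, current_url='about:blank'):
    if not condition:
        failures.append('CHECK #%s: (%s)\n %s' % (check_number, current_url, message))

delayed_assert(1 + 1 == 2, 'math is broken', 1)
delayed_assert('foo' == 'bar', 'strings differ', 2)
print(failures)  # only CHECK #2 was recorded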
def eval_basis(self, x, regularize=True):
"""
basis_mat = C.eval_basis(x)
Evaluates self's basis functions on x and returns them stacked
in a matrix. basis_mat[i,j] gives basis function i evaluated at
x[j,:].
"""
if regularize:
x = regularize_array(x)
out = zeros((self.n, x.shape[0]), dtype=float, order='F')
for i in xrange(self.n):
out[i] = self.basis[i](x, **self.params)
return out | def function[eval_basis, parameter[self, x, regularize]]:
constant[
basis_mat = C.eval_basis(x)
Evaluates self's basis functions on x and returns them stacked
in a matrix. basis_mat[i,j] gives basis function i evaluated at
x[j,:].
]
if name[regularize] begin[:]
variable[x] assign[=] call[name[regularize_array], parameter[name[x]]]
variable[out] assign[=] call[name[zeros], parameter[tuple[[<ast.Attribute object at 0x7da1b17ab0d0>, <ast.Subscript object at 0x7da1b17ab5b0>]]]]
for taget[name[i]] in starred[call[name[xrange], parameter[name[self].n]]] begin[:]
call[name[out]][name[i]] assign[=] call[call[name[self].basis][name[i]], parameter[name[x]]]
return[name[out]] | keyword[def] identifier[eval_basis] ( identifier[self] , identifier[x] , identifier[regularize] = keyword[True] ):
literal[string]
keyword[if] identifier[regularize] :
identifier[x] = identifier[regularize_array] ( identifier[x] )
identifier[out] = identifier[zeros] (( identifier[self] . identifier[n] , identifier[x] . identifier[shape] [ literal[int] ]), identifier[dtype] = identifier[float] , identifier[order] = literal[string] )
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[self] . identifier[n] ):
identifier[out] [ identifier[i] ]= identifier[self] . identifier[basis] [ identifier[i] ]( identifier[x] ,** identifier[self] . identifier[params] )
keyword[return] identifier[out] | def eval_basis(self, x, regularize=True):
"""
basis_mat = C.eval_basis(x)
Evaluates self's basis functions on x and returns them stacked
in a matrix. basis_mat[i,j] gives basis function i evaluated at
x[j,:].
"""
if regularize:
x = regularize_array(x) # depends on [control=['if'], data=[]]
out = zeros((self.n, x.shape[0]), dtype=float, order='F')
for i in xrange(self.n):
out[i] = self.basis[i](x, **self.params) # depends on [control=['for'], data=['i']]
return out |
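The stacked-evaluation idea with plain NumPy (no GP machinery): row i of the output holds basis function i evaluated at every input point.

import numpy as np

basis = [lambda x: np.ones(len(x)), lambda x: x[:, 0], lambda x: x[:, 0] ** 2]
x = np.array([[0.0], [1.0], [2.0]])
out = np.zeros((len(basis), x.shape[0]), dtype=float, order='F')
for i, f in enumerate(basis):
    out[i] = f(x)
print(out)  # rows are 1, x and x**2 evaluated at x = 0, 1, 2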
def connect(self):
"""Connects the client object to redis.
It's safe to use this method even if you are already connected.
Note: this method is useless with autoconnect mode (default).
Returns:
a Future object with True as result if the connection was ok.
"""
if self.is_connected():
raise tornado.gen.Return(True)
cb1 = self._read_callback
cb2 = self._close_callback
self.__callback_queue = collections.deque()
self._reply_list = []
self.__reader = hiredis.Reader(replyError=ClientError)
kwargs = self.connection_kwargs
self.__connection = Connection(cb1, cb2, **kwargs)
connection_status = yield self.__connection.connect()
if connection_status is not True:
# nothing left to do here, return
raise tornado.gen.Return(False)
if self.password is not None:
authentication_status = yield self._call('AUTH', self.password)
if authentication_status != b'OK':
# incorrect password, return back the result
LOG.warning("impossible to connect: bad password")
self.__connection.disconnect()
raise tornado.gen.Return(False)
if self.db != 0:
db_status = yield self._call('SELECT', self.db)
if db_status != b'OK':
LOG.warning("can't select db %s", self.db)
raise tornado.gen.Return(False)
raise tornado.gen.Return(True) | def function[connect, parameter[self]]:
constant[Connects the client object to redis.
It's safe to use this method even if you are already connected.
Note: this method is useless with autoconnect mode (default).
Returns:
a Future object with True as result if the connection was ok.
]
if call[name[self].is_connected, parameter[]] begin[:]
<ast.Raise object at 0x7da18f00ee00>
variable[cb1] assign[=] name[self]._read_callback
variable[cb2] assign[=] name[self]._close_callback
name[self].__callback_queue assign[=] call[name[collections].deque, parameter[]]
name[self]._reply_list assign[=] list[[]]
name[self].__reader assign[=] call[name[hiredis].Reader, parameter[]]
variable[kwargs] assign[=] name[self].connection_kwargs
name[self].__connection assign[=] call[name[Connection], parameter[name[cb1], name[cb2]]]
variable[connection_status] assign[=] <ast.Yield object at 0x7da18f00f130>
if compare[name[connection_status] is_not constant[True]] begin[:]
<ast.Raise object at 0x7da18f00f280>
if compare[name[self].password is_not constant[None]] begin[:]
variable[authentication_status] assign[=] <ast.Yield object at 0x7da18f00cc40>
if compare[name[authentication_status] not_equal[!=] constant[b'OK']] begin[:]
call[name[LOG].warning, parameter[constant[impossible to connect: bad password]]]
call[name[self].__connection.disconnect, parameter[]]
<ast.Raise object at 0x7da18f00d540>
if compare[name[self].db not_equal[!=] constant[0]] begin[:]
variable[db_status] assign[=] <ast.Yield object at 0x7da18f00ccd0>
if compare[name[db_status] not_equal[!=] constant[b'OK']] begin[:]
call[name[LOG].warning, parameter[constant[can't select db %s], name[self].db]]
<ast.Raise object at 0x7da207f02fb0>
<ast.Raise object at 0x7da207f024a0> | keyword[def] identifier[connect] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[is_connected] ():
keyword[raise] identifier[tornado] . identifier[gen] . identifier[Return] ( keyword[True] )
identifier[cb1] = identifier[self] . identifier[_read_callback]
identifier[cb2] = identifier[self] . identifier[_close_callback]
identifier[self] . identifier[__callback_queue] = identifier[collections] . identifier[deque] ()
identifier[self] . identifier[_reply_list] =[]
identifier[self] . identifier[__reader] = identifier[hiredis] . identifier[Reader] ( identifier[replyError] = identifier[ClientError] )
identifier[kwargs] = identifier[self] . identifier[connection_kwargs]
identifier[self] . identifier[__connection] = identifier[Connection] ( identifier[cb1] , identifier[cb2] ,** identifier[kwargs] )
identifier[connection_status] = keyword[yield] identifier[self] . identifier[__connection] . identifier[connect] ()
keyword[if] identifier[connection_status] keyword[is] keyword[not] keyword[True] :
keyword[raise] identifier[tornado] . identifier[gen] . identifier[Return] ( keyword[False] )
keyword[if] identifier[self] . identifier[password] keyword[is] keyword[not] keyword[None] :
identifier[authentication_status] = keyword[yield] identifier[self] . identifier[_call] ( literal[string] , identifier[self] . identifier[password] )
keyword[if] identifier[authentication_status] != literal[string] :
identifier[LOG] . identifier[warning] ( literal[string] )
identifier[self] . identifier[__connection] . identifier[disconnect] ()
keyword[raise] identifier[tornado] . identifier[gen] . identifier[Return] ( keyword[False] )
keyword[if] identifier[self] . identifier[db] != literal[int] :
identifier[db_status] = keyword[yield] identifier[self] . identifier[_call] ( literal[string] , identifier[self] . identifier[db] )
keyword[if] identifier[db_status] != literal[string] :
identifier[LOG] . identifier[warning] ( literal[string] , identifier[self] . identifier[db] )
keyword[raise] identifier[tornado] . identifier[gen] . identifier[Return] ( keyword[False] )
keyword[raise] identifier[tornado] . identifier[gen] . identifier[Return] ( keyword[True] ) | def connect(self):
"""Connects the client object to redis.
It's safe to use this method even if you are already connected.
Note: this method is useless with autoconnect mode (default).
Returns:
a Future object with True as result if the connection was ok.
"""
if self.is_connected():
raise tornado.gen.Return(True) # depends on [control=['if'], data=[]]
cb1 = self._read_callback
cb2 = self._close_callback
self.__callback_queue = collections.deque()
self._reply_list = []
self.__reader = hiredis.Reader(replyError=ClientError)
kwargs = self.connection_kwargs
self.__connection = Connection(cb1, cb2, **kwargs)
connection_status = (yield self.__connection.connect())
if connection_status is not True:
# nothing left to do here, return
raise tornado.gen.Return(False) # depends on [control=['if'], data=[]]
if self.password is not None:
authentication_status = (yield self._call('AUTH', self.password))
if authentication_status != b'OK':
# incorrect password, return back the result
LOG.warning('impossible to connect: bad password')
self.__connection.disconnect()
raise tornado.gen.Return(False) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.db != 0:
db_status = (yield self._call('SELECT', self.db))
if db_status != b'OK':
LOG.warning("can't select db %s", self.db)
raise tornado.gen.Return(False) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
raise tornado.gen.Return(True) |
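A dependency-free sketch of the handshake order the coroutine enforces: connect first, AUTH only when a password is configured, SELECT only for a non-zero db, and any failure short-circuits to False; the call argument stands in for the real Redis round trip.

def handshake(connected, password, db, call):
    if not connected:
        return False
    if password is not None and call('AUTH', password) != b'OK':
        return False
    if db != 0 and call('SELECT', db) != b'OK':
        return False
    return True

print(handshake(True, 's3cret', 2, lambda cmd, arg: b'OK'))      # True
print(handshake(True, 'wrong', 0, lambda cmd, arg: b'ERR bad'))  # False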
def _enforce_bounds(self, x):
""""Enforce the bounds on x if only infinitesimal violations occurs"""
assert len(x) == len(self.bounds)
x_enforced = []
for x_i, (lb, ub) in zip(x, self.bounds):
if x_i < lb:
if x_i > lb - (ub-lb)/1e10:
x_enforced.append(lb)
else:
x_enforced.append(x_i)
elif x_i > ub:
if x_i < ub + (ub-lb)/1e10:
x_enforced.append(ub)
else:
x_enforced.append(x_i)
else:
x_enforced.append(x_i)
return np.array(x_enforced) | def function[_enforce_bounds, parameter[self, x]]:
constant["Enforce the bounds on x if only infinitesimal violations occurs]
assert[compare[call[name[len], parameter[name[x]]] equal[==] call[name[len], parameter[name[self].bounds]]]]
variable[x_enforced] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0c50dc0>, <ast.Tuple object at 0x7da1b0c53df0>]]] in starred[call[name[zip], parameter[name[x], name[self].bounds]]] begin[:]
if compare[name[x_i] less[<] name[lb]] begin[:]
if compare[name[x_i] greater[>] binary_operation[name[lb] - binary_operation[binary_operation[name[ub] - name[lb]] / constant[10000000000.0]]]] begin[:]
call[name[x_enforced].append, parameter[name[lb]]]
return[call[name[np].array, parameter[name[x_enforced]]]] | keyword[def] identifier[_enforce_bounds] ( identifier[self] , identifier[x] ):
literal[string]
keyword[assert] identifier[len] ( identifier[x] )== identifier[len] ( identifier[self] . identifier[bounds] )
identifier[x_enforced] =[]
keyword[for] identifier[x_i] ,( identifier[lb] , identifier[ub] ) keyword[in] identifier[zip] ( identifier[x] , identifier[self] . identifier[bounds] ):
keyword[if] identifier[x_i] < identifier[lb] :
keyword[if] identifier[x_i] > identifier[lb] -( identifier[ub] - identifier[lb] )/ literal[int] :
identifier[x_enforced] . identifier[append] ( identifier[lb] )
keyword[else] :
identifier[x_enforced] . identifier[append] ( identifier[x_i] )
keyword[elif] identifier[x_i] > identifier[ub] :
keyword[if] identifier[x_i] < identifier[ub] +( identifier[ub] - identifier[lb] )/ literal[int] :
identifier[x_enforced] . identifier[append] ( identifier[ub] )
keyword[else] :
identifier[x_enforced] . identifier[append] ( identifier[x_i] )
keyword[else] :
identifier[x_enforced] . identifier[append] ( identifier[x_i] )
keyword[return] identifier[np] . identifier[array] ( identifier[x_enforced] ) | def _enforce_bounds(self, x):
""""Enforce the bounds on x if only infinitesimal violations occurs"""
assert len(x) == len(self.bounds)
x_enforced = []
for (x_i, (lb, ub)) in zip(x, self.bounds):
if x_i < lb:
if x_i > lb - (ub - lb) / 10000000000.0:
x_enforced.append(lb) # depends on [control=['if'], data=[]]
else:
x_enforced.append(x_i) # depends on [control=['if'], data=['x_i', 'lb']]
elif x_i > ub:
if x_i < ub + (ub - lb) / 10000000000.0:
x_enforced.append(ub) # depends on [control=['if'], data=[]]
else:
x_enforced.append(x_i) # depends on [control=['if'], data=['x_i', 'ub']]
else:
x_enforced.append(x_i) # depends on [control=['for'], data=[]]
return np.array(x_enforced) |
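The same tolerance-aware snapping, vectorized: a value is moved onto a bound only when it violates that bound by less than (ub - lb) / 1e10, and genuine violations pass through unchanged.

import numpy as np

def enforce_bounds(x, bounds):
    x = np.asarray(x, dtype=float)
    lb, ub = np.array(bounds, dtype=float).T
    tol = (ub - lb) / 1e10
    genuine_violation = (x < lb - tol) | (x > ub + tol)
    # snap infinitesimal violations, keep genuine ones untouched
    return np.where(genuine_violation, x, np.clip(x, lb, ub))

print(enforce_bounds([1.0 - 1e-13, 5.0], [(1.0, 2.0), (1.0, 2.0)]))
# [1. 5.] -> first value snapped to its lower bound, second kept as a real violation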
def _decompress_data(self, data, options):
'''Decompress data'''
compression_algorithm_id = options['compression_algorithm_id']
if compression_algorithm_id not in self.compression_algorithms:
raise Exception('Unknown compression algorithm id: %d'
% compression_algorithm_id)
compression_algorithm = \
self.compression_algorithms[compression_algorithm_id]
algorithm = self._get_algorithm_info(compression_algorithm)
data = self._decode(data, algorithm)
return data | def function[_decompress_data, parameter[self, data, options]]:
constant[Decompress data]
variable[compression_algorithm_id] assign[=] call[name[options]][constant[compression_algorithm_id]]
if compare[name[compression_algorithm_id] <ast.NotIn object at 0x7da2590d7190> name[self].compression_algorithms] begin[:]
<ast.Raise object at 0x7da2054a4130>
variable[compression_algorithm] assign[=] call[name[self].compression_algorithms][name[compression_algorithm_id]]
variable[algorithm] assign[=] call[name[self]._get_algorithm_info, parameter[name[compression_algorithm]]]
variable[data] assign[=] call[name[self]._decode, parameter[name[data], name[algorithm]]]
return[name[data]] | keyword[def] identifier[_decompress_data] ( identifier[self] , identifier[data] , identifier[options] ):
literal[string]
identifier[compression_algorithm_id] = identifier[options] [ literal[string] ]
keyword[if] identifier[compression_algorithm_id] keyword[not] keyword[in] identifier[self] . identifier[compression_algorithms] :
keyword[raise] identifier[Exception] ( literal[string]
% identifier[compression_algorithm_id] )
identifier[compression_algorithm] = identifier[self] . identifier[compression_algorithms] [ identifier[compression_algorithm_id] ]
identifier[algorithm] = identifier[self] . identifier[_get_algorithm_info] ( identifier[compression_algorithm] )
identifier[data] = identifier[self] . identifier[_decode] ( identifier[data] , identifier[algorithm] )
keyword[return] identifier[data] | def _decompress_data(self, data, options):
"""Decompress data"""
compression_algorithm_id = options['compression_algorithm_id']
if compression_algorithm_id not in self.compression_algorithms:
raise Exception('Unknown compression algorithm id: %d' % compression_algorithm_id) # depends on [control=['if'], data=['compression_algorithm_id']]
compression_algorithm = self.compression_algorithms[compression_algorithm_id]
algorithm = self._get_algorithm_info(compression_algorithm)
data = self._decode(data, algorithm)
return data |
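A minimal version of the id-to-algorithm dispatch, with zlib as the only registered decompressor; the real class resolves algorithm metadata through _get_algorithm_info() and _decode() instead of calling the codec directly.

import zlib

compression_algorithms = {1: zlib.decompress}

def decompress_data(data, options):
    algorithm_id = options['compression_algorithm_id']
    if algorithm_id not in compression_algorithms:
        raise Exception('Unknown compression algorithm id: %d' % algorithm_id)
    return compression_algorithms[algorithm_id](data)

blob = zlib.compress(b'payload')
print(decompress_data(blob, {'compression_algorithm_id': 1}))  # b'payload'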
def _ParseCredentialOptions(self, options):
"""Parses the credential options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
credentials = getattr(options, 'credentials', [])
if not isinstance(credentials, list):
raise errors.BadConfigOption('Unsupported credentials value.')
for credential_string in credentials:
credential_type, _, credential_data = credential_string.partition(':')
if not credential_type or not credential_data:
raise errors.BadConfigOption(
'Badly formatted credential: {0:s}.'.format(credential_string))
if credential_type not in self._SUPPORTED_CREDENTIAL_TYPES:
raise errors.BadConfigOption(
'Unsupported credential type for: {0:s}.'.format(
credential_string))
if credential_type in self._BINARY_DATA_CREDENTIAL_TYPES:
try:
credential_data = credential_data.decode('hex')
except TypeError:
raise errors.BadConfigOption(
'Unsupported credential data for: {0:s}.'.format(
credential_string))
self._credentials.append((credential_type, credential_data)) | def function[_ParseCredentialOptions, parameter[self, options]]:
constant[Parses the credential options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
]
variable[credentials] assign[=] call[name[getattr], parameter[name[options], constant[credentials], list[[]]]]
if <ast.UnaryOp object at 0x7da18dc9bf10> begin[:]
<ast.Raise object at 0x7da18dc99570>
for taget[name[credential_string]] in starred[name[credentials]] begin[:]
<ast.Tuple object at 0x7da20c795fc0> assign[=] call[name[credential_string].partition, parameter[constant[:]]]
if <ast.BoolOp object at 0x7da20c794730> begin[:]
<ast.Raise object at 0x7da20c794370>
if compare[name[credential_type] <ast.NotIn object at 0x7da2590d7190> name[self]._SUPPORTED_CREDENTIAL_TYPES] begin[:]
<ast.Raise object at 0x7da20c794f70>
if compare[name[credential_type] in name[self]._BINARY_DATA_CREDENTIAL_TYPES] begin[:]
<ast.Try object at 0x7da2045661d0>
call[name[self]._credentials.append, parameter[tuple[[<ast.Name object at 0x7da20c6a8460>, <ast.Name object at 0x7da20c6aaa10>]]]] | keyword[def] identifier[_ParseCredentialOptions] ( identifier[self] , identifier[options] ):
literal[string]
identifier[credentials] = identifier[getattr] ( identifier[options] , literal[string] ,[])
keyword[if] keyword[not] identifier[isinstance] ( identifier[credentials] , identifier[list] ):
keyword[raise] identifier[errors] . identifier[BadConfigOption] ( literal[string] )
keyword[for] identifier[credential_string] keyword[in] identifier[credentials] :
identifier[credential_type] , identifier[_] , identifier[credential_data] = identifier[credential_string] . identifier[partition] ( literal[string] )
keyword[if] keyword[not] identifier[credential_type] keyword[or] keyword[not] identifier[credential_data] :
keyword[raise] identifier[errors] . identifier[BadConfigOption] (
literal[string] . identifier[format] ( identifier[credential_string] ))
keyword[if] identifier[credential_type] keyword[not] keyword[in] identifier[self] . identifier[_SUPPORTED_CREDENTIAL_TYPES] :
keyword[raise] identifier[errors] . identifier[BadConfigOption] (
literal[string] . identifier[format] (
identifier[credential_string] ))
keyword[if] identifier[credential_type] keyword[in] identifier[self] . identifier[_BINARY_DATA_CREDENTIAL_TYPES] :
keyword[try] :
identifier[credential_data] = identifier[credential_data] . identifier[decode] ( literal[string] )
keyword[except] identifier[TypeError] :
keyword[raise] identifier[errors] . identifier[BadConfigOption] (
literal[string] . identifier[format] (
identifier[credential_string] ))
identifier[self] . identifier[_credentials] . identifier[append] (( identifier[credential_type] , identifier[credential_data] )) | def _ParseCredentialOptions(self, options):
"""Parses the credential options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
credentials = getattr(options, 'credentials', [])
if not isinstance(credentials, list):
raise errors.BadConfigOption('Unsupported credentials value.') # depends on [control=['if'], data=[]]
for credential_string in credentials:
(credential_type, _, credential_data) = credential_string.partition(':')
if not credential_type or not credential_data:
raise errors.BadConfigOption('Badly formatted credential: {0:s}.'.format(credential_string)) # depends on [control=['if'], data=[]]
if credential_type not in self._SUPPORTED_CREDENTIAL_TYPES:
raise errors.BadConfigOption('Unsupported credential type for: {0:s}.'.format(credential_string)) # depends on [control=['if'], data=[]]
if credential_type in self._BINARY_DATA_CREDENTIAL_TYPES:
try:
credential_data = credential_data.decode('hex') # depends on [control=['try'], data=[]]
except TypeError:
raise errors.BadConfigOption('Unsupported credential data for: {0:s}.'.format(credential_string)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
self._credentials.append((credential_type, credential_data)) # depends on [control=['for'], data=['credential_string']] |
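The credential-string parsing in isolation, with illustrative type sets and the Python 3 spelling of the hex decode (the .decode('hex') call above only exists on Python 2):

import binascii

SUPPORTED_TYPES = frozenset(['password', 'recovery_password', 'startup_key'])
BINARY_TYPES = frozenset(['startup_key'])

def parse_credential(credential_string):
    credential_type, _, credential_data = credential_string.partition(':')
    if not credential_type or not credential_data:
        raise ValueError('Badly formatted credential: {0:s}.'.format(credential_string))
    if credential_type not in SUPPORTED_TYPES:
        raise ValueError('Unsupported credential type for: {0:s}.'.format(credential_string))
    if credential_type in BINARY_TYPES:
        credential_data = binascii.unhexlify(credential_data)
    return credential_type, credential_data

print(parse_credential('password:hunter2'))
print(parse_credential('startup_key:deadbeef'))  # data becomes b'\xde\xad\xbe\xef'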
def exists_evaluator(self, index):
""" Evaluate the given exists match condition for the user attributes.
Args:
index: Index of the condition to be evaluated.
Returns:
Boolean: True if the user attributes have a non-null value for the given condition,
otherwise False.
"""
attr_name = self.condition_data[index][0]
return self.attributes.get(attr_name) is not None | def function[exists_evaluator, parameter[self, index]]:
constant[ Evaluate the given exists match condition for the user attributes.
Args:
index: Index of the condition to be evaluated.
Returns:
Boolean: True if the user attributes have a non-null value for the given condition,
otherwise False.
]
variable[attr_name] assign[=] call[call[name[self].condition_data][name[index]]][constant[0]]
return[compare[call[name[self].attributes.get, parameter[name[attr_name]]] is_not constant[None]]] | keyword[def] identifier[exists_evaluator] ( identifier[self] , identifier[index] ):
literal[string]
identifier[attr_name] = identifier[self] . identifier[condition_data] [ identifier[index] ][ literal[int] ]
keyword[return] identifier[self] . identifier[attributes] . identifier[get] ( identifier[attr_name] ) keyword[is] keyword[not] keyword[None] | def exists_evaluator(self, index):
""" Evaluate the given exists match condition for the user attributes.
Args:
index: Index of the condition to be evaluated.
Returns:
Boolean: True if the user attributes have a non-null value for the given condition,
otherwise False.
"""
attr_name = self.condition_data[index][0]
return self.attributes.get(attr_name) is not None |
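The exists match in isolation: a condition passes only when the named attribute is present with a non-null value; the condition tuple below mimics the (name, value, type, match) layout implied by index [0].

condition_data = [('browser', None, 'custom_attribute', 'exists')]
attributes = {'browser': 'chrome', 'plan': None}

def exists_evaluator(index):
    attr_name = condition_data[index][0]
    return attributes.get(attr_name) is not None

print(exists_evaluator(0))  # True; 'plan' (null) or a missing key would give False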
def cut_cuboid(
self,
a=20,
b=None,
c=None,
origin=None,
outside_sliced=True,
preserve_bonds=False):
"""Cut a cuboid specified by edge and radius.
Args:
a (float): Value of the a edge.
b (float): Value of the b edge. Takes value of a if None.
c (float): Value of the c edge. Takes value of a if None.
origin (list): Please note that you can also pass an
integer. In this case it is interpreted as the index
of the atom which is taken as origin.
            outside_sliced (bool): Atoms outside/inside the cuboid are
cut away.
preserve_bonds (bool): Do not cut covalent bonds.
Returns:
Cartesian:
"""
if origin is None:
origin = np.zeros(3)
elif pd.api.types.is_list_like(origin):
origin = np.array(origin, dtype='f8')
else:
origin = self.loc[origin, ['x', 'y', 'z']]
b = a if b is None else b
c = a if c is None else c
sides = np.array([a, b, c])
pos = self.loc[:, ['x', 'y', 'z']]
if outside_sliced:
molecule = self[((pos - origin) / (sides / 2)).max(axis=1) < 1.]
else:
molecule = self[((pos - origin) / (sides / 2)).max(axis=1) > 1.]
if preserve_bonds:
molecule = self._preserve_bonds(molecule)
return molecule | def function[cut_cuboid, parameter[self, a, b, c, origin, outside_sliced, preserve_bonds]]:
    constant[Cut a cuboid specified by edge lengths and origin.
Args:
a (float): Value of the a edge.
b (float): Value of the b edge. Takes value of a if None.
c (float): Value of the c edge. Takes value of a if None.
origin (list): Please note that you can also pass an
integer. In this case it is interpreted as the index
of the atom which is taken as origin.
        outside_sliced (bool): Atoms outside/inside the cuboid are
cut away.
preserve_bonds (bool): Do not cut covalent bonds.
Returns:
Cartesian:
]
if compare[name[origin] is constant[None]] begin[:]
variable[origin] assign[=] call[name[np].zeros, parameter[constant[3]]]
variable[b] assign[=] <ast.IfExp object at 0x7da1b287ba00>
variable[c] assign[=] <ast.IfExp object at 0x7da1b287a1a0>
variable[sides] assign[=] call[name[np].array, parameter[list[[<ast.Name object at 0x7da1b2878dc0>, <ast.Name object at 0x7da1b2878f70>, <ast.Name object at 0x7da1b287b880>]]]]
variable[pos] assign[=] call[name[self].loc][tuple[[<ast.Slice object at 0x7da1b2879c30>, <ast.List object at 0x7da1b287b580>]]]
if name[outside_sliced] begin[:]
variable[molecule] assign[=] call[name[self]][compare[call[binary_operation[binary_operation[name[pos] - name[origin]] / binary_operation[name[sides] / constant[2]]].max, parameter[]] less[<] constant[1.0]]]
if name[preserve_bonds] begin[:]
variable[molecule] assign[=] call[name[self]._preserve_bonds, parameter[name[molecule]]]
return[name[molecule]] | keyword[def] identifier[cut_cuboid] (
identifier[self] ,
identifier[a] = literal[int] ,
identifier[b] = keyword[None] ,
identifier[c] = keyword[None] ,
identifier[origin] = keyword[None] ,
identifier[outside_sliced] = keyword[True] ,
identifier[preserve_bonds] = keyword[False] ):
literal[string]
keyword[if] identifier[origin] keyword[is] keyword[None] :
identifier[origin] = identifier[np] . identifier[zeros] ( literal[int] )
keyword[elif] identifier[pd] . identifier[api] . identifier[types] . identifier[is_list_like] ( identifier[origin] ):
identifier[origin] = identifier[np] . identifier[array] ( identifier[origin] , identifier[dtype] = literal[string] )
keyword[else] :
identifier[origin] = identifier[self] . identifier[loc] [ identifier[origin] ,[ literal[string] , literal[string] , literal[string] ]]
identifier[b] = identifier[a] keyword[if] identifier[b] keyword[is] keyword[None] keyword[else] identifier[b]
identifier[c] = identifier[a] keyword[if] identifier[c] keyword[is] keyword[None] keyword[else] identifier[c]
identifier[sides] = identifier[np] . identifier[array] ([ identifier[a] , identifier[b] , identifier[c] ])
identifier[pos] = identifier[self] . identifier[loc] [:,[ literal[string] , literal[string] , literal[string] ]]
keyword[if] identifier[outside_sliced] :
identifier[molecule] = identifier[self] [(( identifier[pos] - identifier[origin] )/( identifier[sides] / literal[int] )). identifier[max] ( identifier[axis] = literal[int] )< literal[int] ]
keyword[else] :
identifier[molecule] = identifier[self] [(( identifier[pos] - identifier[origin] )/( identifier[sides] / literal[int] )). identifier[max] ( identifier[axis] = literal[int] )> literal[int] ]
keyword[if] identifier[preserve_bonds] :
identifier[molecule] = identifier[self] . identifier[_preserve_bonds] ( identifier[molecule] )
keyword[return] identifier[molecule] | def cut_cuboid(self, a=20, b=None, c=None, origin=None, outside_sliced=True, preserve_bonds=False):
"""Cut a cuboid specified by edge and radius.
Args:
a (float): Value of the a edge.
b (float): Value of the b edge. Takes value of a if None.
c (float): Value of the c edge. Takes value of a if None.
origin (list): Please note that you can also pass an
integer. In this case it is interpreted as the index
of the atom which is taken as origin.
            outside_sliced (bool): Atoms outside/inside the cuboid are
cut away.
preserve_bonds (bool): Do not cut covalent bonds.
Returns:
Cartesian:
"""
if origin is None:
origin = np.zeros(3) # depends on [control=['if'], data=['origin']]
elif pd.api.types.is_list_like(origin):
origin = np.array(origin, dtype='f8') # depends on [control=['if'], data=[]]
else:
origin = self.loc[origin, ['x', 'y', 'z']]
b = a if b is None else b
c = a if c is None else c
sides = np.array([a, b, c])
pos = self.loc[:, ['x', 'y', 'z']]
if outside_sliced:
molecule = self[((pos - origin) / (sides / 2)).max(axis=1) < 1.0] # depends on [control=['if'], data=[]]
else:
molecule = self[((pos - origin) / (sides / 2)).max(axis=1) > 1.0]
if preserve_bonds:
molecule = self._preserve_bonds(molecule) # depends on [control=['if'], data=[]]
return molecule |
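The cuboid membership test on bare NumPy arrays: with outside_sliced=True an atom is kept when every signed coordinate ratio stays below 1 (a fully symmetric box test would take np.abs() of the ratio first).

import numpy as np

pos = np.array([[0.0, 0.0, 0.0], [9.0, 0.0, 0.0], [11.0, 0.0, 0.0]])
origin = np.zeros(3)
sides = np.array([20.0, 20.0, 20.0])  # a = b = c = 20
inside = ((pos - origin) / (sides / 2)).max(axis=1) < 1.0
print(inside)  # [ True  True False]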
def _server_url(server):
"""
    Normalizes a given server string to a URL
>>> print(_server_url('a'))
http://a
>>> print(_server_url('a:9345'))
http://a:9345
>>> print(_server_url('https://a:9345'))
https://a:9345
>>> print(_server_url('https://a'))
https://a
>>> print(_server_url('demo.crate.io'))
http://demo.crate.io
"""
if not _HTTP_PAT.match(server):
server = 'http://%s' % server
parsed = urlparse(server)
url = '%s://%s' % (parsed.scheme, parsed.netloc)
return url | def function[_server_url, parameter[server]]:
constant[
Normalizes a given server string to a URL
>>> print(_server_url('a'))
http://a
>>> print(_server_url('a:9345'))
http://a:9345
>>> print(_server_url('https://a:9345'))
https://a:9345
>>> print(_server_url('https://a'))
https://a
>>> print(_server_url('demo.crate.io'))
http://demo.crate.io
]
if <ast.UnaryOp object at 0x7da1b1080280> begin[:]
variable[server] assign[=] binary_operation[constant[http://%s] <ast.Mod object at 0x7da2590d6920> name[server]]
variable[parsed] assign[=] call[name[urlparse], parameter[name[server]]]
variable[url] assign[=] binary_operation[constant[%s://%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1082710>, <ast.Attribute object at 0x7da1b1080070>]]]
return[name[url]] | keyword[def] identifier[_server_url] ( identifier[server] ):
literal[string]
keyword[if] keyword[not] identifier[_HTTP_PAT] . identifier[match] ( identifier[server] ):
identifier[server] = literal[string] % identifier[server]
identifier[parsed] = identifier[urlparse] ( identifier[server] )
identifier[url] = literal[string] %( identifier[parsed] . identifier[scheme] , identifier[parsed] . identifier[netloc] )
keyword[return] identifier[url] | def _server_url(server):
"""
    Normalizes a given server string to a URL
>>> print(_server_url('a'))
http://a
>>> print(_server_url('a:9345'))
http://a:9345
>>> print(_server_url('https://a:9345'))
https://a:9345
>>> print(_server_url('https://a'))
https://a
>>> print(_server_url('demo.crate.io'))
http://demo.crate.io
"""
if not _HTTP_PAT.match(server):
server = 'http://%s' % server # depends on [control=['if'], data=[]]
parsed = urlparse(server)
url = '%s://%s' % (parsed.scheme, parsed.netloc)
return url |
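_server_url() leans on two names not shown in the excerpt; plausible definitions that make the doctests above pass (assumed, not verified against the client's source):

import re
from urllib.parse import urlparse

_HTTP_PAT = re.compile(r'^https?://')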
def __load_predictions(self):
"""private method set the predictions attribute from:
mixed list of row names, matrix files and ndarrays
a single row name
an ascii file
        can be None if only interested in parameters.
"""
if self.prediction_arg is None:
self.__predictions = None
return
self.log("loading forecasts")
if not isinstance(self.prediction_arg, list):
self.prediction_arg = [self.prediction_arg]
row_names = []
vecs = []
mat = None
for arg in self.prediction_arg:
if isinstance(arg, Matrix):
# a vector
if arg.shape[1] == 1:
vecs.append(arg)
else:
if self.jco is not None:
assert arg.shape[0] == self.jco.shape[1],\
"linear_analysis.__load_predictions(): " +\
"multi-prediction matrix(npar,npred) not aligned " +\
"with jco(nobs,npar): " + str(arg.shape) +\
' ' + str(self.jco.shape)
#for pred_name in arg.row_names:
# vecs.append(arg.extract(row_names=pred_name).T)
mat = arg
elif isinstance(arg, str):
if arg.lower() in self.jco.row_names:
row_names.append(arg.lower())
else:
try:
pred_mat = self.__fromfile(arg,astype=Matrix)
except Exception as e:
raise Exception("forecast argument: "+arg+" not found in " +\
"jco row names and could not be " +\
"loaded from a file.")
# vector
if pred_mat.shape[1] == 1:
vecs.append(pred_mat)
else:
#for pred_name in pred_mat.row_names:
# vecs.append(pred_mat.get(row_names=pred_name))
if mat is None:
mat = pred_mat
else:
mat = mat.extend((pred_mat))
elif isinstance(arg, np.ndarray):
self.logger.warn("linear_analysis.__load_predictions(): " +
"instantiating prediction matrix from " +
"ndarray, can't verify alignment")
self.logger.warn("linear_analysis.__load_predictions(): " +
"instantiating prediction matrix from " +
"ndarray, generating generic prediction names")
pred_names = ["pred_{0}".format(i+1) for i in range(arg.shape[0])]
if self.jco:
names = self.jco.col_names
elif self.parcov:
names = self.parcov.col_names
else:
raise Exception("linear_analysis.__load_predictions(): " +
"ndarray passed for predicitons " +
"requires jco or parcov to get " +
"parameter names")
if mat is None:
mat = Matrix(x=arg,row_names=pred_names,col_names=names).T
else:
mat = mat.extend(Matrix(x=arg,row_names=pred_names,col_names=names).T)
#for pred_name in pred_names:
# vecs.append(pred_matrix.get(row_names=pred_name).T)
else:
raise Exception("unrecognized predictions argument: " +
str(arg))
# turn vecs into a pyemu.Matrix
if len(vecs) > 0:
xs = vecs[0].x
for vec in vecs[1:]:
xs = xs.extend(vec.x)
names = [vec.col_names[0] for vec in vecs]
if mat is None:
mat = Matrix(x=xs,row_names=vecs[0].row_names,
col_names=names)
else:
mat = mat.extend(Matrix(x = np.array(xs),
row_names=vecs[0].row_names,
col_names=names))
if len(row_names) > 0:
extract = self.jco.extract(row_names=row_names).T
if mat is None:
mat = extract
else:
mat = mat.extend(extract)
#for row_name in row_names:
# vecs.append(extract.get(row_names=row_name).T)
# call obscov to load __obscov so that __obscov
        # (private) can be manipulated
self.__obscov.drop(row_names, axis=0)
self.__predictions = mat
try:
fnames = [fname for fname in self.forecast_names if fname in self.pst.nnz_obs_names]
except:
fnames = []
if len(fnames) > 0:
self.logger.warn("forecasts with non-zero weight in pst: {0}...".format(','.join(fnames)) +
"\n -> re-setting these forecast weights to zero...")
self.pst.observation_data.loc[fnames,"weight"] = 0.0
self.log("loading forecasts")
self.logger.statement("forecast names: {0}".format(','.join(mat.col_names)))
return self.__predictions | def function[__load_predictions, parameter[self]]:
    constant[private method to set the predictions attribute from:
mixed list of row names, matrix files and ndarrays
a single row name
an ascii file
    can be None if only interested in parameters.
]
if compare[name[self].prediction_arg is constant[None]] begin[:]
name[self].__predictions assign[=] constant[None]
return[None]
call[name[self].log, parameter[constant[loading forecasts]]]
if <ast.UnaryOp object at 0x7da1b2282800> begin[:]
name[self].prediction_arg assign[=] list[[<ast.Attribute object at 0x7da1b2282350>]]
variable[row_names] assign[=] list[[]]
variable[vecs] assign[=] list[[]]
variable[mat] assign[=] constant[None]
for taget[name[arg]] in starred[name[self].prediction_arg] begin[:]
if call[name[isinstance], parameter[name[arg], name[Matrix]]] begin[:]
if compare[call[name[arg].shape][constant[1]] equal[==] constant[1]] begin[:]
call[name[vecs].append, parameter[name[arg]]]
if compare[call[name[len], parameter[name[vecs]]] greater[>] constant[0]] begin[:]
variable[xs] assign[=] call[name[vecs]][constant[0]].x
for taget[name[vec]] in starred[call[name[vecs]][<ast.Slice object at 0x7da1b22800d0>]] begin[:]
variable[xs] assign[=] call[name[xs].extend, parameter[name[vec].x]]
variable[names] assign[=] <ast.ListComp object at 0x7da1b2280460>
if compare[name[mat] is constant[None]] begin[:]
variable[mat] assign[=] call[name[Matrix], parameter[]]
if compare[call[name[len], parameter[name[row_names]]] greater[>] constant[0]] begin[:]
variable[extract] assign[=] call[name[self].jco.extract, parameter[]].T
if compare[name[mat] is constant[None]] begin[:]
variable[mat] assign[=] name[extract]
call[name[self].__obscov.drop, parameter[name[row_names]]]
name[self].__predictions assign[=] name[mat]
<ast.Try object at 0x7da1b1d39390>
if compare[call[name[len], parameter[name[fnames]]] greater[>] constant[0]] begin[:]
call[name[self].logger.warn, parameter[binary_operation[call[constant[forecasts with non-zero weight in pst: {0}...].format, parameter[call[constant[,].join, parameter[name[fnames]]]]] + constant[
-> re-setting these forecast weights to zero...]]]]
call[name[self].pst.observation_data.loc][tuple[[<ast.Name object at 0x7da1b1d3a740>, <ast.Constant object at 0x7da1b1d3a7d0>]]] assign[=] constant[0.0]
call[name[self].log, parameter[constant[loading forecasts]]]
call[name[self].logger.statement, parameter[call[constant[forecast names: {0}].format, parameter[call[constant[,].join, parameter[name[mat].col_names]]]]]]
return[name[self].__predictions] | keyword[def] identifier[__load_predictions] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[prediction_arg] keyword[is] keyword[None] :
identifier[self] . identifier[__predictions] = keyword[None]
keyword[return]
identifier[self] . identifier[log] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[prediction_arg] , identifier[list] ):
identifier[self] . identifier[prediction_arg] =[ identifier[self] . identifier[prediction_arg] ]
identifier[row_names] =[]
identifier[vecs] =[]
identifier[mat] = keyword[None]
keyword[for] identifier[arg] keyword[in] identifier[self] . identifier[prediction_arg] :
keyword[if] identifier[isinstance] ( identifier[arg] , identifier[Matrix] ):
keyword[if] identifier[arg] . identifier[shape] [ literal[int] ]== literal[int] :
identifier[vecs] . identifier[append] ( identifier[arg] )
keyword[else] :
keyword[if] identifier[self] . identifier[jco] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[arg] . identifier[shape] [ literal[int] ]== identifier[self] . identifier[jco] . identifier[shape] [ literal[int] ], literal[string] + literal[string] + literal[string] + identifier[str] ( identifier[arg] . identifier[shape] )+ literal[string] + identifier[str] ( identifier[self] . identifier[jco] . identifier[shape] )
identifier[mat] = identifier[arg]
keyword[elif] identifier[isinstance] ( identifier[arg] , identifier[str] ):
keyword[if] identifier[arg] . identifier[lower] () keyword[in] identifier[self] . identifier[jco] . identifier[row_names] :
identifier[row_names] . identifier[append] ( identifier[arg] . identifier[lower] ())
keyword[else] :
keyword[try] :
identifier[pred_mat] = identifier[self] . identifier[__fromfile] ( identifier[arg] , identifier[astype] = identifier[Matrix] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[Exception] ( literal[string] + identifier[arg] + literal[string] + literal[string] + literal[string] )
keyword[if] identifier[pred_mat] . identifier[shape] [ literal[int] ]== literal[int] :
identifier[vecs] . identifier[append] ( identifier[pred_mat] )
keyword[else] :
keyword[if] identifier[mat] keyword[is] keyword[None] :
identifier[mat] = identifier[pred_mat]
keyword[else] :
identifier[mat] = identifier[mat] . identifier[extend] (( identifier[pred_mat] ))
keyword[elif] identifier[isinstance] ( identifier[arg] , identifier[np] . identifier[ndarray] ):
identifier[self] . identifier[logger] . identifier[warn] ( literal[string] +
literal[string] +
literal[string] )
identifier[self] . identifier[logger] . identifier[warn] ( literal[string] +
literal[string] +
literal[string] )
identifier[pred_names] =[ literal[string] . identifier[format] ( identifier[i] + literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[arg] . identifier[shape] [ literal[int] ])]
keyword[if] identifier[self] . identifier[jco] :
identifier[names] = identifier[self] . identifier[jco] . identifier[col_names]
keyword[elif] identifier[self] . identifier[parcov] :
identifier[names] = identifier[self] . identifier[parcov] . identifier[col_names]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] +
literal[string] +
literal[string] +
literal[string] )
keyword[if] identifier[mat] keyword[is] keyword[None] :
identifier[mat] = identifier[Matrix] ( identifier[x] = identifier[arg] , identifier[row_names] = identifier[pred_names] , identifier[col_names] = identifier[names] ). identifier[T]
keyword[else] :
identifier[mat] = identifier[mat] . identifier[extend] ( identifier[Matrix] ( identifier[x] = identifier[arg] , identifier[row_names] = identifier[pred_names] , identifier[col_names] = identifier[names] ). identifier[T] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] +
identifier[str] ( identifier[arg] ))
keyword[if] identifier[len] ( identifier[vecs] )> literal[int] :
identifier[xs] = identifier[vecs] [ literal[int] ]. identifier[x]
keyword[for] identifier[vec] keyword[in] identifier[vecs] [ literal[int] :]:
identifier[xs] = identifier[xs] . identifier[extend] ( identifier[vec] . identifier[x] )
identifier[names] =[ identifier[vec] . identifier[col_names] [ literal[int] ] keyword[for] identifier[vec] keyword[in] identifier[vecs] ]
keyword[if] identifier[mat] keyword[is] keyword[None] :
identifier[mat] = identifier[Matrix] ( identifier[x] = identifier[xs] , identifier[row_names] = identifier[vecs] [ literal[int] ]. identifier[row_names] ,
identifier[col_names] = identifier[names] )
keyword[else] :
identifier[mat] = identifier[mat] . identifier[extend] ( identifier[Matrix] ( identifier[x] = identifier[np] . identifier[array] ( identifier[xs] ),
identifier[row_names] = identifier[vecs] [ literal[int] ]. identifier[row_names] ,
identifier[col_names] = identifier[names] ))
keyword[if] identifier[len] ( identifier[row_names] )> literal[int] :
identifier[extract] = identifier[self] . identifier[jco] . identifier[extract] ( identifier[row_names] = identifier[row_names] ). identifier[T]
keyword[if] identifier[mat] keyword[is] keyword[None] :
identifier[mat] = identifier[extract]
keyword[else] :
identifier[mat] = identifier[mat] . identifier[extend] ( identifier[extract] )
identifier[self] . identifier[__obscov] . identifier[drop] ( identifier[row_names] , identifier[axis] = literal[int] )
identifier[self] . identifier[__predictions] = identifier[mat]
keyword[try] :
identifier[fnames] =[ identifier[fname] keyword[for] identifier[fname] keyword[in] identifier[self] . identifier[forecast_names] keyword[if] identifier[fname] keyword[in] identifier[self] . identifier[pst] . identifier[nnz_obs_names] ]
keyword[except] :
identifier[fnames] =[]
keyword[if] identifier[len] ( identifier[fnames] )> literal[int] :
identifier[self] . identifier[logger] . identifier[warn] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[fnames] ))+
literal[string] )
identifier[self] . identifier[pst] . identifier[observation_data] . identifier[loc] [ identifier[fnames] , literal[string] ]= literal[int]
identifier[self] . identifier[log] ( literal[string] )
identifier[self] . identifier[logger] . identifier[statement] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[mat] . identifier[col_names] )))
keyword[return] identifier[self] . identifier[__predictions] | def __load_predictions(self):
"""private method set the predictions attribute from:
mixed list of row names, matrix files and ndarrays
a single row name
an ascii file
can be None if only interested in parameters.
"""
if self.prediction_arg is None:
self.__predictions = None
return # depends on [control=['if'], data=[]]
self.log('loading forecasts')
if not isinstance(self.prediction_arg, list):
self.prediction_arg = [self.prediction_arg] # depends on [control=['if'], data=[]]
row_names = []
vecs = []
mat = None
for arg in self.prediction_arg:
if isinstance(arg, Matrix):
# a vector
if arg.shape[1] == 1:
vecs.append(arg) # depends on [control=['if'], data=[]]
else:
if self.jco is not None:
assert arg.shape[0] == self.jco.shape[1], 'linear_analysis.__load_predictions(): ' + 'multi-prediction matrix(npar,npred) not aligned ' + 'with jco(nobs,npar): ' + str(arg.shape) + ' ' + str(self.jco.shape) # depends on [control=['if'], data=[]]
#for pred_name in arg.row_names:
# vecs.append(arg.extract(row_names=pred_name).T)
mat = arg # depends on [control=['if'], data=[]]
elif isinstance(arg, str):
if arg.lower() in self.jco.row_names:
row_names.append(arg.lower()) # depends on [control=['if'], data=[]]
else:
try:
pred_mat = self.__fromfile(arg, astype=Matrix) # depends on [control=['try'], data=[]]
except Exception as e:
raise Exception('forecast argument: ' + arg + ' not found in ' + 'jco row names and could not be ' + 'loaded from a file.') # depends on [control=['except'], data=[]]
# vector
if pred_mat.shape[1] == 1:
vecs.append(pred_mat) # depends on [control=['if'], data=[]]
#for pred_name in pred_mat.row_names:
# vecs.append(pred_mat.get(row_names=pred_name))
elif mat is None:
mat = pred_mat # depends on [control=['if'], data=['mat']]
else:
mat = mat.extend(pred_mat) # depends on [control=['if'], data=[]]
elif isinstance(arg, np.ndarray):
self.logger.warn('linear_analysis.__load_predictions(): ' + 'instantiating prediction matrix from ' + "ndarray, can't verify alignment")
self.logger.warn('linear_analysis.__load_predictions(): ' + 'instantiating prediction matrix from ' + 'ndarray, generating generic prediction names')
pred_names = ['pred_{0}'.format(i + 1) for i in range(arg.shape[0])]
if self.jco:
names = self.jco.col_names # depends on [control=['if'], data=[]]
elif self.parcov:
names = self.parcov.col_names # depends on [control=['if'], data=[]]
else:
raise Exception('linear_analysis.__load_predictions(): ' + 'ndarray passed for predictions ' + 'requires jco or parcov to get ' + 'parameter names')
if mat is None:
mat = Matrix(x=arg, row_names=pred_names, col_names=names).T # depends on [control=['if'], data=['mat']]
else:
mat = mat.extend(Matrix(x=arg, row_names=pred_names, col_names=names).T) # depends on [control=['if'], data=[]]
else:
#for pred_name in pred_names:
# vecs.append(pred_matrix.get(row_names=pred_name).T)
raise Exception('unrecognized predictions argument: ' + str(arg)) # depends on [control=['for'], data=['arg']]
# turn vecs into a pyemu.Matrix
if len(vecs) > 0:
xs = vecs[0].x
for vec in vecs[1:]:
xs = xs.extend(vec.x) # depends on [control=['for'], data=['vec']]
names = [vec.col_names[0] for vec in vecs]
if mat is None:
mat = Matrix(x=xs, row_names=vecs[0].row_names, col_names=names) # depends on [control=['if'], data=['mat']]
else:
mat = mat.extend(Matrix(x=np.array(xs), row_names=vecs[0].row_names, col_names=names)) # depends on [control=['if'], data=[]]
if len(row_names) > 0:
extract = self.jco.extract(row_names=row_names).T
if mat is None:
mat = extract # depends on [control=['if'], data=['mat']]
else:
mat = mat.extend(extract)
#for row_name in row_names:
# vecs.append(extract.get(row_names=row_name).T)
# call obscov to load __obscov so that __obscov
# (private) can be manipulated
self.__obscov.drop(row_names, axis=0) # depends on [control=['if'], data=[]]
self.__predictions = mat
try:
fnames = [fname for fname in self.forecast_names if fname in self.pst.nnz_obs_names] # depends on [control=['try'], data=[]]
except:
fnames = [] # depends on [control=['except'], data=[]]
if len(fnames) > 0:
self.logger.warn('forecasts with non-zero weight in pst: {0}...'.format(','.join(fnames)) + '\n -> re-setting these forecast weights to zero...')
self.pst.observation_data.loc[fnames, 'weight'] = 0.0 # depends on [control=['if'], data=[]]
self.log('loading forecasts')
self.logger.statement('forecast names: {0}'.format(','.join(mat.col_names)))
return self.__predictions |
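The `__load_predictions` row above normalizes its `prediction_arg` input by wrapping a scalar into a list before dispatching on type. A minimal sketch of that normalize-then-dispatch pattern, independent of pyemu; `collect_predictions` and its branches are hypothetical stand-ins, not pyemu API:

import numpy as np

def collect_predictions(arg):
    # Wrap a scalar argument so downstream code can always iterate.
    if not isinstance(arg, list):
        arg = [arg]
    collected = []
    for item in arg:
        if isinstance(item, str):
            collected.append(("name", item.lower()))
        elif isinstance(item, np.ndarray):
            collected.append(("array", item.shape))
        else:
            raise Exception("unrecognized predictions argument: " + str(item))
    return collected

print(collect_predictions("Forecast_A"))        # single string is wrapped
print(collect_predictions([np.zeros((3, 1))]))  # ndarray branch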
def get_plugin_actions(self):
"""Return a list of actions related to plugin."""
create_nb_action = create_action(self,
_("New notebook"),
icon=ima.icon('filenew'),
triggered=self.create_new_client)
self.save_as_action = create_action(self,
_("Save as..."),
icon=ima.icon('filesaveas'),
triggered=self.save_as)
open_action = create_action(self,
_("Open..."),
icon=ima.icon('fileopen'),
triggered=self.open_notebook)
self.open_console_action = create_action(self,
_("Open console"),
icon=ima.icon(
'ipython_console'),
triggered=self.open_console)
self.clear_recent_notebooks_action =\
create_action(self, _("Clear this list"),
triggered=self.clear_recent_notebooks)
# Plugin actions
self.menu_actions = [create_nb_action, open_action,
self.recent_notebook_menu, MENU_SEPARATOR,
self.save_as_action, MENU_SEPARATOR,
self.open_console_action]
self.setup_menu_actions()
return self.menu_actions | def function[get_plugin_actions, parameter[self]]:
constant[Return a list of actions related to plugin.]
variable[create_nb_action] assign[=] call[name[create_action], parameter[name[self], call[name[_], parameter[constant[New notebook]]]]]
name[self].save_as_action assign[=] call[name[create_action], parameter[name[self], call[name[_], parameter[constant[Save as...]]]]]
variable[open_action] assign[=] call[name[create_action], parameter[name[self], call[name[_], parameter[constant[Open...]]]]]
name[self].open_console_action assign[=] call[name[create_action], parameter[name[self], call[name[_], parameter[constant[Open console]]]]]
name[self].clear_recent_notebooks_action assign[=] call[name[create_action], parameter[name[self], call[name[_], parameter[constant[Clear this list]]]]]
name[self].menu_actions assign[=] list[[<ast.Name object at 0x7da20c6e6530>, <ast.Name object at 0x7da20c6e4c10>, <ast.Attribute object at 0x7da20c6e73a0>, <ast.Name object at 0x7da20c6e5360>, <ast.Attribute object at 0x7da20c6e6650>, <ast.Name object at 0x7da20c6e7910>, <ast.Attribute object at 0x7da20c6aa560>]]
call[name[self].setup_menu_actions, parameter[]]
return[name[self].menu_actions] | keyword[def] identifier[get_plugin_actions] ( identifier[self] ):
literal[string]
identifier[create_nb_action] = identifier[create_action] ( identifier[self] ,
identifier[_] ( literal[string] ),
identifier[icon] = identifier[ima] . identifier[icon] ( literal[string] ),
identifier[triggered] = identifier[self] . identifier[create_new_client] )
identifier[self] . identifier[save_as_action] = identifier[create_action] ( identifier[self] ,
identifier[_] ( literal[string] ),
identifier[icon] = identifier[ima] . identifier[icon] ( literal[string] ),
identifier[triggered] = identifier[self] . identifier[save_as] )
identifier[open_action] = identifier[create_action] ( identifier[self] ,
identifier[_] ( literal[string] ),
identifier[icon] = identifier[ima] . identifier[icon] ( literal[string] ),
identifier[triggered] = identifier[self] . identifier[open_notebook] )
identifier[self] . identifier[open_console_action] = identifier[create_action] ( identifier[self] ,
identifier[_] ( literal[string] ),
identifier[icon] = identifier[ima] . identifier[icon] (
literal[string] ),
identifier[triggered] = identifier[self] . identifier[open_console] )
identifier[self] . identifier[clear_recent_notebooks_action] = identifier[create_action] ( identifier[self] , identifier[_] ( literal[string] ),
identifier[triggered] = identifier[self] . identifier[clear_recent_notebooks] )
identifier[self] . identifier[menu_actions] =[ identifier[create_nb_action] , identifier[open_action] ,
identifier[self] . identifier[recent_notebook_menu] , identifier[MENU_SEPARATOR] ,
identifier[self] . identifier[save_as_action] , identifier[MENU_SEPARATOR] ,
identifier[self] . identifier[open_console_action] ]
identifier[self] . identifier[setup_menu_actions] ()
keyword[return] identifier[self] . identifier[menu_actions] | def get_plugin_actions(self):
"""Return a list of actions related to plugin."""
create_nb_action = create_action(self, _('New notebook'), icon=ima.icon('filenew'), triggered=self.create_new_client)
self.save_as_action = create_action(self, _('Save as...'), icon=ima.icon('filesaveas'), triggered=self.save_as)
open_action = create_action(self, _('Open...'), icon=ima.icon('fileopen'), triggered=self.open_notebook)
self.open_console_action = create_action(self, _('Open console'), icon=ima.icon('ipython_console'), triggered=self.open_console)
self.clear_recent_notebooks_action = create_action(self, _('Clear this list'), triggered=self.clear_recent_notebooks) # Plugin actions
self.menu_actions = [create_nb_action, open_action, self.recent_notebook_menu, MENU_SEPARATOR, self.save_as_action, MENU_SEPARATOR, self.open_console_action]
self.setup_menu_actions()
return self.menu_actions |
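The `get_plugin_actions` row builds a flat menu list from individual actions plus separators. A framework-free sketch of the same assembly; `create_action` and `MENU_SEPARATOR` below are simplified stand-ins for Spyder's Qt helpers, not the real implementations:

MENU_SEPARATOR = None

def create_action(text, triggered=None, icon=None):
    # Spyder returns a QAction; a dict is enough to show the wiring.
    return {"text": text, "triggered": triggered, "icon": icon}

new_action = create_action("New notebook", triggered=lambda: print("new"))
open_action = create_action("Open...", triggered=lambda: print("open"))
menu_actions = [new_action, open_action, MENU_SEPARATOR,
                create_action("Save as...")]

for action in menu_actions:
    print("---" if action is MENU_SEPARATOR else action["text"])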
def cores(self):
"""Generate the set of all cores in the system.
Yields
------
(x, y, p, :py:class:`~rig.machine_control.consts.AppState`)
A core in the machine, and its state. Cores related to a specific
chip are yielded consecutively in ascending order of core number.
"""
for (x, y), chip_info in iteritems(self):
for p, state in enumerate(chip_info.core_states):
yield (x, y, p, state) | def function[cores, parameter[self]]:
constant[Generate the set of all cores in the system.
Yields
------
(x, y, p, :py:class:`~rig.machine_control.consts.AppState`)
A core in the machine, and its state. Cores related to a specific
chip are yielded consecutively in ascending order of core number.
]
for taget[tuple[[<ast.Tuple object at 0x7da1b19ce6b0>, <ast.Name object at 0x7da1b19ce620>]]] in starred[call[name[iteritems], parameter[name[self]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b19ce8c0>, <ast.Name object at 0x7da1b19ce740>]]] in starred[call[name[enumerate], parameter[name[chip_info].core_states]]] begin[:]
<ast.Yield object at 0x7da1b19ceaa0> | keyword[def] identifier[cores] ( identifier[self] ):
literal[string]
keyword[for] ( identifier[x] , identifier[y] ), identifier[chip_info] keyword[in] identifier[iteritems] ( identifier[self] ):
keyword[for] identifier[p] , identifier[state] keyword[in] identifier[enumerate] ( identifier[chip_info] . identifier[core_states] ):
keyword[yield] ( identifier[x] , identifier[y] , identifier[p] , identifier[state] ) | def cores(self):
"""Generate the set of all cores in the system.
Yields
------
(x, y, p, :py:class:`~rig.machine_control.consts.AppState`)
A core in the machine, and its state. Cores related to a specific
chip are yielded consecutively in ascending order of core number.
"""
for ((x, y), chip_info) in iteritems(self):
for (p, state) in enumerate(chip_info.core_states):
yield (x, y, p, state) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] |
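The `cores` generator flattens a `(x, y) -> chip_info` mapping into per-core tuples. A runnable mock of the same traversal; `SimpleNamespace` stands in for rig's chip-info records and plain `dict.items()` replaces `iteritems`:

from types import SimpleNamespace

# Mock machine: two chips, each with a list of per-core states.
machine = {(0, 0): SimpleNamespace(core_states=["run", "idle"]),
           (0, 1): SimpleNamespace(core_states=["run"])}

def cores(machine):
    for (x, y), chip_info in machine.items():
        for p, state in enumerate(chip_info.core_states):
            yield (x, y, p, state)

for core in cores(machine):
    print(core)  # e.g. (0, 0, 0, 'run')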
def _entry_management_url_download(self, passed):
"""
Check if the given information is a URL.
If it is the case, it downloads and updates the location of the file to test.
:param passed: The url passed to the system.
:type passed: str
:return: The state of the check.
:rtype: bool
"""
if passed and self.checker.is_url_valid(passed):
# The passed string is an URL.
# We get the file name based on the URL.
# We actually just get the string after the last `/` in the URL.
file_to_test = passed.split("/")[-1]
if (
not PyFunceble.path.isfile(file_to_test)
or PyFunceble.INTERN["counter"]["number"]["tested"] == 0
):
# The filename does not exist in the current directory
# or the current number of tested is equal to 0.
# We download the content of the link.
Download(passed, file_to_test).text()
# The file does exist or the current number of tested is greater than
# 0.
# We initiate the file we have to test.
PyFunceble.INTERN["file_to_test"] = file_to_test
# We return true to say that everything goes right.
return True
# The passed string is not an URL.
# We do not need to do anything else.
return False | def function[_entry_management_url_download, parameter[self, passed]]:
constant[
Check if the given information is a URL.
If it is the case, it downloads and updates the location of the file to test.
:param passed: The url passed to the system.
:type passed: str
:return: The state of the check.
:rtype: bool
]
if <ast.BoolOp object at 0x7da20e954820> begin[:]
variable[file_to_test] assign[=] call[call[name[passed].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da20e9579d0>]
if <ast.BoolOp object at 0x7da20e955fc0> begin[:]
call[call[name[Download], parameter[name[passed], name[file_to_test]]].text, parameter[]]
call[name[PyFunceble].INTERN][constant[file_to_test]] assign[=] name[file_to_test]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[_entry_management_url_download] ( identifier[self] , identifier[passed] ):
literal[string]
keyword[if] identifier[passed] keyword[and] identifier[self] . identifier[checker] . identifier[is_url_valid] ( identifier[passed] ):
identifier[file_to_test] = identifier[passed] . identifier[split] ( literal[string] )[- literal[int] ]
keyword[if] (
keyword[not] identifier[PyFunceble] . identifier[path] . identifier[isfile] ( identifier[file_to_test] )
keyword[or] identifier[PyFunceble] . identifier[INTERN] [ literal[string] ][ literal[string] ][ literal[string] ]== literal[int]
):
identifier[Download] ( identifier[passed] , identifier[file_to_test] ). identifier[text] ()
identifier[PyFunceble] . identifier[INTERN] [ literal[string] ]= identifier[file_to_test]
keyword[return] keyword[True]
keyword[return] keyword[False] | def _entry_management_url_download(self, passed):
"""
Check if the given information is a URL.
If it is the case, it downloads and updates the location of the file to test.
:param passed: The url passed to the system.
:type passed: str
:return: The state of the check.
:rtype: bool
"""
if passed and self.checker.is_url_valid(passed):
# The passed string is an URL.
# We get the file name based on the URL.
# We actually just get the string after the last `/` in the URL.
file_to_test = passed.split('/')[-1]
if not PyFunceble.path.isfile(file_to_test) or PyFunceble.INTERN['counter']['number']['tested'] == 0:
# The filename does not exist in the current directory
# or the current number of tested is equal to 0.
# We download the content of the link.
Download(passed, file_to_test).text() # depends on [control=['if'], data=[]]
# The file does exist or the current number of tested is greater than
# 0.
# We initiate the file we have to test.
PyFunceble.INTERN['file_to_test'] = file_to_test
# We return true to say that everything goes right.
return True # depends on [control=['if'], data=[]]
# The passed string is not an URL.
# We do not need to do anything else.
return False |
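The URL branch above derives the local file name by taking everything after the last `/`, then downloads only when the file is missing or no tests have run. The name-derivation step on its own, standard library only (the URL is a placeholder):

import os

url = "https://example.org/lists/hosts.txt"
file_to_test = url.split("/")[-1]   # last path segment becomes the file name
print(file_to_test)                 # hosts.txt
# The download would be skipped if the file already exists locally:
print(os.path.isfile(file_to_test))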
def _get_gosrcs_upper(self, goids, max_upper, go2parentids):
"""Get GO IDs for the upper portion of the GO DAG."""
gosrcs_upper = set()
get_nt = self.gosubdag.go2nt.get
go2nt = {g:get_nt(g) for g in goids}
# Sort by descending order of descendant counts to find potential new hdrgos
go_nt = sorted(go2nt.items(), key=lambda t: -1*t[1].dcnt)
goids_upper = set()
for goid, _ in go_nt: # Loop through GO ID, GO nt
goids_upper.add(goid)
if goid in go2parentids:
goids_upper |= go2parentids[goid]
#print "{} {:3} {}".format(goid, len(goids_upper), gont.GO_name)
if len(goids_upper) < max_upper:
gosrcs_upper.add(goid)
else:
break
return gosrcs_upper | def function[_get_gosrcs_upper, parameter[self, goids, max_upper, go2parentids]]:
constant[Get GO IDs for the upper portion of the GO DAG.]
variable[gosrcs_upper] assign[=] call[name[set], parameter[]]
variable[get_nt] assign[=] name[self].gosubdag.go2nt.get
variable[go2nt] assign[=] <ast.DictComp object at 0x7da18bcca560>
variable[go_nt] assign[=] call[name[sorted], parameter[call[name[go2nt].items, parameter[]]]]
variable[goids_upper] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18bccafe0>, <ast.Name object at 0x7da18bcc9240>]]] in starred[name[go_nt]] begin[:]
call[name[goids_upper].add, parameter[name[goid]]]
if compare[name[goid] in name[go2parentids]] begin[:]
<ast.AugAssign object at 0x7da18bccb220>
if compare[call[name[len], parameter[name[goids_upper]]] less[<] name[max_upper]] begin[:]
call[name[gosrcs_upper].add, parameter[name[goid]]]
return[name[gosrcs_upper]] | keyword[def] identifier[_get_gosrcs_upper] ( identifier[self] , identifier[goids] , identifier[max_upper] , identifier[go2parentids] ):
literal[string]
identifier[gosrcs_upper] = identifier[set] ()
identifier[get_nt] = identifier[self] . identifier[gosubdag] . identifier[go2nt] . identifier[get]
identifier[go2nt] ={ identifier[g] : identifier[get_nt] ( identifier[g] ) keyword[for] identifier[g] keyword[in] identifier[goids] }
identifier[go_nt] = identifier[sorted] ( identifier[go2nt] . identifier[items] (), identifier[key] = keyword[lambda] identifier[t] :- literal[int] * identifier[t] [ literal[int] ]. identifier[dcnt] )
identifier[goids_upper] = identifier[set] ()
keyword[for] identifier[goid] , identifier[_] keyword[in] identifier[go_nt] :
identifier[goids_upper] . identifier[add] ( identifier[goid] )
keyword[if] identifier[goid] keyword[in] identifier[go2parentids] :
identifier[goids_upper] |= identifier[go2parentids] [ identifier[goid] ]
keyword[if] identifier[len] ( identifier[goids_upper] )< identifier[max_upper] :
identifier[gosrcs_upper] . identifier[add] ( identifier[goid] )
keyword[else] :
keyword[break]
keyword[return] identifier[gosrcs_upper] | def _get_gosrcs_upper(self, goids, max_upper, go2parentids):
"""Get GO IDs for the upper portion of the GO DAG."""
gosrcs_upper = set()
get_nt = self.gosubdag.go2nt.get
go2nt = {g: get_nt(g) for g in goids}
# Sort by descending order of descendant counts to find potential new hdrgos
go_nt = sorted(go2nt.items(), key=lambda t: -1 * t[1].dcnt)
goids_upper = set()
for (goid, _) in go_nt: # Loop through GO ID, GO nt
goids_upper.add(goid)
if goid in go2parentids:
goids_upper |= go2parentids[goid] # depends on [control=['if'], data=['goid', 'go2parentids']]
#print "{} {:3} {}".format(goid, len(goids_upper), gont.GO_name)
if len(goids_upper) < max_upper:
gosrcs_upper.add(goid) # depends on [control=['if'], data=[]]
else:
break # depends on [control=['for'], data=[]]
return gosrcs_upper |
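`_get_gosrcs_upper` walks GO terms in descending descendant count and greedily keeps sources until the accumulated ancestor set would reach `max_upper`. A self-contained sketch with namedtuples standing in for gosubdag records; all GO IDs and counts below are made up:

from collections import namedtuple

NT = namedtuple("NT", "dcnt")
go2nt = {"GO:a": NT(30), "GO:b": NT(12), "GO:c": NT(5)}
go2parentids = {"GO:a": {"GO:p1", "GO:p2"}, "GO:b": {"GO:p1"}, "GO:c": set()}
max_upper = 4

gosrcs_upper = set()
goids_upper = set()
# Descending descendant count: the biggest subgraphs are considered first.
for goid, _ in sorted(go2nt.items(), key=lambda t: -t[1].dcnt):
    goids_upper.add(goid)
    goids_upper |= go2parentids.get(goid, set())
    if len(goids_upper) < max_upper:
        gosrcs_upper.add(goid)
    else:
        break

print(sorted(gosrcs_upper))  # ['GO:a'] with these made-up counts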
def clear_modified_data(self):
"""
Clears only the modified data
"""
self.__modified_data__ = {}
self.__deleted_fields__ = []
for value in self.__original_data__.values():
try:
value.clear_modified_data()
except AttributeError:
pass | def function[clear_modified_data, parameter[self]]:
constant[
Clears only the modified data
]
name[self].__modified_data__ assign[=] dictionary[[], []]
name[self].__deleted_fields__ assign[=] list[[]]
for taget[name[value]] in starred[call[name[self].__original_data__.values, parameter[]]] begin[:]
<ast.Try object at 0x7da1b0b6dcf0> | keyword[def] identifier[clear_modified_data] ( identifier[self] ):
literal[string]
identifier[self] . identifier[__modified_data__] ={}
identifier[self] . identifier[__deleted_fields__] =[]
keyword[for] identifier[value] keyword[in] identifier[self] . identifier[__original_data__] . identifier[values] ():
keyword[try] :
identifier[value] . identifier[clear_modified_data] ()
keyword[except] identifier[AttributeError] :
keyword[pass] | def clear_modified_data(self):
"""
Clears only the modified data
"""
self.__modified_data__ = {}
self.__deleted_fields__ = []
for value in self.__original_data__.values():
try:
value.clear_modified_data() # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['value']] |
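`clear_modified_data` resets its own bookkeeping, then asks each nested value to do the same, using `try`/`except AttributeError` as the duck-typed "does the child support recursion" test. A small standalone sketch of that pattern (the `Node` class is illustrative, not the library's model class):

class Node:
    def __init__(self, **original):
        self.__original_data__ = original
        self.__modified_data__ = {}

    def clear_modified_data(self):
        self.__modified_data__ = {}
        for value in self.__original_data__.values():
            try:
                value.clear_modified_data()  # recurse if the child supports it
            except AttributeError:
                pass                         # plain values are left alone

root = Node(child=Node(leaf=1), name="x")
root.__modified_data__["name"] = "y"
root.clear_modified_data()
print(root.__modified_data__)  # {}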
def set_servo(self, gpio, pulse_width_us):
"""
Sets a pulse-width on a gpio to repeat every subcycle
(by default every 20ms).
"""
# Make sure we can set the exact pulse_width_us
_pulse_incr_us = _PWM.get_pulse_incr_us()
if pulse_width_us % _pulse_incr_us:
# No clean division possible
raise AttributeError(("Pulse width increment granularity %sus "
"cannot divide a pulse-time of %sus") % (_pulse_incr_us,
pulse_width_us))
# Initialize channel if not already done, else check subcycle time
if _PWM.is_channel_initialized(self._dma_channel):
_subcycle_us = _PWM.get_channel_subcycle_time_us(self._dma_channel)
if _subcycle_us != self._subcycle_time_us:
raise AttributeError(("Error: DMA channel %s is setup with a "
"subcycle_time of %sus (instead of %sus)") % \
(self._dma_channel, _subcycle_us,
self._subcycle_time_us))
else:
init_channel(self._dma_channel, self._subcycle_time_us)
# Add pulse for this GPIO
add_channel_pulse(self._dma_channel, gpio, 0, \
int(pulse_width_us / _pulse_incr_us)) | def function[set_servo, parameter[self, gpio, pulse_width_us]]:
constant[
Sets a pulse-width on a gpio to repeat every subcycle
(by default every 20ms).
]
variable[_pulse_incr_us] assign[=] call[name[_PWM].get_pulse_incr_us, parameter[]]
if binary_operation[name[pulse_width_us] <ast.Mod object at 0x7da2590d6920> name[_pulse_incr_us]] begin[:]
<ast.Raise object at 0x7da18f58d540>
if call[name[_PWM].is_channel_initialized, parameter[name[self]._dma_channel]] begin[:]
variable[_subcycle_us] assign[=] call[name[_PWM].get_channel_subcycle_time_us, parameter[name[self]._dma_channel]]
if compare[name[_subcycle_us] not_equal[!=] name[self]._subcycle_time_us] begin[:]
<ast.Raise object at 0x7da18f58e830>
call[name[add_channel_pulse], parameter[name[self]._dma_channel, name[gpio], constant[0], call[name[int], parameter[binary_operation[name[pulse_width_us] / name[_pulse_incr_us]]]]]] | keyword[def] identifier[set_servo] ( identifier[self] , identifier[gpio] , identifier[pulse_width_us] ):
literal[string]
identifier[_pulse_incr_us] = identifier[_PWM] . identifier[get_pulse_incr_us] ()
keyword[if] identifier[pulse_width_us] % identifier[_pulse_incr_us] :
keyword[raise] identifier[AttributeError] (( literal[string]
literal[string] )%( identifier[_pulse_incr_us] ,
identifier[pulse_width_us] ))
keyword[if] identifier[_PWM] . identifier[is_channel_initialized] ( identifier[self] . identifier[_dma_channel] ):
identifier[_subcycle_us] = identifier[_PWM] . identifier[get_channel_subcycle_time_us] ( identifier[self] . identifier[_dma_channel] )
keyword[if] identifier[_subcycle_us] != identifier[self] . identifier[_subcycle_time_us] :
keyword[raise] identifier[AttributeError] (( literal[string]
literal[string] )%( identifier[self] . identifier[_dma_channel] , identifier[_subcycle_us] ,
identifier[self] . identifier[_subcycle_time_us] ))
keyword[else] :
identifier[init_channel] ( identifier[self] . identifier[_dma_channel] , identifier[self] . identifier[_subcycle_time_us] )
identifier[add_channel_pulse] ( identifier[self] . identifier[_dma_channel] , identifier[gpio] , literal[int] , identifier[int] ( identifier[pulse_width_us] / identifier[_pulse_incr_us] )) | def set_servo(self, gpio, pulse_width_us):
"""
Sets a pulse-width on a gpio to repeat every subcycle
(by default every 20ms).
"""
# Make sure we can set the exact pulse_width_us
_pulse_incr_us = _PWM.get_pulse_incr_us()
if pulse_width_us % _pulse_incr_us:
# No clean division possible
raise AttributeError('Pulse width increment granularity %sus cannot divide a pulse-time of %sus' % (_pulse_incr_us, pulse_width_us)) # depends on [control=['if'], data=[]]
# Initialize channel if not already done, else check subcycle time
if _PWM.is_channel_initialized(self._dma_channel):
_subcycle_us = _PWM.get_channel_subcycle_time_us(self._dma_channel)
if _subcycle_us != self._subcycle_time_us:
raise AttributeError('Error: DMA channel %s is setup with a subcycle_time of %sus (instead of %sus)' % (self._dma_channel, _subcycle_us, self._subcycle_time_us)) # depends on [control=['if'], data=['_subcycle_us']] # depends on [control=['if'], data=[]]
else:
init_channel(self._dma_channel, self._subcycle_time_us)
# Add pulse for this GPIO
add_channel_pulse(self._dma_channel, gpio, 0, int(pulse_width_us / _pulse_incr_us)) |
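The guard in `set_servo` rejects pulse widths that are not a whole multiple of the DMA pulse-width increment, and the pulse is then passed on as a count of increments. The arithmetic in isolation, with an assumed 10 microsecond granularity:

# Granularity check: a pulse must be a whole number of DMA increments.
pulse_incr_us = 10  # assumed increment granularity
for pulse_width_us in (1500, 1505):
    if pulse_width_us % pulse_incr_us:
        print(pulse_width_us, "us cannot be represented")
    else:
        print(pulse_width_us, "us ->",
              pulse_width_us // pulse_incr_us, "increments")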
def FundamentalType(self, _type):
"""Returns the proper ctypes class name for a fundamental type
1) activates generation of appropriate headers for
## int128_t
## c_long_double_t
2) return appropriate name for type
"""
log.debug('HERE in FundamentalType for %s %s', _type, _type.name)
if _type.name in ["None", "c_long_double_t", "c_uint128", "c_int128"]:
self.enable_fundamental_type_wrappers()
return _type.name
return "ctypes.%s" % (_type.name) | def function[FundamentalType, parameter[self, _type]]:
constant[Returns the proper ctypes class name for a fundamental type
1) activates generation of appropriate headers for
## int128_t
## c_long_double_t
2) return appropriate name for type
]
call[name[log].debug, parameter[constant[HERE in FundamentalType for %s %s], name[_type], name[_type].name]]
if compare[name[_type].name in list[[<ast.Constant object at 0x7da20c6c5b70>, <ast.Constant object at 0x7da20c6c5db0>, <ast.Constant object at 0x7da20c6c7310>, <ast.Constant object at 0x7da20c6c6ec0>]]] begin[:]
call[name[self].enable_fundamental_type_wrappers, parameter[]]
return[name[_type].name]
return[binary_operation[constant[ctypes.%s] <ast.Mod object at 0x7da2590d6920> name[_type].name]] | keyword[def] identifier[FundamentalType] ( identifier[self] , identifier[_type] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] , identifier[_type] , identifier[_type] . identifier[name] )
keyword[if] identifier[_type] . identifier[name] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[self] . identifier[enable_fundamental_type_wrappers] ()
keyword[return] identifier[_type] . identifier[name]
keyword[return] literal[string] %( identifier[_type] . identifier[name] ) | def FundamentalType(self, _type):
"""Returns the proper ctypes class name for a fundamental type
1) activates generation of appropriate headers for
## int128_t
## c_long_double_t
2) return appropriate name for type
"""
log.debug('HERE in FundamentalType for %s %s', _type, _type.name)
if _type.name in ['None', 'c_long_double_t', 'c_uint128', 'c_int128']:
self.enable_fundamental_type_wrappers()
return _type.name # depends on [control=['if'], data=[]]
return 'ctypes.%s' % _type.name |
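Ordinary fundamental types are emitted as "ctypes.%s" strings, and only the special-width types trigger wrapper generation. A quick check that such generated names resolve against the real ctypes module:

import ctypes

for name in ("c_int", "c_double", "c_char_p"):
    generated = "ctypes.%s" % name
    print(generated, "->", getattr(ctypes, name))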
def sign(self, message):
"""Signs a message.
Args:
message: bytes, Message to be signed.
Returns:
string, The signature of the message for the given key.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
return crypto.sign(self._key, message, 'sha256') | def function[sign, parameter[self, message]]:
constant[Signs a message.
Args:
message: bytes, Message to be signed.
Returns:
string, The signature of the message for the given key.
]
variable[message] assign[=] call[name[_helpers]._to_bytes, parameter[name[message]]]
return[call[name[crypto].sign, parameter[name[self]._key, name[message], constant[sha256]]]] | keyword[def] identifier[sign] ( identifier[self] , identifier[message] ):
literal[string]
identifier[message] = identifier[_helpers] . identifier[_to_bytes] ( identifier[message] , identifier[encoding] = literal[string] )
keyword[return] identifier[crypto] . identifier[sign] ( identifier[self] . identifier[_key] , identifier[message] , literal[string] ) | def sign(self, message):
"""Signs a message.
Args:
message: bytes, Message to be signed.
Returns:
string, The signature of the message for the given key.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
return crypto.sign(self._key, message, 'sha256') |
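`crypto.sign` here is pyOpenSSL's module-level helper. A sketch of calling it directly, assuming an older pyOpenSSL where `crypto.sign(pkey, data, digest)` is still available (it is deprecated upstream in favor of the `cryptography` package):

# Sketch assuming pyOpenSSL; crypto.sign is deprecated in recent releases.
from OpenSSL import crypto

key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)

message = "payload".encode("utf-8")   # mirrors _to_bytes(..., 'utf-8')
signature = crypto.sign(key, message, "sha256")
print(len(signature))                 # 256 bytes for a 2048-bit RSA key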
def _set_blob_properties(self, sd, digest):
# type: (SyncCopy, blobxfer.models.synccopy.Descriptor, str) -> None
"""Set blob properties (md5, cache control)
:param SyncCopy self: this
:param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor
:param str digest: md5 digest
"""
blobxfer.operations.azure.blob.set_blob_properties(
sd.dst_entity, digest)
if blobxfer.util.is_not_empty(sd.dst_entity.replica_targets):
for ase in sd.dst_entity.replica_targets:
blobxfer.operations.azure.blob.set_blob_properties(ase, digest) | def function[_set_blob_properties, parameter[self, sd, digest]]:
constant[Set blob properties (md5, cache control)
:param SyncCopy self: this
:param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor
:param str digest: md5 digest
]
call[name[blobxfer].operations.azure.blob.set_blob_properties, parameter[name[sd].dst_entity, name[digest]]]
if call[name[blobxfer].util.is_not_empty, parameter[name[sd].dst_entity.replica_targets]] begin[:]
for taget[name[ase]] in starred[name[sd].dst_entity.replica_targets] begin[:]
call[name[blobxfer].operations.azure.blob.set_blob_properties, parameter[name[ase], name[digest]]] | keyword[def] identifier[_set_blob_properties] ( identifier[self] , identifier[sd] , identifier[digest] ):
literal[string]
identifier[blobxfer] . identifier[operations] . identifier[azure] . identifier[blob] . identifier[set_blob_properties] (
identifier[sd] . identifier[dst_entity] , identifier[digest] )
keyword[if] identifier[blobxfer] . identifier[util] . identifier[is_not_empty] ( identifier[sd] . identifier[dst_entity] . identifier[replica_targets] ):
keyword[for] identifier[ase] keyword[in] identifier[sd] . identifier[dst_entity] . identifier[replica_targets] :
identifier[blobxfer] . identifier[operations] . identifier[azure] . identifier[blob] . identifier[set_blob_properties] ( identifier[ase] , identifier[digest] ) | def _set_blob_properties(self, sd, digest):
# type: (SyncCopy, blobxfer.models.synccopy.Descriptor, str) -> None
'Set blob properties (md5, cache control)\n :param SyncCopy self: this\n :param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor\n :param str digest: md5 digest\n '
blobxfer.operations.azure.blob.set_blob_properties(sd.dst_entity, digest)
if blobxfer.util.is_not_empty(sd.dst_entity.replica_targets):
for ase in sd.dst_entity.replica_targets:
blobxfer.operations.azure.blob.set_blob_properties(ase, digest) # depends on [control=['for'], data=['ase']] # depends on [control=['if'], data=[]] |
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with data aggregated from related fields.
"""
obj = self._clone()
if args:
for arg in args:
if isinstance(arg, Facet):
obj._facets.append(arg)
elif isinstance(arg, six.string_types):
modifier = "term"
tokens = arg.split("__")
if len(tokens)>1 and tokens[-1] in ["term", "stat", "histo", "date"]:
modifier=tokens[-1]
tokens=tokens[:-1]
field, djfield = self._django_to_es_field("__".join(tokens))
if modifier=="term":
obj._facets.append(TermFacet(name=arg, field=field, **kwargs))
elif modifier=="term_stat":
obj._facets.append(TermStatsFacet(name=arg, **kwargs))
elif modifier=="stat":
obj._facets.append(StatisticalFacet(name=arg, field=field, **kwargs))
elif modifier=="histo":
obj._facets.append(HistogramFacet(name=arg, field=field, **kwargs))
elif modifier=="date":
obj._facets.append(DateHistogramFacet(name=arg, field=field, **kwargs))
else:
raise NotImplementedError("invalid type")
else:
# Add the aggregates/facet to the query
for name, field in kwargs.items():
obj._facets.append(
TermFacet(field=field.replace(FIELD_SEPARATOR, "."), name=name.replace(FIELD_SEPARATOR, ".")))
return obj | def function[annotate, parameter[self]]:
constant[
Return a query set in which the returned objects have been annotated
with data aggregated from related fields.
]
variable[obj] assign[=] call[name[self]._clone, parameter[]]
if name[args] begin[:]
for taget[name[arg]] in starred[name[args]] begin[:]
if call[name[isinstance], parameter[name[arg], name[Facet]]] begin[:]
call[name[obj]._facets.append, parameter[name[arg]]]
return[name[obj]] | keyword[def] identifier[annotate] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[obj] = identifier[self] . identifier[_clone] ()
keyword[if] identifier[args] :
keyword[for] identifier[arg] keyword[in] identifier[args] :
keyword[if] identifier[isinstance] ( identifier[arg] , identifier[Facet] ):
identifier[obj] . identifier[_facets] . identifier[append] ( identifier[arg] )
keyword[elif] identifier[isinstance] ( identifier[arg] , identifier[six] . identifier[string_types] ):
identifier[modifier] = literal[string]
identifier[tokens] = identifier[arg] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[tokens] )> literal[int] keyword[and] identifier[tokens] [- literal[int] ] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[modifier] = identifier[tokens] [- literal[int] ]
identifier[tokens] = identifier[tokens] [:- literal[int] ]
identifier[field] , identifier[djfield] = identifier[self] . identifier[_django_to_es_field] ( literal[string] . identifier[join] ( identifier[tokens] ))
keyword[if] identifier[modifier] == literal[string] :
identifier[obj] . identifier[_facets] . identifier[append] ( identifier[TermFacet] ( identifier[name] = identifier[arg] , identifier[field] = identifier[field] ,** identifier[kwargs] ))
keyword[elif] identifier[modifier] == literal[string] :
identifier[obj] . identifier[_facets] . identifier[append] ( identifier[TermStatsFacet] ( identifier[name] = identifier[arg] ,** identifier[kwargs] ))
keyword[elif] identifier[modifier] == literal[string] :
identifier[obj] . identifier[_facets] . identifier[append] ( identifier[StatisticalFacet] ( identifier[name] = identifier[arg] , identifier[field] = identifier[field] ,** identifier[kwargs] ))
keyword[elif] identifier[modifier] == literal[string] :
identifier[obj] . identifier[_facets] . identifier[append] ( identifier[HistogramFacet] ( identifier[name] = identifier[arg] , identifier[field] = identifier[field] ,** identifier[kwargs] ))
keyword[elif] identifier[modifier] == literal[string] :
identifier[obj] . identifier[_facets] . identifier[append] ( identifier[DateHistogramFacet] ( identifier[name] = identifier[arg] , identifier[field] = identifier[field] ,** identifier[kwargs] ))
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
keyword[else] :
keyword[for] identifier[name] , identifier[field] keyword[in] identifier[kwargs] . identifier[items] ():
identifier[obj] . identifier[_facets] . identifier[append] (
identifier[TermFacet] ( identifier[field] = identifier[field] . identifier[replace] ( identifier[FIELD_SEPARATOR] , literal[string] ), identifier[name] = identifier[name] . identifier[replace] ( identifier[FIELD_SEPARATOR] , literal[string] )))
keyword[return] identifier[obj] | def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with data aggregated from related fields.
"""
obj = self._clone()
if args:
for arg in args:
if isinstance(arg, Facet):
obj._facets.append(arg) # depends on [control=['if'], data=[]]
elif isinstance(arg, six.string_types):
modifier = 'term'
tokens = arg.split('__')
if len(tokens) > 1 and tokens[-1] in ['term', 'stat', 'histo', 'date']:
modifier = tokens[-1]
tokens = tokens[:-1] # depends on [control=['if'], data=[]]
(field, djfield) = self._django_to_es_field('__'.join(tokens))
if modifier == 'term':
obj._facets.append(TermFacet(name=arg, field=field, **kwargs)) # depends on [control=['if'], data=[]]
elif modifier == 'term_stat':
obj._facets.append(TermStatsFacet(name=arg, **kwargs)) # depends on [control=['if'], data=[]]
elif modifier == 'stat':
obj._facets.append(StatisticalFacet(name=arg, field=field, **kwargs)) # depends on [control=['if'], data=[]]
elif modifier == 'histo':
obj._facets.append(HistogramFacet(name=arg, field=field, **kwargs)) # depends on [control=['if'], data=[]]
elif modifier == 'date':
obj._facets.append(DateHistogramFacet(name=arg, field=field, **kwargs)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('invalid type') # depends on [control=['for'], data=['arg']] # depends on [control=['if'], data=[]]
else:
# Add the aggregates/facet to the query
for (name, field) in kwargs.items():
obj._facets.append(TermFacet(field=field.replace(FIELD_SEPARATOR, '.'), name=name.replace(FIELD_SEPARATOR, '.'))) # depends on [control=['for'], data=[]]
return obj |
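`annotate` parses string facets by splitting on `__` and peeling a trailing modifier token off the field path. That tokenizing step in isolation; note that it can only yield the modifiers listed in the suffix check, so the `term_stat` branch in the method above appears unreachable for string arguments:

# Peeling a trailing modifier off a double-underscore field path.
def split_modifier(arg):
    modifier = "term"  # default facet type
    tokens = arg.split("__")
    if len(tokens) > 1 and tokens[-1] in ("term", "stat", "histo", "date"):
        modifier = tokens[-1]
        tokens = tokens[:-1]
    return "__".join(tokens), modifier

print(split_modifier("price__stat"))  # ('price', 'stat')
print(split_modifier("category"))     # ('category', 'term')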
def get_referenced_object(prev_obj, obj, dot_separated_name,
desired_type=None):
"""
get objects based on a path
Args:
prev_obj: the object containing obj (req. if obj is a list)
obj: the current object
dot_separated_name: the attribute name "a.b.c.d" starting from obj
Note: the attribute "parent(TYPE)" is a shortcut to jump to the
parent of type "TYPE" (exact match of type name).
desired_type: (optional)
Returns:
the object if found, None if not found or Postponed() if some postponed
refs are found on the path
"""
from textx.scoping import Postponed
assert prev_obj or not type(obj) is list
names = dot_separated_name.split(".")
match = re.match(r'parent\((\w+)\)', names[0])
if match:
next_obj = obj
desired_parent_typename = match.group(1)
next_obj = get_recursive_parent_with_typename(next_obj,
desired_parent_typename)
if next_obj:
return get_referenced_object(None, next_obj, ".".join(names[1:]),
desired_type)
else:
return None
elif type(obj) is list:
next_obj = None
for res in obj:
if hasattr(res, "name") and res.name == names[0]:
if desired_type is None or textx_isinstance(res, desired_type):
next_obj = res
else:
raise TypeError(
"{} has type {} instead of {}.".format(
names[0], type(res).__name__,
desired_type.__name__))
if not next_obj:
# if prev_obj needs to be resolved: return Postponed.
if needs_to_be_resolved(prev_obj, names[0]):
return Postponed()
else:
return None
elif type(obj) is Postponed:
return Postponed()
else:
next_obj = getattr(obj, names[0])
if not next_obj:
# if obj is in crossref return Postponed, else None
if needs_to_be_resolved(obj, names[0]):
return Postponed()
else:
return None
if len(names) > 1:
return get_referenced_object(obj, next_obj, ".".join(
names[1:]), desired_type)
if type(next_obj) is list and needs_to_be_resolved(obj, names[0]):
return Postponed()
return next_obj | def function[get_referenced_object, parameter[prev_obj, obj, dot_separated_name, desired_type]]:
constant[
get objects based on a path
Args:
prev_obj: the object containing obj (req. if obj is a list)
obj: the current object
dot_separated_name: the attribute name "a.b.c.d" starting from obj
Note: the attribute "parent(TYPE)" is a shortcut to jump to the
parent of type "TYPE" (exact match of type name).
desired_type: (optional)
Returns:
the object if found, None if not found or Postponed() if some postponed
refs are found on the path
]
from relative_module[textx.scoping] import module[Postponed]
assert[<ast.BoolOp object at 0x7da20e7494b0>]
variable[names] assign[=] call[name[dot_separated_name].split, parameter[constant[.]]]
variable[match] assign[=] call[name[re].match, parameter[constant[parent\((\w+)\)], call[name[names]][constant[0]]]]
if name[match] begin[:]
variable[next_obj] assign[=] name[obj]
variable[desired_parent_typename] assign[=] call[name[match].group, parameter[constant[1]]]
variable[next_obj] assign[=] call[name[get_recursive_parent_with_typename], parameter[name[next_obj], name[desired_parent_typename]]]
if name[next_obj] begin[:]
return[call[name[get_referenced_object], parameter[constant[None], name[next_obj], call[constant[.].join, parameter[call[name[names]][<ast.Slice object at 0x7da20e74a8f0>]]], name[desired_type]]]]
if <ast.UnaryOp object at 0x7da20e9b3a00> begin[:]
if call[name[needs_to_be_resolved], parameter[name[obj], call[name[names]][constant[0]]]] begin[:]
return[call[name[Postponed], parameter[]]]
if compare[call[name[len], parameter[name[names]]] greater[>] constant[1]] begin[:]
return[call[name[get_referenced_object], parameter[name[obj], name[next_obj], call[constant[.].join, parameter[call[name[names]][<ast.Slice object at 0x7da20e9b26e0>]]], name[desired_type]]]]
if <ast.BoolOp object at 0x7da20e9b2e60> begin[:]
return[call[name[Postponed], parameter[]]]
return[name[next_obj]] | keyword[def] identifier[get_referenced_object] ( identifier[prev_obj] , identifier[obj] , identifier[dot_separated_name] ,
identifier[desired_type] = keyword[None] ):
literal[string]
keyword[from] identifier[textx] . identifier[scoping] keyword[import] identifier[Postponed]
keyword[assert] identifier[prev_obj] keyword[or] keyword[not] identifier[type] ( identifier[obj] ) keyword[is] identifier[list]
identifier[names] = identifier[dot_separated_name] . identifier[split] ( literal[string] )
identifier[match] = identifier[re] . identifier[match] ( literal[string] , identifier[names] [ literal[int] ])
keyword[if] identifier[match] :
identifier[next_obj] = identifier[obj]
identifier[desired_parent_typename] = identifier[match] . identifier[group] ( literal[int] )
identifier[next_obj] = identifier[get_recursive_parent_with_typename] ( identifier[next_obj] ,
identifier[desired_parent_typename] )
keyword[if] identifier[next_obj] :
keyword[return] identifier[get_referenced_object] ( keyword[None] , identifier[next_obj] , literal[string] . identifier[join] ( identifier[names] [ literal[int] :]),
identifier[desired_type] )
keyword[else] :
keyword[return] keyword[None]
keyword[elif] identifier[type] ( identifier[obj] ) keyword[is] identifier[list] :
identifier[next_obj] = keyword[None]
keyword[for] identifier[res] keyword[in] identifier[obj] :
keyword[if] identifier[hasattr] ( identifier[res] , literal[string] ) keyword[and] identifier[res] . identifier[name] == identifier[names] [ literal[int] ]:
keyword[if] identifier[desired_type] keyword[is] keyword[None] keyword[or] identifier[textx_isinstance] ( identifier[res] , identifier[desired_type] ):
identifier[next_obj] = identifier[res]
keyword[else] :
keyword[raise] identifier[TypeError] (
literal[string] . identifier[format] (
identifier[names] [ literal[int] ], identifier[type] ( identifier[res] ). identifier[__name__] ,
identifier[desired_type] . identifier[__name__] ))
keyword[if] keyword[not] identifier[next_obj] :
keyword[if] identifier[needs_to_be_resolved] ( identifier[prev_obj] , identifier[names] [ literal[int] ]):
keyword[return] identifier[Postponed] ()
keyword[else] :
keyword[return] keyword[None]
keyword[elif] identifier[type] ( identifier[obj] ) keyword[is] identifier[Postponed] :
keyword[return] identifier[Postponed] ()
keyword[else] :
identifier[next_obj] = identifier[getattr] ( identifier[obj] , identifier[names] [ literal[int] ])
keyword[if] keyword[not] identifier[next_obj] :
keyword[if] identifier[needs_to_be_resolved] ( identifier[obj] , identifier[names] [ literal[int] ]):
keyword[return] identifier[Postponed] ()
keyword[else] :
keyword[return] keyword[None]
keyword[if] identifier[len] ( identifier[names] )> literal[int] :
keyword[return] identifier[get_referenced_object] ( identifier[obj] , identifier[next_obj] , literal[string] . identifier[join] (
identifier[names] [ literal[int] :]), identifier[desired_type] )
keyword[if] identifier[type] ( identifier[next_obj] ) keyword[is] identifier[list] keyword[and] identifier[needs_to_be_resolved] ( identifier[obj] , identifier[names] [ literal[int] ]):
keyword[return] identifier[Postponed] ()
keyword[return] identifier[next_obj] | def get_referenced_object(prev_obj, obj, dot_separated_name, desired_type=None):
"""
get objects based on a path
Args:
prev_obj: the object containing obj (req. if obj is a list)
obj: the current object
dot_separated_name: the attribute name "a.b.c.d" starting from obj
Note: the attribute "parent(TYPE)" is a shortcut to jump to the
parent of type "TYPE" (exact match of type name).
desired_type: (optional)
Returns:
the object if found, None if not found or Postponed() if some postponed
refs are found on the path
"""
from textx.scoping import Postponed
assert prev_obj or not type(obj) is list
names = dot_separated_name.split('.')
match = re.match('parent\\((\\w+)\\)', names[0])
if match:
next_obj = obj
desired_parent_typename = match.group(1)
next_obj = get_recursive_parent_with_typename(next_obj, desired_parent_typename)
if next_obj:
return get_referenced_object(None, next_obj, '.'.join(names[1:]), desired_type) # depends on [control=['if'], data=[]]
else:
return None # depends on [control=['if'], data=[]]
elif type(obj) is list:
next_obj = None
for res in obj:
if hasattr(res, 'name') and res.name == names[0]:
if desired_type is None or textx_isinstance(res, desired_type):
next_obj = res # depends on [control=['if'], data=[]]
else:
raise TypeError('{} has type {} instead of {}.'.format(names[0], type(res).__name__, desired_type.__name__)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['res']]
if not next_obj:
# if prev_obj needs to be resolved: return Postponed.
if needs_to_be_resolved(prev_obj, names[0]):
return Postponed() # depends on [control=['if'], data=[]]
else:
return None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif type(obj) is Postponed:
return Postponed() # depends on [control=['if'], data=['Postponed']]
else:
next_obj = getattr(obj, names[0])
if not next_obj:
# if obj is in crossref return Postponed, else None
if needs_to_be_resolved(obj, names[0]):
return Postponed() # depends on [control=['if'], data=[]]
else:
return None # depends on [control=['if'], data=[]]
if len(names) > 1:
return get_referenced_object(obj, next_obj, '.'.join(names[1:]), desired_type) # depends on [control=['if'], data=[]]
if type(next_obj) is list and needs_to_be_resolved(obj, names[0]):
return Postponed() # depends on [control=['if'], data=[]]
return next_obj |
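`get_referenced_object` special-cases a leading `parent(TYPE)` segment with a regex before walking the remaining dotted path attribute by attribute. Both pieces, standalone and with made-up names:

import re
from types import SimpleNamespace

# The parent(TYPE) shortcut is detected with the same regex.
match = re.match(r'parent\((\w+)\)', "parent(Model).name".split(".")[0])
print(match.group(1))  # Model

# Plain dotted names walk the object attribute by attribute.
obj = SimpleNamespace(a=SimpleNamespace(b="leaf"))
for part in "a.b".split("."):
    obj = getattr(obj, part)
print(obj)  # leaf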
def logout(self):
"""Log out of the account."""
self._master_token = None
self._auth_token = None
self._email = None
self._android_id = None | def function[logout, parameter[self]]:
constant[Log out of the account.]
name[self]._master_token assign[=] constant[None]
name[self]._auth_token assign[=] constant[None]
name[self]._email assign[=] constant[None]
name[self]._android_id assign[=] constant[None] | keyword[def] identifier[logout] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_master_token] = keyword[None]
identifier[self] . identifier[_auth_token] = keyword[None]
identifier[self] . identifier[_email] = keyword[None]
identifier[self] . identifier[_android_id] = keyword[None] | def logout(self):
"""Log out of the account."""
self._master_token = None
self._auth_token = None
self._email = None
self._android_id = None |
def process_exception(self, request, exception):
"""
When we get a CasTicketException, that is probably caused by the ticket timing out.
So logout/login and get the same page again.
"""
if isinstance(exception, CasTicketException):
do_logout(request)
# This assumes that request.path requires authentication.
return HttpResponseRedirect(request.path)
else:
return None | def function[process_exception, parameter[self, request, exception]]:
constant[
When we get a CasTicketException, that is probably caused by the ticket timing out.
So logout/login and get the same page again.
]
if call[name[isinstance], parameter[name[exception], name[CasTicketException]]] begin[:]
call[name[do_logout], parameter[name[request]]]
return[call[name[HttpResponseRedirect], parameter[name[request].path]]] | keyword[def] identifier[process_exception] ( identifier[self] , identifier[request] , identifier[exception] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[exception] , identifier[CasTicketException] ):
identifier[do_logout] ( identifier[request] )
keyword[return] identifier[HttpResponseRedirect] ( identifier[request] . identifier[path] )
keyword[else] :
keyword[return] keyword[None] | def process_exception(self, request, exception):
"""
When we get a CasTicketException, that is probably caused by the ticket timing out.
So logout/login and get the same page again.
"""
if isinstance(exception, CasTicketException):
do_logout(request)
# This assumes that request.path requires authentication.
return HttpResponseRedirect(request.path) # depends on [control=['if'], data=[]]
else:
return None |
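Django invokes `process_exception` on middleware when a view raises, and returning a response short-circuits normal error handling. A sketch of the same recover-by-redirect idea, assuming Django is installed; `StaleTicketError` is a local stand-in for `CasTicketException`:

from django.http import HttpResponseRedirect

class StaleTicketError(Exception):
    """Stand-in for CasTicketException."""

class RetryOnTicketErrorMiddleware:
    def process_exception(self, request, exception):
        # Returning a response here short-circuits Django's error handling.
        if isinstance(exception, StaleTicketError):
            return HttpResponseRedirect(request.path)  # retry the same URL
        return None  # fall through to normal exception processing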
def clone_to_path(gh_token, folder, sdk_git_id, branch_or_commit=None, *, pr_number=None):
"""Clone the given repo_id to the folder.
If PR number is specified fetch the magic branches
pull/<id>/head or pull/<id>/merge from Github. "merge" is tried first, falling back to "head".
Beware that pr_number implies detached head, and then no push is possible.
If branch is specified, checkout this branch or commit finally.
:param str branch_or_commit: If specified, switch to this branch/commit.
:param int pr_number: PR number.
"""
_LOGGER.info("Clone SDK repository %s", sdk_git_id)
url_parsing = urlsplit(sdk_git_id)
sdk_git_id = url_parsing.path
if sdk_git_id.startswith("/"):
sdk_git_id = sdk_git_id[1:]
credentials_part = ''
if gh_token:
login = user_from_token(gh_token).login
credentials_part = '{user}:{token}@'.format(
user=login,
token=gh_token
)
else:
_LOGGER.warning('Will clone the repo without writing credentials')
https_authenticated_url = 'https://{credentials}github.com/{sdk_git_id}.git'.format(
credentials=credentials_part,
sdk_git_id=sdk_git_id
)
# Clone the repo
_git_clone_to_path(https_authenticated_url, folder)
# If this is a PR, do some fetch to improve the number of SHA1 available
if pr_number:
try:
checkout_with_fetch(folder, "pull/{}/merge".format(pr_number))
return
except Exception: # pylint: disable=broad-except
pass # Assume "merge" doesn't exist anymore, fetch "head"
checkout_with_fetch(folder, "pull/{}/head".format(pr_number))
# If there is SHA1, checkout it. If PR number was given, SHA1 could be inside that PR.
if branch_or_commit:
repo = Repo(str(folder))
repo.git.checkout(branch_or_commit) | def function[clone_to_path, parameter[gh_token, folder, sdk_git_id, branch_or_commit]]:
constant[Clone the given repo_id to the folder.
If PR number is specified fetch the magic branches
pull/<id>/head or pull/<id>/merge from Github. "merge" is tried first, falling back to "head".
Beware that pr_number implies detached head, and then no push is possible.
If branch is specified, checkout this branch or commit finally.
:param str branch_or_commit: If specified, switch to this branch/commit.
:param int pr_number: PR number.
]
call[name[_LOGGER].info, parameter[constant[Clone SDK repository %s], name[sdk_git_id]]]
variable[url_parsing] assign[=] call[name[urlsplit], parameter[name[sdk_git_id]]]
variable[sdk_git_id] assign[=] name[url_parsing].path
if call[name[sdk_git_id].startswith, parameter[constant[/]]] begin[:]
variable[sdk_git_id] assign[=] call[name[sdk_git_id]][<ast.Slice object at 0x7da1b24e74f0>]
variable[credentials_part] assign[=] constant[]
if name[gh_token] begin[:]
variable[login] assign[=] call[name[user_from_token], parameter[name[gh_token]]].login
variable[credentials_part] assign[=] call[constant[{user}:{token}@].format, parameter[]]
variable[https_authenticated_url] assign[=] call[constant[https://{credentials}github.com/{sdk_git_id}.git].format, parameter[]]
call[name[_git_clone_to_path], parameter[name[https_authenticated_url], name[folder]]]
if name[pr_number] begin[:]
<ast.Try object at 0x7da1b24e4310>
call[name[checkout_with_fetch], parameter[name[folder], call[constant[pull/{}/head].format, parameter[name[pr_number]]]]]
if name[branch_or_commit] begin[:]
variable[repo] assign[=] call[name[Repo], parameter[call[name[str], parameter[name[folder]]]]]
call[name[repo].git.checkout, parameter[name[branch_or_commit]]] | keyword[def] identifier[clone_to_path] ( identifier[gh_token] , identifier[folder] , identifier[sdk_git_id] , identifier[branch_or_commit] = keyword[None] ,*, identifier[pr_number] = keyword[None] ):
literal[string]
identifier[_LOGGER] . identifier[info] ( literal[string] , identifier[sdk_git_id] )
identifier[url_parsing] = identifier[urlsplit] ( identifier[sdk_git_id] )
identifier[sdk_git_id] = identifier[url_parsing] . identifier[path]
keyword[if] identifier[sdk_git_id] . identifier[startswith] ( literal[string] ):
identifier[sdk_git_id] = identifier[sdk_git_id] [ literal[int] :]
identifier[credentials_part] = literal[string]
keyword[if] identifier[gh_token] :
identifier[login] = identifier[user_from_token] ( identifier[gh_token] ). identifier[login]
identifier[credentials_part] = literal[string] . identifier[format] (
identifier[user] = identifier[login] ,
identifier[token] = identifier[gh_token]
)
keyword[else] :
identifier[_LOGGER] . identifier[warning] ( literal[string] )
identifier[https_authenticated_url] = literal[string] . identifier[format] (
identifier[credentials] = identifier[credentials_part] ,
identifier[sdk_git_id] = identifier[sdk_git_id]
)
identifier[_git_clone_to_path] ( identifier[https_authenticated_url] , identifier[folder] )
keyword[if] identifier[pr_number] :
keyword[try] :
identifier[checkout_with_fetch] ( identifier[folder] , literal[string] . identifier[format] ( identifier[pr_number] ))
keyword[return]
keyword[except] identifier[Exception] :
keyword[pass]
identifier[checkout_with_fetch] ( identifier[folder] , literal[string] . identifier[format] ( identifier[pr_number] ))
keyword[if] identifier[branch_or_commit] :
identifier[repo] = identifier[Repo] ( identifier[str] ( identifier[folder] ))
identifier[repo] . identifier[git] . identifier[checkout] ( identifier[branch_or_commit] ) | def clone_to_path(gh_token, folder, sdk_git_id, branch_or_commit=None, *, pr_number=None):
"""Clone the given repo_id to the folder.
If a PR number is specified, fetch the magic branches
pull/<id>/head or pull/<id>/merge from GitHub. "merge" is tried first, with fallback to "head".
Beware that pr_number implies detached head, and then no push is possible.
If branch is specified, checkout this branch or commit finally.
:param str branch_or_commit: If specified, switch to this branch/commit.
:param int pr_number: PR number.
"""
_LOGGER.info('Clone SDK repository %s', sdk_git_id)
url_parsing = urlsplit(sdk_git_id)
sdk_git_id = url_parsing.path
if sdk_git_id.startswith('/'):
sdk_git_id = sdk_git_id[1:] # depends on [control=['if'], data=[]]
credentials_part = ''
if gh_token:
login = user_from_token(gh_token).login
credentials_part = '{user}:{token}@'.format(user=login, token=gh_token) # depends on [control=['if'], data=[]]
else:
_LOGGER.warning('Will clone the repo without writing credentials')
https_authenticated_url = 'https://{credentials}github.com/{sdk_git_id}.git'.format(credentials=credentials_part, sdk_git_id=sdk_git_id)
# Clone the repo
_git_clone_to_path(https_authenticated_url, folder)
# If this is a PR, do some fetch to improve the number of SHA1 available
if pr_number:
try:
checkout_with_fetch(folder, 'pull/{}/merge'.format(pr_number))
return # depends on [control=['try'], data=[]]
except Exception: # pylint: disable=broad-except
pass # Assume "merge" doesn't exist anymore, fetch "head" # depends on [control=['except'], data=[]]
checkout_with_fetch(folder, 'pull/{}/head'.format(pr_number)) # depends on [control=['if'], data=[]]
# If there is SHA1, checkout it. If PR number was given, SHA1 could be inside that PR.
if branch_or_commit:
repo = Repo(str(folder))
repo.git.checkout(branch_or_commit) # depends on [control=['if'], data=[]] |
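# --- Usage sketch (added illustration, not from the source) ---
# Clone a pull request into a temporary folder. The repo id and PR number are
# hypothetical values; the call relies on this module's own imports (Repo,
# urlsplit, _LOGGER) being in scope.
import tempfile

with tempfile.TemporaryDirectory() as workdir:
    clone_to_path(gh_token=None,  # anonymous clone, no credentials written
                  folder=workdir,
                  sdk_git_id='Azure/azure-sdk-for-python',
                  pr_number=42)   # tries pull/42/merge, falls back to pull/42/head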
def update(self, other, join='left', overwrite=True, filter_func=None,
errors='ignore'):
"""
Modify Panel in place using non-NA values from other Panel.
May also use object coercible to Panel. Will align on items.
Parameters
----------
other : Panel, or object coercible to Panel
The object from which the caller will be updated.
join : {'left', 'right', 'outer', 'inner'}, default 'left'
How individual DataFrames are joined.
overwrite : bool, default True
If True then overwrite values for common keys in the calling Panel.
filter_func : callable(1d-array) -> 1d-array<bool>, default None
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise an error if a DataFrame and other both contain non-NA data in the same place.
.. versionchanged :: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
See Also
--------
DataFrame.update : Similar method for DataFrames.
dict.update : Similar method for dictionaries.
"""
if not isinstance(other, self._constructor):
other = self._constructor(other)
axis_name = self._info_axis_name
axis_values = self._info_axis
other = other.reindex(**{axis_name: axis_values})
for frame in axis_values:
self[frame].update(other[frame], join=join, overwrite=overwrite,
filter_func=filter_func, errors=errors) | def function[update, parameter[self, other, join, overwrite, filter_func, errors]]:
constant[
Modify Panel in place using non-NA values from other Panel.
May also use object coercible to Panel. Will align on items.
Parameters
----------
other : Panel, or object coercible to Panel
The object from which the caller will be updated.
join : {'left', 'right', 'outer', 'inner'}, default 'left'
How individual DataFrames are joined.
overwrite : bool, default True
If True then overwrite values for common keys in the calling Panel.
filter_func : callable(1d-array) -> 1d-array<bool>, default None
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise an error if a DataFrame and other both contain non-NA data in the same place.
.. versionchanged :: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
See Also
--------
DataFrame.update : Similar method for DataFrames.
dict.update : Similar method for dictionaries.
]
if <ast.UnaryOp object at 0x7da1b20b5090> begin[:]
variable[other] assign[=] call[name[self]._constructor, parameter[name[other]]]
variable[axis_name] assign[=] name[self]._info_axis_name
variable[axis_values] assign[=] name[self]._info_axis
variable[other] assign[=] call[name[other].reindex, parameter[]]
for taget[name[frame]] in starred[name[axis_values]] begin[:]
call[call[name[self]][name[frame]].update, parameter[call[name[other]][name[frame]]]] | keyword[def] identifier[update] ( identifier[self] , identifier[other] , identifier[join] = literal[string] , identifier[overwrite] = keyword[True] , identifier[filter_func] = keyword[None] ,
identifier[errors] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[other] , identifier[self] . identifier[_constructor] ):
identifier[other] = identifier[self] . identifier[_constructor] ( identifier[other] )
identifier[axis_name] = identifier[self] . identifier[_info_axis_name]
identifier[axis_values] = identifier[self] . identifier[_info_axis]
identifier[other] = identifier[other] . identifier[reindex] (**{ identifier[axis_name] : identifier[axis_values] })
keyword[for] identifier[frame] keyword[in] identifier[axis_values] :
identifier[self] [ identifier[frame] ]. identifier[update] ( identifier[other] [ identifier[frame] ], identifier[join] = identifier[join] , identifier[overwrite] = identifier[overwrite] ,
identifier[filter_func] = identifier[filter_func] , identifier[errors] = identifier[errors] ) | def update(self, other, join='left', overwrite=True, filter_func=None, errors='ignore'):
"""
Modify Panel in place using non-NA values from other Panel.
May also use object coercible to Panel. Will align on items.
Parameters
----------
other : Panel, or object coercible to Panel
The object from which the caller will be updated.
join : {'left', 'right', 'outer', 'inner'}, default 'left'
How individual DataFrames are joined.
overwrite : bool, default True
If True then overwrite values for common keys in the calling Panel.
filter_func : callable(1d-array) -> 1d-array<bool>, default None
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise an error if a DataFrame and other both.
.. versionchanged :: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
See Also
--------
DataFrame.update : Similar method for DataFrames.
dict.update : Similar method for dictionaries.
"""
if not isinstance(other, self._constructor):
other = self._constructor(other) # depends on [control=['if'], data=[]]
axis_name = self._info_axis_name
axis_values = self._info_axis
other = other.reindex(**{axis_name: axis_values})
for frame in axis_values:
self[frame].update(other[frame], join=join, overwrite=overwrite, filter_func=filter_func, errors=errors) # depends on [control=['for'], data=['frame']] |
def load_configuration(self) -> None:
"""
Read the configuration from a configuration file
"""
config_file = self.default_config_file
if self.config_file:
config_file = self.config_file
self.config = ConfigParser()
self.config.read(config_file) | def function[load_configuration, parameter[self]]:
constant[
Read the configuration from a configuration file
]
variable[config_file] assign[=] name[self].default_config_file
if name[self].config_file begin[:]
variable[config_file] assign[=] name[self].config_file
name[self].config assign[=] call[name[ConfigParser], parameter[]]
call[name[self].config.read, parameter[name[config_file]]] | keyword[def] identifier[load_configuration] ( identifier[self] )-> keyword[None] :
literal[string]
identifier[config_file] = identifier[self] . identifier[default_config_file]
keyword[if] identifier[self] . identifier[config_file] :
identifier[config_file] = identifier[self] . identifier[config_file]
identifier[self] . identifier[config] = identifier[ConfigParser] ()
identifier[self] . identifier[config] . identifier[read] ( identifier[config_file] ) | def load_configuration(self) -> None:
"""
Read the configuration from a configuration file
"""
config_file = self.default_config_file
if self.config_file:
config_file = self.config_file # depends on [control=['if'], data=[]]
self.config = ConfigParser()
self.config.read(config_file) |
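# --- Usage sketch (added illustration, not from the source) ---
# load_configuration expects its host object to expose default_config_file
# and an optional config_file override; AppSettings below is a hypothetical
# minimal host class.
from configparser import ConfigParser

class AppSettings:
    default_config_file = 'settings.ini'  # assumed default location
    config_file = None                     # set to a path to override
    load_configuration = load_configuration

settings = AppSettings()
settings.load_configuration()  # parsed result is available as settings.config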
def refresh(self, leave_clean=False):
"""Attempt to pull-with-rebase from upstream. This is implemented as fetch-plus-rebase
so that we can distinguish between errors in the fetch stage (likely network errors)
and errors in the rebase stage (conflicts). If leave_clean is true, then in the event
of a rebase failure, the branch will be rolled back. Otherwise, it will be left in the
conflicted state.
"""
remote, merge = self._get_upstream()
self._check_call(['fetch', '--tags', remote, merge], raise_type=Scm.RemoteException)
try:
self._check_call(['rebase', 'FETCH_HEAD'], raise_type=Scm.LocalException)
except Scm.LocalException as e:
if leave_clean:
logger.debug('Cleaning up after failed rebase')
try:
self._check_call(['rebase', '--abort'], raise_type=Scm.LocalException)
except Scm.LocalException as abort_exc:
logger.debug('Failed to clean up after failed rebase')
logger.debug(traceback.format_exc(abort_exc))
# But let the original exception propagate, since that's the more interesting one
raise e | def function[refresh, parameter[self, leave_clean]]:
constant[Attempt to pull-with-rebase from upstream. This is implemented as fetch-plus-rebase
so that we can distinguish between errors in the fetch stage (likely network errors)
and errors in the rebase stage (conflicts). If leave_clean is true, then in the event
of a rebase failure, the branch will be rolled back. Otherwise, it will be left in the
conflicted state.
]
<ast.Tuple object at 0x7da1b1e5c970> assign[=] call[name[self]._get_upstream, parameter[]]
call[name[self]._check_call, parameter[list[[<ast.Constant object at 0x7da1b1e5c850>, <ast.Constant object at 0x7da1b1e5f580>, <ast.Name object at 0x7da1b1e5e860>, <ast.Name object at 0x7da1b1e5f070>]]]]
<ast.Try object at 0x7da1b1e5c550> | keyword[def] identifier[refresh] ( identifier[self] , identifier[leave_clean] = keyword[False] ):
literal[string]
identifier[remote] , identifier[merge] = identifier[self] . identifier[_get_upstream] ()
identifier[self] . identifier[_check_call] ([ literal[string] , literal[string] , identifier[remote] , identifier[merge] ], identifier[raise_type] = identifier[Scm] . identifier[RemoteException] )
keyword[try] :
identifier[self] . identifier[_check_call] ([ literal[string] , literal[string] ], identifier[raise_type] = identifier[Scm] . identifier[LocalException] )
keyword[except] identifier[Scm] . identifier[LocalException] keyword[as] identifier[e] :
keyword[if] identifier[leave_clean] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[try] :
identifier[self] . identifier[_check_call] ([ literal[string] , literal[string] ], identifier[raise_type] = identifier[Scm] . identifier[LocalException] )
keyword[except] identifier[Scm] . identifier[LocalException] keyword[as] identifier[abort_exc] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[logger] . identifier[debug] ( identifier[traceback] . identifier[format_exc] ( identifier[abort_exc] ))
keyword[raise] identifier[e] | def refresh(self, leave_clean=False):
"""Attempt to pull-with-rebase from upstream. This is implemented as fetch-plus-rebase
so that we can distinguish between errors in the fetch stage (likely network errors)
and errors in the rebase stage (conflicts). If leave_clean is true, then in the event
of a rebase failure, the branch will be rolled back. Otherwise, it will be left in the
conflicted state.
"""
(remote, merge) = self._get_upstream()
self._check_call(['fetch', '--tags', remote, merge], raise_type=Scm.RemoteException)
try:
self._check_call(['rebase', 'FETCH_HEAD'], raise_type=Scm.LocalException) # depends on [control=['try'], data=[]]
except Scm.LocalException as e:
if leave_clean:
logger.debug('Cleaning up after failed rebase')
try:
self._check_call(['rebase', '--abort'], raise_type=Scm.LocalException) # depends on [control=['try'], data=[]]
except Scm.LocalException as abort_exc:
logger.debug('Failed to clean up after failed rebase')
logger.debug(traceback.format_exc(abort_exc)) # depends on [control=['except'], data=['abort_exc']] # depends on [control=['if'], data=[]]
# But let the original exception propagate, since that's the more interesting one
raise e # depends on [control=['except'], data=['e']] |
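# --- Usage sketch (added illustration, not from the source) ---
# refresh() is a method on an Scm subclass bound to a local git checkout, so
# it needs an instance; the Git class name and constructor below are assumed.
scm = Git('/path/to/checkout')     # hypothetical Scm subclass
try:
    scm.refresh(leave_clean=True)  # roll the branch back on rebase conflicts
except Scm.RemoteException:
    logger.debug('Fetch failed; the upstream remote may be unreachable')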
def _remote_browser_class(env_vars, tags=None):
"""
Returns class, args, and kwargs needed to instantiate the remote browser.
"""
if tags is None:
tags = []
# Interpret the environment variables, raising an exception if they're
# invalid
envs = _required_envs(env_vars)
envs.update(_optional_envs())
# Turn the environment variables into a dictionary of desired capabilities
caps = _capabilities_dict(envs, tags)
if 'accessKey' in caps:
LOGGER.info(u"Using SauceLabs: %s %s %s", caps['platform'], caps['browserName'], caps['version'])
else:
LOGGER.info(u"Using Remote Browser: %s", caps['browserName'])
# Create and return a new Browser
# We assume that the WebDriver end-point is running locally (e.g. using
# SauceConnect)
url = u"http://{0}:{1}/wd/hub".format(
envs['SELENIUM_HOST'], envs['SELENIUM_PORT'])
browser_args = []
browser_kwargs = {
'command_executor': url,
'desired_capabilities': caps,
}
if caps['browserName'] == 'firefox':
browser_kwargs['browser_profile'] = _firefox_profile()
return webdriver.Remote, browser_args, browser_kwargs | def function[_remote_browser_class, parameter[env_vars, tags]]:
constant[
Returns class, kwargs, and args needed to instantiate the remote browser.
]
if compare[name[tags] is constant[None]] begin[:]
variable[tags] assign[=] list[[]]
variable[envs] assign[=] call[name[_required_envs], parameter[name[env_vars]]]
call[name[envs].update, parameter[call[name[_optional_envs], parameter[]]]]
variable[caps] assign[=] call[name[_capabilities_dict], parameter[name[envs], name[tags]]]
if compare[constant[accessKey] in name[caps]] begin[:]
call[name[LOGGER].info, parameter[constant[Using SauceLabs: %s %s %s], call[name[caps]][constant[platform]], call[name[caps]][constant[browserName]], call[name[caps]][constant[version]]]]
variable[url] assign[=] call[constant[http://{0}:{1}/wd/hub].format, parameter[call[name[envs]][constant[SELENIUM_HOST]], call[name[envs]][constant[SELENIUM_PORT]]]]
variable[browser_args] assign[=] list[[]]
variable[browser_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da20c76ce50>, <ast.Constant object at 0x7da20c76f040>], [<ast.Name object at 0x7da20c76ee60>, <ast.Name object at 0x7da20c76d6c0>]]
if compare[call[name[caps]][constant[browserName]] equal[==] constant[firefox]] begin[:]
call[name[browser_kwargs]][constant[browser_profile]] assign[=] call[name[_firefox_profile], parameter[]]
return[tuple[[<ast.Attribute object at 0x7da20c76dc30>, <ast.Name object at 0x7da20c76d5a0>, <ast.Name object at 0x7da20c76dbd0>]]] | keyword[def] identifier[_remote_browser_class] ( identifier[env_vars] , identifier[tags] = keyword[None] ):
literal[string]
keyword[if] identifier[tags] keyword[is] keyword[None] :
identifier[tags] =[]
identifier[envs] = identifier[_required_envs] ( identifier[env_vars] )
identifier[envs] . identifier[update] ( identifier[_optional_envs] ())
identifier[caps] = identifier[_capabilities_dict] ( identifier[envs] , identifier[tags] )
keyword[if] literal[string] keyword[in] identifier[caps] :
identifier[LOGGER] . identifier[info] ( literal[string] , identifier[caps] [ literal[string] ], identifier[caps] [ literal[string] ], identifier[caps] [ literal[string] ])
keyword[else] :
identifier[LOGGER] . identifier[info] ( literal[string] , identifier[caps] [ literal[string] ])
identifier[url] = literal[string] . identifier[format] (
identifier[envs] [ literal[string] ], identifier[envs] [ literal[string] ])
identifier[browser_args] =[]
identifier[browser_kwargs] ={
literal[string] : identifier[url] ,
literal[string] : identifier[caps] ,
}
keyword[if] identifier[caps] [ literal[string] ]== literal[string] :
identifier[browser_kwargs] [ literal[string] ]= identifier[_firefox_profile] ()
keyword[return] identifier[webdriver] . identifier[Remote] , identifier[browser_args] , identifier[browser_kwargs] | def _remote_browser_class(env_vars, tags=None):
"""
Returns class, kwargs, and args needed to instantiate the remote browser.
"""
if tags is None:
tags = [] # depends on [control=['if'], data=['tags']]
# Interpret the environment variables, raising an exception if they're
# invalid
envs = _required_envs(env_vars)
envs.update(_optional_envs())
# Turn the environment variables into a dictionary of desired capabilities
caps = _capabilities_dict(envs, tags)
if 'accessKey' in caps:
LOGGER.info(u'Using SauceLabs: %s %s %s', caps['platform'], caps['browserName'], caps['version']) # depends on [control=['if'], data=['caps']]
else:
LOGGER.info(u'Using Remote Browser: %s', caps['browserName'])
# Create and return a new Browser
# We assume that the WebDriver end-point is running locally (e.g. using
# SauceConnect)
url = u'http://{0}:{1}/wd/hub'.format(envs['SELENIUM_HOST'], envs['SELENIUM_PORT'])
browser_args = []
browser_kwargs = {'command_executor': url, 'desired_capabilities': caps}
if caps['browserName'] == 'firefox':
browser_kwargs['browser_profile'] = _firefox_profile() # depends on [control=['if'], data=[]]
return (webdriver.Remote, browser_args, browser_kwargs) |
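# --- Usage sketch (added illustration, not from the source) ---
# env_vars appears to name the environment variables that _required_envs
# resolves (an assumption, as is the exact set of required names). The
# returned triple is unpacked and instantiated by the caller.
import os

os.environ.setdefault('SELENIUM_BROWSER', 'chrome')
os.environ.setdefault('SELENIUM_HOST', 'localhost')
os.environ.setdefault('SELENIUM_PORT', '4444')
browser_class, browser_args, browser_kwargs = _remote_browser_class(
    ['SELENIUM_BROWSER', 'SELENIUM_HOST', 'SELENIUM_PORT'])
browser = browser_class(*browser_args, **browser_kwargs)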
def _get_command(classes):
"""Associates each command class with command depending on setup.cfg
"""
commands = {}
setup_file = os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')),
'setup.cfg')
for line in open(setup_file, 'r'):
for cl in classes:
if cl in line:
commands[cl] = line.split(' = ')[0].strip().replace('_', ' ')
return commands | def function[_get_command, parameter[classes]]:
constant[Associates each command class with command depending on setup.cfg
]
variable[commands] assign[=] dictionary[[], []]
variable[setup_file] assign[=] call[name[os].path.join, parameter[call[name[os].path.abspath, parameter[call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[__file__]]], constant[../..]]]]], constant[setup.cfg]]]
for taget[name[line]] in starred[call[name[open], parameter[name[setup_file], constant[r]]]] begin[:]
for taget[name[cl]] in starred[name[classes]] begin[:]
if compare[name[cl] in name[line]] begin[:]
call[name[commands]][name[cl]] assign[=] call[call[call[call[name[line].split, parameter[constant[ = ]]]][constant[0]].strip, parameter[]].replace, parameter[constant[_], constant[ ]]]
return[name[commands]] | keyword[def] identifier[_get_command] ( identifier[classes] ):
literal[string]
identifier[commands] ={}
identifier[setup_file] = identifier[os] . identifier[path] . identifier[join] (
identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ), literal[string] )),
literal[string] )
keyword[for] identifier[line] keyword[in] identifier[open] ( identifier[setup_file] , literal[string] ):
keyword[for] identifier[cl] keyword[in] identifier[classes] :
keyword[if] identifier[cl] keyword[in] identifier[line] :
identifier[commands] [ identifier[cl] ]= identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] (). identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[commands] | def _get_command(classes):
"""Associates each command class with command depending on setup.cfg
"""
commands = {}
setup_file = os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')), 'setup.cfg')
for line in open(setup_file, 'r'):
for cl in classes:
if cl in line:
commands[cl] = line.split(' = ')[0].strip().replace('_', ' ') # depends on [control=['if'], data=['cl', 'line']] # depends on [control=['for'], data=['cl']] # depends on [control=['for'], data=['line']]
return commands |
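# --- Worked example (added illustration, not from the source) ---
# A hypothetical setup.cfg entry-point line of the shape the loop matches,
# showing how a class name maps to its space-separated command:
line = 'do_thing = mypkg.cli:DoThing\n'
command = line.split(' = ')[0].strip().replace('_', ' ')
assert command == 'do thing'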
def average_colors(c1, c2):
''' Average the values of two colors together '''
r = int((c1[0] + c2[0])/2)
g = int((c1[1] + c2[1])/2)
b = int((c1[2] + c2[2])/2)
return (r, g, b) | def function[average_colors, parameter[c1, c2]]:
constant[ Average the values of two colors together ]
variable[r] assign[=] call[name[int], parameter[binary_operation[binary_operation[call[name[c1]][constant[0]] + call[name[c2]][constant[0]]] / constant[2]]]]
variable[g] assign[=] call[name[int], parameter[binary_operation[binary_operation[call[name[c1]][constant[1]] + call[name[c2]][constant[1]]] / constant[2]]]]
variable[b] assign[=] call[name[int], parameter[binary_operation[binary_operation[call[name[c1]][constant[2]] + call[name[c2]][constant[2]]] / constant[2]]]]
return[tuple[[<ast.Name object at 0x7da1b07f46a0>, <ast.Name object at 0x7da1b07f67d0>, <ast.Name object at 0x7da1b07f5270>]]] | keyword[def] identifier[average_colors] ( identifier[c1] , identifier[c2] ):
literal[string]
identifier[r] = identifier[int] (( identifier[c1] [ literal[int] ]+ identifier[c2] [ literal[int] ])/ literal[int] )
identifier[g] = identifier[int] (( identifier[c1] [ literal[int] ]+ identifier[c2] [ literal[int] ])/ literal[int] )
identifier[b] = identifier[int] (( identifier[c1] [ literal[int] ]+ identifier[c2] [ literal[int] ])/ literal[int] )
keyword[return] ( identifier[r] , identifier[g] , identifier[b] ) | def average_colors(c1, c2):
""" Average the values of two colors together """
r = int((c1[0] + c2[0]) / 2)
g = int((c1[1] + c2[1]) / 2)
b = int((c1[2] + c2[2]) / 2)
return (r, g, b) |
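# Example (added illustration): averaging pure red and pure blue, with the
# int() truncation making 127.5 round down to 127.
assert average_colors((255, 0, 0), (0, 0, 255)) == (127, 0, 127)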
def update(self, bqm, ignore_info=True):
"""Update one binary quadratic model from another.
Args:
bqm (:class:`.BinaryQuadraticModel`):
The updating binary quadratic model. Any variables in the updating
model are added to the updated model. Values of biases and the offset
in the updating model are added to the corresponding values in
the updated model.
ignore_info (bool, optional, default=True):
If True, info in the given binary quadratic model is ignored, otherwise
:attr:`.BinaryQuadraticModel.info` is updated with the given binary quadratic
model's info, potentially overwriting values.
Examples:
This example creates two binary quadratic models and updates the first
from the second.
>>> import dimod
...
>>> linear1 = {1: 1, 2: 2}
>>> quadratic1 = {(1, 2): 12}
>>> bqm1 = dimod.BinaryQuadraticModel(linear1, quadratic1, 0.5, dimod.SPIN)
>>> bqm1.info = {'BQM number 1'}
>>> linear2 = {2: 0.25, 3: 0.35}
>>> quadratic2 = {(2, 3): 23}
>>> bqm2 = dimod.BinaryQuadraticModel(linear2, quadratic2, 0.75, dimod.SPIN)
>>> bqm2.info = {'BQM number 2'}
>>> bqm1.update(bqm2)
>>> bqm1.offset
1.25
>>> 'BQM number 2' in bqm1.info
False
>>> bqm1.update(bqm2, ignore_info=False)
>>> 'BQM number 2' in bqm1.info
True
>>> bqm1.offset
2.0
"""
self.add_variables_from(bqm.linear, vartype=bqm.vartype)
self.add_interactions_from(bqm.quadratic, vartype=bqm.vartype)
self.add_offset(bqm.offset)
if not ignore_info:
self.info.update(bqm.info) | def function[update, parameter[self, bqm, ignore_info]]:
constant[Update one binary quadratic model from another.
Args:
bqm (:class:`.BinaryQuadraticModel`):
The updating binary quadratic model. Any variables in the updating
model are added to the updated model. Values of biases and the offset
in the updating model are added to the corresponding values in
the updated model.
ignore_info (bool, optional, default=True):
If True, info in the given binary quadratic model is ignored, otherwise
:attr:`.BinaryQuadraticModel.info` is updated with the given binary quadratic
model's info, potentially overwriting values.
Examples:
This example creates two binary quadratic models and updates the first
from the second.
>>> import dimod
...
>>> linear1 = {1: 1, 2: 2}
>>> quadratic1 = {(1, 2): 12}
>>> bqm1 = dimod.BinaryQuadraticModel(linear1, quadratic1, 0.5, dimod.SPIN)
>>> bqm1.info = {'BQM number 1'}
>>> linear2 = {2: 0.25, 3: 0.35}
>>> quadratic2 = {(2, 3): 23}
>>> bqm2 = dimod.BinaryQuadraticModel(linear2, quadratic2, 0.75, dimod.SPIN)
>>> bqm2.info = {'BQM number 2'}
>>> bqm1.update(bqm2)
>>> bqm1.offset
1.25
>>> 'BQM number 2' in bqm1.info
False
>>> bqm1.update(bqm2, ignore_info=False)
>>> 'BQM number 2' in bqm1.info
True
>>> bqm1.offset
2.0
]
call[name[self].add_variables_from, parameter[name[bqm].linear]]
call[name[self].add_interactions_from, parameter[name[bqm].quadratic]]
call[name[self].add_offset, parameter[name[bqm].offset]]
if <ast.UnaryOp object at 0x7da1b07f75b0> begin[:]
call[name[self].info.update, parameter[name[bqm].info]] | keyword[def] identifier[update] ( identifier[self] , identifier[bqm] , identifier[ignore_info] = keyword[True] ):
literal[string]
identifier[self] . identifier[add_variables_from] ( identifier[bqm] . identifier[linear] , identifier[vartype] = identifier[bqm] . identifier[vartype] )
identifier[self] . identifier[add_interactions_from] ( identifier[bqm] . identifier[quadratic] , identifier[vartype] = identifier[bqm] . identifier[vartype] )
identifier[self] . identifier[add_offset] ( identifier[bqm] . identifier[offset] )
keyword[if] keyword[not] identifier[ignore_info] :
identifier[self] . identifier[info] . identifier[update] ( identifier[bqm] . identifier[info] ) | def update(self, bqm, ignore_info=True):
"""Update one binary quadratic model from another.
Args:
bqm (:class:`.BinaryQuadraticModel`):
The updating binary quadratic model. Any variables in the updating
model are added to the updated model. Values of biases and the offset
in the updating model are added to the corresponding values in
the updated model.
ignore_info (bool, optional, default=True):
If True, info in the given binary quadratic model is ignored, otherwise
:attr:`.BinaryQuadraticModel.info` is updated with the given binary quadratic
model's info, potentially overwriting values.
Examples:
This example creates two binary quadratic models and updates the first
from the second.
>>> import dimod
...
>>> linear1 = {1: 1, 2: 2}
>>> quadratic1 = {(1, 2): 12}
>>> bqm1 = dimod.BinaryQuadraticModel(linear1, quadratic1, 0.5, dimod.SPIN)
>>> bqm1.info = {'BQM number 1'}
>>> linear2 = {2: 0.25, 3: 0.35}
>>> quadratic2 = {(2, 3): 23}
>>> bqm2 = dimod.BinaryQuadraticModel(linear2, quadratic2, 0.75, dimod.SPIN)
>>> bqm2.info = {'BQM number 2'}
>>> bqm1.update(bqm2)
>>> bqm1.offset
1.25
>>> 'BQM number 2' in bqm1.info
False
>>> bqm1.update(bqm2, ignore_info=False)
>>> 'BQM number 2' in bqm1.info
True
>>> bqm1.offset
2.0
"""
self.add_variables_from(bqm.linear, vartype=bqm.vartype)
self.add_interactions_from(bqm.quadratic, vartype=bqm.vartype)
self.add_offset(bqm.offset)
if not ignore_info:
self.info.update(bqm.info) # depends on [control=['if'], data=[]] |
def chebyshev(coefs, time, domain):
"""Evaluate a Chebyshev Polynomial
Args:
coefs (list, np.array): Coefficients defining the polynomial
time (int, float): Time where to evaluate the polynomial
domain (list, tuple): Domain (or time interval) for which the polynomial is defined: [left, right]
Reference: Appendix A in the MSG Level 1.5 Image Data Format Description.
"""
return Chebyshev(coefs, domain=domain)(time) - 0.5 * coefs[0] | def function[chebyshev, parameter[coefs, time, domain]]:
constant[Evaluate a Chebyshev Polynomial
Args:
coefs (list, np.array): Coefficients defining the polynomial
time (int, float): Time where to evaluate the polynomial
domain (list, tuple): Domain (or time interval) for which the polynomial is defined: [left, right]
Reference: Appendix A in the MSG Level 1.5 Image Data Format Description.
]
return[binary_operation[call[call[name[Chebyshev], parameter[name[coefs]]], parameter[name[time]]] - binary_operation[constant[0.5] * call[name[coefs]][constant[0]]]]] | keyword[def] identifier[chebyshev] ( identifier[coefs] , identifier[time] , identifier[domain] ):
literal[string]
keyword[return] identifier[Chebyshev] ( identifier[coefs] , identifier[domain] = identifier[domain] )( identifier[time] )- literal[int] * identifier[coefs] [ literal[int] ] | def chebyshev(coefs, time, domain):
"""Evaluate a Chebyshev Polynomial
Args:
coefs (list, np.array): Coefficients defining the polynomial
time (int, float): Time where to evaluate the polynomial
domain (list, tuple): Domain (or time interval) for which the polynomial is defined: [left, right]
Reference: Appendix A in the MSG Level 1.5 Image Data Format Description.
"""
return Chebyshev(coefs, domain=domain)(time) - 0.5 * coefs[0] |
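# --- Worked example (added illustration, not from the source) ---
# At the midpoint of the domain the mapped argument is 0, so only the T0 term
# survives and the result is c0 - 0.5*c0; the function relies on this
# module's numpy.polynomial Chebyshev import.
assert chebyshev([2.0, 1.0], time=5.0, domain=[0.0, 10.0]) == 1.0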
def zscore(bars, window=20, stds=1, col='close'):
""" get zscore of price """
std = numpy_rolling_std(bars[col], window)
mean = numpy_rolling_mean(bars[col], window)
return (bars[col] - mean) / (std * stds) | def function[zscore, parameter[bars, window, stds, col]]:
constant[ get zscore of price ]
variable[std] assign[=] call[name[numpy_rolling_std], parameter[call[name[bars]][name[col]], name[window]]]
variable[mean] assign[=] call[name[numpy_rolling_mean], parameter[call[name[bars]][name[col]], name[window]]]
return[binary_operation[binary_operation[call[name[bars]][name[col]] - name[mean]] / binary_operation[name[std] * name[stds]]]] | keyword[def] identifier[zscore] ( identifier[bars] , identifier[window] = literal[int] , identifier[stds] = literal[int] , identifier[col] = literal[string] ):
literal[string]
identifier[std] = identifier[numpy_rolling_std] ( identifier[bars] [ identifier[col] ], identifier[window] )
identifier[mean] = identifier[numpy_rolling_mean] ( identifier[bars] [ identifier[col] ], identifier[window] )
keyword[return] ( identifier[bars] [ identifier[col] ]- identifier[mean] )/( identifier[std] * identifier[stds] ) | def zscore(bars, window=20, stds=1, col='close'):
""" get zscore of price """
std = numpy_rolling_std(bars[col], window)
mean = numpy_rolling_mean(bars[col], window)
return (bars[col] - mean) / (std * stds) |
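# --- Usage sketch (added illustration, not from the source) ---
# Assumes `bars` is a pandas-style frame with a 'close' column and that the
# module's numpy_rolling_std/numpy_rolling_mean helpers accept that column.
import pandas as pd

bars = pd.DataFrame({'close': [10.0, 10.5, 10.2, 10.8, 11.0]})
z = zscore(bars, window=3)  # leading values are NaN until the window fills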
def build_gui(self, container):
"""
This method is called when the plugin is invoked. It builds the
GUI used by the plugin into the widget layout passed as
``container``.
This method could be called several times if the plugin is opened
and closed. The method may be omitted if there is no GUI for the
plugin.
This specific example uses the GUI widget set agnostic wrappers
to build the GUI, but you can also just as easily use explicit
toolkit calls here if you only want to support one widget set.
"""
top = Widgets.VBox()
top.set_border_width(4)
# this is a little trick for making plugins that work either in
# a vertical or horizontal orientation. It returns a box container,
# a scroll widget and an orientation ('vertical', 'horizontal')
vbox, sw, orientation = Widgets.get_oriented_box(container)
vbox.set_border_width(4)
vbox.set_spacing(2)
# Take a text widget to show some instructions
self.msg_font = self.fv.get_font("sans", 12)
tw = Widgets.TextArea(wrap=True, editable=False)
tw.set_font(self.msg_font)
self.tw = tw
# Frame for instructions and add the text widget with another
# blank widget to stretch as needed to fill empty space
fr = Widgets.Frame("Status")
fr.set_widget(tw)
vbox.add_widget(fr, stretch=0)
# Add a spacer to stretch the rest of the way to the end of the
# plugin space
spacer = Widgets.Label('')
vbox.add_widget(spacer, stretch=1)
# scroll bars will allow lots of content to be accessed
top.add_widget(sw, stretch=1)
# A button box that is always visible at the bottom
btns = Widgets.HBox()
btns.set_spacing(3)
# Add a close button for the convenience of the user
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns, stretch=0)
# Add our GUI to the container
container.add_widget(top, stretch=1)
# NOTE: if you are building a GUI using a specific widget toolkit
# (e.g. Qt) GUI calls, you need to extract the widget or layout
# from the non-toolkit specific container wrapper and call on that
# to pack your widget, e.g.:
#cw = container.get_widget()
#cw.addWidget(widget, stretch=1)
self.gui_up = True | def function[build_gui, parameter[self, container]]:
constant[
This method is called when the plugin is invoked. It builds the
GUI used by the plugin into the widget layout passed as
``container``.
This method could be called several times if the plugin is opened
and closed. The method may be omitted if there is no GUI for the
plugin.
This specific example uses the GUI widget set agnostic wrappers
to build the GUI, but you can also just as easily use explicit
toolkit calls here if you only want to support one widget set.
]
variable[top] assign[=] call[name[Widgets].VBox, parameter[]]
call[name[top].set_border_width, parameter[constant[4]]]
<ast.Tuple object at 0x7da1b0d0d090> assign[=] call[name[Widgets].get_oriented_box, parameter[name[container]]]
call[name[vbox].set_border_width, parameter[constant[4]]]
call[name[vbox].set_spacing, parameter[constant[2]]]
name[self].msg_font assign[=] call[name[self].fv.get_font, parameter[constant[sans], constant[12]]]
variable[tw] assign[=] call[name[Widgets].TextArea, parameter[]]
call[name[tw].set_font, parameter[name[self].msg_font]]
name[self].tw assign[=] name[tw]
variable[fr] assign[=] call[name[Widgets].Frame, parameter[constant[Status]]]
call[name[fr].set_widget, parameter[name[tw]]]
call[name[vbox].add_widget, parameter[name[fr]]]
variable[spacer] assign[=] call[name[Widgets].Label, parameter[constant[]]]
call[name[vbox].add_widget, parameter[name[spacer]]]
call[name[top].add_widget, parameter[name[sw]]]
variable[btns] assign[=] call[name[Widgets].HBox, parameter[]]
call[name[btns].set_spacing, parameter[constant[3]]]
variable[btn] assign[=] call[name[Widgets].Button, parameter[constant[Close]]]
call[name[btn].add_callback, parameter[constant[activated], <ast.Lambda object at 0x7da1b0d0e530>]]
call[name[btns].add_widget, parameter[name[btn]]]
call[name[btns].add_widget, parameter[call[name[Widgets].Label, parameter[constant[]]]]]
call[name[top].add_widget, parameter[name[btns]]]
call[name[container].add_widget, parameter[name[top]]]
name[self].gui_up assign[=] constant[True] | keyword[def] identifier[build_gui] ( identifier[self] , identifier[container] ):
literal[string]
identifier[top] = identifier[Widgets] . identifier[VBox] ()
identifier[top] . identifier[set_border_width] ( literal[int] )
identifier[vbox] , identifier[sw] , identifier[orientation] = identifier[Widgets] . identifier[get_oriented_box] ( identifier[container] )
identifier[vbox] . identifier[set_border_width] ( literal[int] )
identifier[vbox] . identifier[set_spacing] ( literal[int] )
identifier[self] . identifier[msg_font] = identifier[self] . identifier[fv] . identifier[get_font] ( literal[string] , literal[int] )
identifier[tw] = identifier[Widgets] . identifier[TextArea] ( identifier[wrap] = keyword[True] , identifier[editable] = keyword[False] )
identifier[tw] . identifier[set_font] ( identifier[self] . identifier[msg_font] )
identifier[self] . identifier[tw] = identifier[tw]
identifier[fr] = identifier[Widgets] . identifier[Frame] ( literal[string] )
identifier[fr] . identifier[set_widget] ( identifier[tw] )
identifier[vbox] . identifier[add_widget] ( identifier[fr] , identifier[stretch] = literal[int] )
identifier[spacer] = identifier[Widgets] . identifier[Label] ( literal[string] )
identifier[vbox] . identifier[add_widget] ( identifier[spacer] , identifier[stretch] = literal[int] )
identifier[top] . identifier[add_widget] ( identifier[sw] , identifier[stretch] = literal[int] )
identifier[btns] = identifier[Widgets] . identifier[HBox] ()
identifier[btns] . identifier[set_spacing] ( literal[int] )
identifier[btn] = identifier[Widgets] . identifier[Button] ( literal[string] )
identifier[btn] . identifier[add_callback] ( literal[string] , keyword[lambda] identifier[w] : identifier[self] . identifier[close] ())
identifier[btns] . identifier[add_widget] ( identifier[btn] , identifier[stretch] = literal[int] )
identifier[btns] . identifier[add_widget] ( identifier[Widgets] . identifier[Label] ( literal[string] ), identifier[stretch] = literal[int] )
identifier[top] . identifier[add_widget] ( identifier[btns] , identifier[stretch] = literal[int] )
identifier[container] . identifier[add_widget] ( identifier[top] , identifier[stretch] = literal[int] )
identifier[self] . identifier[gui_up] = keyword[True] | def build_gui(self, container):
"""
This method is called when the plugin is invoked. It builds the
GUI used by the plugin into the widget layout passed as
``container``.
This method could be called several times if the plugin is opened
and closed. The method may be omitted if there is no GUI for the
plugin.
This specific example uses the GUI widget set agnostic wrappers
to build the GUI, but you can also just as easily use explicit
toolkit calls here if you only want to support one widget set.
"""
top = Widgets.VBox()
top.set_border_width(4)
# this is a little trick for making plugins that work either in
# a vertical or horizontal orientation. It returns a box container,
# a scroll widget and an orientation ('vertical', 'horizontal')
(vbox, sw, orientation) = Widgets.get_oriented_box(container)
vbox.set_border_width(4)
vbox.set_spacing(2)
# Take a text widget to show some instructions
self.msg_font = self.fv.get_font('sans', 12)
tw = Widgets.TextArea(wrap=True, editable=False)
tw.set_font(self.msg_font)
self.tw = tw
# Frame for instructions and add the text widget with another
# blank widget to stretch as needed to fill empty space
fr = Widgets.Frame('Status')
fr.set_widget(tw)
vbox.add_widget(fr, stretch=0)
# Add a spacer to stretch the rest of the way to the end of the
# plugin space
spacer = Widgets.Label('')
vbox.add_widget(spacer, stretch=1)
# scroll bars will allow lots of content to be accessed
top.add_widget(sw, stretch=1)
# A button box that is always visible at the bottom
btns = Widgets.HBox()
btns.set_spacing(3)
# Add a close button for the convenience of the user
btn = Widgets.Button('Close')
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns, stretch=0)
# Add our GUI to the container
container.add_widget(top, stretch=1)
# NOTE: if you are building a GUI using a specific widget toolkit
# (e.g. Qt) GUI calls, you need to extract the widget or layout
# from the non-toolkit specific container wrapper and call on that
# to pack your widget, e.g.:
#cw = container.get_widget()
#cw.addWidget(widget, stretch=1)
self.gui_up = True |
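# --- Usage sketch (added illustration, not from the source) ---
# The viewer framework normally calls build_gui with a toolkit-agnostic
# container; invoked directly it populates that container and sets gui_up
# (plugin and container below are hypothetical objects):
plugin.build_gui(container)
assert plugin.gui_up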
def receive_message(self, message, data):
""" Called when a media message is received. """
if data[MESSAGE_TYPE] == TYPE_MEDIA_STATUS:
self._process_media_status(data)
return True
return False | def function[receive_message, parameter[self, message, data]]:
constant[ Called when a media message is received. ]
if compare[call[name[data]][name[MESSAGE_TYPE]] equal[==] name[TYPE_MEDIA_STATUS]] begin[:]
call[name[self]._process_media_status, parameter[name[data]]]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[receive_message] ( identifier[self] , identifier[message] , identifier[data] ):
literal[string]
keyword[if] identifier[data] [ identifier[MESSAGE_TYPE] ]== identifier[TYPE_MEDIA_STATUS] :
identifier[self] . identifier[_process_media_status] ( identifier[data] )
keyword[return] keyword[True]
keyword[return] keyword[False] | def receive_message(self, message, data):
""" Called when a media message is received. """
if data[MESSAGE_TYPE] == TYPE_MEDIA_STATUS:
self._process_media_status(data)
return True # depends on [control=['if'], data=[]]
return False |
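# --- Usage sketch (added illustration, not from the source) ---
# Dispatching a media-status payload returns True; any other message type
# returns False (controller object and payload shape are hypothetical):
handled = controller.receive_message(None, {MESSAGE_TYPE: TYPE_MEDIA_STATUS})
assert handled is True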
def getDisplaySize(data_type_oid, type_modifier):
"""
Returns the column display size for the given Vertica type with
consideration of the type modifier.
The display size of a column is the maximum number of characters needed to
display data in character form.
"""
if data_type_oid == VerticaType.BOOL:
# T or F
return 1
elif data_type_oid == VerticaType.INT8:
# a sign and 19 digits if signed or 20 digits if unsigned
return 20
elif data_type_oid == VerticaType.FLOAT8:
# a sign, 15 digits, a decimal point, the letter E, a sign, and 3 digits
return 22
elif data_type_oid == VerticaType.NUMERIC:
# a sign, precision digits, and a decimal point
return getPrecision(data_type_oid, type_modifier) + 2
elif data_type_oid == VerticaType.DATE:
# yyyy-mm-dd, a space, and the calendar era (BC)
return 13
elif data_type_oid == VerticaType.TIME:
seconds_precision = getPrecision(data_type_oid, type_modifier)
if seconds_precision == 0:
# hh:mm:ss
return 8
else:
# hh:mm:ss.[fff...]
return 9 + seconds_precision
elif data_type_oid == VerticaType.TIMETZ:
seconds_precision = getPrecision(data_type_oid, type_modifier)
if seconds_precision == 0:
# hh:mm:ss, a sign, hh:mm
return 14
else:
# hh:mm:ss.[fff...], a sign, hh:mm
return 15 + seconds_precision
elif data_type_oid == VerticaType.TIMESTAMP:
seconds_precision = getPrecision(data_type_oid, type_modifier)
if seconds_precision == 0:
# yyyy-mm-dd hh:mm:ss, a space, and the calendar era (BC)
return 22
else:
# yyyy-mm-dd hh:mm:ss[.fff...], a space, and the calendar era (BC)
return 23 + seconds_precision
elif data_type_oid == VerticaType.TIMESTAMPTZ:
seconds_precision = getPrecision(data_type_oid, type_modifier)
if seconds_precision == 0:
# yyyy-mm-dd hh:mm:ss, a sign, hh:mm, a space, and the calendar era (BC)
return 28
else:
# yyyy-mm-dd hh:mm:ss.[fff...], a sign, hh:mm, a space, and the calendar era (BC)
return 29 + seconds_precision
elif data_type_oid in (VerticaType.INTERVAL, VerticaType.INTERVALYM):
leading_precision = getIntervalLeadingPrecision(data_type_oid, type_modifier)
seconds_precision = getPrecision(data_type_oid, type_modifier)
interval_range = getIntervalRange(data_type_oid, type_modifier)
if interval_range in ("Year", "Month", "Day", "Hour", "Minute"):
# a sign, [range...]
return 1 + leading_precision
elif interval_range in ("Day to Hour", "Year to Month", "Hour to Minute"):
# a sign, [dd...] hh; a sign, [yy...]-mm; a sign, [hh...]:mm
return 1 + leading_precision + 3
elif interval_range == "Day to Minute":
# a sign, [dd...] hh:mm
return 1 + leading_precision + 6
elif interval_range == "Second":
if seconds_precision == 0:
# a sign, [ss...]
return 1 + leading_precision
else:
# a sign, [ss...].[fff...]
return 1 + leading_precision + 1 + seconds_precision
elif interval_range == "Day to Second":
if seconds_precision == 0:
# a sign, [dd...] hh:mm:ss
return 1 + leading_precision + 9
else:
# a sign, [dd...] hh:mm:ss.[fff...]
return 1 + leading_precision + 10 + seconds_precision
elif interval_range == "Hour to Second":
if seconds_precision == 0:
# a sign, [hh...]:mm:ss
return 1 + leading_precision + 6
else:
# a sign, [hh...]:mm:ss.[fff...]
return 1 + leading_precision + 7 + seconds_precision
elif interval_range == "Minute to Second":
if seconds_precision == 0:
# a sign, [mm...]:ss
return 1 + leading_precision + 3
else:
# a sign, [mm...]:ss.[fff...]
return 1 + leading_precision + 4 + seconds_precision
elif data_type_oid == VerticaType.UUID:
# aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee
return 36
elif data_type_oid in (VerticaType.CHAR,
VerticaType.VARCHAR,
VerticaType.BINARY,
VerticaType.VARBINARY,
VerticaType.UNKNOWN):
# the defined maximum octet length of the column
return MAX_STRING_LEN if type_modifier <= -1 else (type_modifier - 4)
elif data_type_oid in (VerticaType.LONGVARCHAR,
VerticaType.LONGVARBINARY):
return MAX_LONG_STRING_LEN if type_modifier <= -1 else (type_modifier - 4)
else:
return None | def function[getDisplaySize, parameter[data_type_oid, type_modifier]]:
constant[
Returns the column display size for the given Vertica type with
consideration of the type modifier.
The display size of a column is the maximum number of characters needed to
display data in character form.
]
if compare[name[data_type_oid] equal[==] name[VerticaType].BOOL] begin[:]
return[constant[1]] | keyword[def] identifier[getDisplaySize] ( identifier[data_type_oid] , identifier[type_modifier] ):
literal[string]
keyword[if] identifier[data_type_oid] == identifier[VerticaType] . identifier[BOOL] :
keyword[return] literal[int]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[INT8] :
keyword[return] literal[int]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[FLOAT8] :
keyword[return] literal[int]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[NUMERIC] :
keyword[return] identifier[getPrecision] ( identifier[data_type_oid] , identifier[type_modifier] )+ literal[int]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[DATE] :
keyword[return] literal[int]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[TIME] :
identifier[seconds_precision] = identifier[getPrecision] ( identifier[data_type_oid] , identifier[type_modifier] )
keyword[if] identifier[seconds_precision] == literal[int] :
keyword[return] literal[int]
keyword[else] :
keyword[return] literal[int] + identifier[seconds_precision]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[TIMETZ] :
identifier[seconds_precision] = identifier[getPrecision] ( identifier[data_type_oid] , identifier[type_modifier] )
keyword[if] identifier[seconds_precision] == literal[int] :
keyword[return] literal[int]
keyword[else] :
keyword[return] literal[int] + identifier[seconds_precision]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[TIMESTAMP] :
identifier[seconds_precision] = identifier[getPrecision] ( identifier[data_type_oid] , identifier[type_modifier] )
keyword[if] identifier[seconds_precision] == literal[int] :
keyword[return] literal[int]
keyword[else] :
keyword[return] literal[int] + identifier[seconds_precision]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[TIMESTAMPTZ] :
identifier[seconds_precision] = identifier[getPrecision] ( identifier[data_type_oid] , identifier[type_modifier] )
keyword[if] identifier[seconds_precision] == literal[int] :
keyword[return] literal[int]
keyword[else] :
keyword[return] literal[int] + identifier[seconds_precision]
keyword[elif] identifier[data_type_oid] keyword[in] ( identifier[VerticaType] . identifier[INTERVAL] , identifier[VerticaType] . identifier[INTERVALYM] ):
identifier[leading_precision] = identifier[getIntervalLeadingPrecision] ( identifier[data_type_oid] , identifier[type_modifier] )
identifier[seconds_precision] = identifier[getPrecision] ( identifier[data_type_oid] , identifier[type_modifier] )
identifier[interval_range] = identifier[getIntervalRange] ( identifier[data_type_oid] , identifier[type_modifier] )
keyword[if] identifier[interval_range] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[return] literal[int] + identifier[leading_precision]
keyword[elif] identifier[interval_range] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[return] literal[int] + identifier[leading_precision] + literal[int]
keyword[elif] identifier[interval_range] == literal[string] :
keyword[return] literal[int] + identifier[leading_precision] + literal[int]
keyword[elif] identifier[interval_range] == literal[string] :
keyword[if] identifier[seconds_precision] == literal[int] :
keyword[return] literal[int] + identifier[leading_precision]
keyword[else] :
keyword[return] literal[int] + identifier[leading_precision] + literal[int] + identifier[seconds_precision]
keyword[elif] identifier[interval_range] == literal[string] :
keyword[if] identifier[seconds_precision] == literal[int] :
keyword[return] literal[int] + identifier[leading_precision] + literal[int]
keyword[else] :
keyword[return] literal[int] + identifier[leading_precision] + literal[int] + identifier[seconds_precision]
keyword[elif] identifier[interval_range] == literal[string] :
keyword[if] identifier[seconds_precision] == literal[int] :
keyword[return] literal[int] + identifier[leading_precision] + literal[int]
keyword[else] :
keyword[return] literal[int] + identifier[leading_precision] + literal[int] + identifier[seconds_precision]
keyword[elif] identifier[interval_range] == literal[string] :
keyword[if] identifier[seconds_precision] == literal[int] :
keyword[return] literal[int] + identifier[leading_precision] + literal[int]
keyword[else] :
keyword[return] literal[int] + identifier[leading_precision] + literal[int] + identifier[seconds_precision]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[UUID] :
keyword[return] literal[int]
keyword[elif] identifier[data_type_oid] keyword[in] ( identifier[VerticaType] . identifier[CHAR] ,
identifier[VerticaType] . identifier[VARCHAR] ,
identifier[VerticaType] . identifier[BINARY] ,
identifier[VerticaType] . identifier[VARBINARY] ,
identifier[VerticaType] . identifier[UNKNOWN] ):
keyword[return] identifier[MAX_STRING_LEN] keyword[if] identifier[type_modifier] <=- literal[int] keyword[else] ( identifier[type_modifier] - literal[int] )
keyword[elif] identifier[data_type_oid] keyword[in] ( identifier[VerticaType] . identifier[LONGVARCHAR] ,
identifier[VerticaType] . identifier[LONGVARBINARY] ):
keyword[return] identifier[MAX_LONG_STRING_LEN] keyword[if] identifier[type_modifier] <=- literal[int] keyword[else] ( identifier[type_modifier] - literal[int] )
keyword[else] :
keyword[return] keyword[None] | def getDisplaySize(data_type_oid, type_modifier):
"""
Returns the column display size for the given Vertica type with
consideration of the type modifier.
The display size of a column is the maximum number of characters needed to
display data in character form.
"""
if data_type_oid == VerticaType.BOOL:
# T or F
return 1 # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.INT8:
# a sign and 19 digits if signed or 20 digits if unsigned
return 20 # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.FLOAT8:
# a sign, 15 digits, a decimal point, the letter E, a sign, and 3 digits
return 22 # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.NUMERIC:
# a sign, precision digits, and a decimal point
return getPrecision(data_type_oid, type_modifier) + 2 # depends on [control=['if'], data=['data_type_oid']]
elif data_type_oid == VerticaType.DATE:
# yyyy-mm-dd, a space, and the calendar era (BC)
return 13 # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.TIME:
seconds_precision = getPrecision(data_type_oid, type_modifier)
if seconds_precision == 0:
# hh:mm:ss
return 8 # depends on [control=['if'], data=[]]
else:
# hh:mm:ss.[fff...]
return 9 + seconds_precision # depends on [control=['if'], data=['data_type_oid']]
elif data_type_oid == VerticaType.TIMETZ:
seconds_precision = getPrecision(data_type_oid, type_modifier)
if seconds_precision == 0:
# hh:mm:ss, a sign, hh:mm
return 14 # depends on [control=['if'], data=[]]
else:
# hh:mm:ss.[fff...], a sign, hh:mm
return 15 + seconds_precision # depends on [control=['if'], data=['data_type_oid']]
elif data_type_oid == VerticaType.TIMESTAMP:
seconds_precision = getPrecision(data_type_oid, type_modifier)
if seconds_precision == 0:
# yyyy-mm-dd hh:mm:ss, a space, and the calendar era (BC)
return 22 # depends on [control=['if'], data=[]]
else:
# yyyy-mm-dd hh:mm:ss[.fff...], a space, and the calendar era (BC)
return 23 + seconds_precision # depends on [control=['if'], data=['data_type_oid']]
elif data_type_oid == VerticaType.TIMESTAMPTZ:
seconds_precision = getPrecision(data_type_oid, type_modifier)
if seconds_precision == 0:
# yyyy-mm-dd hh:mm:ss, a sign, hh:mm, a space, and the calendar era (BC)
return 28 # depends on [control=['if'], data=[]]
else:
# yyyy-mm-dd hh:mm:ss.[fff...], a sign, hh:mm, a space, and the calendar era (BC)
return 29 + seconds_precision # depends on [control=['if'], data=['data_type_oid']]
elif data_type_oid in (VerticaType.INTERVAL, VerticaType.INTERVALYM):
leading_precision = getIntervalLeadingPrecision(data_type_oid, type_modifier)
seconds_precision = getPrecision(data_type_oid, type_modifier)
interval_range = getIntervalRange(data_type_oid, type_modifier)
if interval_range in ('Year', 'Month', 'Day', 'Hour', 'Minute'):
# a sign, [range...]
return 1 + leading_precision # depends on [control=['if'], data=[]]
elif interval_range in ('Day to Hour', 'Year to Month', 'Hour to Minute'):
# a sign, [dd...] hh; a sign, [yy...]-mm; a sign, [hh...]:mm
return 1 + leading_precision + 3 # depends on [control=['if'], data=[]]
elif interval_range == 'Day to Minute':
# a sign, [dd...] hh:mm
return 1 + leading_precision + 6 # depends on [control=['if'], data=[]]
elif interval_range == 'Second':
if seconds_precision == 0:
# a sign, [ss...]
return 1 + leading_precision # depends on [control=['if'], data=[]]
else:
# a sign, [ss...].[fff...]
return 1 + leading_precision + 1 + seconds_precision # depends on [control=['if'], data=[]]
elif interval_range == 'Day to Second':
if seconds_precision == 0:
# a sign, [dd...] hh:mm:ss
return 1 + leading_precision + 9 # depends on [control=['if'], data=[]]
else:
# a sign, [dd...] hh:mm:ss.[fff...]
return 1 + leading_precision + 10 + seconds_precision # depends on [control=['if'], data=[]]
elif interval_range == 'Hour to Second':
if seconds_precision == 0:
# a sign, [hh...]:mm:ss
return 1 + leading_precision + 6 # depends on [control=['if'], data=[]]
else:
# a sign, [hh...]:mm:ss.[fff...]
return 1 + leading_precision + 7 + seconds_precision # depends on [control=['if'], data=[]]
elif interval_range == 'Minute to Second':
if seconds_precision == 0:
# a sign, [mm...]:ss
return 1 + leading_precision + 3 # depends on [control=['if'], data=[]]
else:
# a sign, [mm...]:ss.[fff...]
return 1 + leading_precision + 4 + seconds_precision # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['data_type_oid']]
elif data_type_oid == VerticaType.UUID:
# aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee
return 36 # depends on [control=['if'], data=[]]
elif data_type_oid in (VerticaType.CHAR, VerticaType.VARCHAR, VerticaType.BINARY, VerticaType.VARBINARY, VerticaType.UNKNOWN):
# the defined maximum octet length of the column
return MAX_STRING_LEN if type_modifier <= -1 else type_modifier - 4 # depends on [control=['if'], data=[]]
elif data_type_oid in (VerticaType.LONGVARCHAR, VerticaType.LONGVARBINARY):
return MAX_LONG_STRING_LEN if type_modifier <= -1 else type_modifier - 4 # depends on [control=['if'], data=[]]
else:
return None |
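# --- Worked example (added illustration, not from the source) ---
# For variable-length types the branch above returns type_modifier - 4,
# suggesting the modifier stores the declared max length plus 4 (an
# assumption): a VARCHAR(80) column would carry type_modifier == 84.
assert getDisplaySize(VerticaType.VARCHAR, 84) == 80
assert getDisplaySize(VerticaType.BOOL, -1) == 1  # "T" or "F"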
def make_mecard(name, reading=None, email=None, phone=None, videophone=None,
memo=None, nickname=None, birthday=None, url=None, pobox=None,
roomno=None, houseno=None, city=None, prefecture=None,
zipcode=None, country=None):
"""\
Returns a QR Code which encodes a `MeCard <https://en.wikipedia.org/wiki/MeCard>`_
:param str name: Name. If it contains a comma, the first part
is treated as lastname and the second part is treated as forename.
:param str|None reading: Designates a text string to be set as the
kana name in the phonebook
:param str|iterable email: E-mail address. Multiple values are
allowed.
:param str|iterable phone: Phone number. Multiple values are
allowed.
:param str|iterable videophone: Phone number for video calls.
Multiple values are allowed.
:param str memo: A notice for the contact.
:param str nickname: Nickname.
:param str|int|date birthday: Birthday. If a string is provided,
it should encode the date as a YYYYMMDD value.
:param str|iterable url: Homepage. Multiple values are allowed.
:param str|None pobox: P.O. box (address information).
:param str|None roomno: Room number (address information).
:param str|None houseno: House number (address information).
:param str|None city: City (address information).
:param str|None prefecture: Prefecture (address information).
:param str|None zipcode: Zip code (address information).
:param str|None country: Country (address information).
:rtype: segno.QRCode
"""
return segno.make_qr(make_mecard_data(name=name, reading=reading,
email=email, phone=phone,
videophone=videophone, memo=memo,
nickname=nickname, birthday=birthday,
url=url, pobox=pobox, roomno=roomno,
houseno=houseno, city=city,
prefecture=prefecture, zipcode=zipcode,
country=country)) | def function[make_mecard, parameter[name, reading, email, phone, videophone, memo, nickname, birthday, url, pobox, roomno, houseno, city, prefecture, zipcode, country]]:
constant[ Returns a QR Code which encodes a `MeCard <https://en.wikipedia.org/wiki/MeCard>`_
:param str name: Name. If it contains a comma, the first part
is treated as lastname and the second part is treated as forename.
:param str|None reading: Designates a text string to be set as the
kana name in the phonebook
:param str|iterable email: E-mail address. Multiple values are
allowed.
:param str|iterable phone: Phone number. Multiple values are
allowed.
:param str|iterable videophone: Phone number for video calls.
Multiple values are allowed.
:param str memo: A notice for the contact.
:param str nickname: Nickname.
:param str|int|date birthday: Birthday. If a string is provided,
it should encode the date as YYYYMMDD value.
:param str|iterable url: Homepage. Multiple values are allowed.
:param str|None pobox: P.O. box (address information).
:param str|None roomno: Room number (address information).
:param str|None houseno: House number (address information).
:param str|None city: City (address information).
:param str|None prefecture: Prefecture (address information).
:param str|None zipcode: Zip code (address information).
:param str|None country: Country (address information).
:rtype: segno.QRCode
]
return[call[name[segno].make_qr, parameter[call[name[make_mecard_data], parameter[]]]]] | keyword[def] identifier[make_mecard] ( identifier[name] , identifier[reading] = keyword[None] , identifier[email] = keyword[None] , identifier[phone] = keyword[None] , identifier[videophone] = keyword[None] ,
identifier[memo] = keyword[None] , identifier[nickname] = keyword[None] , identifier[birthday] = keyword[None] , identifier[url] = keyword[None] , identifier[pobox] = keyword[None] ,
identifier[roomno] = keyword[None] , identifier[houseno] = keyword[None] , identifier[city] = keyword[None] , identifier[prefecture] = keyword[None] ,
identifier[zipcode] = keyword[None] , identifier[country] = keyword[None] ):
literal[string]
keyword[return] identifier[segno] . identifier[make_qr] ( identifier[make_mecard_data] ( identifier[name] = identifier[name] , identifier[reading] = identifier[reading] ,
identifier[email] = identifier[email] , identifier[phone] = identifier[phone] ,
identifier[videophone] = identifier[videophone] , identifier[memo] = identifier[memo] ,
identifier[nickname] = identifier[nickname] , identifier[birthday] = identifier[birthday] ,
identifier[url] = identifier[url] , identifier[pobox] = identifier[pobox] , identifier[roomno] = identifier[roomno] ,
identifier[houseno] = identifier[houseno] , identifier[city] = identifier[city] ,
identifier[prefecture] = identifier[prefecture] , identifier[zipcode] = identifier[zipcode] ,
identifier[country] = identifier[country] )) | def make_mecard(name, reading=None, email=None, phone=None, videophone=None, memo=None, nickname=None, birthday=None, url=None, pobox=None, roomno=None, houseno=None, city=None, prefecture=None, zipcode=None, country=None):
""" Returns a QR Code which encodes a `MeCard <https://en.wikipedia.org/wiki/MeCard>`_
:param str name: Name. If it contains a comma, the first part
is treated as lastname and the second part is treated as forename.
:param str|None reading: Designates a text string to be set as the
kana name in the phonebook
:param str|iterable email: E-mail address. Multiple values are
allowed.
:param str|iterable phone: Phone number. Multiple values are
allowed.
:param str|iterable videophone: Phone number for video calls.
Multiple values are allowed.
:param str memo: A notice for the contact.
:param str nickname: Nickname.
:param str|int|date birthday: Birthday. If a string is provided,
it should encode the date as YYYYMMDD value.
:param str|iterable url: Homepage. Multiple values are allowed.
:param str|None pobox: P.O. box (address information).
:param str|None roomno: Room number (address information).
:param str|None houseno: House number (address information).
:param str|None city: City (address information).
:param str|None prefecture: Prefecture (address information).
:param str|None zipcode: Zip code (address information).
:param str|None country: Country (address information).
:rtype: segno.QRCode
"""
return segno.make_qr(make_mecard_data(name=name, reading=reading, email=email, phone=phone, videophone=videophone, memo=memo, nickname=nickname, birthday=birthday, url=url, pobox=pobox, roomno=roomno, houseno=houseno, city=city, prefecture=prefecture, zipcode=zipcode, country=country)) |
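A minimal usage sketch for the helper above, assuming the segno package is installed and the function is exposed as segno.helpers.make_mecard; the contact details and output filename are invented for illustration:

from segno import helpers

# Build a MeCard QR code; 'Doe,John' is lastname,forename per the docstring.
qr = helpers.make_mecard(name='Doe,John', email='[email protected]',
                         phone='+1-555-0100', url='https://example.org/')
qr.save('john-doe.png', scale=4)  # write the symbol as a PNG, 4px per module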
def get_scaled_cutout_basic(self, x1, y1, x2, y2, scale_x, scale_y,
method='basic'):
"""Extract a region of the image defined by corners (x1, y1) and
(x2, y2) and scale it by scale factors (scale_x, scale_y).
`method` describes the method of interpolation used, where the
default "basic" is nearest neighbor.
"""
new_wd = int(round(scale_x * (x2 - x1 + 1)))
new_ht = int(round(scale_y * (y2 - y1 + 1)))
return self.get_scaled_cutout_wdht(x1, y1, x2, y2, new_wd, new_ht,
# TODO:
# this causes a problem for the
# current Glue plugin--update that
#method=method
) | def function[get_scaled_cutout_basic, parameter[self, x1, y1, x2, y2, scale_x, scale_y, method]]:
constant[Extract a region of the image defined by corners (x1, y1) and
(x2, y2) and scale it by scale factors (scale_x, scale_y).
`method` describes the method of interpolation used, where the
default "basic" is nearest neighbor.
]
variable[new_wd] assign[=] call[name[int], parameter[call[name[round], parameter[binary_operation[name[scale_x] * binary_operation[binary_operation[name[x2] - name[x1]] + constant[1]]]]]]]
variable[new_ht] assign[=] call[name[int], parameter[call[name[round], parameter[binary_operation[name[scale_y] * binary_operation[binary_operation[name[y2] - name[y1]] + constant[1]]]]]]]
return[call[name[self].get_scaled_cutout_wdht, parameter[name[x1], name[y1], name[x2], name[y2], name[new_wd], name[new_ht]]]] | keyword[def] identifier[get_scaled_cutout_basic] ( identifier[self] , identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] , identifier[scale_x] , identifier[scale_y] ,
identifier[method] = literal[string] ):
literal[string]
identifier[new_wd] = identifier[int] ( identifier[round] ( identifier[scale_x] *( identifier[x2] - identifier[x1] + literal[int] )))
identifier[new_ht] = identifier[int] ( identifier[round] ( identifier[scale_y] *( identifier[y2] - identifier[y1] + literal[int] )))
keyword[return] identifier[self] . identifier[get_scaled_cutout_wdht] ( identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] , identifier[new_wd] , identifier[new_ht] ,
) | def get_scaled_cutout_basic(self, x1, y1, x2, y2, scale_x, scale_y, method='basic'):
"""Extract a region of the image defined by corners (x1, y1) and
(x2, y2) and scale it by scale factors (scale_x, scale_y).
`method` describes the method of interpolation used, where the
default "basic" is nearest neighbor.
"""
new_wd = int(round(scale_x * (x2 - x1 + 1)))
new_ht = int(round(scale_y * (y2 - y1 + 1)))
# TODO:
# this causes a problem for the
# current Glue plugin--update that
#method=method
return self.get_scaled_cutout_wdht(x1, y1, x2, y2, new_wd, new_ht) |
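The only computation in this wrapper is the output-size arithmetic, so a standalone sketch of it may help; the region bounds and scale factors are arbitrary examples:

# Scaling a 100x50 pixel region (inclusive corner coordinates) by
# (0.5, 2.0) yields a 50x100 cutout.
x1, y1, x2, y2 = 0, 0, 99, 49
scale_x, scale_y = 0.5, 2.0
new_wd = int(round(scale_x * (x2 - x1 + 1)))  # -> 50
new_ht = int(round(scale_y * (y2 - y1 + 1)))  # -> 100
assert (new_wd, new_ht) == (50, 100)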
def delete(filething):
""" delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file.
"""
t = OggFLAC(filething)
filething.fileobj.seek(0)
t.delete(filething) | def function[delete, parameter[filething]]:
constant[ delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file.
]
variable[t] assign[=] call[name[OggFLAC], parameter[name[filething]]]
call[name[filething].fileobj.seek, parameter[constant[0]]]
call[name[t].delete, parameter[name[filething]]] | keyword[def] identifier[delete] ( identifier[filething] ):
literal[string]
identifier[t] = identifier[OggFLAC] ( identifier[filething] )
identifier[filething] . identifier[fileobj] . identifier[seek] ( literal[int] )
identifier[t] . identifier[delete] ( identifier[filething] ) | def delete(filething):
""" delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file.
"""
t = OggFLAC(filething)
filething.fileobj.seek(0)
t.delete(filething) |
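A minimal usage sketch, assuming this is mutagen's module-level delete for Ogg FLAC (mutagen.oggflac.delete) and that 'song.oga' is an existing tagged file:

from mutagen.oggflac import delete

delete('song.oga')  # strips the Vorbis comment metadata from the file in place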
def main():
"""The main function of the script"""
desc = 'Benchmark the files generated by generate.py'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'--src',
dest='src_dir',
default='generated',
help='The directory containing the sources to benchmark'
)
parser.add_argument(
'--out',
dest='out_dir',
default='../../doc',
help='The output directory'
)
parser.add_argument(
'--include',
dest='include',
default='include',
        help='The directory containing the headers for the benchmark'
)
parser.add_argument(
'--boost_headers',
dest='boost_headers',
default='../../../..',
help='The directory containing the Boost headers (the boost directory)'
)
parser.add_argument(
'--compiler',
dest='compiler',
default='g++',
help='The compiler to do the benchmark with'
)
parser.add_argument(
'--repeat_count',
dest='repeat_count',
type=int,
default=5,
help='How many times a measurement should be repeated.'
)
args = parser.parse_args()
compiler = compiler_info(args.compiler)
results = benchmark(
args.src_dir,
args.compiler,
[args.include, args.boost_headers],
args.repeat_count
)
plot_diagrams(results, configs_in(args.src_dir), compiler, args.out_dir) | def function[main, parameter[]]:
constant[The main function of the script]
variable[desc] assign[=] constant[Benchmark the files generated by generate.py]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[--src]]]
call[name[parser].add_argument, parameter[constant[--out]]]
call[name[parser].add_argument, parameter[constant[--include]]]
call[name[parser].add_argument, parameter[constant[--boost_headers]]]
call[name[parser].add_argument, parameter[constant[--compiler]]]
call[name[parser].add_argument, parameter[constant[--repeat_count]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[]]
variable[compiler] assign[=] call[name[compiler_info], parameter[name[args].compiler]]
variable[results] assign[=] call[name[benchmark], parameter[name[args].src_dir, name[args].compiler, list[[<ast.Attribute object at 0x7da1b1fe78b0>, <ast.Attribute object at 0x7da1b1fe7130>]], name[args].repeat_count]]
call[name[plot_diagrams], parameter[name[results], call[name[configs_in], parameter[name[args].src_dir]], name[compiler], name[args].out_dir]] | keyword[def] identifier[main] ():
literal[string]
identifier[desc] = literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = identifier[desc] )
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[dest] = literal[string] ,
identifier[default] = literal[string] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[dest] = literal[string] ,
identifier[default] = literal[string] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[dest] = literal[string] ,
identifier[default] = literal[string] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[dest] = literal[string] ,
identifier[default] = literal[string] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[dest] = literal[string] ,
identifier[default] = literal[string] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[dest] = literal[string] ,
identifier[type] = identifier[int] ,
identifier[default] = literal[int] ,
identifier[help] = literal[string]
)
identifier[args] = identifier[parser] . identifier[parse_args] ()
identifier[compiler] = identifier[compiler_info] ( identifier[args] . identifier[compiler] )
identifier[results] = identifier[benchmark] (
identifier[args] . identifier[src_dir] ,
identifier[args] . identifier[compiler] ,
[ identifier[args] . identifier[include] , identifier[args] . identifier[boost_headers] ],
identifier[args] . identifier[repeat_count]
)
identifier[plot_diagrams] ( identifier[results] , identifier[configs_in] ( identifier[args] . identifier[src_dir] ), identifier[compiler] , identifier[args] . identifier[out_dir] ) | def main():
"""The main function of the script"""
desc = 'Benchmark the files generated by generate.py'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--src', dest='src_dir', default='generated', help='The directory containing the sources to benchmark')
parser.add_argument('--out', dest='out_dir', default='../../doc', help='The output directory')
parser.add_argument('--include', dest='include', default='include', help='The directory containing the headeres for the benchmark')
parser.add_argument('--boost_headers', dest='boost_headers', default='../../../..', help='The directory containing the Boost headers (the boost directory)')
parser.add_argument('--compiler', dest='compiler', default='g++', help='The compiler to do the benchmark with')
parser.add_argument('--repeat_count', dest='repeat_count', type=int, default=5, help='How many times a measurement should be repeated.')
args = parser.parse_args()
compiler = compiler_info(args.compiler)
results = benchmark(args.src_dir, args.compiler, [args.include, args.boost_headers], args.repeat_count)
plot_diagrams(results, configs_in(args.src_dir), compiler, args.out_dir) |
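The argument handling above is plain argparse; a self-contained sketch of the same pattern shows how defaults and type=int coercion behave (flag names mirror the script, values are arbitrary):

import argparse

parser = argparse.ArgumentParser(description='demo')
parser.add_argument('--src', dest='src_dir', default='generated')
parser.add_argument('--repeat_count', dest='repeat_count', type=int, default=5)

# parse_args accepts an explicit argv list, which is handy for testing.
args = parser.parse_args(['--repeat_count', '3'])
print(args.src_dir, args.repeat_count)  # -> generated 3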
def fromInputs(self, received):
"""
Convert some random strings received from a browser into structured
data, using a list of parameters.
@param received: a dict of lists of strings, i.e. the canonical Python
form of web form post.
@rtype: L{Deferred}
@return: A Deferred which will be called back with a dict mapping
parameter names to coerced parameter values.
"""
results = []
for parameter in self.parameters:
name = parameter.name.encode('ascii')
d = maybeDeferred(parameter.fromInputs, received)
d.addCallback(lambda value, name=name: (name, value))
results.append(d)
return gatherResults(results).addCallback(dict) | def function[fromInputs, parameter[self, received]]:
constant[
Convert some random strings received from a browser into structured
data, using a list of parameters.
@param received: a dict of lists of strings, i.e. the canonical Python
form of web form post.
@rtype: L{Deferred}
@return: A Deferred which will be called back with a dict mapping
parameter names to coerced parameter values.
]
variable[results] assign[=] list[[]]
for taget[name[parameter]] in starred[name[self].parameters] begin[:]
variable[name] assign[=] call[name[parameter].name.encode, parameter[constant[ascii]]]
variable[d] assign[=] call[name[maybeDeferred], parameter[name[parameter].fromInputs, name[received]]]
call[name[d].addCallback, parameter[<ast.Lambda object at 0x7da1b0bd91e0>]]
call[name[results].append, parameter[name[d]]]
return[call[call[name[gatherResults], parameter[name[results]]].addCallback, parameter[name[dict]]]] | keyword[def] identifier[fromInputs] ( identifier[self] , identifier[received] ):
literal[string]
identifier[results] =[]
keyword[for] identifier[parameter] keyword[in] identifier[self] . identifier[parameters] :
identifier[name] = identifier[parameter] . identifier[name] . identifier[encode] ( literal[string] )
identifier[d] = identifier[maybeDeferred] ( identifier[parameter] . identifier[fromInputs] , identifier[received] )
identifier[d] . identifier[addCallback] ( keyword[lambda] identifier[value] , identifier[name] = identifier[name] :( identifier[name] , identifier[value] ))
identifier[results] . identifier[append] ( identifier[d] )
keyword[return] identifier[gatherResults] ( identifier[results] ). identifier[addCallback] ( identifier[dict] ) | def fromInputs(self, received):
"""
Convert some random strings received from a browser into structured
data, using a list of parameters.
@param received: a dict of lists of strings, i.e. the canonical Python
form of web form post.
@rtype: L{Deferred}
@return: A Deferred which will be called back with a dict mapping
parameter names to coerced parameter values.
"""
results = []
for parameter in self.parameters:
name = parameter.name.encode('ascii')
d = maybeDeferred(parameter.fromInputs, received)
d.addCallback(lambda value, name=name: (name, value))
results.append(d) # depends on [control=['for'], data=['parameter']]
return gatherResults(results).addCallback(dict) |
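A self-contained sketch of the Deferred-aggregation pattern used above, with two hypothetical coercers standing in for the parameter objects; only twisted.internet.defer primitives are assumed:

from twisted.internet.defer import maybeDeferred, gatherResults

def coerce_age(received):
    return int(received['age'][0])

received = {'age': ['42'], 'name': [' Ada ']}
coercers = {'age': coerce_age, 'name': lambda r: r['name'][0].strip()}
ds = []
for name, coercer in coercers.items():
    d = maybeDeferred(coercer, received)
    # Bind name at definition time, exactly as the method above does.
    d.addCallback(lambda value, name=name: (name, value))
    ds.append(d)
gatherResults(ds).addCallback(dict).addCallback(print)
# -> {'age': 42, 'name': 'Ada'}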