code | code_sememe | token_type | code_dependency |
---|---|---|---|
def _check_directory_arguments(self):
"""
Validates arguments for loading from directories, including static image and time series directories.
"""
if not os.path.isdir(self.datapath):
        raise NotADirectoryError('Directory does not exist: %s' % self.datapath)
    if self.time_delay:
        if not isinstance(self.time_delay, int):
            raise ValueError('Time step argument must be an integer, but gave: %s' % str(self.time_delay))
        if self.time_delay < 1:
            raise ValueError('Time step argument must be greater than 0, but gave: %i' % self.time_delay) | def function[_check_directory_arguments, parameter[self]]:
constant[
Validates arguments for loading from directories, including static image and time series directories.
]
if <ast.UnaryOp object at 0x7da20e962bc0> begin[:]
<ast.Raise object at 0x7da20e960430>
if name[self].time_delay begin[:]
        if <ast.UnaryOp object at 0x7da18c4ce020> begin[:]
            <ast.Raise object at 0x7da18f00c370>
        if compare[name[self].time_delay less[<] constant[1]] begin[:]
            <ast.Raise object at 0x7da18c4ceb00> | keyword[def] identifier[_check_directory_arguments] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[self] . identifier[datapath] ):
keyword[raise] ( identifier[NotADirectoryError] ( literal[string] % identifier[self] . identifier[datapath] ))
keyword[if] identifier[self] . identifier[time_delay] :
        keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[time_delay] , identifier[int] ):
            keyword[raise] identifier[ValueError] ( literal[string] % identifier[str] ( identifier[self] . identifier[time_delay] ))
        keyword[if] identifier[self] . identifier[time_delay] < literal[int] :
            keyword[raise] identifier[ValueError] ( literal[string] % identifier[self] . identifier[time_delay] ) | def _check_directory_arguments(self):
"""
Validates arguments for loading from directories, including static image and time series directories.
"""
if not os.path.isdir(self.datapath):
raise NotADirectoryError('Directory does not exist: %s' % self.datapath) # depends on [control=['if'], data=[]]
    if self.time_delay:
        if not isinstance(self.time_delay, int):
            raise ValueError('Time step argument must be an integer, but gave: %s' % str(self.time_delay)) # depends on [control=['if'], data=[]]
        if self.time_delay < 1:
            raise ValueError('Time step argument must be greater than 0, but gave: %i' % self.time_delay) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
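A minimal sketch of how a validator like this is typically hosted. The ImageLoader name and constructor below are assumptions for illustration, not from the source; note that the type check runs before the range check, so a float or string never reaches the integer comparison with a misleading message.

import os

class ImageLoader(object):
    def __init__(self, datapath, time_delay=None):
        self.datapath = datapath
        self.time_delay = time_delay
        self._check_directory_arguments()  # fail fast on bad arguments

    def _check_directory_arguments(self):
        if not os.path.isdir(self.datapath):
            raise NotADirectoryError('Directory does not exist: %s' % self.datapath)
        if self.time_delay:
            if not isinstance(self.time_delay, int):
                raise ValueError('Time step argument must be an integer, but gave: %s'
                                 % str(self.time_delay))
            if self.time_delay < 1:
                raise ValueError('Time step argument must be greater than 0, but gave: %i'
                                 % self.time_delay)

# ImageLoader('.', time_delay=3)    # passes
# ImageLoader('.', time_delay=0.5)  # ValueError: must be an integer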
def new_board(self, name):
"""Make a board for a character name, and switch to it."""
char = self.engine.character[name]
board = Board(character=char)
self.mainscreen.boards[name] = board
self.character = char | def function[new_board, parameter[self, name]]:
constant[Make a board for a character name, and switch to it.]
variable[char] assign[=] call[name[self].engine.character][name[name]]
variable[board] assign[=] call[name[Board], parameter[]]
call[name[self].mainscreen.boards][name[name]] assign[=] name[board]
name[self].character assign[=] name[char] | keyword[def] identifier[new_board] ( identifier[self] , identifier[name] ):
literal[string]
identifier[char] = identifier[self] . identifier[engine] . identifier[character] [ identifier[name] ]
identifier[board] = identifier[Board] ( identifier[character] = identifier[char] )
identifier[self] . identifier[mainscreen] . identifier[boards] [ identifier[name] ]= identifier[board]
identifier[self] . identifier[character] = identifier[char] | def new_board(self, name):
"""Make a board for a character name, and switch to it."""
char = self.engine.character[name]
board = Board(character=char)
self.mainscreen.boards[name] = board
self.character = char |
def pci_lookup_name4(
access: (IN, ctypes.POINTER(pci_access)),
buf: (IN, ctypes.c_char_p),
size: (IN, ctypes.c_int),
flags: (IN, ctypes.c_int),
arg1: (IN, ctypes.c_int),
arg2: (IN, ctypes.c_int),
arg3: (IN, ctypes.c_int),
arg4: (IN, ctypes.c_int),
) -> ctypes.c_char_p:
"""
    Conversion of PCI IDs to names (according to the pci.ids file).
char *pci_lookup_name(
struct pci_access *a, char *buf, int size, int flags, ...
) PCI_ABI;
This is a variant of pci_lookup_name() that gets called with four
    arguments. It is required because ctypes doesn't support variadic functions.
"""
pass | def function[pci_lookup_name4, parameter[access, buf, size, flags, arg1, arg2, arg3, arg4]]:
constant[
    Conversion of PCI IDs to names (according to the pci.ids file).
char *pci_lookup_name(
struct pci_access *a, char *buf, int size, int flags, ...
) PCI_ABI;
This is a variant of pci_lookup_name() that gets called with four
    arguments. It is required because ctypes doesn't support variadic functions.
]
pass | keyword[def] identifier[pci_lookup_name4] (
identifier[access] :( identifier[IN] , identifier[ctypes] . identifier[POINTER] ( identifier[pci_access] )),
identifier[buf] :( identifier[IN] , identifier[ctypes] . identifier[c_char_p] ),
identifier[size] :( identifier[IN] , identifier[ctypes] . identifier[c_int] ),
identifier[flags] :( identifier[IN] , identifier[ctypes] . identifier[c_int] ),
identifier[arg1] :( identifier[IN] , identifier[ctypes] . identifier[c_int] ),
identifier[arg2] :( identifier[IN] , identifier[ctypes] . identifier[c_int] ),
identifier[arg3] :( identifier[IN] , identifier[ctypes] . identifier[c_int] ),
identifier[arg4] :( identifier[IN] , identifier[ctypes] . identifier[c_int] ),
)-> identifier[ctypes] . identifier[c_char_p] :
literal[string]
keyword[pass] | def pci_lookup_name4(access: (IN, ctypes.POINTER(pci_access)), buf: (IN, ctypes.c_char_p), size: (IN, ctypes.c_int), flags: (IN, ctypes.c_int), arg1: (IN, ctypes.c_int), arg2: (IN, ctypes.c_int), arg3: (IN, ctypes.c_int), arg4: (IN, ctypes.c_int)) -> ctypes.c_char_p:
"""
    Conversion of PCI IDs to names (according to the pci.ids file).
char *pci_lookup_name(
struct pci_access *a, char *buf, int size, int flags, ...
) PCI_ABI;
This is a variant of pci_lookup_name() that gets called with four
arguments. It is required because ctypes doesn't support varadic functions.
"""
pass |
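A hedged illustration of the same workaround using a real variadic C function, libc's printf(const char *fmt, ...). ctypes cannot express the trailing `...`, so a caller pins one concrete arity, exactly as pci_lookup_name4 pins four trailing ints; this sketch assumes a standard libc is findable on the host.

import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library('c'))
printf2 = libc.printf                                # printf(fmt, ...) from libc
printf2.restype = ctypes.c_int
printf2.argtypes = [ctypes.c_char_p, ctypes.c_int]   # pinned two-argument variant

printf2(b'value = %d\n', 42)                         # prints: value = 42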
def _log(file_list, list_name, in_path):
"""Logs result at debug level"""
file_names = '\n'.join(file_list)
LOG.debug("\nDiscovered %(size)d %(name)s file(s) in %(path)s:\n"
"%(files)s\n",
{'size': len(file_list), 'name': list_name, 'path': in_path,
'files': file_names}) | def function[_log, parameter[file_list, list_name, in_path]]:
constant[Logs result at debug level]
variable[file_names] assign[=] call[constant[
].join, parameter[name[file_list]]]
call[name[LOG].debug, parameter[constant[
Discovered %(size)d %(name)s file(s) in %(path)s:
%(files)s
], dictionary[[<ast.Constant object at 0x7da1b1987940>, <ast.Constant object at 0x7da1b1986800>, <ast.Constant object at 0x7da1b19877c0>, <ast.Constant object at 0x7da1b1987130>], [<ast.Call object at 0x7da1b1986f20>, <ast.Name object at 0x7da1b1985390>, <ast.Name object at 0x7da1b1984730>, <ast.Name object at 0x7da1b1984550>]]]] | keyword[def] identifier[_log] ( identifier[file_list] , identifier[list_name] , identifier[in_path] ):
literal[string]
identifier[file_names] = literal[string] . identifier[join] ( identifier[file_list] )
identifier[LOG] . identifier[debug] ( literal[string]
literal[string] ,
{ literal[string] : identifier[len] ( identifier[file_list] ), literal[string] : identifier[list_name] , literal[string] : identifier[in_path] ,
literal[string] : identifier[file_names] }) | def _log(file_list, list_name, in_path):
"""Logs result at debug level"""
file_names = '\n'.join(file_list)
LOG.debug('\nDiscovered %(size)d %(name)s file(s) in %(path)s:\n%(files)s\n', {'size': len(file_list), 'name': list_name, 'path': in_path, 'files': file_names}) |
def community_topic_delete(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/topics#delete-topic"
api_path = "/api/v2/community/topics/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, method="DELETE", **kwargs) | def function[community_topic_delete, parameter[self, id]]:
constant[https://developer.zendesk.com/rest_api/docs/help_center/topics#delete-topic]
variable[api_path] assign[=] constant[/api/v2/community/topics/{id}.json]
variable[api_path] assign[=] call[name[api_path].format, parameter[]]
return[call[name[self].call, parameter[name[api_path]]]] | keyword[def] identifier[community_topic_delete] ( identifier[self] , identifier[id] ,** identifier[kwargs] ):
literal[string]
identifier[api_path] = literal[string]
identifier[api_path] = identifier[api_path] . identifier[format] ( identifier[id] = identifier[id] )
keyword[return] identifier[self] . identifier[call] ( identifier[api_path] , identifier[method] = literal[string] ,** identifier[kwargs] ) | def community_topic_delete(self, id, **kwargs):
"""https://developer.zendesk.com/rest_api/docs/help_center/topics#delete-topic"""
api_path = '/api/v2/community/topics/{id}.json'
api_path = api_path.format(id=id)
return self.call(api_path, method='DELETE', **kwargs) |
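A usage sketch. This method matches the style of generated Zendesk API wrappers such as zdesk, so the import path and constructor arguments below are assumptions for illustration; the topic ID is a placeholder.

from zdesk import Zendesk  # assumed host library for this generated method

zd = Zendesk('https://example.zendesk.com', 'agent@example.com', 'api_token_here', True)
zd.community_topic_delete(id=360000123456)
# -> DELETE /api/v2/community/topics/360000123456.json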
def schedule_jobs(user):
"""Dispatch jobs to remotecis.
The remoteci can use this method to request a new job.
Before a job is dispatched, the server will flag as 'killed' all the
running jobs that were associated with the remoteci. This is because they
will never be finished.
"""
values = schemas.job_schedule.post(flask.request.json)
values.update({
'id': utils.gen_uuid(),
'created_at': datetime.datetime.utcnow().isoformat(),
'updated_at': datetime.datetime.utcnow().isoformat(),
'etag': utils.gen_etag(),
'status': 'new',
'remoteci_id': user.id,
'user_agent': flask.request.environ.get('HTTP_USER_AGENT'),
'client_version': flask.request.environ.get(
'HTTP_CLIENT_VERSION'
),
})
topic_id = values.pop('topic_id')
topic_id_secondary = values.pop('topic_id_secondary')
components_ids = values.pop('components_ids')
# check remoteci
remoteci = v1_utils.verify_existence_and_get(user.id, models.REMOTECIS)
if remoteci['state'] != 'active':
message = 'RemoteCI "%s" is disabled.' % remoteci['id']
raise dci_exc.DCIException(message, status_code=412)
# check primary topic
topic = v1_utils.verify_existence_and_get(topic_id, models.TOPICS)
if topic['state'] != 'active':
msg = 'Topic %s:%s not active.' % (topic_id, topic['name'])
raise dci_exc.DCIException(msg, status_code=412)
v1_utils.verify_team_in_topic(user, topic_id)
# check secondary topic
if topic_id_secondary:
topic_secondary = v1_utils.verify_existence_and_get(
topic_id_secondary, models.TOPICS)
if topic_secondary['state'] != 'active':
msg = 'Topic %s:%s not active.' % (topic_id_secondary,
topic['name'])
raise dci_exc.DCIException(msg, status_code=412)
v1_utils.verify_team_in_topic(user, topic_id_secondary)
dry_run = values.pop('dry_run')
if dry_run:
component_types = components.get_component_types_from_topic(topic_id)
components_ids = components.get_last_components_by_type(
component_types,
topic_id
)
return flask.Response(
json.dumps({'components_ids': components_ids, 'job': None}),
201,
content_type='application/json'
)
remotecis.kill_existing_jobs(remoteci['id'])
values = _build_job(topic_id, remoteci, components_ids, values,
topic_id_secondary=topic_id_secondary)
return flask.Response(json.dumps({'job': values}), 201,
headers={'ETag': values['etag']},
content_type='application/json') | def function[schedule_jobs, parameter[user]]:
constant[Dispatch jobs to remotecis.
The remoteci can use this method to request a new job.
Before a job is dispatched, the server will flag as 'killed' all the
running jobs that were associated with the remoteci. This is because they
will never be finished.
]
variable[values] assign[=] call[name[schemas].job_schedule.post, parameter[name[flask].request.json]]
call[name[values].update, parameter[dictionary[[<ast.Constant object at 0x7da1b0fe5f30>, <ast.Constant object at 0x7da1b0fe6b00>, <ast.Constant object at 0x7da1b0fe5030>, <ast.Constant object at 0x7da1b0fe5780>, <ast.Constant object at 0x7da1b0f05ba0>, <ast.Constant object at 0x7da1b0f05ed0>, <ast.Constant object at 0x7da1b0f05390>, <ast.Constant object at 0x7da1b0f05c90>], [<ast.Call object at 0x7da1b0f05fc0>, <ast.Call object at 0x7da1b0f05840>, <ast.Call object at 0x7da1b0f062c0>, <ast.Call object at 0x7da1b0f06530>, <ast.Constant object at 0x7da1b0f06320>, <ast.Attribute object at 0x7da1b0f05900>, <ast.Call object at 0x7da1b0f06500>, <ast.Call object at 0x7da1b0f05f30>]]]]
variable[topic_id] assign[=] call[name[values].pop, parameter[constant[topic_id]]]
variable[topic_id_secondary] assign[=] call[name[values].pop, parameter[constant[topic_id_secondary]]]
variable[components_ids] assign[=] call[name[values].pop, parameter[constant[components_ids]]]
variable[remoteci] assign[=] call[name[v1_utils].verify_existence_and_get, parameter[name[user].id, name[models].REMOTECIS]]
if compare[call[name[remoteci]][constant[state]] not_equal[!=] constant[active]] begin[:]
variable[message] assign[=] binary_operation[constant[RemoteCI "%s" is disabled.] <ast.Mod object at 0x7da2590d6920> call[name[remoteci]][constant[id]]]
<ast.Raise object at 0x7da1b0d6b3d0>
variable[topic] assign[=] call[name[v1_utils].verify_existence_and_get, parameter[name[topic_id], name[models].TOPICS]]
if compare[call[name[topic]][constant[state]] not_equal[!=] constant[active]] begin[:]
variable[msg] assign[=] binary_operation[constant[Topic %s:%s not active.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0d68670>, <ast.Subscript object at 0x7da1b0d6ac80>]]]
<ast.Raise object at 0x7da1b0d6ba60>
call[name[v1_utils].verify_team_in_topic, parameter[name[user], name[topic_id]]]
if name[topic_id_secondary] begin[:]
variable[topic_secondary] assign[=] call[name[v1_utils].verify_existence_and_get, parameter[name[topic_id_secondary], name[models].TOPICS]]
if compare[call[name[topic_secondary]][constant[state]] not_equal[!=] constant[active]] begin[:]
variable[msg] assign[=] binary_operation[constant[Topic %s:%s not active.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0d69030>, <ast.Subscript object at 0x7da1b0d6a410>]]]
<ast.Raise object at 0x7da1b0d699c0>
call[name[v1_utils].verify_team_in_topic, parameter[name[user], name[topic_id_secondary]]]
variable[dry_run] assign[=] call[name[values].pop, parameter[constant[dry_run]]]
if name[dry_run] begin[:]
variable[component_types] assign[=] call[name[components].get_component_types_from_topic, parameter[name[topic_id]]]
variable[components_ids] assign[=] call[name[components].get_last_components_by_type, parameter[name[component_types], name[topic_id]]]
return[call[name[flask].Response, parameter[call[name[json].dumps, parameter[dictionary[[<ast.Constant object at 0x7da1b0ff1120>, <ast.Constant object at 0x7da1b0ff16c0>], [<ast.Name object at 0x7da1b0ff0f10>, <ast.Constant object at 0x7da1b0ff2f50>]]]], constant[201]]]]
call[name[remotecis].kill_existing_jobs, parameter[call[name[remoteci]][constant[id]]]]
variable[values] assign[=] call[name[_build_job], parameter[name[topic_id], name[remoteci], name[components_ids], name[values]]]
return[call[name[flask].Response, parameter[call[name[json].dumps, parameter[dictionary[[<ast.Constant object at 0x7da1b0d6bd30>], [<ast.Name object at 0x7da1b0d6a4a0>]]]], constant[201]]]] | keyword[def] identifier[schedule_jobs] ( identifier[user] ):
literal[string]
identifier[values] = identifier[schemas] . identifier[job_schedule] . identifier[post] ( identifier[flask] . identifier[request] . identifier[json] )
identifier[values] . identifier[update] ({
literal[string] : identifier[utils] . identifier[gen_uuid] (),
literal[string] : identifier[datetime] . identifier[datetime] . identifier[utcnow] (). identifier[isoformat] (),
literal[string] : identifier[datetime] . identifier[datetime] . identifier[utcnow] (). identifier[isoformat] (),
literal[string] : identifier[utils] . identifier[gen_etag] (),
literal[string] : literal[string] ,
literal[string] : identifier[user] . identifier[id] ,
literal[string] : identifier[flask] . identifier[request] . identifier[environ] . identifier[get] ( literal[string] ),
literal[string] : identifier[flask] . identifier[request] . identifier[environ] . identifier[get] (
literal[string]
),
})
identifier[topic_id] = identifier[values] . identifier[pop] ( literal[string] )
identifier[topic_id_secondary] = identifier[values] . identifier[pop] ( literal[string] )
identifier[components_ids] = identifier[values] . identifier[pop] ( literal[string] )
identifier[remoteci] = identifier[v1_utils] . identifier[verify_existence_and_get] ( identifier[user] . identifier[id] , identifier[models] . identifier[REMOTECIS] )
keyword[if] identifier[remoteci] [ literal[string] ]!= literal[string] :
identifier[message] = literal[string] % identifier[remoteci] [ literal[string] ]
keyword[raise] identifier[dci_exc] . identifier[DCIException] ( identifier[message] , identifier[status_code] = literal[int] )
identifier[topic] = identifier[v1_utils] . identifier[verify_existence_and_get] ( identifier[topic_id] , identifier[models] . identifier[TOPICS] )
keyword[if] identifier[topic] [ literal[string] ]!= literal[string] :
identifier[msg] = literal[string] %( identifier[topic_id] , identifier[topic] [ literal[string] ])
keyword[raise] identifier[dci_exc] . identifier[DCIException] ( identifier[msg] , identifier[status_code] = literal[int] )
identifier[v1_utils] . identifier[verify_team_in_topic] ( identifier[user] , identifier[topic_id] )
keyword[if] identifier[topic_id_secondary] :
identifier[topic_secondary] = identifier[v1_utils] . identifier[verify_existence_and_get] (
identifier[topic_id_secondary] , identifier[models] . identifier[TOPICS] )
keyword[if] identifier[topic_secondary] [ literal[string] ]!= literal[string] :
identifier[msg] = literal[string] %( identifier[topic_id_secondary] ,
identifier[topic] [ literal[string] ])
keyword[raise] identifier[dci_exc] . identifier[DCIException] ( identifier[msg] , identifier[status_code] = literal[int] )
identifier[v1_utils] . identifier[verify_team_in_topic] ( identifier[user] , identifier[topic_id_secondary] )
identifier[dry_run] = identifier[values] . identifier[pop] ( literal[string] )
keyword[if] identifier[dry_run] :
identifier[component_types] = identifier[components] . identifier[get_component_types_from_topic] ( identifier[topic_id] )
identifier[components_ids] = identifier[components] . identifier[get_last_components_by_type] (
identifier[component_types] ,
identifier[topic_id]
)
keyword[return] identifier[flask] . identifier[Response] (
identifier[json] . identifier[dumps] ({ literal[string] : identifier[components_ids] , literal[string] : keyword[None] }),
literal[int] ,
identifier[content_type] = literal[string]
)
identifier[remotecis] . identifier[kill_existing_jobs] ( identifier[remoteci] [ literal[string] ])
identifier[values] = identifier[_build_job] ( identifier[topic_id] , identifier[remoteci] , identifier[components_ids] , identifier[values] ,
identifier[topic_id_secondary] = identifier[topic_id_secondary] )
keyword[return] identifier[flask] . identifier[Response] ( identifier[json] . identifier[dumps] ({ literal[string] : identifier[values] }), literal[int] ,
identifier[headers] ={ literal[string] : identifier[values] [ literal[string] ]},
identifier[content_type] = literal[string] ) | def schedule_jobs(user):
"""Dispatch jobs to remotecis.
The remoteci can use this method to request a new job.
Before a job is dispatched, the server will flag as 'killed' all the
running jobs that were associated with the remoteci. This is because they
will never be finished.
"""
values = schemas.job_schedule.post(flask.request.json)
values.update({'id': utils.gen_uuid(), 'created_at': datetime.datetime.utcnow().isoformat(), 'updated_at': datetime.datetime.utcnow().isoformat(), 'etag': utils.gen_etag(), 'status': 'new', 'remoteci_id': user.id, 'user_agent': flask.request.environ.get('HTTP_USER_AGENT'), 'client_version': flask.request.environ.get('HTTP_CLIENT_VERSION')})
topic_id = values.pop('topic_id')
topic_id_secondary = values.pop('topic_id_secondary')
components_ids = values.pop('components_ids')
# check remoteci
remoteci = v1_utils.verify_existence_and_get(user.id, models.REMOTECIS)
if remoteci['state'] != 'active':
message = 'RemoteCI "%s" is disabled.' % remoteci['id']
raise dci_exc.DCIException(message, status_code=412) # depends on [control=['if'], data=[]]
# check primary topic
topic = v1_utils.verify_existence_and_get(topic_id, models.TOPICS)
if topic['state'] != 'active':
msg = 'Topic %s:%s not active.' % (topic_id, topic['name'])
raise dci_exc.DCIException(msg, status_code=412) # depends on [control=['if'], data=[]]
v1_utils.verify_team_in_topic(user, topic_id)
# check secondary topic
if topic_id_secondary:
topic_secondary = v1_utils.verify_existence_and_get(topic_id_secondary, models.TOPICS)
if topic_secondary['state'] != 'active':
msg = 'Topic %s:%s not active.' % (topic_id_secondary, topic['name'])
raise dci_exc.DCIException(msg, status_code=412) # depends on [control=['if'], data=[]]
v1_utils.verify_team_in_topic(user, topic_id_secondary) # depends on [control=['if'], data=[]]
dry_run = values.pop('dry_run')
if dry_run:
component_types = components.get_component_types_from_topic(topic_id)
components_ids = components.get_last_components_by_type(component_types, topic_id)
return flask.Response(json.dumps({'components_ids': components_ids, 'job': None}), 201, content_type='application/json') # depends on [control=['if'], data=[]]
remotecis.kill_existing_jobs(remoteci['id'])
values = _build_job(topic_id, remoteci, components_ids, values, topic_id_secondary=topic_id_secondary)
return flask.Response(json.dumps({'job': values}), 201, headers={'ETag': values['etag']}, content_type='application/json') |
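A hedged sketch of driving this endpoint from a test using Flask's test client. The '/api/v1/jobs/schedule' route and the Authorization header are assumptions made for illustration; routing, authentication, and the job_schedule schema live elsewhere in the application.

import json

def schedule_one(app, remoteci_auth, topic_id):
    client = app.test_client()
    resp = client.post('/api/v1/jobs/schedule',            # assumed route
                       data=json.dumps({'topic_id': topic_id,
                                        'topic_id_secondary': None,
                                        'components_ids': [],
                                        'dry_run': False}),
                       headers={'Authorization': remoteci_auth,
                                'Content-Type': 'application/json'})
    assert resp.status_code == 201                          # job was created
    return json.loads(resp.data)['job']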
def main(extension, strict_extensions, default_extension, x):
"""Top level zipline entry point.
"""
# install a logbook handler before performing any other operations
logbook.StderrHandler().push_application()
create_args(x, zipline.extension_args)
load_extensions(
default_extension,
extension,
strict_extensions,
os.environ,
) | def function[main, parameter[extension, strict_extensions, default_extension, x]]:
constant[Top level zipline entry point.
]
call[call[name[logbook].StderrHandler, parameter[]].push_application, parameter[]]
call[name[create_args], parameter[name[x], name[zipline].extension_args]]
call[name[load_extensions], parameter[name[default_extension], name[extension], name[strict_extensions], name[os].environ]] | keyword[def] identifier[main] ( identifier[extension] , identifier[strict_extensions] , identifier[default_extension] , identifier[x] ):
literal[string]
identifier[logbook] . identifier[StderrHandler] (). identifier[push_application] ()
identifier[create_args] ( identifier[x] , identifier[zipline] . identifier[extension_args] )
identifier[load_extensions] (
identifier[default_extension] ,
identifier[extension] ,
identifier[strict_extensions] ,
identifier[os] . identifier[environ] ,
) | def main(extension, strict_extensions, default_extension, x):
"""Top level zipline entry point.
"""
# install a logbook handler before performing any other operations
logbook.StderrHandler().push_application()
create_args(x, zipline.extension_args)
load_extensions(default_extension, extension, strict_extensions, os.environ) |
def break_mst(mst, i):
"""
Break mst into multiple MSTs by removing one node i.
Parameters
----------
mst : symmetrical square matrix
i : index of the mst where to break
Returns
-------
    list of dictionaries ('mst' and 'strokes' are the keys)
"""
for j in range(len(mst['mst'])):
mst['mst'][i][j] = 0
mst['mst'][j][i] = 0
_, components = scipy.sparse.csgraph.connected_components(mst['mst'])
comp_indices = {}
for el in set(components):
comp_indices[el] = {'strokes': [], 'strokes_i': []}
for i, comp_nr in enumerate(components):
comp_indices[comp_nr]['strokes'].append(mst['strokes'][i])
comp_indices[comp_nr]['strokes_i'].append(i)
mst_wood = []
for key in comp_indices:
matrix = []
for i, line in enumerate(mst['mst']):
line_add = []
if i not in comp_indices[key]['strokes_i']:
continue
for j, el in enumerate(line):
if j in comp_indices[key]['strokes_i']:
line_add.append(el)
matrix.append(line_add)
assert len(matrix) > 0, \
("len(matrix) == 0 (strokes: %s, mst=%s, i=%i)" %
(comp_indices[key]['strokes'], mst, i))
assert len(matrix) == len(matrix[0]), \
("matrix was %i x %i, but should be square" %
(len(matrix), len(matrix[0])))
assert len(matrix) == len(comp_indices[key]['strokes']), \
(("stroke length was not equal to matrix length "
"(strokes=%s, len(matrix)=%i)") %
(comp_indices[key]['strokes'], len(matrix)))
mst_wood.append({'mst': matrix,
'strokes': comp_indices[key]['strokes']})
return mst_wood | def function[break_mst, parameter[mst, i]]:
constant[
Break mst into multiple MSTs by removing one node i.
Parameters
----------
mst : symmetrical square matrix
    i : index of the node at which to break the mst
Returns
-------
    list of dictionaries ('mst' and 'strokes' are the keys)
]
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[mst]][constant[mst]]]]]]] begin[:]
call[call[call[name[mst]][constant[mst]]][name[i]]][name[j]] assign[=] constant[0]
call[call[call[name[mst]][constant[mst]]][name[j]]][name[i]] assign[=] constant[0]
<ast.Tuple object at 0x7da1b2853ac0> assign[=] call[name[scipy].sparse.csgraph.connected_components, parameter[call[name[mst]][constant[mst]]]]
variable[comp_indices] assign[=] dictionary[[], []]
for taget[name[el]] in starred[call[name[set], parameter[name[components]]]] begin[:]
call[name[comp_indices]][name[el]] assign[=] dictionary[[<ast.Constant object at 0x7da1b2853220>, <ast.Constant object at 0x7da1b2852230>], [<ast.List object at 0x7da1b2852770>, <ast.List object at 0x7da1b2853d30>]]
for taget[tuple[[<ast.Name object at 0x7da1b2850eb0>, <ast.Name object at 0x7da1b2851ba0>]]] in starred[call[name[enumerate], parameter[name[components]]]] begin[:]
call[call[call[name[comp_indices]][name[comp_nr]]][constant[strokes]].append, parameter[call[call[name[mst]][constant[strokes]]][name[i]]]]
call[call[call[name[comp_indices]][name[comp_nr]]][constant[strokes_i]].append, parameter[name[i]]]
variable[mst_wood] assign[=] list[[]]
for taget[name[key]] in starred[name[comp_indices]] begin[:]
variable[matrix] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b2850d90>, <ast.Name object at 0x7da1b2850670>]]] in starred[call[name[enumerate], parameter[call[name[mst]][constant[mst]]]]] begin[:]
variable[line_add] assign[=] list[[]]
if compare[name[i] <ast.NotIn object at 0x7da2590d7190> call[call[name[comp_indices]][name[key]]][constant[strokes_i]]] begin[:]
continue
for taget[tuple[[<ast.Name object at 0x7da1b2853dc0>, <ast.Name object at 0x7da1b28528f0>]]] in starred[call[name[enumerate], parameter[name[line]]]] begin[:]
if compare[name[j] in call[call[name[comp_indices]][name[key]]][constant[strokes_i]]] begin[:]
call[name[line_add].append, parameter[name[el]]]
call[name[matrix].append, parameter[name[line_add]]]
assert[compare[call[name[len], parameter[name[matrix]]] greater[>] constant[0]]]
assert[compare[call[name[len], parameter[name[matrix]]] equal[==] call[name[len], parameter[call[name[matrix]][constant[0]]]]]]
assert[compare[call[name[len], parameter[name[matrix]]] equal[==] call[name[len], parameter[call[call[name[comp_indices]][name[key]]][constant[strokes]]]]]]
call[name[mst_wood].append, parameter[dictionary[[<ast.Constant object at 0x7da1b2851e40>, <ast.Constant object at 0x7da1b2851ed0>], [<ast.Name object at 0x7da1b2851f00>, <ast.Subscript object at 0x7da1b28519f0>]]]]
return[name[mst_wood]] | keyword[def] identifier[break_mst] ( identifier[mst] , identifier[i] ):
literal[string]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[mst] [ literal[string] ])):
identifier[mst] [ literal[string] ][ identifier[i] ][ identifier[j] ]= literal[int]
identifier[mst] [ literal[string] ][ identifier[j] ][ identifier[i] ]= literal[int]
identifier[_] , identifier[components] = identifier[scipy] . identifier[sparse] . identifier[csgraph] . identifier[connected_components] ( identifier[mst] [ literal[string] ])
identifier[comp_indices] ={}
keyword[for] identifier[el] keyword[in] identifier[set] ( identifier[components] ):
identifier[comp_indices] [ identifier[el] ]={ literal[string] :[], literal[string] :[]}
keyword[for] identifier[i] , identifier[comp_nr] keyword[in] identifier[enumerate] ( identifier[components] ):
identifier[comp_indices] [ identifier[comp_nr] ][ literal[string] ]. identifier[append] ( identifier[mst] [ literal[string] ][ identifier[i] ])
identifier[comp_indices] [ identifier[comp_nr] ][ literal[string] ]. identifier[append] ( identifier[i] )
identifier[mst_wood] =[]
keyword[for] identifier[key] keyword[in] identifier[comp_indices] :
identifier[matrix] =[]
keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[mst] [ literal[string] ]):
identifier[line_add] =[]
keyword[if] identifier[i] keyword[not] keyword[in] identifier[comp_indices] [ identifier[key] ][ literal[string] ]:
keyword[continue]
keyword[for] identifier[j] , identifier[el] keyword[in] identifier[enumerate] ( identifier[line] ):
keyword[if] identifier[j] keyword[in] identifier[comp_indices] [ identifier[key] ][ literal[string] ]:
identifier[line_add] . identifier[append] ( identifier[el] )
identifier[matrix] . identifier[append] ( identifier[line_add] )
keyword[assert] identifier[len] ( identifier[matrix] )> literal[int] ,( literal[string] %
( identifier[comp_indices] [ identifier[key] ][ literal[string] ], identifier[mst] , identifier[i] ))
keyword[assert] identifier[len] ( identifier[matrix] )== identifier[len] ( identifier[matrix] [ literal[int] ]),( literal[string] %
( identifier[len] ( identifier[matrix] ), identifier[len] ( identifier[matrix] [ literal[int] ])))
keyword[assert] identifier[len] ( identifier[matrix] )== identifier[len] ( identifier[comp_indices] [ identifier[key] ][ literal[string] ]),(( literal[string]
literal[string] )%
( identifier[comp_indices] [ identifier[key] ][ literal[string] ], identifier[len] ( identifier[matrix] )))
identifier[mst_wood] . identifier[append] ({ literal[string] : identifier[matrix] ,
literal[string] : identifier[comp_indices] [ identifier[key] ][ literal[string] ]})
keyword[return] identifier[mst_wood] | def break_mst(mst, i):
"""
Break mst into multiple MSTs by removing one node i.
Parameters
----------
mst : symmetrical square matrix
    i : index of the node at which to break the mst
Returns
-------
    list of dictionaries ('mst' and 'strokes' are the keys)
"""
for j in range(len(mst['mst'])):
mst['mst'][i][j] = 0
mst['mst'][j][i] = 0 # depends on [control=['for'], data=['j']]
(_, components) = scipy.sparse.csgraph.connected_components(mst['mst'])
comp_indices = {}
for el in set(components):
comp_indices[el] = {'strokes': [], 'strokes_i': []} # depends on [control=['for'], data=['el']]
for (i, comp_nr) in enumerate(components):
comp_indices[comp_nr]['strokes'].append(mst['strokes'][i])
comp_indices[comp_nr]['strokes_i'].append(i) # depends on [control=['for'], data=[]]
mst_wood = []
for key in comp_indices:
matrix = []
for (i, line) in enumerate(mst['mst']):
line_add = []
if i not in comp_indices[key]['strokes_i']:
continue # depends on [control=['if'], data=[]]
for (j, el) in enumerate(line):
if j in comp_indices[key]['strokes_i']:
line_add.append(el) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
matrix.append(line_add) # depends on [control=['for'], data=[]]
assert len(matrix) > 0, 'len(matrix) == 0 (strokes: %s, mst=%s, i=%i)' % (comp_indices[key]['strokes'], mst, i)
assert len(matrix) == len(matrix[0]), 'matrix was %i x %i, but should be square' % (len(matrix), len(matrix[0]))
assert len(matrix) == len(comp_indices[key]['strokes']), 'stroke length was not equal to matrix length (strokes=%s, len(matrix)=%i)' % (comp_indices[key]['strokes'], len(matrix))
mst_wood.append({'mst': matrix, 'strokes': comp_indices[key]['strokes']}) # depends on [control=['for'], data=['key']]
return mst_wood |
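A small worked example of break_mst under the structure its docstring describes: 'mst' is a symmetric adjacency matrix and 'strokes' is the per-node payload. Cutting node 2 of a 4-node path leaves three trees; the sketch assumes scipy is importable, as the function itself requires.

mst = {'mst': [[0, 1, 0, 0],
               [1, 0, 1, 0],
               [0, 1, 0, 1],
               [0, 0, 1, 0]],
       'strokes': ['a', 'b', 'c', 'd']}          # path a - b - c - d

forest = break_mst(mst, i=2)                     # cut every edge touching 'c'
print(sorted(len(t['strokes']) for t in forest)) # [1, 1, 2]: {c}, {d}, {a, b}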
def project_destroy(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /project-xxxx/destroy API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2Fdestroy
"""
return DXHTTPRequest('/%s/destroy' % object_id, input_params, always_retry=always_retry, **kwargs) | def function[project_destroy, parameter[object_id, input_params, always_retry]]:
constant[
Invokes the /project-xxxx/destroy API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2Fdestroy
]
return[call[name[DXHTTPRequest], parameter[binary_operation[constant[/%s/destroy] <ast.Mod object at 0x7da2590d6920> name[object_id]], name[input_params]]]] | keyword[def] identifier[project_destroy] ( identifier[object_id] , identifier[input_params] ={}, identifier[always_retry] = keyword[True] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[DXHTTPRequest] ( literal[string] % identifier[object_id] , identifier[input_params] , identifier[always_retry] = identifier[always_retry] ,** identifier[kwargs] ) | def project_destroy(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /project-xxxx/destroy API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2Fdestroy
"""
return DXHTTPRequest('/%s/destroy' % object_id, input_params, always_retry=always_retry, **kwargs) |
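A usage sketch, assuming the dxpy-style bindings this helper belongs to and an already-authenticated session; the project ID and the terminateJobs flag below are illustrative placeholders.

import dxpy.api

# Irreversibly destroy a project and everything in it (ID is a placeholder).
dxpy.api.project_destroy('project-B3X8kz00q2kJF0GQVPB00001',
                         input_params={'terminateJobs': True})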
def disconnect(self, cback, subscribers=None, instance=None):
"""Remove a previously added function or method from the set of the
signal's handlers.
:param cback: the callback (or *handler*) to be added to the set
:returns: ``None`` or the value returned by the corresponding wrapper
"""
if subscribers is None:
subscribers = self.subscribers
# wrapper
if self._fdisconnect is not None:
def _disconnect(cback):
self._disconnect(subscribers, cback)
notify = partial(self._notify_one, instance)
if instance is not None:
result = self._fdisconnect(instance, cback, subscribers,
_disconnect, notify)
else:
result = self._fdisconnect(cback, subscribers, _disconnect,
notify)
if inspect.isawaitable(result):
result = pull_result(result)
else:
self._disconnect(subscribers, cback)
result = None
return result | def function[disconnect, parameter[self, cback, subscribers, instance]]:
constant[Remove a previously added function or method from the set of the
signal's handlers.
:param cback: the callback (or *handler*) to be added to the set
:returns: ``None`` or the value returned by the corresponding wrapper
]
if compare[name[subscribers] is constant[None]] begin[:]
variable[subscribers] assign[=] name[self].subscribers
if compare[name[self]._fdisconnect is_not constant[None]] begin[:]
def function[_disconnect, parameter[cback]]:
call[name[self]._disconnect, parameter[name[subscribers], name[cback]]]
variable[notify] assign[=] call[name[partial], parameter[name[self]._notify_one, name[instance]]]
if compare[name[instance] is_not constant[None]] begin[:]
variable[result] assign[=] call[name[self]._fdisconnect, parameter[name[instance], name[cback], name[subscribers], name[_disconnect], name[notify]]]
if call[name[inspect].isawaitable, parameter[name[result]]] begin[:]
variable[result] assign[=] call[name[pull_result], parameter[name[result]]]
return[name[result]] | keyword[def] identifier[disconnect] ( identifier[self] , identifier[cback] , identifier[subscribers] = keyword[None] , identifier[instance] = keyword[None] ):
literal[string]
keyword[if] identifier[subscribers] keyword[is] keyword[None] :
identifier[subscribers] = identifier[self] . identifier[subscribers]
keyword[if] identifier[self] . identifier[_fdisconnect] keyword[is] keyword[not] keyword[None] :
keyword[def] identifier[_disconnect] ( identifier[cback] ):
identifier[self] . identifier[_disconnect] ( identifier[subscribers] , identifier[cback] )
identifier[notify] = identifier[partial] ( identifier[self] . identifier[_notify_one] , identifier[instance] )
keyword[if] identifier[instance] keyword[is] keyword[not] keyword[None] :
identifier[result] = identifier[self] . identifier[_fdisconnect] ( identifier[instance] , identifier[cback] , identifier[subscribers] ,
identifier[_disconnect] , identifier[notify] )
keyword[else] :
identifier[result] = identifier[self] . identifier[_fdisconnect] ( identifier[cback] , identifier[subscribers] , identifier[_disconnect] ,
identifier[notify] )
keyword[if] identifier[inspect] . identifier[isawaitable] ( identifier[result] ):
identifier[result] = identifier[pull_result] ( identifier[result] )
keyword[else] :
identifier[self] . identifier[_disconnect] ( identifier[subscribers] , identifier[cback] )
identifier[result] = keyword[None]
keyword[return] identifier[result] | def disconnect(self, cback, subscribers=None, instance=None):
"""Remove a previously added function or method from the set of the
signal's handlers.
    :param cback: the callback (or *handler*) to be removed from the set
:returns: ``None`` or the value returned by the corresponding wrapper
"""
if subscribers is None:
subscribers = self.subscribers # depends on [control=['if'], data=['subscribers']]
# wrapper
if self._fdisconnect is not None:
def _disconnect(cback):
self._disconnect(subscribers, cback)
notify = partial(self._notify_one, instance)
if instance is not None:
result = self._fdisconnect(instance, cback, subscribers, _disconnect, notify) # depends on [control=['if'], data=['instance']]
else:
result = self._fdisconnect(cback, subscribers, _disconnect, notify)
if inspect.isawaitable(result):
result = pull_result(result) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
self._disconnect(subscribers, cback)
result = None
return result |
def run(self):
"""Executed by Sphinx.
    :returns: List containing a single DisqusNode instance with config values passed as arguments.
:rtype: list
"""
disqus_shortname = self.get_shortname()
disqus_identifier = self.get_identifier()
return [DisqusNode(disqus_shortname, disqus_identifier)] | def function[run, parameter[self]]:
constant[Executed by Sphinx.
    :returns: List containing a single DisqusNode instance with config values passed as arguments.
:rtype: list
]
variable[disqus_shortname] assign[=] call[name[self].get_shortname, parameter[]]
variable[disqus_identifier] assign[=] call[name[self].get_identifier, parameter[]]
return[list[[<ast.Call object at 0x7da1b039bd30>]]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[disqus_shortname] = identifier[self] . identifier[get_shortname] ()
identifier[disqus_identifier] = identifier[self] . identifier[get_identifier] ()
keyword[return] [ identifier[DisqusNode] ( identifier[disqus_shortname] , identifier[disqus_identifier] )] | def run(self):
"""Executed by Sphinx.
    :returns: List containing a single DisqusNode instance with config values passed as arguments.
:rtype: list
"""
disqus_shortname = self.get_shortname()
disqus_identifier = self.get_identifier()
return [DisqusNode(disqus_shortname, disqus_identifier)] |
def _mode(self, s):
"""Check file mode format and parse into an int.
:return: mode as integer
"""
# Note: Output from git-fast-export slightly different to spec
if s in [b'644', b'100644', b'0100644']:
return 0o100644
elif s in [b'755', b'100755', b'0100755']:
return 0o100755
elif s in [b'040000', b'0040000']:
return 0o40000
elif s in [b'120000', b'0120000']:
return 0o120000
elif s in [b'160000', b'0160000']:
return 0o160000
else:
self.abort(errors.BadFormat, 'filemodify', 'mode', s) | def function[_mode, parameter[self, s]]:
constant[Check file mode format and parse into an int.
:return: mode as integer
]
if compare[name[s] in list[[<ast.Constant object at 0x7da1b0ac82b0>, <ast.Constant object at 0x7da1b0a04c70>, <ast.Constant object at 0x7da1b0a04d90>]]] begin[:]
return[constant[33188]] | keyword[def] identifier[_mode] ( identifier[self] , identifier[s] ):
literal[string]
keyword[if] identifier[s] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[return] literal[int]
keyword[elif] identifier[s] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[return] literal[int]
keyword[elif] identifier[s] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] literal[int]
keyword[elif] identifier[s] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] literal[int]
keyword[elif] identifier[s] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] literal[int]
keyword[else] :
identifier[self] . identifier[abort] ( identifier[errors] . identifier[BadFormat] , literal[string] , literal[string] , identifier[s] ) | def _mode(self, s):
"""Check file mode format and parse into an int.
:return: mode as integer
"""
# Note: Output from git-fast-export slightly different to spec
if s in [b'644', b'100644', b'0100644']:
return 33188 # depends on [control=['if'], data=[]]
elif s in [b'755', b'100755', b'0100755']:
return 33261 # depends on [control=['if'], data=[]]
elif s in [b'040000', b'0040000']:
return 16384 # depends on [control=['if'], data=[]]
elif s in [b'120000', b'0120000']:
return 40960 # depends on [control=['if'], data=[]]
elif s in [b'160000', b'0160000']:
return 57344 # depends on [control=['if'], data=[]]
else:
self.abort(errors.BadFormat, 'filemodify', 'mode', s) |
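The accepted spellings map onto git's five tree-entry modes, so the same normalization can be phrased as a lookup table. A standalone sketch (parse_git_mode is a made-up name for illustration):

def parse_git_mode(s):
    table = {
        b'644': 0o100644, b'100644': 0o100644, b'0100644': 0o100644,  # regular file
        b'755': 0o100755, b'100755': 0o100755, b'0100755': 0o100755,  # executable
        b'040000': 0o40000,  b'0040000': 0o40000,                     # directory (tree)
        b'120000': 0o120000, b'0120000': 0o120000,                    # symbolic link
        b'160000': 0o160000, b'0160000': 0o160000,                    # gitlink / submodule
    }
    return table[s]  # a KeyError here plays the role of self.abort(...) above

assert parse_git_mode(b'755') == 0o100755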
def as_qubit_order(val: 'qubit_order_or_list.QubitOrderOrList'
) -> 'QubitOrder':
"""Converts a value into a basis.
Args:
val: An iterable or a basis.
Returns:
The basis implied by the value.
"""
if isinstance(val, collections.Iterable):
return QubitOrder.explicit(val)
if isinstance(val, QubitOrder):
return val
raise ValueError(
"Don't know how to interpret <{}> as a Basis.".format(val)) | def function[as_qubit_order, parameter[val]]:
constant[Converts a value into a basis.
Args:
val: An iterable or a basis.
Returns:
The basis implied by the value.
]
if call[name[isinstance], parameter[name[val], name[collections].Iterable]] begin[:]
return[call[name[QubitOrder].explicit, parameter[name[val]]]]
if call[name[isinstance], parameter[name[val], name[QubitOrder]]] begin[:]
return[name[val]]
<ast.Raise object at 0x7da20c7cbf10> | keyword[def] identifier[as_qubit_order] ( identifier[val] : literal[string]
)-> literal[string] :
literal[string]
keyword[if] identifier[isinstance] ( identifier[val] , identifier[collections] . identifier[Iterable] ):
keyword[return] identifier[QubitOrder] . identifier[explicit] ( identifier[val] )
keyword[if] identifier[isinstance] ( identifier[val] , identifier[QubitOrder] ):
keyword[return] identifier[val]
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[val] )) | def as_qubit_order(val: 'qubit_order_or_list.QubitOrderOrList') -> 'QubitOrder':
"""Converts a value into a basis.
Args:
val: An iterable or a basis.
Returns:
The basis implied by the value.
"""
if isinstance(val, collections.Iterable):
return QubitOrder.explicit(val) # depends on [control=['if'], data=[]]
if isinstance(val, QubitOrder):
return val # depends on [control=['if'], data=[]]
raise ValueError("Don't know how to interpret <{}> as a Basis.".format(val)) |
def generate_response(chaldict,uri,username,passwd,method='GET',cnonce=None):
"""
Generate an authorization response dictionary. chaldict should contain the digest
challenge in dict form. Use fetch_challenge to create a chaldict from a HTTPResponse
object like this: fetch_challenge(res.getheaders()).
returns dict (the authdict)
Note. Use build_authorization_arg() to turn an authdict into the final Authorization
header value.
"""
authdict = {}
qop = dict_fetch(chaldict,'qop')
domain = dict_fetch(chaldict,'domain')
nonce = dict_fetch(chaldict,'nonce')
stale = dict_fetch(chaldict,'stale')
algorithm = dict_fetch(chaldict,'algorithm','MD5')
realm = dict_fetch(chaldict,'realm','MD5')
opaque = dict_fetch(chaldict,'opaque')
nc = "00000001"
if not cnonce:
cnonce = H(str(random.randint(0,10000000)))[:16]
if algorithm.lower()=='md5-sess':
a1 = A1(username,realm,passwd,nonce,cnonce)
else:
a1 = A1(username,realm,passwd)
a2 = A2(method,uri)
secret = H(a1)
data = '%s:%s:%s:%s:%s' % (nonce,nc,cnonce,qop,H(a2))
authdict['username'] = '"%s"' % username
authdict['realm'] = '"%s"' % realm
authdict['nonce'] = '"%s"' % nonce
authdict['uri'] = '"%s"' % uri
authdict['response'] = '"%s"' % KD(secret,data)
authdict['qop'] = '"%s"' % qop
authdict['nc'] = nc
authdict['cnonce'] = '"%s"' % cnonce
return authdict | def function[generate_response, parameter[chaldict, uri, username, passwd, method, cnonce]]:
constant[
Generate an authorization response dictionary. chaldict should contain the digest
challenge in dict form. Use fetch_challenge to create a chaldict from a HTTPResponse
object like this: fetch_challenge(res.getheaders()).
returns dict (the authdict)
Note. Use build_authorization_arg() to turn an authdict into the final Authorization
header value.
]
variable[authdict] assign[=] dictionary[[], []]
variable[qop] assign[=] call[name[dict_fetch], parameter[name[chaldict], constant[qop]]]
variable[domain] assign[=] call[name[dict_fetch], parameter[name[chaldict], constant[domain]]]
variable[nonce] assign[=] call[name[dict_fetch], parameter[name[chaldict], constant[nonce]]]
variable[stale] assign[=] call[name[dict_fetch], parameter[name[chaldict], constant[stale]]]
variable[algorithm] assign[=] call[name[dict_fetch], parameter[name[chaldict], constant[algorithm], constant[MD5]]]
variable[realm] assign[=] call[name[dict_fetch], parameter[name[chaldict], constant[realm], constant[MD5]]]
variable[opaque] assign[=] call[name[dict_fetch], parameter[name[chaldict], constant[opaque]]]
variable[nc] assign[=] constant[00000001]
if <ast.UnaryOp object at 0x7da1b1578df0> begin[:]
variable[cnonce] assign[=] call[call[name[H], parameter[call[name[str], parameter[call[name[random].randint, parameter[constant[0], constant[10000000]]]]]]]][<ast.Slice object at 0x7da1b157b970>]
if compare[call[name[algorithm].lower, parameter[]] equal[==] constant[md5-sess]] begin[:]
variable[a1] assign[=] call[name[A1], parameter[name[username], name[realm], name[passwd], name[nonce], name[cnonce]]]
variable[a2] assign[=] call[name[A2], parameter[name[method], name[uri]]]
variable[secret] assign[=] call[name[H], parameter[name[a1]]]
variable[data] assign[=] binary_operation[constant[%s:%s:%s:%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b15791e0>, <ast.Name object at 0x7da1b1578a30>, <ast.Name object at 0x7da1b1578100>, <ast.Name object at 0x7da1b157b910>, <ast.Call object at 0x7da1b1579de0>]]]
call[name[authdict]][constant[username]] assign[=] binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> name[username]]
call[name[authdict]][constant[realm]] assign[=] binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> name[realm]]
call[name[authdict]][constant[nonce]] assign[=] binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> name[nonce]]
call[name[authdict]][constant[uri]] assign[=] binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> name[uri]]
call[name[authdict]][constant[response]] assign[=] binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> call[name[KD], parameter[name[secret], name[data]]]]
call[name[authdict]][constant[qop]] assign[=] binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> name[qop]]
call[name[authdict]][constant[nc]] assign[=] name[nc]
call[name[authdict]][constant[cnonce]] assign[=] binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> name[cnonce]]
return[name[authdict]] | keyword[def] identifier[generate_response] ( identifier[chaldict] , identifier[uri] , identifier[username] , identifier[passwd] , identifier[method] = literal[string] , identifier[cnonce] = keyword[None] ):
literal[string]
identifier[authdict] ={}
identifier[qop] = identifier[dict_fetch] ( identifier[chaldict] , literal[string] )
identifier[domain] = identifier[dict_fetch] ( identifier[chaldict] , literal[string] )
identifier[nonce] = identifier[dict_fetch] ( identifier[chaldict] , literal[string] )
identifier[stale] = identifier[dict_fetch] ( identifier[chaldict] , literal[string] )
identifier[algorithm] = identifier[dict_fetch] ( identifier[chaldict] , literal[string] , literal[string] )
identifier[realm] = identifier[dict_fetch] ( identifier[chaldict] , literal[string] , literal[string] )
identifier[opaque] = identifier[dict_fetch] ( identifier[chaldict] , literal[string] )
identifier[nc] = literal[string]
keyword[if] keyword[not] identifier[cnonce] :
identifier[cnonce] = identifier[H] ( identifier[str] ( identifier[random] . identifier[randint] ( literal[int] , literal[int] )))[: literal[int] ]
keyword[if] identifier[algorithm] . identifier[lower] ()== literal[string] :
identifier[a1] = identifier[A1] ( identifier[username] , identifier[realm] , identifier[passwd] , identifier[nonce] , identifier[cnonce] )
keyword[else] :
identifier[a1] = identifier[A1] ( identifier[username] , identifier[realm] , identifier[passwd] )
identifier[a2] = identifier[A2] ( identifier[method] , identifier[uri] )
identifier[secret] = identifier[H] ( identifier[a1] )
identifier[data] = literal[string] %( identifier[nonce] , identifier[nc] , identifier[cnonce] , identifier[qop] , identifier[H] ( identifier[a2] ))
identifier[authdict] [ literal[string] ]= literal[string] % identifier[username]
identifier[authdict] [ literal[string] ]= literal[string] % identifier[realm]
identifier[authdict] [ literal[string] ]= literal[string] % identifier[nonce]
identifier[authdict] [ literal[string] ]= literal[string] % identifier[uri]
identifier[authdict] [ literal[string] ]= literal[string] % identifier[KD] ( identifier[secret] , identifier[data] )
identifier[authdict] [ literal[string] ]= literal[string] % identifier[qop]
identifier[authdict] [ literal[string] ]= identifier[nc]
identifier[authdict] [ literal[string] ]= literal[string] % identifier[cnonce]
keyword[return] identifier[authdict] | def generate_response(chaldict, uri, username, passwd, method='GET', cnonce=None):
"""
Generate an authorization response dictionary. chaldict should contain the digest
challenge in dict form. Use fetch_challenge to create a chaldict from a HTTPResponse
object like this: fetch_challenge(res.getheaders()).
returns dict (the authdict)
Note. Use build_authorization_arg() to turn an authdict into the final Authorization
header value.
"""
authdict = {}
qop = dict_fetch(chaldict, 'qop')
domain = dict_fetch(chaldict, 'domain')
nonce = dict_fetch(chaldict, 'nonce')
stale = dict_fetch(chaldict, 'stale')
algorithm = dict_fetch(chaldict, 'algorithm', 'MD5')
realm = dict_fetch(chaldict, 'realm', 'MD5')
opaque = dict_fetch(chaldict, 'opaque')
nc = '00000001'
if not cnonce:
cnonce = H(str(random.randint(0, 10000000)))[:16] # depends on [control=['if'], data=[]]
if algorithm.lower() == 'md5-sess':
a1 = A1(username, realm, passwd, nonce, cnonce) # depends on [control=['if'], data=[]]
else:
a1 = A1(username, realm, passwd)
a2 = A2(method, uri)
secret = H(a1)
data = '%s:%s:%s:%s:%s' % (nonce, nc, cnonce, qop, H(a2))
authdict['username'] = '"%s"' % username
authdict['realm'] = '"%s"' % realm
authdict['nonce'] = '"%s"' % nonce
authdict['uri'] = '"%s"' % uri
authdict['response'] = '"%s"' % KD(secret, data)
authdict['qop'] = '"%s"' % qop
authdict['nc'] = nc
authdict['cnonce'] = '"%s"' % cnonce
return authdict |
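The arithmetic above is RFC 2617 HTTP Digest authentication. H, KD, A1, and A2 are defined elsewhere in the original module; the minimal MD5-based stand-ins below (consistent with the 'MD5' defaults above, and covering only the plain-MD5 path, not MD5-sess) make the response computation runnable as a self-contained sketch with made-up inputs.

import hashlib

def H(s):                       # hash
    return hashlib.md5(s.encode('utf-8')).hexdigest()

def KD(secret, data):           # keyed digest
    return H('%s:%s' % (secret, data))

def A1(user, realm, password):  # credentials string
    return '%s:%s:%s' % (user, realm, password)

def A2(method, uri):            # request string
    return '%s:%s' % (method, uri)

# response = KD(H(A1), nonce:nc:cnonce:qop:H(A2)), i.e. the value stored
# in authdict['response'] above.
secret = H(A1('alice', 'example.org', 's3cret'))
data = '%s:%s:%s:%s:%s' % ('dcd98b7102dd2f0e', '00000001', 'deadbeef', 'auth',
                           H(A2('GET', '/dir/index.html')))
print(KD(secret, data))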
def _ask_for_credentials():
"""
Asks the user for their email and password.
"""
_print_msg('Please enter your SolveBio credentials')
domain = raw_input('Domain (e.g. <domain>.solvebio.com): ')
# Check to see if this domain supports password authentication
try:
account = client.request('get', '/p/accounts/{}'.format(domain))
auth = account['authentication']
    except Exception:
raise SolveError('Invalid domain: {}'.format(domain))
# Account must support password-based login
if auth.get('login') or auth.get('SAML', {}).get('simple_login'):
email = raw_input('Email: ')
password = getpass.getpass('Password (typing will be hidden): ')
return (domain, email, password)
else:
_print_msg(
'Your domain uses Single Sign-On (SSO). '
'Please visit https://{}.solvebio.com/settings/security '
'for instructions on how to log in.'.format(domain))
sys.exit(1) | def function[_ask_for_credentials, parameter[]]:
constant[
Asks the user for their email and password.
]
call[name[_print_msg], parameter[constant[Please enter your SolveBio credentials]]]
variable[domain] assign[=] call[name[raw_input], parameter[constant[Domain (e.g. <domain>.solvebio.com): ]]]
<ast.Try object at 0x7da20e955720>
if <ast.BoolOp object at 0x7da20e954520> begin[:]
variable[email] assign[=] call[name[raw_input], parameter[constant[Email: ]]]
variable[password] assign[=] call[name[getpass].getpass, parameter[constant[Password (typing will be hidden): ]]]
return[tuple[[<ast.Name object at 0x7da20e9576d0>, <ast.Name object at 0x7da20e955a50>, <ast.Name object at 0x7da20e956a70>]]] | keyword[def] identifier[_ask_for_credentials] ():
literal[string]
identifier[_print_msg] ( literal[string] )
identifier[domain] = identifier[raw_input] ( literal[string] )
keyword[try] :
identifier[account] = identifier[client] . identifier[request] ( literal[string] , literal[string] . identifier[format] ( identifier[domain] ))
identifier[auth] = identifier[account] [ literal[string] ]
    keyword[except] identifier[Exception] :
keyword[raise] identifier[SolveError] ( literal[string] . identifier[format] ( identifier[domain] ))
keyword[if] identifier[auth] . identifier[get] ( literal[string] ) keyword[or] identifier[auth] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] ):
identifier[email] = identifier[raw_input] ( literal[string] )
identifier[password] = identifier[getpass] . identifier[getpass] ( literal[string] )
keyword[return] ( identifier[domain] , identifier[email] , identifier[password] )
keyword[else] :
identifier[_print_msg] (
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[domain] ))
identifier[sys] . identifier[exit] ( literal[int] ) | def _ask_for_credentials():
"""
Asks the user for their email and password.
"""
_print_msg('Please enter your SolveBio credentials')
domain = raw_input('Domain (e.g. <domain>.solvebio.com): ')
# Check to see if this domain supports password authentication
try:
account = client.request('get', '/p/accounts/{}'.format(domain))
auth = account['authentication'] # depends on [control=['try'], data=[]]
    except Exception:
raise SolveError('Invalid domain: {}'.format(domain)) # depends on [control=['except'], data=[]]
# Account must support password-based login
if auth.get('login') or auth.get('SAML', {}).get('simple_login'):
email = raw_input('Email: ')
password = getpass.getpass('Password (typing will be hidden): ')
return (domain, email, password) # depends on [control=['if'], data=[]]
else:
_print_msg('Your domain uses Single Sign-On (SSO). Please visit https://{}.solvebio.com/settings/security for instructions on how to log in.'.format(domain))
sys.exit(1) |
def run_cmd(call, cmd, *, echo=True, **kwargs):
"""Run a command and echo it first"""
if echo:
print('$> ' + ' '.join(map(pipes.quote, cmd)))
return call(cmd, **kwargs) | def function[run_cmd, parameter[call, cmd]]:
constant[Run a command and echo it first]
if name[echo] begin[:]
call[name[print], parameter[binary_operation[constant[$> ] + call[constant[ ].join, parameter[call[name[map], parameter[name[pipes].quote, name[cmd]]]]]]]]
return[call[name[call], parameter[name[cmd]]]] | keyword[def] identifier[run_cmd] ( identifier[call] , identifier[cmd] ,*, identifier[echo] = keyword[True] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[echo] :
identifier[print] ( literal[string] + literal[string] . identifier[join] ( identifier[map] ( identifier[pipes] . identifier[quote] , identifier[cmd] )))
keyword[return] identifier[call] ( identifier[cmd] ,** identifier[kwargs] ) | def run_cmd(call, cmd, *, echo=True, **kwargs):
"""Run a command and echo it first"""
if echo:
print('$> ' + ' '.join(map(pipes.quote, cmd))) # depends on [control=['if'], data=[]]
return call(cmd, **kwargs) |
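A usage sketch: any callable with a subprocess-style signature works as `call`; the helper itself needs `pipes` imported at module scope for the shell-safe echoing.

import pipes       # required by run_cmd for pipes.quote
import subprocess

run_cmd(subprocess.check_call, ['ls', '-l', '/tmp'])               # echoes, then runs
out = run_cmd(subprocess.check_output, ['echo', 'hi'], echo=False) # b'hi\n', no echo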
def parse(self, argv=None, keyring_namespace=None, strict=False):
"""Find settings from all sources.
:keyword strict: fail if unknown args are passed in.
:returns: dict of parsed option name and values
:raises: SystemExit if invalid arguments supplied along with stdout
message (same as argparser).
"""
if argv is None:
argv = self._argv or sys.argv
results = self.load_options(argv=argv,
keyring_namespace=keyring_namespace)
# Run validation
results = self.validate_config(results, argv=argv, strict=strict)
self._values = results
return self | def function[parse, parameter[self, argv, keyring_namespace, strict]]:
constant[Find settings from all sources.
:keyword strict: fail if unknown args are passed in.
:returns: dict of parsed option name and values
:raises: SystemExit if invalid arguments supplied along with stdout
message (same as argparser).
]
if compare[name[argv] is constant[None]] begin[:]
variable[argv] assign[=] <ast.BoolOp object at 0x7da1b09d0fd0>
variable[results] assign[=] call[name[self].load_options, parameter[]]
variable[results] assign[=] call[name[self].validate_config, parameter[name[results]]]
name[self]._values assign[=] name[results]
return[name[self]] | keyword[def] identifier[parse] ( identifier[self] , identifier[argv] = keyword[None] , identifier[keyring_namespace] = keyword[None] , identifier[strict] = keyword[False] ):
literal[string]
keyword[if] identifier[argv] keyword[is] keyword[None] :
identifier[argv] = identifier[self] . identifier[_argv] keyword[or] identifier[sys] . identifier[argv]
identifier[results] = identifier[self] . identifier[load_options] ( identifier[argv] = identifier[argv] ,
identifier[keyring_namespace] = identifier[keyring_namespace] )
identifier[results] = identifier[self] . identifier[validate_config] ( identifier[results] , identifier[argv] = identifier[argv] , identifier[strict] = identifier[strict] )
identifier[self] . identifier[_values] = identifier[results]
keyword[return] identifier[self] | def parse(self, argv=None, keyring_namespace=None, strict=False):
"""Find settings from all sources.
:keyword strict: fail if unknown args are passed in.
:returns: dict of parsed option name and values
:raises: SystemExit if invalid arguments supplied along with stdout
message (same as argparser).
"""
if argv is None:
argv = self._argv or sys.argv # depends on [control=['if'], data=['argv']]
results = self.load_options(argv=argv, keyring_namespace=keyring_namespace)
# Run validation
results = self.validate_config(results, argv=argv, strict=strict)
self._values = results
return self |
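# Usage sketch for parse() (hypothetical: the concrete config class and its
# option definitions come from the surrounding library, not from this file):
#
#     config = Config(options=my_options)
#     config.parse(argv=['prog', '--verbose'], strict=True)
#     # parse() returns self, so the resolved values can be read afterwards:
#     values = config._values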
def get_epoch_price_divisor( block_height, namespace_id, units ):
"""
what's the name price divisor for this epoch?
Not all epochs have one---if this epoch does NOT have BLOCKSTACK_INT_DIVISION set,
use get_epoch_price_multiplier() instead.
"""
try:
assert units in [TOKEN_TYPE_STACKS, 'BTC'], 'Unknown units {}'.format(units)
except AssertionError as ae:
log.exception(ae)
log.error("FATAL: No such units {}".format(units))
os.abort()
divisor = 'PRICE_DIVISOR' if units == 'BTC' else 'PRICE_DIVISOR_STACKS'
epoch_config = get_epoch_config( block_height )
d = None
if epoch_config['namespaces'].has_key(namespace_id):
d = epoch_config['namespaces'][namespace_id][divisor]
else:
d = epoch_config['namespaces']['*'][divisor]
try:
assert d is not None
except AssertionError as ae:
log.exception(ae)
log.error("FATAL: Tried to get a price divisor in an epoch without price divisors!")
os.abort()
return d | def function[get_epoch_price_divisor, parameter[block_height, namespace_id, units]]:
constant[
what's the name price divisor for this epoch?
Not all epochs have one---if this epoch does NOT have BLOCKSTACK_INT_DIVISION set,
use get_epoch_price_multiplier() instead.
]
<ast.Try object at 0x7da18f00d960>
variable[divisor] assign[=] <ast.IfExp object at 0x7da18f00d330>
variable[epoch_config] assign[=] call[name[get_epoch_config], parameter[name[block_height]]]
variable[d] assign[=] constant[None]
if call[call[name[epoch_config]][constant[namespaces]].has_key, parameter[name[namespace_id]]] begin[:]
variable[d] assign[=] call[call[call[name[epoch_config]][constant[namespaces]]][name[namespace_id]]][name[divisor]]
<ast.Try object at 0x7da18f00f940>
return[name[d]] | keyword[def] identifier[get_epoch_price_divisor] ( identifier[block_height] , identifier[namespace_id] , identifier[units] ):
literal[string]
keyword[try] :
keyword[assert] identifier[units] keyword[in] [ identifier[TOKEN_TYPE_STACKS] , literal[string] ], literal[string] . identifier[format] ( identifier[units] )
keyword[except] identifier[AssertionError] keyword[as] identifier[ae] :
identifier[log] . identifier[exception] ( identifier[ae] )
identifier[log] . identifier[error] ( literal[string] . identifier[format] ( identifier[units] ))
identifier[os] . identifier[abort] ()
identifier[divisor] = literal[string] keyword[if] identifier[units] == literal[string] keyword[else] literal[string]
identifier[epoch_config] = identifier[get_epoch_config] ( identifier[block_height] )
identifier[d] = keyword[None]
keyword[if] identifier[epoch_config] [ literal[string] ]. identifier[has_key] ( identifier[namespace_id] ):
identifier[d] = identifier[epoch_config] [ literal[string] ][ identifier[namespace_id] ][ identifier[divisor] ]
keyword[else] :
identifier[d] = identifier[epoch_config] [ literal[string] ][ literal[string] ][ identifier[divisor] ]
keyword[try] :
keyword[assert] identifier[d] keyword[is] keyword[not] keyword[None]
keyword[except] identifier[AssertionError] keyword[as] identifier[ae] :
identifier[log] . identifier[exception] ( identifier[ae] )
identifier[log] . identifier[error] ( literal[string] )
identifier[os] . identifier[abort] ()
keyword[return] identifier[d] | def get_epoch_price_divisor(block_height, namespace_id, units):
"""
what's the name price divisor for this epoch?
Not all epochs have one---if this epoch does NOT have BLOCKSTACK_INT_DIVISION set,
use get_epoch_price_multiplier() instead.
"""
try:
assert units in [TOKEN_TYPE_STACKS, 'BTC'], 'Unknown units {}'.format(units) # depends on [control=['try'], data=[]]
except AssertionError as ae:
log.exception(ae)
log.error('FATAL: No such units {}'.format(units))
os.abort() # depends on [control=['except'], data=['ae']]
divisor = 'PRICE_DIVISOR' if units == 'BTC' else 'PRICE_DIVISOR_STACKS'
epoch_config = get_epoch_config(block_height)
d = None
if epoch_config['namespaces'].has_key(namespace_id):
d = epoch_config['namespaces'][namespace_id][divisor] # depends on [control=['if'], data=[]]
else:
d = epoch_config['namespaces']['*'][divisor]
try:
assert d is not None # depends on [control=['try'], data=[]]
except AssertionError as ae:
log.exception(ae)
log.error('FATAL: Tried to get a price divisor in an epoch without price divisors!')
os.abort() # depends on [control=['except'], data=['ae']]
return d |
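# Sketch of the lookup performed above, against a made-up epoch config. The
# key names PRICE_DIVISOR / PRICE_DIVISOR_STACKS and the '*' wildcard
# namespace match the code; the numbers are purely illustrative.
#
#     epoch_config = {'namespaces': {
#         'id': {'PRICE_DIVISOR': 4, 'PRICE_DIVISOR_STACKS': 1},
#         '*':  {'PRICE_DIVISOR': 2, 'PRICE_DIVISOR_STACKS': 1},
#     }}
#     # get_epoch_price_divisor(block, 'id', 'BTC')  -> 4  (namespace-specific entry)
#     # get_epoch_price_divisor(block, 'xyz', 'BTC') -> 2  (falls back to '*')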
def _get_input_steps(self):
"""
Search and return all steps that have no parents. These are the steps that get the input data.
"""
input_steps = []
for step in self.steps_sorted:
parent_steps = self._parent_steps(step)
if len(parent_steps) == 0:
input_steps.append(step)
return input_steps | def function[_get_input_steps, parameter[self]]:
constant[
Search and return all steps that have no parents. These are the steps that get the input data.
]
variable[input_steps] assign[=] list[[]]
for taget[name[step]] in starred[name[self].steps_sorted] begin[:]
variable[parent_steps] assign[=] call[name[self]._parent_steps, parameter[name[step]]]
if compare[call[name[len], parameter[name[parent_steps]]] equal[==] constant[0]] begin[:]
call[name[input_steps].append, parameter[name[step]]]
return[name[input_steps]] | keyword[def] identifier[_get_input_steps] ( identifier[self] ):
literal[string]
identifier[input_steps] =[]
keyword[for] identifier[step] keyword[in] identifier[self] . identifier[steps_sorted] :
identifier[parent_steps] = identifier[self] . identifier[_parent_steps] ( identifier[step] )
keyword[if] identifier[len] ( identifier[parent_steps] )== literal[int] :
identifier[input_steps] . identifier[append] ( identifier[step] )
keyword[return] identifier[input_steps] | def _get_input_steps(self):
"""
Search and return all steps that have no parents. These are the steps that get the input data.
"""
input_steps = []
for step in self.steps_sorted:
parent_steps = self._parent_steps(step)
if len(parent_steps) == 0:
input_steps.append(step) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['step']]
return input_steps |
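# The same root-finding idea as a standalone sketch: the input steps are the
# DAG nodes with no incoming edges. The helper below is hypothetical and not
# part of the pipeline class above.
def find_input_steps(steps, parents_of):
    """Return the steps with no parents, i.e. the entry points of the DAG."""
    return [s for s in steps if not parents_of.get(s)]

# Example: load -> clean -> train; only 'load' receives the input data.
assert find_input_steps(['load', 'clean', 'train'],
                        {'clean': ['load'], 'train': ['clean']}) == ['load']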
def _extractEgaNegFromSent( sentTokens, clausesDict, foundChains ):
    ''' Method that detects the negation(s) tied to an 'ega' predicate in the given sentence: ega + a compatible verb.
        *) When 'ega' is followed by an already detected verb chain with positive polarity (e.g.
           a chain that starts with a verb in the imperative mood), 'ega' is prepended to the
           existing chain and the chain's polarity is switched to negative;
        *) Otherwise a verb compatible with 'ei' is searched for after 'ega' (usually this is a
           'nud'-verb) and joined (following certain heuristics) with 'ega' into a single phrase;
        Returns True if, among the new 'ega' phrases, there was one that could potentially still
        be extended by attaching another verb; otherwise returns False;
        Why does this method operate on the whole sentence rather than on a single clause?
         >> Since 'ega' is often marked as a clause boundary (a clause may end right after 'ega'),
            the verbs following 'ega' cannot always be searched within one clause; several
            adjacent clauses have to be inspected at once; here we simplify and look at the
            whole sentence.
    '''
sonaEga = WordTemplate({ROOT:'^ega$',POSTAG:'[DJ]'})
verbEiJarel = WordTemplate({POSTAG:'V',FORM:'(o|nud|tud|nuks|nuvat|vat|ks|ta|taks|tavat)$'})
verbEiJarel2 = WordTemplate({ROOT:'^mine$', POSTAG:'V',FORM:'neg o$'})
verbTud = WordTemplate({POSTAG:'V',FORM:'(tud)$'})
verb = WordTemplate({POSTAG:'V'})
verbOlema = WordTemplate({POSTAG:'V', ROOT:'^(ole)$'})
    # Record the words that already belong to some previously detected verb phrase
annotatedWords = []
for verbObj in foundChains:
if (len(verbObj[PATTERN])==1 and re.match('^(ei|ära|ega)$', verbObj[PATTERN][0])):
            # Skip ei/ära/ega occurring on their own, since these most likely do not interfere
continue
annotatedWords.extend( verbObj[PHRASE] )
expandableEgaFound = False
for i in range(len(sentTokens)):
token = sentTokens[i]
if sonaEga.matches(token) and token[WORD_ID] not in annotatedWords:
matchFound = False
if i+1 < len(sentTokens) and sentTokens[i+1][WORD_ID] in annotatedWords:
                #
                #  The simplest case: a verb phrase has already been detected (and
                #  presumably at its maximal length), so all that remains is to prepend
                #  'ega' to it and switch its polarity to negative:
                #      Te saate kaebusi palju ega_0 jõua_0 nendele reageerida_0 .
                #      vene keelt ta ei mõista ega_0 või_0 seepärast olla_0 vene spioon
                #  NB! The attachment is still NOT performed when:
                #  *) the following phrase already has negative polarity (such an extension
                #     would merely add more patterns without adding any real
                #     information);
                #  *) the following word is not a verb compatible with 'ei' (most likely
                #     something went wrong, e.g. in morphological disambiguation);
                #  *) the following word belongs to a verb chain that starts before 'ega'
                #     (which actually indicates that the chain in question was split up incorrectly);
                #
for verbObj in foundChains:
if sentTokens[i+1][WORD_ID] in verbObj[PHRASE] and verbObj[POLARITY] != 'NEG' and \
(verbEiJarel.matches( sentTokens[i+1] ) or verbEiJarel2.matches( sentTokens[i+1] )) \
and i < min( verbObj[PHRASE] ):
verbObj[PHRASE].insert(0, token[WORD_ID])
verbObj[PATTERN].insert(0, 'ega')
verbObj[POLARITY] = 'NEG'
verbObj[ANALYSIS_IDS].insert(0, _getMatchingAnalysisIDs( token, sonaEga ) )
annotatedWords.append( token[WORD_ID] )
matchFound = True
break
elif i+1 < len(sentTokens) and verbEiJarel.matches( sentTokens[i+1] ) and \
sentTokens[i+1][WORD_ID] not in annotatedWords:
                #
                # Heuristic:
                #    if 'ega' is immediately followed by a verb compatible with 'ei' (this
                #   should be an infinite nud/tud verb, since finite ones are most likely
                #   found and attached in the previous branch), extract it as a new phrase:
                #
                #      Hakkasin Ainikiga rääkima ega_0 pööranud_0 Ivole enam tähelepanu .
                #      Tereese oli tükk aega vait ega_0 teadnud_0 , kas tõtt rääkida või mitte .
                #
                # >> the clauseID becomes the ID of the following verb, since a clause
                #    boundary often follows 'ega' and the verbs preceding 'ega' certainly
                #    cannot be related to it.
clauseID = sentTokens[i+1][CLAUSE_IDX]
wid1 = sentTokens[i][WORD_ID]
wid2 = sentTokens[i+1][WORD_ID]
verbObj = { PHRASE: [wid1, wid2], PATTERN: ["ega", "verb"] }
verbObj[CLAUSE_IDX] = clauseID
if verbOlema.matches(sentTokens[i+1]):
verbObj[PATTERN][1] = 'ole'
verbObj[POLARITY] = 'NEG'
verbObj[ANALYSIS_IDS] = []
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( sentTokens[i], sonaEga ) )
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( sentTokens[i+1], verbEiJarel ) )
                # Determine whether more verbs follow that could potentially be attached
verbObj[OTHER_VERBS] = False
if i+2 < len(sentTokens):
for j in range(i+2, len(sentTokens)):
token2 = sentTokens[j]
if token2[CLAUSE_IDX] == clauseID and verb.matches(token2):
verbObj[OTHER_VERBS] = True
break
if verbObj[OTHER_VERBS]:
expandableEgaFound = True
else:
                    #
                    #  If other predicate negations have been detected in the clause and
                    #  they precede the current 'ega' negation, e.g.:
                    #      Ei lükka ma ümber ega kinnita.
                    #      Ta ei oota ega looda_0 ( enam ).
                    #  it may be a more complex phrase as a whole, e.g.:
                    #      Ta ise pole kuidagi saanud ega tahnud_0 end samastada nendega.
                    #  In such cases we mark the context as ambiguous, since the 'ega'
                    #  phrase may rely on an earlier verb chain;
                    #
for j in range(i-1, -1, -1):
token2 = sentTokens[j]
if token2[CLAUSE_IDX] == clauseID:
for verbObj2 in foundChains:
if token2[WORD_ID] in verbObj2[PHRASE] and verbObj2[POLARITY] != 'POS':
verbObj[OTHER_VERBS] = True
break
foundChains.append( verbObj )
annotatedWords.extend( verbObj[PHRASE] )
matchFound = True
if not matchFound:
                #
                # 2. 'ega' + a verb following further away
                #
                # 2.1 If no negation follows or precedes 'ega', but a verb compatible
                #     with 'ei' does follow (within the clause), in certain cases we
                #     treat it as a valid negation phrase.
                #     E.g.
                #          Nii et ega_0 Diana jõulureedel sünnitanudki .
                #          Ega_0 ta tahtnud algul rääkida .
                #     In general this pattern seems frequent in fiction and in language
                #     use close to spoken speech, rarer in journalism and rarer still
                #     in scientific writing;
                #
egaClauseID = sentTokens[i][CLAUSE_IDX]
precedingNeg = False
followingNeg = False
followingPos = None
for verbObj1 in foundChains:
if verbObj1[CLAUSE_IDX] == egaClauseID:
if verbObj1[POLARITY] != 'POS':
if any([ wid < sentTokens[i][WORD_ID] for wid in verbObj1[PHRASE] ]):
precedingNeg = True
if any([ wid > sentTokens[i][WORD_ID] for wid in verbObj1[PHRASE] ]):
followingNeg = True
elif verbObj1[POLARITY] == 'POS' and \
all([wid > sentTokens[i][WORD_ID] for wid in verbObj1[PHRASE]]):
followingPos = verbObj1
if not precedingNeg and not followingNeg:
if followingPos:
                        #
                        #  The simplest case: a positive verb chain follows (presumably
                        #  at its maximal length) and:
                        #    *) there is no punctuation between the chain verb and 'ega';
                        #    *) the chain contains a verb form compatible with 'ei';
                        #  then we prepend 'ega' to the chain and switch its polarity to negative:
                        #      Ega_0 neil seal kerge ole_0 . "
                        #      Ega_0 70 eluaastat ole_0 naljaasi !
                        #      Ega_0 sa puusärgis paugutama_0 hakka_0 . "
                        #
minWID = min(followingPos[PHRASE])
phraseTokens = [t for t in sentTokens if t[WORD_ID] in followingPos[PHRASE]]
if any( [verbEiJarel.matches( t ) for t in phraseTokens] ) and \
not _isSeparatedByPossibleClauseBreakers( sentTokens, token[WORD_ID], minWID, True, True, False):
followingPos[PHRASE].insert(0, token[WORD_ID])
followingPos[PATTERN].insert(0, 'ega')
followingPos[POLARITY] = 'NEG'
followingPos[ANALYSIS_IDS].insert(0, _getMatchingAnalysisIDs( token, sonaEga ) )
annotatedWords.append( token[WORD_ID] )
matchFound = True
#
                        #   Error-prone spot - sometimes a 'kui' intervenes:
# " Ega_0 muud kui pista_0 heinad põlema_0
#
elif i+1 < len(sentTokens):
                        #
                        #  Heuristic:
                        #  If 'ega' is followed, within the same clause, by a verb compatible with 'ei' and:
                        #  *) that verb is not a 'tud'-verb (so it is most likely a 'nud');
                        #  *) that verb is clause-final or no more than 2 words away
                        #     from 'ega';
                        #  *) that verb does not already belong to an annotated verb chain;
                        #  then we extract a new 'ega' phrase, e.g.:
                        #
                        #      Ega_0 poiss teda enam vahtinudki_0 .
                        #      Ega_0 keegi sellist tulemust ju soovinud_0 ,
                        #      Ja ega_0 ta soovinudki_0 Semperi kombel ümber õppida .
                        #
for j in range(i+1, len(sentTokens)):
token2 = sentTokens[j]
if token2[CLAUSE_IDX] == egaClauseID and verbEiJarel.matches(token2) and \
not verbTud.matches(token2) and token2[WORD_ID] not in annotatedWords and \
(_isClauseFinal( token2[WORD_ID], clausesDict[token2[CLAUSE_IDX]] ) or \
j-i <= 2):
wid1 = sentTokens[i][WORD_ID]
wid2 = token2[WORD_ID]
verbObj = { PHRASE: [wid1, wid2], PATTERN: ["ega", "verb"] }
verbObj[CLAUSE_IDX] = token2[CLAUSE_IDX]
if verbOlema.matches(token2):
verbObj[PATTERN][1] = 'ole'
verbObj[POLARITY] = 'NEG'
verbObj[ANALYSIS_IDS] = []
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( sentTokens[i], sonaEga ) )
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( token2, verbEiJarel ) )
                            # Determine whether the clause contains more verbs that could potentially be attached
verbObj[OTHER_VERBS] = False
if i+2 < len(sentTokens):
for j in range(i+2, len(sentTokens)):
token3 = sentTokens[j]
if token3[CLAUSE_IDX] == verbObj[CLAUSE_IDX] and \
token2 != token3 and verb.matches(token3):
verbObj[OTHER_VERBS] = True
break
if verbObj[OTHER_VERBS]:
expandableEgaFound = True
foundChains.append( verbObj )
annotatedWords.extend( verbObj[PHRASE] )
matchFound = True
break
return expandableEgaFound | def function[_extractEgaNegFromSent, parameter[sentTokens, clausesDict, foundChains]]:
constant[ Method that detects the negation(s) tied to an 'ega' predicate in the given sentence: ega + a compatible verb.
*) When 'ega' is followed by an already detected verb chain with positive polarity (e.g.
a chain that starts with a verb in the imperative mood), 'ega' is prepended to the existing
chain and the chain's polarity is switched to negative;
*) Otherwise a verb compatible with 'ei' is searched for after 'ega' (usually this is a
'nud'-verb) and joined (following certain heuristics) with 'ega' into a single phrase;
Returns True if, among the new 'ega' phrases, there was one that could potentially still be
extended by attaching another verb; otherwise returns False;
Why does this method operate on the whole sentence rather than on a single clause?
>> Since 'ega' is often marked as a clause boundary (a clause may end right after 'ega'),
the verbs following 'ega' cannot always be searched within one clause; several
adjacent clauses have to be inspected at once; here we simplify and look at the
whole sentence.
]
variable[sonaEga] assign[=] call[name[WordTemplate], parameter[dictionary[[<ast.Name object at 0x7da20c6c62f0>, <ast.Name object at 0x7da20c6c57e0>], [<ast.Constant object at 0x7da20c6c66e0>, <ast.Constant object at 0x7da20c6c4310>]]]]
variable[verbEiJarel] assign[=] call[name[WordTemplate], parameter[dictionary[[<ast.Name object at 0x7da20c6c6d70>, <ast.Name object at 0x7da20c6c4520>], [<ast.Constant object at 0x7da20c6c4910>, <ast.Constant object at 0x7da20c6c4a60>]]]]
variable[verbEiJarel2] assign[=] call[name[WordTemplate], parameter[dictionary[[<ast.Name object at 0x7da20c6c65f0>, <ast.Name object at 0x7da20c6c5600>, <ast.Name object at 0x7da20c6c5150>], [<ast.Constant object at 0x7da20c6c5ba0>, <ast.Constant object at 0x7da20c6c5b10>, <ast.Constant object at 0x7da20c6c5d20>]]]]
variable[verbTud] assign[=] call[name[WordTemplate], parameter[dictionary[[<ast.Name object at 0x7da20c6c44f0>, <ast.Name object at 0x7da20c6c7d00>], [<ast.Constant object at 0x7da20c6c7340>, <ast.Constant object at 0x7da20c6c42b0>]]]]
variable[verb] assign[=] call[name[WordTemplate], parameter[dictionary[[<ast.Name object at 0x7da20c6c7100>], [<ast.Constant object at 0x7da20c6c7fd0>]]]]
variable[verbOlema] assign[=] call[name[WordTemplate], parameter[dictionary[[<ast.Name object at 0x7da18f00faf0>, <ast.Name object at 0x7da18f00c3d0>], [<ast.Constant object at 0x7da18f00d600>, <ast.Constant object at 0x7da18f00f7f0>]]]]
variable[annotatedWords] assign[=] list[[]]
for taget[name[verbObj]] in starred[name[foundChains]] begin[:]
if <ast.BoolOp object at 0x7da18f00c610> begin[:]
continue
call[name[annotatedWords].extend, parameter[call[name[verbObj]][name[PHRASE]]]]
variable[expandableEgaFound] assign[=] constant[False]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[sentTokens]]]]]] begin[:]
variable[token] assign[=] call[name[sentTokens]][name[i]]
if <ast.BoolOp object at 0x7da18f00ceb0> begin[:]
variable[matchFound] assign[=] constant[False]
if <ast.BoolOp object at 0x7da18f00e470> begin[:]
for taget[name[verbObj]] in starred[name[foundChains]] begin[:]
if <ast.BoolOp object at 0x7da18f00f400> begin[:]
call[call[name[verbObj]][name[PHRASE]].insert, parameter[constant[0], call[name[token]][name[WORD_ID]]]]
call[call[name[verbObj]][name[PATTERN]].insert, parameter[constant[0], constant[ega]]]
call[name[verbObj]][name[POLARITY]] assign[=] constant[NEG]
call[call[name[verbObj]][name[ANALYSIS_IDS]].insert, parameter[constant[0], call[name[_getMatchingAnalysisIDs], parameter[name[token], name[sonaEga]]]]]
call[name[annotatedWords].append, parameter[call[name[token]][name[WORD_ID]]]]
variable[matchFound] assign[=] constant[True]
break
if <ast.UnaryOp object at 0x7da20e9b0760> begin[:]
variable[egaClauseID] assign[=] call[call[name[sentTokens]][name[i]]][name[CLAUSE_IDX]]
variable[precedingNeg] assign[=] constant[False]
variable[followingNeg] assign[=] constant[False]
variable[followingPos] assign[=] constant[None]
for taget[name[verbObj1]] in starred[name[foundChains]] begin[:]
if compare[call[name[verbObj1]][name[CLAUSE_IDX]] equal[==] name[egaClauseID]] begin[:]
if compare[call[name[verbObj1]][name[POLARITY]] not_equal[!=] constant[POS]] begin[:]
if call[name[any], parameter[<ast.ListComp object at 0x7da20e9b0f40>]] begin[:]
variable[precedingNeg] assign[=] constant[True]
if call[name[any], parameter[<ast.ListComp object at 0x7da20e9b2b90>]] begin[:]
variable[followingNeg] assign[=] constant[True]
if <ast.BoolOp object at 0x7da20e9b3130> begin[:]
if name[followingPos] begin[:]
variable[minWID] assign[=] call[name[min], parameter[call[name[followingPos]][name[PHRASE]]]]
variable[phraseTokens] assign[=] <ast.ListComp object at 0x7da20e9b29b0>
if <ast.BoolOp object at 0x7da204346740> begin[:]
call[call[name[followingPos]][name[PHRASE]].insert, parameter[constant[0], call[name[token]][name[WORD_ID]]]]
call[call[name[followingPos]][name[PATTERN]].insert, parameter[constant[0], constant[ega]]]
call[name[followingPos]][name[POLARITY]] assign[=] constant[NEG]
call[call[name[followingPos]][name[ANALYSIS_IDS]].insert, parameter[constant[0], call[name[_getMatchingAnalysisIDs], parameter[name[token], name[sonaEga]]]]]
call[name[annotatedWords].append, parameter[call[name[token]][name[WORD_ID]]]]
variable[matchFound] assign[=] constant[True]
return[name[expandableEgaFound]] | keyword[def] identifier[_extractEgaNegFromSent] ( identifier[sentTokens] , identifier[clausesDict] , identifier[foundChains] ):
literal[string]
identifier[sonaEga] = identifier[WordTemplate] ({ identifier[ROOT] : literal[string] , identifier[POSTAG] : literal[string] })
identifier[verbEiJarel] = identifier[WordTemplate] ({ identifier[POSTAG] : literal[string] , identifier[FORM] : literal[string] })
identifier[verbEiJarel2] = identifier[WordTemplate] ({ identifier[ROOT] : literal[string] , identifier[POSTAG] : literal[string] , identifier[FORM] : literal[string] })
identifier[verbTud] = identifier[WordTemplate] ({ identifier[POSTAG] : literal[string] , identifier[FORM] : literal[string] })
identifier[verb] = identifier[WordTemplate] ({ identifier[POSTAG] : literal[string] })
identifier[verbOlema] = identifier[WordTemplate] ({ identifier[POSTAG] : literal[string] , identifier[ROOT] : literal[string] })
identifier[annotatedWords] =[]
keyword[for] identifier[verbObj] keyword[in] identifier[foundChains] :
keyword[if] ( identifier[len] ( identifier[verbObj] [ identifier[PATTERN] ])== literal[int] keyword[and] identifier[re] . identifier[match] ( literal[string] , identifier[verbObj] [ identifier[PATTERN] ][ literal[int] ])):
keyword[continue]
identifier[annotatedWords] . identifier[extend] ( identifier[verbObj] [ identifier[PHRASE] ])
identifier[expandableEgaFound] = keyword[False]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[sentTokens] )):
identifier[token] = identifier[sentTokens] [ identifier[i] ]
keyword[if] identifier[sonaEga] . identifier[matches] ( identifier[token] ) keyword[and] identifier[token] [ identifier[WORD_ID] ] keyword[not] keyword[in] identifier[annotatedWords] :
identifier[matchFound] = keyword[False]
keyword[if] identifier[i] + literal[int] < identifier[len] ( identifier[sentTokens] ) keyword[and] identifier[sentTokens] [ identifier[i] + literal[int] ][ identifier[WORD_ID] ] keyword[in] identifier[annotatedWords] :
keyword[for] identifier[verbObj] keyword[in] identifier[foundChains] :
keyword[if] identifier[sentTokens] [ identifier[i] + literal[int] ][ identifier[WORD_ID] ] keyword[in] identifier[verbObj] [ identifier[PHRASE] ] keyword[and] identifier[verbObj] [ identifier[POLARITY] ]!= literal[string] keyword[and] ( identifier[verbEiJarel] . identifier[matches] ( identifier[sentTokens] [ identifier[i] + literal[int] ]) keyword[or] identifier[verbEiJarel2] . identifier[matches] ( identifier[sentTokens] [ identifier[i] + literal[int] ])) keyword[and] identifier[i] < identifier[min] ( identifier[verbObj] [ identifier[PHRASE] ]):
identifier[verbObj] [ identifier[PHRASE] ]. identifier[insert] ( literal[int] , identifier[token] [ identifier[WORD_ID] ])
identifier[verbObj] [ identifier[PATTERN] ]. identifier[insert] ( literal[int] , literal[string] )
identifier[verbObj] [ identifier[POLARITY] ]= literal[string]
identifier[verbObj] [ identifier[ANALYSIS_IDS] ]. identifier[insert] ( literal[int] , identifier[_getMatchingAnalysisIDs] ( identifier[token] , identifier[sonaEga] ))
identifier[annotatedWords] . identifier[append] ( identifier[token] [ identifier[WORD_ID] ])
identifier[matchFound] = keyword[True]
keyword[break]
keyword[elif] identifier[i] + literal[int] < identifier[len] ( identifier[sentTokens] ) keyword[and] identifier[verbEiJarel] . identifier[matches] ( identifier[sentTokens] [ identifier[i] + literal[int] ]) keyword[and] identifier[sentTokens] [ identifier[i] + literal[int] ][ identifier[WORD_ID] ] keyword[not] keyword[in] identifier[annotatedWords] :
identifier[clauseID] = identifier[sentTokens] [ identifier[i] + literal[int] ][ identifier[CLAUSE_IDX] ]
identifier[wid1] = identifier[sentTokens] [ identifier[i] ][ identifier[WORD_ID] ]
identifier[wid2] = identifier[sentTokens] [ identifier[i] + literal[int] ][ identifier[WORD_ID] ]
identifier[verbObj] ={ identifier[PHRASE] :[ identifier[wid1] , identifier[wid2] ], identifier[PATTERN] :[ literal[string] , literal[string] ]}
identifier[verbObj] [ identifier[CLAUSE_IDX] ]= identifier[clauseID]
keyword[if] identifier[verbOlema] . identifier[matches] ( identifier[sentTokens] [ identifier[i] + literal[int] ]):
identifier[verbObj] [ identifier[PATTERN] ][ literal[int] ]= literal[string]
identifier[verbObj] [ identifier[POLARITY] ]= literal[string]
identifier[verbObj] [ identifier[ANALYSIS_IDS] ]=[]
identifier[verbObj] [ identifier[ANALYSIS_IDS] ]. identifier[append] ( identifier[_getMatchingAnalysisIDs] ( identifier[sentTokens] [ identifier[i] ], identifier[sonaEga] ))
identifier[verbObj] [ identifier[ANALYSIS_IDS] ]. identifier[append] ( identifier[_getMatchingAnalysisIDs] ( identifier[sentTokens] [ identifier[i] + literal[int] ], identifier[verbEiJarel] ))
identifier[verbObj] [ identifier[OTHER_VERBS] ]= keyword[False]
keyword[if] identifier[i] + literal[int] < identifier[len] ( identifier[sentTokens] ):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[i] + literal[int] , identifier[len] ( identifier[sentTokens] )):
identifier[token2] = identifier[sentTokens] [ identifier[j] ]
keyword[if] identifier[token2] [ identifier[CLAUSE_IDX] ]== identifier[clauseID] keyword[and] identifier[verb] . identifier[matches] ( identifier[token2] ):
identifier[verbObj] [ identifier[OTHER_VERBS] ]= keyword[True]
keyword[break]
keyword[if] identifier[verbObj] [ identifier[OTHER_VERBS] ]:
identifier[expandableEgaFound] = keyword[True]
keyword[else] :
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[i] - literal[int] ,- literal[int] ,- literal[int] ):
identifier[token2] = identifier[sentTokens] [ identifier[j] ]
keyword[if] identifier[token2] [ identifier[CLAUSE_IDX] ]== identifier[clauseID] :
keyword[for] identifier[verbObj2] keyword[in] identifier[foundChains] :
keyword[if] identifier[token2] [ identifier[WORD_ID] ] keyword[in] identifier[verbObj2] [ identifier[PHRASE] ] keyword[and] identifier[verbObj2] [ identifier[POLARITY] ]!= literal[string] :
identifier[verbObj] [ identifier[OTHER_VERBS] ]= keyword[True]
keyword[break]
identifier[foundChains] . identifier[append] ( identifier[verbObj] )
identifier[annotatedWords] . identifier[extend] ( identifier[verbObj] [ identifier[PHRASE] ])
identifier[matchFound] = keyword[True]
keyword[if] keyword[not] identifier[matchFound] :
identifier[egaClauseID] = identifier[sentTokens] [ identifier[i] ][ identifier[CLAUSE_IDX] ]
identifier[precedingNeg] = keyword[False]
identifier[followingNeg] = keyword[False]
identifier[followingPos] = keyword[None]
keyword[for] identifier[verbObj1] keyword[in] identifier[foundChains] :
keyword[if] identifier[verbObj1] [ identifier[CLAUSE_IDX] ]== identifier[egaClauseID] :
keyword[if] identifier[verbObj1] [ identifier[POLARITY] ]!= literal[string] :
keyword[if] identifier[any] ([ identifier[wid] < identifier[sentTokens] [ identifier[i] ][ identifier[WORD_ID] ] keyword[for] identifier[wid] keyword[in] identifier[verbObj1] [ identifier[PHRASE] ]]):
identifier[precedingNeg] = keyword[True]
keyword[if] identifier[any] ([ identifier[wid] > identifier[sentTokens] [ identifier[i] ][ identifier[WORD_ID] ] keyword[for] identifier[wid] keyword[in] identifier[verbObj1] [ identifier[PHRASE] ]]):
identifier[followingNeg] = keyword[True]
keyword[elif] identifier[verbObj1] [ identifier[POLARITY] ]== literal[string] keyword[and] identifier[all] ([ identifier[wid] > identifier[sentTokens] [ identifier[i] ][ identifier[WORD_ID] ] keyword[for] identifier[wid] keyword[in] identifier[verbObj1] [ identifier[PHRASE] ]]):
identifier[followingPos] = identifier[verbObj1]
keyword[if] keyword[not] identifier[precedingNeg] keyword[and] keyword[not] identifier[followingNeg] :
keyword[if] identifier[followingPos] :
identifier[minWID] = identifier[min] ( identifier[followingPos] [ identifier[PHRASE] ])
identifier[phraseTokens] =[ identifier[t] keyword[for] identifier[t] keyword[in] identifier[sentTokens] keyword[if] identifier[t] [ identifier[WORD_ID] ] keyword[in] identifier[followingPos] [ identifier[PHRASE] ]]
keyword[if] identifier[any] ([ identifier[verbEiJarel] . identifier[matches] ( identifier[t] ) keyword[for] identifier[t] keyword[in] identifier[phraseTokens] ]) keyword[and] keyword[not] identifier[_isSeparatedByPossibleClauseBreakers] ( identifier[sentTokens] , identifier[token] [ identifier[WORD_ID] ], identifier[minWID] , keyword[True] , keyword[True] , keyword[False] ):
identifier[followingPos] [ identifier[PHRASE] ]. identifier[insert] ( literal[int] , identifier[token] [ identifier[WORD_ID] ])
identifier[followingPos] [ identifier[PATTERN] ]. identifier[insert] ( literal[int] , literal[string] )
identifier[followingPos] [ identifier[POLARITY] ]= literal[string]
identifier[followingPos] [ identifier[ANALYSIS_IDS] ]. identifier[insert] ( literal[int] , identifier[_getMatchingAnalysisIDs] ( identifier[token] , identifier[sonaEga] ))
identifier[annotatedWords] . identifier[append] ( identifier[token] [ identifier[WORD_ID] ])
identifier[matchFound] = keyword[True]
keyword[elif] identifier[i] + literal[int] < identifier[len] ( identifier[sentTokens] ):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[i] + literal[int] , identifier[len] ( identifier[sentTokens] )):
identifier[token2] = identifier[sentTokens] [ identifier[j] ]
keyword[if] identifier[token2] [ identifier[CLAUSE_IDX] ]== identifier[egaClauseID] keyword[and] identifier[verbEiJarel] . identifier[matches] ( identifier[token2] ) keyword[and] keyword[not] identifier[verbTud] . identifier[matches] ( identifier[token2] ) keyword[and] identifier[token2] [ identifier[WORD_ID] ] keyword[not] keyword[in] identifier[annotatedWords] keyword[and] ( identifier[_isClauseFinal] ( identifier[token2] [ identifier[WORD_ID] ], identifier[clausesDict] [ identifier[token2] [ identifier[CLAUSE_IDX] ]]) keyword[or] identifier[j] - identifier[i] <= literal[int] ):
identifier[wid1] = identifier[sentTokens] [ identifier[i] ][ identifier[WORD_ID] ]
identifier[wid2] = identifier[token2] [ identifier[WORD_ID] ]
identifier[verbObj] ={ identifier[PHRASE] :[ identifier[wid1] , identifier[wid2] ], identifier[PATTERN] :[ literal[string] , literal[string] ]}
identifier[verbObj] [ identifier[CLAUSE_IDX] ]= identifier[token2] [ identifier[CLAUSE_IDX] ]
keyword[if] identifier[verbOlema] . identifier[matches] ( identifier[token2] ):
identifier[verbObj] [ identifier[PATTERN] ][ literal[int] ]= literal[string]
identifier[verbObj] [ identifier[POLARITY] ]= literal[string]
identifier[verbObj] [ identifier[ANALYSIS_IDS] ]=[]
identifier[verbObj] [ identifier[ANALYSIS_IDS] ]. identifier[append] ( identifier[_getMatchingAnalysisIDs] ( identifier[sentTokens] [ identifier[i] ], identifier[sonaEga] ))
identifier[verbObj] [ identifier[ANALYSIS_IDS] ]. identifier[append] ( identifier[_getMatchingAnalysisIDs] ( identifier[token2] , identifier[verbEiJarel] ))
identifier[verbObj] [ identifier[OTHER_VERBS] ]= keyword[False]
keyword[if] identifier[i] + literal[int] < identifier[len] ( identifier[sentTokens] ):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[i] + literal[int] , identifier[len] ( identifier[sentTokens] )):
identifier[token3] = identifier[sentTokens] [ identifier[j] ]
keyword[if] identifier[token3] [ identifier[CLAUSE_IDX] ]== identifier[verbObj] [ identifier[CLAUSE_IDX] ] keyword[and] identifier[token2] != identifier[token3] keyword[and] identifier[verb] . identifier[matches] ( identifier[token3] ):
identifier[verbObj] [ identifier[OTHER_VERBS] ]= keyword[True]
keyword[break]
keyword[if] identifier[verbObj] [ identifier[OTHER_VERBS] ]:
identifier[expandableEgaFound] = keyword[True]
identifier[foundChains] . identifier[append] ( identifier[verbObj] )
identifier[annotatedWords] . identifier[extend] ( identifier[verbObj] [ identifier[PHRASE] ])
identifier[matchFound] = keyword[True]
keyword[break]
keyword[return] identifier[expandableEgaFound] | def _extractEgaNegFromSent(sentTokens, clausesDict, foundChains):
""" Meetod, mis tuvastab antud lausest 'ega'-predikaadiga seotud eituse(d): ega + sobiv verb.
*) Juhtudel kui 'ega'-le j2rgneb juba tuvastatud, positiivse polaarsusega verbiahel (nt
ahel mille alguses on käskivas kõneviisis verb), liidetakse 'ega' olemasoleva ahela ette
ning muudetakse ahela polaarsus negatiivseks;
*) Muudel juhtudel otsitakse 'ega'-le j2rgnevat 'ei'-ga sobivat verbi (enamasti on selleks
'nud'-verb) ning liidetakse see (teatud heuristikute j2rgi) 'ega'-ga yheks fraasiks;
Tagastab True, kui uute 'ega' fraaside seas leidus m6ni selline, mida potentsiaalselt saaks
veel m6ne teise verbi liitmisega laiendada, muudel juhtudel tagastab False;
Miks see meetod opereerib tervel lausel, mitte yksikul osalausel?
>> Kuna 'ega' on sageli m2rgitud osalause piiriks (p2rast 'ega'-t v6ib l6ppeda osalause),
ei saa 'ega'-le j2rgnevaid verbe alati otsida yhe osalause seest, vaid tuleb vaadata
korraga mitut k6rvutiolevat osalauset; k2esolevalt lihtsustame ja vaatame tervet
lauset.
"""
sonaEga = WordTemplate({ROOT: '^ega$', POSTAG: '[DJ]'})
verbEiJarel = WordTemplate({POSTAG: 'V', FORM: '(o|nud|tud|nuks|nuvat|vat|ks|ta|taks|tavat)$'})
verbEiJarel2 = WordTemplate({ROOT: '^mine$', POSTAG: 'V', FORM: 'neg o$'})
verbTud = WordTemplate({POSTAG: 'V', FORM: '(tud)$'})
verb = WordTemplate({POSTAG: 'V'})
verbOlema = WordTemplate({POSTAG: 'V', ROOT: '^(ole)$'}) # Record the words that already belong to some previously detected verb phrase
annotatedWords = []
for verbObj in foundChains:
if len(verbObj[PATTERN]) == 1 and re.match('^(ei|ära|ega)$', verbObj[PATTERN][0]): # Skip ei/ära/ega occurring on their own, since these most likely do not interfere
continue # depends on [control=['if'], data=[]]
annotatedWords.extend(verbObj[PHRASE]) # depends on [control=['for'], data=['verbObj']]
expandableEgaFound = False
for i in range(len(sentTokens)):
token = sentTokens[i]
if sonaEga.matches(token) and token[WORD_ID] not in annotatedWords:
matchFound = False
if i + 1 < len(sentTokens) and sentTokens[i + 1][WORD_ID] in annotatedWords: #
# The simplest case: a verb phrase has already been detected (and
# presumably at its maximal length), so all that remains is to prepend
# 'ega' to it and switch its polarity to negative:
# Te saate kaebusi palju ega_0 jõua_0 nendele reageerida_0 .
# vene keelt ta ei mõista ega_0 või_0 seepärast olla_0 vene spioon
# NB! The attachment is still NOT performed when:
# *) the following phrase already has negative polarity (such an extension
# would merely add more patterns without adding any real information);
# *) the following word is not a verb compatible with 'ei' (most likely
# something went wrong, e.g. in morphological disambiguation);
# *) the following word belongs to a verb chain that starts before 'ega'
# (which actually indicates that the chain in question was split up
# incorrectly);
#
for verbObj in foundChains:
if sentTokens[i + 1][WORD_ID] in verbObj[PHRASE] and verbObj[POLARITY] != 'NEG' and (verbEiJarel.matches(sentTokens[i + 1]) or verbEiJarel2.matches(sentTokens[i + 1])) and (i < min(verbObj[PHRASE])):
verbObj[PHRASE].insert(0, token[WORD_ID])
verbObj[PATTERN].insert(0, 'ega')
verbObj[POLARITY] = 'NEG'
verbObj[ANALYSIS_IDS].insert(0, _getMatchingAnalysisIDs(token, sonaEga))
annotatedWords.append(token[WORD_ID])
matchFound = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['verbObj']] # depends on [control=['if'], data=[]]
elif i + 1 < len(sentTokens) and verbEiJarel.matches(sentTokens[i + 1]) and (sentTokens[i + 1][WORD_ID] not in annotatedWords): #
# Heuristic:
# if 'ega' is immediately followed by a verb compatible with 'ei' (this
# should be an infinite nud/tud verb, since finite ones are most likely
# found and attached in the previous branch), extract it as a new phrase:
#
# Hakkasin Ainikiga rääkima ega_0 pööranud_0 Ivole enam tähelepanu .
# Tereese oli tükk aega vait ega_0 teadnud_0 , kas tõtt rääkida või mitte .
#
# >> the clauseID becomes the ID of the following verb, since a clause
# boundary often follows 'ega' and the verbs preceding 'ega' certainly
# cannot be related to it.
clauseID = sentTokens[i + 1][CLAUSE_IDX]
wid1 = sentTokens[i][WORD_ID]
wid2 = sentTokens[i + 1][WORD_ID]
verbObj = {PHRASE: [wid1, wid2], PATTERN: ['ega', 'verb']}
verbObj[CLAUSE_IDX] = clauseID
if verbOlema.matches(sentTokens[i + 1]):
verbObj[PATTERN][1] = 'ole' # depends on [control=['if'], data=[]]
verbObj[POLARITY] = 'NEG'
verbObj[ANALYSIS_IDS] = []
verbObj[ANALYSIS_IDS].append(_getMatchingAnalysisIDs(sentTokens[i], sonaEga))
verbObj[ANALYSIS_IDS].append(_getMatchingAnalysisIDs(sentTokens[i + 1], verbEiJarel)) # Determine whether more verbs follow that could potentially be attached
verbObj[OTHER_VERBS] = False
if i + 2 < len(sentTokens):
for j in range(i + 2, len(sentTokens)):
token2 = sentTokens[j]
if token2[CLAUSE_IDX] == clauseID and verb.matches(token2):
verbObj[OTHER_VERBS] = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']] # depends on [control=['if'], data=[]]
if verbObj[OTHER_VERBS]:
expandableEgaFound = True # depends on [control=['if'], data=[]]
else: #
# If other predicate negations have been detected in the clause and they
# precede the current 'ega' negation, e.g.:
# Ei lükka ma ümber ega kinnita.
# Ta ei oota ega looda_0 ( enam ).
# it may be a more complex phrase as a whole, e.g.:
# Ta ise pole kuidagi saanud ega tahnud_0 end samastada nendega.
# In such cases we mark the context as ambiguous, since the 'ega' phrase
# may rely on an earlier verb chain;
#
for j in range(i - 1, -1, -1):
token2 = sentTokens[j]
if token2[CLAUSE_IDX] == clauseID:
for verbObj2 in foundChains:
if token2[WORD_ID] in verbObj2[PHRASE] and verbObj2[POLARITY] != 'POS':
verbObj[OTHER_VERBS] = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['verbObj2']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']]
foundChains.append(verbObj)
annotatedWords.extend(verbObj[PHRASE])
matchFound = True # depends on [control=['if'], data=[]]
if not matchFound: #
# 2. 'ega' + a verb following further away
#
# 2.1 If no negation follows or precedes 'ega', but a verb compatible
# with 'ei' does follow (within the clause), in certain cases we treat
# it as a valid negation phrase.
# E.g.
# Nii et ega_0 Diana jõulureedel sünnitanudki .
# Ega_0 ta tahtnud algul rääkida .
# In general this pattern seems frequent in fiction and in language use
# close to spoken speech, rarer in journalism and rarer still in
# scientific writing;
#
egaClauseID = sentTokens[i][CLAUSE_IDX]
precedingNeg = False
followingNeg = False
followingPos = None
for verbObj1 in foundChains:
if verbObj1[CLAUSE_IDX] == egaClauseID:
if verbObj1[POLARITY] != 'POS':
if any([wid < sentTokens[i][WORD_ID] for wid in verbObj1[PHRASE]]):
precedingNeg = True # depends on [control=['if'], data=[]]
if any([wid > sentTokens[i][WORD_ID] for wid in verbObj1[PHRASE]]):
followingNeg = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif verbObj1[POLARITY] == 'POS' and all([wid > sentTokens[i][WORD_ID] for wid in verbObj1[PHRASE]]):
followingPos = verbObj1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['verbObj1']]
if not precedingNeg and (not followingNeg):
if followingPos: #
# The simplest case: a positive verb chain follows (presumably at its
# maximal length) and:
# *) there is no punctuation between the chain verb and 'ega';
# *) the chain contains a verb form compatible with 'ei';
# then we prepend 'ega' to the chain and switch its polarity to negative:
# Ega_0 neil seal kerge ole_0 . "
# Ega_0 70 eluaastat ole_0 naljaasi !
# Ega_0 sa puusärgis paugutama_0 hakka_0 . "
#
minWID = min(followingPos[PHRASE])
phraseTokens = [t for t in sentTokens if t[WORD_ID] in followingPos[PHRASE]]
if any([verbEiJarel.matches(t) for t in phraseTokens]) and (not _isSeparatedByPossibleClauseBreakers(sentTokens, token[WORD_ID], minWID, True, True, False)):
followingPos[PHRASE].insert(0, token[WORD_ID])
followingPos[PATTERN].insert(0, 'ega')
followingPos[POLARITY] = 'NEG'
followingPos[ANALYSIS_IDS].insert(0, _getMatchingAnalysisIDs(token, sonaEga))
annotatedWords.append(token[WORD_ID])
matchFound = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] #
# Error-prone spot - sometimes a 'kui' intervenes:
# " Ega_0 muud kui pista_0 heinad põlema_0
#
elif i + 1 < len(sentTokens): #
# Heuristic:
# If 'ega' is followed, within the same clause, by a verb compatible with 'ei' and:
# *) that verb is not a 'tud'-verb (so it is most likely a 'nud');
# *) that verb is clause-final or no more than 2 words away
# from 'ega';
# *) that verb does not already belong to an annotated verb chain;
# then we extract a new 'ega' phrase, e.g.:
#
# Ega_0 poiss teda enam vahtinudki_0 .
# Ega_0 keegi sellist tulemust ju soovinud_0 ,
# Ja ega_0 ta soovinudki_0 Semperi kombel ümber õppida .
#
for j in range(i + 1, len(sentTokens)):
token2 = sentTokens[j]
if token2[CLAUSE_IDX] == egaClauseID and verbEiJarel.matches(token2) and (not verbTud.matches(token2)) and (token2[WORD_ID] not in annotatedWords) and (_isClauseFinal(token2[WORD_ID], clausesDict[token2[CLAUSE_IDX]]) or j - i <= 2):
wid1 = sentTokens[i][WORD_ID]
wid2 = token2[WORD_ID]
verbObj = {PHRASE: [wid1, wid2], PATTERN: ['ega', 'verb']}
verbObj[CLAUSE_IDX] = token2[CLAUSE_IDX]
if verbOlema.matches(token2):
verbObj[PATTERN][1] = 'ole' # depends on [control=['if'], data=[]]
verbObj[POLARITY] = 'NEG'
verbObj[ANALYSIS_IDS] = []
verbObj[ANALYSIS_IDS].append(_getMatchingAnalysisIDs(sentTokens[i], sonaEga))
verbObj[ANALYSIS_IDS].append(_getMatchingAnalysisIDs(token2, verbEiJarel)) # Determine whether the clause contains more verbs that could potentially be attached
verbObj[OTHER_VERBS] = False
if i + 2 < len(sentTokens):
for j in range(i + 2, len(sentTokens)):
token3 = sentTokens[j]
if token3[CLAUSE_IDX] == verbObj[CLAUSE_IDX] and token2 != token3 and verb.matches(token3):
verbObj[OTHER_VERBS] = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']] # depends on [control=['if'], data=[]]
if verbObj[OTHER_VERBS]:
expandableEgaFound = True # depends on [control=['if'], data=[]]
foundChains.append(verbObj)
annotatedWords.extend(verbObj[PHRASE])
matchFound = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return expandableEgaFound |
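# A heavily simplified, self-contained sketch of the core heuristic above:
# pair 'ega' with an immediately following nud/tud participle. The real
# function works on morphologically analysed tokens, clause boundaries and
# existing verb chains; this toy version only inspects word endings.
import re

def toy_ega_phrases(words):
    phrases = []
    for i in range(len(words) - 1):
        if words[i].lower() == 'ega' and re.search(r'(nud|tud)$', words[i + 1]):
            phrases.append((i, i + 1))  # (index of 'ega', index of the verb)
    return phrases

assert toy_ega_phrases(['oli', 'vait', 'ega', 'teadnud']) == [(2, 3)]
assert toy_ega_phrases(['Ega', 'poiss', 'teda']) == []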
def json(self):
""" returns a dict that represents a NetJSON NetworkGraph object """
nodes = []
links = []
for link in self.link_set.all():
if self.is_layer2:
source = link.interface_a.mac
destination = link.interface_b.mac
else:
source = str(link.interface_a.ip_set.first().address)
destination = str(link.interface_b.ip_set.first().address)
nodes.append({
'id': source
})
nodes.append({
'id': destination
})
links.append(OrderedDict((
('source', source),
('target', destination),
('cost', link.metric_value)
)))
return OrderedDict((
('type', 'NetworkGraph'),
('protocol', self.parser.protocol),
('version', self.parser.version),
('metric', self.parser.metric),
('nodes', nodes),
('links', links)
)) | def function[json, parameter[self]]:
constant[ returns a dict that represents a NetJSON NetworkGraph object ]
variable[nodes] assign[=] list[[]]
variable[links] assign[=] list[[]]
for taget[name[link]] in starred[call[name[self].link_set.all, parameter[]]] begin[:]
if name[self].is_layer2 begin[:]
variable[source] assign[=] name[link].interface_a.mac
variable[destination] assign[=] name[link].interface_b.mac
call[name[nodes].append, parameter[dictionary[[<ast.Constant object at 0x7da20c6e4970>], [<ast.Name object at 0x7da20c6e4d30>]]]]
call[name[nodes].append, parameter[dictionary[[<ast.Constant object at 0x7da20c6e6da0>], [<ast.Name object at 0x7da18c4cf4c0>]]]]
call[name[links].append, parameter[call[name[OrderedDict], parameter[tuple[[<ast.Tuple object at 0x7da20c6e5510>, <ast.Tuple object at 0x7da20c6e70d0>, <ast.Tuple object at 0x7da20c6e79d0>]]]]]]
return[call[name[OrderedDict], parameter[tuple[[<ast.Tuple object at 0x7da20c6e71c0>, <ast.Tuple object at 0x7da20c6e53c0>, <ast.Tuple object at 0x7da20c6e4460>, <ast.Tuple object at 0x7da20c6e6560>, <ast.Tuple object at 0x7da20c6e5720>, <ast.Tuple object at 0x7da20c6e7940>]]]]] | keyword[def] identifier[json] ( identifier[self] ):
literal[string]
identifier[nodes] =[]
identifier[links] =[]
keyword[for] identifier[link] keyword[in] identifier[self] . identifier[link_set] . identifier[all] ():
keyword[if] identifier[self] . identifier[is_layer2] :
identifier[source] = identifier[link] . identifier[interface_a] . identifier[mac]
identifier[destination] = identifier[link] . identifier[interface_b] . identifier[mac]
keyword[else] :
identifier[source] = identifier[str] ( identifier[link] . identifier[interface_a] . identifier[ip_set] . identifier[first] (). identifier[address] )
identifier[destination] = identifier[str] ( identifier[link] . identifier[interface_b] . identifier[ip_set] . identifier[first] (). identifier[address] )
identifier[nodes] . identifier[append] ({
literal[string] : identifier[source]
})
identifier[nodes] . identifier[append] ({
literal[string] : identifier[destination]
})
identifier[links] . identifier[append] ( identifier[OrderedDict] ((
( literal[string] , identifier[source] ),
( literal[string] , identifier[destination] ),
( literal[string] , identifier[link] . identifier[metric_value] )
)))
keyword[return] identifier[OrderedDict] ((
( literal[string] , literal[string] ),
( literal[string] , identifier[self] . identifier[parser] . identifier[protocol] ),
( literal[string] , identifier[self] . identifier[parser] . identifier[version] ),
( literal[string] , identifier[self] . identifier[parser] . identifier[metric] ),
( literal[string] , identifier[nodes] ),
( literal[string] , identifier[links] )
)) | def json(self):
""" returns a dict that represents a NetJSON NetworkGraph object """
nodes = []
links = []
for link in self.link_set.all():
if self.is_layer2:
source = link.interface_a.mac
destination = link.interface_b.mac # depends on [control=['if'], data=[]]
else:
source = str(link.interface_a.ip_set.first().address)
destination = str(link.interface_b.ip_set.first().address)
nodes.append({'id': source})
nodes.append({'id': destination})
links.append(OrderedDict((('source', source), ('target', destination), ('cost', link.metric_value)))) # depends on [control=['for'], data=['link']]
return OrderedDict((('type', 'NetworkGraph'), ('protocol', self.parser.protocol), ('version', self.parser.version), ('metric', self.parser.metric), ('nodes', nodes), ('links', links))) |
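# Shape of the dict json() returns, shown as an illustrative (made-up) example
# for a layer-2 topology with a single link:
#
#     {'type': 'NetworkGraph',
#      'protocol': 'olsr',          # taken from self.parser in reality
#      'version': '0.8',
#      'metric': 'etx',
#      'nodes': [{'id': '00:11:22:33:44:55'}, {'id': '66:77:88:99:aa:bb'}],
#      'links': [{'source': '00:11:22:33:44:55',
#                 'target': '66:77:88:99:aa:bb',
#                 'cost': 1.0}]}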
def flexifunction_read_req_send(self, target_system, target_component, read_req_type, data_index, force_mavlink1=False):
'''
Request reading of flexifunction data
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
read_req_type : Type of flexifunction data requested (int16_t)
data_index : index into data where needed (int16_t)
'''
return self.send(self.flexifunction_read_req_encode(target_system, target_component, read_req_type, data_index), force_mavlink1=force_mavlink1) | def function[flexifunction_read_req_send, parameter[self, target_system, target_component, read_req_type, data_index, force_mavlink1]]:
constant[
Request reading of flexifunction data
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
read_req_type : Type of flexifunction data requested (int16_t)
data_index : index into data where needed (int16_t)
]
return[call[name[self].send, parameter[call[name[self].flexifunction_read_req_encode, parameter[name[target_system], name[target_component], name[read_req_type], name[data_index]]]]]] | keyword[def] identifier[flexifunction_read_req_send] ( identifier[self] , identifier[target_system] , identifier[target_component] , identifier[read_req_type] , identifier[data_index] , identifier[force_mavlink1] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[send] ( identifier[self] . identifier[flexifunction_read_req_encode] ( identifier[target_system] , identifier[target_component] , identifier[read_req_type] , identifier[data_index] ), identifier[force_mavlink1] = identifier[force_mavlink1] ) | def flexifunction_read_req_send(self, target_system, target_component, read_req_type, data_index, force_mavlink1=False):
"""
Request reading of flexifunction data
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
read_req_type : Type of flexifunction data requested (int16_t)
data_index : index into data where needed (int16_t)
"""
return self.send(self.flexifunction_read_req_encode(target_system, target_component, read_req_type, data_index), force_mavlink1=force_mavlink1) |
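# Usage sketch (assumption: the handle is obtained through the usual pymavlink
# connection pattern; neither the connection string nor the values below come
# from this file):
#
#     # mav = mavutil.mavlink_connection('udp:127.0.0.1:14550').mav
#     # Request flexifunction data of type 0 at index 5 from system 1, component 1:
#     # mav.flexifunction_read_req_send(1, 1, 0, 5)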
def _make_gelf_dict(self, record):
"""Create a dictionary representing a Graylog GELF log from a
python :class:`logging.LogRecord`
:param record: :class:`logging.LogRecord` to create a Graylog GELF
log from.
:type record: logging.LogRecord
:return: dictionary representing a Graylog GELF log.
:rtype: dict
"""
# construct the base GELF format
gelf_dict = {
'version': "1.0",
'host': BaseGELFHandler._resolve_host(self.fqdn, self.localname),
'short_message': self.formatter.format(record) if self.formatter else record.getMessage(),
'timestamp': record.created,
'level': SYSLOG_LEVELS.get(record.levelno, record.levelno),
'facility': self.facility or record.name,
}
# add in specified optional extras
self._add_full_message(gelf_dict, record)
if self.level_names:
self._add_level_names(gelf_dict, record)
if self.facility is not None:
self._set_custom_facility(gelf_dict, self.facility, record)
if self.debugging_fields:
self._add_debugging_fields(gelf_dict, record)
if self.extra_fields:
self._add_extra_fields(gelf_dict, record)
return gelf_dict | def function[_make_gelf_dict, parameter[self, record]]:
constant[Create a dictionary representing a Graylog GELF log from a
python :class:`logging.LogRecord`
:param record: :class:`logging.LogRecord` to create a Graylog GELF
log from.
:type record: logging.LogRecord
:return: dictionary representing a Graylog GELF log.
:rtype: dict
]
variable[gelf_dict] assign[=] dictionary[[<ast.Constant object at 0x7da18eb57820>, <ast.Constant object at 0x7da18eb557e0>, <ast.Constant object at 0x7da18eb579d0>, <ast.Constant object at 0x7da18eb55c00>, <ast.Constant object at 0x7da18eb54940>, <ast.Constant object at 0x7da18eb544f0>], [<ast.Constant object at 0x7da18eb54df0>, <ast.Call object at 0x7da18eb555d0>, <ast.IfExp object at 0x7da18eb57970>, <ast.Attribute object at 0x7da18eb55f30>, <ast.Call object at 0x7da18eb54e80>, <ast.BoolOp object at 0x7da18eb576d0>]]
call[name[self]._add_full_message, parameter[name[gelf_dict], name[record]]]
if name[self].level_names begin[:]
call[name[self]._add_level_names, parameter[name[gelf_dict], name[record]]]
if compare[name[self].facility is_not constant[None]] begin[:]
call[name[self]._set_custom_facility, parameter[name[gelf_dict], name[self].facility, name[record]]]
if name[self].debugging_fields begin[:]
call[name[self]._add_debugging_fields, parameter[name[gelf_dict], name[record]]]
if name[self].extra_fields begin[:]
call[name[self]._add_extra_fields, parameter[name[gelf_dict], name[record]]]
return[name[gelf_dict]] | keyword[def] identifier[_make_gelf_dict] ( identifier[self] , identifier[record] ):
literal[string]
identifier[gelf_dict] ={
literal[string] : literal[string] ,
literal[string] : identifier[BaseGELFHandler] . identifier[_resolve_host] ( identifier[self] . identifier[fqdn] , identifier[self] . identifier[localname] ),
literal[string] : identifier[self] . identifier[formatter] . identifier[format] ( identifier[record] ) keyword[if] identifier[self] . identifier[formatter] keyword[else] identifier[record] . identifier[getMessage] (),
literal[string] : identifier[record] . identifier[created] ,
literal[string] : identifier[SYSLOG_LEVELS] . identifier[get] ( identifier[record] . identifier[levelno] , identifier[record] . identifier[levelno] ),
literal[string] : identifier[self] . identifier[facility] keyword[or] identifier[record] . identifier[name] ,
}
identifier[self] . identifier[_add_full_message] ( identifier[gelf_dict] , identifier[record] )
keyword[if] identifier[self] . identifier[level_names] :
identifier[self] . identifier[_add_level_names] ( identifier[gelf_dict] , identifier[record] )
keyword[if] identifier[self] . identifier[facility] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_set_custom_facility] ( identifier[gelf_dict] , identifier[self] . identifier[facility] , identifier[record] )
keyword[if] identifier[self] . identifier[debugging_fields] :
identifier[self] . identifier[_add_debugging_fields] ( identifier[gelf_dict] , identifier[record] )
keyword[if] identifier[self] . identifier[extra_fields] :
identifier[self] . identifier[_add_extra_fields] ( identifier[gelf_dict] , identifier[record] )
keyword[return] identifier[gelf_dict] | def _make_gelf_dict(self, record):
"""Create a dictionary representing a Graylog GELF log from a
python :class:`logging.LogRecord`
:param record: :class:`logging.LogRecord` to create a Graylog GELF
log from.
:type record: logging.LogRecord
:return: dictionary representing a Graylog GELF log.
:rtype: dict
"""
# construct the base GELF format
gelf_dict = {'version': '1.0', 'host': BaseGELFHandler._resolve_host(self.fqdn, self.localname), 'short_message': self.formatter.format(record) if self.formatter else record.getMessage(), 'timestamp': record.created, 'level': SYSLOG_LEVELS.get(record.levelno, record.levelno), 'facility': self.facility or record.name}
# add in specified optional extras
self._add_full_message(gelf_dict, record)
if self.level_names:
self._add_level_names(gelf_dict, record) # depends on [control=['if'], data=[]]
if self.facility is not None:
self._set_custom_facility(gelf_dict, self.facility, record) # depends on [control=['if'], data=[]]
if self.debugging_fields:
self._add_debugging_fields(gelf_dict, record) # depends on [control=['if'], data=[]]
if self.extra_fields:
self._add_extra_fields(gelf_dict, record) # depends on [control=['if'], data=[]]
return gelf_dict |
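# Illustrative result for a plain INFO record (host and timestamp values are
# made up; the keys match the construction above, and INFO maps to syslog
# level 6 via SYSLOG_LEVELS):
#
#     record = logging.LogRecord('app', logging.INFO, __file__, 1,
#                                'disk almost full', None, None)
#     handler._make_gelf_dict(record)
#     # -> {'version': '1.0', 'host': 'worker-3',
#     #     'short_message': 'disk almost full',
#     #     'timestamp': 1514764800.0, 'level': 6, 'facility': 'app'}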
def get_url(cls, data):
"""Return the URL for a get request based on data type.
Args:
data: Accepts multiple types.
Int: Generate URL to object with data ID.
None: Get basic object GET URL (list).
String/Unicode: Search for <data> with default_search,
usually "name".
String/Unicode with "=": Other searches, for example
                Computers can be searched by uuid with:
"udid=E79E84CB-3227-5C69-A32C-6C45C2E77DF5"
See the class "search_types" attribute for options.
"""
try:
data = int(data)
except (ValueError, TypeError):
pass
if isinstance(data, int):
return "%s%s%s" % (cls._url, cls.id_url, data)
elif data is None:
return cls._url
elif isinstance(data, basestring):
if "=" in data:
key, value = data.split("=") # pylint: disable=no-member
if key in cls.search_types:
return "%s%s%s" % (cls._url, cls.search_types[key], value)
else:
raise JSSUnsupportedSearchMethodError(
"This object cannot be queried by %s." % key)
else:
return "%s%s%s" % (cls._url,
cls.search_types[cls.default_search], data)
else:
raise ValueError | def function[get_url, parameter[cls, data]]:
constant[Return the URL for a get request based on data type.
Args:
data: Accepts multiple types.
Int: Generate URL to object with data ID.
None: Get basic object GET URL (list).
String/Unicode: Search for <data> with default_search,
usually "name".
String/Unicode with "=": Other searches, for example
Computers can be searched by uuid with:
"udid=E79E84CB-3227-5C69-A32C-6C45C2E77DF5"
See the class "search_types" attribute for options.
]
<ast.Try object at 0x7da18ede69e0>
if call[name[isinstance], parameter[name[data], name[int]]] begin[:]
return[binary_operation[constant[%s%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18ede74c0>, <ast.Attribute object at 0x7da18ede5180>, <ast.Name object at 0x7da18ede54e0>]]]] | keyword[def] identifier[get_url] ( identifier[cls] , identifier[data] ):
literal[string]
keyword[try] :
identifier[data] = identifier[int] ( identifier[data] )
keyword[except] ( identifier[ValueError] , identifier[TypeError] ):
keyword[pass]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[int] ):
keyword[return] literal[string] %( identifier[cls] . identifier[_url] , identifier[cls] . identifier[id_url] , identifier[data] )
keyword[elif] identifier[data] keyword[is] keyword[None] :
keyword[return] identifier[cls] . identifier[_url]
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[basestring] ):
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[key] , identifier[value] = identifier[data] . identifier[split] ( literal[string] )
keyword[if] identifier[key] keyword[in] identifier[cls] . identifier[search_types] :
keyword[return] literal[string] %( identifier[cls] . identifier[_url] , identifier[cls] . identifier[search_types] [ identifier[key] ], identifier[value] )
keyword[else] :
keyword[raise] identifier[JSSUnsupportedSearchMethodError] (
literal[string] % identifier[key] )
keyword[else] :
keyword[return] literal[string] %( identifier[cls] . identifier[_url] ,
identifier[cls] . identifier[search_types] [ identifier[cls] . identifier[default_search] ], identifier[data] )
keyword[else] :
keyword[raise] identifier[ValueError] | def get_url(cls, data):
"""Return the URL for a get request based on data type.
Args:
data: Accepts multiple types.
Int: Generate URL to object with data ID.
None: Get basic object GET URL (list).
String/Unicode: Search for <data> with default_search,
usually "name".
String/Unicode with "=": Other searches, for example
                Computers can be searched by uuid with:
"udid=E79E84CB-3227-5C69-A32C-6C45C2E77DF5"
See the class "search_types" attribute for options.
"""
try:
data = int(data) # depends on [control=['try'], data=[]]
except (ValueError, TypeError):
pass # depends on [control=['except'], data=[]]
if isinstance(data, int):
return '%s%s%s' % (cls._url, cls.id_url, data) # depends on [control=['if'], data=[]]
elif data is None:
return cls._url # depends on [control=['if'], data=[]]
elif isinstance(data, basestring):
if '=' in data:
(key, value) = data.split('=') # pylint: disable=no-member
if key in cls.search_types:
return '%s%s%s' % (cls._url, cls.search_types[key], value) # depends on [control=['if'], data=['key']]
else:
raise JSSUnsupportedSearchMethodError('This object cannot be queried by %s.' % key) # depends on [control=['if'], data=['data']]
else:
return '%s%s%s' % (cls._url, cls.search_types[cls.default_search], data) # depends on [control=['if'], data=[]]
else:
raise ValueError |
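A runnable sketch of the same dispatch rules, with hypothetical class attributes standing in for a real JSS object type; the Python 2 basestring branch is folded into Python 3 string handling and the JSS-specific error type is left out:

class FakeComputer:
    _url = "/computers"
    id_url = "/id/"
    search_types = {"name": "/name/", "udid": "/udid/"}
    default_search = "name"

def fake_get_url(cls, data):
    # Same branching as get_url above.
    try:
        data = int(data)
    except (ValueError, TypeError):
        pass
    if isinstance(data, int):
        return "%s%s%s" % (cls._url, cls.id_url, data)
    if data is None:
        return cls._url
    key, _, value = data.partition("=")
    if value:
        return "%s%s%s" % (cls._url, cls.search_types[key], value)
    return "%s%s%s" % (cls._url, cls.search_types[cls.default_search], data)

print(fake_get_url(FakeComputer, "42"))         # /computers/id/42
print(fake_get_url(FakeComputer, None))         # /computers
print(fake_get_url(FakeComputer, "mac-01"))     # /computers/name/mac-01
print(fake_get_url(FakeComputer, "udid=E79E"))  # /computers/udid/E79E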
def delete_resource(self, resource):
"""
Deletes the resource from the pool and destroys the associated
resource. Not usually needed by users of the pool, but called
internally when BadResource is raised.
:param resource: the resource to remove
:type resource: Resource
"""
with self.lock:
self.resources.remove(resource)
self.destroy_resource(resource.object)
del resource | def function[delete_resource, parameter[self, resource]]:
constant[
Deletes the resource from the pool and destroys the associated
resource. Not usually needed by users of the pool, but called
internally when BadResource is raised.
:param resource: the resource to remove
:type resource: Resource
]
with name[self].lock begin[:]
call[name[self].resources.remove, parameter[name[resource]]]
call[name[self].destroy_resource, parameter[name[resource].object]]
<ast.Delete object at 0x7da20c7cb4c0> | keyword[def] identifier[delete_resource] ( identifier[self] , identifier[resource] ):
literal[string]
keyword[with] identifier[self] . identifier[lock] :
identifier[self] . identifier[resources] . identifier[remove] ( identifier[resource] )
identifier[self] . identifier[destroy_resource] ( identifier[resource] . identifier[object] )
keyword[del] identifier[resource] | def delete_resource(self, resource):
"""
Deletes the resource from the pool and destroys the associated
resource. Not usually needed by users of the pool, but called
internally when BadResource is raised.
:param resource: the resource to remove
:type resource: Resource
"""
with self.lock:
self.resources.remove(resource) # depends on [control=['with'], data=[]]
self.destroy_resource(resource.object)
del resource |
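The docstring hints at the calling pattern around BadResource; a toy, runnable version with the pool surface reduced to the pieces delete_resource actually touches (names other than delete_resource/destroy_resource are assumptions):

import threading

class BadResource(Exception):
    pass

class ToyPool:
    def __init__(self):
        self.lock = threading.Lock()
        self.resources = []
    def destroy_resource(self, obj):
        print("destroyed", obj)
    def delete_resource(self, resource):
        # Same locking and teardown order as the method above.
        with self.lock:
            self.resources.remove(resource)
            self.destroy_resource(resource.object)
            del resource

class Resource:
    def __init__(self, obj):
        self.object = obj

pool = ToyPool()
res = Resource("conn-1")
pool.resources.append(res)
try:
    raise BadResource("connection reset")
except BadResource:
    pool.delete_resource(res)  # -> destroyed conn-1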
def ncVarUnit(ncVar):
""" Returns the unit of the ncVar by looking in the attributes.
It searches in the attributes for one of the following keys:
'unit', 'units', 'Unit', 'Units', 'UNIT', 'UNITS'. If these are not found, the empty
string is returned.
"""
attributes = ncVarAttributes(ncVar)
if not attributes:
return '' # a premature optimization :-)
for key in ('unit', 'units', 'Unit', 'Units', 'UNIT', 'UNITS'):
if key in attributes:
            # In Python3 the attributes are byte strings so we must decode them
            # This is a bug in h5py, see https://github.com/h5py/h5py/issues/379
return attributes[key]
else:
return '' | def function[ncVarUnit, parameter[ncVar]]:
constant[ Returns the unit of the ncVar by looking in the attributes.
It searches in the attributes for one of the following keys:
'unit', 'units', 'Unit', 'Units', 'UNIT', 'UNITS'. If these are not found, the empty
string is returned.
]
variable[attributes] assign[=] call[name[ncVarAttributes], parameter[name[ncVar]]]
if <ast.UnaryOp object at 0x7da1b04f9c00> begin[:]
return[constant[]]
for taget[name[key]] in starred[tuple[[<ast.Constant object at 0x7da1b04f95d0>, <ast.Constant object at 0x7da1b04f8790>, <ast.Constant object at 0x7da1b04f96f0>, <ast.Constant object at 0x7da1b04f8e50>, <ast.Constant object at 0x7da1b04fa290>, <ast.Constant object at 0x7da1b04f8970>]]] begin[:]
if compare[name[key] in name[attributes]] begin[:]
return[call[name[attributes]][name[key]]] | keyword[def] identifier[ncVarUnit] ( identifier[ncVar] ):
literal[string]
identifier[attributes] = identifier[ncVarAttributes] ( identifier[ncVar] )
keyword[if] keyword[not] identifier[attributes] :
keyword[return] literal[string]
keyword[for] identifier[key] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[if] identifier[key] keyword[in] identifier[attributes] :
keyword[return] identifier[attributes] [ identifier[key] ]
keyword[else] :
keyword[return] literal[string] | def ncVarUnit(ncVar):
""" Returns the unit of the ncVar by looking in the attributes.
It searches in the attributes for one of the following keys:
'unit', 'units', 'Unit', 'Units', 'UNIT', 'UNITS'. If these are not found, the empty
string is returned.
"""
attributes = ncVarAttributes(ncVar)
if not attributes:
return '' # a premature optimization :-) # depends on [control=['if'], data=[]]
for key in ('unit', 'units', 'Unit', 'Units', 'UNIT', 'UNITS'):
if key in attributes:
            # In Python3 the attributes are byte strings so we must decode them
            # This is a bug in h5py, see https://github.com/h5py/h5py/issues/379
return attributes[key] # depends on [control=['if'], data=['key', 'attributes']] # depends on [control=['for'], data=['key']]
else:
return '' |
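A quick, self-contained check of the lookup order, with the attribute set reduced to a plain dict so the ncVarAttributes helper drops out:

def unit_of(attributes):
    # Same key search as ncVarUnit above, on a plain dict.
    if not attributes:
        return ''
    for key in ('unit', 'units', 'Unit', 'Units', 'UNIT', 'UNITS'):
        if key in attributes:
            return attributes[key]
    return ''

print(unit_of({'units': 'm/s'}))             # -> m/s
print(unit_of({'long_name': 'wind speed'}))  # -> '' (no unit-like key)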
def decorate(self, name_or_func):
"""Decorate a function/method to check its timings.
To use the function's name:
@sw.decorate
def func():
pass
To name it explicitly:
@sw.decorate("name")
def random_func_name():
pass
Args:
name_or_func: the name or the function to decorate.
Returns:
If a name is passed, returns this as a decorator, otherwise returns the
decorated function.
"""
if os.environ.get("SC2_NO_STOPWATCH"):
return name_or_func if callable(name_or_func) else lambda func: func
def decorator(name, func):
@functools.wraps(func)
def _stopwatch(*args, **kwargs):
with self(name):
return func(*args, **kwargs)
return _stopwatch
if callable(name_or_func):
return decorator(name_or_func.__name__, name_or_func)
else:
return lambda func: decorator(name_or_func, func) | def function[decorate, parameter[self, name_or_func]]:
constant[Decorate a function/method to check its timings.
To use the function's name:
@sw.decorate
def func():
pass
To name it explicitly:
@sw.decorate("name")
def random_func_name():
pass
Args:
name_or_func: the name or the function to decorate.
Returns:
If a name is passed, returns this as a decorator, otherwise returns the
decorated function.
]
if call[name[os].environ.get, parameter[constant[SC2_NO_STOPWATCH]]] begin[:]
return[<ast.IfExp object at 0x7da18f09fa30>]
def function[decorator, parameter[name, func]]:
def function[_stopwatch, parameter[]]:
with call[name[self], parameter[name[name]]] begin[:]
return[call[name[func], parameter[<ast.Starred object at 0x7da18f09fca0>]]]
return[name[_stopwatch]]
if call[name[callable], parameter[name[name_or_func]]] begin[:]
return[call[name[decorator], parameter[name[name_or_func].__name__, name[name_or_func]]]] | keyword[def] identifier[decorate] ( identifier[self] , identifier[name_or_func] ):
literal[string]
keyword[if] identifier[os] . identifier[environ] . identifier[get] ( literal[string] ):
keyword[return] identifier[name_or_func] keyword[if] identifier[callable] ( identifier[name_or_func] ) keyword[else] keyword[lambda] identifier[func] : identifier[func]
keyword[def] identifier[decorator] ( identifier[name] , identifier[func] ):
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[_stopwatch] (* identifier[args] ,** identifier[kwargs] ):
keyword[with] identifier[self] ( identifier[name] ):
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[_stopwatch]
keyword[if] identifier[callable] ( identifier[name_or_func] ):
keyword[return] identifier[decorator] ( identifier[name_or_func] . identifier[__name__] , identifier[name_or_func] )
keyword[else] :
keyword[return] keyword[lambda] identifier[func] : identifier[decorator] ( identifier[name_or_func] , identifier[func] ) | def decorate(self, name_or_func):
"""Decorate a function/method to check its timings.
To use the function's name:
@sw.decorate
def func():
pass
To name it explicitly:
@sw.decorate("name")
def random_func_name():
pass
Args:
name_or_func: the name or the function to decorate.
Returns:
If a name is passed, returns this as a decorator, otherwise returns the
decorated function.
"""
if os.environ.get('SC2_NO_STOPWATCH'):
return name_or_func if callable(name_or_func) else lambda func: func # depends on [control=['if'], data=[]]
def decorator(name, func):
@functools.wraps(func)
def _stopwatch(*args, **kwargs):
with self(name):
return func(*args, **kwargs) # depends on [control=['with'], data=[]]
return _stopwatch
if callable(name_or_func):
return decorator(name_or_func.__name__, name_or_func) # depends on [control=['if'], data=[]]
else:
return lambda func: decorator(name_or_func, func) |
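Both decoration forms from the docstring, exercised against a minimal stand-in class; the SC2_NO_STOPWATCH escape hatch is dropped, everything else mirrors the method above:

import contextlib
import functools
import time

class MiniStopwatch:
    def __init__(self):
        self.times = {}

    @contextlib.contextmanager
    def __call__(self, name):
        start = time.time()
        try:
            yield
        finally:
            self.times[name] = time.time() - start

    def decorate(self, name_or_func):
        def decorator(name, func):
            @functools.wraps(func)
            def _stopwatch(*args, **kwargs):
                with self(name):
                    return func(*args, **kwargs)
            return _stopwatch
        if callable(name_or_func):
            return decorator(name_or_func.__name__, name_or_func)
        return lambda func: decorator(name_or_func, func)

sw = MiniStopwatch()

@sw.decorate               # named after the function itself
def slow():
    time.sleep(0.01)

@sw.decorate("fast_path")  # explicit name
def f():
    pass

slow(); f()
print(sorted(sw.times))    # -> ['fast_path', 'slow']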
def add_query(self, name, filter, **kwargs):
"""Add a new query to device query service.
.. code-block:: python
f = api.add_query(
name = "Query name",
filter = {
"device_id": {"$eq": "01234"},
                "custom_attributes": {
"foo": {"$eq": "bar"}
}
}
)
print(f.created_at)
:param str name: Name of query (Required)
:param dict filter: Filter properties to apply (Required)
:return: the newly created query object
:rtype: Query
"""
# Ensure we have the correct types and get the new query object
filter_obj = filters.legacy_filter_formatter(
dict(filter=filter),
Device._get_attributes_map()
) if filter else None
query_map = Query._create_request_map(kwargs)
# Create the DeviceQuery object
f = DeviceQuery(name=name, query=filter_obj['filter'], **query_map)
api = self._get_api(device_directory.DefaultApi)
return Query(api.device_query_create(f)) | def function[add_query, parameter[self, name, filter]]:
constant[Add a new query to device query service.
.. code-block:: python
f = api.add_query(
name = "Query name",
filter = {
"device_id": {"$eq": "01234"},
custom_attributes = {
"foo": {"$eq": "bar"}
}
}
)
print(f.created_at)
:param str name: Name of query (Required)
:param dict filter: Filter properties to apply (Required)
:return: the newly created query object
:rtype: Query
]
variable[filter_obj] assign[=] <ast.IfExp object at 0x7da1b04ed1e0>
variable[query_map] assign[=] call[name[Query]._create_request_map, parameter[name[kwargs]]]
variable[f] assign[=] call[name[DeviceQuery], parameter[]]
variable[api] assign[=] call[name[self]._get_api, parameter[name[device_directory].DefaultApi]]
return[call[name[Query], parameter[call[name[api].device_query_create, parameter[name[f]]]]]] | keyword[def] identifier[add_query] ( identifier[self] , identifier[name] , identifier[filter] ,** identifier[kwargs] ):
literal[string]
identifier[filter_obj] = identifier[filters] . identifier[legacy_filter_formatter] (
identifier[dict] ( identifier[filter] = identifier[filter] ),
identifier[Device] . identifier[_get_attributes_map] ()
) keyword[if] identifier[filter] keyword[else] keyword[None]
identifier[query_map] = identifier[Query] . identifier[_create_request_map] ( identifier[kwargs] )
identifier[f] = identifier[DeviceQuery] ( identifier[name] = identifier[name] , identifier[query] = identifier[filter_obj] [ literal[string] ],** identifier[query_map] )
identifier[api] = identifier[self] . identifier[_get_api] ( identifier[device_directory] . identifier[DefaultApi] )
keyword[return] identifier[Query] ( identifier[api] . identifier[device_query_create] ( identifier[f] )) | def add_query(self, name, filter, **kwargs):
"""Add a new query to device query service.
.. code-block:: python
f = api.add_query(
name = "Query name",
filter = {
"device_id": {"$eq": "01234"},
custom_attributes = {
"foo": {"$eq": "bar"}
}
}
)
print(f.created_at)
:param str name: Name of query (Required)
:param dict filter: Filter properties to apply (Required)
:return: the newly created query object
:rtype: Query
"""
# Ensure we have the correct types and get the new query object
filter_obj = filters.legacy_filter_formatter(dict(filter=filter), Device._get_attributes_map()) if filter else None
query_map = Query._create_request_map(kwargs)
# Create the DeviceQuery object
f = DeviceQuery(name=name, query=filter_obj['filter'], **query_map)
api = self._get_api(device_directory.DefaultApi)
return Query(api.device_query_create(f)) |
def write_string(self, obj, use_reference=True):
"""
Writes a Java string with the TC_STRING type marker
        :param obj: The string to write
:param use_reference: If True, allow writing a reference
"""
if use_reference and isinstance(obj, JavaString):
try:
idx = self.references.index(obj)
except ValueError:
# String is not referenced: let _writeString store it
self._writeStruct(">B", 1, (self.TC_STRING,))
self._writeString(obj, use_reference)
else:
# Reuse the referenced string
logging.debug(
"*** Reusing ref 0x%X for String: %s",
idx + self.BASE_REFERENCE_IDX,
obj,
)
self.write_reference(idx)
else:
# Don't use references
self._writeStruct(">B", 1, (self.TC_STRING,))
self._writeString(obj, use_reference) | def function[write_string, parameter[self, obj, use_reference]]:
constant[
Writes a Java string with the TC_STRING type marker
:param obj: The string to print
:param use_reference: If True, allow writing a reference
]
if <ast.BoolOp object at 0x7da20c76fa60> begin[:]
<ast.Try object at 0x7da20c76ef50> | keyword[def] identifier[write_string] ( identifier[self] , identifier[obj] , identifier[use_reference] = keyword[True] ):
literal[string]
keyword[if] identifier[use_reference] keyword[and] identifier[isinstance] ( identifier[obj] , identifier[JavaString] ):
keyword[try] :
identifier[idx] = identifier[self] . identifier[references] . identifier[index] ( identifier[obj] )
keyword[except] identifier[ValueError] :
identifier[self] . identifier[_writeStruct] ( literal[string] , literal[int] ,( identifier[self] . identifier[TC_STRING] ,))
identifier[self] . identifier[_writeString] ( identifier[obj] , identifier[use_reference] )
keyword[else] :
identifier[logging] . identifier[debug] (
literal[string] ,
identifier[idx] + identifier[self] . identifier[BASE_REFERENCE_IDX] ,
identifier[obj] ,
)
identifier[self] . identifier[write_reference] ( identifier[idx] )
keyword[else] :
identifier[self] . identifier[_writeStruct] ( literal[string] , literal[int] ,( identifier[self] . identifier[TC_STRING] ,))
identifier[self] . identifier[_writeString] ( identifier[obj] , identifier[use_reference] ) | def write_string(self, obj, use_reference=True):
"""
Writes a Java string with the TC_STRING type marker
:param obj: The string to print
:param use_reference: If True, allow writing a reference
"""
if use_reference and isinstance(obj, JavaString):
try:
idx = self.references.index(obj) # depends on [control=['try'], data=[]]
except ValueError:
# String is not referenced: let _writeString store it
self._writeStruct('>B', 1, (self.TC_STRING,))
self._writeString(obj, use_reference) # depends on [control=['except'], data=[]]
else:
# Reuse the referenced string
logging.debug('*** Reusing ref 0x%X for String: %s', idx + self.BASE_REFERENCE_IDX, obj)
self.write_reference(idx) # depends on [control=['if'], data=[]]
else:
# Don't use references
self._writeStruct('>B', 1, (self.TC_STRING,))
self._writeString(obj, use_reference) |
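A toy illustration of the reference-reuse branch: the first write records the string, later writes emit only an index (stream output replaced by a list of ops; the 0x7E0000 base is assumed to match Java serialization's baseWireHandle):

class ToyWriter:
    BASE_REFERENCE_IDX = 0x7E0000

    def __init__(self):
        self.references = []
        self.ops = []

    def write_string(self, obj):
        # Same lookup-then-reuse logic as above, minus the wire format.
        try:
            idx = self.references.index(obj)
        except ValueError:
            self.references.append(obj)
            self.ops.append(("TC_STRING", obj))
        else:
            self.ops.append(("TC_REFERENCE", idx + self.BASE_REFERENCE_IDX))

w = ToyWriter()
w.write_string("hello")
w.write_string("hello")
print(w.ops)  # [('TC_STRING', 'hello'), ('TC_REFERENCE', 8257536)]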
def get_arp_output_arp_entry_age(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
age = ET.SubElement(arp_entry, "age")
age.text = kwargs.pop('age')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[get_arp_output_arp_entry_age, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_arp] assign[=] call[name[ET].Element, parameter[constant[get_arp]]]
variable[config] assign[=] name[get_arp]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_arp], constant[output]]]
variable[arp_entry] assign[=] call[name[ET].SubElement, parameter[name[output], constant[arp-entry]]]
variable[ip_address_key] assign[=] call[name[ET].SubElement, parameter[name[arp_entry], constant[ip-address]]]
name[ip_address_key].text assign[=] call[name[kwargs].pop, parameter[constant[ip_address]]]
variable[age] assign[=] call[name[ET].SubElement, parameter[name[arp_entry], constant[age]]]
name[age].text assign[=] call[name[kwargs].pop, parameter[constant[age]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[get_arp_output_arp_entry_age] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_arp] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_arp]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_arp] , literal[string] )
identifier[arp_entry] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[ip_address_key] = identifier[ET] . identifier[SubElement] ( identifier[arp_entry] , literal[string] )
identifier[ip_address_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[age] = identifier[ET] . identifier[SubElement] ( identifier[arp_entry] , literal[string] )
identifier[age] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def get_arp_output_arp_entry_age(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_arp = ET.Element('get_arp')
config = get_arp
output = ET.SubElement(get_arp, 'output')
arp_entry = ET.SubElement(output, 'arp-entry')
ip_address_key = ET.SubElement(arp_entry, 'ip-address')
ip_address_key.text = kwargs.pop('ip_address')
age = ET.SubElement(arp_entry, 'age')
age.text = kwargs.pop('age')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
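For reference, what the element tree built above serializes to, reproduced with the standard library (assuming ET is xml.etree.ElementTree or an API-compatible module):

import xml.etree.ElementTree as ET

get_arp = ET.Element("get_arp")
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ET.SubElement(arp_entry, "ip-address").text = "10.0.0.1"
ET.SubElement(arp_entry, "age").text = "120"
print(ET.tostring(get_arp).decode())
# <get_arp><output><arp-entry><ip-address>10.0.0.1</ip-address><age>120</age></arp-entry></output></get_arp>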
def addToService(self, service, namespace=None, seperator='.'):
"""
Add this Handler's exported methods to an RPC Service instance.
"""
if namespace is None:
namespace = []
if isinstance(namespace, basestring):
namespace = [namespace]
for n, m in inspect.getmembers(self, inspect.ismethod):
if hasattr(m, 'export_rpc'):
try:
name = seperator.join(namespace + m.export_rpc)
except TypeError:
name = seperator.join(namespace + [m.export_rpc])
service.add(m, name) | def function[addToService, parameter[self, service, namespace, seperator]]:
constant[
Add this Handler's exported methods to an RPC Service instance.
]
if compare[name[namespace] is constant[None]] begin[:]
variable[namespace] assign[=] list[[]]
if call[name[isinstance], parameter[name[namespace], name[basestring]]] begin[:]
variable[namespace] assign[=] list[[<ast.Name object at 0x7da1b0a9d9f0>]]
for taget[tuple[[<ast.Name object at 0x7da1b0a9d630>, <ast.Name object at 0x7da1b0a9d0f0>]]] in starred[call[name[inspect].getmembers, parameter[name[self], name[inspect].ismethod]]] begin[:]
if call[name[hasattr], parameter[name[m], constant[export_rpc]]] begin[:]
<ast.Try object at 0x7da1b0a9c490>
call[name[service].add, parameter[name[m], name[name]]] | keyword[def] identifier[addToService] ( identifier[self] , identifier[service] , identifier[namespace] = keyword[None] , identifier[seperator] = literal[string] ):
literal[string]
keyword[if] identifier[namespace] keyword[is] keyword[None] :
identifier[namespace] =[]
keyword[if] identifier[isinstance] ( identifier[namespace] , identifier[basestring] ):
identifier[namespace] =[ identifier[namespace] ]
keyword[for] identifier[n] , identifier[m] keyword[in] identifier[inspect] . identifier[getmembers] ( identifier[self] , identifier[inspect] . identifier[ismethod] ):
keyword[if] identifier[hasattr] ( identifier[m] , literal[string] ):
keyword[try] :
identifier[name] = identifier[seperator] . identifier[join] ( identifier[namespace] + identifier[m] . identifier[export_rpc] )
keyword[except] identifier[TypeError] :
identifier[name] = identifier[seperator] . identifier[join] ( identifier[namespace] +[ identifier[m] . identifier[export_rpc] ])
identifier[service] . identifier[add] ( identifier[m] , identifier[name] ) | def addToService(self, service, namespace=None, seperator='.'):
"""
Add this Handler's exported methods to an RPC Service instance.
"""
if namespace is None:
namespace = [] # depends on [control=['if'], data=['namespace']]
if isinstance(namespace, basestring):
namespace = [namespace] # depends on [control=['if'], data=[]]
for (n, m) in inspect.getmembers(self, inspect.ismethod):
if hasattr(m, 'export_rpc'):
try:
name = seperator.join(namespace + m.export_rpc) # depends on [control=['try'], data=[]]
except TypeError:
name = seperator.join(namespace + [m.export_rpc]) # depends on [control=['except'], data=[]]
service.add(m, name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
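A Python 3 rendering of the traversal (basestring dropped), with the export_rpc markers set by hand and a stub service; in the real code the markers are presumably set by an export decorator:

import inspect

class StubService:
    def __init__(self):
        self.methods = {}
    def add(self, func, name):
        self.methods[name] = func

class MathHandler:
    def add(self, a, b):
        return a + b
    add.export_rpc = "add"            # plain string: hits the TypeError branch

    def mul(self, a, b):
        return a * b
    mul.export_rpc = ["fast", "mul"]  # list: joined segment by segment

def add_to_service(handler, service, namespace=(), sep="."):
    namespace = list(namespace)
    for _, m in inspect.getmembers(handler, inspect.ismethod):
        if hasattr(m, "export_rpc"):
            try:
                name = sep.join(namespace + m.export_rpc)
            except TypeError:
                name = sep.join(namespace + [m.export_rpc])
            service.add(m, name)

svc = StubService()
add_to_service(MathHandler(), svc, namespace=["api"])
print(sorted(svc.methods))  # -> ['api.add', 'api.fast.mul']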
def convert_convolution(node, **kwargs):
"""Map MXNet's convolution operator attributes to onnx's Conv operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
kernel_dims = list(parse_helper(attrs, "kernel"))
stride_dims = list(parse_helper(attrs, "stride", [1, 1]))
pad_dims = list(parse_helper(attrs, "pad", [0, 0]))
num_group = int(attrs.get("num_group", 1))
dilations = list(parse_helper(attrs, "dilate", [1, 1]))
pad_dims = pad_dims + pad_dims
conv_node = onnx.helper.make_node(
"Conv",
inputs=input_nodes,
outputs=[name],
kernel_shape=kernel_dims,
strides=stride_dims,
dilations=dilations,
pads=pad_dims,
group=num_group,
name=name
)
return [conv_node] | def function[convert_convolution, parameter[node]]:
constant[Map MXNet's convolution operator attributes to onnx's Conv operator
and return the created node.
]
<ast.Tuple object at 0x7da1b1ef0b80> assign[=] call[name[get_inputs], parameter[name[node], name[kwargs]]]
variable[kernel_dims] assign[=] call[name[list], parameter[call[name[parse_helper], parameter[name[attrs], constant[kernel]]]]]
variable[stride_dims] assign[=] call[name[list], parameter[call[name[parse_helper], parameter[name[attrs], constant[stride], list[[<ast.Constant object at 0x7da1b1ef1060>, <ast.Constant object at 0x7da1b1ef03d0>]]]]]]
variable[pad_dims] assign[=] call[name[list], parameter[call[name[parse_helper], parameter[name[attrs], constant[pad], list[[<ast.Constant object at 0x7da1b1ef28f0>, <ast.Constant object at 0x7da1b1ef1000>]]]]]]
variable[num_group] assign[=] call[name[int], parameter[call[name[attrs].get, parameter[constant[num_group], constant[1]]]]]
variable[dilations] assign[=] call[name[list], parameter[call[name[parse_helper], parameter[name[attrs], constant[dilate], list[[<ast.Constant object at 0x7da1b1ef08e0>, <ast.Constant object at 0x7da1b1ef0ee0>]]]]]]
variable[pad_dims] assign[=] binary_operation[name[pad_dims] + name[pad_dims]]
variable[conv_node] assign[=] call[name[onnx].helper.make_node, parameter[constant[Conv]]]
return[list[[<ast.Name object at 0x7da1b20f93c0>]]] | keyword[def] identifier[convert_convolution] ( identifier[node] ,** identifier[kwargs] ):
literal[string]
identifier[name] , identifier[input_nodes] , identifier[attrs] = identifier[get_inputs] ( identifier[node] , identifier[kwargs] )
identifier[kernel_dims] = identifier[list] ( identifier[parse_helper] ( identifier[attrs] , literal[string] ))
identifier[stride_dims] = identifier[list] ( identifier[parse_helper] ( identifier[attrs] , literal[string] ,[ literal[int] , literal[int] ]))
identifier[pad_dims] = identifier[list] ( identifier[parse_helper] ( identifier[attrs] , literal[string] ,[ literal[int] , literal[int] ]))
identifier[num_group] = identifier[int] ( identifier[attrs] . identifier[get] ( literal[string] , literal[int] ))
identifier[dilations] = identifier[list] ( identifier[parse_helper] ( identifier[attrs] , literal[string] ,[ literal[int] , literal[int] ]))
identifier[pad_dims] = identifier[pad_dims] + identifier[pad_dims]
identifier[conv_node] = identifier[onnx] . identifier[helper] . identifier[make_node] (
literal[string] ,
identifier[inputs] = identifier[input_nodes] ,
identifier[outputs] =[ identifier[name] ],
identifier[kernel_shape] = identifier[kernel_dims] ,
identifier[strides] = identifier[stride_dims] ,
identifier[dilations] = identifier[dilations] ,
identifier[pads] = identifier[pad_dims] ,
identifier[group] = identifier[num_group] ,
identifier[name] = identifier[name]
)
keyword[return] [ identifier[conv_node] ] | def convert_convolution(node, **kwargs):
"""Map MXNet's convolution operator attributes to onnx's Conv operator
and return the created node.
"""
(name, input_nodes, attrs) = get_inputs(node, kwargs)
kernel_dims = list(parse_helper(attrs, 'kernel'))
stride_dims = list(parse_helper(attrs, 'stride', [1, 1]))
pad_dims = list(parse_helper(attrs, 'pad', [0, 0]))
num_group = int(attrs.get('num_group', 1))
dilations = list(parse_helper(attrs, 'dilate', [1, 1]))
pad_dims = pad_dims + pad_dims
conv_node = onnx.helper.make_node('Conv', inputs=input_nodes, outputs=[name], kernel_shape=kernel_dims, strides=stride_dims, dilations=dilations, pads=pad_dims, group=num_group, name=name)
return [conv_node] |
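A worked example of the pads handling: MXNet stores one pad value per spatial axis, while ONNX expects begin pads followed by end pads, hence pad_dims + pad_dims (parse_helper's exact parsing is an assumption here):

attrs = {"kernel": "(3, 3)", "stride": "(2, 2)", "pad": "(1, 1)"}

def parse_tuple(s):
    # Assumed behaviour of parse_helper: "(a, b)" -> [a, b].
    return [int(x) for x in s.strip("()").split(",")]

pad_dims = parse_tuple(attrs["pad"])
print(pad_dims + pad_dims)  # [1, 1, 1, 1] = [x1_begin, x2_begin, x1_end, x2_end]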
def import_type(dest, src, name, api=None, filter_symbol=None):
"""Import Type `name` and its dependencies from Registry `src`
to Registry `dest`.
:param Registry dest: Destination Registry
:param Registry src: Source Registry
:param str name: Name of type to import
    :param str api: Prefer to import Types with API name `api`, or None to
        import Types with no API name.
:param filter_symbol: Optional filter callable
:type filter_symbol: Callable with signature
``(symbol_type:str, symbol_name:str) -> bool``
"""
if not filter_symbol:
filter_symbol = _default_filter_symbol
type = src.get_type(name, api)
for x in type.required_types:
if not filter_symbol('type', x):
continue
import_type(dest, src, x, api, filter_symbol)
dest.types[(type.name, type.api)] = type | def function[import_type, parameter[dest, src, name, api, filter_symbol]]:
constant[Import Type `name` and its dependencies from Registry `src`
to Registry `dest`.
:param Registry dest: Destination Registry
:param Registry src: Source Registry
:param str name: Name of type to import
:param str api: Prefer to import Types with api Name `api`, or None to
import Types with no api name.
:param filter_symbol: Optional filter callable
:type filter_symbol: Callable with signature
``(symbol_type:str, symbol_name:str) -> bool``
]
if <ast.UnaryOp object at 0x7da20e9562c0> begin[:]
variable[filter_symbol] assign[=] name[_default_filter_symbol]
variable[type] assign[=] call[name[src].get_type, parameter[name[name], name[api]]]
for taget[name[x]] in starred[name[type].required_types] begin[:]
if <ast.UnaryOp object at 0x7da20e9559c0> begin[:]
continue
call[name[import_type], parameter[name[dest], name[src], name[x], name[api], name[filter_symbol]]]
call[name[dest].types][tuple[[<ast.Attribute object at 0x7da20cabd7b0>, <ast.Attribute object at 0x7da20cabdf00>]]] assign[=] name[type] | keyword[def] identifier[import_type] ( identifier[dest] , identifier[src] , identifier[name] , identifier[api] = keyword[None] , identifier[filter_symbol] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[filter_symbol] :
identifier[filter_symbol] = identifier[_default_filter_symbol]
identifier[type] = identifier[src] . identifier[get_type] ( identifier[name] , identifier[api] )
keyword[for] identifier[x] keyword[in] identifier[type] . identifier[required_types] :
keyword[if] keyword[not] identifier[filter_symbol] ( literal[string] , identifier[x] ):
keyword[continue]
identifier[import_type] ( identifier[dest] , identifier[src] , identifier[x] , identifier[api] , identifier[filter_symbol] )
identifier[dest] . identifier[types] [( identifier[type] . identifier[name] , identifier[type] . identifier[api] )]= identifier[type] | def import_type(dest, src, name, api=None, filter_symbol=None):
"""Import Type `name` and its dependencies from Registry `src`
to Registry `dest`.
:param Registry dest: Destination Registry
:param Registry src: Source Registry
:param str name: Name of type to import
    :param str api: Prefer to import Types with API name `api`, or None to
        import Types with no API name.
:param filter_symbol: Optional filter callable
:type filter_symbol: Callable with signature
``(symbol_type:str, symbol_name:str) -> bool``
"""
if not filter_symbol:
filter_symbol = _default_filter_symbol # depends on [control=['if'], data=[]]
type = src.get_type(name, api)
for x in type.required_types:
if not filter_symbol('type', x):
continue # depends on [control=['if'], data=[]]
import_type(dest, src, x, api, filter_symbol) # depends on [control=['for'], data=['x']]
dest.types[type.name, type.api] = type |
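A toy run of the recursive import with the registry reduced to a dict and a filter that skips one dependency; the type names are made up for illustration:

class ToyType:
    def __init__(self, name, requires=()):
        self.name, self.api, self.required_types = name, None, list(requires)

class ToyRegistry:
    def __init__(self, types=()):
        self.types = {(t.name, t.api): t for t in types}
    def get_type(self, name, api=None):
        return self.types[(name, api)]

def toy_import_type(dest, src, name, filter_symbol):
    # Depth-first over required_types, same shape as import_type above.
    t = src.get_type(name)
    for dep in t.required_types:
        if filter_symbol("type", dep):
            toy_import_type(dest, src, dep, filter_symbol)
    dest.types[(t.name, t.api)] = t

src = ToyRegistry([ToyType("GLenum"), ToyType("khrplatform"),
                   ToyType("GLsync", requires=["GLenum", "khrplatform"])])
dest = ToyRegistry()
toy_import_type(dest, src, "GLsync", lambda kind, n: n != "khrplatform")
print(sorted(n for n, _ in dest.types))  # -> ['GLenum', 'GLsync']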
def add_marccountry_tag(dom):
"""
Add ``<mods:placeTerm>`` tag with proper content.
"""
marccountry = dom.find("mods:placeTerm", {"authority": "marccountry"})
# don't add again if already defined
if marccountry:
return
marccountry_tag = dhtmlparser.HTMLElement(
"mods:place",
[
dhtmlparser.HTMLElement(
"mods:placeTerm",
{"type": "code", "authority": "marccountry"},
[dhtmlparser.HTMLElement("xr-")]
)
]
)
insert_tag(
marccountry_tag,
dom.match("mods:mods", "mods:originInfo", "mods:place"),
first(dom.find("mods:originInfo"))
) | def function[add_marccountry_tag, parameter[dom]]:
constant[
Add ``<mods:placeTerm>`` tag with proper content.
]
variable[marccountry] assign[=] call[name[dom].find, parameter[constant[mods:placeTerm], dictionary[[<ast.Constant object at 0x7da1b094a500>], [<ast.Constant object at 0x7da1b094a3e0>]]]]
if name[marccountry] begin[:]
return[None]
variable[marccountry_tag] assign[=] call[name[dhtmlparser].HTMLElement, parameter[constant[mods:place], list[[<ast.Call object at 0x7da1b0949bd0>]]]]
call[name[insert_tag], parameter[name[marccountry_tag], call[name[dom].match, parameter[constant[mods:mods], constant[mods:originInfo], constant[mods:place]]], call[name[first], parameter[call[name[dom].find, parameter[constant[mods:originInfo]]]]]]] | keyword[def] identifier[add_marccountry_tag] ( identifier[dom] ):
literal[string]
identifier[marccountry] = identifier[dom] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] })
keyword[if] identifier[marccountry] :
keyword[return]
identifier[marccountry_tag] = identifier[dhtmlparser] . identifier[HTMLElement] (
literal[string] ,
[
identifier[dhtmlparser] . identifier[HTMLElement] (
literal[string] ,
{ literal[string] : literal[string] , literal[string] : literal[string] },
[ identifier[dhtmlparser] . identifier[HTMLElement] ( literal[string] )]
)
]
)
identifier[insert_tag] (
identifier[marccountry_tag] ,
identifier[dom] . identifier[match] ( literal[string] , literal[string] , literal[string] ),
identifier[first] ( identifier[dom] . identifier[find] ( literal[string] ))
) | def add_marccountry_tag(dom):
"""
Add ``<mods:placeTerm>`` tag with proper content.
"""
marccountry = dom.find('mods:placeTerm', {'authority': 'marccountry'})
# don't add again if already defined
if marccountry:
return # depends on [control=['if'], data=[]]
marccountry_tag = dhtmlparser.HTMLElement('mods:place', [dhtmlparser.HTMLElement('mods:placeTerm', {'type': 'code', 'authority': 'marccountry'}, [dhtmlparser.HTMLElement('xr-')])])
insert_tag(marccountry_tag, dom.match('mods:mods', 'mods:originInfo', 'mods:place'), first(dom.find('mods:originInfo'))) |
def results(project, apikey, run, watch, server, output):
"""
Check to see if results are available for a particular mapping
    and, if so, download them.
Authentication is carried out using the --apikey option which
must be provided. Depending on the server operating mode this
may return a mask, a linkage table, or a permutation. Consult
the entity service documentation for details.
"""
status = run_get_status(server, project, run, apikey)
log(format_run_status(status))
if watch:
for status in watch_run_status(server, project, run, apikey, 24*60*60):
log(format_run_status(status))
if status['state'] == 'completed':
log("Downloading result")
response = run_get_result_text(server, project, run, apikey)
log("Received result")
print(response, file=output)
elif status['state'] == 'error':
log("There was an error")
error_result = run_get_result_text(server, project, run, apikey)
print(error_result, file=output)
else:
log("No result yet") | def function[results, parameter[project, apikey, run, watch, server, output]]:
constant[
Check to see if results are available for a particular mapping
and if so download.
Authentication is carried out using the --apikey option which
must be provided. Depending on the server operating mode this
may return a mask, a linkage table, or a permutation. Consult
the entity service documentation for details.
]
variable[status] assign[=] call[name[run_get_status], parameter[name[server], name[project], name[run], name[apikey]]]
call[name[log], parameter[call[name[format_run_status], parameter[name[status]]]]]
if name[watch] begin[:]
for taget[name[status]] in starred[call[name[watch_run_status], parameter[name[server], name[project], name[run], name[apikey], binary_operation[binary_operation[constant[24] * constant[60]] * constant[60]]]]] begin[:]
call[name[log], parameter[call[name[format_run_status], parameter[name[status]]]]]
if compare[call[name[status]][constant[state]] equal[==] constant[completed]] begin[:]
call[name[log], parameter[constant[Downloading result]]]
variable[response] assign[=] call[name[run_get_result_text], parameter[name[server], name[project], name[run], name[apikey]]]
call[name[log], parameter[constant[Received result]]]
call[name[print], parameter[name[response]]] | keyword[def] identifier[results] ( identifier[project] , identifier[apikey] , identifier[run] , identifier[watch] , identifier[server] , identifier[output] ):
literal[string]
identifier[status] = identifier[run_get_status] ( identifier[server] , identifier[project] , identifier[run] , identifier[apikey] )
identifier[log] ( identifier[format_run_status] ( identifier[status] ))
keyword[if] identifier[watch] :
keyword[for] identifier[status] keyword[in] identifier[watch_run_status] ( identifier[server] , identifier[project] , identifier[run] , identifier[apikey] , literal[int] * literal[int] * literal[int] ):
identifier[log] ( identifier[format_run_status] ( identifier[status] ))
keyword[if] identifier[status] [ literal[string] ]== literal[string] :
identifier[log] ( literal[string] )
identifier[response] = identifier[run_get_result_text] ( identifier[server] , identifier[project] , identifier[run] , identifier[apikey] )
identifier[log] ( literal[string] )
identifier[print] ( identifier[response] , identifier[file] = identifier[output] )
keyword[elif] identifier[status] [ literal[string] ]== literal[string] :
identifier[log] ( literal[string] )
identifier[error_result] = identifier[run_get_result_text] ( identifier[server] , identifier[project] , identifier[run] , identifier[apikey] )
identifier[print] ( identifier[error_result] , identifier[file] = identifier[output] )
keyword[else] :
identifier[log] ( literal[string] ) | def results(project, apikey, run, watch, server, output):
"""
Check to see if results are available for a particular mapping
and if so download.
Authentication is carried out using the --apikey option which
must be provided. Depending on the server operating mode this
may return a mask, a linkage table, or a permutation. Consult
the entity service documentation for details.
"""
status = run_get_status(server, project, run, apikey)
log(format_run_status(status))
if watch:
for status in watch_run_status(server, project, run, apikey, 24 * 60 * 60):
log(format_run_status(status)) # depends on [control=['for'], data=['status']] # depends on [control=['if'], data=[]]
if status['state'] == 'completed':
log('Downloading result')
response = run_get_result_text(server, project, run, apikey)
log('Received result')
print(response, file=output) # depends on [control=['if'], data=[]]
elif status['state'] == 'error':
log('There was an error')
error_result = run_get_result_text(server, project, run, apikey)
print(error_result, file=output) # depends on [control=['if'], data=[]]
else:
log('No result yet') |
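The --watch branch relies on the for loop's variable surviving the loop: the last status yielded by watch_run_status drives the final branch. A toy stand-in:

def fake_watch_run_status():
    # Assumed shape: yields status dicts until a terminal state.
    yield {"state": "queued"}
    yield {"state": "running"}
    yield {"state": "completed"}

status = {"state": "unknown"}
for status in fake_watch_run_status():
    print("status:", status["state"])
if status["state"] == "completed":  # the loop variable persists here
    print("Downloading result")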
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params) | def function[samples, parameter[self, anystring, limit, offset, sortby]]:
constant[Return an object representing the samples identified by the input domain, IP, or URL]
variable[uri] assign[=] call[call[name[self]._uris][constant[samples]].format, parameter[name[anystring]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f2fb20>, <ast.Constant object at 0x7da1b0f2f250>, <ast.Constant object at 0x7da1b0f2df60>], [<ast.Name object at 0x7da1b0f2e7a0>, <ast.Name object at 0x7da1b0f2f130>, <ast.Name object at 0x7da1b0f2e230>]]
return[call[name[self].get_parse, parameter[name[uri], name[params]]]] | keyword[def] identifier[samples] ( identifier[self] , identifier[anystring] , identifier[limit] = keyword[None] , identifier[offset] = keyword[None] , identifier[sortby] = keyword[None] ):
literal[string]
identifier[uri] = identifier[self] . identifier[_uris] [ literal[string] ]. identifier[format] ( identifier[anystring] )
identifier[params] ={ literal[string] : identifier[limit] , literal[string] : identifier[offset] , literal[string] : identifier[sortby] }
keyword[return] identifier[self] . identifier[get_parse] ( identifier[uri] , identifier[params] ) | def samples(self, anystring, limit=None, offset=None, sortby=None):
"""Return an object representing the samples identified by the input domain, IP, or URL"""
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params) |
def validate_schema(instance, schema, test_required=True, data_location=None,
skip_missing_data=False):
"""Check if DictField values are consistent with our data types.
Perform basic JSON schema validation and our custom validations:
* check that required fields are given (if `test_required` is set
to ``True``)
* check if ``basic:file:`` and ``list:basic:file`` fields match
regex given in schema (only if ``validate_regex`` is defined in
        schema for corresponding fields) and exist (only if
``data_location`` is given)
* check if directories referenced in ``basic:dir:`` and
        ``list:basic:dir`` fields exist (only if ``data_location`` is
given)
* check that referenced ``Data`` objects (in ``data:<data_type>``
        and ``list:data:<data_type>`` fields) exist and are of type
``<data_type>``
* check that referenced ``Storage`` objects (in ``basic:json``
        fields) exist
:param list instance: Instance to be validated
:param list schema: Schema for validation
:param bool test_required: Flag for testing if all required fields
        are present. It is useful if validation is run before the ``Data``
        object is finished and some fields are still missing
        (default: ``True``)
:param :class:`~resolwe.flow.models.data.DataLocation` data_location:
data location used for checking if files and directories exist
(default: ``None``)
:param bool skip_missing_data: Don't raise an error if referenced
``Data`` object does not exist
:rtype: None
:raises ValidationError: if ``instance`` doesn't match schema
defined in ``schema``
"""
from .storage import Storage # Prevent circular import.
path_prefix = None
if data_location:
path_prefix = data_location.get_path()
def validate_refs(field):
"""Validate reference paths."""
for ref_filename in field.get('refs', []):
ref_path = os.path.join(path_prefix, ref_filename)
if not os.path.exists(ref_path):
raise ValidationError("Path referenced in `refs` ({}) does not exist.".format(ref_path))
if not (os.path.isfile(ref_path) or os.path.isdir(ref_path)):
raise ValidationError(
"Path referenced in `refs` ({}) is neither a file or directory.".format(ref_path))
def validate_file(field, regex):
"""Validate file name (and check that it exists)."""
filename = field['file']
if regex and not re.search(regex, filename):
raise ValidationError(
"File name {} does not match regex {}".format(filename, regex))
if path_prefix:
path = os.path.join(path_prefix, filename)
if not os.path.exists(path):
raise ValidationError("Referenced path ({}) does not exist.".format(path))
if not os.path.isfile(path):
raise ValidationError("Referenced path ({}) is not a file.".format(path))
validate_refs(field)
def validate_dir(field):
"""Check that dirs and referenced files exists."""
dirname = field['dir']
if path_prefix:
path = os.path.join(path_prefix, dirname)
if not os.path.exists(path):
raise ValidationError("Referenced path ({}) does not exist.".format(path))
if not os.path.isdir(path):
raise ValidationError("Referenced path ({}) is not a directory.".format(path))
validate_refs(field)
def validate_data(data_pk, type_):
"""Check that `Data` objects exist and is of right type."""
from .data import Data # prevent circular import
data_qs = Data.objects.filter(pk=data_pk).values('process__type')
if not data_qs.exists():
if skip_missing_data:
return
raise ValidationError(
"Referenced `Data` object does not exist (id:{})".format(data_pk))
data = data_qs.first()
if not data['process__type'].startswith(type_):
raise ValidationError(
"Data object of type `{}` is required, but type `{}` is given. "
"(id:{})".format(type_, data['process__type'], data_pk))
def validate_range(value, interval, name):
"""Check that given value is inside the specified range."""
if not interval:
return
if value < interval[0] or value > interval[1]:
raise ValidationError(
"Value of field '{}' is out of range. It should be between {} and {}.".format(
name, interval[0], interval[1]
)
)
is_dirty = False
dirty_fields = []
for _schema, _fields, _ in iterate_schema(instance, schema):
name = _schema['name']
is_required = _schema.get('required', True)
if test_required and is_required and name not in _fields:
is_dirty = True
dirty_fields.append(name)
if name in _fields:
field = _fields[name]
type_ = _schema.get('type', "")
# Treat None as if the field is missing.
if not is_required and field is None:
continue
try:
jsonschema.validate([{"type": type_, "value": field}], TYPE_SCHEMA)
except jsonschema.exceptions.ValidationError as ex:
raise ValidationError(ex.message)
choices = [choice['value'] for choice in _schema.get('choices', [])]
allow_custom_choice = _schema.get('allow_custom_choice', False)
if choices and not allow_custom_choice and field not in choices:
raise ValidationError(
"Value of field '{}' must match one of predefined choices. "
"Current value: {}".format(name, field)
)
if type_ == 'basic:file:':
validate_file(field, _schema.get('validate_regex'))
elif type_ == 'list:basic:file:':
for obj in field:
validate_file(obj, _schema.get('validate_regex'))
elif type_ == 'basic:dir:':
validate_dir(field)
elif type_ == 'list:basic:dir:':
for obj in field:
validate_dir(obj)
elif type_ == 'basic:json:' and not Storage.objects.filter(pk=field).exists():
raise ValidationError(
"Referenced `Storage` object does not exist (id:{})".format(field))
elif type_.startswith('data:'):
validate_data(field, type_)
elif type_.startswith('list:data:'):
for data_id in field:
validate_data(data_id, type_[5:]) # remove `list:` from type
elif type_ == 'basic:integer:' or type_ == 'basic:decimal:':
validate_range(field, _schema.get('range'), name)
elif type_ == 'list:basic:integer:' or type_ == 'list:basic:decimal:':
for obj in field:
validate_range(obj, _schema.get('range'), name)
try:
# Check that schema definitions exist for all fields
for _, _ in iterate_fields(instance, schema):
pass
except KeyError as ex:
raise ValidationError(str(ex))
if is_dirty:
dirty_fields = ['"{}"'.format(field) for field in dirty_fields]
raise DirtyError("Required fields {} not given.".format(', '.join(dirty_fields))) | def function[validate_schema, parameter[instance, schema, test_required, data_location, skip_missing_data]]:
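Two of the per-field checks above (choices and range) exercised in isolation on a single field schema; this is a simplification, since the real function walks nested schemas via iterate_schema:

class ValidationError(Exception):
    pass

def check_field(schema, value):
    choices = [c["value"] for c in schema.get("choices", [])]
    if choices and not schema.get("allow_custom_choice") and value not in choices:
        raise ValidationError("Value of field '%s' must match one of predefined "
                              "choices." % schema["name"])
    rng = schema.get("range")
    if rng and not (rng[0] <= value <= rng[1]):
        raise ValidationError("Value of field '%s' is out of range." % schema["name"])

threads = {"name": "threads", "type": "basic:integer:", "range": [1, 16]}
check_field(threads, 8)   # passes silently
try:
    check_field(threads, 64)
except ValidationError as err:
    print(err)            # -> Value of field 'threads' is out of range.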
constant[Check if DictField values are consistent with our data types.
Perform basic JSON schema validation and our custom validations:
* check that required fields are given (if `test_required` is set
to ``True``)
* check if ``basic:file:`` and ``list:basic:file`` fields match
regex given in schema (only if ``validate_regex`` is defined in
schema for corresponding fields) and exist (only if
``data_location`` is given)
* check if directories referenced in ``basic:dir:`` and
``list:basic:dir`` fields exist (only if ``data_location`` is
given)
* check that referenced ``Data`` objects (in ``data:<data_type>``
and ``list:data:<data_type>`` fields) exist and are of type
``<data_type>``
* check that referenced ``Storage`` objects (in ``basic:json``
fields) exist
:param list instance: Instance to be validated
:param list schema: Schema for validation
:param bool test_required: Flag for testing if all required fields
are present. It is useful if validation is run before the ``Data``
object is finished and some fields are still missing
(default: ``True``)
:param :class:`~resolwe.flow.models.data.DataLocation` data_location:
data location used for checking if files and directories exist
(default: ``None``)
:param bool skip_missing_data: Don't raise an error if referenced
``Data`` object does not exist
:rtype: None
:raises ValidationError: if ``instance`` doesn't match schema
defined in ``schema``
]
from relative_module[storage] import module[Storage]
variable[path_prefix] assign[=] constant[None]
if name[data_location] begin[:]
variable[path_prefix] assign[=] call[name[data_location].get_path, parameter[]]
def function[validate_refs, parameter[field]]:
constant[Validate reference paths.]
for taget[name[ref_filename]] in starred[call[name[field].get, parameter[constant[refs], list[[]]]]] begin[:]
variable[ref_path] assign[=] call[name[os].path.join, parameter[name[path_prefix], name[ref_filename]]]
if <ast.UnaryOp object at 0x7da1b19276a0> begin[:]
<ast.Raise object at 0x7da1b1927580>
if <ast.UnaryOp object at 0x7da1b1927400> begin[:]
<ast.Raise object at 0x7da1b19271c0>
def function[validate_file, parameter[field, regex]]:
constant[Validate file name (and check that it exists).]
variable[filename] assign[=] call[name[field]][constant[file]]
if <ast.BoolOp object at 0x7da1b1926e00> begin[:]
<ast.Raise object at 0x7da1b1926c80>
if name[path_prefix] begin[:]
variable[path] assign[=] call[name[os].path.join, parameter[name[path_prefix], name[filename]]]
if <ast.UnaryOp object at 0x7da1b19268f0> begin[:]
<ast.Raise object at 0x7da1b19267d0>
if <ast.UnaryOp object at 0x7da1b1926650> begin[:]
<ast.Raise object at 0x7da1b1926530>
call[name[validate_refs], parameter[name[field]]]
def function[validate_dir, parameter[field]]:
constant[Check that dirs and referenced files exist.]
variable[dirname] assign[=] call[name[field]][constant[dir]]
if name[path_prefix] begin[:]
variable[path] assign[=] call[name[os].path.join, parameter[name[path_prefix], name[dirname]]]
if <ast.UnaryOp object at 0x7da1b1925ed0> begin[:]
<ast.Raise object at 0x7da1b1925db0>
if <ast.UnaryOp object at 0x7da1b1925c30> begin[:]
<ast.Raise object at 0x7da1b1925b10>
call[name[validate_refs], parameter[name[field]]]
def function[validate_data, parameter[data_pk, type_]]:
constant[Check that referenced `Data` objects exist and are of the right type.]
from relative_module[data] import module[Data]
variable[data_qs] assign[=] call[call[name[Data].objects.filter, parameter[]].values, parameter[constant[process__type]]]
if <ast.UnaryOp object at 0x7da1b19254b0> begin[:]
if name[skip_missing_data] begin[:]
return[None]
<ast.Raise object at 0x7da1b1925330>
variable[data] assign[=] call[name[data_qs].first, parameter[]]
if <ast.UnaryOp object at 0x7da1b19250c0> begin[:]
<ast.Raise object at 0x7da1b19b8b80>
def function[validate_range, parameter[value, interval, name]]:
constant[Check that given value is inside the specified range.]
if <ast.UnaryOp object at 0x7da1b19b89a0> begin[:]
return[None]
if <ast.BoolOp object at 0x7da1b19b8730> begin[:]
<ast.Raise object at 0x7da1b19b84c0>
variable[is_dirty] assign[=] constant[False]
variable[dirty_fields] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b19b80d0>, <ast.Name object at 0x7da1b19b8100>, <ast.Name object at 0x7da1b19b8130>]]] in starred[call[name[iterate_schema], parameter[name[instance], name[schema]]]] begin[:]
variable[name] assign[=] call[name[_schema]][constant[name]]
variable[is_required] assign[=] call[name[_schema].get, parameter[constant[required], constant[True]]]
if <ast.BoolOp object at 0x7da1b19ba8c0> begin[:]
variable[is_dirty] assign[=] constant[True]
call[name[dirty_fields].append, parameter[name[name]]]
if compare[name[name] in name[_fields]] begin[:]
variable[field] assign[=] call[name[_fields]][name[name]]
variable[type_] assign[=] call[name[_schema].get, parameter[constant[type], constant[]]]
if <ast.BoolOp object at 0x7da1b19baf50> begin[:]
continue
<ast.Try object at 0x7da1b19bae60>
variable[choices] assign[=] <ast.ListComp object at 0x7da1b19bb400>
variable[allow_custom_choice] assign[=] call[name[_schema].get, parameter[constant[allow_custom_choice], constant[False]]]
if <ast.BoolOp object at 0x7da1b19bbc70> begin[:]
<ast.Raise object at 0x7da1b19bba30>
if compare[name[type_] equal[==] constant[basic:file:]] begin[:]
call[name[validate_file], parameter[name[field], call[name[_schema].get, parameter[constant[validate_regex]]]]]
<ast.Try object at 0x7da1b193ab00>
if name[is_dirty] begin[:]
variable[dirty_fields] assign[=] <ast.ListComp object at 0x7da1b193a6b0>
<ast.Raise object at 0x7da1b193a500> | keyword[def] identifier[validate_schema] ( identifier[instance] , identifier[schema] , identifier[test_required] = keyword[True] , identifier[data_location] = keyword[None] ,
identifier[skip_missing_data] = keyword[False] ):
literal[string]
keyword[from] . identifier[storage] keyword[import] identifier[Storage]
identifier[path_prefix] = keyword[None]
keyword[if] identifier[data_location] :
identifier[path_prefix] = identifier[data_location] . identifier[get_path] ()
keyword[def] identifier[validate_refs] ( identifier[field] ):
literal[string]
keyword[for] identifier[ref_filename] keyword[in] identifier[field] . identifier[get] ( literal[string] ,[]):
identifier[ref_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path_prefix] , identifier[ref_filename] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[ref_path] ):
keyword[raise] identifier[ValidationError] ( literal[string] . identifier[format] ( identifier[ref_path] ))
keyword[if] keyword[not] ( identifier[os] . identifier[path] . identifier[isfile] ( identifier[ref_path] ) keyword[or] identifier[os] . identifier[path] . identifier[isdir] ( identifier[ref_path] )):
keyword[raise] identifier[ValidationError] (
literal[string] . identifier[format] ( identifier[ref_path] ))
keyword[def] identifier[validate_file] ( identifier[field] , identifier[regex] ):
literal[string]
identifier[filename] = identifier[field] [ literal[string] ]
keyword[if] identifier[regex] keyword[and] keyword[not] identifier[re] . identifier[search] ( identifier[regex] , identifier[filename] ):
keyword[raise] identifier[ValidationError] (
literal[string] . identifier[format] ( identifier[filename] , identifier[regex] ))
keyword[if] identifier[path_prefix] :
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path_prefix] , identifier[filename] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[raise] identifier[ValidationError] ( literal[string] . identifier[format] ( identifier[path] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[path] ):
keyword[raise] identifier[ValidationError] ( literal[string] . identifier[format] ( identifier[path] ))
identifier[validate_refs] ( identifier[field] )
keyword[def] identifier[validate_dir] ( identifier[field] ):
literal[string]
identifier[dirname] = identifier[field] [ literal[string] ]
keyword[if] identifier[path_prefix] :
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path_prefix] , identifier[dirname] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[raise] identifier[ValidationError] ( literal[string] . identifier[format] ( identifier[path] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ):
keyword[raise] identifier[ValidationError] ( literal[string] . identifier[format] ( identifier[path] ))
identifier[validate_refs] ( identifier[field] )
keyword[def] identifier[validate_data] ( identifier[data_pk] , identifier[type_] ):
literal[string]
keyword[from] . identifier[data] keyword[import] identifier[Data]
identifier[data_qs] = identifier[Data] . identifier[objects] . identifier[filter] ( identifier[pk] = identifier[data_pk] ). identifier[values] ( literal[string] )
keyword[if] keyword[not] identifier[data_qs] . identifier[exists] ():
keyword[if] identifier[skip_missing_data] :
keyword[return]
keyword[raise] identifier[ValidationError] (
literal[string] . identifier[format] ( identifier[data_pk] ))
identifier[data] = identifier[data_qs] . identifier[first] ()
keyword[if] keyword[not] identifier[data] [ literal[string] ]. identifier[startswith] ( identifier[type_] ):
keyword[raise] identifier[ValidationError] (
literal[string]
literal[string] . identifier[format] ( identifier[type_] , identifier[data] [ literal[string] ], identifier[data_pk] ))
keyword[def] identifier[validate_range] ( identifier[value] , identifier[interval] , identifier[name] ):
literal[string]
keyword[if] keyword[not] identifier[interval] :
keyword[return]
keyword[if] identifier[value] < identifier[interval] [ literal[int] ] keyword[or] identifier[value] > identifier[interval] [ literal[int] ]:
keyword[raise] identifier[ValidationError] (
literal[string] . identifier[format] (
identifier[name] , identifier[interval] [ literal[int] ], identifier[interval] [ literal[int] ]
)
)
identifier[is_dirty] = keyword[False]
identifier[dirty_fields] =[]
keyword[for] identifier[_schema] , identifier[_fields] , identifier[_] keyword[in] identifier[iterate_schema] ( identifier[instance] , identifier[schema] ):
identifier[name] = identifier[_schema] [ literal[string] ]
identifier[is_required] = identifier[_schema] . identifier[get] ( literal[string] , keyword[True] )
keyword[if] identifier[test_required] keyword[and] identifier[is_required] keyword[and] identifier[name] keyword[not] keyword[in] identifier[_fields] :
identifier[is_dirty] = keyword[True]
identifier[dirty_fields] . identifier[append] ( identifier[name] )
keyword[if] identifier[name] keyword[in] identifier[_fields] :
identifier[field] = identifier[_fields] [ identifier[name] ]
identifier[type_] = identifier[_schema] . identifier[get] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[is_required] keyword[and] identifier[field] keyword[is] keyword[None] :
keyword[continue]
keyword[try] :
identifier[jsonschema] . identifier[validate] ([{ literal[string] : identifier[type_] , literal[string] : identifier[field] }], identifier[TYPE_SCHEMA] )
keyword[except] identifier[jsonschema] . identifier[exceptions] . identifier[ValidationError] keyword[as] identifier[ex] :
keyword[raise] identifier[ValidationError] ( identifier[ex] . identifier[message] )
identifier[choices] =[ identifier[choice] [ literal[string] ] keyword[for] identifier[choice] keyword[in] identifier[_schema] . identifier[get] ( literal[string] ,[])]
identifier[allow_custom_choice] = identifier[_schema] . identifier[get] ( literal[string] , keyword[False] )
keyword[if] identifier[choices] keyword[and] keyword[not] identifier[allow_custom_choice] keyword[and] identifier[field] keyword[not] keyword[in] identifier[choices] :
keyword[raise] identifier[ValidationError] (
literal[string]
literal[string] . identifier[format] ( identifier[name] , identifier[field] )
)
keyword[if] identifier[type_] == literal[string] :
identifier[validate_file] ( identifier[field] , identifier[_schema] . identifier[get] ( literal[string] ))
keyword[elif] identifier[type_] == literal[string] :
keyword[for] identifier[obj] keyword[in] identifier[field] :
identifier[validate_file] ( identifier[obj] , identifier[_schema] . identifier[get] ( literal[string] ))
keyword[elif] identifier[type_] == literal[string] :
identifier[validate_dir] ( identifier[field] )
keyword[elif] identifier[type_] == literal[string] :
keyword[for] identifier[obj] keyword[in] identifier[field] :
identifier[validate_dir] ( identifier[obj] )
keyword[elif] identifier[type_] == literal[string] keyword[and] keyword[not] identifier[Storage] . identifier[objects] . identifier[filter] ( identifier[pk] = identifier[field] ). identifier[exists] ():
keyword[raise] identifier[ValidationError] (
literal[string] . identifier[format] ( identifier[field] ))
keyword[elif] identifier[type_] . identifier[startswith] ( literal[string] ):
identifier[validate_data] ( identifier[field] , identifier[type_] )
keyword[elif] identifier[type_] . identifier[startswith] ( literal[string] ):
keyword[for] identifier[data_id] keyword[in] identifier[field] :
identifier[validate_data] ( identifier[data_id] , identifier[type_] [ literal[int] :])
keyword[elif] identifier[type_] == literal[string] keyword[or] identifier[type_] == literal[string] :
identifier[validate_range] ( identifier[field] , identifier[_schema] . identifier[get] ( literal[string] ), identifier[name] )
keyword[elif] identifier[type_] == literal[string] keyword[or] identifier[type_] == literal[string] :
keyword[for] identifier[obj] keyword[in] identifier[field] :
identifier[validate_range] ( identifier[obj] , identifier[_schema] . identifier[get] ( literal[string] ), identifier[name] )
keyword[try] :
keyword[for] identifier[_] , identifier[_] keyword[in] identifier[iterate_fields] ( identifier[instance] , identifier[schema] ):
keyword[pass]
keyword[except] identifier[KeyError] keyword[as] identifier[ex] :
keyword[raise] identifier[ValidationError] ( identifier[str] ( identifier[ex] ))
keyword[if] identifier[is_dirty] :
identifier[dirty_fields] =[ literal[string] . identifier[format] ( identifier[field] ) keyword[for] identifier[field] keyword[in] identifier[dirty_fields] ]
keyword[raise] identifier[DirtyError] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[dirty_fields] ))) | def validate_schema(instance, schema, test_required=True, data_location=None, skip_missing_data=False):
"""Check if DictField values are consistent with our data types.
Perform basic JSON schema validation and our custom validations:
* check that required fields are given (if `test_required` is set
to ``True``)
* check if ``basic:file:`` and ``list:basic:file`` fields match
regex given in schema (only if ``validate_regex`` is defined in
schema for coresponding fields) and exists (only if
``data_location`` is given)
* check if directories referenced in ``basic:dir:`` and
``list:basic:dir``fields exist (only if ``data_location`` is
given)
* check that referenced ``Data`` objects (in ``data:<data_type>``
and ``list:data:<data_type>`` fields) exists and are of type
``<data_type>``
* check that referenced ``Storage`` objects (in ``basic:json``
fields) exists
:param list instance: Instance to be validated
:param list schema: Schema for validation
:param bool test_required: Flag for testing if all required fields
are present. It is useful if validation is run before the ``Data``
object is finished and some fields are still missing
(default: ``True``)
:param :class:`~resolwe.flow.models.data.DataLocation` data_location:
data location used for checking if files and directories exist
(default: ``None``)
:param bool skip_missing_data: Don't raise an error if referenced
``Data`` object does not exist
:rtype: None
:raises ValidationError: if ``instance`` doesn't match schema
defined in ``schema``
"""
from .storage import Storage # Prevent circular import.
path_prefix = None
if data_location:
path_prefix = data_location.get_path() # depends on [control=['if'], data=[]]
def validate_refs(field):
"""Validate reference paths."""
for ref_filename in field.get('refs', []):
ref_path = os.path.join(path_prefix, ref_filename)
if not os.path.exists(ref_path):
raise ValidationError('Path referenced in `refs` ({}) does not exist.'.format(ref_path)) # depends on [control=['if'], data=[]]
if not (os.path.isfile(ref_path) or os.path.isdir(ref_path)):
raise ValidationError('Path referenced in `refs` ({}) is neither a file or directory.'.format(ref_path)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ref_filename']]
def validate_file(field, regex):
"""Validate file name (and check that it exists)."""
filename = field['file']
if regex and (not re.search(regex, filename)):
raise ValidationError('File name {} does not match regex {}'.format(filename, regex)) # depends on [control=['if'], data=[]]
if path_prefix:
path = os.path.join(path_prefix, filename)
if not os.path.exists(path):
raise ValidationError('Referenced path ({}) does not exist.'.format(path)) # depends on [control=['if'], data=[]]
if not os.path.isfile(path):
raise ValidationError('Referenced path ({}) is not a file.'.format(path)) # depends on [control=['if'], data=[]]
validate_refs(field) # depends on [control=['if'], data=[]]
def validate_dir(field):
"""Check that dirs and referenced files exists."""
dirname = field['dir']
if path_prefix:
path = os.path.join(path_prefix, dirname)
if not os.path.exists(path):
raise ValidationError('Referenced path ({}) does not exist.'.format(path)) # depends on [control=['if'], data=[]]
if not os.path.isdir(path):
raise ValidationError('Referenced path ({}) is not a directory.'.format(path)) # depends on [control=['if'], data=[]]
validate_refs(field) # depends on [control=['if'], data=[]]
def validate_data(data_pk, type_):
"""Check that `Data` objects exist and is of right type."""
from .data import Data # prevent circular import
data_qs = Data.objects.filter(pk=data_pk).values('process__type')
if not data_qs.exists():
if skip_missing_data:
return # depends on [control=['if'], data=[]]
raise ValidationError('Referenced `Data` object does not exist (id:{})'.format(data_pk)) # depends on [control=['if'], data=[]]
data = data_qs.first()
if not data['process__type'].startswith(type_):
raise ValidationError('Data object of type `{}` is required, but type `{}` is given. (id:{})'.format(type_, data['process__type'], data_pk)) # depends on [control=['if'], data=[]]
def validate_range(value, interval, name):
"""Check that given value is inside the specified range."""
if not interval:
return # depends on [control=['if'], data=[]]
if value < interval[0] or value > interval[1]:
raise ValidationError("Value of field '{}' is out of range. It should be between {} and {}.".format(name, interval[0], interval[1])) # depends on [control=['if'], data=[]]
is_dirty = False
dirty_fields = []
for (_schema, _fields, _) in iterate_schema(instance, schema):
name = _schema['name']
is_required = _schema.get('required', True)
if test_required and is_required and (name not in _fields):
is_dirty = True
dirty_fields.append(name) # depends on [control=['if'], data=[]]
if name in _fields:
field = _fields[name]
type_ = _schema.get('type', '')
# Treat None as if the field is missing.
if not is_required and field is None:
continue # depends on [control=['if'], data=[]]
try:
jsonschema.validate([{'type': type_, 'value': field}], TYPE_SCHEMA) # depends on [control=['try'], data=[]]
except jsonschema.exceptions.ValidationError as ex:
raise ValidationError(ex.message) # depends on [control=['except'], data=['ex']]
choices = [choice['value'] for choice in _schema.get('choices', [])]
allow_custom_choice = _schema.get('allow_custom_choice', False)
if choices and (not allow_custom_choice) and (field not in choices):
raise ValidationError("Value of field '{}' must match one of predefined choices. Current value: {}".format(name, field)) # depends on [control=['if'], data=[]]
if type_ == 'basic:file:':
validate_file(field, _schema.get('validate_regex')) # depends on [control=['if'], data=[]]
elif type_ == 'list:basic:file:':
for obj in field:
validate_file(obj, _schema.get('validate_regex')) # depends on [control=['for'], data=['obj']] # depends on [control=['if'], data=[]]
elif type_ == 'basic:dir:':
validate_dir(field) # depends on [control=['if'], data=[]]
elif type_ == 'list:basic:dir:':
for obj in field:
validate_dir(obj) # depends on [control=['for'], data=['obj']] # depends on [control=['if'], data=[]]
elif type_ == 'basic:json:' and (not Storage.objects.filter(pk=field).exists()):
raise ValidationError('Referenced `Storage` object does not exist (id:{})'.format(field)) # depends on [control=['if'], data=[]]
elif type_.startswith('data:'):
validate_data(field, type_) # depends on [control=['if'], data=[]]
elif type_.startswith('list:data:'):
for data_id in field:
validate_data(data_id, type_[5:]) # remove `list:` from type # depends on [control=['for'], data=['data_id']] # depends on [control=['if'], data=[]]
elif type_ == 'basic:integer:' or type_ == 'basic:decimal:':
validate_range(field, _schema.get('range'), name) # depends on [control=['if'], data=[]]
elif type_ == 'list:basic:integer:' or type_ == 'list:basic:decimal:':
for obj in field:
validate_range(obj, _schema.get('range'), name) # depends on [control=['for'], data=['obj']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['name', '_fields']] # depends on [control=['for'], data=[]]
try:
# Check that schema definitions exist for all fields
for (_, _) in iterate_fields(instance, schema):
pass # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except KeyError as ex:
raise ValidationError(str(ex)) # depends on [control=['except'], data=['ex']]
if is_dirty:
dirty_fields = ['"{}"'.format(field) for field in dirty_fields]
raise DirtyError('Required fields {} not given.'.format(', '.join(dirty_fields))) # depends on [control=['if'], data=[]] |
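A minimal usage sketch for the validator above. The import path and the schema keys are assumptions inferred from the field names the function reads; in Resolwe the schema normally comes from a Process definition.

# Hypothetical call; the import path is an assumption.
from resolwe.flow.models.utils import validate_schema

schema = [
    {'name': 'threshold', 'type': 'basic:decimal:', 'range': [0.0, 1.0]},
    {'name': 'reads', 'type': 'data:reads:fastq:', 'required': False},
]
instance = {'threshold': 0.05}

# Returns None on success; raises ValidationError or DirtyError on failure.
validate_schema(instance, schema, test_required=True)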
def compare_jsone_task_definition(parent_link, rebuilt_definitions):
"""Compare the json-e rebuilt task definition vs the runtime definition.
Args:
parent_link (LinkOfTrust): the parent link to test.
rebuilt_definitions (dict): the rebuilt task definitions.
Raises:
CoTError: on failure.
"""
diffs = []
for compare_definition in rebuilt_definitions['tasks']:
# Rebuilt decision tasks have an extra `taskId`; remove
if 'taskId' in compare_definition:
del compare_definition['taskId']
# remove key/value pairs where the value is empty, since json-e drops
# them instead of keeping them with a None/{}/[] value.
compare_definition = remove_empty_keys(compare_definition)
runtime_definition = remove_empty_keys(parent_link.task)
diff = list(dictdiffer.diff(compare_definition, runtime_definition))
if diff:
diffs.append(pprint.pformat(diff))
continue
log.info("{}: Good.".format(parent_link.name))
break
else:
error_msg = "{} {}: the runtime task doesn't match any rebuilt definition!\n{}".format(
parent_link.name, parent_link.task_id, pprint.pformat(diffs)
)
log.critical(error_msg)
raise CoTError(error_msg) | def function[compare_jsone_task_definition, parameter[parent_link, rebuilt_definitions]]:
constant[Compare the json-e rebuilt task definition vs the runtime definition.
Args:
parent_link (LinkOfTrust): the parent link to test.
rebuilt_definitions (dict): the rebuilt task definitions.
Raises:
CoTError: on failure.
]
variable[diffs] assign[=] list[[]]
for taget[name[compare_definition]] in starred[call[name[rebuilt_definitions]][constant[tasks]]] begin[:]
if compare[constant[taskId] in name[compare_definition]] begin[:]
<ast.Delete object at 0x7da1b0e9f430>
variable[compare_definition] assign[=] call[name[remove_empty_keys], parameter[name[compare_definition]]]
variable[runtime_definition] assign[=] call[name[remove_empty_keys], parameter[name[parent_link].task]]
variable[diff] assign[=] call[name[list], parameter[call[name[dictdiffer].diff, parameter[name[compare_definition], name[runtime_definition]]]]]
if name[diff] begin[:]
call[name[diffs].append, parameter[call[name[pprint].pformat, parameter[name[diff]]]]]
continue
call[name[log].info, parameter[call[constant[{}: Good.].format, parameter[name[parent_link].name]]]]
break | keyword[def] identifier[compare_jsone_task_definition] ( identifier[parent_link] , identifier[rebuilt_definitions] ):
literal[string]
identifier[diffs] =[]
keyword[for] identifier[compare_definition] keyword[in] identifier[rebuilt_definitions] [ literal[string] ]:
keyword[if] literal[string] keyword[in] identifier[compare_definition] :
keyword[del] ( identifier[compare_definition] [ literal[string] ])
identifier[compare_definition] = identifier[remove_empty_keys] ( identifier[compare_definition] )
identifier[runtime_definition] = identifier[remove_empty_keys] ( identifier[parent_link] . identifier[task] )
identifier[diff] = identifier[list] ( identifier[dictdiffer] . identifier[diff] ( identifier[compare_definition] , identifier[runtime_definition] ))
keyword[if] identifier[diff] :
identifier[diffs] . identifier[append] ( identifier[pprint] . identifier[pformat] ( identifier[diff] ))
keyword[continue]
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[parent_link] . identifier[name] ))
keyword[break]
keyword[else] :
identifier[error_msg] = literal[string] . identifier[format] (
identifier[parent_link] . identifier[name] , identifier[parent_link] . identifier[task_id] , identifier[pprint] . identifier[pformat] ( identifier[diffs] )
)
identifier[log] . identifier[critical] ( identifier[error_msg] )
keyword[raise] identifier[CoTError] ( identifier[error_msg] ) | def compare_jsone_task_definition(parent_link, rebuilt_definitions):
"""Compare the json-e rebuilt task definition vs the runtime definition.
Args:
parent_link (LinkOfTrust): the parent link to test.
rebuilt_definitions (dict): the rebuilt task definitions.
Raises:
CoTError: on failure.
"""
diffs = []
for compare_definition in rebuilt_definitions['tasks']:
# Rebuilt decision tasks have an extra `taskId`; remove
if 'taskId' in compare_definition:
del compare_definition['taskId'] # depends on [control=['if'], data=['compare_definition']]
# remove key/value pairs where the value is empty, since json-e drops
# them instead of keeping them with a None/{}/[] value.
compare_definition = remove_empty_keys(compare_definition)
runtime_definition = remove_empty_keys(parent_link.task)
diff = list(dictdiffer.diff(compare_definition, runtime_definition))
if diff:
diffs.append(pprint.pformat(diff))
continue # depends on [control=['if'], data=[]]
log.info('{}: Good.'.format(parent_link.name))
break # depends on [control=['for'], data=['compare_definition']]
else:
error_msg = "{} {}: the runtime task doesn't match any rebuilt definition!\n{}".format(parent_link.name, parent_link.task_id, pprint.pformat(diffs))
log.critical(error_msg)
raise CoTError(error_msg) |
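A hedged driver for the comparison above. `LinkOfTrust` objects normally come from scriptworker's chain-of-trust machinery, so a stub with the three attributes the function reads stands in here.

class _StubLink:  # placeholder for a real LinkOfTrust
    name = 'decision'
    task_id = 'abc123'
    task = {'provisionerId': 'aws-provisioner-v1',
            'payload': {'command': ['true']}}

rebuilt = {'tasks': [{'taskId': 'abc123',
                      'provisionerId': 'aws-provisioner-v1',
                      'payload': {'command': ['true']}}]}

compare_jsone_task_definition(_StubLink(), rebuilt)  # logs "decision: Good."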
def tx2genedict(gtf, keep_version=False):
"""
produce a tx2gene dictionary from a GTF file
"""
d = {}
with open_gzipsafe(gtf) as in_handle:
for line in in_handle:
if "gene_id" not in line or "transcript_id" not in line:
continue
geneid = line.split("gene_id")[1].split(" ")[1]
geneid = _strip_non_alphanumeric(geneid)
txid = line.split("transcript_id")[1].split(" ")[1]
txid = _strip_non_alphanumeric(txid)
if keep_version and "transcript_version" in line:
txversion = line.split("transcript_version")[1].split(" ")[1]
txversion = _strip_non_alphanumeric(txversion)
txid += "." + txversion
if has_transcript_version(line) and not keep_version:
txid = _strip_feature_version(txid)
geneid = _strip_feature_version(geneid)
d[txid] = geneid
return d | def function[tx2genedict, parameter[gtf, keep_version]]:
constant[
produce a tx2gene dictionary from a GTF file
]
variable[d] assign[=] dictionary[[], []]
with call[name[open_gzipsafe], parameter[name[gtf]]] begin[:]
for taget[name[line]] in starred[name[in_handle]] begin[:]
if <ast.BoolOp object at 0x7da1b17907f0> begin[:]
continue
variable[geneid] assign[=] call[call[call[call[name[line].split, parameter[constant[gene_id]]]][constant[1]].split, parameter[constant[ ]]]][constant[1]]
variable[geneid] assign[=] call[name[_strip_non_alphanumeric], parameter[name[geneid]]]
variable[txid] assign[=] call[call[call[call[name[line].split, parameter[constant[transcript_id]]]][constant[1]].split, parameter[constant[ ]]]][constant[1]]
variable[txid] assign[=] call[name[_strip_non_alphanumeric], parameter[name[txid]]]
if <ast.BoolOp object at 0x7da1b1894400> begin[:]
variable[txversion] assign[=] call[call[call[call[name[line].split, parameter[constant[transcript_version]]]][constant[1]].split, parameter[constant[ ]]]][constant[1]]
variable[txversion] assign[=] call[name[_strip_non_alphanumeric], parameter[name[txversion]]]
<ast.AugAssign object at 0x7da1b1896440>
if <ast.BoolOp object at 0x7da1b1895d50> begin[:]
variable[txid] assign[=] call[name[_strip_feature_version], parameter[name[txid]]]
variable[geneid] assign[=] call[name[_strip_feature_version], parameter[name[geneid]]]
call[name[d]][name[txid]] assign[=] name[geneid]
return[name[d]] | keyword[def] identifier[tx2genedict] ( identifier[gtf] , identifier[keep_version] = keyword[False] ):
literal[string]
identifier[d] ={}
keyword[with] identifier[open_gzipsafe] ( identifier[gtf] ) keyword[as] identifier[in_handle] :
keyword[for] identifier[line] keyword[in] identifier[in_handle] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[line] keyword[or] literal[string] keyword[not] keyword[in] identifier[line] :
keyword[continue]
identifier[geneid] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]
identifier[geneid] = identifier[_strip_non_alphanumeric] ( identifier[geneid] )
identifier[txid] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]
identifier[txid] = identifier[_strip_non_alphanumeric] ( identifier[txid] )
keyword[if] identifier[keep_version] keyword[and] literal[string] keyword[in] identifier[line] :
identifier[txversion] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]
identifier[txversion] = identifier[_strip_non_alphanumeric] ( identifier[txversion] )
identifier[txid] += literal[string] + identifier[txversion]
keyword[if] identifier[has_transcript_version] ( identifier[line] ) keyword[and] keyword[not] identifier[keep_version] :
identifier[txid] = identifier[_strip_feature_version] ( identifier[txid] )
identifier[geneid] = identifier[_strip_feature_version] ( identifier[geneid] )
identifier[d] [ identifier[txid] ]= identifier[geneid]
keyword[return] identifier[d] | def tx2genedict(gtf, keep_version=False):
"""
produce a tx2gene dictionary from a GTF file
"""
d = {}
with open_gzipsafe(gtf) as in_handle:
for line in in_handle:
if 'gene_id' not in line or 'transcript_id' not in line:
continue # depends on [control=['if'], data=[]]
geneid = line.split('gene_id')[1].split(' ')[1]
geneid = _strip_non_alphanumeric(geneid)
txid = line.split('transcript_id')[1].split(' ')[1]
txid = _strip_non_alphanumeric(txid)
if keep_version and 'transcript_version' in line:
txversion = line.split('transcript_version')[1].split(' ')[1]
txversion = _strip_non_alphanumeric(txversion)
txid += '.' + txversion # depends on [control=['if'], data=[]]
if has_transcript_version(line) and (not keep_version):
txid = _strip_feature_version(txid)
geneid = _strip_feature_version(geneid) # depends on [control=['if'], data=[]]
d[txid] = geneid # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['in_handle']]
return d |
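An illustrative call; the GTF path is a placeholder and the IDs follow Ensembl conventions.

tx2gene = tx2genedict('annotation.gtf', keep_version=False)
# e.g. {'ENST00000456328': 'ENSG00000223972', ...}
gene_id = tx2gene.get('ENST00000456328')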
def display_status():
"""Display an OK or FAILED message for the context block."""
def print_status(msg, color):
"""Print the status message.
Args:
msg: The message to display (e.g. OK or FAILED).
color: The ANSI color code to use in displaying the message.
"""
print('\r' if sys.stdout.isatty() else '\t', end='')
print('{}{}[{color}{msg}{}]{}'.format(
Cursor.FORWARD(_ncols() - 8),
Style.BRIGHT,
Fore.RESET,
Style.RESET_ALL,
color=color,
msg=msg[:6].upper().center(6)
))
sys.stdout.flush()
try:
yield
except Status as e:
_LOGGER.debug(e)
print_status(e.msg, e.color)
if e.exc:
raise e.exc # pylint: disable=raising-bad-type
except (KeyboardInterrupt, EOFError):
raise
except Exception:
print_status('FAILED', Fore.RED)
raise
else:
print_status('OK', Fore.GREEN) | def function[display_status, parameter[]]:
constant[Display an OK or FAILED message for the context block.]
def function[print_status, parameter[msg, color]]:
constant[Print the status message.
Args:
msg: The message to display (e.g. OK or FAILED).
color: The ANSI color code to use in displaying the message.
]
call[name[print], parameter[<ast.IfExp object at 0x7da20c76e770>]]
call[name[print], parameter[call[constant[{}{}[{color}{msg}{}]{}].format, parameter[call[name[Cursor].FORWARD, parameter[binary_operation[call[name[_ncols], parameter[]] - constant[8]]]], name[Style].BRIGHT, name[Fore].RESET, name[Style].RESET_ALL]]]]
call[name[sys].stdout.flush, parameter[]]
<ast.Try object at 0x7da20c76e0b0> | keyword[def] identifier[display_status] ():
literal[string]
keyword[def] identifier[print_status] ( identifier[msg] , identifier[color] ):
literal[string]
identifier[print] ( literal[string] keyword[if] identifier[sys] . identifier[stdout] . identifier[isatty] () keyword[else] literal[string] , identifier[end] = literal[string] )
identifier[print] ( literal[string] . identifier[format] (
identifier[Cursor] . identifier[FORWARD] ( identifier[_ncols] ()- literal[int] ),
identifier[Style] . identifier[BRIGHT] ,
identifier[Fore] . identifier[RESET] ,
identifier[Style] . identifier[RESET_ALL] ,
identifier[color] = identifier[color] ,
identifier[msg] = identifier[msg] [: literal[int] ]. identifier[upper] (). identifier[center] ( literal[int] )
))
identifier[sys] . identifier[stdout] . identifier[flush] ()
keyword[try] :
keyword[yield]
keyword[except] identifier[Status] keyword[as] identifier[e] :
identifier[_LOGGER] . identifier[debug] ( identifier[e] )
identifier[print_status] ( identifier[e] . identifier[msg] , identifier[e] . identifier[color] )
keyword[if] identifier[e] . identifier[exc] :
keyword[raise] identifier[e] . identifier[exc]
keyword[except] ( identifier[KeyboardInterrupt] , identifier[EOFError] ):
keyword[raise]
keyword[except] identifier[Exception] :
identifier[print_status] ( literal[string] , identifier[Fore] . identifier[RED] )
keyword[raise]
keyword[else] :
identifier[print_status] ( literal[string] , identifier[Fore] . identifier[GREEN] ) | def display_status():
"""Display an OK or FAILED message for the context block."""
def print_status(msg, color):
"""Print the status message.
Args:
msg: The message to display (e.g. OK or FAILED).
color: The ANSI color code to use in displaying the message.
"""
print('\r' if sys.stdout.isatty() else '\t', end='')
print('{}{}[{color}{msg}{}]{}'.format(Cursor.FORWARD(_ncols() - 8), Style.BRIGHT, Fore.RESET, Style.RESET_ALL, color=color, msg=msg[:6].upper().center(6)))
sys.stdout.flush()
try:
yield # depends on [control=['try'], data=[]]
except Status as e:
_LOGGER.debug(e)
print_status(e.msg, e.color)
if e.exc:
raise e.exc # pylint: disable=raising-bad-type # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']]
except (KeyboardInterrupt, EOFError):
raise # depends on [control=['except'], data=[]]
except Exception:
print_status('FAILED', Fore.RED)
raise # depends on [control=['except'], data=[]]
else:
print_status('OK', Fore.GREEN) |
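A usage sketch: the generator above is presumably wrapped as a context manager elsewhere in the module, so that wrapping is assumed here.

import contextlib

status_block = contextlib.contextmanager(display_status)

with status_block():
    run_deploy_step()  # placeholder for any work; prints [  OK  ] or [FAILED]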
def _get_observation(self):
"""
Returns an OrderedDict containing observations [(name_string, np.array), ...].
Important keys:
robot-state: contains robot-centric information.
object-state: requires @self.use_object_obs to be True.
contains object-centric information.
image: requires @self.use_camera_obs to be True.
contains a rendered frame from the simulation.
depth: requires @self.use_camera_obs and @self.camera_depth to be True.
contains a rendered depth map from the simulation
"""
di = super()._get_observation()
if self.use_camera_obs:
camera_obs = self.sim.render(
camera_name=self.camera_name,
width=self.camera_width,
height=self.camera_height,
depth=self.camera_depth,
)
if self.camera_depth:
di["image"], di["depth"] = camera_obs
else:
di["image"] = camera_obs
# low-level object information
if self.use_object_obs:
# position and rotation of the first cube
cubeA_pos = np.array(self.sim.data.body_xpos[self.cubeA_body_id])
cubeA_quat = convert_quat(
np.array(self.sim.data.body_xquat[self.cubeA_body_id]), to="xyzw"
)
di["cubeA_pos"] = cubeA_pos
di["cubeA_quat"] = cubeA_quat
# position and rotation of the second cube
cubeB_pos = np.array(self.sim.data.body_xpos[self.cubeB_body_id])
cubeB_quat = convert_quat(
np.array(self.sim.data.body_xquat[self.cubeB_body_id]), to="xyzw"
)
di["cubeB_pos"] = cubeB_pos
di["cubeB_quat"] = cubeB_quat
# relative positions between gripper and cubes
gripper_site_pos = np.array(self.sim.data.site_xpos[self.eef_site_id])
di["gripper_to_cubeA"] = gripper_site_pos - cubeA_pos
di["gripper_to_cubeB"] = gripper_site_pos - cubeB_pos
di["cubeA_to_cubeB"] = cubeA_pos - cubeB_pos
di["object-state"] = np.concatenate(
[
cubeA_pos,
cubeA_quat,
cubeB_pos,
cubeB_quat,
di["gripper_to_cubeA"],
di["gripper_to_cubeB"],
di["cubeA_to_cubeB"],
]
)
return di | def function[_get_observation, parameter[self]]:
constant[
Returns an OrderedDict containing observations [(name_string, np.array), ...].
Important keys:
robot-state: contains robot-centric information.
object-state: requires @self.use_object_obs to be True.
contains object-centric information.
image: requires @self.use_camera_obs to be True.
contains a rendered frame from the simulation.
depth: requires @self.use_camera_obs and @self.camera_depth to be True.
contains a rendered depth map from the simulation
]
variable[di] assign[=] call[call[name[super], parameter[]]._get_observation, parameter[]]
if name[self].use_camera_obs begin[:]
variable[camera_obs] assign[=] call[name[self].sim.render, parameter[]]
if name[self].camera_depth begin[:]
<ast.Tuple object at 0x7da204564cd0> assign[=] name[camera_obs]
if name[self].use_object_obs begin[:]
variable[cubeA_pos] assign[=] call[name[np].array, parameter[call[name[self].sim.data.body_xpos][name[self].cubeA_body_id]]]
variable[cubeA_quat] assign[=] call[name[convert_quat], parameter[call[name[np].array, parameter[call[name[self].sim.data.body_xquat][name[self].cubeA_body_id]]]]]
call[name[di]][constant[cubeA_pos]] assign[=] name[cubeA_pos]
call[name[di]][constant[cubeA_quat]] assign[=] name[cubeA_quat]
variable[cubeB_pos] assign[=] call[name[np].array, parameter[call[name[self].sim.data.body_xpos][name[self].cubeB_body_id]]]
variable[cubeB_quat] assign[=] call[name[convert_quat], parameter[call[name[np].array, parameter[call[name[self].sim.data.body_xquat][name[self].cubeB_body_id]]]]]
call[name[di]][constant[cubeB_pos]] assign[=] name[cubeB_pos]
call[name[di]][constant[cubeB_quat]] assign[=] name[cubeB_quat]
variable[gripper_site_pos] assign[=] call[name[np].array, parameter[call[name[self].sim.data.site_xpos][name[self].eef_site_id]]]
call[name[di]][constant[gripper_to_cubeA]] assign[=] binary_operation[name[gripper_site_pos] - name[cubeA_pos]]
call[name[di]][constant[gripper_to_cubeB]] assign[=] binary_operation[name[gripper_site_pos] - name[cubeB_pos]]
call[name[di]][constant[cubeA_to_cubeB]] assign[=] binary_operation[name[cubeA_pos] - name[cubeB_pos]]
call[name[di]][constant[object-state]] assign[=] call[name[np].concatenate, parameter[list[[<ast.Name object at 0x7da18eb57bb0>, <ast.Name object at 0x7da20e954ac0>, <ast.Name object at 0x7da20e954ee0>, <ast.Name object at 0x7da20e955cf0>, <ast.Subscript object at 0x7da20e9568c0>, <ast.Subscript object at 0x7da20e9556c0>, <ast.Subscript object at 0x7da20e955150>]]]]
return[name[di]] | keyword[def] identifier[_get_observation] ( identifier[self] ):
literal[string]
identifier[di] = identifier[super] (). identifier[_get_observation] ()
keyword[if] identifier[self] . identifier[use_camera_obs] :
identifier[camera_obs] = identifier[self] . identifier[sim] . identifier[render] (
identifier[camera_name] = identifier[self] . identifier[camera_name] ,
identifier[width] = identifier[self] . identifier[camera_width] ,
identifier[height] = identifier[self] . identifier[camera_height] ,
identifier[depth] = identifier[self] . identifier[camera_depth] ,
)
keyword[if] identifier[self] . identifier[camera_depth] :
identifier[di] [ literal[string] ], identifier[di] [ literal[string] ]= identifier[camera_obs]
keyword[else] :
identifier[di] [ literal[string] ]= identifier[camera_obs]
keyword[if] identifier[self] . identifier[use_object_obs] :
identifier[cubeA_pos] = identifier[np] . identifier[array] ( identifier[self] . identifier[sim] . identifier[data] . identifier[body_xpos] [ identifier[self] . identifier[cubeA_body_id] ])
identifier[cubeA_quat] = identifier[convert_quat] (
identifier[np] . identifier[array] ( identifier[self] . identifier[sim] . identifier[data] . identifier[body_xquat] [ identifier[self] . identifier[cubeA_body_id] ]), identifier[to] = literal[string]
)
identifier[di] [ literal[string] ]= identifier[cubeA_pos]
identifier[di] [ literal[string] ]= identifier[cubeA_quat]
identifier[cubeB_pos] = identifier[np] . identifier[array] ( identifier[self] . identifier[sim] . identifier[data] . identifier[body_xpos] [ identifier[self] . identifier[cubeB_body_id] ])
identifier[cubeB_quat] = identifier[convert_quat] (
identifier[np] . identifier[array] ( identifier[self] . identifier[sim] . identifier[data] . identifier[body_xquat] [ identifier[self] . identifier[cubeB_body_id] ]), identifier[to] = literal[string]
)
identifier[di] [ literal[string] ]= identifier[cubeB_pos]
identifier[di] [ literal[string] ]= identifier[cubeB_quat]
identifier[gripper_site_pos] = identifier[np] . identifier[array] ( identifier[self] . identifier[sim] . identifier[data] . identifier[site_xpos] [ identifier[self] . identifier[eef_site_id] ])
identifier[di] [ literal[string] ]= identifier[gripper_site_pos] - identifier[cubeA_pos]
identifier[di] [ literal[string] ]= identifier[gripper_site_pos] - identifier[cubeB_pos]
identifier[di] [ literal[string] ]= identifier[cubeA_pos] - identifier[cubeB_pos]
identifier[di] [ literal[string] ]= identifier[np] . identifier[concatenate] (
[
identifier[cubeA_pos] ,
identifier[cubeA_quat] ,
identifier[cubeB_pos] ,
identifier[cubeB_quat] ,
identifier[di] [ literal[string] ],
identifier[di] [ literal[string] ],
identifier[di] [ literal[string] ],
]
)
keyword[return] identifier[di] | def _get_observation(self):
"""
Returns an OrderedDict containing observations [(name_string, np.array), ...].
Important keys:
robot-state: contains robot-centric information.
object-state: requires @self.use_object_obs to be True.
contains object-centric information.
image: requires @self.use_camera_obs to be True.
contains a rendered frame from the simulation.
depth: requires @self.use_camera_obs and @self.camera_depth to be True.
contains a rendered depth map from the simulation
"""
di = super()._get_observation()
if self.use_camera_obs:
camera_obs = self.sim.render(camera_name=self.camera_name, width=self.camera_width, height=self.camera_height, depth=self.camera_depth)
if self.camera_depth:
(di['image'], di['depth']) = camera_obs # depends on [control=['if'], data=[]]
else:
di['image'] = camera_obs # depends on [control=['if'], data=[]]
# low-level object information
if self.use_object_obs:
# position and rotation of the first cube
cubeA_pos = np.array(self.sim.data.body_xpos[self.cubeA_body_id])
cubeA_quat = convert_quat(np.array(self.sim.data.body_xquat[self.cubeA_body_id]), to='xyzw')
di['cubeA_pos'] = cubeA_pos
di['cubeA_quat'] = cubeA_quat
# position and rotation of the second cube
cubeB_pos = np.array(self.sim.data.body_xpos[self.cubeB_body_id])
cubeB_quat = convert_quat(np.array(self.sim.data.body_xquat[self.cubeB_body_id]), to='xyzw')
di['cubeB_pos'] = cubeB_pos
di['cubeB_quat'] = cubeB_quat
# relative positions between gripper and cubes
gripper_site_pos = np.array(self.sim.data.site_xpos[self.eef_site_id])
di['gripper_to_cubeA'] = gripper_site_pos - cubeA_pos
di['gripper_to_cubeB'] = gripper_site_pos - cubeB_pos
di['cubeA_to_cubeB'] = cubeA_pos - cubeB_pos
di['object-state'] = np.concatenate([cubeA_pos, cubeA_quat, cubeB_pos, cubeB_quat, di['gripper_to_cubeA'], di['gripper_to_cubeB'], di['cubeA_to_cubeB']]) # depends on [control=['if'], data=[]]
return di |
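A hypothetical read of the returned dict, assuming `env` is a robosuite-style stacking environment with camera observations enabled.

obs = env._get_observation()
print(obs['cubeA_pos'])         # (3,) world position of cube A
print(obs['gripper_to_cubeB'])  # (3,) gripper-to-cube relative vector
if 'image' in obs:
    frame = obs['image']        # rendered H x W x 3 camera frame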
def conv_stack(name, x, mid_channels, output_channels, dilations=None,
activation="relu", dropout=0.0):
"""3-layer convolutional stack.
Args:
name: variable scope.
x: 5-D Tensor.
mid_channels: Number of output channels of the first layer.
output_channels: Number of output channels.
dilations: Dilations to apply in the first 3x3 layer and the last 3x3 layer.
By default, apply no dilations.
activation: relu or gatu.
If relu, the second layer is relu(W*x)
If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)
dropout: float, dropout rate applied in the conv block (default 0.0)
Returns:
output: output of 3 layer conv network.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x = conv_block("conv_block", x, mid_channels=mid_channels,
dilations=dilations, activation=activation,
dropout=dropout)
# Final layer.
x = conv("zeros", x, apply_actnorm=False, conv_init="zeros",
output_channels=output_channels, dilations=dilations)
return x | def function[conv_stack, parameter[name, x, mid_channels, output_channels, dilations, activation, dropout]]:
constant[3-layer convolutional stack.
Args:
name: variable scope.
x: 5-D Tensor.
mid_channels: Number of output channels of the first layer.
output_channels: Number of output channels.
dilations: Dilations to apply in the first 3x3 layer and the last 3x3 layer.
By default, apply no dilations.
activation: relu or gatu.
If relu, the second layer is relu(W*x)
If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)
dropout: float, 0.0
Returns:
output: output of 3 layer conv network.
]
with call[name[tf].variable_scope, parameter[name[name]]] begin[:]
variable[x] assign[=] call[name[conv_block], parameter[constant[conv_block], name[x]]]
variable[x] assign[=] call[name[conv], parameter[constant[zeros], name[x]]]
return[name[x]] | keyword[def] identifier[conv_stack] ( identifier[name] , identifier[x] , identifier[mid_channels] , identifier[output_channels] , identifier[dilations] = keyword[None] ,
identifier[activation] = literal[string] , identifier[dropout] = literal[int] ):
literal[string]
keyword[with] identifier[tf] . identifier[variable_scope] ( identifier[name] , identifier[reuse] = identifier[tf] . identifier[AUTO_REUSE] ):
identifier[x] = identifier[conv_block] ( literal[string] , identifier[x] , identifier[mid_channels] = identifier[mid_channels] ,
identifier[dilations] = identifier[dilations] , identifier[activation] = identifier[activation] ,
identifier[dropout] = identifier[dropout] )
identifier[x] = identifier[conv] ( literal[string] , identifier[x] , identifier[apply_actnorm] = keyword[False] , identifier[conv_init] = literal[string] ,
identifier[output_channels] = identifier[output_channels] , identifier[dilations] = identifier[dilations] )
keyword[return] identifier[x] | def conv_stack(name, x, mid_channels, output_channels, dilations=None, activation='relu', dropout=0.0):
"""3-layer convolutional stack.
Args:
name: variable scope.
x: 5-D Tensor.
mid_channels: Number of output channels of the first layer.
output_channels: Number of output channels.
dilations: Dilations to apply in the first 3x3 layer and the last 3x3 layer.
By default, apply no dilations.
activation: relu or gatu.
If relu, the second layer is relu(W*x)
If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)
dropout: float, dropout rate applied in the conv block (default 0.0)
Returns:
output: output of 3 layer conv network.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x = conv_block('conv_block', x, mid_channels=mid_channels, dilations=dilations, activation=activation, dropout=dropout)
# Final layer.
x = conv('zeros', x, apply_actnorm=False, conv_init='zeros', output_channels=output_channels, dilations=dilations) # depends on [control=['with'], data=[]]
return x |
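A TF1-style graph sketch matching the `tf.variable_scope` usage above; the 5-D input shape is an assumption.

import tensorflow as tf  # TF1 graph mode, as implied by variable scopes

x = tf.placeholder(tf.float32, [None, 4, 32, 32, 16])
y = conv_stack('revnet_stack', x, mid_channels=64, output_channels=32,
               activation='gatu', dropout=0.1)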
def set_ttl(self, key, ttl):
""" Sets time to live for @key to @ttl seconds
-> #bool True if the timeout was set
"""
return self._client.expire(self.get_key(key), ttl) | def function[set_ttl, parameter[self, key, ttl]]:
constant[ Sets time to live for @key to @ttl seconds
-> #bool True if the timeout was set
]
return[call[name[self]._client.expire, parameter[call[name[self].get_key, parameter[name[key]]], name[ttl]]]] | keyword[def] identifier[set_ttl] ( identifier[self] , identifier[key] , identifier[ttl] ):
literal[string]
keyword[return] identifier[self] . identifier[_client] . identifier[expire] ( identifier[self] . identifier[get_key] ( identifier[key] ), identifier[ttl] ) | def set_ttl(self, key, ttl):
""" Sets time to live for @key to @ttl seconds
-> #bool True if the timeout was set
"""
return self._client.expire(self.get_key(key), ttl) |
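Assuming `cache` is an instance of the wrapper class above, backed by redis-py:

cache.set_ttl('session:42', 3600)  # key expires in one hour; True if the timeout was set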
def create(cls, cli, management_address,
local_username=None, local_password=None,
remote_username=None, remote_password=None,
connection_type=None):
"""
Configures a remote system for remote replication.
:param cls: this class.
:param cli: the rest client.
:param management_address: the management IP address of the remote
system.
:param local_username: administrative username of local system.
:param local_password: administrative password of local system.
:param remote_username: administrative username of remote system.
:param remote_password: administrative password of remote system.
:param connection_type: `ReplicationCapabilityEnum`. Replication
connection type to the remote system.
:return: the newly created remote system.
"""
req_body = cli.make_body(
managementAddress=management_address, localUsername=local_username,
localPassword=local_password, remoteUsername=remote_username,
remotePassword=remote_password, connectionType=connection_type)
resp = cli.post(cls().resource_class, **req_body)
resp.raise_if_err()
return cls.get(cli, resp.resource_id) | def function[create, parameter[cls, cli, management_address, local_username, local_password, remote_username, remote_password, connection_type]]:
constant[
Configures a remote system for remote replication.
:param cls: this class.
:param cli: the rest client.
:param management_address: the management IP address of the remote
system.
:param local_username: administrative username of local system.
:param local_password: administrative password of local system.
:param remote_username: administrative username of remote system.
:param remote_password: administrative password of remote system.
:param connection_type: `ReplicationCapabilityEnum`. Replication
connection type to the remote system.
:return: the newly created remote system.
]
variable[req_body] assign[=] call[name[cli].make_body, parameter[]]
variable[resp] assign[=] call[name[cli].post, parameter[call[name[cls], parameter[]].resource_class]]
call[name[resp].raise_if_err, parameter[]]
return[call[name[cls].get, parameter[name[cli], name[resp].resource_id]]] | keyword[def] identifier[create] ( identifier[cls] , identifier[cli] , identifier[management_address] ,
identifier[local_username] = keyword[None] , identifier[local_password] = keyword[None] ,
identifier[remote_username] = keyword[None] , identifier[remote_password] = keyword[None] ,
identifier[connection_type] = keyword[None] ):
literal[string]
identifier[req_body] = identifier[cli] . identifier[make_body] (
identifier[managementAddress] = identifier[management_address] , identifier[localUsername] = identifier[local_username] ,
identifier[localPassword] = identifier[local_password] , identifier[remoteUsername] = identifier[remote_username] ,
identifier[remotePassword] = identifier[remote_password] , identifier[connectionType] = identifier[connection_type] )
identifier[resp] = identifier[cli] . identifier[post] ( identifier[cls] (). identifier[resource_class] ,** identifier[req_body] )
identifier[resp] . identifier[raise_if_err] ()
keyword[return] identifier[cls] . identifier[get] ( identifier[cli] , identifier[resp] . identifier[resource_id] ) | def create(cls, cli, management_address, local_username=None, local_password=None, remote_username=None, remote_password=None, connection_type=None):
"""
Configures a remote system for remote replication.
:param cls: this class.
:param cli: the rest client.
:param management_address: the management IP address of the remote
system.
:param local_username: administrative username of local system.
:param local_password: administrative password of local system.
:param remote_username: administrative username of remote system.
:param remote_password: administrative password of remote system.
:param connection_type: `ReplicationCapabilityEnum`. Replication
connection type to the remote system.
:return: the newly created remote system.
"""
req_body = cli.make_body(managementAddress=management_address, localUsername=local_username, localPassword=local_password, remoteUsername=remote_username, remotePassword=remote_password, connectionType=connection_type)
resp = cli.post(cls().resource_class, **req_body)
resp.raise_if_err()
return cls.get(cli, resp.resource_id) |
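Illustrative only: the REST client, the concrete class name, and the enum member are placeholders for what the surrounding storops-style library provides.

remote = RemoteSystem.create(  # hypothetical concrete subclass
    cli,
    management_address='10.244.223.61',
    local_username='admin', local_password='local-pw',
    remote_username='admin', remote_password='remote-pw',
    connection_type=ReplicationCapabilityEnum.SYNC)  # enum member assumed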
def transform_qubits(self: TSelf_Operation,
func: Callable[[Qid], Qid]) -> TSelf_Operation:
"""Returns the same operation, but with different qubits.
Args:
func: The function to use to turn each current qubit into a desired
new qubit.
Returns:
The receiving operation but with qubits transformed by the given
function.
"""
return self.with_qubits(*(func(q) for q in self.qubits)) | def function[transform_qubits, parameter[self, func]]:
constant[Returns the same operation, but with different qubits.
Args:
func: The function to use to turn each current qubit into a desired
new qubit.
Returns:
The receiving operation but with qubits transformed by the given
function.
]
return[call[name[self].with_qubits, parameter[<ast.Starred object at 0x7da1b1c618a0>]]] | keyword[def] identifier[transform_qubits] ( identifier[self] : identifier[TSelf_Operation] ,
identifier[func] : identifier[Callable] [[ identifier[Qid] ], identifier[Qid] ])-> identifier[TSelf_Operation] :
literal[string]
keyword[return] identifier[self] . identifier[with_qubits] (*( identifier[func] ( identifier[q] ) keyword[for] identifier[q] keyword[in] identifier[self] . identifier[qubits] )) | def transform_qubits(self: TSelf_Operation, func: Callable[[Qid], Qid]) -> TSelf_Operation:
"""Returns the same operation, but with different qubits.
Args:
func: The function to use to turn each current qubit into a desired
new qubit.
Returns:
The receiving operation but with qubits transformed by the given
function.
"""
return self.with_qubits(*(func(q) for q in self.qubits)) |
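The method mirrors cirq's `Operation.transform_qubits`, so a cirq example illustrates the remapping:

import cirq

q0, q1 = cirq.LineQubit.range(2)
op = cirq.CNOT(q0, q1)
shifted = op.transform_qubits(lambda q: q + 10)
# -> CNOT between LineQubit(10) and LineQubit(11)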
def _objective_decorator(func):
"""Decorate an objective function
Converts an objective function using the typical sklearn metrics
signature so that it is usable with ``xgboost.training.train``
Parameters
----------
func: callable
Expects a callable with signature ``func(y_true, y_pred)``:
y_true: array_like of shape [n_samples]
The target values
y_pred: array_like of shape [n_samples]
The predicted values
Returns
-------
new_func: callable
The new objective function as expected by ``xgboost.training.train``.
The signature is ``new_func(preds, dmatrix)``:
preds: array_like, shape [n_samples]
The predicted values
dmatrix: ``DMatrix``
The training set from which the labels will be extracted using
``dmatrix.get_label()``
"""
def inner(preds, dmatrix):
"""internal function"""
labels = dmatrix.get_label()
return func(labels, preds)
return inner | def function[_objective_decorator, parameter[func]]:
constant[Decorate an objective function
Converts an objective function using the typical sklearn metrics
signature so that it is usable with ``xgboost.training.train``
Parameters
----------
func: callable
Expects a callable with signature ``func(y_true, y_pred)``:
y_true: array_like of shape [n_samples]
The target values
y_pred: array_like of shape [n_samples]
The predicted values
Returns
-------
new_func: callable
The new objective function as expected by ``xgboost.training.train``.
The signature is ``new_func(preds, dmatrix)``:
preds: array_like, shape [n_samples]
The predicted values
dmatrix: ``DMatrix``
The training set from which the labels will be extracted using
``dmatrix.get_label()``
]
def function[inner, parameter[preds, dmatrix]]:
constant[internal function]
variable[labels] assign[=] call[name[dmatrix].get_label, parameter[]]
return[call[name[func], parameter[name[labels], name[preds]]]]
return[name[inner]] | keyword[def] identifier[_objective_decorator] ( identifier[func] ):
literal[string]
keyword[def] identifier[inner] ( identifier[preds] , identifier[dmatrix] ):
literal[string]
identifier[labels] = identifier[dmatrix] . identifier[get_label] ()
keyword[return] identifier[func] ( identifier[labels] , identifier[preds] )
keyword[return] identifier[inner] | def _objective_decorator(func):
"""Decorate an objective function
Converts an objective function using the typical sklearn metrics
signature so that it is usable with ``xgboost.training.train``
Parameters
----------
func: callable
Expects a callable with signature ``func(y_true, y_pred)``:
y_true: array_like of shape [n_samples]
The target values
y_pred: array_like of shape [n_samples]
The predicted values
Returns
-------
new_func: callable
The new objective function as expected by ``xgboost.training.train``.
The signature is ``new_func(preds, dmatrix)``:
preds: array_like, shape [n_samples]
The predicted values
dmatrix: ``DMatrix``
The training set from which the labels will be extracted using
``dmatrix.get_label()``
"""
def inner(preds, dmatrix):
"""internal function"""
labels = dmatrix.get_label()
return func(labels, preds)
return inner |
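A sketch of wrapping a custom loss for xgboost. The pseudo-Huber gradient and hessian below are the standard closed forms for delta = 1, but treat the whole snippet as illustrative.

import numpy as np

def pseudo_huber(y_true, y_pred):
    d = y_pred - y_true
    grad = d / np.sqrt(1.0 + d * d)      # first derivative of sqrt(1+d^2)-1
    hess = 1.0 / (1.0 + d * d) ** 1.5    # second derivative
    return grad, hess

xgb_objective = _objective_decorator(pseudo_huber)
# xgb_objective(preds, dtrain) now has the (preds, DMatrix) signature
# that xgboost.train expects for its `obj` argument.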
def retrieve_all_quiz_reports(self, quiz_id, course_id, includes_all_versions=None):
"""
Retrieve all quiz reports.
Returns a list of all available reports.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - quiz_id
"""ID"""
path["quiz_id"] = quiz_id
# OPTIONAL - includes_all_versions
"""Whether to retrieve reports that consider all the submissions or only
the most recent. Defaults to false, ignored for item_analysis reports."""
if includes_all_versions is not None:
params["includes_all_versions"] = includes_all_versions
self.logger.debug("GET /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports".format(**path), data=data, params=params, all_pages=True) | def function[retrieve_all_quiz_reports, parameter[self, quiz_id, course_id, includes_all_versions]]:
constant[
Retrieve all quiz reports.
Returns a list of all available reports.
]
variable[path] assign[=] dictionary[[], []]
variable[data] assign[=] dictionary[[], []]
variable[params] assign[=] dictionary[[], []]
constant[ID]
call[name[path]][constant[course_id]] assign[=] name[course_id]
constant[ID]
call[name[path]][constant[quiz_id]] assign[=] name[quiz_id]
constant[Whether to retrieve reports that consider all the submissions or only
the most recent. Defaults to false, ignored for item_analysis reports.]
if compare[name[includes_all_versions] is_not constant[None]] begin[:]
call[name[params]][constant[includes_all_versions]] assign[=] name[includes_all_versions]
call[name[self].logger.debug, parameter[call[constant[GET /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}].format, parameter[]]]]
return[call[name[self].generic_request, parameter[constant[GET], call[constant[/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports].format, parameter[]]]]] | keyword[def] identifier[retrieve_all_quiz_reports] ( identifier[self] , identifier[quiz_id] , identifier[course_id] , identifier[includes_all_versions] = keyword[None] ):
literal[string]
identifier[path] ={}
identifier[data] ={}
identifier[params] ={}
literal[string]
identifier[path] [ literal[string] ]= identifier[course_id]
literal[string]
identifier[path] [ literal[string] ]= identifier[quiz_id]
literal[string]
keyword[if] identifier[includes_all_versions] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[includes_all_versions]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[params] = identifier[params] , identifier[data] = identifier[data] ,** identifier[path] ))
keyword[return] identifier[self] . identifier[generic_request] ( literal[string] , literal[string] . identifier[format] (** identifier[path] ), identifier[data] = identifier[data] , identifier[params] = identifier[params] , identifier[all_pages] = keyword[True] ) | def retrieve_all_quiz_reports(self, quiz_id, course_id, includes_all_versions=None):
"""
Retrieve all quiz reports.
Returns a list of all available reports.
"""
path = {}
data = {}
params = {} # REQUIRED - PATH - course_id
'ID'
path['course_id'] = course_id # REQUIRED - PATH - quiz_id
'ID'
path['quiz_id'] = quiz_id # OPTIONAL - includes_all_versions
'Whether to retrieve reports that consider all the submissions or only\n the most recent. Defaults to false, ignored for item_analysis reports.'
if includes_all_versions is not None:
params['includes_all_versions'] = includes_all_versions # depends on [control=['if'], data=['includes_all_versions']]
self.logger.debug('GET /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}'.format(params=params, data=data, **path))
return self.generic_request('GET', '/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports'.format(**path), data=data, params=params, all_pages=True) |
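A hypothetical call through a canvasapi-style client that exposes the wrapper above; the report field names follow the Canvas REST API.

reports = client.retrieve_all_quiz_reports(quiz_id=77, course_id=1234,
                                           includes_all_versions=True)
for report in reports:
    print(report['id'], report['report_type'])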
def download_mail_attachments(self,
name,
local_output_directory,
mail_folder='INBOX',
check_regex=False,
latest_only=False,
not_found_mode='raise'):
"""
Downloads mail's attachments in the mail folder by its name to the local directory.
:param name: The name of the attachment that will be downloaded.
:type name: str
:param local_output_directory: The output directory on the local machine
where the files will be downloaded to.
:type local_output_directory: str
:param mail_folder: The mail folder where to look at.
:type mail_folder: str
:param check_regex: Checks the name for a regular expression.
:type check_regex: bool
:param latest_only: If set to True it will only download
the first matched attachment.
:type latest_only: bool
:param not_found_mode: Specify what should happen if no attachment has been found.
Supported values are 'raise', 'warn' and 'ignore'.
If it is set to 'raise' it will raise an exception,
if set to 'warn' it will only print a warning and
if set to 'ignore' it won't notify you at all.
:type not_found_mode: str
"""
mail_attachments = self._retrieve_mails_attachments_by_name(name,
mail_folder,
check_regex,
latest_only)
if not mail_attachments:
self._handle_not_found_mode(not_found_mode)
self._create_files(mail_attachments, local_output_directory) | def function[download_mail_attachments, parameter[self, name, local_output_directory, mail_folder, check_regex, latest_only, not_found_mode]]:
constant[
Downloads mail's attachments in the mail folder by its name to the local directory.
:param name: The name of the attachment that will be downloaded.
:type name: str
:param local_output_directory: The output directory on the local machine
where the files will be downloaded to.
:type local_output_directory: str
:param mail_folder: The mail folder where to look at.
:type mail_folder: str
:param check_regex: Checks the name for a regular expression.
:type check_regex: bool
:param latest_only: If set to True it will only download
the first matched attachment.
:type latest_only: bool
:param not_found_mode: Specify what should happen if no attachment has been found.
Supported values are 'raise', 'warn' and 'ignore'.
If it is set to 'raise' it will raise an exception,
if set to 'warn' it will only print a warning and
if set to 'ignore' it won't notify you at all.
:type not_found_mode: str
]
variable[mail_attachments] assign[=] call[name[self]._retrieve_mails_attachments_by_name, parameter[name[name], name[mail_folder], name[check_regex], name[latest_only]]]
if <ast.UnaryOp object at 0x7da20c6c55a0> begin[:]
call[name[self]._handle_not_found_mode, parameter[name[not_found_mode]]]
call[name[self]._create_files, parameter[name[mail_attachments], name[local_output_directory]]] | keyword[def] identifier[download_mail_attachments] ( identifier[self] ,
identifier[name] ,
identifier[local_output_directory] ,
identifier[mail_folder] = literal[string] ,
identifier[check_regex] = keyword[False] ,
identifier[latest_only] = keyword[False] ,
identifier[not_found_mode] = literal[string] ):
literal[string]
identifier[mail_attachments] = identifier[self] . identifier[_retrieve_mails_attachments_by_name] ( identifier[name] ,
identifier[mail_folder] ,
identifier[check_regex] ,
identifier[latest_only] )
keyword[if] keyword[not] identifier[mail_attachments] :
identifier[self] . identifier[_handle_not_found_mode] ( identifier[not_found_mode] )
identifier[self] . identifier[_create_files] ( identifier[mail_attachments] , identifier[local_output_directory] ) | def download_mail_attachments(self, name, local_output_directory, mail_folder='INBOX', check_regex=False, latest_only=False, not_found_mode='raise'):
"""
Downloads mail's attachments in the mail folder by its name to the local directory.
:param name: The name of the attachment that will be downloaded.
:type name: str
:param local_output_directory: The output directory on the local machine
where the files will be downloaded to.
:type local_output_directory: str
:param mail_folder: The mail folder where to look at.
:type mail_folder: str
:param check_regex: Checks the name for a regular expression.
:type check_regex: bool
:param latest_only: If set to True it will only download
the first matched attachment.
:type latest_only: bool
:param not_found_mode: Specify what should happen if no attachment has been found.
Supported values are 'raise', 'warn' and 'ignore'.
If it is set to 'raise' it will raise an exception,
if set to 'warn' it will only print a warning and
if set to 'ignore' it won't notify you at all.
:type not_found_mode: str
"""
mail_attachments = self._retrieve_mails_attachments_by_name(name, mail_folder, check_regex, latest_only)
if not mail_attachments:
self._handle_not_found_mode(not_found_mode) # depends on [control=['if'], data=[]]
self._create_files(mail_attachments, local_output_directory) |
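A minimal usage sketch for the method above, assuming it belongs to Airflow's ImapHook; the import path, connection id, and file paths are illustrative, not confirmed by this snippet:
# Hedged sketch: the ImapHook import path and conn id are assumptions.
from airflow.providers.imap.hooks.imap import ImapHook

with ImapHook(imap_conn_id='imap_default') as hook:
    # Download every *.csv attachment from INBOX, matching the name as a
    # regex and only warning (not raising) when nothing matches.
    hook.download_mail_attachments(
        name=r'.*\.csv',
        local_output_directory='/tmp/attachments',
        mail_folder='INBOX',
        check_regex=True,
        latest_only=False,
        not_found_mode='warn',
    )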
def plot_fit(self, **kwargs):
"""
Plots the fit of the model against the data
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
plt.figure(figsize=figsize)
date_index = self.index[self.ar:self.data.shape[0]]
mu, Y = self._model(self.latent_variables.get_z_values())
plt.plot(date_index,Y,label='Data')
plt.plot(date_index,mu,label='Filter',c='black')
plt.title(self.data_name)
plt.legend(loc=2)
plt.show() | def function[plot_fit, parameter[self]]:
constant[
Plots the fit of the model against the data
]
import module[matplotlib.pyplot] as alias[plt]
import module[seaborn] as alias[sns]
variable[figsize] assign[=] call[name[kwargs].get, parameter[constant[figsize], tuple[[<ast.Constant object at 0x7da18f58d810>, <ast.Constant object at 0x7da18f58f490>]]]]
call[name[plt].figure, parameter[]]
variable[date_index] assign[=] call[name[self].index][<ast.Slice object at 0x7da18f58d2d0>]
<ast.Tuple object at 0x7da18f58e0e0> assign[=] call[name[self]._model, parameter[call[name[self].latent_variables.get_z_values, parameter[]]]]
call[name[plt].plot, parameter[name[date_index], name[Y]]]
call[name[plt].plot, parameter[name[date_index], name[mu]]]
call[name[plt].title, parameter[name[self].data_name]]
call[name[plt].legend, parameter[]]
call[name[plt].show, parameter[]] | keyword[def] identifier[plot_fit] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[import] identifier[matplotlib] . identifier[pyplot] keyword[as] identifier[plt]
keyword[import] identifier[seaborn] keyword[as] identifier[sns]
identifier[figsize] = identifier[kwargs] . identifier[get] ( literal[string] ,( literal[int] , literal[int] ))
identifier[plt] . identifier[figure] ( identifier[figsize] = identifier[figsize] )
identifier[date_index] = identifier[self] . identifier[index] [ identifier[self] . identifier[ar] : identifier[self] . identifier[data] . identifier[shape] [ literal[int] ]]
identifier[mu] , identifier[Y] = identifier[self] . identifier[_model] ( identifier[self] . identifier[latent_variables] . identifier[get_z_values] ())
identifier[plt] . identifier[plot] ( identifier[date_index] , identifier[Y] , identifier[label] = literal[string] )
identifier[plt] . identifier[plot] ( identifier[date_index] , identifier[mu] , identifier[label] = literal[string] , identifier[c] = literal[string] )
identifier[plt] . identifier[title] ( identifier[self] . identifier[data_name] )
identifier[plt] . identifier[legend] ( identifier[loc] = literal[int] )
identifier[plt] . identifier[show] () | def plot_fit(self, **kwargs):
"""
Plots the fit of the model against the data
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize', (10, 7))
plt.figure(figsize=figsize)
date_index = self.index[self.ar:self.data.shape[0]]
(mu, Y) = self._model(self.latent_variables.get_z_values())
plt.plot(date_index, Y, label='Data')
plt.plot(date_index, mu, label='Filter', c='black')
plt.title(self.data_name)
plt.legend(loc=2)
plt.show() |
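A usage sketch for plot_fit above, assuming a pyflux-style time-series model; the ARIMA class, CSV file, and column name are illustrative placeholders:
# Hedged sketch: assumes a pyflux-style model exposing the plot_fit above.
import pandas as pd
import pyflux as pf

data = pd.read_csv('series.csv', index_col=0)
model = pf.ARIMA(data=data, ar=4, ma=0, target='value')
model.fit()                      # estimate latent variables before plotting
model.plot_fit(figsize=(12, 6))  # overlays the data and the filtered values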
def match(self, tag):
"""Match."""
return CSSMatch(self.selectors, tag, self.namespaces, self.flags).match(tag) | def function[match, parameter[self, tag]]:
constant[Match.]
return[call[call[name[CSSMatch], parameter[name[self].selectors, name[tag], name[self].namespaces, name[self].flags]].match, parameter[name[tag]]]] | keyword[def] identifier[match] ( identifier[self] , identifier[tag] ):
literal[string]
keyword[return] identifier[CSSMatch] ( identifier[self] . identifier[selectors] , identifier[tag] , identifier[self] . identifier[namespaces] , identifier[self] . identifier[flags] ). identifier[match] ( identifier[tag] ) | def match(self, tag):
"""Match."""
return CSSMatch(self.selectors, tag, self.namespaces, self.flags).match(tag) |
def GetValues(self, table_names, column_names, condition):
"""Retrieves values from a table.
Args:
table_names (list[str]): table names.
column_names (list[str]): column names.
condition (str): query condition such as
"log_source == 'Application Error'".
Yields:
sqlite3.Row: row.
Raises:
RuntimeError: if the database is not opened.
"""
if not self._connection:
raise RuntimeError('Cannot retrieve values: database not opened.')
if condition:
condition = ' WHERE {0:s}'.format(condition)
sql_query = 'SELECT {1:s} FROM {0:s}{2:s}'.format(
', '.join(table_names), ', '.join(column_names), condition)
self._cursor.execute(sql_query)
# TODO: have a look at https://docs.python.org/2/library/
# sqlite3.html#sqlite3.Row.
for row in self._cursor:
yield {
column_name: row[column_index]
for column_index, column_name in enumerate(column_names)} | def function[GetValues, parameter[self, table_names, column_names, condition]]:
constant[Retrieves values from a table.
Args:
table_names (list[str]): table names.
column_names (list[str]): column names.
condition (str): query condition such as
"log_source == 'Application Error'".
Yields:
sqlite3.Row: row.
Raises:
RuntimeError: if the database is not opened.
]
if <ast.UnaryOp object at 0x7da20c6a8ee0> begin[:]
<ast.Raise object at 0x7da20c6aa680>
if name[condition] begin[:]
variable[condition] assign[=] call[constant[ WHERE {0:s}].format, parameter[name[condition]]]
variable[sql_query] assign[=] call[constant[SELECT {1:s} FROM {0:s}{2:s}].format, parameter[call[constant[, ].join, parameter[name[table_names]]], call[constant[, ].join, parameter[name[column_names]]], name[condition]]]
call[name[self]._cursor.execute, parameter[name[sql_query]]]
for taget[name[row]] in starred[name[self]._cursor] begin[:]
<ast.Yield object at 0x7da18ede5c00> | keyword[def] identifier[GetValues] ( identifier[self] , identifier[table_names] , identifier[column_names] , identifier[condition] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_connection] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[if] identifier[condition] :
identifier[condition] = literal[string] . identifier[format] ( identifier[condition] )
identifier[sql_query] = literal[string] . identifier[format] (
literal[string] . identifier[join] ( identifier[table_names] ), literal[string] . identifier[join] ( identifier[column_names] ), identifier[condition] )
identifier[self] . identifier[_cursor] . identifier[execute] ( identifier[sql_query] )
keyword[for] identifier[row] keyword[in] identifier[self] . identifier[_cursor] :
keyword[yield] {
identifier[column_name] : identifier[row] [ identifier[column_index] ]
keyword[for] identifier[column_index] , identifier[column_name] keyword[in] identifier[enumerate] ( identifier[column_names] )} | def GetValues(self, table_names, column_names, condition):
"""Retrieves values from a table.
Args:
table_names (list[str]): table names.
column_names (list[str]): column names.
condition (str): query condition such as
"log_source == 'Application Error'".
Yields:
sqlite3.Row: row.
Raises:
RuntimeError: if the database is not opened.
"""
if not self._connection:
raise RuntimeError('Cannot retrieve values: database not opened.') # depends on [control=['if'], data=[]]
if condition:
condition = ' WHERE {0:s}'.format(condition) # depends on [control=['if'], data=[]]
sql_query = 'SELECT {1:s} FROM {0:s}{2:s}'.format(', '.join(table_names), ', '.join(column_names), condition)
self._cursor.execute(sql_query)
# TODO: have a look at https://docs.python.org/2/library/
# sqlite3.html#sqlite3.Row.
for row in self._cursor:
yield {column_name: row[column_index] for (column_index, column_name) in enumerate(column_names)} # depends on [control=['for'], data=['row']] |
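Since GetValues is a generator yielding one dict per row, callers iterate it directly; a sketch where the table, columns, and database object are illustrative:
# Hedged sketch: `database` is an opened instance of the class defining
# GetValues above; the table and column names are placeholders.
rows = database.GetValues(
    ['event_log_providers'],
    ['log_source', 'provider_guid'],
    "log_source == 'Application Error'")
for row in rows:
    print(row['log_source'], row['provider_guid'])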
def getHelp(arg=None):
"""
This function provides interactive manuals and tutorials.
"""
if arg==None:
print('--------------------------------------------------------------')
print('Hello, this is an interactive help system of HITRANonline API.')
print('--------------------------------------------------------------')
print('Run getHelp(.) with one of the following arguments:')
print(' tutorial - interactive tutorials on HAPI')
print(' units - units used in calculations')
print(' index - index of available HAPI functions')
elif arg=='tutorial':
print('-----------------------------------')
print('This is a tutorial section of help.')
print('-----------------------------------')
print('Please choose the subject of tutorial:')
print(' data - downloading the data and working with it')
print(' spectra - calculating spectral functions')
print(' plotting - visualizing data with matplotlib')
print(' python - Python quick start guide')
elif arg=='python':
print_python_tutorial()
elif arg=='data':
print_data_tutorial()
elif arg=='spectra':
print_spectra_tutorial()
elif arg=='plotting':
print_plotting_tutorial()
elif arg=='index':
print('------------------------------')
print('FETCHING DATA:')
print('------------------------------')
print(' fetch')
print(' fetch_by_ids')
print('')
print('------------------------------')
print('WORKING WITH DATA:')
print('------------------------------')
print(' db_begin')
print(' db_commit')
print(' tableList')
print(' describe')
print(' select')
print(' sort')
print(' extractColumns')
print(' getColumn')
print(' getColumns')
print(' dropTable')
print('')
print('------------------------------')
print('CALCULATING SPECTRA:')
print('------------------------------')
print(' profiles')
print(' partitionSum')
print(' absorptionCoefficient_HT')
print(' absorptionCoefficient_Voigt')
print(' absorptionCoefficient_SDVoigt')
print(' absorptionCoefficient_Lorentz')
print(' absorptionCoefficient_Doppler')
print(' transmittanceSpectrum')
print(' absorptionSpectrum')
print(' radianceSpectrum')
print('')
print('------------------------------')
print('CONVOLVING SPECTRA:')
print('------------------------------')
print(' convolveSpectrum')
print(' slit_functions')
print('')
print('------------------------------')
print('INFO ON ISOTOPOLOGUES:')
print('------------------------------')
print(' ISO_ID')
print(' abundance')
print(' molecularMass')
print(' moleculeName')
print(' isotopologueName')
print('')
print('------------------------------')
print('MISCELLANEOUS:')
print('------------------------------')
print(' getStickXY')
print(' read_hotw')
elif arg == ISO:
print_iso()
elif arg == ISO_ID:
print_iso_id()
elif arg == profiles:
print_profiles()
elif arg == slit_functions:
print_slit_functions()
else:
help(arg) | def function[getHelp, parameter[arg]]:
constant[
This function provides interactive manuals and tutorials.
]
if compare[name[arg] equal[==] constant[None]] begin[:]
call[name[print], parameter[constant[--------------------------------------------------------------]]]
call[name[print], parameter[constant[Hello, this is an interactive help system of HITRANonline API.]]]
call[name[print], parameter[constant[--------------------------------------------------------------]]]
call[name[print], parameter[constant[Run getHelp(.) with one of the following arguments:]]]
call[name[print], parameter[constant[ tutorial - interactive tutorials on HAPI]]]
call[name[print], parameter[constant[ units - units used in calculations]]]
call[name[print], parameter[constant[ index - index of available HAPI functions]]] | keyword[def] identifier[getHelp] ( identifier[arg] = keyword[None] ):
literal[string]
keyword[if] identifier[arg] == keyword[None] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[elif] identifier[arg] == literal[string] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[elif] identifier[arg] == literal[string] :
identifier[print_python_tutorial] ()
keyword[elif] identifier[arg] == literal[string] :
identifier[print_data_tutorial] ()
keyword[elif] identifier[arg] == literal[string] :
identifier[print_spectra_tutorial] ()
keyword[elif] identifier[arg] == literal[string] :
identifier[print_plotting_tutorial] ()
keyword[elif] identifier[arg] == literal[string] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[elif] identifier[arg] == identifier[ISO] :
identifier[print_iso] ()
keyword[elif] identifier[arg] == identifier[ISO_ID] :
identifier[print_iso_id] ()
keyword[elif] identifier[arg] == identifier[profiles] :
identifier[print_profiles] ()
keyword[elif] identifier[arg] == identifier[slit_functions] :
identifier[print_slit_functions] ()
keyword[else] :
identifier[help] ( identifier[arg] ) | def getHelp(arg=None):
"""
This function provides interactive manuals and tutorials.
"""
if arg == None:
print('--------------------------------------------------------------')
print('Hello, this is an interactive help system of HITRANonline API.')
print('--------------------------------------------------------------')
print('Run getHelp(.) with one of the following arguments:')
print(' tutorial - interactive tutorials on HAPI')
print(' units - units used in calculations')
print(' index - index of available HAPI functions') # depends on [control=['if'], data=[]]
elif arg == 'tutorial':
print('-----------------------------------')
print('This is a tutorial section of help.')
print('-----------------------------------')
print('Please choose the subject of tutorial:')
print(' data - downloading the data and working with it')
print(' spectra - calculating spectral functions')
print(' plotting - visualizing data with matplotlib')
print(' python - Python quick start guide') # depends on [control=['if'], data=[]]
elif arg == 'python':
print_python_tutorial() # depends on [control=['if'], data=[]]
elif arg == 'data':
print_data_tutorial() # depends on [control=['if'], data=[]]
elif arg == 'spectra':
print_spectra_tutorial() # depends on [control=['if'], data=[]]
elif arg == 'plotting':
print_plotting_tutorial() # depends on [control=['if'], data=[]]
elif arg == 'index':
print('------------------------------')
print('FETCHING DATA:')
print('------------------------------')
print(' fetch')
print(' fetch_by_ids')
print('')
print('------------------------------')
print('WORKING WITH DATA:')
print('------------------------------')
print(' db_begin')
print(' db_commit')
print(' tableList')
print(' describe')
print(' select')
print(' sort')
print(' extractColumns')
print(' getColumn')
print(' getColumns')
print(' dropTable')
print('')
print('------------------------------')
print('CALCULATING SPECTRA:')
print('------------------------------')
print(' profiles')
print(' partitionSum')
print(' absorptionCoefficient_HT')
print(' absorptionCoefficient_Voigt')
print(' absorptionCoefficient_SDVoigt')
print(' absorptionCoefficient_Lorentz')
print(' absorptionCoefficient_Doppler')
print(' transmittanceSpectrum')
print(' absorptionSpectrum')
print(' radianceSpectrum')
print('')
print('------------------------------')
print('CONVOLVING SPECTRA:')
print('------------------------------')
print(' convolveSpectrum')
print(' slit_functions')
print('')
print('------------------------------')
print('INFO ON ISOTOPOLOGUES:')
print('------------------------------')
print(' ISO_ID')
print(' abundance')
print(' molecularMass')
print(' moleculeName')
print(' isotopologueName')
print('')
print('------------------------------')
print('MISCELLANEOUS:')
print('------------------------------')
print(' getStickXY')
print(' read_hotw') # depends on [control=['if'], data=[]]
elif arg == ISO:
print_iso() # depends on [control=['if'], data=[]]
elif arg == ISO_ID:
print_iso_id() # depends on [control=['if'], data=[]]
elif arg == profiles:
print_profiles() # depends on [control=['if'], data=[]]
elif arg == slit_functions:
print_slit_functions() # depends on [control=['if'], data=[]]
else:
help(arg) |
def read_configs_(self):
"""Read config files and set config values accordingly.
Returns:
(dict, list, list): respectively content of files, list of
missing/empty files and list of files for which a parsing error
arose.
"""
if not self.config_files_:
return {}, [], []
content = {section: {} for section in self}
empty_files = []
faulty_files = []
for cfile in self.config_files_:
conf_dict = self.read_config_(cfile)
if conf_dict is None:
faulty_files.append(cfile)
continue
elif not conf_dict:
empty_files.append(cfile)
continue
for section, secdict in conf_dict.items():
content[section].update(secdict)
return content, empty_files, faulty_files | def function[read_configs_, parameter[self]]:
constant[Read config files and set config values accordingly.
Returns:
(dict, list, list): respectively content of files, list of
missing/empty files and list of files for which a parsing error
arose.
]
if <ast.UnaryOp object at 0x7da20e954a30> begin[:]
return[tuple[[<ast.Dict object at 0x7da20e957a90>, <ast.List object at 0x7da20e9568c0>, <ast.List object at 0x7da20e955bd0>]]]
variable[content] assign[=] <ast.DictComp object at 0x7da20e954fa0>
variable[empty_files] assign[=] list[[]]
variable[faulty_files] assign[=] list[[]]
for taget[name[cfile]] in starred[name[self].config_files_] begin[:]
variable[conf_dict] assign[=] call[name[self].read_config_, parameter[name[cfile]]]
if compare[name[conf_dict] is constant[None]] begin[:]
call[name[faulty_files].append, parameter[name[cfile]]]
continue
for taget[tuple[[<ast.Name object at 0x7da20e957820>, <ast.Name object at 0x7da20e955cc0>]]] in starred[call[name[conf_dict].items, parameter[]]] begin[:]
call[call[name[content]][name[section]].update, parameter[name[secdict]]]
return[tuple[[<ast.Name object at 0x7da20e955c60>, <ast.Name object at 0x7da20e956d70>, <ast.Name object at 0x7da20e956170>]]] | keyword[def] identifier[read_configs_] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[config_files_] :
keyword[return] {},[],[]
identifier[content] ={ identifier[section] :{} keyword[for] identifier[section] keyword[in] identifier[self] }
identifier[empty_files] =[]
identifier[faulty_files] =[]
keyword[for] identifier[cfile] keyword[in] identifier[self] . identifier[config_files_] :
identifier[conf_dict] = identifier[self] . identifier[read_config_] ( identifier[cfile] )
keyword[if] identifier[conf_dict] keyword[is] keyword[None] :
identifier[faulty_files] . identifier[append] ( identifier[cfile] )
keyword[continue]
keyword[elif] keyword[not] identifier[conf_dict] :
identifier[empty_files] . identifier[append] ( identifier[cfile] )
keyword[continue]
keyword[for] identifier[section] , identifier[secdict] keyword[in] identifier[conf_dict] . identifier[items] ():
identifier[content] [ identifier[section] ]. identifier[update] ( identifier[secdict] )
keyword[return] identifier[content] , identifier[empty_files] , identifier[faulty_files] | def read_configs_(self):
"""Read config files and set config values accordingly.
Returns:
(dict, list, list): respectively content of files, list of
missing/empty files and list of files for which a parsing error
arose.
"""
if not self.config_files_:
return ({}, [], []) # depends on [control=['if'], data=[]]
content = {section: {} for section in self}
empty_files = []
faulty_files = []
for cfile in self.config_files_:
conf_dict = self.read_config_(cfile)
if conf_dict is None:
faulty_files.append(cfile)
continue # depends on [control=['if'], data=[]]
elif not conf_dict:
empty_files.append(cfile)
continue # depends on [control=['if'], data=[]]
for (section, secdict) in conf_dict.items():
content[section].update(secdict) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['cfile']]
return (content, empty_files, faulty_files) |
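The three-tuple return separates merged content from the two problem lists, so callers can report missing and unparseable files independently; a sketch where `conf` stands for an instance of the defining class:
# Hedged sketch: `conf` is assumed to be a config manager exposing the
# read_configs_ method defined above.
content, empty_files, faulty_files = conf.read_configs_()
for cfile in empty_files:
    print('missing or empty config file:', cfile)
for cfile in faulty_files:
    print('could not parse config file:', cfile)
print(content)  # per-section dicts, later files overriding earlier ones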
def f_migrate(self, new_name=None, in_store=False,
new_storage_service=None, **kwargs):
"""Can be called to rename and relocate the trajectory.
:param new_name: New name of the trajectory, None if you do not want to change the name.
:param in_store:
Set this to True if the trajectory has been stored with the new name at the new
file before and you just want to "switch back" to the location.
If you migrate to a store used before and you do not set `in_store=True`,
the storage service will throw a RuntimeError in case you store the Trajectory
because it will assume that you try to store a new trajectory that accidentally has
the very same name as another trajectory. If set to `True` and trajectory is not found
in the file, the trajectory is simply stored to the file.
:param new_storage_service:
New service where you want to migrate to. Leave None if you want to keep the old one.
:param kwargs:
Additional keyword arguments passed to the service.
For instance, to change the file of the trajectory use ``filename='my_new_file.hdf5'``.
"""
if new_name is not None:
self._name = new_name
unused_kwargs = set(kwargs.keys())
if new_storage_service is not None or len(kwargs) > 0:
self._storage_service, unused_kwargs = storage_factory(
storage_service=new_storage_service,
trajectory=self, **kwargs)
if len(unused_kwargs) > 0:
raise ValueError('The following keyword arguments were not used: `%s`' %
str(unused_kwargs))
self._stored = in_store | def function[f_migrate, parameter[self, new_name, in_store, new_storage_service]]:
constant[Can be called to rename and relocate the trajectory.
:param new_name: New name of the trajectory, None if you do not want to change the name.
:param in_store:
Set this to True if the trajectory has been stored with the new name at the new
file before and you just want to "switch back" to the location.
If you migrate to a store used before and you do not set `in_store=True`,
the storage service will throw a RuntimeError in case you store the Trajectory
because it will assume that you try to store a new trajectory that accidentally has
the very same name as another trajectory. If set to `True` and trajectory is not found
in the file, the trajectory is simply stored to the file.
:param new_storage_service:
New service where you want to migrate to. Leave None if you want to keep the old one.
:param kwargs:
Additional keyword arguments passed to the service.
For instance, to change the file of the trajectory use ``filename='my_new_file.hdf5'``.
]
if compare[name[new_name] is_not constant[None]] begin[:]
name[self]._name assign[=] name[new_name]
variable[unused_kwargs] assign[=] call[name[set], parameter[call[name[kwargs].keys, parameter[]]]]
if <ast.BoolOp object at 0x7da18f722080> begin[:]
<ast.Tuple object at 0x7da18f720520> assign[=] call[name[storage_factory], parameter[]]
if compare[call[name[len], parameter[name[unused_kwargs]]] greater[>] constant[0]] begin[:]
<ast.Raise object at 0x7da18f722da0>
name[self]._stored assign[=] name[in_store] | keyword[def] identifier[f_migrate] ( identifier[self] , identifier[new_name] = keyword[None] , identifier[in_store] = keyword[False] ,
identifier[new_storage_service] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[new_name] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_name] = identifier[new_name]
identifier[unused_kwargs] = identifier[set] ( identifier[kwargs] . identifier[keys] ())
keyword[if] identifier[new_storage_service] keyword[is] keyword[not] keyword[None] keyword[or] identifier[len] ( identifier[kwargs] )> literal[int] :
identifier[self] . identifier[_storage_service] , identifier[unused_kwargs] = identifier[storage_factory] (
identifier[storage_service] = identifier[new_storage_service] ,
identifier[trajectory] = identifier[self] ,** identifier[kwargs] )
keyword[if] identifier[len] ( identifier[unused_kwargs] )> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] %
identifier[str] ( identifier[unused_kwargs] ))
identifier[self] . identifier[_stored] = identifier[in_store] | def f_migrate(self, new_name=None, in_store=False, new_storage_service=None, **kwargs):
"""Can be called to rename and relocate the trajectory.
:param new_name: New name of the trajectory, None if you do not want to change the name.
:param in_store:
Set this to True if the trajectory has been stored with the new name at the new
file before and you just want to "switch back" to the location.
If you migrate to a store used before and you do not set `in_store=True`,
the storage service will throw a RuntimeError in case you store the Trajectory
because it will assume that you try to store a new trajectory that accidentally has
the very same name as another trajectory. If set to `True` and trajectory is not found
in the file, the trajectory is simply stored to the file.
:param new_storage_service:
New service where you want to migrate to. Leave None if you want to keep the old one.
:param kwargs:
Additional keyword arguments passed to the service.
For instance, to change the file of the trajectory use ``filename='my_new_file.hdf5'``.
"""
if new_name is not None:
self._name = new_name # depends on [control=['if'], data=['new_name']]
unused_kwargs = set(kwargs.keys())
if new_storage_service is not None or len(kwargs) > 0:
(self._storage_service, unused_kwargs) = storage_factory(storage_service=new_storage_service, trajectory=self, **kwargs) # depends on [control=['if'], data=[]]
if len(unused_kwargs) > 0:
raise ValueError('The following keyword arguments were not used: `%s`' % str(unused_kwargs)) # depends on [control=['if'], data=[]]
self._stored = in_store |
def phase_to_color_wheel(complex_number):
"""Map a phase of a complexnumber to a color in (r,g,b).
complex_number is phase is first mapped to angle in the range
[0, 2pi] and then to a color wheel with blue at zero phase.
"""
angles = np.angle(complex_number)
angle_round = int(((angles + 2 * np.pi) % (2 * np.pi))/np.pi*6)
color_map = {
0: (0, 0, 1), # blue,
1: (0.5, 0, 1), # blue-violet
2: (1, 0, 1), # violet
3: (1, 0, 0.5), # red-violet,
4: (1, 0, 0), # red
5: (1, 0.5, 0), # red-orange,
6: (1, 1, 0), # orange
7: (0.5, 1, 0), # orange-yellow
8: (0, 1, 0), # yellow,
9: (0, 1, 0.5), # yellow-green,
10: (0, 1, 1), # green,
11: (0, 0.5, 1) # green-blue,
}
return color_map[angle_round] | def function[phase_to_color_wheel, parameter[complex_number]]:
constant[Map a phase of a complex number to a color in (r,g,b).
The phase of complex_number is first mapped to an angle in the range
[0, 2pi] and then to a color wheel with blue at zero phase.
]
variable[angles] assign[=] call[name[np].angle, parameter[name[complex_number]]]
variable[angle_round] assign[=] call[name[int], parameter[binary_operation[binary_operation[binary_operation[binary_operation[name[angles] + binary_operation[constant[2] * name[np].pi]] <ast.Mod object at 0x7da2590d6920> binary_operation[constant[2] * name[np].pi]] / name[np].pi] * constant[6]]]]
variable[color_map] assign[=] dictionary[[<ast.Constant object at 0x7da1b0595a80>, <ast.Constant object at 0x7da1b0595f90>, <ast.Constant object at 0x7da1b0595ba0>, <ast.Constant object at 0x7da1b0596650>, <ast.Constant object at 0x7da1b0596590>, <ast.Constant object at 0x7da1b0596e60>, <ast.Constant object at 0x7da1b0594f40>, <ast.Constant object at 0x7da1b0596d70>, <ast.Constant object at 0x7da1b0596b90>, <ast.Constant object at 0x7da1b0596830>, <ast.Constant object at 0x7da1b05965c0>, <ast.Constant object at 0x7da1b05969b0>], [<ast.Tuple object at 0x7da1b0596800>, <ast.Tuple object at 0x7da1b05965f0>, <ast.Tuple object at 0x7da1b050b760>, <ast.Tuple object at 0x7da1b050bf40>, <ast.Tuple object at 0x7da1b05085e0>, <ast.Tuple object at 0x7da1b050b640>, <ast.Tuple object at 0x7da1b050aec0>, <ast.Tuple object at 0x7da1b050b5b0>, <ast.Tuple object at 0x7da1b050b2b0>, <ast.Tuple object at 0x7da1b050bfa0>, <ast.Tuple object at 0x7da1b050a8c0>, <ast.Tuple object at 0x7da1b050a950>]]
return[call[name[color_map]][name[angle_round]]] | keyword[def] identifier[phase_to_color_wheel] ( identifier[complex_number] ):
literal[string]
identifier[angles] = identifier[np] . identifier[angle] ( identifier[complex_number] )
identifier[angle_round] = identifier[int] ((( identifier[angles] + literal[int] * identifier[np] . identifier[pi] )%( literal[int] * identifier[np] . identifier[pi] ))/ identifier[np] . identifier[pi] * literal[int] )
identifier[color_map] ={
literal[int] :( literal[int] , literal[int] , literal[int] ),
literal[int] :( literal[int] , literal[int] , literal[int] ),
literal[int] :( literal[int] , literal[int] , literal[int] ),
literal[int] :( literal[int] , literal[int] , literal[int] ),
literal[int] :( literal[int] , literal[int] , literal[int] ),
literal[int] :( literal[int] , literal[int] , literal[int] ),
literal[int] :( literal[int] , literal[int] , literal[int] ),
literal[int] :( literal[int] , literal[int] , literal[int] ),
literal[int] :( literal[int] , literal[int] , literal[int] ),
literal[int] :( literal[int] , literal[int] , literal[int] ),
literal[int] :( literal[int] , literal[int] , literal[int] ),
literal[int] :( literal[int] , literal[int] , literal[int] )
}
keyword[return] identifier[color_map] [ identifier[angle_round] ] | def phase_to_color_wheel(complex_number):
"""Map a phase of a complexnumber to a color in (r,g,b).
complex_number is phase is first mapped to angle in the range
[0, 2pi] and then to a color wheel with blue at zero phase.
"""
angles = np.angle(complex_number)
angle_round = int((angles + 2 * np.pi) % (2 * np.pi) / np.pi * 6) # blue,
# blue-violet
# violet
# red-violet,
# red
# red-orange,
# orange
# orange-yellow
# yellow,
# yellow-green,
# green,
# green-blue,
color_map = {0: (0, 0, 1), 1: (0.5, 0, 1), 2: (1, 0, 1), 3: (1, 0, 0.5), 4: (1, 0, 0), 5: (1, 0.5, 0), 6: (1, 1, 0), 7: (0.5, 1, 0), 8: (0, 1, 0), 9: (0, 1, 0.5), 10: (0, 1, 1), 11: (0, 0.5, 1)}
return color_map[angle_round] |
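A worked check of the bucketing arithmetic: a phase of pi/2 maps to int(((pi/2) % 2pi)/pi * 6) = 3, the red-violet entry, while zero phase stays in bucket 0 (blue) and pi lands in bucket 6 (orange):
# Worked check; relies on phase_to_color_wheel and numpy as defined above.
assert phase_to_color_wheel(1 + 0j) == (0, 0, 1)    # phase 0    -> bucket 0, blue
assert phase_to_color_wheel(1j) == (1, 0, 0.5)      # phase pi/2 -> bucket 3, red-violet
assert phase_to_color_wheel(-1 + 0j) == (1, 1, 0)   # phase pi   -> bucket 6, orange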
def pick_synchronous_standby(self, cluster):
"""Finds the best candidate to be the synchronous standby.
Current synchronous standby is always preferred, unless it has disconnected or does not want to be a
synchronous standby any longer.
:returns tuple of candidate name or None, and bool showing if the member is the active synchronous standby.
"""
current = cluster.sync.sync_standby
current = current.lower() if current else current
members = {m.name.lower(): m for m in cluster.members}
candidates = []
# Pick candidates based on who has flushed WAL farthest.
# TODO: for synchronous_commit = remote_write we actually want to order on write_location
for app_name, state, sync_state in self.query(
"SELECT pg_catalog.lower(application_name), state, sync_state"
" FROM pg_catalog.pg_stat_replication"
" ORDER BY flush_{0} DESC".format(self.lsn_name)):
member = members.get(app_name)
if state != 'streaming' or not member or member.tags.get('nosync', False):
continue
if sync_state == 'sync':
return app_name, True
if sync_state == 'potential' and app_name == current:
# Prefer current even if not the best one any more to avoid indecisiveness and spurious swaps.
return current, False
if sync_state in ('async', 'potential'):
candidates.append(app_name)
if candidates:
return candidates[0], False
return None, False | def function[pick_synchronous_standby, parameter[self, cluster]]:
constant[Finds the best candidate to be the synchronous standby.
Current synchronous standby is always preferred, unless it has disconnected or does not want to be a
synchronous standby any longer.
:returns tuple of candidate name or None, and bool showing if the member is the active synchronous standby.
]
variable[current] assign[=] name[cluster].sync.sync_standby
variable[current] assign[=] <ast.IfExp object at 0x7da1b21d49a0>
variable[members] assign[=] <ast.DictComp object at 0x7da1b21d4340>
variable[candidates] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b21d7c10>, <ast.Name object at 0x7da1b21d54b0>, <ast.Name object at 0x7da1b21795d0>]]] in starred[call[name[self].query, parameter[call[constant[SELECT pg_catalog.lower(application_name), state, sync_state FROM pg_catalog.pg_stat_replication ORDER BY flush_{0} DESC].format, parameter[name[self].lsn_name]]]]] begin[:]
variable[member] assign[=] call[name[members].get, parameter[name[app_name]]]
if <ast.BoolOp object at 0x7da1b2179540> begin[:]
continue
if compare[name[sync_state] equal[==] constant[sync]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b1f94250>, <ast.Constant object at 0x7da1b1f97fd0>]]]
if <ast.BoolOp object at 0x7da1b21882e0> begin[:]
return[tuple[[<ast.Name object at 0x7da1b2189b10>, <ast.Constant object at 0x7da1b21886d0>]]]
if compare[name[sync_state] in tuple[[<ast.Constant object at 0x7da1b21880d0>, <ast.Constant object at 0x7da1b21889d0>]]] begin[:]
call[name[candidates].append, parameter[name[app_name]]]
if name[candidates] begin[:]
return[tuple[[<ast.Subscript object at 0x7da1b2189cc0>, <ast.Constant object at 0x7da1b21894e0>]]]
return[tuple[[<ast.Constant object at 0x7da1b2189450>, <ast.Constant object at 0x7da1b21899f0>]]] | keyword[def] identifier[pick_synchronous_standby] ( identifier[self] , identifier[cluster] ):
literal[string]
identifier[current] = identifier[cluster] . identifier[sync] . identifier[sync_standby]
identifier[current] = identifier[current] . identifier[lower] () keyword[if] identifier[current] keyword[else] identifier[current]
identifier[members] ={ identifier[m] . identifier[name] . identifier[lower] (): identifier[m] keyword[for] identifier[m] keyword[in] identifier[cluster] . identifier[members] }
identifier[candidates] =[]
keyword[for] identifier[app_name] , identifier[state] , identifier[sync_state] keyword[in] identifier[self] . identifier[query] (
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[lsn_name] )):
identifier[member] = identifier[members] . identifier[get] ( identifier[app_name] )
keyword[if] identifier[state] != literal[string] keyword[or] keyword[not] identifier[member] keyword[or] identifier[member] . identifier[tags] . identifier[get] ( literal[string] , keyword[False] ):
keyword[continue]
keyword[if] identifier[sync_state] == literal[string] :
keyword[return] identifier[app_name] , keyword[True]
keyword[if] identifier[sync_state] == literal[string] keyword[and] identifier[app_name] == identifier[current] :
keyword[return] identifier[current] , keyword[False]
keyword[if] identifier[sync_state] keyword[in] ( literal[string] , literal[string] ):
identifier[candidates] . identifier[append] ( identifier[app_name] )
keyword[if] identifier[candidates] :
keyword[return] identifier[candidates] [ literal[int] ], keyword[False]
keyword[return] keyword[None] , keyword[False] | def pick_synchronous_standby(self, cluster):
"""Finds the best candidate to be the synchronous standby.
Current synchronous standby is always preferred, unless it has disconnected or does not want to be a
synchronous standby any longer.
:returns tuple of candidate name or None, and bool showing if the member is the active synchronous standby.
"""
current = cluster.sync.sync_standby
current = current.lower() if current else current
members = {m.name.lower(): m for m in cluster.members}
candidates = []
# Pick candidates based on who has flushed WAL farthest.
# TODO: for synchronous_commit = remote_write we actually want to order on write_location
for (app_name, state, sync_state) in self.query('SELECT pg_catalog.lower(application_name), state, sync_state FROM pg_catalog.pg_stat_replication ORDER BY flush_{0} DESC'.format(self.lsn_name)):
member = members.get(app_name)
if state != 'streaming' or not member or member.tags.get('nosync', False):
continue # depends on [control=['if'], data=[]]
if sync_state == 'sync':
return (app_name, True) # depends on [control=['if'], data=[]]
if sync_state == 'potential' and app_name == current:
# Prefer current even if not the best one any more to avoid indecisiveness and spurious swaps.
return (current, False) # depends on [control=['if'], data=[]]
if sync_state in ('async', 'potential'):
candidates.append(app_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if candidates:
return (candidates[0], False) # depends on [control=['if'], data=[]]
return (None, False) |
def _SetupBotoConfig(self):
"""Set the boto config so GSUtil works with provisioned service accounts."""
project_id = self._GetNumericProjectId()
try:
boto_config.BotoConfig(project_id, debug=self.debug)
except (IOError, OSError) as e:
self.logger.warning(str(e)) | def function[_SetupBotoConfig, parameter[self]]:
constant[Set the boto config so GSUtil works with provisioned service accounts.]
variable[project_id] assign[=] call[name[self]._GetNumericProjectId, parameter[]]
<ast.Try object at 0x7da2044c0520> | keyword[def] identifier[_SetupBotoConfig] ( identifier[self] ):
literal[string]
identifier[project_id] = identifier[self] . identifier[_GetNumericProjectId] ()
keyword[try] :
identifier[boto_config] . identifier[BotoConfig] ( identifier[project_id] , identifier[debug] = identifier[self] . identifier[debug] )
keyword[except] ( identifier[IOError] , identifier[OSError] ) keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[warning] ( identifier[str] ( identifier[e] )) | def _SetupBotoConfig(self):
"""Set the boto config so GSUtil works with provisioned service accounts."""
project_id = self._GetNumericProjectId()
try:
boto_config.BotoConfig(project_id, debug=self.debug) # depends on [control=['try'], data=[]]
except (IOError, OSError) as e:
self.logger.warning(str(e)) # depends on [control=['except'], data=['e']] |
def present(
name,
image_id,
key_name=None,
vpc_id=None,
vpc_name=None,
security_groups=None,
user_data=None,
cloud_init=None,
instance_type='m1.small',
kernel_id=None,
ramdisk_id=None,
block_device_mappings=None,
delete_on_termination=None,
instance_monitoring=False,
spot_price=None,
instance_profile_name=None,
ebs_optimized=False,
associate_public_ip_address=None,
region=None,
key=None,
keyid=None,
profile=None):
'''
Ensure the launch configuration exists.
name
Name of the launch configuration.
image_id
AMI to use for instances. AMI must exist or creation of the launch
configuration will fail.
key_name
Name of the EC2 key pair to use for instances. Key must exist or
creation of the launch configuration will fail.
vpc_id
The VPC id where the security groups are defined. Only necessary when
using named security groups that exist outside of the default VPC.
Mutually exclusive with vpc_name.
vpc_name
Name of the VPC where the security groups are defined. Only necessary
when using named security groups that exist outside of the default VPC.
Mutually exclusive with vpc_id.
security_groups
List of names or security group IDs of the security groups with which
to associate the EC2 instances or VPC instances, respectively. Security
groups must exist, or creation of the launch configuration will fail.
user_data
The user data available to launched EC2 instances.
cloud_init
A dict of cloud_init configuration. Currently supported keys:
boothooks, scripts and cloud-config.
Mutually exclusive with user_data.
instance_type
The instance type. ex: m1.small.
kernel_id
The kernel id for the instance.
ramdisk_id
The RAM disk ID for the instance.
block_device_mappings
A dict of block device mappings that contains a dict
with volume_type, delete_on_termination, iops, size, encrypted,
snapshot_id.
volume_type
Indicates what volume type to use. Valid values are standard, io1, gp2.
Default is standard.
delete_on_termination
Whether the volume should be explicitly marked for deletion when its instance is
terminated (True), or left around (False). If not provided, or None is explicitly passed,
the default AWS behaviour is used, which is True for ROOT volumes of instances, and
False for all others.
iops
For Provisioned IOPS (SSD) volumes only. The number of I/O operations per
second (IOPS) to provision for the volume.
size
Desired volume size (in GiB).
encrypted
Indicates whether the volume should be encrypted. Encrypted EBS volumes must
be attached to instances that support Amazon EBS encryption. Volumes that are
created from encrypted snapshots are automatically encrypted. There is no way
to create an encrypted volume from an unencrypted snapshot or an unencrypted
volume from an encrypted snapshot.
instance_monitoring
Whether instances in group are launched with detailed monitoring.
spot_price
The spot price you are bidding. Only applies if you are building an
autoscaling group with spot instances.
instance_profile_name
The name or the Amazon Resource Name (ARN) of the instance profile
associated with the IAM role for the instance. Instance profile must
exist or the creation of the launch configuration will fail.
ebs_optimized
Specifies whether the instance is optimized for EBS I/O (true) or not
(false).
associate_public_ip_address
Used for Auto Scaling groups that launch instances into an Amazon
Virtual Private Cloud. Specifies whether to assign a public IP address
to each instance launched in an Amazon VPC.
region
The region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
if user_data and cloud_init:
raise SaltInvocationError('user_data and cloud_init are mutually'
' exclusive options.')
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
exists = __salt__['boto_asg.launch_configuration_exists'](name,
region=region,
key=key,
keyid=keyid,
profile=profile)
if not exists:
if __opts__['test']:
msg = 'Launch configuration set to be created.'
ret['comment'] = msg
ret['result'] = None
return ret
if cloud_init:
user_data = __salt__['boto_asg.get_cloud_init_mime'](cloud_init)
# TODO: Ensure image_id, key_name, security_groups and instance_profile
# exist, or throw an invocation error.
created = __salt__['boto_asg.create_launch_configuration'](
name,
image_id,
key_name=key_name,
vpc_id=vpc_id,
vpc_name=vpc_name,
security_groups=security_groups,
user_data=user_data,
instance_type=instance_type,
kernel_id=kernel_id,
ramdisk_id=ramdisk_id,
block_device_mappings=block_device_mappings,
delete_on_termination=delete_on_termination,
instance_monitoring=instance_monitoring,
spot_price=spot_price,
instance_profile_name=instance_profile_name,
ebs_optimized=ebs_optimized,
associate_public_ip_address=associate_public_ip_address,
region=region,
key=key,
keyid=keyid,
profile=profile)
if created:
ret['changes']['old'] = None
ret['changes']['new'] = name
else:
ret['result'] = False
ret['comment'] = 'Failed to create launch configuration.'
else:
ret['comment'] = 'Launch configuration present.'
return ret | def function[present, parameter[name, image_id, key_name, vpc_id, vpc_name, security_groups, user_data, cloud_init, instance_type, kernel_id, ramdisk_id, block_device_mappings, delete_on_termination, instance_monitoring, spot_price, instance_profile_name, ebs_optimized, associate_public_ip_address, region, key, keyid, profile]]:
constant[
Ensure the launch configuration exists.
name
Name of the launch configuration.
image_id
AMI to use for instances. AMI must exist or creation of the launch
configuration will fail.
key_name
Name of the EC2 key pair to use for instances. Key must exist or
creation of the launch configuration will fail.
vpc_id
The VPC id where the security groups are defined. Only necessary when
using named security groups that exist outside of the default VPC.
Mutually exclusive with vpc_name.
vpc_name
Name of the VPC where the security groups are defined. Only necessary
when using named security groups that exist outside of the default VPC.
Mutually exclusive with vpc_id.
security_groups
List of names or security group IDs of the security groups with which
to associate the EC2 instances or VPC instances, respectively. Security
groups must exist, or creation of the launch configuration will fail.
user_data
The user data available to launched EC2 instances.
cloud_init
A dict of cloud_init configuration. Currently supported keys:
boothooks, scripts and cloud-config.
Mutually exclusive with user_data.
instance_type
The instance type. ex: m1.small.
kernel_id
The kernel id for the instance.
ramdisk_id
The RAM disk ID for the instance.
block_device_mappings
A dict of block device mappings that contains a dict
with volume_type, delete_on_termination, iops, size, encrypted,
snapshot_id.
volume_type
Indicates what volume type to use. Valid values are standard, io1, gp2.
Default is standard.
delete_on_termination
Whether the volume should be explicitly marked for deletion when its instance is
terminated (True), or left around (False). If not provided, or None is explicitly passed,
the default AWS behaviour is used, which is True for ROOT volumes of instances, and
False for all others.
iops
For Provisioned IOPS (SSD) volumes only. The number of I/O operations per
second (IOPS) to provision for the volume.
size
Desired volume size (in GiB).
encrypted
Indicates whether the volume should be encrypted. Encrypted EBS volumes must
be attached to instances that support Amazon EBS encryption. Volumes that are
created from encrypted snapshots are automatically encrypted. There is no way
to create an encrypted volume from an unencrypted snapshot or an unencrypted
volume from an encrypted snapshot.
instance_monitoring
Whether instances in group are launched with detailed monitoring.
spot_price
The spot price you are bidding. Only applies if you are building an
autoscaling group with spot instances.
instance_profile_name
The name or the Amazon Resource Name (ARN) of the instance profile
associated with the IAM role for the instance. Instance profile must
exist or the creation of the launch configuration will fail.
ebs_optimized
Specifies whether the instance is optimized for EBS I/O (true) or not
(false).
associate_public_ip_address
Used for Auto Scaling groups that launch instances into an Amazon
Virtual Private Cloud. Specifies whether to assign a public IP address
to each instance launched in an Amazon VPC.
region
The region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
]
if <ast.BoolOp object at 0x7da1b20006a0> begin[:]
<ast.Raise object at 0x7da1b20007f0>
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b20008e0>, <ast.Constant object at 0x7da1b2000940>, <ast.Constant object at 0x7da1b20009d0>, <ast.Constant object at 0x7da1b2000af0>], [<ast.Name object at 0x7da1b2000a90>, <ast.Constant object at 0x7da1b2000a30>, <ast.Constant object at 0x7da1b2000a00>, <ast.Dict object at 0x7da1b2000a60>]]
variable[exists] assign[=] call[call[name[__salt__]][constant[boto_asg.launch_configuration_exists]], parameter[name[name]]]
if <ast.UnaryOp object at 0x7da1b2000d60> begin[:]
if call[name[__opts__]][constant[test]] begin[:]
variable[msg] assign[=] constant[Launch configuration set to be created.]
call[name[ret]][constant[comment]] assign[=] name[msg]
call[name[ret]][constant[result]] assign[=] constant[None]
return[name[ret]]
if name[cloud_init] begin[:]
variable[user_data] assign[=] call[call[name[__salt__]][constant[boto_asg.get_cloud_init_mime]], parameter[name[cloud_init]]]
variable[created] assign[=] call[call[name[__salt__]][constant[boto_asg.create_launch_configuration]], parameter[name[name], name[image_id]]]
if name[created] begin[:]
call[call[name[ret]][constant[changes]]][constant[old]] assign[=] constant[None]
call[call[name[ret]][constant[changes]]][constant[new]] assign[=] name[name]
return[name[ret]] | keyword[def] identifier[present] (
identifier[name] ,
identifier[image_id] ,
identifier[key_name] = keyword[None] ,
identifier[vpc_id] = keyword[None] ,
identifier[vpc_name] = keyword[None] ,
identifier[security_groups] = keyword[None] ,
identifier[user_data] = keyword[None] ,
identifier[cloud_init] = keyword[None] ,
identifier[instance_type] = literal[string] ,
identifier[kernel_id] = keyword[None] ,
identifier[ramdisk_id] = keyword[None] ,
identifier[block_device_mappings] = keyword[None] ,
identifier[delete_on_termination] = keyword[None] ,
identifier[instance_monitoring] = keyword[False] ,
identifier[spot_price] = keyword[None] ,
identifier[instance_profile_name] = keyword[None] ,
identifier[ebs_optimized] = keyword[False] ,
identifier[associate_public_ip_address] = keyword[None] ,
identifier[region] = keyword[None] ,
identifier[key] = keyword[None] ,
identifier[keyid] = keyword[None] ,
identifier[profile] = keyword[None] ):
literal[string]
keyword[if] identifier[user_data] keyword[and] identifier[cloud_init] :
keyword[raise] identifier[SaltInvocationError] ( literal[string]
literal[string] )
identifier[ret] ={ literal[string] : identifier[name] , literal[string] : keyword[True] , literal[string] : literal[string] , literal[string] :{}}
identifier[exists] = identifier[__salt__] [ literal[string] ]( identifier[name] ,
identifier[region] = identifier[region] ,
identifier[key] = identifier[key] ,
identifier[keyid] = identifier[keyid] ,
identifier[profile] = identifier[profile] )
keyword[if] keyword[not] identifier[exists] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[msg] = literal[string]
identifier[ret] [ literal[string] ]= identifier[msg]
identifier[ret] [ literal[string] ]= keyword[None]
keyword[return] identifier[ret]
keyword[if] identifier[cloud_init] :
identifier[user_data] = identifier[__salt__] [ literal[string] ]( identifier[cloud_init] )
identifier[created] = identifier[__salt__] [ literal[string] ](
identifier[name] ,
identifier[image_id] ,
identifier[key_name] = identifier[key_name] ,
identifier[vpc_id] = identifier[vpc_id] ,
identifier[vpc_name] = identifier[vpc_name] ,
identifier[security_groups] = identifier[security_groups] ,
identifier[user_data] = identifier[user_data] ,
identifier[instance_type] = identifier[instance_type] ,
identifier[kernel_id] = identifier[kernel_id] ,
identifier[ramdisk_id] = identifier[ramdisk_id] ,
identifier[block_device_mappings] = identifier[block_device_mappings] ,
identifier[delete_on_termination] = identifier[delete_on_termination] ,
identifier[instance_monitoring] = identifier[instance_monitoring] ,
identifier[spot_price] = identifier[spot_price] ,
identifier[instance_profile_name] = identifier[instance_profile_name] ,
identifier[ebs_optimized] = identifier[ebs_optimized] ,
identifier[associate_public_ip_address] = identifier[associate_public_ip_address] ,
identifier[region] = identifier[region] ,
identifier[key] = identifier[key] ,
identifier[keyid] = identifier[keyid] ,
identifier[profile] = identifier[profile] )
keyword[if] identifier[created] :
identifier[ret] [ literal[string] ][ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ][ literal[string] ]= identifier[name]
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string]
keyword[else] :
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret] | def present(name, image_id, key_name=None, vpc_id=None, vpc_name=None, security_groups=None, user_data=None, cloud_init=None, instance_type='m1.small', kernel_id=None, ramdisk_id=None, block_device_mappings=None, delete_on_termination=None, instance_monitoring=False, spot_price=None, instance_profile_name=None, ebs_optimized=False, associate_public_ip_address=None, region=None, key=None, keyid=None, profile=None):
"""
Ensure the launch configuration exists.
name
Name of the launch configuration.
image_id
AMI to use for instances. AMI must exist or creation of the launch
configuration will fail.
key_name
Name of the EC2 key pair to use for instances. Key must exist or
creation of the launch configuration will fail.
vpc_id
The VPC id where the security groups are defined. Only necessary when
using named security groups that exist outside of the default VPC.
Mutually exclusive with vpc_name.
vpc_name
Name of the VPC where the security groups are defined. Only necessary
when using named security groups that exist outside of the default VPC.
Mutually exclusive with vpc_id.
security_groups
List of names or security group IDs of the security groups with which
to associate the EC2 instances or VPC instances, respectively. Security
groups must exist, or creation of the launch configuration will fail.
user_data
The user data available to launched EC2 instances.
cloud_init
A dict of cloud_init configuration. Currently supported keys:
boothooks, scripts and cloud-config.
Mutually exclusive with user_data.
instance_type
The instance type. ex: m1.small.
kernel_id
The kernel id for the instance.
ramdisk_id
The RAM disk ID for the instance.
block_device_mappings
A dict of block device mappings that contains a dict
with volume_type, delete_on_termination, iops, size, encrypted,
snapshot_id.
volume_type
Indicates what volume type to use. Valid values are standard, io1, gp2.
Default is standard.
delete_on_termination
Whether the volume should be explicitly marked for deletion when its instance is
terminated (True), or left around (False). If not provided, or None is explicitly passed,
the default AWS behaviour is used, which is True for ROOT volumes of instances, and
False for all others.
iops
For Provisioned IOPS (SSD) volumes only. The number of I/O operations per
second (IOPS) to provision for the volume.
size
Desired volume size (in GiB).
encrypted
Indicates whether the volume should be encrypted. Encrypted EBS volumes must
be attached to instances that support Amazon EBS encryption. Volumes that are
created from encrypted snapshots are automatically encrypted. There is no way
to create an encrypted volume from an unencrypted snapshot or an unencrypted
volume from an encrypted snapshot.
instance_monitoring
Whether instances in group are launched with detailed monitoring.
spot_price
The spot price you are bidding. Only applies if you are building an
autoscaling group with spot instances.
instance_profile_name
The name or the Amazon Resource Name (ARN) of the instance profile
associated with the IAM role for the instance. Instance profile must
exist or the creation of the launch configuration will fail.
ebs_optimized
Specifies whether the instance is optimized for EBS I/O (true) or not
(false).
associate_public_ip_address
Used for Auto Scaling groups that launch instances into an Amazon
Virtual Private Cloud. Specifies whether to assign a public IP address
        to each instance launched in an Amazon VPC.
region
The region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
"""
if user_data and cloud_init:
raise SaltInvocationError('user_data and cloud_init are mutually exclusive options.') # depends on [control=['if'], data=[]]
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
exists = __salt__['boto_asg.launch_configuration_exists'](name, region=region, key=key, keyid=keyid, profile=profile)
if not exists:
if __opts__['test']:
msg = 'Launch configuration set to be created.'
ret['comment'] = msg
ret['result'] = None
return ret # depends on [control=['if'], data=[]]
if cloud_init:
user_data = __salt__['boto_asg.get_cloud_init_mime'](cloud_init) # depends on [control=['if'], data=[]]
# TODO: Ensure image_id, key_name, security_groups and instance_profile
# exist, or throw an invocation error.
created = __salt__['boto_asg.create_launch_configuration'](name, image_id, key_name=key_name, vpc_id=vpc_id, vpc_name=vpc_name, security_groups=security_groups, user_data=user_data, instance_type=instance_type, kernel_id=kernel_id, ramdisk_id=ramdisk_id, block_device_mappings=block_device_mappings, delete_on_termination=delete_on_termination, instance_monitoring=instance_monitoring, spot_price=spot_price, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, associate_public_ip_address=associate_public_ip_address, region=region, key=key, keyid=keyid, profile=profile)
if created:
ret['changes']['old'] = None
ret['changes']['new'] = name # depends on [control=['if'], data=[]]
else:
ret['result'] = False
ret['comment'] = 'Failed to create launch configuration.' # depends on [control=['if'], data=[]]
else:
ret['comment'] = 'Launch configuration present.'
return ret |
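A minimal sketch of exercising the state function above outside of Salt. The __salt__ and __opts__ dunders are normally injected by Salt's loader, so illustrative stand-ins are assigned here; with test mode on, the call returns before anything touches AWS.

# Hypothetical stand-ins for Salt's loader-injected globals (sketch only).
__opts__ = {'test': True}
__salt__ = {'boto_asg.launch_configuration_exists': lambda name, **kwargs: False}

ret = present('web-lc', image_id='ami-0123456789abcdef0', instance_type='t2.micro')
# -> {'name': 'web-lc', 'result': None, 'changes': {},
#     'comment': 'Launch configuration set to be created.'}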
def get_response_signer(self):
"""Returns the response signer for this version of the signature.
"""
if not hasattr(self, "response_signer"):
self.response_signer = V2ResponseSigner(self.digest, orig=self)
return self.response_signer | def function[get_response_signer, parameter[self]]:
constant[Returns the response signer for this version of the signature.
]
if <ast.UnaryOp object at 0x7da1b14d8a90> begin[:]
name[self].response_signer assign[=] call[name[V2ResponseSigner], parameter[name[self].digest]]
return[name[self].response_signer] | keyword[def] identifier[get_response_signer] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[response_signer] = identifier[V2ResponseSigner] ( identifier[self] . identifier[digest] , identifier[orig] = identifier[self] )
keyword[return] identifier[self] . identifier[response_signer] | def get_response_signer(self):
"""Returns the response signer for this version of the signature.
"""
if not hasattr(self, 'response_signer'):
self.response_signer = V2ResponseSigner(self.digest, orig=self) # depends on [control=['if'], data=[]]
return self.response_signer |
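The hasattr check above is a lazy-memoisation pattern: the signer is built once, cached on the instance, and reused. A sketch (the auth object is an assumption from context):

signer = auth.get_response_signer()
assert signer is auth.get_response_signer()   # cached instance is reused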
def _trim_batch(batch, length):
"""Trim the mini-batch `batch` to the size `length`.
`batch` can be:
    - a NumPy array, in which case its first axis will be trimmed to size
`length`
    - a tuple, in which case `_trim_batch` is applied recursively to
      each element and the resulting tuple is returned
As a consequence, mini-batches can be structured; lists and tuples can
be nested arbitrarily deep.
Parameters
----------
batch: tuple or NumPy array
the mini-batch to trim
length: int
the size to which `batch` is to be trimmed
Returns
-------
tuple or NumPy array of same structure as `batch`
The trimmed mini-batch
"""
if isinstance(batch, tuple):
return tuple([_trim_batch(b, length) for b in batch])
else:
return batch[:length] | def function[_trim_batch, parameter[batch, length]]:
constant[Trim the mini-batch `batch` to the size `length`.
`batch` can be:
    - a NumPy array, in which case its first axis will be trimmed to size
`length`
    - a tuple, in which case `_trim_batch` is applied recursively to
      each element and the resulting tuple is returned
As a consequence, mini-batches can be structured; lists and tuples can
be nested arbitrarily deep.
Parameters
----------
batch: tuple or NumPy array
the mini-batch to trim
length: int
the size to which `batch` is to be trimmed
Returns
-------
tuple or NumPy array of same structure as `batch`
The trimmed mini-batch
]
if call[name[isinstance], parameter[name[batch], name[tuple]]] begin[:]
return[call[name[tuple], parameter[<ast.ListComp object at 0x7da20e9576d0>]]] | keyword[def] identifier[_trim_batch] ( identifier[batch] , identifier[length] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[batch] , identifier[tuple] ):
keyword[return] identifier[tuple] ([ identifier[_trim_batch] ( identifier[b] , identifier[length] ) keyword[for] identifier[b] keyword[in] identifier[batch] ])
keyword[else] :
keyword[return] identifier[batch] [: identifier[length] ] | def _trim_batch(batch, length):
"""Trim the mini-batch `batch` to the size `length`.
`batch` can be:
    - a NumPy array, in which case its first axis will be trimmed to size
`length`
    - a tuple, in which case `_trim_batch` is applied recursively to
      each element and the resulting tuple is returned
As a consequence, mini-batches can be structured; lists and tuples can
be nested arbitrarily deep.
Parameters
----------
batch: tuple or NumPy array
the mini-batch to trim
length: int
the size to which `batch` is to be trimmed
Returns
-------
tuple or NumPy array of same structure as `batch`
The trimmed mini-batch
"""
if isinstance(batch, tuple):
return tuple([_trim_batch(b, length) for b in batch]) # depends on [control=['if'], data=[]]
else:
return batch[:length] |
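A quick check of the recursive trimming above, using a nested tuple of NumPy arrays:

import numpy as np

batch = (np.arange(10), (np.ones((10, 3)), np.zeros(10)))
trimmed = _trim_batch(batch, 4)
assert trimmed[0].shape == (4,)
assert trimmed[1][0].shape == (4, 3)   # nesting is preserved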
def _find_particle_image(self, query, match, all_particles):
"""Find particle with the same index as match in a neighboring tile. """
_, idxs = self.particle_kdtree.query(query.pos, k=10)
neighbors = all_particles[idxs]
for particle in neighbors:
if particle.index == match.index:
return particle
raise MBuildError('Unable to find matching particle image while'
' stitching bonds.') | def function[_find_particle_image, parameter[self, query, match, all_particles]]:
constant[Find particle with the same index as match in a neighboring tile. ]
<ast.Tuple object at 0x7da1b20c49d0> assign[=] call[name[self].particle_kdtree.query, parameter[name[query].pos]]
variable[neighbors] assign[=] call[name[all_particles]][name[idxs]]
for taget[name[particle]] in starred[name[neighbors]] begin[:]
if compare[name[particle].index equal[==] name[match].index] begin[:]
return[name[particle]]
<ast.Raise object at 0x7da1b1d99ff0> | keyword[def] identifier[_find_particle_image] ( identifier[self] , identifier[query] , identifier[match] , identifier[all_particles] ):
literal[string]
identifier[_] , identifier[idxs] = identifier[self] . identifier[particle_kdtree] . identifier[query] ( identifier[query] . identifier[pos] , identifier[k] = literal[int] )
identifier[neighbors] = identifier[all_particles] [ identifier[idxs] ]
keyword[for] identifier[particle] keyword[in] identifier[neighbors] :
keyword[if] identifier[particle] . identifier[index] == identifier[match] . identifier[index] :
keyword[return] identifier[particle]
keyword[raise] identifier[MBuildError] ( literal[string]
literal[string] ) | def _find_particle_image(self, query, match, all_particles):
"""Find particle with the same index as match in a neighboring tile. """
(_, idxs) = self.particle_kdtree.query(query.pos, k=10)
neighbors = all_particles[idxs]
for particle in neighbors:
if particle.index == match.index:
return particle # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['particle']]
raise MBuildError('Unable to find matching particle image while stitching bonds.') |
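A self-contained harness for the lookup above, assuming particle_kdtree is a scipy cKDTree over particle positions and that particles expose .pos and .index; the class and variable names below are invented for illustration.

import numpy as np
from scipy.spatial import cKDTree

class FakeParticle:
    def __init__(self, pos, index):
        self.pos = np.asarray(pos)
        self.index = index

parts = np.array([FakeParticle(np.random.rand(3), i) for i in range(12)],
                 dtype=object)

class FakeTiled:
    particle_kdtree = cKDTree([p.pos for p in parts])
    _find_particle_image = _find_particle_image   # reuse the method above

# The nearest neighbour of a particle's own position is the particle itself.
image = FakeTiled()._find_particle_image(parts[0], parts[0], parts)
assert image.index == parts[0].index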
def load_data(filename):
"""Loads data from a file.
Parameters
----------
filename : :obj:`str`
The file to load the collection from.
Returns
-------
:obj:`numpy.ndarray` of float
The data read from the file.
Raises
------
ValueError
If the file extension is not .npy or .npz.
"""
file_root, file_ext = os.path.splitext(filename)
data = None
if file_ext == '.npy':
data = np.load(filename)
elif file_ext == '.npz':
data = np.load(filename)['arr_0']
else:
raise ValueError('Extension %s not supported for point reads' %(file_ext))
return data | def function[load_data, parameter[filename]]:
constant[Loads data from a file.
Parameters
----------
filename : :obj:`str`
The file to load the collection from.
Returns
-------
:obj:`numpy.ndarray` of float
The data read from the file.
Raises
------
ValueError
If the file extension is not .npy or .npz.
]
<ast.Tuple object at 0x7da1b1219a20> assign[=] call[name[os].path.splitext, parameter[name[filename]]]
variable[data] assign[=] constant[None]
if compare[name[file_ext] equal[==] constant[.npy]] begin[:]
variable[data] assign[=] call[name[np].load, parameter[name[filename]]]
return[name[data]] | keyword[def] identifier[load_data] ( identifier[filename] ):
literal[string]
identifier[file_root] , identifier[file_ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )
identifier[data] = keyword[None]
keyword[if] identifier[file_ext] == literal[string] :
identifier[data] = identifier[np] . identifier[load] ( identifier[filename] )
keyword[elif] identifier[file_ext] == literal[string] :
identifier[data] = identifier[np] . identifier[load] ( identifier[filename] )[ literal[string] ]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[file_ext] ))
keyword[return] identifier[data] | def load_data(filename):
"""Loads data from a file.
Parameters
----------
filename : :obj:`str`
The file to load the collection from.
Returns
-------
:obj:`numpy.ndarray` of float
The data read from the file.
Raises
------
ValueError
If the file extension is not .npy or .npz.
"""
(file_root, file_ext) = os.path.splitext(filename)
data = None
if file_ext == '.npy':
data = np.load(filename) # depends on [control=['if'], data=[]]
elif file_ext == '.npz':
data = np.load(filename)['arr_0'] # depends on [control=['if'], data=[]]
else:
raise ValueError('Extension %s not supported for point reads' % file_ext)
return data |
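A round-trip sanity check for the loader above; it relies on NumPy's default 'arr_0' key for positional arrays saved with np.savez, which the .npz branch reads back:

import os
import tempfile
import numpy as np

path = os.path.join(tempfile.mkdtemp(), 'points.npz')
np.savez(path, np.random.rand(5, 3))   # stored under the key 'arr_0'
assert load_data(path).shape == (5, 3)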
def get_list(
self,
search='',
start=0,
limit=0,
order_by='',
order_by_dir='ASC',
published_only=False,
minimal=False
):
"""
Get a list of items
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:param published_only: bool
:param minimal: bool
:return: dict|str
"""
parameters = {}
args = ['search', 'start', 'limit', 'minimal']
for arg in args:
if arg in locals() and locals()[arg]:
parameters[arg] = locals()[arg]
if order_by:
parameters['orderBy'] = order_by
if order_by_dir:
parameters['orderByDir'] = order_by_dir
if published_only:
parameters['publishedOnly'] = 'true'
response = self._client.session.get(
self.endpoint_url, params=parameters
)
return self.process_response(response) | def function[get_list, parameter[self, search, start, limit, order_by, order_by_dir, published_only, minimal]]:
constant[
Get a list of items
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:param published_only: bool
:param minimal: bool
:return: dict|str
]
variable[parameters] assign[=] dictionary[[], []]
variable[args] assign[=] list[[<ast.Constant object at 0x7da1b0b29870>, <ast.Constant object at 0x7da1b0b2a740>, <ast.Constant object at 0x7da1b0b2af50>, <ast.Constant object at 0x7da1b0b2a050>]]
for taget[name[arg]] in starred[name[args]] begin[:]
if <ast.BoolOp object at 0x7da1b0b2b220> begin[:]
call[name[parameters]][name[arg]] assign[=] call[call[name[locals], parameter[]]][name[arg]]
if name[order_by] begin[:]
call[name[parameters]][constant[orderBy]] assign[=] name[order_by]
if name[order_by_dir] begin[:]
call[name[parameters]][constant[orderByDir]] assign[=] name[order_by_dir]
if name[published_only] begin[:]
call[name[parameters]][constant[publishedOnly]] assign[=] constant[true]
variable[response] assign[=] call[name[self]._client.session.get, parameter[name[self].endpoint_url]]
return[call[name[self].process_response, parameter[name[response]]]] | keyword[def] identifier[get_list] (
identifier[self] ,
identifier[search] = literal[string] ,
identifier[start] = literal[int] ,
identifier[limit] = literal[int] ,
identifier[order_by] = literal[string] ,
identifier[order_by_dir] = literal[string] ,
identifier[published_only] = keyword[False] ,
identifier[minimal] = keyword[False]
):
literal[string]
identifier[parameters] ={}
identifier[args] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[for] identifier[arg] keyword[in] identifier[args] :
keyword[if] identifier[arg] keyword[in] identifier[locals] () keyword[and] identifier[locals] ()[ identifier[arg] ]:
identifier[parameters] [ identifier[arg] ]= identifier[locals] ()[ identifier[arg] ]
keyword[if] identifier[order_by] :
identifier[parameters] [ literal[string] ]= identifier[order_by]
keyword[if] identifier[order_by_dir] :
identifier[parameters] [ literal[string] ]= identifier[order_by_dir]
keyword[if] identifier[published_only] :
identifier[parameters] [ literal[string] ]= literal[string]
identifier[response] = identifier[self] . identifier[_client] . identifier[session] . identifier[get] (
identifier[self] . identifier[endpoint_url] , identifier[params] = identifier[parameters]
)
keyword[return] identifier[self] . identifier[process_response] ( identifier[response] ) | def get_list(self, search='', start=0, limit=0, order_by='', order_by_dir='ASC', published_only=False, minimal=False):
"""
Get a list of items
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:param published_only: bool
:param minimal: bool
:return: dict|str
"""
parameters = {}
args = ['search', 'start', 'limit', 'minimal']
for arg in args:
if arg in locals() and locals()[arg]:
parameters[arg] = locals()[arg] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['arg']]
if order_by:
parameters['orderBy'] = order_by # depends on [control=['if'], data=[]]
if order_by_dir:
parameters['orderByDir'] = order_by_dir # depends on [control=['if'], data=[]]
if published_only:
parameters['publishedOnly'] = 'true' # depends on [control=['if'], data=[]]
response = self._client.session.get(self.endpoint_url, params=parameters)
return self.process_response(response) |
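Hypothetical usage of the listing helper above (the assets endpoint object and field names are illustrative, not from the source). Note that start=0 is falsy, so the truthiness check above silently drops it from the query string:

items = assets.get_list(search='report', start=0, limit=25,
                        order_by='title', order_by_dir='DESC',
                        published_only=True)
# -> GET <endpoint_url>?search=report&limit=25&orderBy=title&orderByDir=DESC&publishedOnly=true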
def get_ot_study_info_from_treebase_nexml(src=None,
nexml_content=None,
encoding=u'utf8',
nexson_syntax_version=DEFAULT_NEXSON_VERSION,
merge_blocks=True,
sort_arbitrary=False):
"""Normalize treebase-specific metadata into the locations where
open tree of life software that expects it.
See get_ot_study_info_from_nexml for the explanation of the src,
nexml_content, encoding, and nexson_syntax_version arguments
If merge_blocks is True then peyotl.manip.merge_otus_and_trees
Actions to "normalize" TreeBase objects to ot Nexson
1. the meta id for any meta item that has only a value and an id
2. throw away rdfs:isDefinedBy
3. otu @label -> otu ^ot:originalLabel
4. ^tb:indentifier.taxon, ^tb:indentifier.taxonVariant and some skos:closeMatch
fields to ^ot:taxonLink
5. remove "@xml:base"
6. coerce edge lengths to native types
"""
# pylint: disable=R0915
raw = get_ot_study_info_from_nexml(src=src,
nexml_content=nexml_content,
encoding=encoding,
nexson_syntax_version=BY_ID_HONEY_BADGERFISH)
nexml = raw['nexml']
SKOS_ALT_LABEL = '^skos:altLabel'
SKOS_CLOSE_MATCH = '^skos:closeMatch'
strippable_pre = {
'http://www.ubio.org/authority/metadata.php?lsid=urn:lsid:ubio.org:namebank:': '@ubio',
'http://purl.uniprot.org/taxonomy/': '@uniprot',
}
moveable2taxon_link = {"^tb:identifier.taxon": '@tb:identifier.taxon',
"^tb:identifier.taxonVariant": '@tb:identifier.taxonVariant', }
to_del = ['^rdfs:isDefinedBy', '@xml:base']
for tag in to_del:
if tag in nexml:
del nexml[tag]
_simplify_all_meta_by_id_del(nexml)
_otu2label = {}
prefix_map = {}
# compose dataDeposit
nexid = nexml['@id']
tb_url = 'http://purl.org/phylo/treebase/phylows/study/TB2:' + nexid
nexml['^ot:dataDeposit'] = {'@href': tb_url}
# compose dataDeposit
bd = nexml.get("^dcterms:bibliographicCitation")
if bd:
nexml['^ot:studyPublicationReference'] = bd
doi = nexml.get('^prism:doi')
if doi:
doi = doi2url(doi)
nexml['^ot:studyPublication'] = {'@href': doi}
year = nexml.get('^prism:publicationDate')
if year:
try:
nexml['^ot:studyYear'] = int(year)
except:
pass
#
for otus in nexml['otusById'].values():
for tag in to_del:
if tag in otus:
del otus[tag]
_simplify_all_meta_by_id_del(otus)
for oid, otu in otus['otuById'].items():
for tag in to_del:
if tag in otu:
del otu[tag]
_simplify_all_meta_by_id_del(otu)
label = otu['@label']
_otu2label[oid] = label
otu['^ot:originalLabel'] = label
del otu['@label']
al = otu.get(SKOS_ALT_LABEL)
if al is not None:
if otu.get('^ot:altLabel') is None:
otu['^ot:altLabel'] = al
del otu[SKOS_ALT_LABEL]
tl = {}
scm = otu.get(SKOS_CLOSE_MATCH)
# _LOG.debug('scm = ' + str(scm))
if scm:
if isinstance(scm, dict):
h = scm.get('@href')
if h:
try:
for p, t in strippable_pre.items():
if h.startswith(p):
ident = h[len(p):]
tl[t] = ident
del otu[SKOS_CLOSE_MATCH]
prefix_map[t] = p
except:
pass
else:
nm = []
try:
for el in scm:
h = el.get('@href')
if h:
found = False
for p, t in strippable_pre.items():
if h.startswith(p):
ident = h[len(p):]
tl[t] = ident
found = True
prefix_map[t] = p
break
if not found:
nm.append(el)
except:
pass
if len(nm) < len(scm):
if len(nm) > 1:
otu[SKOS_CLOSE_MATCH] = nm
elif len(nm) == 1:
otu[SKOS_CLOSE_MATCH] = nm[0]
else:
del otu[SKOS_CLOSE_MATCH]
# _LOG.debug('tl =' + str(tl))
for k, t in moveable2taxon_link.items():
al = otu.get(k)
if al:
tl[t] = al
del otu[k]
if tl:
otu['^ot:taxonLink'] = tl
for trees in nexml['treesById'].values():
for tag in to_del:
if tag in trees:
del trees[tag]
_simplify_all_meta_by_id_del(trees)
for tree in trees['treeById'].values():
for tag in to_del:
if tag in tree:
del tree[tag]
_simplify_all_meta_by_id_del(tree)
tt = tree.get('@xsi:type', 'nex:FloatTree')
if tt.lower() == 'nex:inttree':
e_len_coerce = int
else:
e_len_coerce = float
for edge_d in tree['edgeBySourceId'].values():
for edge in edge_d.values():
try:
x = e_len_coerce(edge['@length'])
edge['@length'] = x
except:
pass
for node in tree['nodeById'].values():
nl = node.get('@label')
if nl:
no = node.get('@otu')
if no and _otu2label[no] == nl:
del node['@label']
if prefix_map:
nexml['^ot:taxonLinkPrefixes'] = prefix_map
if merge_blocks:
from peyotl.manip import merge_otus_and_trees
merge_otus_and_trees(raw)
if nexson_syntax_version != BY_ID_HONEY_BADGERFISH:
convert_nexson_format(raw,
nexson_syntax_version,
current_format=BY_ID_HONEY_BADGERFISH,
sort_arbitrary=sort_arbitrary)
elif sort_arbitrary:
sort_arbitrarily_ordered_nexson(raw)
return raw | def function[get_ot_study_info_from_treebase_nexml, parameter[src, nexml_content, encoding, nexson_syntax_version, merge_blocks, sort_arbitrary]]:
    constant[Normalize treebase-specific metadata into the locations that
    open tree of life software expects.
    See get_ot_study_info_from_nexml for the explanation of the src,
    nexml_content, encoding, and nexson_syntax_version arguments.
    If merge_blocks is True, peyotl.manip.merge_otus_and_trees is called to
    merge the otus and trees blocks.
    Actions taken to "normalize" TreeBase objects to ot Nexson:
    1. simplify the meta id for any meta item that has only a value and an id
    2. throw away rdfs:isDefinedBy
    3. otu @label -> otu ^ot:originalLabel
    4. move ^tb:identifier.taxon, ^tb:identifier.taxonVariant and some skos:closeMatch
       fields to ^ot:taxonLink
    5. remove "@xml:base"
    6. coerce edge lengths to native types
]
variable[raw] assign[=] call[name[get_ot_study_info_from_nexml], parameter[]]
variable[nexml] assign[=] call[name[raw]][constant[nexml]]
variable[SKOS_ALT_LABEL] assign[=] constant[^skos:altLabel]
variable[SKOS_CLOSE_MATCH] assign[=] constant[^skos:closeMatch]
variable[strippable_pre] assign[=] dictionary[[<ast.Constant object at 0x7da204345de0>, <ast.Constant object at 0x7da204344760>], [<ast.Constant object at 0x7da204345e10>, <ast.Constant object at 0x7da204346fb0>]]
variable[moveable2taxon_link] assign[=] dictionary[[<ast.Constant object at 0x7da204345900>, <ast.Constant object at 0x7da204346dd0>], [<ast.Constant object at 0x7da204344ac0>, <ast.Constant object at 0x7da204344820>]]
variable[to_del] assign[=] list[[<ast.Constant object at 0x7da204345690>, <ast.Constant object at 0x7da204345cf0>]]
for taget[name[tag]] in starred[name[to_del]] begin[:]
if compare[name[tag] in name[nexml]] begin[:]
<ast.Delete object at 0x7da204346c80>
call[name[_simplify_all_meta_by_id_del], parameter[name[nexml]]]
variable[_otu2label] assign[=] dictionary[[], []]
variable[prefix_map] assign[=] dictionary[[], []]
variable[nexid] assign[=] call[name[nexml]][constant[@id]]
variable[tb_url] assign[=] binary_operation[constant[http://purl.org/phylo/treebase/phylows/study/TB2:] + name[nexid]]
call[name[nexml]][constant[^ot:dataDeposit]] assign[=] dictionary[[<ast.Constant object at 0x7da2043449d0>], [<ast.Name object at 0x7da204347790>]]
variable[bd] assign[=] call[name[nexml].get, parameter[constant[^dcterms:bibliographicCitation]]]
if name[bd] begin[:]
call[name[nexml]][constant[^ot:studyPublicationReference]] assign[=] name[bd]
variable[doi] assign[=] call[name[nexml].get, parameter[constant[^prism:doi]]]
if name[doi] begin[:]
variable[doi] assign[=] call[name[doi2url], parameter[name[doi]]]
call[name[nexml]][constant[^ot:studyPublication]] assign[=] dictionary[[<ast.Constant object at 0x7da204344580>], [<ast.Name object at 0x7da204346c20>]]
variable[year] assign[=] call[name[nexml].get, parameter[constant[^prism:publicationDate]]]
if name[year] begin[:]
<ast.Try object at 0x7da204344f40>
for taget[name[otus]] in starred[call[call[name[nexml]][constant[otusById]].values, parameter[]]] begin[:]
for taget[name[tag]] in starred[name[to_del]] begin[:]
if compare[name[tag] in name[otus]] begin[:]
<ast.Delete object at 0x7da204344fa0>
call[name[_simplify_all_meta_by_id_del], parameter[name[otus]]]
for taget[tuple[[<ast.Name object at 0x7da204346140>, <ast.Name object at 0x7da204346da0>]]] in starred[call[call[name[otus]][constant[otuById]].items, parameter[]]] begin[:]
for taget[name[tag]] in starred[name[to_del]] begin[:]
if compare[name[tag] in name[otu]] begin[:]
<ast.Delete object at 0x7da2043444f0>
call[name[_simplify_all_meta_by_id_del], parameter[name[otu]]]
variable[label] assign[=] call[name[otu]][constant[@label]]
call[name[_otu2label]][name[oid]] assign[=] name[label]
call[name[otu]][constant[^ot:originalLabel]] assign[=] name[label]
<ast.Delete object at 0x7da204347670>
variable[al] assign[=] call[name[otu].get, parameter[name[SKOS_ALT_LABEL]]]
if compare[name[al] is_not constant[None]] begin[:]
if compare[call[name[otu].get, parameter[constant[^ot:altLabel]]] is constant[None]] begin[:]
call[name[otu]][constant[^ot:altLabel]] assign[=] name[al]
<ast.Delete object at 0x7da204347550>
variable[tl] assign[=] dictionary[[], []]
variable[scm] assign[=] call[name[otu].get, parameter[name[SKOS_CLOSE_MATCH]]]
if name[scm] begin[:]
if call[name[isinstance], parameter[name[scm], name[dict]]] begin[:]
variable[h] assign[=] call[name[scm].get, parameter[constant[@href]]]
if name[h] begin[:]
<ast.Try object at 0x7da204345c00>
for taget[tuple[[<ast.Name object at 0x7da2041d91b0>, <ast.Name object at 0x7da2041db490>]]] in starred[call[name[moveable2taxon_link].items, parameter[]]] begin[:]
variable[al] assign[=] call[name[otu].get, parameter[name[k]]]
if name[al] begin[:]
call[name[tl]][name[t]] assign[=] name[al]
<ast.Delete object at 0x7da2041dbe20>
if name[tl] begin[:]
call[name[otu]][constant[^ot:taxonLink]] assign[=] name[tl]
for taget[name[trees]] in starred[call[call[name[nexml]][constant[treesById]].values, parameter[]]] begin[:]
for taget[name[tag]] in starred[name[to_del]] begin[:]
if compare[name[tag] in name[trees]] begin[:]
<ast.Delete object at 0x7da1b25b0e50>
call[name[_simplify_all_meta_by_id_del], parameter[name[trees]]]
for taget[name[tree]] in starred[call[call[name[trees]][constant[treeById]].values, parameter[]]] begin[:]
for taget[name[tag]] in starred[name[to_del]] begin[:]
if compare[name[tag] in name[tree]] begin[:]
<ast.Delete object at 0x7da1b25b3700>
call[name[_simplify_all_meta_by_id_del], parameter[name[tree]]]
variable[tt] assign[=] call[name[tree].get, parameter[constant[@xsi:type], constant[nex:FloatTree]]]
if compare[call[name[tt].lower, parameter[]] equal[==] constant[nex:inttree]] begin[:]
variable[e_len_coerce] assign[=] name[int]
for taget[name[edge_d]] in starred[call[call[name[tree]][constant[edgeBySourceId]].values, parameter[]]] begin[:]
for taget[name[edge]] in starred[call[name[edge_d].values, parameter[]]] begin[:]
<ast.Try object at 0x7da1b25b1930>
for taget[name[node]] in starred[call[call[name[tree]][constant[nodeById]].values, parameter[]]] begin[:]
variable[nl] assign[=] call[name[node].get, parameter[constant[@label]]]
if name[nl] begin[:]
variable[no] assign[=] call[name[node].get, parameter[constant[@otu]]]
if <ast.BoolOp object at 0x7da1b25b3280> begin[:]
<ast.Delete object at 0x7da1b25b23e0>
if name[prefix_map] begin[:]
call[name[nexml]][constant[^ot:taxonLinkPrefixes]] assign[=] name[prefix_map]
if name[merge_blocks] begin[:]
from relative_module[peyotl.manip] import module[merge_otus_and_trees]
call[name[merge_otus_and_trees], parameter[name[raw]]]
if compare[name[nexson_syntax_version] not_equal[!=] name[BY_ID_HONEY_BADGERFISH]] begin[:]
call[name[convert_nexson_format], parameter[name[raw], name[nexson_syntax_version]]]
return[name[raw]] | keyword[def] identifier[get_ot_study_info_from_treebase_nexml] ( identifier[src] = keyword[None] ,
identifier[nexml_content] = keyword[None] ,
identifier[encoding] = literal[string] ,
identifier[nexson_syntax_version] = identifier[DEFAULT_NEXSON_VERSION] ,
identifier[merge_blocks] = keyword[True] ,
identifier[sort_arbitrary] = keyword[False] ):
literal[string]
identifier[raw] = identifier[get_ot_study_info_from_nexml] ( identifier[src] = identifier[src] ,
identifier[nexml_content] = identifier[nexml_content] ,
identifier[encoding] = identifier[encoding] ,
identifier[nexson_syntax_version] = identifier[BY_ID_HONEY_BADGERFISH] )
identifier[nexml] = identifier[raw] [ literal[string] ]
identifier[SKOS_ALT_LABEL] = literal[string]
identifier[SKOS_CLOSE_MATCH] = literal[string]
identifier[strippable_pre] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
identifier[moveable2taxon_link] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,}
identifier[to_del] =[ literal[string] , literal[string] ]
keyword[for] identifier[tag] keyword[in] identifier[to_del] :
keyword[if] identifier[tag] keyword[in] identifier[nexml] :
keyword[del] identifier[nexml] [ identifier[tag] ]
identifier[_simplify_all_meta_by_id_del] ( identifier[nexml] )
identifier[_otu2label] ={}
identifier[prefix_map] ={}
identifier[nexid] = identifier[nexml] [ literal[string] ]
identifier[tb_url] = literal[string] + identifier[nexid]
identifier[nexml] [ literal[string] ]={ literal[string] : identifier[tb_url] }
identifier[bd] = identifier[nexml] . identifier[get] ( literal[string] )
keyword[if] identifier[bd] :
identifier[nexml] [ literal[string] ]= identifier[bd]
identifier[doi] = identifier[nexml] . identifier[get] ( literal[string] )
keyword[if] identifier[doi] :
identifier[doi] = identifier[doi2url] ( identifier[doi] )
identifier[nexml] [ literal[string] ]={ literal[string] : identifier[doi] }
identifier[year] = identifier[nexml] . identifier[get] ( literal[string] )
keyword[if] identifier[year] :
keyword[try] :
identifier[nexml] [ literal[string] ]= identifier[int] ( identifier[year] )
keyword[except] :
keyword[pass]
keyword[for] identifier[otus] keyword[in] identifier[nexml] [ literal[string] ]. identifier[values] ():
keyword[for] identifier[tag] keyword[in] identifier[to_del] :
keyword[if] identifier[tag] keyword[in] identifier[otus] :
keyword[del] identifier[otus] [ identifier[tag] ]
identifier[_simplify_all_meta_by_id_del] ( identifier[otus] )
keyword[for] identifier[oid] , identifier[otu] keyword[in] identifier[otus] [ literal[string] ]. identifier[items] ():
keyword[for] identifier[tag] keyword[in] identifier[to_del] :
keyword[if] identifier[tag] keyword[in] identifier[otu] :
keyword[del] identifier[otu] [ identifier[tag] ]
identifier[_simplify_all_meta_by_id_del] ( identifier[otu] )
identifier[label] = identifier[otu] [ literal[string] ]
identifier[_otu2label] [ identifier[oid] ]= identifier[label]
identifier[otu] [ literal[string] ]= identifier[label]
keyword[del] identifier[otu] [ literal[string] ]
identifier[al] = identifier[otu] . identifier[get] ( identifier[SKOS_ALT_LABEL] )
keyword[if] identifier[al] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[otu] . identifier[get] ( literal[string] ) keyword[is] keyword[None] :
identifier[otu] [ literal[string] ]= identifier[al]
keyword[del] identifier[otu] [ identifier[SKOS_ALT_LABEL] ]
identifier[tl] ={}
identifier[scm] = identifier[otu] . identifier[get] ( identifier[SKOS_CLOSE_MATCH] )
keyword[if] identifier[scm] :
keyword[if] identifier[isinstance] ( identifier[scm] , identifier[dict] ):
identifier[h] = identifier[scm] . identifier[get] ( literal[string] )
keyword[if] identifier[h] :
keyword[try] :
keyword[for] identifier[p] , identifier[t] keyword[in] identifier[strippable_pre] . identifier[items] ():
keyword[if] identifier[h] . identifier[startswith] ( identifier[p] ):
identifier[ident] = identifier[h] [ identifier[len] ( identifier[p] ):]
identifier[tl] [ identifier[t] ]= identifier[ident]
keyword[del] identifier[otu] [ identifier[SKOS_CLOSE_MATCH] ]
identifier[prefix_map] [ identifier[t] ]= identifier[p]
keyword[except] :
keyword[pass]
keyword[else] :
identifier[nm] =[]
keyword[try] :
keyword[for] identifier[el] keyword[in] identifier[scm] :
identifier[h] = identifier[el] . identifier[get] ( literal[string] )
keyword[if] identifier[h] :
identifier[found] = keyword[False]
keyword[for] identifier[p] , identifier[t] keyword[in] identifier[strippable_pre] . identifier[items] ():
keyword[if] identifier[h] . identifier[startswith] ( identifier[p] ):
identifier[ident] = identifier[h] [ identifier[len] ( identifier[p] ):]
identifier[tl] [ identifier[t] ]= identifier[ident]
identifier[found] = keyword[True]
identifier[prefix_map] [ identifier[t] ]= identifier[p]
keyword[break]
keyword[if] keyword[not] identifier[found] :
identifier[nm] . identifier[append] ( identifier[el] )
keyword[except] :
keyword[pass]
keyword[if] identifier[len] ( identifier[nm] )< identifier[len] ( identifier[scm] ):
keyword[if] identifier[len] ( identifier[nm] )> literal[int] :
identifier[otu] [ identifier[SKOS_CLOSE_MATCH] ]= identifier[nm]
keyword[elif] identifier[len] ( identifier[nm] )== literal[int] :
identifier[otu] [ identifier[SKOS_CLOSE_MATCH] ]= identifier[nm] [ literal[int] ]
keyword[else] :
keyword[del] identifier[otu] [ identifier[SKOS_CLOSE_MATCH] ]
keyword[for] identifier[k] , identifier[t] keyword[in] identifier[moveable2taxon_link] . identifier[items] ():
identifier[al] = identifier[otu] . identifier[get] ( identifier[k] )
keyword[if] identifier[al] :
identifier[tl] [ identifier[t] ]= identifier[al]
keyword[del] identifier[otu] [ identifier[k] ]
keyword[if] identifier[tl] :
identifier[otu] [ literal[string] ]= identifier[tl]
keyword[for] identifier[trees] keyword[in] identifier[nexml] [ literal[string] ]. identifier[values] ():
keyword[for] identifier[tag] keyword[in] identifier[to_del] :
keyword[if] identifier[tag] keyword[in] identifier[trees] :
keyword[del] identifier[trees] [ identifier[tag] ]
identifier[_simplify_all_meta_by_id_del] ( identifier[trees] )
keyword[for] identifier[tree] keyword[in] identifier[trees] [ literal[string] ]. identifier[values] ():
keyword[for] identifier[tag] keyword[in] identifier[to_del] :
keyword[if] identifier[tag] keyword[in] identifier[tree] :
keyword[del] identifier[tree] [ identifier[tag] ]
identifier[_simplify_all_meta_by_id_del] ( identifier[tree] )
identifier[tt] = identifier[tree] . identifier[get] ( literal[string] , literal[string] )
keyword[if] identifier[tt] . identifier[lower] ()== literal[string] :
identifier[e_len_coerce] = identifier[int]
keyword[else] :
identifier[e_len_coerce] = identifier[float]
keyword[for] identifier[edge_d] keyword[in] identifier[tree] [ literal[string] ]. identifier[values] ():
keyword[for] identifier[edge] keyword[in] identifier[edge_d] . identifier[values] ():
keyword[try] :
identifier[x] = identifier[e_len_coerce] ( identifier[edge] [ literal[string] ])
identifier[edge] [ literal[string] ]= identifier[x]
keyword[except] :
keyword[pass]
keyword[for] identifier[node] keyword[in] identifier[tree] [ literal[string] ]. identifier[values] ():
identifier[nl] = identifier[node] . identifier[get] ( literal[string] )
keyword[if] identifier[nl] :
identifier[no] = identifier[node] . identifier[get] ( literal[string] )
keyword[if] identifier[no] keyword[and] identifier[_otu2label] [ identifier[no] ]== identifier[nl] :
keyword[del] identifier[node] [ literal[string] ]
keyword[if] identifier[prefix_map] :
identifier[nexml] [ literal[string] ]= identifier[prefix_map]
keyword[if] identifier[merge_blocks] :
keyword[from] identifier[peyotl] . identifier[manip] keyword[import] identifier[merge_otus_and_trees]
identifier[merge_otus_and_trees] ( identifier[raw] )
keyword[if] identifier[nexson_syntax_version] != identifier[BY_ID_HONEY_BADGERFISH] :
identifier[convert_nexson_format] ( identifier[raw] ,
identifier[nexson_syntax_version] ,
identifier[current_format] = identifier[BY_ID_HONEY_BADGERFISH] ,
identifier[sort_arbitrary] = identifier[sort_arbitrary] )
keyword[elif] identifier[sort_arbitrary] :
identifier[sort_arbitrarily_ordered_nexson] ( identifier[raw] )
keyword[return] identifier[raw] | def get_ot_study_info_from_treebase_nexml(src=None, nexml_content=None, encoding=u'utf8', nexson_syntax_version=DEFAULT_NEXSON_VERSION, merge_blocks=True, sort_arbitrary=False):
"""Normalize treebase-specific metadata into the locations where
open tree of life software that expects it.
See get_ot_study_info_from_nexml for the explanation of the src,
nexml_content, encoding, and nexson_syntax_version arguments
If merge_blocks is True then peyotl.manip.merge_otus_and_trees
Actions to "normalize" TreeBase objects to ot Nexson
1. the meta id for any meta item that has only a value and an id
2. throw away rdfs:isDefinedBy
3. otu @label -> otu ^ot:originalLabel
4. ^tb:indentifier.taxon, ^tb:indentifier.taxonVariant and some skos:closeMatch
fields to ^ot:taxonLink
5. remove "@xml:base"
6. coerce edge lengths to native types
"""
# pylint: disable=R0915
raw = get_ot_study_info_from_nexml(src=src, nexml_content=nexml_content, encoding=encoding, nexson_syntax_version=BY_ID_HONEY_BADGERFISH)
nexml = raw['nexml']
SKOS_ALT_LABEL = '^skos:altLabel'
SKOS_CLOSE_MATCH = '^skos:closeMatch'
strippable_pre = {'http://www.ubio.org/authority/metadata.php?lsid=urn:lsid:ubio.org:namebank:': '@ubio', 'http://purl.uniprot.org/taxonomy/': '@uniprot'}
moveable2taxon_link = {'^tb:identifier.taxon': '@tb:identifier.taxon', '^tb:identifier.taxonVariant': '@tb:identifier.taxonVariant'}
to_del = ['^rdfs:isDefinedBy', '@xml:base']
for tag in to_del:
if tag in nexml:
del nexml[tag] # depends on [control=['if'], data=['tag', 'nexml']] # depends on [control=['for'], data=['tag']]
_simplify_all_meta_by_id_del(nexml)
_otu2label = {}
prefix_map = {}
# compose dataDeposit
nexid = nexml['@id']
tb_url = 'http://purl.org/phylo/treebase/phylows/study/TB2:' + nexid
nexml['^ot:dataDeposit'] = {'@href': tb_url}
# compose dataDeposit
bd = nexml.get('^dcterms:bibliographicCitation')
if bd:
nexml['^ot:studyPublicationReference'] = bd # depends on [control=['if'], data=[]]
doi = nexml.get('^prism:doi')
if doi:
doi = doi2url(doi)
nexml['^ot:studyPublication'] = {'@href': doi} # depends on [control=['if'], data=[]]
year = nexml.get('^prism:publicationDate')
if year:
try:
nexml['^ot:studyYear'] = int(year) # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
#
for otus in nexml['otusById'].values():
for tag in to_del:
if tag in otus:
del otus[tag] # depends on [control=['if'], data=['tag', 'otus']] # depends on [control=['for'], data=['tag']]
_simplify_all_meta_by_id_del(otus)
for (oid, otu) in otus['otuById'].items():
for tag in to_del:
if tag in otu:
del otu[tag] # depends on [control=['if'], data=['tag', 'otu']] # depends on [control=['for'], data=['tag']]
_simplify_all_meta_by_id_del(otu)
label = otu['@label']
_otu2label[oid] = label
otu['^ot:originalLabel'] = label
del otu['@label']
al = otu.get(SKOS_ALT_LABEL)
if al is not None:
if otu.get('^ot:altLabel') is None:
otu['^ot:altLabel'] = al # depends on [control=['if'], data=[]]
del otu[SKOS_ALT_LABEL] # depends on [control=['if'], data=['al']]
tl = {}
scm = otu.get(SKOS_CLOSE_MATCH)
# _LOG.debug('scm = ' + str(scm))
if scm:
if isinstance(scm, dict):
h = scm.get('@href')
if h:
try:
for (p, t) in strippable_pre.items():
if h.startswith(p):
ident = h[len(p):]
tl[t] = ident
del otu[SKOS_CLOSE_MATCH]
prefix_map[t] = p # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
nm = []
try:
for el in scm:
h = el.get('@href')
if h:
found = False
for (p, t) in strippable_pre.items():
if h.startswith(p):
ident = h[len(p):]
tl[t] = ident
found = True
prefix_map[t] = p
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if not found:
nm.append(el) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['el']] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
if len(nm) < len(scm):
if len(nm) > 1:
otu[SKOS_CLOSE_MATCH] = nm # depends on [control=['if'], data=[]]
elif len(nm) == 1:
otu[SKOS_CLOSE_MATCH] = nm[0] # depends on [control=['if'], data=[]]
else:
del otu[SKOS_CLOSE_MATCH] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# _LOG.debug('tl =' + str(tl))
for (k, t) in moveable2taxon_link.items():
al = otu.get(k)
if al:
tl[t] = al
del otu[k] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if tl:
otu['^ot:taxonLink'] = tl # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['otus']]
for trees in nexml['treesById'].values():
for tag in to_del:
if tag in trees:
del trees[tag] # depends on [control=['if'], data=['tag', 'trees']] # depends on [control=['for'], data=['tag']]
_simplify_all_meta_by_id_del(trees)
for tree in trees['treeById'].values():
for tag in to_del:
if tag in tree:
del tree[tag] # depends on [control=['if'], data=['tag', 'tree']] # depends on [control=['for'], data=['tag']]
_simplify_all_meta_by_id_del(tree)
tt = tree.get('@xsi:type', 'nex:FloatTree')
if tt.lower() == 'nex:inttree':
e_len_coerce = int # depends on [control=['if'], data=[]]
else:
e_len_coerce = float
for edge_d in tree['edgeBySourceId'].values():
for edge in edge_d.values():
try:
x = e_len_coerce(edge['@length'])
edge['@length'] = x # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['edge']] # depends on [control=['for'], data=['edge_d']]
for node in tree['nodeById'].values():
nl = node.get('@label')
if nl:
no = node.get('@otu')
if no and _otu2label[no] == nl:
del node['@label'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']] # depends on [control=['for'], data=['tree']] # depends on [control=['for'], data=['trees']]
if prefix_map:
nexml['^ot:taxonLinkPrefixes'] = prefix_map # depends on [control=['if'], data=[]]
if merge_blocks:
from peyotl.manip import merge_otus_and_trees
merge_otus_and_trees(raw) # depends on [control=['if'], data=[]]
if nexson_syntax_version != BY_ID_HONEY_BADGERFISH:
convert_nexson_format(raw, nexson_syntax_version, current_format=BY_ID_HONEY_BADGERFISH, sort_arbitrary=sort_arbitrary) # depends on [control=['if'], data=['nexson_syntax_version', 'BY_ID_HONEY_BADGERFISH']]
elif sort_arbitrary:
sort_arbitrarily_ordered_nexson(raw) # depends on [control=['if'], data=[]]
return raw |
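A sketch of normalising a downloaded TreeBASE study with the converter above (the filename and study id are illustrative):

nexson = get_ot_study_info_from_treebase_nexml(src='S12345.xml',
                                               merge_blocks=True,
                                               sort_arbitrary=True)
print(nexson['nexml']['^ot:dataDeposit']['@href'])
# e.g. http://purl.org/phylo/treebase/phylows/study/TB2:S12345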
def tange_pth(v, temp, v0, gamma0, a, b, theta0, n, z,
t_ref=300., three_r=3. * constants.R):
"""
calculate thermal pressure for the Tange equation
:param v: unit-cell volume in A^3
:param temp: temperature in K
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param a: volume-independent adjustable parameters
:param b: volume-independent adjustable parameters
:param theta0: Debye temperature at 1 bar in K
:param n: number of atoms in a formula unit
:param z: number of formula unit in a unit cell
:param t_ref: reference temperature
:param three_r: 3R in case adjustment is needed
:return: thermal pressure in GPa
"""
v_mol = vol_uc2mol(v, z)
gamma = tange_grun(v, v0, gamma0, a, b)
theta = tange_debyetemp(v, v0, gamma0, a, b, theta0)
xx = theta / temp
debye = debye_E(xx)
if t_ref == 0.:
debye0 = 0.
else:
xx0 = theta / t_ref
debye0 = debye_E(xx0)
Eth0 = three_r * n * t_ref * debye0
Eth = three_r * n * temp * debye
delEth = Eth - Eth0
p_th = (gamma / v_mol * delEth) * 1.e-9
return p_th | def function[tange_pth, parameter[v, temp, v0, gamma0, a, b, theta0, n, z, t_ref, three_r]]:
constant[
calculate thermal pressure for the Tange equation
:param v: unit-cell volume in A^3
:param temp: temperature in K
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param a: volume-independent adjustable parameters
:param b: volume-independent adjustable parameters
:param theta0: Debye temperature at 1 bar in K
:param n: number of atoms in a formula unit
:param z: number of formula unit in a unit cell
:param t_ref: reference temperature
:param three_r: 3R in case adjustment is needed
:return: thermal pressure in GPa
]
variable[v_mol] assign[=] call[name[vol_uc2mol], parameter[name[v], name[z]]]
variable[gamma] assign[=] call[name[tange_grun], parameter[name[v], name[v0], name[gamma0], name[a], name[b]]]
variable[theta] assign[=] call[name[tange_debyetemp], parameter[name[v], name[v0], name[gamma0], name[a], name[b], name[theta0]]]
variable[xx] assign[=] binary_operation[name[theta] / name[temp]]
variable[debye] assign[=] call[name[debye_E], parameter[name[xx]]]
if compare[name[t_ref] equal[==] constant[0.0]] begin[:]
variable[debye0] assign[=] constant[0.0]
variable[Eth0] assign[=] binary_operation[binary_operation[binary_operation[name[three_r] * name[n]] * name[t_ref]] * name[debye0]]
variable[Eth] assign[=] binary_operation[binary_operation[binary_operation[name[three_r] * name[n]] * name[temp]] * name[debye]]
variable[delEth] assign[=] binary_operation[name[Eth] - name[Eth0]]
variable[p_th] assign[=] binary_operation[binary_operation[binary_operation[name[gamma] / name[v_mol]] * name[delEth]] * constant[1e-09]]
return[name[p_th]] | keyword[def] identifier[tange_pth] ( identifier[v] , identifier[temp] , identifier[v0] , identifier[gamma0] , identifier[a] , identifier[b] , identifier[theta0] , identifier[n] , identifier[z] ,
identifier[t_ref] = literal[int] , identifier[three_r] = literal[int] * identifier[constants] . identifier[R] ):
literal[string]
identifier[v_mol] = identifier[vol_uc2mol] ( identifier[v] , identifier[z] )
identifier[gamma] = identifier[tange_grun] ( identifier[v] , identifier[v0] , identifier[gamma0] , identifier[a] , identifier[b] )
identifier[theta] = identifier[tange_debyetemp] ( identifier[v] , identifier[v0] , identifier[gamma0] , identifier[a] , identifier[b] , identifier[theta0] )
identifier[xx] = identifier[theta] / identifier[temp]
identifier[debye] = identifier[debye_E] ( identifier[xx] )
keyword[if] identifier[t_ref] == literal[int] :
identifier[debye0] = literal[int]
keyword[else] :
identifier[xx0] = identifier[theta] / identifier[t_ref]
identifier[debye0] = identifier[debye_E] ( identifier[xx0] )
identifier[Eth0] = identifier[three_r] * identifier[n] * identifier[t_ref] * identifier[debye0]
identifier[Eth] = identifier[three_r] * identifier[n] * identifier[temp] * identifier[debye]
identifier[delEth] = identifier[Eth] - identifier[Eth0]
identifier[p_th] =( identifier[gamma] / identifier[v_mol] * identifier[delEth] )* literal[int]
keyword[return] identifier[p_th] | def tange_pth(v, temp, v0, gamma0, a, b, theta0, n, z, t_ref=300.0, three_r=3.0 * constants.R):
"""
calculate thermal pressure for the Tange equation
:param v: unit-cell volume in A^3
:param temp: temperature in K
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param a: volume-independent adjustable parameters
:param b: volume-independent adjustable parameters
:param theta0: Debye temperature at 1 bar in K
:param n: number of atoms in a formula unit
:param z: number of formula unit in a unit cell
:param t_ref: reference temperature
:param three_r: 3R in case adjustment is needed
:return: thermal pressure in GPa
"""
v_mol = vol_uc2mol(v, z)
gamma = tange_grun(v, v0, gamma0, a, b)
theta = tange_debyetemp(v, v0, gamma0, a, b, theta0)
xx = theta / temp
debye = debye_E(xx)
if t_ref == 0.0:
debye0 = 0.0 # depends on [control=['if'], data=[]]
else:
xx0 = theta / t_ref
debye0 = debye_E(xx0)
Eth0 = three_r * n * t_ref * debye0
Eth = three_r * n * temp * debye
delEth = Eth - Eth0
p_th = gamma / v_mol * delEth * 1e-09
return p_th |
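In effect the routine computes P_th = gamma(V) / V_mol * [E_th(T) - E_th(T_ref)], with Debye thermal energy E_th = 3*n*R*T*D(theta/T), converted to GPa. An illustrative call (the parameter values are invented, not fitted to any real mineral; tange_grun, tange_debyetemp, debye_E and vol_uc2mol are assumed to come from the same module):

p_th = tange_pth(v=160.0, temp=2000.0, v0=163.0, gamma0=1.5,
                 a=0.8, b=4.0, theta0=1100.0, n=5, z=4)   # GPa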
def setCurrentSchemaPath(self, path):
"""
Sets the current item based on the inputed column.
:param path | <str>
"""
if not path:
return False
parts = path.split('.')
name = parts[0]
next = parts[1:]
if name == self.text(0):
if next:
self.load()
path = '.'.join(next)
for c in range(self.childCount()):
if self.child(c).setCurrentSchemaPath(path):
self.setExpanded(True)
return True
return False
else:
self.treeWidget().setCurrentItem(self)
return True
return False | def function[setCurrentSchemaPath, parameter[self, path]]:
constant[
    Sets the current item based on the given schema path.
:param path | <str>
]
if <ast.UnaryOp object at 0x7da18f58e5f0> begin[:]
return[constant[False]]
variable[parts] assign[=] call[name[path].split, parameter[constant[.]]]
variable[name] assign[=] call[name[parts]][constant[0]]
variable[next] assign[=] call[name[parts]][<ast.Slice object at 0x7da18f58ee90>]
if compare[name[name] equal[==] call[name[self].text, parameter[constant[0]]]] begin[:]
if name[next] begin[:]
call[name[self].load, parameter[]]
variable[path] assign[=] call[constant[.].join, parameter[name[next]]]
for taget[name[c]] in starred[call[name[range], parameter[call[name[self].childCount, parameter[]]]]] begin[:]
if call[call[name[self].child, parameter[name[c]]].setCurrentSchemaPath, parameter[name[path]]] begin[:]
call[name[self].setExpanded, parameter[constant[True]]]
return[constant[True]]
return[constant[False]]
return[constant[False]] | keyword[def] identifier[setCurrentSchemaPath] ( identifier[self] , identifier[path] ):
literal[string]
keyword[if] keyword[not] identifier[path] :
keyword[return] keyword[False]
identifier[parts] = identifier[path] . identifier[split] ( literal[string] )
identifier[name] = identifier[parts] [ literal[int] ]
identifier[next] = identifier[parts] [ literal[int] :]
keyword[if] identifier[name] == identifier[self] . identifier[text] ( literal[int] ):
keyword[if] identifier[next] :
identifier[self] . identifier[load] ()
identifier[path] = literal[string] . identifier[join] ( identifier[next] )
keyword[for] identifier[c] keyword[in] identifier[range] ( identifier[self] . identifier[childCount] ()):
keyword[if] identifier[self] . identifier[child] ( identifier[c] ). identifier[setCurrentSchemaPath] ( identifier[path] ):
identifier[self] . identifier[setExpanded] ( keyword[True] )
keyword[return] keyword[True]
keyword[return] keyword[False]
keyword[else] :
identifier[self] . identifier[treeWidget] (). identifier[setCurrentItem] ( identifier[self] )
keyword[return] keyword[True]
keyword[return] keyword[False] | def setCurrentSchemaPath(self, path):
"""
Sets the current item based on the inputed column.
:param path | <str>
"""
if not path:
return False # depends on [control=['if'], data=[]]
parts = path.split('.')
name = parts[0]
next = parts[1:]
if name == self.text(0):
if next:
self.load()
path = '.'.join(next)
for c in range(self.childCount()):
if self.child(c).setCurrentSchemaPath(path):
self.setExpanded(True)
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
return False # depends on [control=['if'], data=[]]
else:
self.treeWidget().setCurrentItem(self)
return True # depends on [control=['if'], data=[]]
return False |
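The method above consumes a dotted path one segment per tree level, recursing into children on a name match. A Qt-free illustration of the same pattern over plain dicts (all names invented):

def select(node, path):
    name, _, rest = path.partition('.')
    if node['name'] != name:
        return None                      # this subtree cannot match
    if not rest:
        return node                      # exact match at this level
    for child in node['children']:
        found = select(child, rest)
        if found is not None:
            return found
    return None

tree = {'name': 'db', 'children': [{'name': 'users', 'children': []}]}
assert select(tree, 'db.users')['name'] == 'users'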
def messages_from_response(response):
"""Returns a list of the messages from the django MessageMiddleware
package contained within the given response. This is to be used during
unit testing when trying to see if a message was set properly in a view.
:param response: HttpResponse object, likely obtained through a
test client.get() or client.post() call
:returns: a list of tuples (message_string, message_level), one for each
message in the response context
"""
messages = []
if hasattr(response, 'context') and response.context and \
'messages' in response.context:
messages = response.context['messages']
elif hasattr(response, 'cookies'):
# no "context" set-up or no messages item, check for message info in
# the cookies
morsel = response.cookies.get('messages')
if not morsel:
return []
# use the decoder in the CookieStore to process and get a list of
# messages
from django.contrib.messages.storage.cookie import CookieStorage
store = CookieStorage(FakeRequest())
messages = store._decode(morsel.value)
else:
return []
return [(m.message, m.level) for m in messages] | def function[messages_from_response, parameter[response]]:
constant[Returns a list of the messages from the django MessageMiddleware
package contained within the given response. This is to be used during
unit testing when trying to see if a message was set properly in a view.
:param response: HttpResponse object, likely obtained through a
test client.get() or client.post() call
:returns: a list of tuples (message_string, message_level), one for each
message in the response context
]
variable[messages] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b0048a90> begin[:]
variable[messages] assign[=] call[name[response].context][constant[messages]]
return[<ast.ListComp object at 0x7da1afea5e70>] | keyword[def] identifier[messages_from_response] ( identifier[response] ):
literal[string]
identifier[messages] =[]
keyword[if] identifier[hasattr] ( identifier[response] , literal[string] ) keyword[and] identifier[response] . identifier[context] keyword[and] literal[string] keyword[in] identifier[response] . identifier[context] :
identifier[messages] = identifier[response] . identifier[context] [ literal[string] ]
keyword[elif] identifier[hasattr] ( identifier[response] , literal[string] ):
identifier[morsel] = identifier[response] . identifier[cookies] . identifier[get] ( literal[string] )
keyword[if] keyword[not] identifier[morsel] :
keyword[return] []
keyword[from] identifier[django] . identifier[contrib] . identifier[messages] . identifier[storage] . identifier[cookie] keyword[import] identifier[CookieStorage]
identifier[store] = identifier[CookieStorage] ( identifier[FakeRequest] ())
identifier[messages] = identifier[store] . identifier[_decode] ( identifier[morsel] . identifier[value] )
keyword[else] :
keyword[return] []
keyword[return] [( identifier[m] . identifier[message] , identifier[m] . identifier[level] ) keyword[for] identifier[m] keyword[in] identifier[messages] ] | def messages_from_response(response):
"""Returns a list of the messages from the django MessageMiddleware
package contained within the given response. This is to be used during
unit testing when trying to see if a message was set properly in a view.
:param response: HttpResponse object, likely obtained through a
test client.get() or client.post() call
:returns: a list of tuples (message_string, message_level), one for each
message in the response context
"""
messages = []
if hasattr(response, 'context') and response.context and ('messages' in response.context):
messages = response.context['messages'] # depends on [control=['if'], data=[]]
elif hasattr(response, 'cookies'):
# no "context" set-up or no messages item, check for message info in
# the cookies
morsel = response.cookies.get('messages')
if not morsel:
return [] # depends on [control=['if'], data=[]]
# use the decoder in the CookieStore to process and get a list of
# messages
from django.contrib.messages.storage.cookie import CookieStorage
store = CookieStorage(FakeRequest())
messages = store._decode(morsel.value) # depends on [control=['if'], data=[]]
else:
return []
return [(m.message, m.level) for m in messages] |
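A sketch of using the helper inside a Django TestCase (the URL and the expected message are hypothetical):

from django.contrib import messages

response = self.client.post('/items/add/', {'name': 'widget'}, follow=True)
self.assertIn(('Item created.', messages.SUCCESS),
              messages_from_response(response))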
def get_data(self, safe_copy=False):
"""Get the data in the image.
        If safe_copy is True, will perform a deep copy of the data and return it.
        Parameters
        ----------
        safe_copy: (optional) bool
            If True, performs a deep copy of the data before returning it.
Returns
-------
np.ndarray
"""
if safe_copy:
data = get_data(self.img)
else:
data = self.img.get_data(caching=self._caching)
return data | def function[get_data, parameter[self, safe_copy]]:
constant[Get the data in the image.
    If safe_copy is True, will perform a deep copy of the data and return it.
    Parameters
    ----------
    safe_copy: (optional) bool
        If True, performs a deep copy of the data before returning it.
Returns
-------
np.ndarray
]
if name[safe_copy] begin[:]
variable[data] assign[=] call[name[get_data], parameter[name[self].img]]
return[name[data]] | keyword[def] identifier[get_data] ( identifier[self] , identifier[safe_copy] = keyword[False] ):
literal[string]
keyword[if] identifier[safe_copy] :
identifier[data] = identifier[get_data] ( identifier[self] . identifier[img] )
keyword[else] :
identifier[data] = identifier[self] . identifier[img] . identifier[get_data] ( identifier[caching] = identifier[self] . identifier[_caching] )
keyword[return] identifier[data] | def get_data(self, safe_copy=False):
"""Get the data in the image.
        If safe_copy is True, will perform a deep copy of the data and return it.
        Parameters
        ----------
        safe_copy: (optional) bool
            If True, performs a deep copy of the data before returning it.
Returns
-------
np.ndarray
"""
if safe_copy:
data = get_data(self.img) # depends on [control=['if'], data=[]]
else:
data = self.img.get_data(caching=self._caching)
return data |
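Usage sketch, assuming ni wraps a nibabel image as in the class above; safe_copy=True detaches the returned array from nibabel's cache:

arr = ni.get_data(safe_copy=True)   # independent copy, safe to mutate
arr2 = ni.get_data()                # may share nibabel's cached array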
def BC_Rigidity(self):
"""
        Utility function to help implement boundary conditions by specifying
        them for, and applying them to, the elastic thickness grid
"""
#########################################
# FLEXURAL RIGIDITY BOUNDARY CONDITIONS #
#########################################
# West
if self.BC_W == 'Periodic':
self.BC_Rigidity_W = 'periodic'
elif (self.BC_W == np.array(['0Displacement0Slope', '0Moment0Shear', '0Slope0Shear'])).any():
self.BC_Rigidity_W = '0 curvature'
elif self.BC_W == 'Mirror':
self.BC_Rigidity_W = 'mirror symmetry'
else:
sys.exit("Invalid Te B.C. case")
# East
if self.BC_E == 'Periodic':
self.BC_Rigidity_E = 'periodic'
elif (self.BC_E == np.array(['0Displacement0Slope', '0Moment0Shear', '0Slope0Shear'])).any():
self.BC_Rigidity_E = '0 curvature'
elif self.BC_E == 'Mirror':
self.BC_Rigidity_E = 'mirror symmetry'
else:
sys.exit("Invalid Te B.C. case")
# North
if self.BC_N == 'Periodic':
self.BC_Rigidity_N = 'periodic'
elif (self.BC_N == np.array(['0Displacement0Slope', '0Moment0Shear', '0Slope0Shear'])).any():
self.BC_Rigidity_N = '0 curvature'
elif self.BC_N == 'Mirror':
self.BC_Rigidity_N = 'mirror symmetry'
else:
sys.exit("Invalid Te B.C. case")
# South
if self.BC_S == 'Periodic':
self.BC_Rigidity_S = 'periodic'
elif (self.BC_S == np.array(['0Displacement0Slope', '0Moment0Shear', '0Slope0Shear'])).any():
self.BC_Rigidity_S = '0 curvature'
elif self.BC_S == 'Mirror':
self.BC_Rigidity_S = 'mirror symmetry'
else:
sys.exit("Invalid Te B.C. case")
#############
# PAD ARRAY #
#############
if np.isscalar(self.Te):
self.D *= np.ones(self.qs.shape) # And leave Te as a scalar for checks
else:
self.Te_unpadded = self.Te.copy()
self.Te = np.hstack(( np.nan*np.zeros((self.Te.shape[0], 1)), self.Te, np.nan*np.zeros((self.Te.shape[0], 1)) ))
self.Te = np.vstack(( np.nan*np.zeros(self.Te.shape[1]), self.Te, np.nan*np.zeros(self.Te.shape[1]) ))
self.D = np.hstack(( np.nan*np.zeros((self.D.shape[0], 1)), self.D, np.nan*np.zeros((self.D.shape[0], 1)) ))
self.D = np.vstack(( np.nan*np.zeros(self.D.shape[1]), self.D, np.nan*np.zeros(self.D.shape[1]) ))
###############################################################
# APPLY FLEXURAL RIGIDITY BOUNDARY CONDITIONS TO PADDED ARRAY #
###############################################################
if self.BC_Rigidity_W == "0 curvature":
self.D[:,0] = 2*self.D[:,1] - self.D[:,2]
if self.BC_Rigidity_E == "0 curvature":
self.D[:,-1] = 2*self.D[:,-2] - self.D[:,-3]
if self.BC_Rigidity_N == "0 curvature":
self.D[0,:] = 2*self.D[1,:] - self.D[2,:]
if self.BC_Rigidity_S == "0 curvature":
self.D[-1,:] = 2*self.D[-2,:] - self.D[-3,:]
if self.BC_Rigidity_W == "mirror symmetry":
self.D[:,0] = self.D[:,2]
if self.BC_Rigidity_E == "mirror symmetry":
self.D[:,-1] = self.D[:,-3]
if self.BC_Rigidity_N == "mirror symmetry":
self.D[0,:] = self.D[2,:] # Yes, will work on corners -- double-reflection
if self.BC_Rigidity_S == "mirror symmetry":
self.D[-1,:] = self.D[-3,:]
if self.BC_Rigidity_W == "periodic":
self.D[:,0] = self.D[:,-2]
if self.BC_Rigidity_E == "periodic":
self.D[:,-1] = self.D[:,-3]
if self.BC_Rigidity_N == "periodic":
self.D[0,:] = self.D[-2,:]
if self.BC_Rigidity_S == "periodic":
self.D[-1,:] = self.D[-3,:] | def function[BC_Rigidity, parameter[self]]:
constant[
Utility function to help implement boundary conditions by specifying
them for and applying them to the elastic thickness grid
]
if compare[name[self].BC_W equal[==] constant[Periodic]] begin[:]
name[self].BC_Rigidity_W assign[=] constant[periodic]
if compare[name[self].BC_E equal[==] constant[Periodic]] begin[:]
name[self].BC_Rigidity_E assign[=] constant[periodic]
if compare[name[self].BC_N equal[==] constant[Periodic]] begin[:]
name[self].BC_Rigidity_N assign[=] constant[periodic]
if compare[name[self].BC_S equal[==] constant[Periodic]] begin[:]
name[self].BC_Rigidity_S assign[=] constant[periodic]
if call[name[np].isscalar, parameter[name[self].Te]] begin[:]
<ast.AugAssign object at 0x7da1b251d840>
if compare[name[self].BC_Rigidity_W equal[==] constant[0 curvature]] begin[:]
call[name[self].D][tuple[[<ast.Slice object at 0x7da1b251a860>, <ast.Constant object at 0x7da1b2519840>]]] assign[=] binary_operation[binary_operation[constant[2] * call[name[self].D][tuple[[<ast.Slice object at 0x7da1b25199c0>, <ast.Constant object at 0x7da1b25190c0>]]]] - call[name[self].D][tuple[[<ast.Slice object at 0x7da1b251ac50>, <ast.Constant object at 0x7da1b251b0d0>]]]]
if compare[name[self].BC_Rigidity_E equal[==] constant[0 curvature]] begin[:]
call[name[self].D][tuple[[<ast.Slice object at 0x7da1b251abf0>, <ast.UnaryOp object at 0x7da1b251ad70>]]] assign[=] binary_operation[binary_operation[constant[2] * call[name[self].D][tuple[[<ast.Slice object at 0x7da1b2518250>, <ast.UnaryOp object at 0x7da1b251a6b0>]]]] - call[name[self].D][tuple[[<ast.Slice object at 0x7da1b25184f0>, <ast.UnaryOp object at 0x7da1b251ab90>]]]]
if compare[name[self].BC_Rigidity_N equal[==] constant[0 curvature]] begin[:]
call[name[self].D][tuple[[<ast.Constant object at 0x7da1b25184c0>, <ast.Slice object at 0x7da1b2519240>]]] assign[=] binary_operation[binary_operation[constant[2] * call[name[self].D][tuple[[<ast.Constant object at 0x7da1b2518d90>, <ast.Slice object at 0x7da1b2518f40>]]]] - call[name[self].D][tuple[[<ast.Constant object at 0x7da1b2518340>, <ast.Slice object at 0x7da1b251a650>]]]]
if compare[name[self].BC_Rigidity_S equal[==] constant[0 curvature]] begin[:]
call[name[self].D][tuple[[<ast.UnaryOp object at 0x7da1b2519270>, <ast.Slice object at 0x7da1b2518a30>]]] assign[=] binary_operation[binary_operation[constant[2] * call[name[self].D][tuple[[<ast.UnaryOp object at 0x7da1b2519210>, <ast.Slice object at 0x7da1b251aa70>]]]] - call[name[self].D][tuple[[<ast.UnaryOp object at 0x7da1b251a470>, <ast.Slice object at 0x7da1b25183a0>]]]]
if compare[name[self].BC_Rigidity_W equal[==] constant[mirror symmetry]] begin[:]
call[name[self].D][tuple[[<ast.Slice object at 0x7da1b251ac20>, <ast.Constant object at 0x7da1b251a5f0>]]] assign[=] call[name[self].D][tuple[[<ast.Slice object at 0x7da1b2519390>, <ast.Constant object at 0x7da1b2519030>]]]
if compare[name[self].BC_Rigidity_E equal[==] constant[mirror symmetry]] begin[:]
call[name[self].D][tuple[[<ast.Slice object at 0x7da1b2595c30>, <ast.UnaryOp object at 0x7da1b2597400>]]] assign[=] call[name[self].D][tuple[[<ast.Slice object at 0x7da1b2596c20>, <ast.UnaryOp object at 0x7da1b2596110>]]]
if compare[name[self].BC_Rigidity_N equal[==] constant[mirror symmetry]] begin[:]
call[name[self].D][tuple[[<ast.Constant object at 0x7da1b25964d0>, <ast.Slice object at 0x7da1b2595bd0>]]] assign[=] call[name[self].D][tuple[[<ast.Constant object at 0x7da1b2597940>, <ast.Slice object at 0x7da1b2594e50>]]]
if compare[name[self].BC_Rigidity_S equal[==] constant[mirror symmetry]] begin[:]
call[name[self].D][tuple[[<ast.UnaryOp object at 0x7da1b2596e00>, <ast.Slice object at 0x7da1b2596a10>]]] assign[=] call[name[self].D][tuple[[<ast.UnaryOp object at 0x7da1b2595d50>, <ast.Slice object at 0x7da1b2597760>]]]
if compare[name[self].BC_Rigidity_W equal[==] constant[periodic]] begin[:]
call[name[self].D][tuple[[<ast.Slice object at 0x7da1b2596020>, <ast.Constant object at 0x7da1b2595a80>]]] assign[=] call[name[self].D][tuple[[<ast.Slice object at 0x7da1b2596290>, <ast.UnaryOp object at 0x7da1b25960b0>]]]
if compare[name[self].BC_Rigidity_E equal[==] constant[periodic]] begin[:]
call[name[self].D][tuple[[<ast.Slice object at 0x7da1b2595b10>, <ast.UnaryOp object at 0x7da1b25945e0>]]] assign[=] call[name[self].D][tuple[[<ast.Slice object at 0x7da1b2595c00>, <ast.UnaryOp object at 0x7da1b2595660>]]]
if compare[name[self].BC_Rigidity_N equal[==] constant[periodic]] begin[:]
call[name[self].D][tuple[[<ast.Constant object at 0x7da1b2597fa0>, <ast.Slice object at 0x7da1b2594e20>]]] assign[=] call[name[self].D][tuple[[<ast.UnaryOp object at 0x7da1b2597130>, <ast.Slice object at 0x7da1b2594190>]]]
if compare[name[self].BC_Rigidity_S equal[==] constant[periodic]] begin[:]
call[name[self].D][tuple[[<ast.UnaryOp object at 0x7da1b2596e90>, <ast.Slice object at 0x7da1b2594670>]]] assign[=] call[name[self].D][tuple[[<ast.UnaryOp object at 0x7da1b2594040>, <ast.Slice object at 0x7da1b2594760>]]] | keyword[def] identifier[BC_Rigidity] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[BC_W] == literal[string] :
identifier[self] . identifier[BC_Rigidity_W] = literal[string]
keyword[elif] ( identifier[self] . identifier[BC_W] == identifier[np] . identifier[array] ([ literal[string] , literal[string] , literal[string] ])). identifier[any] ():
identifier[self] . identifier[BC_Rigidity_W] = literal[string]
keyword[elif] identifier[self] . identifier[BC_W] == literal[string] :
identifier[self] . identifier[BC_Rigidity_W] = literal[string]
keyword[else] :
identifier[sys] . identifier[exit] ( literal[string] )
keyword[if] identifier[self] . identifier[BC_E] == literal[string] :
identifier[self] . identifier[BC_Rigidity_E] = literal[string]
keyword[elif] ( identifier[self] . identifier[BC_E] == identifier[np] . identifier[array] ([ literal[string] , literal[string] , literal[string] ])). identifier[any] ():
identifier[self] . identifier[BC_Rigidity_E] = literal[string]
keyword[elif] identifier[self] . identifier[BC_E] == literal[string] :
identifier[self] . identifier[BC_Rigidity_E] = literal[string]
keyword[else] :
identifier[sys] . identifier[exit] ( literal[string] )
keyword[if] identifier[self] . identifier[BC_N] == literal[string] :
identifier[self] . identifier[BC_Rigidity_N] = literal[string]
keyword[elif] ( identifier[self] . identifier[BC_N] == identifier[np] . identifier[array] ([ literal[string] , literal[string] , literal[string] ])). identifier[any] ():
identifier[self] . identifier[BC_Rigidity_N] = literal[string]
keyword[elif] identifier[self] . identifier[BC_N] == literal[string] :
identifier[self] . identifier[BC_Rigidity_N] = literal[string]
keyword[else] :
identifier[sys] . identifier[exit] ( literal[string] )
keyword[if] identifier[self] . identifier[BC_S] == literal[string] :
identifier[self] . identifier[BC_Rigidity_S] = literal[string]
keyword[elif] ( identifier[self] . identifier[BC_S] == identifier[np] . identifier[array] ([ literal[string] , literal[string] , literal[string] ])). identifier[any] ():
identifier[self] . identifier[BC_Rigidity_S] = literal[string]
keyword[elif] identifier[self] . identifier[BC_S] == literal[string] :
identifier[self] . identifier[BC_Rigidity_S] = literal[string]
keyword[else] :
identifier[sys] . identifier[exit] ( literal[string] )
keyword[if] identifier[np] . identifier[isscalar] ( identifier[self] . identifier[Te] ):
identifier[self] . identifier[D] *= identifier[np] . identifier[ones] ( identifier[self] . identifier[qs] . identifier[shape] )
keyword[else] :
identifier[self] . identifier[Te_unpadded] = identifier[self] . identifier[Te] . identifier[copy] ()
identifier[self] . identifier[Te] = identifier[np] . identifier[hstack] (( identifier[np] . identifier[nan] * identifier[np] . identifier[zeros] (( identifier[self] . identifier[Te] . identifier[shape] [ literal[int] ], literal[int] )), identifier[self] . identifier[Te] , identifier[np] . identifier[nan] * identifier[np] . identifier[zeros] (( identifier[self] . identifier[Te] . identifier[shape] [ literal[int] ], literal[int] ))))
identifier[self] . identifier[Te] = identifier[np] . identifier[vstack] (( identifier[np] . identifier[nan] * identifier[np] . identifier[zeros] ( identifier[self] . identifier[Te] . identifier[shape] [ literal[int] ]), identifier[self] . identifier[Te] , identifier[np] . identifier[nan] * identifier[np] . identifier[zeros] ( identifier[self] . identifier[Te] . identifier[shape] [ literal[int] ])))
identifier[self] . identifier[D] = identifier[np] . identifier[hstack] (( identifier[np] . identifier[nan] * identifier[np] . identifier[zeros] (( identifier[self] . identifier[D] . identifier[shape] [ literal[int] ], literal[int] )), identifier[self] . identifier[D] , identifier[np] . identifier[nan] * identifier[np] . identifier[zeros] (( identifier[self] . identifier[D] . identifier[shape] [ literal[int] ], literal[int] ))))
identifier[self] . identifier[D] = identifier[np] . identifier[vstack] (( identifier[np] . identifier[nan] * identifier[np] . identifier[zeros] ( identifier[self] . identifier[D] . identifier[shape] [ literal[int] ]), identifier[self] . identifier[D] , identifier[np] . identifier[nan] * identifier[np] . identifier[zeros] ( identifier[self] . identifier[D] . identifier[shape] [ literal[int] ])))
keyword[if] identifier[self] . identifier[BC_Rigidity_W] == literal[string] :
identifier[self] . identifier[D] [:, literal[int] ]= literal[int] * identifier[self] . identifier[D] [:, literal[int] ]- identifier[self] . identifier[D] [:, literal[int] ]
keyword[if] identifier[self] . identifier[BC_Rigidity_E] == literal[string] :
identifier[self] . identifier[D] [:,- literal[int] ]= literal[int] * identifier[self] . identifier[D] [:,- literal[int] ]- identifier[self] . identifier[D] [:,- literal[int] ]
keyword[if] identifier[self] . identifier[BC_Rigidity_N] == literal[string] :
identifier[self] . identifier[D] [ literal[int] ,:]= literal[int] * identifier[self] . identifier[D] [ literal[int] ,:]- identifier[self] . identifier[D] [ literal[int] ,:]
keyword[if] identifier[self] . identifier[BC_Rigidity_S] == literal[string] :
identifier[self] . identifier[D] [- literal[int] ,:]= literal[int] * identifier[self] . identifier[D] [- literal[int] ,:]- identifier[self] . identifier[D] [- literal[int] ,:]
keyword[if] identifier[self] . identifier[BC_Rigidity_W] == literal[string] :
identifier[self] . identifier[D] [:, literal[int] ]= identifier[self] . identifier[D] [:, literal[int] ]
keyword[if] identifier[self] . identifier[BC_Rigidity_E] == literal[string] :
identifier[self] . identifier[D] [:,- literal[int] ]= identifier[self] . identifier[D] [:,- literal[int] ]
keyword[if] identifier[self] . identifier[BC_Rigidity_N] == literal[string] :
identifier[self] . identifier[D] [ literal[int] ,:]= identifier[self] . identifier[D] [ literal[int] ,:]
keyword[if] identifier[self] . identifier[BC_Rigidity_S] == literal[string] :
identifier[self] . identifier[D] [- literal[int] ,:]= identifier[self] . identifier[D] [- literal[int] ,:]
keyword[if] identifier[self] . identifier[BC_Rigidity_W] == literal[string] :
identifier[self] . identifier[D] [:, literal[int] ]= identifier[self] . identifier[D] [:,- literal[int] ]
keyword[if] identifier[self] . identifier[BC_Rigidity_E] == literal[string] :
identifier[self] . identifier[D] [:,- literal[int] ]= identifier[self] . identifier[D] [:,- literal[int] ]
keyword[if] identifier[self] . identifier[BC_Rigidity_N] == literal[string] :
identifier[self] . identifier[D] [ literal[int] ,:]= identifier[self] . identifier[D] [- literal[int] ,:]
keyword[if] identifier[self] . identifier[BC_Rigidity_S] == literal[string] :
identifier[self] . identifier[D] [- literal[int] ,:]= identifier[self] . identifier[D] [- literal[int] ,:] | def BC_Rigidity(self):
"""
Utility function to help implement boundary conditions by specifying
them for and applying them to the elastic thickness grid
"""
#########################################
# FLEXURAL RIGIDITY BOUNDARY CONDITIONS #
#########################################
# West
if self.BC_W == 'Periodic':
self.BC_Rigidity_W = 'periodic' # depends on [control=['if'], data=[]]
elif (self.BC_W == np.array(['0Displacement0Slope', '0Moment0Shear', '0Slope0Shear'])).any():
self.BC_Rigidity_W = '0 curvature' # depends on [control=['if'], data=[]]
elif self.BC_W == 'Mirror':
self.BC_Rigidity_W = 'mirror symmetry' # depends on [control=['if'], data=[]]
else:
sys.exit('Invalid Te B.C. case')
# East
if self.BC_E == 'Periodic':
self.BC_Rigidity_E = 'periodic' # depends on [control=['if'], data=[]]
elif (self.BC_E == np.array(['0Displacement0Slope', '0Moment0Shear', '0Slope0Shear'])).any():
self.BC_Rigidity_E = '0 curvature' # depends on [control=['if'], data=[]]
elif self.BC_E == 'Mirror':
self.BC_Rigidity_E = 'mirror symmetry' # depends on [control=['if'], data=[]]
else:
sys.exit('Invalid Te B.C. case')
# North
if self.BC_N == 'Periodic':
self.BC_Rigidity_N = 'periodic' # depends on [control=['if'], data=[]]
elif (self.BC_N == np.array(['0Displacement0Slope', '0Moment0Shear', '0Slope0Shear'])).any():
self.BC_Rigidity_N = '0 curvature' # depends on [control=['if'], data=[]]
elif self.BC_N == 'Mirror':
self.BC_Rigidity_N = 'mirror symmetry' # depends on [control=['if'], data=[]]
else:
sys.exit('Invalid Te B.C. case')
# South
if self.BC_S == 'Periodic':
self.BC_Rigidity_S = 'periodic' # depends on [control=['if'], data=[]]
elif (self.BC_S == np.array(['0Displacement0Slope', '0Moment0Shear', '0Slope0Shear'])).any():
self.BC_Rigidity_S = '0 curvature' # depends on [control=['if'], data=[]]
elif self.BC_S == 'Mirror':
self.BC_Rigidity_S = 'mirror symmetry' # depends on [control=['if'], data=[]]
else:
sys.exit('Invalid Te B.C. case')
#############
# PAD ARRAY #
#############
if np.isscalar(self.Te):
self.D *= np.ones(self.qs.shape) # And leave Te as a scalar for checks # depends on [control=['if'], data=[]]
else:
self.Te_unpadded = self.Te.copy()
self.Te = np.hstack((np.nan * np.zeros((self.Te.shape[0], 1)), self.Te, np.nan * np.zeros((self.Te.shape[0], 1))))
self.Te = np.vstack((np.nan * np.zeros(self.Te.shape[1]), self.Te, np.nan * np.zeros(self.Te.shape[1])))
self.D = np.hstack((np.nan * np.zeros((self.D.shape[0], 1)), self.D, np.nan * np.zeros((self.D.shape[0], 1))))
self.D = np.vstack((np.nan * np.zeros(self.D.shape[1]), self.D, np.nan * np.zeros(self.D.shape[1])))
###############################################################
# APPLY FLEXURAL RIGIDITY BOUNDARY CONDITIONS TO PADDED ARRAY #
###############################################################
if self.BC_Rigidity_W == '0 curvature':
self.D[:, 0] = 2 * self.D[:, 1] - self.D[:, 2] # depends on [control=['if'], data=[]]
if self.BC_Rigidity_E == '0 curvature':
self.D[:, -1] = 2 * self.D[:, -2] - self.D[:, -3] # depends on [control=['if'], data=[]]
if self.BC_Rigidity_N == '0 curvature':
self.D[0, :] = 2 * self.D[1, :] - self.D[2, :] # depends on [control=['if'], data=[]]
if self.BC_Rigidity_S == '0 curvature':
self.D[-1, :] = 2 * self.D[-2, :] - self.D[-3, :] # depends on [control=['if'], data=[]]
if self.BC_Rigidity_W == 'mirror symmetry':
self.D[:, 0] = self.D[:, 2] # depends on [control=['if'], data=[]]
if self.BC_Rigidity_E == 'mirror symmetry':
self.D[:, -1] = self.D[:, -3] # depends on [control=['if'], data=[]]
if self.BC_Rigidity_N == 'mirror symmetry':
self.D[0, :] = self.D[2, :] # Yes, will work on corners -- double-reflection # depends on [control=['if'], data=[]]
if self.BC_Rigidity_S == 'mirror symmetry':
self.D[-1, :] = self.D[-3, :] # depends on [control=['if'], data=[]]
if self.BC_Rigidity_W == 'periodic':
self.D[:, 0] = self.D[:, -2] # depends on [control=['if'], data=[]]
if self.BC_Rigidity_E == 'periodic':
self.D[:, -1] = self.D[:, -3] # depends on [control=['if'], data=[]]
if self.BC_Rigidity_N == 'periodic':
self.D[0, :] = self.D[-2, :] # depends on [control=['if'], data=[]]
if self.BC_Rigidity_S == 'periodic':
self.D[-1, :] = self.D[-3, :] # depends on [control=['if'], data=[]] |
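A small self-contained check of the '0 curvature' pad rule used above: the ghost cell is a linear extrapolation, so the discrete second difference (curvature) across the boundary vanishes.

import numpy as np

D = np.array([np.nan, 4.0, 6.0, 8.0, np.nan])  # padded 1-D rigidity profile
D[0] = 2*D[1] - D[2]     # west ghost cell: 2*4 - 6 = 2
D[-1] = 2*D[-2] - D[-3]  # east ghost cell: 2*8 - 6 = 10
assert D[0] - 2*D[1] + D[2] == 0  # zero curvature at the west edge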
def _generate_move(
cls, char, width=None, fill_char=None,
bounce=False, reverse=True, back_char=None):
""" Yields strings that simulate movement of a character from left
to right. For use with `BarSet.from_char`.
Arguments:
char : Character to move across the progress bar.
width : Width for the progress bar.
Default: cls.default_width
fill_char : String for empty space.
Default: cls.default_fill_char
bounce : Whether to move the character in both
directions.
reverse : Whether to start on the right side.
back_char : Character to use for the bounce's backward
movement.
Default: `char`
"""
width = width or cls.default_width
char = str(char)
filler = str(fill_char or cls.default_fill_char) * (width - len(char))
rangeargs = RangeMoveArgs(
(0, width, 1),
(width, 0, -1),
)
if reverse:
# Reverse the arguments for range to start from the right.
# Not using swap, because the stopping point is different.
rangeargs = RangeMoveArgs(
(width, -1, -1),
(0, width - 1, 1),
)
yield from (
''.join((filler[:i], char, filler[i:]))
for i in range(*rangeargs.forward)
)
if bounce:
bouncechar = char if back_char is None else back_char
yield from (
''.join((filler[:i], str(bouncechar), filler[i:]))
for i in range(*rangeargs.backward)
) | def function[_generate_move, parameter[cls, char, width, fill_char, bounce, reverse, back_char]]:
constant[ Yields strings that simulate movement of a character from left
to right. For use with `BarSet.from_char`.
Arguments:
char : Character to move across the progress bar.
width : Width for the progress bar.
Default: cls.default_width
fill_char : String for empty space.
Default: cls.default_fill_char
bounce : Whether to move the character in both
directions.
reverse : Whether to start on the right side.
back_char : Character to use for the bounce's backward
movement.
Default: `char`
]
variable[width] assign[=] <ast.BoolOp object at 0x7da20c991540>
variable[char] assign[=] call[name[str], parameter[name[char]]]
variable[filler] assign[=] binary_operation[call[name[str], parameter[<ast.BoolOp object at 0x7da20c9910c0>]] * binary_operation[name[width] - call[name[len], parameter[name[char]]]]]
variable[rangeargs] assign[=] call[name[RangeMoveArgs], parameter[tuple[[<ast.Constant object at 0x7da20c9921d0>, <ast.Name object at 0x7da20c993550>, <ast.Constant object at 0x7da20c991b40>]], tuple[[<ast.Name object at 0x7da20c992140>, <ast.Constant object at 0x7da20c991cf0>, <ast.UnaryOp object at 0x7da20c991fc0>]]]]
if name[reverse] begin[:]
variable[rangeargs] assign[=] call[name[RangeMoveArgs], parameter[tuple[[<ast.Name object at 0x7da20c993f40>, <ast.UnaryOp object at 0x7da20c992e30>, <ast.UnaryOp object at 0x7da20c9928c0>]], tuple[[<ast.Constant object at 0x7da20c9929b0>, <ast.BinOp object at 0x7da20c991720>, <ast.Constant object at 0x7da20c992380>]]]]
<ast.YieldFrom object at 0x7da20c993460>
if name[bounce] begin[:]
variable[bouncechar] assign[=] <ast.IfExp object at 0x7da1b02bca90>
<ast.YieldFrom object at 0x7da1b02bf0a0> | keyword[def] identifier[_generate_move] (
identifier[cls] , identifier[char] , identifier[width] = keyword[None] , identifier[fill_char] = keyword[None] ,
identifier[bounce] = keyword[False] , identifier[reverse] = keyword[True] , identifier[back_char] = keyword[None] ):
literal[string]
identifier[width] = identifier[width] keyword[or] identifier[cls] . identifier[default_width]
identifier[char] = identifier[str] ( identifier[char] )
identifier[filler] = identifier[str] ( identifier[fill_char] keyword[or] identifier[cls] . identifier[default_fill_char] )*( identifier[width] - identifier[len] ( identifier[char] ))
identifier[rangeargs] = identifier[RangeMoveArgs] (
( literal[int] , identifier[width] , literal[int] ),
( identifier[width] , literal[int] ,- literal[int] ),
)
keyword[if] identifier[reverse] :
identifier[rangeargs] = identifier[RangeMoveArgs] (
( identifier[width] ,- literal[int] ,- literal[int] ),
( literal[int] , identifier[width] - literal[int] , literal[int] ),
)
keyword[yield] keyword[from] (
literal[string] . identifier[join] (( identifier[filler] [: identifier[i] ], identifier[char] , identifier[filler] [ identifier[i] :]))
keyword[for] identifier[i] keyword[in] identifier[range] (* identifier[rangeargs] . identifier[forward] )
)
keyword[if] identifier[bounce] :
identifier[bouncechar] = identifier[char] keyword[if] identifier[back_char] keyword[is] keyword[None] keyword[else] identifier[back_char]
keyword[yield] keyword[from] (
literal[string] . identifier[join] (( identifier[filler] [: identifier[i] ], identifier[str] ( identifier[bouncechar] ), identifier[filler] [ identifier[i] :]))
keyword[for] identifier[i] keyword[in] identifier[range] (* identifier[rangeargs] . identifier[backward] )
) | def _generate_move(cls, char, width=None, fill_char=None, bounce=False, reverse=True, back_char=None):
""" Yields strings that simulate movement of a character from left
to right. For use with `BarSet.from_char`.
Arguments:
char : Character to move across the progress bar.
width : Width for the progress bar.
Default: cls.default_width
fill_char : String for empty space.
Default: cls.default_fill_char
bounce : Whether to move the character in both
directions.
reverse : Whether to start on the right side.
back_char : Character to use for the bounce's backward
movement.
Default: `char`
"""
width = width or cls.default_width
char = str(char)
filler = str(fill_char or cls.default_fill_char) * (width - len(char))
rangeargs = RangeMoveArgs((0, width, 1), (width, 0, -1))
if reverse:
# Reverse the arguments for range to start from the right.
# Not using swap, because the stopping point is different.
rangeargs = RangeMoveArgs((width, -1, -1), (0, width - 1, 1)) # depends on [control=['if'], data=[]]
yield from (''.join((filler[:i], char, filler[i:])) for i in range(*rangeargs.forward))
if bounce:
bouncechar = char if back_char is None else back_char
yield from (''.join((filler[:i], str(bouncechar), filler[i:])) for i in range(*rangeargs.backward)) # depends on [control=['if'], data=[]] |
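For illustration, a standalone reduction of the forward-pass frame logic (defaults are passed explicitly instead of via cls; all names are local to this sketch):

def demo_frames(char='=', width=6, fill_char='-'):
    filler = fill_char * (width - len(char))
    for i in range(0, width, 1):  # mirrors the 'forward' range arguments
        yield ''.join((filler[:i], char, filler[i:]))

print(list(demo_frames()))
# ['=-----', '-=----', '--=---', '---=--', '----=-', '-----=']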
def specialspaceless(parser, token):
"""
Removes whitespace between HTML tags, and introduces whitespace
after buttons and inputs, which is necessary for Bootstrap to place
them correctly in the layout.
"""
nodelist = parser.parse(('endspecialspaceless',))
parser.delete_first_token()
return SpecialSpacelessNode(nodelist) | def function[specialspaceless, parameter[parser, token]]:
constant[
Removes whitespace between HTML tags, and introduces whitespace
after buttons and inputs, which is necessary for Bootstrap to place
them correctly in the layout.
]
variable[nodelist] assign[=] call[name[parser].parse, parameter[tuple[[<ast.Constant object at 0x7da20e955510>]]]]
call[name[parser].delete_first_token, parameter[]]
return[call[name[SpecialSpacelessNode], parameter[name[nodelist]]]] | keyword[def] identifier[specialspaceless] ( identifier[parser] , identifier[token] ):
literal[string]
identifier[nodelist] = identifier[parser] . identifier[parse] (( literal[string] ,))
identifier[parser] . identifier[delete_first_token] ()
keyword[return] identifier[SpecialSpacelessNode] ( identifier[nodelist] ) | def specialspaceless(parser, token):
"""
Removes whitespace between HTML tags, and introduces whitespace
after buttons and inputs, which is necessary for Bootstrap to place
them correctly in the layout.
"""
nodelist = parser.parse(('endspecialspaceless',))
parser.delete_first_token()
return SpecialSpacelessNode(nodelist) |
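A hedged rendering sketch, assuming a configured Django project and that the tag is registered in a library named my_tags (both assumptions):

from django.template import Context, Template

tpl = Template(
    "{% load my_tags %}"
    "{% specialspaceless %} <button>Save</button>   <input name='q'> {% endspecialspaceless %}"
)
print(tpl.render(Context({})))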
def check_mass_balance(self):
"""Compute mass and charge balance for the reaction
returns a dict of {element: amount} for unbalanced elements.
"charge" is treated as an element in this dict
This should be empty for balanced reactions.
"""
reaction_element_dict = defaultdict(int)
for metabolite, coefficient in iteritems(self._metabolites):
if metabolite.charge is not None:
reaction_element_dict["charge"] += \
coefficient * metabolite.charge
if metabolite.elements is None:
raise ValueError("No elements found in metabolite %s"
% metabolite.id)
for element, amount in iteritems(metabolite.elements):
reaction_element_dict[element] += coefficient * amount
# filter out 0 values
return {k: v for k, v in iteritems(reaction_element_dict) if v != 0} | def function[check_mass_balance, parameter[self]]:
constant[Compute mass and charge balance for the reaction.
Returns a dict of {element: amount} for unbalanced elements;
"charge" is treated as an element in this dict.
This should be empty for balanced reactions.
]
variable[reaction_element_dict] assign[=] call[name[defaultdict], parameter[name[int]]]
for taget[tuple[[<ast.Name object at 0x7da1b0120b20>, <ast.Name object at 0x7da1b01234f0>]]] in starred[call[name[iteritems], parameter[name[self]._metabolites]]] begin[:]
if compare[name[metabolite].charge is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b007cb80>
if compare[name[metabolite].elements is constant[None]] begin[:]
<ast.Raise object at 0x7da1b007dc60>
for taget[tuple[[<ast.Name object at 0x7da1b007ff40>, <ast.Name object at 0x7da1b007fee0>]]] in starred[call[name[iteritems], parameter[name[metabolite].elements]]] begin[:]
<ast.AugAssign object at 0x7da1b007e440>
return[<ast.DictComp object at 0x7da1b007c8b0>] | keyword[def] identifier[check_mass_balance] ( identifier[self] ):
literal[string]
identifier[reaction_element_dict] = identifier[defaultdict] ( identifier[int] )
keyword[for] identifier[metabolite] , identifier[coefficient] keyword[in] identifier[iteritems] ( identifier[self] . identifier[_metabolites] ):
keyword[if] identifier[metabolite] . identifier[charge] keyword[is] keyword[not] keyword[None] :
identifier[reaction_element_dict] [ literal[string] ]+= identifier[coefficient] * identifier[metabolite] . identifier[charge]
keyword[if] identifier[metabolite] . identifier[elements] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string]
% identifier[metabolite] . identifier[id] )
keyword[for] identifier[element] , identifier[amount] keyword[in] identifier[iteritems] ( identifier[metabolite] . identifier[elements] ):
identifier[reaction_element_dict] [ identifier[element] ]+= identifier[coefficient] * identifier[amount]
keyword[return] { identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[iteritems] ( identifier[reaction_element_dict] ) keyword[if] identifier[v] != literal[int] } | def check_mass_balance(self):
"""Compute mass and charge balance for the reaction
returns a dict of {element: amount} for unbalanced elements.
"charge" is treated as an element in this dict
This should be empty for balanced reactions.
"""
reaction_element_dict = defaultdict(int)
for (metabolite, coefficient) in iteritems(self._metabolites):
if metabolite.charge is not None:
reaction_element_dict['charge'] += coefficient * metabolite.charge # depends on [control=['if'], data=[]]
if metabolite.elements is None:
raise ValueError('No elements found in metabolite %s' % metabolite.id) # depends on [control=['if'], data=[]]
for (element, amount) in iteritems(metabolite.elements):
reaction_element_dict[element] += coefficient * amount # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
# filter out 0 values
return {k: v for (k, v) in iteritems(reaction_element_dict) if v != 0} |
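A usage sketch with COBRApy-style objects; the ATP-hydrolysis formulas and charges below are illustrative but chosen so the reaction balances:

from cobra import Metabolite, Reaction

atp = Metabolite('atp_c', formula='C10H12N5O13P3', charge=-4)
adp = Metabolite('adp_c', formula='C10H12N5O10P2', charge=-3)
pi = Metabolite('pi_c', formula='HO4P', charge=-2)
h2o = Metabolite('h2o_c', formula='H2O', charge=0)
h = Metabolite('h_c', formula='H', charge=1)

rxn = Reaction('ATPase')
rxn.add_metabolites({atp: -1, h2o: -1, adp: 1, pi: 1, h: 1})
print(rxn.check_mass_balance())  # {} -> balanced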
def create_load_balancer(self, name, zones, listeners, subnets=None,
security_groups=None):
"""
Create a new load balancer for your account. By default the load
balancer will be created in EC2. To create a load balancer inside a
VPC, parameter zones must be set to None and subnets must not be None.
The load balancer will be automatically created under the VPC that
contains the subnet(s) specified.
:type name: string
:param name: The mnemonic name associated with the new load balancer
:type zones: List of strings
:param zones: The names of the availability zone(s) to add.
:type listeners: List of tuples
:param listeners: Each tuple contains three or four values,
(LoadBalancerPortNumber, InstancePortNumber,
Protocol, [SSLCertificateId])
where LoadBalancerPortNumber and InstancePortNumber
are integer values between 1 and 65535, Protocol is a
string containing either 'TCP', 'HTTP' or 'HTTPS';
SSLCertificateId is the ARN of an AWS IAM certificate,
and must be specified when doing HTTPS.
:rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
:return: The newly created :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
params = {'LoadBalancerName': name}
for index, listener in enumerate(listeners):
i = index + 1
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
if listener[2] == 'HTTPS':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
if zones:
self.build_list_params(params, zones, 'AvailabilityZones.member.%d')
if subnets:
self.build_list_params(params, subnets, 'Subnets.member.%d')
if security_groups:
self.build_list_params(params, security_groups,
'SecurityGroups.member.%d')
load_balancer = self.get_object('CreateLoadBalancer',
params, LoadBalancer)
load_balancer.name = name
load_balancer.listeners = listeners
load_balancer.availability_zones = zones
load_balancer.subnets = subnets
load_balancer.security_groups = security_groups
return load_balancer | def function[create_load_balancer, parameter[self, name, zones, listeners, subnets, security_groups]]:
constant[
Create a new load balancer for your account. By default the load
balancer will be created in EC2. To create a load balancer inside a
VPC, parameter zones must be set to None and subnets must not be None.
The load balancer will be automatically created under the VPC that
contains the subnet(s) specified.
:type name: string
:param name: The mnemonic name associated with the new load balancer
:type zones: List of strings
:param zones: The names of the availability zone(s) to add.
:type listeners: List of tuples
:param listeners: Each tuple contains three or four values,
(LoadBalancerPortNumber, InstancePortNumber,
Protocol, [SSLCertificateId])
where LoadBalancerPortNumber and InstancePortNumber
are integer values between 1 and 65535, Protocol is a
string containing either 'TCP', 'HTTP' or 'HTTPS';
SSLCertificateId is the ARN of an AWS IAM certificate,
and must be specified when doing HTTPS.
:rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
:return: The newly created :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b26a6470>], [<ast.Name object at 0x7da1b26a5780>]]
for taget[tuple[[<ast.Name object at 0x7da1b26a4040>, <ast.Name object at 0x7da1b26a67a0>]]] in starred[call[name[enumerate], parameter[name[listeners]]]] begin[:]
variable[i] assign[=] binary_operation[name[index] + constant[1]]
call[name[params]][binary_operation[constant[Listeners.member.%d.LoadBalancerPort] <ast.Mod object at 0x7da2590d6920> name[i]]] assign[=] call[name[listener]][constant[0]]
call[name[params]][binary_operation[constant[Listeners.member.%d.InstancePort] <ast.Mod object at 0x7da2590d6920> name[i]]] assign[=] call[name[listener]][constant[1]]
call[name[params]][binary_operation[constant[Listeners.member.%d.Protocol] <ast.Mod object at 0x7da2590d6920> name[i]]] assign[=] call[name[listener]][constant[2]]
if compare[call[name[listener]][constant[2]] equal[==] constant[HTTPS]] begin[:]
call[name[params]][binary_operation[constant[Listeners.member.%d.SSLCertificateId] <ast.Mod object at 0x7da2590d6920> name[i]]] assign[=] call[name[listener]][constant[3]]
if name[zones] begin[:]
call[name[self].build_list_params, parameter[name[params], name[zones], constant[AvailabilityZones.member.%d]]]
if name[subnets] begin[:]
call[name[self].build_list_params, parameter[name[params], name[subnets], constant[Subnets.member.%d]]]
if name[security_groups] begin[:]
call[name[self].build_list_params, parameter[name[params], name[security_groups], constant[SecurityGroups.member.%d]]]
variable[load_balancer] assign[=] call[name[self].get_object, parameter[constant[CreateLoadBalancer], name[params], name[LoadBalancer]]]
name[load_balancer].name assign[=] name[name]
name[load_balancer].listeners assign[=] name[listeners]
name[load_balancer].availability_zones assign[=] name[zones]
name[load_balancer].subnets assign[=] name[subnets]
name[load_balancer].security_groups assign[=] name[security_groups]
return[name[load_balancer]] | keyword[def] identifier[create_load_balancer] ( identifier[self] , identifier[name] , identifier[zones] , identifier[listeners] , identifier[subnets] = keyword[None] ,
identifier[security_groups] = keyword[None] ):
literal[string]
identifier[params] ={ literal[string] : identifier[name] }
keyword[for] identifier[index] , identifier[listener] keyword[in] identifier[enumerate] ( identifier[listeners] ):
identifier[i] = identifier[index] + literal[int]
identifier[params] [ literal[string] % identifier[i] ]= identifier[listener] [ literal[int] ]
identifier[params] [ literal[string] % identifier[i] ]= identifier[listener] [ literal[int] ]
identifier[params] [ literal[string] % identifier[i] ]= identifier[listener] [ literal[int] ]
keyword[if] identifier[listener] [ literal[int] ]== literal[string] :
identifier[params] [ literal[string] % identifier[i] ]= identifier[listener] [ literal[int] ]
keyword[if] identifier[zones] :
identifier[self] . identifier[build_list_params] ( identifier[params] , identifier[zones] , literal[string] )
keyword[if] identifier[subnets] :
identifier[self] . identifier[build_list_params] ( identifier[params] , identifier[subnets] , literal[string] )
keyword[if] identifier[security_groups] :
identifier[self] . identifier[build_list_params] ( identifier[params] , identifier[security_groups] ,
literal[string] )
identifier[load_balancer] = identifier[self] . identifier[get_object] ( literal[string] ,
identifier[params] , identifier[LoadBalancer] )
identifier[load_balancer] . identifier[name] = identifier[name]
identifier[load_balancer] . identifier[listeners] = identifier[listeners]
identifier[load_balancer] . identifier[availability_zones] = identifier[zones]
identifier[load_balancer] . identifier[subnets] = identifier[subnets]
identifier[load_balancer] . identifier[security_groups] = identifier[security_groups]
keyword[return] identifier[load_balancer] | def create_load_balancer(self, name, zones, listeners, subnets=None, security_groups=None):
"""
Create a new load balancer for your account. By default the load
balancer will be created in EC2. To create a load balancer inside a
VPC, parameter zones must be set to None and subnets must not be None.
The load balancer will be automatically created under the VPC that
contains the subnet(s) specified.
:type name: string
:param name: The mnemonic name associated with the new load balancer
:type zones: List of strings
:param zones: The names of the availability zone(s) to add.
:type listeners: List of tuples
:param listeners: Each tuple contains three or four values,
(LoadBalancerPortNumber, InstancePortNumber,
Protocol, [SSLCertificateId])
where LoadBalancerPortNumber and InstancePortNumber
are integer values between 1 and 65535, Protocol is a
string containing either 'TCP', 'HTTP' or 'HTTPS';
SSLCertificateId is the ARN of an AWS IAM certificate,
and must be specified when doing HTTPS.
:rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
:return: The newly created :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
params = {'LoadBalancerName': name}
for (index, listener) in enumerate(listeners):
i = index + 1
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
if listener[2] == 'HTTPS':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[3] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if zones:
self.build_list_params(params, zones, 'AvailabilityZones.member.%d') # depends on [control=['if'], data=[]]
if subnets:
self.build_list_params(params, subnets, 'Subnets.member.%d') # depends on [control=['if'], data=[]]
if security_groups:
self.build_list_params(params, security_groups, 'SecurityGroups.member.%d') # depends on [control=['if'], data=[]]
load_balancer = self.get_object('CreateLoadBalancer', params, LoadBalancer)
load_balancer.name = name
load_balancer.listeners = listeners
load_balancer.availability_zones = zones
load_balancer.subnets = subnets
load_balancer.security_groups = security_groups
return load_balancer |
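A hedged sketch against the classic boto ELB API; credentials are assumed to come from the environment, and the names and ARN are illustrative:

import boto

conn = boto.connect_elb()
lb = conn.create_load_balancer(
    name='my-web-elb',
    zones=['us-east-1a', 'us-east-1b'],
    listeners=[(80, 8080, 'HTTP'),
               (443, 8443, 'HTTPS',
                'arn:aws:iam::123456789012:server-certificate/my-cert')],
)
print(lb.dns_name)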
def parse_degrees(cls, degrees, arcminutes, arcseconds, direction=None):
"""
Parse degrees, arcminutes, and arcseconds, including direction (N, S, E, W)
"""
degrees = float(degrees)
negative = degrees < 0
arcminutes = float(arcminutes)
arcseconds = float(arcseconds)
if arcminutes or arcseconds:
more = units.degrees(arcminutes=arcminutes, arcseconds=arcseconds)
if negative:
degrees -= more
else:
degrees += more
if direction in [None, 'N', 'E']:
return degrees
elif direction in ['S', 'W']:
return -degrees
else:
raise ValueError("Invalid direction! Should be one of [NSEW].") | def function[parse_degrees, parameter[cls, degrees, arcminutes, arcseconds, direction]]:
constant[
Parse degrees, arcminutes, and arcseconds, including direction (N, S, E, W)
]
variable[degrees] assign[=] call[name[float], parameter[name[degrees]]]
variable[negative] assign[=] compare[name[degrees] less[<] constant[0]]
variable[arcminutes] assign[=] call[name[float], parameter[name[arcminutes]]]
variable[arcseconds] assign[=] call[name[float], parameter[name[arcseconds]]]
if <ast.BoolOp object at 0x7da20c7ca320> begin[:]
variable[more] assign[=] call[name[units].degrees, parameter[]]
if name[negative] begin[:]
<ast.AugAssign object at 0x7da18c4ce950>
if compare[name[direction] in list[[<ast.Constant object at 0x7da1b1a66590>, <ast.Constant object at 0x7da1b1a64190>, <ast.Constant object at 0x7da1b1a67ca0>]]] begin[:]
return[name[degrees]] | keyword[def] identifier[parse_degrees] ( identifier[cls] , identifier[degrees] , identifier[arcminutes] , identifier[arcseconds] , identifier[direction] = keyword[None] ):
literal[string]
identifier[degrees] = identifier[float] ( identifier[degrees] )
identifier[negative] = identifier[degrees] < literal[int]
identifier[arcminutes] = identifier[float] ( identifier[arcminutes] )
identifier[arcseconds] = identifier[float] ( identifier[arcseconds] )
keyword[if] identifier[arcminutes] keyword[or] identifier[arcseconds] :
identifier[more] = identifier[units] . identifier[degrees] ( identifier[arcminutes] = identifier[arcminutes] , identifier[arcseconds] = identifier[arcseconds] )
keyword[if] identifier[negative] :
identifier[degrees] -= identifier[more]
keyword[else] :
identifier[degrees] += identifier[more]
keyword[if] identifier[direction] keyword[in] [ keyword[None] , literal[string] , literal[string] ]:
keyword[return] identifier[degrees]
keyword[elif] identifier[direction] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] - identifier[degrees]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def parse_degrees(cls, degrees, arcminutes, arcseconds, direction=None):
"""
Parse degrees, arcminutes, and arcseconds, including direction (N, S, E, W)
"""
degrees = float(degrees)
negative = degrees < 0
arcminutes = float(arcminutes)
arcseconds = float(arcseconds)
if arcminutes or arcseconds:
more = units.degrees(arcminutes=arcminutes, arcseconds=arcseconds)
if negative:
degrees -= more # depends on [control=['if'], data=[]]
else:
degrees += more # depends on [control=['if'], data=[]]
if direction in [None, 'N', 'E']:
return degrees # depends on [control=['if'], data=[]]
elif direction in ['S', 'W']:
return -degrees # depends on [control=['if'], data=[]]
else:
raise ValueError('Invalid direction! Should be one of [NSEW].') |
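A pure-arithmetic mirror of the conversion, with no external dependencies, for 30 degrees 15 arcminutes 50 arcseconds South:

degrees, arcminutes, arcseconds = 30.0, 15.0, 50.0
value = degrees + arcminutes / 60.0 + arcseconds / 3600.0
value = -value  # direction 'S' (or 'W') flips the sign
assert abs(value + 30.263889) < 1e-6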
def resize(self, package):
"""
::
POST /:login/machines/:id?action=resize
Initiate resizing of the remote machine to a new package.
"""
if isinstance(package, dict):
package = package['name']
action = {'action': 'resize',
'package': package}
j, r = self.datacenter.request('POST', self.path, params=action)
r.raise_for_status() | def function[resize, parameter[self, package]]:
constant[
::
POST /:login/machines/:id?action=resize
Initiate resizing of the remote machine to a new package.
]
if call[name[isinstance], parameter[name[package], name[dict]]] begin[:]
variable[package] assign[=] call[name[package]][constant[name]]
variable[action] assign[=] dictionary[[<ast.Constant object at 0x7da18f09c0a0>, <ast.Constant object at 0x7da18f09e290>], [<ast.Constant object at 0x7da18f09d300>, <ast.Name object at 0x7da18f09f9a0>]]
<ast.Tuple object at 0x7da18f09c0d0> assign[=] call[name[self].datacenter.request, parameter[constant[POST], name[self].path]]
call[name[r].raise_for_status, parameter[]] | keyword[def] identifier[resize] ( identifier[self] , identifier[package] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[package] , identifier[dict] ):
identifier[package] = identifier[package] [ literal[string] ]
identifier[action] ={ literal[string] : literal[string] ,
literal[string] : identifier[package] }
identifier[j] , identifier[r] = identifier[self] . identifier[datacenter] . identifier[request] ( literal[string] , identifier[self] . identifier[path] , identifier[params] = identifier[action] )
identifier[r] . identifier[raise_for_status] () | def resize(self, package):
"""
::
POST /:login/machines/:id?action=resize
Initiate resizing of the remote machine to a new package.
"""
if isinstance(package, dict):
package = package['name'] # depends on [control=['if'], data=[]]
action = {'action': 'resize', 'package': package}
(j, r) = self.datacenter.request('POST', self.path, params=action)
r.raise_for_status() |
def configure_sessionmaker(graph):
"""
Create the SQLAlchemy session class.
"""
engine_routing_strategy = getattr(graph, graph.config.sessionmaker.engine_routing_strategy)
if engine_routing_strategy.supports_multiple_binds:
ScopedFactory.infect(graph, "postgres")
class RoutingSession(Session):
"""
Route session bind to an appropriate engine.
See: http://docs.sqlalchemy.org/en/latest/orm/persistence_techniques.html#partitioning-strategies
"""
def get_bind(self, mapper=None, clause=None):
return engine_routing_strategy.get_bind(mapper, clause)
return sessionmaker(class_=RoutingSession) | def function[configure_sessionmaker, parameter[graph]]:
constant[
Create the SQLAlchemy session class.
]
variable[engine_routing_strategy] assign[=] call[name[getattr], parameter[name[graph], name[graph].config.sessionmaker.engine_routing_strategy]]
if name[engine_routing_strategy].supports_multiple_binds begin[:]
call[name[ScopedFactory].infect, parameter[name[graph], constant[postgres]]]
class class[RoutingSession, parameter[]] begin[:]
constant[
Route session bind to an appropriate engine.
See: http://docs.sqlalchemy.org/en/latest/orm/persistence_techniques.html#partitioning-strategies
]
def function[get_bind, parameter[self, mapper, clause]]:
return[call[name[engine_routing_strategy].get_bind, parameter[name[mapper], name[clause]]]]
return[call[name[sessionmaker], parameter[]]] | keyword[def] identifier[configure_sessionmaker] ( identifier[graph] ):
literal[string]
identifier[engine_routing_strategy] = identifier[getattr] ( identifier[graph] , identifier[graph] . identifier[config] . identifier[sessionmaker] . identifier[engine_routing_strategy] )
keyword[if] identifier[engine_routing_strategy] . identifier[supports_multiple_binds] :
identifier[ScopedFactory] . identifier[infect] ( identifier[graph] , literal[string] )
keyword[class] identifier[RoutingSession] ( identifier[Session] ):
literal[string]
keyword[def] identifier[get_bind] ( identifier[self] , identifier[mapper] = keyword[None] , identifier[clause] = keyword[None] ):
keyword[return] identifier[engine_routing_strategy] . identifier[get_bind] ( identifier[mapper] , identifier[clause] )
keyword[return] identifier[sessionmaker] ( identifier[class_] = identifier[RoutingSession] ) | def configure_sessionmaker(graph):
"""
Create the SQLAlchemy session class.
"""
engine_routing_strategy = getattr(graph, graph.config.sessionmaker.engine_routing_strategy)
if engine_routing_strategy.supports_multiple_binds:
ScopedFactory.infect(graph, 'postgres') # depends on [control=['if'], data=[]]
class RoutingSession(Session):
"""
Route session bind to an appropriate engine.
See: http://docs.sqlalchemy.org/en/latest/orm/persistence_techniques.html#partitioning-strategies
"""
def get_bind(self, mapper=None, clause=None):
return engine_routing_strategy.get_bind(mapper, clause)
return sessionmaker(class_=RoutingSession) |
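A standalone illustration of the same partitioning idea with plain SQLAlchemy (no object graph); the read/write routing policy here is hypothetical:

from sqlalchemy import create_engine
from sqlalchemy.orm import Session, sessionmaker

primary = create_engine('sqlite:///primary.db')
replica = create_engine('sqlite:///replica.db')

class DemoRoutingSession(Session):
    def get_bind(self, mapper=None, clause=None):
        # Hypothetical policy: write (flush) traffic goes to the primary.
        return primary if self._flushing else replica

session = sessionmaker(class_=DemoRoutingSession)()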
def resize_image_with_crop_or_pad(img, target_height, target_width):
"""
Crops and/or pads an image to a target width and height.
Resizes an image to a target width and height by either cropping the image or padding it with zeros.
NO CENTER CROP. NO CENTER PAD. (Just fill bottom right or crop bottom right)
:param img: Numpy array representing the image.
:param target_height: Target height.
:param target_width: Target width.
:return: The cropped and padded image.
"""
h, w = target_height, target_width
max_h, max_w, c = img.shape
# crop
img = crop_center(img, min(max_h, h), min(max_w, w))
# pad
padded_img = np.zeros(shape=(h, w, c), dtype=img.dtype)
padded_img[:img.shape[0], :img.shape[1], :img.shape[2]] = img
return padded_img | def function[resize_image_with_crop_or_pad, parameter[img, target_height, target_width]]:
constant[
Crops and/or pads an image to a target width and height.
Resizes an image to a target width and height by either cropping the image or padding it with zeros.
NO CENTER CROP. NO CENTER PAD. (Just fill bottom right or crop bottom right)
:param img: Numpy array representing the image.
:param target_height: Target height.
:param target_width: Target width.
:return: The cropped and padded image.
]
<ast.Tuple object at 0x7da1b1a66da0> assign[=] tuple[[<ast.Name object at 0x7da1b1a674c0>, <ast.Name object at 0x7da1b1a67460>]]
<ast.Tuple object at 0x7da1b1a65de0> assign[=] name[img].shape
variable[img] assign[=] call[name[crop_center], parameter[name[img], call[name[min], parameter[name[max_h], name[h]]], call[name[min], parameter[name[max_w], name[w]]]]]
variable[padded_img] assign[=] call[name[np].zeros, parameter[]]
call[name[padded_img]][tuple[[<ast.Slice object at 0x7da1b1a66a40>, <ast.Slice object at 0x7da1b1a65db0>, <ast.Slice object at 0x7da1b1a95d80>]]] assign[=] name[img]
return[name[padded_img]] | keyword[def] identifier[resize_image_with_crop_or_pad] ( identifier[img] , identifier[target_height] , identifier[target_width] ):
literal[string]
identifier[h] , identifier[w] = identifier[target_height] , identifier[target_width]
identifier[max_h] , identifier[max_w] , identifier[c] = identifier[img] . identifier[shape]
identifier[img] = identifier[crop_center] ( identifier[img] , identifier[min] ( identifier[max_h] , identifier[h] ), identifier[min] ( identifier[max_w] , identifier[w] ))
identifier[padded_img] = identifier[np] . identifier[zeros] ( identifier[shape] =( identifier[h] , identifier[w] , identifier[c] ), identifier[dtype] = identifier[img] . identifier[dtype] )
identifier[padded_img] [: identifier[img] . identifier[shape] [ literal[int] ],: identifier[img] . identifier[shape] [ literal[int] ],: identifier[img] . identifier[shape] [ literal[int] ]]= identifier[img]
keyword[return] identifier[padded_img] | def resize_image_with_crop_or_pad(img, target_height, target_width):
"""
Crops and/or pads an image to a target width and height.
Resizes an image to a target width and height by either cropping the image or padding it with zeros.
NO CENTER CROP. NO CENTER PAD. (Just fill bottom right or crop bottom right)
:param img: Numpy array representing the image.
:param target_height: Target height.
:param target_width: Target width.
:return: The cropped and padded image.
"""
(h, w) = (target_height, target_width)
(max_h, max_w, c) = img.shape
# crop
img = crop_center(img, min(max_h, h), min(max_w, w))
# pad
padded_img = np.zeros(shape=(h, w, c), dtype=img.dtype)
padded_img[:img.shape[0], :img.shape[1], :img.shape[2]] = img
return padded_img |
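crop_center is external to the snippet above; a minimal top-left stand-in (an assumption, matching the 'no center crop' note in the docstring) lets the function run end to end:

import numpy as np

def crop_center(img, h, w):  # assumed stand-in for the external helper
    return img[:h, :w, :]

img = np.ones((3, 5, 1), dtype=np.uint8)
out = resize_image_with_crop_or_pad(img, target_height=4, target_width=4)
print(out.shape)     # (4, 4, 1)
print(out[3, :, 0])  # bottom pad row -> all zeros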
def on_disconnect(self, broker):
"""
Called by :class:`Broker` to force disconnect the stream. The base
implementation simply closes :attr:`receive_side` and
:attr:`transmit_side` and unregisters the stream from the broker.
"""
LOG.debug('%r.on_disconnect()', self)
if self.receive_side:
broker.stop_receive(self)
self.receive_side.close()
if self.transmit_side:
broker._stop_transmit(self)
self.transmit_side.close()
fire(self, 'disconnect') | def function[on_disconnect, parameter[self, broker]]:
constant[
Called by :class:`Broker` to force disconnect the stream. The base
implementation simply closes :attr:`receive_side` and
:attr:`transmit_side` and unregisters the stream from the broker.
]
call[name[LOG].debug, parameter[constant[%r.on_disconnect()], name[self]]]
if name[self].receive_side begin[:]
call[name[broker].stop_receive, parameter[name[self]]]
call[name[self].receive_side.close, parameter[]]
if name[self].transmit_side begin[:]
call[name[broker]._stop_transmit, parameter[name[self]]]
call[name[self].transmit_side.close, parameter[]]
call[name[fire], parameter[name[self], constant[disconnect]]] | keyword[def] identifier[on_disconnect] ( identifier[self] , identifier[broker] ):
literal[string]
identifier[LOG] . identifier[debug] ( literal[string] , identifier[self] )
keyword[if] identifier[self] . identifier[receive_side] :
identifier[broker] . identifier[stop_receive] ( identifier[self] )
identifier[self] . identifier[receive_side] . identifier[close] ()
keyword[if] identifier[self] . identifier[transmit_side] :
identifier[broker] . identifier[_stop_transmit] ( identifier[self] )
identifier[self] . identifier[transmit_side] . identifier[close] ()
identifier[fire] ( identifier[self] , literal[string] ) | def on_disconnect(self, broker):
"""
Called by :class:`Broker` to force disconnect the stream. The base
implementation simply closes :attr:`receive_side` and
:attr:`transmit_side` and unregisters the stream from the broker.
"""
LOG.debug('%r.on_disconnect()', self)
if self.receive_side:
broker.stop_receive(self)
self.receive_side.close() # depends on [control=['if'], data=[]]
if self.transmit_side:
broker._stop_transmit(self)
self.transmit_side.close() # depends on [control=['if'], data=[]]
fire(self, 'disconnect') |
def generate(env):
"""Add Builders and construction variables for ifl to an Environment."""
fscan = FortranScan("FORTRANPATH")
SCons.Tool.SourceFileScanner.add_scanner('.i', fscan)
SCons.Tool.SourceFileScanner.add_scanner('.i90', fscan)
if 'FORTRANFILESUFFIXES' not in env:
env['FORTRANFILESUFFIXES'] = ['.i']
else:
env['FORTRANFILESUFFIXES'].append('.i')
if 'F90FILESUFFIXES' not in env:
env['F90FILESUFFIXES'] = ['.i90']
else:
env['F90FILESUFFIXES'].append('.i90')
add_all_to_env(env)
env['FORTRAN'] = 'ifl'
env['SHFORTRAN'] = '$FORTRAN'
env['FORTRANCOM'] = '$FORTRAN $FORTRANFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET'
env['FORTRANPPCOM'] = '$FORTRAN $FORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET'
env['SHFORTRANCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET'
env['SHFORTRANPPCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET' | def function[generate, parameter[env]]:
constant[Add Builders and construction variables for ifl to an Environment.]
variable[fscan] assign[=] call[name[FortranScan], parameter[constant[FORTRANPATH]]]
call[name[SCons].Tool.SourceFileScanner.add_scanner, parameter[constant[.i], name[fscan]]]
call[name[SCons].Tool.SourceFileScanner.add_scanner, parameter[constant[.i90], name[fscan]]]
if compare[constant[FORTRANFILESUFFIXES] <ast.NotIn object at 0x7da2590d7190> name[env]] begin[:]
call[name[env]][constant[FORTRANFILESUFFIXES]] assign[=] list[[<ast.Constant object at 0x7da20c6c5480>]]
if compare[constant[F90FILESUFFIXES] <ast.NotIn object at 0x7da2590d7190> name[env]] begin[:]
call[name[env]][constant[F90FILESUFFIXES]] assign[=] list[[<ast.Constant object at 0x7da20c6c4ee0>]]
call[name[add_all_to_env], parameter[name[env]]]
call[name[env]][constant[FORTRAN]] assign[=] constant[ifl]
call[name[env]][constant[SHFORTRAN]] assign[=] constant[$FORTRAN]
call[name[env]][constant[FORTRANCOM]] assign[=] constant[$FORTRAN $FORTRANFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET]
call[name[env]][constant[FORTRANPPCOM]] assign[=] constant[$FORTRAN $FORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET]
call[name[env]][constant[SHFORTRANCOM]] assign[=] constant[$SHFORTRAN $SHFORTRANFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET]
call[name[env]][constant[SHFORTRANPPCOM]] assign[=] constant[$SHFORTRAN $SHFORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET] | keyword[def] identifier[generate] ( identifier[env] ):
literal[string]
identifier[fscan] = identifier[FortranScan] ( literal[string] )
identifier[SCons] . identifier[Tool] . identifier[SourceFileScanner] . identifier[add_scanner] ( literal[string] , identifier[fscan] )
identifier[SCons] . identifier[Tool] . identifier[SourceFileScanner] . identifier[add_scanner] ( literal[string] , identifier[fscan] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[env] :
identifier[env] [ literal[string] ]=[ literal[string] ]
keyword[else] :
identifier[env] [ literal[string] ]. identifier[append] ( literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[env] :
identifier[env] [ literal[string] ]=[ literal[string] ]
keyword[else] :
identifier[env] [ literal[string] ]. identifier[append] ( literal[string] )
identifier[add_all_to_env] ( identifier[env] )
identifier[env] [ literal[string] ]= literal[string]
identifier[env] [ literal[string] ]= literal[string]
identifier[env] [ literal[string] ]= literal[string]
identifier[env] [ literal[string] ]= literal[string]
identifier[env] [ literal[string] ]= literal[string]
identifier[env] [ literal[string] ]= literal[string] | def generate(env):
"""Add Builders and construction variables for ifl to an Environment."""
fscan = FortranScan('FORTRANPATH')
SCons.Tool.SourceFileScanner.add_scanner('.i', fscan)
SCons.Tool.SourceFileScanner.add_scanner('.i90', fscan)
if 'FORTRANFILESUFFIXES' not in env:
env['FORTRANFILESUFFIXES'] = ['.i'] # depends on [control=['if'], data=['env']]
else:
env['FORTRANFILESUFFIXES'].append('.i')
if 'F90FILESUFFIXES' not in env:
env['F90FILESUFFIXES'] = ['.i90'] # depends on [control=['if'], data=['env']]
else:
env['F90FILESUFFIXES'].append('.i90')
add_all_to_env(env)
env['FORTRAN'] = 'ifl'
env['SHFORTRAN'] = '$FORTRAN'
env['FORTRANCOM'] = '$FORTRAN $FORTRANFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET'
env['FORTRANPPCOM'] = '$FORTRAN $FORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET'
env['SHFORTRANCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET'
env['SHFORTRANPPCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET' |
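The generate() function above follows the SCons tool-module protocol: SCons imports the tool module and calls generate(env) when the tool is requested. A minimal SConstruct sketch, assuming this module is installed as the 'ifl' tool (target and source names here are illustrative):
# SConstruct (illustrative)
env = Environment(tools=['default', 'ifl'])
# generate(env) has now registered the .i/.i90 scanners and the ifl command
# lines, so Fortran sources are scanned for FORTRANPATH includes.
env.Program('demo', ['main.i90'])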
def listen(self, message_consumer):
"""Blocking call to listen for messages on the rfile.
Args:
message_consumer (fn): function that is passed each message as it is read off the socket.
"""
while not self._rfile.closed:
request_str = self._read_message()
if request_str is None:
break
try:
message_consumer(json.loads(request_str.decode('utf-8')))
except ValueError:
log.exception("Failed to parse JSON message %s", request_str)
continue | def function[listen, parameter[self, message_consumer]]:
constant[Blocking call to listen for messages on the rfile.
Args:
message_consumer (fn): function that is passed each message as it is read off the socket.
]
while <ast.UnaryOp object at 0x7da1b2828af0> begin[:]
variable[request_str] assign[=] call[name[self]._read_message, parameter[]]
if compare[name[request_str] is constant[None]] begin[:]
break
<ast.Try object at 0x7da1b282b3a0> | keyword[def] identifier[listen] ( identifier[self] , identifier[message_consumer] ):
literal[string]
keyword[while] keyword[not] identifier[self] . identifier[_rfile] . identifier[closed] :
identifier[request_str] = identifier[self] . identifier[_read_message] ()
keyword[if] identifier[request_str] keyword[is] keyword[None] :
keyword[break]
keyword[try] :
identifier[message_consumer] ( identifier[json] . identifier[loads] ( identifier[request_str] . identifier[decode] ( literal[string] )))
keyword[except] identifier[ValueError] :
identifier[log] . identifier[exception] ( literal[string] , identifier[request_str] )
keyword[continue] | def listen(self, message_consumer):
"""Blocking call to listen for messages on the rfile.
Args:
message_consumer (fn): function that is passed each message as it is read off the socket.
"""
while not self._rfile.closed:
request_str = self._read_message()
if request_str is None:
break # depends on [control=['if'], data=[]]
try:
message_consumer(json.loads(request_str.decode('utf-8'))) # depends on [control=['try'], data=[]]
except ValueError:
log.exception('Failed to parse JSON message %s', request_str)
continue # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] |
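listen() is the read loop of a stream reader: each payload returned by the private _read_message() helper is decoded as UTF-8 JSON and handed to the callback. A hedged sketch of the consumer side (the reader class name and its construction are assumptions about the surrounding module):
def handle_message(message):
    # message is a dict already parsed from the JSON payload
    print(message.get('method'), message.get('id'))
# reader = JsonRpcStreamReader(rfile)  # hypothetical constructor
# reader.listen(handle_message)        # blocks until rfile is closed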
def _get_output(algorithm, iport=0, iconnection=0, oport=0, active_scalar=None,
active_scalar_field='point'):
"""A helper to get the algorithm's output and copy input's vtki meta info"""
ido = algorithm.GetInputDataObject(iport, iconnection)
data = wrap(algorithm.GetOutputDataObject(oport))
data.copy_meta_from(ido)
if active_scalar is not None:
data.set_active_scalar(active_scalar, preference=active_scalar_field)
return data | def function[_get_output, parameter[algorithm, iport, iconnection, oport, active_scalar, active_scalar_field]]:
constant[A helper to get the algorithm's output and copy input's vtki meta info]
variable[ido] assign[=] call[name[algorithm].GetInputDataObject, parameter[name[iport], name[iconnection]]]
variable[data] assign[=] call[name[wrap], parameter[call[name[algorithm].GetOutputDataObject, parameter[name[oport]]]]]
call[name[data].copy_meta_from, parameter[name[ido]]]
if compare[name[active_scalar] is_not constant[None]] begin[:]
call[name[data].set_active_scalar, parameter[name[active_scalar]]]
return[name[data]] | keyword[def] identifier[_get_output] ( identifier[algorithm] , identifier[iport] = literal[int] , identifier[iconnection] = literal[int] , identifier[oport] = literal[int] , identifier[active_scalar] = keyword[None] ,
identifier[active_scalar_field] = literal[string] ):
literal[string]
identifier[ido] = identifier[algorithm] . identifier[GetInputDataObject] ( identifier[iport] , identifier[iconnection] )
identifier[data] = identifier[wrap] ( identifier[algorithm] . identifier[GetOutputDataObject] ( identifier[oport] ))
identifier[data] . identifier[copy_meta_from] ( identifier[ido] )
keyword[if] identifier[active_scalar] keyword[is] keyword[not] keyword[None] :
identifier[data] . identifier[set_active_scalar] ( identifier[active_scalar] , identifier[preference] = identifier[active_scalar_field] )
keyword[return] identifier[data] | def _get_output(algorithm, iport=0, iconnection=0, oport=0, active_scalar=None, active_scalar_field='point'):
"""A helper to get the algorithm's output and copy input's vtki meta info"""
ido = algorithm.GetInputDataObject(iport, iconnection)
data = wrap(algorithm.GetOutputDataObject(oport))
data.copy_meta_from(ido)
if active_scalar is not None:
data.set_active_scalar(active_scalar, preference=active_scalar_field) # depends on [control=['if'], data=['active_scalar']]
return data |
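_get_output() is the glue used after executing a VTK algorithm: it wraps the output data object and copies the vtki metadata from the input port. A sketch of typical use (extract_edges is a hypothetical wrapper name; vtkExtractEdges is just an example filter):
import vtk

def extract_edges(dataset):
    alg = vtk.vtkExtractEdges()
    alg.SetInputDataObject(0, dataset)  # input port 0, connection 0
    alg.Update()                        # execute the pipeline
    return _get_output(alg)             # wrapped output carrying dataset's metadata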
def serialize(self):
"""This function serialize into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here we directly return all attributes
:return: json representation of a DependencyNode
:rtype: dict
"""
return {'operand': self.operand, 'sons': [serialize(elem) for elem in self.sons],
'of_values': self.of_values, 'is_of_mul': self.is_of_mul,
'not_value': self.not_value} | def function[serialize, parameter[self]]:
constant[This function serializes the node into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here we directly return all attributes
:return: json representation of a DependencyNode
:rtype: dict
]
return[dictionary[[<ast.Constant object at 0x7da207f00190>, <ast.Constant object at 0x7da207f001c0>, <ast.Constant object at 0x7da207f03cd0>, <ast.Constant object at 0x7da207f01c90>, <ast.Constant object at 0x7da207f006a0>], [<ast.Attribute object at 0x7da207f030d0>, <ast.ListComp object at 0x7da207f01e10>, <ast.Attribute object at 0x7da207f03ca0>, <ast.Attribute object at 0x7da207f02dd0>, <ast.Attribute object at 0x7da207f03bb0>]]] | keyword[def] identifier[serialize] ( identifier[self] ):
literal[string]
keyword[return] { literal[string] : identifier[self] . identifier[operand] , literal[string] :[ identifier[serialize] ( identifier[elem] ) keyword[for] identifier[elem] keyword[in] identifier[self] . identifier[sons] ],
literal[string] : identifier[self] . identifier[of_values] , literal[string] : identifier[self] . identifier[is_of_mul] ,
literal[string] : identifier[self] . identifier[not_value] } | def serialize(self):
"""This function serialize into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here we directly return all attributes
:return: json representation of a DependencyNode
:rtype: dict
"""
return {'operand': self.operand, 'sons': [serialize(elem) for elem in self.sons], 'of_values': self.of_values, 'is_of_mul': self.is_of_mul, 'not_value': self.not_value} |
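serialize() recurses through self.sons via the module-level serialize() helper, so the whole dependency tree flattens to nested dicts that are JSON-safe. An illustrative round trip (DependencyNode construction is assumed from the surrounding module):
import json

# node = DependencyNode(...)              # built by the rule parser
# payload = json.dumps(node.serialize())  # ship over HTTP to another daemon
# tree = json.loads(payload)              # plain dicts on the receiving side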
def sky2pix_vec(self, pos, r, pa):
"""
Convert a vector from sky to pixel coords.
The vector has a magnitude, angle, and an origin on the sky.
Parameters
----------
pos : (float, float)
The (ra, dec) of the origin of the vector (degrees).
r : float
The magnitude or length of the vector (degrees).
pa : float
The position angle of the vector (degrees).
Returns
-------
x, y : float
The pixel coordinates of the origin.
r, theta : float
The magnitude (pixels) and angle (degrees) of the vector.
"""
ra, dec = pos
x, y = self.sky2pix(pos)
end_pos = translate(ra, dec, r, pa)  # sky position at the far end of the vector
x_off, y_off = self.sky2pix(end_pos)
a = np.sqrt((x - x_off) ** 2 + (y - y_off) ** 2)  # vector length in pixels
theta = np.degrees(np.arctan2((y_off - y), (x_off - x)))
return x, y, a, theta | def function[sky2pix_vec, parameter[self, pos, r, pa]]:
constant[
Convert a vector from sky to pixel coords.
The vector has a magnitude, angle, and an origin on the sky.
Parameters
----------
pos : (float, float)
The (ra, dec) of the origin of the vector (degrees).
r : float
The magnitude or length of the vector (degrees).
pa : float
The position angle of the vector (degrees).
Returns
-------
x, y : float
The pixel coordinates of the origin.
r, theta : float
The magnitude (pixels) and angle (degrees) of the vector.
]
<ast.Tuple object at 0x7da20c7950c0> assign[=] name[pos]
<ast.Tuple object at 0x7da20c795b10> assign[=] call[name[self].sky2pix, parameter[name[pos]]]
variable[a] assign[=] call[name[translate], parameter[name[ra], name[dec], name[r], name[pa]]]
variable[locations] assign[=] call[name[self].sky2pix, parameter[name[a]]]
<ast.Tuple object at 0x7da20c794c70> assign[=] name[locations]
variable[a] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[binary_operation[name[x] - name[x_off]] ** constant[2]] + binary_operation[binary_operation[name[y] - name[y_off]] ** constant[2]]]]]
variable[theta] assign[=] call[name[np].degrees, parameter[call[name[np].arctan2, parameter[binary_operation[name[y_off] - name[y]], binary_operation[name[x_off] - name[x]]]]]]
return[tuple[[<ast.Name object at 0x7da20c795330>, <ast.Name object at 0x7da20c7951e0>, <ast.Name object at 0x7da20c7940d0>, <ast.Name object at 0x7da20c7957e0>]]] | keyword[def] identifier[sky2pix_vec] ( identifier[self] , identifier[pos] , identifier[r] , identifier[pa] ):
literal[string]
identifier[ra] , identifier[dec] = identifier[pos]
identifier[x] , identifier[y] = identifier[self] . identifier[sky2pix] ( identifier[pos] )
identifier[a] = identifier[translate] ( identifier[ra] , identifier[dec] , identifier[r] , identifier[pa] )
identifier[locations] = identifier[self] . identifier[sky2pix] ( identifier[a] )
identifier[x_off] , identifier[y_off] = identifier[locations]
identifier[a] = identifier[np] . identifier[sqrt] (( identifier[x] - identifier[x_off] )** literal[int] +( identifier[y] - identifier[y_off] )** literal[int] )
identifier[theta] = identifier[np] . identifier[degrees] ( identifier[np] . identifier[arctan2] (( identifier[y_off] - identifier[y] ),( identifier[x_off] - identifier[x] )))
keyword[return] identifier[x] , identifier[y] , identifier[a] , identifier[theta] | def sky2pix_vec(self, pos, r, pa):
"""
Convert a vector from sky to pixel coords.
The vector has a magnitude, angle, and an origin on the sky.
Parameters
----------
pos : (float, float)
The (ra, dec) of the origin of the vector (degrees).
r : float
The magnitude or length of the vector (degrees).
pa : float
The position angle of the vector (degrees).
Returns
-------
x, y : float
The pixel coordinates of the origin.
r, theta : float
The magnitude (pixels) and angle (degrees) of the vector.
"""
(ra, dec) = pos
(x, y) = self.sky2pix(pos)
a = translate(ra, dec, r, pa)
locations = self.sky2pix(a)
(x_off, y_off) = locations
a = np.sqrt((x - x_off) ** 2 + (y - y_off) ** 2)
theta = np.degrees(np.arctan2(y_off - y, x_off - x))
return (x, y, a, theta) |
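sky2pix_vec() works by translating the sky origin along the position angle, projecting both endpoints to pixels, and measuring the pixel-space length and angle with arctan2. An illustrative call (the helper object is assumed to wrap a loaded WCS):
# x, y, r_pix, theta = wcshelper.sky2pix_vec((180.0, -45.0), 60 / 3600.0, 90.0)
# (x, y): origin in pixels; r_pix: length in pixels; theta: angle in degrees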
def insert(self, value, key):
"""
Insert a value into a tree rooted at the given node, and return
whether this was an insertion or update.
Balances the tree during insertion.
An update is performed instead of an insertion if a value in the tree
compares equal to the new value.
"""
# Base case: Insertion into the empty tree is just creating a new node
# with no children.
if self is NULL:
return Node(value, NULL, NULL, True), True
# Recursive case: Insertion into a non-empty tree is insertion into
# whichever of the two sides is correctly compared.
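# NOTE: cmp() below is the Python 2 built-in; on Python 3 an equivalent
# is (a > b) - (a < b) for the keys being compared.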
direction = cmp(key(value), key(self.value))
if direction < 0:
left, insertion = self.left.insert(value, key)
self = self._replace(left=left)
elif direction > 0:
right, insertion = self.right.insert(value, key)
self = self._replace(right=right)
elif direction == 0:
# Exact hit on an existing node (this node, in fact). In this
# case, perform an update.
self = self._replace(value=value)
insertion = False
# And balance on the way back up.
return self.balance(), insertion | def function[insert, parameter[self, value, key]]:
constant[
Insert a value into a tree rooted at the given node, and return
whether this was an insertion or update.
Balances the tree during insertion.
An update is performed instead of an insertion if a value in the tree
compares equal to the new value.
]
if compare[name[self] is name[NULL]] begin[:]
return[tuple[[<ast.Call object at 0x7da1b27e1f60>, <ast.Constant object at 0x7da20c76c280>]]]
variable[direction] assign[=] call[name[cmp], parameter[call[name[key], parameter[name[value]]], call[name[key], parameter[name[self].value]]]]
if compare[name[direction] less[<] constant[0]] begin[:]
<ast.Tuple object at 0x7da20c76d540> assign[=] call[name[self].left.insert, parameter[name[value], name[key]]]
variable[self] assign[=] call[name[self]._replace, parameter[]]
return[tuple[[<ast.Call object at 0x7da20c76fe20>, <ast.Name object at 0x7da20c76f880>]]] | keyword[def] identifier[insert] ( identifier[self] , identifier[value] , identifier[key] ):
literal[string]
keyword[if] identifier[self] keyword[is] identifier[NULL] :
keyword[return] identifier[Node] ( identifier[value] , identifier[NULL] , identifier[NULL] , keyword[True] ), keyword[True]
identifier[direction] = identifier[cmp] ( identifier[key] ( identifier[value] ), identifier[key] ( identifier[self] . identifier[value] ))
keyword[if] identifier[direction] < literal[int] :
identifier[left] , identifier[insertion] = identifier[self] . identifier[left] . identifier[insert] ( identifier[value] , identifier[key] )
identifier[self] = identifier[self] . identifier[_replace] ( identifier[left] = identifier[left] )
keyword[elif] identifier[direction] > literal[int] :
identifier[right] , identifier[insertion] = identifier[self] . identifier[right] . identifier[insert] ( identifier[value] , identifier[key] )
identifier[self] = identifier[self] . identifier[_replace] ( identifier[right] = identifier[right] )
keyword[elif] identifier[direction] == literal[int] :
identifier[self] = identifier[self] . identifier[_replace] ( identifier[value] = identifier[value] )
identifier[insertion] = keyword[False]
keyword[return] identifier[self] . identifier[balance] (), identifier[insertion] | def insert(self, value, key):
"""
Insert a value into a tree rooted at the given node, and return
whether this was an insertion or update.
Balances the tree during insertion.
An update is performed instead of an insertion if a value in the tree
compares equal to the new value.
"""
# Base case: Insertion into the empty tree is just creating a new node
# with no children.
if self is NULL:
return (Node(value, NULL, NULL, True), True) # depends on [control=['if'], data=['NULL']]
# Recursive case: Insertion into a non-empty tree is insertion into
# whichever of the two sides is correctly compared.
direction = cmp(key(value), key(self.value))
if direction < 0:
(left, insertion) = self.left.insert(value, key)
self = self._replace(left=left) # depends on [control=['if'], data=[]]
elif direction > 0:
(right, insertion) = self.right.insert(value, key)
self = self._replace(right=right) # depends on [control=['if'], data=[]]
elif direction == 0:
# Exact hit on an existing node (this node, in fact). In this
# case, perform an update.
self = self._replace(value=value)
insertion = False # depends on [control=['if'], data=[]]
# And balance on the way back up.
return (self.balance(), insertion) |
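Because the node is immutable (note the namedtuple-style _replace calls), insert() returns a fresh, rebalanced root plus a flag instead of mutating in place. A hedged usage sketch with a Python 3 shim for the cmp() built-in this code relies on (Node and NULL come from the surrounding module):
def cmp(a, b):  # Python 2 built-in, reconstructed for Python 3
    return (a > b) - (a < b)

# root = NULL
# for v in (5, 3, 8):
#     root, inserted = root.insert(v, key=lambda x: x)  # new root each time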
def engage(self, height):
"""
Move the magnet to a specific height, in mm from home position
"""
if height > MAX_ENGAGE_HEIGHT or height < 0:
raise ValueError('Invalid engage height. Should be 0 to {}'.format(
MAX_ENGAGE_HEIGHT))
self._driver.move(height)
self._engaged = True | def function[engage, parameter[self, height]]:
constant[
Move the magnet to a specific height, in mm from home position
]
if <ast.BoolOp object at 0x7da1b086e950> begin[:]
<ast.Raise object at 0x7da1b086d5d0>
call[name[self]._driver.move, parameter[name[height]]]
name[self]._engaged assign[=] constant[True] | keyword[def] identifier[engage] ( identifier[self] , identifier[height] ):
literal[string]
keyword[if] identifier[height] > identifier[MAX_ENGAGE_HEIGHT] keyword[or] identifier[height] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[MAX_ENGAGE_HEIGHT] ))
identifier[self] . identifier[_driver] . identifier[move] ( identifier[height] )
identifier[self] . identifier[_engaged] = keyword[True] | def engage(self, height):
"""
Move the magnet to a specific height, in mm from home position
"""
if height > MAX_ENGAGE_HEIGHT or height < 0:
raise ValueError('Invalid engage height. Should be 0 to {}'.format(MAX_ENGAGE_HEIGHT)) # depends on [control=['if'], data=[]]
self._driver.move(height)
self._engaged = True |
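An illustrative call, assuming a magnetic-module object wired to this driver:
# mag_module.engage(10)  # raise the magnets 10 mm above home; heights
#                        # outside 0..MAX_ENGAGE_HEIGHT raise ValueError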
def setInverted(self, state):
"""
Sets whether or not to invert the check state for collapsing.
:param state | <bool>
"""
collapsed = self.isCollapsed()
self._inverted = state
if self.isCollapsible():
self.setCollapsed(collapsed) | def function[setInverted, parameter[self, state]]:
constant[
Sets whether or not to invert the check state for collapsing.
:param state | <bool>
]
variable[collapsed] assign[=] call[name[self].isCollapsed, parameter[]]
name[self]._inverted assign[=] name[state]
if call[name[self].isCollapsible, parameter[]] begin[:]
call[name[self].setCollapsed, parameter[name[collapsed]]] | keyword[def] identifier[setInverted] ( identifier[self] , identifier[state] ):
literal[string]
identifier[collapsed] = identifier[self] . identifier[isCollapsed] ()
identifier[self] . identifier[_inverted] = identifier[state]
keyword[if] identifier[self] . identifier[isCollapsible] ():
identifier[self] . identifier[setCollapsed] ( identifier[collapsed] ) | def setInverted(self, state):
"""
Sets whether or not to invert the check state for collapsing.
:param state | <bool>
"""
collapsed = self.isCollapsed()
self._inverted = state
if self.isCollapsible():
self.setCollapsed(collapsed) # depends on [control=['if'], data=[]] |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
if hasattr(self, 'url') and self.url is not None:
_dict['url'] = self.url
return _dict | def function[_to_dict, parameter[self]]:
constant[Return a json dictionary representing this model.]
variable[_dict] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da18c4cf4f0> begin[:]
call[name[_dict]][constant[status]] assign[=] name[self].status
if <ast.BoolOp object at 0x7da18c4ce2c0> begin[:]
call[name[_dict]][constant[url]] assign[=] name[self].url
return[name[_dict]] | keyword[def] identifier[_to_dict] ( identifier[self] ):
literal[string]
identifier[_dict] ={}
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[status] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[status]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[url] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[url]
keyword[return] identifier[_dict] | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status # depends on [control=['if'], data=[]]
if hasattr(self, 'url') and self.url is not None:
_dict['url'] = self.url # depends on [control=['if'], data=[]]
return _dict |
def add_contact(self, phone_number: str, first_name: str, last_name: str=None, on_success: callable=None):
"""
Add contact by phone number and name (last_name is optional).
:param phone_number: Valid phone number for contact.
:param first_name: First name to use.
:param last_name: Last name to use. Optional.
:param on_success: Callback to call when adding, will contain success status and the current contact list.
"""
pass | def function[add_contact, parameter[self, phone_number, first_name, last_name, on_success]]:
constant[
Add contact by phone number and name (last_name is optional).
:param phone_number: Valid phone number for contact.
:param first_name: First name to use.
:param last_name: Last name to use. Optional.
:param on_success: Callback to call when adding, will contain success status and the current contact list.
]
pass | keyword[def] identifier[add_contact] ( identifier[self] , identifier[phone_number] : identifier[str] , identifier[first_name] : identifier[str] , identifier[last_name] : identifier[str] = keyword[None] , identifier[on_success] : identifier[callable] = keyword[None] ):
literal[string]
keyword[pass] | def add_contact(self, phone_number: str, first_name: str, last_name: str=None, on_success: callable=None):
"""
Add contact by phone number and name (last_name is optional).
:param phone_number: Valid phone number for contact.
:param first_name: First name to use.
:param last_name: Last name to use. Optional.
:param on_success: Callback to call when adding, will contain success status and the current contact list.
"""
pass |
def Validate(self):
"""Check the source is well constructed."""
self._ValidateReturnedTypes()
self._ValidatePaths()
self._ValidateType()
self._ValidateRequiredAttributes()
self._ValidateCommandArgs() | def function[Validate, parameter[self]]:
constant[Check the source is well constructed.]
call[name[self]._ValidateReturnedTypes, parameter[]]
call[name[self]._ValidatePaths, parameter[]]
call[name[self]._ValidateType, parameter[]]
call[name[self]._ValidateRequiredAttributes, parameter[]]
call[name[self]._ValidateCommandArgs, parameter[]] | keyword[def] identifier[Validate] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_ValidateReturnedTypes] ()
identifier[self] . identifier[_ValidatePaths] ()
identifier[self] . identifier[_ValidateType] ()
identifier[self] . identifier[_ValidateRequiredAttributes] ()
identifier[self] . identifier[_ValidateCommandArgs] () | def Validate(self):
"""Check the source is well constructed."""
self._ValidateReturnedTypes()
self._ValidatePaths()
self._ValidateType()
self._ValidateRequiredAttributes()
self._ValidateCommandArgs() |
def tokenize(self, value):
"""Take a string and break it into tokens. Return the tokens as a list of
strings.
"""
# This code uses a state machine:
class STATE:
NORMAL = 0
GROUP_PUNCTUATION = 1
PROCESS_HTML_TAG = 2
PROCESS_HTML_ENTITY = 3
GROUP_LINEBREAKS = 4
state_names = {
STATE.NORMAL: "normal",
STATE.GROUP_PUNCTUATION: "punctuation",
STATE.PROCESS_HTML_TAG: "html",
STATE.PROCESS_HTML_ENTITY: "html_entity",
STATE.GROUP_LINEBREAKS: "break"
}
# "state" and "token" have array values to allow their
# contents to be modified within emitToken().
state = [STATE.NORMAL]
token = [""] # The current token being assembled.
tokens = [] # The tokens extracted from the input.
index = -1
def clearToken():
"""Clear the current token and return to normal state."""
token[0] = ""
state[0] = STATE.NORMAL
def emitToken():
"""Emit the current token, if any, and return to normal state."""
if len(token[0]) > 0:
# add character end and start
char_start, char_end = index, index + len(token[0])
if self.create_structured_tokens:
new_token = {'value': token[0], 'type': state_names[state[0]], 'char_start': char_start, 'char_end': char_end}
tokens.append(new_token)
else:
tokens.append(token[0])
clearToken()
def fixBrokenHtmlEntity():
# This is not a valid HTML entity.
# TODO: embedded "#" characters should be treated better
# here.
if not self.recognizePunctuation:
# If we aren't treating punctuation specially, then just treat
# the broken HTML entity as an ordinary token.
#
# TODO: This is not quite correct. "x& " should
# be treated as a single token, although "s & "
# should result in two tokens.
state[0] = STATE.NORMAL
return
if self.groupPunctuation:
# If all the saved tokens are punctuation characters, then
# enter STATE.GROUP_PUNCTUATION instead of STATE.NORMAL.
sawOnlyPunctuation = True
for c in token[0]:
if c not in CrfTokenizer.punctuationSet:
sawOnlyPunctuation = False
break
if sawOnlyPunctuation:
state[0] = STATE.GROUP_PUNCTUATION
return
# Emit the ampersand that began the prospective entity and use the
# rest as a new current token.
saveToken = token[0]
token[0] = saveToken[0:1]
emitToken()
if len(saveToken) > 1:
token[0] = saveToken[1:]
# The caller should continue processing with the current
# character.
# Process each character in the input string:
for c in value:
index += 1
if state[0] == STATE.PROCESS_HTML_TAG:
if c in CrfTokenizer.whitespaceSet:
continue # Suppress for safety. CRF++ doesn't like spaces in tokens, for example.
token[0] += c
if c == CrfTokenizer.END_HTML_TAG_CHAR:
if self.skipHtmlTags:
clearToken()
else:
emitToken()
continue
if state[0] == STATE.PROCESS_HTML_ENTITY:
# Parse an HTML entity name. TODO: embedded "#"
# characters imply more extensive parsing rules should
# be performed here.
if c == CrfTokenizer.END_HTML_ENTITY_CHAR:
if len(token[0]) == 1:
# This is the special case of "&;", which is not a
# valid HTML entity. If self.groupPunctuation is
# True, return to normal parsing state in case more
# punctuation follows. Otherwise, emit "&" and ";" as
# separate tokens.
if not self.recognizePunctuation:
# TODO: This is not quite correct. "x&;" should
# be treated as a single token, although "s &;"
# should result in two tokens.
token[0] = token[0] + c
state[0] = STATE.NORMAL
elif self.groupPunctuation:
token[0] = token[0] + c
state[0] = STATE.GROUP_PUNCTUATION
else:
emitToken() # Emit the "&" as a seperate token.
token[0] = token[0] + c
emitToken() # Emit the ";' as a seperate token.
continue
token[0] = token[0] + c
if self.skipHtmlEntities:
clearToken()
else:
emitToken()
continue
elif c in CrfTokenizer.htmlEntityNameCharacterSet:
token[0] = token[0] + c
continue
else:
# This is not a valid HTML entity.
fixBrokenHtmlEntity()
# intentional fall-through
if state[0] == STATE.GROUP_LINEBREAKS:
# we will look for \n\r and ignore spaces
if c in CrfTokenizer.linebreaking_character_set:
token[0] += c
continue
elif c in CrfTokenizer.whitespaceSet:
continue
else:
emitToken()
state[0] = STATE.NORMAL
if c in CrfTokenizer.whitespaceSet:
# White space terminates the current token, then is dropped.
emitToken()
# Check to see whether we should look for line breaks
if c in CrfTokenizer.linebreaking_start_character_set and self.recognize_linebreaks:
state[0] = STATE.GROUP_LINEBREAKS
token[0] = c
elif c == CrfTokenizer.START_HTML_TAG_CHAR and self.recognizeHtmlTags:
emitToken()
state[0] = STATE.PROCESS_HTML_TAG
token[0] = c
elif c == CrfTokenizer.START_HTML_ENTITY_CHAR and self.recognizeHtmlEntities:
emitToken()
state[0] = STATE.PROCESS_HTML_ENTITY
token[0] = c
elif c in CrfTokenizer.punctuationSet and self.recognizePunctuation:
if self.groupPunctuation:
# Finish any current token. Concatenate
# contiguous punctuation into a single token:
if state[0] != STATE.GROUP_PUNCTUATION:
emitToken()
state[0] = STATE.GROUP_PUNCTUATION
token[0] = token[0] + c
else:
# Finish any current token and form a token from
# the punctuation character:
emitToken()
token[0] = c
emitToken()
else:
# Everything else goes here. Presumably, that includes
# Unicode characters that aren't ASCII
# strings. Further work is needed.
if state[0] != STATE.NORMAL:
emitToken()
token[0] = token[0] + c
# Finish any final token and return the array of tokens:
if state[0] == STATE.PROCESS_HTML_ENTITY:
fixBrokenHtmlEntity()
emitToken()
# Was a token prefix requested? If so, we'll apply it now. If the
# normal case is not to apply a token prefix, this might be a little
# more efficient than applying the prefix in emitToken().
if self.tokenPrefix is not None and len(self.tokenPrefix) > 0:
tokens = [self.tokenPrefix + x for x in tokens]  # a real list, matching the docstring (map() is lazy on Python 3)
return tokens | def function[tokenize, parameter[self, value]]:
constant[Take a string and break it into tokens. Return the tokens as a list of
strings.
]
class class[STATE, parameter[]] begin[:]
variable[NORMAL] assign[=] constant[0]
variable[GROUP_PUNCTUATION] assign[=] constant[1]
variable[PROCESS_HTML_TAG] assign[=] constant[2]
variable[PROCESS_HTML_ENTITY] assign[=] constant[3]
variable[GROUP_LINEBREAKS] assign[=] constant[4]
variable[state_names] assign[=] dictionary[[<ast.Attribute object at 0x7da1b1ddaaa0>, <ast.Attribute object at 0x7da1b1ddaad0>, <ast.Attribute object at 0x7da1b1ddabf0>, <ast.Attribute object at 0x7da1b1ddab30>, <ast.Attribute object at 0x7da1b1ddac20>], [<ast.Constant object at 0x7da1b1ddacb0>, <ast.Constant object at 0x7da1b1dda980>, <ast.Constant object at 0x7da1b1dda8c0>, <ast.Constant object at 0x7da1b1dda8f0>, <ast.Constant object at 0x7da1b1dda890>]]
variable[state] assign[=] list[[<ast.Attribute object at 0x7da1b1dda800>]]
variable[token] assign[=] list[[<ast.Constant object at 0x7da1b1dda7d0>]]
variable[tokens] assign[=] list[[]]
variable[index] assign[=] <ast.UnaryOp object at 0x7da1b1dda830>
def function[clearToken, parameter[]]:
constant[Clear the current token and return to normal state.]
call[name[token]][constant[0]] assign[=] constant[]
call[name[state]][constant[0]] assign[=] name[STATE].NORMAL
def function[emitToken, parameter[]]:
constant[Emit the current token, if any, and return to normal state.]
if compare[call[name[len], parameter[call[name[token]][constant[0]]]] greater[>] constant[0]] begin[:]
<ast.Tuple object at 0x7da1b1dd9870> assign[=] tuple[[<ast.Name object at 0x7da1b1dd9d80>, <ast.BinOp object at 0x7da1b1dd9de0>]]
if name[self].create_structured_tokens begin[:]
variable[new_token] assign[=] dictionary[[<ast.Constant object at 0x7da1b1da07c0>, <ast.Constant object at 0x7da1b1da24d0>, <ast.Constant object at 0x7da1b1da0970>, <ast.Constant object at 0x7da1b1da2ce0>], [<ast.Subscript object at 0x7da1b1da3d30>, <ast.Subscript object at 0x7da1b1da32b0>, <ast.Name object at 0x7da1b1da1cf0>, <ast.Name object at 0x7da1b1da1b40>]]
call[name[tokens].append, parameter[name[new_token]]]
call[name[clearToken], parameter[]]
def function[fixBrokenHtmlEntity, parameter[]]:
if <ast.UnaryOp object at 0x7da1b1d4cb50> begin[:]
call[name[state]][constant[0]] assign[=] name[STATE].NORMAL
return[None]
if name[self].groupPunctuation begin[:]
variable[sawOnlyPunctuation] assign[=] constant[True]
for taget[name[c]] in starred[call[name[token]][constant[0]]] begin[:]
if compare[name[c] <ast.NotIn object at 0x7da2590d7190> name[CrfTokenizer].punctuationSet] begin[:]
variable[sawOnlyPunctuation] assign[=] constant[False]
break
if name[sawOnlyPunctuation] begin[:]
call[name[state]][constant[0]] assign[=] name[STATE].GROUP_PUNCTUATION
return[None]
variable[saveToken] assign[=] call[name[token]][constant[0]]
call[name[token]][constant[0]] assign[=] call[name[saveToken]][<ast.Slice object at 0x7da1b1ddafe0>]
call[name[emitToken], parameter[]]
if compare[call[name[len], parameter[name[saveToken]]] greater[>] constant[1]] begin[:]
call[name[token]][constant[0]] assign[=] call[name[saveToken]][<ast.Slice object at 0x7da1b1ddb910>]
for taget[name[c]] in starred[name[value]] begin[:]
<ast.AugAssign object at 0x7da1b1ddba60>
if compare[call[name[state]][constant[0]] equal[==] name[STATE].PROCESS_HTML_TAG] begin[:]
if compare[name[c] in name[CrfTokenizer].whitespaceSet] begin[:]
continue
<ast.AugAssign object at 0x7da1b1ddbcd0>
if compare[name[c] equal[==] name[CrfTokenizer].END_HTML_TAG_CHAR] begin[:]
if name[self].skipHtmlTags begin[:]
call[name[clearToken], parameter[]]
continue
if compare[call[name[state]][constant[0]] equal[==] name[STATE].PROCESS_HTML_ENTITY] begin[:]
if compare[name[c] equal[==] name[CrfTokenizer].END_HTML_ENTITY_CHAR] begin[:]
if compare[call[name[len], parameter[call[name[token]][constant[0]]]] equal[==] constant[1]] begin[:]
if <ast.UnaryOp object at 0x7da1b1dd8b20> begin[:]
call[name[token]][constant[0]] assign[=] binary_operation[call[name[token]][constant[0]] + name[c]]
call[name[state]][constant[0]] assign[=] name[STATE].NORMAL
continue
call[name[token]][constant[0]] assign[=] binary_operation[call[name[token]][constant[0]] + name[c]]
if name[self].skipHtmlEntities begin[:]
call[name[clearToken], parameter[]]
continue
if compare[call[name[state]][constant[0]] equal[==] name[STATE].GROUP_LINEBREAKS] begin[:]
if compare[name[c] in name[CrfTokenizer].linebreaking_character_set] begin[:]
<ast.AugAssign object at 0x7da1b1dd8850>
continue
if compare[name[c] in name[CrfTokenizer].whitespaceSet] begin[:]
call[name[emitToken], parameter[]]
if <ast.BoolOp object at 0x7da1b1d62d70> begin[:]
call[name[state]][constant[0]] assign[=] name[STATE].GROUP_LINEBREAKS
call[name[token]][constant[0]] assign[=] name[c]
if compare[call[name[state]][constant[0]] equal[==] name[STATE].PROCESS_HTML_ENTITY] begin[:]
call[name[fixBrokenHtmlEntity], parameter[]]
call[name[emitToken], parameter[]]
if <ast.BoolOp object at 0x7da1b1d4a590> begin[:]
variable[tokens] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da1b1d49600>, name[tokens]]]
return[name[tokens]] | keyword[def] identifier[tokenize] ( identifier[self] , identifier[value] ):
literal[string]
keyword[class] identifier[STATE] :
identifier[NORMAL] = literal[int]
identifier[GROUP_PUNCTUATION] = literal[int]
identifier[PROCESS_HTML_TAG] = literal[int]
identifier[PROCESS_HTML_ENTITY] = literal[int]
identifier[GROUP_LINEBREAKS] = literal[int]
identifier[state_names] ={
identifier[STATE] . identifier[NORMAL] : literal[string] ,
identifier[STATE] . identifier[GROUP_PUNCTUATION] : literal[string] ,
identifier[STATE] . identifier[PROCESS_HTML_TAG] : literal[string] ,
identifier[STATE] . identifier[PROCESS_HTML_ENTITY] : literal[string] ,
identifier[STATE] . identifier[GROUP_LINEBREAKS] : literal[string]
}
identifier[state] =[ identifier[STATE] . identifier[NORMAL] ]
identifier[token] =[ literal[string] ]
identifier[tokens] =[]
identifier[index] =- literal[int]
keyword[def] identifier[clearToken] ():
literal[string]
identifier[token] [ literal[int] ]= literal[string]
identifier[state] [ literal[int] ]= identifier[STATE] . identifier[NORMAL]
keyword[def] identifier[emitToken] ():
literal[string]
keyword[if] identifier[len] ( identifier[token] [ literal[int] ])> literal[int] :
identifier[char_start] , identifier[char_end] = identifier[index] , identifier[index] + identifier[len] ( identifier[token] [ literal[int] ])
keyword[if] identifier[self] . identifier[create_structured_tokens] :
identifier[new_token] ={ literal[string] : identifier[token] [ literal[int] ], literal[string] : identifier[state_names] [ identifier[state] [ literal[int] ]], literal[string] : identifier[char_start] , literal[string] : identifier[char_end] }
identifier[tokens] . identifier[append] ( identifier[new_token] )
keyword[else] :
identifier[tokens] . identifier[append] ( identifier[token] [ literal[int] ])
identifier[clearToken] ()
keyword[def] identifier[fixBrokenHtmlEntity] ():
keyword[if] keyword[not] identifier[self] . identifier[recognizePunctuation] :
identifier[state] [ literal[int] ]= identifier[STATE] . identifier[NORMAL]
keyword[return]
keyword[if] identifier[self] . identifier[groupPunctuation] :
identifier[sawOnlyPunctuation] = keyword[True]
keyword[for] identifier[c] keyword[in] identifier[token] [ literal[int] ]:
keyword[if] identifier[c] keyword[not] keyword[in] identifier[CrfTokenizer] . identifier[punctuationSet] :
identifier[sawOnlyPunctuation] = keyword[False]
keyword[break]
keyword[if] identifier[sawOnlyPunctuation] :
identifier[state] [ literal[int] ]= identifier[STATE] . identifier[GROUP_PUNCTUATION]
keyword[return]
identifier[saveToken] = identifier[token] [ literal[int] ]
identifier[token] [ literal[int] ]= identifier[saveToken] [ literal[int] : literal[int] ]
identifier[emitToken] ()
keyword[if] identifier[len] ( identifier[saveToken] )> literal[int] :
identifier[token] [ literal[int] ]= identifier[saveToken] [ literal[int] :]
keyword[for] identifier[c] keyword[in] identifier[value] :
identifier[index] += literal[int]
keyword[if] identifier[state] [ literal[int] ]== identifier[STATE] . identifier[PROCESS_HTML_TAG] :
keyword[if] identifier[c] keyword[in] identifier[CrfTokenizer] . identifier[whitespaceSet] :
keyword[continue]
identifier[token] [ literal[int] ]+= identifier[c]
keyword[if] identifier[c] == identifier[CrfTokenizer] . identifier[END_HTML_TAG_CHAR] :
keyword[if] identifier[self] . identifier[skipHtmlTags] :
identifier[clearToken] ()
keyword[else] :
identifier[emitToken] ()
keyword[continue]
keyword[if] identifier[state] [ literal[int] ]== identifier[STATE] . identifier[PROCESS_HTML_ENTITY] :
keyword[if] identifier[c] == identifier[CrfTokenizer] . identifier[END_HTML_ENTITY_CHAR] :
keyword[if] identifier[len] ( identifier[token] [ literal[int] ])== literal[int] :
keyword[if] keyword[not] identifier[self] . identifier[recognizePunctuation] :
identifier[token] [ literal[int] ]= identifier[token] [ literal[int] ]+ identifier[c]
identifier[state] [ literal[int] ]= identifier[STATE] . identifier[NORMAL]
keyword[elif] identifier[self] . identifier[groupPunctuation] :
identifier[token] [ literal[int] ]= identifier[token] [ literal[int] ]+ identifier[c]
identifier[state] [ literal[int] ]= identifier[STATE] . identifier[GROUP_PUNCTUATION]
keyword[else] :
identifier[emitToken] ()
identifier[token] [ literal[int] ]= identifier[token] [ literal[int] ]+ identifier[c]
identifier[emitToken] ()
keyword[continue]
identifier[token] [ literal[int] ]= identifier[token] [ literal[int] ]+ identifier[c]
keyword[if] identifier[self] . identifier[skipHtmlEntities] :
identifier[clearToken] ()
keyword[else] :
identifier[emitToken] ()
keyword[continue]
keyword[elif] identifier[c] keyword[in] identifier[CrfTokenizer] . identifier[htmlEntityNameCharacterSet] :
identifier[token] [ literal[int] ]= identifier[token] [ literal[int] ]+ identifier[c]
keyword[continue]
keyword[else] :
identifier[fixBrokenHtmlEntity] ()
keyword[if] identifier[state] [ literal[int] ]== identifier[STATE] . identifier[GROUP_LINEBREAKS] :
keyword[if] identifier[c] keyword[in] identifier[CrfTokenizer] . identifier[linebreaking_character_set] :
identifier[token] [ literal[int] ]+= identifier[c]
keyword[continue]
keyword[elif] identifier[c] keyword[in] identifier[CrfTokenizer] . identifier[whitespaceSet] :
keyword[continue]
keyword[else] :
identifier[emitToken] ()
identifier[state] [ literal[int] ]= identifier[STATE] . identifier[NORMAL]
keyword[if] identifier[c] keyword[in] identifier[CrfTokenizer] . identifier[whitespaceSet] :
identifier[emitToken] ()
keyword[if] identifier[c] keyword[in] identifier[CrfTokenizer] . identifier[linebreaking_start_character_set] keyword[and] identifier[self] . identifier[recognize_linebreaks] :
identifier[state] [ literal[int] ]= identifier[STATE] . identifier[GROUP_LINEBREAKS]
identifier[token] [ literal[int] ]= identifier[c]
keyword[elif] identifier[c] == identifier[CrfTokenizer] . identifier[START_HTML_TAG_CHAR] keyword[and] identifier[self] . identifier[recognizeHtmlTags] :
identifier[emitToken] ()
identifier[state] [ literal[int] ]= identifier[STATE] . identifier[PROCESS_HTML_TAG]
identifier[token] [ literal[int] ]= identifier[c]
keyword[elif] identifier[c] == identifier[CrfTokenizer] . identifier[START_HTML_ENTITY_CHAR] keyword[and] identifier[self] . identifier[recognizeHtmlEntities] :
identifier[emitToken] ()
identifier[state] [ literal[int] ]= identifier[STATE] . identifier[PROCESS_HTML_ENTITY]
identifier[token] [ literal[int] ]= identifier[c]
keyword[elif] identifier[c] keyword[in] identifier[CrfTokenizer] . identifier[punctuationSet] keyword[and] identifier[self] . identifier[recognizePunctuation] :
keyword[if] identifier[self] . identifier[groupPunctuation] :
keyword[if] identifier[state] [ literal[int] ]!= identifier[STATE] . identifier[GROUP_PUNCTUATION] :
identifier[emitToken] ()
identifier[state] [ literal[int] ]= identifier[STATE] . identifier[GROUP_PUNCTUATION]
identifier[token] [ literal[int] ]= identifier[token] [ literal[int] ]+ identifier[c]
keyword[else] :
identifier[emitToken] ()
identifier[token] [ literal[int] ]= identifier[c]
identifier[emitToken] ()
keyword[else] :
keyword[if] identifier[state] [ literal[int] ]!= identifier[STATE] . identifier[NORMAL] :
identifier[emitToken] ()
identifier[token] [ literal[int] ]= identifier[token] [ literal[int] ]+ identifier[c]
keyword[if] identifier[state] [ literal[int] ]== identifier[STATE] . identifier[PROCESS_HTML_ENTITY] :
identifier[fixBrokenHtmlEntity] ()
identifier[emitToken] ()
keyword[if] identifier[self] . identifier[tokenPrefix] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[self] . identifier[tokenPrefix] )> literal[int] :
identifier[tokens] = identifier[map] ( keyword[lambda] identifier[x] : identifier[self] . identifier[tokenPrefix] + identifier[x] , identifier[tokens] )
keyword[return] identifier[tokens] | def tokenize(self, value):
"""Take a string and break it into tokens. Return the tokens as a list of
strings.
"""
# This code uses a state machine:
class STATE:
NORMAL = 0
GROUP_PUNCTUATION = 1
PROCESS_HTML_TAG = 2
PROCESS_HTML_ENTITY = 3
GROUP_LINEBREAKS = 4
state_names = {STATE.NORMAL: 'normal', STATE.GROUP_PUNCTUATION: 'punctuation', STATE.PROCESS_HTML_TAG: 'html', STATE.PROCESS_HTML_ENTITY: 'html_entity', STATE.GROUP_LINEBREAKS: 'break'}
# "state" and "token" have array values to allow their
# contents to be modified within emitToken().
state = [STATE.NORMAL]
token = [''] # The current token being assembled.
tokens = [] # The tokens extracted from the input.
index = -1
def clearToken():
"""Clear the current token and return to normal state."""
token[0] = ''
state[0] = STATE.NORMAL
def emitToken():
"""Emit the current token, if any, and return to normal state."""
if len(token[0]) > 0:
# add character end and start
(char_start, char_end) = (index, index + len(token[0]))
if self.create_structured_tokens:
new_token = {'value': token[0], 'type': state_names[state[0]], 'char_start': char_start, 'char_end': char_end}
tokens.append(new_token) # depends on [control=['if'], data=[]]
else:
tokens.append(token[0]) # depends on [control=['if'], data=[]]
clearToken()
def fixBrokenHtmlEntity():
# This is not a valid HTML entity.
# TODO: embedded "#" characters should be treated better
# here.
if not self.recognizePunctuation:
# If we aren't treating punctuation specially, then just treat
# the broken HTML entity as an ordinary token.
#
# TODO: This is not quite correct. "x& " should
# be treated as a single token, although "s & "
# should result in two tokens.
state[0] = STATE.NORMAL
return # depends on [control=['if'], data=[]]
if self.groupPunctuation:
# If all the saved tokens are punctuation characters, then
# enter STATE.GROUP_PUNCTUATION instead of STATE.NORMAL.
sawOnlyPunctuation = True
for c in token[0]:
if c not in CrfTokenizer.punctuationSet:
sawOnlyPunctuation = False
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
if sawOnlyPunctuation:
state[0] = STATE.GROUP_PUNCTUATION
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Emit the ampersand that began the prospective entity and use the
# rest as a new current token.
saveToken = token[0]
token[0] = saveToken[0:1]
emitToken()
if len(saveToken) > 1:
token[0] = saveToken[1:] # depends on [control=['if'], data=[]]
# The caller should continue processing with the current
# character.
# Process each character in the input string:
for c in value:
index += 1
if state[0] == STATE.PROCESS_HTML_TAG:
if c in CrfTokenizer.whitespaceSet:
continue # Suppress for safety. CRF++ doesn't like spaces in tokens, for example. # depends on [control=['if'], data=[]]
token[0] += c
if c == CrfTokenizer.END_HTML_TAG_CHAR:
if self.skipHtmlTags:
clearToken() # depends on [control=['if'], data=[]]
else:
emitToken() # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
if state[0] == STATE.PROCESS_HTML_ENTITY:
# Parse an HTML entity name. TODO: embedded "#"
# characters imply more extensive parsing rules should
# be performed here.
if c == CrfTokenizer.END_HTML_ENTITY_CHAR:
if len(token[0]) == 1:
# This is the special case of "&;", which is not a
# valid HTML entity. If self.groupPunctuation is
# True, return to normal parsing state in case more
# punctuation follows. Otherwise, emit "&" and ";" as
# separate tokens.
if not self.recognizePunctuation:
# TODO: This is not quite correct. "x&;" should
# be treated as a single token, although "s &;"
# should result in two tokens.
token[0] = token[0] + c
state[0] = STATE.NORMAL # depends on [control=['if'], data=[]]
elif self.groupPunctuation:
token[0] = token[0] + c
state[0] = STATE.GROUP_PUNCTUATION # depends on [control=['if'], data=[]]
else:
emitToken() # Emit the "&" as a seperate token.
token[0] = token[0] + c
emitToken() # Emit the ";' as a seperate token.
continue # depends on [control=['if'], data=[]]
token[0] = token[0] + c
if self.skipHtmlEntities:
clearToken() # depends on [control=['if'], data=[]]
else:
emitToken()
continue # depends on [control=['if'], data=['c']]
elif c in CrfTokenizer.htmlEntityNameCharacterSet:
token[0] = token[0] + c
continue # depends on [control=['if'], data=['c']]
else:
# This is not a valid HTML entity.
fixBrokenHtmlEntity() # depends on [control=['if'], data=[]]
# intentional fall-through
if state[0] == STATE.GROUP_LINEBREAKS:
# we will look for \n\r and ignore spaces
if c in CrfTokenizer.linebreaking_character_set:
token[0] += c
continue # depends on [control=['if'], data=['c']]
elif c in CrfTokenizer.whitespaceSet:
continue # depends on [control=['if'], data=[]]
else:
emitToken()
state[0] = STATE.NORMAL # depends on [control=['if'], data=[]]
if c in CrfTokenizer.whitespaceSet:
# White space terminates the current token, then is dropped.
emitToken()
# Check to see whether we should look for line breaks
if c in CrfTokenizer.linebreaking_start_character_set and self.recognize_linebreaks:
state[0] = STATE.GROUP_LINEBREAKS
token[0] = c # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['c']]
elif c == CrfTokenizer.START_HTML_TAG_CHAR and self.recognizeHtmlTags:
emitToken()
state[0] = STATE.PROCESS_HTML_TAG
token[0] = c # depends on [control=['if'], data=[]]
elif c == CrfTokenizer.START_HTML_ENTITY_CHAR and self.recognizeHtmlEntities:
emitToken()
state[0] = STATE.PROCESS_HTML_ENTITY
token[0] = c # depends on [control=['if'], data=[]]
elif c in CrfTokenizer.punctuationSet and self.recognizePunctuation:
if self.groupPunctuation:
# Finish any current token. Concatenate
# contiguous punctuation into a single token:
if state[0] != STATE.GROUP_PUNCTUATION:
emitToken()
state[0] = STATE.GROUP_PUNCTUATION # depends on [control=['if'], data=[]]
token[0] = token[0] + c # depends on [control=['if'], data=[]]
else:
# Finish any current token and form a token from
# the punctuation character:
emitToken()
token[0] = c
emitToken() # depends on [control=['if'], data=[]]
else:
# Everything else goes here. Presumably, that includes
# Unicode characters that aren't ASCII
# strings. Further work is needed.
if state[0] != STATE.NORMAL:
emitToken() # depends on [control=['if'], data=[]]
token[0] = token[0] + c # depends on [control=['for'], data=['c']]
# Finish any final token and return the array of tokens:
if state[0] == STATE.PROCESS_HTML_ENTITY:
fixBrokenHtmlEntity() # depends on [control=['if'], data=[]]
emitToken()
# Was a token prefix requested? If so, we'll apply it now. If the
# normal case is not to apply a token prefix, this might be a little
# more efficient than applying the prefix in emitToken().
if self.tokenPrefix is not None and len(self.tokenPrefix) > 0:
tokens = [self.tokenPrefix + x for x in tokens] # depends on [control=['if'], data=[]]
return tokens |
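tokenize() is a single-pass character state machine; the instance flags decide whether punctuation runs are grouped, whether HTML tags and entities are kept or skipped, and whether line breaks become tokens. A hedged usage sketch (CrfTokenizer construction and flag defaults are assumptions about the surrounding module, and the resulting token stream is not asserted here):
# t = CrfTokenizer()
# t.groupPunctuation = True        # fold runs like "?!" into one token
# t.recognizeHtmlEntities = True   # keep "&amp;" together as one token
# tokens = t.tokenize("Hello?! <b>&amp;</b> goodbye")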
def nics_skip(name, nics, ipv6):
'''
Alias for :mod:`csf.nics_skipped <salt.states.csf.nics_skipped>`
'''
return nics_skipped(name, nics=nics, ipv6=ipv6) | def function[nics_skip, parameter[name, nics, ipv6]]:
constant[
Alias for :mod:`csf.nics_skipped <salt.states.csf.nics_skipped>`
]
return[call[name[nics_skipped], parameter[name[name]]]] | keyword[def] identifier[nics_skip] ( identifier[name] , identifier[nics] , identifier[ipv6] ):
literal[string]
keyword[return] identifier[nics_skipped] ( identifier[name] , identifier[nics] = identifier[nics] , identifier[ipv6] = identifier[ipv6] ) | def nics_skip(name, nics, ipv6):
"""
Alias for :mod:`csf.nics_skipped <salt.states.csf.nics_skipped>`
"""
return nics_skipped(name, nics=nics, ipv6=ipv6) |
def deep_compare(obj1, obj2):
"""
>>> deep_compare({'1': None}, {})
False
>>> deep_compare({'1': {}}, {'1': None})
False
>>> deep_compare({'1': [1]}, {'1': [2]})
False
>>> deep_compare({'1': 2}, {'1': '2'})
True
>>> deep_compare({'1': {'2': [3, 4]}}, {'1': {'2': [3, 4]}})
True
"""
if set(list(obj1.keys())) != set(list(obj2.keys())): # Objects have different sets of keys
return False
for key, value in obj1.items():
if isinstance(value, dict):
if not (isinstance(obj2[key], dict) and deep_compare(value, obj2[key])):
return False
elif str(value) != str(obj2[key]):
return False
return True | def function[deep_compare, parameter[obj1, obj2]]:
constant[
>>> deep_compare({'1': None}, {})
False
>>> deep_compare({'1': {}}, {'1': None})
False
>>> deep_compare({'1': [1]}, {'1': [2]})
False
>>> deep_compare({'1': 2}, {'1': '2'})
True
>>> deep_compare({'1': {'2': [3, 4]}}, {'1': {'2': [3, 4]}})
True
]
if compare[call[name[set], parameter[call[name[list], parameter[call[name[obj1].keys, parameter[]]]]]] not_equal[!=] call[name[set], parameter[call[name[list], parameter[call[name[obj2].keys, parameter[]]]]]]] begin[:]
return[constant[False]]
for taget[tuple[[<ast.Name object at 0x7da1b21b9ea0>, <ast.Name object at 0x7da1b21ba530>]]] in starred[call[name[obj1].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[value], name[dict]]] begin[:]
if <ast.UnaryOp object at 0x7da1b21b9630> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[deep_compare] ( identifier[obj1] , identifier[obj2] ):
literal[string]
keyword[if] identifier[set] ( identifier[list] ( identifier[obj1] . identifier[keys] ()))!= identifier[set] ( identifier[list] ( identifier[obj2] . identifier[keys] ())):
keyword[return] keyword[False]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[obj1] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ):
keyword[if] keyword[not] ( identifier[isinstance] ( identifier[obj2] [ identifier[key] ], identifier[dict] ) keyword[and] identifier[deep_compare] ( identifier[value] , identifier[obj2] [ identifier[key] ])):
keyword[return] keyword[False]
keyword[elif] identifier[str] ( identifier[value] )!= identifier[str] ( identifier[obj2] [ identifier[key] ]):
keyword[return] keyword[False]
keyword[return] keyword[True] | def deep_compare(obj1, obj2):
"""
>>> deep_compare({'1': None}, {})
False
>>> deep_compare({'1': {}}, {'1': None})
False
>>> deep_compare({'1': [1]}, {'1': [2]})
False
>>> deep_compare({'1': 2}, {'1': '2'})
True
>>> deep_compare({'1': {'2': [3, 4]}}, {'1': {'2': [3, 4]}})
True
"""
if set(list(obj1.keys())) != set(list(obj2.keys())): # Objects have different sets of keys
return False # depends on [control=['if'], data=[]]
for (key, value) in obj1.items():
if isinstance(value, dict):
if not (isinstance(obj2[key], dict) and deep_compare(value, obj2[key])):
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif str(value) != str(obj2[key]):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return True |
def units(self):
"""Units."""
if "units" in self.attrs.keys():
# This try-except is here for compatibility with v1.0.0 of the WT5 format
try:
self.attrs["units"] = self.attrs["units"].decode()
except AttributeError:
pass # already a string, not bytes
return self.attrs["units"]
return None | def function[units, parameter[self]]:
constant[Units.]
if compare[constant[units] in call[name[self].attrs.keys, parameter[]]] begin[:]
<ast.Try object at 0x7da1b0b9f670>
return[call[name[self].attrs][constant[units]]]
return[constant[None]] | keyword[def] identifier[units] ( identifier[self] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[attrs] . identifier[keys] ():
keyword[try] :
identifier[self] . identifier[attrs] [ literal[string] ]= identifier[self] . identifier[attrs] [ literal[string] ]. identifier[decode] ()
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[return] identifier[self] . identifier[attrs] [ literal[string] ]
keyword[return] keyword[None] | def units(self):
"""Units."""
if 'units' in self.attrs.keys():
# This try-except here for compatibility with v1.0.0 of WT5 format
try:
self.attrs['units'] = self.attrs['units'].decode() # depends on [control=['try'], data=[]]
except AttributeError:
pass # already a string, not bytes # depends on [control=['except'], data=[]]
return self.attrs['units'] # depends on [control=['if'], data=[]]
return None |
def logpdf(self, mu):
"""
Log PDF for Skew t prior
Parameters
----------
mu : float
Latent variable over which the prior is formed
Returns
----------
- log(p(mu))
"""
if self.transform is not None:
mu = self.transform(mu)
return self.logpdf_internal_prior(mu, df=self.df0, loc=self.loc0, scale=self.scale0, gamma=self.gamma0) | def function[logpdf, parameter[self, mu]]:
constant[
Log PDF for Skew t prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- log(p(mu))
]
if compare[name[self].transform is_not constant[None]] begin[:]
variable[mu] assign[=] call[name[self].transform, parameter[name[mu]]]
return[call[name[self].logpdf_internal_prior, parameter[name[mu]]]] | keyword[def] identifier[logpdf] ( identifier[self] , identifier[mu] ):
literal[string]
keyword[if] identifier[self] . identifier[transform] keyword[is] keyword[not] keyword[None] :
identifier[mu] = identifier[self] . identifier[transform] ( identifier[mu] )
keyword[return] identifier[self] . identifier[logpdf_internal_prior] ( identifier[mu] , identifier[df] = identifier[self] . identifier[df0] , identifier[loc] = identifier[self] . identifier[loc0] , identifier[scale] = identifier[self] . identifier[scale0] , identifier[gamma] = identifier[self] . identifier[gamma0] ) | def logpdf(self, mu):
"""
Log PDF for Skew t prior
Parameters
----------
mu : float
Latent variable over which the prior is formed
Returns
----------
- log(p(mu))
"""
if self.transform is not None:
mu = self.transform(mu) # depends on [control=['if'], data=[]]
return self.logpdf_internal_prior(mu, df=self.df0, loc=self.loc0, scale=self.scale0, gamma=self.gamma0) |
def compute_metric(self, components):
"""Compute precision from `components`"""
numerator = components[PRECISION_RELEVANT_RETRIEVED]
denominator = components[PRECISION_RETRIEVED]
if denominator == 0.:
if numerator == 0:
return 1.
else:
raise ValueError('')
else:
return numerator/denominator | def function[compute_metric, parameter[self, components]]:
constant[Compute precision from `components`]
variable[numerator] assign[=] call[name[components]][name[PRECISION_RELEVANT_RETRIEVED]]
variable[denominator] assign[=] call[name[components]][name[PRECISION_RETRIEVED]]
if compare[name[denominator] equal[==] constant[0.0]] begin[:]
if compare[name[numerator] equal[==] constant[0]] begin[:]
return[constant[1.0]] | keyword[def] identifier[compute_metric] ( identifier[self] , identifier[components] ):
literal[string]
identifier[numerator] = identifier[components] [ identifier[PRECISION_RELEVANT_RETRIEVED] ]
identifier[denominator] = identifier[components] [ identifier[PRECISION_RETRIEVED] ]
keyword[if] identifier[denominator] == literal[int] :
keyword[if] identifier[numerator] == literal[int] :
keyword[return] literal[int]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
keyword[return] identifier[numerator] / identifier[denominator] | def compute_metric(self, components):
"""Compute precision from `components`"""
numerator = components[PRECISION_RELEVANT_RETRIEVED]
denominator = components[PRECISION_RETRIEVED]
if denominator == 0.0:
if numerator == 0:
return 1.0 # depends on [control=['if'], data=[]]
else:
            raise ValueError('relevant items retrieved but nothing was retrieved') # depends on [control=['if'], data=[]]
else:
return numerator / denominator |
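
A standalone restatement of the precision convention above: retrieving nothing while also having zero relevant-retrieved items counts as perfect precision (nothing retrieved was wrong), whereas a nonzero numerator over a zero denominator is an inconsistent state and raises. The plain arguments stand in for the PRECISION_RELEVANT_RETRIEVED / PRECISION_RETRIEVED component keys.

def precision(relevant_retrieved, retrieved):
    """Precision with the 0/0 == 1.0 convention from compute_metric."""
    if retrieved == 0:
        if relevant_retrieved == 0:
            return 1.0  # nothing retrieved, so nothing retrieved was wrong
        raise ValueError('relevant items retrieved but nothing was retrieved')
    return relevant_retrieved / retrieved

assert precision(3, 4) == 0.75
assert precision(0, 0) == 1.0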
def predict_subsequences(
self,
sequence_dict,
peptide_lengths=None):
"""
Given a dictionary mapping sequence names to amino acid strings,
and an optional list of peptide lengths, returns a
BindingPredictionCollection.
"""
if isinstance(sequence_dict, string_types):
sequence_dict = {"seq": sequence_dict}
elif isinstance(sequence_dict, (list, tuple)):
sequence_dict = {seq: seq for seq in sequence_dict}
peptide_lengths = self._check_peptide_lengths(peptide_lengths)
# convert long protein sequences to set of peptides and
# associated sequence name / offsets that each peptide may have come
# from
peptide_set = set([])
peptide_to_name_offset_pairs = defaultdict(list)
for name, sequence in sequence_dict.items():
for peptide_length in peptide_lengths:
for i in range(len(sequence) - peptide_length + 1):
peptide = sequence[i:i + peptide_length]
peptide_set.add(peptide)
peptide_to_name_offset_pairs[peptide].append((name, i))
peptide_list = sorted(peptide_set)
binding_predictions = self.predict_peptides(peptide_list)
# create BindingPrediction objects with sequence name and offset
results = []
for binding_prediction in binding_predictions:
for name, offset in peptide_to_name_offset_pairs[
binding_prediction.peptide]:
results.append(binding_prediction.clone_with_updates(
source_sequence_name=name,
offset=offset))
self._check_results(
results,
peptides=peptide_set,
alleles=self.alleles)
return BindingPredictionCollection(results) | def function[predict_subsequences, parameter[self, sequence_dict, peptide_lengths]]:
constant[
Given a dictionary mapping sequence names to amino acid strings,
and an optional list of peptide lengths, returns a
BindingPredictionCollection.
]
if call[name[isinstance], parameter[name[sequence_dict], name[string_types]]] begin[:]
variable[sequence_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b0005960>], [<ast.Name object at 0x7da1b0006020>]]
variable[peptide_lengths] assign[=] call[name[self]._check_peptide_lengths, parameter[name[peptide_lengths]]]
variable[peptide_set] assign[=] call[name[set], parameter[list[[]]]]
variable[peptide_to_name_offset_pairs] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[tuple[[<ast.Name object at 0x7da1b0004f40>, <ast.Name object at 0x7da1b01d4670>]]] in starred[call[name[sequence_dict].items, parameter[]]] begin[:]
for taget[name[peptide_length]] in starred[name[peptide_lengths]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[binary_operation[call[name[len], parameter[name[sequence]]] - name[peptide_length]] + constant[1]]]]] begin[:]
variable[peptide] assign[=] call[name[sequence]][<ast.Slice object at 0x7da1b01d5de0>]
call[name[peptide_set].add, parameter[name[peptide]]]
call[call[name[peptide_to_name_offset_pairs]][name[peptide]].append, parameter[tuple[[<ast.Name object at 0x7da1b01d71c0>, <ast.Name object at 0x7da1b01d6e60>]]]]
variable[peptide_list] assign[=] call[name[sorted], parameter[name[peptide_set]]]
variable[binding_predictions] assign[=] call[name[self].predict_peptides, parameter[name[peptide_list]]]
variable[results] assign[=] list[[]]
for taget[name[binding_prediction]] in starred[name[binding_predictions]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b01d7a30>, <ast.Name object at 0x7da1b01d50f0>]]] in starred[call[name[peptide_to_name_offset_pairs]][name[binding_prediction].peptide]] begin[:]
call[name[results].append, parameter[call[name[binding_prediction].clone_with_updates, parameter[]]]]
call[name[self]._check_results, parameter[name[results]]]
return[call[name[BindingPredictionCollection], parameter[name[results]]]] | keyword[def] identifier[predict_subsequences] (
identifier[self] ,
identifier[sequence_dict] ,
identifier[peptide_lengths] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[sequence_dict] , identifier[string_types] ):
identifier[sequence_dict] ={ literal[string] : identifier[sequence_dict] }
keyword[elif] identifier[isinstance] ( identifier[sequence_dict] ,( identifier[list] , identifier[tuple] )):
identifier[sequence_dict] ={ identifier[seq] : identifier[seq] keyword[for] identifier[seq] keyword[in] identifier[sequence_dict] }
identifier[peptide_lengths] = identifier[self] . identifier[_check_peptide_lengths] ( identifier[peptide_lengths] )
identifier[peptide_set] = identifier[set] ([])
identifier[peptide_to_name_offset_pairs] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[name] , identifier[sequence] keyword[in] identifier[sequence_dict] . identifier[items] ():
keyword[for] identifier[peptide_length] keyword[in] identifier[peptide_lengths] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[sequence] )- identifier[peptide_length] + literal[int] ):
identifier[peptide] = identifier[sequence] [ identifier[i] : identifier[i] + identifier[peptide_length] ]
identifier[peptide_set] . identifier[add] ( identifier[peptide] )
identifier[peptide_to_name_offset_pairs] [ identifier[peptide] ]. identifier[append] (( identifier[name] , identifier[i] ))
identifier[peptide_list] = identifier[sorted] ( identifier[peptide_set] )
identifier[binding_predictions] = identifier[self] . identifier[predict_peptides] ( identifier[peptide_list] )
identifier[results] =[]
keyword[for] identifier[binding_prediction] keyword[in] identifier[binding_predictions] :
keyword[for] identifier[name] , identifier[offset] keyword[in] identifier[peptide_to_name_offset_pairs] [
identifier[binding_prediction] . identifier[peptide] ]:
identifier[results] . identifier[append] ( identifier[binding_prediction] . identifier[clone_with_updates] (
identifier[source_sequence_name] = identifier[name] ,
identifier[offset] = identifier[offset] ))
identifier[self] . identifier[_check_results] (
identifier[results] ,
identifier[peptides] = identifier[peptide_set] ,
identifier[alleles] = identifier[self] . identifier[alleles] )
keyword[return] identifier[BindingPredictionCollection] ( identifier[results] ) | def predict_subsequences(self, sequence_dict, peptide_lengths=None):
"""
Given a dictionary mapping sequence names to amino acid strings,
and an optional list of peptide lengths, returns a
BindingPredictionCollection.
"""
if isinstance(sequence_dict, string_types):
sequence_dict = {'seq': sequence_dict} # depends on [control=['if'], data=[]]
elif isinstance(sequence_dict, (list, tuple)):
sequence_dict = {seq: seq for seq in sequence_dict} # depends on [control=['if'], data=[]]
peptide_lengths = self._check_peptide_lengths(peptide_lengths)
# convert long protein sequences to set of peptides and
# associated sequence name / offsets that each peptide may have come
# from
peptide_set = set([])
peptide_to_name_offset_pairs = defaultdict(list)
for (name, sequence) in sequence_dict.items():
for peptide_length in peptide_lengths:
for i in range(len(sequence) - peptide_length + 1):
peptide = sequence[i:i + peptide_length]
peptide_set.add(peptide)
peptide_to_name_offset_pairs[peptide].append((name, i)) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['peptide_length']] # depends on [control=['for'], data=[]]
peptide_list = sorted(peptide_set)
binding_predictions = self.predict_peptides(peptide_list)
# create BindingPrediction objects with sequence name and offset
results = []
for binding_prediction in binding_predictions:
for (name, offset) in peptide_to_name_offset_pairs[binding_prediction.peptide]:
results.append(binding_prediction.clone_with_updates(source_sequence_name=name, offset=offset)) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['binding_prediction']]
self._check_results(results, peptides=peptide_set, alleles=self.alleles)
return BindingPredictionCollection(results) |
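
The core of the method above, isolated as a runnable sketch: every length-k window of each named sequence becomes a candidate peptide, and each peptide records every (sequence name, offset) pair it came from, so predictions on the deduplicated peptide list can later be mapped back to their source sequences. The sequence and names below are illustrative.

from collections import defaultdict

def expand_subsequences(sequence_dict, peptide_lengths):
    """Map each length-k window to the (name, offset) pairs it occurs at."""
    peptide_to_name_offset_pairs = defaultdict(list)
    for name, sequence in sequence_dict.items():
        for k in peptide_lengths:
            for i in range(len(sequence) - k + 1):
                peptide_to_name_offset_pairs[sequence[i:i + k]].append((name, i))
    return peptide_to_name_offset_pairs

pairs = expand_subsequences({'protein1': 'SIINFE'}, peptide_lengths=(3,))
print(sorted(pairs))  # ['IIN', 'INF', 'NFE', 'SII']
print(pairs['IIN'])   # [('protein1', 1)]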
def path_for_import(name):
"""
Returns the directory path for the given package or module.
"""
return os.path.dirname(os.path.abspath(import_module(name).__file__)) | def function[path_for_import, parameter[name]]:
constant[
Returns the directory path for the given package or module.
]
return[call[name[os].path.dirname, parameter[call[name[os].path.abspath, parameter[call[name[import_module], parameter[name[name]]].__file__]]]]] | keyword[def] identifier[path_for_import] ( identifier[name] ):
literal[string]
keyword[return] identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[import_module] ( identifier[name] ). identifier[__file__] )) | def path_for_import(name):
"""
Returns the directory path for the given package or module.
"""
return os.path.dirname(os.path.abspath(import_module(name).__file__)) |
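
A quick usage sketch, assuming `import_module` comes from the standard library's `importlib` (the original module's imports are outside this row):

import os
from importlib import import_module

def path_for_import(name):
    """Returns the directory path for the given package or module."""
    return os.path.dirname(os.path.abspath(import_module(name).__file__))

print(path_for_import('json'))  # e.g. the stdlib directory containing json/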
def _draw_frame(self, framedata):
"""Reads, processes and draws the frames.
        If needed for the color maps, conversions to gray scale are performed.
        If the images are not color images and no custom color maps are
        defined, the colormap `gray` is applied.
This function is called by TimedAnimation.
Args:
framedata: The frame data.
"""
original = self.read_frame()
if original is None:
self.update_info(self.info_string(message='Finished.',
frame=framedata))
return
if self.original is not None:
processed = self.process_frame(original.copy())
if self.cmap_original is not None:
original = to_gray(original)
elif not is_color_image(original):
self.original.set_cmap('gray')
self.original.set_data(original)
else:
processed = self.process_frame(original)
if self.cmap_processed is not None:
processed = to_gray(processed)
elif not is_color_image(processed):
self.processed.set_cmap('gray')
if self.annotations:
self.annotate(framedata)
self.processed.set_data(processed)
self.update_info(self.info_string(frame=framedata)) | def function[_draw_frame, parameter[self, framedata]]:
constant[Reads, processes and draws the frames.
        If needed for the color maps, conversions to gray scale are performed.
        If the images are not color images and no custom color maps are
        defined, the colormap `gray` is applied.
This function is called by TimedAnimation.
Args:
framedata: The frame data.
]
variable[original] assign[=] call[name[self].read_frame, parameter[]]
if compare[name[original] is constant[None]] begin[:]
call[name[self].update_info, parameter[call[name[self].info_string, parameter[]]]]
return[None]
if compare[name[self].original is_not constant[None]] begin[:]
variable[processed] assign[=] call[name[self].process_frame, parameter[call[name[original].copy, parameter[]]]]
if compare[name[self].cmap_original is_not constant[None]] begin[:]
variable[original] assign[=] call[name[to_gray], parameter[name[original]]]
call[name[self].original.set_data, parameter[name[original]]]
if compare[name[self].cmap_processed is_not constant[None]] begin[:]
variable[processed] assign[=] call[name[to_gray], parameter[name[processed]]]
if name[self].annotations begin[:]
call[name[self].annotate, parameter[name[framedata]]]
call[name[self].processed.set_data, parameter[name[processed]]]
call[name[self].update_info, parameter[call[name[self].info_string, parameter[]]]] | keyword[def] identifier[_draw_frame] ( identifier[self] , identifier[framedata] ):
literal[string]
identifier[original] = identifier[self] . identifier[read_frame] ()
keyword[if] identifier[original] keyword[is] keyword[None] :
identifier[self] . identifier[update_info] ( identifier[self] . identifier[info_string] ( identifier[message] = literal[string] ,
identifier[frame] = identifier[framedata] ))
keyword[return]
keyword[if] identifier[self] . identifier[original] keyword[is] keyword[not] keyword[None] :
identifier[processed] = identifier[self] . identifier[process_frame] ( identifier[original] . identifier[copy] ())
keyword[if] identifier[self] . identifier[cmap_original] keyword[is] keyword[not] keyword[None] :
identifier[original] = identifier[to_gray] ( identifier[original] )
keyword[elif] keyword[not] identifier[is_color_image] ( identifier[original] ):
identifier[self] . identifier[original] . identifier[set_cmap] ( literal[string] )
identifier[self] . identifier[original] . identifier[set_data] ( identifier[original] )
keyword[else] :
identifier[processed] = identifier[self] . identifier[process_frame] ( identifier[original] )
keyword[if] identifier[self] . identifier[cmap_processed] keyword[is] keyword[not] keyword[None] :
identifier[processed] = identifier[to_gray] ( identifier[processed] )
keyword[elif] keyword[not] identifier[is_color_image] ( identifier[processed] ):
identifier[self] . identifier[processed] . identifier[set_cmap] ( literal[string] )
keyword[if] identifier[self] . identifier[annotations] :
identifier[self] . identifier[annotate] ( identifier[framedata] )
identifier[self] . identifier[processed] . identifier[set_data] ( identifier[processed] )
identifier[self] . identifier[update_info] ( identifier[self] . identifier[info_string] ( identifier[frame] = identifier[framedata] )) | def _draw_frame(self, framedata):
"""Reads, processes and draws the frames.
        If needed for the color maps, conversions to gray scale are performed.
        If the images are not color images and no custom color maps are
        defined, the colormap `gray` is applied.
This function is called by TimedAnimation.
Args:
framedata: The frame data.
"""
original = self.read_frame()
if original is None:
self.update_info(self.info_string(message='Finished.', frame=framedata))
return # depends on [control=['if'], data=[]]
if self.original is not None:
processed = self.process_frame(original.copy())
if self.cmap_original is not None:
original = to_gray(original) # depends on [control=['if'], data=[]]
elif not is_color_image(original):
self.original.set_cmap('gray') # depends on [control=['if'], data=[]]
self.original.set_data(original) # depends on [control=['if'], data=[]]
else:
processed = self.process_frame(original)
if self.cmap_processed is not None:
processed = to_gray(processed) # depends on [control=['if'], data=[]]
elif not is_color_image(processed):
self.processed.set_cmap('gray') # depends on [control=['if'], data=[]]
if self.annotations:
self.annotate(framedata) # depends on [control=['if'], data=[]]
self.processed.set_data(processed)
self.update_info(self.info_string(frame=framedata)) |
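
`to_gray` and `is_color_image` are not defined in this row; the sketch below is one plausible numpy implementation of those helpers, treating an H x W x 3 (or 4) array as a color image and averaging the channels (a real implementation would more likely use luminance weights).

import numpy as np

def is_color_image(img):
    """True for H x W x 3 (RGB) or H x W x 4 (RGBA) arrays."""
    return img.ndim == 3 and img.shape[2] in (3, 4)

def to_gray(img):
    """Collapse color channels by a simple mean; pass gray images through."""
    return img[..., :3].mean(axis=2) if is_color_image(img) else img

frame = np.random.rand(4, 4, 3)
assert is_color_image(frame)
assert to_gray(frame).shape == (4, 4)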
def sync(self, force=False, safe=True, revision=0, changelist=0):
"""Syncs the file at the current revision
:param force: Force the file to sync
:type force: bool
:param safe: Don't sync files that were changed outside perforce
:type safe: bool
:param revision: Sync to a specific revision
:type revision: int
:param changelist: Changelist to sync to
:type changelist: int
"""
cmd = ['sync']
if force:
cmd.append('-f')
if safe:
cmd.append('-s')
if revision:
cmd.append('{}#{}'.format(self.depotFile, revision))
elif changelist:
cmd.append('{}@{}'.format(self.depotFile, changelist))
else:
cmd.append(self.depotFile)
self._connection.run(cmd)
self.query() | def function[sync, parameter[self, force, safe, revision, changelist]]:
constant[Syncs the file at the current revision
:param force: Force the file to sync
:type force: bool
:param safe: Don't sync files that were changed outside perforce
:type safe: bool
:param revision: Sync to a specific revision
:type revision: int
:param changelist: Changelist to sync to
:type changelist: int
]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da20e9568f0>]]
if name[force] begin[:]
call[name[cmd].append, parameter[constant[-f]]]
if name[safe] begin[:]
call[name[cmd].append, parameter[constant[-s]]]
if name[revision] begin[:]
call[name[cmd].append, parameter[call[constant[{}#{}].format, parameter[name[self].depotFile, name[revision]]]]]
call[name[self]._connection.run, parameter[name[cmd]]]
call[name[self].query, parameter[]] | keyword[def] identifier[sync] ( identifier[self] , identifier[force] = keyword[False] , identifier[safe] = keyword[True] , identifier[revision] = literal[int] , identifier[changelist] = literal[int] ):
literal[string]
identifier[cmd] =[ literal[string] ]
keyword[if] identifier[force] :
identifier[cmd] . identifier[append] ( literal[string] )
keyword[if] identifier[safe] :
identifier[cmd] . identifier[append] ( literal[string] )
keyword[if] identifier[revision] :
identifier[cmd] . identifier[append] ( literal[string] . identifier[format] ( identifier[self] . identifier[depotFile] , identifier[revision] ))
keyword[elif] identifier[changelist] :
identifier[cmd] . identifier[append] ( literal[string] . identifier[format] ( identifier[self] . identifier[depotFile] , identifier[changelist] ))
keyword[else] :
identifier[cmd] . identifier[append] ( identifier[self] . identifier[depotFile] )
identifier[self] . identifier[_connection] . identifier[run] ( identifier[cmd] )
identifier[self] . identifier[query] () | def sync(self, force=False, safe=True, revision=0, changelist=0):
"""Syncs the file at the current revision
:param force: Force the file to sync
:type force: bool
:param safe: Don't sync files that were changed outside perforce
:type safe: bool
:param revision: Sync to a specific revision
:type revision: int
:param changelist: Changelist to sync to
:type changelist: int
"""
cmd = ['sync']
if force:
cmd.append('-f') # depends on [control=['if'], data=[]]
if safe:
cmd.append('-s') # depends on [control=['if'], data=[]]
if revision:
cmd.append('{}#{}'.format(self.depotFile, revision)) # depends on [control=['if'], data=[]]
elif changelist:
cmd.append('{}@{}'.format(self.depotFile, changelist)) # depends on [control=['if'], data=[]]
else:
cmd.append(self.depotFile)
self._connection.run(cmd)
self.query() |
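
The command-assembly logic of `sync`, extracted into a pure function for illustration (the real method passes the list to its Perforce connection's `run` and then re-queries the file state):

def build_sync_cmd(depot_file, force=False, safe=True, revision=0, changelist=0):
    """Build the p4 sync argument list: flags first, then one file spec."""
    cmd = ['sync']
    if force:
        cmd.append('-f')
    if safe:
        cmd.append('-s')
    if revision:
        cmd.append('{}#{}'.format(depot_file, revision))    # revision syntax
    elif changelist:
        cmd.append('{}@{}'.format(depot_file, changelist))  # changelist syntax
    else:
        cmd.append(depot_file)
    return cmd

print(build_sync_cmd('//depot/file.txt', revision=3))
# ['sync', '-s', '//depot/file.txt#3']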
def pack_paths(paths, sheet_size=None):
"""
Pack a list of Path2D objects into a rectangle.
Parameters
------------
paths: (n,) Path2D
Geometry to be packed
Returns
------------
packed : trimesh.path.Path2D
Object containing input geometry
inserted : (m,) int
Indexes of paths inserted into result
"""
from .util import concatenate
if sheet_size is not None:
sheet_size = np.sort(sheet_size)[::-1]
quantity = []
for path in paths:
if 'quantity' in path.metadata:
quantity.append(path.metadata['quantity'])
else:
quantity.append(1)
    # pack using exterior polygon (with OBB)
polygons = [i.polygons_closed[i.root[0]] for i in paths]
# pack the polygons using rectangular bin packing
inserted, transforms = multipack(polygons=polygons,
quantity=quantity,
sheet_size=sheet_size)
multi = []
for i, T in zip(inserted, transforms):
multi.append(paths[i].copy())
multi[-1].apply_transform(T)
# append all packed paths into a single Path object
packed = concatenate(multi)
return packed, inserted | def function[pack_paths, parameter[paths, sheet_size]]:
constant[
Pack a list of Path2D objects into a rectangle.
Parameters
------------
paths: (n,) Path2D
Geometry to be packed
Returns
------------
packed : trimesh.path.Path2D
Object containing input geometry
inserted : (m,) int
Indexes of paths inserted into result
]
from relative_module[util] import module[concatenate]
if compare[name[sheet_size] is_not constant[None]] begin[:]
variable[sheet_size] assign[=] call[call[name[np].sort, parameter[name[sheet_size]]]][<ast.Slice object at 0x7da1b22b8070>]
variable[quantity] assign[=] list[[]]
for taget[name[path]] in starred[name[paths]] begin[:]
if compare[constant[quantity] in name[path].metadata] begin[:]
call[name[quantity].append, parameter[call[name[path].metadata][constant[quantity]]]]
variable[polygons] assign[=] <ast.ListComp object at 0x7da1b22d4b50>
<ast.Tuple object at 0x7da1b22d7a60> assign[=] call[name[multipack], parameter[]]
variable[multi] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b22d7a00>, <ast.Name object at 0x7da1b22d6800>]]] in starred[call[name[zip], parameter[name[inserted], name[transforms]]]] begin[:]
call[name[multi].append, parameter[call[call[name[paths]][name[i]].copy, parameter[]]]]
call[call[name[multi]][<ast.UnaryOp object at 0x7da1b22d67d0>].apply_transform, parameter[name[T]]]
variable[packed] assign[=] call[name[concatenate], parameter[name[multi]]]
return[tuple[[<ast.Name object at 0x7da1b22d7250>, <ast.Name object at 0x7da1b22d6bc0>]]] | keyword[def] identifier[pack_paths] ( identifier[paths] , identifier[sheet_size] = keyword[None] ):
literal[string]
keyword[from] . identifier[util] keyword[import] identifier[concatenate]
keyword[if] identifier[sheet_size] keyword[is] keyword[not] keyword[None] :
identifier[sheet_size] = identifier[np] . identifier[sort] ( identifier[sheet_size] )[::- literal[int] ]
identifier[quantity] =[]
keyword[for] identifier[path] keyword[in] identifier[paths] :
keyword[if] literal[string] keyword[in] identifier[path] . identifier[metadata] :
identifier[quantity] . identifier[append] ( identifier[path] . identifier[metadata] [ literal[string] ])
keyword[else] :
identifier[quantity] . identifier[append] ( literal[int] )
identifier[polygons] =[ identifier[i] . identifier[polygons_closed] [ identifier[i] . identifier[root] [ literal[int] ]] keyword[for] identifier[i] keyword[in] identifier[paths] ]
identifier[inserted] , identifier[transforms] = identifier[multipack] ( identifier[polygons] = identifier[polygons] ,
identifier[quantity] = identifier[quantity] ,
identifier[sheet_size] = identifier[sheet_size] )
identifier[multi] =[]
keyword[for] identifier[i] , identifier[T] keyword[in] identifier[zip] ( identifier[inserted] , identifier[transforms] ):
identifier[multi] . identifier[append] ( identifier[paths] [ identifier[i] ]. identifier[copy] ())
identifier[multi] [- literal[int] ]. identifier[apply_transform] ( identifier[T] )
identifier[packed] = identifier[concatenate] ( identifier[multi] )
keyword[return] identifier[packed] , identifier[inserted] | def pack_paths(paths, sheet_size=None):
"""
Pack a list of Path2D objects into a rectangle.
Parameters
------------
paths: (n,) Path2D
Geometry to be packed
Returns
------------
packed : trimesh.path.Path2D
Object containing input geometry
inserted : (m,) int
Indexes of paths inserted into result
"""
from .util import concatenate
if sheet_size is not None:
sheet_size = np.sort(sheet_size)[::-1] # depends on [control=['if'], data=['sheet_size']]
quantity = []
for path in paths:
if 'quantity' in path.metadata:
quantity.append(path.metadata['quantity']) # depends on [control=['if'], data=[]]
else:
quantity.append(1) # depends on [control=['for'], data=['path']]
    # pack using exterior polygon (with OBB)
polygons = [i.polygons_closed[i.root[0]] for i in paths]
# pack the polygons using rectangular bin packing
(inserted, transforms) = multipack(polygons=polygons, quantity=quantity, sheet_size=sheet_size)
multi = []
for (i, T) in zip(inserted, transforms):
multi.append(paths[i].copy())
multi[-1].apply_transform(T) # depends on [control=['for'], data=[]]
# append all packed paths into a single Path object
packed = concatenate(multi)
return (packed, inserted) |
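
A self-contained sketch of the quantity convention `pack_paths` reads before packing: a path may request multiple copies via a 'quantity' entry in its metadata, defaulting to one copy when absent. Stub objects stand in for trimesh Path2D here.

class StubPath:
    """Minimal stand-in exposing only the metadata dict pack_paths inspects."""
    def __init__(self, metadata=None):
        self.metadata = metadata or {}

paths = [StubPath({'quantity': 3}), StubPath()]
quantity = [p.metadata.get('quantity', 1) for p in paths]
print(quantity)  # [3, 1] -- three copies of the first path, one of the second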