code (stringlengths 75–104k) | code_sememe (stringlengths 47–309k) | token_type (stringlengths 215–214k) | code_dependency (stringlengths 75–155k)
---|---|---|---|
def fetch(self, select=None, include=None):
"""
Fetch all of the current object's values from the server; if they differ
from the local values, the local values will be overwritten.
:return: the current object
"""
data = {}
if select:
if not isinstance(select, (list, tuple)):
raise TypeError('select parameter must be a list or a tuple')
data['keys'] = ','.join(select)
if include:
if not isinstance(include, (list, tuple)):
raise TypeError('include parameter must be a list or a tuple')
data['include'] = ','.join(include)
response = client.get('/classes/{0}/{1}'.format(self._class_name, self.id), data)
self._update_data(response.json()) | def function[fetch, parameter[self, select, include]]:
constant[
Fetch all of the current object's values from the server; if they differ
from the local values, the local values will be overwritten.
:return: the current object
]
variable[data] assign[=] dictionary[[], []]
if name[select] begin[:]
if <ast.UnaryOp object at 0x7da1b0efb0a0> begin[:]
<ast.Raise object at 0x7da1b0efbc10>
call[name[data]][constant[keys]] assign[=] call[constant[,].join, parameter[name[select]]]
if name[include] begin[:]
if <ast.UnaryOp object at 0x7da1b0ef98d0> begin[:]
<ast.Raise object at 0x7da1b0efa260>
call[name[data]][constant[include]] assign[=] call[constant[,].join, parameter[name[include]]]
variable[response] assign[=] call[name[client].get, parameter[call[constant[/classes/{0}/{1}].format, parameter[name[self]._class_name, name[self].id]], name[data]]]
call[name[self]._update_data, parameter[call[name[response].json, parameter[]]]] | keyword[def] identifier[fetch] ( identifier[self] , identifier[select] = keyword[None] , identifier[include] = keyword[None] ):
literal[string]
identifier[data] ={}
keyword[if] identifier[select] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[select] ,( identifier[list] , identifier[tuple] )):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[data] [ literal[string] ]= literal[string] . identifier[join] ( identifier[select] )
keyword[if] identifier[include] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[include] ,( identifier[list] , identifier[tuple] )):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[data] [ literal[string] ]= literal[string] . identifier[join] ( identifier[include] )
identifier[response] = identifier[client] . identifier[get] ( literal[string] . identifier[format] ( identifier[self] . identifier[_class_name] , identifier[self] . identifier[id] ), identifier[data] )
identifier[self] . identifier[_update_data] ( identifier[response] . identifier[json] ()) | def fetch(self, select=None, include=None):
"""
Fetch all of the current object's values from the server; if they differ
from the local values, the local values will be overwritten.
:return: the current object
"""
data = {}
if select:
if not isinstance(select, (list, tuple)):
raise TypeError('select parameter must be a list or a tuple') # depends on [control=['if'], data=[]]
data['keys'] = ','.join(select) # depends on [control=['if'], data=[]]
if include:
if not isinstance(include, (list, tuple)):
raise TypeError('include parameter must be a list or a tuple') # depends on [control=['if'], data=[]]
data['include'] = ','.join(include) # depends on [control=['if'], data=[]]
response = client.get('/classes/{0}/{1}'.format(self._class_name, self.id), data)
self._update_data(response.json()) |
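
A minimal, runnable sketch of how `fetch` above assembles its query parameters; the field names `title`, `priority`, and `owner` are hypothetical, and no server call is made here.

```python
# Hypothetical field names; this only reproduces the query-dict construction
# from fetch(), without contacting any server.
select = ['title', 'priority']
include = ['owner']

data = {}
if select:
    data['keys'] = ','.join(select)      # 'title,priority'
if include:
    data['include'] = ','.join(include)  # 'owner'

print(data)  # {'keys': 'title,priority', 'include': 'owner'}
```
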
def unify_partitions(self):
"""For all of the segments for a partition, create the parent partition, combine the
children into the parent, and delete the children. """
partitions = self.collect_segment_partitions()
# For each group, copy the segment partitions to the parent partitions, then
# delete the segment partitions.
with self.progress.start('coalesce', 0, message='Coalescing partition segments') as ps:
for name, segments in iteritems(partitions):
ps.add(item_type='partitions', item_count=len(segments),
message='Coalescing partition {}'.format(name))
self.unify_partition(name, segments, ps) | def function[unify_partitions, parameter[self]]:
constant[For all of the segments for a partition, create the parent partition, combine the
children into the parent, and delete the children. ]
variable[partitions] assign[=] call[name[self].collect_segment_partitions, parameter[]]
with call[name[self].progress.start, parameter[constant[coalesce], constant[0]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c9936d0>, <ast.Name object at 0x7da20c9900a0>]]] in starred[call[name[iteritems], parameter[name[partitions]]]] begin[:]
call[name[ps].add, parameter[]]
call[name[self].unify_partition, parameter[name[name], name[segments], name[ps]]] | keyword[def] identifier[unify_partitions] ( identifier[self] ):
literal[string]
identifier[partitions] = identifier[self] . identifier[collect_segment_partitions] ()
keyword[with] identifier[self] . identifier[progress] . identifier[start] ( literal[string] , literal[int] , identifier[message] = literal[string] ) keyword[as] identifier[ps] :
keyword[for] identifier[name] , identifier[segments] keyword[in] identifier[iteritems] ( identifier[partitions] ):
identifier[ps] . identifier[add] ( identifier[item_type] = literal[string] , identifier[item_count] = identifier[len] ( identifier[segments] ),
identifier[message] = literal[string] . identifier[format] ( identifier[name] ))
identifier[self] . identifier[unify_partition] ( identifier[name] , identifier[segments] , identifier[ps] ) | def unify_partitions(self):
"""For all of the segments for a partition, create the parent partition, combine the
children into the parent, and delete the children. """
partitions = self.collect_segment_partitions()
# For each group, copy the segment partitions to the parent partitions, then
# delete the segment partitions.
with self.progress.start('coalesce', 0, message='Coalescing partition segments') as ps:
for (name, segments) in iteritems(partitions):
ps.add(item_type='partitions', item_count=len(segments), message='Coalescing partition {}'.format(name))
self.unify_partition(name, segments, ps) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['ps']] |
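
A hedged sketch of the data shape that the loop in `unify_partitions` appears to assume; the partition names are invented, and the assumption that `iteritems` comes from `six` is not confirmed by this snippet.

```python
from six import iteritems  # assumption: iteritems here comes from six

# Hypothetical output shape of collect_segment_partitions():
# parent partition name -> list of its segment partitions.
partitions = {
    'demo-p1': ['demo-p1-seg1', 'demo-p1-seg2'],
    'demo-p2': ['demo-p2-seg1'],
}
for name, segments in iteritems(partitions):
    print('{}: {} segment(s)'.format(name, len(segments)))
```
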
def _update_table_names(name, dat):
"""
Model placement is subject to change. That means all names within the model (names are path-dependent) are also
subject to change. Whichever name is decided, the inner data needs to match it.
:param dict dat: Metadata
:param str name: Table name
:return dict dat: Metadata
"""
for _tabletype in ["summary", "distribution", "ensemble"]:
_ttname = "{}Table".format(_tabletype)
if _ttname in dat:
_new_tables = OrderedDict()
_idx = 0
# change all the top level table names
for k, v in dat[_ttname].items():
_new_ttname = "{}{}{}".format(name, _tabletype, _idx)
_idx += 1
# change all the table names in the table metadata
v["tableName"] = _new_ttname
# remove the filename. It shouldn't be stored anyway
if "filename" in v:
v["filename"] = ""
# place dat into the new ordered dictionary
_new_tables[_new_ttname] = v
# place new tables into the original dat
dat[_ttname] = _new_tables
return dat | def function[_update_table_names, parameter[name, dat]]:
constant[
Model placement is subject to change. That means all names within the model (names are path-dependent) are also
subject to change. Whichever name is decided, the inner data needs to match it.
:param dict dat: Metadata
:param str name: Table name
:return dict dat: Metadata
]
for taget[name[_tabletype]] in starred[list[[<ast.Constant object at 0x7da18f00e620>, <ast.Constant object at 0x7da18f00d300>, <ast.Constant object at 0x7da18f00d2d0>]]] begin[:]
variable[_ttname] assign[=] call[constant[{}Table].format, parameter[name[_tabletype]]]
if compare[name[_ttname] in name[dat]] begin[:]
variable[_new_tables] assign[=] call[name[OrderedDict], parameter[]]
variable[_idx] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da18f00ec80>, <ast.Name object at 0x7da18f00ed10>]]] in starred[call[call[name[dat]][name[_ttname]].items, parameter[]]] begin[:]
variable[_new_ttname] assign[=] call[constant[{}{}{}].format, parameter[name[name], name[_tabletype], name[_idx]]]
<ast.AugAssign object at 0x7da18f00c0a0>
call[name[v]][constant[tableName]] assign[=] name[_new_ttname]
if compare[constant[filename] in name[v]] begin[:]
call[name[v]][constant[filename]] assign[=] constant[]
call[name[_new_tables]][name[_new_ttname]] assign[=] name[v]
call[name[dat]][name[_ttname]] assign[=] name[_new_tables]
return[name[dat]] | keyword[def] identifier[_update_table_names] ( identifier[name] , identifier[dat] ):
literal[string]
keyword[for] identifier[_tabletype] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[_ttname] = literal[string] . identifier[format] ( identifier[_tabletype] )
keyword[if] identifier[_ttname] keyword[in] identifier[dat] :
identifier[_new_tables] = identifier[OrderedDict] ()
identifier[_idx] = literal[int]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[dat] [ identifier[_ttname] ]. identifier[items] ():
identifier[_new_ttname] = literal[string] . identifier[format] ( identifier[name] , identifier[_tabletype] , identifier[_idx] )
identifier[_idx] += literal[int]
identifier[v] [ literal[string] ]= identifier[_new_ttname]
keyword[if] literal[string] keyword[in] identifier[v] :
identifier[v] [ literal[string] ]= literal[string]
identifier[_new_tables] [ identifier[_new_ttname] ]= identifier[v]
identifier[dat] [ identifier[_ttname] ]= identifier[_new_tables]
keyword[return] identifier[dat] | def _update_table_names(name, dat):
"""
Model placement is subject to change. That means all names within the model (names are path-dependent) are also
subject to change. Whichever name is decided, the inner data needs to match it.
:param dict dat: Metadata
:param str name: Table name
:return dict dat: Metadata
"""
for _tabletype in ['summary', 'distribution', 'ensemble']:
_ttname = '{}Table'.format(_tabletype)
if _ttname in dat:
_new_tables = OrderedDict()
_idx = 0
# change all the top level table names
for (k, v) in dat[_ttname].items():
_new_ttname = '{}{}{}'.format(name, _tabletype, _idx)
_idx += 1
#change all the table names in the table metadata
v['tableName'] = _new_ttname
# remove the filename. It shouldn't be stored anyway
if 'filename' in v:
v['filename'] = '' # depends on [control=['if'], data=['v']]
# place dat into the new ordered dictionary
_new_tables[_new_ttname] = v # depends on [control=['for'], data=[]]
# place new tables into the original dat
dat[_ttname] = _new_tables # depends on [control=['if'], data=['_ttname', 'dat']] # depends on [control=['for'], data=['_tabletype']]
return dat |
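
A hedged usage sketch for `_update_table_names` above; the metadata below is invented, and the sketch assumes the function and `OrderedDict` are importable as shown.

```python
from collections import OrderedDict

# Invented metadata: two summary tables keyed by their old names.
dat = {
    'summaryTable': OrderedDict([
        ('old0', {'tableName': 'old0', 'filename': 'a.csv'}),
        ('old1', {'tableName': 'old1', 'filename': 'b.csv'}),
    ])
}
dat = _update_table_names('model0', dat)
print(list(dat['summaryTable']))
# ['model0summary0', 'model0summary1']; each entry's tableName now matches
# its new key and its filename has been cleared.
```
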
def request_data(cls, time, site_id, derived=False):
"""Retreive IGRA version 2 data for one station.
Parameters
--------
site_id : str
11-character IGRA2 station identifier.
time : datetime
The date and time of the desired observation. If list of two times is given,
dataframes for all dates within the two dates will be returned.
Returns
-------
:class: `pandas.DataFrame` containing the data.
"""
igra2 = cls()
# Set parameters for data query
if derived:
igra2.ftpsite = igra2.ftpsite + 'derived/derived-por/'
igra2.suffix = igra2.suffix + '-drvd.txt'
else:
igra2.ftpsite = igra2.ftpsite + 'data/data-por/'
igra2.suffix = igra2.suffix + '-data.txt'
if type(time) == datetime.datetime:
igra2.begin_date = time
igra2.end_date = time
else:
igra2.begin_date, igra2.end_date = time
igra2.site_id = site_id
df, headers = igra2._get_data()
return df, headers | def function[request_data, parameter[cls, time, site_id, derived]]:
constant[Retrieve IGRA version 2 data for one station.
Parameters
----------
site_id : str
11-character IGRA2 station identifier.
time : datetime
The date and time of the desired observation. If list of two times is given,
dataframes for all dates within the two dates will be returned.
Returns
-------
:class: `pandas.DataFrame` containing the data.
]
variable[igra2] assign[=] call[name[cls], parameter[]]
if name[derived] begin[:]
name[igra2].ftpsite assign[=] binary_operation[name[igra2].ftpsite + constant[derived/derived-por/]]
name[igra2].suffix assign[=] binary_operation[name[igra2].suffix + constant[-drvd.txt]]
if compare[call[name[type], parameter[name[time]]] equal[==] name[datetime].datetime] begin[:]
name[igra2].begin_date assign[=] name[time]
name[igra2].end_date assign[=] name[time]
name[igra2].site_id assign[=] name[site_id]
<ast.Tuple object at 0x7da1b1044730> assign[=] call[name[igra2]._get_data, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b1045900>, <ast.Name object at 0x7da1b10464d0>]]] | keyword[def] identifier[request_data] ( identifier[cls] , identifier[time] , identifier[site_id] , identifier[derived] = keyword[False] ):
literal[string]
identifier[igra2] = identifier[cls] ()
keyword[if] identifier[derived] :
identifier[igra2] . identifier[ftpsite] = identifier[igra2] . identifier[ftpsite] + literal[string]
identifier[igra2] . identifier[suffix] = identifier[igra2] . identifier[suffix] + literal[string]
keyword[else] :
identifier[igra2] . identifier[ftpsite] = identifier[igra2] . identifier[ftpsite] + literal[string]
identifier[igra2] . identifier[suffix] = identifier[igra2] . identifier[suffix] + literal[string]
keyword[if] identifier[type] ( identifier[time] )== identifier[datetime] . identifier[datetime] :
identifier[igra2] . identifier[begin_date] = identifier[time]
identifier[igra2] . identifier[end_date] = identifier[time]
keyword[else] :
identifier[igra2] . identifier[begin_date] , identifier[igra2] . identifier[end_date] = identifier[time]
identifier[igra2] . identifier[site_id] = identifier[site_id]
identifier[df] , identifier[headers] = identifier[igra2] . identifier[_get_data] ()
keyword[return] identifier[df] , identifier[headers] | def request_data(cls, time, site_id, derived=False):
"""Retreive IGRA version 2 data for one station.
Parameters
--------
site_id : str
11-character IGRA2 station identifier.
time : datetime
The date and time of the desired observation. If list of two times is given,
dataframes for all dates within the two dates will be returned.
Returns
-------
:class: `pandas.DataFrame` containing the data.
"""
igra2 = cls()
# Set parameters for data query
if derived:
igra2.ftpsite = igra2.ftpsite + 'derived/derived-por/'
igra2.suffix = igra2.suffix + '-drvd.txt' # depends on [control=['if'], data=[]]
else:
igra2.ftpsite = igra2.ftpsite + 'data/data-por/'
igra2.suffix = igra2.suffix + '-data.txt'
if type(time) == datetime.datetime:
igra2.begin_date = time
igra2.end_date = time # depends on [control=['if'], data=[]]
else:
(igra2.begin_date, igra2.end_date) = time
igra2.site_id = site_id
(df, headers) = igra2._get_data()
return (df, headers) |
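
A small runnable sketch of the time-argument handling in `request_data` above: a single `datetime` collapses to begin == end, while a two-element sequence unpacks into a range. The dates are arbitrary.

```python
import datetime

time = [datetime.datetime(2014, 9, 10), datetime.datetime(2014, 9, 12)]
if type(time) == datetime.datetime:  # single observation time
    begin_date = end_date = time
else:                                # [start, end] range
    begin_date, end_date = time
print(begin_date, end_date)
```
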
def get(args):
"""
Get a river by name.
"""
m = RiverManager(args.hosts)
r = m.get(args.name)
if r:
print(json.dumps(r, indent=2))
else:
sys.exit(1) | def function[get, parameter[args]]:
constant[
Get a river by name.
]
variable[m] assign[=] call[name[RiverManager], parameter[name[args].hosts]]
variable[r] assign[=] call[name[m].get, parameter[name[args].name]]
if name[r] begin[:]
call[name[print], parameter[call[name[json].dumps, parameter[name[r]]]]] | keyword[def] identifier[get] ( identifier[args] ):
literal[string]
identifier[m] = identifier[RiverManager] ( identifier[args] . identifier[hosts] )
identifier[r] = identifier[m] . identifier[get] ( identifier[args] . identifier[name] )
keyword[if] identifier[r] :
identifier[print] ( identifier[json] . identifier[dumps] ( identifier[r] , identifier[indent] = literal[int] ))
keyword[else] :
identifier[sys] . identifier[exit] ( literal[int] ) | def get(args):
"""
Get a river by name.
"""
m = RiverManager(args.hosts)
r = m.get(args.name)
if r:
print(json.dumps(r, indent=2)) # depends on [control=['if'], data=[]]
else:
sys.exit(1) |
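
A hedged sketch of wiring `get` above into a CLI with argparse; the flag names are assumptions, and `RiverManager` must be importable for the call to succeed.

```python
import argparse

parser = argparse.ArgumentParser(description='Work with rivers.')
parser.add_argument('--hosts', nargs='+', default=['localhost:9200'])
parser.add_argument('name', help='river name to look up')

args = parser.parse_args(['--hosts', 'localhost:9200', 'my-river'])
get(args)  # prints the river as indented JSON, or exits with status 1
```
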
def validate():
"""
Validate that the currently installed version of spaCy is compatible
with the installed models. Should be run after `pip install -U spacy`.
"""
msg = Printer()
with msg.loading("Loading compatibility table..."):
r = requests.get(about.__compatibility__)
if r.status_code != 200:
msg.fail(
"Server error ({})".format(r.status_code),
"Couldn't fetch compatibility table.",
exits=1,
)
msg.good("Loaded compatibility table")
compat = r.json()["spacy"]
version = about.__version__
version = version.rsplit(".dev", 1)[0]
current_compat = compat.get(version)
if not current_compat:
msg.fail(
"Can't find spaCy v{} in compatibility table".format(version),
about.__compatibility__,
exits=1,
)
all_models = set()
for spacy_v, models in dict(compat).items():
all_models.update(models.keys())
for model, model_vs in models.items():
compat[spacy_v][model] = [reformat_version(v) for v in model_vs]
model_links = get_model_links(current_compat)
model_pkgs = get_model_pkgs(current_compat, all_models)
incompat_links = {l for l, d in model_links.items() if not d["compat"]}
incompat_models = {d["name"] for _, d in model_pkgs.items() if not d["compat"]}
incompat_models.update(
[d["name"] for _, d in model_links.items() if not d["compat"]]
)
na_models = [m for m in incompat_models if m not in current_compat]
update_models = [m for m in incompat_models if m in current_compat]
spacy_dir = Path(__file__).parent.parent
msg.divider("Installed models (spaCy v{})".format(about.__version__))
msg.info("spaCy installation: {}".format(path2str(spacy_dir)))
if model_links or model_pkgs:
header = ("TYPE", "NAME", "MODEL", "VERSION", "")
rows = []
for name, data in model_pkgs.items():
rows.append(get_model_row(current_compat, name, data, msg))
for name, data in model_links.items():
rows.append(get_model_row(current_compat, name, data, msg, "link"))
msg.table(rows, header=header)
else:
msg.text("No models found in your current environment.", exits=0)
if update_models:
msg.divider("Install updates")
msg.text("Use the following commands to update the model packages:")
cmd = "python -m spacy download {}"
print("\n".join([cmd.format(pkg) for pkg in update_models]) + "\n")
if na_models:
msg.text(
"The following models are not available for spaCy "
"v{}: {}".format(about.__version__, ", ".join(na_models))
)
if incompat_links:
msg.text(
"You may also want to overwrite the incompatible links using the "
"`python -m spacy link` command with `--force`, or remove them "
"from the data directory. "
"Data path: {path}".format(path=path2str(get_data_path()))
)
if incompat_models or incompat_links:
sys.exit(1) | def function[validate, parameter[]]:
constant[
Validate that the currently installed version of spaCy is compatible
with the installed models. Should be run after `pip install -U spacy`.
]
variable[msg] assign[=] call[name[Printer], parameter[]]
with call[name[msg].loading, parameter[constant[Loading compatibility table...]]] begin[:]
variable[r] assign[=] call[name[requests].get, parameter[name[about].__compatibility__]]
if compare[name[r].status_code not_equal[!=] constant[200]] begin[:]
call[name[msg].fail, parameter[call[constant[Server error ({})].format, parameter[name[r].status_code]], constant[Couldn't fetch compatibility table.]]]
call[name[msg].good, parameter[constant[Loaded compatibility table]]]
variable[compat] assign[=] call[call[name[r].json, parameter[]]][constant[spacy]]
variable[version] assign[=] name[about].__version__
variable[version] assign[=] call[call[name[version].rsplit, parameter[constant[.dev], constant[1]]]][constant[0]]
variable[current_compat] assign[=] call[name[compat].get, parameter[name[version]]]
if <ast.UnaryOp object at 0x7da20cabee60> begin[:]
call[name[msg].fail, parameter[call[constant[Can't find spaCy v{} in compatibility table].format, parameter[name[version]]], name[about].__compatibility__]]
variable[all_models] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20cabd480>, <ast.Name object at 0x7da20cabf2e0>]]] in starred[call[call[name[dict], parameter[name[compat]]].items, parameter[]]] begin[:]
call[name[all_models].update, parameter[call[name[models].keys, parameter[]]]]
for taget[tuple[[<ast.Name object at 0x7da20cabf670>, <ast.Name object at 0x7da20cabcc10>]]] in starred[call[name[models].items, parameter[]]] begin[:]
call[call[name[compat]][name[spacy_v]]][name[model]] assign[=] <ast.ListComp object at 0x7da20cabf430>
variable[model_links] assign[=] call[name[get_model_links], parameter[name[current_compat]]]
variable[model_pkgs] assign[=] call[name[get_model_pkgs], parameter[name[current_compat], name[all_models]]]
variable[incompat_links] assign[=] <ast.SetComp object at 0x7da1b1eeb520>
variable[incompat_models] assign[=] <ast.SetComp object at 0x7da1b1eea980>
call[name[incompat_models].update, parameter[<ast.ListComp object at 0x7da1b1ee9de0>]]
variable[na_models] assign[=] <ast.ListComp object at 0x7da1b1eebc40>
variable[update_models] assign[=] <ast.ListComp object at 0x7da1b1eea440>
variable[spacy_dir] assign[=] call[name[Path], parameter[name[__file__]]].parent.parent
call[name[msg].divider, parameter[call[constant[Installed models (spaCy v{})].format, parameter[name[about].__version__]]]]
call[name[msg].info, parameter[call[constant[spaCy installation: {}].format, parameter[call[name[path2str], parameter[name[spacy_dir]]]]]]]
if <ast.BoolOp object at 0x7da1b1eeb7f0> begin[:]
variable[header] assign[=] tuple[[<ast.Constant object at 0x7da1b1ee87c0>, <ast.Constant object at 0x7da1b1eeabf0>, <ast.Constant object at 0x7da1b1ee97b0>, <ast.Constant object at 0x7da1b1eeb250>, <ast.Constant object at 0x7da1b1eeb340>]]
variable[rows] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1ee9690>, <ast.Name object at 0x7da1b1eeafb0>]]] in starred[call[name[model_pkgs].items, parameter[]]] begin[:]
call[name[rows].append, parameter[call[name[get_model_row], parameter[name[current_compat], name[name], name[data], name[msg]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b1eea350>, <ast.Name object at 0x7da1b1eebeb0>]]] in starred[call[name[model_links].items, parameter[]]] begin[:]
call[name[rows].append, parameter[call[name[get_model_row], parameter[name[current_compat], name[name], name[data], name[msg], constant[link]]]]]
call[name[msg].table, parameter[name[rows]]]
if name[update_models] begin[:]
call[name[msg].divider, parameter[constant[Install updates]]]
call[name[msg].text, parameter[constant[Use the following commands to update the model packages:]]]
variable[cmd] assign[=] constant[python -m spacy download {}]
call[name[print], parameter[binary_operation[call[constant[
].join, parameter[<ast.ListComp object at 0x7da1b1ee8760>]] + constant[
]]]]
if name[na_models] begin[:]
call[name[msg].text, parameter[call[constant[The following models are not available for spaCy v{}: {}].format, parameter[name[about].__version__, call[constant[, ].join, parameter[name[na_models]]]]]]]
if name[incompat_links] begin[:]
call[name[msg].text, parameter[call[constant[You may also want to overwrite the incompatible links using the `python -m spacy link` command with `--force`, or remove them from the data directory. Data path: {path}].format, parameter[]]]]
if <ast.BoolOp object at 0x7da1b1ef82b0> begin[:]
call[name[sys].exit, parameter[constant[1]]] | keyword[def] identifier[validate] ():
literal[string]
identifier[msg] = identifier[Printer] ()
keyword[with] identifier[msg] . identifier[loading] ( literal[string] ):
identifier[r] = identifier[requests] . identifier[get] ( identifier[about] . identifier[__compatibility__] )
keyword[if] identifier[r] . identifier[status_code] != literal[int] :
identifier[msg] . identifier[fail] (
literal[string] . identifier[format] ( identifier[r] . identifier[status_code] ),
literal[string] ,
identifier[exits] = literal[int] ,
)
identifier[msg] . identifier[good] ( literal[string] )
identifier[compat] = identifier[r] . identifier[json] ()[ literal[string] ]
identifier[version] = identifier[about] . identifier[__version__]
identifier[version] = identifier[version] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[current_compat] = identifier[compat] . identifier[get] ( identifier[version] )
keyword[if] keyword[not] identifier[current_compat] :
identifier[msg] . identifier[fail] (
literal[string] . identifier[format] ( identifier[version] ),
identifier[about] . identifier[__compatibility__] ,
identifier[exits] = literal[int] ,
)
identifier[all_models] = identifier[set] ()
keyword[for] identifier[spacy_v] , identifier[models] keyword[in] identifier[dict] ( identifier[compat] ). identifier[items] ():
identifier[all_models] . identifier[update] ( identifier[models] . identifier[keys] ())
keyword[for] identifier[model] , identifier[model_vs] keyword[in] identifier[models] . identifier[items] ():
identifier[compat] [ identifier[spacy_v] ][ identifier[model] ]=[ identifier[reformat_version] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[model_vs] ]
identifier[model_links] = identifier[get_model_links] ( identifier[current_compat] )
identifier[model_pkgs] = identifier[get_model_pkgs] ( identifier[current_compat] , identifier[all_models] )
identifier[incompat_links] ={ identifier[l] keyword[for] identifier[l] , identifier[d] keyword[in] identifier[model_links] . identifier[items] () keyword[if] keyword[not] identifier[d] [ literal[string] ]}
identifier[incompat_models] ={ identifier[d] [ literal[string] ] keyword[for] identifier[_] , identifier[d] keyword[in] identifier[model_pkgs] . identifier[items] () keyword[if] keyword[not] identifier[d] [ literal[string] ]}
identifier[incompat_models] . identifier[update] (
[ identifier[d] [ literal[string] ] keyword[for] identifier[_] , identifier[d] keyword[in] identifier[model_links] . identifier[items] () keyword[if] keyword[not] identifier[d] [ literal[string] ]]
)
identifier[na_models] =[ identifier[m] keyword[for] identifier[m] keyword[in] identifier[incompat_models] keyword[if] identifier[m] keyword[not] keyword[in] identifier[current_compat] ]
identifier[update_models] =[ identifier[m] keyword[for] identifier[m] keyword[in] identifier[incompat_models] keyword[if] identifier[m] keyword[in] identifier[current_compat] ]
identifier[spacy_dir] = identifier[Path] ( identifier[__file__] ). identifier[parent] . identifier[parent]
identifier[msg] . identifier[divider] ( literal[string] . identifier[format] ( identifier[about] . identifier[__version__] ))
identifier[msg] . identifier[info] ( literal[string] . identifier[format] ( identifier[path2str] ( identifier[spacy_dir] )))
keyword[if] identifier[model_links] keyword[or] identifier[model_pkgs] :
identifier[header] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] )
identifier[rows] =[]
keyword[for] identifier[name] , identifier[data] keyword[in] identifier[model_pkgs] . identifier[items] ():
identifier[rows] . identifier[append] ( identifier[get_model_row] ( identifier[current_compat] , identifier[name] , identifier[data] , identifier[msg] ))
keyword[for] identifier[name] , identifier[data] keyword[in] identifier[model_links] . identifier[items] ():
identifier[rows] . identifier[append] ( identifier[get_model_row] ( identifier[current_compat] , identifier[name] , identifier[data] , identifier[msg] , literal[string] ))
identifier[msg] . identifier[table] ( identifier[rows] , identifier[header] = identifier[header] )
keyword[else] :
identifier[msg] . identifier[text] ( literal[string] , identifier[exits] = literal[int] )
keyword[if] identifier[update_models] :
identifier[msg] . identifier[divider] ( literal[string] )
identifier[msg] . identifier[text] ( literal[string] )
identifier[cmd] = literal[string]
identifier[print] ( literal[string] . identifier[join] ([ identifier[cmd] . identifier[format] ( identifier[pkg] ) keyword[for] identifier[pkg] keyword[in] identifier[update_models] ])+ literal[string] )
keyword[if] identifier[na_models] :
identifier[msg] . identifier[text] (
literal[string]
literal[string] . identifier[format] ( identifier[about] . identifier[__version__] , literal[string] . identifier[join] ( identifier[na_models] ))
)
keyword[if] identifier[incompat_links] :
identifier[msg] . identifier[text] (
literal[string]
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[path] = identifier[path2str] ( identifier[get_data_path] ()))
)
keyword[if] identifier[incompat_models] keyword[or] identifier[incompat_links] :
identifier[sys] . identifier[exit] ( literal[int] ) | def validate():
"""
Validate that the currently installed version of spaCy is compatible
with the installed models. Should be run after `pip install -U spacy`.
"""
msg = Printer()
with msg.loading('Loading compatibility table...'):
r = requests.get(about.__compatibility__)
if r.status_code != 200:
msg.fail('Server error ({})'.format(r.status_code), "Couldn't fetch compatibility table.", exits=1) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
msg.good('Loaded compatibility table')
compat = r.json()['spacy']
version = about.__version__
version = version.rsplit('.dev', 1)[0]
current_compat = compat.get(version)
if not current_compat:
msg.fail("Can't find spaCy v{} in compatibility table".format(version), about.__compatibility__, exits=1) # depends on [control=['if'], data=[]]
all_models = set()
for (spacy_v, models) in dict(compat).items():
all_models.update(models.keys())
for (model, model_vs) in models.items():
compat[spacy_v][model] = [reformat_version(v) for v in model_vs] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
model_links = get_model_links(current_compat)
model_pkgs = get_model_pkgs(current_compat, all_models)
incompat_links = {l for (l, d) in model_links.items() if not d['compat']}
incompat_models = {d['name'] for (_, d) in model_pkgs.items() if not d['compat']}
incompat_models.update([d['name'] for (_, d) in model_links.items() if not d['compat']])
na_models = [m for m in incompat_models if m not in current_compat]
update_models = [m for m in incompat_models if m in current_compat]
spacy_dir = Path(__file__).parent.parent
msg.divider('Installed models (spaCy v{})'.format(about.__version__))
msg.info('spaCy installation: {}'.format(path2str(spacy_dir)))
if model_links or model_pkgs:
header = ('TYPE', 'NAME', 'MODEL', 'VERSION', '')
rows = []
for (name, data) in model_pkgs.items():
rows.append(get_model_row(current_compat, name, data, msg)) # depends on [control=['for'], data=[]]
for (name, data) in model_links.items():
rows.append(get_model_row(current_compat, name, data, msg, 'link')) # depends on [control=['for'], data=[]]
msg.table(rows, header=header) # depends on [control=['if'], data=[]]
else:
msg.text('No models found in your current environment.', exits=0)
if update_models:
msg.divider('Install updates')
msg.text('Use the following commands to update the model packages:')
cmd = 'python -m spacy download {}'
print('\n'.join([cmd.format(pkg) for pkg in update_models]) + '\n') # depends on [control=['if'], data=[]]
if na_models:
msg.text('The following models are not available for spaCy v{}: {}'.format(about.__version__, ', '.join(na_models))) # depends on [control=['if'], data=[]]
if incompat_links:
msg.text('You may also want to overwrite the incompatible links using the `python -m spacy link` command with `--force`, or remove them from the data directory. Data path: {path}'.format(path=path2str(get_data_path()))) # depends on [control=['if'], data=[]]
if incompat_models or incompat_links:
sys.exit(1) # depends on [control=['if'], data=[]] |
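
A minimal sketch of the version normalization used in `validate` above; the compatibility-table entry is invented.

```python
compat = {'2.1.3': {'en_core_web_sm': ['2.1.0']}}  # invented table entry
version = '2.1.3.dev0'.rsplit('.dev', 1)[0]        # strip any .dev suffix
print(version, version in compat)  # 2.1.3 True
```
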
def parse_options(arguments):
"""Parse command line arguments.
The parsing logic is fairly simple. It can only parse long-style
parameters of the form::
--key value
Several parameters can be defined in the environment and will be used
unless explicitly overridden with command-line arguments. The access key,
secret and endpoint values will be loaded from C{AWS_ACCESS_KEY_ID},
C{AWS_SECRET_ACCESS_KEY} and C{AWS_ENDPOINT} environment variables.
@param arguments: A list of command-line arguments. The first item is
expected to be the name of the program being run.
@raises OptionError: Raised if incorrectly formed command-line arguments
are specified, or if required command-line arguments are not present.
@raises UsageError: Raised if C{--help} is present in command-line
arguments.
@return: A C{dict} with key/value pairs extracted from the argument list.
"""
arguments = arguments[1:]
options = {}
while arguments:
key = arguments.pop(0)
if key in ("-h", "--help"):
raise UsageError("Help requested.")
if key.startswith("--"):
key = key[2:]
try:
value = arguments.pop(0)
except IndexError:
raise OptionError("'--%s' is missing a value." % key)
options[key] = value
else:
raise OptionError("Encountered unexpected value '%s'." % key)
default_key = os.environ.get("AWS_ACCESS_KEY_ID")
if "key" not in options and default_key:
options["key"] = default_key
default_secret = os.environ.get("AWS_SECRET_ACCESS_KEY")
if "secret" not in options and default_secret:
options["secret"] = default_secret
default_endpoint = os.environ.get("AWS_ENDPOINT")
if "endpoint" not in options and default_endpoint:
options["endpoint"] = default_endpoint
for name in ("key", "secret", "endpoint", "action"):
if name not in options:
raise OptionError(
"The '--%s' command-line argument is required." % name)
return options | def function[parse_options, parameter[arguments]]:
constant[Parse command line arguments.
The parsing logic is fairly simple. It can only parse long-style
parameters of the form::
--key value
Several parameters can be defined in the environment and will be used
unless explicitly overridden with command-line arguments. The access key,
secret and endpoint values will be loaded from C{AWS_ACCESS_KEY_ID},
C{AWS_SECRET_ACCESS_KEY} and C{AWS_ENDPOINT} environment variables.
@param arguments: A list of command-line arguments. The first item is
expected to be the name of the program being run.
@raises OptionError: Raised if incorrectly formed command-line arguments
are specified, or if required command-line arguments are not present.
@raises UsageError: Raised if C{--help} is present in command-line
arguments.
@return: A C{dict} with key/value pairs extracted from the argument list.
]
variable[arguments] assign[=] call[name[arguments]][<ast.Slice object at 0x7da18ede72b0>]
variable[options] assign[=] dictionary[[], []]
while name[arguments] begin[:]
variable[key] assign[=] call[name[arguments].pop, parameter[constant[0]]]
if compare[name[key] in tuple[[<ast.Constant object at 0x7da18ede5960>, <ast.Constant object at 0x7da18ede5030>]]] begin[:]
<ast.Raise object at 0x7da18ede7940>
if call[name[key].startswith, parameter[constant[--]]] begin[:]
variable[key] assign[=] call[name[key]][<ast.Slice object at 0x7da18ede56f0>]
<ast.Try object at 0x7da18ede6530>
call[name[options]][name[key]] assign[=] name[value]
variable[default_key] assign[=] call[name[os].environ.get, parameter[constant[AWS_ACCESS_KEY_ID]]]
if <ast.BoolOp object at 0x7da18ede53f0> begin[:]
call[name[options]][constant[key]] assign[=] name[default_key]
variable[default_secret] assign[=] call[name[os].environ.get, parameter[constant[AWS_SECRET_ACCESS_KEY]]]
if <ast.BoolOp object at 0x7da18ede58d0> begin[:]
call[name[options]][constant[secret]] assign[=] name[default_secret]
variable[default_endpoint] assign[=] call[name[os].environ.get, parameter[constant[AWS_ENDPOINT]]]
if <ast.BoolOp object at 0x7da18ede66b0> begin[:]
call[name[options]][constant[endpoint]] assign[=] name[default_endpoint]
for taget[name[name]] in starred[tuple[[<ast.Constant object at 0x7da18ede4b20>, <ast.Constant object at 0x7da18ede6c50>, <ast.Constant object at 0x7da18ede4f70>, <ast.Constant object at 0x7da18ede4ca0>]]] begin[:]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[options]] begin[:]
<ast.Raise object at 0x7da18ede6560>
return[name[options]] | keyword[def] identifier[parse_options] ( identifier[arguments] ):
literal[string]
identifier[arguments] = identifier[arguments] [ literal[int] :]
identifier[options] ={}
keyword[while] identifier[arguments] :
identifier[key] = identifier[arguments] . identifier[pop] ( literal[int] )
keyword[if] identifier[key] keyword[in] ( literal[string] , literal[string] ):
keyword[raise] identifier[UsageError] ( literal[string] )
keyword[if] identifier[key] . identifier[startswith] ( literal[string] ):
identifier[key] = identifier[key] [ literal[int] :]
keyword[try] :
identifier[value] = identifier[arguments] . identifier[pop] ( literal[int] )
keyword[except] identifier[IndexError] :
keyword[raise] identifier[OptionError] ( literal[string] % identifier[key] )
identifier[options] [ identifier[key] ]= identifier[value]
keyword[else] :
keyword[raise] identifier[OptionError] ( literal[string] % identifier[key] )
identifier[default_key] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[options] keyword[and] identifier[default_key] :
identifier[options] [ literal[string] ]= identifier[default_key]
identifier[default_secret] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[options] keyword[and] identifier[default_secret] :
identifier[options] [ literal[string] ]= identifier[default_secret]
identifier[default_endpoint] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[options] keyword[and] identifier[default_endpoint] :
identifier[options] [ literal[string] ]= identifier[default_endpoint]
keyword[for] identifier[name] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[if] identifier[name] keyword[not] keyword[in] identifier[options] :
keyword[raise] identifier[OptionError] (
literal[string] % identifier[name] )
keyword[return] identifier[options] | def parse_options(arguments):
"""Parse command line arguments.
The parsing logic is fairly simple. It can only parse long-style
parameters of the form::
--key value
Several parameters can be defined in the environment and will be used
unless explicitly overridden with command-line arguments. The access key,
secret and endpoint values will be loaded from C{AWS_ACCESS_KEY_ID},
C{AWS_SECRET_ACCESS_KEY} and C{AWS_ENDPOINT} environment variables.
@param arguments: A list of command-line arguments. The first item is
expected to be the name of the program being run.
@raises OptionError: Raised if incorrectly formed command-line arguments
are specified, or if required command-line arguments are not present.
@raises UsageError: Raised if C{--help} is present in command-line
arguments.
@return: A C{dict} with key/value pairs extracted from the argument list.
"""
arguments = arguments[1:]
options = {}
while arguments:
key = arguments.pop(0)
if key in ('-h', '--help'):
raise UsageError('Help requested.') # depends on [control=['if'], data=[]]
if key.startswith('--'):
key = key[2:]
try:
value = arguments.pop(0) # depends on [control=['try'], data=[]]
except IndexError:
raise OptionError("'--%s' is missing a value." % key) # depends on [control=['except'], data=[]]
options[key] = value # depends on [control=['if'], data=[]]
else:
raise OptionError("Encountered unexpected value '%s'." % key) # depends on [control=['while'], data=[]]
default_key = os.environ.get('AWS_ACCESS_KEY_ID')
if 'key' not in options and default_key:
options['key'] = default_key # depends on [control=['if'], data=[]]
default_secret = os.environ.get('AWS_SECRET_ACCESS_KEY')
if 'secret' not in options and default_secret:
options['secret'] = default_secret # depends on [control=['if'], data=[]]
default_endpoint = os.environ.get('AWS_ENDPOINT')
if 'endpoint' not in options and default_endpoint:
options['endpoint'] = default_endpoint # depends on [control=['if'], data=[]]
for name in ('key', 'secret', 'endpoint', 'action'):
if name not in options:
raise OptionError("The '--%s' command-line argument is required." % name) # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=['name']]
return options |
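
A runnable example of `parse_options` above; the credentials are fake and exist only to exercise the environment-variable fallback.

```python
import os

os.environ['AWS_ACCESS_KEY_ID'] = 'AKIDEXAMPLE'       # fake credentials
os.environ['AWS_SECRET_ACCESS_KEY'] = 'wJalrEXAMPLE'
argv = ['prog', '--endpoint', 'https://example.com/', '--action', 'list']
options = parse_options(argv)
print(options['action'])  # 'list'
print(options['key'])     # 'AKIDEXAMPLE', picked up from the environment
```
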
def _closeCompletion(self):
"""Close completion, if visible.
Delete widget
"""
if self._widget is not None:
self._widget.close()
self._widget = None
self._completionOpenedManually = False | def function[_closeCompletion, parameter[self]]:
constant[Close completion, if visible.
Delete widget
]
if compare[name[self]._widget is_not constant[None]] begin[:]
call[name[self]._widget.close, parameter[]]
name[self]._widget assign[=] constant[None]
name[self]._completionOpenedManually assign[=] constant[False] | keyword[def] identifier[_closeCompletion] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_widget] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_widget] . identifier[close] ()
identifier[self] . identifier[_widget] = keyword[None]
identifier[self] . identifier[_completionOpenedManually] = keyword[False] | def _closeCompletion(self):
"""Close completion, if visible.
Delete widget
"""
if self._widget is not None:
self._widget.close()
self._widget = None
self._completionOpenedManually = False # depends on [control=['if'], data=[]] |
def read_times(self, filename=None):
"""
Read true time step data from individual time steps.
Returns
-------
steps : array
The time steps.
times : array
The times of the time steps.
nts : array
The normalized times of the time steps, in [0, 1].
Notes
-----
The default implementation returns empty arrays.
"""
aux = nm.array([], dtype=nm.float64)
return aux.astype(nm.int32), aux, aux | def function[read_times, parameter[self, filename]]:
constant[
Read true time step data from individual time steps.
Returns
-------
steps : array
The time steps.
times : array
The times of the time steps.
nts : array
The normalized times of the time steps, in [0, 1].
Notes
-----
The default implementation returns empty arrays.
]
variable[aux] assign[=] call[name[nm].array, parameter[list[[]]]]
return[tuple[[<ast.Call object at 0x7da20c9930a0>, <ast.Name object at 0x7da20c990d60>, <ast.Name object at 0x7da20c990d30>]]] | keyword[def] identifier[read_times] ( identifier[self] , identifier[filename] = keyword[None] ):
literal[string]
identifier[aux] = identifier[nm] . identifier[array] ([], identifier[dtype] = identifier[nm] . identifier[float64] )
keyword[return] identifier[aux] . identifier[astype] ( identifier[nm] . identifier[int32] ), identifier[aux] , identifier[aux] | def read_times(self, filename=None):
"""
Read true time step data from individual time steps.
Returns
-------
steps : array
The time steps.
times : array
The times of the time steps.
nts : array
The normalized times of the time steps, in [0, 1].
Notes
-----
The default implementation returns empty arrays.
"""
aux = nm.array([], dtype=nm.float64)
return (aux.astype(nm.int32), aux, aux) |
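
Since the base implementation above returns empty arrays, a subclass is expected to supply the real data. A hedged sketch with invented values (the class name is hypothetical):

```python
import numpy as nm

class ExampleIO:
    def read_times(self, filename=None):
        times = nm.array([0.0, 0.5, 2.0], dtype=nm.float64)  # invented data
        steps = nm.arange(len(times), dtype=nm.int32)
        nts = times / times[-1]  # normalized times in [0, 1]
        return steps, times, nts

steps, times, nts = ExampleIO().read_times()
print(nts)  # [0.   0.25 1.  ]
```
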
def compute_stats(array, stats, weights):
"""
:param array:
an array of R elements (which can be arrays)
:param stats:
a sequence of S statistic functions
:param weights:
a list of R weights
:returns:
an array of S elements (which can be arrays)
"""
result = numpy.zeros((len(stats),) + array.shape[1:], array.dtype)
for i, func in enumerate(stats):
result[i] = apply_stat(func, array, weights)
return result | def function[compute_stats, parameter[array, stats, weights]]:
constant[
:param array:
an array of R elements (which can be arrays)
:param stats:
a sequence of S statistic functions
:param weights:
a list of R weights
:returns:
an array of S elements (which can be arrays)
]
variable[result] assign[=] call[name[numpy].zeros, parameter[binary_operation[tuple[[<ast.Call object at 0x7da207f016f0>]] + call[name[array].shape][<ast.Slice object at 0x7da207f03ca0>]], name[array].dtype]]
for taget[tuple[[<ast.Name object at 0x7da20c7c9060>, <ast.Name object at 0x7da20c7c87f0>]]] in starred[call[name[enumerate], parameter[name[stats]]]] begin[:]
call[name[result]][name[i]] assign[=] call[name[apply_stat], parameter[name[func], name[array], name[weights]]]
return[name[result]] | keyword[def] identifier[compute_stats] ( identifier[array] , identifier[stats] , identifier[weights] ):
literal[string]
identifier[result] = identifier[numpy] . identifier[zeros] (( identifier[len] ( identifier[stats] ),)+ identifier[array] . identifier[shape] [ literal[int] :], identifier[array] . identifier[dtype] )
keyword[for] identifier[i] , identifier[func] keyword[in] identifier[enumerate] ( identifier[stats] ):
identifier[result] [ identifier[i] ]= identifier[apply_stat] ( identifier[func] , identifier[array] , identifier[weights] )
keyword[return] identifier[result] | def compute_stats(array, stats, weights):
"""
:param array:
an array of R elements (which can be arrays)
:param stats:
a sequence of S statistic functions
:param weights:
a list of R weights
:returns:
an array of S elements (which can be arrays)
"""
result = numpy.zeros((len(stats),) + array.shape[1:], array.dtype)
for (i, func) in enumerate(stats):
result[i] = apply_stat(func, array, weights) # depends on [control=['for'], data=[]]
return result |
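
`apply_stat` is not shown in this snippet, so the sketch below supplies a minimal stand-in that simply forwards to the statistic function; the numbers are invented.

```python
import numpy

def apply_stat(func, array, weights):
    # minimal stand-in; the real apply_stat is not part of this snippet
    return func(array, weights)

def weighted_mean(array, weights):
    return numpy.average(array, weights=weights, axis=0)

array = numpy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])  # R = 3 elements
print(compute_stats(array, [weighted_mean], [0.2, 0.3, 0.5]))
# [[3.6 4.6]] -- one row per statistic
```
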
def plat_specific_errors(*errnames):
"""Return error numbers for all errors in errnames on this platform.
The 'errno' module contains different global constants depending on
the specific platform (OS). This function will return the list of
numeric values for a given list of potential names.
"""
missing_attr = set([None, ])
unique_nums = set(getattr(errno, k, None) for k in errnames)
return list(unique_nums - missing_attr) | def function[plat_specific_errors, parameter[]]:
constant[Return error numbers for all errors in errnames on this platform.
The 'errno' module contains different global constants depending on
the specific platform (OS). This function will return the list of
numeric values for a given list of potential names.
]
variable[missing_attr] assign[=] call[name[set], parameter[list[[<ast.Constant object at 0x7da20c6e7580>]]]]
variable[unique_nums] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da20c6e58d0>]]
return[call[name[list], parameter[binary_operation[name[unique_nums] - name[missing_attr]]]]] | keyword[def] identifier[plat_specific_errors] (* identifier[errnames] ):
literal[string]
identifier[missing_attr] = identifier[set] ([ keyword[None] ,])
identifier[unique_nums] = identifier[set] ( identifier[getattr] ( identifier[errno] , identifier[k] , keyword[None] ) keyword[for] identifier[k] keyword[in] identifier[errnames] )
keyword[return] identifier[list] ( identifier[unique_nums] - identifier[missing_attr] ) | def plat_specific_errors(*errnames):
"""Return error numbers for all errors in errnames on this platform.
The 'errno' module contains different global constants depending on
the specific platform (OS). This function will return the list of
numeric values for a given list of potential names.
"""
missing_attr = set([None])
unique_nums = set((getattr(errno, k, None) for k in errnames))
return list(unique_nums - missing_attr) |
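
Usage example for `plat_specific_errors` above; the exact numbers depend on the platform, and names missing from the `errno` module are simply skipped.

```python
import errno  # also required by plat_specific_errors itself

nums = plat_specific_errors('EPIPE', 'ETIMEDOUT', 'WSAECONNRESET')
print(sorted(nums))  # e.g. [32, 110] on Linux, where WSAECONNRESET is absent
```
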
def execute(self, time_interval):
"""
Execute the factor over the given time interval.
:param time_interval: the time interval to execute over
:return: self
"""
logging.info('{} with sink node {} running from {} to {}'.format(
self.tool.__class__.__name__, self.sink.node_id, time_interval.start, time_interval.end))
if self.plates:
if isinstance(self.tool, AggregateTool):
if len(self.sources) != 1:
raise ValueError("Currently only a single source node is valid for an Aggregate Tool")
if self.alignment_node:
raise ValueError("Currently an alignment node cannot be used with an Aggregate Tool")
all_sources = self.sources[0]
# Here we should loop through the plate values of the sink, and get the sources that are appropriate for
# that given plate value, and pass only those sources to the tool. This is cleaner than making the tool
# deal with all of the sources
for pv in self.sink.plate_values:
sources = [all_sources.streams[s] for s in all_sources.streams if all([v in s for v in pv])]
sink = self.sink.streams[pv]
self.tool.execute(sources=sources, sink=sink, interval=time_interval, alignment_stream=None)
elif isinstance(self.tool, SelectorTool):
if len(self.sources) == 1:
sources = self.sources[0].streams.values()
elif len(self.sources) == 2:
selector_node = self.sources[0]
if len(selector_node.streams) != 1:
raise ValueError("Selector node should only have one stream")
sources = [self.sources[0].streams[None], self.sources[1].streams.values()]
else:
raise ValueError("Currently only one or twos source nodes are valid for a Selector Tool")
if self.alignment_node:
raise ValueError("Currently an alignment node cannot be used with a Selector Tool")
diff, counts, is_sub_plate = self.sources[-1].difference(self.sink)
# TODO: This sub-plate selection is deprecated
if (counts == [1, 1] and is_sub_plate) or \
(len(self.sink.plates) == 1 and counts == [1, 0] and is_sub_plate) or \
(next(p.is_root for p in self.sources[-1].plates)
and len(self.sink.plates) == 1
and self.sink.plates[0] in self.sources[-1].plates):
# Special case of tools that are performing sub-selection
self.tool.execute(sources=sources,
sinks=self.sink.streams.values(),
interval=time_interval)
else:
raise ValueError("Source and sink plates do not match within a Selector Tool")
else:
# TODO: This loop over plates is probably not correct:
# What we probably want is to take the cartesian product of plate values
if len(self.plates) == 1:
plate = self.plates[0]
for pv in plate.values:
sources = self.get_sources(plate, pv)
sink = self.sink.streams[pv]
self.tool.execute(sources=sources, sink=sink, interval=time_interval,
alignment_stream=self.get_alignment_stream(None, None))
else:
if len(self.sources) != 1 and not all(s.plates == self.plates for s in self.sources):
source_plates = sorted(p.plate_id for s in self.sources for p in s.plates)
self_plates = sorted(p.plate_id for p in self.plates)
if source_plates == self_plates:
# This is the case where the sources are all on separate plates and the sink is the
# combination
search = [[x[0] for x in p.values] for p in self.plates]
_pv = sorted(itertools.product(*search))
for pv in _pv:
# Here we're selecting the streams that have the partial match of the plate value
sources = [source.streams[s] for source in self.sources
for s in source.streams if (s[0] in pv)]
try:
sink = self.sink.streams[pv]
self.tool.execute(sources=sources, sink=sink, interval=time_interval,
alignment_stream=self.get_alignment_stream(None, None))
except KeyError as e:
continue
else:
raise NotImplementedError
for pv in Plate.get_overlapping_values(self.plates):
sources = [source.streams[s] for source in self.sources for s in source.streams if pv == s]
sink = self.sink.streams[pv]
self.tool.execute(sources=sources, sink=sink, interval=time_interval,
alignment_stream=self.get_alignment_stream(None, None))
else:
if isinstance(self.tool, AggregateTool):
# raise ValueError("Cannot execute an AggregateTool if no plates are defined for the factor")
# Here we're trying to aggregate off a plate. This is only allowed for a single non-overlapping plate.
if len(self.sources) != 1:
raise ValueError("Currently only a single source node is valid for an Aggregate Tool")
if self.alignment_node:
raise ValueError("Currently an alignment node cannot be used with an Aggregate Tool")
sources = self.sources[0].streams.values()
sink = self.sink.streams[None]
self.tool.execute(sources=sources, sink=sink, interval=time_interval, alignment_stream=None)
else:
# sources = [source.streams[None] for source in self.sources] if self.sources else None
sources = self.get_global_sources()
sink = self.sink.streams[None]
self.tool.execute(sources=sources, sink=sink, interval=time_interval,
alignment_stream=self.get_alignment_stream(None, None))
return self | def function[execute, parameter[self, time_interval]]:
constant[
Execute the factor over the given time interval
:param time_interval:
:return:
]
call[name[logging].info, parameter[call[constant[{} with sink node {} running from {} to {}].format, parameter[name[self].tool.__class__.__name__, name[self].sink.node_id, name[time_interval].start, name[time_interval].end]]]]
if name[self].plates begin[:]
if call[name[isinstance], parameter[name[self].tool, name[AggregateTool]]] begin[:]
if compare[call[name[len], parameter[name[self].sources]] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da20c6c4340>
if name[self].alignment_node begin[:]
<ast.Raise object at 0x7da20c6c5bd0>
variable[all_sources] assign[=] call[name[self].sources][constant[0]]
for taget[name[pv]] in starred[name[self].sink.plate_values] begin[:]
variable[sources] assign[=] <ast.ListComp object at 0x7da20c6c7850>
variable[sink] assign[=] call[name[self].sink.streams][name[pv]]
call[name[self].tool.execute, parameter[]]
return[name[self]] | keyword[def] identifier[execute] ( identifier[self] , identifier[time_interval] ):
literal[string]
identifier[logging] . identifier[info] ( literal[string] . identifier[format] (
identifier[self] . identifier[tool] . identifier[__class__] . identifier[__name__] , identifier[self] . identifier[sink] . identifier[node_id] , identifier[time_interval] . identifier[start] , identifier[time_interval] . identifier[end] ))
keyword[if] identifier[self] . identifier[plates] :
keyword[if] identifier[isinstance] ( identifier[self] . identifier[tool] , identifier[AggregateTool] ):
keyword[if] identifier[len] ( identifier[self] . identifier[sources] )!= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[alignment_node] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[all_sources] = identifier[self] . identifier[sources] [ literal[int] ]
keyword[for] identifier[pv] keyword[in] identifier[self] . identifier[sink] . identifier[plate_values] :
identifier[sources] =[ identifier[all_sources] . identifier[streams] [ identifier[s] ] keyword[for] identifier[s] keyword[in] identifier[all_sources] . identifier[streams] keyword[if] identifier[all] ([ identifier[v] keyword[in] identifier[s] keyword[for] identifier[v] keyword[in] identifier[pv] ])]
identifier[sink] = identifier[self] . identifier[sink] . identifier[streams] [ identifier[pv] ]
identifier[self] . identifier[tool] . identifier[execute] ( identifier[sources] = identifier[sources] , identifier[sink] = identifier[sink] , identifier[interval] = identifier[time_interval] , identifier[alignment_stream] = keyword[None] )
keyword[elif] identifier[isinstance] ( identifier[self] . identifier[tool] , identifier[SelectorTool] ):
keyword[if] identifier[len] ( identifier[self] . identifier[sources] )== literal[int] :
identifier[sources] = identifier[self] . identifier[sources] [ literal[int] ]. identifier[streams] . identifier[values] ()
keyword[elif] identifier[len] ( identifier[self] . identifier[sources] )== literal[int] :
identifier[selector_node] = identifier[self] . identifier[sources] [ literal[int] ]
keyword[if] identifier[len] ( identifier[selector_node] . identifier[streams] )!= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[sources] =[ identifier[self] . identifier[sources] [ literal[int] ]. identifier[streams] [ keyword[None] ], identifier[self] . identifier[sources] [ literal[int] ]. identifier[streams] . identifier[values] ()]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[alignment_node] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[diff] , identifier[counts] , identifier[is_sub_plate] = identifier[self] . identifier[sources] [- literal[int] ]. identifier[difference] ( identifier[self] . identifier[sink] )
keyword[if] ( identifier[counts] ==[ literal[int] , literal[int] ] keyword[and] identifier[is_sub_plate] ) keyword[or] ( identifier[len] ( identifier[self] . identifier[sink] . identifier[plates] )== literal[int] keyword[and] identifier[counts] ==[ literal[int] , literal[int] ] keyword[and] identifier[is_sub_plate] ) keyword[or] ( identifier[next] ( identifier[p] . identifier[is_root] keyword[for] identifier[p] keyword[in] identifier[self] . identifier[sources] [- literal[int] ]. identifier[plates] )
keyword[and] identifier[len] ( identifier[self] . identifier[sink] . identifier[plates] )== literal[int]
keyword[and] identifier[self] . identifier[sink] . identifier[plates] [ literal[int] ] keyword[in] identifier[self] . identifier[sources] [- literal[int] ]. identifier[plates] ):
identifier[self] . identifier[tool] . identifier[execute] ( identifier[sources] = identifier[sources] ,
identifier[sinks] = identifier[self] . identifier[sink] . identifier[streams] . identifier[values] (),
identifier[interval] = identifier[time_interval] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
keyword[if] identifier[len] ( identifier[self] . identifier[plates] )== literal[int] :
identifier[plate] = identifier[self] . identifier[plates] [ literal[int] ]
keyword[for] identifier[pv] keyword[in] identifier[plate] . identifier[values] :
identifier[sources] = identifier[self] . identifier[get_sources] ( identifier[plate] , identifier[pv] )
identifier[sink] = identifier[self] . identifier[sink] . identifier[streams] [ identifier[pv] ]
identifier[self] . identifier[tool] . identifier[execute] ( identifier[sources] = identifier[sources] , identifier[sink] = identifier[sink] , identifier[interval] = identifier[time_interval] ,
identifier[alignment_stream] = identifier[self] . identifier[get_alignment_stream] ( keyword[None] , keyword[None] ))
keyword[else] :
keyword[if] identifier[len] ( identifier[self] . identifier[sources] )!= literal[int] keyword[and] keyword[not] identifier[all] ( identifier[s] . identifier[plates] == identifier[self] . identifier[plates] keyword[for] identifier[s] keyword[in] identifier[self] . identifier[sources] ):
identifier[source_plates] = identifier[sorted] ( identifier[p] . identifier[plate_id] keyword[for] identifier[s] keyword[in] identifier[self] . identifier[sources] keyword[for] identifier[p] keyword[in] identifier[s] . identifier[plates] )
identifier[self_plates] = identifier[sorted] ( identifier[p] . identifier[plate_id] keyword[for] identifier[p] keyword[in] identifier[self] . identifier[plates] )
keyword[if] identifier[source_plates] == identifier[self_plates] :
identifier[search] =[[ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[p] . identifier[values] ] keyword[for] identifier[p] keyword[in] identifier[self] . identifier[plates] ]
identifier[_pv] = identifier[sorted] ( identifier[itertools] . identifier[product] (* identifier[search] ))
keyword[for] identifier[pv] keyword[in] identifier[_pv] :
identifier[sources] =[ identifier[source] . identifier[streams] [ identifier[s] ] keyword[for] identifier[source] keyword[in] identifier[self] . identifier[sources]
keyword[for] identifier[s] keyword[in] identifier[source] . identifier[streams] keyword[if] ( identifier[s] [ literal[int] ] keyword[in] identifier[pv] )]
keyword[try] :
identifier[sink] = identifier[self] . identifier[sink] . identifier[streams] [ identifier[pv] ]
identifier[self] . identifier[tool] . identifier[execute] ( identifier[sources] = identifier[sources] , identifier[sink] = identifier[sink] , identifier[interval] = identifier[time_interval] ,
identifier[alignment_stream] = identifier[self] . identifier[get_alignment_stream] ( keyword[None] , keyword[None] ))
keyword[except] identifier[KeyError] keyword[as] identifier[e] :
keyword[continue]
keyword[else] :
keyword[raise] identifier[NotImplementedError]
keyword[for] identifier[pv] keyword[in] identifier[Plate] . identifier[get_overlapping_values] ( identifier[self] . identifier[plates] ):
identifier[sources] =[ identifier[source] . identifier[streams] [ identifier[s] ] keyword[for] identifier[source] keyword[in] identifier[self] . identifier[sources] keyword[for] identifier[s] keyword[in] identifier[source] . identifier[streams] keyword[if] identifier[pv] == identifier[s] ]
identifier[sink] = identifier[self] . identifier[sink] . identifier[streams] [ identifier[pv] ]
identifier[self] . identifier[tool] . identifier[execute] ( identifier[sources] = identifier[sources] , identifier[sink] = identifier[sink] , identifier[interval] = identifier[time_interval] ,
identifier[alignment_stream] = identifier[self] . identifier[get_alignment_stream] ( keyword[None] , keyword[None] ))
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[self] . identifier[tool] , identifier[AggregateTool] ):
keyword[if] identifier[len] ( identifier[self] . identifier[sources] )!= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[alignment_node] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[sources] = identifier[self] . identifier[sources] [ literal[int] ]. identifier[streams] . identifier[values] ()
identifier[sink] = identifier[self] . identifier[sink] . identifier[streams] [ keyword[None] ]
identifier[self] . identifier[tool] . identifier[execute] ( identifier[sources] = identifier[sources] , identifier[sink] = identifier[sink] , identifier[interval] = identifier[time_interval] , identifier[alignment_stream] = keyword[None] )
keyword[else] :
identifier[sources] = identifier[self] . identifier[get_global_sources] ()
identifier[sink] = identifier[self] . identifier[sink] . identifier[streams] [ keyword[None] ]
identifier[self] . identifier[tool] . identifier[execute] ( identifier[sources] = identifier[sources] , identifier[sink] = identifier[sink] , identifier[interval] = identifier[time_interval] ,
identifier[alignment_stream] = identifier[self] . identifier[get_alignment_stream] ( keyword[None] , keyword[None] ))
keyword[return] identifier[self] | def execute(self, time_interval):
"""
Execute the factor over the given time interval
:param time_interval:
:return:
"""
logging.info('{} with sink node {} running from {} to {}'.format(self.tool.__class__.__name__, self.sink.node_id, time_interval.start, time_interval.end))
if self.plates:
if isinstance(self.tool, AggregateTool):
if len(self.sources) != 1:
raise ValueError('Currently only a single source node is valid for an Aggregate Tool') # depends on [control=['if'], data=[]]
if self.alignment_node:
raise ValueError('Currently an alignment node cannot be used with an Aggregate Tool') # depends on [control=['if'], data=[]]
all_sources = self.sources[0]
# Here we should loop through the plate values of the sink, and get the sources that are appropriate for
# that given plate value, and pass only those sources to the tool. This is cleaner than making the tool
# deal with all of the sources
for pv in self.sink.plate_values:
sources = [all_sources.streams[s] for s in all_sources.streams if all([v in s for v in pv])]
sink = self.sink.streams[pv]
self.tool.execute(sources=sources, sink=sink, interval=time_interval, alignment_stream=None) # depends on [control=['for'], data=['pv']] # depends on [control=['if'], data=[]]
elif isinstance(self.tool, SelectorTool):
if len(self.sources) == 1:
sources = self.sources[0].streams.values() # depends on [control=['if'], data=[]]
elif len(self.sources) == 2:
selector_node = self.sources[0]
if len(selector_node.streams) != 1:
raise ValueError('Selector node should only have one stream') # depends on [control=['if'], data=[]]
sources = [self.sources[0].streams[None], self.sources[1].streams.values()] # depends on [control=['if'], data=[]]
else:
raise ValueError('Currently only one or two source nodes are valid for a Selector Tool')
if self.alignment_node:
raise ValueError('Currently an alignment node cannot be used with a Selector Tool') # depends on [control=['if'], data=[]]
(diff, counts, is_sub_plate) = self.sources[-1].difference(self.sink)
# TODO: This sub-plate selection is deprecated
if counts == [1, 1] and is_sub_plate or (len(self.sink.plates) == 1 and counts == [1, 0] and is_sub_plate) or (next((p.is_root for p in self.sources[-1].plates)) and len(self.sink.plates) == 1 and (self.sink.plates[0] in self.sources[-1].plates)):
# Special case of tools that are performing sub-selection
self.tool.execute(sources=sources, sinks=self.sink.streams.values(), interval=time_interval) # depends on [control=['if'], data=[]]
else:
raise ValueError('Source and sink plates do not match within a Selector Tool') # depends on [control=['if'], data=[]]
# TODO: This loop over plates is probably not correct:
# What we probably want is to take the cartesian product of plate values
elif len(self.plates) == 1:
plate = self.plates[0]
for pv in plate.values:
sources = self.get_sources(plate, pv)
sink = self.sink.streams[pv]
self.tool.execute(sources=sources, sink=sink, interval=time_interval, alignment_stream=self.get_alignment_stream(None, None)) # depends on [control=['for'], data=['pv']] # depends on [control=['if'], data=[]]
else:
if len(self.sources) != 1 and (not all((s.plates == self.plates for s in self.sources))):
source_plates = sorted((p.plate_id for s in self.sources for p in s.plates))
self_plates = sorted((p.plate_id for p in self.plates))
if source_plates == self_plates:
# This is the case where the sources are all on separate plates and the sink is the
# combination
search = [[x[0] for x in p.values] for p in self.plates]
_pv = sorted(itertools.product(*search))
for pv in _pv:
# Here we're selecting the streams that have the partial match of the plate value
sources = [source.streams[s] for source in self.sources for s in source.streams if s[0] in pv]
try:
sink = self.sink.streams[pv]
self.tool.execute(sources=sources, sink=sink, interval=time_interval, alignment_stream=self.get_alignment_stream(None, None)) # depends on [control=['try'], data=[]]
except KeyError as e:
continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['pv']] # depends on [control=['if'], data=[]]
else:
raise NotImplementedError # depends on [control=['if'], data=[]]
for pv in Plate.get_overlapping_values(self.plates):
sources = [source.streams[s] for source in self.sources for s in source.streams if pv == s]
sink = self.sink.streams[pv]
self.tool.execute(sources=sources, sink=sink, interval=time_interval, alignment_stream=self.get_alignment_stream(None, None)) # depends on [control=['for'], data=['pv']] # depends on [control=['if'], data=[]]
elif isinstance(self.tool, AggregateTool):
# raise ValueError("Cannot execute an AggregateTool if no plates are defined for the factor")
# Here we're trying to aggregate off a plate. This is only allowed for a single non-overlapping plate.
if len(self.sources) != 1:
raise ValueError('Currently only a single source node is valid for an Aggregate Tool') # depends on [control=['if'], data=[]]
if self.alignment_node:
raise ValueError('Currently an alignment node cannot be used with an Aggregate Tool') # depends on [control=['if'], data=[]]
sources = self.sources[0].streams.values()
sink = self.sink.streams[None]
self.tool.execute(sources=sources, sink=sink, interval=time_interval, alignment_stream=None) # depends on [control=['if'], data=[]]
else:
# sources = [source.streams[None] for source in self.sources] if self.sources else None
sources = self.get_global_sources()
sink = self.sink.streams[None]
self.tool.execute(sources=sources, sink=sink, interval=time_interval, alignment_stream=self.get_alignment_stream(None, None))
return self |
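A minimal, self-contained sketch of the cartesian-product plate matching used inside execute() above; the plate names, stream keys, and values are hypothetical stand-ins, not the real hyperstream objects.
import itertools

plates = {"house": ["1", "2"], "env": ["kitchen", "lounge"]}  # hypothetical
streams = {("1", "kitchen"): "motion", ("1", "lounge"): "temp",
           ("2", "kitchen"): "humidity"}                      # hypothetical

search = list(plates.values())
for pv in sorted(itertools.product(*search)):
    # Partial match, mirroring the `s[0] in pv` test in execute():
    # keep streams whose first key element occurs in the combination.
    sources = [name for key, name in streams.items() if key[0] in pv]
    print(pv, "->", sources)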
def add_review_date(self, doc, reviewed):
"""Sets the review date. Raises CardinalityError if
already set. OrderError if no reviewer defined before.
Raises SPDXValueError if invalid reviewed value.
"""
if len(doc.reviews) != 0:
if not self.review_date_set:
self.review_date_set = True
date = utils.datetime_from_iso_format(reviewed)
if date is not None:
doc.reviews[-1].review_date = date
return True
else:
raise SPDXValueError('Review::ReviewDate')
else:
raise CardinalityError('Review::ReviewDate')
else:
raise OrderError('Review::ReviewDate') | def function[add_review_date, parameter[self, doc, reviewed]]:
constant[Sets the review date. Raises CardinalityError if
already set, OrderError if no reviewer was defined before,
and SPDXValueError if the reviewed value is invalid.
]
if compare[call[name[len], parameter[name[doc].reviews]] not_equal[!=] constant[0]] begin[:]
if <ast.UnaryOp object at 0x7da1b01fda80> begin[:]
name[self].review_date_set assign[=] constant[True]
variable[date] assign[=] call[name[utils].datetime_from_iso_format, parameter[name[reviewed]]]
if compare[name[date] is_not constant[None]] begin[:]
call[name[doc].reviews][<ast.UnaryOp object at 0x7da1b020d6c0>].review_date assign[=] name[date]
return[constant[True]] | keyword[def] identifier[add_review_date] ( identifier[self] , identifier[doc] , identifier[reviewed] ):
literal[string]
keyword[if] identifier[len] ( identifier[doc] . identifier[reviews] )!= literal[int] :
keyword[if] keyword[not] identifier[self] . identifier[review_date_set] :
identifier[self] . identifier[review_date_set] = keyword[True]
identifier[date] = identifier[utils] . identifier[datetime_from_iso_format] ( identifier[reviewed] )
keyword[if] identifier[date] keyword[is] keyword[not] keyword[None] :
identifier[doc] . identifier[reviews] [- literal[int] ]. identifier[review_date] = identifier[date]
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[SPDXValueError] ( literal[string] )
keyword[else] :
keyword[raise] identifier[CardinalityError] ( literal[string] )
keyword[else] :
keyword[raise] identifier[OrderError] ( literal[string] ) | def add_review_date(self, doc, reviewed):
"""Sets the review date. Raises CardinalityError if
already set. OrderError if no reviewer defined before.
Raises SPDXValueError if invalid reviewed value.
"""
if len(doc.reviews) != 0:
if not self.review_date_set:
self.review_date_set = True
date = utils.datetime_from_iso_format(reviewed)
if date is not None:
doc.reviews[-1].review_date = date
return True # depends on [control=['if'], data=['date']]
else:
raise SPDXValueError('Review::ReviewDate') # depends on [control=['if'], data=[]]
else:
raise CardinalityError('Review::ReviewDate') # depends on [control=['if'], data=[]]
else:
raise OrderError('Review::ReviewDate') |
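The date check above hinges on ISO-8601 parsing; below is a self-contained stand-in for utils.datetime_from_iso_format (the real helper's exact behaviour is assumed, not verified).
from datetime import datetime

def datetime_from_iso_format(value):
    # SPDX review dates look like 2010-01-29T18:30:22Z (UTC, Z-suffixed).
    try:
        return datetime.strptime(value, "%Y-%m-%dT%H:%M:%SZ")
    except ValueError:
        return None  # triggers the SPDXValueError branch above

print(datetime_from_iso_format("2010-01-29T18:30:22Z"))  # parsed datetime
print(datetime_from_iso_format("not-a-date"))            # None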
def current_version():
"""
Get the current version number from setup.py
"""
# Monkeypatch setuptools.setup so we get the version number
import setuptools
version = [None]
def monkey_setup(**settings):
version[0] = settings['version']
old_setup = setuptools.setup
setuptools.setup = monkey_setup
import setup # setup.py
reload(setup)
setuptools.setup = old_setup
return version[0] | def function[current_version, parameter[]]:
constant[
Get the current version number from setup.py
]
import module[setuptools]
variable[version] assign[=] list[[<ast.Constant object at 0x7da2054a6f20>]]
def function[monkey_setup, parameter[]]:
call[name[version]][constant[0]] assign[=] call[name[settings]][constant[version]]
variable[old_setup] assign[=] name[setuptools].setup
name[setuptools].setup assign[=] name[monkey_setup]
import module[setup]
call[name[reload], parameter[name[setup]]]
name[setuptools].setup assign[=] name[old_setup]
return[call[name[version]][constant[0]]] | keyword[def] identifier[current_version] ():
literal[string]
keyword[import] identifier[setuptools]
identifier[version] =[ keyword[None] ]
keyword[def] identifier[monkey_setup] (** identifier[settings] ):
identifier[version] [ literal[int] ]= identifier[settings] [ literal[string] ]
identifier[old_setup] = identifier[setuptools] . identifier[setup]
identifier[setuptools] . identifier[setup] = identifier[monkey_setup]
keyword[import] identifier[setup]
identifier[reload] ( identifier[setup] )
identifier[setuptools] . identifier[setup] = identifier[old_setup]
keyword[return] identifier[version] [ literal[int] ] | def current_version():
"""
Get the current version number from setup.py
"""
# Monkeypatch setuptools.setup so we get the version number
import setuptools
version = [None]
def monkey_setup(**settings):
version[0] = settings['version']
old_setup = setuptools.setup
setuptools.setup = monkey_setup
import setup # setup.py
reload(setup)
setuptools.setup = old_setup
return version[0] |
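The same capture-by-monkeypatch trick shown standalone; note that the bare reload() above is Python 2, so this sketch uses importlib.reload and assumes a setup.py is importable from sys.path.
import importlib
import setuptools

captured = {}

def fake_setup(**settings):
    captured.update(settings)  # grab whatever setup.py passes in

original_setup = setuptools.setup
setuptools.setup = fake_setup
try:
    import setup               # hypothetical: a setup.py on sys.path
    importlib.reload(setup)    # re-run even if already imported
finally:
    setuptools.setup = original_setup
print(captured.get("version"))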
def get_topclasses(cls):
"""Gets the base classes that are in pycbc."""
bases = [c for c in inspect.getmro(cls)
if c.__module__.startswith('pycbc') and c != cls]
return ', '.join(['{}.{}'.format(c.__module__, c.__name__) for c in bases]) | def function[get_topclasses, parameter[cls]]:
constant[Gets the base classes that are in pycbc.]
variable[bases] assign[=] <ast.ListComp object at 0x7da20c990fd0>
return[call[constant[, ].join, parameter[<ast.ListComp object at 0x7da207f00580>]]] | keyword[def] identifier[get_topclasses] ( identifier[cls] ):
literal[string]
identifier[bases] =[ identifier[c] keyword[for] identifier[c] keyword[in] identifier[inspect] . identifier[getmro] ( identifier[cls] )
keyword[if] identifier[c] . identifier[__module__] . identifier[startswith] ( literal[string] ) keyword[and] identifier[c] != identifier[cls] ]
keyword[return] literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[c] . identifier[__module__] , identifier[c] . identifier[__name__] ) keyword[for] identifier[c] keyword[in] identifier[bases] ]) | def get_topclasses(cls):
"""Gets the base classes that are in pycbc."""
bases = [c for c in inspect.getmro(cls) if c.__module__.startswith('pycbc') and c != cls]
return ', '.join(['{}.{}'.format(c.__module__, c.__name__) for c in bases]) |
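A self-contained illustration of the MRO filtering above, with local classes standing in for the pycbc hierarchy.
import inspect

class Base(object):
    pass

class Child(Base):
    pass

bases = [c for c in inspect.getmro(Child) if c not in (Child, object)]
print(", ".join("{}.{}".format(c.__module__, c.__name__) for c in bases))
# -> __main__.Base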
def get_result(api, trans_id, id_get):
'''
Check if the given id_get and trans_id are from a valid transaction.
'''
check_values = {'api': api, 'id_get': id_get, 'trans_id': trans_id}
check_transaction = requests.post(CHECK_URL_FINAL, data=check_values)
result = check_transaction.text
if result == "1":
print(".تراکنش موفقیت آمیز بوده است")
return True
elif result == "-1":
print(
" apiارسالی با نوع apiتعریف شده در paylineسازگار نیست.")
elif result == "-2":
print(".ارسال شده معتبر نمی باشد trans_id")
elif result == "-3":
print(".id_get ارسال شده معتبر نمی باشد")
elif result == "-4":
print(".چنین تراکنشی در سیستم وجود ندارد و یا موفقیت آمیز نبوده است")
else:
print("some error(s) occurred, please try again.") | def function[get_result, parameter[api, trans_id, id_get]]:
constant[
Check if the given id_get and trans_id are from a valid transaction.
]
variable[check_values] assign[=] dictionary[[<ast.Constant object at 0x7da18dc04fd0>, <ast.Constant object at 0x7da18dc04670>, <ast.Constant object at 0x7da18dc06110>], [<ast.Name object at 0x7da18dc05cc0>, <ast.Name object at 0x7da18f00d600>, <ast.Name object at 0x7da18f00fb20>]]
variable[check_transaction] assign[=] call[name[requests].post, parameter[name[CHECK_URL_FINAL]]]
variable[result] assign[=] name[check_transaction].text
if compare[name[result] equal[==] constant[1]] begin[:]
call[name[print], parameter[constant[The transaction was successful.]]]
return[constant[True]] | keyword[def] identifier[get_result] ( identifier[api] , identifier[trans_id] , identifier[id_get] ):
literal[string]
identifier[check_values] ={ literal[string] : identifier[api] , literal[string] : identifier[id_get] , literal[string] : identifier[trans_id] }
identifier[check_transaction] = identifier[requests] . identifier[post] ( identifier[CHECK_URL_FINAL] , identifier[data] = identifier[check_values] )
identifier[result] = identifier[check_transaction] . identifier[text]
keyword[if] identifier[result] == literal[string] :
identifier[print] ( literal[string] )
keyword[return] keyword[True]
keyword[elif] identifier[result] == literal[string] :
identifier[print] (
literal[string] )
keyword[elif] identifier[result] == literal[string] :
identifier[print] ( literal[string] )
keyword[elif] identifier[result] == literal[string] :
identifier[print] ( literal[string] )
keyword[elif] identifier[result] == literal[string] :
identifier[print] ( literal[string] )
keyword[else] :
identifier[print] ( literal[string] ) | def get_result(api, trans_id, id_get):
"""
Check if the given id_get and trans_id are from a valid transaction.
"""
check_values = {'api': api, 'id_get': id_get, 'trans_id': trans_id}
check_transaction = requests.post(CHECK_URL_FINAL, data=check_values)
result = check_transaction.text
if result == '1':
print('The transaction was successful.')
return True # depends on [control=['if'], data=[]]
elif result == '-1':
print('The submitted api is not compatible with the api type defined in payline.') # depends on [control=['if'], data=[]]
elif result == '-2':
print('The submitted trans_id is not valid.') # depends on [control=['if'], data=[]]
elif result == '-3':
print('The submitted id_get is not valid.') # depends on [control=['if'], data=[]]
elif result == '-4':
print('No such transaction exists in the system, or it was not successful.') # depends on [control=['if'], data=[]]
else:
print('some error(s) occurred, please try again.') |
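A hypothetical post-redirect call, assuming api is the merchant key and trans_id/id_get come back from the payline gateway; this hits the live endpoint, so the values here are placeholders.
paid = get_result(api="my-merchant-key", trans_id="1234", id_get="5678")
print("verified" if paid else "rejected")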
def eval(self, text):
"""Respond to text entered by the user.
:param text: the user's input
"""
program = Program(text, echo=self.echo, transforms=self.transforms)
tokens = program.gen_tokens()
for sentence in program.gen_sentences(tokens, self.aliases):
if self.echo:
self.terminal.debug(str(sentence))
program.interpret(sentence, self.commands) | def function[eval, parameter[self, text]]:
constant[Respond to text entered by the user.
:param text: the user's input
]
variable[program] assign[=] call[name[Program], parameter[name[text]]]
variable[tokens] assign[=] call[name[program].gen_tokens, parameter[]]
for taget[name[sentence]] in starred[call[name[program].gen_sentences, parameter[name[tokens], name[self].aliases]]] begin[:]
if name[self].echo begin[:]
call[name[self].terminal.debug, parameter[call[name[str], parameter[name[sentence]]]]]
call[name[program].interpret, parameter[name[sentence], name[self].commands]] | keyword[def] identifier[eval] ( identifier[self] , identifier[text] ):
literal[string]
identifier[program] = identifier[Program] ( identifier[text] , identifier[echo] = identifier[self] . identifier[echo] , identifier[transforms] = identifier[self] . identifier[transforms] )
identifier[tokens] = identifier[program] . identifier[gen_tokens] ()
keyword[for] identifier[sentence] keyword[in] identifier[program] . identifier[gen_sentences] ( identifier[tokens] , identifier[self] . identifier[aliases] ):
keyword[if] identifier[self] . identifier[echo] :
identifier[self] . identifier[terminal] . identifier[debug] ( identifier[str] ( identifier[sentence] ))
identifier[program] . identifier[interpret] ( identifier[sentence] , identifier[self] . identifier[commands] ) | def eval(self, text):
"""Respond to text entered by the user.
:param text: the user's input
"""
program = Program(text, echo=self.echo, transforms=self.transforms)
tokens = program.gen_tokens()
for sentence in program.gen_sentences(tokens, self.aliases):
if self.echo:
self.terminal.debug(str(sentence)) # depends on [control=['if'], data=[]]
program.interpret(sentence, self.commands) # depends on [control=['for'], data=['sentence']] |
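A minimal driver loop, assuming the surrounding shell class exposes eval() as defined above; the Shell constructor is an illustrative stand-in.
shell = Shell(echo=False)  # hypothetical wrapper exposing eval()
while True:
    try:
        shell.eval(input("> "))
    except EOFError:
        break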
def from_windows_handle(std_handle):
"""
Use the Windows Console Handles API to get the console width,
where ``std_handle`` is the WINAPI ``GetStdHandle`` input
(e.g. STD_INPUT_HANDLE).
https://msdn.microsoft.com/library/windows/desktop/ms682075
"""
from ctypes import windll, c_ushort
# https://msdn.microsoft.com/library/windows/desktop/ms683231
handle = windll.kernel32.GetStdHandle(std_handle)
# https://msdn.microsoft.com/library/windows/desktop/ms682093
info = (c_ushort * 11)() # It's a CONSOLE_SCREEN_BUFFER_INFO:
# xsize, ysize, | COORD dwSize
# xcursor, ycursor, | COORD dwCursorPosition
# attributes, | WORD wAttributes
# left, top, right, bottom, | SMALL_RECT srWindow
# xmax, ymax | COORD dwMaximumWindowSize
# https://msdn.microsoft.com/library/windows/desktop/ms683171
if windll.kernel32.GetConsoleScreenBufferInfo(handle, info):
return info[7] - info[5] + 1 | def function[from_windows_handle, parameter[std_handle]]:
constant[
Use the Windows Console Handles API to get the console width,
where ``std_handle`` is the WINAPI ``GetStdHandle`` input
(e.g. STD_INPUT_HANDLE).
https://msdn.microsoft.com/library/windows/desktop/ms682075
]
from relative_module[ctypes] import module[windll], module[c_ushort]
variable[handle] assign[=] call[name[windll].kernel32.GetStdHandle, parameter[name[std_handle]]]
variable[info] assign[=] call[binary_operation[name[c_ushort] * constant[11]], parameter[]]
if call[name[windll].kernel32.GetConsoleScreenBufferInfo, parameter[name[handle], name[info]]] begin[:]
return[binary_operation[binary_operation[call[name[info]][constant[7]] - call[name[info]][constant[5]]] + constant[1]]] | keyword[def] identifier[from_windows_handle] ( identifier[std_handle] ):
literal[string]
keyword[from] identifier[ctypes] keyword[import] identifier[windll] , identifier[c_ushort]
identifier[handle] = identifier[windll] . identifier[kernel32] . identifier[GetStdHandle] ( identifier[std_handle] )
identifier[info] =( identifier[c_ushort] * literal[int] )()
keyword[if] identifier[windll] . identifier[kernel32] . identifier[GetConsoleScreenBufferInfo] ( identifier[handle] , identifier[info] ):
keyword[return] identifier[info] [ literal[int] ]- identifier[info] [ literal[int] ]+ literal[int] | def from_windows_handle(std_handle):
"""
Use the Windows Console Handles API to get the console width,
where ``std_handle`` is the WINAPI ``GetStdHandle`` input
(e.g. STD_INPUT_HANDLE).
https://msdn.microsoft.com/library/windows/desktop/ms682075
"""
from ctypes import windll, c_ushort
# https://msdn.microsoft.com/library/windows/desktop/ms683231
handle = windll.kernel32.GetStdHandle(std_handle)
# https://msdn.microsoft.com/library/windows/desktop/ms682093
info = (c_ushort * 11)() # It's a CONSOLE_SCREEN_BUFFER_INFO:
# xsize, ysize, | COORD dwSize
# xcursor, ycursor, | COORD dwCursorPosition
# attributes, | WORD wAttributes
# left, top, right, bottom, | SMALL_RECT srWindow
# xmax, ymax | COORD dwMaximumWindowSize
# https://msdn.microsoft.com/library/windows/desktop/ms683171
if windll.kernel32.GetConsoleScreenBufferInfo(handle, info):
return info[7] - info[5] + 1 # depends on [control=['if'], data=[]] |
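Calling the helper with the documented WinAPI handle constants ((DWORD)-11 is STD_OUTPUT_HANDLE, (DWORD)-12 is STD_ERROR_HANDLE); Windows-only.
STD_OUTPUT_HANDLE = -11
width = from_windows_handle(STD_OUTPUT_HANDLE)
print(width)  # right - left + 1 columns, or None if the WinAPI call failed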
def supply_and_demand(
lcm, choosers, alternatives, alt_segmenter, price_col,
base_multiplier=None, clip_change_low=0.75, clip_change_high=1.25,
iterations=5, multiplier_func=None):
"""
Adjust real estate prices to compensate for supply and demand effects.
Parameters
----------
lcm : LocationChoiceModel
Used to calculate the probability of agents choosing among
alternatives. Must be fully configured and fitted.
choosers : pandas.DataFrame
alternatives : pandas.DataFrame
alt_segmenter : str, array, or pandas.Series
Will be used to segment alternatives and probabilities to do
comparisons of supply and demand by submarket.
If a string, it is expected to be the name of a column
in `alternatives`. If a Series it should have the same index
as `alternatives`.
price_col : str
The name of the column in `alternatives` that corresponds to price.
This column is what is adjusted by this model.
base_multiplier : pandas.Series, optional
A series describing a starting multiplier for submarket prices.
Index should be submarket IDs.
clip_change_low : float, optional
The minimum amount by which to multiply prices each iteration.
clip_change_high : float, optional
The maximum amount by which to multiply prices each iteration.
iterations : int, optional
Number of times to update prices based on supply/demand comparisons.
multiplier_func : function (returns Series, boolean)
A function which takes separate demand and supply Series
and returns a tuple where the first item is a Series with the
ratio of new price to old price (all indexes should be the same) -
by default the ratio of demand to supply is the ratio of the new
price to the old price. The second return value is a
boolean which when True tells this module to stop looping (that
convergence has been satisfied)
Returns
-------
new_prices : pandas.Series
Equivalent of the `price_col` in `alternatives`.
submarkets_ratios : pandas.Series
Price adjustment ratio for each submarket. If `base_multiplier` is
given this will be a cumulative multiplier including the
`base_multiplier` and the multipliers calculated for this year.
"""
logger.debug('start: calculating supply and demand price adjustment')
# copy alternatives so we don't modify the user's original
alternatives = alternatives.copy()
# if alt_segmenter is a string, get the actual column for segmenting demand
if isinstance(alt_segmenter, str):
alt_segmenter = alternatives[alt_segmenter]
elif isinstance(alt_segmenter, np.ndarray):
alt_segmenter = pd.Series(alt_segmenter, index=alternatives.index)
choosers, alternatives = lcm.apply_predict_filters(choosers, alternatives)
alt_segmenter = alt_segmenter.loc[alternatives.index]
# check base ratio and apply it to prices if given
if base_multiplier is not None:
bm = base_multiplier.loc[alt_segmenter]
bm.index = alt_segmenter.index
alternatives[price_col] = alternatives[price_col] * bm
base_multiplier = base_multiplier.copy()
for _ in range(iterations):
alts_muliplier, submarkets_multiplier, finished = _calculate_adjustment(
lcm, choosers, alternatives, alt_segmenter,
clip_change_low, clip_change_high, multiplier_func=multiplier_func)
alternatives[price_col] = alternatives[price_col] * alts_muliplier
# might need to initialize this for holding cumulative multiplier
if base_multiplier is None:
base_multiplier = pd.Series(
np.ones(len(submarkets_multiplier)),
index=submarkets_multiplier.index)
base_multiplier *= submarkets_multiplier
if finished:
break
logger.debug('finish: calculating supply and demand price adjustment')
return alternatives[price_col], base_multiplier | def function[supply_and_demand, parameter[lcm, choosers, alternatives, alt_segmenter, price_col, base_multiplier, clip_change_low, clip_change_high, iterations, multiplier_func]]:
constant[
Adjust real estate prices to compensate for supply and demand effects.
Parameters
----------
lcm : LocationChoiceModel
Used to calculate the probability of agents choosing among
alternatives. Must be fully configured and fitted.
choosers : pandas.DataFrame
alternatives : pandas.DataFrame
alt_segmenter : str, array, or pandas.Series
Will be used to segment alternatives and probabilities to do
comparisons of supply and demand by submarket.
If a string, it is expected to be the name of a column
in `alternatives`. If a Series it should have the same index
as `alternatives`.
price_col : str
The name of the column in `alternatives` that corresponds to price.
This column is what is adjusted by this model.
base_multiplier : pandas.Series, optional
A series describing a starting multiplier for submarket prices.
Index should be submarket IDs.
clip_change_low : float, optional
The minimum amount by which to multiply prices each iteration.
clip_change_high : float, optional
The maximum amount by which to multiply prices each iteration.
iterations : int, optional
Number of times to update prices based on supply/demand comparisons.
multiplier_func : function (returns Series, boolean)
A function which takes separate demand and supply Series
and returns a tuple where the first item is a Series with the
ratio of new price to old price (all indexes should be the same) -
by default the ratio of demand to supply is the ratio of the new
price to the old price. The second return value is a
boolean which when True tells this module to stop looping (that
convergence has been satisfied)
Returns
-------
new_prices : pandas.Series
Equivalent of the `price_col` in `alternatives`.
submarkets_ratios : pandas.Series
Price adjustment ratio for each submarket. If `base_multiplier` is
given this will be a cumulative multiplier including the
`base_multiplier` and the multipliers calculated for this year.
]
call[name[logger].debug, parameter[constant[start: calculating supply and demand price adjustment]]]
variable[alternatives] assign[=] call[name[alternatives].copy, parameter[]]
if call[name[isinstance], parameter[name[alt_segmenter], name[str]]] begin[:]
variable[alt_segmenter] assign[=] call[name[alternatives]][name[alt_segmenter]]
<ast.Tuple object at 0x7da2054a5630> assign[=] call[name[lcm].apply_predict_filters, parameter[name[choosers], name[alternatives]]]
variable[alt_segmenter] assign[=] call[name[alt_segmenter].loc][name[alternatives].index]
if compare[name[base_multiplier] is_not constant[None]] begin[:]
variable[bm] assign[=] call[name[base_multiplier].loc][name[alt_segmenter]]
name[bm].index assign[=] name[alt_segmenter].index
call[name[alternatives]][name[price_col]] assign[=] binary_operation[call[name[alternatives]][name[price_col]] * name[bm]]
variable[base_multiplier] assign[=] call[name[base_multiplier].copy, parameter[]]
for taget[name[_]] in starred[call[name[range], parameter[name[iterations]]]] begin[:]
<ast.Tuple object at 0x7da2054a7910> assign[=] call[name[_calculate_adjustment], parameter[name[lcm], name[choosers], name[alternatives], name[alt_segmenter], name[clip_change_low], name[clip_change_high]]]
call[name[alternatives]][name[price_col]] assign[=] binary_operation[call[name[alternatives]][name[price_col]] * name[alts_muliplier]]
if compare[name[base_multiplier] is constant[None]] begin[:]
variable[base_multiplier] assign[=] call[name[pd].Series, parameter[call[name[np].ones, parameter[call[name[len], parameter[name[submarkets_multiplier]]]]]]]
<ast.AugAssign object at 0x7da18f09c2b0>
if name[finished] begin[:]
break
call[name[logger].debug, parameter[constant[finish: calculating supply and demand price adjustment]]]
return[tuple[[<ast.Subscript object at 0x7da18f09ff40>, <ast.Name object at 0x7da18f09d570>]]] | keyword[def] identifier[supply_and_demand] (
identifier[lcm] , identifier[choosers] , identifier[alternatives] , identifier[alt_segmenter] , identifier[price_col] ,
identifier[base_multiplier] = keyword[None] , identifier[clip_change_low] = literal[int] , identifier[clip_change_high] = literal[int] ,
identifier[iterations] = literal[int] , identifier[multiplier_func] = keyword[None] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] )
identifier[alternatives] = identifier[alternatives] . identifier[copy] ()
keyword[if] identifier[isinstance] ( identifier[alt_segmenter] , identifier[str] ):
identifier[alt_segmenter] = identifier[alternatives] [ identifier[alt_segmenter] ]
keyword[elif] identifier[isinstance] ( identifier[alt_segmenter] , identifier[np] . identifier[ndarray] ):
identifier[alt_segmenter] = identifier[pd] . identifier[Series] ( identifier[alt_segmenter] , identifier[index] = identifier[alternatives] . identifier[index] )
identifier[choosers] , identifier[alternatives] = identifier[lcm] . identifier[apply_predict_filters] ( identifier[choosers] , identifier[alternatives] )
identifier[alt_segmenter] = identifier[alt_segmenter] . identifier[loc] [ identifier[alternatives] . identifier[index] ]
keyword[if] identifier[base_multiplier] keyword[is] keyword[not] keyword[None] :
identifier[bm] = identifier[base_multiplier] . identifier[loc] [ identifier[alt_segmenter] ]
identifier[bm] . identifier[index] = identifier[alt_segmenter] . identifier[index]
identifier[alternatives] [ identifier[price_col] ]= identifier[alternatives] [ identifier[price_col] ]* identifier[bm]
identifier[base_multiplier] = identifier[base_multiplier] . identifier[copy] ()
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[iterations] ):
identifier[alts_muliplier] , identifier[submarkets_multiplier] , identifier[finished] = identifier[_calculate_adjustment] (
identifier[lcm] , identifier[choosers] , identifier[alternatives] , identifier[alt_segmenter] ,
identifier[clip_change_low] , identifier[clip_change_high] , identifier[multiplier_func] = identifier[multiplier_func] )
identifier[alternatives] [ identifier[price_col] ]= identifier[alternatives] [ identifier[price_col] ]* identifier[alts_muliplier]
keyword[if] identifier[base_multiplier] keyword[is] keyword[None] :
identifier[base_multiplier] = identifier[pd] . identifier[Series] (
identifier[np] . identifier[ones] ( identifier[len] ( identifier[submarkets_multiplier] )),
identifier[index] = identifier[submarkets_multiplier] . identifier[index] )
identifier[base_multiplier] *= identifier[submarkets_multiplier]
keyword[if] identifier[finished] :
keyword[break]
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] identifier[alternatives] [ identifier[price_col] ], identifier[base_multiplier] | def supply_and_demand(lcm, choosers, alternatives, alt_segmenter, price_col, base_multiplier=None, clip_change_low=0.75, clip_change_high=1.25, iterations=5, multiplier_func=None):
"""
Adjust real estate prices to compensate for supply and demand effects.
Parameters
----------
lcm : LocationChoiceModel
Used to calculate the probability of agents choosing among
alternatives. Must be fully configured and fitted.
choosers : pandas.DataFrame
alternatives : pandas.DataFrame
alt_segmenter : str, array, or pandas.Series
Will be used to segment alternatives and probabilities to do
comparisons of supply and demand by submarket.
If a string, it is expected to be the name of a column
in `alternatives`. If a Series it should have the same index
as `alternatives`.
price_col : str
The name of the column in `alternatives` that corresponds to price.
This column is what is adjusted by this model.
base_multiplier : pandas.Series, optional
A series describing a starting multiplier for submarket prices.
Index should be submarket IDs.
clip_change_low : float, optional
The minimum amount by which to multiply prices each iteration.
clip_change_high : float, optional
The maximum amount by which to multiply prices each iteration.
iterations : int, optional
Number of times to update prices based on supply/demand comparisons.
multiplier_func : function (returns Series, boolean)
A function which takes separate demand and supply Series
and returns a tuple where the first item is a Series with the
ratio of new price to old price (all indexes should be the same) -
by default the ratio of demand to supply is the ratio of the new
price to the old price. The second return value is a
boolean which when True tells this module to stop looping (that
convergence has been satisfied)
Returns
-------
new_prices : pandas.Series
Equivalent of the `price_col` in `alternatives`.
submarkets_ratios : pandas.Series
Price adjustment ratio for each submarket. If `base_multiplier` is
given this will be a cummulative multiplier including the
`base_multiplier` and the multipliers calculated for this year.
"""
logger.debug('start: calculating supply and demand price adjustment')
# copy alternatives so we don't modify the user's original
alternatives = alternatives.copy()
# if alt_segmenter is a string, get the actual column for segmenting demand
if isinstance(alt_segmenter, str):
alt_segmenter = alternatives[alt_segmenter] # depends on [control=['if'], data=[]]
elif isinstance(alt_segmenter, np.ndarray):
alt_segmenter = pd.Series(alt_segmenter, index=alternatives.index) # depends on [control=['if'], data=[]]
(choosers, alternatives) = lcm.apply_predict_filters(choosers, alternatives)
alt_segmenter = alt_segmenter.loc[alternatives.index]
# check base ratio and apply it to prices if given
if base_multiplier is not None:
bm = base_multiplier.loc[alt_segmenter]
bm.index = alt_segmenter.index
alternatives[price_col] = alternatives[price_col] * bm
base_multiplier = base_multiplier.copy() # depends on [control=['if'], data=['base_multiplier']]
for _ in range(iterations):
(alts_muliplier, submarkets_multiplier, finished) = _calculate_adjustment(lcm, choosers, alternatives, alt_segmenter, clip_change_low, clip_change_high, multiplier_func=multiplier_func)
alternatives[price_col] = alternatives[price_col] * alts_muliplier
# might need to initialize this for holding cumulative multiplier
if base_multiplier is None:
base_multiplier = pd.Series(np.ones(len(submarkets_multiplier)), index=submarkets_multiplier.index) # depends on [control=['if'], data=['base_multiplier']]
base_multiplier *= submarkets_multiplier
if finished:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
logger.debug('finish: calculating supply and demand price adjustment')
return (alternatives[price_col], base_multiplier) |
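A hypothetical invocation in the urbansim style the docstring suggests, assuming a fitted location choice model plus chooser/alternative tables; all table and column names here are stand-ins.
new_prices, submarket_ratios = supply_and_demand(
    lcm, households, buildings,
    alt_segmenter="submarket_id",
    price_col="unit_price",
    clip_change_low=0.9,
    clip_change_high=1.1,
    iterations=10)
buildings["unit_price"] = new_prices
# carry submarket_ratios into next year's run as base_multiplier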
def inflate(self):
"""Load the collection from the server, if necessary."""
if not self._is_inflated:
self.check_version()
for k, v in self._filter.items():
if '[' in v:
self._filter[k] = ast.literal_eval(v)
self.load(self.client.get(self.url, params=self._filter))
self._is_inflated = True
return self | def function[inflate, parameter[self]]:
constant[Load the collection from the server, if necessary.]
if <ast.UnaryOp object at 0x7da1b2344b80> begin[:]
call[name[self].check_version, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b2344730>, <ast.Name object at 0x7da1b2346d70>]]] in starred[call[name[self]._filter.items, parameter[]]] begin[:]
if compare[constant[[] in name[v]] begin[:]
call[name[self]._filter][name[k]] assign[=] call[name[ast].literal_eval, parameter[name[v]]]
call[name[self].load, parameter[call[name[self].client.get, parameter[name[self].url]]]]
name[self]._is_inflated assign[=] constant[True]
return[name[self]] | keyword[def] identifier[inflate] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_is_inflated] :
identifier[self] . identifier[check_version] ()
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[_filter] . identifier[items] ():
keyword[if] literal[string] keyword[in] identifier[v] :
identifier[self] . identifier[_filter] [ identifier[k] ]= identifier[ast] . identifier[literal_eval] ( identifier[v] )
identifier[self] . identifier[load] ( identifier[self] . identifier[client] . identifier[get] ( identifier[self] . identifier[url] , identifier[params] = identifier[self] . identifier[_filter] ))
identifier[self] . identifier[_is_inflated] = keyword[True]
keyword[return] identifier[self] | def inflate(self):
"""Load the collection from the server, if necessary."""
if not self._is_inflated:
self.check_version()
for (k, v) in self._filter.items():
if '[' in v:
self._filter[k] = ast.literal_eval(v) # depends on [control=['if'], data=['v']] # depends on [control=['for'], data=[]]
self.load(self.client.get(self.url, params=self._filter)) # depends on [control=['if'], data=[]]
self._is_inflated = True
return self |
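A hypothetical lazy-load sequence: no request is issued until inflate() runs, and bracketed filter strings are parsed back into lists via ast.literal_eval first; the cluster wiring is a stand-in for the real client.
hosts = cluster.hosts   # collection object only, no request yet
hosts.inflate()         # GET self.url with the parsed self._filter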
def _action_allowed(self, action):
"""
participation actions can be disabled at the layer level, or disabled on a per-node basis
"""
if getattr(self.layer.participation_settings, '{0}_allowed'.format(action)) is False:
return False
else:
return getattr(self.participation_settings, '{0}_allowed'.format(action)) | def function[_action_allowed, parameter[self, action]]:
constant[
participation actions can be disabled at the layer level, or disabled on a per-node basis
]
if compare[call[name[getattr], parameter[name[self].layer.participation_settings, call[constant[{0}_allowed].format, parameter[name[action]]]]] is constant[False]] begin[:]
return[constant[False]] | keyword[def] identifier[_action_allowed] ( identifier[self] , identifier[action] ):
literal[string]
keyword[if] identifier[getattr] ( identifier[self] . identifier[layer] . identifier[participation_settings] , literal[string] . identifier[format] ( identifier[action] )) keyword[is] keyword[False] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] identifier[getattr] ( identifier[self] . identifier[participation_settings] , literal[string] . identifier[format] ( identifier[action] )) | def _action_allowed(self, action):
"""
participation actions can be disabled at the layer level, or disabled on a per-node basis
"""
if getattr(self.layer.participation_settings, '{0}_allowed'.format(action)) is False:
return False # depends on [control=['if'], data=[]]
else:
return getattr(self.participation_settings, '{0}_allowed'.format(action)) |
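The two-level override in miniature: a layer-wide False wins outright, otherwise the node's own flag decides; the 'comment' action and the objects involved are hypothetical.
node.layer.participation_settings.comment_allowed = False
assert node._action_allowed("comment") is False  # layer veto wins

node.layer.participation_settings.comment_allowed = True
node.participation_settings.comment_allowed = False
assert node._action_allowed("comment") is False  # node-level setting decides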
def optimise_newton(x, a, c, tolerance=0.001):
"""
Optimise the value of x using Newton-Gauss iteration
"""
x_new = x
x_old = x-1 # dummy value
while np.abs(x_new - x_old).sum() > tolerance:
x_old = x_new
x_new = newton_update(x_old, a, c)
return x_new | def function[optimise_newton, parameter[x, a, c, tolerance]]:
constant[
Optimise the value of x using Newton-Gauss iteration
]
variable[x_new] assign[=] name[x]
variable[x_old] assign[=] binary_operation[name[x] - constant[1]]
while compare[call[call[name[np].abs, parameter[binary_operation[name[x_new] - name[x_old]]]].sum, parameter[]] greater[>] name[tolerance]] begin[:]
variable[x_old] assign[=] name[x_new]
variable[x_new] assign[=] call[name[newton_update], parameter[name[x_old], name[a], name[c]]]
return[name[x_new]] | keyword[def] identifier[optimise_newton] ( identifier[x] , identifier[a] , identifier[c] , identifier[tolerance] = literal[int] ):
literal[string]
identifier[x_new] = identifier[x]
identifier[x_old] = identifier[x] - literal[int]
keyword[while] identifier[np] . identifier[abs] ( identifier[x_new] - identifier[x_old] ). identifier[sum] ()> identifier[tolerance] :
identifier[x_old] = identifier[x_new]
identifier[x_new] = identifier[newton_update] ( identifier[x_old] , identifier[a] , identifier[c] )
keyword[return] identifier[x_new] | def optimise_newton(x, a, c, tolerance=0.001):
"""
Optimise the value of x using Newton-Gauss iteration
"""
x_new = x
x_old = x - 1 # dummy value
while np.abs(x_new - x_old).sum() > tolerance:
x_old = x_new
x_new = newton_update(x_old, a, c) # depends on [control=['while'], data=[]]
return x_new |
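A concrete instance of the fixed-point loop above, using Newton's update for sqrt(a) (x_new = (x + a/x) / 2, with c unused); newton_update here is a stand-in for the module's real update rule.
import numpy as np

def newton_update(x, a, c):
    return (x + a / x) / 2.0  # Newton step for f(x) = x**2 - a

print(optimise_newton(np.array([1.0]), a=2.0, c=None))  # ~ [1.41421356]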
def insert_model(self, model, *, upsert=None):
"""Inserts a record for the given model.
If the model's primary key is auto, the primary key will be set appropriately.
"""
pkname = model.primary_key_name
include_keys = set(model.attrs.keys()).difference(model.exclude_keys_sql)
if model.primary_key_is_auto:
if pkname in include_keys:
include_keys.remove(pkname)
else:
if isinstance(pkname, str):
include_keys.add(pkname)
else:
include_keys.update(set(pkname))
data = model.to_dict(include_keys=include_keys)
returnings = []
if model.primary_key_is_auto:
returnings.append(pkname)
if model.timestamps:
returnings.extend(ts_name for ts_name in model.timestamps if ts_name)
returning = ", ".join(returnings)
cr = self.insert(model.table_name, data, returning=returning, upsert=upsert)
if self.core.supports_returning_syntax:
if returning:
rec = cr.fetchone()
if rec:
for idx, attr_name in enumerate(returnings):
setattr(model, attr_name, rec[idx])
else:
if model.primary_key_is_auto:
setattr(model, model.primary_key_name, cr.lastrowid)
return model | def function[insert_model, parameter[self, model]]:
constant[Inserts a record for the given model.
If the model's primary key is auto, the primary key will be set appropriately.
]
variable[pkname] assign[=] name[model].primary_key_name
variable[include_keys] assign[=] call[call[name[set], parameter[call[name[model].attrs.keys, parameter[]]]].difference, parameter[name[model].exclude_keys_sql]]
if name[model].primary_key_is_auto begin[:]
if compare[name[pkname] in name[include_keys]] begin[:]
call[name[include_keys].remove, parameter[name[pkname]]]
variable[data] assign[=] call[name[model].to_dict, parameter[]]
variable[returnings] assign[=] list[[]]
if name[model].primary_key_is_auto begin[:]
call[name[returnings].append, parameter[name[pkname]]]
if name[model].timestamps begin[:]
call[name[returnings].extend, parameter[<ast.GeneratorExp object at 0x7da1b15d7010>]]
variable[returning] assign[=] call[constant[, ].join, parameter[name[returnings]]]
variable[cr] assign[=] call[name[self].insert, parameter[name[model].table_name, name[data]]]
if name[self].core.supports_returning_syntax begin[:]
if name[returning] begin[:]
variable[rec] assign[=] call[name[cr].fetchone, parameter[]]
if name[rec] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b15f0130>, <ast.Name object at 0x7da1b15f0460>]]] in starred[call[name[enumerate], parameter[name[returnings]]]] begin[:]
call[name[setattr], parameter[name[model], name[attr_name], call[name[rec]][name[idx]]]]
return[name[model]] | keyword[def] identifier[insert_model] ( identifier[self] , identifier[model] ,*, identifier[upsert] = keyword[None] ):
literal[string]
identifier[pkname] = identifier[model] . identifier[primary_key_name]
identifier[include_keys] = identifier[set] ( identifier[model] . identifier[attrs] . identifier[keys] ()). identifier[difference] ( identifier[model] . identifier[exclude_keys_sql] )
keyword[if] identifier[model] . identifier[primary_key_is_auto] :
keyword[if] identifier[pkname] keyword[in] identifier[include_keys] :
identifier[include_keys] . identifier[remove] ( identifier[pkname] )
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[pkname] , identifier[str] ):
identifier[include_keys] . identifier[add] ( identifier[pkname] )
keyword[else] :
identifier[include_keys] . identifier[update] ( identifier[set] ( identifier[pkname] ))
identifier[data] = identifier[model] . identifier[to_dict] ( identifier[include_keys] = identifier[include_keys] )
identifier[returnings] =[]
keyword[if] identifier[model] . identifier[primary_key_is_auto] :
identifier[returnings] . identifier[append] ( identifier[pkname] )
keyword[if] identifier[model] . identifier[timestamps] :
identifier[returnings] . identifier[extend] ( identifier[ts_name] keyword[for] identifier[ts_name] keyword[in] identifier[model] . identifier[timestamps] keyword[if] identifier[ts_name] )
identifier[returning] = literal[string] . identifier[join] ( identifier[returnings] )
identifier[cr] = identifier[self] . identifier[insert] ( identifier[model] . identifier[table_name] , identifier[data] , identifier[returning] = identifier[returning] , identifier[upsert] = identifier[upsert] )
keyword[if] identifier[self] . identifier[core] . identifier[supports_returning_syntax] :
keyword[if] identifier[returning] :
identifier[rec] = identifier[cr] . identifier[fetchone] ()
keyword[if] identifier[rec] :
keyword[for] identifier[idx] , identifier[attr_name] keyword[in] identifier[enumerate] ( identifier[returnings] ):
identifier[setattr] ( identifier[model] , identifier[attr_name] , identifier[rec] [ identifier[idx] ])
keyword[else] :
keyword[if] identifier[model] . identifier[primary_key_is_auto] :
identifier[setattr] ( identifier[model] , identifier[model] . identifier[primary_key_name] , identifier[cr] . identifier[lastrowid] )
keyword[return] identifier[model] | def insert_model(self, model, *, upsert=None):
"""Inserts a record for the given model.
If the model's primary key is auto, the primary key will be set appropriately.
"""
pkname = model.primary_key_name
include_keys = set(model.attrs.keys()).difference(model.exclude_keys_sql)
if model.primary_key_is_auto:
if pkname in include_keys:
include_keys.remove(pkname) # depends on [control=['if'], data=['pkname', 'include_keys']] # depends on [control=['if'], data=[]]
elif isinstance(pkname, str):
include_keys.add(pkname) # depends on [control=['if'], data=[]]
else:
include_keys.update(set(pkname))
data = model.to_dict(include_keys=include_keys)
returnings = []
if model.primary_key_is_auto:
returnings.append(pkname) # depends on [control=['if'], data=[]]
if model.timestamps:
returnings.extend((ts_name for ts_name in model.timestamps if ts_name)) # depends on [control=['if'], data=[]]
returning = ', '.join(returnings)
cr = self.insert(model.table_name, data, returning=returning, upsert=upsert)
if self.core.supports_returning_syntax:
if returning:
rec = cr.fetchone()
if rec:
for (idx, attr_name) in enumerate(returnings):
setattr(model, attr_name, rec[idx]) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif model.primary_key_is_auto:
setattr(model, model.primary_key_name, cr.lastrowid) # depends on [control=['if'], data=[]]
return model |
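A hypothetical round trip: after insert_model() the auto primary key (and any RETURNING-backed timestamps) are populated on the instance; User and db are stand-ins for whatever model/core layer defines insert_model.
user = User(name="ada")    # hypothetical model with an auto primary key
db.insert_model(user)
print(user.id)             # from RETURNING where supported, else lastrowid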
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the SignatureVerify response payload and decode
it into its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is missing from the
encoded payload.
"""
super(SignatureVerifyResponsePayload, self).read(
input_stream,
kmip_version=kmip_version
)
local_stream = utils.BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
self._unique_identifier = primitives.TextString(
tag=enums.Tags.UNIQUE_IDENTIFIER
)
self._unique_identifier.read(
local_stream,
kmip_version=kmip_version
)
else:
raise ValueError(
"Parsed payload encoding is missing the unique identifier "
"field."
)
if self.is_tag_next(enums.Tags.VALIDITY_INDICATOR, local_stream):
self._validity_indicator = primitives.Enumeration(
enums.ValidityIndicator,
tag=enums.Tags.VALIDITY_INDICATOR
)
self._validity_indicator.read(
local_stream,
kmip_version=kmip_version
)
else:
raise ValueError(
"Parsed payload encoding is missing the validity indicator "
"field."
)
if self.is_tag_next(enums.Tags.DATA, local_stream):
self._data = primitives.ByteString(tag=enums.Tags.DATA)
self._data.read(local_stream, kmip_version=kmip_version)
if self.is_tag_next(enums.Tags.CORRELATION_VALUE, local_stream):
self._correlation_value = primitives.ByteString(
tag=enums.Tags.CORRELATION_VALUE
)
self._correlation_value.read(
local_stream,
kmip_version=kmip_version
)
self.is_oversized(local_stream) | def function[read, parameter[self, input_stream, kmip_version]]:
constant[
Read the data encoding the SignatureVerify response payload and decode
it into its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is missing from the
encoded payload.
]
call[call[name[super], parameter[name[SignatureVerifyResponsePayload], name[self]]].read, parameter[name[input_stream]]]
variable[local_stream] assign[=] call[name[utils].BytearrayStream, parameter[call[name[input_stream].read, parameter[name[self].length]]]]
if call[name[self].is_tag_next, parameter[name[enums].Tags.UNIQUE_IDENTIFIER, name[local_stream]]] begin[:]
name[self]._unique_identifier assign[=] call[name[primitives].TextString, parameter[]]
call[name[self]._unique_identifier.read, parameter[name[local_stream]]]
if call[name[self].is_tag_next, parameter[name[enums].Tags.VALIDITY_INDICATOR, name[local_stream]]] begin[:]
name[self]._validity_indicator assign[=] call[name[primitives].Enumeration, parameter[name[enums].ValidityIndicator]]
call[name[self]._validity_indicator.read, parameter[name[local_stream]]]
if call[name[self].is_tag_next, parameter[name[enums].Tags.DATA, name[local_stream]]] begin[:]
name[self]._data assign[=] call[name[primitives].ByteString, parameter[]]
call[name[self]._data.read, parameter[name[local_stream]]]
if call[name[self].is_tag_next, parameter[name[enums].Tags.CORRELATION_VALUE, name[local_stream]]] begin[:]
name[self]._correlation_value assign[=] call[name[primitives].ByteString, parameter[]]
call[name[self]._correlation_value.read, parameter[name[local_stream]]]
call[name[self].is_oversized, parameter[name[local_stream]]] | keyword[def] identifier[read] ( identifier[self] , identifier[input_stream] , identifier[kmip_version] = identifier[enums] . identifier[KMIPVersion] . identifier[KMIP_1_0] ):
literal[string]
identifier[super] ( identifier[SignatureVerifyResponsePayload] , identifier[self] ). identifier[read] (
identifier[input_stream] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[local_stream] = identifier[utils] . identifier[BytearrayStream] ( identifier[input_stream] . identifier[read] ( identifier[self] . identifier[length] ))
keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[UNIQUE_IDENTIFIER] , identifier[local_stream] ):
identifier[self] . identifier[_unique_identifier] = identifier[primitives] . identifier[TextString] (
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[UNIQUE_IDENTIFIER]
)
identifier[self] . identifier[_unique_identifier] . identifier[read] (
identifier[local_stream] ,
identifier[kmip_version] = identifier[kmip_version]
)
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
)
keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[VALIDITY_INDICATOR] , identifier[local_stream] ):
identifier[self] . identifier[_validity_indicator] = identifier[primitives] . identifier[Enumeration] (
identifier[enums] . identifier[ValidityIndicator] ,
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[VALIDITY_INDICATOR]
)
identifier[self] . identifier[_validity_indicator] . identifier[read] (
identifier[local_stream] ,
identifier[kmip_version] = identifier[kmip_version]
)
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
)
keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[DATA] , identifier[local_stream] ):
identifier[self] . identifier[_data] = identifier[primitives] . identifier[ByteString] ( identifier[tag] = identifier[enums] . identifier[Tags] . identifier[DATA] )
identifier[self] . identifier[_data] . identifier[read] ( identifier[local_stream] , identifier[kmip_version] = identifier[kmip_version] )
keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[CORRELATION_VALUE] , identifier[local_stream] ):
identifier[self] . identifier[_correlation_value] = identifier[primitives] . identifier[ByteString] (
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[CORRELATION_VALUE]
)
identifier[self] . identifier[_correlation_value] . identifier[read] (
identifier[local_stream] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[self] . identifier[is_oversized] ( identifier[local_stream] ) | def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the SignatureVerify response payload and decode
it into its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the unique identifier or validity indicator
    is missing from the encoded payload.
"""
super(SignatureVerifyResponsePayload, self).read(input_stream, kmip_version=kmip_version)
local_stream = utils.BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)
self._unique_identifier.read(local_stream, kmip_version=kmip_version) # depends on [control=['if'], data=[]]
else:
raise ValueError('Parsed payload encoding is missing the unique identifier field.')
if self.is_tag_next(enums.Tags.VALIDITY_INDICATOR, local_stream):
self._validity_indicator = primitives.Enumeration(enums.ValidityIndicator, tag=enums.Tags.VALIDITY_INDICATOR)
self._validity_indicator.read(local_stream, kmip_version=kmip_version) # depends on [control=['if'], data=[]]
else:
raise ValueError('Parsed payload encoding is missing the validity indicator field.')
if self.is_tag_next(enums.Tags.DATA, local_stream):
self._data = primitives.ByteString(tag=enums.Tags.DATA)
self._data.read(local_stream, kmip_version=kmip_version) # depends on [control=['if'], data=[]]
if self.is_tag_next(enums.Tags.CORRELATION_VALUE, local_stream):
self._correlation_value = primitives.ByteString(tag=enums.Tags.CORRELATION_VALUE)
self._correlation_value.read(local_stream, kmip_version=kmip_version) # depends on [control=['if'], data=[]]
self.is_oversized(local_stream) |
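A minimal decoding sketch for the read method above, hedged: the import paths, the KMIPVersion value, and the placeholder encoding are assumptions rather than anything taken from this row.

from kmip.core import enums, utils
from kmip.core.messages import payloads

encoding = b'...'  # TTLV-encoded SignatureVerify response bytes (placeholder)
payload = payloads.SignatureVerifyResponsePayload()
# read() consumes the stream and populates the payload fields, raising
# ValueError when the unique identifier or validity indicator is absent
payload.read(utils.BytearrayStream(encoding), kmip_version=enums.KMIPVersion.KMIP_1_4)
print(payload.unique_identifier, payload.validity_indicator)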
def stem(self, word):
"""
Stem an Arabic word and return the stemmed form.
:param word: string
:return: string
"""
# set initial values
self.is_verb = True
self.is_noun = True
self.is_defined = False
self.suffix_verb_step2a_success = False
self.suffix_verb_step2b_success = False
self.suffix_noun_step2c2_success = False
self.suffix_noun_step1a_success = False
self.suffix_noun_step2a_success = False
self.suffix_noun_step2b_success = False
self.suffixe_noun_step1b_success = False
self.prefix_step2a_success = False
self.prefix_step3a_noun_success = False
self.prefix_step3b_noun_success = False
modified_word = word
# guess type and properties
# checks1
self.__checks_1(modified_word)
# checks2
self.__checks_2(modified_word)
# Pre_Normalization
modified_word = self.__normalize_pre(modified_word)
# Start stemming
if self.is_verb:
modified_word = self.__Suffix_Verb_Step1(modified_word)
if self.suffixes_verb_step1_success:
modified_word = self.__Suffix_Verb_Step2a(modified_word)
if not self.suffix_verb_step2a_success:
modified_word = self.__Suffix_Verb_Step2c(modified_word)
# TODO: decide how to handle the "or next" instruction here
else:
modified_word = self.__Suffix_Verb_Step2b(modified_word)
if not self.suffix_verb_step2b_success:
modified_word = self.__Suffix_Verb_Step2a(modified_word)
if self.is_noun:
modified_word = self.__Suffix_Noun_Step2c2(modified_word)
if not self.suffix_noun_step2c2_success:
if not self.is_defined:
modified_word = self.__Suffix_Noun_Step1a(modified_word)
# if self.suffix_noun_step1a_success:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
if not self.suffix_noun_step2a_success:
modified_word = self.__Suffix_Noun_Step2b(modified_word)
if (
not self.suffix_noun_step2b_success
and not self.suffix_noun_step2a_success
):
modified_word = self.__Suffix_Noun_Step2c1(modified_word)
# TODO: decide how to handle the "or next" instruction here
else:
modified_word = self.__Suffix_Noun_Step1b(modified_word)
if self.suffixe_noun_step1b_success:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
if not self.suffix_noun_step2a_success:
modified_word = self.__Suffix_Noun_Step2b(modified_word)
if (
not self.suffix_noun_step2b_success
and not self.suffix_noun_step2a_success
):
modified_word = self.__Suffix_Noun_Step2c1(modified_word)
else:
if not self.is_defined:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
modified_word = self.__Suffix_Noun_Step2b(modified_word)
modified_word = self.__Suffix_Noun_Step3(modified_word)
if not self.is_noun and self.is_verb:
modified_word = self.__Suffix_All_alef_maqsura(modified_word)
# prefixes
modified_word = self.__Prefix_Step1(modified_word)
modified_word = self.__Prefix_Step2a(modified_word)
if not self.prefix_step2a_success:
modified_word = self.__Prefix_Step2b(modified_word)
modified_word = self.__Prefix_Step3a_Noun(modified_word)
if not self.prefix_step3a_noun_success and self.is_noun:
modified_word = self.__Prefix_Step3b_Noun(modified_word)
else:
if not self.prefix_step3b_noun_success and self.is_verb:
modified_word = self.__Prefix_Step3_Verb(modified_word)
modified_word = self.__Prefix_Step4_Verb(modified_word)
# post normalization stemming
modified_word = self.__normalize_post(modified_word)
stemmed_word = modified_word
return stemmed_word | def function[stem, parameter[self, word]]:
constant[
Stem an Arabic word and return the stemmed form.
:param word: string
:return: string
]
name[self].is_verb assign[=] constant[True]
name[self].is_noun assign[=] constant[True]
name[self].is_defined assign[=] constant[False]
name[self].suffix_verb_step2a_success assign[=] constant[False]
name[self].suffix_verb_step2b_success assign[=] constant[False]
name[self].suffix_noun_step2c2_success assign[=] constant[False]
name[self].suffix_noun_step1a_success assign[=] constant[False]
name[self].suffix_noun_step2a_success assign[=] constant[False]
name[self].suffix_noun_step2b_success assign[=] constant[False]
name[self].suffixe_noun_step1b_success assign[=] constant[False]
name[self].prefix_step2a_success assign[=] constant[False]
name[self].prefix_step3a_noun_success assign[=] constant[False]
name[self].prefix_step3b_noun_success assign[=] constant[False]
variable[modified_word] assign[=] name[word]
call[name[self].__checks_1, parameter[name[modified_word]]]
call[name[self].__checks_2, parameter[name[modified_word]]]
variable[modified_word] assign[=] call[name[self].__normalize_pre, parameter[name[modified_word]]]
if name[self].is_verb begin[:]
variable[modified_word] assign[=] call[name[self].__Suffix_Verb_Step1, parameter[name[modified_word]]]
if name[self].suffixes_verb_step1_success begin[:]
variable[modified_word] assign[=] call[name[self].__Suffix_Verb_Step2a, parameter[name[modified_word]]]
if <ast.UnaryOp object at 0x7da1b1da6d10> begin[:]
variable[modified_word] assign[=] call[name[self].__Suffix_Verb_Step2c, parameter[name[modified_word]]]
if name[self].is_noun begin[:]
variable[modified_word] assign[=] call[name[self].__Suffix_Noun_Step2c2, parameter[name[modified_word]]]
if <ast.UnaryOp object at 0x7da1b1da5510> begin[:]
if <ast.UnaryOp object at 0x7da1b1da5450> begin[:]
variable[modified_word] assign[=] call[name[self].__Suffix_Noun_Step1a, parameter[name[modified_word]]]
variable[modified_word] assign[=] call[name[self].__Suffix_Noun_Step2a, parameter[name[modified_word]]]
if <ast.UnaryOp object at 0x7da1b1da5150> begin[:]
variable[modified_word] assign[=] call[name[self].__Suffix_Noun_Step2b, parameter[name[modified_word]]]
if <ast.BoolOp object at 0x7da1b1da4f70> begin[:]
variable[modified_word] assign[=] call[name[self].__Suffix_Noun_Step2c1, parameter[name[modified_word]]]
variable[modified_word] assign[=] call[name[self].__Suffix_Noun_Step3, parameter[name[modified_word]]]
if <ast.BoolOp object at 0x7da1b1da4160> begin[:]
variable[modified_word] assign[=] call[name[self].__Suffix_All_alef_maqsura, parameter[name[modified_word]]]
variable[modified_word] assign[=] call[name[self].__Prefix_Step1, parameter[name[modified_word]]]
variable[modified_word] assign[=] call[name[self].__Prefix_Step2a, parameter[name[modified_word]]]
if <ast.UnaryOp object at 0x7da1b1d043a0> begin[:]
variable[modified_word] assign[=] call[name[self].__Prefix_Step2b, parameter[name[modified_word]]]
variable[modified_word] assign[=] call[name[self].__Prefix_Step3a_Noun, parameter[name[modified_word]]]
if <ast.BoolOp object at 0x7da1b1d07460> begin[:]
variable[modified_word] assign[=] call[name[self].__Prefix_Step3b_Noun, parameter[name[modified_word]]]
variable[modified_word] assign[=] call[name[self].__normalize_post, parameter[name[modified_word]]]
variable[stemmed_word] assign[=] name[modified_word]
return[name[stemmed_word]] | keyword[def] identifier[stem] ( identifier[self] , identifier[word] ):
literal[string]
identifier[self] . identifier[is_verb] = keyword[True]
identifier[self] . identifier[is_noun] = keyword[True]
identifier[self] . identifier[is_defined] = keyword[False]
identifier[self] . identifier[suffix_verb_step2a_success] = keyword[False]
identifier[self] . identifier[suffix_verb_step2b_success] = keyword[False]
identifier[self] . identifier[suffix_noun_step2c2_success] = keyword[False]
identifier[self] . identifier[suffix_noun_step1a_success] = keyword[False]
identifier[self] . identifier[suffix_noun_step2a_success] = keyword[False]
identifier[self] . identifier[suffix_noun_step2b_success] = keyword[False]
identifier[self] . identifier[suffixe_noun_step1b_success] = keyword[False]
identifier[self] . identifier[prefix_step2a_success] = keyword[False]
identifier[self] . identifier[prefix_step3a_noun_success] = keyword[False]
identifier[self] . identifier[prefix_step3b_noun_success] = keyword[False]
identifier[modified_word] = identifier[word]
identifier[self] . identifier[__checks_1] ( identifier[modified_word] )
identifier[self] . identifier[__checks_2] ( identifier[modified_word] )
identifier[modified_word] = identifier[self] . identifier[__normalize_pre] ( identifier[modified_word] )
keyword[if] identifier[self] . identifier[is_verb] :
identifier[modified_word] = identifier[self] . identifier[__Suffix_Verb_Step1] ( identifier[modified_word] )
keyword[if] identifier[self] . identifier[suffixes_verb_step1_success] :
identifier[modified_word] = identifier[self] . identifier[__Suffix_Verb_Step2a] ( identifier[modified_word] )
keyword[if] keyword[not] identifier[self] . identifier[suffix_verb_step2a_success] :
identifier[modified_word] = identifier[self] . identifier[__Suffix_Verb_Step2c] ( identifier[modified_word] )
keyword[else] :
identifier[modified_word] = identifier[self] . identifier[__Suffix_Verb_Step2b] ( identifier[modified_word] )
keyword[if] keyword[not] identifier[self] . identifier[suffix_verb_step2b_success] :
identifier[modified_word] = identifier[self] . identifier[__Suffix_Verb_Step2a] ( identifier[modified_word] )
keyword[if] identifier[self] . identifier[is_noun] :
identifier[modified_word] = identifier[self] . identifier[__Suffix_Noun_Step2c2] ( identifier[modified_word] )
keyword[if] keyword[not] identifier[self] . identifier[suffix_noun_step2c2_success] :
keyword[if] keyword[not] identifier[self] . identifier[is_defined] :
identifier[modified_word] = identifier[self] . identifier[__Suffix_Noun_Step1a] ( identifier[modified_word] )
identifier[modified_word] = identifier[self] . identifier[__Suffix_Noun_Step2a] ( identifier[modified_word] )
keyword[if] keyword[not] identifier[self] . identifier[suffix_noun_step2a_success] :
identifier[modified_word] = identifier[self] . identifier[__Suffix_Noun_Step2b] ( identifier[modified_word] )
keyword[if] (
keyword[not] identifier[self] . identifier[suffix_noun_step2b_success]
keyword[and] keyword[not] identifier[self] . identifier[suffix_noun_step2a_success]
):
identifier[modified_word] = identifier[self] . identifier[__Suffix_Noun_Step2c1] ( identifier[modified_word] )
keyword[else] :
identifier[modified_word] = identifier[self] . identifier[__Suffix_Noun_Step1b] ( identifier[modified_word] )
keyword[if] identifier[self] . identifier[suffixe_noun_step1b_success] :
identifier[modified_word] = identifier[self] . identifier[__Suffix_Noun_Step2a] ( identifier[modified_word] )
keyword[if] keyword[not] identifier[self] . identifier[suffix_noun_step2a_success] :
identifier[modified_word] = identifier[self] . identifier[__Suffix_Noun_Step2b] ( identifier[modified_word] )
keyword[if] (
keyword[not] identifier[self] . identifier[suffix_noun_step2b_success]
keyword[and] keyword[not] identifier[self] . identifier[suffix_noun_step2a_success]
):
identifier[modified_word] = identifier[self] . identifier[__Suffix_Noun_Step2c1] ( identifier[modified_word] )
keyword[else] :
keyword[if] keyword[not] identifier[self] . identifier[is_defined] :
identifier[modified_word] = identifier[self] . identifier[__Suffix_Noun_Step2a] ( identifier[modified_word] )
identifier[modified_word] = identifier[self] . identifier[__Suffix_Noun_Step2b] ( identifier[modified_word] )
identifier[modified_word] = identifier[self] . identifier[__Suffix_Noun_Step3] ( identifier[modified_word] )
keyword[if] keyword[not] identifier[self] . identifier[is_noun] keyword[and] identifier[self] . identifier[is_verb] :
identifier[modified_word] = identifier[self] . identifier[__Suffix_All_alef_maqsura] ( identifier[modified_word] )
identifier[modified_word] = identifier[self] . identifier[__Prefix_Step1] ( identifier[modified_word] )
identifier[modified_word] = identifier[self] . identifier[__Prefix_Step2a] ( identifier[modified_word] )
keyword[if] keyword[not] identifier[self] . identifier[prefix_step2a_success] :
identifier[modified_word] = identifier[self] . identifier[__Prefix_Step2b] ( identifier[modified_word] )
identifier[modified_word] = identifier[self] . identifier[__Prefix_Step3a_Noun] ( identifier[modified_word] )
keyword[if] keyword[not] identifier[self] . identifier[prefix_step3a_noun_success] keyword[and] identifier[self] . identifier[is_noun] :
identifier[modified_word] = identifier[self] . identifier[__Prefix_Step3b_Noun] ( identifier[modified_word] )
keyword[else] :
keyword[if] keyword[not] identifier[self] . identifier[prefix_step3b_noun_success] keyword[and] identifier[self] . identifier[is_verb] :
identifier[modified_word] = identifier[self] . identifier[__Prefix_Step3_Verb] ( identifier[modified_word] )
identifier[modified_word] = identifier[self] . identifier[__Prefix_Step4_Verb] ( identifier[modified_word] )
identifier[modified_word] = identifier[self] . identifier[__normalize_post] ( identifier[modified_word] )
identifier[stemmed_word] = identifier[modified_word]
keyword[return] identifier[stemmed_word] | def stem(self, word):
"""
Stem an Arabic word and return the stemmed form.
:param word: string
:return: string
"""
# set initial values
self.is_verb = True
self.is_noun = True
self.is_defined = False
self.suffix_verb_step2a_success = False
self.suffix_verb_step2b_success = False
self.suffix_noun_step2c2_success = False
self.suffix_noun_step1a_success = False
self.suffix_noun_step2a_success = False
self.suffix_noun_step2b_success = False
self.suffixe_noun_step1b_success = False
self.prefix_step2a_success = False
self.prefix_step3a_noun_success = False
self.prefix_step3b_noun_success = False
modified_word = word
# guess type and properties
# checks1
self.__checks_1(modified_word)
# checks2
self.__checks_2(modified_word)
# Pre_Normalization
modified_word = self.__normalize_pre(modified_word)
# Start stemming
if self.is_verb:
modified_word = self.__Suffix_Verb_Step1(modified_word)
if self.suffixes_verb_step1_success:
modified_word = self.__Suffix_Verb_Step2a(modified_word)
if not self.suffix_verb_step2a_success:
modified_word = self.__Suffix_Verb_Step2c(modified_word) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# TODO: decide how to handle the "or next" instruction here
modified_word = self.__Suffix_Verb_Step2b(modified_word)
if not self.suffix_verb_step2b_success:
modified_word = self.__Suffix_Verb_Step2a(modified_word) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.is_noun:
modified_word = self.__Suffix_Noun_Step2c2(modified_word)
if not self.suffix_noun_step2c2_success:
if not self.is_defined:
modified_word = self.__Suffix_Noun_Step1a(modified_word)
# if self.suffix_noun_step1a_success:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
if not self.suffix_noun_step2a_success:
modified_word = self.__Suffix_Noun_Step2b(modified_word) # depends on [control=['if'], data=[]]
if not self.suffix_noun_step2b_success and (not self.suffix_noun_step2a_success):
modified_word = self.__Suffix_Noun_Step2c1(modified_word) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# TODO: decide how to handle the "or next" instruction here
modified_word = self.__Suffix_Noun_Step1b(modified_word)
if self.suffixe_noun_step1b_success:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
if not self.suffix_noun_step2a_success:
modified_word = self.__Suffix_Noun_Step2b(modified_word) # depends on [control=['if'], data=[]]
if not self.suffix_noun_step2b_success and (not self.suffix_noun_step2a_success):
modified_word = self.__Suffix_Noun_Step2c1(modified_word) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
if not self.is_defined:
modified_word = self.__Suffix_Noun_Step2a(modified_word) # depends on [control=['if'], data=[]]
modified_word = self.__Suffix_Noun_Step2b(modified_word) # depends on [control=['if'], data=[]]
modified_word = self.__Suffix_Noun_Step3(modified_word) # depends on [control=['if'], data=[]]
if not self.is_noun and self.is_verb:
modified_word = self.__Suffix_All_alef_maqsura(modified_word) # depends on [control=['if'], data=[]]
# prefixes
modified_word = self.__Prefix_Step1(modified_word)
modified_word = self.__Prefix_Step2a(modified_word)
if not self.prefix_step2a_success:
modified_word = self.__Prefix_Step2b(modified_word) # depends on [control=['if'], data=[]]
modified_word = self.__Prefix_Step3a_Noun(modified_word)
if not self.prefix_step3a_noun_success and self.is_noun:
modified_word = self.__Prefix_Step3b_Noun(modified_word) # depends on [control=['if'], data=[]]
elif not self.prefix_step3b_noun_success and self.is_verb:
modified_word = self.__Prefix_Step3_Verb(modified_word)
modified_word = self.__Prefix_Step4_Verb(modified_word) # depends on [control=['if'], data=[]]
# post normalization stemming
modified_word = self.__normalize_post(modified_word)
stemmed_word = modified_word
return stemmed_word |
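A hedged usage sketch for the stemmer above; the class and module names follow NLTK's Snowball Arabic stemmer, whose shape this method matches, and are assumptions here.

from nltk.stem.snowball import ArabicStemmer

stemmer = ArabicStemmer()
# stem() pre-normalizes the word, strips verb/noun suffixes and prefixes
# according to the flags set above, then post-normalizes the result
print(stemmer.stem('فسيكتبونها'))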
def merge_ordered(left, right, on=None,
left_on=None, right_on=None,
left_by=None, right_by=None,
fill_method=None, suffixes=('_x', '_y'),
how='outer'):
"""Perform merge with optional filling/interpolation designed for ordered
data like time series data. Optionally perform group-wise merge (see
examples)
Parameters
----------
left : DataFrame
right : DataFrame
on : label or list
Field names to join on. Must be found in both DataFrames.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_by : column name or list of column names
Group left DataFrame by group columns and merge piece by piece with
right DataFrame
right_by : column name or list of column names
Group right DataFrame by group columns and merge piece by piece with
left DataFrame
fill_method : {'ffill', None}, default None
Interpolation method for data
suffixes : Sequence, default is ("_x", "_y")
A length-2 sequence where each element is optionally a string
indicating the suffix to add to overlapping column names in
`left` and `right` respectively. Pass a value of `None` instead
of a string to indicate that the column name from `left` or
`right` should be left as-is, with no suffix. At least one of the
values must not be None.
.. versionchanged:: 0.25.0
how : {'left', 'right', 'outer', 'inner'}, default 'outer'
* left: use only keys from left frame (SQL: left outer join)
* right: use only keys from right frame (SQL: right outer join)
* outer: use union of keys from both frames (SQL: full outer join)
* inner: use intersection of keys from both frames (SQL: inner join)
.. versionadded:: 0.19.0
Returns
-------
merged : DataFrame
The output type will be the same as 'left', if it is a subclass
of DataFrame.
See Also
--------
merge
merge_asof
Examples
--------
>>> A                          >>> B
      key  lvalue  group             key  rvalue
0     a    1       a           0     b    1
1     c    2       a           1     c    2
2     e    3       a           2     d    3
3     a    1       b
4     c    2       b
5     e    3       b
>>> merge_ordered(A, B, fill_method='ffill', left_by='group')
group key lvalue rvalue
0 a a 1 NaN
1 a b 1 1.0
2 a c 2 2.0
3 a d 2 3.0
4 a e 3 3.0
5 b a 1 NaN
6 b b 1 1.0
7 b c 2 2.0
8 b d 2 3.0
9 b e 3 3.0
"""
def _merger(x, y):
# perform the ordered merge operation
op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on,
suffixes=suffixes, fill_method=fill_method,
how=how)
return op.get_result()
if left_by is not None and right_by is not None:
raise ValueError('Can only group either left or right frames')
elif left_by is not None:
result, _ = _groupby_and_merge(left_by, on, left, right,
lambda x, y: _merger(x, y),
check_duplicates=False)
elif right_by is not None:
result, _ = _groupby_and_merge(right_by, on, right, left,
lambda x, y: _merger(y, x),
check_duplicates=False)
else:
result = _merger(left, right)
return result | def function[merge_ordered, parameter[left, right, on, left_on, right_on, left_by, right_by, fill_method, suffixes, how]]:
constant[Perform merge with optional filling/interpolation designed for ordered
data like time series data. Optionally perform group-wise merge (see
examples)
Parameters
----------
left : DataFrame
right : DataFrame
on : label or list
Field names to join on. Must be found in both DataFrames.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_by : column name or list of column names
Group left DataFrame by group columns and merge piece by piece with
right DataFrame
right_by : column name or list of column names
Group right DataFrame by group columns and merge piece by piece with
left DataFrame
fill_method : {'ffill', None}, default None
Interpolation method for data
suffixes : Sequence, default is ("_x", "_y")
A length-2 sequence where each element is optionally a string
indicating the suffix to add to overlapping column names in
`left` and `right` respectively. Pass a value of `None` instead
of a string to indicate that the column name from `left` or
`right` should be left as-is, with no suffix. At least one of the
values must not be None.
.. versionchanged:: 0.25.0
how : {'left', 'right', 'outer', 'inner'}, default 'outer'
* left: use only keys from left frame (SQL: left outer join)
* right: use only keys from right frame (SQL: right outer join)
* outer: use union of keys from both frames (SQL: full outer join)
* inner: use intersection of keys from both frames (SQL: inner join)
.. versionadded:: 0.19.0
Returns
-------
merged : DataFrame
The output type will be the same as 'left', if it is a subclass
of DataFrame.
See Also
--------
merge
merge_asof
Examples
--------
>>> A >>> B
key lvalue group key rvalue
0 a 1 a 0 b 1
1 c 2 a 1 c 2
2 e 3 a 2 d 3
3 a 1 b
4 c 2 b
5 e 3 b
>>> merge_ordered(A, B, fill_method='ffill', left_by='group')
group key lvalue rvalue
0 a a 1 NaN
1 a b 1 1.0
2 a c 2 2.0
3 a d 2 3.0
4 a e 3 3.0
5 b a 1 NaN
6 b b 1 1.0
7 b c 2 2.0
8 b d 2 3.0
9 b e 3 3.0
]
def function[_merger, parameter[x, y]]:
variable[op] assign[=] call[name[_OrderedMerge], parameter[name[x], name[y]]]
return[call[name[op].get_result, parameter[]]]
if <ast.BoolOp object at 0x7da18bcc9750> begin[:]
<ast.Raise object at 0x7da18bcc9ae0>
return[name[result]] | keyword[def] identifier[merge_ordered] ( identifier[left] , identifier[right] , identifier[on] = keyword[None] ,
identifier[left_on] = keyword[None] , identifier[right_on] = keyword[None] ,
identifier[left_by] = keyword[None] , identifier[right_by] = keyword[None] ,
identifier[fill_method] = keyword[None] , identifier[suffixes] =( literal[string] , literal[string] ),
identifier[how] = literal[string] ):
literal[string]
keyword[def] identifier[_merger] ( identifier[x] , identifier[y] ):
identifier[op] = identifier[_OrderedMerge] ( identifier[x] , identifier[y] , identifier[on] = identifier[on] , identifier[left_on] = identifier[left_on] , identifier[right_on] = identifier[right_on] ,
identifier[suffixes] = identifier[suffixes] , identifier[fill_method] = identifier[fill_method] ,
identifier[how] = identifier[how] )
keyword[return] identifier[op] . identifier[get_result] ()
keyword[if] identifier[left_by] keyword[is] keyword[not] keyword[None] keyword[and] identifier[right_by] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[elif] identifier[left_by] keyword[is] keyword[not] keyword[None] :
identifier[result] , identifier[_] = identifier[_groupby_and_merge] ( identifier[left_by] , identifier[on] , identifier[left] , identifier[right] ,
keyword[lambda] identifier[x] , identifier[y] : identifier[_merger] ( identifier[x] , identifier[y] ),
identifier[check_duplicates] = keyword[False] )
keyword[elif] identifier[right_by] keyword[is] keyword[not] keyword[None] :
identifier[result] , identifier[_] = identifier[_groupby_and_merge] ( identifier[right_by] , identifier[on] , identifier[right] , identifier[left] ,
keyword[lambda] identifier[x] , identifier[y] : identifier[_merger] ( identifier[y] , identifier[x] ),
identifier[check_duplicates] = keyword[False] )
keyword[else] :
identifier[result] = identifier[_merger] ( identifier[left] , identifier[right] )
keyword[return] identifier[result] | def merge_ordered(left, right, on=None, left_on=None, right_on=None, left_by=None, right_by=None, fill_method=None, suffixes=('_x', '_y'), how='outer'):
"""Perform merge with optional filling/interpolation designed for ordered
data like time series data. Optionally perform group-wise merge (see
examples)
Parameters
----------
left : DataFrame
right : DataFrame
on : label or list
Field names to join on. Must be found in both DataFrames.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_by : column name or list of column names
Group left DataFrame by group columns and merge piece by piece with
right DataFrame
right_by : column name or list of column names
Group right DataFrame by group columns and merge piece by piece with
left DataFrame
fill_method : {'ffill', None}, default None
Interpolation method for data
suffixes : Sequence, default is ("_x", "_y")
A length-2 sequence where each element is optionally a string
indicating the suffix to add to overlapping column names in
`left` and `right` respectively. Pass a value of `None` instead
of a string to indicate that the column name from `left` or
`right` should be left as-is, with no suffix. At least one of the
values must not be None.
.. versionchanged:: 0.25.0
how : {'left', 'right', 'outer', 'inner'}, default 'outer'
* left: use only keys from left frame (SQL: left outer join)
* right: use only keys from right frame (SQL: right outer join)
* outer: use union of keys from both frames (SQL: full outer join)
* inner: use intersection of keys from both frames (SQL: inner join)
.. versionadded:: 0.19.0
Returns
-------
merged : DataFrame
The output type will the be same as 'left', if it is a subclass
of DataFrame.
See Also
--------
merge
merge_asof
Examples
--------
>>> A >>> B
key lvalue group key rvalue
0 a 1 a 0 b 1
1 c 2 a 1 c 2
2 e 3 a 2 d 3
3 a 1 b
4 c 2 b
5 e 3 b
>>> merge_ordered(A, B, fill_method='ffill', left_by='group')
group key lvalue rvalue
0 a a 1 NaN
1 a b 1 1.0
2 a c 2 2.0
3 a d 2 3.0
4 a e 3 3.0
5 b a 1 NaN
6 b b 1 1.0
7 b c 2 2.0
8 b d 2 3.0
9 b e 3 3.0
"""
def _merger(x, y):
# perform the ordered merge operation
op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on, suffixes=suffixes, fill_method=fill_method, how=how)
return op.get_result()
if left_by is not None and right_by is not None:
raise ValueError('Can only group either left or right frames') # depends on [control=['if'], data=[]]
elif left_by is not None:
(result, _) = _groupby_and_merge(left_by, on, left, right, lambda x, y: _merger(x, y), check_duplicates=False) # depends on [control=['if'], data=['left_by']]
elif right_by is not None:
(result, _) = _groupby_and_merge(right_by, on, right, left, lambda x, y: _merger(y, x), check_duplicates=False) # depends on [control=['if'], data=['right_by']]
else:
result = _merger(left, right)
return result |
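The doctest in the docstring can be run directly through the public pandas entry point, which wraps the function above:

import pandas as pd

A = pd.DataFrame({'key': ['a', 'c', 'e', 'a', 'c', 'e'],
                  'lvalue': [1, 2, 3, 1, 2, 3],
                  'group': ['a', 'a', 'a', 'b', 'b', 'b']})
B = pd.DataFrame({'key': ['b', 'c', 'd'], 'rvalue': [1, 2, 3]})
# group-wise ordered merge with forward filling, as in the docstring example
print(pd.merge_ordered(A, B, fill_method='ffill', left_by='group'))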
def scale_and_center(mol):
"""Center and Scale molecule 2D coordinates.
This method changes mol coordinates directly to center but not scale.
This method returns width, height and MLB(median length of bond)
and scaling will be done by drawer method with these values.
Returns:
width: float
height: float
mlb: median length of bond
"""
cnt = mol.atom_count()
if cnt < 2:
mol.size2d = (0, 0, 1)
mol.descriptors.add("ScaleAndCenter")
return
xs = []
ys = []
for _, atom in mol.atoms_iter():
xs.append(atom.coords[0])
ys.append(atom.coords[1])
xmin, xmax = (min(xs), max(xs))
ymin, ymax = (min(ys), max(ys))
width = xmax - xmin
height = ymax - ymin
x_offset = width / 2 + xmin
y_offset = height / 2 + ymin
dists = []
for u, v, _ in mol.bonds_iter():
dists.append(geometry.distance(mol.atom(u).coords, mol.atom(v).coords))
try:
mlb = statistics.median(dists)
except statistics.StatisticsError:
# No connection
mlb = math.sqrt(max([width, height]) / cnt) # empirical
if not mlb:  # many of the connected atoms overlap
mol.size2d = (0, 0, 1)
mol.descriptors.add("ScaleAndCenter")
return
# Centering
for _, atom in mol.atoms_iter():
atom.coords = (atom.coords[0] - x_offset, atom.coords[1] - y_offset)
mol.size2d = (width, height, mlb)
mol.descriptors.add("ScaleAndCenter") | def function[scale_and_center, parameter[mol]]:
constant[Center and scale molecule 2D coordinates.
This method changes mol coordinates in place to center them, but does not
scale them. Width, height and MLB (median length of bond) are stored in
mol.size2d, and scaling is done later by the drawer method using these values.
Stored in mol.size2d:
    width: float
    height: float
    mlb: median length of bond
]
variable[cnt] assign[=] call[name[mol].atom_count, parameter[]]
if compare[name[cnt] less[<] constant[2]] begin[:]
name[mol].size2d assign[=] tuple[[<ast.Constant object at 0x7da1b24295a0>, <ast.Constant object at 0x7da1b242b5e0>, <ast.Constant object at 0x7da1b242b760>]]
call[name[mol].descriptors.add, parameter[constant[ScaleAndCenter]]]
return[None]
variable[xs] assign[=] list[[]]
variable[ys] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b242ad10>, <ast.Name object at 0x7da1b24299f0>]]] in starred[call[name[mol].atoms_iter, parameter[]]] begin[:]
call[name[xs].append, parameter[call[name[atom].coords][constant[0]]]]
call[name[ys].append, parameter[call[name[atom].coords][constant[1]]]]
<ast.Tuple object at 0x7da1b242b8e0> assign[=] tuple[[<ast.Call object at 0x7da1b2429930>, <ast.Call object at 0x7da1b242b8b0>]]
<ast.Tuple object at 0x7da1b242b910> assign[=] tuple[[<ast.Call object at 0x7da1b2429570>, <ast.Call object at 0x7da1b242ac80>]]
variable[width] assign[=] binary_operation[name[xmax] - name[xmin]]
variable[height] assign[=] binary_operation[name[ymax] - name[ymin]]
variable[x_offset] assign[=] binary_operation[binary_operation[name[width] / constant[2]] + name[xmin]]
variable[y_offset] assign[=] binary_operation[binary_operation[name[height] / constant[2]] + name[ymin]]
variable[dists] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b24284f0>, <ast.Name object at 0x7da1b24284c0>, <ast.Name object at 0x7da1b242ab60>]]] in starred[call[name[mol].bonds_iter, parameter[]]] begin[:]
call[name[dists].append, parameter[call[name[geometry].distance, parameter[call[name[mol].atom, parameter[name[u]]].coords, call[name[mol].atom, parameter[name[v]]].coords]]]]
<ast.Try object at 0x7da1b242a980>
if <ast.UnaryOp object at 0x7da1b2429780> begin[:]
name[mol].size2d assign[=] tuple[[<ast.Constant object at 0x7da1b242a500>, <ast.Constant object at 0x7da1b242bd90>, <ast.Constant object at 0x7da1b2428cd0>]]
call[name[mol].descriptors.add, parameter[constant[ScaleAndCenter]]]
return[None]
for taget[tuple[[<ast.Name object at 0x7da1b242bbb0>, <ast.Name object at 0x7da1b242ba60>]]] in starred[call[name[mol].atoms_iter, parameter[]]] begin[:]
name[atom].coords assign[=] tuple[[<ast.BinOp object at 0x7da1b237e380>, <ast.BinOp object at 0x7da1b237d9c0>]]
name[mol].size2d assign[=] tuple[[<ast.Name object at 0x7da1b237f040>, <ast.Name object at 0x7da1b237ccd0>, <ast.Name object at 0x7da1b237ca90>]]
call[name[mol].descriptors.add, parameter[constant[ScaleAndCenter]]] | keyword[def] identifier[scale_and_center] ( identifier[mol] ):
literal[string]
identifier[cnt] = identifier[mol] . identifier[atom_count] ()
keyword[if] identifier[cnt] < literal[int] :
identifier[mol] . identifier[size2d] =( literal[int] , literal[int] , literal[int] )
identifier[mol] . identifier[descriptors] . identifier[add] ( literal[string] )
keyword[return]
identifier[xs] =[]
identifier[ys] =[]
keyword[for] identifier[_] , identifier[atom] keyword[in] identifier[mol] . identifier[atoms_iter] ():
identifier[xs] . identifier[append] ( identifier[atom] . identifier[coords] [ literal[int] ])
identifier[ys] . identifier[append] ( identifier[atom] . identifier[coords] [ literal[int] ])
identifier[xmin] , identifier[xmax] =( identifier[min] ( identifier[xs] ), identifier[max] ( identifier[xs] ))
identifier[ymin] , identifier[ymax] =( identifier[min] ( identifier[ys] ), identifier[max] ( identifier[ys] ))
identifier[width] = identifier[xmax] - identifier[xmin]
identifier[height] = identifier[ymax] - identifier[ymin]
identifier[x_offset] = identifier[width] / literal[int] + identifier[xmin]
identifier[y_offset] = identifier[height] / literal[int] + identifier[ymin]
identifier[dists] =[]
keyword[for] identifier[u] , identifier[v] , identifier[_] keyword[in] identifier[mol] . identifier[bonds_iter] ():
identifier[dists] . identifier[append] ( identifier[geometry] . identifier[distance] ( identifier[mol] . identifier[atom] ( identifier[u] ). identifier[coords] , identifier[mol] . identifier[atom] ( identifier[v] ). identifier[coords] ))
keyword[try] :
identifier[mlb] = identifier[statistics] . identifier[median] ( identifier[dists] )
keyword[except] identifier[statistics] . identifier[StatisticsError] :
identifier[mlb] = identifier[math] . identifier[sqrt] ( identifier[max] ([ identifier[width] , identifier[height] ])/ identifier[cnt] )
keyword[if] keyword[not] identifier[mlb] :
identifier[mol] . identifier[size2d] =( literal[int] , literal[int] , literal[int] )
identifier[mol] . identifier[descriptors] . identifier[add] ( literal[string] )
keyword[return]
keyword[for] identifier[_] , identifier[atom] keyword[in] identifier[mol] . identifier[atoms_iter] ():
identifier[atom] . identifier[coords] =( identifier[atom] . identifier[coords] [ literal[int] ]- identifier[x_offset] , identifier[atom] . identifier[coords] [ literal[int] ]- identifier[y_offset] )
identifier[mol] . identifier[size2d] =( identifier[width] , identifier[height] , identifier[mlb] )
identifier[mol] . identifier[descriptors] . identifier[add] ( literal[string] ) | def scale_and_center(mol):
"""Center and Scale molecule 2D coordinates.
This method changes mol coordinates directly to center but not scale.
This method returns width, height and MLB(median length of bond)
and scaling will be done by drawer method with these values.
Returns:
width: float
height: float
mlb: median length of bond
"""
cnt = mol.atom_count()
if cnt < 2:
mol.size2d = (0, 0, 1)
mol.descriptors.add('ScaleAndCenter')
return # depends on [control=['if'], data=[]]
xs = []
ys = []
for (_, atom) in mol.atoms_iter():
xs.append(atom.coords[0])
ys.append(atom.coords[1]) # depends on [control=['for'], data=[]]
(xmin, xmax) = (min(xs), max(xs))
(ymin, ymax) = (min(ys), max(ys))
width = xmax - xmin
height = ymax - ymin
x_offset = width / 2 + xmin
y_offset = height / 2 + ymin
dists = []
for (u, v, _) in mol.bonds_iter():
dists.append(geometry.distance(mol.atom(u).coords, mol.atom(v).coords)) # depends on [control=['for'], data=[]]
try:
mlb = statistics.median(dists) # depends on [control=['try'], data=[]]
except statistics.StatisticsError:
# No connection
mlb = math.sqrt(max([width, height]) / cnt) # empirical # depends on [control=['except'], data=[]]
if not mlb: # many of the connected atoms overlap
mol.size2d = (0, 0, 1)
mol.descriptors.add('ScaleAndCenter')
return # depends on [control=['if'], data=[]]
# Centering
for (_, atom) in mol.atoms_iter():
atom.coords = (atom.coords[0] - x_offset, atom.coords[1] - y_offset) # depends on [control=['for'], data=[]]
mol.size2d = (width, height, mlb)
mol.descriptors.add('ScaleAndCenter') |
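A runnable sketch of the interface scale_and_center expects, using stub atom/molecule classes; only the attributes actually touched above are implemented, and the function itself is assumed importable from its module (which supplies geometry and statistics).

class Atom:
    def __init__(self, coords):
        self.coords = coords

class Mol:
    def __init__(self, atoms, bonds):
        self._atoms, self._bonds = atoms, bonds
        self.descriptors = set()
    def atom_count(self):
        return len(self._atoms)
    def atoms_iter(self):
        return enumerate(self._atoms)
    def bonds_iter(self):
        return ((u, v, None) for u, v in self._bonds)
    def atom(self, i):
        return self._atoms[i]

mol = Mol([Atom((0.0, 0.0)), Atom((2.0, 0.0)), Atom((2.0, 2.0))], [(0, 1), (1, 2)])
scale_and_center(mol)
print(mol.size2d)  # (2.0, 2.0, 2.0): width, height, median bond length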
def _iso_name_and_parent_from_path(self, iso_path):
# type: (bytes) -> Tuple[bytes, dr.DirectoryRecord]
'''
An internal method to find the parent directory record and name given an
ISO path. If the parent is found, return a tuple containing the
basename of the path and the parent directory record object.
Parameters:
iso_path - The absolute ISO path to the entry on the ISO.
Returns:
A tuple containing just the name of the entry and a Directory Record
object representing the parent of the entry.
'''
splitpath = utils.split_path(iso_path)
name = splitpath.pop()
parent = self._find_iso_record(b'/' + b'/'.join(splitpath))
return (name.decode('utf-8').encode('utf-8'), parent) | def function[_iso_name_and_parent_from_path, parameter[self, iso_path]]:
constant[
An internal method to find the parent directory record and name given an
ISO path. If the parent is found, return a tuple containing the
basename of the path and the parent directory record object.
Parameters:
iso_path - The absolute ISO path to the entry on the ISO.
Returns:
A tuple containing just the name of the entry and a Directory Record
object representing the parent of the entry.
]
variable[splitpath] assign[=] call[name[utils].split_path, parameter[name[iso_path]]]
variable[name] assign[=] call[name[splitpath].pop, parameter[]]
variable[parent] assign[=] call[name[self]._find_iso_record, parameter[binary_operation[constant[b'/'] + call[constant[b'/'].join, parameter[name[splitpath]]]]]]
return[tuple[[<ast.Call object at 0x7da1b0d0f160>, <ast.Name object at 0x7da1b0d0e080>]]] | keyword[def] identifier[_iso_name_and_parent_from_path] ( identifier[self] , identifier[iso_path] ):
literal[string]
identifier[splitpath] = identifier[utils] . identifier[split_path] ( identifier[iso_path] )
identifier[name] = identifier[splitpath] . identifier[pop] ()
identifier[parent] = identifier[self] . identifier[_find_iso_record] ( literal[string] + literal[string] . identifier[join] ( identifier[splitpath] ))
keyword[return] ( identifier[name] . identifier[decode] ( literal[string] ). identifier[encode] ( literal[string] ), identifier[parent] ) | def _iso_name_and_parent_from_path(self, iso_path):
# type: (bytes) -> Tuple[bytes, dr.DirectoryRecord]
'\n An internal method to find the parent directory record and name given an\n ISO path. If the parent is found, return a tuple containing the\n basename of the path and the parent directory record object.\n\n Parameters:\n iso_path - The absolute ISO path to the entry on the ISO.\n Returns:\n A tuple containing just the name of the entry and a Directory Record\n object representing the parent of the entry.\n '
splitpath = utils.split_path(iso_path)
name = splitpath.pop()
parent = self._find_iso_record(b'/' + b'/'.join(splitpath))
return (name.decode('utf-8').encode('utf-8'), parent) |
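A hedged illustration of the path-splitting contract above, with a stand-in for utils.split_path (assumed to split an absolute ISO path on b'/'):

iso_path = b'/FOO/BAR/BAZ.;1'
parts = iso_path.lstrip(b'/').split(b'/')  # [b'FOO', b'BAR', b'BAZ.;1']
name = parts.pop()                         # b'BAZ.;1' -> entry name
parent_path = b'/' + b'/'.join(parts)      # b'/FOO/BAR' -> looked up as parent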
def _assert_command_dict(self, struct, name, path=None, extra_info=None):
"""Checks whether struct is a command dict (e.g. it's a dict and has 1 key-value pair."""
self._assert_dict(struct, name, path, extra_info)
if len(struct) != 1:
err = [self._format_error_path(path + [name])]
err.append('Commands of run, dependencies, and argument sections must be mapping with '
'exactly 1 key-value pair, got {0}: {1}'.format(len(struct), struct))
if extra_info:
err.append(extra_info)
raise exceptions.YamlSyntaxError('\n'.join(err)) | def function[_assert_command_dict, parameter[self, struct, name, path, extra_info]]:
constant[Checks whether struct is a command dict (i.e. it's a dict with exactly 1 key-value pair).]
call[name[self]._assert_dict, parameter[name[struct], name[name], name[path], name[extra_info]]]
if compare[call[name[len], parameter[name[struct]]] not_equal[!=] constant[1]] begin[:]
variable[err] assign[=] list[[<ast.Call object at 0x7da1b0f2ac20>]]
call[name[err].append, parameter[call[constant[Commands of run, dependencies, and argument sections must be mapping with exactly 1 key-value pair, got {0}: {1}].format, parameter[call[name[len], parameter[name[struct]]], name[struct]]]]]
if name[extra_info] begin[:]
call[name[err].append, parameter[name[extra_info]]]
<ast.Raise object at 0x7da1b0f0ee00> | keyword[def] identifier[_assert_command_dict] ( identifier[self] , identifier[struct] , identifier[name] , identifier[path] = keyword[None] , identifier[extra_info] = keyword[None] ):
literal[string]
identifier[self] . identifier[_assert_dict] ( identifier[struct] , identifier[name] , identifier[path] , identifier[extra_info] )
keyword[if] identifier[len] ( identifier[struct] )!= literal[int] :
identifier[err] =[ identifier[self] . identifier[_format_error_path] ( identifier[path] +[ identifier[name] ])]
identifier[err] . identifier[append] ( literal[string]
literal[string] . identifier[format] ( identifier[len] ( identifier[struct] ), identifier[struct] ))
keyword[if] identifier[extra_info] :
identifier[err] . identifier[append] ( identifier[extra_info] )
keyword[raise] identifier[exceptions] . identifier[YamlSyntaxError] ( literal[string] . identifier[join] ( identifier[err] )) | def _assert_command_dict(self, struct, name, path=None, extra_info=None):
"""Checks whether struct is a command dict (e.g. it's a dict and has 1 key-value pair."""
self._assert_dict(struct, name, path, extra_info)
if len(struct) != 1:
err = [self._format_error_path(path + [name])]
err.append('Commands of run, dependencies, and argument sections must be mapping with exactly 1 key-value pair, got {0}: {1}'.format(len(struct), struct))
if extra_info:
err.append(extra_info) # depends on [control=['if'], data=[]]
raise exceptions.YamlSyntaxError('\n'.join(err)) # depends on [control=['if'], data=[]] |
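A hedged illustration of the invariant enforced above: each command in a run, dependencies, or argument section must be a mapping with exactly one key-value pair (the key names below are illustrative).

ok = {'cl': 'ls -la'}             # one key-value pair: passes the check
bad = {'cl': 'ls', 'log_i': 'x'}  # two pairs: would raise YamlSyntaxError
assert len(ok) == 1 and len(bad) != 1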
def render_field_path(field_names):
"""Create a **field path** from a list of nested field names.
A **field path** is a ``.``-delimited concatenation of the field
names. It is used to represent a nested field. For example,
in the data
.. code-block:: python
data = {
'aa': {
'bb': {
'cc': 10,
},
},
}
the field path ``'aa.bb.cc'`` represents the data stored in
``data['aa']['bb']['cc']``.
Args:
field_names (Iterable[str, ...]): The list of field names.
Returns:
str: The ``.``-delimited field path.
"""
result = []
for field_name in field_names:
match = _SIMPLE_FIELD_NAME.match(field_name)
if match and match.group(0) == field_name:
result.append(field_name)
else:
replaced = field_name.replace(_BACKSLASH, _ESCAPED_BACKSLASH).replace(
_BACKTICK, _ESCAPED_BACKTICK
)
result.append(_BACKTICK + replaced + _BACKTICK)
return _FIELD_PATH_DELIMITER.join(result) | def function[render_field_path, parameter[field_names]]:
constant[Create a **field path** from a list of nested field names.
A **field path** is a ``.``-delimited concatenation of the field
names. It is used to represent a nested field. For example,
in the data
.. code-block:: python
data = {
'aa': {
'bb': {
'cc': 10,
},
},
}
the field path ``'aa.bb.cc'`` represents the data stored in
``data['aa']['bb']['cc']``.
Args:
field_names (Iterable[str, ...]): The list of field names.
Returns:
str: The ``.``-delimited field path.
]
variable[result] assign[=] list[[]]
for taget[name[field_name]] in starred[name[field_names]] begin[:]
variable[match] assign[=] call[name[_SIMPLE_FIELD_NAME].match, parameter[name[field_name]]]
if <ast.BoolOp object at 0x7da207f00d30> begin[:]
call[name[result].append, parameter[name[field_name]]]
return[call[name[_FIELD_PATH_DELIMITER].join, parameter[name[result]]]] | keyword[def] identifier[render_field_path] ( identifier[field_names] ):
literal[string]
identifier[result] =[]
keyword[for] identifier[field_name] keyword[in] identifier[field_names] :
identifier[match] = identifier[_SIMPLE_FIELD_NAME] . identifier[match] ( identifier[field_name] )
keyword[if] identifier[match] keyword[and] identifier[match] . identifier[group] ( literal[int] )== identifier[field_name] :
identifier[result] . identifier[append] ( identifier[field_name] )
keyword[else] :
identifier[replaced] = identifier[field_name] . identifier[replace] ( identifier[_BACKSLASH] , identifier[_ESCAPED_BACKSLASH] ). identifier[replace] (
identifier[_BACKTICK] , identifier[_ESCAPED_BACKTICK]
)
identifier[result] . identifier[append] ( identifier[_BACKTICK] + identifier[replaced] + identifier[_BACKTICK] )
keyword[return] identifier[_FIELD_PATH_DELIMITER] . identifier[join] ( identifier[result] ) | def render_field_path(field_names):
"""Create a **field path** from a list of nested field names.
A **field path** is a ``.``-delimited concatenation of the field
names. It is used to represent a nested field. For example,
in the data
.. code-block:: python
data = {
'aa': {
'bb': {
'cc': 10,
},
},
}
the field path ``'aa.bb.cc'`` represents the data stored in
``data['aa']['bb']['cc']``.
Args:
field_names (Iterable[str, ...]): The list of field names.
Returns:
str: The ``.``-delimited field path.
"""
result = []
for field_name in field_names:
match = _SIMPLE_FIELD_NAME.match(field_name)
if match and match.group(0) == field_name:
result.append(field_name) # depends on [control=['if'], data=[]]
else:
replaced = field_name.replace(_BACKSLASH, _ESCAPED_BACKSLASH).replace(_BACKTICK, _ESCAPED_BACKTICK)
result.append(_BACKTICK + replaced + _BACKTICK) # depends on [control=['for'], data=['field_name']]
return _FIELD_PATH_DELIMITER.join(result) |
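A short sketch of the resulting escaping behaviour, relying on the module-level regex and escape constants referenced above (backtick quoting, '.' delimiter):

print(render_field_path(['aa', 'bb', 'cc']))   # aa.bb.cc
print(render_field_path(['data', 'odd.key']))  # data.`odd.key`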
def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded,
neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with two stages:
# * initial optimization with early exaggeration and momentum at 0.5
# * final optimization with momentum at 0.8
params = X_embedded.ravel()
opt_args = {
"it": 0,
"n_iter_check": self._N_ITER_CHECK,
"min_grad_norm": self.min_grad_norm,
"learning_rate": self.learning_rate,
"verbose": self.verbose,
"kwargs": dict(skip_num_points=skip_num_points),
"args": [P, degrees_of_freedom, n_samples, self.n_components],
"n_iter_without_progress": self._EXPLORATION_N_ITER,
"n_iter": self._EXPLORATION_N_ITER,
"momentum": 0.5,
}
if self.method == 'barnes_hut':
obj_func = _kl_divergence_bh
opt_args['kwargs']['angle'] = self.angle
# Repeat verbose argument for _kl_divergence_bh
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
# Learning schedule (part 1): do 250 iterations with lower momentum but
# higher learning rate, controlled via the early exaggeration parameter
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
if self.verbose:
print("[t-SNE] KL divergence after %d iterations with early "
"exaggeration: %f" % (it + 1, kl_divergence))
# Learning schedule (part 2): disable early exaggeration and finish
# optimization with a higher momentum at 0.8
P /= self.early_exaggeration
remaining = self.n_iter - self._EXPLORATION_N_ITER
if it < self._EXPLORATION_N_ITER or remaining > 0:
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
opt_args['momentum'] = 0.8
opt_args['n_iter_without_progress'] = self.n_iter_without_progress
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
# Save the final number of iterations
self.n_iter_ = it
if self.verbose:
print("[t-SNE] Error after %d iterations: %f"
% (it + 1, kl_divergence))
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded | def function[_tsne, parameter[self, P, degrees_of_freedom, n_samples, random_state, X_embedded, neighbors, skip_num_points]]:
constant[Runs t-SNE.]
variable[params] assign[=] call[name[X_embedded].ravel, parameter[]]
variable[opt_args] assign[=] dictionary[[<ast.Constant object at 0x7da18eb57f70>, <ast.Constant object at 0x7da18eb56440>, <ast.Constant object at 0x7da18eb54940>, <ast.Constant object at 0x7da18eb57fa0>, <ast.Constant object at 0x7da18eb571f0>, <ast.Constant object at 0x7da18eb562c0>, <ast.Constant object at 0x7da18eb540a0>, <ast.Constant object at 0x7da18eb56200>, <ast.Constant object at 0x7da18eb54c70>, <ast.Constant object at 0x7da18eb55f00>], [<ast.Constant object at 0x7da18eb56cb0>, <ast.Attribute object at 0x7da18eb55720>, <ast.Attribute object at 0x7da18eb56170>, <ast.Attribute object at 0x7da18eb571c0>, <ast.Attribute object at 0x7da18eb54cd0>, <ast.Call object at 0x7da18eb54640>, <ast.List object at 0x7da18eb568f0>, <ast.Attribute object at 0x7da18eb55f90>, <ast.Attribute object at 0x7da18eb56c20>, <ast.Constant object at 0x7da18eb57910>]]
if compare[name[self].method equal[==] constant[barnes_hut]] begin[:]
variable[obj_func] assign[=] name[_kl_divergence_bh]
call[call[name[opt_args]][constant[kwargs]]][constant[angle]] assign[=] name[self].angle
call[call[name[opt_args]][constant[kwargs]]][constant[verbose]] assign[=] name[self].verbose
<ast.AugAssign object at 0x7da18eb566e0>
<ast.Tuple object at 0x7da18eb57b20> assign[=] call[name[_gradient_descent], parameter[name[obj_func], name[params]]]
if name[self].verbose begin[:]
call[name[print], parameter[binary_operation[constant[[t-SNE] KL divergence after %d iterations with early exaggeration: %f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da18eb569e0>, <ast.Name object at 0x7da18eb554b0>]]]]]
<ast.AugAssign object at 0x7da18eb56e30>
variable[remaining] assign[=] binary_operation[name[self].n_iter - name[self]._EXPLORATION_N_ITER]
if <ast.BoolOp object at 0x7da18eb55a80> begin[:]
call[name[opt_args]][constant[n_iter]] assign[=] name[self].n_iter
call[name[opt_args]][constant[it]] assign[=] binary_operation[name[it] + constant[1]]
call[name[opt_args]][constant[momentum]] assign[=] constant[0.8]
call[name[opt_args]][constant[n_iter_without_progress]] assign[=] name[self].n_iter_without_progress
<ast.Tuple object at 0x7da18eb54790> assign[=] call[name[_gradient_descent], parameter[name[obj_func], name[params]]]
name[self].n_iter_ assign[=] name[it]
if name[self].verbose begin[:]
call[name[print], parameter[binary_operation[constant[[t-SNE] Error after %d iterations: %f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da18eb56020>, <ast.Name object at 0x7da18eb553c0>]]]]]
variable[X_embedded] assign[=] call[name[params].reshape, parameter[name[n_samples], name[self].n_components]]
name[self].kl_divergence_ assign[=] name[kl_divergence]
return[name[X_embedded]] | keyword[def] identifier[_tsne] ( identifier[self] , identifier[P] , identifier[degrees_of_freedom] , identifier[n_samples] , identifier[random_state] , identifier[X_embedded] ,
identifier[neighbors] = keyword[None] , identifier[skip_num_points] = literal[int] ):
literal[string]
identifier[params] = identifier[X_embedded] . identifier[ravel] ()
identifier[opt_args] ={
literal[string] : literal[int] ,
literal[string] : identifier[self] . identifier[_N_ITER_CHECK] ,
literal[string] : identifier[self] . identifier[min_grad_norm] ,
literal[string] : identifier[self] . identifier[learning_rate] ,
literal[string] : identifier[self] . identifier[verbose] ,
literal[string] : identifier[dict] ( identifier[skip_num_points] = identifier[skip_num_points] ),
literal[string] :[ identifier[P] , identifier[degrees_of_freedom] , identifier[n_samples] , identifier[self] . identifier[n_components] ],
literal[string] : identifier[self] . identifier[_EXPLORATION_N_ITER] ,
literal[string] : identifier[self] . identifier[_EXPLORATION_N_ITER] ,
literal[string] : literal[int] ,
}
keyword[if] identifier[self] . identifier[method] == literal[string] :
identifier[obj_func] = identifier[_kl_divergence_bh]
identifier[opt_args] [ literal[string] ][ literal[string] ]= identifier[self] . identifier[angle]
identifier[opt_args] [ literal[string] ][ literal[string] ]= identifier[self] . identifier[verbose]
keyword[else] :
identifier[obj_func] = identifier[_kl_divergence]
identifier[P] *= identifier[self] . identifier[early_exaggeration]
identifier[params] , identifier[kl_divergence] , identifier[it] = identifier[_gradient_descent] ( identifier[obj_func] , identifier[params] ,
** identifier[opt_args] )
keyword[if] identifier[self] . identifier[verbose] :
identifier[print] ( literal[string]
literal[string] %( identifier[it] + literal[int] , identifier[kl_divergence] ))
identifier[P] /= identifier[self] . identifier[early_exaggeration]
identifier[remaining] = identifier[self] . identifier[n_iter] - identifier[self] . identifier[_EXPLORATION_N_ITER]
keyword[if] identifier[it] < identifier[self] . identifier[_EXPLORATION_N_ITER] keyword[or] identifier[remaining] > literal[int] :
identifier[opt_args] [ literal[string] ]= identifier[self] . identifier[n_iter]
identifier[opt_args] [ literal[string] ]= identifier[it] + literal[int]
identifier[opt_args] [ literal[string] ]= literal[int]
identifier[opt_args] [ literal[string] ]= identifier[self] . identifier[n_iter_without_progress]
identifier[params] , identifier[kl_divergence] , identifier[it] = identifier[_gradient_descent] ( identifier[obj_func] , identifier[params] ,
** identifier[opt_args] )
identifier[self] . identifier[n_iter_] = identifier[it]
keyword[if] identifier[self] . identifier[verbose] :
identifier[print] ( literal[string]
%( identifier[it] + literal[int] , identifier[kl_divergence] ))
identifier[X_embedded] = identifier[params] . identifier[reshape] ( identifier[n_samples] , identifier[self] . identifier[n_components] )
identifier[self] . identifier[kl_divergence_] = identifier[kl_divergence]
keyword[return] identifier[X_embedded] | def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded, neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with two stages:
# * initial optimization with early exaggeration and momentum at 0.5
# * final optimization with momentum at 0.8
params = X_embedded.ravel()
opt_args = {'it': 0, 'n_iter_check': self._N_ITER_CHECK, 'min_grad_norm': self.min_grad_norm, 'learning_rate': self.learning_rate, 'verbose': self.verbose, 'kwargs': dict(skip_num_points=skip_num_points), 'args': [P, degrees_of_freedom, n_samples, self.n_components], 'n_iter_without_progress': self._EXPLORATION_N_ITER, 'n_iter': self._EXPLORATION_N_ITER, 'momentum': 0.5}
if self.method == 'barnes_hut':
obj_func = _kl_divergence_bh
opt_args['kwargs']['angle'] = self.angle
# Repeat verbose argument for _kl_divergence_bh
opt_args['kwargs']['verbose'] = self.verbose # depends on [control=['if'], data=[]]
else:
obj_func = _kl_divergence
# Learning schedule (part 1): do 250 iterations with lower momentum but
# higher learning rate, controlled via the early exaggeration parameter
P *= self.early_exaggeration
(params, kl_divergence, it) = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
print('[t-SNE] KL divergence after %d iterations with early exaggeration: %f' % (it + 1, kl_divergence)) # depends on [control=['if'], data=[]]
# Learning schedule (part 2): disable early exaggeration and finish
# optimization with a higher momentum at 0.8
P /= self.early_exaggeration
remaining = self.n_iter - self._EXPLORATION_N_ITER
if it < self._EXPLORATION_N_ITER or remaining > 0:
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
opt_args['momentum'] = 0.8
opt_args['n_iter_without_progress'] = self.n_iter_without_progress
(params, kl_divergence, it) = _gradient_descent(obj_func, params, **opt_args) # depends on [control=['if'], data=[]]
# Save the final number of iterations
self.n_iter_ = it
if self.verbose:
print('[t-SNE] Error after %d iterations: %f' % (it + 1, kl_divergence)) # depends on [control=['if'], data=[]]
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded |
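A minimal sketch of driving the private _tsne optimizer above through scikit-learn's public TSNE estimator, which runs the same two-stage schedule (early exaggeration, then final descent); the toy data below is illustrative.
import numpy as np
from sklearn.manifold import TSNE

X = np.random.RandomState(0).rand(100, 20)          # toy high-dimensional data
tsne = TSNE(n_components=2, early_exaggeration=12.0,
            learning_rate=200.0, verbose=1, random_state=0)
X_embedded = tsne.fit_transform(X)                  # both optimization stages run inside
print(X_embedded.shape, tsne.kl_divergence_)        # (100, 2) and the final KL divergence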
def add_state_machine(self, state_machine):
"""Add a state machine to the list of managed state machines. If there is no active state machine set yet,
then set as active state machine.
:param state_machine: State Machine Object
        :raises exceptions.AttributeError: if the passed state machine was already added or is of the wrong type
"""
if not isinstance(state_machine, StateMachine):
raise AttributeError("State machine must be of type StateMachine")
if state_machine.file_system_path is not None:
if self.is_state_machine_open(state_machine.file_system_path):
raise AttributeError("The state machine is already open {0}".format(state_machine.file_system_path))
logger.debug("Add new state machine with id {0}".format(state_machine.state_machine_id))
self._state_machines[state_machine.state_machine_id] = state_machine
return state_machine.state_machine_id | def function[add_state_machine, parameter[self, state_machine]]:
constant[Add a state machine to the list of managed state machines. If there is no active state machine set yet,
then set as active state machine.
:param state_machine: State Machine Object
    :raises exceptions.AttributeError: if the passed state machine was already added or is of the wrong type
]
if <ast.UnaryOp object at 0x7da1b1a90e20> begin[:]
<ast.Raise object at 0x7da1b1aef580>
if compare[name[state_machine].file_system_path is_not constant[None]] begin[:]
if call[name[self].is_state_machine_open, parameter[name[state_machine].file_system_path]] begin[:]
<ast.Raise object at 0x7da1b1aef9a0>
call[name[logger].debug, parameter[call[constant[Add new state machine with id {0}].format, parameter[name[state_machine].state_machine_id]]]]
call[name[self]._state_machines][name[state_machine].state_machine_id] assign[=] name[state_machine]
return[name[state_machine].state_machine_id] | keyword[def] identifier[add_state_machine] ( identifier[self] , identifier[state_machine] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[state_machine] , identifier[StateMachine] ):
keyword[raise] identifier[AttributeError] ( literal[string] )
keyword[if] identifier[state_machine] . identifier[file_system_path] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[self] . identifier[is_state_machine_open] ( identifier[state_machine] . identifier[file_system_path] ):
keyword[raise] identifier[AttributeError] ( literal[string] . identifier[format] ( identifier[state_machine] . identifier[file_system_path] ))
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[state_machine] . identifier[state_machine_id] ))
identifier[self] . identifier[_state_machines] [ identifier[state_machine] . identifier[state_machine_id] ]= identifier[state_machine]
keyword[return] identifier[state_machine] . identifier[state_machine_id] | def add_state_machine(self, state_machine):
"""Add a state machine to the list of managed state machines. If there is no active state machine set yet,
then set as active state machine.
:param state_machine: State Machine Object
        :raises exceptions.AttributeError: if the passed state machine was already added or is of the wrong type
"""
if not isinstance(state_machine, StateMachine):
raise AttributeError('State machine must be of type StateMachine') # depends on [control=['if'], data=[]]
if state_machine.file_system_path is not None:
if self.is_state_machine_open(state_machine.file_system_path):
raise AttributeError('The state machine is already open {0}'.format(state_machine.file_system_path)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
logger.debug('Add new state machine with id {0}'.format(state_machine.state_machine_id))
self._state_machines[state_machine.state_machine_id] = state_machine
return state_machine.state_machine_id |
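A hedged usage sketch for the method above; StateMachineManager and the no-argument StateMachine constructor are illustrative stand-ins for whatever classes the surrounding framework actually provides.
manager = StateMachineManager()                     # hypothetical owning class
sm = StateMachine()                                 # hypothetical constructor
sm_id = manager.add_state_machine(sm)               # registers the machine, returns its id
assert sm_id in manager._state_machines             # the manager now tracks it
# Passing a non-StateMachine object, or a machine whose file_system_path is
# already open, would raise AttributeError per the checks above.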
def update_dialog_node(self,
workspace_id,
dialog_node,
new_dialog_node=None,
new_description=None,
new_conditions=None,
new_parent=None,
new_previous_sibling=None,
new_output=None,
new_context=None,
new_metadata=None,
new_next_step=None,
new_title=None,
new_node_type=None,
new_event_name=None,
new_variable=None,
new_actions=None,
new_digress_in=None,
new_digress_out=None,
new_digress_out_slots=None,
new_user_label=None,
**kwargs):
"""
Update dialog node.
Update an existing dialog node with new or modified data.
This operation is limited to 500 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str dialog_node: The dialog node ID (for example, `get_order`).
:param str new_dialog_node: The dialog node ID. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 1024 characters.
:param str new_description: The description of the dialog node. This string cannot
contain carriage return, newline, or tab characters, and it must be no longer than
128 characters.
:param str new_conditions: The condition that will trigger the dialog node. This
string cannot contain carriage return, newline, or tab characters, and it must be
no longer than 2048 characters.
:param str new_parent: The ID of the parent dialog node. This property is omitted
if the dialog node has no parent.
:param str new_previous_sibling: The ID of the previous sibling dialog node. This
property is omitted if the dialog node has no previous sibling.
:param DialogNodeOutput new_output: The output of the dialog node. For more
information about how to specify dialog node output, see the
[documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#dialog-overview-responses).
:param dict new_context: The context for the dialog node.
:param dict new_metadata: The metadata for the dialog node.
:param DialogNodeNextStep new_next_step: The next step to execute following this
dialog node.
:param str new_title: The alias used to identify the dialog node. This string must
conform to the following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 64 characters.
:param str new_node_type: How the dialog node is processed.
:param str new_event_name: How an `event_handler` node is processed.
:param str new_variable: The location in the dialog context where output is
stored.
:param list[DialogNodeAction] new_actions: An array of objects describing any
actions to be invoked by the dialog node.
:param str new_digress_in: Whether this top-level dialog node can be digressed
into.
:param str new_digress_out: Whether this dialog node can be returned to after a
digression.
:param str new_digress_out_slots: Whether the user can digress to top-level nodes
while filling out slots.
:param str new_user_label: A label that can be displayed externally to describe
the purpose of the node to users. This string must be no longer than 512
characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if dialog_node is None:
raise ValueError('dialog_node must be provided')
if new_output is not None:
new_output = self._convert_model(new_output, DialogNodeOutput)
if new_next_step is not None:
new_next_step = self._convert_model(new_next_step,
DialogNodeNextStep)
if new_actions is not None:
new_actions = [
self._convert_model(x, DialogNodeAction) for x in new_actions
]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('conversation', 'V1',
'update_dialog_node')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'dialog_node': new_dialog_node,
'description': new_description,
'conditions': new_conditions,
'parent': new_parent,
'previous_sibling': new_previous_sibling,
'output': new_output,
'context': new_context,
'metadata': new_metadata,
'next_step': new_next_step,
'title': new_title,
'type': new_node_type,
'event_name': new_event_name,
'variable': new_variable,
'actions': new_actions,
'digress_in': new_digress_in,
'digress_out': new_digress_out,
'digress_out_slots': new_digress_out_slots,
'user_label': new_user_label
}
url = '/v1/workspaces/{0}/dialog_nodes/{1}'.format(
*self._encode_path_vars(workspace_id, dialog_node))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response | def function[update_dialog_node, parameter[self, workspace_id, dialog_node, new_dialog_node, new_description, new_conditions, new_parent, new_previous_sibling, new_output, new_context, new_metadata, new_next_step, new_title, new_node_type, new_event_name, new_variable, new_actions, new_digress_in, new_digress_out, new_digress_out_slots, new_user_label]]:
constant[
Update dialog node.
Update an existing dialog node with new or modified data.
This operation is limited to 500 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str dialog_node: The dialog node ID (for example, `get_order`).
:param str new_dialog_node: The dialog node ID. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 1024 characters.
:param str new_description: The description of the dialog node. This string cannot
contain carriage return, newline, or tab characters, and it must be no longer than
128 characters.
:param str new_conditions: The condition that will trigger the dialog node. This
string cannot contain carriage return, newline, or tab characters, and it must be
no longer than 2048 characters.
:param str new_parent: The ID of the parent dialog node. This property is omitted
if the dialog node has no parent.
:param str new_previous_sibling: The ID of the previous sibling dialog node. This
property is omitted if the dialog node has no previous sibling.
:param DialogNodeOutput new_output: The output of the dialog node. For more
information about how to specify dialog node output, see the
[documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#dialog-overview-responses).
:param dict new_context: The context for the dialog node.
:param dict new_metadata: The metadata for the dialog node.
:param DialogNodeNextStep new_next_step: The next step to execute following this
dialog node.
:param str new_title: The alias used to identify the dialog node. This string must
conform to the following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 64 characters.
:param str new_node_type: How the dialog node is processed.
:param str new_event_name: How an `event_handler` node is processed.
:param str new_variable: The location in the dialog context where output is
stored.
:param list[DialogNodeAction] new_actions: An array of objects describing any
actions to be invoked by the dialog node.
:param str new_digress_in: Whether this top-level dialog node can be digressed
into.
:param str new_digress_out: Whether this dialog node can be returned to after a
digression.
:param str new_digress_out_slots: Whether the user can digress to top-level nodes
while filling out slots.
:param str new_user_label: A label that can be displayed externally to describe
the purpose of the node to users. This string must be no longer than 512
characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
]
if compare[name[workspace_id] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c76ebf0>
if compare[name[dialog_node] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c76f700>
if compare[name[new_output] is_not constant[None]] begin[:]
variable[new_output] assign[=] call[name[self]._convert_model, parameter[name[new_output], name[DialogNodeOutput]]]
if compare[name[new_next_step] is_not constant[None]] begin[:]
variable[new_next_step] assign[=] call[name[self]._convert_model, parameter[name[new_next_step], name[DialogNodeNextStep]]]
if compare[name[new_actions] is_not constant[None]] begin[:]
variable[new_actions] assign[=] <ast.ListComp object at 0x7da20c76fe80>
variable[headers] assign[=] dictionary[[], []]
if compare[constant[headers] in name[kwargs]] begin[:]
call[name[headers].update, parameter[call[name[kwargs].get, parameter[constant[headers]]]]]
variable[sdk_headers] assign[=] call[name[get_sdk_headers], parameter[constant[conversation], constant[V1], constant[update_dialog_node]]]
call[name[headers].update, parameter[name[sdk_headers]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20c76f280>], [<ast.Attribute object at 0x7da20c76f670>]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da20c76c670>, <ast.Constant object at 0x7da20c76f520>, <ast.Constant object at 0x7da20c76c190>, <ast.Constant object at 0x7da20c76cc40>, <ast.Constant object at 0x7da20c76ea70>, <ast.Constant object at 0x7da20c76cd30>, <ast.Constant object at 0x7da20c76df00>, <ast.Constant object at 0x7da20c76cb50>, <ast.Constant object at 0x7da20c76f310>, <ast.Constant object at 0x7da20c76d630>, <ast.Constant object at 0x7da20c76dcf0>, <ast.Constant object at 0x7da20c76f400>, <ast.Constant object at 0x7da20c76d1b0>, <ast.Constant object at 0x7da20c76eb30>, <ast.Constant object at 0x7da20c76c3a0>, <ast.Constant object at 0x7da20c76c550>, <ast.Constant object at 0x7da20c76f220>, <ast.Constant object at 0x7da20c76cdc0>], [<ast.Name object at 0x7da20c76d450>, <ast.Name object at 0x7da20c76e9e0>, <ast.Name object at 0x7da20c76cfd0>, <ast.Name object at 0x7da20c76d7e0>, <ast.Name object at 0x7da20c76e530>, <ast.Name object at 0x7da20c76f1c0>, <ast.Name object at 0x7da20c76c070>, <ast.Name object at 0x7da20c76d120>, <ast.Name object at 0x7da20c76e890>, <ast.Name object at 0x7da20c76c0d0>, <ast.Name object at 0x7da18bcca650>, <ast.Name object at 0x7da18bccabf0>, <ast.Name object at 0x7da18bccbb20>, <ast.Name object at 0x7da18bccbd90>, <ast.Name object at 0x7da18bcc8f10>, <ast.Name object at 0x7da18bcca980>, <ast.Name object at 0x7da18bcc9b10>, <ast.Name object at 0x7da18bcca830>]]
variable[url] assign[=] call[constant[/v1/workspaces/{0}/dialog_nodes/{1}].format, parameter[<ast.Starred object at 0x7da1b1b44e20>]]
variable[response] assign[=] call[name[self].request, parameter[]]
return[name[response]] | keyword[def] identifier[update_dialog_node] ( identifier[self] ,
identifier[workspace_id] ,
identifier[dialog_node] ,
identifier[new_dialog_node] = keyword[None] ,
identifier[new_description] = keyword[None] ,
identifier[new_conditions] = keyword[None] ,
identifier[new_parent] = keyword[None] ,
identifier[new_previous_sibling] = keyword[None] ,
identifier[new_output] = keyword[None] ,
identifier[new_context] = keyword[None] ,
identifier[new_metadata] = keyword[None] ,
identifier[new_next_step] = keyword[None] ,
identifier[new_title] = keyword[None] ,
identifier[new_node_type] = keyword[None] ,
identifier[new_event_name] = keyword[None] ,
identifier[new_variable] = keyword[None] ,
identifier[new_actions] = keyword[None] ,
identifier[new_digress_in] = keyword[None] ,
identifier[new_digress_out] = keyword[None] ,
identifier[new_digress_out_slots] = keyword[None] ,
identifier[new_user_label] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
keyword[if] identifier[workspace_id] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[dialog_node] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[new_output] keyword[is] keyword[not] keyword[None] :
identifier[new_output] = identifier[self] . identifier[_convert_model] ( identifier[new_output] , identifier[DialogNodeOutput] )
keyword[if] identifier[new_next_step] keyword[is] keyword[not] keyword[None] :
identifier[new_next_step] = identifier[self] . identifier[_convert_model] ( identifier[new_next_step] ,
identifier[DialogNodeNextStep] )
keyword[if] identifier[new_actions] keyword[is] keyword[not] keyword[None] :
identifier[new_actions] =[
identifier[self] . identifier[_convert_model] ( identifier[x] , identifier[DialogNodeAction] ) keyword[for] identifier[x] keyword[in] identifier[new_actions]
]
identifier[headers] ={}
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[headers] . identifier[update] ( identifier[kwargs] . identifier[get] ( literal[string] ))
identifier[sdk_headers] = identifier[get_sdk_headers] ( literal[string] , literal[string] ,
literal[string] )
identifier[headers] . identifier[update] ( identifier[sdk_headers] )
identifier[params] ={ literal[string] : identifier[self] . identifier[version] }
identifier[data] ={
literal[string] : identifier[new_dialog_node] ,
literal[string] : identifier[new_description] ,
literal[string] : identifier[new_conditions] ,
literal[string] : identifier[new_parent] ,
literal[string] : identifier[new_previous_sibling] ,
literal[string] : identifier[new_output] ,
literal[string] : identifier[new_context] ,
literal[string] : identifier[new_metadata] ,
literal[string] : identifier[new_next_step] ,
literal[string] : identifier[new_title] ,
literal[string] : identifier[new_node_type] ,
literal[string] : identifier[new_event_name] ,
literal[string] : identifier[new_variable] ,
literal[string] : identifier[new_actions] ,
literal[string] : identifier[new_digress_in] ,
literal[string] : identifier[new_digress_out] ,
literal[string] : identifier[new_digress_out_slots] ,
literal[string] : identifier[new_user_label]
}
identifier[url] = literal[string] . identifier[format] (
* identifier[self] . identifier[_encode_path_vars] ( identifier[workspace_id] , identifier[dialog_node] ))
identifier[response] = identifier[self] . identifier[request] (
identifier[method] = literal[string] ,
identifier[url] = identifier[url] ,
identifier[headers] = identifier[headers] ,
identifier[params] = identifier[params] ,
identifier[json] = identifier[data] ,
identifier[accept_json] = keyword[True] )
keyword[return] identifier[response] | def update_dialog_node(self, workspace_id, dialog_node, new_dialog_node=None, new_description=None, new_conditions=None, new_parent=None, new_previous_sibling=None, new_output=None, new_context=None, new_metadata=None, new_next_step=None, new_title=None, new_node_type=None, new_event_name=None, new_variable=None, new_actions=None, new_digress_in=None, new_digress_out=None, new_digress_out_slots=None, new_user_label=None, **kwargs):
"""
Update dialog node.
Update an existing dialog node with new or modified data.
This operation is limited to 500 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str dialog_node: The dialog node ID (for example, `get_order`).
:param str new_dialog_node: The dialog node ID. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 1024 characters.
:param str new_description: The description of the dialog node. This string cannot
contain carriage return, newline, or tab characters, and it must be no longer than
128 characters.
:param str new_conditions: The condition that will trigger the dialog node. This
string cannot contain carriage return, newline, or tab characters, and it must be
no longer than 2048 characters.
:param str new_parent: The ID of the parent dialog node. This property is omitted
if the dialog node has no parent.
:param str new_previous_sibling: The ID of the previous sibling dialog node. This
property is omitted if the dialog node has no previous sibling.
:param DialogNodeOutput new_output: The output of the dialog node. For more
information about how to specify dialog node output, see the
[documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#dialog-overview-responses).
:param dict new_context: The context for the dialog node.
:param dict new_metadata: The metadata for the dialog node.
:param DialogNodeNextStep new_next_step: The next step to execute following this
dialog node.
:param str new_title: The alias used to identify the dialog node. This string must
conform to the following restrictions:
- It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot
characters.
- It must be no longer than 64 characters.
:param str new_node_type: How the dialog node is processed.
:param str new_event_name: How an `event_handler` node is processed.
:param str new_variable: The location in the dialog context where output is
stored.
:param list[DialogNodeAction] new_actions: An array of objects describing any
actions to be invoked by the dialog node.
:param str new_digress_in: Whether this top-level dialog node can be digressed
into.
:param str new_digress_out: Whether this dialog node can be returned to after a
digression.
:param str new_digress_out_slots: Whether the user can digress to top-level nodes
while filling out slots.
:param str new_user_label: A label that can be displayed externally to describe
the purpose of the node to users. This string must be no longer than 512
characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided') # depends on [control=['if'], data=[]]
if dialog_node is None:
raise ValueError('dialog_node must be provided') # depends on [control=['if'], data=[]]
if new_output is not None:
new_output = self._convert_model(new_output, DialogNodeOutput) # depends on [control=['if'], data=['new_output']]
if new_next_step is not None:
new_next_step = self._convert_model(new_next_step, DialogNodeNextStep) # depends on [control=['if'], data=['new_next_step']]
if new_actions is not None:
new_actions = [self._convert_model(x, DialogNodeAction) for x in new_actions] # depends on [control=['if'], data=['new_actions']]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers')) # depends on [control=['if'], data=['kwargs']]
sdk_headers = get_sdk_headers('conversation', 'V1', 'update_dialog_node')
headers.update(sdk_headers)
params = {'version': self.version}
data = {'dialog_node': new_dialog_node, 'description': new_description, 'conditions': new_conditions, 'parent': new_parent, 'previous_sibling': new_previous_sibling, 'output': new_output, 'context': new_context, 'metadata': new_metadata, 'next_step': new_next_step, 'title': new_title, 'type': new_node_type, 'event_name': new_event_name, 'variable': new_variable, 'actions': new_actions, 'digress_in': new_digress_in, 'digress_out': new_digress_out, 'digress_out_slots': new_digress_out_slots, 'user_label': new_user_label}
url = '/v1/workspaces/{0}/dialog_nodes/{1}'.format(*self._encode_path_vars(workspace_id, dialog_node))
response = self.request(method='POST', url=url, headers=headers, params=params, json=data, accept_json=True)
return response |
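A hedged call sketch against the update_dialog_node method above, following ibm-watson SDK conventions; the ids and API key are placeholders, and the authenticator style varies between SDK versions.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2019-02-28',
                        authenticator=IAMAuthenticator('YOUR_APIKEY'))  # placeholder key
result = assistant.update_dialog_node(
    workspace_id='WORKSPACE_ID',                    # placeholder workspace id
    dialog_node='get_order',
    new_description='Handles order lookups').get_result()
print(result)                                       # the updated node as parsed JSON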
def bulk_update(self, request):
"""Put multiple items in the basket,
removing anything that already exists
"""
# Delete everything in the basket
bid = utils.destroy_basket(request)
for item_data in request.data:
item = BasketItem(basket_id=bid, **item_data)
item.save()
serializer = BasketItemSerializer(self.get_queryset(request), many=True)
response = Response(data=serializer.data,
status=status.HTTP_200_OK)
return response | def function[bulk_update, parameter[self, request]]:
constant[Put multiple items in the basket,
removing anything that already exists
]
variable[bid] assign[=] call[name[utils].destroy_basket, parameter[name[request]]]
for taget[name[item_data]] in starred[name[request].data] begin[:]
variable[item] assign[=] call[name[BasketItem], parameter[]]
call[name[item].save, parameter[]]
variable[serializer] assign[=] call[name[BasketItemSerializer], parameter[call[name[self].get_queryset, parameter[name[request]]]]]
variable[response] assign[=] call[name[Response], parameter[]]
return[name[response]] | keyword[def] identifier[bulk_update] ( identifier[self] , identifier[request] ):
literal[string]
identifier[bid] = identifier[utils] . identifier[destroy_basket] ( identifier[request] )
keyword[for] identifier[item_data] keyword[in] identifier[request] . identifier[data] :
identifier[item] = identifier[BasketItem] ( identifier[basket_id] = identifier[bid] ,** identifier[item_data] )
identifier[item] . identifier[save] ()
identifier[serializer] = identifier[BasketItemSerializer] ( identifier[self] . identifier[get_queryset] ( identifier[request] ), identifier[many] = keyword[True] )
identifier[response] = identifier[Response] ( identifier[data] = identifier[serializer] . identifier[data] ,
identifier[status] = identifier[status] . identifier[HTTP_200_OK] )
keyword[return] identifier[response] | def bulk_update(self, request):
"""Put multiple items in the basket,
removing anything that already exists
""" # Delete everything in the basket
bid = utils.destroy_basket(request)
for item_data in request.data:
item = BasketItem(basket_id=bid, **item_data)
item.save() # depends on [control=['for'], data=['item_data']]
serializer = BasketItemSerializer(self.get_queryset(request), many=True)
response = Response(data=serializer.data, status=status.HTTP_200_OK)
return response |
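A client-side sketch of exercising the bulk_update handler above over HTTP; the endpoint path and the BasketItem fields in the payload are assumptions about the surrounding API.
import requests

payload = [
    {'product_id': 1, 'quantity': 2},               # assumed BasketItem fields
    {'product_id': 7, 'quantity': 1},
]
resp = requests.put('https://example.com/api/basket/', json=payload)
resp.raise_for_status()
print(resp.json())                                  # serialized items now in the basket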
def render_and_create_dir(dirname, context, output_dir, environment,
overwrite_if_exists=False):
"""Render name of a directory, create the directory, return its path."""
name_tmpl = environment.from_string(dirname)
rendered_dirname = name_tmpl.render(**context)
dir_to_create = os.path.normpath(
os.path.join(output_dir, rendered_dirname)
)
logger.debug('Rendered dir {} must exist in output_dir {}'.format(
dir_to_create,
output_dir
))
output_dir_exists = os.path.exists(dir_to_create)
if output_dir_exists:
if overwrite_if_exists:
logger.debug(
                'Output directory {} already exists, '
                'overwriting it'.format(dir_to_create)
)
else:
msg = 'Error: "{}" directory already exists'.format(dir_to_create)
raise OutputDirExistsException(msg)
else:
make_sure_path_exists(dir_to_create)
return dir_to_create, not output_dir_exists | def function[render_and_create_dir, parameter[dirname, context, output_dir, environment, overwrite_if_exists]]:
constant[Render name of a directory, create the directory, return its path.]
variable[name_tmpl] assign[=] call[name[environment].from_string, parameter[name[dirname]]]
variable[rendered_dirname] assign[=] call[name[name_tmpl].render, parameter[]]
variable[dir_to_create] assign[=] call[name[os].path.normpath, parameter[call[name[os].path.join, parameter[name[output_dir], name[rendered_dirname]]]]]
call[name[logger].debug, parameter[call[constant[Rendered dir {} must exist in output_dir {}].format, parameter[name[dir_to_create], name[output_dir]]]]]
variable[output_dir_exists] assign[=] call[name[os].path.exists, parameter[name[dir_to_create]]]
if name[output_dir_exists] begin[:]
if name[overwrite_if_exists] begin[:]
            call[name[logger].debug, parameter[call[constant[Output directory {} already exists, overwriting it].format, parameter[name[dir_to_create]]]]]
return[tuple[[<ast.Name object at 0x7da1b21797e0>, <ast.UnaryOp object at 0x7da1b2178190>]]] | keyword[def] identifier[render_and_create_dir] ( identifier[dirname] , identifier[context] , identifier[output_dir] , identifier[environment] ,
identifier[overwrite_if_exists] = keyword[False] ):
literal[string]
identifier[name_tmpl] = identifier[environment] . identifier[from_string] ( identifier[dirname] )
identifier[rendered_dirname] = identifier[name_tmpl] . identifier[render] (** identifier[context] )
identifier[dir_to_create] = identifier[os] . identifier[path] . identifier[normpath] (
identifier[os] . identifier[path] . identifier[join] ( identifier[output_dir] , identifier[rendered_dirname] )
)
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] (
identifier[dir_to_create] ,
identifier[output_dir]
))
identifier[output_dir_exists] = identifier[os] . identifier[path] . identifier[exists] ( identifier[dir_to_create] )
keyword[if] identifier[output_dir_exists] :
keyword[if] identifier[overwrite_if_exists] :
identifier[logger] . identifier[debug] (
literal[string]
literal[string] . identifier[format] ( identifier[dir_to_create] )
)
keyword[else] :
identifier[msg] = literal[string] . identifier[format] ( identifier[dir_to_create] )
keyword[raise] identifier[OutputDirExistsException] ( identifier[msg] )
keyword[else] :
identifier[make_sure_path_exists] ( identifier[dir_to_create] )
keyword[return] identifier[dir_to_create] , keyword[not] identifier[output_dir_exists] | def render_and_create_dir(dirname, context, output_dir, environment, overwrite_if_exists=False):
"""Render name of a directory, create the directory, return its path."""
name_tmpl = environment.from_string(dirname)
rendered_dirname = name_tmpl.render(**context)
dir_to_create = os.path.normpath(os.path.join(output_dir, rendered_dirname))
logger.debug('Rendered dir {} must exist in output_dir {}'.format(dir_to_create, output_dir))
output_dir_exists = os.path.exists(dir_to_create)
if output_dir_exists:
if overwrite_if_exists:
            logger.debug('Output directory {} already exists, overwriting it'.format(dir_to_create)) # depends on [control=['if'], data=[]]
else:
msg = 'Error: "{}" directory already exists'.format(dir_to_create)
raise OutputDirExistsException(msg) # depends on [control=['if'], data=[]]
else:
make_sure_path_exists(dir_to_create)
return (dir_to_create, not output_dir_exists) |
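A small sketch of driving render_and_create_dir with a plain Jinja2 environment, assuming the helper and its module-level dependencies (logger, make_sure_path_exists, OutputDirExistsException) are importable; the context keys are illustrative.
from jinja2 import Environment

env = Environment()
context = {'cookiecutter': {'project_slug': 'demo_project'}}   # illustrative context
path, created = render_and_create_dir(
    '{{ cookiecutter.project_slug }}', context, 'build', env,
    overwrite_if_exists=True)
print(path, created)    # e.g. build/demo_project, True on the first run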
def gen_docs(corpus, lemmatize, rm_stops):
"""Open and process files from a corpus. Return a list of sentences for an author. Each sentence
is itself a list of tokenized words.
"""
assert corpus in ['phi5', 'tlg']
if corpus == 'phi5':
language = 'latin'
filepaths = assemble_phi5_author_filepaths()
jv_replacer = JVReplacer()
text_cleaner = phi5_plaintext_cleanup
word_tokenizer = nltk_tokenize_words
if rm_stops:
stops = latin_stops
else:
stops = None
elif corpus == 'tlg':
language = 'greek'
filepaths = assemble_tlg_author_filepaths()
text_cleaner = tlg_plaintext_cleanup
word_tokenizer = nltk_tokenize_words
if rm_stops:
            stops = latin_stops  # NB: the Latin stop list is reused here for the Greek (TLG) branch
else:
stops = None
if lemmatize:
lemmatizer = LemmaReplacer(language)
sent_tokenizer = TokenizeSentence(language)
for filepath in filepaths:
with open(filepath) as f:
text = f.read()
# light first-pass cleanup, before sentence tokenization (which relies on punctuation)
text = text_cleaner(text, rm_punctuation=False, rm_periods=False)
sent_tokens = sent_tokenizer.tokenize_sentences(text)
# doc_sentences = []
for sentence in sent_tokens:
# a second cleanup at sentence-level, to rm all punctuation
sentence = text_cleaner(sentence, rm_punctuation=True, rm_periods=True)
sentence = word_tokenizer(sentence)
sentence = [s.lower() for s in sentence]
sentence = [w for w in sentence if w]
if language == 'latin':
sentence = [w[1:] if w.startswith('-') else w for w in sentence]
if stops:
sentence = [w for w in sentence if w not in stops]
sentence = [w for w in sentence if len(w) > 1] # rm short words
if sentence:
sentence = sentence
if lemmatize:
sentence = lemmatizer.lemmatize(sentence)
if sentence and language == 'latin':
sentence = [jv_replacer.replace(word) for word in sentence]
if sentence:
yield sentence | def function[gen_docs, parameter[corpus, lemmatize, rm_stops]]:
constant[Open and process files from a corpus. Return a list of sentences for an author. Each sentence
is itself a list of tokenized words.
]
assert[compare[name[corpus] in list[[<ast.Constant object at 0x7da204347c40>, <ast.Constant object at 0x7da2043467d0>]]]]
if compare[name[corpus] equal[==] constant[phi5]] begin[:]
variable[language] assign[=] constant[latin]
variable[filepaths] assign[=] call[name[assemble_phi5_author_filepaths], parameter[]]
variable[jv_replacer] assign[=] call[name[JVReplacer], parameter[]]
variable[text_cleaner] assign[=] name[phi5_plaintext_cleanup]
variable[word_tokenizer] assign[=] name[nltk_tokenize_words]
if name[rm_stops] begin[:]
variable[stops] assign[=] name[latin_stops]
if name[lemmatize] begin[:]
variable[lemmatizer] assign[=] call[name[LemmaReplacer], parameter[name[language]]]
variable[sent_tokenizer] assign[=] call[name[TokenizeSentence], parameter[name[language]]]
for taget[name[filepath]] in starred[name[filepaths]] begin[:]
with call[name[open], parameter[name[filepath]]] begin[:]
variable[text] assign[=] call[name[f].read, parameter[]]
variable[text] assign[=] call[name[text_cleaner], parameter[name[text]]]
variable[sent_tokens] assign[=] call[name[sent_tokenizer].tokenize_sentences, parameter[name[text]]]
for taget[name[sentence]] in starred[name[sent_tokens]] begin[:]
variable[sentence] assign[=] call[name[text_cleaner], parameter[name[sentence]]]
variable[sentence] assign[=] call[name[word_tokenizer], parameter[name[sentence]]]
variable[sentence] assign[=] <ast.ListComp object at 0x7da18f09c340>
variable[sentence] assign[=] <ast.ListComp object at 0x7da18f09dde0>
if compare[name[language] equal[==] constant[latin]] begin[:]
variable[sentence] assign[=] <ast.ListComp object at 0x7da18f09ce80>
if name[stops] begin[:]
variable[sentence] assign[=] <ast.ListComp object at 0x7da18f09e920>
variable[sentence] assign[=] <ast.ListComp object at 0x7da18f09f9d0>
if name[sentence] begin[:]
variable[sentence] assign[=] name[sentence]
if name[lemmatize] begin[:]
variable[sentence] assign[=] call[name[lemmatizer].lemmatize, parameter[name[sentence]]]
if <ast.BoolOp object at 0x7da18f09e320> begin[:]
variable[sentence] assign[=] <ast.ListComp object at 0x7da18f09d5a0>
if name[sentence] begin[:]
<ast.Yield object at 0x7da18f09e290> | keyword[def] identifier[gen_docs] ( identifier[corpus] , identifier[lemmatize] , identifier[rm_stops] ):
literal[string]
keyword[assert] identifier[corpus] keyword[in] [ literal[string] , literal[string] ]
keyword[if] identifier[corpus] == literal[string] :
identifier[language] = literal[string]
identifier[filepaths] = identifier[assemble_phi5_author_filepaths] ()
identifier[jv_replacer] = identifier[JVReplacer] ()
identifier[text_cleaner] = identifier[phi5_plaintext_cleanup]
identifier[word_tokenizer] = identifier[nltk_tokenize_words]
keyword[if] identifier[rm_stops] :
identifier[stops] = identifier[latin_stops]
keyword[else] :
identifier[stops] = keyword[None]
keyword[elif] identifier[corpus] == literal[string] :
identifier[language] = literal[string]
identifier[filepaths] = identifier[assemble_tlg_author_filepaths] ()
identifier[text_cleaner] = identifier[tlg_plaintext_cleanup]
identifier[word_tokenizer] = identifier[nltk_tokenize_words]
keyword[if] identifier[rm_stops] :
identifier[stops] = identifier[latin_stops]
keyword[else] :
identifier[stops] = keyword[None]
keyword[if] identifier[lemmatize] :
identifier[lemmatizer] = identifier[LemmaReplacer] ( identifier[language] )
identifier[sent_tokenizer] = identifier[TokenizeSentence] ( identifier[language] )
keyword[for] identifier[filepath] keyword[in] identifier[filepaths] :
keyword[with] identifier[open] ( identifier[filepath] ) keyword[as] identifier[f] :
identifier[text] = identifier[f] . identifier[read] ()
identifier[text] = identifier[text_cleaner] ( identifier[text] , identifier[rm_punctuation] = keyword[False] , identifier[rm_periods] = keyword[False] )
identifier[sent_tokens] = identifier[sent_tokenizer] . identifier[tokenize_sentences] ( identifier[text] )
keyword[for] identifier[sentence] keyword[in] identifier[sent_tokens] :
identifier[sentence] = identifier[text_cleaner] ( identifier[sentence] , identifier[rm_punctuation] = keyword[True] , identifier[rm_periods] = keyword[True] )
identifier[sentence] = identifier[word_tokenizer] ( identifier[sentence] )
identifier[sentence] =[ identifier[s] . identifier[lower] () keyword[for] identifier[s] keyword[in] identifier[sentence] ]
identifier[sentence] =[ identifier[w] keyword[for] identifier[w] keyword[in] identifier[sentence] keyword[if] identifier[w] ]
keyword[if] identifier[language] == literal[string] :
identifier[sentence] =[ identifier[w] [ literal[int] :] keyword[if] identifier[w] . identifier[startswith] ( literal[string] ) keyword[else] identifier[w] keyword[for] identifier[w] keyword[in] identifier[sentence] ]
keyword[if] identifier[stops] :
identifier[sentence] =[ identifier[w] keyword[for] identifier[w] keyword[in] identifier[sentence] keyword[if] identifier[w] keyword[not] keyword[in] identifier[stops] ]
identifier[sentence] =[ identifier[w] keyword[for] identifier[w] keyword[in] identifier[sentence] keyword[if] identifier[len] ( identifier[w] )> literal[int] ]
keyword[if] identifier[sentence] :
identifier[sentence] = identifier[sentence]
keyword[if] identifier[lemmatize] :
identifier[sentence] = identifier[lemmatizer] . identifier[lemmatize] ( identifier[sentence] )
keyword[if] identifier[sentence] keyword[and] identifier[language] == literal[string] :
identifier[sentence] =[ identifier[jv_replacer] . identifier[replace] ( identifier[word] ) keyword[for] identifier[word] keyword[in] identifier[sentence] ]
keyword[if] identifier[sentence] :
keyword[yield] identifier[sentence] | def gen_docs(corpus, lemmatize, rm_stops):
"""Open and process files from a corpus. Return a list of sentences for an author. Each sentence
is itself a list of tokenized words.
"""
assert corpus in ['phi5', 'tlg']
if corpus == 'phi5':
language = 'latin'
filepaths = assemble_phi5_author_filepaths()
jv_replacer = JVReplacer()
text_cleaner = phi5_plaintext_cleanup
word_tokenizer = nltk_tokenize_words
if rm_stops:
stops = latin_stops # depends on [control=['if'], data=[]]
else:
stops = None # depends on [control=['if'], data=[]]
elif corpus == 'tlg':
language = 'greek'
filepaths = assemble_tlg_author_filepaths()
text_cleaner = tlg_plaintext_cleanup
word_tokenizer = nltk_tokenize_words
if rm_stops:
stops = latin_stops # depends on [control=['if'], data=[]]
else:
stops = None # depends on [control=['if'], data=[]]
if lemmatize:
lemmatizer = LemmaReplacer(language) # depends on [control=['if'], data=[]]
sent_tokenizer = TokenizeSentence(language)
for filepath in filepaths:
with open(filepath) as f:
text = f.read() # depends on [control=['with'], data=['f']]
# light first-pass cleanup, before sentence tokenization (which relies on punctuation)
text = text_cleaner(text, rm_punctuation=False, rm_periods=False)
sent_tokens = sent_tokenizer.tokenize_sentences(text)
# doc_sentences = []
for sentence in sent_tokens:
# a second cleanup at sentence-level, to rm all punctuation
sentence = text_cleaner(sentence, rm_punctuation=True, rm_periods=True)
sentence = word_tokenizer(sentence)
sentence = [s.lower() for s in sentence]
sentence = [w for w in sentence if w]
if language == 'latin':
sentence = [w[1:] if w.startswith('-') else w for w in sentence] # depends on [control=['if'], data=[]]
if stops:
sentence = [w for w in sentence if w not in stops] # depends on [control=['if'], data=[]]
sentence = [w for w in sentence if len(w) > 1] # rm short words
if sentence:
sentence = sentence # depends on [control=['if'], data=[]]
if lemmatize:
sentence = lemmatizer.lemmatize(sentence) # depends on [control=['if'], data=[]]
if sentence and language == 'latin':
sentence = [jv_replacer.replace(word) for word in sentence] # depends on [control=['if'], data=[]]
if sentence:
yield sentence # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sentence']] # depends on [control=['for'], data=['filepath']] |
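A hedged consumption sketch: gen_docs yields lists of tokens, which is exactly the shape gensim's Word2Vec expects; this assumes the CLTK corpora and stopword lists the function relies on are installed.
from gensim.models import Word2Vec

sentences = list(gen_docs('phi5', lemmatize=True, rm_stops=True))
model = Word2Vec(sentences, vector_size=100, window=5, min_count=5)  # gensim 4.x API
print(len(model.wv))                                # size of the learned vocabulary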
def files_in_dir(path, extension):
"""Enumartes the files in path with the given extension"""
ends = '.{0}'.format(extension)
return (f for f in os.listdir(path) if f.endswith(ends)) | def function[files_in_dir, parameter[path, extension]]:
    constant[Enumerates the files in path with the given extension]
variable[ends] assign[=] call[constant[.{0}].format, parameter[name[extension]]]
return[<ast.GeneratorExp object at 0x7da1b20efeb0>] | keyword[def] identifier[files_in_dir] ( identifier[path] , identifier[extension] ):
literal[string]
identifier[ends] = literal[string] . identifier[format] ( identifier[extension] )
keyword[return] ( identifier[f] keyword[for] identifier[f] keyword[in] identifier[os] . identifier[listdir] ( identifier[path] ) keyword[if] identifier[f] . identifier[endswith] ( identifier[ends] )) | def files_in_dir(path, extension):
"""Enumartes the files in path with the given extension"""
ends = '.{0}'.format(extension)
return (f for f in os.listdir(path) if f.endswith(ends)) |
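A one-line usage of the generator above; note the match is a pure suffix test on '.<extension>'.
import os

for name in files_in_dir('.', 'txt'):               # every *.txt in the current directory
    print(os.path.join('.', name))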
def get_intercept_only_candidate_models(data, weights_col):
""" Return a list of a single candidate intercept-only model.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value``.
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
List containing a single intercept-only candidate model.
"""
model_type = "intercept_only"
formula = "meter_value ~ 1"
if weights_col is None:
weights = 1
else:
weights = data[weights_col]
try:
model = smf.wls(formula=formula, data=data, weights=weights)
except Exception as e:
return [get_fit_failed_candidate_model(model_type, formula)]
result = model.fit()
# CalTrack 3.3.1.3
model_params = {"intercept": result.params["Intercept"]}
model_warnings = []
# CalTrack 3.4.3.2
for parameter in ["intercept"]:
model_warnings.extend(
get_parameter_negative_warning(model_type, model_params, parameter)
)
if len(model_warnings) > 0:
status = "DISQUALIFIED"
else:
status = "QUALIFIED"
return [
CalTRACKUsagePerDayCandidateModel(
model_type=model_type,
formula=formula,
status=status,
warnings=model_warnings,
model_params=model_params,
model=model,
result=result,
r_squared_adj=0,
)
] | def function[get_intercept_only_candidate_models, parameter[data, weights_col]]:
    constant[ Return a list containing a single intercept-only candidate model.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value``.
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
List containing a single intercept-only candidate model.
]
variable[model_type] assign[=] constant[intercept_only]
variable[formula] assign[=] constant[meter_value ~ 1]
if compare[name[weights_col] is constant[None]] begin[:]
variable[weights] assign[=] constant[1]
<ast.Try object at 0x7da1b08e51e0>
variable[result] assign[=] call[name[model].fit, parameter[]]
variable[model_params] assign[=] dictionary[[<ast.Constant object at 0x7da1b08e5e10>], [<ast.Subscript object at 0x7da1b08e5660>]]
variable[model_warnings] assign[=] list[[]]
for taget[name[parameter]] in starred[list[[<ast.Constant object at 0x7da1b08e78e0>]]] begin[:]
call[name[model_warnings].extend, parameter[call[name[get_parameter_negative_warning], parameter[name[model_type], name[model_params], name[parameter]]]]]
if compare[call[name[len], parameter[name[model_warnings]]] greater[>] constant[0]] begin[:]
variable[status] assign[=] constant[DISQUALIFIED]
return[list[[<ast.Call object at 0x7da20c6c6bc0>]]] | keyword[def] identifier[get_intercept_only_candidate_models] ( identifier[data] , identifier[weights_col] ):
literal[string]
identifier[model_type] = literal[string]
identifier[formula] = literal[string]
keyword[if] identifier[weights_col] keyword[is] keyword[None] :
identifier[weights] = literal[int]
keyword[else] :
identifier[weights] = identifier[data] [ identifier[weights_col] ]
keyword[try] :
identifier[model] = identifier[smf] . identifier[wls] ( identifier[formula] = identifier[formula] , identifier[data] = identifier[data] , identifier[weights] = identifier[weights] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[return] [ identifier[get_fit_failed_candidate_model] ( identifier[model_type] , identifier[formula] )]
identifier[result] = identifier[model] . identifier[fit] ()
identifier[model_params] ={ literal[string] : identifier[result] . identifier[params] [ literal[string] ]}
identifier[model_warnings] =[]
keyword[for] identifier[parameter] keyword[in] [ literal[string] ]:
identifier[model_warnings] . identifier[extend] (
identifier[get_parameter_negative_warning] ( identifier[model_type] , identifier[model_params] , identifier[parameter] )
)
keyword[if] identifier[len] ( identifier[model_warnings] )> literal[int] :
identifier[status] = literal[string]
keyword[else] :
identifier[status] = literal[string]
keyword[return] [
identifier[CalTRACKUsagePerDayCandidateModel] (
identifier[model_type] = identifier[model_type] ,
identifier[formula] = identifier[formula] ,
identifier[status] = identifier[status] ,
identifier[warnings] = identifier[model_warnings] ,
identifier[model_params] = identifier[model_params] ,
identifier[model] = identifier[model] ,
identifier[result] = identifier[result] ,
identifier[r_squared_adj] = literal[int] ,
)
] | def get_intercept_only_candidate_models(data, weights_col):
""" Return a list of a single candidate intercept-only model.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value``.
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
List containing a single intercept-only candidate model.
"""
model_type = 'intercept_only'
formula = 'meter_value ~ 1'
if weights_col is None:
weights = 1 # depends on [control=['if'], data=[]]
else:
weights = data[weights_col]
try:
model = smf.wls(formula=formula, data=data, weights=weights) # depends on [control=['try'], data=[]]
except Exception as e:
return [get_fit_failed_candidate_model(model_type, formula)] # depends on [control=['except'], data=[]]
result = model.fit()
# CalTrack 3.3.1.3
model_params = {'intercept': result.params['Intercept']}
model_warnings = []
# CalTrack 3.4.3.2
for parameter in ['intercept']:
model_warnings.extend(get_parameter_negative_warning(model_type, model_params, parameter)) # depends on [control=['for'], data=['parameter']]
if len(model_warnings) > 0:
status = 'DISQUALIFIED' # depends on [control=['if'], data=[]]
else:
status = 'QUALIFIED'
return [CalTRACKUsagePerDayCandidateModel(model_type=model_type, formula=formula, status=status, warnings=model_warnings, model_params=model_params, model=model, result=result, r_squared_adj=0)] |
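A hedged sketch of the helper above on a tiny synthetic meter series (values fabricated purely for illustration), assuming the module's statsmodels and eemeter dependencies are importable.
import pandas as pd

data = pd.DataFrame({'meter_value': [10.0, 12.5, 11.0, 9.5]})   # synthetic toy series
candidates = get_intercept_only_candidate_models(data, weights_col=None)
best = candidates[0]
print(best.status, best.model_params['intercept'])  # QUALIFIED, intercept == mean (10.75)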
def admin_view_reverse_fk_links(modeladmin: ModelAdmin,
obj,
reverse_fk_set_field: str,
missing: str = "(None)",
use_str: bool = True,
separator: str = "<br>",
view_type: str = "change",
current_app: str = None) -> str:
"""
    Get multiple Django admin site URLs for multiple objects linked to our
object of interest (where the other objects have foreign keys to our
object).
"""
if not hasattr(obj, reverse_fk_set_field):
return missing
linked_objs = getattr(obj, reverse_fk_set_field).all()
if not linked_objs:
return missing
first = linked_objs[0]
app_name = first._meta.app_label.lower()
model_name = first._meta.object_name.lower()
viewname = "admin:{}_{}_{}".format(app_name, model_name, view_type)
if current_app is None:
current_app = modeladmin.admin_site.name
links = []
for linked_obj in linked_objs:
# log.debug("linked_obj: {}", linked_obj)
url = reverse(viewname, args=[linked_obj.pk], current_app=current_app)
if use_str:
label = escape(str(linked_obj))
else:
label = "{} {}".format(escape(linked_obj._meta.object_name),
linked_obj.pk)
links.append('<a href="{}">{}</a>'.format(url, label))
# log.debug("links: {}", links)
return separator.join(links) | def function[admin_view_reverse_fk_links, parameter[modeladmin, obj, reverse_fk_set_field, missing, use_str, separator, view_type, current_app]]:
constant[
    Get multiple Django admin site URLs for multiple objects linked to our
object of interest (where the other objects have foreign keys to our
object).
]
if <ast.UnaryOp object at 0x7da1b18e7a60> begin[:]
return[name[missing]]
variable[linked_objs] assign[=] call[call[name[getattr], parameter[name[obj], name[reverse_fk_set_field]]].all, parameter[]]
if <ast.UnaryOp object at 0x7da1b18e5ed0> begin[:]
return[name[missing]]
variable[first] assign[=] call[name[linked_objs]][constant[0]]
variable[app_name] assign[=] call[name[first]._meta.app_label.lower, parameter[]]
variable[model_name] assign[=] call[name[first]._meta.object_name.lower, parameter[]]
variable[viewname] assign[=] call[constant[admin:{}_{}_{}].format, parameter[name[app_name], name[model_name], name[view_type]]]
if compare[name[current_app] is constant[None]] begin[:]
variable[current_app] assign[=] name[modeladmin].admin_site.name
variable[links] assign[=] list[[]]
for taget[name[linked_obj]] in starred[name[linked_objs]] begin[:]
variable[url] assign[=] call[name[reverse], parameter[name[viewname]]]
if name[use_str] begin[:]
variable[label] assign[=] call[name[escape], parameter[call[name[str], parameter[name[linked_obj]]]]]
call[name[links].append, parameter[call[constant[<a href="{}">{}</a>].format, parameter[name[url], name[label]]]]]
return[call[name[separator].join, parameter[name[links]]]] | keyword[def] identifier[admin_view_reverse_fk_links] ( identifier[modeladmin] : identifier[ModelAdmin] ,
identifier[obj] ,
identifier[reverse_fk_set_field] : identifier[str] ,
identifier[missing] : identifier[str] = literal[string] ,
identifier[use_str] : identifier[bool] = keyword[True] ,
identifier[separator] : identifier[str] = literal[string] ,
identifier[view_type] : identifier[str] = literal[string] ,
identifier[current_app] : identifier[str] = keyword[None] )-> identifier[str] :
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[obj] , identifier[reverse_fk_set_field] ):
keyword[return] identifier[missing]
identifier[linked_objs] = identifier[getattr] ( identifier[obj] , identifier[reverse_fk_set_field] ). identifier[all] ()
keyword[if] keyword[not] identifier[linked_objs] :
keyword[return] identifier[missing]
identifier[first] = identifier[linked_objs] [ literal[int] ]
identifier[app_name] = identifier[first] . identifier[_meta] . identifier[app_label] . identifier[lower] ()
identifier[model_name] = identifier[first] . identifier[_meta] . identifier[object_name] . identifier[lower] ()
identifier[viewname] = literal[string] . identifier[format] ( identifier[app_name] , identifier[model_name] , identifier[view_type] )
keyword[if] identifier[current_app] keyword[is] keyword[None] :
identifier[current_app] = identifier[modeladmin] . identifier[admin_site] . identifier[name]
identifier[links] =[]
keyword[for] identifier[linked_obj] keyword[in] identifier[linked_objs] :
identifier[url] = identifier[reverse] ( identifier[viewname] , identifier[args] =[ identifier[linked_obj] . identifier[pk] ], identifier[current_app] = identifier[current_app] )
keyword[if] identifier[use_str] :
identifier[label] = identifier[escape] ( identifier[str] ( identifier[linked_obj] ))
keyword[else] :
identifier[label] = literal[string] . identifier[format] ( identifier[escape] ( identifier[linked_obj] . identifier[_meta] . identifier[object_name] ),
identifier[linked_obj] . identifier[pk] )
identifier[links] . identifier[append] ( literal[string] . identifier[format] ( identifier[url] , identifier[label] ))
keyword[return] identifier[separator] . identifier[join] ( identifier[links] ) | def admin_view_reverse_fk_links(modeladmin: ModelAdmin, obj, reverse_fk_set_field: str, missing: str='(None)', use_str: bool=True, separator: str='<br>', view_type: str='change', current_app: str=None) -> str:
"""
    Get multiple Django admin site URLs for multiple objects linked to our
object of interest (where the other objects have foreign keys to our
object).
"""
if not hasattr(obj, reverse_fk_set_field):
return missing # depends on [control=['if'], data=[]]
linked_objs = getattr(obj, reverse_fk_set_field).all()
if not linked_objs:
return missing # depends on [control=['if'], data=[]]
first = linked_objs[0]
app_name = first._meta.app_label.lower()
model_name = first._meta.object_name.lower()
viewname = 'admin:{}_{}_{}'.format(app_name, model_name, view_type)
if current_app is None:
current_app = modeladmin.admin_site.name # depends on [control=['if'], data=['current_app']]
links = []
for linked_obj in linked_objs:
# log.debug("linked_obj: {}", linked_obj)
url = reverse(viewname, args=[linked_obj.pk], current_app=current_app)
if use_str:
label = escape(str(linked_obj)) # depends on [control=['if'], data=[]]
else:
label = '{} {}'.format(escape(linked_obj._meta.object_name), linked_obj.pk)
links.append('<a href="{}">{}</a>'.format(url, label)) # depends on [control=['for'], data=['linked_obj']]
# log.debug("links: {}", links)
return separator.join(links) |
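A sketch of wiring the helper above into a Django ModelAdmin column; the Author model and its 'book_set' reverse accessor are illustrative, and mark_safe is needed because the helper returns raw HTML.
from django.contrib import admin
from django.utils.safestring import mark_safe

class AuthorAdmin(admin.ModelAdmin):                # hypothetical admin for an Author model
    list_display = ('name', 'books')

    def books(self, obj):
        # one <a> link per related Book, separated by <br>
        return mark_safe(admin_view_reverse_fk_links(self, obj, 'book_set'))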
def seek(self, recIndex):
"""Seek to the beginning of the record identified by its
record index. A succeeding read will load this record in
memory.
Args::
recIndex index of the record in the vdata; numbering
starts at 0. Legal values range from 0
(start of vdata) to the current number of
records (at end of vdata).
Returns::
record index
An exception is raised if an attempt is made to seek beyond the
last record.
The C API prohibits seeking past the next-to-last record,
forcing one to read the last record to advance to the end
of the vdata. The python API removes this limitation.
Seeking to the end of the vdata can also be done by calling
method ``seekend()``.
C library equivalent : VSseek
"""
if recIndex > self._nrecs - 1:
if recIndex == self._nrecs:
return self.seekend()
else:
raise HDF4Error("attempt to seek past last record")
n = _C.VSseek(self._id, recIndex)
_checkErr('seek', n, 'cannot seek')
self._offset = n
return n | def function[seek, parameter[self, recIndex]]:
constant[Seek to the beginning of the record identified by its
record index. A succeeding read will load this record in
memory.
Args::
recIndex index of the record in the vdata; numbering
starts at 0. Legal values range from 0
(start of vdata) to the current number of
records (at end of vdata).
Returns::
record index
An exception is raised if an attempt is made to seek beyond the
last record.
The C API prohibits seeking past the next-to-last record,
forcing one to read the last record to advance to the end
of the vdata. The python API removes this limitation.
Seeking to the end of the vdata can also be done by calling
method ``seekend()``.
C library equivalent : VSseek
]
if compare[name[recIndex] greater[>] binary_operation[name[self]._nrecs - constant[1]]] begin[:]
if compare[name[recIndex] equal[==] name[self]._nrecs] begin[:]
return[call[name[self].seekend, parameter[]]]
variable[n] assign[=] call[name[_C].VSseek, parameter[name[self]._id, name[recIndex]]]
call[name[_checkErr], parameter[constant[seek], name[n], constant[cannot seek]]]
name[self]._offset assign[=] name[n]
return[name[n]] | keyword[def] identifier[seek] ( identifier[self] , identifier[recIndex] ):
literal[string]
keyword[if] identifier[recIndex] > identifier[self] . identifier[_nrecs] - literal[int] :
keyword[if] identifier[recIndex] == identifier[self] . identifier[_nrecs] :
keyword[return] identifier[self] . identifier[seekend] ()
keyword[else] :
keyword[raise] identifier[HDF4Error] ( literal[string] )
identifier[n] = identifier[_C] . identifier[VSseek] ( identifier[self] . identifier[_id] , identifier[recIndex] )
identifier[_checkErr] ( literal[string] , identifier[n] , literal[string] )
identifier[self] . identifier[_offset] = identifier[n]
keyword[return] identifier[n] | def seek(self, recIndex):
"""Seek to the beginning of the record identified by its
record index. A succeeding read will load this record in
memory.
Args::
recIndex index of the record in the vdata; numbering
starts at 0. Legal values range from 0
(start of vdata) to the current number of
records (at end of vdata).
Returns::
record index
An exception is raised if an attempt is made to seek beyond the
last record.
The C API prohibits seeking past the next-to-last record,
forcing one to read the last record to advance to the end
of the vdata. The python API removes this limitation.
Seeking to the end of the vdata can also be done by calling
method ``seekend()``.
C library equivalent : VSseek
"""
if recIndex > self._nrecs - 1:
if recIndex == self._nrecs:
return self.seekend() # depends on [control=['if'], data=[]]
else:
raise HDF4Error('attempt to seek past last record') # depends on [control=['if'], data=['recIndex']]
n = _C.VSseek(self._id, recIndex)
_checkErr('seek', n, 'cannot seek')
self._offset = n
return n |
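A brief usage sketch for the seek method above, following the standard pyhdf vdata workflow; the file name 'data.hdf' and the vdata name 'INVENTORY' are illustrative assumptions, everything else is the documented pyhdf API.

from pyhdf.HDF import HDF

hdf = HDF('data.hdf')          # open an existing HDF4 file (assumed path)
vs = hdf.vstart()              # initialize the vdata interface
vd = vs.attach('INVENTORY')    # attach to a vdata by name (assumed name)
n_recs = vd.inquire()[0]       # total number of records
vd.seek(2)                     # position on record index 2
print(vd.read(1))              # read that single record
vd.seek(n_recs)                # same as seekend(): allowed by the python API
vd.detach()                    # release the vdata, then close everything
vs.end()
hdf.close()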
def read_node_label_matrix(file_path, separator, numbering="matlab"):
"""
    Reads node-label pairs in csv format and returns a sparse node-label matrix;
    not every node of the full graph needs to be labelled.
    Inputs: - file_path: The path where the node-label file is stored.
            - separator: The delimiter among values (e.g. ",", "\t", " ")
            - numbering: Array numbering style: * "matlab"
                                                * "c"
    Outputs: - node_label_matrix: The node-label associations as a SciPy CSR sparse matrix.
- number_of_categories: The number of categories/classes the nodes may belong to.
- labelled_node_indices: A NumPy array containing the labelled node indices.
"""
# Open file
file_row_generator = get_file_row_generator(file_path, separator)
file_row = next(file_row_generator)
    number_of_rows = int(file_row[1])  # cast: coo_matrix's shape expects integers
number_of_categories = int(file_row[3])
# Initialize lists for row and column sparse matrix arguments
row = list()
col = list()
append_row = row.append
append_col = col.append
# Populate the arrays
for file_row in file_row_generator:
node = np.int64(file_row[0])
label = np.int64(file_row[1])
# Add label
append_row(node)
append_col(label)
labelled_node_indices = np.array(list(set(row)))
row = np.array(row, dtype=np.int64)
col = np.array(col, dtype=np.int64)
data = np.ones_like(row, dtype=np.float64)
if numbering == "matlab":
row -= 1
col -= 1
labelled_node_indices -= 1
elif numbering == "c":
pass
else:
print("Invalid numbering style.")
raise RuntimeError
    # Form sparse node-label matrix
node_label_matrix = spsp.coo_matrix((data, (row, col)), shape=(number_of_rows, number_of_categories))
node_label_matrix = node_label_matrix.tocsr()
return node_label_matrix, number_of_categories, labelled_node_indices | def function[read_node_label_matrix, parameter[file_path, separator, numbering]]:
constant[
    Reads node-label pairs in csv format and returns a sparse node-label matrix;
    not every node of the full graph needs to be labelled.
    Inputs: - file_path: The path where the node-label file is stored.
            - separator: The delimiter among values (e.g. ",", "\t", " ")
            - numbering: Array numbering style: * "matlab"
                                                * "c"
    Outputs: - node_label_matrix: The node-label associations as a SciPy CSR sparse matrix.
- number_of_categories: The number of categories/classes the nodes may belong to.
- labelled_node_indices: A NumPy array containing the labelled node indices.
]
variable[file_row_generator] assign[=] call[name[get_file_row_generator], parameter[name[file_path], name[separator]]]
variable[file_row] assign[=] call[name[next], parameter[name[file_row_generator]]]
variable[number_of_rows] assign[=] call[name[int], parameter[call[name[file_row]][constant[1]]]]
variable[number_of_categories] assign[=] call[name[int], parameter[call[name[file_row]][constant[3]]]]
variable[row] assign[=] call[name[list], parameter[]]
variable[col] assign[=] call[name[list], parameter[]]
variable[append_row] assign[=] name[row].append
variable[append_col] assign[=] name[col].append
for taget[name[file_row]] in starred[name[file_row_generator]] begin[:]
variable[node] assign[=] call[name[np].int64, parameter[call[name[file_row]][constant[0]]]]
variable[label] assign[=] call[name[np].int64, parameter[call[name[file_row]][constant[1]]]]
call[name[append_row], parameter[name[node]]]
call[name[append_col], parameter[name[label]]]
variable[labelled_node_indices] assign[=] call[name[np].array, parameter[call[name[list], parameter[call[name[set], parameter[name[row]]]]]]]
variable[row] assign[=] call[name[np].array, parameter[name[row]]]
variable[col] assign[=] call[name[np].array, parameter[name[col]]]
variable[data] assign[=] call[name[np].ones_like, parameter[name[row]]]
if compare[name[numbering] equal[==] constant[matlab]] begin[:]
<ast.AugAssign object at 0x7da1b1be5b70>
<ast.AugAssign object at 0x7da1b1be4280>
<ast.AugAssign object at 0x7da1b1be75b0>
variable[node_label_matrix] assign[=] call[name[spsp].coo_matrix, parameter[tuple[[<ast.Name object at 0x7da1b1be5f00>, <ast.Tuple object at 0x7da1b1be43a0>]]]]
variable[node_label_matrix] assign[=] call[name[node_label_matrix].tocsr, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b1be4970>, <ast.Name object at 0x7da1b1be4cd0>, <ast.Name object at 0x7da1b1be4fa0>]]] | keyword[def] identifier[read_node_label_matrix] ( identifier[file_path] , identifier[separator] , identifier[numbering] = literal[string] ):
literal[string]
identifier[file_row_generator] = identifier[get_file_row_generator] ( identifier[file_path] , identifier[separator] )
identifier[file_row] = identifier[next] ( identifier[file_row_generator] )
identifier[number_of_rows] = identifier[int] ( identifier[file_row] [ literal[int] ])
identifier[number_of_categories] = identifier[int] ( identifier[file_row] [ literal[int] ])
identifier[row] = identifier[list] ()
identifier[col] = identifier[list] ()
identifier[append_row] = identifier[row] . identifier[append]
identifier[append_col] = identifier[col] . identifier[append]
keyword[for] identifier[file_row] keyword[in] identifier[file_row_generator] :
identifier[node] = identifier[np] . identifier[int64] ( identifier[file_row] [ literal[int] ])
identifier[label] = identifier[np] . identifier[int64] ( identifier[file_row] [ literal[int] ])
identifier[append_row] ( identifier[node] )
identifier[append_col] ( identifier[label] )
identifier[labelled_node_indices] = identifier[np] . identifier[array] ( identifier[list] ( identifier[set] ( identifier[row] )))
identifier[row] = identifier[np] . identifier[array] ( identifier[row] , identifier[dtype] = identifier[np] . identifier[int64] )
identifier[col] = identifier[np] . identifier[array] ( identifier[col] , identifier[dtype] = identifier[np] . identifier[int64] )
identifier[data] = identifier[np] . identifier[ones_like] ( identifier[row] , identifier[dtype] = identifier[np] . identifier[float64] )
keyword[if] identifier[numbering] == literal[string] :
identifier[row] -= literal[int]
identifier[col] -= literal[int]
identifier[labelled_node_indices] -= literal[int]
keyword[elif] identifier[numbering] == literal[string] :
keyword[pass]
keyword[else] :
identifier[print] ( literal[string] )
keyword[raise] identifier[RuntimeError]
identifier[node_label_matrix] = identifier[spsp] . identifier[coo_matrix] (( identifier[data] ,( identifier[row] , identifier[col] )), identifier[shape] =( identifier[number_of_rows] , identifier[number_of_categories] ))
identifier[node_label_matrix] = identifier[node_label_matrix] . identifier[tocsr] ()
keyword[return] identifier[node_label_matrix] , identifier[number_of_categories] , identifier[labelled_node_indices] | def read_node_label_matrix(file_path, separator, numbering='matlab'):
"""
    Reads node-label pairs in csv format and returns a sparse node-label matrix;
    not every node of the full graph needs to be labelled.
    Inputs: - file_path: The path where the node-label file is stored.
            - separator: The delimiter among values (e.g. ",", "\t", " ")
            - numbering: Array numbering style: * "matlab"
                                                * "c"
    Outputs: - node_label_matrix: The node-label associations as a SciPy CSR sparse matrix.
- number_of_categories: The number of categories/classes the nodes may belong to.
- labelled_node_indices: A NumPy array containing the labelled node indices.
"""
# Open file
file_row_generator = get_file_row_generator(file_path, separator)
file_row = next(file_row_generator)
    number_of_rows = int(file_row[1])  # cast: coo_matrix's shape expects integers
number_of_categories = int(file_row[3])
# Initialize lists for row and column sparse matrix arguments
row = list()
col = list()
append_row = row.append
append_col = col.append
# Populate the arrays
for file_row in file_row_generator:
node = np.int64(file_row[0])
label = np.int64(file_row[1])
# Add label
append_row(node)
append_col(label) # depends on [control=['for'], data=['file_row']]
labelled_node_indices = np.array(list(set(row)))
row = np.array(row, dtype=np.int64)
col = np.array(col, dtype=np.int64)
data = np.ones_like(row, dtype=np.float64)
if numbering == 'matlab':
row -= 1
col -= 1
labelled_node_indices -= 1 # depends on [control=['if'], data=[]]
elif numbering == 'c':
pass # depends on [control=['if'], data=[]]
else:
print('Invalid numbering style.')
raise RuntimeError
    # Form sparse node-label matrix
node_label_matrix = spsp.coo_matrix((data, (row, col)), shape=(number_of_rows, number_of_categories))
node_label_matrix = node_label_matrix.tocsr()
return (node_label_matrix, number_of_categories, labelled_node_indices) |
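A hedged usage sketch for read_node_label_matrix. The header layout is inferred from the parser above: field 1 carries the node count and field 3 the category count, after which each line holds one node,label pair (1-based ids under the default "matlab" style). The file name and contents are illustrative.

# node_labels.csv (assumed contents):
#   nodes,1000,categories,40
#   1,3
#   1,17
#   2,5
node_label_matrix, k, labelled = read_node_label_matrix('node_labels.csv', ',')
print(node_label_matrix.shape)   # (1000, 40), CSR sparse
print(k)                         # 40
print(labelled[:5])              # zero-based indices of labelled nodes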
def remove_entity(self, name):
"""
Remove an entity from the model.
:param name: The name of the entity to remove.
"""
entity_to_remove = None
for e in self.entities:
if e.name == name:
entity_to_remove = e
if entity_to_remove is not None:
self.entities.remove(entity_to_remove) | def function[remove_entity, parameter[self, name]]:
constant[
Remove an entity from the model.
:param name: The name of the entity to remove.
]
variable[entity_to_remove] assign[=] constant[None]
for taget[name[e]] in starred[name[self].entities] begin[:]
if compare[name[e].name equal[==] name[name]] begin[:]
variable[entity_to_remove] assign[=] name[e]
if compare[name[entity_to_remove] is_not constant[None]] begin[:]
call[name[self].entities.remove, parameter[name[entity_to_remove]]] | keyword[def] identifier[remove_entity] ( identifier[self] , identifier[name] ):
literal[string]
identifier[entity_to_remove] = keyword[None]
keyword[for] identifier[e] keyword[in] identifier[self] . identifier[entities] :
keyword[if] identifier[e] . identifier[name] == identifier[name] :
identifier[entity_to_remove] = identifier[e]
keyword[if] identifier[entity_to_remove] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[entities] . identifier[remove] ( identifier[entity_to_remove] ) | def remove_entity(self, name):
"""
Remove an entity from the model.
:param name: The name of the entity to remove.
"""
entity_to_remove = None
for e in self.entities:
if e.name == name:
entity_to_remove = e # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['e']]
if entity_to_remove is not None:
self.entities.remove(entity_to_remove) # depends on [control=['if'], data=['entity_to_remove']] |
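A minimal illustration of remove_entity on a hypothetical container that exposes an entities list of named objects; every name here except the method itself is an assumption.

class Entity:
    def __init__(self, name):
        self.name = name

class Model:
    remove_entity = remove_entity      # reuse the function above as a method

    def __init__(self):
        self.entities = [Entity('pump'), Entity('valve')]

m = Model()
m.remove_entity('pump')                # drops the matching entity, if present
print([e.name for e in m.entities])    # -> ['valve']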