code | code_sememe | token_type | code_dependency |
---|---|---|---|
def _single_quote_handler_factory(on_single_quote, on_other):
"""Generates handlers used for classifying tokens that begin with one or more single quotes.
Args:
on_single_quote (callable): Called when another single quote is found. Accepts the current character's ordinal,
the current context, and True if the token is a field name; returns a Transition.
on_other (callable): Called when any character other than a single quote is found. Accepts the current
character's ordinal, the current context, and True if the token is a field name; returns a Transition.
"""
@coroutine
def single_quote_handler(c, ctx, is_field_name=False):
assert c == _SINGLE_QUOTE
c, self = yield
if c == _SINGLE_QUOTE and not _is_escaped(c):
yield on_single_quote(c, ctx, is_field_name)
else:
ctx.set_unicode(quoted_text=True)
yield on_other(c, ctx, is_field_name)
return single_quote_handler | def function[_single_quote_handler_factory, parameter[on_single_quote, on_other]]:
constant[Generates handlers used for classifying tokens that begin with one or more single quotes.
Args:
on_single_quote (callable): Called when another single quote is found. Accepts the current character's ordinal,
the current context, and True if the token is a field name; returns a Transition.
on_other (callable): Called when any character other than a single quote is found. Accepts the current
character's ordinal, the current context, and True if the token is a field name; returns a Transition.
]
def function[single_quote_handler, parameter[c, ctx, is_field_name]]:
assert[compare[name[c] equal[==] name[_SINGLE_QUOTE]]]
<ast.Tuple object at 0x7da1b15f1f30> assign[=] <ast.Yield object at 0x7da1b15f04f0>
if <ast.BoolOp object at 0x7da1b15f0820> begin[:]
<ast.Yield object at 0x7da18f00dc60>
return[name[single_quote_handler]] | keyword[def] identifier[_single_quote_handler_factory] ( identifier[on_single_quote] , identifier[on_other] ):
literal[string]
@ identifier[coroutine]
keyword[def] identifier[single_quote_handler] ( identifier[c] , identifier[ctx] , identifier[is_field_name] = keyword[False] ):
keyword[assert] identifier[c] == identifier[_SINGLE_QUOTE]
identifier[c] , identifier[self] = keyword[yield]
keyword[if] identifier[c] == identifier[_SINGLE_QUOTE] keyword[and] keyword[not] identifier[_is_escaped] ( identifier[c] ):
keyword[yield] identifier[on_single_quote] ( identifier[c] , identifier[ctx] , identifier[is_field_name] )
keyword[else] :
identifier[ctx] . identifier[set_unicode] ( identifier[quoted_text] = keyword[True] )
keyword[yield] identifier[on_other] ( identifier[c] , identifier[ctx] , identifier[is_field_name] )
keyword[return] identifier[single_quote_handler] | def _single_quote_handler_factory(on_single_quote, on_other):
"""Generates handlers used for classifying tokens that begin with one or more single quotes.
Args:
on_single_quote (callable): Called when another single quote is found. Accepts the current character's ordinal,
the current context, and True if the token is a field name; returns a Transition.
on_other (callable): Called when any character other than a single quote is found. Accepts the current
character's ordinal, the current context, and True if the token is a field name; returns a Transition.
"""
@coroutine
def single_quote_handler(c, ctx, is_field_name=False):
assert c == _SINGLE_QUOTE
(c, self) = (yield)
if c == _SINGLE_QUOTE and (not _is_escaped(c)):
yield on_single_quote(c, ctx, is_field_name) # depends on [control=['if'], data=[]]
else:
ctx.set_unicode(quoted_text=True)
yield on_other(c, ctx, is_field_name)
return single_quote_handler |
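The factory above assumes a `coroutine` decorator that primes each generator-based handler, advancing it to its first `yield` so callers can `send()` into it immediately. A minimal sketch of such a priming decorator (an assumption about the helper, not the library's own definition):

```python
import functools

def coroutine(func):
    """Prime a generator-based coroutine so send() works right away."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)  # advance to the first `yield`
        return gen
    return wrapper
```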
def dayofyear(self):
"""Day of the year index (the first of January = 0...).
For reasons of consistency between leap years and non-leap years,
assuming a daily time step, index 59 is always associated with the
29th of February. Hence, it is missing in non-leap years:
>>> from hydpy import pub
>>> from hydpy.core.indextools import Indexer
>>> pub.timegrids = '27.02.2004', '3.03.2004', '1d'
>>> Indexer().dayofyear
array([57, 58, 59, 60, 61])
>>> pub.timegrids = '27.02.2005', '3.03.2005', '1d'
>>> Indexer().dayofyear
array([57, 58, 60, 61])
"""
def _dayofyear(date):
return (date.dayofyear-1 +
((date.month > 2) and (not date.leapyear)))
return _dayofyear | def function[dayofyear, parameter[self]]:
constant[Day of the year index (the first of January = 0...).
For reasons of consistency between leap years and non-leap years,
assuming a daily time step, index 59 is always associated with the
29th of February. Hence, it is missing in non-leap years:
>>> from hydpy import pub
>>> from hydpy.core.indextools import Indexer
>>> pub.timegrids = '27.02.2004', '3.03.2004', '1d'
>>> Indexer().dayofyear
array([57, 58, 59, 60, 61])
>>> pub.timegrids = '27.02.2005', '3.03.2005', '1d'
>>> Indexer().dayofyear
array([57, 58, 60, 61])
]
def function[_dayofyear, parameter[date]]:
return[binary_operation[binary_operation[name[date].dayofyear - constant[1]] + <ast.BoolOp object at 0x7da20c7cac50>]]
return[name[_dayofyear]] | keyword[def] identifier[dayofyear] ( identifier[self] ):
literal[string]
keyword[def] identifier[_dayofyear] ( identifier[date] ):
keyword[return] ( identifier[date] . identifier[dayofyear] - literal[int] +
(( identifier[date] . identifier[month] > literal[int] ) keyword[and] ( keyword[not] identifier[date] . identifier[leapyear] )))
keyword[return] identifier[_dayofyear] | def dayofyear(self):
"""Day of the year index (the first of January = 0...).
For reasons of consistency between leap years and non-leap years,
assuming a daily time step, index 59 is always associated with the
29th of February. Hence, it is missing in non-leap years:
>>> from hydpy import pub
>>> from hydpy.core.indextools import Indexer
>>> pub.timegrids = '27.02.2004', '3.03.2004', '1d'
>>> Indexer().dayofyear
array([57, 58, 59, 60, 61])
>>> pub.timegrids = '27.02.2005', '3.03.2005', '1d'
>>> Indexer().dayofyear
array([57, 58, 60, 61])
"""
def _dayofyear(date):
return date.dayofyear - 1 + (date.month > 2 and (not date.leapyear))
return _dayofyear |
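The leap-year bookkeeping in `_dayofyear` can be verified in isolation: for non-leap years every day after February is shifted up by one, so index 59 stays reserved for the 29th of February. A standalone sketch of the same rule using only the standard library (`day_index` is a hypothetical helper, not part of hydpy):

```python
from datetime import date

def day_index(d: date) -> int:
    leap = d.year % 4 == 0 and (d.year % 100 != 0 or d.year % 400 == 0)
    # Zero-based day of year; shift post-February days in non-leap years.
    return d.timetuple().tm_yday - 1 + (d.month > 2 and not leap)

assert day_index(date(2004, 2, 29)) == 59  # leap year: index 59 exists
assert day_index(date(2005, 3, 1)) == 60   # non-leap: index 59 is skipped
```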
def set_result(self, res):
"""
The worker thread should call this if it was successful. Unlike normal
functions, which will return None if execution is allowed to fall off
        the end, either set_result() or set_error() must be called, or the
        get()ing side will hang.
"""
self.result = (True, res)
self._lock.release() | def function[set_result, parameter[self, res]]:
constant[
The worker thread should call this if it was successful. Unlike normal
functions, which will return None if execution is allowed to fall off
    the end, either set_result() or set_error() must be called, or the
    get()ing side will hang.
]
name[self].result assign[=] tuple[[<ast.Constant object at 0x7da18f09d3c0>, <ast.Name object at 0x7da18f09efb0>]]
call[name[self]._lock.release, parameter[]] | keyword[def] identifier[set_result] ( identifier[self] , identifier[res] ):
literal[string]
identifier[self] . identifier[result] =( keyword[True] , identifier[res] )
identifier[self] . identifier[_lock] . identifier[release] () | def set_result(self, res):
"""
The worker thread should call this if it was successful. Unlike normal
functions, which will return None if execution is allowed to fall off
        the end, either set_result() or set_error() must be called, or the
        get()ing side will hang.
"""
self.result = (True, res)
self._lock.release() |
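`set_result` is one half of a minimal one-shot future: the lock is acquired up front, the reader blocks on it, and the writer releases it once `self.result` is populated. A sketch of the surrounding class this implies (everything except `set_result` itself is an assumption):

```python
import threading

class OneShotResult:
    """Future-like holder: get() blocks until a result or error is set."""
    def __init__(self):
        self.result = None
        self._lock = threading.Lock()
        self._lock.acquire()  # held until set_result()/set_error() releases it

    def set_result(self, res):
        self.result = (True, res)
        self._lock.release()

    def set_error(self, exc):
        self.result = (False, exc)
        self._lock.release()

    def get(self):
        with self._lock:  # blocks until the worker thread releases the lock
            ok, value = self.result
        if not ok:
            raise value
        return value
```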
def download_sample_and_align(job, sample, inputs, ids):
"""
Downloads the sample and runs BWA-kit
:param JobFunctionWrappingJob job: Passed by Toil automatically
:param tuple(str, list) sample: UUID and URLS for sample
:param Namespace inputs: Contains input arguments
:param dict ids: FileStore IDs for shared inputs
"""
uuid, urls = sample
r1_url, r2_url = urls if len(urls) == 2 else (urls[0], None)
job.fileStore.logToMaster('Downloaded sample: {0}. R1 {1}\nR2 {2}\nStarting BWA Run'.format(uuid, r1_url, r2_url))
# Read fastq samples from file store
ids['r1'] = job.addChildJobFn(download_url_job, r1_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
if r2_url:
ids['r2'] = job.addChildJobFn(download_url_job, r2_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
else:
ids['r2'] = None
# Create config for bwakit
inputs.cores = min(inputs.maxCores, multiprocessing.cpu_count())
inputs.uuid = uuid
config = dict(**vars(inputs)) # Create config as a copy of inputs since it has values we want
config.update(ids) # Overwrite attributes with the FileStoreIDs from ids
config = argparse.Namespace(**config)
# Define and wire job functions
bam_id = job.wrapJobFn(run_bwakit, config, sort=inputs.sort, trim=inputs.trim,
disk=inputs.file_size, cores=inputs.cores)
job.addFollowOn(bam_id)
output_name = uuid + '.bam' + str(inputs.suffix) if inputs.suffix else uuid + '.bam'
if urlparse(inputs.output_dir).scheme == 's3':
bam_id.addChildJobFn(s3am_upload_job, file_id=bam_id.rv(), file_name=output_name, s3_dir=inputs.output_dir,
s3_key_path=inputs.ssec, cores=inputs.cores, disk=inputs.file_size)
else:
        mkdir_p(inputs.output_dir)
bam_id.addChildJobFn(copy_file_job, name=output_name, file_id=bam_id.rv(), output_dir=inputs.output_dir,
disk=inputs.file_size) | def function[download_sample_and_align, parameter[job, sample, inputs, ids]]:
constant[
Downloads the sample and runs BWA-kit
:param JobFunctionWrappingJob job: Passed by Toil automatically
:param tuple(str, list) sample: UUID and URLS for sample
:param Namespace inputs: Contains input arguments
:param dict ids: FileStore IDs for shared inputs
]
<ast.Tuple object at 0x7da2049624d0> assign[=] name[sample]
<ast.Tuple object at 0x7da1b10c5c00> assign[=] <ast.IfExp object at 0x7da1b10c52a0>
call[name[job].fileStore.logToMaster, parameter[call[constant[Downloaded sample: {0}. R1 {1}
R2 {2}
Starting BWA Run].format, parameter[name[uuid], name[r1_url], name[r2_url]]]]]
call[name[ids]][constant[r1]] assign[=] call[call[name[job].addChildJobFn, parameter[name[download_url_job], name[r1_url]]].rv, parameter[]]
if name[r2_url] begin[:]
call[name[ids]][constant[r2]] assign[=] call[call[name[job].addChildJobFn, parameter[name[download_url_job], name[r2_url]]].rv, parameter[]]
name[inputs].cores assign[=] call[name[min], parameter[name[inputs].maxCores, call[name[multiprocessing].cpu_count, parameter[]]]]
name[inputs].uuid assign[=] name[uuid]
variable[config] assign[=] call[name[dict], parameter[]]
call[name[config].update, parameter[name[ids]]]
variable[config] assign[=] call[name[argparse].Namespace, parameter[]]
variable[bam_id] assign[=] call[name[job].wrapJobFn, parameter[name[run_bwakit], name[config]]]
call[name[job].addFollowOn, parameter[name[bam_id]]]
variable[output_name] assign[=] <ast.IfExp object at 0x7da18f810ca0>
if compare[call[name[urlparse], parameter[name[inputs].output_dir]].scheme equal[==] constant[s3]] begin[:]
call[name[bam_id].addChildJobFn, parameter[name[s3am_upload_job]]] | keyword[def] identifier[download_sample_and_align] ( identifier[job] , identifier[sample] , identifier[inputs] , identifier[ids] ):
literal[string]
identifier[uuid] , identifier[urls] = identifier[sample]
identifier[r1_url] , identifier[r2_url] = identifier[urls] keyword[if] identifier[len] ( identifier[urls] )== literal[int] keyword[else] ( identifier[urls] [ literal[int] ], keyword[None] )
identifier[job] . identifier[fileStore] . identifier[logToMaster] ( literal[string] . identifier[format] ( identifier[uuid] , identifier[r1_url] , identifier[r2_url] ))
identifier[ids] [ literal[string] ]= identifier[job] . identifier[addChildJobFn] ( identifier[download_url_job] , identifier[r1_url] , identifier[s3_key_path] = identifier[inputs] . identifier[ssec] , identifier[disk] = identifier[inputs] . identifier[file_size] ). identifier[rv] ()
keyword[if] identifier[r2_url] :
identifier[ids] [ literal[string] ]= identifier[job] . identifier[addChildJobFn] ( identifier[download_url_job] , identifier[r2_url] , identifier[s3_key_path] = identifier[inputs] . identifier[ssec] , identifier[disk] = identifier[inputs] . identifier[file_size] ). identifier[rv] ()
keyword[else] :
identifier[ids] [ literal[string] ]= keyword[None]
identifier[inputs] . identifier[cores] = identifier[min] ( identifier[inputs] . identifier[maxCores] , identifier[multiprocessing] . identifier[cpu_count] ())
identifier[inputs] . identifier[uuid] = identifier[uuid]
identifier[config] = identifier[dict] (** identifier[vars] ( identifier[inputs] ))
identifier[config] . identifier[update] ( identifier[ids] )
identifier[config] = identifier[argparse] . identifier[Namespace] (** identifier[config] )
identifier[bam_id] = identifier[job] . identifier[wrapJobFn] ( identifier[run_bwakit] , identifier[config] , identifier[sort] = identifier[inputs] . identifier[sort] , identifier[trim] = identifier[inputs] . identifier[trim] ,
identifier[disk] = identifier[inputs] . identifier[file_size] , identifier[cores] = identifier[inputs] . identifier[cores] )
identifier[job] . identifier[addFollowOn] ( identifier[bam_id] )
identifier[output_name] = identifier[uuid] + literal[string] + identifier[str] ( identifier[inputs] . identifier[suffix] ) keyword[if] identifier[inputs] . identifier[suffix] keyword[else] identifier[uuid] + literal[string]
keyword[if] identifier[urlparse] ( identifier[inputs] . identifier[output_dir] ). identifier[scheme] == literal[string] :
identifier[bam_id] . identifier[addChildJobFn] ( identifier[s3am_upload_job] , identifier[file_id] = identifier[bam_id] . identifier[rv] (), identifier[file_name] = identifier[output_name] , identifier[s3_dir] = identifier[inputs] . identifier[output_dir] ,
identifier[s3_key_path] = identifier[inputs] . identifier[ssec] , identifier[cores] = identifier[inputs] . identifier[cores] , identifier[disk] = identifier[inputs] . identifier[file_size] )
keyword[else] :
identifier[mkdir_p] ( identifier[inputs] . identifier[output_dir] )
identifier[bam_id] . identifier[addChildJobFn] ( identifier[copy_file_job] , identifier[name] = identifier[output_name] , identifier[file_id] = identifier[bam_id] . identifier[rv] (), identifier[output_dir] = identifier[inputs] . identifier[output_dir] ,
identifier[disk] = identifier[inputs] . identifier[file_size] ) | def download_sample_and_align(job, sample, inputs, ids):
"""
Downloads the sample and runs BWA-kit
:param JobFunctionWrappingJob job: Passed by Toil automatically
:param tuple(str, list) sample: UUID and URLS for sample
:param Namespace inputs: Contains input arguments
:param dict ids: FileStore IDs for shared inputs
"""
(uuid, urls) = sample
(r1_url, r2_url) = urls if len(urls) == 2 else (urls[0], None)
job.fileStore.logToMaster('Downloaded sample: {0}. R1 {1}\nR2 {2}\nStarting BWA Run'.format(uuid, r1_url, r2_url))
# Read fastq samples from file store
ids['r1'] = job.addChildJobFn(download_url_job, r1_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
if r2_url:
ids['r2'] = job.addChildJobFn(download_url_job, r2_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv() # depends on [control=['if'], data=[]]
else:
ids['r2'] = None
# Create config for bwakit
inputs.cores = min(inputs.maxCores, multiprocessing.cpu_count())
inputs.uuid = uuid
config = dict(**vars(inputs)) # Create config as a copy of inputs since it has values we want
config.update(ids) # Overwrite attributes with the FileStoreIDs from ids
config = argparse.Namespace(**config)
# Define and wire job functions
bam_id = job.wrapJobFn(run_bwakit, config, sort=inputs.sort, trim=inputs.trim, disk=inputs.file_size, cores=inputs.cores)
job.addFollowOn(bam_id)
output_name = uuid + '.bam' + str(inputs.suffix) if inputs.suffix else uuid + '.bam'
if urlparse(inputs.output_dir).scheme == 's3':
bam_id.addChildJobFn(s3am_upload_job, file_id=bam_id.rv(), file_name=output_name, s3_dir=inputs.output_dir, s3_key_path=inputs.ssec, cores=inputs.cores, disk=inputs.file_size) # depends on [control=['if'], data=[]]
else:
mkdir_p(inputs.output_dir)
bam_id.addChildJobFn(copy_file_job, name=output_name, file_id=bam_id.rv(), output_dir=inputs.output_dir, disk=inputs.file_size) |
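The config-merging step in the middle of the function — copy the argparse namespace, overlay the FileStore IDs, rewrap — is compact enough to check on its own. A standalone sketch with illustrative field names:

```python
import argparse

inputs = argparse.Namespace(ssec=None, cores=4, uuid='sample-1')
ids = {'r1': 'filestore-id-r1', 'r2': None}

# vars() copies the namespace's attributes; the dict overlay wins on conflicts.
config = argparse.Namespace(**{**vars(inputs), **ids})
print(config.uuid, config.r1)  # sample-1 filestore-id-r1
```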
def ean(self, fmt: Optional[EANFormat] = None) -> str:
"""Generate EAN.
To change EAN format, pass parameter ``fmt`` with needed value of
the enum object :class:`~mimesis.enums.EANFormat`.
:param fmt: Format of EAN.
:return: EAN.
:raises NonEnumerableError: if fmt is not enum EANFormat.
"""
key = self._validate_enum(
item=fmt,
enum=EANFormat,
)
mask = EAN_MASKS[key]
return self.random.custom_code(mask=mask) | def function[ean, parameter[self, fmt]]:
constant[Generate EAN.
To change EAN format, pass parameter ``fmt`` with needed value of
the enum object :class:`~mimesis.enums.EANFormat`.
:param fmt: Format of EAN.
:return: EAN.
:raises NonEnumerableError: if fmt is not enum EANFormat.
]
variable[key] assign[=] call[name[self]._validate_enum, parameter[]]
variable[mask] assign[=] call[name[EAN_MASKS]][name[key]]
return[call[name[self].random.custom_code, parameter[]]] | keyword[def] identifier[ean] ( identifier[self] , identifier[fmt] : identifier[Optional] [ identifier[EANFormat] ]= keyword[None] )-> identifier[str] :
literal[string]
identifier[key] = identifier[self] . identifier[_validate_enum] (
identifier[item] = identifier[fmt] ,
identifier[enum] = identifier[EANFormat] ,
)
identifier[mask] = identifier[EAN_MASKS] [ identifier[key] ]
keyword[return] identifier[self] . identifier[random] . identifier[custom_code] ( identifier[mask] = identifier[mask] ) | def ean(self, fmt: Optional[EANFormat]=None) -> str:
"""Generate EAN.
To change EAN format, pass parameter ``fmt`` with needed value of
the enum object :class:`~mimesis.enums.EANFormat`.
:param fmt: Format of EAN.
:return: EAN.
:raises NonEnumerableError: if fmt is not enum EANFormat.
"""
key = self._validate_enum(item=fmt, enum=EANFormat)
mask = EAN_MASKS[key]
return self.random.custom_code(mask=mask) |
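Usage is just a matter of passing the enum member. Assuming the method lives on mimesis' `Code` provider, as its placement suggests, a call might look like this (treat the exact import path as an assumption):

```python
from mimesis import Code
from mimesis.enums import EANFormat

code = Code()
print(code.ean(EANFormat.EAN13))  # e.g. '3953753179567'
print(code.ean(EANFormat.EAN8))   # e.g. '42546949'
```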
def get_detail_view(self, request, object, opts=None):
"""
Instantiates and returns the view class that will generate the actual
context for this plugin.
"""
view = self.get_view(request, self.view_class, opts)
view.object = object
return view | def function[get_detail_view, parameter[self, request, object, opts]]:
constant[
Instantiates and returns the view class that will generate the actual
context for this plugin.
]
variable[view] assign[=] call[name[self].get_view, parameter[name[request], name[self].view_class, name[opts]]]
name[view].object assign[=] name[object]
return[name[view]] | keyword[def] identifier[get_detail_view] ( identifier[self] , identifier[request] , identifier[object] , identifier[opts] = keyword[None] ):
literal[string]
identifier[view] = identifier[self] . identifier[get_view] ( identifier[request] , identifier[self] . identifier[view_class] , identifier[opts] )
identifier[view] . identifier[object] = identifier[object]
keyword[return] identifier[view] | def get_detail_view(self, request, object, opts=None):
"""
Instantiates and returns the view class that will generate the actual
context for this plugin.
"""
view = self.get_view(request, self.view_class, opts)
view.object = object
return view |
def algorithms(self):
"""
Returns a list of available load balancing algorithms.
"""
if self._algorithms is None:
uri = "/loadbalancers/algorithms"
resp, body = self.method_get(uri)
self._algorithms = [alg["name"] for alg in body["algorithms"]]
return self._algorithms | def function[algorithms, parameter[self]]:
constant[
Returns a list of available load balancing algorithms.
]
if compare[name[self]._algorithms is constant[None]] begin[:]
variable[uri] assign[=] constant[/loadbalancers/algorithms]
<ast.Tuple object at 0x7da1b0558880> assign[=] call[name[self].method_get, parameter[name[uri]]]
name[self]._algorithms assign[=] <ast.ListComp object at 0x7da1b05581f0>
return[name[self]._algorithms] | keyword[def] identifier[algorithms] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_algorithms] keyword[is] keyword[None] :
identifier[uri] = literal[string]
identifier[resp] , identifier[body] = identifier[self] . identifier[method_get] ( identifier[uri] )
identifier[self] . identifier[_algorithms] =[ identifier[alg] [ literal[string] ] keyword[for] identifier[alg] keyword[in] identifier[body] [ literal[string] ]]
keyword[return] identifier[self] . identifier[_algorithms] | def algorithms(self):
"""
Returns a list of available load balancing algorithms.
"""
if self._algorithms is None:
uri = '/loadbalancers/algorithms'
(resp, body) = self.method_get(uri)
self._algorithms = [alg['name'] for alg in body['algorithms']] # depends on [control=['if'], data=[]]
return self._algorithms |
def settings(self):
"""Retrieve and cache settings from server"""
if not self.__settings:
self.__settings = self.request('get', 'settings').json()
return self.__settings | def function[settings, parameter[self]]:
constant[Retrieve and cache settings from server]
if <ast.UnaryOp object at 0x7da2054a58d0> begin[:]
name[self].__settings assign[=] call[call[name[self].request, parameter[constant[get], constant[settings]]].json, parameter[]]
return[name[self].__settings] | keyword[def] identifier[settings] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[__settings] :
identifier[self] . identifier[__settings] = identifier[self] . identifier[request] ( literal[string] , literal[string] ). identifier[json] ()
keyword[return] identifier[self] . identifier[__settings] | def settings(self):
"""Retrieve and cache settings from server"""
if not self.__settings:
self.__settings = self.request('get', 'settings').json() # depends on [control=['if'], data=[]]
return self.__settings |
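Both `algorithms` above and `settings` here are the same lazy-caching shape: compute on first access, then serve the cached value. The pattern generalizes to a small non-data descriptor, sketched here as a hypothetical helper (Python 3.8+ ships an equivalent `functools.cached_property`):

```python
class cached_property:
    """Compute a property once, then cache it on the instance."""
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        value = self.func(obj)
        obj.__dict__[self.name] = value  # shadows the descriptor next time
        return value

class Client:
    @cached_property
    def settings(self):
        print('fetching...')
        return {'theme': 'dark'}

c = Client()
c.settings  # prints 'fetching...', returns the dict
c.settings  # served from the instance dict, no fetch
```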
def _in_stoplist(self, entity):
"""Return True if the entity is in the stoplist."""
start = 0
end = len(entity)
# Adjust boundaries to exclude disallowed prefixes/suffixes
for prefix in IGNORE_PREFIX:
if entity.startswith(prefix):
# print('%s removing %s' % (currenttext, prefix))
start += len(prefix)
break
for suffix in IGNORE_SUFFIX:
if entity.endswith(suffix):
# print('%s removing %s' % (currenttext, suffix))
end -= len(suffix)
break
# Return True if entity has been reduced to nothing by adjusting boundaries
if start >= end:
return True
# Return True if adjusted entity is in the literal stoplist
entity = entity[start:end]
if entity in STOPLIST:
return True
# log.debug('Entity: %s', entity)
for stop_re in STOP_RES:
if re.search(stop_re, entity):
log.debug('Killed: %s', entity)
return True | def function[_in_stoplist, parameter[self, entity]]:
constant[Return True if the entity is in the stoplist.]
variable[start] assign[=] constant[0]
variable[end] assign[=] call[name[len], parameter[name[entity]]]
for taget[name[prefix]] in starred[name[IGNORE_PREFIX]] begin[:]
if call[name[entity].startswith, parameter[name[prefix]]] begin[:]
<ast.AugAssign object at 0x7da18f58cc70>
break
for taget[name[suffix]] in starred[name[IGNORE_SUFFIX]] begin[:]
if call[name[entity].endswith, parameter[name[suffix]]] begin[:]
<ast.AugAssign object at 0x7da18f58d150>
break
if compare[name[start] greater_or_equal[>=] name[end]] begin[:]
return[constant[True]]
variable[entity] assign[=] call[name[entity]][<ast.Slice object at 0x7da18f58d870>]
if compare[name[entity] in name[STOPLIST]] begin[:]
return[constant[True]]
for taget[name[stop_re]] in starred[name[STOP_RES]] begin[:]
if call[name[re].search, parameter[name[stop_re], name[entity]]] begin[:]
call[name[log].debug, parameter[constant[Killed: %s], name[entity]]]
return[constant[True]] | keyword[def] identifier[_in_stoplist] ( identifier[self] , identifier[entity] ):
literal[string]
identifier[start] = literal[int]
identifier[end] = identifier[len] ( identifier[entity] )
keyword[for] identifier[prefix] keyword[in] identifier[IGNORE_PREFIX] :
keyword[if] identifier[entity] . identifier[startswith] ( identifier[prefix] ):
identifier[start] += identifier[len] ( identifier[prefix] )
keyword[break]
keyword[for] identifier[suffix] keyword[in] identifier[IGNORE_SUFFIX] :
keyword[if] identifier[entity] . identifier[endswith] ( identifier[suffix] ):
identifier[end] -= identifier[len] ( identifier[suffix] )
keyword[break]
keyword[if] identifier[start] >= identifier[end] :
keyword[return] keyword[True]
identifier[entity] = identifier[entity] [ identifier[start] : identifier[end] ]
keyword[if] identifier[entity] keyword[in] identifier[STOPLIST] :
keyword[return] keyword[True]
keyword[for] identifier[stop_re] keyword[in] identifier[STOP_RES] :
keyword[if] identifier[re] . identifier[search] ( identifier[stop_re] , identifier[entity] ):
identifier[log] . identifier[debug] ( literal[string] , identifier[entity] )
keyword[return] keyword[True] | def _in_stoplist(self, entity):
"""Return True if the entity is in the stoplist."""
start = 0
end = len(entity)
# Adjust boundaries to exclude disallowed prefixes/suffixes
for prefix in IGNORE_PREFIX:
if entity.startswith(prefix):
# print('%s removing %s' % (currenttext, prefix))
start += len(prefix)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['prefix']]
for suffix in IGNORE_SUFFIX:
if entity.endswith(suffix):
# print('%s removing %s' % (currenttext, suffix))
end -= len(suffix)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['suffix']]
# Return True if entity has been reduced to nothing by adjusting boundaries
if start >= end:
return True # depends on [control=['if'], data=[]]
# Return True if adjusted entity is in the literal stoplist
entity = entity[start:end]
if entity in STOPLIST:
return True # depends on [control=['if'], data=[]]
# log.debug('Entity: %s', entity)
for stop_re in STOP_RES:
if re.search(stop_re, entity):
log.debug('Killed: %s', entity)
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['stop_re']] |
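The boundary trimming is the subtle part: prefixes and suffixes are stripped before the stoplist lookup, so an entity can be killed by what remains. A runnable sketch with toy stoplists (the real IGNORE_PREFIX, IGNORE_SUFFIX, STOPLIST, and STOP_RES live elsewhere in the module):

```python
import re

IGNORE_PREFIX = ['the ']
IGNORE_SUFFIX = [' group']
STOPLIST = {'acid'}
STOP_RES = [r'^\d+$']  # e.g. kill bare numbers

def in_stoplist(entity):
    start, end = 0, len(entity)
    for prefix in IGNORE_PREFIX:
        if entity.startswith(prefix):
            start += len(prefix)
            break
    for suffix in IGNORE_SUFFIX:
        if entity.endswith(suffix):
            end -= len(suffix)
            break
    if start >= end:  # nothing left after trimming
        return True
    entity = entity[start:end]
    return entity in STOPLIST or any(re.search(r, entity) for r in STOP_RES)

print(in_stoplist('the acid group'))  # True: trims to 'acid'
print(in_stoplist('benzene'))         # False
```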
def _get_seqprop_to_seqprop_alignment(self, seqprop1, seqprop2):
"""Return the alignment stored in self.sequence_alignments given a seqprop + another seqprop"""
if isinstance(seqprop1, str):
seqprop1_id = seqprop1
else:
seqprop1_id = seqprop1.id
if isinstance(seqprop2, str):
seqprop2_id = seqprop2
else:
seqprop2_id = seqprop2.id
aln_id = '{}_{}'.format(seqprop1_id, seqprop2_id)
if self.sequence_alignments.has_id(aln_id):
alignment = self.sequence_alignments.get_by_id(aln_id)
return alignment
else:
raise ValueError('{}: sequence alignment not found, please run the alignment first'.format(aln_id)) | def function[_get_seqprop_to_seqprop_alignment, parameter[self, seqprop1, seqprop2]]:
constant[Return the alignment stored in self.sequence_alignments given a seqprop + another seqprop]
if call[name[isinstance], parameter[name[seqprop1], name[str]]] begin[:]
variable[seqprop1_id] assign[=] name[seqprop1]
if call[name[isinstance], parameter[name[seqprop2], name[str]]] begin[:]
variable[seqprop2_id] assign[=] name[seqprop2]
variable[aln_id] assign[=] call[constant[{}_{}].format, parameter[name[seqprop1_id], name[seqprop2_id]]]
if call[name[self].sequence_alignments.has_id, parameter[name[aln_id]]] begin[:]
variable[alignment] assign[=] call[name[self].sequence_alignments.get_by_id, parameter[name[aln_id]]]
return[name[alignment]] | keyword[def] identifier[_get_seqprop_to_seqprop_alignment] ( identifier[self] , identifier[seqprop1] , identifier[seqprop2] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[seqprop1] , identifier[str] ):
identifier[seqprop1_id] = identifier[seqprop1]
keyword[else] :
identifier[seqprop1_id] = identifier[seqprop1] . identifier[id]
keyword[if] identifier[isinstance] ( identifier[seqprop2] , identifier[str] ):
identifier[seqprop2_id] = identifier[seqprop2]
keyword[else] :
identifier[seqprop2_id] = identifier[seqprop2] . identifier[id]
identifier[aln_id] = literal[string] . identifier[format] ( identifier[seqprop1_id] , identifier[seqprop2_id] )
keyword[if] identifier[self] . identifier[sequence_alignments] . identifier[has_id] ( identifier[aln_id] ):
identifier[alignment] = identifier[self] . identifier[sequence_alignments] . identifier[get_by_id] ( identifier[aln_id] )
keyword[return] identifier[alignment]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[aln_id] )) | def _get_seqprop_to_seqprop_alignment(self, seqprop1, seqprop2):
"""Return the alignment stored in self.sequence_alignments given a seqprop + another seqprop"""
if isinstance(seqprop1, str):
seqprop1_id = seqprop1 # depends on [control=['if'], data=[]]
else:
seqprop1_id = seqprop1.id
if isinstance(seqprop2, str):
seqprop2_id = seqprop2 # depends on [control=['if'], data=[]]
else:
seqprop2_id = seqprop2.id
aln_id = '{}_{}'.format(seqprop1_id, seqprop2_id)
if self.sequence_alignments.has_id(aln_id):
alignment = self.sequence_alignments.get_by_id(aln_id)
return alignment # depends on [control=['if'], data=[]]
else:
raise ValueError('{}: sequence alignment not found, please run the alignment first'.format(aln_id)) |
def get_group_permissions(self, user_obj, obj=None):
"""
Returns a set of permission strings that this user has through his/her
groups.
"""
if user_obj.is_anonymous() or obj is not None:
return set()
if not hasattr(user_obj, '_group_perm_cache'):
if user_obj.is_superuser:
perms = Permission.objects.all()
else:
user_groups_field = get_user_class()._meta.get_field('groups') # pylint: disable=W0212
user_groups_query = 'group__%s' % user_groups_field.related_query_name()
perms = Permission.objects.filter(**{user_groups_query: user_obj})
perms = perms.values_list('content_type__app_label', 'codename').order_by()
user_obj._group_perm_cache = set(["%s.%s" % (ct, name) for ct, name in perms]) # pylint: disable=W0212
return user_obj._group_perm_cache | def function[get_group_permissions, parameter[self, user_obj, obj]]:
constant[
Returns a set of permission strings that this user has through his/her
groups.
]
if <ast.BoolOp object at 0x7da1b25ee650> begin[:]
return[call[name[set], parameter[]]]
if <ast.UnaryOp object at 0x7da1b25edc90> begin[:]
if name[user_obj].is_superuser begin[:]
variable[perms] assign[=] call[name[Permission].objects.all, parameter[]]
variable[perms] assign[=] call[call[name[perms].values_list, parameter[constant[content_type__app_label], constant[codename]]].order_by, parameter[]]
name[user_obj]._group_perm_cache assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da1b259dff0>]]
return[name[user_obj]._group_perm_cache] | keyword[def] identifier[get_group_permissions] ( identifier[self] , identifier[user_obj] , identifier[obj] = keyword[None] ):
literal[string]
keyword[if] identifier[user_obj] . identifier[is_anonymous] () keyword[or] identifier[obj] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[set] ()
keyword[if] keyword[not] identifier[hasattr] ( identifier[user_obj] , literal[string] ):
keyword[if] identifier[user_obj] . identifier[is_superuser] :
identifier[perms] = identifier[Permission] . identifier[objects] . identifier[all] ()
keyword[else] :
identifier[user_groups_field] = identifier[get_user_class] (). identifier[_meta] . identifier[get_field] ( literal[string] )
identifier[user_groups_query] = literal[string] % identifier[user_groups_field] . identifier[related_query_name] ()
identifier[perms] = identifier[Permission] . identifier[objects] . identifier[filter] (**{ identifier[user_groups_query] : identifier[user_obj] })
identifier[perms] = identifier[perms] . identifier[values_list] ( literal[string] , literal[string] ). identifier[order_by] ()
identifier[user_obj] . identifier[_group_perm_cache] = identifier[set] ([ literal[string] %( identifier[ct] , identifier[name] ) keyword[for] identifier[ct] , identifier[name] keyword[in] identifier[perms] ])
keyword[return] identifier[user_obj] . identifier[_group_perm_cache] | def get_group_permissions(self, user_obj, obj=None):
"""
Returns a set of permission strings that this user has through his/her
groups.
"""
if user_obj.is_anonymous() or obj is not None:
return set() # depends on [control=['if'], data=[]]
if not hasattr(user_obj, '_group_perm_cache'):
if user_obj.is_superuser:
perms = Permission.objects.all() # depends on [control=['if'], data=[]]
else:
user_groups_field = get_user_class()._meta.get_field('groups') # pylint: disable=W0212
user_groups_query = 'group__%s' % user_groups_field.related_query_name()
perms = Permission.objects.filter(**{user_groups_query: user_obj})
perms = perms.values_list('content_type__app_label', 'codename').order_by()
user_obj._group_perm_cache = set(['%s.%s' % (ct, name) for (ct, name) in perms]) # pylint: disable=W0212 # depends on [control=['if'], data=[]]
return user_obj._group_perm_cache |
def set_threshold_override(self, limit_name, warn_percent=None,
warn_count=None, crit_percent=None,
crit_count=None):
"""
Override the default warning and critical thresholds used to evaluate
        the specified limit's usage. Thresholds can be specified as a
percentage of the limit, or as a usage count, or both.
:param warn_percent: new warning threshold, percentage used
:type warn_percent: int
:param warn_count: new warning threshold, actual count/number
:type warn_count: int
:param crit_percent: new critical threshold, percentage used
:type crit_percent: int
:param crit_count: new critical threshold, actual count/number
:type crit_count: int
"""
try:
self.limits[limit_name].set_threshold_override(
warn_percent=warn_percent,
warn_count=warn_count,
crit_percent=crit_percent,
crit_count=crit_count
)
except KeyError:
raise ValueError("{s} service has no '{l}' limit".format(
s=self.service_name,
l=limit_name)) | def function[set_threshold_override, parameter[self, limit_name, warn_percent, warn_count, crit_percent, crit_count]]:
constant[
Override the default warning and critical thresholds used to evaluate
    the specified limit's usage. Thresholds can be specified as a
percentage of the limit, or as a usage count, or both.
:param warn_percent: new warning threshold, percentage used
:type warn_percent: int
:param warn_count: new warning threshold, actual count/number
:type warn_count: int
:param crit_percent: new critical threshold, percentage used
:type crit_percent: int
:param crit_count: new critical threshold, actual count/number
:type crit_count: int
]
<ast.Try object at 0x7da18f00d540> | keyword[def] identifier[set_threshold_override] ( identifier[self] , identifier[limit_name] , identifier[warn_percent] = keyword[None] ,
identifier[warn_count] = keyword[None] , identifier[crit_percent] = keyword[None] ,
identifier[crit_count] = keyword[None] ):
literal[string]
keyword[try] :
identifier[self] . identifier[limits] [ identifier[limit_name] ]. identifier[set_threshold_override] (
identifier[warn_percent] = identifier[warn_percent] ,
identifier[warn_count] = identifier[warn_count] ,
identifier[crit_percent] = identifier[crit_percent] ,
identifier[crit_count] = identifier[crit_count]
)
keyword[except] identifier[KeyError] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[s] = identifier[self] . identifier[service_name] ,
identifier[l] = identifier[limit_name] )) | def set_threshold_override(self, limit_name, warn_percent=None, warn_count=None, crit_percent=None, crit_count=None):
"""
Override the default warning and critical thresholds used to evaluate
        the specified limit's usage. Thresholds can be specified as a
percentage of the limit, or as a usage count, or both.
:param warn_percent: new warning threshold, percentage used
:type warn_percent: int
:param warn_count: new warning threshold, actual count/number
:type warn_count: int
:param crit_percent: new critical threshold, percentage used
:type crit_percent: int
:param crit_count: new critical threshold, actual count/number
:type crit_count: int
"""
try:
self.limits[limit_name].set_threshold_override(warn_percent=warn_percent, warn_count=warn_count, crit_percent=crit_percent, crit_count=crit_count) # depends on [control=['try'], data=[]]
except KeyError:
raise ValueError("{s} service has no '{l}' limit".format(s=self.service_name, l=limit_name)) # depends on [control=['except'], data=[]] |
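At the call site, an unknown limit name surfaces as a `ValueError` rather than a bare `KeyError`. A self-contained sketch with stand-in service and limit classes (all names here are illustrative):

```python
class DemoLimit:
    def set_threshold_override(self, warn_percent=None, warn_count=None,
                               crit_percent=None, crit_count=None):
        self.thresholds = dict(warn_percent=warn_percent, warn_count=warn_count,
                               crit_percent=crit_percent, crit_count=crit_count)

class DemoService:
    service_name = 'EC2'
    def __init__(self):
        self.limits = {'Running On-Demand EC2 instances': DemoLimit()}
    def set_threshold_override(self, limit_name, **kwargs):
        try:
            self.limits[limit_name].set_threshold_override(**kwargs)
        except KeyError:
            raise ValueError("{s} service has no '{l}' limit".format(
                s=self.service_name, l=limit_name))

svc = DemoService()
svc.set_threshold_override('Running On-Demand EC2 instances',
                           warn_percent=70, crit_count=450)
try:
    svc.set_threshold_override('No Such Limit', warn_percent=50)
except ValueError as exc:
    print(exc)  # EC2 service has no 'No Such Limit' limit
```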
def quote_js(text):
'''Quotes text to be used as JavaScript string in HTML templates. The
result doesn't contain surrounding quotes.'''
if isinstance(text, six.binary_type):
text = text.decode('utf-8') # for Jinja2 Markup
    text = text.replace('\\', '\\\\')
    text = text.replace('\n', '\\n')
    text = text.replace('\r', '')
for char in '\'"<>&':
text = text.replace(char, '\\x{:02x}'.format(ord(char)))
return text | def function[quote_js, parameter[text]]:
constant[Quotes text to be used as JavaScript string in HTML templates. The
result doesn't contain surrounding quotes.]
if call[name[isinstance], parameter[name[text], name[six].binary_type]] begin[:]
variable[text] assign[=] call[name[text].decode, parameter[constant[utf-8]]]
variable[text] assign[=] call[name[text].replace, parameter[constant[\], constant[\\]]]
variable[text] assign[=] call[name[text].replace, parameter[constant[
], constant[\n]]]
variable[text] assign[=] call[name[text].replace, parameter[constant[
], constant[]]]
for taget[name[char]] in starred[constant['"<>&]] begin[:]
variable[text] assign[=] call[name[text].replace, parameter[name[char], call[constant[\x{:02x}].format, parameter[call[name[ord], parameter[name[char]]]]]]]
return[name[text]] | keyword[def] identifier[quote_js] ( identifier[text] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[text] , identifier[six] . identifier[binary_type] ):
identifier[text] = identifier[text] . identifier[decode] ( literal[string] )
identifier[text] = identifier[text] . identifier[replace] ( literal[string] , literal[string] )
identifier[text] = identifier[text] . identifier[replace] ( literal[string] , literal[string] )
identifier[text] = identifier[text] . identifier[replace] ( literal[string] , literal[string] )
keyword[for] identifier[char] keyword[in] literal[string] :
identifier[text] = identifier[text] . identifier[replace] ( identifier[char] , literal[string] . identifier[format] ( identifier[ord] ( identifier[char] )))
keyword[return] identifier[text] | def quote_js(text):
"""Quotes text to be used as JavaScript string in HTML templates. The
result doesn't contain surrounding quotes."""
if isinstance(text, six.binary_type):
text = text.decode('utf-8') # for Jinja2 Markup # depends on [control=['if'], data=[]]
text = text.replace('\\', '\\\\')
text = text.replace('\n', '\\n')
text = text.replace('\r', '')
for char in '\'"<>&':
text = text.replace(char, '\\x{:02x}'.format(ord(char))) # depends on [control=['for'], data=['char']]
return text |
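A quick check of the output: every HTML-significant character becomes a `\xNN` escape, so the result can be dropped between quotes inside a `<script>` block without terminating it:

```python
print(quote_js('He said "hi"\n<script>'))
# He said \x22hi\x22\n\x3cscript\x3e
```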
def print_virt_table(self, data):
"""Print a vertical pretty table from data."""
table = prettytable.PrettyTable()
keys = sorted(data.keys())
table.add_column('Keys', keys)
table.add_column('Values', [data.get(i) for i in keys])
for tbl in table.align.keys():
table.align[tbl] = 'l'
self.printer(table) | def function[print_virt_table, parameter[self, data]]:
constant[Print a vertical pretty table from data.]
variable[table] assign[=] call[name[prettytable].PrettyTable, parameter[]]
variable[keys] assign[=] call[name[sorted], parameter[call[name[data].keys, parameter[]]]]
call[name[table].add_column, parameter[constant[Keys], name[keys]]]
call[name[table].add_column, parameter[constant[Values], <ast.ListComp object at 0x7da2054a5db0>]]
for taget[name[tbl]] in starred[call[name[table].align.keys, parameter[]]] begin[:]
call[name[table].align][name[tbl]] assign[=] constant[l]
call[name[self].printer, parameter[name[table]]] | keyword[def] identifier[print_virt_table] ( identifier[self] , identifier[data] ):
literal[string]
identifier[table] = identifier[prettytable] . identifier[PrettyTable] ()
identifier[keys] = identifier[sorted] ( identifier[data] . identifier[keys] ())
identifier[table] . identifier[add_column] ( literal[string] , identifier[keys] )
identifier[table] . identifier[add_column] ( literal[string] ,[ identifier[data] . identifier[get] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[keys] ])
keyword[for] identifier[tbl] keyword[in] identifier[table] . identifier[align] . identifier[keys] ():
identifier[table] . identifier[align] [ identifier[tbl] ]= literal[string]
identifier[self] . identifier[printer] ( identifier[table] ) | def print_virt_table(self, data):
"""Print a vertical pretty table from data."""
table = prettytable.PrettyTable()
keys = sorted(data.keys())
table.add_column('Keys', keys)
table.add_column('Values', [data.get(i) for i in keys])
for tbl in table.align.keys():
table.align[tbl] = 'l' # depends on [control=['for'], data=['tbl']]
self.printer(table) |
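With prettytable installed, the vertical key/value layout renders like this (a minimal standalone version of the same idea):

```python
import prettytable

data = {'name': 'web01', 'state': 'running', 'region': 'us-east-1'}
table = prettytable.PrettyTable()
keys = sorted(data)
table.add_column('Keys', keys)
table.add_column('Values', [data[k] for k in keys])
for field in table.align.keys():
    table.align[field] = 'l'  # left-align both columns
print(table)
```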
def chunks(self, size=32, alignment=1):
"""Return chunks of the data aligned as given by `alignment`. `size`
must be a multiple of `alignment`. Each chunk is returned as a
named two-tuple of its address and data.
"""
if (size % alignment) != 0:
raise Error(
'size {} is not a multiple of alignment {}'.format(
size,
alignment))
address = self.address
data = self.data
# First chunk may be shorter than `size` due to alignment.
chunk_offset = (address % alignment)
if chunk_offset != 0:
first_chunk_size = (alignment - chunk_offset)
yield self._Chunk(address, data[:first_chunk_size])
address += (first_chunk_size // self._word_size_bytes)
data = data[first_chunk_size:]
else:
first_chunk_size = 0
for offset in range(0, len(data), size):
yield self._Chunk(address + offset // self._word_size_bytes,
data[offset:offset + size]) | def function[chunks, parameter[self, size, alignment]]:
constant[Return chunks of the data aligned as given by `alignment`. `size`
must be a multiple of `alignment`. Each chunk is returned as a
named two-tuple of its address and data.
]
if compare[binary_operation[name[size] <ast.Mod object at 0x7da2590d6920> name[alignment]] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da18bc73490>
variable[address] assign[=] name[self].address
variable[data] assign[=] name[self].data
variable[chunk_offset] assign[=] binary_operation[name[address] <ast.Mod object at 0x7da2590d6920> name[alignment]]
if compare[name[chunk_offset] not_equal[!=] constant[0]] begin[:]
variable[first_chunk_size] assign[=] binary_operation[name[alignment] - name[chunk_offset]]
<ast.Yield object at 0x7da18bc73310>
<ast.AugAssign object at 0x7da18bc737c0>
variable[data] assign[=] call[name[data]][<ast.Slice object at 0x7da18bc715d0>]
for taget[name[offset]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[data]]], name[size]]]] begin[:]
<ast.Yield object at 0x7da18bc70040> | keyword[def] identifier[chunks] ( identifier[self] , identifier[size] = literal[int] , identifier[alignment] = literal[int] ):
literal[string]
keyword[if] ( identifier[size] % identifier[alignment] )!= literal[int] :
keyword[raise] identifier[Error] (
literal[string] . identifier[format] (
identifier[size] ,
identifier[alignment] ))
identifier[address] = identifier[self] . identifier[address]
identifier[data] = identifier[self] . identifier[data]
identifier[chunk_offset] =( identifier[address] % identifier[alignment] )
keyword[if] identifier[chunk_offset] != literal[int] :
identifier[first_chunk_size] =( identifier[alignment] - identifier[chunk_offset] )
keyword[yield] identifier[self] . identifier[_Chunk] ( identifier[address] , identifier[data] [: identifier[first_chunk_size] ])
identifier[address] +=( identifier[first_chunk_size] // identifier[self] . identifier[_word_size_bytes] )
identifier[data] = identifier[data] [ identifier[first_chunk_size] :]
keyword[else] :
identifier[first_chunk_size] = literal[int]
keyword[for] identifier[offset] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[data] ), identifier[size] ):
keyword[yield] identifier[self] . identifier[_Chunk] ( identifier[address] + identifier[offset] // identifier[self] . identifier[_word_size_bytes] ,
identifier[data] [ identifier[offset] : identifier[offset] + identifier[size] ]) | def chunks(self, size=32, alignment=1):
"""Return chunks of the data aligned as given by `alignment`. `size`
must be a multiple of `alignment`. Each chunk is returned as a
named two-tuple of its address and data.
"""
if size % alignment != 0:
raise Error('size {} is not a multiple of alignment {}'.format(size, alignment)) # depends on [control=['if'], data=[]]
address = self.address
data = self.data
# First chunk may be shorter than `size` due to alignment.
chunk_offset = address % alignment
if chunk_offset != 0:
first_chunk_size = alignment - chunk_offset
yield self._Chunk(address, data[:first_chunk_size])
address += first_chunk_size // self._word_size_bytes
data = data[first_chunk_size:] # depends on [control=['if'], data=['chunk_offset']]
else:
first_chunk_size = 0
for offset in range(0, len(data), size):
yield self._Chunk(address + offset // self._word_size_bytes, data[offset:offset + size]) # depends on [control=['for'], data=['offset']] |
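The alignment handling is easiest to see on a concrete buffer: a segment starting at address 2 with `alignment=4` first emits a short chunk to reach the boundary, then full-size chunks. A byte-addressed sketch of the same arithmetic (word size 1, a deliberate simplification of the class above):

```python
from collections import namedtuple

Chunk = namedtuple('Chunk', ['address', 'data'])

def chunks(address, data, size=8, alignment=4):
    if size % alignment != 0:
        raise ValueError('size must be a multiple of alignment')
    offset = address % alignment
    if offset != 0:
        head = alignment - offset
        yield Chunk(address, data[:head])
        address += head
        data = data[head:]
    for i in range(0, len(data), size):
        yield Chunk(address + i, data[i:i + size])

for c in chunks(2, b'0123456789abcdef'):
    print(c)
# Chunk(address=2, data=b'01')
# Chunk(address=4, data=b'23456789')
# Chunk(address=12, data=b'abcdef')
```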
def find_shows_by_ids(self, show_ids):
"""doc: http://open.youku.com/docs/doc?id=60
"""
url = 'https://openapi.youku.com/v2/shows/show_batch.json'
params = {
'client_id': self.client_id,
'show_ids': show_ids
}
r = requests.get(url, params=params)
check_error(r)
return r.json() | def function[find_shows_by_ids, parameter[self, show_ids]]:
constant[doc: http://open.youku.com/docs/doc?id=60
]
variable[url] assign[=] constant[https://openapi.youku.com/v2/shows/show_batch.json]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b265fd30>, <ast.Constant object at 0x7da1b265f970>], [<ast.Attribute object at 0x7da1b265e7d0>, <ast.Name object at 0x7da1b265f0a0>]]
variable[r] assign[=] call[name[requests].get, parameter[name[url]]]
call[name[check_error], parameter[name[r]]]
return[call[name[r].json, parameter[]]] | keyword[def] identifier[find_shows_by_ids] ( identifier[self] , identifier[show_ids] ):
literal[string]
identifier[url] = literal[string]
identifier[params] ={
literal[string] : identifier[self] . identifier[client_id] ,
literal[string] : identifier[show_ids]
}
identifier[r] = identifier[requests] . identifier[get] ( identifier[url] , identifier[params] = identifier[params] )
identifier[check_error] ( identifier[r] )
keyword[return] identifier[r] . identifier[json] () | def find_shows_by_ids(self, show_ids):
"""doc: http://open.youku.com/docs/doc?id=60
"""
url = 'https://openapi.youku.com/v2/shows/show_batch.json'
params = {'client_id': self.client_id, 'show_ids': show_ids}
r = requests.get(url, params=params)
check_error(r)
return r.json() |
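`check_error` is defined elsewhere in this SDK; a plausible minimal sketch, assuming Youku reports failures as an `error` object inside the JSON body (the payload shape here is an assumption):

```python
def check_error(response, expect_status=200):
    """Raise if the HTTP status or the JSON body signals an API error."""
    body = response.json()
    if response.status_code != expect_status or 'error' in body:
        error = body.get('error', {})
        raise RuntimeError('Youku API error {}: {}'.format(
            error.get('code', response.status_code),
            error.get('description', 'unknown error')))
```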
def maybe_reduce(nodes):
r"""Reduce nodes in a curve if they are degree-elevated.
.. note::
This is a helper for :func:`_full_reduce`. Hence there is no
corresponding Fortran speedup.
We check if the nodes are degree-elevated by projecting onto the
space of degree-elevated curves of the same degree, then comparing
to the projection. We form the projection by taking the corresponding
(right) elevation matrix :math:`E` (from one degree lower) and forming
:math:`E^T \left(E E^T\right)^{-1} E`.
Args:
nodes (numpy.ndarray): The nodes in the curve.
Returns:
Tuple[bool, numpy.ndarray]: Pair of values. The first indicates
if the ``nodes`` were reduced. The second is the resulting nodes,
either the reduced ones or the original passed in.
Raises:
.UnsupportedDegree: If the curve is degree 5 or higher.
"""
_, num_nodes = nodes.shape
if num_nodes < 2:
return False, nodes
elif num_nodes == 2:
projection = _PROJECTION0
denom = _PROJ_DENOM0
elif num_nodes == 3:
projection = _PROJECTION1
denom = _PROJ_DENOM1
elif num_nodes == 4:
projection = _PROJECTION2
denom = _PROJ_DENOM2
elif num_nodes == 5:
projection = _PROJECTION3
denom = _PROJ_DENOM3
else:
raise _helpers.UnsupportedDegree(
num_nodes - 1, supported=(0, 1, 2, 3, 4)
)
projected = _helpers.matrix_product(nodes, projection) / denom
relative_err = projection_error(nodes, projected)
if relative_err < _REDUCE_THRESHOLD:
return True, reduce_pseudo_inverse(nodes)
else:
return False, nodes | def function[maybe_reduce, parameter[nodes]]:
constant[Reduce nodes in a curve if they are degree-elevated.
.. note::
This is a helper for :func:`_full_reduce`. Hence there is no
corresponding Fortran speedup.
We check if the nodes are degree-elevated by projecting onto the
space of degree-elevated curves of the same degree, then comparing
to the projection. We form the projection by taking the corresponding
(right) elevation matrix :math:`E` (from one degree lower) and forming
:math:`E^T \left(E E^T\right)^{-1} E`.
Args:
nodes (numpy.ndarray): The nodes in the curve.
Returns:
Tuple[bool, numpy.ndarray]: Pair of values. The first indicates
if the ``nodes`` were reduced. The second is the resulting nodes,
either the reduced ones or the original passed in.
Raises:
.UnsupportedDegree: If the curve is degree 5 or higher.
]
<ast.Tuple object at 0x7da2054a7700> assign[=] name[nodes].shape
if compare[name[num_nodes] less[<] constant[2]] begin[:]
return[tuple[[<ast.Constant object at 0x7da2054a4d00>, <ast.Name object at 0x7da2054a4610>]]]
variable[projected] assign[=] binary_operation[call[name[_helpers].matrix_product, parameter[name[nodes], name[projection]]] / name[denom]]
variable[relative_err] assign[=] call[name[projection_error], parameter[name[nodes], name[projected]]]
if compare[name[relative_err] less[<] name[_REDUCE_THRESHOLD]] begin[:]
return[tuple[[<ast.Constant object at 0x7da18eb55db0>, <ast.Call object at 0x7da18eb541f0>]]] | keyword[def] identifier[maybe_reduce] ( identifier[nodes] ):
literal[string]
identifier[_] , identifier[num_nodes] = identifier[nodes] . identifier[shape]
keyword[if] identifier[num_nodes] < literal[int] :
keyword[return] keyword[False] , identifier[nodes]
keyword[elif] identifier[num_nodes] == literal[int] :
identifier[projection] = identifier[_PROJECTION0]
identifier[denom] = identifier[_PROJ_DENOM0]
keyword[elif] identifier[num_nodes] == literal[int] :
identifier[projection] = identifier[_PROJECTION1]
identifier[denom] = identifier[_PROJ_DENOM1]
keyword[elif] identifier[num_nodes] == literal[int] :
identifier[projection] = identifier[_PROJECTION2]
identifier[denom] = identifier[_PROJ_DENOM2]
keyword[elif] identifier[num_nodes] == literal[int] :
identifier[projection] = identifier[_PROJECTION3]
identifier[denom] = identifier[_PROJ_DENOM3]
keyword[else] :
keyword[raise] identifier[_helpers] . identifier[UnsupportedDegree] (
identifier[num_nodes] - literal[int] , identifier[supported] =( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] )
)
identifier[projected] = identifier[_helpers] . identifier[matrix_product] ( identifier[nodes] , identifier[projection] )/ identifier[denom]
identifier[relative_err] = identifier[projection_error] ( identifier[nodes] , identifier[projected] )
keyword[if] identifier[relative_err] < identifier[_REDUCE_THRESHOLD] :
keyword[return] keyword[True] , identifier[reduce_pseudo_inverse] ( identifier[nodes] )
keyword[else] :
keyword[return] keyword[False] , identifier[nodes] | def maybe_reduce(nodes):
"""Reduce nodes in a curve if they are degree-elevated.
.. note::
This is a helper for :func:`_full_reduce`. Hence there is no
corresponding Fortran speedup.
We check if the nodes are degree-elevated by projecting onto the
space of degree-elevated curves of the same degree, then comparing
to the projection. We form the projection by taking the corresponding
(right) elevation matrix :math:`E` (from one degree lower) and forming
:math:`E^T \\left(E E^T\\right)^{-1} E`.
Args:
nodes (numpy.ndarray): The nodes in the curve.
Returns:
Tuple[bool, numpy.ndarray]: Pair of values. The first indicates
if the ``nodes`` were reduced. The second is the resulting nodes,
either the reduced ones or the original passed in.
Raises:
.UnsupportedDegree: If the curve is degree 5 or higher.
"""
(_, num_nodes) = nodes.shape
if num_nodes < 2:
return (False, nodes) # depends on [control=['if'], data=[]]
elif num_nodes == 2:
projection = _PROJECTION0
denom = _PROJ_DENOM0 # depends on [control=['if'], data=[]]
elif num_nodes == 3:
projection = _PROJECTION1
denom = _PROJ_DENOM1 # depends on [control=['if'], data=[]]
elif num_nodes == 4:
projection = _PROJECTION2
denom = _PROJ_DENOM2 # depends on [control=['if'], data=[]]
elif num_nodes == 5:
projection = _PROJECTION3
denom = _PROJ_DENOM3 # depends on [control=['if'], data=[]]
else:
raise _helpers.UnsupportedDegree(num_nodes - 1, supported=(0, 1, 2, 3, 4))
projected = _helpers.matrix_product(nodes, projection) / denom
relative_err = projection_error(nodes, projected)
if relative_err < _REDUCE_THRESHOLD:
return (True, reduce_pseudo_inverse(nodes)) # depends on [control=['if'], data=[]]
else:
return (False, nodes) |
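The `_PROJECTION*` constants are precomputed instances of the docstring's E^T (E E^T)^{-1} E. For the smallest nontrivial case — testing whether 3 nodes are a degree-elevated line — the matrix can be derived directly. A numpy sketch, assuming nodes are stored as columns and elevation acts by right-multiplication as described:

```python
import numpy as np

# Right-elevation from degree 1 (2 nodes) to degree 2 (3 nodes):
# elevated = nodes @ E with b0' = b0, b1' = (b0 + b1) / 2, b2' = b1.
E = np.array([
    [1.0, 0.5, 0.0],
    [0.0, 0.5, 1.0],
])
projection = E.T @ np.linalg.inv(E @ E.T) @ E  # E^T (E E^T)^{-1} E

line = np.array([[0.0, 2.0]])  # 1-D curve, 2 control points (as columns)
elevated = line @ E            # [[0., 1., 2.]]
assert np.allclose(elevated @ projection, elevated)  # fixed point => reducible
```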
def handle_set_statement_group(self, _, __, tokens: ParseResults) -> ParseResults:
"""Handle a ``SET STATEMENT_GROUP = "X"`` statement."""
self.statement_group = tokens['group']
return tokens | def function[handle_set_statement_group, parameter[self, _, __, tokens]]:
constant[Handle a ``SET STATEMENT_GROUP = "X"`` statement.]
name[self].statement_group assign[=] call[name[tokens]][constant[group]]
return[name[tokens]] | keyword[def] identifier[handle_set_statement_group] ( identifier[self] , identifier[_] , identifier[__] , identifier[tokens] : identifier[ParseResults] )-> identifier[ParseResults] :
literal[string]
identifier[self] . identifier[statement_group] = identifier[tokens] [ literal[string] ]
keyword[return] identifier[tokens] | def handle_set_statement_group(self, _, __, tokens: ParseResults) -> ParseResults:
"""Handle a ``SET STATEMENT_GROUP = "X"`` statement."""
self.statement_group = tokens['group']
return tokens |
def mcmc_sampling(self):
"""Adjust the weight of each function using mcmc sampling.
        The initial value of each weight is evenly distributed.
Brief introduction:
(1)Definition of sample:
Sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk}
(2)Definition of samples:
Samples is a collection of sample, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix,
representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}}
(3)Definition of model:
Model is the function we chose right now. Such as: 'wap', 'weibull'.
(4)Definition of pos:
Pos is the position we want to predict, corresponds to the value of epoch.
Returns
-------
None
"""
init_weight = np.ones((self.effective_model_num), dtype=np.float) / self.effective_model_num
self.weight_samples = np.broadcast_to(init_weight, (NUM_OF_INSTANCE, self.effective_model_num))
for i in range(NUM_OF_SIMULATION_TIME):
# sample new value from Q(i, j)
new_values = np.random.randn(NUM_OF_INSTANCE, self.effective_model_num) * STEP_SIZE + self.weight_samples
new_values = self.normalize_weight(new_values)
# compute alpha(i, j) = min{1, P(j)Q(j, i)/P(i)Q(i, j)}
alpha = np.minimum(1, self.target_distribution(new_values) / self.target_distribution(self.weight_samples))
# sample u
u = np.random.rand(NUM_OF_INSTANCE)
# new value
change_value_flag = (u < alpha).astype(np.int)
for j in range(NUM_OF_INSTANCE):
new_values[j] = self.weight_samples[j] * (1 - change_value_flag[j]) + new_values[j] * change_value_flag[j]
self.weight_samples = new_values | def function[mcmc_sampling, parameter[self]]:
constant[Adjust the weight of each function using mcmc sampling.
    The initial value of each weight is evenly distributed.
Brief introduction:
(1)Definition of sample:
Sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk}
(2)Definition of samples:
Samples is a collection of sample, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix,
representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}}
(3)Definition of model:
Model is the function we chose right now. Such as: 'wap', 'weibull'.
(4)Definition of pos:
Pos is the position we want to predict, corresponds to the value of epoch.
Returns
-------
None
]
variable[init_weight] assign[=] binary_operation[call[name[np].ones, parameter[name[self].effective_model_num]] / name[self].effective_model_num]
name[self].weight_samples assign[=] call[name[np].broadcast_to, parameter[name[init_weight], tuple[[<ast.Name object at 0x7da18f09ef80>, <ast.Attribute object at 0x7da18f09cbb0>]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[NUM_OF_SIMULATION_TIME]]]] begin[:]
variable[new_values] assign[=] binary_operation[binary_operation[call[name[np].random.randn, parameter[name[NUM_OF_INSTANCE], name[self].effective_model_num]] * name[STEP_SIZE]] + name[self].weight_samples]
variable[new_values] assign[=] call[name[self].normalize_weight, parameter[name[new_values]]]
variable[alpha] assign[=] call[name[np].minimum, parameter[constant[1], binary_operation[call[name[self].target_distribution, parameter[name[new_values]]] / call[name[self].target_distribution, parameter[name[self].weight_samples]]]]]
variable[u] assign[=] call[name[np].random.rand, parameter[name[NUM_OF_INSTANCE]]]
variable[change_value_flag] assign[=] call[compare[name[u] less[<] name[alpha]].astype, parameter[name[np].int]]
for taget[name[j]] in starred[call[name[range], parameter[name[NUM_OF_INSTANCE]]]] begin[:]
call[name[new_values]][name[j]] assign[=] binary_operation[binary_operation[call[name[self].weight_samples][name[j]] * binary_operation[constant[1] - call[name[change_value_flag]][name[j]]]] + binary_operation[call[name[new_values]][name[j]] * call[name[change_value_flag]][name[j]]]]
name[self].weight_samples assign[=] name[new_values] | keyword[def] identifier[mcmc_sampling] ( identifier[self] ):
literal[string]
identifier[init_weight] = identifier[np] . identifier[ones] (( identifier[self] . identifier[effective_model_num] ), identifier[dtype] = identifier[np] . identifier[float] )/ identifier[self] . identifier[effective_model_num]
identifier[self] . identifier[weight_samples] = identifier[np] . identifier[broadcast_to] ( identifier[init_weight] ,( identifier[NUM_OF_INSTANCE] , identifier[self] . identifier[effective_model_num] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[NUM_OF_SIMULATION_TIME] ):
identifier[new_values] = identifier[np] . identifier[random] . identifier[randn] ( identifier[NUM_OF_INSTANCE] , identifier[self] . identifier[effective_model_num] )* identifier[STEP_SIZE] + identifier[self] . identifier[weight_samples]
identifier[new_values] = identifier[self] . identifier[normalize_weight] ( identifier[new_values] )
identifier[alpha] = identifier[np] . identifier[minimum] ( literal[int] , identifier[self] . identifier[target_distribution] ( identifier[new_values] )/ identifier[self] . identifier[target_distribution] ( identifier[self] . identifier[weight_samples] ))
identifier[u] = identifier[np] . identifier[random] . identifier[rand] ( identifier[NUM_OF_INSTANCE] )
identifier[change_value_flag] =( identifier[u] < identifier[alpha] ). identifier[astype] ( identifier[np] . identifier[int] )
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[NUM_OF_INSTANCE] ):
identifier[new_values] [ identifier[j] ]= identifier[self] . identifier[weight_samples] [ identifier[j] ]*( literal[int] - identifier[change_value_flag] [ identifier[j] ])+ identifier[new_values] [ identifier[j] ]* identifier[change_value_flag] [ identifier[j] ]
identifier[self] . identifier[weight_samples] = identifier[new_values] | def mcmc_sampling(self):
"""Adjust the weight of each function using mcmc sampling.
The initial value of each weight is evenly distribute.
Brief introduction:
(1)Definition of sample:
Sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk}
(2)Definition of samples:
Samples is a collection of sample, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix,
representing{{w11, w12, ..., w1k}, {w21, w22, ... w2k}, ...{wk1, wk2,..., wkk}}
(3)Definition of model:
Model is the function we chose right now. Such as: 'wap', 'weibull'.
(4)Definition of pos:
Pos is the position we want to predict, corresponds to the value of epoch.
Returns
-------
None
"""
init_weight = np.ones(self.effective_model_num, dtype=np.float) / self.effective_model_num
self.weight_samples = np.broadcast_to(init_weight, (NUM_OF_INSTANCE, self.effective_model_num))
for i in range(NUM_OF_SIMULATION_TIME):
# sample new value from Q(i, j)
new_values = np.random.randn(NUM_OF_INSTANCE, self.effective_model_num) * STEP_SIZE + self.weight_samples
new_values = self.normalize_weight(new_values)
# compute alpha(i, j) = min{1, P(j)Q(j, i)/P(i)Q(i, j)}
alpha = np.minimum(1, self.target_distribution(new_values) / self.target_distribution(self.weight_samples))
# sample u
u = np.random.rand(NUM_OF_INSTANCE)
# new value
change_value_flag = (u < alpha).astype(np.int)
for j in range(NUM_OF_INSTANCE):
new_values[j] = self.weight_samples[j] * (1 - change_value_flag[j]) + new_values[j] * change_value_flag[j] # depends on [control=['for'], data=['j']]
self.weight_samples = new_values # depends on [control=['for'], data=[]] |
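The loop above is a vectorized Metropolis-Hastings update: each proposed weight vector is accepted with probability min(1, P(new)/P(old)), otherwise the current sample is kept. A minimal standalone sketch of one such step, assuming a caller-supplied target density (`target` stands in for the class's target_distribution and is not part of the original):

import numpy as np

def mh_step(samples, target, step_size=0.1):
    # Gaussian random-walk proposal around the current samples (N x K).
    proposal = samples + step_size * np.random.randn(*samples.shape)
    # Acceptance probability alpha = min(1, P(proposal) / P(current)).
    alpha = np.minimum(1.0, target(proposal) / target(samples))
    accept = np.random.rand(samples.shape[0]) < alpha
    # Keep the proposal where accepted, the old sample otherwise.
    return np.where(accept[:, None], proposal, samples)

np.where with a broadcast mask replaces the per-instance change_value_flag loop in a single step.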
def haversine_distance(point1, point2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees).
"""
lat1, lon1 = point1
lat2, lon2 = point2
# Convert decimal degrees to radians.
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# Haversine formula.
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
# 6367 km is the radius of the Earth.
km = 6367 * c
return km | def function[haversine_distance, parameter[point1, point2]]:
constant[
Calculate the great circle distance between two points
on the earth (specified in decimal degrees).
]
<ast.Tuple object at 0x7da1b0395f00> assign[=] name[point1]
<ast.Tuple object at 0x7da1b0396650> assign[=] name[point2]
<ast.Tuple object at 0x7da1b0394820> assign[=] call[name[map], parameter[name[radians], list[[<ast.Name object at 0x7da1b039ac50>, <ast.Name object at 0x7da1b0399ea0>, <ast.Name object at 0x7da1b0399090>, <ast.Name object at 0x7da1b0399060>]]]]
variable[dlon] assign[=] binary_operation[name[lon2] - name[lon1]]
variable[dlat] assign[=] binary_operation[name[lat2] - name[lat1]]
variable[a] assign[=] binary_operation[binary_operation[call[name[sin], parameter[binary_operation[name[dlat] / constant[2]]]] ** constant[2]] + binary_operation[binary_operation[call[name[cos], parameter[name[lat1]]] * call[name[cos], parameter[name[lat2]]]] * binary_operation[call[name[sin], parameter[binary_operation[name[dlon] / constant[2]]]] ** constant[2]]]]
variable[c] assign[=] binary_operation[constant[2] * call[name[asin], parameter[call[name[sqrt], parameter[name[a]]]]]]
variable[km] assign[=] binary_operation[constant[6367] * name[c]]
return[name[km]] | keyword[def] identifier[haversine_distance] ( identifier[point1] , identifier[point2] ):
literal[string]
identifier[lat1] , identifier[lon1] = identifier[point1]
identifier[lat2] , identifier[lon2] = identifier[point2]
identifier[lon1] , identifier[lat1] , identifier[lon2] , identifier[lat2] = identifier[map] ( identifier[radians] ,[ identifier[lon1] , identifier[lat1] , identifier[lon2] , identifier[lat2] ])
identifier[dlon] = identifier[lon2] - identifier[lon1]
identifier[dlat] = identifier[lat2] - identifier[lat1]
identifier[a] = identifier[sin] ( identifier[dlat] / literal[int] )** literal[int] + identifier[cos] ( identifier[lat1] )* identifier[cos] ( identifier[lat2] )* identifier[sin] ( identifier[dlon] / literal[int] )** literal[int]
identifier[c] = literal[int] * identifier[asin] ( identifier[sqrt] ( identifier[a] ))
identifier[km] = literal[int] * identifier[c]
keyword[return] identifier[km] | def haversine_distance(point1, point2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees).
"""
(lat1, lon1) = point1
(lat2, lon2) = point2
# Convert decimal degrees to radians.
(lon1, lat1, lon2, lat2) = map(radians, [lon1, lat1, lon2, lat2])
# Haversine formula.
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * asin(sqrt(a))
# 6367 km is the radius of the Earth.
km = 6367 * c
return km |
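A quick usage check for the function above; it relies on radians, sin, cos, asin and sqrt from the standard math module, and the coordinates below are illustrative:

from math import radians, sin, cos, asin, sqrt  # names the function expects in scope

paris = (48.8566, 2.3522)      # (lat, lon) in decimal degrees
london = (51.5074, -0.1278)
print(haversine_distance(paris, london))  # about 343 km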
def _concatenate_virtual_arrays(arrs, cols=None, scaling=None):
"""Return a virtual concatenate of several NumPy arrays."""
return None if not len(arrs) else ConcatenatedArrays(arrs, cols,
scaling=scaling) | def function[_concatenate_virtual_arrays, parameter[arrs, cols, scaling]]:
constant[Return a virtual concatenation of several NumPy arrays.]
return[<ast.IfExp object at 0x7da1b12f1d50>] | keyword[def] identifier[_concatenate_virtual_arrays] ( identifier[arrs] , identifier[cols] = keyword[None] , identifier[scaling] = keyword[None] ):
literal[string]
keyword[return] keyword[None] keyword[if] keyword[not] identifier[len] ( identifier[arrs] ) keyword[else] identifier[ConcatenatedArrays] ( identifier[arrs] , identifier[cols] ,
identifier[scaling] = identifier[scaling] ) | def _concatenate_virtual_arrays(arrs, cols=None, scaling=None):
"""Return a virtual concatenate of several NumPy arrays."""
return None if not len(arrs) else ConcatenatedArrays(arrs, cols, scaling=scaling) |
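A behavior sketch, assuming ConcatenatedArrays (defined elsewhere in the same codebase) presents the parts as one array-like view:

import numpy as np

a, b = np.zeros((10, 4)), np.ones((5, 4))
view = _concatenate_virtual_arrays([a, b])   # lazy view over both parts, no copy
empty = _concatenate_virtual_arrays([])      # None by design for an empty list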
def get_all_devs(auth, url, network_address=None, category=None, label=None):
"""Takes string input of IP address to issue RESTUL call to HP IMC\n
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param network_address: str IPv4 Network Address
:param category: str or int corresponding to device category (0=router, 1=switches, see API docs for other examples)
:return: dictionary of device details
:rtype: dict
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.device import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> dev_list = get_all_devs( auth.creds, auth.url, network_address= '10.11.')
>>> assert type(dev_list) is list
>>> assert 'sysName' in dev_list[0]
"""
base_url = "/imcrs/plat/res/device?resPrivilegeFilter=false"
end_url = "&start=0&size=1000&orderBy=id&desc=false&total=false"
if network_address:
network_address = "&ip=" + str(network_address)
else:
network_address = ''
if label:
label = "&label=" + str(label)
else:
label = ''
if category:
category = "&category" + category
else:
category = ''
f_url = url + base_url + str(network_address) + str(label) + str(category) + end_url
print(f_url)
response = requests.get(f_url, auth=auth, headers=HEADERS)
try:
if response.status_code == 200:
dev_details = (json.loads(response.text))
if len(dev_details) == 0:
print("Device not found")
return "Device not found"
elif type(dev_details['device']) is dict:
return [dev_details['device']]
else:
return dev_details['device']
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " get_dev_details: An Error has occured" | def function[get_all_devs, parameter[auth, url, network_address, category, label]]:
constant[Takes string input of IP address to issue a RESTful call to HP IMC
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param network_address: str IPv4 Network Address
:param category: str or int corresponding to device category (0=router, 1=switches, see API docs for other examples)
:return: dictionary of device details
:rtype: dict
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.device import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> dev_list = get_all_devs( auth.creds, auth.url, network_address= '10.11.')
>>> assert type(dev_list) is list
>>> assert 'sysName' in dev_list[0]
]
variable[base_url] assign[=] constant[/imcrs/plat/res/device?resPrivilegeFilter=false]
variable[end_url] assign[=] constant[&start=0&size=1000&orderBy=id&desc=false&total=false]
if name[network_address] begin[:]
variable[network_address] assign[=] binary_operation[constant[&ip=] + call[name[str], parameter[name[network_address]]]]
if name[label] begin[:]
variable[label] assign[=] binary_operation[constant[&label=] + call[name[str], parameter[name[label]]]]
if name[category] begin[:]
variable[category] assign[=] binary_operation[constant[&category=] + name[category]]
variable[f_url] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[url] + name[base_url]] + call[name[str], parameter[name[network_address]]]] + call[name[str], parameter[name[label]]]] + call[name[str], parameter[name[category]]]] + name[end_url]]
call[name[print], parameter[name[f_url]]]
variable[response] assign[=] call[name[requests].get, parameter[name[f_url]]]
<ast.Try object at 0x7da18f813970> | keyword[def] identifier[get_all_devs] ( identifier[auth] , identifier[url] , identifier[network_address] = keyword[None] , identifier[category] = keyword[None] , identifier[label] = keyword[None] ):
literal[string]
identifier[base_url] = literal[string]
identifier[end_url] = literal[string]
keyword[if] identifier[network_address] :
identifier[network_address] = literal[string] + identifier[str] ( identifier[network_address] )
keyword[else] :
identifier[network_address] = literal[string]
keyword[if] identifier[label] :
identifier[label] = literal[string] + identifier[str] ( identifier[label] )
keyword[else] :
identifier[label] = literal[string]
keyword[if] identifier[category] :
identifier[category] = literal[string] + identifier[category]
keyword[else] :
identifier[category] = literal[string]
identifier[f_url] = identifier[url] + identifier[base_url] + identifier[str] ( identifier[network_address] )+ identifier[str] ( identifier[label] )+ identifier[str] ( identifier[category] )+ identifier[end_url]
identifier[print] ( identifier[f_url] )
identifier[response] = identifier[requests] . identifier[get] ( identifier[f_url] , identifier[auth] = identifier[auth] , identifier[headers] = identifier[HEADERS] )
keyword[try] :
keyword[if] identifier[response] . identifier[status_code] == literal[int] :
identifier[dev_details] =( identifier[json] . identifier[loads] ( identifier[response] . identifier[text] ))
keyword[if] identifier[len] ( identifier[dev_details] )== literal[int] :
identifier[print] ( literal[string] )
keyword[return] literal[string]
keyword[elif] identifier[type] ( identifier[dev_details] [ literal[string] ]) keyword[is] identifier[dict] :
keyword[return] [ identifier[dev_details] [ literal[string] ]]
keyword[else] :
keyword[return] identifier[dev_details] [ literal[string] ]
keyword[except] identifier[requests] . identifier[exceptions] . identifier[RequestException] keyword[as] identifier[error] :
keyword[return] literal[string] + identifier[str] ( identifier[error] )+ literal[string] | def get_all_devs(auth, url, network_address=None, category=None, label=None):
"""Takes string input of IP address to issue RESTUL call to HP IMC
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param network_address: str IPv4 Network Address
:param category: str or int corresponding to device category (0=router, 1=switches, see API docs for other examples)
:return: dictionary of device details
:rtype: dict
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.device import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> dev_list = get_all_devs( auth.creds, auth.url, network_address= '10.11.')
>>> assert type(dev_list) is list
>>> assert 'sysName' in dev_list[0]
"""
base_url = '/imcrs/plat/res/device?resPrivilegeFilter=false'
end_url = '&start=0&size=1000&orderBy=id&desc=false&total=false'
if network_address:
network_address = '&ip=' + str(network_address) # depends on [control=['if'], data=[]]
else:
network_address = ''
if label:
label = '&label=' + str(label) # depends on [control=['if'], data=[]]
else:
label = ''
if category:
category = '&category=' + category # depends on [control=['if'], data=[]]
else:
category = ''
f_url = url + base_url + str(network_address) + str(label) + str(category) + end_url
print(f_url)
response = requests.get(f_url, auth=auth, headers=HEADERS)
try:
if response.status_code == 200:
dev_details = json.loads(response.text)
if len(dev_details) == 0:
print('Device not found')
return 'Device not found' # depends on [control=['if'], data=[]]
elif type(dev_details['device']) is dict:
return [dev_details['device']] # depends on [control=['if'], data=[]]
else:
return dev_details['device'] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except requests.exceptions.RequestException as error:
return 'Error:\n' + str(error) + ' get_all_devs: an error has occurred' # depends on [control=['except'], data=['error']]
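A usage sketch mirroring the doctest in the docstring above; the host and credentials are placeholders, and category must be a string because it is concatenated straight into the query:

from pyhpeimc.auth import IMCAuth

auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
switches = get_all_devs(auth.creds, auth.url, category="1")
lab_devs = get_all_devs(auth.creds, auth.url, network_address="10.11.")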
def open(url):
""" Launches browser depending on os """
if sys.platform == 'win32':
os.startfile(url)
elif sys.platform == 'darwin':
subprocess.Popen(['open', url])
else:
try:
subprocess.Popen(['xdg-open', url])
except OSError:
import webbrowser
webbrowser.open(url) | def function[open, parameter[url]]:
constant[ Launches browser depending on os ]
if compare[name[sys].platform equal[==] constant[win32]] begin[:]
call[name[os].startfile, parameter[name[url]]] | keyword[def] identifier[open] ( identifier[url] ):
literal[string]
keyword[if] identifier[sys] . identifier[platform] == literal[string] :
identifier[os] . identifier[startfile] ( identifier[url] )
keyword[elif] identifier[sys] . identifier[platform] == literal[string] :
identifier[subprocess] . identifier[Popen] ([ literal[string] , identifier[url] ])
keyword[else] :
keyword[try] :
identifier[subprocess] . identifier[Popen] ([ literal[string] , identifier[url] ])
keyword[except] identifier[OSError] :
keyword[import] identifier[webbrowser]
identifier[webbrowser] . identifier[open] ( identifier[url] ) | def open(url):
""" Launches browser depending on os """
if sys.platform == 'win32':
os.startfile(url) # depends on [control=['if'], data=[]]
elif sys.platform == 'darwin':
subprocess.Popen(['open', url]) # depends on [control=['if'], data=[]]
else:
try:
subprocess.Popen(['xdg-open', url]) # depends on [control=['try'], data=[]]
except OSError:
import webbrowser
webbrowser.open(url) # depends on [control=['except'], data=[]] |
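The dispatch resolves to os.startfile on Windows, the `open` command on macOS, and xdg-open elsewhere, with the webbrowser module as a last resort when xdg-open is missing; URLs and local paths are both accepted:

open("https://example.com")   # default browser
open("/tmp/report.pdf")       # hypothetical path; the OS picks the viewer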
def gps_velocity_df(GPS):
'''return GPS velocity vector'''
vx = GPS.Spd * cos(radians(GPS.GCrs))
vy = GPS.Spd * sin(radians(GPS.GCrs))
return Vector3(vx, vy, GPS.VZ) | def function[gps_velocity_df, parameter[GPS]]:
constant[return GPS velocity vector]
variable[vx] assign[=] binary_operation[name[GPS].Spd * call[name[cos], parameter[call[name[radians], parameter[name[GPS].GCrs]]]]]
variable[vy] assign[=] binary_operation[name[GPS].Spd * call[name[sin], parameter[call[name[radians], parameter[name[GPS].GCrs]]]]]
return[call[name[Vector3], parameter[name[vx], name[vy], name[GPS].VZ]]] | keyword[def] identifier[gps_velocity_df] ( identifier[GPS] ):
literal[string]
identifier[vx] = identifier[GPS] . identifier[Spd] * identifier[cos] ( identifier[radians] ( identifier[GPS] . identifier[GCrs] ))
identifier[vy] = identifier[GPS] . identifier[Spd] * identifier[sin] ( identifier[radians] ( identifier[GPS] . identifier[GCrs] ))
keyword[return] identifier[Vector3] ( identifier[vx] , identifier[vy] , identifier[GPS] . identifier[VZ] ) | def gps_velocity_df(GPS):
"""return GPS velocity vector"""
vx = GPS.Spd * cos(radians(GPS.GCrs))
vy = GPS.Spd * sin(radians(GPS.GCrs))
return Vector3(vx, vy, GPS.VZ) |
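The decomposition splits ground speed along a course angle measured clockwise from north, so vx is the north component and vy the east component; a standalone check with hypothetical log values (assuming Spd in m/s and GCrs in degrees):

from math import cos, sin, radians

spd, gcrs, vz = 10.0, 90.0, -0.5   # hypothetical GPS log fields
vx = spd * cos(radians(gcrs))      # ~0.0: a due-east course has no north component
vy = spd * sin(radians(gcrs))      # ~10.0: all of the speed points east
print(vx, vy, vz)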
def sentences(self):
'''
Iterate over <s> XML-like tags and tokenize with nltk
'''
for sentence_id, node in enumerate(self.ner_dom.childNodes):
## increment the char index with any text before the <s>
## tag. Crucial assumption here is that the LingPipe XML
## tags are inserted into the original byte array without
## modifying the portions that are not inside the
## LingPipe-added tags themselves.
if node.nodeType == node.TEXT_NODE:
## we expect to only see TEXT_NODE instances with whitespace
assert only_whitespace.match(node.data), repr(node.data)
## must convert back to utf-8 to have expected byte offsets
self.byte_idx += len(node.data.encode('utf-8'))
## count full lines, i.e. only those that end with a \n
# 'True' here means keep the trailing newlines
for line in node.data.splitlines(True):
if line.endswith('\n'):
self.line_idx += 1
else:
logger.debug('getting tokens for sentence_id=%d' % sentence_id)
more_sentence_remains = True
while more_sentence_remains:
## always a sentence
sent = Sentence()
## this "node" came from for loop above, and it's
## childNodes list might have been popped by a
## previous pass through this while loop
tokens = iter( self.tokens( node ) )
while 1:
try:
tok = tokens.next()
sent.tokens.append(tok)
#logger.debug('got token: %r %d %d' % (tok.token, tok.mention_id, tok.sentence_pos))
except StopIteration:
yield sent
more_sentence_remains = False
break | def function[sentences, parameter[self]]:
constant[
Iterate over <s> XML-like tags and tokenize with nltk
]
for taget[tuple[[<ast.Name object at 0x7da204960b20>, <ast.Name object at 0x7da204961d50>]]] in starred[call[name[enumerate], parameter[name[self].ner_dom.childNodes]]] begin[:]
if compare[name[node].nodeType equal[==] name[node].TEXT_NODE] begin[:]
assert[call[name[only_whitespace].match, parameter[name[node].data]]]
<ast.AugAssign object at 0x7da204963700>
for taget[name[line]] in starred[call[name[node].data.splitlines, parameter[constant[True]]]] begin[:]
if call[name[line].endswith, parameter[constant[
]]] begin[:]
<ast.AugAssign object at 0x7da2049605b0> | keyword[def] identifier[sentences] ( identifier[self] ):
literal[string]
keyword[for] identifier[sentence_id] , identifier[node] keyword[in] identifier[enumerate] ( identifier[self] . identifier[ner_dom] . identifier[childNodes] ):
keyword[if] identifier[node] . identifier[nodeType] == identifier[node] . identifier[TEXT_NODE] :
keyword[assert] identifier[only_whitespace] . identifier[match] ( identifier[node] . identifier[data] ), identifier[repr] ( identifier[node] . identifier[data] )
identifier[self] . identifier[byte_idx] += identifier[len] ( identifier[node] . identifier[data] . identifier[encode] ( literal[string] ))
keyword[for] identifier[line] keyword[in] identifier[node] . identifier[data] . identifier[splitlines] ( keyword[True] ):
keyword[if] identifier[line] . identifier[endswith] ( literal[string] ):
identifier[self] . identifier[line_idx] += literal[int]
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] % identifier[sentence_id] )
identifier[more_sentence_remains] = keyword[True]
keyword[while] identifier[more_sentence_remains] :
identifier[sent] = identifier[Sentence] ()
identifier[tokens] = identifier[iter] ( identifier[self] . identifier[tokens] ( identifier[node] ))
keyword[while] literal[int] :
keyword[try] :
identifier[tok] = identifier[tokens] . identifier[next] ()
identifier[sent] . identifier[tokens] . identifier[append] ( identifier[tok] )
keyword[except] identifier[StopIteration] :
keyword[yield] identifier[sent]
identifier[more_sentence_remains] = keyword[False]
keyword[break] | def sentences(self):
"""
Iterate over <s> XML-like tags and tokenize with nltk
"""
for (sentence_id, node) in enumerate(self.ner_dom.childNodes):
## increment the char index with any text before the <s>
## tag. Crucial assumption here is that the LingPipe XML
## tags are inserted into the original byte array without
## modifying the portions that are not inside the
## LingPipe-added tags themselves.
if node.nodeType == node.TEXT_NODE:
## we expect to only see TEXT_NODE instances with whitespace
assert only_whitespace.match(node.data), repr(node.data)
## must convert back to utf-8 to have expected byte offsets
self.byte_idx += len(node.data.encode('utf-8'))
## count full lines, i.e. only those that end with a \n
# 'True' here means keep the trailing newlines
for line in node.data.splitlines(True):
if line.endswith('\n'):
self.line_idx += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]]
else:
logger.debug('getting tokens for sentence_id=%d' % sentence_id)
more_sentence_remains = True
while more_sentence_remains:
## always a sentence
sent = Sentence()
## this "node" came from for loop above, and it's
## childNodes list might have been popped by a
## previous pass through this while loop
tokens = iter(self.tokens(node))
while 1:
try:
tok = tokens.next()
sent.tokens.append(tok) # depends on [control=['try'], data=[]]
#logger.debug('got token: %r %d %d' % (tok.token, tok.mention_id, tok.sentence_pos))
except StopIteration:
yield sent
more_sentence_remains = False
break # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['for'], data=[]] |
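The byte-offset bookkeeping above works because offsets are counted on the UTF-8 encoding, where character and byte positions diverge for non-ASCII text; a two-line illustration:

text = u'caf\xe9\n'                    # 5 characters
assert len(text) == 5
assert len(text.encode('utf-8')) == 6  # the accented character takes 2 bytes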
def simulate(self, steps, initial_lr):
"""
Simulates the learning rate scheduler.
Parameters
----------
steps: int
Number of steps to simulate
initial_lr: float
Initial learning rate
Returns
-------
lrs: numpy ndarray
Simulated learning rates
"""
test = torch.ones(1, requires_grad=True)
opt = torch.optim.SGD([{'params': test, 'lr': initial_lr}])
policy_cls = self._get_policy_cls()
sch = policy_cls(opt, **self.kwargs)
if hasattr(sch, 'batch_step') and callable(sch.batch_step):
step = sch.batch_step
else:
step = sch.step
lrs = []
for _ in range(steps):
step()
lrs.append(sch.get_lr()[0])
return np.array(lrs) | def function[simulate, parameter[self, steps, initial_lr]]:
constant[
Simulates the learning rate scheduler.
Parameters
----------
steps: int
Number of steps to simulate
initial_lr: float
Initial learning rate
Returns
-------
lrs: numpy ndarray
Simulated learning rates
]
variable[test] assign[=] call[name[torch].ones, parameter[constant[1]]]
variable[opt] assign[=] call[name[torch].optim.SGD, parameter[list[[<ast.Dict object at 0x7da18eb57940>]]]]
variable[policy_cls] assign[=] call[name[self]._get_policy_cls, parameter[]]
variable[sch] assign[=] call[name[policy_cls], parameter[name[opt]]]
if <ast.BoolOp object at 0x7da18eb57670> begin[:]
variable[step] assign[=] name[sch].batch_step
variable[lrs] assign[=] list[[]]
for taget[name[_]] in starred[call[name[range], parameter[name[steps]]]] begin[:]
call[name[step], parameter[]]
call[name[lrs].append, parameter[call[call[name[sch].get_lr, parameter[]]][constant[0]]]]
return[call[name[np].array, parameter[name[lrs]]]] | keyword[def] identifier[simulate] ( identifier[self] , identifier[steps] , identifier[initial_lr] ):
literal[string]
identifier[test] = identifier[torch] . identifier[ones] ( literal[int] , identifier[requires_grad] = keyword[True] )
identifier[opt] = identifier[torch] . identifier[optim] . identifier[SGD] ([{ literal[string] : identifier[test] , literal[string] : identifier[initial_lr] }])
identifier[policy_cls] = identifier[self] . identifier[_get_policy_cls] ()
identifier[sch] = identifier[policy_cls] ( identifier[opt] ,** identifier[self] . identifier[kwargs] )
keyword[if] identifier[hasattr] ( identifier[sch] , literal[string] ) keyword[and] identifier[callable] ( identifier[sch] . identifier[batch_step] ):
identifier[step] = identifier[sch] . identifier[batch_step]
keyword[else] :
identifier[step] = identifier[sch] . identifier[step]
identifier[lrs] =[]
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[steps] ):
identifier[step] ()
identifier[lrs] . identifier[append] ( identifier[sch] . identifier[get_lr] ()[ literal[int] ])
keyword[return] identifier[np] . identifier[array] ( identifier[lrs] ) | def simulate(self, steps, initial_lr):
"""
Simulates the learning rate scheduler.
Parameters
----------
steps: int
Number of steps to simulate
initial_lr: float
Initial learning rate
Returns
-------
lrs: numpy ndarray
Simulated learning rates
"""
test = torch.ones(1, requires_grad=True)
opt = torch.optim.SGD([{'params': test, 'lr': initial_lr}])
policy_cls = self._get_policy_cls()
sch = policy_cls(opt, **self.kwargs)
if hasattr(sch, 'batch_step') and callable(sch.batch_step):
step = sch.batch_step # depends on [control=['if'], data=[]]
else:
step = sch.step
lrs = []
for _ in range(steps):
step()
lrs.append(sch.get_lr()[0]) # depends on [control=['for'], data=[]]
return np.array(lrs) |
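A usage sketch, assuming the enclosing object is constructed with a policy name plus the scheduler's keyword arguments, in the style of skorch's LRScheduler callback (the constructor details are an assumption, not shown above):

import numpy as np

sched = LRScheduler(policy='StepLR', step_size=30, gamma=0.1)  # hypothetical construction
lrs = sched.simulate(steps=90, initial_lr=0.1)
assert isinstance(lrs, np.ndarray) and lrs.shape == (90,)
print(lrs[0], lrs[45], lrs[75])  # 0.1, then 0.01 and 0.001 after each decay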
def pop(self, k, d=_POP_DEFAULT):
"""Pop an ingredient off of this shelf."""
if d is _POP_DEFAULT:
return self._ingredients.pop(k)
else:
return self._ingredients.pop(k, d) | def function[pop, parameter[self, k, d]]:
constant[Pop an ingredient off of this shelf.]
if compare[name[d] is name[_POP_DEFAULT]] begin[:]
return[call[name[self]._ingredients.pop, parameter[name[k]]]] | keyword[def] identifier[pop] ( identifier[self] , identifier[k] , identifier[d] = identifier[_POP_DEFAULT] ):
literal[string]
keyword[if] identifier[d] keyword[is] identifier[_POP_DEFAULT] :
keyword[return] identifier[self] . identifier[_ingredients] . identifier[pop] ( identifier[k] )
keyword[else] :
keyword[return] identifier[self] . identifier[_ingredients] . identifier[pop] ( identifier[k] , identifier[d] ) | def pop(self, k, d=_POP_DEFAULT):
"""Pop an ingredient off of this shelf."""
if d is _POP_DEFAULT:
return self._ingredients.pop(k) # depends on [control=['if'], data=[]]
else:
return self._ingredients.pop(k, d) |
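_POP_DEFAULT is a sentinel object, letting the method tell "no default supplied" apart from "the default is None"; a minimal standalone version of the pattern:

_POP_DEFAULT = object()  # unique marker; never equal to a caller-supplied default

class Shelf(object):
    def __init__(self):
        self._ingredients = {}

    def pop(self, k, d=_POP_DEFAULT):
        if d is _POP_DEFAULT:
            return self._ingredients.pop(k)  # no default: KeyError on a miss
        return self._ingredients.pop(k, d)   # default given: never raises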
def from_file(cls, filename, directory=None,
format=None, engine=None, encoding=File._encoding):
"""Return an instance with the source string read from the given file.
Args:
filename: Filename for loading/saving the source.
directory: (Sub)directory for source loading/saving and rendering.
format: Rendering output format (``'pdf'``, ``'png'``, ...).
engine: Layout command used (``'dot'``, ``'neato'``, ...).
encoding: Encoding for loading/saving the source.
"""
filepath = os.path.join(directory or '', filename)
if encoding is None:
encoding = locale.getpreferredencoding()
with io.open(filepath, encoding=encoding) as fd:
source = fd.read()
return cls(source, filename, directory, format, engine, encoding) | def function[from_file, parameter[cls, filename, directory, format, engine, encoding]]:
constant[Return an instance with the source string read from the given file.
Args:
filename: Filename for loading/saving the source.
directory: (Sub)directory for source loading/saving and rendering.
format: Rendering output format (``'pdf'``, ``'png'``, ...).
engine: Layout command used (``'dot'``, ``'neato'``, ...).
encoding: Encoding for loading/saving the source.
]
variable[filepath] assign[=] call[name[os].path.join, parameter[<ast.BoolOp object at 0x7da20c7c88b0>, name[filename]]]
if compare[name[encoding] is constant[None]] begin[:]
variable[encoding] assign[=] call[name[locale].getpreferredencoding, parameter[]]
with call[name[io].open, parameter[name[filepath]]] begin[:]
variable[source] assign[=] call[name[fd].read, parameter[]]
return[call[name[cls], parameter[name[source], name[filename], name[directory], name[format], name[engine], name[encoding]]]] | keyword[def] identifier[from_file] ( identifier[cls] , identifier[filename] , identifier[directory] = keyword[None] ,
identifier[format] = keyword[None] , identifier[engine] = keyword[None] , identifier[encoding] = identifier[File] . identifier[_encoding] ):
literal[string]
identifier[filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] keyword[or] literal[string] , identifier[filename] )
keyword[if] identifier[encoding] keyword[is] keyword[None] :
identifier[encoding] = identifier[locale] . identifier[getpreferredencoding] ()
keyword[with] identifier[io] . identifier[open] ( identifier[filepath] , identifier[encoding] = identifier[encoding] ) keyword[as] identifier[fd] :
identifier[source] = identifier[fd] . identifier[read] ()
keyword[return] identifier[cls] ( identifier[source] , identifier[filename] , identifier[directory] , identifier[format] , identifier[engine] , identifier[encoding] ) | def from_file(cls, filename, directory=None, format=None, engine=None, encoding=File._encoding):
"""Return an instance with the source string read from the given file.
Args:
filename: Filename for loading/saving the source.
directory: (Sub)directory for source loading/saving and rendering.
format: Rendering output format (``'pdf'``, ``'png'``, ...).
engine: Layout command used (``'dot'``, ``'neato'``, ...).
encoding: Encoding for loading/saving the source.
"""
filepath = os.path.join(directory or '', filename)
if encoding is None:
encoding = locale.getpreferredencoding() # depends on [control=['if'], data=['encoding']]
with io.open(filepath, encoding=encoding) as fd:
source = fd.read() # depends on [control=['with'], data=['fd']]
return cls(source, filename, directory, format, engine, encoding) |
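A usage sketch; when encoding is None the file is read with locale.getpreferredencoding(). The filename is a placeholder, and the call assumes a class exposing this classmethod (graphviz's Source is one such):

src = Source.from_file('round-table.gv', directory='examples', encoding='utf-8')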
def main():
"""The command line interface of the ``vcs-tool`` program."""
# Initialize logging to the terminal.
coloredlogs.install()
# Command line option defaults.
repository = None
revision = None
actions = []
# Parse the command line arguments.
try:
options, arguments = getopt.gnu_getopt(sys.argv[1:], 'r:dnisume:vqh', [
'repository=', 'rev=', 'revision=', 'release=', 'find-directory',
'find-revision-number', 'find-revision-id', 'list-releases',
'select-release=', 'sum-revisions', 'vcs-control-field', 'update',
'merge-up', 'export=', 'verbose', 'quiet', 'help',
])
for option, value in options:
if option in ('-r', '--repository'):
value = value.strip()
assert value, "Please specify the name of a repository! (using -r, --repository)"
repository = coerce_repository(value)
elif option in ('--rev', '--revision'):
revision = value.strip()
assert revision, "Please specify a nonempty revision string!"
elif option == '--release':
# TODO Right now --release and --merge-up cannot be combined
# because the following statements result in a global
# revision id which is immutable. If release objects had
# something like an optional `mutable_revision_id' it
# should be possible to support the combination of
# --release and --merge-up.
assert repository, "Please specify a repository first!"
release_id = value.strip()
assert release_id in repository.releases, "The given release identifier is invalid!"
revision = repository.releases[release_id].revision.revision_id
elif option in ('-d', '--find-directory'):
assert repository, "Please specify a repository first!"
actions.append(functools.partial(print_directory, repository))
elif option in ('-n', '--find-revision-number'):
assert repository, "Please specify a repository first!"
actions.append(functools.partial(print_revision_number, repository, revision))
elif option in ('-i', '--find-revision-id'):
assert repository, "Please specify a repository first!"
actions.append(functools.partial(print_revision_id, repository, revision))
elif option == '--list-releases':
assert repository, "Please specify a repository first!"
actions.append(functools.partial(print_releases, repository))
elif option == '--select-release':
assert repository, "Please specify a repository first!"
release_id = value.strip()
assert release_id, "Please specify a nonempty release identifier!"
actions.append(functools.partial(print_selected_release, repository, release_id))
elif option in ('-s', '--sum-revisions'):
assert len(arguments) >= 2, "Please specify one or more repository/revision pairs!"
actions.append(functools.partial(print_summed_revisions, arguments))
arguments = []
elif option == '--vcs-control-field':
assert repository, "Please specify a repository first!"
actions.append(functools.partial(print_vcs_control_field, repository, revision))
elif option in ('-u', '--update'):
assert repository, "Please specify a repository first!"
actions.append(functools.partial(repository.update))
elif option in ('-m', '--merge-up'):
assert repository, "Please specify a repository first!"
actions.append(functools.partial(
repository.merge_up,
target_branch=revision,
feature_branch=arguments[0] if arguments else None,
))
elif option in ('-e', '--export'):
directory = value.strip()
assert repository, "Please specify a repository first!"
assert directory, "Please specify the directory where the revision should be exported!"
actions.append(functools.partial(repository.export, directory, revision))
elif option in ('-v', '--verbose'):
coloredlogs.increase_verbosity()
elif option in ('-q', '--quiet'):
coloredlogs.decrease_verbosity()
elif option in ('-h', '--help'):
usage(__doc__)
return
if not actions:
usage(__doc__)
return
except Exception as e:
warning("Error: %s", e)
sys.exit(1)
# Execute the requested action(s).
try:
for action in actions:
action()
except Exception:
logger.exception("Failed to execute requested action(s)!")
sys.exit(1) | def function[main, parameter[]]:
constant[The command line interface of the ``vcs-tool`` program.]
call[name[coloredlogs].install, parameter[]]
variable[repository] assign[=] constant[None]
variable[revision] assign[=] constant[None]
variable[actions] assign[=] list[[]]
<ast.Try object at 0x7da1b0a34400>
<ast.Try object at 0x7da1b0a2dbd0> | keyword[def] identifier[main] ():
literal[string]
identifier[coloredlogs] . identifier[install] ()
identifier[repository] = keyword[None]
identifier[revision] = keyword[None]
identifier[actions] =[]
keyword[try] :
identifier[options] , identifier[arguments] = identifier[getopt] . identifier[gnu_getopt] ( identifier[sys] . identifier[argv] [ literal[int] :], literal[string] ,[
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
])
keyword[for] identifier[option] , identifier[value] keyword[in] identifier[options] :
keyword[if] identifier[option] keyword[in] ( literal[string] , literal[string] ):
identifier[value] = identifier[value] . identifier[strip] ()
keyword[assert] identifier[value] , literal[string]
identifier[repository] = identifier[coerce_repository] ( identifier[value] )
keyword[elif] identifier[option] keyword[in] ( literal[string] , literal[string] ):
identifier[revision] = identifier[value] . identifier[strip] ()
keyword[assert] identifier[revision] , literal[string]
keyword[elif] identifier[option] == literal[string] :
keyword[assert] identifier[repository] , literal[string]
identifier[release_id] = identifier[value] . identifier[strip] ()
keyword[assert] identifier[release_id] keyword[in] identifier[repository] . identifier[releases] , literal[string]
identifier[revision] = identifier[repository] . identifier[releases] [ identifier[release_id] ]. identifier[revision] . identifier[revision_id]
keyword[elif] identifier[option] keyword[in] ( literal[string] , literal[string] ):
keyword[assert] identifier[repository] , literal[string]
identifier[actions] . identifier[append] ( identifier[functools] . identifier[partial] ( identifier[print_directory] , identifier[repository] ))
keyword[elif] identifier[option] keyword[in] ( literal[string] , literal[string] ):
keyword[assert] identifier[repository] , literal[string]
identifier[actions] . identifier[append] ( identifier[functools] . identifier[partial] ( identifier[print_revision_number] , identifier[repository] , identifier[revision] ))
keyword[elif] identifier[option] keyword[in] ( literal[string] , literal[string] ):
keyword[assert] identifier[repository] , literal[string]
identifier[actions] . identifier[append] ( identifier[functools] . identifier[partial] ( identifier[print_revision_id] , identifier[repository] , identifier[revision] ))
keyword[elif] identifier[option] == literal[string] :
keyword[assert] identifier[repository] , literal[string]
identifier[actions] . identifier[append] ( identifier[functools] . identifier[partial] ( identifier[print_releases] , identifier[repository] ))
keyword[elif] identifier[option] == literal[string] :
keyword[assert] identifier[repository] , literal[string]
identifier[release_id] = identifier[value] . identifier[strip] ()
keyword[assert] identifier[release_id] , literal[string]
identifier[actions] . identifier[append] ( identifier[functools] . identifier[partial] ( identifier[print_selected_release] , identifier[repository] , identifier[release_id] ))
keyword[elif] identifier[option] keyword[in] ( literal[string] , literal[string] ):
keyword[assert] identifier[len] ( identifier[arguments] )>= literal[int] , literal[string]
identifier[actions] . identifier[append] ( identifier[functools] . identifier[partial] ( identifier[print_summed_revisions] , identifier[arguments] ))
identifier[arguments] =[]
keyword[elif] identifier[option] == literal[string] :
keyword[assert] identifier[repository] , literal[string]
identifier[actions] . identifier[append] ( identifier[functools] . identifier[partial] ( identifier[print_vcs_control_field] , identifier[repository] , identifier[revision] ))
keyword[elif] identifier[option] keyword[in] ( literal[string] , literal[string] ):
keyword[assert] identifier[repository] , literal[string]
identifier[actions] . identifier[append] ( identifier[functools] . identifier[partial] ( identifier[repository] . identifier[update] ))
keyword[elif] identifier[option] keyword[in] ( literal[string] , literal[string] ):
keyword[assert] identifier[repository] , literal[string]
identifier[actions] . identifier[append] ( identifier[functools] . identifier[partial] (
identifier[repository] . identifier[merge_up] ,
identifier[target_branch] = identifier[revision] ,
identifier[feature_branch] = identifier[arguments] [ literal[int] ] keyword[if] identifier[arguments] keyword[else] keyword[None] ,
))
keyword[elif] identifier[option] keyword[in] ( literal[string] , literal[string] ):
identifier[directory] = identifier[value] . identifier[strip] ()
keyword[assert] identifier[repository] , literal[string]
keyword[assert] identifier[directory] , literal[string]
identifier[actions] . identifier[append] ( identifier[functools] . identifier[partial] ( identifier[repository] . identifier[export] , identifier[directory] , identifier[revision] ))
keyword[elif] identifier[option] keyword[in] ( literal[string] , literal[string] ):
identifier[coloredlogs] . identifier[increase_verbosity] ()
keyword[elif] identifier[option] keyword[in] ( literal[string] , literal[string] ):
identifier[coloredlogs] . identifier[decrease_verbosity] ()
keyword[elif] identifier[option] keyword[in] ( literal[string] , literal[string] ):
identifier[usage] ( identifier[__doc__] )
keyword[return]
keyword[if] keyword[not] identifier[actions] :
identifier[usage] ( identifier[__doc__] )
keyword[return]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[warning] ( literal[string] , identifier[e] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[try] :
keyword[for] identifier[action] keyword[in] identifier[actions] :
identifier[action] ()
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] ) | def main():
"""The command line interface of the ``vcs-tool`` program."""
# Initialize logging to the terminal.
coloredlogs.install()
# Command line option defaults.
repository = None
revision = None
actions = []
# Parse the command line arguments.
try:
(options, arguments) = getopt.gnu_getopt(sys.argv[1:], 'r:dnisume:vqh', ['repository=', 'rev=', 'revision=', 'release=', 'find-directory', 'find-revision-number', 'find-revision-id', 'list-releases', 'select-release=', 'sum-revisions', 'vcs-control-field', 'update', 'merge-up', 'export=', 'verbose', 'quiet', 'help'])
for (option, value) in options:
if option in ('-r', '--repository'):
value = value.strip()
assert value, 'Please specify the name of a repository! (using -r, --repository)'
repository = coerce_repository(value) # depends on [control=['if'], data=[]]
elif option in ('--rev', '--revision'):
revision = value.strip()
assert revision, 'Please specify a nonempty revision string!' # depends on [control=['if'], data=[]]
elif option == '--release':
# TODO Right now --release and --merge-up cannot be combined
# because the following statements result in a global
# revision id which is immutable. If release objects had
# something like an optional `mutable_revision_id' it
# should be possible to support the combination of
# --release and --merge-up.
assert repository, 'Please specify a repository first!'
release_id = value.strip()
assert release_id in repository.releases, 'The given release identifier is invalid!'
revision = repository.releases[release_id].revision.revision_id # depends on [control=['if'], data=[]]
elif option in ('-d', '--find-directory'):
assert repository, 'Please specify a repository first!'
actions.append(functools.partial(print_directory, repository)) # depends on [control=['if'], data=[]]
elif option in ('-n', '--find-revision-number'):
assert repository, 'Please specify a repository first!'
actions.append(functools.partial(print_revision_number, repository, revision)) # depends on [control=['if'], data=[]]
elif option in ('-i', '--find-revision-id'):
assert repository, 'Please specify a repository first!'
actions.append(functools.partial(print_revision_id, repository, revision)) # depends on [control=['if'], data=[]]
elif option == '--list-releases':
assert repository, 'Please specify a repository first!'
actions.append(functools.partial(print_releases, repository)) # depends on [control=['if'], data=[]]
elif option == '--select-release':
assert repository, 'Please specify a repository first!'
release_id = value.strip()
assert release_id, 'Please specify a nonempty release identifier!'
actions.append(functools.partial(print_selected_release, repository, release_id)) # depends on [control=['if'], data=[]]
elif option in ('-s', '--sum-revisions'):
assert len(arguments) >= 2, 'Please specify one or more repository/revision pairs!'
actions.append(functools.partial(print_summed_revisions, arguments))
arguments = [] # depends on [control=['if'], data=[]]
elif option == '--vcs-control-field':
assert repository, 'Please specify a repository first!'
actions.append(functools.partial(print_vcs_control_field, repository, revision)) # depends on [control=['if'], data=[]]
elif option in ('-u', '--update'):
assert repository, 'Please specify a repository first!'
actions.append(functools.partial(repository.update)) # depends on [control=['if'], data=[]]
elif option in ('-m', '--merge-up'):
assert repository, 'Please specify a repository first!'
actions.append(functools.partial(repository.merge_up, target_branch=revision, feature_branch=arguments[0] if arguments else None)) # depends on [control=['if'], data=[]]
elif option in ('-e', '--export'):
directory = value.strip()
assert repository, 'Please specify a repository first!'
assert directory, 'Please specify the directory where the revision should be exported!'
actions.append(functools.partial(repository.export, directory, revision)) # depends on [control=['if'], data=[]]
elif option in ('-v', '--verbose'):
coloredlogs.increase_verbosity() # depends on [control=['if'], data=[]]
elif option in ('-q', '--quiet'):
coloredlogs.decrease_verbosity() # depends on [control=['if'], data=[]]
elif option in ('-h', '--help'):
usage(__doc__)
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if not actions:
usage(__doc__)
return # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
warning('Error: %s', e)
sys.exit(1) # depends on [control=['except'], data=['e']]
# Execute the requested action(s).
try:
for action in actions:
action() # depends on [control=['for'], data=['action']] # depends on [control=['try'], data=[]]
except Exception:
logger.exception('Failed to execute requested action(s)!')
sys.exit(1) # depends on [control=['except'], data=[]] |
def except_clause(self, except_loc, exc_opt):
"""
(2.6, 2.7) except_clause: 'except' [test [('as' | ',') test]]
(3.0-) except_clause: 'except' [test ['as' NAME]]
"""
type_ = name = as_loc = name_loc = None
loc = except_loc
if exc_opt:
type_, name_opt = exc_opt
loc = loc.join(type_.loc)
if name_opt:
as_loc, name_tok, name_node = name_opt
if name_tok:
name = name_tok.value
name_loc = name_tok.loc
else:
name = name_node
name_loc = name_node.loc
loc = loc.join(name_loc)
return ast.ExceptHandler(type=type_, name=name,
except_loc=except_loc, as_loc=as_loc, name_loc=name_loc,
loc=loc) | def function[except_clause, parameter[self, except_loc, exc_opt]]:
constant[
(2.6, 2.7) except_clause: 'except' [test [('as' | ',') test]]
(3.0-) except_clause: 'except' [test ['as' NAME]]
]
variable[type_] assign[=] constant[None]
variable[loc] assign[=] name[except_loc]
if name[exc_opt] begin[:]
<ast.Tuple object at 0x7da207f03340> assign[=] name[exc_opt]
variable[loc] assign[=] call[name[loc].join, parameter[name[type_].loc]]
if name[name_opt] begin[:]
<ast.Tuple object at 0x7da207f003d0> assign[=] name[name_opt]
if name[name_tok] begin[:]
variable[name] assign[=] name[name_tok].value
variable[name_loc] assign[=] name[name_tok].loc
variable[loc] assign[=] call[name[loc].join, parameter[name[name_loc]]]
return[call[name[ast].ExceptHandler, parameter[]]] | keyword[def] identifier[except_clause] ( identifier[self] , identifier[except_loc] , identifier[exc_opt] ):
literal[string]
identifier[type_] = identifier[name] = identifier[as_loc] = identifier[name_loc] = keyword[None]
identifier[loc] = identifier[except_loc]
keyword[if] identifier[exc_opt] :
identifier[type_] , identifier[name_opt] = identifier[exc_opt]
identifier[loc] = identifier[loc] . identifier[join] ( identifier[type_] . identifier[loc] )
keyword[if] identifier[name_opt] :
identifier[as_loc] , identifier[name_tok] , identifier[name_node] = identifier[name_opt]
keyword[if] identifier[name_tok] :
identifier[name] = identifier[name_tok] . identifier[value]
identifier[name_loc] = identifier[name_tok] . identifier[loc]
keyword[else] :
identifier[name] = identifier[name_node]
identifier[name_loc] = identifier[name_node] . identifier[loc]
identifier[loc] = identifier[loc] . identifier[join] ( identifier[name_loc] )
keyword[return] identifier[ast] . identifier[ExceptHandler] ( identifier[type] = identifier[type_] , identifier[name] = identifier[name] ,
identifier[except_loc] = identifier[except_loc] , identifier[as_loc] = identifier[as_loc] , identifier[name_loc] = identifier[name_loc] ,
identifier[loc] = identifier[loc] ) | def except_clause(self, except_loc, exc_opt):
"""
(2.6, 2.7) except_clause: 'except' [test [('as' | ',') test]]
(3.0-) except_clause: 'except' [test ['as' NAME]]
"""
type_ = name = as_loc = name_loc = None
loc = except_loc
if exc_opt:
(type_, name_opt) = exc_opt
loc = loc.join(type_.loc)
if name_opt:
(as_loc, name_tok, name_node) = name_opt
if name_tok:
name = name_tok.value
name_loc = name_tok.loc # depends on [control=['if'], data=[]]
else:
name = name_node
name_loc = name_node.loc
loc = loc.join(name_loc) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return ast.ExceptHandler(type=type_, name=name, except_loc=except_loc, as_loc=as_loc, name_loc=name_loc, loc=loc) |
def search(self, q, **kwargs):
"""
You can pass in any of the Summon Search API parameters
(without the "s." prefix). For example to remove highlighting:
result = api.search("Web", hl=False)
See the Summon API documentation for the full list of possible
parameters:
http://api.summon.serialssolutions.com/help/api/search/parameters
"""
params = {"s.q": q}
for k, v in kwargs.items():
params["s." + k] = v
r = self._get("/2.0.0/search", params)
return r | def function[search, parameter[self, q]]:
constant[
You can pass in any of the Summon Search API parameters
(without the "s." prefix). For example to remove highlighting:
result = api.search("Web", hl=False)
See the Summon API documentation for the full list of possible
parameters:
http://api.summon.serialssolutions.com/help/api/search/parameters
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18eb56590>], [<ast.Name object at 0x7da18eb55390>]]
for taget[tuple[[<ast.Name object at 0x7da18eb56fb0>, <ast.Name object at 0x7da18eb54ca0>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
call[name[params]][binary_operation[constant[s.] + name[k]]] assign[=] name[v]
variable[r] assign[=] call[name[self]._get, parameter[constant[/2.0.0/search], name[params]]]
return[name[r]] | keyword[def] identifier[search] ( identifier[self] , identifier[q] ,** identifier[kwargs] ):
literal[string]
identifier[params] ={ literal[string] : identifier[q] }
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] ():
identifier[params] [ literal[string] + identifier[k] ]= identifier[v]
identifier[r] = identifier[self] . identifier[_get] ( literal[string] , identifier[params] )
keyword[return] identifier[r] | def search(self, q, **kwargs):
"""
You can pass in any of the Summon Search API parameters
(without the "s." prefix). For example to remove highlighting:
result = api.search("Web", hl=False)
See the Summon API documentation for the full list of possible
parameters:
http://api.summon.serialssolutions.com/help/api/search/parameters
"""
params = {'s.q': q}
for (k, v) in kwargs.items():
params['s.' + k] = v # depends on [control=['for'], data=[]]
r = self._get('/2.0.0/search', params)
return r |
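A usage sketch showing how keyword arguments are rewritten with the Summon "s." prefix before the request is issued; the client construction below is an assumption, not shown in the original:

api = Summon(api_id, api_key)               # hypothetical client setup
result = api.search("Web", hl=False, ps=50)
# sends GET /2.0.0/search with params {'s.q': 'Web', 's.hl': False, 's.ps': 50}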
def append_child(self, name, child): # type: (str, typing.Any) -> ArTree
"""Append new child and return it."""
temp = ArTree(name, child)
self._array.append(temp)
return temp | def function[append_child, parameter[self, name, child]]:
constant[Append new child and return it.]
variable[temp] assign[=] call[name[ArTree], parameter[name[name], name[child]]]
call[name[self]._array.append, parameter[name[temp]]]
return[name[temp]] | keyword[def] identifier[append_child] ( identifier[self] , identifier[name] , identifier[child] ):
literal[string]
identifier[temp] = identifier[ArTree] ( identifier[name] , identifier[child] )
identifier[self] . identifier[_array] . identifier[append] ( identifier[temp] )
keyword[return] identifier[temp] | def append_child(self, name, child): # type: (str, typing.Any) -> ArTree
'Append new child and return it.'
temp = ArTree(name, child)
self._array.append(temp)
return temp |
def some(predicate, *seqs):
"""
>>> some(lambda x: x, [0, False, None])
False
>>> some(lambda x: x, [None, 0, 2, 3])
2
>>> some(operator.eq, [0,1,2], [2,1,0])
True
>>> some(operator.eq, [1,2], [2,1])
False
"""
try:
if len(seqs) == 1: return ifilter(bool,imap(predicate, seqs[0])).next()
else: return ifilter(bool,starmap(predicate, izip(*seqs))).next()
except StopIteration: return False | def function[some, parameter[predicate]]:
constant[
>>> some(lambda x: x, [0, False, None])
False
>>> some(lambda x: x, [None, 0, 2, 3])
2
>>> some(operator.eq, [0,1,2], [2,1,0])
True
>>> some(operator.eq, [1,2], [2,1])
False
]
<ast.Try object at 0x7da1b10c7190> | keyword[def] identifier[some] ( identifier[predicate] ,* identifier[seqs] ):
literal[string]
keyword[try] :
keyword[if] identifier[len] ( identifier[seqs] )== literal[int] : keyword[return] identifier[ifilter] ( identifier[bool] , identifier[imap] ( identifier[predicate] , identifier[seqs] [ literal[int] ])). identifier[next] ()
keyword[else] : keyword[return] identifier[ifilter] ( identifier[bool] , identifier[starmap] ( identifier[predicate] , identifier[izip] (* identifier[seqs] ))). identifier[next] ()
keyword[except] identifier[StopIteration] : keyword[return] keyword[False] | def some(predicate, *seqs):
"""
>>> some(lambda x: x, [0, False, None])
False
>>> some(lambda x: x, [None, 0, 2, 3])
2
>>> some(operator.eq, [0,1,2], [2,1,0])
True
>>> some(operator.eq, [1,2], [2,1])
False
"""
try:
if len(seqs) == 1:
return ifilter(bool, imap(predicate, seqs[0])).next() # depends on [control=['if'], data=[]]
else:
return ifilter(bool, starmap(predicate, izip(*seqs))).next() # depends on [control=['try'], data=[]]
except StopIteration:
return False # depends on [control=['except'], data=[]] |
def show_profiles(name, server, org_vm):
"""
Create a table of info about the profiles based on getting the
references, etc. both in the dependent and antecedent direction.
The resulting table is printed.
"""
rows = []
for profile_inst in server.profiles:
pn = profile_name(org_vm, profile_inst)
deps = get_associated_profile_names(
profile_inst.path, "dependent", org_vm, server,
include_classnames=False)
dep_refs = get_references(profile_inst.path, "antecedent", pn, server)
ants = get_associated_profile_names(
profile_inst.path, "antecedent", org_vm, server,
include_classnames=False)
ant_refs = get_references(profile_inst.path, "dependent", pn, server)
# get unique class names
dep_ref_clns = set([ref.classname for ref in dep_refs])
ant_ref_clns = set([ref.classname for ref in ant_refs])
row = (pn,
fold_list(deps),
fold_list(list(dep_ref_clns)),
fold_list(ants),
fold_list(list(ant_ref_clns)))
rows.append(row)
# append this server to the dict of servers for this profile
SERVERS_FOR_PROFILE[pn].append(name)
title = '%s: Advertised profiles showing profile associations. ' \
        'Dependencies are Associators, AssocClass=CIM_ReferencedProfile. ' \
        'This table shows the results for ' % name
headers = ['Profile',
'Assoc CIMReferencedProfile\nResultRole\nDependent',
'Ref classes References\nRole=Dependent',
'Assoc CIMReferencedProfile\nResultRole\nAntecedent',
'Ref classes References\nRole=Dependent']
print_table(title, headers, rows, sort_columns=[1, 0]) | def function[show_profiles, parameter[name, server, org_vm]]:
constant[
Create a table of info about the profiles based on getting the
references, etc. both in the dependent and antecedent direction.
The resulting table is printed.
]
variable[rows] assign[=] list[[]]
for taget[name[profile_inst]] in starred[name[server].profiles] begin[:]
variable[pn] assign[=] call[name[profile_name], parameter[name[org_vm], name[profile_inst]]]
variable[deps] assign[=] call[name[get_associated_profile_names], parameter[name[profile_inst].path, constant[dependent], name[org_vm], name[server]]]
variable[dep_refs] assign[=] call[name[get_references], parameter[name[profile_inst].path, constant[antecedent], name[pn], name[server]]]
variable[ants] assign[=] call[name[get_associated_profile_names], parameter[name[profile_inst].path, constant[antecedent], name[org_vm], name[server]]]
variable[ant_refs] assign[=] call[name[get_references], parameter[name[profile_inst].path, constant[dependent], name[pn], name[server]]]
variable[dep_ref_clns] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da1b0b0a800>]]
variable[ant_ref_clns] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da1b0b08fa0>]]
variable[row] assign[=] tuple[[<ast.Name object at 0x7da1b0b08790>, <ast.Call object at 0x7da1b0b09450>, <ast.Call object at 0x7da1b0b0ae30>, <ast.Call object at 0x7da1b0b09150>, <ast.Call object at 0x7da1b0b09870>]]
call[name[rows].append, parameter[name[row]]]
call[call[name[SERVERS_FOR_PROFILE]][name[pn]].append, parameter[name[name]]]
variable[title] assign[=] binary_operation[constant[Advertised profiles showing Profiles associations. Dependencies are Associators, AssocClass=CIM_ReferencedProfile. This table shows the results for %s] <ast.Mod object at 0x7da2590d6920> name[name]]
variable[headers] assign[=] list[[<ast.Constant object at 0x7da1b0e9fca0>, <ast.Constant object at 0x7da1b0e9f850>, <ast.Constant object at 0x7da1b0e9f070>, <ast.Constant object at 0x7da1b0e9c6d0>, <ast.Constant object at 0x7da1b0e9caf0>]]
call[name[print_table], parameter[name[title], name[headers], name[rows]]] | keyword[def] identifier[show_profiles] ( identifier[name] , identifier[server] , identifier[org_vm] ):
literal[string]
identifier[rows] =[]
keyword[for] identifier[profile_inst] keyword[in] identifier[server] . identifier[profiles] :
identifier[pn] = identifier[profile_name] ( identifier[org_vm] , identifier[profile_inst] )
identifier[deps] = identifier[get_associated_profile_names] (
identifier[profile_inst] . identifier[path] , literal[string] , identifier[org_vm] , identifier[server] ,
identifier[include_classnames] = keyword[False] )
identifier[dep_refs] = identifier[get_references] ( identifier[profile_inst] . identifier[path] , literal[string] , identifier[pn] , identifier[server] )
identifier[ants] = identifier[get_associated_profile_names] (
identifier[profile_inst] . identifier[path] , literal[string] , identifier[org_vm] , identifier[server] ,
identifier[include_classnames] = keyword[False] )
identifier[ant_refs] = identifier[get_references] ( identifier[profile_inst] . identifier[path] , literal[string] ,
identifier[pn] , identifier[server] )
identifier[dep_ref_clns] = identifier[set] ([ identifier[ref] . identifier[classname] keyword[for] identifier[ref] keyword[in] identifier[dep_refs] ])
identifier[ant_ref_clns] = identifier[set] ([ identifier[ref] . identifier[classname] keyword[for] identifier[ref] keyword[in] identifier[ant_refs] ])
identifier[row] =( identifier[pn] ,
identifier[fold_list] ( identifier[deps] ),
identifier[fold_list] ( identifier[list] ( identifier[dep_ref_clns] )),
identifier[fold_list] ( identifier[ants] ),
identifier[fold_list] ( identifier[list] ( identifier[ant_ref_clns] )))
identifier[rows] . identifier[append] ( identifier[row] )
identifier[SERVERS_FOR_PROFILE] [ identifier[pn] ]. identifier[append] ( identifier[name] )
identifier[title] = literal[string] literal[string] literal[string] % identifier[name]
identifier[headers] =[ literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ]
identifier[print_table] ( identifier[title] , identifier[headers] , identifier[rows] , identifier[sort_columns] =[ literal[int] , literal[int] ]) | def show_profiles(name, server, org_vm):
"""
Create a table of info about the profiles based on getting the
references, etc. both in the dependent and antecedent direction.
The resulting table is printed.
"""
rows = []
for profile_inst in server.profiles:
pn = profile_name(org_vm, profile_inst)
deps = get_associated_profile_names(profile_inst.path, 'dependent', org_vm, server, include_classnames=False)
dep_refs = get_references(profile_inst.path, 'antecedent', pn, server)
ants = get_associated_profile_names(profile_inst.path, 'antecedent', org_vm, server, include_classnames=False)
        ant_refs = get_references(profile_inst.path, 'dependent', pn, server)
# get unique class names
dep_ref_clns = set([ref.classname for ref in dep_refs])
ant_ref_clns = set([ref.classname for ref in ant_refs])
row = (pn, fold_list(deps), fold_list(list(dep_ref_clns)), fold_list(ants), fold_list(list(ant_ref_clns)))
rows.append(row)
# append this server to the dict of servers for this profile
        SERVERS_FOR_PROFILE[pn].append(name) # depends on [control=['for'], data=['profile_inst']]
    title = 'Advertised profiles showing Profiles associations. Dependencies are Associators, AssocClass=CIM_ReferencedProfile. This table shows the results for %s' % name
    headers = ['Profile', 'Assoc CIMReferencedProfile\nResultRole\nDependent', 'Ref classes References\nRole=Dependent', 'Assoc CIMReferencedProfile\nResultRole\nAntecedent', 'Ref classes References\nRole=Dependent']
print_table(title, headers, rows, sort_columns=[1, 0]) |
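# `fold_list` is one of the helpers `show_profiles` above assumes; a minimal
# hypothetical sketch, inferred only from how it is called (folding a list of
# names into a wrapped display string for one table cell):
def fold_list(items, max_width=40):
    text = ", ".join(str(item) for item in items)
    # wrap so each table cell stays readable
    return "\n".join(text[i:i + max_width] for i in range(0, len(text), max_width))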
async def on_raw_761(self, message):
""" Metadata key/value. """
target, targetmeta = self._parse_user(message.params[0])
key, visibility = message.params[1:3]
value = message.params[3] if len(message.params) > 3 else None
if target not in self._pending['metadata']:
return
if target in self.users:
self._sync_user(target, targetmeta)
self._metadata_info[target][key] = value | <ast.AsyncFunctionDef object at 0x7da207f00ca0> | keyword[async] keyword[def] identifier[on_raw_761] ( identifier[self] , identifier[message] ):
literal[string]
identifier[target] , identifier[targetmeta] = identifier[self] . identifier[_parse_user] ( identifier[message] . identifier[params] [ literal[int] ])
identifier[key] , identifier[visibility] = identifier[message] . identifier[params] [ literal[int] : literal[int] ]
identifier[value] = identifier[message] . identifier[params] [ literal[int] ] keyword[if] identifier[len] ( identifier[message] . identifier[params] )> literal[int] keyword[else] keyword[None]
keyword[if] identifier[target] keyword[not] keyword[in] identifier[self] . identifier[_pending] [ literal[string] ]:
keyword[return]
keyword[if] identifier[target] keyword[in] identifier[self] . identifier[users] :
identifier[self] . identifier[_sync_user] ( identifier[target] , identifier[targetmeta] )
identifier[self] . identifier[_metadata_info] [ identifier[target] ][ identifier[key] ]= identifier[value] | async def on_raw_761(self, message):
""" Metadata key/value. """
(target, targetmeta) = self._parse_user(message.params[0])
(key, visibility) = message.params[1:3]
value = message.params[3] if len(message.params) > 3 else None
if target not in self._pending['metadata']:
return # depends on [control=['if'], data=[]]
if target in self.users:
self._sync_user(target, targetmeta) # depends on [control=['if'], data=['target']]
self._metadata_info[target][key] = value |
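# Illustrative only: a METADATA key/value reply (IRC numeric 761) carries
# params shaped like this hypothetical example, which the handler slices:
params = ['nick!user@host', 'url', '*', 'https://example.com']
key, visibility = params[1:3]                    # 'url', '*'
value = params[3] if len(params) > 3 else None   # 'https://example.com'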
def topological_nodes(self):
"""
Yield nodes in topological order.
Returns:
generator(DAGNode): node in topological order
"""
return nx.lexicographical_topological_sort(self._multi_graph,
key=lambda x: str(x.qargs)) | def function[topological_nodes, parameter[self]]:
constant[
Yield nodes in topological order.
Returns:
generator(DAGNode): node in topological order
]
return[call[name[nx].lexicographical_topological_sort, parameter[name[self]._multi_graph]]] | keyword[def] identifier[topological_nodes] ( identifier[self] ):
literal[string]
keyword[return] identifier[nx] . identifier[lexicographical_topological_sort] ( identifier[self] . identifier[_multi_graph] ,
identifier[key] = keyword[lambda] identifier[x] : identifier[str] ( identifier[x] . identifier[qargs] )) | def topological_nodes(self):
"""
Yield nodes in topological order.
Returns:
generator(DAGNode): node in topological order
"""
return nx.lexicographical_topological_sort(self._multi_graph, key=lambda x: str(x.qargs)) |
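# Minimal sketch of the underlying networkx call (assuming networkx >= 2.0),
# on a toy graph rather than the multigraph of DAG nodes used above:
import networkx as nx

g = nx.DiGraph()
g.add_edges_from([('a', 'b'), ('a', 'c'), ('b', 'd'), ('c', 'd')])
# ties between 'b' and 'c' are broken lexicographically by the key function
order = list(nx.lexicographical_topological_sort(g, key=str))  # a, b, c, d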
def detect_xid_devices(self):
"""
For all of the com ports connected to the computer, send an
    XID command '_c1'. If the device responds with '_xid', it is
    an XID device.
"""
self.__xid_cons = []
for c in self.__com_ports:
device_found = False
for b in [115200, 19200, 9600, 57600, 38400]:
con = XidConnection(c, b)
try:
con.open()
except SerialException:
continue
con.flush_input()
con.flush_output()
returnval = con.send_xid_command("_c1", 5).decode('ASCII')
if returnval.startswith('_xid'):
device_found = True
self.__xid_cons.append(con)
if(returnval != '_xid0'):
# set the device into XID mode
con.send_xid_command('c10')
con.flush_input()
con.flush_output()
# be sure to reset the timer to avoid the 4.66 hours
# problem. (refer to XidConnection.xid_input_found to
# read about the 4.66 hours)
con.send_xid_command('e1')
con.send_xid_command('e5')
con.close()
if device_found:
break | def function[detect_xid_devices, parameter[self]]:
constant[
For all of the com ports connected to the computer, send an
    XID command '_c1'. If the device responds with '_xid', it is
    an XID device.
]
name[self].__xid_cons assign[=] list[[]]
for taget[name[c]] in starred[name[self].__com_ports] begin[:]
variable[device_found] assign[=] constant[False]
for taget[name[b]] in starred[list[[<ast.Constant object at 0x7da204564820>, <ast.Constant object at 0x7da204567190>, <ast.Constant object at 0x7da204567010>, <ast.Constant object at 0x7da204566800>, <ast.Constant object at 0x7da2045662f0>]]] begin[:]
variable[con] assign[=] call[name[XidConnection], parameter[name[c], name[b]]]
<ast.Try object at 0x7da204565810>
call[name[con].flush_input, parameter[]]
call[name[con].flush_output, parameter[]]
variable[returnval] assign[=] call[call[name[con].send_xid_command, parameter[constant[_c1], constant[5]]].decode, parameter[constant[ASCII]]]
if call[name[returnval].startswith, parameter[constant[_xid]]] begin[:]
variable[device_found] assign[=] constant[True]
call[name[self].__xid_cons.append, parameter[name[con]]]
if compare[name[returnval] not_equal[!=] constant[_xid0]] begin[:]
call[name[con].send_xid_command, parameter[constant[c10]]]
call[name[con].flush_input, parameter[]]
call[name[con].flush_output, parameter[]]
call[name[con].send_xid_command, parameter[constant[e1]]]
call[name[con].send_xid_command, parameter[constant[e5]]]
call[name[con].close, parameter[]]
if name[device_found] begin[:]
break | keyword[def] identifier[detect_xid_devices] ( identifier[self] ):
literal[string]
identifier[self] . identifier[__xid_cons] =[]
keyword[for] identifier[c] keyword[in] identifier[self] . identifier[__com_ports] :
identifier[device_found] = keyword[False]
keyword[for] identifier[b] keyword[in] [ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]:
identifier[con] = identifier[XidConnection] ( identifier[c] , identifier[b] )
keyword[try] :
identifier[con] . identifier[open] ()
keyword[except] identifier[SerialException] :
keyword[continue]
identifier[con] . identifier[flush_input] ()
identifier[con] . identifier[flush_output] ()
identifier[returnval] = identifier[con] . identifier[send_xid_command] ( literal[string] , literal[int] ). identifier[decode] ( literal[string] )
keyword[if] identifier[returnval] . identifier[startswith] ( literal[string] ):
identifier[device_found] = keyword[True]
identifier[self] . identifier[__xid_cons] . identifier[append] ( identifier[con] )
keyword[if] ( identifier[returnval] != literal[string] ):
identifier[con] . identifier[send_xid_command] ( literal[string] )
identifier[con] . identifier[flush_input] ()
identifier[con] . identifier[flush_output] ()
identifier[con] . identifier[send_xid_command] ( literal[string] )
identifier[con] . identifier[send_xid_command] ( literal[string] )
identifier[con] . identifier[close] ()
keyword[if] identifier[device_found] :
keyword[break] | def detect_xid_devices(self):
"""
For all of the com ports connected to the computer, send an
XID command '_c1'. If the device response with '_xid', it is
an xid device.
"""
self.__xid_cons = []
for c in self.__com_ports:
device_found = False
for b in [115200, 19200, 9600, 57600, 38400]:
con = XidConnection(c, b)
try:
con.open() # depends on [control=['try'], data=[]]
except SerialException:
continue # depends on [control=['except'], data=[]]
con.flush_input()
con.flush_output()
returnval = con.send_xid_command('_c1', 5).decode('ASCII')
if returnval.startswith('_xid'):
device_found = True
self.__xid_cons.append(con)
if returnval != '_xid0':
# set the device into XID mode
con.send_xid_command('c10')
con.flush_input()
con.flush_output() # depends on [control=['if'], data=[]]
# be sure to reset the timer to avoid the 4.66 hours
# problem. (refer to XidConnection.xid_input_found to
# read about the 4.66 hours)
con.send_xid_command('e1')
con.send_xid_command('e5') # depends on [control=['if'], data=[]]
con.close()
if device_found:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['b']] # depends on [control=['for'], data=['c']] |
def _default_next_colour(particle):
"""
Default next colour implementation - linear progression through
each colour tuple.
"""
return particle.colours[
(len(particle.colours) - 1) * particle.time // particle.life_time] | def function[_default_next_colour, parameter[particle]]:
constant[
Default next colour implementation - linear progression through
each colour tuple.
]
return[call[name[particle].colours][binary_operation[binary_operation[binary_operation[call[name[len], parameter[name[particle].colours]] - constant[1]] * name[particle].time] <ast.FloorDiv object at 0x7da2590d6bc0> name[particle].life_time]]] | keyword[def] identifier[_default_next_colour] ( identifier[particle] ):
literal[string]
keyword[return] identifier[particle] . identifier[colours] [
( identifier[len] ( identifier[particle] . identifier[colours] )- literal[int] )* identifier[particle] . identifier[time] // identifier[particle] . identifier[life_time] ] | def _default_next_colour(particle):
"""
Default next colour implementation - linear progression through
each colour tuple.
"""
return particle.colours[(len(particle.colours) - 1) * particle.time // particle.life_time] |
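# Worked example of the linear progression above (values are made up):
colours = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
life_time, time = 100, 50
index = (len(colours) - 1) * time // life_time   # == 1 -> (0, 255, 0)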
def make_multiple_uni_disc_packets(cid: tuple, sourceName: str, universes: list) -> List['UniverseDiscoveryPacket']:
"""
    Creates a list of universe discovery packets based on the given data. It automatically creates enough packets
    for the given universes list.
:param cid: the cid to use in all packets
:param sourceName: the source name to use in all packets
:param universes: the universes. Can be longer than 512, but has to be shorter than 256*512.
The values in the list should be [1-63999]
:return: a list full of universe discovery packets
"""
tmpList = []
    # one packet per 512 universes, plus one more for any remainder
    if len(universes)%512 != 0:
        num_of_packets = int(len(universes)/512)+1
    else:
        num_of_packets = int(len(universes)/512)
    universes.sort()  # E1.31 requires the advertised universes to be sorted
for i in range(0, num_of_packets):
        if i == num_of_packets-1:
            # last packet: take the remaining universes
            tmpUniverses = universes[i * 512:len(universes)]
        else:
            tmpUniverses = universes[i * 512:(i+1) * 512]
# create new UniverseDiscoveryPacket and append it to the list. Page and lastPage are getting special values
tmpList.append(UniverseDiscoveryPacket(cid=cid, sourceName=sourceName, universes=tmpUniverses,
page=i, lastPage=num_of_packets-1))
return tmpList | def function[make_multiple_uni_disc_packets, parameter[cid, sourceName, universes]]:
constant[
    Creates a list of universe discovery packets based on the given data. It automatically creates enough packets
    for the given universes list.
:param cid: the cid to use in all packets
:param sourceName: the source name to use in all packets
:param universes: the universes. Can be longer than 512, but has to be shorter than 256*512.
The values in the list should be [1-63999]
:return: a list full of universe discovery packets
]
variable[tmpList] assign[=] list[[]]
if compare[binary_operation[call[name[len], parameter[name[universes]]] <ast.Mod object at 0x7da2590d6920> constant[512]] not_equal[!=] constant[0]] begin[:]
variable[num_of_packets] assign[=] binary_operation[call[name[int], parameter[binary_operation[call[name[len], parameter[name[universes]]] / constant[512]]]] + constant[1]]
call[name[universes].sort, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[num_of_packets]]]] begin[:]
if compare[name[i] equal[==] binary_operation[name[num_of_packets] - constant[1]]] begin[:]
variable[tmpUniverses] assign[=] call[name[universes]][<ast.Slice object at 0x7da20c6c6260>]
call[name[tmpList].append, parameter[call[name[UniverseDiscoveryPacket], parameter[]]]]
return[name[tmpList]] | keyword[def] identifier[make_multiple_uni_disc_packets] ( identifier[cid] : identifier[tuple] , identifier[sourceName] : identifier[str] , identifier[universes] : identifier[list] )-> identifier[List] [ literal[string] ]:
literal[string]
identifier[tmpList] =[]
keyword[if] identifier[len] ( identifier[universes] )% literal[int] != literal[int] :
identifier[num_of_packets] = identifier[int] ( identifier[len] ( identifier[universes] )/ literal[int] )+ literal[int]
keyword[else] :
identifier[num_of_packets] = identifier[int] ( identifier[len] ( identifier[universes] )/ literal[int] )
identifier[universes] . identifier[sort] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[num_of_packets] ):
keyword[if] identifier[i] == identifier[num_of_packets] - literal[int] :
identifier[tmpUniverses] = identifier[universes] [ identifier[i] * literal[int] : identifier[len] ( identifier[universes] )]
keyword[else] :
identifier[tmpUniverses] = identifier[universes] [ identifier[i] * literal[int] :( identifier[i] + literal[int] )* literal[int] ]
identifier[tmpList] . identifier[append] ( identifier[UniverseDiscoveryPacket] ( identifier[cid] = identifier[cid] , identifier[sourceName] = identifier[sourceName] , identifier[universes] = identifier[tmpUniverses] ,
identifier[page] = identifier[i] , identifier[lastPage] = identifier[num_of_packets] - literal[int] ))
keyword[return] identifier[tmpList] | def make_multiple_uni_disc_packets(cid: tuple, sourceName: str, universes: list) -> List['UniverseDiscoveryPacket']:
"""
    Creates a list of universe discovery packets based on the given data. It automatically creates enough packets
    for the given universes list.
:param cid: the cid to use in all packets
:param sourceName: the source name to use in all packets
:param universes: the universes. Can be longer than 512, but has to be shorter than 256*512.
The values in the list should be [1-63999]
:return: a list full of universe discovery packets
"""
tmpList = []
if len(universes) % 512 != 0:
num_of_packets = int(len(universes) / 512) + 1 # depends on [control=['if'], data=[]]
    else:
        # the universe count divides evenly into packets of 512
        num_of_packets = int(len(universes) / 512)
    universes.sort()  # E1.31 requires the advertised universes to be sorted
for i in range(0, num_of_packets):
        if i == num_of_packets - 1:
            # last packet: take the remaining universes
            tmpUniverses = universes[i * 512:len(universes)] # depends on [control=['if'], data=['i']]
        else:
            tmpUniverses = universes[i * 512:(i + 1) * 512]
# create new UniverseDiscoveryPacket and append it to the list. Page and lastPage are getting special values
tmpList.append(UniverseDiscoveryPacket(cid=cid, sourceName=sourceName, universes=tmpUniverses, page=i, lastPage=num_of_packets - 1)) # depends on [control=['for'], data=['i']]
return tmpList |
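# Usage sketch (the 16-byte cid value is illustrative; UniverseDiscoveryPacket
# comes from the surrounding module): 600 universes do not fit in one packet
# of 512, so two discovery packets come back.
cid = tuple(range(16))
universes = list(range(1, 601))
packets = make_multiple_uni_disc_packets(cid, 'demo source', universes)
# packets[0] carries universes 1-512 (page 0), packets[1] the rest (page 1)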
async def verify_scriptworker_task(chain, obj):
"""Verify the signing trust object.
Currently the only check is to make sure it was run on a scriptworker.
Args:
chain (ChainOfTrust): the chain we're operating on
obj (ChainOfTrust or LinkOfTrust): the trust object for the signing task.
"""
errors = []
if obj.worker_impl != "scriptworker":
errors.append("{} {} must be run from scriptworker!".format(obj.name, obj.task_id))
raise_on_errors(errors) | <ast.AsyncFunctionDef object at 0x7da204566c80> | keyword[async] keyword[def] identifier[verify_scriptworker_task] ( identifier[chain] , identifier[obj] ):
literal[string]
identifier[errors] =[]
keyword[if] identifier[obj] . identifier[worker_impl] != literal[string] :
identifier[errors] . identifier[append] ( literal[string] . identifier[format] ( identifier[obj] . identifier[name] , identifier[obj] . identifier[task_id] ))
identifier[raise_on_errors] ( identifier[errors] ) | async def verify_scriptworker_task(chain, obj):
"""Verify the signing trust object.
Currently the only check is to make sure it was run on a scriptworker.
Args:
chain (ChainOfTrust): the chain we're operating on
obj (ChainOfTrust or LinkOfTrust): the trust object for the signing task.
"""
errors = []
if obj.worker_impl != 'scriptworker':
errors.append('{} {} must be run from scriptworker!'.format(obj.name, obj.task_id)) # depends on [control=['if'], data=[]]
raise_on_errors(errors) |
def calcgain(self, ant1, ant2, skyfreq, pol):
""" Calculates the complex gain product (g1*g2) for a pair of antennas.
"""
select = self.select[n.where( (self.skyfreq[self.select] == skyfreq) & (self.polarization[self.select] == pol) )[0]]
if len(select): # for when telcal solutions don't exist
ind1 = n.where(ant1 == self.antnum[select])
ind2 = n.where(ant2 == self.antnum[select])
g1 = self.amp[select][ind1]*n.exp(1j*n.radians(self.phase[select][ind1])) * (not self.flagged.astype(int)[select][ind1][0])
g2 = self.amp[select][ind2]*n.exp(-1j*n.radians(self.phase[select][ind2])) * (not self.flagged.astype(int)[select][ind2][0])
else:
g1 = [0]; g2 = [0]
try:
assert (g1[0] != 0j) and (g2[0] != 0j)
invg1g2 = 1./(g1[0]*g2[0])
except (AssertionError, IndexError):
invg1g2 = 0
return invg1g2 | def function[calcgain, parameter[self, ant1, ant2, skyfreq, pol]]:
constant[ Calculates the complex gain product (g1*g2) for a pair of antennas.
]
variable[select] assign[=] call[name[self].select][call[call[name[n].where, parameter[binary_operation[compare[call[name[self].skyfreq][name[self].select] equal[==] name[skyfreq]] <ast.BitAnd object at 0x7da2590d6b60> compare[call[name[self].polarization][name[self].select] equal[==] name[pol]]]]]][constant[0]]]
if call[name[len], parameter[name[select]]] begin[:]
variable[ind1] assign[=] call[name[n].where, parameter[compare[name[ant1] equal[==] call[name[self].antnum][name[select]]]]]
variable[ind2] assign[=] call[name[n].where, parameter[compare[name[ant2] equal[==] call[name[self].antnum][name[select]]]]]
variable[g1] assign[=] binary_operation[binary_operation[call[call[name[self].amp][name[select]]][name[ind1]] * call[name[n].exp, parameter[binary_operation[constant[1j] * call[name[n].radians, parameter[call[call[name[self].phase][name[select]]][name[ind1]]]]]]]] * <ast.UnaryOp object at 0x7da1b25df1c0>]
variable[g2] assign[=] binary_operation[binary_operation[call[call[name[self].amp][name[select]]][name[ind2]] * call[name[n].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b2698c40> * call[name[n].radians, parameter[call[call[name[self].phase][name[select]]][name[ind2]]]]]]]] * <ast.UnaryOp object at 0x7da1b26992a0>]
<ast.Try object at 0x7da1b2699750>
return[name[invg1g2]] | keyword[def] identifier[calcgain] ( identifier[self] , identifier[ant1] , identifier[ant2] , identifier[skyfreq] , identifier[pol] ):
literal[string]
identifier[select] = identifier[self] . identifier[select] [ identifier[n] . identifier[where] (( identifier[self] . identifier[skyfreq] [ identifier[self] . identifier[select] ]== identifier[skyfreq] )&( identifier[self] . identifier[polarization] [ identifier[self] . identifier[select] ]== identifier[pol] ))[ literal[int] ]]
keyword[if] identifier[len] ( identifier[select] ):
identifier[ind1] = identifier[n] . identifier[where] ( identifier[ant1] == identifier[self] . identifier[antnum] [ identifier[select] ])
identifier[ind2] = identifier[n] . identifier[where] ( identifier[ant2] == identifier[self] . identifier[antnum] [ identifier[select] ])
identifier[g1] = identifier[self] . identifier[amp] [ identifier[select] ][ identifier[ind1] ]* identifier[n] . identifier[exp] ( literal[int] * identifier[n] . identifier[radians] ( identifier[self] . identifier[phase] [ identifier[select] ][ identifier[ind1] ]))*( keyword[not] identifier[self] . identifier[flagged] . identifier[astype] ( identifier[int] )[ identifier[select] ][ identifier[ind1] ][ literal[int] ])
identifier[g2] = identifier[self] . identifier[amp] [ identifier[select] ][ identifier[ind2] ]* identifier[n] . identifier[exp] (- literal[int] * identifier[n] . identifier[radians] ( identifier[self] . identifier[phase] [ identifier[select] ][ identifier[ind2] ]))*( keyword[not] identifier[self] . identifier[flagged] . identifier[astype] ( identifier[int] )[ identifier[select] ][ identifier[ind2] ][ literal[int] ])
keyword[else] :
identifier[g1] =[ literal[int] ]; identifier[g2] =[ literal[int] ]
keyword[try] :
keyword[assert] ( identifier[g1] [ literal[int] ]!= literal[int] ) keyword[and] ( identifier[g2] [ literal[int] ]!= literal[int] )
identifier[invg1g2] = literal[int] /( identifier[g1] [ literal[int] ]* identifier[g2] [ literal[int] ])
keyword[except] ( identifier[AssertionError] , identifier[IndexError] ):
identifier[invg1g2] = literal[int]
keyword[return] identifier[invg1g2] | def calcgain(self, ant1, ant2, skyfreq, pol):
""" Calculates the complex gain product (g1*g2) for a pair of antennas.
"""
select = self.select[n.where((self.skyfreq[self.select] == skyfreq) & (self.polarization[self.select] == pol))[0]]
if len(select): # for when telcal solutions don't exist
ind1 = n.where(ant1 == self.antnum[select])
ind2 = n.where(ant2 == self.antnum[select])
g1 = self.amp[select][ind1] * n.exp(1j * n.radians(self.phase[select][ind1])) * (not self.flagged.astype(int)[select][ind1][0])
g2 = self.amp[select][ind2] * n.exp(-1j * n.radians(self.phase[select][ind2])) * (not self.flagged.astype(int)[select][ind2][0]) # depends on [control=['if'], data=[]]
else:
g1 = [0]
g2 = [0]
try:
assert g1[0] != 0j and g2[0] != 0j
invg1g2 = 1.0 / (g1[0] * g2[0]) # depends on [control=['try'], data=[]]
except (AssertionError, IndexError):
invg1g2 = 0 # depends on [control=['except'], data=[]]
return invg1g2 |
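# Illustrative numbers (made up) for the gain product above: the phases enter
# with opposite signs, so for equal antenna phases they cancel and only the
# amplitudes remain in 1/(g1*g2).
import numpy as n

amp1, phase1 = 2.0, 30.0
amp2, phase2 = 2.0, 30.0
g1 = amp1 * n.exp(1j * n.radians(phase1))
g2 = amp2 * n.exp(-1j * n.radians(phase2))
invg1g2 = 1. / (g1 * g2)   # == 0.25 + 0j for these values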
def read_data(data_file, dataformat, name_mode):
"""
Load data_file described by a dataformat dict.
Parameters
----------
data_file : str
Path to data file, including extension.
dataformat : dict
A dataformat dict, see example below.
name_mode : str
    How to identify sample names. If 'file_names' uses the
input name of the file, stripped of the extension. If
'metadata_names' uses the 'name' attribute of the 'meta'
sub-dictionary in dataformat. If any other str, uses this
str as the sample name.
Example
-------
>>>
{'genfromtext_args': {'delimiter': ',',
'skip_header': 4}, # passed directly to np.genfromtxt
'column_id': {'name_row': 3, # which row contains the column names
                  'delimiter': ',', # delimiter between column names
'timecolumn': 0, # which column contains the 'time' variable
'pattern': '([A-z]{1,2}[0-9]{1,3})'}, # a regex pattern which captures the column names
'meta_regex': { # a dict of (line_no: ([descriptors], [regexs])) pairs
0: (['path'], '(.*)'),
2: (['date', 'method'], # MUST include date
'([A-Z][a-z]+ [0-9]+ [0-9]{4}[ ]+[0-9:]+ [amp]+).* ([A-z0-9]+\.m)')
}
}
Returns
-------
sample, analytes, data, meta : tuple
"""
with open(data_file) as f:
lines = f.readlines()
if 'meta_regex' in dataformat.keys():
meta = Bunch()
for k, v in dataformat['meta_regex'].items():
try:
out = re.search(v[-1], lines[int(k)]).groups()
except:
raise ValueError('Failed reading metadata when applying:\n regex: {}\nto\n line: {}'.format(v[-1], lines[int(k)]))
for i in np.arange(len(v[0])):
meta[v[0][i]] = out[i]
else:
meta = {}
# sample name
if name_mode == 'file_names':
sample = os.path.basename(data_file).split('.')[0]
elif name_mode == 'metadata_names':
sample = meta['name']
else:
sample = name_mode
# column and analyte names
columns = np.array(lines[dataformat['column_id']['name_row']].strip().split(
dataformat['column_id']['delimiter']))
if 'pattern' in dataformat['column_id'].keys():
pr = re.compile(dataformat['column_id']['pattern'])
analytes = [pr.match(c).groups()[0] for c in columns if pr.match(c)]
# do any required pre-formatting
if 'preformat_replace' in dataformat.keys():
with open(data_file) as f:
fbuffer = f.read()
for k, v in dataformat['preformat_replace'].items():
fbuffer = re.sub(k, v, fbuffer)
        # read data
read_data = np.genfromtxt(BytesIO(fbuffer.encode()),
**dataformat['genfromtext_args']).T
else:
# read data
read_data = np.genfromtxt(data_file,
**dataformat['genfromtext_args']).T
# data dict
dind = np.zeros(read_data.shape[0], dtype=bool)
for a in analytes:
dind[columns == a] = True
data = Bunch()
data['Time'] = read_data[dataformat['column_id']['timecolumn']]
# deal with time units
if 'time_unit' in dataformat['column_id']:
if isinstance(dataformat['column_id']['time_unit'], (float, int)):
time_mult = dataformat['column_id']['time_unit']
elif isinstance(dataformat['column_id']['time_unit'], str):
unit_multipliers = {'ms': 1/1000,
'min': 60/1,
's': 1}
try:
time_mult = unit_multipliers[dataformat['column_id']['time_unit']]
except:
raise ValueError("In dataformat: time_unit must be a number, 'ms', 'min' or 's'")
data['Time'] *= time_mult
# convert raw data into counts
# TODO: Is this correct? Should actually be per-analyte dwell?
# if 'unit' in dataformat:
# if dataformat['unit'] == 'cps':
# tstep = data['Time'][1] - data['Time'][0]
# read_data[dind] *= tstep
# else:
# pass
data['rawdata'] = Bunch(zip(analytes, read_data[dind]))
data['total_counts'] = np.nansum(read_data[dind], 0)
return sample, analytes, data, meta | def function[read_data, parameter[data_file, dataformat, name_mode]]:
constant[
Load data_file described by a dataformat dict.
Parameters
----------
data_file : str
Path to data file, including extension.
dataformat : dict
A dataformat dict, see example below.
name_mode : str
    How to identify sample names. If 'file_names' uses the
input name of the file, stripped of the extension. If
'metadata_names' uses the 'name' attribute of the 'meta'
sub-dictionary in dataformat. If any other str, uses this
str as the sample name.
Example
-------
>>>
{'genfromtext_args': {'delimiter': ',',
'skip_header': 4}, # passed directly to np.genfromtxt
'column_id': {'name_row': 3, # which row contains the column names
                  'delimiter': ',', # delimiter between column names
'timecolumn': 0, # which column contains the 'time' variable
'pattern': '([A-z]{1,2}[0-9]{1,3})'}, # a regex pattern which captures the column names
'meta_regex': { # a dict of (line_no: ([descriptors], [regexs])) pairs
0: (['path'], '(.*)'),
2: (['date', 'method'], # MUST include date
'([A-Z][a-z]+ [0-9]+ [0-9]{4}[ ]+[0-9:]+ [amp]+).* ([A-z0-9]+\.m)')
}
}
Returns
-------
sample, analytes, data, meta : tuple
]
with call[name[open], parameter[name[data_file]]] begin[:]
variable[lines] assign[=] call[name[f].readlines, parameter[]]
if compare[constant[meta_regex] in call[name[dataformat].keys, parameter[]]] begin[:]
variable[meta] assign[=] call[name[Bunch], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18f722770>, <ast.Name object at 0x7da18f721720>]]] in starred[call[call[name[dataformat]][constant[meta_regex]].items, parameter[]]] begin[:]
<ast.Try object at 0x7da18f721d20>
for taget[name[i]] in starred[call[name[np].arange, parameter[call[name[len], parameter[call[name[v]][constant[0]]]]]]] begin[:]
call[name[meta]][call[call[name[v]][constant[0]]][name[i]]] assign[=] call[name[out]][name[i]]
if compare[name[name_mode] equal[==] constant[file_names]] begin[:]
variable[sample] assign[=] call[call[call[name[os].path.basename, parameter[name[data_file]]].split, parameter[constant[.]]]][constant[0]]
variable[columns] assign[=] call[name[np].array, parameter[call[call[call[name[lines]][call[call[name[dataformat]][constant[column_id]]][constant[name_row]]].strip, parameter[]].split, parameter[call[call[name[dataformat]][constant[column_id]]][constant[delimiter]]]]]]
if compare[constant[pattern] in call[call[name[dataformat]][constant[column_id]].keys, parameter[]]] begin[:]
variable[pr] assign[=] call[name[re].compile, parameter[call[call[name[dataformat]][constant[column_id]]][constant[pattern]]]]
variable[analytes] assign[=] <ast.ListComp object at 0x7da18f7216f0>
if compare[constant[preformat_replace] in call[name[dataformat].keys, parameter[]]] begin[:]
with call[name[open], parameter[name[data_file]]] begin[:]
variable[fbuffer] assign[=] call[name[f].read, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18f723190>, <ast.Name object at 0x7da18f721bd0>]]] in starred[call[call[name[dataformat]][constant[preformat_replace]].items, parameter[]]] begin[:]
variable[fbuffer] assign[=] call[name[re].sub, parameter[name[k], name[v], name[fbuffer]]]
variable[read_data] assign[=] call[name[np].genfromtxt, parameter[call[name[BytesIO], parameter[call[name[fbuffer].encode, parameter[]]]]]].T
variable[dind] assign[=] call[name[np].zeros, parameter[call[name[read_data].shape][constant[0]]]]
for taget[name[a]] in starred[name[analytes]] begin[:]
call[name[dind]][compare[name[columns] equal[==] name[a]]] assign[=] constant[True]
variable[data] assign[=] call[name[Bunch], parameter[]]
call[name[data]][constant[Time]] assign[=] call[name[read_data]][call[call[name[dataformat]][constant[column_id]]][constant[timecolumn]]]
if compare[constant[time_unit] in call[name[dataformat]][constant[column_id]]] begin[:]
if call[name[isinstance], parameter[call[call[name[dataformat]][constant[column_id]]][constant[time_unit]], tuple[[<ast.Name object at 0x7da18f721510>, <ast.Name object at 0x7da18f7201f0>]]]] begin[:]
variable[time_mult] assign[=] call[call[name[dataformat]][constant[column_id]]][constant[time_unit]]
<ast.AugAssign object at 0x7da18f7230a0>
call[name[data]][constant[rawdata]] assign[=] call[name[Bunch], parameter[call[name[zip], parameter[name[analytes], call[name[read_data]][name[dind]]]]]]
call[name[data]][constant[total_counts]] assign[=] call[name[np].nansum, parameter[call[name[read_data]][name[dind]], constant[0]]]
return[tuple[[<ast.Name object at 0x7da18f720460>, <ast.Name object at 0x7da18f7217e0>, <ast.Name object at 0x7da18f7218a0>, <ast.Name object at 0x7da18f722bc0>]]] | keyword[def] identifier[read_data] ( identifier[data_file] , identifier[dataformat] , identifier[name_mode] ):
literal[string]
keyword[with] identifier[open] ( identifier[data_file] ) keyword[as] identifier[f] :
identifier[lines] = identifier[f] . identifier[readlines] ()
keyword[if] literal[string] keyword[in] identifier[dataformat] . identifier[keys] ():
identifier[meta] = identifier[Bunch] ()
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[dataformat] [ literal[string] ]. identifier[items] ():
keyword[try] :
identifier[out] = identifier[re] . identifier[search] ( identifier[v] [- literal[int] ], identifier[lines] [ identifier[int] ( identifier[k] )]). identifier[groups] ()
keyword[except] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[v] [- literal[int] ], identifier[lines] [ identifier[int] ( identifier[k] )]))
keyword[for] identifier[i] keyword[in] identifier[np] . identifier[arange] ( identifier[len] ( identifier[v] [ literal[int] ])):
identifier[meta] [ identifier[v] [ literal[int] ][ identifier[i] ]]= identifier[out] [ identifier[i] ]
keyword[else] :
identifier[meta] ={}
keyword[if] identifier[name_mode] == literal[string] :
identifier[sample] = identifier[os] . identifier[path] . identifier[basename] ( identifier[data_file] ). identifier[split] ( literal[string] )[ literal[int] ]
keyword[elif] identifier[name_mode] == literal[string] :
identifier[sample] = identifier[meta] [ literal[string] ]
keyword[else] :
identifier[sample] = identifier[name_mode]
identifier[columns] = identifier[np] . identifier[array] ( identifier[lines] [ identifier[dataformat] [ literal[string] ][ literal[string] ]]. identifier[strip] (). identifier[split] (
identifier[dataformat] [ literal[string] ][ literal[string] ]))
keyword[if] literal[string] keyword[in] identifier[dataformat] [ literal[string] ]. identifier[keys] ():
identifier[pr] = identifier[re] . identifier[compile] ( identifier[dataformat] [ literal[string] ][ literal[string] ])
identifier[analytes] =[ identifier[pr] . identifier[match] ( identifier[c] ). identifier[groups] ()[ literal[int] ] keyword[for] identifier[c] keyword[in] identifier[columns] keyword[if] identifier[pr] . identifier[match] ( identifier[c] )]
keyword[if] literal[string] keyword[in] identifier[dataformat] . identifier[keys] ():
keyword[with] identifier[open] ( identifier[data_file] ) keyword[as] identifier[f] :
identifier[fbuffer] = identifier[f] . identifier[read] ()
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[dataformat] [ literal[string] ]. identifier[items] ():
identifier[fbuffer] = identifier[re] . identifier[sub] ( identifier[k] , identifier[v] , identifier[fbuffer] )
identifier[read_data] = identifier[np] . identifier[genfromtxt] ( identifier[BytesIO] ( identifier[fbuffer] . identifier[encode] ()),
** identifier[dataformat] [ literal[string] ]). identifier[T]
keyword[else] :
identifier[read_data] = identifier[np] . identifier[genfromtxt] ( identifier[data_file] ,
** identifier[dataformat] [ literal[string] ]). identifier[T]
identifier[dind] = identifier[np] . identifier[zeros] ( identifier[read_data] . identifier[shape] [ literal[int] ], identifier[dtype] = identifier[bool] )
keyword[for] identifier[a] keyword[in] identifier[analytes] :
identifier[dind] [ identifier[columns] == identifier[a] ]= keyword[True]
identifier[data] = identifier[Bunch] ()
identifier[data] [ literal[string] ]= identifier[read_data] [ identifier[dataformat] [ literal[string] ][ literal[string] ]]
keyword[if] literal[string] keyword[in] identifier[dataformat] [ literal[string] ]:
keyword[if] identifier[isinstance] ( identifier[dataformat] [ literal[string] ][ literal[string] ],( identifier[float] , identifier[int] )):
identifier[time_mult] = identifier[dataformat] [ literal[string] ][ literal[string] ]
keyword[elif] identifier[isinstance] ( identifier[dataformat] [ literal[string] ][ literal[string] ], identifier[str] ):
identifier[unit_multipliers] ={ literal[string] : literal[int] / literal[int] ,
literal[string] : literal[int] / literal[int] ,
literal[string] : literal[int] }
keyword[try] :
identifier[time_mult] = identifier[unit_multipliers] [ identifier[dataformat] [ literal[string] ][ literal[string] ]]
keyword[except] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[data] [ literal[string] ]*= identifier[time_mult]
identifier[data] [ literal[string] ]= identifier[Bunch] ( identifier[zip] ( identifier[analytes] , identifier[read_data] [ identifier[dind] ]))
identifier[data] [ literal[string] ]= identifier[np] . identifier[nansum] ( identifier[read_data] [ identifier[dind] ], literal[int] )
keyword[return] identifier[sample] , identifier[analytes] , identifier[data] , identifier[meta] | def read_data(data_file, dataformat, name_mode):
"""
Load data_file described by a dataformat dict.
Parameters
----------
data_file : str
Path to data file, including extension.
dataformat : dict
A dataformat dict, see example below.
name_mode : str
    How to identify sample names. If 'file_names' uses the
input name of the file, stripped of the extension. If
'metadata_names' uses the 'name' attribute of the 'meta'
sub-dictionary in dataformat. If any other str, uses this
str as the sample name.
Example
-------
>>>
{'genfromtext_args': {'delimiter': ',',
'skip_header': 4}, # passed directly to np.genfromtxt
'column_id': {'name_row': 3, # which row contains the column names
                  'delimiter': ',', # delimiter between column names
'timecolumn': 0, # which column contains the 'time' variable
'pattern': '([A-z]{1,2}[0-9]{1,3})'}, # a regex pattern which captures the column names
'meta_regex': { # a dict of (line_no: ([descriptors], [regexs])) pairs
0: (['path'], '(.*)'),
2: (['date', 'method'], # MUST include date
'([A-Z][a-z]+ [0-9]+ [0-9]{4}[ ]+[0-9:]+ [amp]+).* ([A-z0-9]+\\.m)')
}
}
Returns
-------
sample, analytes, data, meta : tuple
"""
with open(data_file) as f:
lines = f.readlines() # depends on [control=['with'], data=['f']]
if 'meta_regex' in dataformat.keys():
meta = Bunch()
for (k, v) in dataformat['meta_regex'].items():
try:
out = re.search(v[-1], lines[int(k)]).groups() # depends on [control=['try'], data=[]]
except:
raise ValueError('Failed reading metadata when applying:\n regex: {}\nto\n line: {}'.format(v[-1], lines[int(k)])) # depends on [control=['except'], data=[]]
for i in np.arange(len(v[0])):
meta[v[0][i]] = out[i] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
meta = {}
# sample name
if name_mode == 'file_names':
sample = os.path.basename(data_file).split('.')[0] # depends on [control=['if'], data=[]]
elif name_mode == 'metadata_names':
sample = meta['name'] # depends on [control=['if'], data=[]]
else:
sample = name_mode
# column and analyte names
columns = np.array(lines[dataformat['column_id']['name_row']].strip().split(dataformat['column_id']['delimiter']))
if 'pattern' in dataformat['column_id'].keys():
pr = re.compile(dataformat['column_id']['pattern'])
analytes = [pr.match(c).groups()[0] for c in columns if pr.match(c)] # depends on [control=['if'], data=[]]
# do any required pre-formatting
if 'preformat_replace' in dataformat.keys():
with open(data_file) as f:
fbuffer = f.read() # depends on [control=['with'], data=['f']]
for (k, v) in dataformat['preformat_replace'].items():
fbuffer = re.sub(k, v, fbuffer) # depends on [control=['for'], data=[]]
        # read data
read_data = np.genfromtxt(BytesIO(fbuffer.encode()), **dataformat['genfromtext_args']).T # depends on [control=['if'], data=[]]
else:
# read data
read_data = np.genfromtxt(data_file, **dataformat['genfromtext_args']).T
# data dict
dind = np.zeros(read_data.shape[0], dtype=bool)
for a in analytes:
dind[columns == a] = True # depends on [control=['for'], data=['a']]
data = Bunch()
data['Time'] = read_data[dataformat['column_id']['timecolumn']]
# deal with time units
if 'time_unit' in dataformat['column_id']:
if isinstance(dataformat['column_id']['time_unit'], (float, int)):
time_mult = dataformat['column_id']['time_unit'] # depends on [control=['if'], data=[]]
elif isinstance(dataformat['column_id']['time_unit'], str):
unit_multipliers = {'ms': 1 / 1000, 'min': 60 / 1, 's': 1}
try:
time_mult = unit_multipliers[dataformat['column_id']['time_unit']] # depends on [control=['try'], data=[]]
except:
raise ValueError("In dataformat: time_unit must be a number, 'ms', 'min' or 's'") # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
data['Time'] *= time_mult # depends on [control=['if'], data=[]]
# convert raw data into counts
# TODO: Is this correct? Should actually be per-analyte dwell?
# if 'unit' in dataformat:
# if dataformat['unit'] == 'cps':
# tstep = data['Time'][1] - data['Time'][0]
# read_data[dind] *= tstep
# else:
# pass
data['rawdata'] = Bunch(zip(analytes, read_data[dind]))
data['total_counts'] = np.nansum(read_data[dind], 0)
return (sample, analytes, data, meta) |
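# Runnable sketch of calling read_data (file contents and dataformat are made
# up to match the docstring's shape; real formats vary, and np/re/Bunch come
# from the surrounding module):
import tempfile

demo = ('path: /tmp/demo\n'
        '\n'
        'Jan 1 2020  10:00:00 am analysis.m\n'
        'header\n'
        'Time,Ca43,Sr88\n'
        '0.0,10,20\n'
        '0.1,11,21\n')
dataformat = {
    'genfromtext_args': {'delimiter': ',', 'skip_header': 5},
    'column_id': {'name_row': 4, 'delimiter': ',', 'timecolumn': 0,
                  'pattern': '([A-z]{1,2}[0-9]{1,3})'},
    'meta_regex': {0: (['path'], 'path: (.*)')},
}
with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
    f.write(demo)
sample, analytes, data, meta = read_data(f.name, dataformat, 'file_names')
# analytes == ['Ca43', 'Sr88']; data['Time'] == array([0.0, 0.1])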
def _column_pad_filter(self, next_filter):
""" Expand blank lines caused from overflow of other columns to blank
whitespace. E.g.
INPUT: [
["1a", "2a"],
[None, "2b"],
["1b", "2c"],
[None, "2d"]
]
OUTPUT: [
["1a", "2a"],
[<blank>, "2b"],
["1b", "2c"],
[<blank>, "2d"]
]
"""
next(next_filter)
while True:
line = list((yield))
for i, col in enumerate(line):
if col is None:
line[i] = self._get_blank_cell(i)
next_filter.send(line) | def function[_column_pad_filter, parameter[self, next_filter]]:
constant[ Expand blank lines caused from overflow of other columns to blank
whitespace. E.g.
INPUT: [
["1a", "2a"],
[None, "2b"],
["1b", "2c"],
[None, "2d"]
]
OUTPUT: [
["1a", "2a"],
[<blank>, "2b"],
["1b", "2c"],
[<blank>, "2d"]
]
]
call[name[next], parameter[name[next_filter]]]
while constant[True] begin[:]
variable[line] assign[=] call[name[list], parameter[<ast.Yield object at 0x7da204621e10>]]
for taget[tuple[[<ast.Name object at 0x7da2046218d0>, <ast.Name object at 0x7da204620400>]]] in starred[call[name[enumerate], parameter[name[line]]]] begin[:]
if compare[name[col] is constant[None]] begin[:]
call[name[line]][name[i]] assign[=] call[name[self]._get_blank_cell, parameter[name[i]]]
call[name[next_filter].send, parameter[name[line]]] | keyword[def] identifier[_column_pad_filter] ( identifier[self] , identifier[next_filter] ):
literal[string]
identifier[next] ( identifier[next_filter] )
keyword[while] keyword[True] :
identifier[line] = identifier[list] (( keyword[yield] ))
keyword[for] identifier[i] , identifier[col] keyword[in] identifier[enumerate] ( identifier[line] ):
keyword[if] identifier[col] keyword[is] keyword[None] :
identifier[line] [ identifier[i] ]= identifier[self] . identifier[_get_blank_cell] ( identifier[i] )
identifier[next_filter] . identifier[send] ( identifier[line] ) | def _column_pad_filter(self, next_filter):
""" Expand blank lines caused from overflow of other columns to blank
whitespace. E.g.
INPUT: [
["1a", "2a"],
[None, "2b"],
["1b", "2c"],
[None, "2d"]
]
OUTPUT: [
["1a", "2a"],
[<blank>, "2b"],
["1b", "2c"],
[<blank>, "2d"]
]
"""
next(next_filter)
while True:
line = list((yield))
for (i, col) in enumerate(line):
if col is None:
line[i] = self._get_blank_cell(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
next_filter.send(line) # depends on [control=['while'], data=[]] |
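# Standalone sketch of the coroutine protocol used above (simplified: the
# blank cell is a fixed string instead of self._get_blank_cell(i)):
def column_pad_filter(next_filter, blank='<blank>'):
    next(next_filter)                    # prime the downstream filter
    while True:
        line = list((yield))
        for i, col in enumerate(line):
            if col is None:
                line[i] = blank
        next_filter.send(line)

def collector(out):
    while True:
        out.append((yield))

rows = []
pad = column_pad_filter(collector(rows))
next(pad)                                # prime the pad filter itself
pad.send(['1a', '2a'])
pad.send([None, '2b'])                   # rows[-1] == ['<blank>', '2b']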
def check_successful_tx(web3: Web3, txid: str, timeout=180) -> Tuple[dict, dict]:
"""See if transaction went through (Solidity code did not throw).
:return: Transaction receipt and transaction info
"""
receipt = wait_for_transaction_receipt(web3=web3, txid=txid, timeout=timeout)
txinfo = web3.eth.getTransaction(txid)
if 'status' not in receipt:
raise KeyError(
'A transaction receipt does not contain the "status" field. '
'Does your chain have Byzantium rules enabled?',
)
if receipt['status'] == 0:
raise ValueError(f'Status 0 indicates failure')
if txinfo['gas'] == receipt['gasUsed']:
raise ValueError(f'Gas is completely used ({txinfo["gas"]}). Failure?')
return (receipt, txinfo) | def function[check_successful_tx, parameter[web3, txid, timeout]]:
constant[See if transaction went through (Solidity code did not throw).
:return: Transaction receipt and transaction info
]
variable[receipt] assign[=] call[name[wait_for_transaction_receipt], parameter[]]
variable[txinfo] assign[=] call[name[web3].eth.getTransaction, parameter[name[txid]]]
if compare[constant[status] <ast.NotIn object at 0x7da2590d7190> name[receipt]] begin[:]
<ast.Raise object at 0x7da1b26af1c0>
if compare[call[name[receipt]][constant[status]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b26af7c0>
if compare[call[name[txinfo]][constant[gas]] equal[==] call[name[receipt]][constant[gasUsed]]] begin[:]
<ast.Raise object at 0x7da1b26ae740>
return[tuple[[<ast.Name object at 0x7da1b26ae5f0>, <ast.Name object at 0x7da1b26aead0>]]] | keyword[def] identifier[check_successful_tx] ( identifier[web3] : identifier[Web3] , identifier[txid] : identifier[str] , identifier[timeout] = literal[int] )-> identifier[Tuple] [ identifier[dict] , identifier[dict] ]:
literal[string]
identifier[receipt] = identifier[wait_for_transaction_receipt] ( identifier[web3] = identifier[web3] , identifier[txid] = identifier[txid] , identifier[timeout] = identifier[timeout] )
identifier[txinfo] = identifier[web3] . identifier[eth] . identifier[getTransaction] ( identifier[txid] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[receipt] :
keyword[raise] identifier[KeyError] (
literal[string]
literal[string] ,
)
keyword[if] identifier[receipt] [ literal[string] ]== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[txinfo] [ literal[string] ]== identifier[receipt] [ literal[string] ]:
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] ( identifier[receipt] , identifier[txinfo] ) | def check_successful_tx(web3: Web3, txid: str, timeout=180) -> Tuple[dict, dict]:
"""See if transaction went through (Solidity code did not throw).
:return: Transaction receipt and transaction info
"""
receipt = wait_for_transaction_receipt(web3=web3, txid=txid, timeout=timeout)
txinfo = web3.eth.getTransaction(txid)
if 'status' not in receipt:
raise KeyError('A transaction receipt does not contain the "status" field. Does your chain have Byzantium rules enabled?') # depends on [control=['if'], data=[]]
if receipt['status'] == 0:
raise ValueError(f'Status 0 indicates failure') # depends on [control=['if'], data=[]]
if txinfo['gas'] == receipt['gasUsed']:
raise ValueError(f"Gas is completely used ({txinfo['gas']}). Failure?") # depends on [control=['if'], data=[]]
return (receipt, txinfo) |
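# Usage sketch (the txid is hypothetical and web3 must be connected to a
# node, so this is left as comments rather than runnable code):
# receipt, txinfo = check_successful_tx(web3, '0x' + '00' * 32, timeout=60)
# receipt['status'] == 1 and receipt['gasUsed'] < txinfo['gas']  # success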
def parse_args():
""" Parse the args, returns if the type of update:
Major, minor, fix
"""
parser = argparse.ArgumentParser()
parser.add_argument('-M', action='store_true')
parser.add_argument('-m', action='store_true')
parser.add_argument('-f', action='store_true')
args = parser.parse_args()
major, minor, fix = args.M, args.m, args.f
if major + minor + fix != 1:
fail('Please select one and only one action.')
return major, minor, fix | def function[parse_args, parameter[]]:
constant[ Parse the args; returns the type of update:
    major, minor, or fix.
]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[-M]]]
call[name[parser].add_argument, parameter[constant[-m]]]
call[name[parser].add_argument, parameter[constant[-f]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[]]
<ast.Tuple object at 0x7da1b1ea33d0> assign[=] tuple[[<ast.Attribute object at 0x7da1b1e94430>, <ast.Attribute object at 0x7da1b1e961d0>, <ast.Attribute object at 0x7da1b1e943a0>]]
if compare[binary_operation[binary_operation[name[major] + name[minor]] + name[fix]] not_equal[!=] constant[1]] begin[:]
call[name[fail], parameter[constant[Please select one and only one action.]]]
return[tuple[[<ast.Name object at 0x7da1b1e97ac0>, <ast.Name object at 0x7da1b1e95de0>, <ast.Name object at 0x7da1b1e97d90>]]] | keyword[def] identifier[parse_args] ():
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ()
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] )
identifier[args] = identifier[parser] . identifier[parse_args] ()
identifier[major] , identifier[minor] , identifier[fix] = identifier[args] . identifier[M] , identifier[args] . identifier[m] , identifier[args] . identifier[f]
keyword[if] identifier[major] + identifier[minor] + identifier[fix] != literal[int] :
identifier[fail] ( literal[string] )
keyword[return] identifier[major] , identifier[minor] , identifier[fix] | def parse_args():
""" Parse the args, returns if the type of update:
Major, minor, fix
"""
parser = argparse.ArgumentParser()
parser.add_argument('-M', action='store_true')
parser.add_argument('-m', action='store_true')
parser.add_argument('-f', action='store_true')
args = parser.parse_args()
(major, minor, fix) = (args.M, args.m, args.f)
if major + minor + fix != 1:
fail('Please select one and only one action.') # depends on [control=['if'], data=[]]
return (major, minor, fix) |
def listclip(list_, num, fromback=False):
r"""
    DEPRECATE: use slices instead
Args:
list_ (list):
num (int):
Returns:
sublist:
CommandLine:
python -m utool.util_list --test-listclip
Example1:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> # build test data
>>> list_ = [1, 2, 3, 4, 5]
>>> result_list = []
>>> # execute function
>>> num = 3
>>> result_list += [ut.listclip(list_, num)]
>>> num = 9
>>> result_list += [ut.listclip(list_, num)]
>>> # verify results
>>> result = ut.repr4(result_list)
>>> print(result)
[
[1, 2, 3],
[1, 2, 3, 4, 5],
]
Example2:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> # build test data
>>> list_ = [1, 2, 3, 4, 5]
>>> result_list = []
>>> # execute function
>>> num = 3
>>> result = ut.listclip(list_, num, fromback=True)
>>> print(result)
[3, 4, 5]
"""
if num is None:
num_ = len(list_)
else:
num_ = min(len(list_), num)
if fromback:
sublist = list_[-num_:]
else:
sublist = list_[:num_]
return sublist | def function[listclip, parameter[list_, num, fromback]]:
constant[
    DEPRECATE: use slices instead
Args:
list_ (list):
num (int):
Returns:
sublist:
CommandLine:
python -m utool.util_list --test-listclip
Example1:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> # build test data
>>> list_ = [1, 2, 3, 4, 5]
>>> result_list = []
>>> # execute function
>>> num = 3
>>> result_list += [ut.listclip(list_, num)]
>>> num = 9
>>> result_list += [ut.listclip(list_, num)]
>>> # verify results
>>> result = ut.repr4(result_list)
>>> print(result)
[
[1, 2, 3],
[1, 2, 3, 4, 5],
]
Example2:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> # build test data
>>> list_ = [1, 2, 3, 4, 5]
>>> result_list = []
>>> # execute function
>>> num = 3
>>> result = ut.listclip(list_, num, fromback=True)
>>> print(result)
[3, 4, 5]
]
if compare[name[num] is constant[None]] begin[:]
variable[num_] assign[=] call[name[len], parameter[name[list_]]]
if name[fromback] begin[:]
variable[sublist] assign[=] call[name[list_]][<ast.Slice object at 0x7da1b24c47c0>]
return[name[sublist]] | keyword[def] identifier[listclip] ( identifier[list_] , identifier[num] , identifier[fromback] = keyword[False] ):
literal[string]
keyword[if] identifier[num] keyword[is] keyword[None] :
identifier[num_] = identifier[len] ( identifier[list_] )
keyword[else] :
identifier[num_] = identifier[min] ( identifier[len] ( identifier[list_] ), identifier[num] )
keyword[if] identifier[fromback] :
identifier[sublist] = identifier[list_] [- identifier[num_] :]
keyword[else] :
identifier[sublist] = identifier[list_] [: identifier[num_] ]
keyword[return] identifier[sublist] | def listclip(list_, num, fromback=False):
"""
    DEPRECATE: use slices instead
Args:
list_ (list):
num (int):
Returns:
sublist:
CommandLine:
python -m utool.util_list --test-listclip
Example1:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> # build test data
>>> list_ = [1, 2, 3, 4, 5]
>>> result_list = []
>>> # execute function
>>> num = 3
>>> result_list += [ut.listclip(list_, num)]
>>> num = 9
>>> result_list += [ut.listclip(list_, num)]
>>> # verify results
>>> result = ut.repr4(result_list)
>>> print(result)
[
[1, 2, 3],
[1, 2, 3, 4, 5],
]
Example2:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> # build test data
>>> list_ = [1, 2, 3, 4, 5]
>>> result_list = []
>>> # execute function
>>> num = 3
>>> result = ut.listclip(list_, num, fromback=True)
>>> print(result)
[3, 4, 5]
"""
if num is None:
num_ = len(list_) # depends on [control=['if'], data=[]]
else:
num_ = min(len(list_), num)
if fromback:
sublist = list_[-num_:] # depends on [control=['if'], data=[]]
else:
sublist = list_[:num_]
return sublist |
def sell(self, account_id, **params):
"""https://developers.coinbase.com/api/v2#sell-bitcoin"""
if 'amount' not in params and 'total' not in params:
raise ValueError("Missing required parameter: 'amount' or 'total'")
for required in ['currency']:
if required not in params:
raise ValueError("Missing required parameter: %s" % required)
response = self._post('v2', 'accounts', account_id, 'sells', data=params)
return self._make_api_object(response, Sell) | def function[sell, parameter[self, account_id]]:
constant[https://developers.coinbase.com/api/v2#sell-bitcoin]
if <ast.BoolOp object at 0x7da18c4ccee0> begin[:]
<ast.Raise object at 0x7da18c4cc310>
for taget[name[required]] in starred[list[[<ast.Constant object at 0x7da18c4cdb70>]]] begin[:]
if compare[name[required] <ast.NotIn object at 0x7da2590d7190> name[params]] begin[:]
<ast.Raise object at 0x7da18c4ce1a0>
variable[response] assign[=] call[name[self]._post, parameter[constant[v2], constant[accounts], name[account_id], constant[sells]]]
return[call[name[self]._make_api_object, parameter[name[response], name[Sell]]]] | keyword[def] identifier[sell] ( identifier[self] , identifier[account_id] ,** identifier[params] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[params] keyword[and] literal[string] keyword[not] keyword[in] identifier[params] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[for] identifier[required] keyword[in] [ literal[string] ]:
keyword[if] identifier[required] keyword[not] keyword[in] identifier[params] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[required] )
identifier[response] = identifier[self] . identifier[_post] ( literal[string] , literal[string] , identifier[account_id] , literal[string] , identifier[data] = identifier[params] )
keyword[return] identifier[self] . identifier[_make_api_object] ( identifier[response] , identifier[Sell] ) | def sell(self, account_id, **params):
"""https://developers.coinbase.com/api/v2#sell-bitcoin"""
if 'amount' not in params and 'total' not in params:
raise ValueError("Missing required parameter: 'amount' or 'total'") # depends on [control=['if'], data=[]]
for required in ['currency']:
if required not in params:
raise ValueError('Missing required parameter: %s' % required) # depends on [control=['if'], data=['required']] # depends on [control=['for'], data=['required']]
response = self._post('v2', 'accounts', account_id, 'sells', data=params)
return self._make_api_object(response, Sell) |
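
A self-contained sketch of just the parameter validation above; the actual request is left out because a real sell() needs an authenticated coinbase client and hits the network:

    def check_sell_params(params):
        # mirrors sell(): one of 'amount'/'total' plus 'currency' is required
        if 'amount' not in params and 'total' not in params:
            raise ValueError("Missing required parameter: 'amount' or 'total'")
        if 'currency' not in params:
            raise ValueError("Missing required parameter: currency")

    check_sell_params({'amount': '0.5', 'currency': 'BTC'})  # passes silently
    try:
        check_sell_params({'currency': 'BTC'})
    except ValueError as exc:
        print(exc)  # Missing required parameter: 'amount' or 'total'
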
def _base_type(self):
"""Return str like 'enum.numeric' representing dimension type.
This string is a 'type.subclass' concatenation of the str keys
used to identify the dimension type in the cube response JSON.
The '.subclass' suffix only appears where a subtype is present.
"""
type_class = self._dimension_dict["type"]["class"]
if type_class == "categorical":
return "categorical"
if type_class == "enum":
subclass = self._dimension_dict["type"]["subtype"]["class"]
return "enum.%s" % subclass
raise NotImplementedError("unexpected dimension type class '%s'" % type_class) | def function[_base_type, parameter[self]]:
constant[Return str like 'enum.numeric' representing dimension type.
This string is a 'type.subclass' concatenation of the str keys
used to identify the dimension type in the cube response JSON.
The '.subclass' suffix only appears where a subtype is present.
]
variable[type_class] assign[=] call[call[name[self]._dimension_dict][constant[type]]][constant[class]]
if compare[name[type_class] equal[==] constant[categorical]] begin[:]
return[constant[categorical]]
if compare[name[type_class] equal[==] constant[enum]] begin[:]
variable[subclass] assign[=] call[call[call[name[self]._dimension_dict][constant[type]]][constant[subtype]]][constant[class]]
return[binary_operation[constant[enum.%s] <ast.Mod object at 0x7da2590d6920> name[subclass]]]
<ast.Raise object at 0x7da18dc981c0> | keyword[def] identifier[_base_type] ( identifier[self] ):
literal[string]
identifier[type_class] = identifier[self] . identifier[_dimension_dict] [ literal[string] ][ literal[string] ]
keyword[if] identifier[type_class] == literal[string] :
keyword[return] literal[string]
keyword[if] identifier[type_class] == literal[string] :
identifier[subclass] = identifier[self] . identifier[_dimension_dict] [ literal[string] ][ literal[string] ][ literal[string] ]
keyword[return] literal[string] % identifier[subclass]
keyword[raise] identifier[NotImplementedError] ( literal[string] % identifier[type_class] ) | def _base_type(self):
"""Return str like 'enum.numeric' representing dimension type.
This string is a 'type.subclass' concatenation of the str keys
used to identify the dimension type in the cube response JSON.
The '.subclass' suffix only appears where a subtype is present.
"""
type_class = self._dimension_dict['type']['class']
if type_class == 'categorical':
return 'categorical' # depends on [control=['if'], data=[]]
if type_class == 'enum':
subclass = self._dimension_dict['type']['subtype']['class']
return 'enum.%s' % subclass # depends on [control=['if'], data=[]]
raise NotImplementedError("unexpected dimension type class '%s'" % type_class) |
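
A stand-alone check of the 'type.subclass' string construction, driving the same logic with hand-built dimension dicts (these dict shapes are inferred from the code, not taken from real cube responses):

    def base_type(dimension_dict):
        type_class = dimension_dict["type"]["class"]
        if type_class == "categorical":
            return "categorical"
        if type_class == "enum":
            return "enum.%s" % dimension_dict["type"]["subtype"]["class"]
        raise NotImplementedError(type_class)

    print(base_type({"type": {"class": "categorical"}}))
    # categorical
    print(base_type({"type": {"class": "enum", "subtype": {"class": "numeric"}}}))
    # enum.numeric
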
def _argcheck(*args, **kwargs):
"""
Check that arguments are consistent with spark array construction.
Conditions are:
(1) a positional argument is a SparkContext
(2) keyword arg 'context' is a SparkContext
(3) an argument is a BoltArraySpark, or
(4) an argument is a nested list containing a BoltArraySpark
"""
try:
from pyspark import SparkContext
except ImportError:
return False
cond1 = any([isinstance(arg, SparkContext) for arg in args])
cond2 = isinstance(kwargs.get('context', None), SparkContext)
cond3 = any([isinstance(arg, BoltArraySpark) for arg in args])
cond4 = any([any([isinstance(sub, BoltArraySpark) for sub in arg])
if isinstance(arg, (tuple, list)) else False for arg in args])
return cond1 or cond2 or cond3 or cond4 | def function[_argcheck, parameter[]]:
constant[
Check that arguments are consistent with spark array construction.
Conditions are:
(1) a positional argument is a SparkContext
(2) keyword arg 'context' is a SparkContext
(3) an argument is a BoltArraySpark, or
(4) an argument is a nested list containing a BoltArraySpark
]
<ast.Try object at 0x7da2054a4eb0>
variable[cond1] assign[=] call[name[any], parameter[<ast.ListComp object at 0x7da2054a5f90>]]
variable[cond2] assign[=] call[name[isinstance], parameter[call[name[kwargs].get, parameter[constant[context], constant[None]]], name[SparkContext]]]
variable[cond3] assign[=] call[name[any], parameter[<ast.ListComp object at 0x7da2054a5900>]]
variable[cond4] assign[=] call[name[any], parameter[<ast.ListComp object at 0x7da2054a5ab0>]]
return[<ast.BoolOp object at 0x7da2054a7d00>] | keyword[def] identifier[_argcheck] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[from] identifier[pyspark] keyword[import] identifier[SparkContext]
keyword[except] identifier[ImportError] :
keyword[return] keyword[False]
identifier[cond1] = identifier[any] ([ identifier[isinstance] ( identifier[arg] , identifier[SparkContext] ) keyword[for] identifier[arg] keyword[in] identifier[args] ])
identifier[cond2] = identifier[isinstance] ( identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] ), identifier[SparkContext] )
identifier[cond3] = identifier[any] ([ identifier[isinstance] ( identifier[arg] , identifier[BoltArraySpark] ) keyword[for] identifier[arg] keyword[in] identifier[args] ])
identifier[cond4] = identifier[any] ([ identifier[any] ([ identifier[isinstance] ( identifier[sub] , identifier[BoltArraySpark] ) keyword[for] identifier[sub] keyword[in] identifier[arg] ])
keyword[if] identifier[isinstance] ( identifier[arg] ,( identifier[tuple] , identifier[list] )) keyword[else] keyword[False] keyword[for] identifier[arg] keyword[in] identifier[args] ])
keyword[return] identifier[cond1] keyword[or] identifier[cond2] keyword[or] identifier[cond3] keyword[or] identifier[cond4] | def _argcheck(*args, **kwargs):
"""
Check that arguments are consistent with spark array construction.
Conditions are:
(1) a positional argument is a SparkContext
(2) keyword arg 'context' is a SparkContext
(3) an argument is a BoltArraySpark, or
(4) an argument is a nested list containing a BoltArraySpark
"""
try:
from pyspark import SparkContext # depends on [control=['try'], data=[]]
except ImportError:
return False # depends on [control=['except'], data=[]]
cond1 = any([isinstance(arg, SparkContext) for arg in args])
cond2 = isinstance(kwargs.get('context', None), SparkContext)
cond3 = any([isinstance(arg, BoltArraySpark) for arg in args])
cond4 = any([any([isinstance(sub, BoltArraySpark) for sub in arg]) if isinstance(arg, (tuple, list)) else False for arg in args])
return cond1 or cond2 or cond3 or cond4 |
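
The fourth condition (a BoltArraySpark nested one level deep inside a tuple or list) is the subtle one; a sketch with stand-in classes, since exercising the real check would require pyspark and a BoltArraySpark instance:

    class FakeSparkContext(object): pass
    class FakeBoltArraySpark(object): pass

    def argcheck(*args):
        cond1 = any(isinstance(a, FakeSparkContext) for a in args)
        cond3 = any(isinstance(a, FakeBoltArraySpark) for a in args)
        cond4 = any(any(isinstance(s, FakeBoltArraySpark) for s in a)
                    if isinstance(a, (tuple, list)) else False for a in args)
        return cond1 or cond3 or cond4

    print(argcheck(1, 'x'))                     # False
    print(argcheck(FakeBoltArraySpark()))       # True via cond3
    print(argcheck([FakeBoltArraySpark(), 1]))  # True via the nested cond4
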
def send(self, message): # usually a json string...
"""
sends whatever it is to each transport
"""
for transport in self.transports.values():
transport.protocol.sendMessage(message) | def function[send, parameter[self, message]]:
constant[
sends whatever it is to each transport
]
for taget[name[transport]] in starred[call[name[self].transports.values, parameter[]]] begin[:]
call[name[transport].protocol.sendMessage, parameter[name[message]]] | keyword[def] identifier[send] ( identifier[self] , identifier[message] ):
literal[string]
keyword[for] identifier[transport] keyword[in] identifier[self] . identifier[transports] . identifier[values] ():
identifier[transport] . identifier[protocol] . identifier[sendMessage] ( identifier[message] ) | def send(self, message): # usually a json string...
'\n sends whatever it is to each transport\n '
for transport in self.transports.values():
transport.protocol.sendMessage(message) # depends on [control=['for'], data=['transport']] |
def set_maxsize(self, maxsize, **kwargs):
"""
Set maxsize. This involves creating a new cache and transferring the items.
"""
new_cache = self._get_cache_impl(self.impl_name, maxsize, **kwargs)
self._populate_new_cache(new_cache)
self.cache = new_cache | def function[set_maxsize, parameter[self, maxsize]]:
constant[
Set maxsize. This involves creating a new cache and transferring the items.
]
variable[new_cache] assign[=] call[name[self]._get_cache_impl, parameter[name[self].impl_name, name[maxsize]]]
call[name[self]._populate_new_cache, parameter[name[new_cache]]]
name[self].cache assign[=] name[new_cache] | keyword[def] identifier[set_maxsize] ( identifier[self] , identifier[maxsize] ,** identifier[kwargs] ):
literal[string]
identifier[new_cache] = identifier[self] . identifier[_get_cache_impl] ( identifier[self] . identifier[impl_name] , identifier[maxsize] ,** identifier[kwargs] )
identifier[self] . identifier[_populate_new_cache] ( identifier[new_cache] )
identifier[self] . identifier[cache] = identifier[new_cache] | def set_maxsize(self, maxsize, **kwargs):
"""
Set maxsize. This involves creating a new cache and transferring the items.
"""
new_cache = self._get_cache_impl(self.impl_name, maxsize, **kwargs)
self._populate_new_cache(new_cache)
self.cache = new_cache |
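
A sketch of the resize-by-copy pattern with a plain dict standing in for the cache implementation; `_get_cache_impl` and `_populate_new_cache` are private helpers not shown in this record, so the carry-over policy below (keep the first maxsize items) is an assumption:

    class ResizableCache(object):
        def __init__(self, maxsize):
            self.cache = {}
            self.maxsize = maxsize

        def set_maxsize(self, maxsize):
            # build a fresh store, then transfer up to maxsize items
            new_cache = {}
            for i, (k, v) in enumerate(self.cache.items()):
                if i >= maxsize:
                    break
                new_cache[k] = v
            self.cache = new_cache
            self.maxsize = maxsize

    c = ResizableCache(4)
    c.cache.update({'a': 1, 'b': 2, 'c': 3})
    c.set_maxsize(2)
    print(c.cache)  # {'a': 1, 'b': 2}
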
def as_event_class(obj):
"""
Convert obj into a subclass of AbinitEvent.
obj can be either a class or a string with the class name or the YAML tag
"""
if is_string(obj):
for c in all_subclasses(AbinitEvent):
if c.__name__ == obj or c.yaml_tag == obj: return c
raise ValueError("Cannot find event class associated to %s" % obj)
# Assume class.
assert obj in all_subclasses(AbinitEvent)
return obj | def function[as_event_class, parameter[obj]]:
constant[
Convert obj into a subclass of AbinitEvent.
obj can be either a class or a string with the class name or the YAML tag
]
if call[name[is_string], parameter[name[obj]]] begin[:]
for taget[name[c]] in starred[call[name[all_subclasses], parameter[name[AbinitEvent]]]] begin[:]
if <ast.BoolOp object at 0x7da207f987f0> begin[:]
return[name[c]]
<ast.Raise object at 0x7da207f9be80>
assert[compare[name[obj] in call[name[all_subclasses], parameter[name[AbinitEvent]]]]]
return[name[obj]] | keyword[def] identifier[as_event_class] ( identifier[obj] ):
literal[string]
keyword[if] identifier[is_string] ( identifier[obj] ):
keyword[for] identifier[c] keyword[in] identifier[all_subclasses] ( identifier[AbinitEvent] ):
keyword[if] identifier[c] . identifier[__name__] == identifier[obj] keyword[or] identifier[c] . identifier[yaml_tag] == identifier[obj] : keyword[return] identifier[c]
keyword[raise] identifier[ValueError] ( literal[string] % identifier[obj] )
keyword[assert] identifier[obj] keyword[in] identifier[all_subclasses] ( identifier[AbinitEvent] )
keyword[return] identifier[obj] | def as_event_class(obj):
"""
Convert obj into a subclass of AbinitEvent.
obj can be either a class or a string with the class name or the YAML tag
"""
if is_string(obj):
for c in all_subclasses(AbinitEvent):
if c.__name__ == obj or c.yaml_tag == obj:
return c # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
raise ValueError('Cannot find event class associated to %s' % obj) # depends on [control=['if'], data=[]]
# Assume class.
assert obj in all_subclasses(AbinitEvent)
return obj |
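
A self-contained sketch of the name-or-YAML-tag lookup over the subclass tree; `all_subclasses` and `is_string` come from elsewhere in the module, so both are re-stated here as plausible stand-ins:

    def all_subclasses(cls):
        subs = set(cls.__subclasses__())
        for sub in cls.__subclasses__():
            subs |= all_subclasses(sub)
        return subs

    class Event(object):
        yaml_tag = None

    class WarningEvent(Event):
        yaml_tag = '!WARNING'

    def as_event_class(obj):
        if isinstance(obj, str):
            for c in all_subclasses(Event):
                if c.__name__ == obj or c.yaml_tag == obj:
                    return c
            raise ValueError("Cannot find event class associated to %s" % obj)
        assert obj in all_subclasses(Event)
        return obj

    print(as_event_class('WarningEvent') is WarningEvent)  # True, by class name
    print(as_event_class('!WARNING') is WarningEvent)      # True, by YAML tag
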
def _displayattrs(attrib, expandattrs):
"""
Helper function to display the attributes of a Node object in lexicographic
order.
:param attrib: dictionary with the attributes
:param expandattrs: if True also displays the value of the attributes
"""
if not attrib:
return ''
if expandattrs:
alist = ['%s=%r' % item for item in sorted(attrib.items())]
else:
alist = list(attrib)
return '{%s}' % ', '.join(alist) | def function[_displayattrs, parameter[attrib, expandattrs]]:
constant[
Helper function to display the attributes of a Node object in lexicographic
order.
:param attrib: dictionary with the attributes
:param expandattrs: if True also displays the value of the attributes
]
if <ast.UnaryOp object at 0x7da2046229b0> begin[:]
return[constant[]]
if name[expandattrs] begin[:]
variable[alist] assign[=] <ast.ListComp object at 0x7da204622dd0>
return[binary_operation[constant[{%s}] <ast.Mod object at 0x7da2590d6920> call[constant[, ].join, parameter[name[alist]]]]] | keyword[def] identifier[_displayattrs] ( identifier[attrib] , identifier[expandattrs] ):
literal[string]
keyword[if] keyword[not] identifier[attrib] :
keyword[return] literal[string]
keyword[if] identifier[expandattrs] :
identifier[alist] =[ literal[string] % identifier[item] keyword[for] identifier[item] keyword[in] identifier[sorted] ( identifier[attrib] . identifier[items] ())]
keyword[else] :
identifier[alist] = identifier[list] ( identifier[attrib] )
keyword[return] literal[string] % literal[string] . identifier[join] ( identifier[alist] ) | def _displayattrs(attrib, expandattrs):
"""
Helper function to display the attributes of a Node object in lexicographic
order.
:param attrib: dictionary with the attributes
:param expandattrs: if True also displays the value of the attributes
"""
if not attrib:
return '' # depends on [control=['if'], data=[]]
if expandattrs:
alist = ['%s=%r' % item for item in sorted(attrib.items())] # depends on [control=['if'], data=[]]
else:
alist = list(attrib)
return '{%s}' % ', '.join(alist) |
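
The two display modes in a quick inline check (sorted key=value pairs when expandattrs is set, bare keys otherwise):

    attrib = {'b': 1, 'a': 'x'}
    # expandattrs=True: sorted pairs rendered with %r
    print('{%s}' % ', '.join('%s=%r' % kv for kv in sorted(attrib.items())))
    # {a='x', b=1}
    # expandattrs=False: just the attribute names, insertion order
    print('{%s}' % ', '.join(attrib))
    # {b, a}
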
def open_recruitment(self, n=1):
"""Return initial experiment URL list, plus instructions
        for finding subsequent recruitment events in experiment logs.
"""
logger.info("Opening HotAir recruitment for {} participants".format(n))
recruitments = self.recruit(n)
message = "Recruitment requests will open browser windows automatically."
return {"items": recruitments, "message": message} | def function[open_recruitment, parameter[self, n]]:
constant[Return initial experiment URL list, plus instructions
for finding subsequent recruitment events in experiemnt logs.
]
call[name[logger].info, parameter[call[constant[Opening HotAir recruitment for {} participants].format, parameter[name[n]]]]]
variable[recruitments] assign[=] call[name[self].recruit, parameter[name[n]]]
variable[message] assign[=] constant[Recruitment requests will open browser windows automatically.]
return[dictionary[[<ast.Constant object at 0x7da1b0397a30>, <ast.Constant object at 0x7da1b0394430>], [<ast.Name object at 0x7da1b0397b50>, <ast.Name object at 0x7da1b03979d0>]]] | keyword[def] identifier[open_recruitment] ( identifier[self] , identifier[n] = literal[int] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[n] ))
identifier[recruitments] = identifier[self] . identifier[recruit] ( identifier[n] )
identifier[message] = literal[string]
keyword[return] { literal[string] : identifier[recruitments] , literal[string] : identifier[message] } | def open_recruitment(self, n=1):
"""Return initial experiment URL list, plus instructions
        for finding subsequent recruitment events in experiment logs.
"""
logger.info('Opening HotAir recruitment for {} participants'.format(n))
recruitments = self.recruit(n)
message = 'Recruitment requests will open browser windows automatically.'
return {'items': recruitments, 'message': message} |
def write(self, data):
'Put, possibly replace, file contents with (new) data'
if not hasattr(data, 'read'):
data = six.BytesIO(data)#StringIO(data)
self.jfs.up(self.path, data) | def function[write, parameter[self, data]]:
constant[Put, possibly replace, file contents with (new) data]
if <ast.UnaryOp object at 0x7da18f7223b0> begin[:]
variable[data] assign[=] call[name[six].BytesIO, parameter[name[data]]]
call[name[self].jfs.up, parameter[name[self].path, name[data]]] | keyword[def] identifier[write] ( identifier[self] , identifier[data] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[data] , literal[string] ):
identifier[data] = identifier[six] . identifier[BytesIO] ( identifier[data] )
identifier[self] . identifier[jfs] . identifier[up] ( identifier[self] . identifier[path] , identifier[data] ) | def write(self, data):
"""Put, possibly replace, file contents with (new) data"""
if not hasattr(data, 'read'):
data = six.BytesIO(data) #StringIO(data) # depends on [control=['if'], data=[]]
self.jfs.up(self.path, data) |
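
The duck-typing branch above in a self-contained form; io.BytesIO is used directly since six.BytesIO is the same object on Python 3:

    import io

    def to_filelike(data):
        # mirror write(): wrap raw bytes so the uploader can call .read()
        return data if hasattr(data, 'read') else io.BytesIO(data)

    print(to_filelike(b'hello').read())          # b'hello'
    print(to_filelike(io.BytesIO(b'x')).read())  # b'x', passed through as-is
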
def from_file(self, fname, lmax=None, format='shtools', kind='real',
normalization='4pi', skip=0, header=False,
csphase=1, **kwargs):
"""
Initialize the class with spherical harmonic coefficients from a file.
Usage
-----
x = SHCoeffs.from_file(filename, [format='shtools', lmax,
normalization, csphase, skip,
header])
x = SHCoeffs.from_file(filename, [format='npy', normalization,
csphase, **kwargs])
Returns
-------
x : SHCoeffs class instance.
Parameters
----------
filename : str
Name of the file, including path.
format : str, optional, default = 'shtools'
'shtools' format or binary numpy 'npy' format.
lmax : int, optional, default = None
The maximum spherical harmonic degree to read from 'shtools'
formatted files.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
skip : int, optional, default = 0
Number of lines to skip at the beginning of the file when format is
'shtools'.
header : bool, optional, default = False
If True, read a list of values from the header line of an 'shtools'
formatted file.
**kwargs : keyword argument list, optional for format = 'npy'
Keyword arguments of numpy.load() when format is 'npy'.
Description
-----------
If format='shtools', spherical harmonic coefficients will be read from
a text file. The optional parameter `skip` specifies how many lines
should be skipped before attempting to parse the file, the optional
parameter `header` specifies whether to read a list of values from a
header line, and the optional parameter `lmax` specifies the maximum
degree to read from the file. All lines that do not start with 2
integers and that are less than 3 words long will be treated as
comments and ignored. For this format, each line of the file must
contain
l, m, coeffs[0, l, m], coeffs[1, l, m]
where l and m are the spherical harmonic degree and order,
respectively. The terms coeffs[1, l, 0] can be neglected as they are
zero. For more information, see `shio.shread()`.
If format='npy', a binary numpy 'npy' file will be read using
numpy.load().
"""
if type(normalization) != str:
raise ValueError('normalization must be a string. '
'Input type was {:s}'
.format(str(type(normalization))))
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The input normalization must be '4pi', 'ortho', 'schmidt', "
"or 'unnorm'. Provided value was {:s}"
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be 1 or -1. Input value was {:s}"
.format(repr(csphase))
)
header_list = None
if format.lower() == 'shtools':
if header is True:
coeffs, lmaxout, header_list = _shread(fname, lmax=lmax,
skip=skip, header=True)
else:
coeffs, lmaxout = _shread(fname, lmax=lmax, skip=skip)
elif format.lower() == 'npy':
coeffs = _np.load(fname, **kwargs)
lmaxout = coeffs.shape[1] - 1
else:
raise NotImplementedError(
'format={:s} not implemented'.format(repr(format)))
if normalization.lower() == 'unnorm' and lmaxout > 85:
_warnings.warn("Calculations using unnormalized coefficients " +
"are stable only for degrees less than or equal " +
"to 85. lmax for the coefficients will be set to " +
"85. Input value was {:d}.".format(lmaxout),
category=RuntimeWarning)
lmaxout = 85
if _np.iscomplexobj(coeffs):
kind = 'complex'
else:
kind = 'real'
for cls in self.__subclasses__():
if cls.istype(kind):
return cls(coeffs, normalization=normalization.lower(),
csphase=csphase, header=header_list) | def function[from_file, parameter[self, fname, lmax, format, kind, normalization, skip, header, csphase]]:
constant[
Initialize the class with spherical harmonic coefficients from a file.
Usage
-----
x = SHCoeffs.from_file(filename, [format='shtools', lmax,
normalization, csphase, skip,
header])
x = SHCoeffs.from_file(filename, [format='npy', normalization,
csphase, **kwargs])
Returns
-------
x : SHCoeffs class instance.
Parameters
----------
filename : str
Name of the file, including path.
format : str, optional, default = 'shtools'
'shtools' format or binary numpy 'npy' format.
lmax : int, optional, default = None
The maximum spherical harmonic degree to read from 'shtools'
formatted files.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
skip : int, optional, default = 0
Number of lines to skip at the beginning of the file when format is
'shtools'.
header : bool, optional, default = False
If True, read a list of values from the header line of an 'shtools'
formatted file.
**kwargs : keyword argument list, optional for format = 'npy'
Keyword arguments of numpy.load() when format is 'npy'.
Description
-----------
If format='shtools', spherical harmonic coefficients will be read from
a text file. The optional parameter `skip` specifies how many lines
should be skipped before attempting to parse the file, the optional
parameter `header` specifies whether to read a list of values from a
header line, and the optional parameter `lmax` specifies the maximum
degree to read from the file. All lines that do not start with 2
integers and that are less than 3 words long will be treated as
comments and ignored. For this format, each line of the file must
contain
l, m, coeffs[0, l, m], coeffs[1, l, m]
where l and m are the spherical harmonic degree and order,
respectively. The terms coeffs[1, l, 0] can be neglected as they are
zero. For more information, see `shio.shread()`.
If format='npy', a binary numpy 'npy' file will be read using
numpy.load().
]
if compare[call[name[type], parameter[name[normalization]]] not_equal[!=] name[str]] begin[:]
<ast.Raise object at 0x7da2047eb880>
if compare[call[name[normalization].lower, parameter[]] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da2047e96c0>, <ast.Constant object at 0x7da2047eb820>, <ast.Constant object at 0x7da18f00f8b0>, <ast.Constant object at 0x7da18f00fa00>]]] begin[:]
<ast.Raise object at 0x7da18f00f130>
if <ast.BoolOp object at 0x7da18f00dbd0> begin[:]
<ast.Raise object at 0x7da18f00d0f0>
variable[header_list] assign[=] constant[None]
if compare[call[name[format].lower, parameter[]] equal[==] constant[shtools]] begin[:]
if compare[name[header] is constant[True]] begin[:]
<ast.Tuple object at 0x7da18f00e140> assign[=] call[name[_shread], parameter[name[fname]]]
if <ast.BoolOp object at 0x7da18f00e2c0> begin[:]
call[name[_warnings].warn, parameter[binary_operation[binary_operation[binary_operation[constant[Calculations using unnormalized coefficients ] + constant[are stable only for degrees less than or equal ]] + constant[to 85. lmax for the coefficients will be set to ]] + call[constant[85. Input value was {:d}.].format, parameter[name[lmaxout]]]]]]
variable[lmaxout] assign[=] constant[85]
if call[name[_np].iscomplexobj, parameter[name[coeffs]]] begin[:]
variable[kind] assign[=] constant[complex]
for taget[name[cls]] in starred[call[name[self].__subclasses__, parameter[]]] begin[:]
if call[name[cls].istype, parameter[name[kind]]] begin[:]
return[call[name[cls], parameter[name[coeffs]]]] | keyword[def] identifier[from_file] ( identifier[self] , identifier[fname] , identifier[lmax] = keyword[None] , identifier[format] = literal[string] , identifier[kind] = literal[string] ,
identifier[normalization] = literal[string] , identifier[skip] = literal[int] , identifier[header] = keyword[False] ,
identifier[csphase] = literal[int] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[type] ( identifier[normalization] )!= identifier[str] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
. identifier[format] ( identifier[str] ( identifier[type] ( identifier[normalization] ))))
keyword[if] identifier[normalization] . identifier[lower] () keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
. identifier[format] ( identifier[repr] ( identifier[normalization] ))
)
keyword[if] identifier[csphase] != literal[int] keyword[and] identifier[csphase] !=- literal[int] :
keyword[raise] identifier[ValueError] (
literal[string]
. identifier[format] ( identifier[repr] ( identifier[csphase] ))
)
identifier[header_list] = keyword[None]
keyword[if] identifier[format] . identifier[lower] ()== literal[string] :
keyword[if] identifier[header] keyword[is] keyword[True] :
identifier[coeffs] , identifier[lmaxout] , identifier[header_list] = identifier[_shread] ( identifier[fname] , identifier[lmax] = identifier[lmax] ,
identifier[skip] = identifier[skip] , identifier[header] = keyword[True] )
keyword[else] :
identifier[coeffs] , identifier[lmaxout] = identifier[_shread] ( identifier[fname] , identifier[lmax] = identifier[lmax] , identifier[skip] = identifier[skip] )
keyword[elif] identifier[format] . identifier[lower] ()== literal[string] :
identifier[coeffs] = identifier[_np] . identifier[load] ( identifier[fname] ,** identifier[kwargs] )
identifier[lmaxout] = identifier[coeffs] . identifier[shape] [ literal[int] ]- literal[int]
keyword[else] :
keyword[raise] identifier[NotImplementedError] (
literal[string] . identifier[format] ( identifier[repr] ( identifier[format] )))
keyword[if] identifier[normalization] . identifier[lower] ()== literal[string] keyword[and] identifier[lmaxout] > literal[int] :
identifier[_warnings] . identifier[warn] ( literal[string] +
literal[string] +
literal[string] +
literal[string] . identifier[format] ( identifier[lmaxout] ),
identifier[category] = identifier[RuntimeWarning] )
identifier[lmaxout] = literal[int]
keyword[if] identifier[_np] . identifier[iscomplexobj] ( identifier[coeffs] ):
identifier[kind] = literal[string]
keyword[else] :
identifier[kind] = literal[string]
keyword[for] identifier[cls] keyword[in] identifier[self] . identifier[__subclasses__] ():
keyword[if] identifier[cls] . identifier[istype] ( identifier[kind] ):
keyword[return] identifier[cls] ( identifier[coeffs] , identifier[normalization] = identifier[normalization] . identifier[lower] (),
identifier[csphase] = identifier[csphase] , identifier[header] = identifier[header_list] ) | def from_file(self, fname, lmax=None, format='shtools', kind='real', normalization='4pi', skip=0, header=False, csphase=1, **kwargs):
"""
Initialize the class with spherical harmonic coefficients from a file.
Usage
-----
x = SHCoeffs.from_file(filename, [format='shtools', lmax,
normalization, csphase, skip,
header])
x = SHCoeffs.from_file(filename, [format='npy', normalization,
csphase, **kwargs])
Returns
-------
x : SHCoeffs class instance.
Parameters
----------
filename : str
Name of the file, including path.
format : str, optional, default = 'shtools'
'shtools' format or binary numpy 'npy' format.
lmax : int, optional, default = None
The maximum spherical harmonic degree to read from 'shtools'
formatted files.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
skip : int, optional, default = 0
Number of lines to skip at the beginning of the file when format is
'shtools'.
header : bool, optional, default = False
If True, read a list of values from the header line of an 'shtools'
formatted file.
**kwargs : keyword argument list, optional for format = 'npy'
Keyword arguments of numpy.load() when format is 'npy'.
Description
-----------
If format='shtools', spherical harmonic coefficients will be read from
a text file. The optional parameter `skip` specifies how many lines
should be skipped before attempting to parse the file, the optional
parameter `header` specifies whether to read a list of values from a
header line, and the optional parameter `lmax` specifies the maximum
degree to read from the file. All lines that do not start with 2
integers and that are less than 3 words long will be treated as
comments and ignored. For this format, each line of the file must
contain
l, m, coeffs[0, l, m], coeffs[1, l, m]
where l and m are the spherical harmonic degree and order,
respectively. The terms coeffs[1, l, 0] can be neglected as they are
zero. For more information, see `shio.shread()`.
If format='npy', a binary numpy 'npy' file will be read using
numpy.load().
"""
if type(normalization) != str:
raise ValueError('normalization must be a string. Input type was {:s}'.format(str(type(normalization)))) # depends on [control=['if'], data=['str']]
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError("The input normalization must be '4pi', 'ortho', 'schmidt', or 'unnorm'. Provided value was {:s}".format(repr(normalization))) # depends on [control=['if'], data=[]]
if csphase != 1 and csphase != -1:
raise ValueError('csphase must be 1 or -1. Input value was {:s}'.format(repr(csphase))) # depends on [control=['if'], data=[]]
header_list = None
if format.lower() == 'shtools':
if header is True:
(coeffs, lmaxout, header_list) = _shread(fname, lmax=lmax, skip=skip, header=True) # depends on [control=['if'], data=[]]
else:
(coeffs, lmaxout) = _shread(fname, lmax=lmax, skip=skip) # depends on [control=['if'], data=[]]
elif format.lower() == 'npy':
coeffs = _np.load(fname, **kwargs)
lmaxout = coeffs.shape[1] - 1 # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('format={:s} not implemented'.format(repr(format)))
if normalization.lower() == 'unnorm' and lmaxout > 85:
_warnings.warn('Calculations using unnormalized coefficients ' + 'are stable only for degrees less than or equal ' + 'to 85. lmax for the coefficients will be set to ' + '85. Input value was {:d}.'.format(lmaxout), category=RuntimeWarning)
lmaxout = 85 # depends on [control=['if'], data=[]]
if _np.iscomplexobj(coeffs):
kind = 'complex' # depends on [control=['if'], data=[]]
else:
kind = 'real'
for cls in self.__subclasses__():
if cls.istype(kind):
return cls(coeffs, normalization=normalization.lower(), csphase=csphase, header=header_list) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cls']] |
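
A sketch of a reader for the 'shtools' text layout the docstring describes, applying its stated rules (rows of `l m coeffs[0,l,m] coeffs[1,l,m]`; lines with fewer than 3 words or not starting with two integers are comments); the real `shio.shread()` additionally handles `skip`, headers, and the `lmax` cutoff:

    import numpy as np

    def read_shtools_lines(lines, lmax):
        coeffs = np.zeros((2, lmax + 1, lmax + 1))
        for line in lines:
            words = line.split()
            if len(words) < 3:
                continue  # comment per the docstring's rule
            try:
                l, m = int(words[0]), int(words[1])
            except ValueError:
                continue  # does not start with two integers
            coeffs[0, l, m] = float(words[2])
            if len(words) > 3:
                coeffs[1, l, m] = float(words[3])
        return coeffs

    text = ['# degree order c0 c1', '0 0 1.0 0.0', '1 0 0.5 0.0', '1 1 0.25 -0.1']
    print(read_shtools_lines(text, lmax=1)[0])  # [[1. 0.], [0.5 0.25]]
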
def content(self):
"""Function returns the body of the mdn message as a byte string"""
if self.payload:
message_bytes = mime_to_bytes(
self.payload, 0).replace(b'\n', b'\r\n')
boundary = b'--' + self.payload.get_boundary().encode('utf-8')
temp = message_bytes.split(boundary)
temp.pop(0)
return boundary + boundary.join(temp)
else:
return '' | def function[content, parameter[self]]:
constant[Function returns the body of the mdn message as a byte string]
if name[self].payload begin[:]
variable[message_bytes] assign[=] call[call[name[mime_to_bytes], parameter[name[self].payload, constant[0]]].replace, parameter[constant[b'\n'], constant[b'\r\n']]]
variable[boundary] assign[=] binary_operation[constant[b'--'] + call[call[name[self].payload.get_boundary, parameter[]].encode, parameter[constant[utf-8]]]]
variable[temp] assign[=] call[name[message_bytes].split, parameter[name[boundary]]]
call[name[temp].pop, parameter[constant[0]]]
return[binary_operation[name[boundary] + call[name[boundary].join, parameter[name[temp]]]]] | keyword[def] identifier[content] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[payload] :
identifier[message_bytes] = identifier[mime_to_bytes] (
identifier[self] . identifier[payload] , literal[int] ). identifier[replace] ( literal[string] , literal[string] )
identifier[boundary] = literal[string] + identifier[self] . identifier[payload] . identifier[get_boundary] (). identifier[encode] ( literal[string] )
identifier[temp] = identifier[message_bytes] . identifier[split] ( identifier[boundary] )
identifier[temp] . identifier[pop] ( literal[int] )
keyword[return] identifier[boundary] + identifier[boundary] . identifier[join] ( identifier[temp] )
keyword[else] :
keyword[return] literal[string] | def content(self):
"""Function returns the body of the mdn message as a byte string"""
if self.payload:
message_bytes = mime_to_bytes(self.payload, 0).replace(b'\n', b'\r\n')
boundary = b'--' + self.payload.get_boundary().encode('utf-8')
temp = message_bytes.split(boundary)
temp.pop(0)
return boundary + boundary.join(temp) # depends on [control=['if'], data=[]]
else:
return '' |
def get_raw_input(description, default=False):
"""Get user input from the command line via raw_input / input.
description (unicode): Text to display before prompt.
default (unicode or False/None): Default value to display with prompt.
RETURNS (unicode): User input.
"""
additional = ' (default: %s)' % default if default else ''
prompt = ' %s%s: ' % (description, additional)
user_input = input_(prompt)
return user_input | def function[get_raw_input, parameter[description, default]]:
constant[Get user input from the command line via raw_input / input.
description (unicode): Text to display before prompt.
default (unicode or False/None): Default value to display with prompt.
RETURNS (unicode): User input.
]
variable[additional] assign[=] <ast.IfExp object at 0x7da1b191c5b0>
variable[prompt] assign[=] binary_operation[constant[ %s%s: ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b191c4c0>, <ast.Name object at 0x7da1b191e080>]]]
variable[user_input] assign[=] call[name[input_], parameter[name[prompt]]]
return[name[user_input]] | keyword[def] identifier[get_raw_input] ( identifier[description] , identifier[default] = keyword[False] ):
literal[string]
identifier[additional] = literal[string] % identifier[default] keyword[if] identifier[default] keyword[else] literal[string]
identifier[prompt] = literal[string] %( identifier[description] , identifier[additional] )
identifier[user_input] = identifier[input_] ( identifier[prompt] )
keyword[return] identifier[user_input] | def get_raw_input(description, default=False):
"""Get user input from the command line via raw_input / input.
description (unicode): Text to display before prompt.
default (unicode or False/None): Default value to display with prompt.
RETURNS (unicode): User input.
"""
additional = ' (default: %s)' % default if default else ''
prompt = ' %s%s: ' % (description, additional)
user_input = input_(prompt)
return user_input |
def get_chunk_hash(file,
seed,
filesz=None,
chunksz=DEFAULT_CHUNK_SIZE,
bufsz=DEFAULT_BUFFER_SIZE):
"""returns a hash of a chunk of the file provided. the position of
the chunk is determined by the seed. additionally, the hmac of the
chunk is calculated from the seed.
:param file: a file like object to get the chunk hash from. should
support `read()`, `seek()` and `tell()`.
:param seed: the seed to use for calculating the chunk position and
chunk hash
:param chunksz: the size of the chunk to check
:param bufsz: an optional buffer size to use for reading the file.
"""
if (filesz is None):
file.seek(0, 2)
filesz = file.tell()
if (filesz < chunksz):
chunksz = filesz
prf = KeyedPRF(seed, filesz - chunksz + 1)
i = prf.eval(0)
file.seek(i)
h = hmac.new(seed, None, hashlib.sha256)
while (True):
if (chunksz < bufsz):
bufsz = chunksz
buffer = file.read(bufsz)
h.update(buffer)
chunksz -= len(buffer)
assert(chunksz >= 0)
if (chunksz == 0):
break
return h.digest() | def function[get_chunk_hash, parameter[file, seed, filesz, chunksz, bufsz]]:
constant[returns a hash of a chunk of the file provided. the position of
the chunk is determined by the seed. additionally, the hmac of the
chunk is calculated from the seed.
:param file: a file like object to get the chunk hash from. should
support `read()`, `seek()` and `tell()`.
:param seed: the seed to use for calculating the chunk position and
chunk hash
:param chunksz: the size of the chunk to check
:param bufsz: an optional buffer size to use for reading the file.
]
if compare[name[filesz] is constant[None]] begin[:]
call[name[file].seek, parameter[constant[0], constant[2]]]
variable[filesz] assign[=] call[name[file].tell, parameter[]]
if compare[name[filesz] less[<] name[chunksz]] begin[:]
variable[chunksz] assign[=] name[filesz]
variable[prf] assign[=] call[name[KeyedPRF], parameter[name[seed], binary_operation[binary_operation[name[filesz] - name[chunksz]] + constant[1]]]]
variable[i] assign[=] call[name[prf].eval, parameter[constant[0]]]
call[name[file].seek, parameter[name[i]]]
variable[h] assign[=] call[name[hmac].new, parameter[name[seed], constant[None], name[hashlib].sha256]]
while constant[True] begin[:]
if compare[name[chunksz] less[<] name[bufsz]] begin[:]
variable[bufsz] assign[=] name[chunksz]
variable[buffer] assign[=] call[name[file].read, parameter[name[bufsz]]]
call[name[h].update, parameter[name[buffer]]]
<ast.AugAssign object at 0x7da18bc72680>
assert[compare[name[chunksz] greater_or_equal[>=] constant[0]]]
if compare[name[chunksz] equal[==] constant[0]] begin[:]
break
return[call[name[h].digest, parameter[]]] | keyword[def] identifier[get_chunk_hash] ( identifier[file] ,
identifier[seed] ,
identifier[filesz] = keyword[None] ,
identifier[chunksz] = identifier[DEFAULT_CHUNK_SIZE] ,
identifier[bufsz] = identifier[DEFAULT_BUFFER_SIZE] ):
literal[string]
keyword[if] ( identifier[filesz] keyword[is] keyword[None] ):
identifier[file] . identifier[seek] ( literal[int] , literal[int] )
identifier[filesz] = identifier[file] . identifier[tell] ()
keyword[if] ( identifier[filesz] < identifier[chunksz] ):
identifier[chunksz] = identifier[filesz]
identifier[prf] = identifier[KeyedPRF] ( identifier[seed] , identifier[filesz] - identifier[chunksz] + literal[int] )
identifier[i] = identifier[prf] . identifier[eval] ( literal[int] )
identifier[file] . identifier[seek] ( identifier[i] )
identifier[h] = identifier[hmac] . identifier[new] ( identifier[seed] , keyword[None] , identifier[hashlib] . identifier[sha256] )
keyword[while] ( keyword[True] ):
keyword[if] ( identifier[chunksz] < identifier[bufsz] ):
identifier[bufsz] = identifier[chunksz]
identifier[buffer] = identifier[file] . identifier[read] ( identifier[bufsz] )
identifier[h] . identifier[update] ( identifier[buffer] )
identifier[chunksz] -= identifier[len] ( identifier[buffer] )
keyword[assert] ( identifier[chunksz] >= literal[int] )
keyword[if] ( identifier[chunksz] == literal[int] ):
keyword[break]
keyword[return] identifier[h] . identifier[digest] () | def get_chunk_hash(file, seed, filesz=None, chunksz=DEFAULT_CHUNK_SIZE, bufsz=DEFAULT_BUFFER_SIZE):
"""returns a hash of a chunk of the file provided. the position of
the chunk is determined by the seed. additionally, the hmac of the
chunk is calculated from the seed.
:param file: a file like object to get the chunk hash from. should
support `read()`, `seek()` and `tell()`.
:param seed: the seed to use for calculating the chunk position and
chunk hash
:param chunksz: the size of the chunk to check
:param bufsz: an optional buffer size to use for reading the file.
"""
if filesz is None:
file.seek(0, 2)
filesz = file.tell() # depends on [control=['if'], data=['filesz']]
if filesz < chunksz:
chunksz = filesz # depends on [control=['if'], data=['filesz', 'chunksz']]
prf = KeyedPRF(seed, filesz - chunksz + 1)
i = prf.eval(0)
file.seek(i)
h = hmac.new(seed, None, hashlib.sha256)
while True:
if chunksz < bufsz:
bufsz = chunksz # depends on [control=['if'], data=['chunksz', 'bufsz']]
buffer = file.read(bufsz)
h.update(buffer)
chunksz -= len(buffer)
assert chunksz >= 0
if chunksz == 0:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return h.digest() |
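
A runnable sketch of the same challenge scheme; the real `KeyedPRF` construction is not shown in this record, so the offset derivation below (an HMAC digest reduced mod the valid range) is only a hypothetical stand-in:

    import hashlib
    import hmac
    import io

    def stand_in_prf_eval(seed, upper):
        # hypothetical substitute for KeyedPRF(seed, upper).eval(0)
        digest = hmac.new(seed, b'\x00', hashlib.sha256).digest()
        return int.from_bytes(digest, 'big') % upper

    def chunk_hash(fileobj, seed, chunksz=8):
        fileobj.seek(0, 2)
        filesz = fileobj.tell()
        chunksz = min(chunksz, filesz)
        fileobj.seek(stand_in_prf_eval(seed, filesz - chunksz + 1))
        return hmac.new(seed, fileobj.read(chunksz), hashlib.sha256).hexdigest()

    f = io.BytesIO(b'the quick brown fox jumps over the lazy dog')
    print(chunk_hash(f, b'audit-seed'))  # deterministic per (seed, file) pair
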
def encode(self):
"""
Encodes the current state of the object into a string.
:return: The encoded string
"""
opt_dict = {}
for k, v in self.options.items():
opt_dict[k] = v[0]
ss = '{0}://{1}'.format(self.scheme, ','.join(self.hosts))
if self.bucket:
ss += '/' + self.bucket
        # URL-encode options, then decode the forward slash /
ss += '?' + urlencode(opt_dict).replace('%2F', '/')
return ss | def function[encode, parameter[self]]:
constant[
Encodes the current state of the object into a string.
:return: The encoded string
]
variable[opt_dict] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da207f99ab0>, <ast.Name object at 0x7da207f99060>]]] in starred[call[name[self].options.items, parameter[]]] begin[:]
call[name[opt_dict]][name[k]] assign[=] call[name[v]][constant[0]]
variable[ss] assign[=] call[constant[{0}://{1}].format, parameter[name[self].scheme, call[constant[,].join, parameter[name[self].hosts]]]]
if name[self].bucket begin[:]
<ast.AugAssign object at 0x7da207f9ab60>
<ast.AugAssign object at 0x7da207f9bf10>
return[name[ss]] | keyword[def] identifier[encode] ( identifier[self] ):
literal[string]
identifier[opt_dict] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[options] . identifier[items] ():
identifier[opt_dict] [ identifier[k] ]= identifier[v] [ literal[int] ]
identifier[ss] = literal[string] . identifier[format] ( identifier[self] . identifier[scheme] , literal[string] . identifier[join] ( identifier[self] . identifier[hosts] ))
keyword[if] identifier[self] . identifier[bucket] :
identifier[ss] += literal[string] + identifier[self] . identifier[bucket]
identifier[ss] += literal[string] + identifier[urlencode] ( identifier[opt_dict] ). identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[ss] | def encode(self):
"""
Encodes the current state of the object into a string.
:return: The encoded string
"""
opt_dict = {}
for (k, v) in self.options.items():
opt_dict[k] = v[0] # depends on [control=['for'], data=[]]
ss = '{0}://{1}'.format(self.scheme, ','.join(self.hosts))
if self.bucket:
ss += '/' + self.bucket # depends on [control=['if'], data=[]]
        # URL-encode options, then decode the forward slash /
ss += '?' + urlencode(opt_dict).replace('%2F', '/')
return ss |
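
What the encoded string looks like for a stand-in object carrying the four attributes `encode` reads; option values sit in one-element tuples because the loop takes `v[0]`, and the host/bucket names here are made up:

    from urllib.parse import urlencode

    class FakeSpec(object):
        scheme = 'couchbase'
        hosts = ['host1', 'host2']
        bucket = 'travel-sample'
        options = {'certpath': ('/etc/ssl/cert.pem',)}

    spec = FakeSpec()
    opt_dict = {k: v[0] for k, v in spec.options.items()}
    ss = '{0}://{1}/{2}?{3}'.format(
        spec.scheme, ','.join(spec.hosts), spec.bucket,
        urlencode(opt_dict).replace('%2F', '/'))
    print(ss)  # couchbase://host1,host2/travel-sample?certpath=/etc/ssl/cert.pem
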
def format_data(data):
"""Format data."""
# Format negative features
neg_features = np.array([[data['features'][x]['effect'],
data['features'][x]['value'],
data['featureNames'][x]]
for x in data['features'].keys() if data['features'][x]['effect'] < 0])
neg_features = np.array(sorted(neg_features, key=lambda x: float(x[0]), reverse=False))
    # Format positive features
pos_features = np.array([[data['features'][x]['effect'],
data['features'][x]['value'],
data['featureNames'][x]]
for x in data['features'].keys() if data['features'][x]['effect'] >= 0])
pos_features = np.array(sorted(pos_features, key=lambda x: float(x[0]), reverse=True))
# Define link function
if data['link'] == 'identity':
convert_func = lambda x: x
elif data['link'] == 'logit':
convert_func = lambda x: 1 / (1 + np.exp(-x))
else:
assert False, "ERROR: Unrecognized link function: " + str(data['link'])
# Convert negative feature values to plot values
neg_val = data['outValue']
for i in neg_features:
val = float(i[0])
neg_val = neg_val + np.abs(val)
i[0] = convert_func(neg_val)
if len(neg_features) > 0:
total_neg = np.max(neg_features[:, 0].astype(float)) - \
np.min(neg_features[:, 0].astype(float))
else:
total_neg = 0
# Convert positive feature values to plot values
pos_val = data['outValue']
for i in pos_features:
val = float(i[0])
pos_val = pos_val - np.abs(val)
i[0] = convert_func(pos_val)
if len(pos_features) > 0:
total_pos = np.max(pos_features[:, 0].astype(float)) - \
np.min(pos_features[:, 0].astype(float))
else:
total_pos = 0
# Convert output value and base value
data['outValue'] = convert_func(data['outValue'])
data['baseValue'] = convert_func(data['baseValue'])
return neg_features, total_neg, pos_features, total_pos | def function[format_data, parameter[data]]:
constant[Format data.]
variable[neg_features] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da18f8138e0>]]
variable[neg_features] assign[=] call[name[np].array, parameter[call[name[sorted], parameter[name[neg_features]]]]]
variable[pos_features] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b1fa4a30>]]
variable[pos_features] assign[=] call[name[np].array, parameter[call[name[sorted], parameter[name[pos_features]]]]]
if compare[call[name[data]][constant[link]] equal[==] constant[identity]] begin[:]
variable[convert_func] assign[=] <ast.Lambda object at 0x7da20c795360>
variable[neg_val] assign[=] call[name[data]][constant[outValue]]
for taget[name[i]] in starred[name[neg_features]] begin[:]
variable[val] assign[=] call[name[float], parameter[call[name[i]][constant[0]]]]
variable[neg_val] assign[=] binary_operation[name[neg_val] + call[name[np].abs, parameter[name[val]]]]
call[name[i]][constant[0]] assign[=] call[name[convert_func], parameter[name[neg_val]]]
if compare[call[name[len], parameter[name[neg_features]]] greater[>] constant[0]] begin[:]
variable[total_neg] assign[=] binary_operation[call[name[np].max, parameter[call[call[name[neg_features]][tuple[[<ast.Slice object at 0x7da2041dbaf0>, <ast.Constant object at 0x7da2041d97b0>]]].astype, parameter[name[float]]]]] - call[name[np].min, parameter[call[call[name[neg_features]][tuple[[<ast.Slice object at 0x7da2041da9b0>, <ast.Constant object at 0x7da2041d9f00>]]].astype, parameter[name[float]]]]]]
variable[pos_val] assign[=] call[name[data]][constant[outValue]]
for taget[name[i]] in starred[name[pos_features]] begin[:]
variable[val] assign[=] call[name[float], parameter[call[name[i]][constant[0]]]]
variable[pos_val] assign[=] binary_operation[name[pos_val] - call[name[np].abs, parameter[name[val]]]]
call[name[i]][constant[0]] assign[=] call[name[convert_func], parameter[name[pos_val]]]
if compare[call[name[len], parameter[name[pos_features]]] greater[>] constant[0]] begin[:]
variable[total_pos] assign[=] binary_operation[call[name[np].max, parameter[call[call[name[pos_features]][tuple[[<ast.Slice object at 0x7da2041d9960>, <ast.Constant object at 0x7da2041da5f0>]]].astype, parameter[name[float]]]]] - call[name[np].min, parameter[call[call[name[pos_features]][tuple[[<ast.Slice object at 0x7da2041dada0>, <ast.Constant object at 0x7da2041d9f60>]]].astype, parameter[name[float]]]]]]
call[name[data]][constant[outValue]] assign[=] call[name[convert_func], parameter[call[name[data]][constant[outValue]]]]
call[name[data]][constant[baseValue]] assign[=] call[name[convert_func], parameter[call[name[data]][constant[baseValue]]]]
return[tuple[[<ast.Name object at 0x7da2041db8b0>, <ast.Name object at 0x7da2041d84c0>, <ast.Name object at 0x7da2041d8a30>, <ast.Name object at 0x7da2041db0d0>]]] | keyword[def] identifier[format_data] ( identifier[data] ):
literal[string]
identifier[neg_features] = identifier[np] . identifier[array] ([[ identifier[data] [ literal[string] ][ identifier[x] ][ literal[string] ],
identifier[data] [ literal[string] ][ identifier[x] ][ literal[string] ],
identifier[data] [ literal[string] ][ identifier[x] ]]
keyword[for] identifier[x] keyword[in] identifier[data] [ literal[string] ]. identifier[keys] () keyword[if] identifier[data] [ literal[string] ][ identifier[x] ][ literal[string] ]< literal[int] ])
identifier[neg_features] = identifier[np] . identifier[array] ( identifier[sorted] ( identifier[neg_features] , identifier[key] = keyword[lambda] identifier[x] : identifier[float] ( identifier[x] [ literal[int] ]), identifier[reverse] = keyword[False] ))
identifier[pos_features] = identifier[np] . identifier[array] ([[ identifier[data] [ literal[string] ][ identifier[x] ][ literal[string] ],
identifier[data] [ literal[string] ][ identifier[x] ][ literal[string] ],
identifier[data] [ literal[string] ][ identifier[x] ]]
keyword[for] identifier[x] keyword[in] identifier[data] [ literal[string] ]. identifier[keys] () keyword[if] identifier[data] [ literal[string] ][ identifier[x] ][ literal[string] ]>= literal[int] ])
identifier[pos_features] = identifier[np] . identifier[array] ( identifier[sorted] ( identifier[pos_features] , identifier[key] = keyword[lambda] identifier[x] : identifier[float] ( identifier[x] [ literal[int] ]), identifier[reverse] = keyword[True] ))
keyword[if] identifier[data] [ literal[string] ]== literal[string] :
identifier[convert_func] = keyword[lambda] identifier[x] : identifier[x]
keyword[elif] identifier[data] [ literal[string] ]== literal[string] :
identifier[convert_func] = keyword[lambda] identifier[x] : literal[int] /( literal[int] + identifier[np] . identifier[exp] (- identifier[x] ))
keyword[else] :
keyword[assert] keyword[False] , literal[string] + identifier[str] ( identifier[data] [ literal[string] ])
identifier[neg_val] = identifier[data] [ literal[string] ]
keyword[for] identifier[i] keyword[in] identifier[neg_features] :
identifier[val] = identifier[float] ( identifier[i] [ literal[int] ])
identifier[neg_val] = identifier[neg_val] + identifier[np] . identifier[abs] ( identifier[val] )
identifier[i] [ literal[int] ]= identifier[convert_func] ( identifier[neg_val] )
keyword[if] identifier[len] ( identifier[neg_features] )> literal[int] :
identifier[total_neg] = identifier[np] . identifier[max] ( identifier[neg_features] [:, literal[int] ]. identifier[astype] ( identifier[float] ))- identifier[np] . identifier[min] ( identifier[neg_features] [:, literal[int] ]. identifier[astype] ( identifier[float] ))
keyword[else] :
identifier[total_neg] = literal[int]
identifier[pos_val] = identifier[data] [ literal[string] ]
keyword[for] identifier[i] keyword[in] identifier[pos_features] :
identifier[val] = identifier[float] ( identifier[i] [ literal[int] ])
identifier[pos_val] = identifier[pos_val] - identifier[np] . identifier[abs] ( identifier[val] )
identifier[i] [ literal[int] ]= identifier[convert_func] ( identifier[pos_val] )
keyword[if] identifier[len] ( identifier[pos_features] )> literal[int] :
identifier[total_pos] = identifier[np] . identifier[max] ( identifier[pos_features] [:, literal[int] ]. identifier[astype] ( identifier[float] ))- identifier[np] . identifier[min] ( identifier[pos_features] [:, literal[int] ]. identifier[astype] ( identifier[float] ))
keyword[else] :
identifier[total_pos] = literal[int]
identifier[data] [ literal[string] ]= identifier[convert_func] ( identifier[data] [ literal[string] ])
identifier[data] [ literal[string] ]= identifier[convert_func] ( identifier[data] [ literal[string] ])
keyword[return] identifier[neg_features] , identifier[total_neg] , identifier[pos_features] , identifier[total_pos] | def format_data(data):
"""Format data."""
# Format negative features
neg_features = np.array([[data['features'][x]['effect'], data['features'][x]['value'], data['featureNames'][x]] for x in data['features'].keys() if data['features'][x]['effect'] < 0])
neg_features = np.array(sorted(neg_features, key=lambda x: float(x[0]), reverse=False))
# Format postive features
pos_features = np.array([[data['features'][x]['effect'], data['features'][x]['value'], data['featureNames'][x]] for x in data['features'].keys() if data['features'][x]['effect'] >= 0])
pos_features = np.array(sorted(pos_features, key=lambda x: float(x[0]), reverse=True))
# Define link function
if data['link'] == 'identity':
convert_func = lambda x: x # depends on [control=['if'], data=[]]
elif data['link'] == 'logit':
convert_func = lambda x: 1 / (1 + np.exp(-x)) # depends on [control=['if'], data=[]]
else:
assert False, 'ERROR: Unrecognized link function: ' + str(data['link'])
# Convert negative feature values to plot values
neg_val = data['outValue']
for i in neg_features:
val = float(i[0])
neg_val = neg_val + np.abs(val)
i[0] = convert_func(neg_val) # depends on [control=['for'], data=['i']]
if len(neg_features) > 0:
total_neg = np.max(neg_features[:, 0].astype(float)) - np.min(neg_features[:, 0].astype(float)) # depends on [control=['if'], data=[]]
else:
total_neg = 0
# Convert positive feature values to plot values
pos_val = data['outValue']
for i in pos_features:
val = float(i[0])
pos_val = pos_val - np.abs(val)
i[0] = convert_func(pos_val) # depends on [control=['for'], data=['i']]
if len(pos_features) > 0:
total_pos = np.max(pos_features[:, 0].astype(float)) - np.min(pos_features[:, 0].astype(float)) # depends on [control=['if'], data=[]]
else:
total_pos = 0
# Convert output value and base value
data['outValue'] = convert_func(data['outValue'])
data['baseValue'] = convert_func(data['baseValue'])
return (neg_features, total_neg, pos_features, total_pos) |
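
A quick numeric check of the 'logit' link branch: the sigmoid maps the cumulative margin values back onto the probability scale, which is why the offsets are accumulated before conversion:

    import numpy as np

    convert_func = lambda x: 1 / (1 + np.exp(-x))
    print(convert_func(0.0))   # 0.5
    print(convert_func(2.0))   # ~0.8808
    print(convert_func(-2.0))  # ~0.1192
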
def element_value_should_be(self, locator, expected, strip=False):
"""Verifies the element identified by `locator` has the expected value.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected value | My Name Is Slim Shady |
| strip | Boolean, determines whether it should strip the field's value before comparison or not | ${True} / ${False} |"""
self._info("Verifying element '%s' value is '%s'" % (locator, expected))
element = self._element_find(locator, True, True)
value = element.get_attribute('value')
if (strip):
value = value.strip()
if str(value) == expected:
return
else:
raise AssertionError("Element '%s' value was not '%s', it was '%s'" % (locator, expected, value)) | def function[element_value_should_be, parameter[self, locator, expected, strip]]:
constant[Verifies the element identified by `locator` has the expected value.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected value | My Name Is Slim Shady |
| strip | Boolean, determines whether it should strip the field's value before comparison or not | ${True} / ${False} |]
call[name[self]._info, parameter[binary_operation[constant[Verifying element '%s' value is '%s'] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2047eabf0>, <ast.Name object at 0x7da2047e89a0>]]]]]
variable[element] assign[=] call[name[self]._element_find, parameter[name[locator], constant[True], constant[True]]]
variable[value] assign[=] call[name[element].get_attribute, parameter[constant[value]]]
if name[strip] begin[:]
variable[value] assign[=] call[name[value].strip, parameter[]]
if compare[call[name[str], parameter[name[value]]] equal[==] name[expected]] begin[:]
return[None] | keyword[def] identifier[element_value_should_be] ( identifier[self] , identifier[locator] , identifier[expected] , identifier[strip] = keyword[False] ):
literal[string]
identifier[self] . identifier[_info] ( literal[string] %( identifier[locator] , identifier[expected] ))
identifier[element] = identifier[self] . identifier[_element_find] ( identifier[locator] , keyword[True] , keyword[True] )
identifier[value] = identifier[element] . identifier[get_attribute] ( literal[string] )
keyword[if] ( identifier[strip] ):
identifier[value] = identifier[value] . identifier[strip] ()
keyword[if] identifier[str] ( identifier[value] )== identifier[expected] :
keyword[return]
keyword[else] :
keyword[raise] identifier[AssertionError] ( literal[string] %( identifier[locator] , identifier[expected] , identifier[value] )) | def element_value_should_be(self, locator, expected, strip=False):
"""Verifies the element identified by `locator` has the expected value.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected value | My Name Is Slim Shady |
| strip | Boolean, determines whether it should strip the field's value before comparison or not | ${True} / ${False} |"""
self._info("Verifying element '%s' value is '%s'" % (locator, expected))
element = self._element_find(locator, True, True)
value = element.get_attribute('value')
if strip:
value = value.strip() # depends on [control=['if'], data=[]]
if str(value) == expected:
return # depends on [control=['if'], data=[]]
else:
raise AssertionError("Element '%s' value was not '%s', it was '%s'" % (locator, expected, value)) |
def expectedBody(self, data):
"""
Read header and wait header value to call next state
@param data: Stream that length are to header length (1|2|4 bytes)
set next state to callBack body when length read from header
are received
"""
bodyLen = None
if data.len == 1:
bodyLen = UInt8()
elif data.len == 2:
bodyLen = UInt16Be()
elif data.len == 4:
bodyLen = UInt32Be()
else:
log.error("invalid header length")
return
data.readType(bodyLen)
self.expect(bodyLen.value, self._callbackBody) | def function[expectedBody, parameter[self, data]]:
    constant[
    Read the header and use its value to set up the next state.
    @param data: stream whose length equals the header length (1|2|4 bytes)
    Sets the next state to the body callback, which fires once the number
    of bytes announced by the header has been received.
    ]
variable[bodyLen] assign[=] constant[None]
if compare[name[data].len equal[==] constant[1]] begin[:]
variable[bodyLen] assign[=] call[name[UInt8], parameter[]]
call[name[data].readType, parameter[name[bodyLen]]]
call[name[self].expect, parameter[name[bodyLen].value, name[self]._callbackBody]] | keyword[def] identifier[expectedBody] ( identifier[self] , identifier[data] ):
literal[string]
identifier[bodyLen] = keyword[None]
keyword[if] identifier[data] . identifier[len] == literal[int] :
identifier[bodyLen] = identifier[UInt8] ()
keyword[elif] identifier[data] . identifier[len] == literal[int] :
identifier[bodyLen] = identifier[UInt16Be] ()
keyword[elif] identifier[data] . identifier[len] == literal[int] :
identifier[bodyLen] = identifier[UInt32Be] ()
keyword[else] :
identifier[log] . identifier[error] ( literal[string] )
keyword[return]
identifier[data] . identifier[readType] ( identifier[bodyLen] )
identifier[self] . identifier[expect] ( identifier[bodyLen] . identifier[value] , identifier[self] . identifier[_callbackBody] ) | def expectedBody(self, data):
"""
Read header and wait header value to call next state
@param data: Stream that length are to header length (1|2|4 bytes)
set next state to callBack body when length read from header
are received
"""
bodyLen = None
if data.len == 1:
bodyLen = UInt8() # depends on [control=['if'], data=[]]
elif data.len == 2:
bodyLen = UInt16Be() # depends on [control=['if'], data=[]]
elif data.len == 4:
bodyLen = UInt32Be() # depends on [control=['if'], data=[]]
else:
log.error('invalid header length')
return
data.readType(bodyLen)
self.expect(bodyLen.value, self._callbackBody) |
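A hedged sketch of the same 1/2/4-byte header dispatch, using the standard library's struct module in place of the library's UInt8/UInt16Be/UInt32Be types:

import struct

_FORMATS = {1: '>B', 2: '>H', 4: '>I'}  # big-endian, matching the *Be readers

def read_body_length(header_bytes):
    fmt = _FORMATS.get(len(header_bytes))
    if fmt is None:
        raise ValueError("invalid header length: %d" % len(header_bytes))
    return struct.unpack(fmt, header_bytes)[0]

assert read_body_length(b'\x00\x10') == 16  # two-byte big-endian header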
def delete(self):
""" Deletes one instance """
if self.instance is None:
raise CQLEngineException("DML Query instance attribute is None")
ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp, conditionals=self._conditional, if_exists=self._if_exists)
for name, col in self.model._primary_keys.items():
val = getattr(self.instance, name)
if val is None and not col.partition_key:
continue
ds.add_where(col, EqualsOperator(), val)
self._execute(ds) | def function[delete, parameter[self]]:
constant[ Deletes one instance ]
if compare[name[self].instance is constant[None]] begin[:]
<ast.Raise object at 0x7da20c6ab460>
variable[ds] assign[=] call[name[DeleteStatement], parameter[name[self].column_family_name]]
for taget[tuple[[<ast.Name object at 0x7da204620ca0>, <ast.Name object at 0x7da204622e30>]]] in starred[call[name[self].model._primary_keys.items, parameter[]]] begin[:]
variable[val] assign[=] call[name[getattr], parameter[name[self].instance, name[name]]]
if <ast.BoolOp object at 0x7da204620f70> begin[:]
continue
call[name[ds].add_where, parameter[name[col], call[name[EqualsOperator], parameter[]], name[val]]]
call[name[self]._execute, parameter[name[ds]]] | keyword[def] identifier[delete] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[instance] keyword[is] keyword[None] :
keyword[raise] identifier[CQLEngineException] ( literal[string] )
identifier[ds] = identifier[DeleteStatement] ( identifier[self] . identifier[column_family_name] , identifier[timestamp] = identifier[self] . identifier[_timestamp] , identifier[conditionals] = identifier[self] . identifier[_conditional] , identifier[if_exists] = identifier[self] . identifier[_if_exists] )
keyword[for] identifier[name] , identifier[col] keyword[in] identifier[self] . identifier[model] . identifier[_primary_keys] . identifier[items] ():
identifier[val] = identifier[getattr] ( identifier[self] . identifier[instance] , identifier[name] )
keyword[if] identifier[val] keyword[is] keyword[None] keyword[and] keyword[not] identifier[col] . identifier[partition_key] :
keyword[continue]
identifier[ds] . identifier[add_where] ( identifier[col] , identifier[EqualsOperator] (), identifier[val] )
identifier[self] . identifier[_execute] ( identifier[ds] ) | def delete(self):
""" Deletes one instance """
if self.instance is None:
raise CQLEngineException('DML Query instance attribute is None') # depends on [control=['if'], data=[]]
ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp, conditionals=self._conditional, if_exists=self._if_exists)
for (name, col) in self.model._primary_keys.items():
val = getattr(self.instance, name)
if val is None and (not col.partition_key):
continue # depends on [control=['if'], data=[]]
ds.add_where(col, EqualsOperator(), val) # depends on [control=['for'], data=[]]
self._execute(ds) |
def _check_stream(self):
"""Determines which output stream (stdout, stderr, or custom) to use"""
if self.stream:
try:
supported = ('PYCHARM_HOSTED' in os.environ or
os.isatty(sys.stdout.fileno()))
# a fix for IPython notebook "IOStream has no fileno."
except(UnsupportedOperation):
supported = True
else:
if self.stream is not None and hasattr(self.stream, 'write'):
self._stream_out = self.stream.write
self._stream_flush = self.stream.flush
if supported:
if self.stream == 1:
self._stream_out = sys.stdout.write
self._stream_flush = sys.stdout.flush
elif self.stream == 2:
self._stream_out = sys.stderr.write
self._stream_flush = sys.stderr.flush
else:
if self.stream is not None and hasattr(self.stream, 'write'):
self._stream_out = self.stream.write
self._stream_flush = self.stream.flush
else:
print('Warning: No valid output stream.') | def function[_check_stream, parameter[self]]:
constant[Determines which output stream (stdout, stderr, or custom) to use]
if name[self].stream begin[:]
<ast.Try object at 0x7da1b1234700>
if name[supported] begin[:]
if compare[name[self].stream equal[==] constant[1]] begin[:]
name[self]._stream_out assign[=] name[sys].stdout.write
name[self]._stream_flush assign[=] name[sys].stdout.flush | keyword[def] identifier[_check_stream] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[stream] :
keyword[try] :
identifier[supported] =( literal[string] keyword[in] identifier[os] . identifier[environ] keyword[or]
identifier[os] . identifier[isatty] ( identifier[sys] . identifier[stdout] . identifier[fileno] ()))
keyword[except] ( identifier[UnsupportedOperation] ):
identifier[supported] = keyword[True]
keyword[else] :
keyword[if] identifier[self] . identifier[stream] keyword[is] keyword[not] keyword[None] keyword[and] identifier[hasattr] ( identifier[self] . identifier[stream] , literal[string] ):
identifier[self] . identifier[_stream_out] = identifier[self] . identifier[stream] . identifier[write]
identifier[self] . identifier[_stream_flush] = identifier[self] . identifier[stream] . identifier[flush]
keyword[if] identifier[supported] :
keyword[if] identifier[self] . identifier[stream] == literal[int] :
identifier[self] . identifier[_stream_out] = identifier[sys] . identifier[stdout] . identifier[write]
identifier[self] . identifier[_stream_flush] = identifier[sys] . identifier[stdout] . identifier[flush]
keyword[elif] identifier[self] . identifier[stream] == literal[int] :
identifier[self] . identifier[_stream_out] = identifier[sys] . identifier[stderr] . identifier[write]
identifier[self] . identifier[_stream_flush] = identifier[sys] . identifier[stderr] . identifier[flush]
keyword[else] :
keyword[if] identifier[self] . identifier[stream] keyword[is] keyword[not] keyword[None] keyword[and] identifier[hasattr] ( identifier[self] . identifier[stream] , literal[string] ):
identifier[self] . identifier[_stream_out] = identifier[self] . identifier[stream] . identifier[write]
identifier[self] . identifier[_stream_flush] = identifier[self] . identifier[stream] . identifier[flush]
keyword[else] :
identifier[print] ( literal[string] ) | def _check_stream(self):
"""Determines which output stream (stdout, stderr, or custom) to use"""
if self.stream:
try:
supported = 'PYCHARM_HOSTED' in os.environ or os.isatty(sys.stdout.fileno()) # depends on [control=['try'], data=[]]
# a fix for IPython notebook "IOStream has no fileno."
except UnsupportedOperation:
supported = True # depends on [control=['except'], data=[]]
else:
if self.stream is not None and hasattr(self.stream, 'write'):
self._stream_out = self.stream.write
self._stream_flush = self.stream.flush # depends on [control=['if'], data=[]]
if supported:
if self.stream == 1:
self._stream_out = sys.stdout.write
self._stream_flush = sys.stdout.flush # depends on [control=['if'], data=[]]
elif self.stream == 2:
self._stream_out = sys.stderr.write
self._stream_flush = sys.stderr.flush # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif self.stream is not None and hasattr(self.stream, 'write'):
self._stream_out = self.stream.write
self._stream_flush = self.stream.flush # depends on [control=['if'], data=[]]
else:
print('Warning: No valid output stream.') # depends on [control=['if'], data=[]] |
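The stream selection above boils down to a small mapping; a hedged standalone sketch (1 -> stdout, 2 -> stderr, any writable object -> itself):

import sys

def pick_stream(stream):
    if stream == 1:
        return sys.stdout.write, sys.stdout.flush
    if stream == 2:
        return sys.stderr.write, sys.stderr.flush
    if stream is not None and hasattr(stream, 'write'):
        return stream.write, stream.flush
    raise ValueError('No valid output stream.')

out, flush = pick_stream(2)   # write progress output to stderr
out('progress...\n')
flush()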
def __render_videoframe(self):
""" Retrieves a new videoframe from the stream.
        Sets the frame as __current_videoframe and passes it on to
__videorenderfunc() if it is set. """
new_videoframe = self.clip.get_frame(self.clock.time)
# Pass it to the callback function if this is set
if callable(self.__videorenderfunc):
self.__videorenderfunc(new_videoframe)
# Set current_frame to current frame (...)
self.__current_videoframe = new_videoframe | def function[__render_videoframe, parameter[self]]:
constant[ Retrieves a new videoframe from the stream.
    Sets the frame as __current_videoframe and passes it on to
__videorenderfunc() if it is set. ]
variable[new_videoframe] assign[=] call[name[self].clip.get_frame, parameter[name[self].clock.time]]
if call[name[callable], parameter[name[self].__videorenderfunc]] begin[:]
call[name[self].__videorenderfunc, parameter[name[new_videoframe]]]
name[self].__current_videoframe assign[=] name[new_videoframe] | keyword[def] identifier[__render_videoframe] ( identifier[self] ):
literal[string]
identifier[new_videoframe] = identifier[self] . identifier[clip] . identifier[get_frame] ( identifier[self] . identifier[clock] . identifier[time] )
keyword[if] identifier[callable] ( identifier[self] . identifier[__videorenderfunc] ):
identifier[self] . identifier[__videorenderfunc] ( identifier[new_videoframe] )
identifier[self] . identifier[__current_videoframe] = identifier[new_videoframe] | def __render_videoframe(self):
""" Retrieves a new videoframe from the stream.
    Sets the frame as __current_videoframe and passes it on to
__videorenderfunc() if it is set. """
new_videoframe = self.clip.get_frame(self.clock.time) # Pass it to the callback function if this is set
if callable(self.__videorenderfunc):
self.__videorenderfunc(new_videoframe) # depends on [control=['if'], data=[]] # Set current_frame to current frame (...)
self.__current_videoframe = new_videoframe |
def _illegal_character(c, ctx, message=''):
"""Raises an IonException upon encountering the given illegal character in the given context.
Args:
c (int|None): Ordinal of the illegal character.
ctx (_HandlerContext): Context in which the illegal character was encountered.
message (Optional[str]): Additional information, as necessary.
"""
container_type = ctx.container.ion_type is None and 'top-level' or ctx.container.ion_type.name
value_type = ctx.ion_type is None and 'unknown' or ctx.ion_type.name
if c is None:
header = 'Illegal token'
else:
c = 'EOF' if BufferQueue.is_eof(c) else _chr(c)
header = 'Illegal character %s' % (c,)
raise IonException('%s at position %d in %s value contained in %s. %s Pending value: %s'
% (header, ctx.queue.position, value_type, container_type, message, ctx.value)) | def function[_illegal_character, parameter[c, ctx, message]]:
constant[Raises an IonException upon encountering the given illegal character in the given context.
Args:
c (int|None): Ordinal of the illegal character.
ctx (_HandlerContext): Context in which the illegal character was encountered.
message (Optional[str]): Additional information, as necessary.
]
variable[container_type] assign[=] <ast.BoolOp object at 0x7da1b15d3a60>
variable[value_type] assign[=] <ast.BoolOp object at 0x7da1b15d2c20>
if compare[name[c] is constant[None]] begin[:]
variable[header] assign[=] constant[Illegal token]
<ast.Raise object at 0x7da1b15ae530> | keyword[def] identifier[_illegal_character] ( identifier[c] , identifier[ctx] , identifier[message] = literal[string] ):
literal[string]
identifier[container_type] = identifier[ctx] . identifier[container] . identifier[ion_type] keyword[is] keyword[None] keyword[and] literal[string] keyword[or] identifier[ctx] . identifier[container] . identifier[ion_type] . identifier[name]
identifier[value_type] = identifier[ctx] . identifier[ion_type] keyword[is] keyword[None] keyword[and] literal[string] keyword[or] identifier[ctx] . identifier[ion_type] . identifier[name]
keyword[if] identifier[c] keyword[is] keyword[None] :
identifier[header] = literal[string]
keyword[else] :
identifier[c] = literal[string] keyword[if] identifier[BufferQueue] . identifier[is_eof] ( identifier[c] ) keyword[else] identifier[_chr] ( identifier[c] )
identifier[header] = literal[string] %( identifier[c] ,)
keyword[raise] identifier[IonException] ( literal[string]
%( identifier[header] , identifier[ctx] . identifier[queue] . identifier[position] , identifier[value_type] , identifier[container_type] , identifier[message] , identifier[ctx] . identifier[value] )) | def _illegal_character(c, ctx, message=''):
"""Raises an IonException upon encountering the given illegal character in the given context.
Args:
c (int|None): Ordinal of the illegal character.
ctx (_HandlerContext): Context in which the illegal character was encountered.
message (Optional[str]): Additional information, as necessary.
"""
container_type = ctx.container.ion_type is None and 'top-level' or ctx.container.ion_type.name
value_type = ctx.ion_type is None and 'unknown' or ctx.ion_type.name
if c is None:
header = 'Illegal token' # depends on [control=['if'], data=[]]
else:
c = 'EOF' if BufferQueue.is_eof(c) else _chr(c)
header = 'Illegal character %s' % (c,)
raise IonException('%s at position %d in %s value contained in %s. %s Pending value: %s' % (header, ctx.queue.position, value_type, container_type, message, ctx.value)) |
def sort_filenames(filenames):
"""
sort a list of files by filename only, ignoring the directory names
"""
basenames = [os.path.basename(x) for x in filenames]
indexes = [i[0] for i in sorted(enumerate(basenames), key=lambda x:x[1])]
return [filenames[x] for x in indexes] | def function[sort_filenames, parameter[filenames]]:
constant[
sort a list of files by filename only, ignoring the directory names
]
variable[basenames] assign[=] <ast.ListComp object at 0x7da1b17a5de0>
variable[indexes] assign[=] <ast.ListComp object at 0x7da1b17a6ad0>
return[<ast.ListComp object at 0x7da1b17a65f0>] | keyword[def] identifier[sort_filenames] ( identifier[filenames] ):
literal[string]
identifier[basenames] =[ identifier[os] . identifier[path] . identifier[basename] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[filenames] ]
identifier[indexes] =[ identifier[i] [ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[sorted] ( identifier[enumerate] ( identifier[basenames] ), identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ])]
keyword[return] [ identifier[filenames] [ identifier[x] ] keyword[for] identifier[x] keyword[in] identifier[indexes] ] | def sort_filenames(filenames):
"""
sort a list of files by filename only, ignoring the directory names
"""
basenames = [os.path.basename(x) for x in filenames]
indexes = [i[0] for i in sorted(enumerate(basenames), key=lambda x: x[1])]
return [filenames[x] for x in indexes] |
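Usage example; the same ordering can also be had in one step with a key function, which may be the more idiomatic choice:

import os

files = ['b/2.txt', 'a/1.txt', 'c/0.txt']
print(sort_filenames(files))                # ['c/0.txt', 'a/1.txt', 'b/2.txt']
print(sorted(files, key=os.path.basename))  # equivalent one-liner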
def zip_namedtuple(nt_list):
""" accept list of namedtuple, return a dict of zipped fields """
if not nt_list:
return dict()
if not isinstance(nt_list, list):
nt_list = [nt_list]
for nt in nt_list:
assert type(nt) == type(nt_list[0])
ret = {k : [v] for k, v in nt_list[0]._asdict().items()}
for nt in nt_list[1:]:
for k, v in nt._asdict().items():
ret[k].append(v)
return ret | def function[zip_namedtuple, parameter[nt_list]]:
constant[ accept list of namedtuple, return a dict of zipped fields ]
if <ast.UnaryOp object at 0x7da1b1e042e0> begin[:]
return[call[name[dict], parameter[]]]
if <ast.UnaryOp object at 0x7da1b1e043d0> begin[:]
variable[nt_list] assign[=] list[[<ast.Name object at 0x7da1b1e04610>]]
for taget[name[nt]] in starred[name[nt_list]] begin[:]
assert[compare[call[name[type], parameter[name[nt]]] equal[==] call[name[type], parameter[call[name[nt_list]][constant[0]]]]]]
variable[ret] assign[=] <ast.DictComp object at 0x7da1b1e41ab0>
for taget[name[nt]] in starred[call[name[nt_list]][<ast.Slice object at 0x7da1b1e41780>]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1ef0bb0>, <ast.Name object at 0x7da1b1ef23e0>]]] in starred[call[call[name[nt]._asdict, parameter[]].items, parameter[]]] begin[:]
call[call[name[ret]][name[k]].append, parameter[name[v]]]
return[name[ret]] | keyword[def] identifier[zip_namedtuple] ( identifier[nt_list] ):
literal[string]
keyword[if] keyword[not] identifier[nt_list] :
keyword[return] identifier[dict] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[nt_list] , identifier[list] ):
identifier[nt_list] =[ identifier[nt_list] ]
keyword[for] identifier[nt] keyword[in] identifier[nt_list] :
keyword[assert] identifier[type] ( identifier[nt] )== identifier[type] ( identifier[nt_list] [ literal[int] ])
identifier[ret] ={ identifier[k] :[ identifier[v] ] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[nt_list] [ literal[int] ]. identifier[_asdict] (). identifier[items] ()}
keyword[for] identifier[nt] keyword[in] identifier[nt_list] [ literal[int] :]:
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[nt] . identifier[_asdict] (). identifier[items] ():
identifier[ret] [ identifier[k] ]. identifier[append] ( identifier[v] )
keyword[return] identifier[ret] | def zip_namedtuple(nt_list):
""" accept list of namedtuple, return a dict of zipped fields """
if not nt_list:
return dict() # depends on [control=['if'], data=[]]
if not isinstance(nt_list, list):
nt_list = [nt_list] # depends on [control=['if'], data=[]]
for nt in nt_list:
assert type(nt) == type(nt_list[0]) # depends on [control=['for'], data=['nt']]
ret = {k: [v] for (k, v) in nt_list[0]._asdict().items()}
for nt in nt_list[1:]:
for (k, v) in nt._asdict().items():
ret[k].append(v) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['nt']]
return ret |
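Usage example with a small namedtuple; a single instance is wrapped into a one-element list before zipping:

from collections import namedtuple

Point = namedtuple('Point', ['x', 'y'])
print(zip_namedtuple([Point(1, 2), Point(3, 4)]))  # {'x': [1, 3], 'y': [2, 4]}
print(zip_namedtuple(Point(5, 6)))                 # {'x': [5], 'y': [6]}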
def prepare(args):
"""
%prog prepare pairsfile cdsfile [pepfile] -o paired.cds.fasta
Pick sequences from cdsfile to form pairs, ready to be calculated. The
pairsfile can be generated from formats.blast.cscore(). The first two
columns contain the pair.
"""
from jcvi.formats.fasta import Fasta
p = OptionParser(prepare.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
outfile = opts.outfile
if len(args) == 2:
pairsfile, cdsfile = args
pepfile = None
elif len(args) == 3:
pairsfile, cdsfile, pepfile = args
else:
sys.exit(not p.print_help())
f = Fasta(cdsfile)
fp = open(pairsfile)
fw = must_open(outfile, "w")
if pepfile:
assert outfile != "stdout", "Please specify outfile name."
f2 = Fasta(pepfile)
fw2 = must_open(outfile + ".pep", "w")
for row in fp:
if row[0] == '#':
continue
a, b = row.split()[:2]
if a == b:
logging.debug("Self pairs found: {0} - {1}. Ignored".format(a, b))
continue
if a not in f:
a = find_first_isoform(a, f)
assert a, a
if b not in f:
b = find_first_isoform(b, f)
assert b, b
acds = f[a]
bcds = f[b]
SeqIO.write((acds, bcds), fw, "fasta")
if pepfile:
apep = f2[a]
bpep = f2[b]
SeqIO.write((apep, bpep), fw2, "fasta")
fw.close()
if pepfile:
fw2.close() | def function[prepare, parameter[args]]:
constant[
%prog prepare pairsfile cdsfile [pepfile] -o paired.cds.fasta
Pick sequences from cdsfile to form pairs, ready to be calculated. The
pairsfile can be generated from formats.blast.cscore(). The first two
columns contain the pair.
]
from relative_module[jcvi.formats.fasta] import module[Fasta]
variable[p] assign[=] call[name[OptionParser], parameter[name[prepare].__doc__]]
call[name[p].set_outfile, parameter[]]
<ast.Tuple object at 0x7da207f98040> assign[=] call[name[p].parse_args, parameter[name[args]]]
variable[outfile] assign[=] name[opts].outfile
if compare[call[name[len], parameter[name[args]]] equal[==] constant[2]] begin[:]
<ast.Tuple object at 0x7da207f99780> assign[=] name[args]
variable[pepfile] assign[=] constant[None]
variable[f] assign[=] call[name[Fasta], parameter[name[cdsfile]]]
variable[fp] assign[=] call[name[open], parameter[name[pairsfile]]]
variable[fw] assign[=] call[name[must_open], parameter[name[outfile], constant[w]]]
if name[pepfile] begin[:]
assert[compare[name[outfile] not_equal[!=] constant[stdout]]]
variable[f2] assign[=] call[name[Fasta], parameter[name[pepfile]]]
variable[fw2] assign[=] call[name[must_open], parameter[binary_operation[name[outfile] + constant[.pep]], constant[w]]]
for taget[name[row]] in starred[name[fp]] begin[:]
if compare[call[name[row]][constant[0]] equal[==] constant[#]] begin[:]
continue
<ast.Tuple object at 0x7da207f98250> assign[=] call[call[name[row].split, parameter[]]][<ast.Slice object at 0x7da207f99d80>]
if compare[name[a] equal[==] name[b]] begin[:]
call[name[logging].debug, parameter[call[constant[Self pairs found: {0} - {1}. Ignored].format, parameter[name[a], name[b]]]]]
continue
if compare[name[a] <ast.NotIn object at 0x7da2590d7190> name[f]] begin[:]
variable[a] assign[=] call[name[find_first_isoform], parameter[name[a], name[f]]]
assert[name[a]]
if compare[name[b] <ast.NotIn object at 0x7da2590d7190> name[f]] begin[:]
variable[b] assign[=] call[name[find_first_isoform], parameter[name[b], name[f]]]
assert[name[b]]
variable[acds] assign[=] call[name[f]][name[a]]
variable[bcds] assign[=] call[name[f]][name[b]]
call[name[SeqIO].write, parameter[tuple[[<ast.Name object at 0x7da207f9b550>, <ast.Name object at 0x7da207f99bd0>]], name[fw], constant[fasta]]]
if name[pepfile] begin[:]
variable[apep] assign[=] call[name[f2]][name[a]]
variable[bpep] assign[=] call[name[f2]][name[b]]
call[name[SeqIO].write, parameter[tuple[[<ast.Name object at 0x7da207f02830>, <ast.Name object at 0x7da207f03790>]], name[fw2], constant[fasta]]]
call[name[fw].close, parameter[]]
if name[pepfile] begin[:]
call[name[fw2].close, parameter[]] | keyword[def] identifier[prepare] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[formats] . identifier[fasta] keyword[import] identifier[Fasta]
identifier[p] = identifier[OptionParser] ( identifier[prepare] . identifier[__doc__] )
identifier[p] . identifier[set_outfile] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
identifier[outfile] = identifier[opts] . identifier[outfile]
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
identifier[pairsfile] , identifier[cdsfile] = identifier[args]
identifier[pepfile] = keyword[None]
keyword[elif] identifier[len] ( identifier[args] )== literal[int] :
identifier[pairsfile] , identifier[cdsfile] , identifier[pepfile] = identifier[args]
keyword[else] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[f] = identifier[Fasta] ( identifier[cdsfile] )
identifier[fp] = identifier[open] ( identifier[pairsfile] )
identifier[fw] = identifier[must_open] ( identifier[outfile] , literal[string] )
keyword[if] identifier[pepfile] :
keyword[assert] identifier[outfile] != literal[string] , literal[string]
identifier[f2] = identifier[Fasta] ( identifier[pepfile] )
identifier[fw2] = identifier[must_open] ( identifier[outfile] + literal[string] , literal[string] )
keyword[for] identifier[row] keyword[in] identifier[fp] :
keyword[if] identifier[row] [ literal[int] ]== literal[string] :
keyword[continue]
identifier[a] , identifier[b] = identifier[row] . identifier[split] ()[: literal[int] ]
keyword[if] identifier[a] == identifier[b] :
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[a] , identifier[b] ))
keyword[continue]
keyword[if] identifier[a] keyword[not] keyword[in] identifier[f] :
identifier[a] = identifier[find_first_isoform] ( identifier[a] , identifier[f] )
keyword[assert] identifier[a] , identifier[a]
keyword[if] identifier[b] keyword[not] keyword[in] identifier[f] :
identifier[b] = identifier[find_first_isoform] ( identifier[b] , identifier[f] )
keyword[assert] identifier[b] , identifier[b]
identifier[acds] = identifier[f] [ identifier[a] ]
identifier[bcds] = identifier[f] [ identifier[b] ]
identifier[SeqIO] . identifier[write] (( identifier[acds] , identifier[bcds] ), identifier[fw] , literal[string] )
keyword[if] identifier[pepfile] :
identifier[apep] = identifier[f2] [ identifier[a] ]
identifier[bpep] = identifier[f2] [ identifier[b] ]
identifier[SeqIO] . identifier[write] (( identifier[apep] , identifier[bpep] ), identifier[fw2] , literal[string] )
identifier[fw] . identifier[close] ()
keyword[if] identifier[pepfile] :
identifier[fw2] . identifier[close] () | def prepare(args):
"""
%prog prepare pairsfile cdsfile [pepfile] -o paired.cds.fasta
Pick sequences from cdsfile to form pairs, ready to be calculated. The
pairsfile can be generated from formats.blast.cscore(). The first two
columns contain the pair.
"""
from jcvi.formats.fasta import Fasta
p = OptionParser(prepare.__doc__)
p.set_outfile()
(opts, args) = p.parse_args(args)
outfile = opts.outfile
if len(args) == 2:
(pairsfile, cdsfile) = args
pepfile = None # depends on [control=['if'], data=[]]
elif len(args) == 3:
(pairsfile, cdsfile, pepfile) = args # depends on [control=['if'], data=[]]
else:
sys.exit(not p.print_help())
f = Fasta(cdsfile)
fp = open(pairsfile)
fw = must_open(outfile, 'w')
if pepfile:
assert outfile != 'stdout', 'Please specify outfile name.'
f2 = Fasta(pepfile)
fw2 = must_open(outfile + '.pep', 'w') # depends on [control=['if'], data=[]]
for row in fp:
if row[0] == '#':
continue # depends on [control=['if'], data=[]]
(a, b) = row.split()[:2]
if a == b:
logging.debug('Self pairs found: {0} - {1}. Ignored'.format(a, b))
continue # depends on [control=['if'], data=['a', 'b']]
if a not in f:
a = find_first_isoform(a, f)
assert a, a # depends on [control=['if'], data=['a', 'f']]
if b not in f:
b = find_first_isoform(b, f)
assert b, b # depends on [control=['if'], data=['b', 'f']]
acds = f[a]
bcds = f[b]
SeqIO.write((acds, bcds), fw, 'fasta')
if pepfile:
apep = f2[a]
bpep = f2[b]
SeqIO.write((apep, bpep), fw2, 'fasta') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']]
fw.close()
if pepfile:
fw2.close() # depends on [control=['if'], data=[]] |
def match_and(self, tokens, item):
"""Matches and."""
for match in tokens:
self.match(match, item) | def function[match_and, parameter[self, tokens, item]]:
constant[Matches and.]
for taget[name[match]] in starred[name[tokens]] begin[:]
call[name[self].match, parameter[name[match], name[item]]] | keyword[def] identifier[match_and] ( identifier[self] , identifier[tokens] , identifier[item] ):
literal[string]
keyword[for] identifier[match] keyword[in] identifier[tokens] :
identifier[self] . identifier[match] ( identifier[match] , identifier[item] ) | def match_and(self, tokens, item):
"""Matches and."""
for match in tokens:
self.match(match, item) # depends on [control=['for'], data=['match']] |
def find_attr_start_line(self, lines, min_line=4, max_line=9):
"""
Return line number of the first real attribute and value.
The first line is 0. If the 'ATTRIBUTE_NAME' header is not
found, return the index after max_line.
"""
for idx, line in enumerate(lines[min_line:max_line]):
col = line.split()
if len(col) > 1 and col[1] == 'ATTRIBUTE_NAME':
return idx + min_line + 1
self.log.warn('ATTRIBUTE_NAME not found in second column of'
' smartctl output between lines %d and %d.'
% (min_line, max_line))
return max_line + 1 | def function[find_attr_start_line, parameter[self, lines, min_line, max_line]]:
constant[
Return line number of the first real attribute and value.
The first line is 0. If the 'ATTRIBUTE_NAME' header is not
found, return the index after max_line.
]
for taget[tuple[[<ast.Name object at 0x7da18dc06680>, <ast.Name object at 0x7da18dc06d40>]]] in starred[call[name[enumerate], parameter[call[name[lines]][<ast.Slice object at 0x7da18dc04df0>]]]] begin[:]
variable[col] assign[=] call[name[line].split, parameter[]]
if <ast.BoolOp object at 0x7da18f720c10> begin[:]
return[binary_operation[binary_operation[name[idx] + name[min_line]] + constant[1]]]
call[name[self].log.warn, parameter[binary_operation[constant[ATTRIBUTE_NAME not found in second column of smartctl output between lines %d and %d.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f720dc0>, <ast.Name object at 0x7da18f7208e0>]]]]]
return[binary_operation[name[max_line] + constant[1]]] | keyword[def] identifier[find_attr_start_line] ( identifier[self] , identifier[lines] , identifier[min_line] = literal[int] , identifier[max_line] = literal[int] ):
literal[string]
keyword[for] identifier[idx] , identifier[line] keyword[in] identifier[enumerate] ( identifier[lines] [ identifier[min_line] : identifier[max_line] ]):
identifier[col] = identifier[line] . identifier[split] ()
keyword[if] identifier[len] ( identifier[col] )> literal[int] keyword[and] identifier[col] [ literal[int] ]== literal[string] :
keyword[return] identifier[idx] + identifier[min_line] + literal[int]
identifier[self] . identifier[log] . identifier[warn] ( literal[string]
literal[string]
%( identifier[min_line] , identifier[max_line] ))
keyword[return] identifier[max_line] + literal[int] | def find_attr_start_line(self, lines, min_line=4, max_line=9):
"""
Return line number of the first real attribute and value.
The first line is 0. If the 'ATTRIBUTE_NAME' header is not
found, return the index after max_line.
"""
for (idx, line) in enumerate(lines[min_line:max_line]):
col = line.split()
if len(col) > 1 and col[1] == 'ATTRIBUTE_NAME':
return idx + min_line + 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
self.log.warn('ATTRIBUTE_NAME not found in second column of smartctl output between lines %d and %d.' % (min_line, max_line))
return max_line + 1 |
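A hedged usage sketch against a fabricated slice of smartctl-style output; a SimpleNamespace stub supplies the self.log attribute the method expects, and the method is called as a plain function:

import types

lines = [
    'smartctl 7.2 (local build)',
    'Copyright (C) 2002-20, Bruce Allen, Christian Franke',
    '=== START OF READ SMART DATA SECTION ===',
    'SMART Attributes Data Structure revision number: 16',
    'Vendor Specific SMART Attributes with Thresholds:',
    'ID# ATTRIBUTE_NAME          FLAG     VALUE WORST THRESH',
    '  1 Raw_Read_Error_Rate    0x000f   100   100   006',
]
stub = types.SimpleNamespace(log=types.SimpleNamespace(warn=print))
print(find_attr_start_line(stub, lines))  # -> 6, index of the first attribute row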
def objs(self):
"""
        Yields the tracked objects that are recognized with this profile and
        belong to the current session.
"""
for obj in self.objects.itervalues():
if obj.sessionid in self.sessions:
yield obj | def function[objs, parameter[self]]:
constant[
    Yields the tracked objects that are recognized with this profile and
    belong to the current session.
]
for taget[name[obj]] in starred[call[name[self].objects.itervalues, parameter[]]] begin[:]
if compare[name[obj].sessionid in name[self].sessions] begin[:]
<ast.Yield object at 0x7da1aff56e00> | keyword[def] identifier[objs] ( identifier[self] ):
literal[string]
keyword[for] identifier[obj] keyword[in] identifier[self] . identifier[objects] . identifier[itervalues] ():
keyword[if] identifier[obj] . identifier[sessionid] keyword[in] identifier[self] . identifier[sessions] :
keyword[yield] identifier[obj] | def objs(self):
"""
    Yields the tracked objects that are recognized with this profile and
    belong to the current session.
"""
for obj in self.objects.itervalues():
if obj.sessionid in self.sessions:
yield obj # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['obj']] |
def allow_cors(func):
"""This is a decorator which enable CORS for the specified endpoint."""
def wrapper(*args, **kwargs):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = \
'PUT, GET, POST, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = \
'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
return func(*args, **kwargs)
return wrapper | def function[allow_cors, parameter[func]]:
    constant[This is a decorator which enables CORS for the specified endpoint.]
def function[wrapper, parameter[]]:
call[name[response].headers][constant[Access-Control-Allow-Origin]] assign[=] constant[*]
call[name[response].headers][constant[Access-Control-Allow-Methods]] assign[=] constant[PUT, GET, POST, DELETE, OPTIONS]
call[name[response].headers][constant[Access-Control-Allow-Headers]] assign[=] constant[Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token]
return[call[name[func], parameter[<ast.Starred object at 0x7da20c7cb9a0>]]]
return[name[wrapper]] | keyword[def] identifier[allow_cors] ( identifier[func] ):
literal[string]
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[response] . identifier[headers] [ literal[string] ]= literal[string]
identifier[response] . identifier[headers] [ literal[string] ]= literal[string]
identifier[response] . identifier[headers] [ literal[string] ]= literal[string]
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper] | def allow_cors(func):
"""This is a decorator which enable CORS for the specified endpoint."""
def wrapper(*args, **kwargs):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
return func(*args, **kwargs)
return wrapper |
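A hedged usage sketch, assuming the Bottle framework whose global response object the decorator mutates:

from bottle import route, run

@route('/api/ping')
@allow_cors
def ping():
    return {'pong': True}

# run(host='localhost', port=8080)  # responses from /api/ping now carry the CORS headers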
def proto_to_dict(message):
"""Converts protobuf message instance to dict
:param message: protobuf message instance
:return: parameters and their values
:rtype: dict
:raises: :class:`.TypeError` if ``message`` is not a proto message
"""
if not isinstance(message, _ProtoMessageType):
raise TypeError("Expected `message` to be a instance of protobuf message")
data = {}
for desc, field in message.ListFields():
if desc.type == desc.TYPE_MESSAGE:
if desc.label == desc.LABEL_REPEATED:
data[desc.name] = list(map(proto_to_dict, field))
else:
data[desc.name] = proto_to_dict(field)
else:
data[desc.name] = list(field) if desc.label == desc.LABEL_REPEATED else field
return data | def function[proto_to_dict, parameter[message]]:
    constant[Converts a protobuf message instance to a dict
    :param message: protobuf message instance
    :return: field names and their values
    :rtype: dict
    :raises: :class:`.TypeError` if ``message`` is not a proto message
    ]
if <ast.UnaryOp object at 0x7da1b2315120> begin[:]
<ast.Raise object at 0x7da1b2317460>
variable[data] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b2316560>, <ast.Name object at 0x7da1b2317b20>]]] in starred[call[name[message].ListFields, parameter[]]] begin[:]
if compare[name[desc].type equal[==] name[desc].TYPE_MESSAGE] begin[:]
if compare[name[desc].label equal[==] name[desc].LABEL_REPEATED] begin[:]
call[name[data]][name[desc].name] assign[=] call[name[list], parameter[call[name[map], parameter[name[proto_to_dict], name[field]]]]]
return[name[data]] | keyword[def] identifier[proto_to_dict] ( identifier[message] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[message] , identifier[_ProtoMessageType] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[data] ={}
keyword[for] identifier[desc] , identifier[field] keyword[in] identifier[message] . identifier[ListFields] ():
keyword[if] identifier[desc] . identifier[type] == identifier[desc] . identifier[TYPE_MESSAGE] :
keyword[if] identifier[desc] . identifier[label] == identifier[desc] . identifier[LABEL_REPEATED] :
identifier[data] [ identifier[desc] . identifier[name] ]= identifier[list] ( identifier[map] ( identifier[proto_to_dict] , identifier[field] ))
keyword[else] :
identifier[data] [ identifier[desc] . identifier[name] ]= identifier[proto_to_dict] ( identifier[field] )
keyword[else] :
identifier[data] [ identifier[desc] . identifier[name] ]= identifier[list] ( identifier[field] ) keyword[if] identifier[desc] . identifier[label] == identifier[desc] . identifier[LABEL_REPEATED] keyword[else] identifier[field]
keyword[return] identifier[data] | def proto_to_dict(message):
"""Converts protobuf message instance to dict
:param message: protobuf message instance
:return: parameters and their values
:rtype: dict
:raises: :class:`.TypeError` if ``message`` is not a proto message
"""
if not isinstance(message, _ProtoMessageType):
        raise TypeError('Expected `message` to be an instance of a protobuf message') # depends on [control=['if'], data=[]]
data = {}
for (desc, field) in message.ListFields():
if desc.type == desc.TYPE_MESSAGE:
if desc.label == desc.LABEL_REPEATED:
data[desc.name] = list(map(proto_to_dict, field)) # depends on [control=['if'], data=[]]
else:
data[desc.name] = proto_to_dict(field) # depends on [control=['if'], data=[]]
else:
data[desc.name] = list(field) if desc.label == desc.LABEL_REPEATED else field # depends on [control=['for'], data=[]]
return data |
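A hedged usage sketch; addressbook_pb2 stands in for any protoc-generated module and is hypothetical here:

from addressbook_pb2 import Person  # hypothetical generated module

p = Person(name='Ada', id=1)
p.phones.add(number='555-0100')  # repeated message field -> list of dicts
print(proto_to_dict(p))
# {'name': 'Ada', 'id': 1, 'phones': [{'number': '555-0100'}]}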
def process(self, request, response, environ):
"""
Create a new access token.
:param request: The incoming :class:`oauth2.web.Request`.
:param response: The :class:`oauth2.web.Response` that will be returned
to the client.
:param environ: A ``dict`` containing data of the environment.
:return: :class:`oauth2.web.Response`
"""
token_data = self.token_generator.create_access_token_data(self.refresh_grant_type)
expires_at = int(time.time()) + token_data["expires_in"]
access_token = AccessToken(client_id=self.client.identifier,
token=token_data["access_token"],
grant_type=self.refresh_grant_type,
data=self.data, expires_at=expires_at,
scopes=self.scope_handler.scopes,
user_id=self.user_id)
if self.reissue_refresh_tokens:
self.access_token_store.delete_refresh_token(self.refresh_token)
access_token.refresh_token = token_data["refresh_token"]
refresh_expires_in = self.token_generator.refresh_expires_in
refresh_expires_at = int(time.time()) + refresh_expires_in
access_token.refresh_expires_at = refresh_expires_at
else:
del token_data["refresh_token"]
self.access_token_store.save_token(access_token)
json_success_response(data=token_data, response=response)
return response | def function[process, parameter[self, request, response, environ]]:
constant[
Create a new access token.
:param request: The incoming :class:`oauth2.web.Request`.
:param response: The :class:`oauth2.web.Response` that will be returned
to the client.
:param environ: A ``dict`` containing data of the environment.
:return: :class:`oauth2.web.Response`
]
variable[token_data] assign[=] call[name[self].token_generator.create_access_token_data, parameter[name[self].refresh_grant_type]]
variable[expires_at] assign[=] binary_operation[call[name[int], parameter[call[name[time].time, parameter[]]]] + call[name[token_data]][constant[expires_in]]]
variable[access_token] assign[=] call[name[AccessToken], parameter[]]
if name[self].reissue_refresh_tokens begin[:]
call[name[self].access_token_store.delete_refresh_token, parameter[name[self].refresh_token]]
name[access_token].refresh_token assign[=] call[name[token_data]][constant[refresh_token]]
variable[refresh_expires_in] assign[=] name[self].token_generator.refresh_expires_in
variable[refresh_expires_at] assign[=] binary_operation[call[name[int], parameter[call[name[time].time, parameter[]]]] + name[refresh_expires_in]]
name[access_token].refresh_expires_at assign[=] name[refresh_expires_at]
call[name[self].access_token_store.save_token, parameter[name[access_token]]]
call[name[json_success_response], parameter[]]
return[name[response]] | keyword[def] identifier[process] ( identifier[self] , identifier[request] , identifier[response] , identifier[environ] ):
literal[string]
identifier[token_data] = identifier[self] . identifier[token_generator] . identifier[create_access_token_data] ( identifier[self] . identifier[refresh_grant_type] )
identifier[expires_at] = identifier[int] ( identifier[time] . identifier[time] ())+ identifier[token_data] [ literal[string] ]
identifier[access_token] = identifier[AccessToken] ( identifier[client_id] = identifier[self] . identifier[client] . identifier[identifier] ,
identifier[token] = identifier[token_data] [ literal[string] ],
identifier[grant_type] = identifier[self] . identifier[refresh_grant_type] ,
identifier[data] = identifier[self] . identifier[data] , identifier[expires_at] = identifier[expires_at] ,
identifier[scopes] = identifier[self] . identifier[scope_handler] . identifier[scopes] ,
identifier[user_id] = identifier[self] . identifier[user_id] )
keyword[if] identifier[self] . identifier[reissue_refresh_tokens] :
identifier[self] . identifier[access_token_store] . identifier[delete_refresh_token] ( identifier[self] . identifier[refresh_token] )
identifier[access_token] . identifier[refresh_token] = identifier[token_data] [ literal[string] ]
identifier[refresh_expires_in] = identifier[self] . identifier[token_generator] . identifier[refresh_expires_in]
identifier[refresh_expires_at] = identifier[int] ( identifier[time] . identifier[time] ())+ identifier[refresh_expires_in]
identifier[access_token] . identifier[refresh_expires_at] = identifier[refresh_expires_at]
keyword[else] :
keyword[del] identifier[token_data] [ literal[string] ]
identifier[self] . identifier[access_token_store] . identifier[save_token] ( identifier[access_token] )
identifier[json_success_response] ( identifier[data] = identifier[token_data] , identifier[response] = identifier[response] )
keyword[return] identifier[response] | def process(self, request, response, environ):
"""
Create a new access token.
:param request: The incoming :class:`oauth2.web.Request`.
:param response: The :class:`oauth2.web.Response` that will be returned
to the client.
:param environ: A ``dict`` containing data of the environment.
:return: :class:`oauth2.web.Response`
"""
token_data = self.token_generator.create_access_token_data(self.refresh_grant_type)
expires_at = int(time.time()) + token_data['expires_in']
access_token = AccessToken(client_id=self.client.identifier, token=token_data['access_token'], grant_type=self.refresh_grant_type, data=self.data, expires_at=expires_at, scopes=self.scope_handler.scopes, user_id=self.user_id)
if self.reissue_refresh_tokens:
self.access_token_store.delete_refresh_token(self.refresh_token)
access_token.refresh_token = token_data['refresh_token']
refresh_expires_in = self.token_generator.refresh_expires_in
refresh_expires_at = int(time.time()) + refresh_expires_in
access_token.refresh_expires_at = refresh_expires_at # depends on [control=['if'], data=[]]
else:
del token_data['refresh_token']
self.access_token_store.save_token(access_token)
json_success_response(data=token_data, response=response)
return response |
def transform(self, Z):
"""TODO: rewrite docstring
Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
if isinstance(Z, DictRDD):
X = Z[:, 'X']
else:
X = Z
Zs = [_transform_one(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list]
X_rdd = reduce(lambda x, y: x.zip(y._rdd), Zs)
X_rdd = X_rdd.map(flatten)
mapper = np.hstack
for item in X_rdd.first():
if sp.issparse(item):
mapper = sp.hstack
X_rdd = X_rdd.map(lambda x: mapper(x))
if isinstance(Z, DictRDD):
return DictRDD([X_rdd, Z[:, 'y']],
columns=Z.columns,
dtype=Z.dtype,
bsize=Z.bsize)
else:
return X_rdd | def function[transform, parameter[self, Z]]:
    constant[Transform Z separately by each transformer, then concatenate the results.
    Parameters
    ----------
    Z : DictRDD or RDD of blocks, each of shape (n_samples, n_features)
        Input data to be transformed.
    Returns
    -------
    Z_t : DictRDD or RDD of blocks, each of shape (n_samples, sum_n_components)
        hstack of results of transformers. sum_n_components is the
        sum of n_components (output dimension) over transformers.
    ]
if call[name[isinstance], parameter[name[Z], name[DictRDD]]] begin[:]
variable[X] assign[=] call[name[Z]][tuple[[<ast.Slice object at 0x7da18f09f970>, <ast.Constant object at 0x7da20c991570>]]]
variable[Zs] assign[=] <ast.ListComp object at 0x7da20c993d00>
variable[X_rdd] assign[=] call[name[reduce], parameter[<ast.Lambda object at 0x7da20c991900>, name[Zs]]]
variable[X_rdd] assign[=] call[name[X_rdd].map, parameter[name[flatten]]]
variable[mapper] assign[=] name[np].hstack
for taget[name[item]] in starred[call[name[X_rdd].first, parameter[]]] begin[:]
if call[name[sp].issparse, parameter[name[item]]] begin[:]
variable[mapper] assign[=] name[sp].hstack
variable[X_rdd] assign[=] call[name[X_rdd].map, parameter[<ast.Lambda object at 0x7da18ede7a60>]]
if call[name[isinstance], parameter[name[Z], name[DictRDD]]] begin[:]
return[call[name[DictRDD], parameter[list[[<ast.Name object at 0x7da18ede57e0>, <ast.Subscript object at 0x7da18ede5d80>]]]]] | keyword[def] identifier[transform] ( identifier[self] , identifier[Z] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[Z] , identifier[DictRDD] ):
identifier[X] = identifier[Z] [:, literal[string] ]
keyword[else] :
identifier[X] = identifier[Z]
identifier[Zs] =[ identifier[_transform_one] ( identifier[trans] , identifier[name] , identifier[X] , identifier[self] . identifier[transformer_weights] )
keyword[for] identifier[name] , identifier[trans] keyword[in] identifier[self] . identifier[transformer_list] ]
identifier[X_rdd] = identifier[reduce] ( keyword[lambda] identifier[x] , identifier[y] : identifier[x] . identifier[zip] ( identifier[y] . identifier[_rdd] ), identifier[Zs] )
identifier[X_rdd] = identifier[X_rdd] . identifier[map] ( identifier[flatten] )
identifier[mapper] = identifier[np] . identifier[hstack]
keyword[for] identifier[item] keyword[in] identifier[X_rdd] . identifier[first] ():
keyword[if] identifier[sp] . identifier[issparse] ( identifier[item] ):
identifier[mapper] = identifier[sp] . identifier[hstack]
identifier[X_rdd] = identifier[X_rdd] . identifier[map] ( keyword[lambda] identifier[x] : identifier[mapper] ( identifier[x] ))
keyword[if] identifier[isinstance] ( identifier[Z] , identifier[DictRDD] ):
keyword[return] identifier[DictRDD] ([ identifier[X_rdd] , identifier[Z] [:, literal[string] ]],
identifier[columns] = identifier[Z] . identifier[columns] ,
identifier[dtype] = identifier[Z] . identifier[dtype] ,
identifier[bsize] = identifier[Z] . identifier[bsize] )
keyword[else] :
keyword[return] identifier[X_rdd] | def transform(self, Z):
"""TODO: rewrite docstring
Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
if isinstance(Z, DictRDD):
X = Z[:, 'X'] # depends on [control=['if'], data=[]]
else:
X = Z
Zs = [_transform_one(trans, name, X, self.transformer_weights) for (name, trans) in self.transformer_list]
X_rdd = reduce(lambda x, y: x.zip(y._rdd), Zs)
X_rdd = X_rdd.map(flatten)
mapper = np.hstack
for item in X_rdd.first():
if sp.issparse(item):
mapper = sp.hstack # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
X_rdd = X_rdd.map(lambda x: mapper(x))
if isinstance(Z, DictRDD):
return DictRDD([X_rdd, Z[:, 'y']], columns=Z.columns, dtype=Z.dtype, bsize=Z.bsize) # depends on [control=['if'], data=[]]
else:
return X_rdd |
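A hedged local analog of the per-block stacking step: transformer outputs are hstacked, and a single sparse block switches the mapper to scipy.sparse.hstack:

import numpy as np
import scipy.sparse as sp

blocks = [np.ones((4, 2)), sp.eye(4, 3)]
mapper = sp.hstack if any(sp.issparse(b) for b in blocks) else np.hstack
print(mapper(blocks).shape)  # (4, 5)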
def describe_api_resource(restApiId, path,
region=None, key=None, keyid=None, profile=None):
'''
    Given a rest api id and an absolute resource path, returns the matching
    resource (including its id) for the given path.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.describe_api_resource myapi_id resource_path
'''
r = describe_api_resources(restApiId, region=region, key=key, keyid=keyid, profile=profile)
resources = r.get('resources')
if resources is None:
return r
for resource in resources:
if resource['path'] == path:
return {'resource': resource}
return {'resource': None} | def function[describe_api_resource, parameter[restApiId, path, region, key, keyid, profile]]:
constant[
    Given a rest api id and an absolute resource path, returns the matching
    resource (including its id) for the given path.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.describe_api_resource myapi_id resource_path
]
variable[r] assign[=] call[name[describe_api_resources], parameter[name[restApiId]]]
variable[resources] assign[=] call[name[r].get, parameter[constant[resources]]]
if compare[name[resources] is constant[None]] begin[:]
return[name[r]]
for taget[name[resource]] in starred[name[resources]] begin[:]
if compare[call[name[resource]][constant[path]] equal[==] name[path]] begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b2162830>], [<ast.Name object at 0x7da1b21627a0>]]]
return[dictionary[[<ast.Constant object at 0x7da1b21638b0>], [<ast.Constant object at 0x7da1b2163760>]]] | keyword[def] identifier[describe_api_resource] ( identifier[restApiId] , identifier[path] ,
identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ):
literal[string]
identifier[r] = identifier[describe_api_resources] ( identifier[restApiId] , identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
identifier[resources] = identifier[r] . identifier[get] ( literal[string] )
keyword[if] identifier[resources] keyword[is] keyword[None] :
keyword[return] identifier[r]
keyword[for] identifier[resource] keyword[in] identifier[resources] :
keyword[if] identifier[resource] [ literal[string] ]== identifier[path] :
keyword[return] { literal[string] : identifier[resource] }
keyword[return] { literal[string] : keyword[None] } | def describe_api_resource(restApiId, path, region=None, key=None, keyid=None, profile=None):
"""
    Given a rest api id and an absolute resource path, returns the matching
    resource (including its id) for the given path.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.describe_api_resource myapi_id resource_path
"""
r = describe_api_resources(restApiId, region=region, key=key, keyid=keyid, profile=profile)
resources = r.get('resources')
if resources is None:
return r # depends on [control=['if'], data=[]]
for resource in resources:
if resource['path'] == path:
return {'resource': resource} # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['resource']]
return {'resource': None} |
def start_date(self) -> Optional[datetime.date]:
"""
Returns the start date of the set of intervals, or ``None`` if empty.
"""
if not self.intervals:
return None
return self.start_datetime().date() | def function[start_date, parameter[self]]:
constant[
Returns the start date of the set of intervals, or ``None`` if empty.
]
if <ast.UnaryOp object at 0x7da1b190e260> begin[:]
return[constant[None]]
return[call[call[name[self].start_datetime, parameter[]].date, parameter[]]] | keyword[def] identifier[start_date] ( identifier[self] )-> identifier[Optional] [ identifier[datetime] . identifier[date] ]:
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[intervals] :
keyword[return] keyword[None]
keyword[return] identifier[self] . identifier[start_datetime] (). identifier[date] () | def start_date(self) -> Optional[datetime.date]:
"""
Returns the start date of the set of intervals, or ``None`` if empty.
"""
if not self.intervals:
return None # depends on [control=['if'], data=[]]
return self.start_datetime().date() |
def from_json(json, cutout=None):
"""
Converts JSON to a python list of RAMON objects. if `cutout` is provided,
the `cutout` attribute of the RAMON object is populated. Otherwise, it's
left empty. `json` should be an ID-level dictionary, like so:
{
16: {
type: "segment",
metadata: {
. . .
}
},
}
NOTE: If more than one item is in the dictionary, then a Python list of
RAMON objects is returned instead of a single RAMON.
Arguments:
json (str or dict): The JSON to import to RAMON objects
cutout: Currently not supported.
Returns:
[RAMON]
"""
if type(json) is str:
json = jsonlib.loads(json)
out_ramons = []
for (rid, rdata) in six.iteritems(json):
_md = rdata['metadata']
r = AnnotationType.RAMON(rdata['type'])(
id=rid,
author=_md['author'],
status=_md['status'],
confidence=_md['confidence'],
kvpairs=copy.deepcopy(_md['kvpairs'])
)
if rdata['type'] == 'segment':
r.segmentclass = _md.get('segmentclass')
r.neuron = _md.get('neuron')
if 'synapses' in _md:
r.synapses = _md['synapses'][:]
if 'organelles' in _md:
r.organelles = _md['organelles'][:]
elif rdata['type'] in ['neuron', 'synapse']:
if 'segments' in _md:
r.segments = _md['segments'][:]
elif rdata['type'] == 'organelle':
r.organelle_class = _md['organelleclass'][:]
elif rdata['type'] == 'synapse':
r.synapse_type = _md.get('synapse_type')
r.weight = _md.get('weight')
out_ramons.append(r)
return out_ramons | def function[from_json, parameter[json, cutout]]:
constant[
Converts JSON to a python list of RAMON objects. if `cutout` is provided,
the `cutout` attribute of the RAMON object is populated. Otherwise, it's
left empty. `json` should be an ID-level dictionary, like so:
{
16: {
type: "segment",
metadata: {
. . .
}
},
}
NOTE: If more than one item is in the dictionary, then a Python list of
RAMON objects is returned instead of a single RAMON.
Arguments:
json (str or dict): The JSON to import to RAMON objects
cutout: Currently not supported.
Returns:
[RAMON]
]
if compare[call[name[type], parameter[name[json]]] is name[str]] begin[:]
variable[json] assign[=] call[name[jsonlib].loads, parameter[name[json]]]
variable[out_ramons] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b01dbac0>, <ast.Name object at 0x7da1b01dba90>]]] in starred[call[name[six].iteritems, parameter[name[json]]]] begin[:]
variable[_md] assign[=] call[name[rdata]][constant[metadata]]
variable[r] assign[=] call[call[name[AnnotationType].RAMON, parameter[call[name[rdata]][constant[type]]]], parameter[]]
if compare[call[name[rdata]][constant[type]] equal[==] constant[segment]] begin[:]
name[r].segmentclass assign[=] call[name[_md].get, parameter[constant[segmentclass]]]
name[r].neuron assign[=] call[name[_md].get, parameter[constant[neuron]]]
if compare[constant[synapses] in name[_md]] begin[:]
name[r].synapses assign[=] call[call[name[_md]][constant[synapses]]][<ast.Slice object at 0x7da1b01da470>]
if compare[constant[organelles] in name[_md]] begin[:]
name[r].organelles assign[=] call[call[name[_md]][constant[organelles]]][<ast.Slice object at 0x7da1b01da230>]
call[name[out_ramons].append, parameter[name[r]]]
return[name[out_ramons]] | keyword[def] identifier[from_json] ( identifier[json] , identifier[cutout] = keyword[None] ):
literal[string]
keyword[if] identifier[type] ( identifier[json] ) keyword[is] identifier[str] :
identifier[json] = identifier[jsonlib] . identifier[loads] ( identifier[json] )
identifier[out_ramons] =[]
keyword[for] ( identifier[rid] , identifier[rdata] ) keyword[in] identifier[six] . identifier[iteritems] ( identifier[json] ):
identifier[_md] = identifier[rdata] [ literal[string] ]
identifier[r] = identifier[AnnotationType] . identifier[RAMON] ( identifier[rdata] [ literal[string] ])(
identifier[id] = identifier[rid] ,
identifier[author] = identifier[_md] [ literal[string] ],
identifier[status] = identifier[_md] [ literal[string] ],
identifier[confidence] = identifier[_md] [ literal[string] ],
identifier[kvpairs] = identifier[copy] . identifier[deepcopy] ( identifier[_md] [ literal[string] ])
)
keyword[if] identifier[rdata] [ literal[string] ]== literal[string] :
identifier[r] . identifier[segmentclass] = identifier[_md] . identifier[get] ( literal[string] )
identifier[r] . identifier[neuron] = identifier[_md] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[_md] :
identifier[r] . identifier[synapses] = identifier[_md] [ literal[string] ][:]
keyword[if] literal[string] keyword[in] identifier[_md] :
identifier[r] . identifier[organelles] = identifier[_md] [ literal[string] ][:]
keyword[elif] identifier[rdata] [ literal[string] ] keyword[in] [ literal[string] , literal[string] ]:
keyword[if] literal[string] keyword[in] identifier[_md] :
identifier[r] . identifier[segments] = identifier[_md] [ literal[string] ][:]
keyword[elif] identifier[rdata] [ literal[string] ]== literal[string] :
identifier[r] . identifier[organelle_class] = identifier[_md] [ literal[string] ][:]
keyword[elif] identifier[rdata] [ literal[string] ]== literal[string] :
identifier[r] . identifier[synapse_type] = identifier[_md] . identifier[get] ( literal[string] )
identifier[r] . identifier[weight] = identifier[_md] . identifier[get] ( literal[string] )
identifier[out_ramons] . identifier[append] ( identifier[r] )
keyword[return] identifier[out_ramons] | def from_json(json, cutout=None):
"""
    Converts JSON to a Python list of RAMON objects. If `cutout` is provided,
the `cutout` attribute of the RAMON object is populated. Otherwise, it's
left empty. `json` should be an ID-level dictionary, like so:
{
16: {
type: "segment",
metadata: {
. . .
}
},
}
    NOTE: a Python list of RAMON objects is always returned, even when the
    dictionary contains only a single item.
Arguments:
json (str or dict): The JSON to import to RAMON objects
cutout: Currently not supported.
Returns:
[RAMON]
"""
if type(json) is str:
json = jsonlib.loads(json) # depends on [control=['if'], data=[]]
out_ramons = []
for (rid, rdata) in six.iteritems(json):
_md = rdata['metadata']
r = AnnotationType.RAMON(rdata['type'])(id=rid, author=_md['author'], status=_md['status'], confidence=_md['confidence'], kvpairs=copy.deepcopy(_md['kvpairs']))
if rdata['type'] == 'segment':
r.segmentclass = _md.get('segmentclass')
r.neuron = _md.get('neuron')
if 'synapses' in _md:
r.synapses = _md['synapses'][:] # depends on [control=['if'], data=['_md']]
if 'organelles' in _md:
r.organelles = _md['organelles'][:] # depends on [control=['if'], data=['_md']] # depends on [control=['if'], data=[]]
elif rdata['type'] in ['neuron', 'synapse']:
if 'segments' in _md:
r.segments = _md['segments'][:] # depends on [control=['if'], data=['_md']] # depends on [control=['if'], data=[]]
elif rdata['type'] == 'organelle':
r.organelle_class = _md['organelleclass'][:] # depends on [control=['if'], data=[]]
elif rdata['type'] == 'synapse':
r.synapse_type = _md.get('synapse_type')
r.weight = _md.get('weight') # depends on [control=['if'], data=[]]
out_ramons.append(r) # depends on [control=['for'], data=[]]
return out_ramons |
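A minimal usage sketch for from_json, assuming the ndio-style AnnotationType registry and RAMON classes it references are importable; the id and metadata keys below are illustrative. Note that the function always returns a list, even for a single entry.
import json as jsonlib
payload = jsonlib.dumps({
    "16": {
        "type": "segment",
        "metadata": {
            "author": "anon",          # hypothetical values throughout
            "status": 0,
            "confidence": 1.0,
            "kvpairs": {},
            "segmentclass": 1,
            "neuron": 3,
            "synapses": [7, 8],
        },
    },
})
ramons = from_json(payload)            # -> [RAMONSegment]; always a list
print(ramons[0].id, ramons[0].synapses)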
def trim(self):
"""Return the just data defined by the PE headers, removing any overlayed data."""
overlay_data_offset = self.get_overlay_data_start_offset()
if overlay_data_offset is not None:
            return self.__data__[:overlay_data_offset]
return self.__data__[:] | def function[trim, parameter[self]]:
    constant[Return just the data defined by the PE headers, removing any overlay data.]
variable[overlay_data_offset] assign[=] call[name[self].get_overlay_data_start_offset, parameter[]]
if compare[name[overlay_data_offset] is_not constant[None]] begin[:]
return[call[name[self].__data__][<ast.Slice object at 0x7da1b1d28760>]]
return[call[name[self].__data__][<ast.Slice object at 0x7da1b1d28d00>]] | keyword[def] identifier[trim] ( identifier[self] ):
literal[string]
identifier[overlay_data_offset] = identifier[self] . identifier[get_overlay_data_start_offset] ()
keyword[if] identifier[overlay_data_offset] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[__data__] [: identifier[overlay_data_offset] ]
keyword[return] identifier[self] . identifier[__data__] [:] | def trim(self):
"""Return the just data defined by the PE headers, removing any overlayed data."""
overlay_data_offset = self.get_overlay_data_start_offset()
if overlay_data_offset is not None:
return self.__data__[:overlay_data_offset] # depends on [control=['if'], data=['overlay_data_offset']]
return self.__data__[:] |
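A short pefile sketch showing where trim fits; the paths are placeholders:
import pefile
pe = pefile.PE("sample.exe")                  # placeholder input
clean = pe.trim()                             # bytes up to the overlay start
with open("sample.trimmed.exe", "wb") as out:
    out.write(clean)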
def _write_data(self, file):
""" Writes case data to file in ReStructuredText format.
"""
self.write_case_data(file)
file.write("Bus Data\n")
file.write("-" * 8 + "\n")
self.write_bus_data(file)
file.write("\n")
file.write("Branch Data\n")
file.write("-" * 11 + "\n")
self.write_branch_data(file)
file.write("\n")
file.write("Generator Data\n")
file.write("-" * 14 + "\n")
self.write_generator_data(file)
file.write("\n") | def function[_write_data, parameter[self, file]]:
constant[ Writes case data to file in ReStructuredText format.
]
call[name[self].write_case_data, parameter[name[file]]]
call[name[file].write, parameter[constant[Bus Data
]]]
call[name[file].write, parameter[binary_operation[binary_operation[constant[-] * constant[8]] + constant[
]]]]
call[name[self].write_bus_data, parameter[name[file]]]
call[name[file].write, parameter[constant[
]]]
call[name[file].write, parameter[constant[Branch Data
]]]
call[name[file].write, parameter[binary_operation[binary_operation[constant[-] * constant[11]] + constant[
]]]]
call[name[self].write_branch_data, parameter[name[file]]]
call[name[file].write, parameter[constant[
]]]
call[name[file].write, parameter[constant[Generator Data
]]]
call[name[file].write, parameter[binary_operation[binary_operation[constant[-] * constant[14]] + constant[
]]]]
call[name[self].write_generator_data, parameter[name[file]]]
call[name[file].write, parameter[constant[
]]] | keyword[def] identifier[_write_data] ( identifier[self] , identifier[file] ):
literal[string]
identifier[self] . identifier[write_case_data] ( identifier[file] )
identifier[file] . identifier[write] ( literal[string] )
identifier[file] . identifier[write] ( literal[string] * literal[int] + literal[string] )
identifier[self] . identifier[write_bus_data] ( identifier[file] )
identifier[file] . identifier[write] ( literal[string] )
identifier[file] . identifier[write] ( literal[string] )
identifier[file] . identifier[write] ( literal[string] * literal[int] + literal[string] )
identifier[self] . identifier[write_branch_data] ( identifier[file] )
identifier[file] . identifier[write] ( literal[string] )
identifier[file] . identifier[write] ( literal[string] )
identifier[file] . identifier[write] ( literal[string] * literal[int] + literal[string] )
identifier[self] . identifier[write_generator_data] ( identifier[file] )
identifier[file] . identifier[write] ( literal[string] ) | def _write_data(self, file):
""" Writes case data to file in ReStructuredText format.
"""
self.write_case_data(file)
file.write('Bus Data\n')
file.write('-' * 8 + '\n')
self.write_bus_data(file)
file.write('\n')
file.write('Branch Data\n')
file.write('-' * 11 + '\n')
self.write_branch_data(file)
file.write('\n')
file.write('Generator Data\n')
file.write('-' * 14 + '\n')
self.write_generator_data(file)
file.write('\n') |
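A sketch of driving _write_data with an in-memory buffer; writer stands in for an instance of the (not shown) class that defines the write_* helpers used above:
from io import StringIO
buf = StringIO()
writer._write_data(buf)
print(buf.getvalue())    # "Bus Data", its dashed underline, and the other sections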
def lookup_symbol(self,
symbol,
as_of_date,
fuzzy=False,
country_code=None):
"""Lookup an equity by symbol.
Parameters
----------
symbol : str
The ticker symbol to resolve.
as_of_date : datetime or None
Look up the last owner of this symbol as of this datetime.
If ``as_of_date`` is None, then this can only resolve the equity
if exactly one equity has ever owned the ticker.
fuzzy : bool, optional
Should fuzzy symbol matching be used? Fuzzy symbol matching
attempts to resolve differences in representations for
shareclasses. For example, some people may represent the ``A``
shareclass of ``BRK`` as ``BRK.A``, where others could write
``BRK_A``.
country_code : str or None, optional
The country to limit searches to. If not provided, the search will
span all countries which increases the likelihood of an ambiguous
lookup.
Returns
-------
equity : Equity
The equity that held ``symbol`` on the given ``as_of_date``, or the
only equity to hold ``symbol`` if ``as_of_date`` is None.
Raises
------
SymbolNotFound
Raised when no equity has ever held the given symbol.
MultipleSymbolsFound
Raised when no ``as_of_date`` is given and more than one equity
has held ``symbol``. This is also raised when ``fuzzy=True`` and
there are multiple candidates for the given ``symbol`` on the
``as_of_date``. Also raised when no ``country_code`` is given and
the symbol is ambiguous across multiple countries.
"""
if symbol is None:
raise TypeError("Cannot lookup asset for symbol of None for "
"as of date %s." % as_of_date)
if fuzzy:
f = self._lookup_symbol_fuzzy
mapping = self._choose_fuzzy_symbol_ownership_map(country_code)
else:
f = self._lookup_symbol_strict
mapping = self._choose_symbol_ownership_map(country_code)
if mapping is None:
raise SymbolNotFound(symbol=symbol)
return f(
mapping,
country_code is None,
symbol,
as_of_date,
) | def function[lookup_symbol, parameter[self, symbol, as_of_date, fuzzy, country_code]]:
constant[Lookup an equity by symbol.
Parameters
----------
symbol : str
The ticker symbol to resolve.
as_of_date : datetime or None
Look up the last owner of this symbol as of this datetime.
If ``as_of_date`` is None, then this can only resolve the equity
if exactly one equity has ever owned the ticker.
fuzzy : bool, optional
Should fuzzy symbol matching be used? Fuzzy symbol matching
attempts to resolve differences in representations for
shareclasses. For example, some people may represent the ``A``
shareclass of ``BRK`` as ``BRK.A``, where others could write
``BRK_A``.
country_code : str or None, optional
The country to limit searches to. If not provided, the search will
span all countries which increases the likelihood of an ambiguous
lookup.
Returns
-------
equity : Equity
The equity that held ``symbol`` on the given ``as_of_date``, or the
only equity to hold ``symbol`` if ``as_of_date`` is None.
Raises
------
SymbolNotFound
Raised when no equity has ever held the given symbol.
MultipleSymbolsFound
Raised when no ``as_of_date`` is given and more than one equity
has held ``symbol``. This is also raised when ``fuzzy=True`` and
there are multiple candidates for the given ``symbol`` on the
``as_of_date``. Also raised when no ``country_code`` is given and
the symbol is ambiguous across multiple countries.
]
if compare[name[symbol] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b2067190>
if name[fuzzy] begin[:]
variable[f] assign[=] name[self]._lookup_symbol_fuzzy
variable[mapping] assign[=] call[name[self]._choose_fuzzy_symbol_ownership_map, parameter[name[country_code]]]
if compare[name[mapping] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b2005540>
return[call[name[f], parameter[name[mapping], compare[name[country_code] is constant[None]], name[symbol], name[as_of_date]]]] | keyword[def] identifier[lookup_symbol] ( identifier[self] ,
identifier[symbol] ,
identifier[as_of_date] ,
identifier[fuzzy] = keyword[False] ,
identifier[country_code] = keyword[None] ):
literal[string]
keyword[if] identifier[symbol] keyword[is] keyword[None] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] % identifier[as_of_date] )
keyword[if] identifier[fuzzy] :
identifier[f] = identifier[self] . identifier[_lookup_symbol_fuzzy]
identifier[mapping] = identifier[self] . identifier[_choose_fuzzy_symbol_ownership_map] ( identifier[country_code] )
keyword[else] :
identifier[f] = identifier[self] . identifier[_lookup_symbol_strict]
identifier[mapping] = identifier[self] . identifier[_choose_symbol_ownership_map] ( identifier[country_code] )
keyword[if] identifier[mapping] keyword[is] keyword[None] :
keyword[raise] identifier[SymbolNotFound] ( identifier[symbol] = identifier[symbol] )
keyword[return] identifier[f] (
identifier[mapping] ,
identifier[country_code] keyword[is] keyword[None] ,
identifier[symbol] ,
identifier[as_of_date] ,
) | def lookup_symbol(self, symbol, as_of_date, fuzzy=False, country_code=None):
"""Lookup an equity by symbol.
Parameters
----------
symbol : str
The ticker symbol to resolve.
as_of_date : datetime or None
Look up the last owner of this symbol as of this datetime.
If ``as_of_date`` is None, then this can only resolve the equity
if exactly one equity has ever owned the ticker.
fuzzy : bool, optional
Should fuzzy symbol matching be used? Fuzzy symbol matching
attempts to resolve differences in representations for
shareclasses. For example, some people may represent the ``A``
shareclass of ``BRK`` as ``BRK.A``, where others could write
``BRK_A``.
country_code : str or None, optional
The country to limit searches to. If not provided, the search will
span all countries which increases the likelihood of an ambiguous
lookup.
Returns
-------
equity : Equity
The equity that held ``symbol`` on the given ``as_of_date``, or the
only equity to hold ``symbol`` if ``as_of_date`` is None.
Raises
------
SymbolNotFound
Raised when no equity has ever held the given symbol.
MultipleSymbolsFound
Raised when no ``as_of_date`` is given and more than one equity
has held ``symbol``. This is also raised when ``fuzzy=True`` and
there are multiple candidates for the given ``symbol`` on the
``as_of_date``. Also raised when no ``country_code`` is given and
the symbol is ambiguous across multiple countries.
"""
if symbol is None:
raise TypeError('Cannot lookup asset for symbol of None for as of date %s.' % as_of_date) # depends on [control=['if'], data=[]]
if fuzzy:
f = self._lookup_symbol_fuzzy
mapping = self._choose_fuzzy_symbol_ownership_map(country_code) # depends on [control=['if'], data=[]]
else:
f = self._lookup_symbol_strict
mapping = self._choose_symbol_ownership_map(country_code)
if mapping is None:
raise SymbolNotFound(symbol=symbol) # depends on [control=['if'], data=[]]
return f(mapping, country_code is None, symbol, as_of_date) |
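An illustrative call, assuming finder is an instance of the zipline-style asset finder this method belongs to; the symbol and date are placeholders:
import pandas as pd
equity = finder.lookup_symbol(
    "BRK_A",
    as_of_date=pd.Timestamp("2014-01-02"),
    fuzzy=True,                # resolves BRK.A / BRK_A shareclass spellings
    country_code="US",         # restrict the search to a single country
)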
def sql_index(self,index_name,column_names,unique=True):
"""Add a named index on given columns to improve performance."""
if type(column_names) == str:
column_names = [column_names]
try:
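            # len() raises TypeError for non-sequence input, and an empty list
            # raises it explicitly, so both cases funnel into the ValueError below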
if len(column_names) == 0:
raise TypeError
except TypeError:
raise ValueError("Provide a list of column names for an index.")
if unique:
UNIQUE = "UNIQUE"
else:
UNIQUE = ""
table_name = self.name
columns = ",".join(column_names)
SQL = """CREATE %(UNIQUE)s INDEX %(index_name)s ON %(table_name)s """\
"""(%(columns)s)""" % locals()
self.sql(SQL) | def function[sql_index, parameter[self, index_name, column_names, unique]]:
constant[Add a named index on given columns to improve performance.]
if compare[call[name[type], parameter[name[column_names]]] equal[==] name[str]] begin[:]
variable[column_names] assign[=] list[[<ast.Name object at 0x7da1b28fe320>]]
<ast.Try object at 0x7da1b28fdea0>
if name[unique] begin[:]
variable[UNIQUE] assign[=] constant[UNIQUE]
variable[table_name] assign[=] name[self].name
variable[columns] assign[=] call[constant[,].join, parameter[name[column_names]]]
variable[SQL] assign[=] binary_operation[constant[CREATE %(UNIQUE)s INDEX %(index_name)s ON %(table_name)s (%(columns)s)] <ast.Mod object at 0x7da2590d6920> call[name[locals], parameter[]]]
call[name[self].sql, parameter[name[SQL]]] | keyword[def] identifier[sql_index] ( identifier[self] , identifier[index_name] , identifier[column_names] , identifier[unique] = keyword[True] ):
literal[string]
keyword[if] identifier[type] ( identifier[column_names] )== identifier[str] :
identifier[column_names] =[ identifier[column_names] ]
keyword[try] :
keyword[if] identifier[len] ( identifier[column_names] )== literal[int] :
keyword[raise] identifier[TypeError]
keyword[except] identifier[TypeError] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[unique] :
identifier[UNIQUE] = literal[string]
keyword[else] :
identifier[UNIQUE] = literal[string]
identifier[table_name] = identifier[self] . identifier[name]
identifier[columns] = literal[string] . identifier[join] ( identifier[column_names] )
identifier[SQL] = literal[string] literal[string] % identifier[locals] ()
identifier[self] . identifier[sql] ( identifier[SQL] ) | def sql_index(self, index_name, column_names, unique=True):
"""Add a named index on given columns to improve performance."""
if type(column_names) == str:
column_names = [column_names] # depends on [control=['if'], data=[]]
try:
if len(column_names) == 0:
raise TypeError # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except TypeError:
raise ValueError('Provide a list of column names for an index.') # depends on [control=['except'], data=[]]
if unique:
UNIQUE = 'UNIQUE' # depends on [control=['if'], data=[]]
else:
UNIQUE = ''
table_name = self.name
columns = ','.join(column_names)
SQL = 'CREATE %(UNIQUE)s INDEX %(index_name)s ON %(table_name)s (%(columns)s)' % locals()
self.sql(SQL) |
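Usage sketch (table stands in for the object defining sql_index). Since the statement is assembled with % locals() rather than bound parameters, the index, table, and column names must be trusted identifiers:
table.sql_index("idx_user_email", ["user_id", "email"], unique=True)
# runs: CREATE UNIQUE INDEX idx_user_email ON <table.name> (user_id,email)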
def from_file(cls, f):
"""Load vocab from a file.
:param (file) f: a file object, e.g. as returned by calling `open`
:return: a vocab object. The 0th line of the file is assigned to index 0, and so on...
"""
word2index = {}
counts = Counter()
for i, line in enumerate(f):
word, count_str = line.split('\t')
word = word.decode('utf-8')
word2index[word] = i
counts[word] = float(count_str)
if i == 0:
unk = word
return cls.from_dict(word2index, unk, counts) | def function[from_file, parameter[cls, f]]:
constant[Load vocab from a file.
:param (file) f: a file object, e.g. as returned by calling `open`
:return: a vocab object. The 0th line of the file is assigned to index 0, and so on...
]
variable[word2index] assign[=] dictionary[[], []]
variable[counts] assign[=] call[name[Counter], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b101a530>, <ast.Name object at 0x7da1b10193c0>]]] in starred[call[name[enumerate], parameter[name[f]]]] begin[:]
<ast.Tuple object at 0x7da1b101b550> assign[=] call[name[line].split, parameter[constant[ ]]]
variable[word] assign[=] call[name[word].decode, parameter[constant[utf-8]]]
call[name[word2index]][name[word]] assign[=] name[i]
call[name[counts]][name[word]] assign[=] call[name[float], parameter[name[count_str]]]
if compare[name[i] equal[==] constant[0]] begin[:]
variable[unk] assign[=] name[word]
return[call[name[cls].from_dict, parameter[name[word2index], name[unk], name[counts]]]] | keyword[def] identifier[from_file] ( identifier[cls] , identifier[f] ):
literal[string]
identifier[word2index] ={}
identifier[counts] = identifier[Counter] ()
keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[f] ):
identifier[word] , identifier[count_str] = identifier[line] . identifier[split] ( literal[string] )
identifier[word] = identifier[word] . identifier[decode] ( literal[string] )
identifier[word2index] [ identifier[word] ]= identifier[i]
identifier[counts] [ identifier[word] ]= identifier[float] ( identifier[count_str] )
keyword[if] identifier[i] == literal[int] :
identifier[unk] = identifier[word]
keyword[return] identifier[cls] . identifier[from_dict] ( identifier[word2index] , identifier[unk] , identifier[counts] ) | def from_file(cls, f):
"""Load vocab from a file.
:param (file) f: a file object, e.g. as returned by calling `open`
:return: a vocab object. The 0th line of the file is assigned to index 0, and so on...
"""
word2index = {}
counts = Counter()
for (i, line) in enumerate(f):
(word, count_str) = line.split('\t')
word = word.decode('utf-8')
word2index[word] = i
counts[word] = float(count_str)
if i == 0:
unk = word # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return cls.from_dict(word2index, unk, counts) |
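A sketch of the expected input: one word per line, tab-separated from a count, with the unknown token on line 0. The .decode('utf-8') call implies a bytes-mode file object (Python 2 era code); Vocab stands in for the defining class:
# vocab.txt -- word<TAB>count, with <unk> first:
#   <unk>\t0
#   the\t1093.0
with open("vocab.txt", "rb") as f:    # bytes mode so .decode('utf-8') works
    vocab = Vocab.from_file(f)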
def parseDoc(self, doc_str, format="xml"):
"""Parse a OAI-ORE Resource Maps document.
See Also: ``rdflib.ConjunctiveGraph.parse`` for documentation on arguments.
"""
self.parse(data=doc_str, format=format)
self._ore_initialized = True
return self | def function[parseDoc, parameter[self, doc_str, format]]:
    constant[Parse an OAI-ORE Resource Map document.
See Also: ``rdflib.ConjunctiveGraph.parse`` for documentation on arguments.
]
call[name[self].parse, parameter[]]
name[self]._ore_initialized assign[=] constant[True]
return[name[self]] | keyword[def] identifier[parseDoc] ( identifier[self] , identifier[doc_str] , identifier[format] = literal[string] ):
literal[string]
identifier[self] . identifier[parse] ( identifier[data] = identifier[doc_str] , identifier[format] = identifier[format] )
identifier[self] . identifier[_ore_initialized] = keyword[True]
keyword[return] identifier[self] | def parseDoc(self, doc_str, format='xml'):
"""Parse a OAI-ORE Resource Maps document.
See Also: ``rdflib.ConjunctiveGraph.parse`` for documentation on arguments.
"""
self.parse(data=doc_str, format=format)
self._ore_initialized = True
return self |
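Sketch, assuming the mixin sits on an rdflib ConjunctiveGraph subclass as the docstring suggests; the filename is a placeholder:
with open("resource_map.xml") as fh:
    graph = graph.parseDoc(fh.read(), format="xml")   # returns self, now populated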
def _decompose_pattern(self, pattern):
"""
Given a path pattern with format declaration, generates a
four-tuple (glob_pattern, regexp pattern, fields, type map)
"""
sep = '~lancet~sep~'
float_codes = ['e','E','f', 'F','g', 'G', 'n']
typecodes = dict([(k,float) for k in float_codes]
+ [('b',bin), ('d',int), ('o',oct), ('x',hex)])
parse = list(string.Formatter().parse(pattern))
text, fields, codes, _ = zip(*parse)
# Finding the field types from format string
types = []
for (field, code) in zip(fields, codes):
if code in ['', None]: continue
constructor = typecodes.get(code[-1], None)
if constructor: types += [(field, constructor)]
stars = ['' if not f else '*' for f in fields]
globpat = ''.join(text+star for (text,star) in zip(text,stars))
refields = ['' if not f else sep+('(?P<%s>.*?)'% f)+sep for f in fields]
parts = ''.join(text+group for (text,group) in zip(text, refields)).split(sep)
for i in range(0, len(parts), 2): parts[i] = re.escape(parts[i])
regexp_pattern = ''.join(parts).replace('\\*','.*')
fields = list(f for f in fields if f)
return globpat, regexp_pattern , fields, dict(types) | def function[_decompose_pattern, parameter[self, pattern]]:
constant[
Given a path pattern with format declaration, generates a
four-tuple (glob_pattern, regexp pattern, fields, type map)
]
variable[sep] assign[=] constant[~lancet~sep~]
variable[float_codes] assign[=] list[[<ast.Constant object at 0x7da1afe193c0>, <ast.Constant object at 0x7da1afe1a9e0>, <ast.Constant object at 0x7da1afe19690>, <ast.Constant object at 0x7da1afe1a8c0>, <ast.Constant object at 0x7da1afe191e0>, <ast.Constant object at 0x7da1afe1a830>, <ast.Constant object at 0x7da1afe18580>]]
variable[typecodes] assign[=] call[name[dict], parameter[binary_operation[<ast.ListComp object at 0x7da1afe18040> + list[[<ast.Tuple object at 0x7da1afe191b0>, <ast.Tuple object at 0x7da1afe19db0>, <ast.Tuple object at 0x7da1afe19360>, <ast.Tuple object at 0x7da1afe1b9d0>]]]]]
variable[parse] assign[=] call[name[list], parameter[call[call[name[string].Formatter, parameter[]].parse, parameter[name[pattern]]]]]
<ast.Tuple object at 0x7da1afe1a020> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da1afe1a560>]]
variable[types] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1afe19f30>, <ast.Name object at 0x7da1afe1bcd0>]]] in starred[call[name[zip], parameter[name[fields], name[codes]]]] begin[:]
if compare[name[code] in list[[<ast.Constant object at 0x7da1afe1abc0>, <ast.Constant object at 0x7da1afe1a500>]]] begin[:]
continue
variable[constructor] assign[=] call[name[typecodes].get, parameter[call[name[code]][<ast.UnaryOp object at 0x7da1afe18820>], constant[None]]]
if name[constructor] begin[:]
<ast.AugAssign object at 0x7da1afe3bca0>
variable[stars] assign[=] <ast.ListComp object at 0x7da1afe38880>
variable[globpat] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da1afe3a2f0>]]
variable[refields] assign[=] <ast.ListComp object at 0x7da1afe1bf70>
variable[parts] assign[=] call[call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da1afe19780>]].split, parameter[name[sep]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[parts]]], constant[2]]]] begin[:]
call[name[parts]][name[i]] assign[=] call[name[re].escape, parameter[call[name[parts]][name[i]]]]
variable[regexp_pattern] assign[=] call[call[constant[].join, parameter[name[parts]]].replace, parameter[constant[\*], constant[.*]]]
variable[fields] assign[=] call[name[list], parameter[<ast.GeneratorExp object at 0x7da1afe0ee30>]]
return[tuple[[<ast.Name object at 0x7da1afe0f040>, <ast.Name object at 0x7da1afe0f130>, <ast.Name object at 0x7da1afe0c460>, <ast.Call object at 0x7da1afe0fc10>]]] | keyword[def] identifier[_decompose_pattern] ( identifier[self] , identifier[pattern] ):
literal[string]
identifier[sep] = literal[string]
identifier[float_codes] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[typecodes] = identifier[dict] ([( identifier[k] , identifier[float] ) keyword[for] identifier[k] keyword[in] identifier[float_codes] ]
+[( literal[string] , identifier[bin] ),( literal[string] , identifier[int] ),( literal[string] , identifier[oct] ),( literal[string] , identifier[hex] )])
identifier[parse] = identifier[list] ( identifier[string] . identifier[Formatter] (). identifier[parse] ( identifier[pattern] ))
identifier[text] , identifier[fields] , identifier[codes] , identifier[_] = identifier[zip] (* identifier[parse] )
identifier[types] =[]
keyword[for] ( identifier[field] , identifier[code] ) keyword[in] identifier[zip] ( identifier[fields] , identifier[codes] ):
keyword[if] identifier[code] keyword[in] [ literal[string] , keyword[None] ]: keyword[continue]
identifier[constructor] = identifier[typecodes] . identifier[get] ( identifier[code] [- literal[int] ], keyword[None] )
keyword[if] identifier[constructor] : identifier[types] +=[( identifier[field] , identifier[constructor] )]
identifier[stars] =[ literal[string] keyword[if] keyword[not] identifier[f] keyword[else] literal[string] keyword[for] identifier[f] keyword[in] identifier[fields] ]
identifier[globpat] = literal[string] . identifier[join] ( identifier[text] + identifier[star] keyword[for] ( identifier[text] , identifier[star] ) keyword[in] identifier[zip] ( identifier[text] , identifier[stars] ))
identifier[refields] =[ literal[string] keyword[if] keyword[not] identifier[f] keyword[else] identifier[sep] +( literal[string] % identifier[f] )+ identifier[sep] keyword[for] identifier[f] keyword[in] identifier[fields] ]
identifier[parts] = literal[string] . identifier[join] ( identifier[text] + identifier[group] keyword[for] ( identifier[text] , identifier[group] ) keyword[in] identifier[zip] ( identifier[text] , identifier[refields] )). identifier[split] ( identifier[sep] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[parts] ), literal[int] ): identifier[parts] [ identifier[i] ]= identifier[re] . identifier[escape] ( identifier[parts] [ identifier[i] ])
identifier[regexp_pattern] = literal[string] . identifier[join] ( identifier[parts] ). identifier[replace] ( literal[string] , literal[string] )
identifier[fields] = identifier[list] ( identifier[f] keyword[for] identifier[f] keyword[in] identifier[fields] keyword[if] identifier[f] )
keyword[return] identifier[globpat] , identifier[regexp_pattern] , identifier[fields] , identifier[dict] ( identifier[types] ) | def _decompose_pattern(self, pattern):
"""
Given a path pattern with format declaration, generates a
four-tuple (glob_pattern, regexp pattern, fields, type map)
"""
sep = '~lancet~sep~'
float_codes = ['e', 'E', 'f', 'F', 'g', 'G', 'n']
typecodes = dict([(k, float) for k in float_codes] + [('b', bin), ('d', int), ('o', oct), ('x', hex)])
parse = list(string.Formatter().parse(pattern))
(text, fields, codes, _) = zip(*parse)
# Finding the field types from format string
types = []
for (field, code) in zip(fields, codes):
if code in ['', None]:
continue # depends on [control=['if'], data=[]]
constructor = typecodes.get(code[-1], None)
if constructor:
types += [(field, constructor)] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
stars = ['' if not f else '*' for f in fields]
globpat = ''.join((text + star for (text, star) in zip(text, stars)))
refields = ['' if not f else sep + '(?P<%s>.*?)' % f + sep for f in fields]
parts = ''.join((text + group for (text, group) in zip(text, refields))).split(sep)
for i in range(0, len(parts), 2):
parts[i] = re.escape(parts[i]) # depends on [control=['for'], data=['i']]
regexp_pattern = ''.join(parts).replace('\\*', '.*')
fields = list((f for f in fields if f))
return (globpat, regexp_pattern, fields, dict(types)) |
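A worked example of the four-tuple, traced by hand for a hypothetical pattern (obj stands in for an instance of the defining class):
gp, rx, fl, ty = obj._decompose_pattern("{name}_{index:d}.npz")
# gp == '*_*.npz'                              glob pattern
# rx == '(?P<name>.*?)_(?P<index>.*?)\\.npz'   regexp with named groups
# fl == ['name', 'index']
# ty == {'index': int}                         type code 'd' maps to int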
def verify_and_get_components_ids(topic_id, components_ids, component_types,
db_conn=None):
"""Process some verifications of the provided components ids."""
db_conn = db_conn or flask.g.db_conn
if len(components_ids) != len(component_types):
msg = 'The number of component ids does not match the number ' \
'of component types %s' % component_types
raise dci_exc.DCIException(msg, status_code=412)
# get the components from their ids
schedule_component_types = set()
for c_id in components_ids:
where_clause = sql.and_(models.COMPONENTS.c.id == c_id,
models.COMPONENTS.c.topic_id == topic_id,
models.COMPONENTS.c.export_control == True, # noqa
models.COMPONENTS.c.state == 'active')
query = (sql.select([models.COMPONENTS])
.where(where_clause))
cmpt = db_conn.execute(query).fetchone()
if cmpt is None:
msg = 'Component id %s not found or not exported' % c_id
raise dci_exc.DCIException(msg, status_code=412)
cmpt = dict(cmpt)
if cmpt['type'] in schedule_component_types:
msg = ('Component types malformed: type %s duplicated.' %
cmpt['type'])
raise dci_exc.DCIException(msg, status_code=412)
schedule_component_types.add(cmpt['type'])
return components_ids | def function[verify_and_get_components_ids, parameter[topic_id, components_ids, component_types, db_conn]]:
    constant[Run several verifications on the provided component ids.]
variable[db_conn] assign[=] <ast.BoolOp object at 0x7da1b0fc6b30>
if compare[call[name[len], parameter[name[components_ids]]] not_equal[!=] call[name[len], parameter[name[component_types]]]] begin[:]
variable[msg] assign[=] binary_operation[constant[The number of component ids does not match the number of component types %s] <ast.Mod object at 0x7da2590d6920> name[component_types]]
<ast.Raise object at 0x7da1b0fc58d0>
variable[schedule_component_types] assign[=] call[name[set], parameter[]]
for taget[name[c_id]] in starred[name[components_ids]] begin[:]
variable[where_clause] assign[=] call[name[sql].and_, parameter[compare[name[models].COMPONENTS.c.id equal[==] name[c_id]], compare[name[models].COMPONENTS.c.topic_id equal[==] name[topic_id]], compare[name[models].COMPONENTS.c.export_control equal[==] constant[True]], compare[name[models].COMPONENTS.c.state equal[==] constant[active]]]]
variable[query] assign[=] call[call[name[sql].select, parameter[list[[<ast.Attribute object at 0x7da1b0d1e860>]]]].where, parameter[name[where_clause]]]
variable[cmpt] assign[=] call[call[name[db_conn].execute, parameter[name[query]]].fetchone, parameter[]]
if compare[name[cmpt] is constant[None]] begin[:]
variable[msg] assign[=] binary_operation[constant[Component id %s not found or not exported] <ast.Mod object at 0x7da2590d6920> name[c_id]]
<ast.Raise object at 0x7da1b0d1e0b0>
variable[cmpt] assign[=] call[name[dict], parameter[name[cmpt]]]
if compare[call[name[cmpt]][constant[type]] in name[schedule_component_types]] begin[:]
variable[msg] assign[=] binary_operation[constant[Component types malformed: type %s duplicated.] <ast.Mod object at 0x7da2590d6920> call[name[cmpt]][constant[type]]]
<ast.Raise object at 0x7da1b0d1dcc0>
call[name[schedule_component_types].add, parameter[call[name[cmpt]][constant[type]]]]
return[name[components_ids]] | keyword[def] identifier[verify_and_get_components_ids] ( identifier[topic_id] , identifier[components_ids] , identifier[component_types] ,
identifier[db_conn] = keyword[None] ):
literal[string]
identifier[db_conn] = identifier[db_conn] keyword[or] identifier[flask] . identifier[g] . identifier[db_conn]
keyword[if] identifier[len] ( identifier[components_ids] )!= identifier[len] ( identifier[component_types] ):
identifier[msg] = literal[string] literal[string] % identifier[component_types]
keyword[raise] identifier[dci_exc] . identifier[DCIException] ( identifier[msg] , identifier[status_code] = literal[int] )
identifier[schedule_component_types] = identifier[set] ()
keyword[for] identifier[c_id] keyword[in] identifier[components_ids] :
identifier[where_clause] = identifier[sql] . identifier[and_] ( identifier[models] . identifier[COMPONENTS] . identifier[c] . identifier[id] == identifier[c_id] ,
identifier[models] . identifier[COMPONENTS] . identifier[c] . identifier[topic_id] == identifier[topic_id] ,
identifier[models] . identifier[COMPONENTS] . identifier[c] . identifier[export_control] == keyword[True] ,
identifier[models] . identifier[COMPONENTS] . identifier[c] . identifier[state] == literal[string] )
identifier[query] =( identifier[sql] . identifier[select] ([ identifier[models] . identifier[COMPONENTS] ])
. identifier[where] ( identifier[where_clause] ))
identifier[cmpt] = identifier[db_conn] . identifier[execute] ( identifier[query] ). identifier[fetchone] ()
keyword[if] identifier[cmpt] keyword[is] keyword[None] :
identifier[msg] = literal[string] % identifier[c_id]
keyword[raise] identifier[dci_exc] . identifier[DCIException] ( identifier[msg] , identifier[status_code] = literal[int] )
identifier[cmpt] = identifier[dict] ( identifier[cmpt] )
keyword[if] identifier[cmpt] [ literal[string] ] keyword[in] identifier[schedule_component_types] :
identifier[msg] =( literal[string] %
identifier[cmpt] [ literal[string] ])
keyword[raise] identifier[dci_exc] . identifier[DCIException] ( identifier[msg] , identifier[status_code] = literal[int] )
identifier[schedule_component_types] . identifier[add] ( identifier[cmpt] [ literal[string] ])
keyword[return] identifier[components_ids] | def verify_and_get_components_ids(topic_id, components_ids, component_types, db_conn=None):
"""Process some verifications of the provided components ids."""
db_conn = db_conn or flask.g.db_conn
if len(components_ids) != len(component_types):
msg = 'The number of component ids does not match the number of component types %s' % component_types
raise dci_exc.DCIException(msg, status_code=412) # depends on [control=['if'], data=[]]
# get the components from their ids
schedule_component_types = set()
for c_id in components_ids: # noqa
where_clause = sql.and_(models.COMPONENTS.c.id == c_id, models.COMPONENTS.c.topic_id == topic_id, models.COMPONENTS.c.export_control == True, models.COMPONENTS.c.state == 'active')
query = sql.select([models.COMPONENTS]).where(where_clause)
cmpt = db_conn.execute(query).fetchone()
if cmpt is None:
msg = 'Component id %s not found or not exported' % c_id
raise dci_exc.DCIException(msg, status_code=412) # depends on [control=['if'], data=[]]
cmpt = dict(cmpt)
if cmpt['type'] in schedule_component_types:
msg = 'Component types malformed: type %s duplicated.' % cmpt['type']
raise dci_exc.DCIException(msg, status_code=412) # depends on [control=['if'], data=[]]
schedule_component_types.add(cmpt['type']) # depends on [control=['for'], data=['c_id']]
return components_ids |
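An illustrative call inside a Flask request context; the ids and type names are placeholders, and any failed check raises dci_exc.DCIException with HTTP status 412:
ids = verify_and_get_components_ids(
    topic_id,
    components_ids=[first_id, second_id],
    component_types=["type-a", "type-b"],   # counts must match, types unique
)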
def jsonify_timedelta(value):
"""Converts a `datetime.timedelta` to an ISO 8601 duration
string for JSON-ification.
:param value: something to convert
:type value: datetime.timedelta
:return: the value after conversion
    :rtype: unicode
"""
assert isinstance(value, datetime.timedelta)
# split seconds to larger units
seconds = value.total_seconds()
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
days, hours, minutes = map(int, (days, hours, minutes))
seconds = round(seconds, 6)
# build date
date = ''
if days:
date = '%sD' % days
# build time
time = u'T'
# hours
bigger_exists = date or hours
if bigger_exists:
time += '{:02}H'.format(hours)
# minutes
bigger_exists = bigger_exists or minutes
if bigger_exists:
time += '{:02}M'.format(minutes)
# seconds
if seconds.is_integer():
seconds = '{:02}'.format(int(seconds))
else:
# 9 chars long w/leading 0, 6 digits after decimal
seconds = '%09.6f' % seconds
# remove trailing zeros
seconds = seconds.rstrip('0')
time += '{}S'.format(seconds)
return u'P' + date + time | def function[jsonify_timedelta, parameter[value]]:
constant[Converts a `datetime.timedelta` to an ISO 8601 duration
string for JSON-ification.
:param value: something to convert
:type value: datetime.timedelta
:return: the value after conversion
    :rtype: unicode
]
assert[call[name[isinstance], parameter[name[value], name[datetime].timedelta]]]
variable[seconds] assign[=] call[name[value].total_seconds, parameter[]]
<ast.Tuple object at 0x7da1b0e179a0> assign[=] call[name[divmod], parameter[name[seconds], constant[60]]]
<ast.Tuple object at 0x7da1b0e17640> assign[=] call[name[divmod], parameter[name[minutes], constant[60]]]
<ast.Tuple object at 0x7da1b0e14d90> assign[=] call[name[divmod], parameter[name[hours], constant[24]]]
<ast.Tuple object at 0x7da1b0e14b50> assign[=] call[name[map], parameter[name[int], tuple[[<ast.Name object at 0x7da1b0e15de0>, <ast.Name object at 0x7da1b0e14c70>, <ast.Name object at 0x7da1b0e16c20>]]]]
variable[seconds] assign[=] call[name[round], parameter[name[seconds], constant[6]]]
variable[date] assign[=] constant[]
if name[days] begin[:]
variable[date] assign[=] binary_operation[constant[%sD] <ast.Mod object at 0x7da2590d6920> name[days]]
variable[time] assign[=] constant[T]
variable[bigger_exists] assign[=] <ast.BoolOp object at 0x7da1b0e17850>
if name[bigger_exists] begin[:]
<ast.AugAssign object at 0x7da1b0e14190>
variable[bigger_exists] assign[=] <ast.BoolOp object at 0x7da1b0e14f40>
if name[bigger_exists] begin[:]
<ast.AugAssign object at 0x7da1b0e154e0>
if call[name[seconds].is_integer, parameter[]] begin[:]
variable[seconds] assign[=] call[constant[{:02}].format, parameter[call[name[int], parameter[name[seconds]]]]]
<ast.AugAssign object at 0x7da1b0e14640>
return[binary_operation[binary_operation[constant[P] + name[date]] + name[time]]] | keyword[def] identifier[jsonify_timedelta] ( identifier[value] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[value] , identifier[datetime] . identifier[timedelta] )
identifier[seconds] = identifier[value] . identifier[total_seconds] ()
identifier[minutes] , identifier[seconds] = identifier[divmod] ( identifier[seconds] , literal[int] )
identifier[hours] , identifier[minutes] = identifier[divmod] ( identifier[minutes] , literal[int] )
identifier[days] , identifier[hours] = identifier[divmod] ( identifier[hours] , literal[int] )
identifier[days] , identifier[hours] , identifier[minutes] = identifier[map] ( identifier[int] ,( identifier[days] , identifier[hours] , identifier[minutes] ))
identifier[seconds] = identifier[round] ( identifier[seconds] , literal[int] )
identifier[date] = literal[string]
keyword[if] identifier[days] :
identifier[date] = literal[string] % identifier[days]
identifier[time] = literal[string]
identifier[bigger_exists] = identifier[date] keyword[or] identifier[hours]
keyword[if] identifier[bigger_exists] :
identifier[time] += literal[string] . identifier[format] ( identifier[hours] )
identifier[bigger_exists] = identifier[bigger_exists] keyword[or] identifier[minutes]
keyword[if] identifier[bigger_exists] :
identifier[time] += literal[string] . identifier[format] ( identifier[minutes] )
keyword[if] identifier[seconds] . identifier[is_integer] ():
identifier[seconds] = literal[string] . identifier[format] ( identifier[int] ( identifier[seconds] ))
keyword[else] :
identifier[seconds] = literal[string] % identifier[seconds]
identifier[seconds] = identifier[seconds] . identifier[rstrip] ( literal[string] )
identifier[time] += literal[string] . identifier[format] ( identifier[seconds] )
keyword[return] literal[string] + identifier[date] + identifier[time] | def jsonify_timedelta(value):
"""Converts a `datetime.timedelta` to an ISO 8601 duration
string for JSON-ification.
:param value: something to convert
:type value: datetime.timedelta
:return: the value after conversion
    :rtype: unicode
"""
assert isinstance(value, datetime.timedelta)
# split seconds to larger units
seconds = value.total_seconds()
(minutes, seconds) = divmod(seconds, 60)
(hours, minutes) = divmod(minutes, 60)
(days, hours) = divmod(hours, 24)
(days, hours, minutes) = map(int, (days, hours, minutes))
seconds = round(seconds, 6)
# build date
date = ''
if days:
date = '%sD' % days # depends on [control=['if'], data=[]]
# build time
time = u'T'
# hours
bigger_exists = date or hours
if bigger_exists:
time += '{:02}H'.format(hours) # depends on [control=['if'], data=[]]
# minutes
bigger_exists = bigger_exists or minutes
if bigger_exists:
time += '{:02}M'.format(minutes) # depends on [control=['if'], data=[]]
# seconds
if seconds.is_integer():
seconds = '{:02}'.format(int(seconds)) # depends on [control=['if'], data=[]]
else:
# 9 chars long w/leading 0, 6 digits after decimal
seconds = '%09.6f' % seconds
# remove trailing zeros
seconds = seconds.rstrip('0')
time += '{}S'.format(seconds)
return u'P' + date + time |
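Two hand-checked examples of the output format:
import datetime
jsonify_timedelta(datetime.timedelta(days=2, hours=3, minutes=4, seconds=5.5))
# -> u'P2DT03H04M05.5S'
jsonify_timedelta(datetime.timedelta(seconds=30))
# -> u'PT30S'   (hours and minutes are omitted when no larger unit is present)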
def read(self):
"""
        If there is data available to be read from the transport, reads the data and tries to parse it as a protobuf message. If the parsing succeeds, returns the parsed protobuf object.
Otherwise, returns None.
"""
if not self.ready_to_read():
return None
data = self._read()
if data is None:
return None
return self._parse_message(data) | def function[read, parameter[self]]:
constant[
    If there is data available to be read from the transport, reads the data and tries to parse it as a protobuf message. If the parsing succeeds, returns the parsed protobuf object.
Otherwise, returns None.
]
if <ast.UnaryOp object at 0x7da1b0e27d60> begin[:]
return[constant[None]]
variable[data] assign[=] call[name[self]._read, parameter[]]
if compare[name[data] is constant[None]] begin[:]
return[constant[None]]
return[call[name[self]._parse_message, parameter[name[data]]]] | keyword[def] identifier[read] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[ready_to_read] ():
keyword[return] keyword[None]
identifier[data] = identifier[self] . identifier[_read] ()
keyword[if] identifier[data] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[return] identifier[self] . identifier[_parse_message] ( identifier[data] ) | def read(self):
"""
    If there is data available to be read from the transport, reads the data and tries to parse it as a protobuf message. If the parsing succeeds, returns the parsed protobuf object.
Otherwise, returns None.
"""
if not self.ready_to_read():
return None # depends on [control=['if'], data=[]]
data = self._read()
if data is None:
return None # depends on [control=['if'], data=[]]
return self._parse_message(data) |
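The intended polling pattern, sketched; the transport instance and handler are placeholders:
msg = transport.read()        # None when no complete message is available yet
if msg is not None:
    handle(msg)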
def logpt(self, t, xp, x):
"""PDF of X_t|X_{t-1}=xp"""
return self.ssm.PX(t, xp).logpdf(x) | def function[logpt, parameter[self, t, xp, x]]:
constant[PDF of X_t|X_{t-1}=xp]
return[call[call[name[self].ssm.PX, parameter[name[t], name[xp]]].logpdf, parameter[name[x]]]] | keyword[def] identifier[logpt] ( identifier[self] , identifier[t] , identifier[xp] , identifier[x] ):
literal[string]
keyword[return] identifier[self] . identifier[ssm] . identifier[PX] ( identifier[t] , identifier[xp] ). identifier[logpdf] ( identifier[x] ) | def logpt(self, t, xp, x):
"""PDF of X_t|X_{t-1}=xp"""
return self.ssm.PX(t, xp).logpdf(x) |
def scheduleNextHeartbeat(self, nextRun):
"""
Schedules the next ping.
:param nextRun: when we should run next.
:return:
"""
import threading
from datetime import datetime
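        # datetime.utcnow() is naive, so .timestamp() interprets it as local
        # time; nextRun must be computed the same way or the delay will skew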
tilNextTime = max(nextRun - datetime.utcnow().timestamp(), 0)
logging.getLogger('recorder').info("Scheduling next ping in " + str(round(tilNextTime, 3)) + " seconds")
threading.Timer(tilNextTime, self.ping).start() | def function[scheduleNextHeartbeat, parameter[self, nextRun]]:
constant[
Schedules the next ping.
:param nextRun: when we should run next.
:return:
]
import module[threading]
from relative_module[datetime] import module[datetime]
variable[tilNextTime] assign[=] call[name[max], parameter[binary_operation[name[nextRun] - call[call[name[datetime].utcnow, parameter[]].timestamp, parameter[]]], constant[0]]]
call[call[name[logging].getLogger, parameter[constant[recorder]]].info, parameter[binary_operation[binary_operation[constant[Scheduling next ping in ] + call[name[str], parameter[call[name[round], parameter[name[tilNextTime], constant[3]]]]]] + constant[ seconds]]]]
call[call[name[threading].Timer, parameter[name[tilNextTime], name[self].ping]].start, parameter[]] | keyword[def] identifier[scheduleNextHeartbeat] ( identifier[self] , identifier[nextRun] ):
literal[string]
keyword[import] identifier[threading]
keyword[from] identifier[datetime] keyword[import] identifier[datetime]
identifier[tilNextTime] = identifier[max] ( identifier[nextRun] - identifier[datetime] . identifier[utcnow] (). identifier[timestamp] (), literal[int] )
identifier[logging] . identifier[getLogger] ( literal[string] ). identifier[info] ( literal[string] + identifier[str] ( identifier[round] ( identifier[tilNextTime] , literal[int] ))+ literal[string] )
identifier[threading] . identifier[Timer] ( identifier[tilNextTime] , identifier[self] . identifier[ping] ). identifier[start] () | def scheduleNextHeartbeat(self, nextRun):
"""
Schedules the next ping.
:param nextRun: when we should run next.
:return:
"""
import threading
from datetime import datetime
tilNextTime = max(nextRun - datetime.utcnow().timestamp(), 0)
logging.getLogger('recorder').info('Scheduling next ping in ' + str(round(tilNextTime, 3)) + ' seconds')
threading.Timer(tilNextTime, self.ping).start() |
def opts(self, dictobj):
"""
Add or update options
"""
for k in dictobj:
self.chart_opts[k] = dictobj[k] | def function[opts, parameter[self, dictobj]]:
constant[
Add or update options
]
for taget[name[k]] in starred[name[dictobj]] begin[:]
call[name[self].chart_opts][name[k]] assign[=] call[name[dictobj]][name[k]] | keyword[def] identifier[opts] ( identifier[self] , identifier[dictobj] ):
literal[string]
keyword[for] identifier[k] keyword[in] identifier[dictobj] :
identifier[self] . identifier[chart_opts] [ identifier[k] ]= identifier[dictobj] [ identifier[k] ] | def opts(self, dictobj):
"""
Add or update options
"""
for k in dictobj:
self.chart_opts[k] = dictobj[k] # depends on [control=['for'], data=['k']] |
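For instance (the option names depend on the charting backend and are illustrative):
chart.opts({"width": 900, "height": 300})    # merged into chart.chart_opts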
def _check_error(response):
"""Raises an exception if the Spark Cloud returned an error."""
if (not response.ok) or (response.status_code != 200):
raise Exception(
response.json()['error'] + ': ' +
response.json()['error_description']
) | def function[_check_error, parameter[response]]:
constant[Raises an exception if the Spark Cloud returned an error.]
if <ast.BoolOp object at 0x7da2044c2fe0> begin[:]
<ast.Raise object at 0x7da2044c0310> | keyword[def] identifier[_check_error] ( identifier[response] ):
literal[string]
keyword[if] ( keyword[not] identifier[response] . identifier[ok] ) keyword[or] ( identifier[response] . identifier[status_code] != literal[int] ):
keyword[raise] identifier[Exception] (
identifier[response] . identifier[json] ()[ literal[string] ]+ literal[string] +
identifier[response] . identifier[json] ()[ literal[string] ]
) | def _check_error(response):
"""Raises an exception if the Spark Cloud returned an error."""
if not response.ok or response.status_code != 200:
raise Exception(response.json()['error'] + ': ' + response.json()['error_description']) # depends on [control=['if'], data=[]] |
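Illustrative use with requests; the URL is a placeholder. Two caveats worth noting: the body is decoded twice when an error is raised, and a 2xx status other than 200 still raises because of the explicit != 200 check:
import requests
resp = requests.get("https://cloud.example.com/v1/devices")   # placeholder
_check_error(resp)     # raises Exception("<error>: <error_description>")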
def load_sampleset(self, f, name):
'''Read the sampleset from using the HDF5 format. Name is usually in {train, test}.'''
self.encoder_x = np.array(f[name + '_encoder_x'])
self.decoder_x = np.array(f[name + '_decoder_x'])
self.decoder_y = np.array(f[name + '_decoder_y']) | def function[load_sampleset, parameter[self, f, name]]:
constant[Read the sampleset from using the HDF5 format. Name is usually in {train, test}.]
name[self].encoder_x assign[=] call[name[np].array, parameter[call[name[f]][binary_operation[name[name] + constant[_encoder_x]]]]]
name[self].decoder_x assign[=] call[name[np].array, parameter[call[name[f]][binary_operation[name[name] + constant[_decoder_x]]]]]
name[self].decoder_y assign[=] call[name[np].array, parameter[call[name[f]][binary_operation[name[name] + constant[_decoder_y]]]]] | keyword[def] identifier[load_sampleset] ( identifier[self] , identifier[f] , identifier[name] ):
literal[string]
identifier[self] . identifier[encoder_x] = identifier[np] . identifier[array] ( identifier[f] [ identifier[name] + literal[string] ])
identifier[self] . identifier[decoder_x] = identifier[np] . identifier[array] ( identifier[f] [ identifier[name] + literal[string] ])
identifier[self] . identifier[decoder_y] = identifier[np] . identifier[array] ( identifier[f] [ identifier[name] + literal[string] ]) | def load_sampleset(self, f, name):
"""Read the sampleset from using the HDF5 format. Name is usually in {train, test}."""
self.encoder_x = np.array(f[name + '_encoder_x'])
self.decoder_x = np.array(f[name + '_decoder_x'])
self.decoder_y = np.array(f[name + '_decoder_y']) |
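A loading sketch with h5py, assuming the file was written with matching <name>_encoder_x / <name>_decoder_x / <name>_decoder_y datasets; model stands in for the owning object:
import h5py
with h5py.File("samples.h5", "r") as f:       # placeholder filename
    model.load_sampleset(f, "train")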
def links(self):
"""
Include a self link.
"""
links = Links()
links["self"] = Link.for_(
self._operation,
self._ns,
qs=self._page.to_items(),
**self._context
)
return links | def function[links, parameter[self]]:
constant[
Include a self link.
]
variable[links] assign[=] call[name[Links], parameter[]]
call[name[links]][constant[self]] assign[=] call[name[Link].for_, parameter[name[self]._operation, name[self]._ns]]
return[name[links]] | keyword[def] identifier[links] ( identifier[self] ):
literal[string]
identifier[links] = identifier[Links] ()
identifier[links] [ literal[string] ]= identifier[Link] . identifier[for_] (
identifier[self] . identifier[_operation] ,
identifier[self] . identifier[_ns] ,
identifier[qs] = identifier[self] . identifier[_page] . identifier[to_items] (),
** identifier[self] . identifier[_context]
)
keyword[return] identifier[links] | def links(self):
"""
Include a self link.
"""
links = Links()
links['self'] = Link.for_(self._operation, self._ns, qs=self._page.to_items(), **self._context)
return links |
def heartbeat(self):
'''
Check the heartbeat of the ordering API
Args: None
Returns: True or False
'''
url = '%s/heartbeat' % self.base_url
# Auth is not required to hit the heartbeat
r = requests.get(url)
try:
return r.json() == "ok"
except:
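            # bare except: any failure to decode the body is reported as False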
return False | def function[heartbeat, parameter[self]]:
constant[
Check the heartbeat of the ordering API
Args: None
Returns: True or False
]
variable[url] assign[=] binary_operation[constant[%s/heartbeat] <ast.Mod object at 0x7da2590d6920> name[self].base_url]
variable[r] assign[=] call[name[requests].get, parameter[name[url]]]
<ast.Try object at 0x7da1b00051e0> | keyword[def] identifier[heartbeat] ( identifier[self] ):
literal[string]
identifier[url] = literal[string] % identifier[self] . identifier[base_url]
identifier[r] = identifier[requests] . identifier[get] ( identifier[url] )
keyword[try] :
keyword[return] identifier[r] . identifier[json] ()== literal[string]
keyword[except] :
keyword[return] keyword[False] | def heartbeat(self):
"""
Check the heartbeat of the ordering API
Args: None
Returns: True or False
"""
url = '%s/heartbeat' % self.base_url
# Auth is not required to hit the heartbeat
r = requests.get(url)
try:
return r.json() == 'ok' # depends on [control=['try'], data=[]]
except:
return False # depends on [control=['except'], data=[]] |
def generate_rfc3339(d, local_tz=True):
"""
generate rfc3339 time format
input :
d = date type
local_tz = use local time zone if true,
otherwise mark as utc
output :
rfc3339 string date format. ex : `2008-04-02T20:00:00+07:00`
"""
try:
if local_tz:
d = datetime.datetime.fromtimestamp(d)
else:
d = datetime.datetime.utcfromtimestamp(d)
except TypeError:
pass
if not isinstance(d, datetime.date):
raise TypeError('Not timestamp or date object. Got %r.' % type(d))
if not isinstance(d, datetime.datetime):
d = datetime.datetime(*d.timetuple()[:3])
return ('%04d-%02d-%02dT%02d:%02d:%02d%s' %
(d.year, d.month, d.day, d.hour, d.minute, d.second,
_generate_timezone(d, local_tz))) | def function[generate_rfc3339, parameter[d, local_tz]]:
constant[
generate rfc3339 time format
input :
d = date type
local_tz = use local time zone if true,
otherwise mark as utc
output :
rfc3339 string date format. ex : `2008-04-02T20:00:00+07:00`
]
<ast.Try object at 0x7da18f00f2b0>
if <ast.UnaryOp object at 0x7da2054a7040> begin[:]
<ast.Raise object at 0x7da2054a4ee0>
if <ast.UnaryOp object at 0x7da1b25d8ee0> begin[:]
variable[d] assign[=] call[name[datetime].datetime, parameter[<ast.Starred object at 0x7da1b25dbdc0>]]
return[binary_operation[constant[%04d-%02d-%02dT%02d:%02d:%02d%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b25a6140>, <ast.Attribute object at 0x7da1b25a60e0>, <ast.Attribute object at 0x7da1b25a6080>, <ast.Attribute object at 0x7da1b25a6020>, <ast.Attribute object at 0x7da1b25a5fc0>, <ast.Attribute object at 0x7da1b25a5f60>, <ast.Call object at 0x7da1b25a5f00>]]]] | keyword[def] identifier[generate_rfc3339] ( identifier[d] , identifier[local_tz] = keyword[True] ):
literal[string]
keyword[try] :
keyword[if] identifier[local_tz] :
identifier[d] = identifier[datetime] . identifier[datetime] . identifier[fromtimestamp] ( identifier[d] )
keyword[else] :
identifier[d] = identifier[datetime] . identifier[datetime] . identifier[utcfromtimestamp] ( identifier[d] )
keyword[except] identifier[TypeError] :
keyword[pass]
keyword[if] keyword[not] identifier[isinstance] ( identifier[d] , identifier[datetime] . identifier[date] ):
keyword[raise] identifier[TypeError] ( literal[string] % identifier[type] ( identifier[d] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[d] , identifier[datetime] . identifier[datetime] ):
identifier[d] = identifier[datetime] . identifier[datetime] (* identifier[d] . identifier[timetuple] ()[: literal[int] ])
keyword[return] ( literal[string] %
( identifier[d] . identifier[year] , identifier[d] . identifier[month] , identifier[d] . identifier[day] , identifier[d] . identifier[hour] , identifier[d] . identifier[minute] , identifier[d] . identifier[second] ,
identifier[_generate_timezone] ( identifier[d] , identifier[local_tz] ))) | def generate_rfc3339(d, local_tz=True):
"""
generate rfc3339 time format
input :
d = date type
local_tz = use local time zone if true,
otherwise mark as utc
output :
rfc3339 string date format. ex : `2008-04-02T20:00:00+07:00`
"""
try:
if local_tz:
d = datetime.datetime.fromtimestamp(d) # depends on [control=['if'], data=[]]
else:
d = datetime.datetime.utcfromtimestamp(d) # depends on [control=['try'], data=[]]
except TypeError:
pass # depends on [control=['except'], data=[]]
if not isinstance(d, datetime.date):
raise TypeError('Not timestamp or date object. Got %r.' % type(d)) # depends on [control=['if'], data=[]]
if not isinstance(d, datetime.datetime):
d = datetime.datetime(*d.timetuple()[:3]) # depends on [control=['if'], data=[]]
return '%04d-%02d-%02dT%02d:%02d:%02d%s' % (d.year, d.month, d.day, d.hour, d.minute, d.second, _generate_timezone(d, local_tz)) |
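An example call; the offset suffix comes from the (not shown) _generate_timezone helper, so the exact tail below is an assumption:
generate_rfc3339(1207051200, local_tz=False)
# -> '2008-04-02T12:00:00+00:00'  (assuming the helper emits '+00:00' for UTC)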
def start(self):
"""
Creates and starts the local API Gateway service. This method will block until the service is stopped
manually using an interrupt. After the service is started, callers can make HTTP requests to the endpoint
to invoke the Lambda function and receive a response.
NOTE: This is a blocking call that will not return until the thread is interrupted with SIGINT/SIGTERM
"""
routing_list = self._make_routing_list(self.api_provider)
if not routing_list:
raise NoApisDefined("No APIs available in SAM template")
static_dir_path = self._make_static_dir_path(self.cwd, self.static_dir)
# We care about passing only stderr to the Service and not stdout because stdout from Docker container
# contains the response to the API which is sent out as HTTP response. Only stderr needs to be printed
# to the console or a log file. stderr from Docker container contains runtime logs and output of print
# statements from the Lambda function
service = LocalApigwService(routing_list=routing_list,
lambda_runner=self.lambda_runner,
static_dir=static_dir_path,
port=self.port,
host=self.host,
stderr=self.stderr_stream)
service.create()
# Print out the list of routes that will be mounted
self._print_routes(self.api_provider, self.host, self.port)
LOG.info("You can now browse to the above endpoints to invoke your functions. "
"You do not need to restart/reload SAM CLI while working on your functions, "
"changes will be reflected instantly/automatically. You only need to restart "
"SAM CLI if you update your AWS SAM template")
service.run() | def function[start, parameter[self]]:
constant[
Creates and starts the local API Gateway service. This method will block until the service is stopped
manually using an interrupt. After the service is started, callers can make HTTP requests to the endpoint
to invoke the Lambda function and receive a response.
NOTE: This is a blocking call that will not return until the thread is interrupted with SIGINT/SIGTERM
]
variable[routing_list] assign[=] call[name[self]._make_routing_list, parameter[name[self].api_provider]]
if <ast.UnaryOp object at 0x7da18f813eb0> begin[:]
<ast.Raise object at 0x7da18f812f20>
variable[static_dir_path] assign[=] call[name[self]._make_static_dir_path, parameter[name[self].cwd, name[self].static_dir]]
variable[service] assign[=] call[name[LocalApigwService], parameter[]]
call[name[service].create, parameter[]]
call[name[self]._print_routes, parameter[name[self].api_provider, name[self].host, name[self].port]]
call[name[LOG].info, parameter[constant[You can now browse to the above endpoints to invoke your functions. You do not need to restart/reload SAM CLI while working on your functions, changes will be reflected instantly/automatically. You only need to restart SAM CLI if you update your AWS SAM template]]]
call[name[service].run, parameter[]] | keyword[def] identifier[start] ( identifier[self] ):
literal[string]
identifier[routing_list] = identifier[self] . identifier[_make_routing_list] ( identifier[self] . identifier[api_provider] )
keyword[if] keyword[not] identifier[routing_list] :
keyword[raise] identifier[NoApisDefined] ( literal[string] )
identifier[static_dir_path] = identifier[self] . identifier[_make_static_dir_path] ( identifier[self] . identifier[cwd] , identifier[self] . identifier[static_dir] )
identifier[service] = identifier[LocalApigwService] ( identifier[routing_list] = identifier[routing_list] ,
identifier[lambda_runner] = identifier[self] . identifier[lambda_runner] ,
identifier[static_dir] = identifier[static_dir_path] ,
identifier[port] = identifier[self] . identifier[port] ,
identifier[host] = identifier[self] . identifier[host] ,
identifier[stderr] = identifier[self] . identifier[stderr_stream] )
identifier[service] . identifier[create] ()
identifier[self] . identifier[_print_routes] ( identifier[self] . identifier[api_provider] , identifier[self] . identifier[host] , identifier[self] . identifier[port] )
identifier[LOG] . identifier[info] ( literal[string]
literal[string]
literal[string]
literal[string] )
identifier[service] . identifier[run] () | def start(self):
"""
Creates and starts the local API Gateway service. This method will block until the service is stopped
manually using an interrupt. After the service is started, callers can make HTTP requests to the endpoint
to invoke the Lambda function and receive a response.
NOTE: This is a blocking call that will not return until the thread is interrupted with SIGINT/SIGTERM
"""
routing_list = self._make_routing_list(self.api_provider)
if not routing_list:
raise NoApisDefined('No APIs available in SAM template') # depends on [control=['if'], data=[]]
static_dir_path = self._make_static_dir_path(self.cwd, self.static_dir)
# We care about passing only stderr to the Service and not stdout because stdout from Docker container
# contains the response to the API which is sent out as HTTP response. Only stderr needs to be printed
# to the console or a log file. stderr from Docker container contains runtime logs and output of print
# statements from the Lambda function
service = LocalApigwService(routing_list=routing_list, lambda_runner=self.lambda_runner, static_dir=static_dir_path, port=self.port, host=self.host, stderr=self.stderr_stream)
service.create()
# Print out the list of routes that will be mounted
self._print_routes(self.api_provider, self.host, self.port)
LOG.info('You can now browse to the above endpoints to invoke your functions. You do not need to restart/reload SAM CLI while working on your functions, changes will be reflected instantly/automatically. You only need to restart SAM CLI if you update your AWS SAM template')
service.run() |
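Since the start method above blocks inside service.run() until the process receives SIGINT/SIGTERM, callers normally arrange shutdown handling before invoking it. A minimal caller sketch, assuming only that some object exposes the blocking start() shown in this record (the run_blocking wrapper and its names are illustrative, not SAM CLI's real command layer):

import signal
import sys

def run_blocking(service):
    # start() does not return on its own; exiting from the signal handler
    # is what ends the process, mirroring the docstring's SIGINT/SIGTERM note.
    def _shutdown(signum, frame):
        sys.exit(0)

    signal.signal(signal.SIGINT, _shutdown)
    signal.signal(signal.SIGTERM, _shutdown)
    service.start()  # blocks here, serving HTTP requests until interrupted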
def _ReadPaddingDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads a padding data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
PaddingDefinition: padding definition.
Raises:
DefinitionReaderError: if the definition values are missing or if
the format is incorrect.
"""
if not is_member:
error_message = 'data type only supported as member'
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object = self._ReadDataTypeDefinition(
definitions_registry, definition_values, data_types.PaddingDefinition,
definition_name, self._SUPPORTED_DEFINITION_VALUES_PADDING)
alignment_size = definition_values.get('alignment_size', None)
if not alignment_size:
error_message = 'missing alignment_size'
raise errors.DefinitionReaderError(definition_name, error_message)
try:
int(alignment_size)
except ValueError:
      error_message = 'unsupported alignment size attribute: {0!s}'.format(
alignment_size)
raise errors.DefinitionReaderError(definition_name, error_message)
if alignment_size not in (2, 4, 8, 16):
      error_message = 'unsupported alignment size value: {0!s}'.format(
alignment_size)
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.alignment_size = alignment_size
return definition_object | def function[_ReadPaddingDataTypeDefinition, parameter[self, definitions_registry, definition_values, definition_name, is_member]]:
constant[Reads a padding data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
PaddingDefinition: padding definition.
Raises:
DefinitionReaderError: if the definition values are missing or if
the format is incorrect.
]
if <ast.UnaryOp object at 0x7da1b0d9ad40> begin[:]
variable[error_message] assign[=] constant[data type only supported as member]
<ast.Raise object at 0x7da1b0d9ace0>
variable[definition_object] assign[=] call[name[self]._ReadDataTypeDefinition, parameter[name[definitions_registry], name[definition_values], name[data_types].PaddingDefinition, name[definition_name], name[self]._SUPPORTED_DEFINITION_VALUES_PADDING]]
variable[alignment_size] assign[=] call[name[definition_values].get, parameter[constant[alignment_size], constant[None]]]
if <ast.UnaryOp object at 0x7da1b0d99ff0> begin[:]
variable[error_message] assign[=] constant[missing alignment_size]
<ast.Raise object at 0x7da1b0d99e40>
<ast.Try object at 0x7da1b0d99d20>
if compare[name[alignment_size] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b0d99930>, <ast.Constant object at 0x7da1b0d998d0>, <ast.Constant object at 0x7da1b0d99900>, <ast.Constant object at 0x7da1b0d99780>]]] begin[:]
variable[error_message] assign[=] call[constant[unsupported alignment size value: {0!s}].format, parameter[name[alignment_size]]]
<ast.Raise object at 0x7da1b0d996c0>
name[definition_object].alignment_size assign[=] name[alignment_size]
return[name[definition_object]] | keyword[def] identifier[_ReadPaddingDataTypeDefinition] (
identifier[self] , identifier[definitions_registry] , identifier[definition_values] , identifier[definition_name] ,
identifier[is_member] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[is_member] :
identifier[error_message] = literal[string]
keyword[raise] identifier[errors] . identifier[DefinitionReaderError] ( identifier[definition_name] , identifier[error_message] )
identifier[definition_object] = identifier[self] . identifier[_ReadDataTypeDefinition] (
identifier[definitions_registry] , identifier[definition_values] , identifier[data_types] . identifier[PaddingDefinition] ,
identifier[definition_name] , identifier[self] . identifier[_SUPPORTED_DEFINITION_VALUES_PADDING] )
identifier[alignment_size] = identifier[definition_values] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] keyword[not] identifier[alignment_size] :
identifier[error_message] = literal[string]
keyword[raise] identifier[errors] . identifier[DefinitionReaderError] ( identifier[definition_name] , identifier[error_message] )
keyword[try] :
identifier[int] ( identifier[alignment_size] )
keyword[except] identifier[ValueError] :
identifier[error_message] = literal[string] . identifier[format] (
identifier[alignment_size] )
keyword[raise] identifier[errors] . identifier[DefinitionReaderError] ( identifier[definition_name] , identifier[error_message] )
keyword[if] identifier[alignment_size] keyword[not] keyword[in] ( literal[int] , literal[int] , literal[int] , literal[int] ):
identifier[error_message] = literal[string] . identifier[format] (
identifier[alignment_size] )
keyword[raise] identifier[errors] . identifier[DefinitionReaderError] ( identifier[definition_name] , identifier[error_message] )
identifier[definition_object] . identifier[alignment_size] = identifier[alignment_size]
keyword[return] identifier[definition_object] | def _ReadPaddingDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):
"""Reads a padding data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
PaddingDefinition: padding definition.
Raises:
DefinitionReaderError: if the definition values are missing or if
the format is incorrect.
"""
if not is_member:
error_message = 'data type only supported as member'
raise errors.DefinitionReaderError(definition_name, error_message) # depends on [control=['if'], data=[]]
definition_object = self._ReadDataTypeDefinition(definitions_registry, definition_values, data_types.PaddingDefinition, definition_name, self._SUPPORTED_DEFINITION_VALUES_PADDING)
alignment_size = definition_values.get('alignment_size', None)
if not alignment_size:
error_message = 'missing alignment_size'
raise errors.DefinitionReaderError(definition_name, error_message) # depends on [control=['if'], data=[]]
try:
int(alignment_size) # depends on [control=['try'], data=[]]
except ValueError:
error_message = 'unsupported alignment size attribute: {0!s}'.format(alignment_size)
raise errors.DefinitionReaderError(definition_name, error_message) # depends on [control=['except'], data=[]]
if alignment_size not in (2, 4, 8, 16):
error_message = 'unsupported alignment size value: {0!s}'.format(alignment_size)
raise errors.DefinitionReaderError(definition_name, error_message) # depends on [control=['if'], data=['alignment_size']]
definition_object.alignment_size = alignment_size
return definition_object |
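A standalone restatement of the alignment_size validation performed above, offered as a sketch rather than dtfabric's actual API. One subtlety worth noting: the original converts with int(alignment_size) but then tests the unconverted value against (2, 4, 8, 16), so a string such as '4' passes the conversion yet still fails the membership check; converting first avoids that:

def validate_alignment_size(definition_name, alignment_size):
    # Mirrors the checks in _ReadPaddingDataTypeDefinition: required,
    # integer-convertible, and one of the supported power-of-two sizes.
    if not alignment_size:
        raise ValueError('{0:s}: missing alignment_size'.format(definition_name))
    try:
        alignment_size = int(alignment_size)
    except (TypeError, ValueError):
        raise ValueError('{0:s}: unsupported alignment size attribute: {1!s}'.format(
            definition_name, alignment_size))
    if alignment_size not in (2, 4, 8, 16):
        raise ValueError('{0:s}: unsupported alignment size value: {1!s}'.format(
            definition_name, alignment_size))
    return alignment_size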