def load_streets(self, filename):
"""
Load up all streets in lowercase for easier matching. The file should have one street per line, with no extra
characters. This isn't strictly required, but will vastly increase the accuracy.
"""
with open(filename, 'r') as f:
for line in f:
            self.streets.append(line.strip().lower())
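
# Minimal self-contained sketch of how load_streets is used; the Matcher
# class and streets.txt file are hypothetical stand-ins for the real host
# class and data.
class Matcher:
    def __init__(self):
        self.streets = []

Matcher.load_streets = load_streets  # attach the function above as a method

matcher = Matcher()
matcher.load_streets('streets.txt')  # assumed file: one street name per line
print('main st' in matcher.streets)
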
def _parse_args(args: List[str]) -> _UpdateArgumentsRunConfig:
"""
Parses the given CLI arguments to get a run configuration.
:param args: CLI arguments
:return: run configuration derived from the given CLI arguments
"""
parser = argparse.ArgumentParser(
prog="gitlab-update-variables", description="Tool for setting a GitLab project's build variables")
add_common_arguments(parser)
parser.add_argument("config_location", type=str, help="Location of the configuration file")
parser.add_argument("--setting-repository", dest="setting_repository", nargs="+", type=str,
help="Directory from which variable settings groups may be sourced")
parser.add_argument("--default-setting-extension", dest="default_setting_extensions",nargs="+", type=str,
help="Extensions to try adding to the variable to source location if it does not exist")
arguments = parser.parse_args(args)
return _UpdateArgumentsRunConfig(
arguments.config_location, arguments.setting_repository, arguments.default_setting_extensions,
        url=arguments.url, token=arguments.token, debug=arguments.debug)
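
# Hypothetical invocation of _parse_args; it assumes add_common_arguments
# (from the surrounding module) registers --url, --token and --debug,
# matching the attributes read above. The positional argument comes first
# so the greedy nargs="+" options cannot swallow it.
config = _parse_args([
    "my-variables.json",
    "--url", "https://gitlab.example.com",
    "--token", "an-access-token",
    "--setting-repository", "/etc/gitlab-settings",
    "--default-setting-extension", ".json", ".yml",
])
print(config.config_location)  # my-variables.json
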
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5,
use_07_metric=True):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
        annopath % imagename should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default True)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath % (imagename))
if i % 100 == 0:
print('Reading annotation for {:d}/{:d}'.format(
i + 1, len(imagenames)))
# save
print('Saving cached annotations to {:s}'.format(cachefile))
with open(cachefile, 'wb') as f:
pickle.dump(recs, f)
else:
# load
with open(cachefile, 'rb') as f:
recs = pickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
        difficult = np.array([x['difficult'] for x in R]).astype(bool)  # np.bool was removed in NumPy 1.24
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
    if any(lines):
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +
(BBGT[:, 2] - BBGT[:, 0]) *
(BBGT[:, 3] - BBGT[:, 1]) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
else:
rec = -1.
prec = -1.
ap = -1.
    return rec, prec, ap
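
# Standalone illustration of the IoU computation at the heart of voc_eval:
# one detection box against two ground-truth boxes, all [xmin, ymin, xmax, ymax].
import numpy as np

bb = np.array([10., 10., 50., 50.])
BBGT = np.array([[12., 12., 48., 48.],
                 [60., 60., 90., 90.]])
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = ((bb[2] - bb[0]) * (bb[3] - bb[1])
       + (BBGT[:, 2] - BBGT[:, 0]) * (BBGT[:, 3] - BBGT[:, 1]) - inters)
print(inters / uni)  # IoU against each ground truth: [0.81, 0.0]
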
def _get_drive_resource(self, drive_name):
"""Gets the DiskDrive resource if exists.
:param drive_name: can be either "PhysicalDrives" or
"LogicalDrives".
:returns the list of drives.
:raises: IloCommandNotSupportedError if the given drive resource
doesn't exist.
:raises: IloError, on an error from iLO.
"""
disk_details_list = []
array_uri_links = self._create_list_of_array_controllers()
for array_link in array_uri_links:
_, _, member_settings = (
self._rest_get(array_link['href']))
if ('links' in member_settings and
drive_name in member_settings['links']):
disk_uri = member_settings['links'][drive_name]['href']
headers, disk_member_uri, disk_mem = (
self._rest_get(disk_uri))
if ('links' in disk_mem and
'Member' in disk_mem['links']):
for disk_link in disk_mem['links']['Member']:
diskdrive_uri = disk_link['href']
_, _, disk_details = (
self._rest_get(diskdrive_uri))
disk_details_list.append(disk_details)
            else:
                msg = ('"links/Member" section in %s'
                       ' does not exist' % drive_name)
                raise exception.IloCommandNotSupportedError(msg)
        else:
            msg = ('"links/%s" section in '
                   'ArrayController/links/Member does not exist'
                   % drive_name)
            raise exception.IloCommandNotSupportedError(msg)
if disk_details_list:
        return disk_details_list
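
# Toy illustration of the 'links' traversal performed above, using a
# hypothetical payload shaped like an iLO ArrayController member response.
member_settings = {
    'links': {
        'PhysicalDrives': {'href': '/rest/v1/Systems/1/SmartStorage/'
                                   'ArrayControllers/0/DiskDrives'},
    },
}
drive_name = 'PhysicalDrives'
if 'links' in member_settings and drive_name in member_settings['links']:
    print(member_settings['links'][drive_name]['href'])
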
def check_conf_percentage_validity(conf_percentage):
"""
    Ensures that `conf_percentage` lies in the open interval (0, 100);
    raises a helpful ValueError otherwise.
"""
msg = "conf_percentage MUST be a number between 0.0 and 100."
condition_1 = isinstance(conf_percentage, Number)
if not condition_1:
raise ValueError(msg)
else:
condition_2 = 0 < conf_percentage < 100
if not condition_2:
raise ValueError(msg)
    return None
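
# Usage sketch; requires the definition above plus `from numbers import
# Number`, which the surrounding module is assumed to import.
check_conf_percentage_validity(95)       # valid: returns None
try:
    check_conf_percentage_validity(100)  # boundary values are rejected
except ValueError as exc:
    print(exc)  # conf_percentage MUST be a number between 0.0 and 100.
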
def dict_merge(o, v):
'''
Recursively climbs through dictionaries and merges them together.
:param o:
The first dictionary
:param v:
The second dictionary
:returns:
A dictionary (who would have guessed?)
.. note::
Make sure `o` & `v` are indeed dictionaries,
bad things will happen otherwise!
'''
if not isinstance(v, dict):
return v
res = _deepcopy(o)
for key in v.keys():
if res.get(key) and isinstance(res[key], dict):
res[key] = dict_merge(res[key], v[key])
else:
res[key] = _deepcopy(v[key])
    return res
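
# Usage sketch for dict_merge; _deepcopy is assumed to be copy.deepcopy
# imported under an alias in the surrounding module.
base = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
override = {'db': {'port': 6432}, 'debug': True}
print(dict_merge(base, override))
# {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}
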
def adjustMinimumWidth( self ):
"""
    Updates the minimum width for this menu based on the font metrics \
    for its title (if it's shown). This method is called automatically \
    when the menu is shown.
"""
if not self.showTitle():
return
metrics = QFontMetrics(self.font())
width = metrics.width(self.title()) + 20
if self.minimumWidth() < width:
        self.setMinimumWidth(width)
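
# Standalone sketch of the same width computation, assuming the PyQt5
# binding (the original project may use a different Qt wrapper); a
# QApplication must exist before font metrics can be measured.
import sys
from PyQt5.QtGui import QFontMetrics
from PyQt5.QtWidgets import QApplication, QMenu

app = QApplication(sys.argv)
menu = QMenu('A fairly long menu title')
metrics = QFontMetrics(menu.font())
width = metrics.width(menu.title()) + 20  # same 20px padding as above
if menu.minimumWidth() < width:
    menu.setMinimumWidth(width)
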
def dump_junit(self):
"""Returns a string containing XML mapped to the JUnit schema."""
testsuites = ElementTree.Element('testsuites', name='therapist', time=str(round(self.execution_time, 2)),
tests=str(self.count()), failures=str(self.count(status=Result.FAILURE)),
errors=str(self.count(status=Result.ERROR)))
for result in self.objects:
failures = '1' if result.is_failure else '0'
errors = '1' if result.is_error else '0'
testsuite = ElementTree.SubElement(testsuites, 'testsuite', id=result.process.name,
name=str(result.process), time=str(round(result.execution_time, 2)),
tests='1', failures=failures, errors=errors)
testcase = ElementTree.SubElement(testsuite, 'testcase', time=str(round(result.execution_time, 2)))
testcase.attrib['name'] = result.process.name
if result.is_failure or result.is_error:
if result.is_failure:
element = ElementTree.SubElement(testcase, 'failure', type='failure')
else:
element = ElementTree.SubElement(testcase, 'error', type='error')
if result.error:
element.text = result.error
else:
element.text = result.output if result.output else ''
    xmlstr = ElementTree.tostring(testsuites, encoding='unicode')  # 'unicode' avoids a duplicate XML declaration
    return '<?xml version="1.0" encoding="UTF-8"?>\n{}'.format(xmlstr)
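
# Standalone sketch of the JUnit skeleton the method emits (one suite with
# one failing case); the names and timings here are made up for illustration.
import xml.etree.ElementTree as ElementTree

testsuites = ElementTree.Element('testsuites', name='therapist', time='0.12',
                                 tests='1', failures='1', errors='0')
testsuite = ElementTree.SubElement(testsuites, 'testsuite', id='flake8',
                                   name='flake8', time='0.12', tests='1',
                                   failures='1', errors='0')
testcase = ElementTree.SubElement(testsuite, 'testcase', name='flake8',
                                  time='0.12')
failure = ElementTree.SubElement(testcase, 'failure', type='failure')
failure.text = 'E501 line too long'
xmlstr = ElementTree.tostring(testsuites, encoding='unicode')
print('<?xml version="1.0" encoding="UTF-8"?>\n{}'.format(xmlstr))
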
def revnet_164_cifar():
"""Tiny hparams suitable for CIFAR/etc."""
hparams = revnet_cifar_base()
hparams.bottleneck = True
hparams.num_channels = [16, 32, 64]
hparams.num_layers_per_block = [8, 8, 8]
  return hparams
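
# Hypothetical usage inside tensor2tensor, where revnet_cifar_base and the
# hparams registry come from the surrounding module.
hparams = revnet_164_cifar()
print(hparams.num_channels)          # [16, 32, 64]
print(hparams.num_layers_per_block)  # [8, 8, 8]
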
def _features_as_kv(self):
"""
    Return the feature rows as k/v pairs so that they can be stored in
    memcache or redis and used at the serving layer.

    :return: a nested hash for each column
"""
self._data = self.get_data()
key_list = self.key()
values_list = self.values()
result = {}
for column in values_list:
key_prefix = self.cache_key_prefix() + "#" + column
self._data['cache_key'] = self._data[key_list].apply(lambda xdf: key_prefix + "=" + '#'.join(xdf.astype(str).values), axis=1)
result[column] = dict(zip(self._data.cache_key.values, self._data[column].values))
    return result
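
# Standalone illustration of the cache-key construction used above, with
# made-up key columns, one value column, and a 'user_features' prefix.
import pandas as pd

data = pd.DataFrame({'user_id': [1, 2], 'country': ['us', 'de'],
                     'clicks': [10, 3]})
key_list = ['user_id', 'country']
column = 'clicks'
key_prefix = 'user_features' + '#' + column
data['cache_key'] = data[key_list].apply(
    lambda xdf: key_prefix + '=' + '#'.join(xdf.astype(str).values), axis=1)
print(dict(zip(data.cache_key.values, data[column].tolist())))
# {'user_features#clicks=1#us': 10, 'user_features#clicks=2#de': 3}
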
def linefeed(self):
"""Perform an index and, if :data:`~pyte.modes.LNM` is set, a
carriage return.
"""
self.index()
if mo.LNM in self.mode:
        self.carriage_return()
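
# Usage sketch via pyte's public API: feeding "\n" triggers linefeed(), and
# enabling LNM makes each linefeed imply a carriage return as well.
import pyte

screen = pyte.Screen(80, 24)
stream = pyte.Stream(screen)
stream.feed("abc\n")
print(screen.cursor.x, screen.cursor.y)  # 3 1 -- LNM off: column unchanged
screen.set_mode(pyte.modes.LNM)
stream.feed("\n")
print(screen.cursor.x, screen.cursor.y)  # 0 2 -- LNM on: column resets to 0
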
def resize(self, dims):
"""Resize our drawing area to encompass a space defined by the
given dimensions.
"""
width, height = dims[:2]
self.dims = (width, height)
self.logger.debug("renderer reconfigured to %dx%d" % (
width, height))
# create cairo surface the size of the window
#surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
depth = len(self.rgb_order)
self.surface_arr = np.zeros((height, width, depth), dtype=np.uint8)
stride = cairo.ImageSurface.format_stride_for_width(cairo.FORMAT_ARGB32,
width)
surface = cairo.ImageSurface.create_for_data(self.surface_arr,
cairo.FORMAT_ARGB32,
width, height, stride)
    self.surface = surface
def check_dependencies(req, indent=1, history=None):
"""
Given a setuptools package requirement (e.g. 'gryphon==2.42' or just
'gryphon'), print a tree of dependencies as they resolve in this
environment.
"""
# keep a history to avoid infinite loops
if history is None:
history = set()
if req in history:
return
history.add(req)
d = pkg_resources.get_distribution(req)
extras = parse_extras(req)
if indent == 1:
print_package(req, 0)
for r in d.requires(extras=extras):
print_package(r, indent)
check_dependencies(r, indent + 1, history) | def function[check_dependencies, parameter[req, indent, history]]:
constant[
Given a setuptools package requirement (e.g. 'gryphon==2.42' or just
'gryphon'), print a tree of dependencies as they resolve in this
environment.
]
if compare[name[history] is constant[None]] begin[:]
variable[history] assign[=] call[name[set], parameter[]]
if compare[name[req] in name[history]] begin[:]
return[None]
call[name[history].add, parameter[name[req]]]
variable[d] assign[=] call[name[pkg_resources].get_distribution, parameter[name[req]]]
variable[extras] assign[=] call[name[parse_extras], parameter[name[req]]]
if compare[name[indent] equal[==] constant[1]] begin[:]
call[name[print_package], parameter[name[req], constant[0]]]
for taget[name[r]] in starred[call[name[d].requires, parameter[]]] begin[:]
call[name[print_package], parameter[name[r], name[indent]]]
call[name[check_dependencies], parameter[name[r], binary_operation[name[indent] + constant[1]], name[history]]] | keyword[def] identifier[check_dependencies] ( identifier[req] , identifier[indent] = literal[int] , identifier[history] = keyword[None] ):
literal[string]
keyword[if] identifier[history] keyword[is] keyword[None] :
identifier[history] = identifier[set] ()
keyword[if] identifier[req] keyword[in] identifier[history] :
keyword[return]
identifier[history] . identifier[add] ( identifier[req] )
identifier[d] = identifier[pkg_resources] . identifier[get_distribution] ( identifier[req] )
identifier[extras] = identifier[parse_extras] ( identifier[req] )
keyword[if] identifier[indent] == literal[int] :
identifier[print_package] ( identifier[req] , literal[int] )
keyword[for] identifier[r] keyword[in] identifier[d] . identifier[requires] ( identifier[extras] = identifier[extras] ):
identifier[print_package] ( identifier[r] , identifier[indent] )
identifier[check_dependencies] ( identifier[r] , identifier[indent] + literal[int] , identifier[history] ) | def check_dependencies(req, indent=1, history=None):
"""
Given a setuptools package requirement (e.g. 'gryphon==2.42' or just
'gryphon'), print a tree of dependencies as they resolve in this
environment.
"""
# keep a history to avoid infinite loops
if history is None:
history = set() # depends on [control=['if'], data=['history']]
if req in history:
return # depends on [control=['if'], data=[]]
history.add(req)
d = pkg_resources.get_distribution(req)
extras = parse_extras(req)
if indent == 1:
print_package(req, 0) # depends on [control=['if'], data=[]]
for r in d.requires(extras=extras):
print_package(r, indent)
check_dependencies(r, indent + 1, history) # depends on [control=['for'], data=['r']] |
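A hypothetical invocation, assuming the companion helpers print_package and parse_extras are defined alongside:
import pkg_resources           # used by check_dependencies above
check_dependencies('requests') # prints the tree of dependencies as resolved in this environment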
def revoke(self, paths: Union[str, Iterable[str]], users: Union[str, Iterable[str], User, Iterable[User]]):
"""
Revokes all access controls that are associated to the given path or collection of paths.
:param paths: the paths to remove access controls on
        :param users: the users to revoke access controls for. A user may be represented as a `User` object or in
the form "name#zone"
""" | def function[revoke, parameter[self, paths, users]]:
constant[
Revokes all access controls that are associated to the given path or collection of paths.
:param paths: the paths to remove access controls on
    :param users: the users to revoke access controls for. A user may be represented as a `User` object or in
the form "name#zone"
] | keyword[def] identifier[revoke] ( identifier[self] , identifier[paths] : identifier[Union] [ identifier[str] , identifier[Iterable] [ identifier[str] ]], identifier[users] : identifier[Union] [ identifier[str] , identifier[Iterable] [ identifier[str] ], identifier[User] , identifier[Iterable] [ identifier[User] ]]):
literal[string] | def revoke(self, paths: Union[str, Iterable[str]], users: Union[str, Iterable[str], User, Iterable[User]]):
"""
Revokes all access controls that are associated to the given path or collection of paths.
:param paths: the paths to remove access controls on
    :param users: the users to revoke access controls for. A user may be represented as a `User` object or in
the form "name#zone"
""" |
def job_success(self, job, queue, job_result):
"""
Called just after an execute call was successful.
job_result is the value returned by the callback, if any.
"""
job.queued.delete()
job.hmset(end=str(datetime.utcnow()), status=STATUSES.SUCCESS)
queue.success.rpush(job.ident)
self.log(self.job_success_message(job, queue, job_result))
if hasattr(job, 'on_success'):
job.on_success(queue, job_result) | def function[job_success, parameter[self, job, queue, job_result]]:
constant[
Called just after an execute call was successful.
job_result is the value returned by the callback, if any.
]
call[name[job].queued.delete, parameter[]]
call[name[job].hmset, parameter[]]
call[name[queue].success.rpush, parameter[name[job].ident]]
call[name[self].log, parameter[call[name[self].job_success_message, parameter[name[job], name[queue], name[job_result]]]]]
if call[name[hasattr], parameter[name[job], constant[on_success]]] begin[:]
call[name[job].on_success, parameter[name[queue], name[job_result]]] | keyword[def] identifier[job_success] ( identifier[self] , identifier[job] , identifier[queue] , identifier[job_result] ):
literal[string]
identifier[job] . identifier[queued] . identifier[delete] ()
identifier[job] . identifier[hmset] ( identifier[end] = identifier[str] ( identifier[datetime] . identifier[utcnow] ()), identifier[status] = identifier[STATUSES] . identifier[SUCCESS] )
identifier[queue] . identifier[success] . identifier[rpush] ( identifier[job] . identifier[ident] )
identifier[self] . identifier[log] ( identifier[self] . identifier[job_success_message] ( identifier[job] , identifier[queue] , identifier[job_result] ))
keyword[if] identifier[hasattr] ( identifier[job] , literal[string] ):
identifier[job] . identifier[on_success] ( identifier[queue] , identifier[job_result] ) | def job_success(self, job, queue, job_result):
"""
Called just after an execute call was successful.
job_result is the value returned by the callback, if any.
"""
job.queued.delete()
job.hmset(end=str(datetime.utcnow()), status=STATUSES.SUCCESS)
queue.success.rpush(job.ident)
self.log(self.job_success_message(job, queue, job_result))
if hasattr(job, 'on_success'):
job.on_success(queue, job_result) # depends on [control=['if'], data=[]] |
def lock_context(self, timeout='default', requested_key='exclusive'):
        """A context manager that locks the resource.
:param timeout: Absolute time period (in milliseconds) that a resource
waits to get unlocked by the locking session before
returning an error. (Defaults to self.timeout)
:param requested_key: When using default of 'exclusive' the lock
is an exclusive lock.
Otherwise it is the access key for the shared lock or
None to generate a new shared access key.
The returned context is the access_key if applicable.
"""
if requested_key == 'exclusive':
self.lock_excl(timeout)
access_key = None
else:
access_key = self.lock(timeout, requested_key)
try:
yield access_key
finally:
self.unlock() | def function[lock_context, parameter[self, timeout, requested_key]]:
    constant[A context manager that locks the resource.
:param timeout: Absolute time period (in milliseconds) that a resource
waits to get unlocked by the locking session before
returning an error. (Defaults to self.timeout)
:param requested_key: When using default of 'exclusive' the lock
is an exclusive lock.
Otherwise it is the access key for the shared lock or
None to generate a new shared access key.
The returned context is the access_key if applicable.
]
if compare[name[requested_key] equal[==] constant[exclusive]] begin[:]
call[name[self].lock_excl, parameter[name[timeout]]]
variable[access_key] assign[=] constant[None]
<ast.Try object at 0x7da18dc98d60> | keyword[def] identifier[lock_context] ( identifier[self] , identifier[timeout] = literal[string] , identifier[requested_key] = literal[string] ):
literal[string]
keyword[if] identifier[requested_key] == literal[string] :
identifier[self] . identifier[lock_excl] ( identifier[timeout] )
identifier[access_key] = keyword[None]
keyword[else] :
identifier[access_key] = identifier[self] . identifier[lock] ( identifier[timeout] , identifier[requested_key] )
keyword[try] :
keyword[yield] identifier[access_key]
keyword[finally] :
        identifier[self] . identifier[unlock] () | def lock_context(self, timeout='default', requested_key='exclusive'):
    """A context manager that locks the resource.
:param timeout: Absolute time period (in milliseconds) that a resource
waits to get unlocked by the locking session before
returning an error. (Defaults to self.timeout)
:param requested_key: When using default of 'exclusive' the lock
is an exclusive lock.
Otherwise it is the access key for the shared lock or
None to generate a new shared access key.
The returned context is the access_key if applicable.
"""
if requested_key == 'exclusive':
self.lock_excl(timeout)
access_key = None # depends on [control=['if'], data=[]]
else:
access_key = self.lock(timeout, requested_key)
try:
yield access_key # depends on [control=['try'], data=[]]
finally:
self.unlock() |
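A usage sketch, assuming instr is a session object exposing this generator as a context manager (i.e. the @contextlib.contextmanager decorator this yield implies, as in a PyVISA-style resource):
with instr.lock_context(timeout=2000) as access_key:
    # exclusive lock held for the block; access_key is None for 'exclusive'
    instr.write('*IDN?')       # hypothetical I/O performed under the lock
# unlock() runs in the finally clause even if the block raises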
def validate(self, value):
        """validate function from OrValidator
        Returns:
            True if at least one of the validators'
            validate functions returns True
"""
errors = []
self._used_validator = []
for val in self._validators:
try:
val.validate(value)
self._used_validator.append(val)
except ValidatorException as e:
errors.append(e)
except Exception as e:
errors.append(ValidatorException("Unknown Error", e))
if len(errors) > 0:
raise ValidatorException.from_list(errors)
return value | def function[validate, parameter[self, value]]:
    constant[validate function from OrValidator
Returns:
    True if at least one of the validators'
    validate functions returns True
]
variable[errors] assign[=] list[[]]
name[self]._used_validator assign[=] list[[]]
for taget[name[val]] in starred[name[self]._validators] begin[:]
<ast.Try object at 0x7da1b2373190>
if compare[call[name[len], parameter[name[errors]]] greater[>] constant[0]] begin[:]
<ast.Raise object at 0x7da1b2370f10>
return[name[value]] | keyword[def] identifier[validate] ( identifier[self] , identifier[value] ):
literal[string]
identifier[errors] =[]
identifier[self] . identifier[_used_validator] =[]
keyword[for] identifier[val] keyword[in] identifier[self] . identifier[_validators] :
keyword[try] :
identifier[val] . identifier[validate] ( identifier[value] )
identifier[self] . identifier[_used_validator] . identifier[append] ( identifier[val] )
keyword[except] identifier[ValidatorException] keyword[as] identifier[e] :
identifier[errors] . identifier[append] ( identifier[e] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[errors] . identifier[append] ( identifier[ValidatorException] ( literal[string] , identifier[e] ))
keyword[if] identifier[len] ( identifier[errors] )> literal[int] :
keyword[raise] identifier[ValidatorException] . identifier[from_list] ( identifier[errors] )
    keyword[return] identifier[value] | def validate(self, value):
    """validate function from OrValidator
    Returns:
        True if at least one of the validators'
        validate functions returns True
"""
errors = []
self._used_validator = []
for val in self._validators:
try:
val.validate(value)
self._used_validator.append(val) # depends on [control=['try'], data=[]]
except ValidatorException as e:
errors.append(e) # depends on [control=['except'], data=['e']]
except Exception as e:
errors.append(ValidatorException('Unknown Error', e)) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['val']]
if len(errors) > 0:
raise ValidatorException.from_list(errors) # depends on [control=['if'], data=[]]
return value |
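A sketch with hypothetical inner validators; validate() returns the value when the inner validators accept it and raises an aggregated ValidatorException otherwise:
v = OrValidator([TypeValidator(int), RangeValidator(0, 10)])  # hypothetical validators
checked = v.validate(5)        # returns 5; v._used_validator records the validators that accepted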
def _MergeByIdKeepNew(self):
"""Migrate all entities, discarding duplicates from the old/a schedule.
This method migrates all entities from the new/b schedule. It then migrates
entities in the old schedule where there isn't already an entity with the
same ID.
Unlike _MergeSameId this method migrates entities to the merged schedule
before comparing their IDs. This allows transfers to be compared when they
refer to stops that had their ID updated by migration.
This method makes use of various methods like _Migrate and _Add which
are not implemented in the abstract DataSetMerger class. These methods
should be overwritten in a subclass to allow _MergeByIdKeepNew to work with
different entity types.
Returns:
The number of merged entities.
"""
# Maps from migrated ID to tuple(original object, migrated object)
a_orig_migrated = {}
b_orig_migrated = {}
for orig in self._GetIter(self.feed_merger.a_schedule):
migrated = self._Migrate(orig, self.feed_merger.a_schedule)
a_orig_migrated[self._GetId(migrated)] = (orig, migrated)
for orig in self._GetIter(self.feed_merger.b_schedule):
migrated = self._Migrate(orig, self.feed_merger.b_schedule)
b_orig_migrated[self._GetId(migrated)] = (orig, migrated)
for migrated_id, (orig, migrated) in b_orig_migrated.items():
self._Add(None, orig, migrated)
self._num_not_merged_b += 1
for migrated_id, (orig, migrated) in a_orig_migrated.items():
if migrated_id not in b_orig_migrated:
self._Add(orig, None, migrated)
self._num_not_merged_a += 1
return self._num_merged | def function[_MergeByIdKeepNew, parameter[self]]:
constant[Migrate all entities, discarding duplicates from the old/a schedule.
This method migrates all entities from the new/b schedule. It then migrates
entities in the old schedule where there isn't already an entity with the
same ID.
Unlike _MergeSameId this method migrates entities to the merged schedule
before comparing their IDs. This allows transfers to be compared when they
refer to stops that had their ID updated by migration.
This method makes use of various methods like _Migrate and _Add which
are not implemented in the abstract DataSetMerger class. These methods
should be overwritten in a subclass to allow _MergeByIdKeepNew to work with
different entity types.
Returns:
The number of merged entities.
]
variable[a_orig_migrated] assign[=] dictionary[[], []]
variable[b_orig_migrated] assign[=] dictionary[[], []]
for taget[name[orig]] in starred[call[name[self]._GetIter, parameter[name[self].feed_merger.a_schedule]]] begin[:]
variable[migrated] assign[=] call[name[self]._Migrate, parameter[name[orig], name[self].feed_merger.a_schedule]]
call[name[a_orig_migrated]][call[name[self]._GetId, parameter[name[migrated]]]] assign[=] tuple[[<ast.Name object at 0x7da1b1849150>, <ast.Name object at 0x7da1b1848820>]]
for taget[name[orig]] in starred[call[name[self]._GetIter, parameter[name[self].feed_merger.b_schedule]]] begin[:]
variable[migrated] assign[=] call[name[self]._Migrate, parameter[name[orig], name[self].feed_merger.b_schedule]]
call[name[b_orig_migrated]][call[name[self]._GetId, parameter[name[migrated]]]] assign[=] tuple[[<ast.Name object at 0x7da1b18481c0>, <ast.Name object at 0x7da1b1848430>]]
for taget[tuple[[<ast.Name object at 0x7da1b184a3e0>, <ast.Tuple object at 0x7da1b184b070>]]] in starred[call[name[b_orig_migrated].items, parameter[]]] begin[:]
call[name[self]._Add, parameter[constant[None], name[orig], name[migrated]]]
<ast.AugAssign object at 0x7da1b1848be0>
for taget[tuple[[<ast.Name object at 0x7da1b184a4a0>, <ast.Tuple object at 0x7da1b1849480>]]] in starred[call[name[a_orig_migrated].items, parameter[]]] begin[:]
if compare[name[migrated_id] <ast.NotIn object at 0x7da2590d7190> name[b_orig_migrated]] begin[:]
call[name[self]._Add, parameter[name[orig], constant[None], name[migrated]]]
<ast.AugAssign object at 0x7da2041da050>
return[name[self]._num_merged] | keyword[def] identifier[_MergeByIdKeepNew] ( identifier[self] ):
literal[string]
identifier[a_orig_migrated] ={}
identifier[b_orig_migrated] ={}
keyword[for] identifier[orig] keyword[in] identifier[self] . identifier[_GetIter] ( identifier[self] . identifier[feed_merger] . identifier[a_schedule] ):
identifier[migrated] = identifier[self] . identifier[_Migrate] ( identifier[orig] , identifier[self] . identifier[feed_merger] . identifier[a_schedule] )
identifier[a_orig_migrated] [ identifier[self] . identifier[_GetId] ( identifier[migrated] )]=( identifier[orig] , identifier[migrated] )
keyword[for] identifier[orig] keyword[in] identifier[self] . identifier[_GetIter] ( identifier[self] . identifier[feed_merger] . identifier[b_schedule] ):
identifier[migrated] = identifier[self] . identifier[_Migrate] ( identifier[orig] , identifier[self] . identifier[feed_merger] . identifier[b_schedule] )
identifier[b_orig_migrated] [ identifier[self] . identifier[_GetId] ( identifier[migrated] )]=( identifier[orig] , identifier[migrated] )
keyword[for] identifier[migrated_id] ,( identifier[orig] , identifier[migrated] ) keyword[in] identifier[b_orig_migrated] . identifier[items] ():
identifier[self] . identifier[_Add] ( keyword[None] , identifier[orig] , identifier[migrated] )
identifier[self] . identifier[_num_not_merged_b] += literal[int]
keyword[for] identifier[migrated_id] ,( identifier[orig] , identifier[migrated] ) keyword[in] identifier[a_orig_migrated] . identifier[items] ():
keyword[if] identifier[migrated_id] keyword[not] keyword[in] identifier[b_orig_migrated] :
identifier[self] . identifier[_Add] ( identifier[orig] , keyword[None] , identifier[migrated] )
identifier[self] . identifier[_num_not_merged_a] += literal[int]
keyword[return] identifier[self] . identifier[_num_merged] | def _MergeByIdKeepNew(self):
"""Migrate all entities, discarding duplicates from the old/a schedule.
This method migrates all entities from the new/b schedule. It then migrates
entities in the old schedule where there isn't already an entity with the
same ID.
Unlike _MergeSameId this method migrates entities to the merged schedule
before comparing their IDs. This allows transfers to be compared when they
refer to stops that had their ID updated by migration.
This method makes use of various methods like _Migrate and _Add which
are not implemented in the abstract DataSetMerger class. These methods
should be overwritten in a subclass to allow _MergeByIdKeepNew to work with
different entity types.
Returns:
The number of merged entities.
"""
# Maps from migrated ID to tuple(original object, migrated object)
a_orig_migrated = {}
b_orig_migrated = {}
for orig in self._GetIter(self.feed_merger.a_schedule):
migrated = self._Migrate(orig, self.feed_merger.a_schedule)
a_orig_migrated[self._GetId(migrated)] = (orig, migrated) # depends on [control=['for'], data=['orig']]
for orig in self._GetIter(self.feed_merger.b_schedule):
migrated = self._Migrate(orig, self.feed_merger.b_schedule)
b_orig_migrated[self._GetId(migrated)] = (orig, migrated) # depends on [control=['for'], data=['orig']]
for (migrated_id, (orig, migrated)) in b_orig_migrated.items():
self._Add(None, orig, migrated)
self._num_not_merged_b += 1 # depends on [control=['for'], data=[]]
for (migrated_id, (orig, migrated)) in a_orig_migrated.items():
if migrated_id not in b_orig_migrated:
self._Add(orig, None, migrated)
self._num_not_merged_a += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return self._num_merged |
def get_random_string(length=12,
allowed_chars='abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""
Return a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
('%s%s%s' % (random.getstate(), time.time(), settings.SECRET_KEY)).encode()
).digest()
)
return ''.join(random.choice(allowed_chars) for i in range(length)) | def function[get_random_string, parameter[length, allowed_chars]]:
constant[
Return a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
]
if <ast.UnaryOp object at 0x7da18c4cc4c0> begin[:]
call[name[random].seed, parameter[call[call[name[hashlib].sha256, parameter[call[binary_operation[constant[%s%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18c4cf8e0>, <ast.Call object at 0x7da18c4cd660>, <ast.Attribute object at 0x7da18c4cf430>]]].encode, parameter[]]]].digest, parameter[]]]]
return[call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da18c4cf970>]]] | keyword[def] identifier[get_random_string] ( identifier[length] = literal[int] ,
identifier[allowed_chars] = literal[string]
literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[using_sysrandom] :
identifier[random] . identifier[seed] (
identifier[hashlib] . identifier[sha256] (
( literal[string] %( identifier[random] . identifier[getstate] (), identifier[time] . identifier[time] (), identifier[settings] . identifier[SECRET_KEY] )). identifier[encode] ()
). identifier[digest] ()
)
keyword[return] literal[string] . identifier[join] ( identifier[random] . identifier[choice] ( identifier[allowed_chars] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[length] )) | def get_random_string(length=12, allowed_chars='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""
Return a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(hashlib.sha256(('%s%s%s' % (random.getstate(), time.time(), settings.SECRET_KEY)).encode()).digest())
return ''.join((random.choice(allowed_chars) for i in range(length))) # depends on [control=['if'], data=[]] |
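For example, a 32-character hex token carries log_2(16**32) = 128 bits of entropy:
token = get_random_string(32, allowed_chars='0123456789abcdef')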
def wait(self, timeout=None, poll_interval=1.0):
"""
Wait for the upload to complete or to err out.
Will return the resulting Activity or raise an exception if the upload fails.
:param timeout: The max seconds to wait. Will raise TimeoutExceeded
exception if this time passes without success or error response.
:type timeout: float
:param poll_interval: How long to wait between upload checks. Strava
recommends 1s minimum. (default 1.0s)
:type poll_interval: float
:return: The uploaded Activity object (fetched from server)
:rtype: :class:`stravalib.model.Activity`
:raise stravalib.exc.TimeoutExceeded: If a timeout was specified and
activity is still processing after
timeout has elapsed.
:raise stravalib.exc.ActivityUploadFailed: If the poll returns an error.
"""
start = time.time()
while self.activity_id is None:
self.poll()
time.sleep(poll_interval)
if timeout and (time.time() - start) > timeout:
raise exc.TimeoutExceeded()
# If we got this far, we must have an activity!
return self.client.get_activity(self.activity_id) | def function[wait, parameter[self, timeout, poll_interval]]:
constant[
Wait for the upload to complete or to err out.
Will return the resulting Activity or raise an exception if the upload fails.
:param timeout: The max seconds to wait. Will raise TimeoutExceeded
exception if this time passes without success or error response.
:type timeout: float
:param poll_interval: How long to wait between upload checks. Strava
recommends 1s minimum. (default 1.0s)
:type poll_interval: float
:return: The uploaded Activity object (fetched from server)
:rtype: :class:`stravalib.model.Activity`
:raise stravalib.exc.TimeoutExceeded: If a timeout was specified and
activity is still processing after
timeout has elapsed.
:raise stravalib.exc.ActivityUploadFailed: If the poll returns an error.
]
variable[start] assign[=] call[name[time].time, parameter[]]
while compare[name[self].activity_id is constant[None]] begin[:]
call[name[self].poll, parameter[]]
call[name[time].sleep, parameter[name[poll_interval]]]
if <ast.BoolOp object at 0x7da1b07045e0> begin[:]
<ast.Raise object at 0x7da1b0704520>
return[call[name[self].client.get_activity, parameter[name[self].activity_id]]] | keyword[def] identifier[wait] ( identifier[self] , identifier[timeout] = keyword[None] , identifier[poll_interval] = literal[int] ):
literal[string]
identifier[start] = identifier[time] . identifier[time] ()
keyword[while] identifier[self] . identifier[activity_id] keyword[is] keyword[None] :
identifier[self] . identifier[poll] ()
identifier[time] . identifier[sleep] ( identifier[poll_interval] )
keyword[if] identifier[timeout] keyword[and] ( identifier[time] . identifier[time] ()- identifier[start] )> identifier[timeout] :
keyword[raise] identifier[exc] . identifier[TimeoutExceeded] ()
keyword[return] identifier[self] . identifier[client] . identifier[get_activity] ( identifier[self] . identifier[activity_id] ) | def wait(self, timeout=None, poll_interval=1.0):
"""
Wait for the upload to complete or to err out.
Will return the resulting Activity or raise an exception if the upload fails.
:param timeout: The max seconds to wait. Will raise TimeoutExceeded
exception if this time passes without success or error response.
:type timeout: float
:param poll_interval: How long to wait between upload checks. Strava
recommends 1s minimum. (default 1.0s)
:type poll_interval: float
:return: The uploaded Activity object (fetched from server)
:rtype: :class:`stravalib.model.Activity`
:raise stravalib.exc.TimeoutExceeded: If a timeout was specified and
activity is still processing after
timeout has elapsed.
:raise stravalib.exc.ActivityUploadFailed: If the poll returns an error.
"""
start = time.time()
while self.activity_id is None:
self.poll()
time.sleep(poll_interval)
if timeout and time.time() - start > timeout:
raise exc.TimeoutExceeded() # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
# If we got this far, we must have an activity!
return self.client.get_activity(self.activity_id) |
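A usage sketch, assuming a stravalib Client whose upload_activity() produced this uploader:
uploader = client.upload_activity(open('ride.fit', 'rb'), data_type='fit')
activity = uploader.wait(timeout=30.0, poll_interval=1.0)  # TimeoutExceeded after 30s without an id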
def allocate_eip_address(domain=None, region=None, key=None, keyid=None, profile=None):
'''
Allocate a new Elastic IP address and associate it with your account.
domain
(string) Optional param - if set to exactly 'vpc', the address will be
allocated to the VPC. The default simply maps the EIP to your
account container.
returns
(dict) dict of 'interesting' information about the newly allocated EIP,
with probably the most interesting keys being 'public_ip'; and
'allocation_id' iff 'domain=vpc' was passed.
CLI Example:
.. code-block:: bash
salt-call boto_ec2.allocate_eip_address domain=vpc
.. versionadded:: 2016.3.0
'''
if domain and domain != 'vpc':
raise SaltInvocationError('The only permitted value for the \'domain\' param is \'vpc\'.')
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
address = conn.allocate_address(domain=domain)
except boto.exception.BotoServerError as e:
log.error(e)
return False
interesting = ['allocation_id', 'association_id', 'domain', 'instance_id',
'network_interface_id', 'network_interface_owner_id', 'public_ip',
'private_ip_address']
return dict([(x, getattr(address, x)) for x in interesting]) | def function[allocate_eip_address, parameter[domain, region, key, keyid, profile]]:
constant[
Allocate a new Elastic IP address and associate it with your account.
domain
(string) Optional param - if set to exactly 'vpc', the address will be
allocated to the VPC. The default simply maps the EIP to your
account container.
returns
(dict) dict of 'interesting' information about the newly allocated EIP,
with probably the most interesting keys being 'public_ip'; and
'allocation_id' iff 'domain=vpc' was passed.
CLI Example:
.. code-block:: bash
salt-call boto_ec2.allocate_eip_address domain=vpc
.. versionadded:: 2016.3.0
]
if <ast.BoolOp object at 0x7da1b2108ca0> begin[:]
<ast.Raise object at 0x7da1b2108580>
variable[conn] assign[=] call[name[_get_conn], parameter[]]
<ast.Try object at 0x7da1b21083a0>
variable[interesting] assign[=] list[[<ast.Constant object at 0x7da1b21096f0>, <ast.Constant object at 0x7da1b21089d0>, <ast.Constant object at 0x7da1b2109150>, <ast.Constant object at 0x7da1b210bdf0>, <ast.Constant object at 0x7da1b21085b0>, <ast.Constant object at 0x7da1b21099f0>, <ast.Constant object at 0x7da1b21083d0>, <ast.Constant object at 0x7da1b2108520>]]
return[call[name[dict], parameter[<ast.ListComp object at 0x7da1b210a950>]]] | keyword[def] identifier[allocate_eip_address] ( identifier[domain] = keyword[None] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ):
literal[string]
keyword[if] identifier[domain] keyword[and] identifier[domain] != literal[string] :
keyword[raise] identifier[SaltInvocationError] ( literal[string] )
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[try] :
identifier[address] = identifier[conn] . identifier[allocate_address] ( identifier[domain] = identifier[domain] )
keyword[except] identifier[boto] . identifier[exception] . identifier[BotoServerError] keyword[as] identifier[e] :
identifier[log] . identifier[error] ( identifier[e] )
keyword[return] keyword[False]
identifier[interesting] =[ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] ]
keyword[return] identifier[dict] ([( identifier[x] , identifier[getattr] ( identifier[address] , identifier[x] )) keyword[for] identifier[x] keyword[in] identifier[interesting] ]) | def allocate_eip_address(domain=None, region=None, key=None, keyid=None, profile=None):
"""
Allocate a new Elastic IP address and associate it with your account.
domain
(string) Optional param - if set to exactly 'vpc', the address will be
allocated to the VPC. The default simply maps the EIP to your
account container.
returns
(dict) dict of 'interesting' information about the newly allocated EIP,
with probably the most interesting keys being 'public_ip'; and
'allocation_id' iff 'domain=vpc' was passed.
CLI Example:
.. code-block:: bash
salt-call boto_ec2.allocate_eip_address domain=vpc
.. versionadded:: 2016.3.0
"""
if domain and domain != 'vpc':
raise SaltInvocationError("The only permitted value for the 'domain' param is 'vpc'.") # depends on [control=['if'], data=[]]
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
address = conn.allocate_address(domain=domain) # depends on [control=['try'], data=[]]
except boto.exception.BotoServerError as e:
log.error(e)
return False # depends on [control=['except'], data=['e']]
interesting = ['allocation_id', 'association_id', 'domain', 'instance_id', 'network_interface_id', 'network_interface_owner_id', 'public_ip', 'private_ip_address']
return dict([(x, getattr(address, x)) for x in interesting]) |
def getDjangoObjects(context):
"""
Returns a reference to the C{django_objects} on the context. If it doesn't
exist then it is created.
@rtype: Instance of L{DjangoReferenceCollection}
@since: 0.5
"""
c = context.extra
k = 'django_objects'
try:
return c[k]
except KeyError:
c[k] = DjangoReferenceCollection()
return c[k] | def function[getDjangoObjects, parameter[context]]:
constant[
Returns a reference to the C{django_objects} on the context. If it doesn't
exist then it is created.
@rtype: Instance of L{DjangoReferenceCollection}
@since: 0.5
]
variable[c] assign[=] name[context].extra
variable[k] assign[=] constant[django_objects]
<ast.Try object at 0x7da1b1578220>
return[call[name[c]][name[k]]] | keyword[def] identifier[getDjangoObjects] ( identifier[context] ):
literal[string]
identifier[c] = identifier[context] . identifier[extra]
identifier[k] = literal[string]
keyword[try] :
keyword[return] identifier[c] [ identifier[k] ]
keyword[except] identifier[KeyError] :
identifier[c] [ identifier[k] ]= identifier[DjangoReferenceCollection] ()
keyword[return] identifier[c] [ identifier[k] ] | def getDjangoObjects(context):
"""
Returns a reference to the C{django_objects} on the context. If it doesn't
exist then it is created.
@rtype: Instance of L{DjangoReferenceCollection}
@since: 0.5
"""
c = context.extra
k = 'django_objects'
try:
return c[k] # depends on [control=['try'], data=[]]
except KeyError:
c[k] = DjangoReferenceCollection() # depends on [control=['except'], data=[]]
return c[k] |
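The memoize-on-context idiom above means repeated calls share one collection (context here being a PyAMF codec context with an .extra dict):
collection = getDjangoObjects(context)            # first call stores a new DjangoReferenceCollection
assert getDjangoObjects(context) is collection    # later calls return the cached instance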
def _set_config(config):
"""Set gl configuration"""
pyglet_config = pyglet.gl.Config()
pyglet_config.red_size = config['red_size']
pyglet_config.green_size = config['green_size']
pyglet_config.blue_size = config['blue_size']
pyglet_config.alpha_size = config['alpha_size']
pyglet_config.accum_red_size = 0
pyglet_config.accum_green_size = 0
pyglet_config.accum_blue_size = 0
pyglet_config.accum_alpha_size = 0
pyglet_config.depth_size = config['depth_size']
pyglet_config.stencil_size = config['stencil_size']
pyglet_config.double_buffer = config['double_buffer']
pyglet_config.stereo = config['stereo']
pyglet_config.samples = config['samples']
return pyglet_config | def function[_set_config, parameter[config]]:
constant[Set gl configuration]
variable[pyglet_config] assign[=] call[name[pyglet].gl.Config, parameter[]]
name[pyglet_config].red_size assign[=] call[name[config]][constant[red_size]]
name[pyglet_config].green_size assign[=] call[name[config]][constant[green_size]]
name[pyglet_config].blue_size assign[=] call[name[config]][constant[blue_size]]
name[pyglet_config].alpha_size assign[=] call[name[config]][constant[alpha_size]]
name[pyglet_config].accum_red_size assign[=] constant[0]
name[pyglet_config].accum_green_size assign[=] constant[0]
name[pyglet_config].accum_blue_size assign[=] constant[0]
name[pyglet_config].accum_alpha_size assign[=] constant[0]
name[pyglet_config].depth_size assign[=] call[name[config]][constant[depth_size]]
name[pyglet_config].stencil_size assign[=] call[name[config]][constant[stencil_size]]
name[pyglet_config].double_buffer assign[=] call[name[config]][constant[double_buffer]]
name[pyglet_config].stereo assign[=] call[name[config]][constant[stereo]]
name[pyglet_config].samples assign[=] call[name[config]][constant[samples]]
return[name[pyglet_config]] | keyword[def] identifier[_set_config] ( identifier[config] ):
literal[string]
identifier[pyglet_config] = identifier[pyglet] . identifier[gl] . identifier[Config] ()
identifier[pyglet_config] . identifier[red_size] = identifier[config] [ literal[string] ]
identifier[pyglet_config] . identifier[green_size] = identifier[config] [ literal[string] ]
identifier[pyglet_config] . identifier[blue_size] = identifier[config] [ literal[string] ]
identifier[pyglet_config] . identifier[alpha_size] = identifier[config] [ literal[string] ]
identifier[pyglet_config] . identifier[accum_red_size] = literal[int]
identifier[pyglet_config] . identifier[accum_green_size] = literal[int]
identifier[pyglet_config] . identifier[accum_blue_size] = literal[int]
identifier[pyglet_config] . identifier[accum_alpha_size] = literal[int]
identifier[pyglet_config] . identifier[depth_size] = identifier[config] [ literal[string] ]
identifier[pyglet_config] . identifier[stencil_size] = identifier[config] [ literal[string] ]
identifier[pyglet_config] . identifier[double_buffer] = identifier[config] [ literal[string] ]
identifier[pyglet_config] . identifier[stereo] = identifier[config] [ literal[string] ]
identifier[pyglet_config] . identifier[samples] = identifier[config] [ literal[string] ]
keyword[return] identifier[pyglet_config] | def _set_config(config):
"""Set gl configuration"""
pyglet_config = pyglet.gl.Config()
pyglet_config.red_size = config['red_size']
pyglet_config.green_size = config['green_size']
pyglet_config.blue_size = config['blue_size']
pyglet_config.alpha_size = config['alpha_size']
pyglet_config.accum_red_size = 0
pyglet_config.accum_green_size = 0
pyglet_config.accum_blue_size = 0
pyglet_config.accum_alpha_size = 0
pyglet_config.depth_size = config['depth_size']
pyglet_config.stencil_size = config['stencil_size']
pyglet_config.double_buffer = config['double_buffer']
pyglet_config.stereo = config['stereo']
pyglet_config.samples = config['samples']
return pyglet_config |
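A sketch of the dict this helper consumes (the keys are exactly those read above; the values are illustrative):
config = {'red_size': 8, 'green_size': 8, 'blue_size': 8, 'alpha_size': 8,
          'depth_size': 24, 'stencil_size': 8,
          'double_buffer': True, 'stereo': False, 'samples': 0}
pyglet_config = _set_config(config)               # suitable for pyglet.window.Window(config=...)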
def _music_lib_search(self, search, start, max_items):
"""Perform a music library search and extract search numbers.
You can get an overview of all the relevant search prefixes (like
'A:') and their meaning with the request:
.. code ::
response = device.contentDirectory.Browse([
('ObjectID', '0'),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', 0),
('RequestedCount', 100),
('SortCriteria', '')
])
Args:
search (str): The ID to search.
            start (int): The index of the first item to return.
max_items (int): The maximum number of items to return.
Returns:
            tuple: (response, metadata) where response is the returned data
and metadata is a dict with the 'number_returned',
'total_matches' and 'update_id' integers
"""
response = self.contentDirectory.Browse([
('ObjectID', search),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', start),
('RequestedCount', max_items),
('SortCriteria', '')
])
# Get result information
metadata = {}
for tag in ['NumberReturned', 'TotalMatches', 'UpdateID']:
metadata[camel_to_underscore(tag)] = int(response[tag])
return response, metadata | def function[_music_lib_search, parameter[self, search, start, max_items]]:
constant[Perform a music library search and extract search numbers.
You can get an overview of all the relevant search prefixes (like
'A:') and their meaning with the request:
.. code ::
response = device.contentDirectory.Browse([
('ObjectID', '0'),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', 0),
('RequestedCount', 100),
('SortCriteria', '')
])
Args:
search (str): The ID to search.
        start (int): The index of the first item to return.
max_items (int): The maximum number of items to return.
Returns:
        tuple: (response, metadata) where response is the returned data
and metadata is a dict with the 'number_returned',
'total_matches' and 'update_id' integers
]
variable[response] assign[=] call[name[self].contentDirectory.Browse, parameter[list[[<ast.Tuple object at 0x7da18dc05d50>, <ast.Tuple object at 0x7da18dc06530>, <ast.Tuple object at 0x7da18dc075b0>, <ast.Tuple object at 0x7da18dc051e0>, <ast.Tuple object at 0x7da18dc04cd0>, <ast.Tuple object at 0x7da18dc04e20>]]]]
variable[metadata] assign[=] dictionary[[], []]
for taget[name[tag]] in starred[list[[<ast.Constant object at 0x7da18dc05600>, <ast.Constant object at 0x7da18dc071c0>, <ast.Constant object at 0x7da18dc05b10>]]] begin[:]
call[name[metadata]][call[name[camel_to_underscore], parameter[name[tag]]]] assign[=] call[name[int], parameter[call[name[response]][name[tag]]]]
return[tuple[[<ast.Name object at 0x7da18dc063e0>, <ast.Name object at 0x7da18dc05180>]]] | keyword[def] identifier[_music_lib_search] ( identifier[self] , identifier[search] , identifier[start] , identifier[max_items] ):
literal[string]
identifier[response] = identifier[self] . identifier[contentDirectory] . identifier[Browse] ([
( literal[string] , identifier[search] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , identifier[start] ),
( literal[string] , identifier[max_items] ),
( literal[string] , literal[string] )
])
identifier[metadata] ={}
keyword[for] identifier[tag] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[metadata] [ identifier[camel_to_underscore] ( identifier[tag] )]= identifier[int] ( identifier[response] [ identifier[tag] ])
keyword[return] identifier[response] , identifier[metadata] | def _music_lib_search(self, search, start, max_items):
"""Perform a music library search and extract search numbers.
You can get an overview of all the relevant search prefixes (like
'A:') and their meaning with the request:
.. code ::
response = device.contentDirectory.Browse([
('ObjectID', '0'),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', 0),
('RequestedCount', 100),
('SortCriteria', '')
])
Args:
search (str): The ID to search.
        start (int): The index of the first item to return.
max_items (int): The maximum number of items to return.
Returns:
        tuple: (response, metadata) where response is the returned data
and metadata is a dict with the 'number_returned',
'total_matches' and 'update_id' integers
"""
response = self.contentDirectory.Browse([('ObjectID', search), ('BrowseFlag', 'BrowseDirectChildren'), ('Filter', '*'), ('StartingIndex', start), ('RequestedCount', max_items), ('SortCriteria', '')])
# Get result information
metadata = {}
for tag in ['NumberReturned', 'TotalMatches', 'UpdateID']:
metadata[camel_to_underscore(tag)] = int(response[tag]) # depends on [control=['for'], data=['tag']]
return (response, metadata) |
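A usage sketch, assuming device is the SoCo speaker instance ('A:ARTIST' is the stock artist prefix):
response, meta = device._music_lib_search('A:ARTIST', 0, 20)
print(meta['total_matches'], 'artists;', meta['number_returned'], 'returned')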
def managed(name,
value=None,
host=DEFAULT_HOST,
port=DEFAULT_PORT,
time=DEFAULT_TIME,
min_compress_len=DEFAULT_MIN_COMPRESS_LEN):
'''
Manage a memcached key.
name
The key to manage
value
The value to set for that key
host
The memcached server IP address
port
The memcached server port
.. code-block:: yaml
foo:
memcached.managed:
- value: bar
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
try:
cur = __salt__['memcached.get'](name, host, port)
except CommandExecutionError as exc:
ret['comment'] = six.text_type(exc)
return ret
if cur == value:
ret['result'] = True
ret['comment'] = 'Key \'{0}\' does not need to be updated'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
if cur is None:
ret['comment'] = 'Key \'{0}\' would be added'.format(name)
else:
ret['comment'] = 'Value of key \'{0}\' would be changed'.format(name)
return ret
try:
ret['result'] = __salt__['memcached.set'](
name, value, host, port, time, min_compress_len
)
except (CommandExecutionError, SaltInvocationError) as exc:
ret['comment'] = six.text_type(exc)
else:
if ret['result']:
ret['comment'] = 'Successfully set key \'{0}\''.format(name)
if cur is not None:
ret['changes'] = {'old': cur, 'new': value}
else:
ret['changes'] = {'key added': name, 'value': value}
else:
ret['comment'] = 'Failed to set key \'{0}\''.format(name)
return ret | def function[managed, parameter[name, value, host, port, time, min_compress_len]]:
constant[
Manage a memcached key.
name
The key to manage
value
The value to set for that key
host
The memcached server IP address
port
The memcached server port
.. code-block:: yaml
foo:
memcached.managed:
- value: bar
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da2041da2c0>, <ast.Constant object at 0x7da2041d8fa0>, <ast.Constant object at 0x7da2041da620>, <ast.Constant object at 0x7da2041db4c0>], [<ast.Name object at 0x7da2041db220>, <ast.Dict object at 0x7da2041dbd60>, <ast.Constant object at 0x7da2041da920>, <ast.Constant object at 0x7da2041d9e40>]]
<ast.Try object at 0x7da2041d9db0>
if compare[name[cur] equal[==] name[value]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] call[constant[Key '{0}' does not need to be updated].format, parameter[name[name]]]
return[name[ret]]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
if compare[name[cur] is constant[None]] begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[Key '{0}' would be added].format, parameter[name[name]]]
return[name[ret]]
<ast.Try object at 0x7da2041d85e0>
return[name[ret]] | keyword[def] identifier[managed] ( identifier[name] ,
identifier[value] = keyword[None] ,
identifier[host] = identifier[DEFAULT_HOST] ,
identifier[port] = identifier[DEFAULT_PORT] ,
identifier[time] = identifier[DEFAULT_TIME] ,
identifier[min_compress_len] = identifier[DEFAULT_MIN_COMPRESS_LEN] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[False] ,
literal[string] : literal[string] }
keyword[try] :
identifier[cur] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[host] , identifier[port] )
keyword[except] identifier[CommandExecutionError] keyword[as] identifier[exc] :
identifier[ret] [ literal[string] ]= identifier[six] . identifier[text_type] ( identifier[exc] )
keyword[return] identifier[ret]
keyword[if] identifier[cur] == identifier[value] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret]
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
keyword[if] identifier[cur] keyword[is] keyword[None] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[else] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret]
keyword[try] :
identifier[ret] [ literal[string] ]= identifier[__salt__] [ literal[string] ](
identifier[name] , identifier[value] , identifier[host] , identifier[port] , identifier[time] , identifier[min_compress_len]
)
keyword[except] ( identifier[CommandExecutionError] , identifier[SaltInvocationError] ) keyword[as] identifier[exc] :
identifier[ret] [ literal[string] ]= identifier[six] . identifier[text_type] ( identifier[exc] )
keyword[else] :
keyword[if] identifier[ret] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[if] identifier[cur] keyword[is] keyword[not] keyword[None] :
identifier[ret] [ literal[string] ]={ literal[string] : identifier[cur] , literal[string] : identifier[value] }
keyword[else] :
identifier[ret] [ literal[string] ]={ literal[string] : identifier[name] , literal[string] : identifier[value] }
keyword[else] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret] | def managed(name, value=None, host=DEFAULT_HOST, port=DEFAULT_PORT, time=DEFAULT_TIME, min_compress_len=DEFAULT_MIN_COMPRESS_LEN):
"""
Manage a memcached key.
name
The key to manage
value
The value to set for that key
host
The memcached server IP address
port
The memcached server port
.. code-block:: yaml
foo:
memcached.managed:
- value: bar
"""
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
try:
cur = __salt__['memcached.get'](name, host, port) # depends on [control=['try'], data=[]]
except CommandExecutionError as exc:
ret['comment'] = six.text_type(exc)
return ret # depends on [control=['except'], data=['exc']]
if cur == value:
ret['result'] = True
ret['comment'] = "Key '{0}' does not need to be updated".format(name)
return ret # depends on [control=['if'], data=[]]
if __opts__['test']:
ret['result'] = None
if cur is None:
ret['comment'] = "Key '{0}' would be added".format(name) # depends on [control=['if'], data=[]]
else:
ret['comment'] = "Value of key '{0}' would be changed".format(name)
return ret # depends on [control=['if'], data=[]]
try:
ret['result'] = __salt__['memcached.set'](name, value, host, port, time, min_compress_len) # depends on [control=['try'], data=[]]
except (CommandExecutionError, SaltInvocationError) as exc:
ret['comment'] = six.text_type(exc) # depends on [control=['except'], data=['exc']]
else:
if ret['result']:
ret['comment'] = "Successfully set key '{0}'".format(name)
if cur is not None:
ret['changes'] = {'old': cur, 'new': value} # depends on [control=['if'], data=['cur']]
else:
ret['changes'] = {'key added': name, 'value': value} # depends on [control=['if'], data=[]]
else:
ret['comment'] = "Failed to set key '{0}'".format(name)
return ret |
def user_role(name, rawtext, text, lineno, inliner, options=None, content=None):
"""Sphinx role for linking to a user profile. Defaults to linking to
    GitHub profiles, but the profile URIs can be configured via the
``issues_user_uri`` config value.
Examples: ::
:user:`sloria`
Anchor text also works: ::
:user:`Steven Loria <sloria>`
"""
options = options or {}
content = content or []
has_explicit_title, title, target = split_explicit_title(text)
target = utils.unescape(target).strip()
title = utils.unescape(title).strip()
config = inliner.document.settings.env.app.config
if config.issues_user_uri:
ref = config.issues_user_uri.format(user=target)
else:
ref = "https://github.com/{0}".format(target)
if has_explicit_title:
text = title
else:
text = "@{0}".format(target)
link = nodes.reference(text=text, refuri=ref, **options)
return [link], [] | def function[user_role, parameter[name, rawtext, text, lineno, inliner, options, content]]:
constant[Sphinx role for linking to a user profile. Defaults to linking to
    GitHub profiles, but the profile URIs can be configured via the
``issues_user_uri`` config value.
Examples: ::
:user:`sloria`
Anchor text also works: ::
:user:`Steven Loria <sloria>`
]
variable[options] assign[=] <ast.BoolOp object at 0x7da2043475b0>
variable[content] assign[=] <ast.BoolOp object at 0x7da204346f20>
<ast.Tuple object at 0x7da204347d60> assign[=] call[name[split_explicit_title], parameter[name[text]]]
variable[target] assign[=] call[call[name[utils].unescape, parameter[name[target]]].strip, parameter[]]
variable[title] assign[=] call[call[name[utils].unescape, parameter[name[title]]].strip, parameter[]]
variable[config] assign[=] name[inliner].document.settings.env.app.config
if name[config].issues_user_uri begin[:]
variable[ref] assign[=] call[name[config].issues_user_uri.format, parameter[]]
if name[has_explicit_title] begin[:]
variable[text] assign[=] name[title]
variable[link] assign[=] call[name[nodes].reference, parameter[]]
return[tuple[[<ast.List object at 0x7da204346ce0>, <ast.List object at 0x7da204346170>]]] | keyword[def] identifier[user_role] ( identifier[name] , identifier[rawtext] , identifier[text] , identifier[lineno] , identifier[inliner] , identifier[options] = keyword[None] , identifier[content] = keyword[None] ):
literal[string]
identifier[options] = identifier[options] keyword[or] {}
identifier[content] = identifier[content] keyword[or] []
identifier[has_explicit_title] , identifier[title] , identifier[target] = identifier[split_explicit_title] ( identifier[text] )
identifier[target] = identifier[utils] . identifier[unescape] ( identifier[target] ). identifier[strip] ()
identifier[title] = identifier[utils] . identifier[unescape] ( identifier[title] ). identifier[strip] ()
identifier[config] = identifier[inliner] . identifier[document] . identifier[settings] . identifier[env] . identifier[app] . identifier[config]
keyword[if] identifier[config] . identifier[issues_user_uri] :
identifier[ref] = identifier[config] . identifier[issues_user_uri] . identifier[format] ( identifier[user] = identifier[target] )
keyword[else] :
identifier[ref] = literal[string] . identifier[format] ( identifier[target] )
keyword[if] identifier[has_explicit_title] :
identifier[text] = identifier[title]
keyword[else] :
identifier[text] = literal[string] . identifier[format] ( identifier[target] )
identifier[link] = identifier[nodes] . identifier[reference] ( identifier[text] = identifier[text] , identifier[refuri] = identifier[ref] ,** identifier[options] )
keyword[return] [ identifier[link] ],[] | def user_role(name, rawtext, text, lineno, inliner, options=None, content=None):
"""Sphinx role for linking to a user profile. Defaults to linking to
    GitHub profiles, but the profile URIs can be configured via the
``issues_user_uri`` config value.
Examples: ::
:user:`sloria`
Anchor text also works: ::
:user:`Steven Loria <sloria>`
"""
options = options or {}
content = content or []
(has_explicit_title, title, target) = split_explicit_title(text)
target = utils.unescape(target).strip()
title = utils.unescape(title).strip()
config = inliner.document.settings.env.app.config
if config.issues_user_uri:
ref = config.issues_user_uri.format(user=target) # depends on [control=['if'], data=[]]
else:
ref = 'https://github.com/{0}'.format(target)
if has_explicit_title:
text = title # depends on [control=['if'], data=[]]
else:
text = '@{0}'.format(target)
link = nodes.reference(text=text, refuri=ref, **options)
return ([link], []) |
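Wiring the role up in a Sphinx conf.py (hypothetical self-hosted URI; {user} is filled by the format() call above):
# conf.py
issues_user_uri = 'https://git.example.com/{user}'
# in reST, :user:`sloria` then renders as a link titled '@sloria'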
def query(self, variables, evidence=None, joint=True):
"""
Query method using belief propagation.
Parameters
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
joint: boolean
If True, returns a Joint Distribution over `variables`.
If False, returns a dict of distributions over each of the `variables`.
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.inference import BeliefPropagation
>>> bayesian_model = BayesianModel([('A', 'J'), ('R', 'J'), ('J', 'Q'),
... ('J', 'L'), ('G', 'L')])
>>> cpd_a = TabularCPD('A', 2, [[0.2], [0.8]])
>>> cpd_r = TabularCPD('R', 2, [[0.4], [0.6]])
>>> cpd_j = TabularCPD('J', 2,
... [[0.9, 0.6, 0.7, 0.1],
... [0.1, 0.4, 0.3, 0.9]],
... ['R', 'A'], [2, 2])
>>> cpd_q = TabularCPD('Q', 2,
... [[0.9, 0.2],
... [0.1, 0.8]],
... ['J'], [2])
>>> cpd_l = TabularCPD('L', 2,
... [[0.9, 0.45, 0.8, 0.1],
... [0.1, 0.55, 0.2, 0.9]],
... ['G', 'J'], [2, 2])
>>> cpd_g = TabularCPD('G', 2, [[0.6], [0.4]])
>>> bayesian_model.add_cpds(cpd_a, cpd_r, cpd_j, cpd_q, cpd_l, cpd_g)
>>> belief_propagation = BeliefPropagation(bayesian_model)
>>> belief_propagation.query(variables=['J', 'Q'],
... evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1})
"""
return self._query(variables=variables, operation='marginalize', evidence=evidence, joint=joint) | def function[query, parameter[self, variables, evidence, joint]]:
constant[
Query method using belief propagation.
Parameters
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
joint: boolean
If True, returns a Joint Distribution over `variables`.
If False, returns a dict of distributions over each of the `variables`.
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.inference import BeliefPropagation
>>> bayesian_model = BayesianModel([('A', 'J'), ('R', 'J'), ('J', 'Q'),
... ('J', 'L'), ('G', 'L')])
>>> cpd_a = TabularCPD('A', 2, [[0.2], [0.8]])
>>> cpd_r = TabularCPD('R', 2, [[0.4], [0.6]])
>>> cpd_j = TabularCPD('J', 2,
... [[0.9, 0.6, 0.7, 0.1],
... [0.1, 0.4, 0.3, 0.9]],
... ['R', 'A'], [2, 2])
>>> cpd_q = TabularCPD('Q', 2,
... [[0.9, 0.2],
... [0.1, 0.8]],
... ['J'], [2])
>>> cpd_l = TabularCPD('L', 2,
... [[0.9, 0.45, 0.8, 0.1],
... [0.1, 0.55, 0.2, 0.9]],
... ['G', 'J'], [2, 2])
>>> cpd_g = TabularCPD('G', 2, [[0.6], [0.4]])
>>> bayesian_model.add_cpds(cpd_a, cpd_r, cpd_j, cpd_q, cpd_l, cpd_g)
>>> belief_propagation = BeliefPropagation(bayesian_model)
>>> belief_propagation.query(variables=['J', 'Q'],
... evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1})
]
return[call[name[self]._query, parameter[]]] | keyword[def] identifier[query] ( identifier[self] , identifier[variables] , identifier[evidence] = keyword[None] , identifier[joint] = keyword[True] ):
literal[string]
keyword[return] identifier[self] . identifier[_query] ( identifier[variables] = identifier[variables] , identifier[operation] = literal[string] , identifier[evidence] = identifier[evidence] , identifier[joint] = identifier[joint] ) | def query(self, variables, evidence=None, joint=True):
"""
Query method using belief propagation.
Parameters
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
joint: boolean
If True, returns a Joint Distribution over `variables`.
If False, returns a dict of distributions over each of the `variables`.
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.inference import BeliefPropagation
>>> bayesian_model = BayesianModel([('A', 'J'), ('R', 'J'), ('J', 'Q'),
... ('J', 'L'), ('G', 'L')])
>>> cpd_a = TabularCPD('A', 2, [[0.2], [0.8]])
>>> cpd_r = TabularCPD('R', 2, [[0.4], [0.6]])
>>> cpd_j = TabularCPD('J', 2,
... [[0.9, 0.6, 0.7, 0.1],
... [0.1, 0.4, 0.3, 0.9]],
... ['R', 'A'], [2, 2])
>>> cpd_q = TabularCPD('Q', 2,
... [[0.9, 0.2],
... [0.1, 0.8]],
... ['J'], [2])
>>> cpd_l = TabularCPD('L', 2,
... [[0.9, 0.45, 0.8, 0.1],
... [0.1, 0.55, 0.2, 0.9]],
... ['G', 'J'], [2, 2])
>>> cpd_g = TabularCPD('G', 2, [[0.6], [0.4]])
>>> bayesian_model.add_cpds(cpd_a, cpd_r, cpd_j, cpd_q, cpd_l, cpd_g)
>>> belief_propagation = BeliefPropagation(bayesian_model)
>>> belief_propagation.query(variables=['J', 'Q'],
... evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1})
"""
return self._query(variables=variables, operation='marginalize', evidence=evidence, joint=joint) |
def _set_bundle_message(self, v, load=False):
"""
Setter method for bundle_message, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/rsvp/interface_refresh_reduction/bundle_message (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bundle_message is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bundle_message() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=bundle_message.bundle_message, is_container='container', presence=True, yang_name="bundle-message", rest_name="bundle-message", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Refresh Reduction bundle messaging feature', u'alt-name': u'bundle-message'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """bundle_message must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=bundle_message.bundle_message, is_container='container', presence=True, yang_name="bundle-message", rest_name="bundle-message", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Refresh Reduction bundle messaging feature', u'alt-name': u'bundle-message'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__bundle_message = t
if hasattr(self, '_set'):
self._set() | def function[_set_bundle_message, parameter[self, v, load]]:
constant[
Setter method for bundle_message, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/rsvp/interface_refresh_reduction/bundle_message (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bundle_message is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bundle_message() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18f721330>
name[self].__bundle_message assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_bundle_message] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[bundle_message] . identifier[bundle_message] , identifier[is_container] = literal[string] , identifier[presence] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__bundle_message] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_bundle_message(self, v, load=False):
"""
Setter method for bundle_message, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/rsvp/interface_refresh_reduction/bundle_message (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bundle_message is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bundle_message() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=bundle_message.bundle_message, is_container='container', presence=True, yang_name='bundle-message', rest_name='bundle-message', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Refresh Reduction bundle messaging feature', u'alt-name': u'bundle-message'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'bundle_message must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=bundle_message.bundle_message, is_container=\'container\', presence=True, yang_name="bundle-message", rest_name="bundle-message", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Refresh Reduction bundle messaging feature\', u\'alt-name\': u\'bundle-message\'}}, namespace=\'urn:brocade.com:mgmt:brocade-mpls\', defining_module=\'brocade-mpls\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__bundle_message = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def update_rwalk(self, blob):
"""Update the random walk proposal scale based on the current
number of accepted/rejected steps."""
self.scale = blob['scale']
accept, reject = blob['accept'], blob['reject']
facc = (1. * accept) / (accept + reject)
norm = max(self.facc, 1. - self.facc) * self.npdim
self.scale *= math.exp((facc - self.facc) / norm)
self.scale = min(self.scale, math.sqrt(self.npdim)) | def function[update_rwalk, parameter[self, blob]]:
constant[Update the random walk proposal scale based on the current
number of accepted/rejected steps.]
name[self].scale assign[=] call[name[blob]][constant[scale]]
<ast.Tuple object at 0x7da1b1ecf070> assign[=] tuple[[<ast.Subscript object at 0x7da1b1ecf130>, <ast.Subscript object at 0x7da1b1ecead0>]]
variable[facc] assign[=] binary_operation[binary_operation[constant[1.0] * name[accept]] / binary_operation[name[accept] + name[reject]]]
variable[norm] assign[=] binary_operation[call[name[max], parameter[name[self].facc, binary_operation[constant[1.0] - name[self].facc]]] * name[self].npdim]
<ast.AugAssign object at 0x7da1b1ece560>
name[self].scale assign[=] call[name[min], parameter[name[self].scale, call[name[math].sqrt, parameter[name[self].npdim]]]] | keyword[def] identifier[update_rwalk] ( identifier[self] , identifier[blob] ):
literal[string]
identifier[self] . identifier[scale] = identifier[blob] [ literal[string] ]
identifier[accept] , identifier[reject] = identifier[blob] [ literal[string] ], identifier[blob] [ literal[string] ]
identifier[facc] =( literal[int] * identifier[accept] )/( identifier[accept] + identifier[reject] )
identifier[norm] = identifier[max] ( identifier[self] . identifier[facc] , literal[int] - identifier[self] . identifier[facc] )* identifier[self] . identifier[npdim]
identifier[self] . identifier[scale] *= identifier[math] . identifier[exp] (( identifier[facc] - identifier[self] . identifier[facc] )/ identifier[norm] )
identifier[self] . identifier[scale] = identifier[min] ( identifier[self] . identifier[scale] , identifier[math] . identifier[sqrt] ( identifier[self] . identifier[npdim] )) | def update_rwalk(self, blob):
"""Update the random walk proposal scale based on the current
number of accepted/rejected steps."""
self.scale = blob['scale']
(accept, reject) = (blob['accept'], blob['reject'])
facc = 1.0 * accept / (accept + reject)
norm = max(self.facc, 1.0 - self.facc) * self.npdim
self.scale *= math.exp((facc - self.facc) / norm)
self.scale = min(self.scale, math.sqrt(self.npdim)) |
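# Worked example of the scale update above with illustrative numbers; the
# target acceptance fraction (self.facc) of 0.5 and npdim of 4 are assumptions.
import math
accept, reject, facc_target, npdim, scale = 60, 40, 0.5, 4, 1.0
facc = (1. * accept) / (accept + reject)            # 0.6: accepting too often
norm = max(facc_target, 1. - facc_target) * npdim   # 2.0
scale *= math.exp((facc - facc_target) / norm)      # ~1.051, widen the walk
scale = min(scale, math.sqrt(npdim))                # capped at 2.0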
def patch(nml_path, nml_patch, out_path=None):
"""Create a new namelist based on an input namelist and reference dict.
>>> f90nml.patch('data.nml', nml_patch, 'patched_data.nml')
This function is equivalent to the ``read`` function of the ``Parser``
object with the patch output arguments.
>>> parser = f90nml.Parser()
>>> nml = parser.read('data.nml', nml_patch, 'patched_data.nml')
A patched namelist file will retain any formatting or comments from the
original namelist file. Any modified values will be formatted based on the
settings of the ``Namelist`` object.
"""
parser = Parser()
return parser.read(nml_path, nml_patch, out_path) | def function[patch, parameter[nml_path, nml_patch, out_path]]:
constant[Create a new namelist based on an input namelist and reference dict.
>>> f90nml.patch('data.nml', nml_patch, 'patched_data.nml')
This function is equivalent to the ``read`` function of the ``Parser``
object with the patch output arguments.
>>> parser = f90nml.Parser()
>>> nml = parser.read('data.nml', nml_patch, 'patched_data.nml')
A patched namelist file will retain any formatting or comments from the
original namelist file. Any modified values will be formatted based on the
settings of the ``Namelist`` object.
]
variable[parser] assign[=] call[name[Parser], parameter[]]
return[call[name[parser].read, parameter[name[nml_path], name[nml_patch], name[out_path]]]] | keyword[def] identifier[patch] ( identifier[nml_path] , identifier[nml_patch] , identifier[out_path] = keyword[None] ):
literal[string]
identifier[parser] = identifier[Parser] ()
keyword[return] identifier[parser] . identifier[read] ( identifier[nml_path] , identifier[nml_patch] , identifier[out_path] ) | def patch(nml_path, nml_patch, out_path=None):
"""Create a new namelist based on an input namelist and reference dict.
>>> f90nml.patch('data.nml', nml_patch, 'patched_data.nml')
This function is equivalent to the ``read`` function of the ``Parser``
object with the patch output arguments.
>>> parser = f90nml.Parser()
>>> nml = parser.read('data.nml', nml_patch, 'patched_data.nml')
A patched namelist file will retain any formatting or comments from the
original namelist file. Any modified values will be formatted based on the
settings of the ``Namelist`` object.
"""
parser = Parser()
return parser.read(nml_path, nml_patch, out_path) |
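# A sketch of what the reference dict passed as nml_patch might look like;
# the namelist group and variable names here are hypothetical.
import f90nml
nml_patch = {'config_nml': {'steps': 432, 'dt': 0.25}}
f90nml.patch('data.nml', nml_patch, 'patched_data.nml')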
def update_user_type(self):
"""Return either 'tutor' or 'student' based on which radio
button is selected.
"""
if self.rb_tutor.isChecked():
self.user_type = 'tutor'
elif self.rb_student.isChecked():
self.user_type = 'student'
self.accept() | def function[update_user_type, parameter[self]]:
constant[Return either 'tutor' or 'student' based on which radio
button is selected.
]
if call[name[self].rb_tutor.isChecked, parameter[]] begin[:]
name[self].user_type assign[=] constant[tutor]
call[name[self].accept, parameter[]] | keyword[def] identifier[update_user_type] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[rb_tutor] . identifier[isChecked] ():
identifier[self] . identifier[user_type] = literal[string]
keyword[elif] identifier[self] . identifier[rb_student] . identifier[isChecked] ():
identifier[self] . identifier[user_type] = literal[string]
identifier[self] . identifier[accept] () | def update_user_type(self):
"""Return either 'tutor' or 'student' based on which radio
button is selected.
"""
if self.rb_tutor.isChecked():
self.user_type = 'tutor' # depends on [control=['if'], data=[]]
elif self.rb_student.isChecked():
self.user_type = 'student' # depends on [control=['if'], data=[]]
self.accept() |
def present(name, provider):
'''
    Ensure the Rackspace queue exists.
name
Name of the Rackspace queue.
provider
Salt Cloud Provider
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
is_present = list(__salt__['cloud.action']('queues_exists', provider=provider, name=name)[provider].values())[0]
if not is_present:
if __opts__['test']:
msg = 'Rackspace queue {0} is set to be created.'.format(name)
ret['comment'] = msg
ret['result'] = None
return ret
created = __salt__['cloud.action']('queues_create', provider=provider, name=name)
if created:
queue = __salt__['cloud.action']('queues_show', provider=provider, name=name)
ret['changes']['old'] = {}
ret['changes']['new'] = {'queue': queue}
else:
ret['result'] = False
ret['comment'] = 'Failed to create {0} Rackspace queue.'.format(name)
return ret
else:
ret['comment'] = '{0} present.'.format(name)
return ret | def function[present, parameter[name, provider]]:
constant[
    Ensure the Rackspace queue exists.
name
Name of the Rackspace queue.
provider
Salt Cloud Provider
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c23460>, <ast.Constant object at 0x7da1b1c223e0>, <ast.Constant object at 0x7da1b1c235b0>, <ast.Constant object at 0x7da1b1c22260>], [<ast.Name object at 0x7da1b1c20370>, <ast.Constant object at 0x7da1b1c230d0>, <ast.Constant object at 0x7da1b1c22d40>, <ast.Dict object at 0x7da1b1c232b0>]]
variable[is_present] assign[=] call[call[name[list], parameter[call[call[call[call[name[__salt__]][constant[cloud.action]], parameter[constant[queues_exists]]]][name[provider]].values, parameter[]]]]][constant[0]]
if <ast.UnaryOp object at 0x7da1b1c22200> begin[:]
if call[name[__opts__]][constant[test]] begin[:]
variable[msg] assign[=] call[constant[Rackspace queue {0} is set to be created.].format, parameter[name[name]]]
call[name[ret]][constant[comment]] assign[=] name[msg]
call[name[ret]][constant[result]] assign[=] constant[None]
return[name[ret]]
variable[created] assign[=] call[call[name[__salt__]][constant[cloud.action]], parameter[constant[queues_create]]]
if name[created] begin[:]
variable[queue] assign[=] call[call[name[__salt__]][constant[cloud.action]], parameter[constant[queues_show]]]
call[call[name[ret]][constant[changes]]][constant[old]] assign[=] dictionary[[], []]
call[call[name[ret]][constant[changes]]][constant[new]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c23310>], [<ast.Name object at 0x7da1b1c21900>]]
return[name[ret]] | keyword[def] identifier[present] ( identifier[name] , identifier[provider] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] , literal[string] : keyword[True] , literal[string] : literal[string] , literal[string] :{}}
identifier[is_present] = identifier[list] ( identifier[__salt__] [ literal[string] ]( literal[string] , identifier[provider] = identifier[provider] , identifier[name] = identifier[name] )[ identifier[provider] ]. identifier[values] ())[ literal[int] ]
keyword[if] keyword[not] identifier[is_present] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[msg] = literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ]= identifier[msg]
identifier[ret] [ literal[string] ]= keyword[None]
keyword[return] identifier[ret]
identifier[created] = identifier[__salt__] [ literal[string] ]( literal[string] , identifier[provider] = identifier[provider] , identifier[name] = identifier[name] )
keyword[if] identifier[created] :
identifier[queue] = identifier[__salt__] [ literal[string] ]( literal[string] , identifier[provider] = identifier[provider] , identifier[name] = identifier[name] )
identifier[ret] [ literal[string] ][ literal[string] ]={}
identifier[ret] [ literal[string] ][ literal[string] ]={ literal[string] : identifier[queue] }
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret]
keyword[else] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret] | def present(name, provider):
"""
    Ensure the Rackspace queue exists.
name
Name of the Rackspace queue.
provider
Salt Cloud Provider
"""
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
is_present = list(__salt__['cloud.action']('queues_exists', provider=provider, name=name)[provider].values())[0]
if not is_present:
if __opts__['test']:
msg = 'Rackspace queue {0} is set to be created.'.format(name)
ret['comment'] = msg
ret['result'] = None
return ret # depends on [control=['if'], data=[]]
created = __salt__['cloud.action']('queues_create', provider=provider, name=name)
if created:
queue = __salt__['cloud.action']('queues_show', provider=provider, name=name)
ret['changes']['old'] = {}
ret['changes']['new'] = {'queue': queue} # depends on [control=['if'], data=[]]
else:
ret['result'] = False
ret['comment'] = 'Failed to create {0} Rackspace queue.'.format(name)
return ret # depends on [control=['if'], data=[]]
else:
ret['comment'] = '{0} present.'.format(name)
return ret |
def get_file(self, **kwargs):
"""
Return the FSEntry that matches parameters.
:param str file_uuid: UUID of the target FSEntry.
:param str label: structMap LABEL of the target FSEntry.
:param str type: structMap TYPE of the target FSEntry.
:returns: :class:`FSEntry` that matches parameters, or None.
"""
# TODO put this in a sqlite DB so it can be queried efficiently
# TODO handle multiple matches (with DB?)
# TODO check that kwargs are actual attrs
for entry in self.all_files():
if all(value == getattr(entry, key) for key, value in kwargs.items()):
return entry
return None | def function[get_file, parameter[self]]:
constant[
Return the FSEntry that matches parameters.
:param str file_uuid: UUID of the target FSEntry.
:param str label: structMap LABEL of the target FSEntry.
:param str type: structMap TYPE of the target FSEntry.
:returns: :class:`FSEntry` that matches parameters, or None.
]
for taget[name[entry]] in starred[call[name[self].all_files, parameter[]]] begin[:]
if call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b1b7f1f0>]] begin[:]
return[name[entry]]
return[constant[None]] | keyword[def] identifier[get_file] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[for] identifier[entry] keyword[in] identifier[self] . identifier[all_files] ():
keyword[if] identifier[all] ( identifier[value] == identifier[getattr] ( identifier[entry] , identifier[key] ) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[kwargs] . identifier[items] ()):
keyword[return] identifier[entry]
keyword[return] keyword[None] | def get_file(self, **kwargs):
"""
Return the FSEntry that matches parameters.
:param str file_uuid: UUID of the target FSEntry.
:param str label: structMap LABEL of the target FSEntry.
:param str type: structMap TYPE of the target FSEntry.
:returns: :class:`FSEntry` that matches parameters, or None.
"""
# TODO put this in a sqlite DB so it can be queried efficiently
# TODO handle multiple matches (with DB?)
# TODO check that kwargs are actual attrs
for entry in self.all_files():
if all((value == getattr(entry, key) for (key, value) in kwargs.items())):
return entry # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entry']]
return None |
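# Usage sketch for the linear lookup above, assuming ``mets`` is an instance
# of the containing document class; the attribute values shown are made up.
entry = mets.get_file(file_uuid='2c2f5151-8b8f-4b2c-9e3d-1f64c0b0a2b7')
directory = mets.get_file(label='objects', type='Directory')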
def count_jobs_to_dequeue(self):
""" Returns the number of jobs that can be dequeued right now from the queue. """
# timed ZSET
if self.is_timed:
return context.connections.redis.zcount(
self.redis_key,
"-inf",
time.time())
# In all other cases, it's the same as .size()
else:
return self.size() | def function[count_jobs_to_dequeue, parameter[self]]:
constant[ Returns the number of jobs that can be dequeued right now from the queue. ]
if name[self].is_timed begin[:]
return[call[name[context].connections.redis.zcount, parameter[name[self].redis_key, constant[-inf], call[name[time].time, parameter[]]]]] | keyword[def] identifier[count_jobs_to_dequeue] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[is_timed] :
keyword[return] identifier[context] . identifier[connections] . identifier[redis] . identifier[zcount] (
identifier[self] . identifier[redis_key] ,
literal[string] ,
identifier[time] . identifier[time] ())
keyword[else] :
keyword[return] identifier[self] . identifier[size] () | def count_jobs_to_dequeue(self):
""" Returns the number of jobs that can be dequeued right now from the queue. """
# timed ZSET
if self.is_timed:
return context.connections.redis.zcount(self.redis_key, '-inf', time.time()) # depends on [control=['if'], data=[]]
else:
# In all other cases, it's the same as .size()
return self.size() |
def get_storage_location():
""" get the portal with the plone.api
"""
location = api.portal.getSite()
if location.get('bika_setup', False):
location = location['bika_setup']
return location | def function[get_storage_location, parameter[]]:
    constant[ get the portal via plone.api, preferring its bika_setup folder when present
    ]
variable[location] assign[=] call[name[api].portal.getSite, parameter[]]
if call[name[location].get, parameter[constant[bika_setup], constant[False]]] begin[:]
variable[location] assign[=] call[name[location]][constant[bika_setup]]
return[name[location]] | keyword[def] identifier[get_storage_location] ():
literal[string]
identifier[location] = identifier[api] . identifier[portal] . identifier[getSite] ()
keyword[if] identifier[location] . identifier[get] ( literal[string] , keyword[False] ):
identifier[location] = identifier[location] [ literal[string] ]
keyword[return] identifier[location] | def get_storage_location():
""" get the portal with the plone.api
"""
location = api.portal.getSite()
if location.get('bika_setup', False):
location = location['bika_setup'] # depends on [control=['if'], data=[]]
return location |
def H13(self):
"Information measure of correlation 2."
# An imaginary result has been encountered once in the Matlab
# version. The reason is unclear.
return np.sqrt(1 - np.exp(-2 * (self.hxy2 - self.H9()))) | def function[H13, parameter[self]]:
constant[Information measure of correlation 2.]
return[call[name[np].sqrt, parameter[binary_operation[constant[1] - call[name[np].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da20c6c6b60> * binary_operation[name[self].hxy2 - call[name[self].H9, parameter[]]]]]]]]]] | keyword[def] identifier[H13] ( identifier[self] ):
literal[string]
keyword[return] identifier[np] . identifier[sqrt] ( literal[int] - identifier[np] . identifier[exp] (- literal[int] *( identifier[self] . identifier[hxy2] - identifier[self] . identifier[H9] ()))) | def H13(self):
"""Information measure of correlation 2."""
# An imaginary result has been encountered once in the Matlab
# version. The reason is unclear.
return np.sqrt(1 - np.exp(-2 * (self.hxy2 - self.H9()))) |
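# Numeric sketch of the H13 formula above with hypothetical entropies
# hxy2 = 2.5 and H9() = 2.0: sqrt(1 - exp(-2 * 0.5)) is roughly 0.795.
import numpy as np
hxy2, hxy = 2.5, 2.0
h13 = np.sqrt(1 - np.exp(-2 * (hxy2 - hxy)))  # ~0.7951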
def symmetric_difference(self, sig: Scope) -> Scope:
""" Create a new Set with values present in only one Set """
new = Scope(sig=self._hsig.values(), state=self.state)
new ^= sig
return new | def function[symmetric_difference, parameter[self, sig]]:
constant[ Create a new Set with values present in only one Set ]
variable[new] assign[=] call[name[Scope], parameter[]]
<ast.AugAssign object at 0x7da1b013d030>
return[name[new]] | keyword[def] identifier[symmetric_difference] ( identifier[self] , identifier[sig] : identifier[Scope] )-> identifier[Scope] :
literal[string]
identifier[new] = identifier[Scope] ( identifier[sig] = identifier[self] . identifier[_hsig] . identifier[values] (), identifier[state] = identifier[self] . identifier[state] )
identifier[new] ^= identifier[sig]
keyword[return] identifier[new] | def symmetric_difference(self, sig: Scope) -> Scope:
""" Create a new Set with values present in only one Set """
new = Scope(sig=self._hsig.values(), state=self.state)
new ^= sig
return new |
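# The set semantics behind symmetric_difference, shown with builtin sets;
# the Scope construction details are elided here.
a, b = {'x', 'y'}, {'y', 'z'}
assert a ^ b == {'x', 'z'}  # values present in exactly one of the two sets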
def implements(*interfaces):
"""Can be used in the class definition of `Component`
subclasses to declare the extension points that are extended.
"""
import sys
frame = sys._getframe(1)
locals_ = frame.f_locals
# Some sanity checks
msg = 'implements() can only be used in a class definition'
assert locals_ is not frame.f_globals and '__module__' in locals_, msg
locals_.setdefault('_implements', []).extend(interfaces) | def function[implements, parameter[]]:
constant[Can be used in the class definition of `Component`
subclasses to declare the extension points that are extended.
]
import module[sys]
variable[frame] assign[=] call[name[sys]._getframe, parameter[constant[1]]]
variable[locals_] assign[=] name[frame].f_locals
variable[msg] assign[=] constant[implements() can only be used in a class definition]
assert[<ast.BoolOp object at 0x7da1b15be950>]
call[call[name[locals_].setdefault, parameter[constant[_implements], list[[]]]].extend, parameter[name[interfaces]]] | keyword[def] identifier[implements] (* identifier[interfaces] ):
literal[string]
keyword[import] identifier[sys]
identifier[frame] = identifier[sys] . identifier[_getframe] ( literal[int] )
identifier[locals_] = identifier[frame] . identifier[f_locals]
identifier[msg] = literal[string]
keyword[assert] identifier[locals_] keyword[is] keyword[not] identifier[frame] . identifier[f_globals] keyword[and] literal[string] keyword[in] identifier[locals_] , identifier[msg]
identifier[locals_] . identifier[setdefault] ( literal[string] ,[]). identifier[extend] ( identifier[interfaces] ) | def implements(*interfaces):
"""Can be used in the class definition of `Component`
subclasses to declare the extension points that are extended.
"""
import sys
frame = sys._getframe(1)
locals_ = frame.f_locals
# Some sanity checks
msg = 'implements() can only be used in a class definition'
assert locals_ is not frame.f_globals and '__module__' in locals_, msg
locals_.setdefault('_implements', []).extend(interfaces) |
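# Usage sketch for the class-body hack above; Component and the interface
# classes are stand-ins for whatever the host framework actually defines.
class IRequestHandler: pass
class ITemplateProvider: pass
class Component: pass
class TicketModule(Component):
    implements(IRequestHandler, ITemplateProvider)
assert TicketModule._implements == [IRequestHandler, ITemplateProvider]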
def _leastUsedCell(cls, random, cells, connections):
"""
Gets the cell with the smallest number of segments.
Break ties randomly.
:param random: (Object)
Random number generator. Gets mutated.
:param cells: (list)
Indices of cells.
:param connections: (Object)
Connections instance for the TM.
:returns: (int) Cell index.
"""
leastUsedCells = []
minNumSegments = float("inf")
for cell in cells:
numSegments = connections.numSegments(cell)
if numSegments < minNumSegments:
minNumSegments = numSegments
leastUsedCells = []
if numSegments == minNumSegments:
leastUsedCells.append(cell)
i = random.getUInt32(len(leastUsedCells))
return leastUsedCells[i] | def function[_leastUsedCell, parameter[cls, random, cells, connections]]:
constant[
Gets the cell with the smallest number of segments.
Break ties randomly.
:param random: (Object)
Random number generator. Gets mutated.
:param cells: (list)
Indices of cells.
:param connections: (Object)
Connections instance for the TM.
:returns: (int) Cell index.
]
variable[leastUsedCells] assign[=] list[[]]
variable[minNumSegments] assign[=] call[name[float], parameter[constant[inf]]]
for taget[name[cell]] in starred[name[cells]] begin[:]
variable[numSegments] assign[=] call[name[connections].numSegments, parameter[name[cell]]]
if compare[name[numSegments] less[<] name[minNumSegments]] begin[:]
variable[minNumSegments] assign[=] name[numSegments]
variable[leastUsedCells] assign[=] list[[]]
if compare[name[numSegments] equal[==] name[minNumSegments]] begin[:]
call[name[leastUsedCells].append, parameter[name[cell]]]
variable[i] assign[=] call[name[random].getUInt32, parameter[call[name[len], parameter[name[leastUsedCells]]]]]
return[call[name[leastUsedCells]][name[i]]] | keyword[def] identifier[_leastUsedCell] ( identifier[cls] , identifier[random] , identifier[cells] , identifier[connections] ):
literal[string]
identifier[leastUsedCells] =[]
identifier[minNumSegments] = identifier[float] ( literal[string] )
keyword[for] identifier[cell] keyword[in] identifier[cells] :
identifier[numSegments] = identifier[connections] . identifier[numSegments] ( identifier[cell] )
keyword[if] identifier[numSegments] < identifier[minNumSegments] :
identifier[minNumSegments] = identifier[numSegments]
identifier[leastUsedCells] =[]
keyword[if] identifier[numSegments] == identifier[minNumSegments] :
identifier[leastUsedCells] . identifier[append] ( identifier[cell] )
identifier[i] = identifier[random] . identifier[getUInt32] ( identifier[len] ( identifier[leastUsedCells] ))
keyword[return] identifier[leastUsedCells] [ identifier[i] ] | def _leastUsedCell(cls, random, cells, connections):
"""
Gets the cell with the smallest number of segments.
Break ties randomly.
:param random: (Object)
Random number generator. Gets mutated.
:param cells: (list)
Indices of cells.
:param connections: (Object)
Connections instance for the TM.
:returns: (int) Cell index.
"""
leastUsedCells = []
minNumSegments = float('inf')
for cell in cells:
numSegments = connections.numSegments(cell)
if numSegments < minNumSegments:
minNumSegments = numSegments
leastUsedCells = [] # depends on [control=['if'], data=['numSegments', 'minNumSegments']]
if numSegments == minNumSegments:
leastUsedCells.append(cell) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cell']]
i = random.getUInt32(len(leastUsedCells))
return leastUsedCells[i] |
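# A plain-Python sketch of the tie-breaking logic above, with the TM's
# Connections object and RNG replaced by a dict and the stdlib random module.
import random
def least_used(cells, segment_counts):
    fewest = min(segment_counts[c] for c in cells)
    return random.choice([c for c in cells if segment_counts[c] == fewest])
least_used([0, 1, 2], {0: 2, 1: 1, 2: 1})  # returns 1 or 2, chosen at random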
def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result | def function[primeSieve, parameter[k]]:
    constant[return a list of length k + 1 where list[i] == 1 means i is prime,
    list[i] == 0 means i is composite, and list[i] == -1 means undefined (i < 2)]
    def function[isPrime, parameter[n]]:
        constant[return True if the given number n is prime,
        return False otherwise.]
for taget[name[i]] in starred[call[name[range], parameter[constant[2], binary_operation[call[name[int], parameter[binary_operation[name[n] ** constant[0.5]]]] + constant[1]]]]] begin[:]
if compare[binary_operation[name[n] <ast.Mod object at 0x7da2590d6920> name[i]] equal[==] constant[0]] begin[:]
return[constant[False]]
return[constant[True]]
variable[result] assign[=] binary_operation[list[[<ast.UnaryOp object at 0x7da1b26ad120>]] * binary_operation[name[k] + constant[1]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[2], call[name[int], parameter[binary_operation[name[k] + constant[1]]]]]]] begin[:]
if call[name[isPrime], parameter[name[i]]] begin[:]
call[name[result]][name[i]] assign[=] constant[1]
return[name[result]] | keyword[def] identifier[primeSieve] ( identifier[k] ):
literal[string]
keyword[def] identifier[isPrime] ( identifier[n] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[int] ( identifier[n] ** literal[int] )+ literal[int] ):
keyword[if] identifier[n] % identifier[i] == literal[int] :
keyword[return] keyword[False]
keyword[return] keyword[True]
identifier[result] =[- literal[int] ]*( identifier[k] + literal[int] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[int] ( identifier[k] + literal[int] )):
keyword[if] identifier[isPrime] ( identifier[i] ):
identifier[result] [ identifier[i] ]= literal[int]
keyword[else] :
identifier[result] [ identifier[i] ]= literal[int]
keyword[return] identifier[result] | def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1 # depends on [control=['if'], data=[]]
else:
result[i] = 0 # depends on [control=['for'], data=['i']]
return result |
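# Sanity check of the output convention (indices 0 and 1 stay -1):
assert primeSieve(10) == [-1, -1, 1, 1, 0, 1, 0, 1, 0, 0, 0]
# Despite its name, the function above tests each i by trial division; a
# sketch of the classic Sieve of Eratosthenes with the same output convention:
def primeSieveFast(k):
    result = [-1, -1] + [1] * (k - 1)
    for i in range(2, int(k ** 0.5) + 1):
        if result[i] == 1:
            for j in range(i * i, k + 1, i):
                result[j] = 0
    return result
assert primeSieveFast(10) == primeSieve(10)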
def get_from_geo(self, lat, lng, distance, skip_cache=False):
"""
Calls `postcodes.get_from_geo` but checks the correctness of
all arguments, and by default utilises a local cache.
:param skip_cache: optional argument specifying whether to skip
the cache and make an explicit request.
:raises IllegalPointException: if the latitude or longitude
are out of bounds.
:returns: a list of dicts containing postcode data within the
specified distance.
"""
        # coerce the arguments to floats up front, since results are cached by value
lat, lng, distance = float(lat), float(lng), float(distance)
if distance < 0:
raise IllegalDistanceException("Distance must not be negative")
self._check_point(lat, lng)
return self._lookup(skip_cache, get_from_geo, lat, lng, distance) | def function[get_from_geo, parameter[self, lat, lng, distance, skip_cache]]:
constant[
Calls `postcodes.get_from_geo` but checks the correctness of
all arguments, and by default utilises a local cache.
:param skip_cache: optional argument specifying whether to skip
the cache and make an explicit request.
:raises IllegalPointException: if the latitude or longitude
are out of bounds.
:returns: a list of dicts containing postcode data within the
specified distance.
]
<ast.Tuple object at 0x7da1b2411360> assign[=] tuple[[<ast.Call object at 0x7da1b2412a70>, <ast.Call object at 0x7da1b2413310>, <ast.Call object at 0x7da1b2412fe0>]]
if compare[name[distance] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da18f00f0a0>
call[name[self]._check_point, parameter[name[lat], name[lng]]]
return[call[name[self]._lookup, parameter[name[skip_cache], name[get_from_geo], name[lat], name[lng], name[distance]]]] | keyword[def] identifier[get_from_geo] ( identifier[self] , identifier[lat] , identifier[lng] , identifier[distance] , identifier[skip_cache] = keyword[False] ):
literal[string]
identifier[lat] , identifier[lng] , identifier[distance] = identifier[float] ( identifier[lat] ), identifier[float] ( identifier[lng] ), identifier[float] ( identifier[distance] )
keyword[if] identifier[distance] < literal[int] :
keyword[raise] identifier[IllegalDistanceException] ( literal[string] )
identifier[self] . identifier[_check_point] ( identifier[lat] , identifier[lng] )
keyword[return] identifier[self] . identifier[_lookup] ( identifier[skip_cache] , identifier[get_from_geo] , identifier[lat] , identifier[lng] , identifier[distance] ) | def get_from_geo(self, lat, lng, distance, skip_cache=False):
"""
Calls `postcodes.get_from_geo` but checks the correctness of
all arguments, and by default utilises a local cache.
:param skip_cache: optional argument specifying whether to skip
the cache and make an explicit request.
:raises IllegalPointException: if the latitude or longitude
are out of bounds.
:returns: a list of dicts containing postcode data within the
specified distance.
"""
        # coerce the arguments to floats up front, since results are cached by value
(lat, lng, distance) = (float(lat), float(lng), float(distance))
if distance < 0:
raise IllegalDistanceException('Distance must not be negative') # depends on [control=['if'], data=[]]
self._check_point(lat, lng)
return self._lookup(skip_cache, get_from_geo, lat, lng, distance) |
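# Usage sketch for the cached geographic lookup above; the wrapper class name
# (PostCoder) and the coordinate/distance values are assumptions.
pc = PostCoder()
for postcode in pc.get_from_geo(51.5073, -0.1276, 0.5) or []:
    print(postcode)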
def ParseNotificationcenterRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a message row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = MacNotificationCenterEventData()
event_data.bundle_name = self._GetRowValue(query_hash, row, 'bundle_name')
event_data.presented = self._GetRowValue(query_hash, row, 'presented')
blob = self._GetRowValue(query_hash, row, 'dataBlob')
try:
full_biplist = biplist.readPlistFromString(blob)
# req is the 'req' dictionary from the plist containing extra information
# about the notification entry.
req = full_biplist['req']
except (biplist.InvalidPlistException, KeyError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to read plist from database with error: {0!s}'.format(
exception))
return
event_data.title = req.get('titl', None)
event_data.subtitle = req.get('subt', None)
event_data.body = req.get('body', None)
timestamp = self._GetRowValue(query_hash, row, 'timestamp')
date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data) | def function[ParseNotificationcenterRow, parameter[self, parser_mediator, query, row]]:
constant[Parses a message row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
]
variable[query_hash] assign[=] call[name[hash], parameter[name[query]]]
variable[event_data] assign[=] call[name[MacNotificationCenterEventData], parameter[]]
name[event_data].bundle_name assign[=] call[name[self]._GetRowValue, parameter[name[query_hash], name[row], constant[bundle_name]]]
name[event_data].presented assign[=] call[name[self]._GetRowValue, parameter[name[query_hash], name[row], constant[presented]]]
variable[blob] assign[=] call[name[self]._GetRowValue, parameter[name[query_hash], name[row], constant[dataBlob]]]
<ast.Try object at 0x7da20cabf130>
name[event_data].title assign[=] call[name[req].get, parameter[constant[titl], constant[None]]]
name[event_data].subtitle assign[=] call[name[req].get, parameter[constant[subt], constant[None]]]
name[event_data].body assign[=] call[name[req].get, parameter[constant[body], constant[None]]]
variable[timestamp] assign[=] call[name[self]._GetRowValue, parameter[name[query_hash], name[row], constant[timestamp]]]
variable[date_time] assign[=] call[name[dfdatetime_cocoa_time].CocoaTime, parameter[]]
variable[event] assign[=] call[name[time_events].DateTimeValuesEvent, parameter[name[date_time], name[definitions].TIME_DESCRIPTION_CREATION]]
call[name[parser_mediator].ProduceEventWithEventData, parameter[name[event], name[event_data]]] | keyword[def] identifier[ParseNotificationcenterRow] (
identifier[self] , identifier[parser_mediator] , identifier[query] , identifier[row] ,** identifier[unused_kwargs] ):
literal[string]
identifier[query_hash] = identifier[hash] ( identifier[query] )
identifier[event_data] = identifier[MacNotificationCenterEventData] ()
identifier[event_data] . identifier[bundle_name] = identifier[self] . identifier[_GetRowValue] ( identifier[query_hash] , identifier[row] , literal[string] )
identifier[event_data] . identifier[presented] = identifier[self] . identifier[_GetRowValue] ( identifier[query_hash] , identifier[row] , literal[string] )
identifier[blob] = identifier[self] . identifier[_GetRowValue] ( identifier[query_hash] , identifier[row] , literal[string] )
keyword[try] :
identifier[full_biplist] = identifier[biplist] . identifier[readPlistFromString] ( identifier[blob] )
identifier[req] = identifier[full_biplist] [ literal[string] ]
keyword[except] ( identifier[biplist] . identifier[InvalidPlistException] , identifier[KeyError] ) keyword[as] identifier[exception] :
identifier[parser_mediator] . identifier[ProduceExtractionWarning] (
literal[string] . identifier[format] (
identifier[exception] ))
keyword[return]
identifier[event_data] . identifier[title] = identifier[req] . identifier[get] ( literal[string] , keyword[None] )
identifier[event_data] . identifier[subtitle] = identifier[req] . identifier[get] ( literal[string] , keyword[None] )
identifier[event_data] . identifier[body] = identifier[req] . identifier[get] ( literal[string] , keyword[None] )
identifier[timestamp] = identifier[self] . identifier[_GetRowValue] ( identifier[query_hash] , identifier[row] , literal[string] )
identifier[date_time] = identifier[dfdatetime_cocoa_time] . identifier[CocoaTime] ( identifier[timestamp] = identifier[timestamp] )
identifier[event] = identifier[time_events] . identifier[DateTimeValuesEvent] (
identifier[date_time] , identifier[definitions] . identifier[TIME_DESCRIPTION_CREATION] )
identifier[parser_mediator] . identifier[ProduceEventWithEventData] ( identifier[event] , identifier[event_data] ) | def ParseNotificationcenterRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses a message row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = MacNotificationCenterEventData()
event_data.bundle_name = self._GetRowValue(query_hash, row, 'bundle_name')
event_data.presented = self._GetRowValue(query_hash, row, 'presented')
blob = self._GetRowValue(query_hash, row, 'dataBlob')
try:
full_biplist = biplist.readPlistFromString(blob)
# req is the 'req' dictionary from the plist containing extra information
# about the notification entry.
req = full_biplist['req'] # depends on [control=['try'], data=[]]
except (biplist.InvalidPlistException, KeyError) as exception:
parser_mediator.ProduceExtractionWarning('unable to read plist from database with error: {0!s}'.format(exception))
return # depends on [control=['except'], data=['exception']]
event_data.title = req.get('titl', None)
event_data.subtitle = req.get('subt', None)
event_data.body = req.get('body', None)
timestamp = self._GetRowValue(query_hash, row, 'timestamp')
date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data) |
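# Shape of the decoded plist the parser above expects, with made-up values;
# only these 'req' keys are read for title, subtitle and body.
full_biplist = {'req': {'titl': 'Messages', 'subt': 'Alice', 'body': 'Lunch?'}}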
async def write_request(
self, method: constants.HttpRequestMethod, *,
uri: str="/", authority: Optional[str]=None,
scheme: Optional[str]=None,
headers: Optional[_HeaderType]=None) -> \
"writers.HttpRequestWriter":
"""
        Send the next request to the server.
"""
return await self._delegate.write_request(
method, uri=uri, authority=authority,
scheme=scheme, headers=headers) | <ast.AsyncFunctionDef object at 0x7da18c4cc490> | keyword[async] keyword[def] identifier[write_request] (
identifier[self] , identifier[method] : identifier[constants] . identifier[HttpRequestMethod] ,*,
identifier[uri] : identifier[str] = literal[string] , identifier[authority] : identifier[Optional] [ identifier[str] ]= keyword[None] ,
identifier[scheme] : identifier[Optional] [ identifier[str] ]= keyword[None] ,
identifier[headers] : identifier[Optional] [ identifier[_HeaderType] ]= keyword[None] )-> literal[string] :
literal[string]
keyword[return] keyword[await] identifier[self] . identifier[_delegate] . identifier[write_request] (
identifier[method] , identifier[uri] = identifier[uri] , identifier[authority] = identifier[authority] ,
identifier[scheme] = identifier[scheme] , identifier[headers] = identifier[headers] ) | async def write_request(self, method: constants.HttpRequestMethod, *, uri: str='/', authority: Optional[str]=None, scheme: Optional[str]=None, headers: Optional[_HeaderType]=None) -> 'writers.HttpRequestWriter':
"""
        Send the next request to the server.
"""
return await self._delegate.write_request(method, uri=uri, authority=authority, scheme=scheme, headers=headers) |
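# Usage sketch inside a coroutine; connection setup is elided, and the enum
# member and header value shown here are assumptions.
async def fetch_status(conn):
    writer = await conn.write_request(
        constants.HttpRequestMethod.GET,
        uri='/status', headers={'accept': 'application/json'})
    return writer  # body writing and response reading happen elsewhere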
def create_label(self, label, doc=None):
"""
Create a new label
Arguments:
doc --- first document on which the label must be added (required
for now)
"""
label = copy.copy(label)
assert(label not in self.labels.values())
self.labels[label.name] = label
self.label_guesser.load(label.name)
# TODO(Jflesch): Should train with previous documents
if doc:
doc.add_label(label)
self.upd_doc(doc)
self.commit() | def function[create_label, parameter[self, label, doc]]:
constant[
Create a new label
Arguments:
doc --- first document on which the label must be added (required
for now)
]
variable[label] assign[=] call[name[copy].copy, parameter[name[label]]]
assert[compare[name[label] <ast.NotIn object at 0x7da2590d7190> call[name[self].labels.values, parameter[]]]]
call[name[self].labels][name[label].name] assign[=] name[label]
call[name[self].label_guesser.load, parameter[name[label].name]]
if name[doc] begin[:]
call[name[doc].add_label, parameter[name[label]]]
call[name[self].upd_doc, parameter[name[doc]]]
call[name[self].commit, parameter[]] | keyword[def] identifier[create_label] ( identifier[self] , identifier[label] , identifier[doc] = keyword[None] ):
literal[string]
identifier[label] = identifier[copy] . identifier[copy] ( identifier[label] )
keyword[assert] ( identifier[label] keyword[not] keyword[in] identifier[self] . identifier[labels] . identifier[values] ())
identifier[self] . identifier[labels] [ identifier[label] . identifier[name] ]= identifier[label]
identifier[self] . identifier[label_guesser] . identifier[load] ( identifier[label] . identifier[name] )
keyword[if] identifier[doc] :
identifier[doc] . identifier[add_label] ( identifier[label] )
identifier[self] . identifier[upd_doc] ( identifier[doc] )
identifier[self] . identifier[commit] () | def create_label(self, label, doc=None):
"""
Create a new label
Arguments:
doc --- first document on which the label must be added (required
for now)
"""
label = copy.copy(label)
assert label not in self.labels.values()
self.labels[label.name] = label
self.label_guesser.load(label.name)
# TODO(Jflesch): Should train with previous documents
if doc:
doc.add_label(label)
self.upd_doc(doc)
self.commit() # depends on [control=['if'], data=[]] |
def morphDataLists(fromList, toList, stepList):
'''
Iteratively morph fromList into toList using the values 0 to 1 in stepList
stepList: a value of 0 means no change and a value of 1 means a complete
change to the other value
'''
# If there are more than 1 pitch value, then we align the data in
# relative time.
# Each data point comes with a timestamp. The earliest timestamp is 0
# and the latest timestamp is 1. Using this method, for each relative
# timestamp in the source list, we find the closest relative timestamp
# in the target list. Just because two pitch values have the same index
# in the source and target lists does not mean that they correspond to
# the same speech event.
fromListRel, fromStartTime, fromEndTime = _makeTimingRelative(fromList)
toListRel = _makeTimingRelative(toList)[0]
# If fromList has more points, we'll have flat areas
    # If toList has more points, we might miss peaks or valleys
fromTimeList = [dataTuple[0] for dataTuple in fromListRel]
toTimeList = [dataTuple[0] for dataTuple in toListRel]
indexList = _getNearestMappingIndexList(fromTimeList, toTimeList)
alignedToPitchRel = [toListRel[i] for i in indexList]
for stepAmount in stepList:
newPitchList = []
# Perform the interpolation
for fromTuple, toTuple in zip(fromListRel, alignedToPitchRel):
fromTime, fromValue = fromTuple
toTime, toValue = toTuple
            # a stepAmount of 0 leaves the source value unchanged; 1 lands on the target
newValue = fromValue + (stepAmount * (toValue - fromValue))
newTime = fromTime + (stepAmount * (toTime - fromTime))
newPitchList.append((newTime, newValue))
newPitchList = _makeTimingAbsolute(newPitchList, fromStartTime,
fromEndTime)
yield stepAmount, newPitchList | def function[morphDataLists, parameter[fromList, toList, stepList]]:
constant[
Iteratively morph fromList into toList using the values 0 to 1 in stepList
stepList: a value of 0 means no change and a value of 1 means a complete
change to the other value
]
<ast.Tuple object at 0x7da1b10d6a10> assign[=] call[name[_makeTimingRelative], parameter[name[fromList]]]
variable[toListRel] assign[=] call[call[name[_makeTimingRelative], parameter[name[toList]]]][constant[0]]
variable[fromTimeList] assign[=] <ast.ListComp object at 0x7da1b10d5900>
variable[toTimeList] assign[=] <ast.ListComp object at 0x7da1b10d6320>
variable[indexList] assign[=] call[name[_getNearestMappingIndexList], parameter[name[fromTimeList], name[toTimeList]]]
variable[alignedToPitchRel] assign[=] <ast.ListComp object at 0x7da1b10d5600>
for taget[name[stepAmount]] in starred[name[stepList]] begin[:]
variable[newPitchList] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b10d4a30>, <ast.Name object at 0x7da1b10d6f80>]]] in starred[call[name[zip], parameter[name[fromListRel], name[alignedToPitchRel]]]] begin[:]
<ast.Tuple object at 0x7da1b10d44f0> assign[=] name[fromTuple]
<ast.Tuple object at 0x7da1b10d4730> assign[=] name[toTuple]
variable[newValue] assign[=] binary_operation[name[fromValue] + binary_operation[name[stepAmount] * binary_operation[name[toValue] - name[fromValue]]]]
variable[newTime] assign[=] binary_operation[name[fromTime] + binary_operation[name[stepAmount] * binary_operation[name[toTime] - name[fromTime]]]]
call[name[newPitchList].append, parameter[tuple[[<ast.Name object at 0x7da1b10d58a0>, <ast.Name object at 0x7da1b10d5360>]]]]
variable[newPitchList] assign[=] call[name[_makeTimingAbsolute], parameter[name[newPitchList], name[fromStartTime], name[fromEndTime]]]
<ast.Yield object at 0x7da1b10419f0> | keyword[def] identifier[morphDataLists] ( identifier[fromList] , identifier[toList] , identifier[stepList] ):
literal[string]
identifier[fromListRel] , identifier[fromStartTime] , identifier[fromEndTime] = identifier[_makeTimingRelative] ( identifier[fromList] )
identifier[toListRel] = identifier[_makeTimingRelative] ( identifier[toList] )[ literal[int] ]
identifier[fromTimeList] =[ identifier[dataTuple] [ literal[int] ] keyword[for] identifier[dataTuple] keyword[in] identifier[fromListRel] ]
identifier[toTimeList] =[ identifier[dataTuple] [ literal[int] ] keyword[for] identifier[dataTuple] keyword[in] identifier[toListRel] ]
identifier[indexList] = identifier[_getNearestMappingIndexList] ( identifier[fromTimeList] , identifier[toTimeList] )
identifier[alignedToPitchRel] =[ identifier[toListRel] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[indexList] ]
keyword[for] identifier[stepAmount] keyword[in] identifier[stepList] :
identifier[newPitchList] =[]
keyword[for] identifier[fromTuple] , identifier[toTuple] keyword[in] identifier[zip] ( identifier[fromListRel] , identifier[alignedToPitchRel] ):
identifier[fromTime] , identifier[fromValue] = identifier[fromTuple]
identifier[toTime] , identifier[toValue] = identifier[toTuple]
identifier[newValue] = identifier[fromValue] +( identifier[stepAmount] *( identifier[toValue] - identifier[fromValue] ))
identifier[newTime] = identifier[fromTime] +( identifier[stepAmount] *( identifier[toTime] - identifier[fromTime] ))
identifier[newPitchList] . identifier[append] (( identifier[newTime] , identifier[newValue] ))
identifier[newPitchList] = identifier[_makeTimingAbsolute] ( identifier[newPitchList] , identifier[fromStartTime] ,
identifier[fromEndTime] )
keyword[yield] identifier[stepAmount] , identifier[newPitchList] | def morphDataLists(fromList, toList, stepList):
"""
Iteratively morph fromList into toList using the values 0 to 1 in stepList
stepList: a value of 0 means no change and a value of 1 means a complete
change to the other value
"""
# If there are more than 1 pitch value, then we align the data in
# relative time.
# Each data point comes with a timestamp. The earliest timestamp is 0
# and the latest timestamp is 1. Using this method, for each relative
# timestamp in the source list, we find the closest relative timestamp
# in the target list. Just because two pitch values have the same index
# in the source and target lists does not mean that they correspond to
# the same speech event.
(fromListRel, fromStartTime, fromEndTime) = _makeTimingRelative(fromList)
toListRel = _makeTimingRelative(toList)[0]
# If fromList has more points, we'll have flat areas
    # If toList has more points, we might miss peaks or valleys
fromTimeList = [dataTuple[0] for dataTuple in fromListRel]
toTimeList = [dataTuple[0] for dataTuple in toListRel]
indexList = _getNearestMappingIndexList(fromTimeList, toTimeList)
alignedToPitchRel = [toListRel[i] for i in indexList]
for stepAmount in stepList:
newPitchList = []
# Perform the interpolation
for (fromTuple, toTuple) in zip(fromListRel, alignedToPitchRel):
(fromTime, fromValue) = fromTuple
(toTime, toValue) = toTuple
            # a stepAmount of 0 leaves the source value unchanged; 1 lands on the target
newValue = fromValue + stepAmount * (toValue - fromValue)
newTime = fromTime + stepAmount * (toTime - fromTime)
newPitchList.append((newTime, newValue)) # depends on [control=['for'], data=[]]
newPitchList = _makeTimingAbsolute(newPitchList, fromStartTime, fromEndTime)
yield (stepAmount, newPitchList) # depends on [control=['for'], data=['stepAmount']] |
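# Usage sketch, assuming the module's private helpers are importable alongside
# the function: contours are lists of (time, value) pairs, and each step in
# stepList yields a contour that far along the morph from source to target.
src = [(0.0, 100.0), (0.5, 110.0), (1.0, 105.0)]
tgt = [(0.0, 120.0), (1.0, 90.0)]
for step, contour in morphDataLists(src, tgt, [1.0 / 3, 2.0 / 3, 1.0]):
    print(step, contour)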
def received(self, limit=None):
"""
        Returns all the events that have been received (excluding sent events), up to the given limit if one is defined
        Args:
            limit (int, optional): the maximum number of events to return (Default value = None)
Returns:
list: a list of received events
"""
return list(itertools.islice((itertools.filterfalse(lambda x: x[1].sent, self.store)), limit))[::-1] | def function[received, parameter[self, limit]]:
constant[
        Returns all the events that have been received (excluding sent events), up to the given limit if one is defined
        Args:
            limit (int, optional): the maximum number of events to return (Default value = None)
Returns:
list: a list of received events
]
return[call[call[name[list], parameter[call[name[itertools].islice, parameter[call[name[itertools].filterfalse, parameter[<ast.Lambda object at 0x7da1b07931f0>, name[self].store]], name[limit]]]]]][<ast.Slice object at 0x7da1b0790a60>]] | keyword[def] identifier[received] ( identifier[self] , identifier[limit] = keyword[None] ):
literal[string]
keyword[return] identifier[list] ( identifier[itertools] . identifier[islice] (( identifier[itertools] . identifier[filterfalse] ( keyword[lambda] identifier[x] : identifier[x] [ literal[int] ]. identifier[sent] , identifier[self] . identifier[store] )), identifier[limit] ))[::- literal[int] ] | def received(self, limit=None):
"""
        Returns all the events that have been received (excluding sent events), up to the given limit if one is defined
        Args:
            limit (int, optional): the maximum number of events to return (Default value = None)
Returns:
list: a list of received events
"""
return list(itertools.islice(itertools.filterfalse(lambda x: x[1].sent, self.store), limit))[::-1] |
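# An eager equivalent of the itertools pipeline above, with a toy store and
# limit=2; real entries presumably carry richer event objects than this.
from collections import namedtuple
Event = namedtuple('Event', 'sent')
store = [(1, Event(sent=False)), (2, Event(sent=True)), (3, Event(sent=False))]
received = [pair for pair in store if not pair[1].sent][:2][::-1]
assert received == [(3, Event(sent=False)), (1, Event(sent=False))]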
def ParseApplicationUsageRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses an application usage row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
# TODO: replace usage by definition(s) in eventdata. Not sure which values
# it will hold here.
application_name = self._GetRowValue(query_hash, row, 'event')
usage = 'Application {0:s}'.format(application_name)
event_data = MacOSApplicationUsageEventData()
event_data.application = self._GetRowValue(query_hash, row, 'app_path')
event_data.app_version = self._GetRowValue(query_hash, row, 'app_version')
event_data.bundle_id = self._GetRowValue(query_hash, row, 'bundle_id')
event_data.count = self._GetRowValue(query_hash, row, 'number_times')
event_data.query = query
timestamp = self._GetRowValue(query_hash, row, 'last_time')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data) | def function[ParseApplicationUsageRow, parameter[self, parser_mediator, query, row]]:
constant[Parses an application usage row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
]
variable[query_hash] assign[=] call[name[hash], parameter[name[query]]]
variable[application_name] assign[=] call[name[self]._GetRowValue, parameter[name[query_hash], name[row], constant[event]]]
variable[usage] assign[=] call[constant[Application {0:s}].format, parameter[name[application_name]]]
variable[event_data] assign[=] call[name[MacOSApplicationUsageEventData], parameter[]]
name[event_data].application assign[=] call[name[self]._GetRowValue, parameter[name[query_hash], name[row], constant[app_path]]]
name[event_data].app_version assign[=] call[name[self]._GetRowValue, parameter[name[query_hash], name[row], constant[app_version]]]
name[event_data].bundle_id assign[=] call[name[self]._GetRowValue, parameter[name[query_hash], name[row], constant[bundle_id]]]
name[event_data].count assign[=] call[name[self]._GetRowValue, parameter[name[query_hash], name[row], constant[number_times]]]
name[event_data].query assign[=] name[query]
variable[timestamp] assign[=] call[name[self]._GetRowValue, parameter[name[query_hash], name[row], constant[last_time]]]
variable[date_time] assign[=] call[name[dfdatetime_posix_time].PosixTime, parameter[]]
variable[event] assign[=] call[name[time_events].DateTimeValuesEvent, parameter[name[date_time], name[usage]]]
call[name[parser_mediator].ProduceEventWithEventData, parameter[name[event], name[event_data]]] | keyword[def] identifier[ParseApplicationUsageRow] (
identifier[self] , identifier[parser_mediator] , identifier[query] , identifier[row] ,** identifier[unused_kwargs] ):
literal[string]
identifier[query_hash] = identifier[hash] ( identifier[query] )
identifier[application_name] = identifier[self] . identifier[_GetRowValue] ( identifier[query_hash] , identifier[row] , literal[string] )
identifier[usage] = literal[string] . identifier[format] ( identifier[application_name] )
identifier[event_data] = identifier[MacOSApplicationUsageEventData] ()
identifier[event_data] . identifier[application] = identifier[self] . identifier[_GetRowValue] ( identifier[query_hash] , identifier[row] , literal[string] )
identifier[event_data] . identifier[app_version] = identifier[self] . identifier[_GetRowValue] ( identifier[query_hash] , identifier[row] , literal[string] )
identifier[event_data] . identifier[bundle_id] = identifier[self] . identifier[_GetRowValue] ( identifier[query_hash] , identifier[row] , literal[string] )
identifier[event_data] . identifier[count] = identifier[self] . identifier[_GetRowValue] ( identifier[query_hash] , identifier[row] , literal[string] )
identifier[event_data] . identifier[query] = identifier[query]
identifier[timestamp] = identifier[self] . identifier[_GetRowValue] ( identifier[query_hash] , identifier[row] , literal[string] )
identifier[date_time] = identifier[dfdatetime_posix_time] . identifier[PosixTime] ( identifier[timestamp] = identifier[timestamp] )
identifier[event] = identifier[time_events] . identifier[DateTimeValuesEvent] ( identifier[date_time] , identifier[usage] )
identifier[parser_mediator] . identifier[ProduceEventWithEventData] ( identifier[event] , identifier[event_data] ) | def ParseApplicationUsageRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses an application usage row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
# TODO: replace usage by definition(s) in eventdata. Not sure which values
# it will hold here.
application_name = self._GetRowValue(query_hash, row, 'event')
usage = 'Application {0:s}'.format(application_name)
event_data = MacOSApplicationUsageEventData()
event_data.application = self._GetRowValue(query_hash, row, 'app_path')
event_data.app_version = self._GetRowValue(query_hash, row, 'app_version')
event_data.bundle_id = self._GetRowValue(query_hash, row, 'bundle_id')
event_data.count = self._GetRowValue(query_hash, row, 'number_times')
event_data.query = query
timestamp = self._GetRowValue(query_hash, row, 'last_time')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data) |
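Stripped of the plaso plumbing, the parser's row handling is named-column access plus wrapping `last_time` as a POSIX timestamp. A library-free sketch with `sqlite3` (the table and values are made up for illustration):

```python
import sqlite3
from datetime import datetime, timezone

conn = sqlite3.connect(":memory:")
conn.row_factory = sqlite3.Row  # enables row["column"] access
conn.execute("CREATE TABLE usage (event TEXT, app_path TEXT, last_time INTEGER)")
conn.execute("INSERT INTO usage VALUES ('launch', '/Applications/Foo.app', 1500000000)")

row = conn.execute("SELECT event, app_path, last_time FROM usage").fetchone()
usage = "Application {0:s}".format(row["event"])
when = datetime.fromtimestamp(row["last_time"], tz=timezone.utc)
print(usage, row["app_path"], when.isoformat())
```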
def freeze():
"""Combine all dependencies for the Agent's static environment."""
echo_waiting('Verifying collected packages...')
catalog, errors = make_catalog()
if errors:
for error in errors:
echo_failure(error)
abort()
static_file = get_agent_requirements()
echo_info('Static file: {}'.format(static_file))
pre_packages = list(read_packages(static_file))
catalog.write_packages(static_file)
post_packages = list(read_packages(static_file))
display_package_changes(pre_packages, post_packages) | def function[freeze, parameter[]]:
constant[Combine all dependencies for the Agent's static environment.]
call[name[echo_waiting], parameter[constant[Verifying collected packages...]]]
<ast.Tuple object at 0x7da20c6e7220> assign[=] call[name[make_catalog], parameter[]]
if name[errors] begin[:]
for taget[name[error]] in starred[name[errors]] begin[:]
call[name[echo_failure], parameter[name[error]]]
call[name[abort], parameter[]]
variable[static_file] assign[=] call[name[get_agent_requirements], parameter[]]
call[name[echo_info], parameter[call[constant[Static file: {}].format, parameter[name[static_file]]]]]
variable[pre_packages] assign[=] call[name[list], parameter[call[name[read_packages], parameter[name[static_file]]]]]
call[name[catalog].write_packages, parameter[name[static_file]]]
variable[post_packages] assign[=] call[name[list], parameter[call[name[read_packages], parameter[name[static_file]]]]]
call[name[display_package_changes], parameter[name[pre_packages], name[post_packages]]] | keyword[def] identifier[freeze] ():
literal[string]
identifier[echo_waiting] ( literal[string] )
identifier[catalog] , identifier[errors] = identifier[make_catalog] ()
keyword[if] identifier[errors] :
keyword[for] identifier[error] keyword[in] identifier[errors] :
identifier[echo_failure] ( identifier[error] )
identifier[abort] ()
identifier[static_file] = identifier[get_agent_requirements] ()
identifier[echo_info] ( literal[string] . identifier[format] ( identifier[static_file] ))
identifier[pre_packages] = identifier[list] ( identifier[read_packages] ( identifier[static_file] ))
identifier[catalog] . identifier[write_packages] ( identifier[static_file] )
identifier[post_packages] = identifier[list] ( identifier[read_packages] ( identifier[static_file] ))
identifier[display_package_changes] ( identifier[pre_packages] , identifier[post_packages] ) | def freeze():
"""Combine all dependencies for the Agent's static environment."""
echo_waiting('Verifying collected packages...')
(catalog, errors) = make_catalog()
if errors:
for error in errors:
echo_failure(error) # depends on [control=['for'], data=['error']]
abort() # depends on [control=['if'], data=[]]
static_file = get_agent_requirements()
echo_info('Static file: {}'.format(static_file))
pre_packages = list(read_packages(static_file))
catalog.write_packages(static_file)
post_packages = list(read_packages(static_file))
display_package_changes(pre_packages, post_packages) |
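The before/after comparison at the end of `freeze` reduces to two set differences. A sketch of what a `display_package_changes` helper might do — the real implementation is not shown in this snippet, so this behavior is an assumption:

```python
def display_package_changes(pre, post):
    pre, post = set(pre), set(post)
    for pkg in sorted(post - pre):
        print(f"+ {pkg}")  # added or upgraded pin
    for pkg in sorted(pre - post):
        print(f"- {pkg}")  # removed or replaced pin

display_package_changes(["requests==2.0", "six==1.10"],
                        ["requests==2.1", "six==1.10"])
```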
def severity(self):
"""Retrieves the severity for the incident/incidents from the
output response
Returns:
severity(namedtuple): List of named tuples of severity for the
incident/incidents
"""
resource_list = self.traffic_incident()
severity = namedtuple('severity', 'severity')
if len(resource_list) == 1 and resource_list[0] is None:
return None
else:
try:
return [severity(resource['severity'])
for resource in resource_list]
except (KeyError, TypeError):
return [severity(resource['Severity'])
for resource in resource_list] | def function[severity, parameter[self]]:
constant[Retrieves the severity for the incident/incidents from the
output response
Returns:
severity(namedtuple): List of named tuples of severity for the
incident/incidents
]
variable[resource_list] assign[=] call[name[self].traffic_incident, parameter[]]
variable[severity] assign[=] call[name[namedtuple], parameter[constant[severity], constant[severity]]]
if <ast.BoolOp object at 0x7da18dc05ed0> begin[:]
return[constant[None]] | keyword[def] identifier[severity] ( identifier[self] ):
literal[string]
identifier[resource_list] = identifier[self] . identifier[traffic_incident] ()
identifier[severity] = identifier[namedtuple] ( literal[string] , literal[string] )
keyword[if] identifier[len] ( identifier[resource_list] )== literal[int] keyword[and] identifier[resource_list] [ literal[int] ] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[else] :
keyword[try] :
keyword[return] [ identifier[severity] ( identifier[resource] [ literal[string] ])
keyword[for] identifier[resource] keyword[in] identifier[resource_list] ]
keyword[except] ( identifier[KeyError] , identifier[TypeError] ):
keyword[return] [ identifier[severity] ( identifier[resource] [ literal[string] ])
keyword[for] identifier[resource] keyword[in] identifier[resource_list] ] | def severity(self):
"""Retrieves the severity for the incident/incidents from the
output response
Returns:
severity(namedtuple): List of named tuples of severity for the
incident/incidents
"""
resource_list = self.traffic_incident()
severity = namedtuple('severity', 'severity')
if len(resource_list) == 1 and resource_list[0] is None:
return None # depends on [control=['if'], data=[]]
else:
try:
return [severity(resource['severity']) for resource in resource_list] # depends on [control=['try'], data=[]]
except (KeyError, TypeError):
return [severity(resource['Severity']) for resource in resource_list] # depends on [control=['except'], data=[]] |
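The try/except in `severity` exists because the API apparently spells the key either `severity` or `Severity`; for dict-shaped resources the same tolerance can be written with `dict.get`. A sketch (unlike the original, this version does not also guard against non-dict rows):

```python
from collections import namedtuple

Severity = namedtuple('severity', 'severity')

def severities(resource_list):
    if len(resource_list) == 1 and resource_list[0] is None:
        return None
    return [Severity(r.get('severity', r.get('Severity')))
            for r in resource_list]

print(severities([{'Severity': 3}, {'severity': 1}]))
# [severity(severity=3), severity(severity=1)]
```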
def self_edge_filter(_: BELGraph, source: BaseEntity, target: BaseEntity, __: str) -> bool:
"""Check if the source and target nodes are the same."""
return source == target | def function[self_edge_filter, parameter[_, source, target, __]]:
constant[Check if the source and target nodes are the same.]
return[compare[name[source] equal[==] name[target]]] | keyword[def] identifier[self_edge_filter] ( identifier[_] : identifier[BELGraph] , identifier[source] : identifier[BaseEntity] , identifier[target] : identifier[BaseEntity] , identifier[__] : identifier[str] )-> identifier[bool] :
literal[string]
keyword[return] identifier[source] == identifier[target] | def self_edge_filter(_: BELGraph, source: BaseEntity, target: BaseEntity, __: str) -> bool:
"""Check if the source and target nodes are the same."""
return source == target |
def find_page_location(command, specified_platform):
"""Find the command man page in the pages directory."""
repo_directory = get_config()['repo_directory']
default_platform = get_config()['platform']
command_platform = (
specified_platform if specified_platform else default_platform)
with io.open(path.join(repo_directory, 'pages/index.json'),
encoding='utf-8') as f:
index = json.load(f)
command_list = [item['name'] for item in index['commands']]
if command not in command_list:
sys.exit(
("Sorry, we don't support command: {0} right now.\n"
"You can file an issue or send a PR on github:\n"
" https://github.com/tldr-pages/tldr").format(command))
supported_platforms = index['commands'][
command_list.index(command)]['platform']
if command_platform in supported_platforms:
platform = command_platform
elif 'common' in supported_platforms:
platform = 'common'
else:
platform = ''
if not platform:
sys.exit(
("Sorry, command {0} is not supported on your platform.\n"
"You can file an issue or send a PR on github:\n"
" https://github.com/tldr-pages/tldr").format(command))
page_path = path.join(path.join(repo_directory, 'pages'),
path.join(platform, command + '.md'))
return page_path | def function[find_page_location, parameter[command, specified_platform]]:
constant[Find the command man page in the pages directory.]
variable[repo_directory] assign[=] call[call[name[get_config], parameter[]]][constant[repo_directory]]
variable[default_platform] assign[=] call[call[name[get_config], parameter[]]][constant[platform]]
variable[command_platform] assign[=] <ast.IfExp object at 0x7da18dc9a0b0>
with call[name[io].open, parameter[call[name[path].join, parameter[name[repo_directory], constant[pages/index.json]]]]] begin[:]
variable[index] assign[=] call[name[json].load, parameter[name[f]]]
variable[command_list] assign[=] <ast.ListComp object at 0x7da18dc9b820>
if compare[name[command] <ast.NotIn object at 0x7da2590d7190> name[command_list]] begin[:]
call[name[sys].exit, parameter[call[constant[Sorry, we don't support command: {0} right now.
You can file an issue or send a PR on github:
https://github.com/tldr-pages/tldr].format, parameter[name[command]]]]]
variable[supported_platforms] assign[=] call[call[call[name[index]][constant[commands]]][call[name[command_list].index, parameter[name[command]]]]][constant[platform]]
if compare[name[command_platform] in name[supported_platforms]] begin[:]
variable[platform] assign[=] name[command_platform]
if <ast.UnaryOp object at 0x7da20c9936a0> begin[:]
call[name[sys].exit, parameter[call[constant[Sorry, command {0} is not supported on your platform.
You can file an issue or send a PR on github:
https://github.com/tldr-pages/tldr].format, parameter[name[command]]]]]
variable[page_path] assign[=] call[name[path].join, parameter[call[name[path].join, parameter[name[repo_directory], constant[pages]]], call[name[path].join, parameter[name[platform], binary_operation[name[command] + constant[.md]]]]]]
return[name[page_path]] | keyword[def] identifier[find_page_location] ( identifier[command] , identifier[specified_platform] ):
literal[string]
identifier[repo_directory] = identifier[get_config] ()[ literal[string] ]
identifier[default_platform] = identifier[get_config] ()[ literal[string] ]
identifier[command_platform] =(
identifier[specified_platform] keyword[if] identifier[specified_platform] keyword[else] identifier[default_platform] )
keyword[with] identifier[io] . identifier[open] ( identifier[path] . identifier[join] ( identifier[repo_directory] , literal[string] ),
identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[index] = identifier[json] . identifier[load] ( identifier[f] )
identifier[command_list] =[ identifier[item] [ literal[string] ] keyword[for] identifier[item] keyword[in] identifier[index] [ literal[string] ]]
keyword[if] identifier[command] keyword[not] keyword[in] identifier[command_list] :
identifier[sys] . identifier[exit] (
( literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[command] ))
identifier[supported_platforms] = identifier[index] [ literal[string] ][
identifier[command_list] . identifier[index] ( identifier[command] )][ literal[string] ]
keyword[if] identifier[command_platform] keyword[in] identifier[supported_platforms] :
identifier[platform] = identifier[command_platform]
keyword[elif] literal[string] keyword[in] identifier[supported_platforms] :
identifier[platform] = literal[string]
keyword[else] :
identifier[platform] = literal[string]
keyword[if] keyword[not] identifier[platform] :
identifier[sys] . identifier[exit] (
( literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[command] ))
identifier[page_path] = identifier[path] . identifier[join] ( identifier[path] . identifier[join] ( identifier[repo_directory] , literal[string] ),
identifier[path] . identifier[join] ( identifier[platform] , identifier[command] + literal[string] ))
keyword[return] identifier[page_path] | def find_page_location(command, specified_platform):
"""Find the command man page in the pages directory."""
repo_directory = get_config()['repo_directory']
default_platform = get_config()['platform']
command_platform = specified_platform if specified_platform else default_platform
with io.open(path.join(repo_directory, 'pages/index.json'), encoding='utf-8') as f:
index = json.load(f) # depends on [control=['with'], data=['f']]
command_list = [item['name'] for item in index['commands']]
if command not in command_list:
sys.exit("Sorry, we don't support command: {0} right now.\nYou can file an issue or send a PR on github:\n https://github.com/tldr-pages/tldr".format(command)) # depends on [control=['if'], data=['command']]
supported_platforms = index['commands'][command_list.index(command)]['platform']
if command_platform in supported_platforms:
platform = command_platform # depends on [control=['if'], data=['command_platform']]
elif 'common' in supported_platforms:
platform = 'common' # depends on [control=['if'], data=[]]
else:
platform = ''
if not platform:
sys.exit('Sorry, command {0} is not supported on your platform.\nYou can file an issue or send a PR on github:\n https://github.com/tldr-pages/tldr'.format(command)) # depends on [control=['if'], data=[]]
page_path = path.join(path.join(repo_directory, 'pages'), path.join(platform, command + '.md'))
return page_path |
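The platform resolution in `find_page_location` is a two-step fallback — the requested platform first, then `common`. A condensed sketch of the index lookup (the inline JSON mimics the shape of `pages/index.json`):

```python
import json

index = json.loads('{"commands": ['
                   '{"name": "tar", "platform": ["common"]},'
                   '{"name": "dd", "platform": ["linux", "osx"]}]}')
by_name = {c["name"]: c["platform"] for c in index["commands"]}

def resolve(command, platform):
    supported = by_name.get(command, [])
    if platform in supported:
        return platform
    if "common" in supported:
        return "common"
    return None  # caller reports "not supported on your platform"

print(resolve("tar", "linux"), resolve("dd", "osx"))  # common osx
```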
def is_gesture(self):
"""Macro to check if this event is
a :class:`~libinput.event.GestureEvent`.
"""
if self in {type(self).GESTURE_SWIPE_BEGIN, type(self).GESTURE_SWIPE_END,
type(self).GESTURE_SWIPE_UPDATE, type(self).GESTURE_PINCH_BEGIN,
type(self).GESTURE_PINCH_UPDATE, type(self).GESTURE_PINCH_END}:
return True
else:
return False | def function[is_gesture, parameter[self]]:
constant[Macro to check if this event is
a :class:`~libinput.event.GestureEvent`.
]
if compare[name[self] in <ast.Set object at 0x7da18f00ffa0>] begin[:]
return[constant[True]] | keyword[def] identifier[is_gesture] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] keyword[in] { identifier[type] ( identifier[self] ). identifier[GESTURE_SWIPE_BEGIN] , identifier[type] ( identifier[self] ). identifier[GESTURE_SWIPE_END] ,
identifier[type] ( identifier[self] ). identifier[GESTURE_SWIPE_UPDATE] , identifier[type] ( identifier[self] ). identifier[GESTURE_PINCH_BEGIN] ,
identifier[type] ( identifier[self] ). identifier[GESTURE_PINCH_UPDATE] , identifier[type] ( identifier[self] ). identifier[GESTURE_PINCH_END] }:
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def is_gesture(self):
"""Macro to check if this event is
a :class:`~libinput.event.GestureEvent`.
"""
if self in {type(self).GESTURE_SWIPE_BEGIN, type(self).GESTURE_SWIPE_END, type(self).GESTURE_SWIPE_UPDATE, type(self).GESTURE_PINCH_BEGIN, type(self).GESTURE_PINCH_UPDATE, type(self).GESTURE_PINCH_END}:
return True # depends on [control=['if'], data=[]]
else:
return False |
def flatten(self, lst=None):
"""syntax.flatten(token_stream) - compile period tokens
This turns a stream of tokens into p-code for the trivial
stack machine that evaluates period expressions in in_period.
"""
tree = []
uops = [] # accumulated unary operations
s = Stack()
        group_len = 0 # in current precedence group
for item in lst:
if type(item) == type([]):
# Subexpression.
tree = tree + self.flatten(item)
group_len = group_len + 1
# Unary ops dump, for things like: '!(Monday|Wednesday)'
for uop in uops:
tree.append(uop)
uops = []
elif item in self.ops and item not in self.uops:
# Operator.
if not s.empty():
prev_op = s.pop()
                # If the precedence of the previous operation is
                # higher, then dump out everything so far, ensuring the
# order of evaluation.
if _precedence[prev_op] > _precedence[item]:
s.push(prev_op) # put it back
for i in range(group_len - 1):
tree.append(s.pop())
group_len = 0
else:
s.push(prev_op)
s.push(item)
else:
s.push(item)
elif item in self.uops:
uops.append(item)
else:
# Token of some sort.
tree.append(item)
group_len = group_len + 1
# Dump any unary operations.
for uop in uops:
tree.append(uop)
uops = []
while not s.empty():
tree.append(s.pop())
# Drop any remaining unary operations.
for uop in uops:
tree.append(uop)
return tree | def function[flatten, parameter[self, lst]]:
constant[syntax.flatten(token_stream) - compile period tokens
This turns a stream of tokens into p-code for the trivial
stack machine that evaluates period expressions in in_period.
]
variable[tree] assign[=] list[[]]
variable[uops] assign[=] list[[]]
variable[s] assign[=] call[name[Stack], parameter[]]
variable[group_len] assign[=] constant[0]
for taget[name[item]] in starred[name[lst]] begin[:]
if compare[call[name[type], parameter[name[item]]] equal[==] call[name[type], parameter[list[[]]]]] begin[:]
variable[tree] assign[=] binary_operation[name[tree] + call[name[self].flatten, parameter[name[item]]]]
variable[group_len] assign[=] binary_operation[name[group_len] + constant[1]]
for taget[name[uop]] in starred[name[uops]] begin[:]
call[name[tree].append, parameter[name[uop]]]
variable[uops] assign[=] list[[]]
while <ast.UnaryOp object at 0x7da2044c2740> begin[:]
call[name[tree].append, parameter[call[name[s].pop, parameter[]]]]
for taget[name[uop]] in starred[name[uops]] begin[:]
call[name[tree].append, parameter[name[uop]]]
return[name[tree]] | keyword[def] identifier[flatten] ( identifier[self] , identifier[lst] = keyword[None] ):
literal[string]
identifier[tree] =[]
identifier[uops] =[]
identifier[s] = identifier[Stack] ()
identifier[group_len] = literal[int]
keyword[for] identifier[item] keyword[in] identifier[lst] :
keyword[if] identifier[type] ( identifier[item] )== identifier[type] ([]):
identifier[tree] = identifier[tree] + identifier[self] . identifier[flatten] ( identifier[item] )
identifier[group_len] = identifier[group_len] + literal[int]
keyword[for] identifier[uop] keyword[in] identifier[uops] :
identifier[tree] . identifier[append] ( identifier[uop] )
identifier[uops] =[]
keyword[elif] identifier[item] keyword[in] identifier[self] . identifier[ops] keyword[and] identifier[item] keyword[not] keyword[in] identifier[self] . identifier[uops] :
keyword[if] keyword[not] identifier[s] . identifier[empty] ():
identifier[prev_op] = identifier[s] . identifier[pop] ()
keyword[if] identifier[_precedence] [ identifier[prev_op] ]> identifier[_precedence] [ identifier[item] ]:
identifier[s] . identifier[push] ( identifier[prev_op] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[group_len] - literal[int] ):
identifier[tree] . identifier[append] ( identifier[s] . identifier[pop] ())
identifier[group_len] = literal[int]
keyword[else] :
identifier[s] . identifier[push] ( identifier[prev_op] )
identifier[s] . identifier[push] ( identifier[item] )
keyword[else] :
identifier[s] . identifier[push] ( identifier[item] )
keyword[elif] identifier[item] keyword[in] identifier[self] . identifier[uops] :
identifier[uops] . identifier[append] ( identifier[item] )
keyword[else] :
identifier[tree] . identifier[append] ( identifier[item] )
identifier[group_len] = identifier[group_len] + literal[int]
keyword[for] identifier[uop] keyword[in] identifier[uops] :
identifier[tree] . identifier[append] ( identifier[uop] )
identifier[uops] =[]
keyword[while] keyword[not] identifier[s] . identifier[empty] ():
identifier[tree] . identifier[append] ( identifier[s] . identifier[pop] ())
keyword[for] identifier[uop] keyword[in] identifier[uops] :
identifier[tree] . identifier[append] ( identifier[uop] )
keyword[return] identifier[tree] | def flatten(self, lst=None):
"""syntax.flatten(token_stream) - compile period tokens
This turns a stream of tokens into p-code for the trivial
stack machine that evaluates period expressions in in_period.
"""
tree = []
uops = [] # accumulated unary operations
s = Stack()
    group_len = 0 # in current precedence group
for item in lst:
if type(item) == type([]):
# Subexpression.
tree = tree + self.flatten(item)
group_len = group_len + 1
# Unary ops dump, for things like: '!(Monday|Wednesday)'
for uop in uops:
tree.append(uop) # depends on [control=['for'], data=['uop']]
uops = [] # depends on [control=['if'], data=[]]
elif item in self.ops and item not in self.uops:
# Operator.
if not s.empty():
prev_op = s.pop()
                # If the precedence of the previous operation is
                # higher, then dump out everything so far, ensuring the
# order of evaluation.
if _precedence[prev_op] > _precedence[item]:
s.push(prev_op) # put it back
for i in range(group_len - 1):
tree.append(s.pop()) # depends on [control=['for'], data=[]]
group_len = 0 # depends on [control=['if'], data=[]]
else:
s.push(prev_op)
s.push(item) # depends on [control=['if'], data=[]]
else:
s.push(item) # depends on [control=['if'], data=[]]
elif item in self.uops:
uops.append(item) # depends on [control=['if'], data=['item']]
else:
# Token of some sort.
tree.append(item)
group_len = group_len + 1
# Dump any unary operations.
for uop in uops:
tree.append(uop) # depends on [control=['for'], data=['uop']]
uops = [] # depends on [control=['for'], data=['item']]
while not s.empty():
tree.append(s.pop()) # depends on [control=['while'], data=[]]
# Drop any remaining unary operations.
for uop in uops:
tree.append(uop) # depends on [control=['for'], data=['uop']]
return tree |
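`flatten` emits postfix p-code, so the matching stack machine is a plain loop over tokens. A sketch of the consuming side (`!`, `&`, `|` mirror the unary/binary operators assumed above; truth values stand in for actual period tests):

```python
def evaluate(pcode, truth):
    # run postfix p-code against a token -> bool mapping
    stack = []
    for tok in pcode:
        if tok == '!':
            stack.append(not stack.pop())
        elif tok in ('&', '|'):
            b, a = stack.pop(), stack.pop()
            stack.append((a and b) if tok == '&' else (a or b))
        else:
            stack.append(truth[tok])
    return stack.pop()

# postfix form of "Monday | (Wednesday & !Friday)"
pcode = ['Monday', 'Wednesday', 'Friday', '!', '&', '|']
print(evaluate(pcode, {'Monday': False, 'Wednesday': True, 'Friday': False}))  # True
```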
def constructor_args(class_, *args, **kwargs):
"""
Return (args, kwargs) matching the function signature
    :param class_: class whose constructor to inspect
    :type class_: type
:param args:
:type args:
:param kwargs:
:type kwargs:
:return: (args, kwargs) matching the function signature
:rtype: tuple
"""
argspec = getargspec(_constructor(class_)) # pylint:disable=deprecated-method
return argspec_args(argspec, True, *args, **kwargs) | def function[constructor_args, parameter[class_]]:
constant[
Return (args, kwargs) matching the function signature
:param callable: callable to inspect
:type callable: Callable
:param args:
:type args:
:param kwargs:
:type kwargs:
:return: (args, kwargs) matching the function signature
:rtype: tuple
]
variable[argspec] assign[=] call[name[getargspec], parameter[call[name[_constructor], parameter[name[class_]]]]]
return[call[name[argspec_args], parameter[name[argspec], constant[True], <ast.Starred object at 0x7da18f09df00>]]] | keyword[def] identifier[constructor_args] ( identifier[class_] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[argspec] = identifier[getargspec] ( identifier[_constructor] ( identifier[class_] ))
keyword[return] identifier[argspec_args] ( identifier[argspec] , keyword[True] ,* identifier[args] ,** identifier[kwargs] ) | def constructor_args(class_, *args, **kwargs):
"""
Return (args, kwargs) matching the function signature
    :param class_: class whose constructor to inspect
    :type class_: type
:param args:
:type args:
:param kwargs:
:type kwargs:
:return: (args, kwargs) matching the function signature
:rtype: tuple
"""
argspec = getargspec(_constructor(class_)) # pylint:disable=deprecated-method
return argspec_args(argspec, True, *args, **kwargs) |
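`getargspec` has been deprecated for years; a rough modern equivalent of matching call arguments to a constructor uses `inspect.signature`. This is a stand-in sketch, not the project's `argspec_args`:

```python
import inspect

class Point:
    def __init__(self, x, y, label="p"):
        self.x, self.y, self.label = x, y, label

def constructor_args(class_, *args, **kwargs):
    sig = inspect.signature(class_.__init__)
    # keep only keyword names the constructor actually accepts
    accepted = {k: v for k, v in kwargs.items() if k in sig.parameters}
    bound = sig.bind_partial(None, *args, **accepted)  # None fills `self`
    return bound.args[1:], bound.kwargs

print(constructor_args(Point, 1, 2, label="origin", colour="red"))
# ((1, 2, 'origin'), {})
```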
def _get_trendline(self,date0=None,date1=None,on=None,kind='trend',to_strfmt='%Y-%m-%d',from_strfmt='%d%b%y',**kwargs):
"""
Returns a trendline (line), support or resistance
Parameters:
date0 : string
Trendline starting date
date1 : string
Trendline end date
on : string
Indicate the data series in which the
trendline should be based.
'close'
'high'
'low'
'open'
kind : string
                Defines the kind of trendline
'trend'
'resistance'
'support'
mode : string
Defines how the support/resistance will
be drawn
                'starttoend' : (x0,x1)
'fromstart' : (x0,date0)
'toend' : (date0,x1)
text : string
If passed, then an annotation will be added
to the trendline (at mid point)
from_strfmt : string
                Defines the date formatting in which
date0 and date1 are stated.
default: '%d%b%y'
to_strfmt : string
Defines the date formatting
to which it should be converted.
This should match the same format as the timeseries index.
default : '%Y-%m-%d'
"""
ann_values=copy.deepcopy(get_annotation_kwargs())
ann_values.extend(['x','y'])
ann_kwargs=utils.check_kwargs(kwargs,ann_values,{},clean_origin=True)
def position(d0,d1):
return d0+(d1-d0)/2
date0=kwargs.pop('date',date0)
date0=date_tools.stringToString(date0,from_strfmt,to_strfmt) if '-' not in date0 else date0
if kind=='trend':
date1=date_tools.stringToString(date1,from_strfmt,to_strfmt) if '-' not in date1 else date1
on='close' if not on else on
df=pd.DataFrame(self.df[self._d[on]])
y0=kwargs.get('y0',df.ix[date0].values[0])
y1=kwargs.get('y1',df.ix[date1].values[0])
if kind in ('support','resistance'):
mode=kwargs.pop('mode','starttoend')
if not on:
on='low' if kind=='support' else 'high'
df=pd.DataFrame(self.df[self._d[on]])
y0=kwargs.get('y0',df.ix[date0].values[0])
y1=kwargs.get('y1',y0)
if mode=='starttoend':
date0=df.index[0]
date1=df.index[-1]
elif mode=='toend':
date1=df.index[-1]
elif mode=='fromstart':
date1=date0
date0=df.index[0]
if isinstance(date0,pd.Timestamp):
date0=date_tools.dateToString(date0,to_strfmt)
if isinstance(date1,pd.Timestamp):
date1=date_tools.dateToString(date1,to_strfmt)
d={'x0':date0,'x1':date1,'y0':y0,'y1':y1}
d.update(**kwargs)
shape=tools.get_shape(**d)
if ann_kwargs.get('text',False):
ann_kwargs['x']=ann_kwargs.get('x',date_tools.dateToString(position(date_tools.stringToDate(date0,to_strfmt),date_tools.stringToDate(date1,to_strfmt)),to_strfmt))
ann_kwargs['y']=ann_kwargs.get('y',position(shape['y0'],shape['y1']))
else:
ann_kwargs={}
return {'shape':shape,'annotation':ann_kwargs} | def function[_get_trendline, parameter[self, date0, date1, on, kind, to_strfmt, from_strfmt]]:
constant[
Returns a trendline (line), support or resistance
Parameters:
date0 : string
Trendline starting date
date1 : string
Trendline end date
on : string
Indicate the data series in which the
trendline should be based.
'close'
'high'
'low'
'open'
kind : string
            Defines the kind of trendline
'trend'
'resistance'
'support'
mode : string
Defines how the support/resistance will
be drawn
            'starttoend' : (x0,x1)
'fromstart' : (x0,date0)
'toend' : (date0,x1)
text : string
If passed, then an annotation will be added
to the trendline (at mid point)
from_strfmt : string
            Defines the date formatting in which
date0 and date1 are stated.
default: '%d%b%y'
to_strfmt : string
Defines the date formatting
to which it should be converted.
This should match the same format as the timeseries index.
default : '%Y-%m-%d'
]
variable[ann_values] assign[=] call[name[copy].deepcopy, parameter[call[name[get_annotation_kwargs], parameter[]]]]
call[name[ann_values].extend, parameter[list[[<ast.Constant object at 0x7da1b1cfb7f0>, <ast.Constant object at 0x7da1b1cfb820>]]]]
variable[ann_kwargs] assign[=] call[name[utils].check_kwargs, parameter[name[kwargs], name[ann_values], dictionary[[], []]]]
def function[position, parameter[d0, d1]]:
return[binary_operation[name[d0] + binary_operation[binary_operation[name[d1] - name[d0]] / constant[2]]]]
variable[date0] assign[=] call[name[kwargs].pop, parameter[constant[date], name[date0]]]
variable[date0] assign[=] <ast.IfExp object at 0x7da1b1cef3a0>
if compare[name[kind] equal[==] constant[trend]] begin[:]
variable[date1] assign[=] <ast.IfExp object at 0x7da1b1ceec80>
variable[on] assign[=] <ast.IfExp object at 0x7da1b1cee380>
variable[df] assign[=] call[name[pd].DataFrame, parameter[call[name[self].df][call[name[self]._d][name[on]]]]]
variable[y0] assign[=] call[name[kwargs].get, parameter[constant[y0], call[call[name[df].ix][name[date0]].values][constant[0]]]]
variable[y1] assign[=] call[name[kwargs].get, parameter[constant[y1], call[call[name[df].ix][name[date1]].values][constant[0]]]]
if compare[name[kind] in tuple[[<ast.Constant object at 0x7da1b1ceea70>, <ast.Constant object at 0x7da1b1ceeb30>]]] begin[:]
variable[mode] assign[=] call[name[kwargs].pop, parameter[constant[mode], constant[starttoend]]]
if <ast.UnaryOp object at 0x7da1b1cee6b0> begin[:]
variable[on] assign[=] <ast.IfExp object at 0x7da1b1cee9b0>
variable[df] assign[=] call[name[pd].DataFrame, parameter[call[name[self].df][call[name[self]._d][name[on]]]]]
variable[y0] assign[=] call[name[kwargs].get, parameter[constant[y0], call[call[name[df].ix][name[date0]].values][constant[0]]]]
variable[y1] assign[=] call[name[kwargs].get, parameter[constant[y1], name[y0]]]
if compare[name[mode] equal[==] constant[starttoend]] begin[:]
variable[date0] assign[=] call[name[df].index][constant[0]]
variable[date1] assign[=] call[name[df].index][<ast.UnaryOp object at 0x7da1b1ceeb90>]
if call[name[isinstance], parameter[name[date0], name[pd].Timestamp]] begin[:]
variable[date0] assign[=] call[name[date_tools].dateToString, parameter[name[date0], name[to_strfmt]]]
if call[name[isinstance], parameter[name[date1], name[pd].Timestamp]] begin[:]
variable[date1] assign[=] call[name[date_tools].dateToString, parameter[name[date1], name[to_strfmt]]]
variable[d] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c315a0>, <ast.Constant object at 0x7da1b1c309a0>, <ast.Constant object at 0x7da1b1c30160>, <ast.Constant object at 0x7da1b1c31a80>], [<ast.Name object at 0x7da1b1c33490>, <ast.Name object at 0x7da1b1c32950>, <ast.Name object at 0x7da1b1c32860>, <ast.Name object at 0x7da1b1c33e80>]]
call[name[d].update, parameter[]]
variable[shape] assign[=] call[name[tools].get_shape, parameter[]]
if call[name[ann_kwargs].get, parameter[constant[text], constant[False]]] begin[:]
call[name[ann_kwargs]][constant[x]] assign[=] call[name[ann_kwargs].get, parameter[constant[x], call[name[date_tools].dateToString, parameter[call[name[position], parameter[call[name[date_tools].stringToDate, parameter[name[date0], name[to_strfmt]]], call[name[date_tools].stringToDate, parameter[name[date1], name[to_strfmt]]]]], name[to_strfmt]]]]]
call[name[ann_kwargs]][constant[y]] assign[=] call[name[ann_kwargs].get, parameter[constant[y], call[name[position], parameter[call[name[shape]][constant[y0]], call[name[shape]][constant[y1]]]]]]
return[dictionary[[<ast.Constant object at 0x7da1b1c32740>, <ast.Constant object at 0x7da1b1c32aa0>], [<ast.Name object at 0x7da1b1c339a0>, <ast.Name object at 0x7da1b1c335b0>]]] | keyword[def] identifier[_get_trendline] ( identifier[self] , identifier[date0] = keyword[None] , identifier[date1] = keyword[None] , identifier[on] = keyword[None] , identifier[kind] = literal[string] , identifier[to_strfmt] = literal[string] , identifier[from_strfmt] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[ann_values] = identifier[copy] . identifier[deepcopy] ( identifier[get_annotation_kwargs] ())
identifier[ann_values] . identifier[extend] ([ literal[string] , literal[string] ])
identifier[ann_kwargs] = identifier[utils] . identifier[check_kwargs] ( identifier[kwargs] , identifier[ann_values] ,{}, identifier[clean_origin] = keyword[True] )
keyword[def] identifier[position] ( identifier[d0] , identifier[d1] ):
keyword[return] identifier[d0] +( identifier[d1] - identifier[d0] )/ literal[int]
identifier[date0] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[date0] )
identifier[date0] = identifier[date_tools] . identifier[stringToString] ( identifier[date0] , identifier[from_strfmt] , identifier[to_strfmt] ) keyword[if] literal[string] keyword[not] keyword[in] identifier[date0] keyword[else] identifier[date0]
keyword[if] identifier[kind] == literal[string] :
identifier[date1] = identifier[date_tools] . identifier[stringToString] ( identifier[date1] , identifier[from_strfmt] , identifier[to_strfmt] ) keyword[if] literal[string] keyword[not] keyword[in] identifier[date1] keyword[else] identifier[date1]
identifier[on] = literal[string] keyword[if] keyword[not] identifier[on] keyword[else] identifier[on]
identifier[df] = identifier[pd] . identifier[DataFrame] ( identifier[self] . identifier[df] [ identifier[self] . identifier[_d] [ identifier[on] ]])
identifier[y0] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[df] . identifier[ix] [ identifier[date0] ]. identifier[values] [ literal[int] ])
identifier[y1] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[df] . identifier[ix] [ identifier[date1] ]. identifier[values] [ literal[int] ])
keyword[if] identifier[kind] keyword[in] ( literal[string] , literal[string] ):
identifier[mode] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[on] :
identifier[on] = literal[string] keyword[if] identifier[kind] == literal[string] keyword[else] literal[string]
identifier[df] = identifier[pd] . identifier[DataFrame] ( identifier[self] . identifier[df] [ identifier[self] . identifier[_d] [ identifier[on] ]])
identifier[y0] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[df] . identifier[ix] [ identifier[date0] ]. identifier[values] [ literal[int] ])
identifier[y1] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[y0] )
keyword[if] identifier[mode] == literal[string] :
identifier[date0] = identifier[df] . identifier[index] [ literal[int] ]
identifier[date1] = identifier[df] . identifier[index] [- literal[int] ]
keyword[elif] identifier[mode] == literal[string] :
identifier[date1] = identifier[df] . identifier[index] [- literal[int] ]
keyword[elif] identifier[mode] == literal[string] :
identifier[date1] = identifier[date0]
identifier[date0] = identifier[df] . identifier[index] [ literal[int] ]
keyword[if] identifier[isinstance] ( identifier[date0] , identifier[pd] . identifier[Timestamp] ):
identifier[date0] = identifier[date_tools] . identifier[dateToString] ( identifier[date0] , identifier[to_strfmt] )
keyword[if] identifier[isinstance] ( identifier[date1] , identifier[pd] . identifier[Timestamp] ):
identifier[date1] = identifier[date_tools] . identifier[dateToString] ( identifier[date1] , identifier[to_strfmt] )
identifier[d] ={ literal[string] : identifier[date0] , literal[string] : identifier[date1] , literal[string] : identifier[y0] , literal[string] : identifier[y1] }
identifier[d] . identifier[update] (** identifier[kwargs] )
identifier[shape] = identifier[tools] . identifier[get_shape] (** identifier[d] )
keyword[if] identifier[ann_kwargs] . identifier[get] ( literal[string] , keyword[False] ):
identifier[ann_kwargs] [ literal[string] ]= identifier[ann_kwargs] . identifier[get] ( literal[string] , identifier[date_tools] . identifier[dateToString] ( identifier[position] ( identifier[date_tools] . identifier[stringToDate] ( identifier[date0] , identifier[to_strfmt] ), identifier[date_tools] . identifier[stringToDate] ( identifier[date1] , identifier[to_strfmt] )), identifier[to_strfmt] ))
identifier[ann_kwargs] [ literal[string] ]= identifier[ann_kwargs] . identifier[get] ( literal[string] , identifier[position] ( identifier[shape] [ literal[string] ], identifier[shape] [ literal[string] ]))
keyword[else] :
identifier[ann_kwargs] ={}
keyword[return] { literal[string] : identifier[shape] , literal[string] : identifier[ann_kwargs] } | def _get_trendline(self, date0=None, date1=None, on=None, kind='trend', to_strfmt='%Y-%m-%d', from_strfmt='%d%b%y', **kwargs):
"""
Returns a trendline (line), support or resistance
Parameters:
date0 : string
Trendline starting date
date1 : string
Trendline end date
on : string
Indicate the data series in which the
trendline should be based.
'close'
'high'
'low'
'open'
kind : string
                Defines the kind of trendline
'trend'
'resistance'
'support'
mode : string
Defines how the support/resistance will
be drawn
                'starttoend' : (x0,x1)
'fromstart' : (x0,date0)
'toend' : (date0,x1)
text : string
If passed, then an annotation will be added
to the trendline (at mid point)
from_strfmt : string
                Defines the date formatting in which
date0 and date1 are stated.
default: '%d%b%y'
to_strfmt : string
Defines the date formatting
to which it should be converted.
This should match the same format as the timeseries index.
default : '%Y-%m-%d'
"""
ann_values = copy.deepcopy(get_annotation_kwargs())
ann_values.extend(['x', 'y'])
ann_kwargs = utils.check_kwargs(kwargs, ann_values, {}, clean_origin=True)
def position(d0, d1):
return d0 + (d1 - d0) / 2
date0 = kwargs.pop('date', date0)
date0 = date_tools.stringToString(date0, from_strfmt, to_strfmt) if '-' not in date0 else date0
if kind == 'trend':
date1 = date_tools.stringToString(date1, from_strfmt, to_strfmt) if '-' not in date1 else date1
on = 'close' if not on else on
df = pd.DataFrame(self.df[self._d[on]])
y0 = kwargs.get('y0', df.ix[date0].values[0])
y1 = kwargs.get('y1', df.ix[date1].values[0]) # depends on [control=['if'], data=[]]
if kind in ('support', 'resistance'):
mode = kwargs.pop('mode', 'starttoend')
if not on:
on = 'low' if kind == 'support' else 'high' # depends on [control=['if'], data=[]]
df = pd.DataFrame(self.df[self._d[on]])
y0 = kwargs.get('y0', df.ix[date0].values[0])
y1 = kwargs.get('y1', y0)
if mode == 'starttoend':
date0 = df.index[0]
date1 = df.index[-1] # depends on [control=['if'], data=[]]
elif mode == 'toend':
date1 = df.index[-1] # depends on [control=['if'], data=[]]
elif mode == 'fromstart':
date1 = date0
date0 = df.index[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['kind']]
if isinstance(date0, pd.Timestamp):
date0 = date_tools.dateToString(date0, to_strfmt) # depends on [control=['if'], data=[]]
if isinstance(date1, pd.Timestamp):
date1 = date_tools.dateToString(date1, to_strfmt) # depends on [control=['if'], data=[]]
d = {'x0': date0, 'x1': date1, 'y0': y0, 'y1': y1}
d.update(**kwargs)
shape = tools.get_shape(**d)
if ann_kwargs.get('text', False):
ann_kwargs['x'] = ann_kwargs.get('x', date_tools.dateToString(position(date_tools.stringToDate(date0, to_strfmt), date_tools.stringToDate(date1, to_strfmt)), to_strfmt))
ann_kwargs['y'] = ann_kwargs.get('y', position(shape['y0'], shape['y1'])) # depends on [control=['if'], data=[]]
else:
ann_kwargs = {}
return {'shape': shape, 'annotation': ann_kwargs} |
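Ignoring the shape/annotation plumbing, the trendline math reduces to reading two (date, value) anchor points off a series — and, for support/resistance, holding y constant while the mode picks the x span. A pandas-free sketch over a hypothetical price series:

```python
prices = {"2015-01-05": 101.2, "2015-02-10": 98.4, "2015-03-20": 109.9}
dates = sorted(prices)

def trend(date0, date1):
    return {"x0": date0, "x1": date1, "y0": prices[date0], "y1": prices[date1]}

def support(date0, mode="starttoend"):
    y = prices[date0]
    if mode == "starttoend":
        x0, x1 = dates[0], dates[-1]
    elif mode == "toend":
        x0, x1 = date0, dates[-1]
    else:  # "fromstart"
        x0, x1 = dates[0], date0
    return {"x0": x0, "x1": x1, "y0": y, "y1": y}

print(trend("2015-01-05", "2015-03-20"))
print(support("2015-02-10", mode="toend"))
```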
def _read_stream(self, stream):
"""
Read in the pod stream
"""
data = yaml.safe_load_all(stream=stream)
obj = self._find_convertable_object(data)
pod = self.pod_types[obj['kind']](obj)
return obj, pod.get('containers'), self.ingest_volumes_param(pod.get('volumes', [])) | def function[_read_stream, parameter[self, stream]]:
constant[
Read in the pod stream
]
variable[data] assign[=] call[name[yaml].safe_load_all, parameter[]]
variable[obj] assign[=] call[name[self]._find_convertable_object, parameter[name[data]]]
variable[pod] assign[=] call[call[name[self].pod_types][call[name[obj]][constant[kind]]], parameter[name[obj]]]
return[tuple[[<ast.Name object at 0x7da2041d8880>, <ast.Call object at 0x7da2041da9e0>, <ast.Call object at 0x7da2041da170>]]] | keyword[def] identifier[_read_stream] ( identifier[self] , identifier[stream] ):
literal[string]
identifier[data] = identifier[yaml] . identifier[safe_load_all] ( identifier[stream] = identifier[stream] )
identifier[obj] = identifier[self] . identifier[_find_convertable_object] ( identifier[data] )
identifier[pod] = identifier[self] . identifier[pod_types] [ identifier[obj] [ literal[string] ]]( identifier[obj] )
keyword[return] identifier[obj] , identifier[pod] . identifier[get] ( literal[string] ), identifier[self] . identifier[ingest_volumes_param] ( identifier[pod] . identifier[get] ( literal[string] ,[])) | def _read_stream(self, stream):
"""
Read in the pod stream
"""
data = yaml.safe_load_all(stream=stream)
obj = self._find_convertable_object(data)
pod = self.pod_types[obj['kind']](obj)
return (obj, pod.get('containers'), self.ingest_volumes_param(pod.get('volumes', []))) |
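`yaml.safe_load_all` yields one document per `---` section, so picking the convertible object is a scan over the stream. A sketch (requires PyYAML; the supported-kind set is an assumption standing in for `self.pod_types`):

```python
import yaml

STREAM = """\
kind: ConfigMap
---
kind: Pod
spec:
  containers: []
"""

POD_KINDS = {"Pod", "Deployment"}  # hypothetical stand-in for pod_types keys

def first_convertible(stream):
    for doc in yaml.safe_load_all(stream):
        if doc and doc.get("kind") in POD_KINDS:
            return doc
    raise ValueError("no convertible object in stream")

print(first_convertible(STREAM)["kind"])  # Pod
```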
def sg_print(tensor_list):
r"""Simple tensor printing function for debugging.
Prints the value, shape, and data type of each tensor in the list.
Args:
tensor_list: A list/tuple of tensors or a single tensor.
Returns:
The value of the tensors.
For example,
```python
import sugartensor as tf
a = tf.constant([1.])
b = tf.constant([2.])
out = tf.sg_print([a, b])
# Should print [ 1.] (1,) float32
# [ 2.] (1,) float32
print(out)
# Should print [array([ 1.], dtype=float32), array([ 2.], dtype=float32)]
```
"""
# to list
if type(tensor_list) is not list and type(tensor_list) is not tuple:
tensor_list = [tensor_list]
# evaluate tensor list with queue runner
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
sg_init(sess)
with tf.sg_queue_context():
res = sess.run(tensor_list)
for r in res:
print(r, r.shape, r.dtype)
if len(res) == 1:
return res[0]
else:
return res | def function[sg_print, parameter[tensor_list]]:
constant[Simple tensor printing function for debugging.
Prints the value, shape, and data type of each tensor in the list.
Args:
tensor_list: A list/tuple of tensors or a single tensor.
Returns:
The value of the tensors.
For example,
```python
import sugartensor as tf
a = tf.constant([1.])
b = tf.constant([2.])
out = tf.sg_print([a, b])
# Should print [ 1.] (1,) float32
# [ 2.] (1,) float32
print(out)
# Should print [array([ 1.], dtype=float32), array([ 2.], dtype=float32)]
```
]
if <ast.BoolOp object at 0x7da1b12b80d0> begin[:]
variable[tensor_list] assign[=] list[[<ast.Name object at 0x7da1b12b8400>]]
with call[name[tf].Session, parameter[]] begin[:]
call[name[sg_init], parameter[name[sess]]]
with call[name[tf].sg_queue_context, parameter[]] begin[:]
variable[res] assign[=] call[name[sess].run, parameter[name[tensor_list]]]
for taget[name[r]] in starred[name[res]] begin[:]
call[name[print], parameter[name[r], name[r].shape, name[r].dtype]]
if compare[call[name[len], parameter[name[res]]] equal[==] constant[1]] begin[:]
return[call[name[res]][constant[0]]] | keyword[def] identifier[sg_print] ( identifier[tensor_list] ):
literal[string]
keyword[if] identifier[type] ( identifier[tensor_list] ) keyword[is] keyword[not] identifier[list] keyword[and] identifier[type] ( identifier[tensor_list] ) keyword[is] keyword[not] identifier[tuple] :
identifier[tensor_list] =[ identifier[tensor_list] ]
keyword[with] identifier[tf] . identifier[Session] ( identifier[config] = identifier[tf] . identifier[ConfigProto] ( identifier[allow_soft_placement] = keyword[True] )) keyword[as] identifier[sess] :
identifier[sg_init] ( identifier[sess] )
keyword[with] identifier[tf] . identifier[sg_queue_context] ():
identifier[res] = identifier[sess] . identifier[run] ( identifier[tensor_list] )
keyword[for] identifier[r] keyword[in] identifier[res] :
identifier[print] ( identifier[r] , identifier[r] . identifier[shape] , identifier[r] . identifier[dtype] )
keyword[if] identifier[len] ( identifier[res] )== literal[int] :
keyword[return] identifier[res] [ literal[int] ]
keyword[else] :
keyword[return] identifier[res] | def sg_print(tensor_list):
"""Simple tensor printing function for debugging.
Prints the value, shape, and data type of each tensor in the list.
Args:
tensor_list: A list/tuple of tensors or a single tensor.
Returns:
The value of the tensors.
For example,
```python
import sugartensor as tf
a = tf.constant([1.])
b = tf.constant([2.])
out = tf.sg_print([a, b])
# Should print [ 1.] (1,) float32
# [ 2.] (1,) float32
print(out)
# Should print [array([ 1.], dtype=float32), array([ 2.], dtype=float32)]
```
"""
# to list
if type(tensor_list) is not list and type(tensor_list) is not tuple:
tensor_list = [tensor_list] # depends on [control=['if'], data=[]]
# evaluate tensor list with queue runner
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
sg_init(sess)
with tf.sg_queue_context():
res = sess.run(tensor_list)
for r in res:
print(r, r.shape, r.dtype) # depends on [control=['for'], data=['r']] # depends on [control=['with'], data=[]] # depends on [control=['with'], data=['sess']]
if len(res) == 1:
return res[0] # depends on [control=['if'], data=[]]
else:
return res |
def root_block(template_name=DEFAULT_TEMPLATE_NAME):
"""A decorator that is used to define that the decorated block function
will be at the root of the block template hierarchy. In the usual case
this will be the HTML skeleton of the document, unless the template is used
to serve partial HTML rendering for Ajax.
The :func:`root_block` decorator accepts the following arguments:
:param template_name: The name of the block template hierarchy which is
passed to the :func:`render_template` document
rendering function. Different templates are useful
for rendering documents with differing layouts
(e.g. admin back-end vs. site front-end), or for
partial HTML rendering for Ajax.
"""
def decorator(block_func):
block = RootBlock(block_func, template_name)
return block_func
return decorator | def function[root_block, parameter[template_name]]:
constant[A decorator that is used to define that the decorated block function
will be at the root of the block template hierarchy. In the usual case
this will be the HTML skeleton of the document, unless the template is used
to serve partial HTML rendering for Ajax.
The :func:`root_block` decorator accepts the following arguments:
:param template_name: The name of the block template hierarchy which is
passed to the :func:`render_template` document
rendering function. Different templates are useful
for rendering documents with differing layouts
(e.g. admin back-end vs. site front-end), or for
partial HTML rendering for Ajax.
]
def function[decorator, parameter[block_func]]:
variable[block] assign[=] call[name[RootBlock], parameter[name[block_func], name[template_name]]]
return[name[block_func]]
return[name[decorator]] | keyword[def] identifier[root_block] ( identifier[template_name] = identifier[DEFAULT_TEMPLATE_NAME] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[block_func] ):
identifier[block] = identifier[RootBlock] ( identifier[block_func] , identifier[template_name] )
keyword[return] identifier[block_func]
keyword[return] identifier[decorator] | def root_block(template_name=DEFAULT_TEMPLATE_NAME):
"""A decorator that is used to define that the decorated block function
will be at the root of the block template hierarchy. In the usual case
this will be the HTML skeleton of the document, unless the template is used
to serve partial HTML rendering for Ajax.
The :func:`root_block` decorator accepts the following arguments:
:param template_name: The name of the block template hierarchy which is
passed to the :func:`render_template` document
rendering function. Different templates are useful
for rendering documents with differing layouts
(e.g. admin back-end vs. site front-end), or for
partial HTML rendering for Ajax.
"""
def decorator(block_func):
block = RootBlock(block_func, template_name)
return block_func
return decorator |
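The decorator registers the block as a side effect and returns the function unchanged, so the decorated name stays an ordinary callable. A sketch with a plain dict standing in for `RootBlock`:

```python
_roots = {}  # template_name -> root block function (stand-in for RootBlock)

def root_block(template_name="default"):
    def decorator(block_func):
        _roots[template_name] = block_func
        return block_func
    return decorator

@root_block("admin")
def admin_skeleton():
    return "<html>...</html>"

print(_roots["admin"] is admin_skeleton)  # True
```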
def make_tree(statement, filename="<aexec>", symbol="single", local={}):
"""Helper for *aexec*."""
# Create tree
tree = ast.parse(CORO_CODE, filename, symbol)
# Check expression statement
if isinstance(statement, ast.Expr):
tree.body[0].body[0].value.elts[0] = statement.value
else:
tree.body[0].body.insert(0, statement)
# Check and return coroutine
exec(compile(tree, filename, symbol))
return tree | def function[make_tree, parameter[statement, filename, symbol, local]]:
constant[Helper for *aexec*.]
variable[tree] assign[=] call[name[ast].parse, parameter[name[CORO_CODE], name[filename], name[symbol]]]
if call[name[isinstance], parameter[name[statement], name[ast].Expr]] begin[:]
call[call[call[name[tree].body][constant[0]].body][constant[0]].value.elts][constant[0]] assign[=] name[statement].value
call[name[exec], parameter[call[name[compile], parameter[name[tree], name[filename], name[symbol]]]]]
return[name[tree]] | keyword[def] identifier[make_tree] ( identifier[statement] , identifier[filename] = literal[string] , identifier[symbol] = literal[string] , identifier[local] ={}):
literal[string]
identifier[tree] = identifier[ast] . identifier[parse] ( identifier[CORO_CODE] , identifier[filename] , identifier[symbol] )
keyword[if] identifier[isinstance] ( identifier[statement] , identifier[ast] . identifier[Expr] ):
identifier[tree] . identifier[body] [ literal[int] ]. identifier[body] [ literal[int] ]. identifier[value] . identifier[elts] [ literal[int] ]= identifier[statement] . identifier[value]
keyword[else] :
identifier[tree] . identifier[body] [ literal[int] ]. identifier[body] . identifier[insert] ( literal[int] , identifier[statement] )
identifier[exec] ( identifier[compile] ( identifier[tree] , identifier[filename] , identifier[symbol] ))
keyword[return] identifier[tree] | def make_tree(statement, filename='<aexec>', symbol='single', local={}):
"""Helper for *aexec*."""
# Create tree
tree = ast.parse(CORO_CODE, filename, symbol)
# Check expression statement
if isinstance(statement, ast.Expr):
tree.body[0].body[0].value.elts[0] = statement.value # depends on [control=['if'], data=[]]
else:
tree.body[0].body.insert(0, statement)
# Check and return coroutine
exec(compile(tree, filename, symbol))
return tree |
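The same grafting trick can be reproduced with nothing but the `ast` module: parse a coroutine template, splice the user's statement into its body, and compile to validate. The template string below is a hypothetical stand-in for `CORO_CODE`:

```python
import ast

CORO_CODE = "async def __coro():\n    return None\n"

def make_tree(statement_src, filename="<aexec>"):
    tree = ast.parse(CORO_CODE, filename)
    statement = ast.parse(statement_src, filename).body[0]
    if isinstance(statement, ast.Expr):
        # expression statement: make it the coroutine's return value
        tree.body[0].body[0] = ast.Return(value=statement.value)
    else:
        tree.body[0].body.insert(0, statement)
    ast.fix_missing_locations(tree)
    compile(tree, filename, "exec")  # raises if the graft is malformed
    return tree

print(ast.dump(make_tree("1 + 1").body[0].body[0]))
```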
def register_model(self, model):
"""
Register ``model`` to this group
:param model: model name
:return: None
"""
assert isinstance(model, str)
if model not in self.all_models:
self.all_models.append(model) | def function[register_model, parameter[self, model]]:
constant[
Register ``model`` to this group
:param model: model name
:return: None
]
assert[call[name[isinstance], parameter[name[model], name[str]]]]
if compare[name[model] <ast.NotIn object at 0x7da2590d7190> name[self].all_models] begin[:]
call[name[self].all_models.append, parameter[name[model]]] | keyword[def] identifier[register_model] ( identifier[self] , identifier[model] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[model] , identifier[str] )
keyword[if] identifier[model] keyword[not] keyword[in] identifier[self] . identifier[all_models] :
identifier[self] . identifier[all_models] . identifier[append] ( identifier[model] ) | def register_model(self, model):
"""
Register ``model`` to this group
:param model: model name
:return: None
"""
assert isinstance(model, str)
if model not in self.all_models:
self.all_models.append(model) # depends on [control=['if'], data=['model']] |
def get_deleted_objects(self, request, queryset):
"""
Find all objects related to instances of ``queryset`` that should also be deleted.
Returns
- to_delete - a nested list of strings suitable for display in the template with the ``unordered_list`` filter.
- model_count - statistics for models of all deleted instances
        - perms_needed - list of names for all instances which cannot be deleted because of insufficient rights
        - protected - list of names for all objects protected from deletion because of their reference type
"""
collector = NestedObjects(using=queryset.db)
collector.collect(queryset)
model_perms_needed = set()
object_perms_needed = set()
STRONG_DELETION_CONTROL = getattr(settings, 'ACCESS_STRONG_DELETION_CONTROL', False)
def format_callback(obj):
has_admin = obj.__class__ in self.admin_site._registry
opts = obj._meta
no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
# Trying to get admin change URL
admin_url = None
try:
admin_url = reverse('%s:%s_%s_change'
% (self.admin_site.name,
opts.app_label,
opts.model_name),
None, (quote(obj._get_pk_val()),))
except NoReverseMatch:
# Change url doesn't exist -- don't display link to edit
pass
# Collecting forbidden subobjects, compatible with Django or forced by the option
if STRONG_DELETION_CONTROL or has_admin:
if not obj.__class__._meta.auto_created:
manager = AccessManager(obj.__class__)
# filter out forbidden items
if manager.check_deleteable(obj.__class__, request) is False:
model_perms_needed.add(opts.verbose_name)
if not manager.apply_deleteable(obj.__class__._default_manager.filter(pk=obj.pk), request):
object_perms_needed.add(obj)
if admin_url:
# Display a link to the admin page.
return format_html('{}: <a href="{}">{}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return no_edit_link
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
protected = set([format_callback(obj) for obj in object_perms_needed]).union(protected)
model_count = {model._meta.verbose_name_plural: len(objs) for model, objs in collector.model_objs.items()}
return to_delete, model_count, model_perms_needed, protected | def function[get_deleted_objects, parameter[self, request, queryset]]:
constant[
Find all objects related to instances of ``queryset`` that should also be deleted.
Returns
- to_delete - a nested list of strings suitable for display in the template with the ``unordered_list`` filter.
- model_count - statistics for models of all deleted instances
- perms_needed - list of names for all instances which cannot be deleted due to insufficient rights
- protected - list of names for all objects protected from deletion because of their reference type
]
variable[collector] assign[=] call[name[NestedObjects], parameter[]]
call[name[collector].collect, parameter[name[queryset]]]
variable[model_perms_needed] assign[=] call[name[set], parameter[]]
variable[object_perms_needed] assign[=] call[name[set], parameter[]]
variable[STRONG_DELETION_CONTROL] assign[=] call[name[getattr], parameter[name[settings], constant[ACCESS_STRONG_DELETION_CONTROL], constant[False]]]
def function[format_callback, parameter[obj]]:
variable[has_admin] assign[=] compare[name[obj].__class__ in name[self].admin_site._registry]
variable[opts] assign[=] name[obj]._meta
variable[no_edit_link] assign[=] binary_operation[constant[%s: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da207f01ed0>, <ast.Call object at 0x7da207f01120>]]]
variable[admin_url] assign[=] constant[None]
<ast.Try object at 0x7da207f00280>
if <ast.BoolOp object at 0x7da18f09ee00> begin[:]
if <ast.UnaryOp object at 0x7da18f09fac0> begin[:]
variable[manager] assign[=] call[name[AccessManager], parameter[name[obj].__class__]]
if compare[call[name[manager].check_deleteable, parameter[name[obj].__class__, name[request]]] is constant[False]] begin[:]
call[name[model_perms_needed].add, parameter[name[opts].verbose_name]]
if <ast.UnaryOp object at 0x7da18f09e770> begin[:]
call[name[object_perms_needed].add, parameter[name[obj]]]
if name[admin_url] begin[:]
return[call[name[format_html], parameter[constant[{}: <a href="{}">{}</a>], call[name[capfirst], parameter[name[opts].verbose_name]], name[admin_url], name[obj]]]]
variable[to_delete] assign[=] call[name[collector].nested, parameter[name[format_callback]]]
variable[protected] assign[=] <ast.ListComp object at 0x7da20c993a90>
variable[protected] assign[=] call[call[name[set], parameter[<ast.ListComp object at 0x7da20c993820>]].union, parameter[name[protected]]]
variable[model_count] assign[=] <ast.DictComp object at 0x7da20c9900d0>
return[tuple[[<ast.Name object at 0x7da20c990b20>, <ast.Name object at 0x7da20c9908e0>, <ast.Name object at 0x7da20c992a40>, <ast.Name object at 0x7da20c991240>]]] | keyword[def] identifier[get_deleted_objects] ( identifier[self] , identifier[request] , identifier[queryset] ):
literal[string]
identifier[collector] = identifier[NestedObjects] ( identifier[using] = identifier[queryset] . identifier[db] )
identifier[collector] . identifier[collect] ( identifier[queryset] )
identifier[model_perms_needed] = identifier[set] ()
identifier[object_perms_needed] = identifier[set] ()
identifier[STRONG_DELETION_CONTROL] = identifier[getattr] ( identifier[settings] , literal[string] , keyword[False] )
keyword[def] identifier[format_callback] ( identifier[obj] ):
identifier[has_admin] = identifier[obj] . identifier[__class__] keyword[in] identifier[self] . identifier[admin_site] . identifier[_registry]
identifier[opts] = identifier[obj] . identifier[_meta]
identifier[no_edit_link] = literal[string] %( identifier[capfirst] ( identifier[opts] . identifier[verbose_name] ),
identifier[force_text] ( identifier[obj] ))
identifier[admin_url] = keyword[None]
keyword[try] :
identifier[admin_url] = identifier[reverse] ( literal[string]
%( identifier[self] . identifier[admin_site] . identifier[name] ,
identifier[opts] . identifier[app_label] ,
identifier[opts] . identifier[model_name] ),
keyword[None] ,( identifier[quote] ( identifier[obj] . identifier[_get_pk_val] ()),))
keyword[except] identifier[NoReverseMatch] :
keyword[pass]
keyword[if] identifier[STRONG_DELETION_CONTROL] keyword[or] identifier[has_admin] :
keyword[if] keyword[not] identifier[obj] . identifier[__class__] . identifier[_meta] . identifier[auto_created] :
identifier[manager] = identifier[AccessManager] ( identifier[obj] . identifier[__class__] )
keyword[if] identifier[manager] . identifier[check_deleteable] ( identifier[obj] . identifier[__class__] , identifier[request] ) keyword[is] keyword[False] :
identifier[model_perms_needed] . identifier[add] ( identifier[opts] . identifier[verbose_name] )
keyword[if] keyword[not] identifier[manager] . identifier[apply_deleteable] ( identifier[obj] . identifier[__class__] . identifier[_default_manager] . identifier[filter] ( identifier[pk] = identifier[obj] . identifier[pk] ), identifier[request] ):
identifier[object_perms_needed] . identifier[add] ( identifier[obj] )
keyword[if] identifier[admin_url] :
keyword[return] identifier[format_html] ( literal[string] ,
identifier[capfirst] ( identifier[opts] . identifier[verbose_name] ),
identifier[admin_url] ,
identifier[obj] )
keyword[else] :
keyword[return] identifier[no_edit_link]
identifier[to_delete] = identifier[collector] . identifier[nested] ( identifier[format_callback] )
identifier[protected] =[ identifier[format_callback] ( identifier[obj] ) keyword[for] identifier[obj] keyword[in] identifier[collector] . identifier[protected] ]
identifier[protected] = identifier[set] ([ identifier[format_callback] ( identifier[obj] ) keyword[for] identifier[obj] keyword[in] identifier[object_perms_needed] ]). identifier[union] ( identifier[protected] )
identifier[model_count] ={ identifier[model] . identifier[_meta] . identifier[verbose_name_plural] : identifier[len] ( identifier[objs] ) keyword[for] identifier[model] , identifier[objs] keyword[in] identifier[collector] . identifier[model_objs] . identifier[items] ()}
keyword[return] identifier[to_delete] , identifier[model_count] , identifier[model_perms_needed] , identifier[protected] | def get_deleted_objects(self, request, queryset):
"""
Find all objects related to instances of ``queryset`` that should also be deleted.
Returns
- to_delete - a nested list of strings suitable for display in the template with the ``unordered_list`` filter.
- model_count - statistics for models of all deleted instances
- perms_needed - list of names for all instances which cannot be deleted due to insufficient rights
- protected - list of names for all objects protected from deletion because of their reference type
"""
collector = NestedObjects(using=queryset.db)
collector.collect(queryset)
model_perms_needed = set()
object_perms_needed = set()
STRONG_DELETION_CONTROL = getattr(settings, 'ACCESS_STRONG_DELETION_CONTROL', False)
def format_callback(obj):
has_admin = obj.__class__ in self.admin_site._registry
opts = obj._meta
no_edit_link = '%s: %s' % (capfirst(opts.verbose_name), force_text(obj))
# Trying to get admin change URL
admin_url = None
try:
admin_url = reverse('%s:%s_%s_change' % (self.admin_site.name, opts.app_label, opts.model_name), None, (quote(obj._get_pk_val()),)) # depends on [control=['try'], data=[]]
except NoReverseMatch:
# Change url doesn't exist -- don't display link to edit
pass # depends on [control=['except'], data=[]]
# Collecting forbidden subobjects, compatible with Django or forced by the option
if STRONG_DELETION_CONTROL or has_admin:
if not obj.__class__._meta.auto_created:
manager = AccessManager(obj.__class__)
# filter out forbidden items
if manager.check_deleteable(obj.__class__, request) is False:
model_perms_needed.add(opts.verbose_name) # depends on [control=['if'], data=[]]
if not manager.apply_deleteable(obj.__class__._default_manager.filter(pk=obj.pk), request):
object_perms_needed.add(obj) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if admin_url:
# Display a link to the admin page.
return format_html('{}: <a href="{}">{}</a>', capfirst(opts.verbose_name), admin_url, obj) # depends on [control=['if'], data=[]]
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return no_edit_link
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
protected = set([format_callback(obj) for obj in object_perms_needed]).union(protected)
model_count = {model._meta.verbose_name_plural: len(objs) for (model, objs) in collector.model_objs.items()}
return (to_delete, model_count, model_perms_needed, protected) |
def get_residual_norms(H, self_adjoint=False):
'''Compute relative residual norms from a Hessenberg matrix.
It is assumed that the initial guess is chosen as zero.'''
H = H.copy()
n_, n = H.shape
y = numpy.eye(n_, 1, dtype=H.dtype)
resnorms = [1.]
for i in range(n_-1):
G = Givens(H[i:i+2, [i]])
if self_adjoint:
H[i:i+2, i:i+3] = G.apply(H[i:i+2, i:i+3])
else:
H[i:i+2, i:] = G.apply(H[i:i+2, i:])
y[i:i+2] = G.apply(y[i:i+2])
resnorms.append(numpy.abs(y[i+1, 0]))
if n_ == n:
resnorms.append(0.)
return numpy.array(resnorms) | def function[get_residual_norms, parameter[H, self_adjoint]]:
constant[Compute relative residual norms from a Hessenberg matrix.
It is assumed that the initial guess is chosen as zero.]
variable[H] assign[=] call[name[H].copy, parameter[]]
<ast.Tuple object at 0x7da1b26279a0> assign[=] name[H].shape
variable[y] assign[=] call[name[numpy].eye, parameter[name[n_], constant[1]]]
variable[resnorms] assign[=] list[[<ast.Constant object at 0x7da1b26276d0>]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[name[n_] - constant[1]]]]] begin[:]
variable[G] assign[=] call[name[Givens], parameter[call[name[H]][tuple[[<ast.Slice object at 0x7da1b2627d30>, <ast.List object at 0x7da1b2627ee0>]]]]]
if name[self_adjoint] begin[:]
call[name[H]][tuple[[<ast.Slice object at 0x7da1b26274f0>, <ast.Slice object at 0x7da1b2627a00>]]] assign[=] call[name[G].apply, parameter[call[name[H]][tuple[[<ast.Slice object at 0x7da1b26271f0>, <ast.Slice object at 0x7da1b26a4c10>]]]]]
call[name[y]][<ast.Slice object at 0x7da1b26a6a70>] assign[=] call[name[G].apply, parameter[call[name[y]][<ast.Slice object at 0x7da1b26a6d10>]]]
call[name[resnorms].append, parameter[call[name[numpy].abs, parameter[call[name[y]][tuple[[<ast.BinOp object at 0x7da1b26a6cb0>, <ast.Constant object at 0x7da1b26a7280>]]]]]]]
if compare[name[n_] equal[==] name[n]] begin[:]
call[name[resnorms].append, parameter[constant[0.0]]]
return[call[name[numpy].array, parameter[name[resnorms]]]] | keyword[def] identifier[get_residual_norms] ( identifier[H] , identifier[self_adjoint] = keyword[False] ):
literal[string]
identifier[H] = identifier[H] . identifier[copy] ()
identifier[n_] , identifier[n] = identifier[H] . identifier[shape]
identifier[y] = identifier[numpy] . identifier[eye] ( identifier[n_] , literal[int] , identifier[dtype] = identifier[H] . identifier[dtype] )
identifier[resnorms] =[ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n_] - literal[int] ):
identifier[G] = identifier[Givens] ( identifier[H] [ identifier[i] : identifier[i] + literal[int] ,[ identifier[i] ]])
keyword[if] identifier[self_adjoint] :
identifier[H] [ identifier[i] : identifier[i] + literal[int] , identifier[i] : identifier[i] + literal[int] ]= identifier[G] . identifier[apply] ( identifier[H] [ identifier[i] : identifier[i] + literal[int] , identifier[i] : identifier[i] + literal[int] ])
keyword[else] :
identifier[H] [ identifier[i] : identifier[i] + literal[int] , identifier[i] :]= identifier[G] . identifier[apply] ( identifier[H] [ identifier[i] : identifier[i] + literal[int] , identifier[i] :])
identifier[y] [ identifier[i] : identifier[i] + literal[int] ]= identifier[G] . identifier[apply] ( identifier[y] [ identifier[i] : identifier[i] + literal[int] ])
identifier[resnorms] . identifier[append] ( identifier[numpy] . identifier[abs] ( identifier[y] [ identifier[i] + literal[int] , literal[int] ]))
keyword[if] identifier[n_] == identifier[n] :
identifier[resnorms] . identifier[append] ( literal[int] )
keyword[return] identifier[numpy] . identifier[array] ( identifier[resnorms] ) | def get_residual_norms(H, self_adjoint=False):
"""Compute relative residual norms from Hessenberg matrix.
It is assumed that the initial guess is chosen as zero."""
H = H.copy()
(n_, n) = H.shape
y = numpy.eye(n_, 1, dtype=H.dtype)
resnorms = [1.0]
for i in range(n_ - 1):
G = Givens(H[i:i + 2, [i]])
if self_adjoint:
H[i:i + 2, i:i + 3] = G.apply(H[i:i + 2, i:i + 3]) # depends on [control=['if'], data=[]]
else:
H[i:i + 2, i:] = G.apply(H[i:i + 2, i:])
y[i:i + 2] = G.apply(y[i:i + 2])
resnorms.append(numpy.abs(y[i + 1, 0])) # depends on [control=['for'], data=['i']]
if n_ == n:
resnorms.append(0.0) # depends on [control=['if'], data=[]]
return numpy.array(resnorms) |
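
A runnable sketch of the routine above. The `Givens` helper it relies on is external (a krypy-style rotation class); the stand-in below implements the same contract for real matrices -- it is constructed from a 2x1 column, and `apply` multiplies by the rotation that zeroes that column's second entry. The least-squares cross-check confirms each entry is the GMRES residual norm for a unit-norm initial residual:

import numpy

class Givens:
    # minimal real-valued stand-in for the helper assumed by get_residual_norms
    def __init__(self, x):
        a, b = float(x[0, 0]), float(x[1, 0])
        r = numpy.hypot(a, b)
        c, s = (a / r, b / r) if r != 0 else (1.0, 0.0)
        self.G = numpy.array([[c, s], [-s, c]])  # G @ [a, b]^T == [r, 0]^T

    def apply(self, M):
        return self.G @ M

# 3x2 extended Hessenberg matrix, as produced by two Arnoldi steps
H = numpy.array([[2.0, 1.0],
                 [1.0, 2.0],
                 [0.0, 1.0]])
resnorms = get_residual_norms(H)

# entry k+1 equals min_y ||e1 - H[:k+2, :k+1] y||_2
for k in range(H.shape[1]):
    e1 = numpy.eye(k + 2, 1)
    y = numpy.linalg.lstsq(H[:k + 2, :k + 1], e1, rcond=None)[0]
    assert numpy.isclose(resnorms[k + 1],
                         numpy.linalg.norm(e1 - H[:k + 2, :k + 1] @ y))
print(resnorms)  # approx [1.0, 0.447, 0.267]
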
def outline(self, level=logging.INFO, message=""):
"""Print an outline of the actions the plan is going to take.
The outline will represent the rough ordering of the steps that will be
taken.
Args:
level (int, optional): a valid log level that should be used to log
the outline
message (str, optional): a message that will be logged to
the user after the outline has been logged.
"""
steps = 1
logger.log(level, "Plan \"%s\":", self.description)
for step in self.steps:
logger.log(
level,
" - step: %s: target: \"%s\", action: \"%s\"",
steps,
step.name,
step.fn.__name__,
)
steps += 1
if message:
logger.log(level, message) | def function[outline, parameter[self, level, message]]:
constant[Print an outline of the actions the plan is going to take.
The outline will represent the rough ordering of the steps that will be
taken.
Args:
level (int, optional): a valid log level that should be used to log
the outline
message (str, optional): a message that will be logged to
the user after the outline has been logged.
]
variable[steps] assign[=] constant[1]
call[name[logger].log, parameter[name[level], constant[Plan "%s":], name[self].description]]
for taget[name[step]] in starred[name[self].steps] begin[:]
call[name[logger].log, parameter[name[level], constant[ - step: %s: target: "%s", action: "%s"], name[steps], name[step].name, name[step].fn.__name__]]
<ast.AugAssign object at 0x7da20e961b40>
if name[message] begin[:]
call[name[logger].log, parameter[name[level], name[message]]] | keyword[def] identifier[outline] ( identifier[self] , identifier[level] = identifier[logging] . identifier[INFO] , identifier[message] = literal[string] ):
literal[string]
identifier[steps] = literal[int]
identifier[logger] . identifier[log] ( identifier[level] , literal[string] , identifier[self] . identifier[description] )
keyword[for] identifier[step] keyword[in] identifier[self] . identifier[steps] :
identifier[logger] . identifier[log] (
identifier[level] ,
literal[string] ,
identifier[steps] ,
identifier[step] . identifier[name] ,
identifier[step] . identifier[fn] . identifier[__name__] ,
)
identifier[steps] += literal[int]
keyword[if] identifier[message] :
identifier[logger] . identifier[log] ( identifier[level] , identifier[message] ) | def outline(self, level=logging.INFO, message=''):
"""Print an outline of the actions the plan is going to take.
The outline will represent the rough ordering of the steps that will be
taken.
Args:
level (int, optional): a valid log level that should be used to log
the outline
message (str, optional): a message that will be logged to
the user after the outline has been logged.
"""
steps = 1
logger.log(level, 'Plan "%s":', self.description)
for step in self.steps:
logger.log(level, ' - step: %s: target: "%s", action: "%s"', steps, step.name, step.fn.__name__)
steps += 1 # depends on [control=['for'], data=['step']]
if message:
logger.log(level, message) # depends on [control=['if'], data=[]] |
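
A self-contained sketch of the method above. The `Plan`/`Step` containers are hypothetical stand-ins exposing just the attributes `outline` reads (`description`, `steps`, and each step's `name` and `fn`), and the snippet assumes `outline` and the `logger` below live in the same module:

import logging
from collections import namedtuple

logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger(__name__)

Step = namedtuple("Step", ["name", "fn"])

class Plan:
    # hypothetical container with just the attributes outline() reads
    def __init__(self, description, steps):
        self.description = description
        self.steps = steps

Plan.outline = outline  # attach the function above as a method

def create_bucket(): pass
def upload_assets(): pass

plan = Plan("deploy static site",
            [Step("bucket", create_bucket), Step("assets", upload_assets)])
plan.outline(message="Run with --force to apply.")
# Plan "deploy static site":
#   - step: 1: target: "bucket", action: "create_bucket"
#   - step: 2: target: "assets", action: "upload_assets"
# Run with --force to apply.
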
def merge_config(template, config, list_identifiers=None):
"""
Merges ``config`` on top of ``template``.
Conflicting keys are handled in the following way:
* simple values (e.g. ``str``, ``int``, ``float``, etc.) in ``config`` will
overwrite the ones in ``template``
* values of type ``list`` in both ``config`` and ``template`` will be
merged using the ``merge_list`` function
* values of type ``dict`` will be merged recursively
:param template: template ``dict``
:param config: config ``dict``
:param list_identifiers: ``list`` or ``None``
:returns: merged ``dict``
"""
result = template.copy()
for key, value in config.items():
if isinstance(value, dict):
node = result.get(key, OrderedDict())
result[key] = merge_config(node, value)
elif isinstance(value, list) and isinstance(result.get(key), list):
result[key] = merge_list(result[key], value, list_identifiers)
else:
result[key] = value
return result | def function[merge_config, parameter[template, config, list_identifiers]]:
constant[
Merges ``config`` on top of ``template``.
Conflicting keys are handled in the following way:
* simple values (e.g. ``str``, ``int``, ``float``, etc.) in ``config`` will
overwrite the ones in ``template``
* values of type ``list`` in both ``config`` and ``template`` will be
merged using the ``merge_list`` function
* values of type ``dict`` will be merged recursively
:param template: template ``dict``
:param config: config ``dict``
:param list_identifiers: ``list`` or ``None``
:returns: merged ``dict``
]
variable[result] assign[=] call[name[template].copy, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0060cd0>, <ast.Name object at 0x7da1b0061e10>]]] in starred[call[name[config].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[value], name[dict]]] begin[:]
variable[node] assign[=] call[name[result].get, parameter[name[key], call[name[OrderedDict], parameter[]]]]
call[name[result]][name[key]] assign[=] call[name[merge_config], parameter[name[node], name[value]]]
return[name[result]] | keyword[def] identifier[merge_config] ( identifier[template] , identifier[config] , identifier[list_identifiers] = keyword[None] ):
literal[string]
identifier[result] = identifier[template] . identifier[copy] ()
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[config] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ):
identifier[node] = identifier[result] . identifier[get] ( identifier[key] , identifier[OrderedDict] ())
identifier[result] [ identifier[key] ]= identifier[merge_config] ( identifier[node] , identifier[value] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[list] ) keyword[and] identifier[isinstance] ( identifier[result] . identifier[get] ( identifier[key] ), identifier[list] ):
identifier[result] [ identifier[key] ]= identifier[merge_list] ( identifier[result] [ identifier[key] ], identifier[value] , identifier[list_identifiers] )
keyword[else] :
identifier[result] [ identifier[key] ]= identifier[value]
keyword[return] identifier[result] | def merge_config(template, config, list_identifiers=None):
"""
Merges ``config`` on top of ``template``.
Conflicting keys are handled in the following way:
* simple values (e.g. ``str``, ``int``, ``float``, etc.) in ``config`` will
overwrite the ones in ``template``
* values of type ``list`` in both ``config`` and ``template`` will be
merged using the ``merge_list`` function
* values of type ``dict`` will be merged recursively
:param template: template ``dict``
:param config: config ``dict``
:param list_identifiers: ``list`` or ``None``
:returns: merged ``dict``
"""
result = template.copy()
for (key, value) in config.items():
if isinstance(value, dict):
node = result.get(key, OrderedDict())
result[key] = merge_config(node, value) # depends on [control=['if'], data=[]]
elif isinstance(value, list) and isinstance(result.get(key), list):
result[key] = merge_list(result[key], value, list_identifiers) # depends on [control=['if'], data=[]]
else:
result[key] = value # depends on [control=['for'], data=[]]
return result |
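
A quick, runnable illustration using nested dicts only, so the external `merge_list` helper is never invoked; note that `merge_config` must be defined in a module where `OrderedDict` is imported, since the `result.get(key, OrderedDict())` default is evaluated on every recursive call:

from collections import OrderedDict

template = OrderedDict([
    ("general", OrderedDict([("hostname", "default"), ("timezone", "UTC")])),
    ("radio", OrderedDict([("channel", 1)])),
])
config = OrderedDict([
    ("general", OrderedDict([("hostname", "node-7")])),
])

merged = merge_config(template, config)
print(merged["general"]["hostname"])  # node-7  (overridden by config)
print(merged["general"]["timezone"])  # UTC     (inherited from template)
print(merged["radio"]["channel"])     # 1
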
def get_way(self, way_id, resolve_missing=False):
"""
Get a way by its ID.
:param way_id: The way ID
:type way_id: Integer
:param resolve_missing: Query the Overpass API if the way is missing in the result set.
:return: The way
:rtype: overpy.Way
:raises overpy.exception.DataIncomplete: The requested way is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the way can't be resolved.
"""
ways = self.get_ways(way_id=way_id)
if len(ways) == 0:
if resolve_missing is False:
raise exception.DataIncomplete("Resolve missing way is disabled")
query = ("\n"
"[out:json];\n"
"way({way_id});\n"
"out body;\n"
)
query = query.format(
way_id=way_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
ways = self.get_ways(way_id=way_id)
if len(ways) == 0:
raise exception.DataIncomplete("Unable to resolve requested way")
return ways[0] | def function[get_way, parameter[self, way_id, resolve_missing]]:
constant[
Get a way by its ID.
:param way_id: The way ID
:type way_id: Integer
:param resolve_missing: Query the Overpass API if the way is missing in the result set.
:return: The way
:rtype: overpy.Way
:raises overpy.exception.DataIncomplete: The requested way is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the way can't be resolved.
]
variable[ways] assign[=] call[name[self].get_ways, parameter[]]
if compare[call[name[len], parameter[name[ways]]] equal[==] constant[0]] begin[:]
if compare[name[resolve_missing] is constant[False]] begin[:]
<ast.Raise object at 0x7da1b0405090>
variable[query] assign[=] constant[
[out:json];
way({way_id});
out body;
]
variable[query] assign[=] call[name[query].format, parameter[]]
variable[tmp_result] assign[=] call[name[self].api.query, parameter[name[query]]]
call[name[self].expand, parameter[name[tmp_result]]]
variable[ways] assign[=] call[name[self].get_ways, parameter[]]
if compare[call[name[len], parameter[name[ways]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b0404490>
return[call[name[ways]][constant[0]]] | keyword[def] identifier[get_way] ( identifier[self] , identifier[way_id] , identifier[resolve_missing] = keyword[False] ):
literal[string]
identifier[ways] = identifier[self] . identifier[get_ways] ( identifier[way_id] = identifier[way_id] )
keyword[if] identifier[len] ( identifier[ways] )== literal[int] :
keyword[if] identifier[resolve_missing] keyword[is] keyword[False] :
keyword[raise] identifier[exception] . identifier[DataIncomplete] ( literal[string] )
identifier[query] =( literal[string]
literal[string]
literal[string]
literal[string]
)
identifier[query] = identifier[query] . identifier[format] (
identifier[way_id] = identifier[way_id]
)
identifier[tmp_result] = identifier[self] . identifier[api] . identifier[query] ( identifier[query] )
identifier[self] . identifier[expand] ( identifier[tmp_result] )
identifier[ways] = identifier[self] . identifier[get_ways] ( identifier[way_id] = identifier[way_id] )
keyword[if] identifier[len] ( identifier[ways] )== literal[int] :
keyword[raise] identifier[exception] . identifier[DataIncomplete] ( literal[string] )
keyword[return] identifier[ways] [ literal[int] ] | def get_way(self, way_id, resolve_missing=False):
"""
Get a way by its ID.
:param way_id: The way ID
:type way_id: Integer
:param resolve_missing: Query the Overpass API if the way is missing in the result set.
:return: The way
:rtype: overpy.Way
:raises overpy.exception.DataIncomplete: The requested way is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the way can't be resolved.
"""
ways = self.get_ways(way_id=way_id)
if len(ways) == 0:
if resolve_missing is False:
raise exception.DataIncomplete('Resolve missing way is disabled') # depends on [control=['if'], data=[]]
query = '\n[out:json];\nway({way_id});\nout body;\n'
query = query.format(way_id=way_id)
tmp_result = self.api.query(query)
self.expand(tmp_result)
ways = self.get_ways(way_id=way_id) # depends on [control=['if'], data=[]]
if len(ways) == 0:
raise exception.DataIncomplete('Unable to resolve requested way') # depends on [control=['if'], data=[]]
return ways[0] |
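
A hedged usage sketch with the `overpy` library this result type appears to belong to. Network access to the public Overpass API is required, and the way ID below is a placeholder -- substitute one relevant to your own query:

import overpy

api = overpy.Overpass()
# Fetch only nodes, so ways referencing them are absent from the result.
result = api.query("node(50.745, 7.17, 50.75, 7.18);out;")

# With resolve_missing=True a follow-up query fetches the missing way;
# with the default False this would raise overpy.exception.DataIncomplete.
way = result.get_way(4080250, resolve_missing=True)
print(way.tags.get("name", "n/a"))
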
async def SetVolumeInfo(self, volumes):
'''
volumes : typing.Sequence[~Volume]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='StorageProvisioner',
request='SetVolumeInfo',
version=4,
params=_params)
_params['volumes'] = volumes
reply = await self.rpc(msg)
return reply | <ast.AsyncFunctionDef object at 0x7da18dc06080> | keyword[async] keyword[def] identifier[SetVolumeInfo] ( identifier[self] , identifier[volumes] ):
literal[string]
identifier[_params] = identifier[dict] ()
identifier[msg] = identifier[dict] ( identifier[type] = literal[string] ,
identifier[request] = literal[string] ,
identifier[version] = literal[int] ,
identifier[params] = identifier[_params] )
identifier[_params] [ literal[string] ]= identifier[volumes]
identifier[reply] = keyword[await] identifier[self] . identifier[rpc] ( identifier[msg] )
keyword[return] identifier[reply] | async def SetVolumeInfo(self, volumes):
"""
volumes : typing.Sequence[~Volume]
Returns -> typing.Sequence[~ErrorResult]
"""
# map input types to rpc msg
_params = dict()
msg = dict(type='StorageProvisioner', request='SetVolumeInfo', version=4, params=_params)
_params['volumes'] = volumes
reply = await self.rpc(msg)
return reply |
def Close(self):
"""Closes the connection to TimeSketch Elasticsearch database.
Sends the remaining events for indexing and removes the processing status on
the Timesketch search index object.
"""
super(TimesketchOutputModule, self).Close()
with self._timesketch.app_context():
search_index = timesketch_sketch.SearchIndex.query.filter_by(
index_name=self._index_name).first()
search_index.status.remove(search_index.status[0])
timesketch_db_session.add(search_index)
timesketch_db_session.commit() | def function[Close, parameter[self]]:
constant[Closes the connection to the Timesketch Elasticsearch database.
Sends the remaining events for indexing and removes the processing status on
the Timesketch search index object.
]
call[call[name[super], parameter[name[TimesketchOutputModule], name[self]]].Close, parameter[]]
with call[name[self]._timesketch.app_context, parameter[]] begin[:]
variable[search_index] assign[=] call[call[name[timesketch_sketch].SearchIndex.query.filter_by, parameter[]].first, parameter[]]
call[name[search_index].status.remove, parameter[call[name[search_index].status][constant[0]]]]
call[name[timesketch_db_session].add, parameter[name[search_index]]]
call[name[timesketch_db_session].commit, parameter[]] | keyword[def] identifier[Close] ( identifier[self] ):
literal[string]
identifier[super] ( identifier[TimesketchOutputModule] , identifier[self] ). identifier[Close] ()
keyword[with] identifier[self] . identifier[_timesketch] . identifier[app_context] ():
identifier[search_index] = identifier[timesketch_sketch] . identifier[SearchIndex] . identifier[query] . identifier[filter_by] (
identifier[index_name] = identifier[self] . identifier[_index_name] ). identifier[first] ()
identifier[search_index] . identifier[status] . identifier[remove] ( identifier[search_index] . identifier[status] [ literal[int] ])
identifier[timesketch_db_session] . identifier[add] ( identifier[search_index] )
identifier[timesketch_db_session] . identifier[commit] () | def Close(self):
"""Closes the connection to TimeSketch Elasticsearch database.
Sends the remaining events for indexing and removes the processing status on
the Timesketch search index object.
"""
super(TimesketchOutputModule, self).Close()
with self._timesketch.app_context():
search_index = timesketch_sketch.SearchIndex.query.filter_by(index_name=self._index_name).first()
search_index.status.remove(search_index.status[0])
timesketch_db_session.add(search_index)
timesketch_db_session.commit() # depends on [control=['with'], data=[]] |
def _get_scaled_image(self, resource):
"""
Get scaled watermark image
:param resource: Image.Image
:return: Image.Image
"""
image = self._get_image()
original_width, original_height = resource.size
k = image.size[0] / float(image.size[1])
if image.size[0] >= image.size[1]:
target_width = int(original_width * self.size)
target_height = int(target_width / k)
else:
target_height = int(original_height * self.size)
target_width = int(target_height * k)
image = image.resize((target_width, target_height), Image.ANTIALIAS)
return image | def function[_get_scaled_image, parameter[self, resource]]:
constant[
Get scaled watermark image
:param resource: Image.Image
:return: Image.Image
]
variable[image] assign[=] call[name[self]._get_image, parameter[]]
<ast.Tuple object at 0x7da1b142a4d0> assign[=] name[resource].size
variable[k] assign[=] binary_operation[call[name[image].size][constant[0]] / call[name[float], parameter[call[name[image].size][constant[1]]]]]
if compare[call[name[image].size][constant[0]] greater_or_equal[>=] call[name[image].size][constant[1]]] begin[:]
variable[target_width] assign[=] call[name[int], parameter[binary_operation[name[original_width] * name[self].size]]]
variable[target_height] assign[=] call[name[int], parameter[binary_operation[name[target_width] / name[k]]]]
variable[image] assign[=] call[name[image].resize, parameter[tuple[[<ast.Name object at 0x7da1b16273a0>, <ast.Name object at 0x7da1b1624670>]], name[Image].ANTIALIAS]]
return[name[image]] | keyword[def] identifier[_get_scaled_image] ( identifier[self] , identifier[resource] ):
literal[string]
identifier[image] = identifier[self] . identifier[_get_image] ()
identifier[original_width] , identifier[original_height] = identifier[resource] . identifier[size]
identifier[k] = identifier[image] . identifier[size] [ literal[int] ]/ identifier[float] ( identifier[image] . identifier[size] [ literal[int] ])
keyword[if] identifier[image] . identifier[size] [ literal[int] ]>= identifier[image] . identifier[size] [ literal[int] ]:
identifier[target_width] = identifier[int] ( identifier[original_width] * identifier[self] . identifier[size] )
identifier[target_height] = identifier[int] ( identifier[target_width] / identifier[k] )
keyword[else] :
identifier[target_height] = identifier[int] ( identifier[original_height] * identifier[self] . identifier[size] )
identifier[target_width] = identifier[int] ( identifier[target_height] * identifier[k] )
identifier[image] = identifier[image] . identifier[resize] (( identifier[target_width] , identifier[target_height] ), identifier[Image] . identifier[ANTIALIAS] )
keyword[return] identifier[image] | def _get_scaled_image(self, resource):
"""
Get scaled watermark image
:param resource: Image.Image
:return: Image.Image
"""
image = self._get_image()
(original_width, original_height) = resource.size
k = image.size[0] / float(image.size[1])
if image.size[0] >= image.size[1]:
target_width = int(original_width * self.size)
target_height = int(target_width / k) # depends on [control=['if'], data=[]]
else:
target_height = int(original_height * self.size)
target_width = int(target_height * k)
image = image.resize((target_width, target_height), Image.ANTIALIAS)
return image |
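
A standalone sketch of the same aspect-ratio arithmetic using Pillow directly, with no watermark class needed. Note that `Image.ANTIALIAS`, used above, was removed in Pillow 10 in favor of `Image.LANCZOS`:

from PIL import Image

def scale_watermark(watermark, target, size=0.25):
    # replicates the sizing rule above: the watermark's larger dimension
    # becomes `size` times the matching dimension of the target image
    tw, th = target.size
    k = watermark.size[0] / float(watermark.size[1])
    if watermark.size[0] >= watermark.size[1]:
        w = int(tw * size)
        h = int(w / k)
    else:
        h = int(th * size)
        w = int(h * k)
    return watermark.resize((w, h), Image.LANCZOS)

target = Image.new("RGB", (800, 600))
mark = Image.new("RGBA", (200, 100))
print(scale_watermark(mark, target, size=0.5).size)  # (400, 200)
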
def gregorian_to_julian(day):
"""Convert a datetime.date object to its corresponding Julian day.
:param day: The datetime.date to convert to a Julian day
:returns: A Julian day, as an integer
"""
before_march = 1 if day.month < MARCH else 0
#
# Number of months since March
#
month_index = day.month + MONTHS_PER_YEAR * before_march - MARCH
#
# Number of years (year starts on March) since 4800 BC
#
years_elapsed = day.year - JULIAN_START_YEAR - before_march
total_days_in_previous_months = (153 * month_index + 2) // 5
total_days_in_previous_years = 365 * years_elapsed
total_leap_days = (
(years_elapsed // 4) -
(years_elapsed // 100) +
(years_elapsed // 400)
)
return sum([
day.day,
total_days_in_previous_months,
total_days_in_previous_years,
total_leap_days,
        -32045,  # Offset so that January 1, 4713 BC maps to day 0
]) | def function[gregorian_to_julian, parameter[day]]:
constant[Convert a datetime.date object to its corresponding Julian day.
:param day: The datetime.date to convert to a Julian day
:returns: A Julian day, as an integer
]
variable[before_march] assign[=] <ast.IfExp object at 0x7da20c6a9a80>
variable[month_index] assign[=] binary_operation[binary_operation[name[day].month + binary_operation[name[MONTHS_PER_YEAR] * name[before_march]]] - name[MARCH]]
variable[years_elapsed] assign[=] binary_operation[binary_operation[name[day].year - name[JULIAN_START_YEAR]] - name[before_march]]
variable[total_days_in_previous_months] assign[=] binary_operation[binary_operation[binary_operation[constant[153] * name[month_index]] + constant[2]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[5]]
variable[total_days_in_previous_years] assign[=] binary_operation[constant[365] * name[years_elapsed]]
variable[total_leap_days] assign[=] binary_operation[binary_operation[binary_operation[name[years_elapsed] <ast.FloorDiv object at 0x7da2590d6bc0> constant[4]] - binary_operation[name[years_elapsed] <ast.FloorDiv object at 0x7da2590d6bc0> constant[100]]] + binary_operation[name[years_elapsed] <ast.FloorDiv object at 0x7da2590d6bc0> constant[400]]]
return[call[name[sum], parameter[list[[<ast.Attribute object at 0x7da204344a30>, <ast.Name object at 0x7da204346590>, <ast.Name object at 0x7da2043478b0>, <ast.Name object at 0x7da204344790>, <ast.UnaryOp object at 0x7da204345ab0>]]]]] | keyword[def] identifier[gregorian_to_julian] ( identifier[day] ):
literal[string]
identifier[before_march] = literal[int] keyword[if] identifier[day] . identifier[month] < identifier[MARCH] keyword[else] literal[int]
identifier[month_index] = identifier[day] . identifier[month] + identifier[MONTHS_PER_YEAR] * identifier[before_march] - identifier[MARCH]
identifier[years_elapsed] = identifier[day] . identifier[year] - identifier[JULIAN_START_YEAR] - identifier[before_march]
identifier[total_days_in_previous_months] =( literal[int] * identifier[month_index] + literal[int] )// literal[int]
identifier[total_days_in_previous_years] = literal[int] * identifier[years_elapsed]
identifier[total_leap_days] =(
( identifier[years_elapsed] // literal[int] )-
( identifier[years_elapsed] // literal[int] )+
( identifier[years_elapsed] // literal[int] )
)
keyword[return] identifier[sum] ([
identifier[day] . identifier[day] ,
identifier[total_days_in_previous_months] ,
identifier[total_days_in_previous_years] ,
identifier[total_leap_days] ,
- literal[int] ,
]) | def gregorian_to_julian(day):
"""Convert a datetime.date object to its corresponding Julian day.
:param day: The datetime.date to convert to a Julian day
:returns: A Julian day, as an integer
"""
before_march = 1 if day.month < MARCH else 0
#
# Number of months since March
#
month_index = day.month + MONTHS_PER_YEAR * before_march - MARCH
#
# Number of years (year starts on March) since 4800 BC
#
years_elapsed = day.year - JULIAN_START_YEAR - before_march
total_days_in_previous_months = (153 * month_index + 2) // 5
total_days_in_previous_years = 365 * years_elapsed
    total_leap_days = years_elapsed // 4 - years_elapsed // 100 + years_elapsed // 400
    # -32045 offsets the result so that January 1, 4713 BC maps to day 0
    return sum([day.day, total_days_in_previous_months, total_days_in_previous_years, total_leap_days, -32045]) |
def copy_obj(obj):
''' does a deepcopy of an object, but does not copy a class
i.e.
x = {"key":[<classInstance1>,<classInstance2>,<classInstance3>]}
y = copy_obj(x)
y --> {"key":[<classInstance1>,<classInstance2>,<classInstance3>]}
del y['key'][0]
y --> {"key":[<classInstance2>,<classInstance3>]}
x --> {" key":[<classInstance1>,<classInstance2>,<classInstance3>]}
*** this is to overcome a dictionary object that lists with classes
as the list items. '''
if isinstance(obj, dict):
return_obj = {}
for key, value in obj.items():
if isinstance(value, dict):
return_obj[key] = copy_obj(value)
elif isinstance(value, list):
return_obj[key] = copy_obj(value)
else:
return_obj[key] = value
elif isinstance(obj, list):
return_obj = []
for value in obj:
if isinstance(value, dict):
return_obj.append(copy_obj(value))
elif isinstance(value, list):
return_obj.append(copy_obj(value))
else:
return_obj.append(value)
else:
return_obj = copy.copy(obj)
return return_obj | def function[copy_obj, parameter[obj]]:
constant[ does a deepcopy of an object, but does not copy a class
i.e.
x = {"key":[<classInstance1>,<classInstance2>,<classInstance3>]}
y = copy_obj(x)
y --> {"key":[<classInstance1>,<classInstance2>,<classInstance3>]}
del y['key'][0]
y --> {"key":[<classInstance2>,<classInstance3>]}
x --> {" key":[<classInstance1>,<classInstance2>,<classInstance3>]}
*** this is to overcome a dictionary object that lists with classes
as the list items. ]
if call[name[isinstance], parameter[name[obj], name[dict]]] begin[:]
variable[return_obj] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18bc72d40>, <ast.Name object at 0x7da18bc73190>]]] in starred[call[name[obj].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[value], name[dict]]] begin[:]
call[name[return_obj]][name[key]] assign[=] call[name[copy_obj], parameter[name[value]]]
return[name[return_obj]] | keyword[def] identifier[copy_obj] ( identifier[obj] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[dict] ):
identifier[return_obj] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[obj] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ):
identifier[return_obj] [ identifier[key] ]= identifier[copy_obj] ( identifier[value] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[list] ):
identifier[return_obj] [ identifier[key] ]= identifier[copy_obj] ( identifier[value] )
keyword[else] :
identifier[return_obj] [ identifier[key] ]= identifier[value]
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[list] ):
identifier[return_obj] =[]
keyword[for] identifier[value] keyword[in] identifier[obj] :
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ):
identifier[return_obj] . identifier[append] ( identifier[copy_obj] ( identifier[value] ))
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[list] ):
identifier[return_obj] . identifier[append] ( identifier[copy_obj] ( identifier[value] ))
keyword[else] :
identifier[return_obj] . identifier[append] ( identifier[value] )
keyword[else] :
identifier[return_obj] = identifier[copy] . identifier[copy] ( identifier[obj] )
keyword[return] identifier[return_obj] | def copy_obj(obj):
""" does a deepcopy of an object, but does not copy a class
i.e.
x = {"key":[<classInstance1>,<classInstance2>,<classInstance3>]}
y = copy_obj(x)
y --> {"key":[<classInstance1>,<classInstance2>,<classInstance3>]}
del y['key'][0]
y --> {"key":[<classInstance2>,<classInstance3>]}
x --> {" key":[<classInstance1>,<classInstance2>,<classInstance3>]}
*** this is to overcome a dictionary object that lists with classes
as the list items. """
if isinstance(obj, dict):
return_obj = {}
for (key, value) in obj.items():
if isinstance(value, dict):
return_obj[key] = copy_obj(value) # depends on [control=['if'], data=[]]
elif isinstance(value, list):
return_obj[key] = copy_obj(value) # depends on [control=['if'], data=[]]
else:
return_obj[key] = value # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(obj, list):
return_obj = []
for value in obj:
if isinstance(value, dict):
return_obj.append(copy_obj(value)) # depends on [control=['if'], data=[]]
elif isinstance(value, list):
return_obj.append(copy_obj(value)) # depends on [control=['if'], data=[]]
else:
return_obj.append(value) # depends on [control=['for'], data=['value']] # depends on [control=['if'], data=[]]
else:
return_obj = copy.copy(obj)
return return_obj |
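
A runnable demonstration of the behavior described in the docstring: container structure is duplicated, while non-container items (here, class instances) are shared between the copies:

import copy

class Sensor:
    def __init__(self, name):
        self.name = name

a = Sensor("a")
x = {"key": [a, Sensor("b")]}
y = copy_obj(x)

del y["key"][0]
print(len(x["key"]), len(y["key"]))  # 2 1  -> the list structure was copied
print(x["key"][0] is a)              # True -> the instances themselves are shared
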
def _analyze_case(model_dir, bench_dir, config):
""" Runs all of the verification checks on a particular case """
bundle = livvkit.verification_model_module
model_out = functions.find_file(model_dir, "*"+config["output_ext"])
bench_out = functions.find_file(bench_dir, "*"+config["output_ext"])
model_config = functions.find_file(model_dir, "*"+config["config_ext"])
bench_config = functions.find_file(bench_dir, "*"+config["config_ext"])
model_log = functions.find_file(model_dir, "*"+config["logfile_ext"])
el = [
bit_for_bit(model_out, bench_out, config),
diff_configurations(model_config, bench_config, bundle, bundle),
bundle.parse_log(model_log)
]
return el | def function[_analyze_case, parameter[model_dir, bench_dir, config]]:
constant[ Runs all of the verification checks on a particular case ]
variable[bundle] assign[=] name[livvkit].verification_model_module
variable[model_out] assign[=] call[name[functions].find_file, parameter[name[model_dir], binary_operation[constant[*] + call[name[config]][constant[output_ext]]]]]
variable[bench_out] assign[=] call[name[functions].find_file, parameter[name[bench_dir], binary_operation[constant[*] + call[name[config]][constant[output_ext]]]]]
variable[model_config] assign[=] call[name[functions].find_file, parameter[name[model_dir], binary_operation[constant[*] + call[name[config]][constant[config_ext]]]]]
variable[bench_config] assign[=] call[name[functions].find_file, parameter[name[bench_dir], binary_operation[constant[*] + call[name[config]][constant[config_ext]]]]]
variable[model_log] assign[=] call[name[functions].find_file, parameter[name[model_dir], binary_operation[constant[*] + call[name[config]][constant[logfile_ext]]]]]
variable[el] assign[=] list[[<ast.Call object at 0x7da1b0a2dbd0>, <ast.Call object at 0x7da1b0a2e770>, <ast.Call object at 0x7da1b0a2fd90>]]
return[name[el]] | keyword[def] identifier[_analyze_case] ( identifier[model_dir] , identifier[bench_dir] , identifier[config] ):
literal[string]
identifier[bundle] = identifier[livvkit] . identifier[verification_model_module]
identifier[model_out] = identifier[functions] . identifier[find_file] ( identifier[model_dir] , literal[string] + identifier[config] [ literal[string] ])
identifier[bench_out] = identifier[functions] . identifier[find_file] ( identifier[bench_dir] , literal[string] + identifier[config] [ literal[string] ])
identifier[model_config] = identifier[functions] . identifier[find_file] ( identifier[model_dir] , literal[string] + identifier[config] [ literal[string] ])
identifier[bench_config] = identifier[functions] . identifier[find_file] ( identifier[bench_dir] , literal[string] + identifier[config] [ literal[string] ])
identifier[model_log] = identifier[functions] . identifier[find_file] ( identifier[model_dir] , literal[string] + identifier[config] [ literal[string] ])
identifier[el] =[
identifier[bit_for_bit] ( identifier[model_out] , identifier[bench_out] , identifier[config] ),
identifier[diff_configurations] ( identifier[model_config] , identifier[bench_config] , identifier[bundle] , identifier[bundle] ),
identifier[bundle] . identifier[parse_log] ( identifier[model_log] )
]
keyword[return] identifier[el] | def _analyze_case(model_dir, bench_dir, config):
""" Runs all of the verification checks on a particular case """
bundle = livvkit.verification_model_module
model_out = functions.find_file(model_dir, '*' + config['output_ext'])
bench_out = functions.find_file(bench_dir, '*' + config['output_ext'])
model_config = functions.find_file(model_dir, '*' + config['config_ext'])
bench_config = functions.find_file(bench_dir, '*' + config['config_ext'])
model_log = functions.find_file(model_dir, '*' + config['logfile_ext'])
el = [bit_for_bit(model_out, bench_out, config), diff_configurations(model_config, bench_config, bundle, bundle), bundle.parse_log(model_log)]
return el |
def stop(self):
"""Stop consuming the stream and shutdown the background thread."""
with self._operational_lock:
self._bidi_rpc.close()
if self._thread is not None:
# Resume the thread to wake it up in case it is sleeping.
self.resume()
self._thread.join()
self._thread = None | def function[stop, parameter[self]]:
constant[Stop consuming the stream and shut down the background thread.]
with name[self]._operational_lock begin[:]
call[name[self]._bidi_rpc.close, parameter[]]
if compare[name[self]._thread is_not constant[None]] begin[:]
call[name[self].resume, parameter[]]
call[name[self]._thread.join, parameter[]]
name[self]._thread assign[=] constant[None] | keyword[def] identifier[stop] ( identifier[self] ):
literal[string]
keyword[with] identifier[self] . identifier[_operational_lock] :
identifier[self] . identifier[_bidi_rpc] . identifier[close] ()
keyword[if] identifier[self] . identifier[_thread] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[resume] ()
identifier[self] . identifier[_thread] . identifier[join] ()
identifier[self] . identifier[_thread] = keyword[None] | def stop(self):
"""Stop consuming the stream and shutdown the background thread."""
with self._operational_lock:
self._bidi_rpc.close()
if self._thread is not None:
# Resume the thread to wake it up in case it is sleeping.
self.resume()
self._thread.join() # depends on [control=['if'], data=[]]
self._thread = None # depends on [control=['with'], data=[]] |
def get_station_year_text(WMO, WBAN, year):
'''Basic method to download data from the GSOD database, given a
station identifier and year.
Parameters
----------
WMO : int or None
World Meteorological Organization (WMO) identifier, [-]
WBAN : int or None
Weather Bureau Army Navy (WBAN) weather station identifier, [-]
year : int
Year data should be retrieved from, [year]
Returns
-------
data : str
Downloaded data file
'''
if WMO is None:
WMO = 999999
if WBAN is None:
WBAN = 99999
station = str(int(WMO)) + '-' + str(int(WBAN))
gsod_year_dir = os.path.join(data_dir, 'gsod', str(year))
path = os.path.join(gsod_year_dir, station + '.op')
if os.path.exists(path):
data = open(path).read()
if data and data != 'Exception':
return data
else:
raise Exception(data)
toget = ('ftp://ftp.ncdc.noaa.gov/pub/data/gsod/' + str(year) + '/'
+ station + '-' + str(year) +'.op.gz')
try:
data = urlopen(toget, timeout=5)
except Exception as e:
if not os.path.exists(gsod_year_dir):
os.makedirs(gsod_year_dir)
open(path, 'w').write('Exception')
raise Exception('Could not obtain desired data; check '
'if the year has data published for the '
'specified station and the station was specified '
'in the correct form. The full error is %s' %(e))
data = data.read()
data_thing = StringIO(data)
f = gzip.GzipFile(fileobj=data_thing, mode="r")
year_station_data = f.read()
try:
year_station_data = year_station_data.decode('utf-8')
except:
pass
# Cache the data for future use
if not os.path.exists(gsod_year_dir):
os.makedirs(gsod_year_dir)
open(path, 'w').write(year_station_data)
return year_station_data | def function[get_station_year_text, parameter[WMO, WBAN, year]]:
constant[Basic method to download data from the GSOD database, given a
station identifier and year.
Parameters
----------
WMO : int or None
World Meteorological Organization (WMO) identifier, [-]
WBAN : int or None
Weather Bureau Army Navy (WBAN) weather station identifier, [-]
year : int
Year data should be retrieved from, [year]
Returns
-------
data : str
Downloaded data file
]
if compare[name[WMO] is constant[None]] begin[:]
variable[WMO] assign[=] constant[999999]
if compare[name[WBAN] is constant[None]] begin[:]
variable[WBAN] assign[=] constant[99999]
variable[station] assign[=] binary_operation[binary_operation[call[name[str], parameter[call[name[int], parameter[name[WMO]]]]] + constant[-]] + call[name[str], parameter[call[name[int], parameter[name[WBAN]]]]]]
variable[gsod_year_dir] assign[=] call[name[os].path.join, parameter[name[data_dir], constant[gsod], call[name[str], parameter[name[year]]]]]
variable[path] assign[=] call[name[os].path.join, parameter[name[gsod_year_dir], binary_operation[name[station] + constant[.op]]]]
if call[name[os].path.exists, parameter[name[path]]] begin[:]
variable[data] assign[=] call[call[name[open], parameter[name[path]]].read, parameter[]]
if <ast.BoolOp object at 0x7da1b1209f90> begin[:]
return[name[data]]
variable[toget] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[ftp://ftp.ncdc.noaa.gov/pub/data/gsod/] + call[name[str], parameter[name[year]]]] + constant[/]] + name[station]] + constant[-]] + call[name[str], parameter[name[year]]]] + constant[.op.gz]]
<ast.Try object at 0x7da1b12088b0>
variable[data] assign[=] call[name[data].read, parameter[]]
variable[data_thing] assign[=] call[name[StringIO], parameter[name[data]]]
variable[f] assign[=] call[name[gzip].GzipFile, parameter[]]
variable[year_station_data] assign[=] call[name[f].read, parameter[]]
<ast.Try object at 0x7da1b120a5c0>
if <ast.UnaryOp object at 0x7da1b120b790> begin[:]
call[name[os].makedirs, parameter[name[gsod_year_dir]]]
call[call[name[open], parameter[name[path], constant[w]]].write, parameter[name[year_station_data]]]
return[name[year_station_data]] | keyword[def] identifier[get_station_year_text] ( identifier[WMO] , identifier[WBAN] , identifier[year] ):
literal[string]
keyword[if] identifier[WMO] keyword[is] keyword[None] :
identifier[WMO] = literal[int]
keyword[if] identifier[WBAN] keyword[is] keyword[None] :
identifier[WBAN] = literal[int]
identifier[station] = identifier[str] ( identifier[int] ( identifier[WMO] ))+ literal[string] + identifier[str] ( identifier[int] ( identifier[WBAN] ))
identifier[gsod_year_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_dir] , literal[string] , identifier[str] ( identifier[year] ))
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[gsod_year_dir] , identifier[station] + literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
identifier[data] = identifier[open] ( identifier[path] ). identifier[read] ()
keyword[if] identifier[data] keyword[and] identifier[data] != literal[string] :
keyword[return] identifier[data]
keyword[else] :
keyword[raise] identifier[Exception] ( identifier[data] )
identifier[toget] =( literal[string] + identifier[str] ( identifier[year] )+ literal[string]
+ identifier[station] + literal[string] + identifier[str] ( identifier[year] )+ literal[string] )
keyword[try] :
identifier[data] = identifier[urlopen] ( identifier[toget] , identifier[timeout] = literal[int] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[gsod_year_dir] ):
identifier[os] . identifier[makedirs] ( identifier[gsod_year_dir] )
identifier[open] ( identifier[path] , literal[string] ). identifier[write] ( literal[string] )
keyword[raise] identifier[Exception] ( literal[string]
literal[string]
literal[string]
literal[string] %( identifier[e] ))
identifier[data] = identifier[data] . identifier[read] ()
identifier[data_thing] = identifier[StringIO] ( identifier[data] )
identifier[f] = identifier[gzip] . identifier[GzipFile] ( identifier[fileobj] = identifier[data_thing] , identifier[mode] = literal[string] )
identifier[year_station_data] = identifier[f] . identifier[read] ()
keyword[try] :
identifier[year_station_data] = identifier[year_station_data] . identifier[decode] ( literal[string] )
keyword[except] :
keyword[pass]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[gsod_year_dir] ):
identifier[os] . identifier[makedirs] ( identifier[gsod_year_dir] )
identifier[open] ( identifier[path] , literal[string] ). identifier[write] ( identifier[year_station_data] )
keyword[return] identifier[year_station_data] | def get_station_year_text(WMO, WBAN, year):
"""Basic method to download data from the GSOD database, given a
station identifier and year.
Parameters
----------
WMO : int or None
World Meteorological Organization (WMO) identifier, [-]
WBAN : int or None
Weather Bureau Army Navy (WBAN) weather station identifier, [-]
year : int
Year data should be retrieved from, [year]
Returns
-------
data : str
Downloaded data file
"""
if WMO is None:
WMO = 999999 # depends on [control=['if'], data=['WMO']]
if WBAN is None:
WBAN = 99999 # depends on [control=['if'], data=['WBAN']]
station = str(int(WMO)) + '-' + str(int(WBAN))
gsod_year_dir = os.path.join(data_dir, 'gsod', str(year))
path = os.path.join(gsod_year_dir, station + '.op')
if os.path.exists(path):
data = open(path).read()
if data and data != 'Exception':
return data # depends on [control=['if'], data=[]]
else:
raise Exception(data) # depends on [control=['if'], data=[]]
toget = 'ftp://ftp.ncdc.noaa.gov/pub/data/gsod/' + str(year) + '/' + station + '-' + str(year) + '.op.gz'
try:
data = urlopen(toget, timeout=5) # depends on [control=['try'], data=[]]
except Exception as e:
if not os.path.exists(gsod_year_dir):
os.makedirs(gsod_year_dir) # depends on [control=['if'], data=[]]
open(path, 'w').write('Exception')
raise Exception('Could not obtain desired data; check if the year has data published for the specified station and the station was specified in the correct form. The full error is %s' % e) # depends on [control=['except'], data=['e']]
data = data.read()
data_thing = StringIO(data)
f = gzip.GzipFile(fileobj=data_thing, mode='r')
year_station_data = f.read()
try:
year_station_data = year_station_data.decode('utf-8') # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
# Cache the data for future use
if not os.path.exists(gsod_year_dir):
os.makedirs(gsod_year_dir) # depends on [control=['if'], data=[]]
open(path, 'w').write(year_station_data)
return year_station_data |
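
In isolation, the station identifier and download URL are assembled from the WMO/WBAN pair as below (pure string work, no network access; the WBAN value is a hypothetical US-station example, and 999999 is the sentinel substituted when WMO is None):

WMO, WBAN, year = 999999, 94846, 2015
station = str(int(WMO)) + '-' + str(int(WBAN))
url = ('ftp://ftp.ncdc.noaa.gov/pub/data/gsod/' + str(year) + '/'
       + station + '-' + str(year) + '.op.gz')
print(url)  # ftp://ftp.ncdc.noaa.gov/pub/data/gsod/2015/999999-94846-2015.op.gz
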
def update_launch_config(self, scaling_group, server_name=None, image=None,
flavor=None, disk_config=None, metadata=None, personality=None,
networks=None, load_balancers=None, key_name=None, config_drive=False,
user_data=None):
"""
Updates the server launch configuration for an existing scaling group.
One or more of the available attributes can be specified.
NOTE: if you specify metadata, it will *replace* any existing metadata.
If you want to add to it, you either need to pass the complete dict of
metadata, or call the update_launch_metadata() method.
"""
return self._manager.update_launch_config(scaling_group,
server_name=server_name, image=image, flavor=flavor,
disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name,
config_drive=config_drive, user_data=user_data) | def function[update_launch_config, parameter[self, scaling_group, server_name, image, flavor, disk_config, metadata, personality, networks, load_balancers, key_name, config_drive, user_data]]:
constant[
Updates the server launch configuration for an existing scaling group.
One or more of the available attributes can be specified.
NOTE: if you specify metadata, it will *replace* any existing metadata.
If you want to add to it, you either need to pass the complete dict of
metadata, or call the update_launch_metadata() method.
]
return[call[name[self]._manager.update_launch_config, parameter[name[scaling_group]]]] | keyword[def] identifier[update_launch_config] ( identifier[self] , identifier[scaling_group] , identifier[server_name] = keyword[None] , identifier[image] = keyword[None] ,
identifier[flavor] = keyword[None] , identifier[disk_config] = keyword[None] , identifier[metadata] = keyword[None] , identifier[personality] = keyword[None] ,
identifier[networks] = keyword[None] , identifier[load_balancers] = keyword[None] , identifier[key_name] = keyword[None] , identifier[config_drive] = keyword[False] ,
identifier[user_data] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_manager] . identifier[update_launch_config] ( identifier[scaling_group] ,
identifier[server_name] = identifier[server_name] , identifier[image] = identifier[image] , identifier[flavor] = identifier[flavor] ,
identifier[disk_config] = identifier[disk_config] , identifier[metadata] = identifier[metadata] ,
identifier[personality] = identifier[personality] , identifier[networks] = identifier[networks] ,
identifier[load_balancers] = identifier[load_balancers] , identifier[key_name] = identifier[key_name] ,
identifier[config_drive] = identifier[config_drive] , identifier[user_data] = identifier[user_data] ) | def update_launch_config(self, scaling_group, server_name=None, image=None, flavor=None, disk_config=None, metadata=None, personality=None, networks=None, load_balancers=None, key_name=None, config_drive=False, user_data=None):
"""
Updates the server launch configuration for an existing scaling group.
One or more of the available attributes can be specified.
NOTE: if you specify metadata, it will *replace* any existing metadata.
If you want to add to it, you either need to pass the complete dict of
metadata, or call the update_launch_metadata() method.
"""
return self._manager.update_launch_config(scaling_group, server_name=server_name, image=image, flavor=flavor, disk_config=disk_config, metadata=metadata, personality=personality, networks=networks, load_balancers=load_balancers, key_name=key_name, config_drive=config_drive, user_data=user_data) |
def folderName(self, folder):
"""gets/set the current folder"""
if folder == "" or\
folder == "/":
self._currentURL = self._url
self._services = None
self._description = None
self._folderName = None
self._webEncrypted = None
self.__init()
self._folderName = folder
elif folder in self.folders:
self._currentURL = self._url + "/%s" % folder
self._services = None
self._description = None
self._folderName = None
self._webEncrypted = None
self.__init()
self._folderName = folder | def function[folderName, parameter[self, folder]]:
constant[gets/sets the current folder]
if <ast.BoolOp object at 0x7da18dc9bd60> begin[:]
name[self]._currentURL assign[=] name[self]._url
name[self]._services assign[=] constant[None]
name[self]._description assign[=] constant[None]
name[self]._folderName assign[=] constant[None]
name[self]._webEncrypted assign[=] constant[None]
call[name[self].__init, parameter[]]
name[self]._folderName assign[=] name[folder] | keyword[def] identifier[folderName] ( identifier[self] , identifier[folder] ):
literal[string]
keyword[if] identifier[folder] == literal[string] keyword[or] identifier[folder] == literal[string] :
identifier[self] . identifier[_currentURL] = identifier[self] . identifier[_url]
identifier[self] . identifier[_services] = keyword[None]
identifier[self] . identifier[_description] = keyword[None]
identifier[self] . identifier[_folderName] = keyword[None]
identifier[self] . identifier[_webEncrypted] = keyword[None]
identifier[self] . identifier[__init] ()
identifier[self] . identifier[_folderName] = identifier[folder]
keyword[elif] identifier[folder] keyword[in] identifier[self] . identifier[folders] :
identifier[self] . identifier[_currentURL] = identifier[self] . identifier[_url] + literal[string] % identifier[folder]
identifier[self] . identifier[_services] = keyword[None]
identifier[self] . identifier[_description] = keyword[None]
identifier[self] . identifier[_folderName] = keyword[None]
identifier[self] . identifier[_webEncrypted] = keyword[None]
identifier[self] . identifier[__init] ()
identifier[self] . identifier[_folderName] = identifier[folder] | def folderName(self, folder):
"""gets/set the current folder"""
if folder == '' or folder == '/':
self._currentURL = self._url
self._services = None
self._description = None
self._folderName = None
self._webEncrypted = None
self.__init()
self._folderName = folder # depends on [control=['if'], data=[]]
elif folder in self.folders:
self._currentURL = self._url + '/%s' % folder
self._services = None
self._description = None
self._folderName = None
self._webEncrypted = None
self.__init()
self._folderName = folder # depends on [control=['if'], data=['folder']] |
async def stepper_config(self, steps_per_revolution, stepper_pins):
"""
Configure stepper motor prior to operation.
This is a FirmataPlus feature.
:param steps_per_revolution: number of steps per motor revolution
:param stepper_pins: a list of control pin numbers - either 4 or 2
:returns: No return value.
"""
data = [PrivateConstants.STEPPER_CONFIGURE, steps_per_revolution & 0x7f,
(steps_per_revolution >> 7) & 0x7f]
for pin in range(len(stepper_pins)):
data.append(stepper_pins[pin])
await self._send_sysex(PrivateConstants.STEPPER_DATA, data) | <ast.AsyncFunctionDef object at 0x7da207f026e0> | keyword[async] keyword[def] identifier[stepper_config] ( identifier[self] , identifier[steps_per_revolution] , identifier[stepper_pins] ):
literal[string]
identifier[data] =[ identifier[PrivateConstants] . identifier[STEPPER_CONFIGURE] , identifier[steps_per_revolution] & literal[int] ,
( identifier[steps_per_revolution] >> literal[int] )& literal[int] ]
keyword[for] identifier[pin] keyword[in] identifier[range] ( identifier[len] ( identifier[stepper_pins] )):
identifier[data] . identifier[append] ( identifier[stepper_pins] [ identifier[pin] ])
keyword[await] identifier[self] . identifier[_send_sysex] ( identifier[PrivateConstants] . identifier[STEPPER_DATA] , identifier[data] ) | async def stepper_config(self, steps_per_revolution, stepper_pins):
"""
Configure stepper motor prior to operation.
This is a FirmataPlus feature.
:param steps_per_revolution: number of steps per motor revolution
:param stepper_pins: a list of control pin numbers - either 4 or 2
:returns: No return value.
"""
data = [PrivateConstants.STEPPER_CONFIGURE, steps_per_revolution & 127, steps_per_revolution >> 7 & 127]
for pin in range(len(stepper_pins)):
data.append(stepper_pins[pin]) # depends on [control=['for'], data=['pin']]
await self._send_sysex(PrivateConstants.STEPPER_DATA, data) |
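The `& 0x7f` / `>> 7` split above follows the Firmata sysex convention of carrying 14-bit values as two 7-bit bytes, LSB first. A minimal sketch of that packing, using 200 steps/rev (a common 1.8-degree stepper) as an assumed example:

steps_per_revolution = 200                 # assumed example value
lsb = steps_per_revolution & 0x7F          # low 7 bits  -> 72
msb = (steps_per_revolution >> 7) & 0x7F   # high 7 bits -> 1
assert (msb << 7) + lsb == steps_per_revolution
print([lsb, msb])                          # [72, 1], the order sent on the wire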
def poke_array(self, store, name, elemtype, elements, container, visited, _stack):
"""abstract method"""
raise NotImplementedError | def function[poke_array, parameter[self, store, name, elemtype, elements, container, visited, _stack]]:
constant[abstract method]
<ast.Raise object at 0x7da1b1463c70> | keyword[def] identifier[poke_array] ( identifier[self] , identifier[store] , identifier[name] , identifier[elemtype] , identifier[elements] , identifier[container] , identifier[visited] , identifier[_stack] ):
literal[string]
keyword[raise] identifier[NotImplementedError] | def poke_array(self, store, name, elemtype, elements, container, visited, _stack):
"""abstract method"""
raise NotImplementedError |
def get_hash_statements_dict(self):
"""Return a dict of Statements keyed by hashes."""
res = {stmt_hash: stmts_from_json([stmt])[0]
for stmt_hash, stmt in self.__statement_jsons.items()}
return res | def function[get_hash_statements_dict, parameter[self]]:
constant[Return a dict of Statements keyed by hashes.]
variable[res] assign[=] <ast.DictComp object at 0x7da20c6e7850>
return[name[res]] | keyword[def] identifier[get_hash_statements_dict] ( identifier[self] ):
literal[string]
identifier[res] ={ identifier[stmt_hash] : identifier[stmts_from_json] ([ identifier[stmt] ])[ literal[int] ]
keyword[for] identifier[stmt_hash] , identifier[stmt] keyword[in] identifier[self] . identifier[__statement_jsons] . identifier[items] ()}
keyword[return] identifier[res] | def get_hash_statements_dict(self):
"""Return a dict of Statements keyed by hashes."""
res = {stmt_hash: stmts_from_json([stmt])[0] for (stmt_hash, stmt) in self.__statement_jsons.items()}
return res |
def unlink(self, func):
'''
Remove a callback function previously added with link()
Example:
base.unlink( callback )
'''
if func in self._syn_links:
self._syn_links.remove(func) | def function[unlink, parameter[self, func]]:
constant[
Remove a callback function previously added with link()
Example:
base.unlink( callback )
]
if compare[name[func] in name[self]._syn_links] begin[:]
call[name[self]._syn_links.remove, parameter[name[func]]] | keyword[def] identifier[unlink] ( identifier[self] , identifier[func] ):
literal[string]
keyword[if] identifier[func] keyword[in] identifier[self] . identifier[_syn_links] :
identifier[self] . identifier[_syn_links] . identifier[remove] ( identifier[func] ) | def unlink(self, func):
"""
Remove a callback function previously added with link()
Example:
base.unlink( callback )
"""
if func in self._syn_links:
self._syn_links.remove(func) # depends on [control=['if'], data=['func']] |
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2015-06-01: :mod:`v2015_06_01.models<azure.mgmt.authorization.v2015_06_01.models>`
* 2015-07-01: :mod:`v2015_07_01.models<azure.mgmt.authorization.v2015_07_01.models>`
* 2018-01-01-preview: :mod:`v2018_01_01_preview.models<azure.mgmt.authorization.v2018_01_01_preview.models>`
* 2018-07-01-preview: :mod:`v2018_07_01_preview.models<azure.mgmt.authorization.v2018_07_01_preview.models>`
* 2018-09-01-preview: :mod:`v2018_09_01_preview.models<azure.mgmt.authorization.v2018_09_01_preview.models>`
"""
if api_version == '2015-06-01':
from .v2015_06_01 import models
return models
elif api_version == '2015-07-01':
from .v2015_07_01 import models
return models
elif api_version == '2018-01-01-preview':
from .v2018_01_01_preview import models
return models
elif api_version == '2018-07-01-preview':
from .v2018_07_01_preview import models
return models
elif api_version == '2018-09-01-preview':
from .v2018_09_01_preview import models
return models
raise NotImplementedError("APIVersion {} is not available".format(api_version)) | def function[models, parameter[cls, api_version]]:
constant[Module depends on the API version:
* 2015-06-01: :mod:`v2015_06_01.models<azure.mgmt.authorization.v2015_06_01.models>`
* 2015-07-01: :mod:`v2015_07_01.models<azure.mgmt.authorization.v2015_07_01.models>`
* 2018-01-01-preview: :mod:`v2018_01_01_preview.models<azure.mgmt.authorization.v2018_01_01_preview.models>`
* 2018-07-01-preview: :mod:`v2018_07_01_preview.models<azure.mgmt.authorization.v2018_07_01_preview.models>`
* 2018-09-01-preview: :mod:`v2018_09_01_preview.models<azure.mgmt.authorization.v2018_09_01_preview.models>`
]
if compare[name[api_version] equal[==] constant[2015-06-01]] begin[:]
from relative_module[v2015_06_01] import module[models]
return[name[models]]
<ast.Raise object at 0x7da18c4ce4a0> | keyword[def] identifier[models] ( identifier[cls] , identifier[api_version] = identifier[DEFAULT_API_VERSION] ):
literal[string]
keyword[if] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2015_06_01] keyword[import] identifier[models]
keyword[return] identifier[models]
keyword[elif] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2015_07_01] keyword[import] identifier[models]
keyword[return] identifier[models]
keyword[elif] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2018_01_01_preview] keyword[import] identifier[models]
keyword[return] identifier[models]
keyword[elif] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2018_07_01_preview] keyword[import] identifier[models]
keyword[return] identifier[models]
keyword[elif] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2018_09_01_preview] keyword[import] identifier[models]
keyword[return] identifier[models]
keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] ( identifier[api_version] )) | def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2015-06-01: :mod:`v2015_06_01.models<azure.mgmt.authorization.v2015_06_01.models>`
* 2015-07-01: :mod:`v2015_07_01.models<azure.mgmt.authorization.v2015_07_01.models>`
* 2018-01-01-preview: :mod:`v2018_01_01_preview.models<azure.mgmt.authorization.v2018_01_01_preview.models>`
* 2018-07-01-preview: :mod:`v2018_07_01_preview.models<azure.mgmt.authorization.v2018_07_01_preview.models>`
* 2018-09-01-preview: :mod:`v2018_09_01_preview.models<azure.mgmt.authorization.v2018_09_01_preview.models>`
"""
if api_version == '2015-06-01':
from .v2015_06_01 import models
return models # depends on [control=['if'], data=[]]
elif api_version == '2015-07-01':
from .v2015_07_01 import models
return models # depends on [control=['if'], data=[]]
elif api_version == '2018-01-01-preview':
from .v2018_01_01_preview import models
return models # depends on [control=['if'], data=[]]
elif api_version == '2018-07-01-preview':
from .v2018_07_01_preview import models
return models # depends on [control=['if'], data=[]]
elif api_version == '2018-09-01-preview':
from .v2018_09_01_preview import models
return models # depends on [control=['if'], data=[]]
raise NotImplementedError('APIVersion {} is not available'.format(api_version)) |
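The elif chain above could equally be a dict lookup. A hedged sketch of that alternative (module names mirror the real azure.mgmt.authorization subpackages, but the import-on-demand is simplified and assumes the package is installed):

import importlib

_VERSION_MODULES = {
    '2015-06-01': 'v2015_06_01',
    '2015-07-01': 'v2015_07_01',
    '2018-01-01-preview': 'v2018_01_01_preview',
    '2018-07-01-preview': 'v2018_07_01_preview',
    '2018-09-01-preview': 'v2018_09_01_preview',
}

def models_by_table(api_version, package='azure.mgmt.authorization'):
    try:
        mod = _VERSION_MODULES[api_version]
    except KeyError:
        raise NotImplementedError(
            "APIVersion {} is not available".format(api_version))
    return importlib.import_module('{}.{}.models'.format(package, mod))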
def setup(self):
"""Setup."""
self.normalize = self.config['normalize'].upper()
self.convert_encoding = self.config['convert_encoding'].lower()
self.errors = self.config['errors'].lower()
if self.convert_encoding:
self.convert_encoding = codecs.lookup(
filters.PYTHON_ENCODING_NAMES.get(self.default_encoding, self.default_encoding).lower()
).name
# Don't generate content with BOMs
if (
self.convert_encoding.startswith(('utf-32', 'utf-16')) and
not self.convert_encoding.endswith(('le', 'be'))
):
self.convert_encoding += '-le'
if self.convert_encoding == 'utf-8-sig':
self.convert_encoding = 'utf-8' | def function[setup, parameter[self]]:
constant[Setup.]
name[self].normalize assign[=] call[call[name[self].config][constant[normalize]].upper, parameter[]]
name[self].convert_encoding assign[=] call[call[name[self].config][constant[convert_encoding]].lower, parameter[]]
name[self].errors assign[=] call[call[name[self].config][constant[errors]].lower, parameter[]]
if name[self].convert_encoding begin[:]
name[self].convert_encoding assign[=] call[name[codecs].lookup, parameter[call[call[name[filters].PYTHON_ENCODING_NAMES.get, parameter[name[self].default_encoding, name[self].default_encoding]].lower, parameter[]]]].name
if <ast.BoolOp object at 0x7da20e9b1ff0> begin[:]
<ast.AugAssign object at 0x7da20e9b3eb0>
if compare[name[self].convert_encoding equal[==] constant[utf-8-sig]] begin[:]
name[self].convert_encoding assign[=] constant[utf-8] | keyword[def] identifier[setup] ( identifier[self] ):
literal[string]
identifier[self] . identifier[normalize] = identifier[self] . identifier[config] [ literal[string] ]. identifier[upper] ()
identifier[self] . identifier[convert_encoding] = identifier[self] . identifier[config] [ literal[string] ]. identifier[lower] ()
identifier[self] . identifier[errors] = identifier[self] . identifier[config] [ literal[string] ]. identifier[lower] ()
keyword[if] identifier[self] . identifier[convert_encoding] :
identifier[self] . identifier[convert_encoding] = identifier[codecs] . identifier[lookup] (
identifier[filters] . identifier[PYTHON_ENCODING_NAMES] . identifier[get] ( identifier[self] . identifier[default_encoding] , identifier[self] . identifier[default_encoding] ). identifier[lower] ()
). identifier[name]
keyword[if] (
identifier[self] . identifier[convert_encoding] . identifier[startswith] (( literal[string] , literal[string] )) keyword[and]
keyword[not] identifier[self] . identifier[convert_encoding] . identifier[endswith] (( literal[string] , literal[string] ))
):
identifier[self] . identifier[convert_encoding] += literal[string]
keyword[if] identifier[self] . identifier[convert_encoding] == literal[string] :
identifier[self] . identifier[convert_encoding] = literal[string] | def setup(self):
"""Setup."""
self.normalize = self.config['normalize'].upper()
self.convert_encoding = self.config['convert_encoding'].lower()
self.errors = self.config['errors'].lower()
if self.convert_encoding:
self.convert_encoding = codecs.lookup(filters.PYTHON_ENCODING_NAMES.get(self.default_encoding, self.default_encoding).lower()).name
# Don't generate content with BOMs
if self.convert_encoding.startswith(('utf-32', 'utf-16')) and (not self.convert_encoding.endswith(('le', 'be'))):
self.convert_encoding += '-le' # depends on [control=['if'], data=[]]
if self.convert_encoding == 'utf-8-sig':
self.convert_encoding = 'utf-8' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
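The codecs.lookup(...).name call above is what canonicalizes user-supplied encoding aliases; a quick stdlib illustration of why that matters for the endswith(('le', 'be')) check:

import codecs

print(codecs.lookup('UTF8').name)     # utf-8
print(codecs.lookup('latin_1').name)  # iso8859-1
print(codecs.lookup('UTF-16').name)   # utf-16 -- no le/be suffix, which is
                                      # why the setup above appends '-le'
                                      # to avoid emitting a BOM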
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword as _keyword
>>> get_close_matches("wheel", _keyword.kwlist)
['while']
>>> get_close_matches("apple", _keyword.kwlist)
[]
>>> get_close_matches("accept", _keyword.kwlist)
['except']
"""
if not n > 0:
raise ValueError("n must be > 0: %r" % (n,))
if not 0.0 <= cutoff <= 1.0:
raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
result = []
s = SequenceMatcher()
s.set_seq2(word)
for x in possibilities:
s.set_seq1(x)
if s.real_quick_ratio() >= cutoff and \
s.quick_ratio() >= cutoff and \
s.ratio() >= cutoff:
result.append((s.ratio(), x))
# Move the best scorers to head of list
result = heapq.nlargest(n, result)
# Strip scores for the best n matches
return [x for score, x in result] | def function[get_close_matches, parameter[word, possibilities, n, cutoff]]:
constant[Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword as _keyword
>>> get_close_matches("wheel", _keyword.kwlist)
['while']
>>> get_close_matches("apple", _keyword.kwlist)
[]
>>> get_close_matches("accept", _keyword.kwlist)
['except']
]
if <ast.UnaryOp object at 0x7da18dc068c0> begin[:]
<ast.Raise object at 0x7da18dc065c0>
if <ast.UnaryOp object at 0x7da18dc05de0> begin[:]
<ast.Raise object at 0x7da18dc07640>
variable[result] assign[=] list[[]]
variable[s] assign[=] call[name[SequenceMatcher], parameter[]]
call[name[s].set_seq2, parameter[name[word]]]
for taget[name[x]] in starred[name[possibilities]] begin[:]
call[name[s].set_seq1, parameter[name[x]]]
if <ast.BoolOp object at 0x7da18dc04b50> begin[:]
call[name[result].append, parameter[tuple[[<ast.Call object at 0x7da18dc07f70>, <ast.Name object at 0x7da18dc05090>]]]]
variable[result] assign[=] call[name[heapq].nlargest, parameter[name[n], name[result]]]
return[<ast.ListComp object at 0x7da18dc07dc0>] | keyword[def] identifier[get_close_matches] ( identifier[word] , identifier[possibilities] , identifier[n] = literal[int] , identifier[cutoff] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[n] > literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[n] ,))
keyword[if] keyword[not] literal[int] <= identifier[cutoff] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[cutoff] ,))
identifier[result] =[]
identifier[s] = identifier[SequenceMatcher] ()
identifier[s] . identifier[set_seq2] ( identifier[word] )
keyword[for] identifier[x] keyword[in] identifier[possibilities] :
identifier[s] . identifier[set_seq1] ( identifier[x] )
keyword[if] identifier[s] . identifier[real_quick_ratio] ()>= identifier[cutoff] keyword[and] identifier[s] . identifier[quick_ratio] ()>= identifier[cutoff] keyword[and] identifier[s] . identifier[ratio] ()>= identifier[cutoff] :
identifier[result] . identifier[append] (( identifier[s] . identifier[ratio] (), identifier[x] ))
identifier[result] = identifier[heapq] . identifier[nlargest] ( identifier[n] , identifier[result] )
keyword[return] [ identifier[x] keyword[for] identifier[score] , identifier[x] keyword[in] identifier[result] ] | def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword as _keyword
>>> get_close_matches("wheel", _keyword.kwlist)
['while']
>>> get_close_matches("apple", _keyword.kwlist)
[]
>>> get_close_matches("accept", _keyword.kwlist)
['except']
"""
if not n > 0:
raise ValueError('n must be > 0: %r' % (n,)) # depends on [control=['if'], data=[]]
if not 0.0 <= cutoff <= 1.0:
raise ValueError('cutoff must be in [0.0, 1.0]: %r' % (cutoff,)) # depends on [control=['if'], data=[]]
result = []
s = SequenceMatcher()
s.set_seq2(word)
for x in possibilities:
s.set_seq1(x)
if s.real_quick_ratio() >= cutoff and s.quick_ratio() >= cutoff and (s.ratio() >= cutoff):
result.append((s.ratio(), x)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']]
# Move the best scorers to head of list
result = heapq.nlargest(n, result)
# Strip scores for the best n matches
return [x for (score, x) in result] |
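This is the stdlib difflib implementation, so its documented examples can be reproduced directly:

import difflib
import keyword as _keyword

print(difflib.get_close_matches("appel", ["ape", "apple", "peach", "puppy"]))
# ['apple', 'ape']
print(difflib.get_close_matches("accept", _keyword.kwlist))
# ['except']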
def make_reader_task(self, stream, callback):
"""
Create a reader executor task for a stream.
"""
return self.loop.create_task(self.executor_wrapper(background_reader, stream, self.loop, callback)) | def function[make_reader_task, parameter[self, stream, callback]]:
constant[
Create a reader executor task for a stream.
]
return[call[name[self].loop.create_task, parameter[call[name[self].executor_wrapper, parameter[name[background_reader], name[stream], name[self].loop, name[callback]]]]]] | keyword[def] identifier[make_reader_task] ( identifier[self] , identifier[stream] , identifier[callback] ):
literal[string]
keyword[return] identifier[self] . identifier[loop] . identifier[create_task] ( identifier[self] . identifier[executor_wrapper] ( identifier[background_reader] , identifier[stream] , identifier[self] . identifier[loop] , identifier[callback] )) | def make_reader_task(self, stream, callback):
"""
Create a reader executor task for a stream.
"""
return self.loop.create_task(self.executor_wrapper(background_reader, stream, self.loop, callback)) |
def value(self, value):
"""
set the value
"""
# for the indep direction we also allow a string which points to one
# of the other available dimensions
# TODO: support c, fc, ec?
if isinstance(value, common.basestring) and value in ['x', 'y', 'z']:
# we'll cast just to get rid of any python2 unicodes
self._value = str(value)
dimension = value
self._unit = getattr(self.call, dimension).unit
return
# NOTE: cannot do super on setter directly, see this python
# bug: https://bugs.python.org/issue14965 and discussion:
# https://mail.python.org/pipermail/python-dev/2010-April/099672.html
super(CallDimensionI, self)._set_value(value) | def function[value, parameter[self, value]]:
constant[
set the value
]
if <ast.BoolOp object at 0x7da2041d9d50> begin[:]
name[self]._value assign[=] call[name[str], parameter[name[value]]]
variable[dimension] assign[=] name[value]
name[self]._unit assign[=] call[name[getattr], parameter[name[self].call, name[dimension]]].unit
return[None]
call[call[name[super], parameter[name[CallDimensionI], name[self]]]._set_value, parameter[name[value]]] | keyword[def] identifier[value] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[common] . identifier[basestring] ) keyword[and] identifier[value] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[self] . identifier[_value] = identifier[str] ( identifier[value] )
identifier[dimension] = identifier[value]
identifier[self] . identifier[_unit] = identifier[getattr] ( identifier[self] . identifier[call] , identifier[dimension] ). identifier[unit]
keyword[return]
identifier[super] ( identifier[CallDimensionI] , identifier[self] ). identifier[_set_value] ( identifier[value] ) | def value(self, value):
"""
set the value
"""
# for the indep direction we also allow a string which points to one
# of the other available dimensions
# TODO: support c, fc, ec?
if isinstance(value, common.basestring) and value in ['x', 'y', 'z']:
# we'll cast just to get rid of any python2 unicodes
self._value = str(value)
dimension = value
self._unit = getattr(self.call, dimension).unit
return # depends on [control=['if'], data=[]]
# NOTE: cannot do super on setter directly, see this python
# bug: https://bugs.python.org/issue14965 and discussion:
# https://mail.python.org/pipermail/python-dev/2010-April/099672.html
super(CallDimensionI, self)._set_value(value) |
def _get_photos(session, user_or_group_id):
"""
https://vk.com/dev/photos.getAll
"""
response = session.fetch_items("photos.getAll", Photo.from_json, count=200, owner_id=user_or_group_id)
return response | def function[_get_photos, parameter[session, user_or_group_id]]:
constant[
https://vk.com/dev/photos.getAll
]
variable[response] assign[=] call[name[session].fetch_items, parameter[constant[photos.getAll], name[Photo].from_json]]
return[name[response]] | keyword[def] identifier[_get_photos] ( identifier[session] , identifier[user_or_group_id] ):
literal[string]
identifier[response] = identifier[session] . identifier[fetch_items] ( literal[string] , identifier[Photo] . identifier[from_json] , identifier[count] = literal[int] , identifier[owner_id] = identifier[user_or_group_id] )
keyword[return] identifier[response] | def _get_photos(session, user_or_group_id):
"""
https://vk.com/dev/photos.getAll
"""
response = session.fetch_items('photos.getAll', Photo.from_json, count=200, owner_id=user_or_group_id)
return response |
def gpg_key(value):
"""
test if value points to a known gpg key
and return that key as a gpg key object.
"""
try:
return crypto.get_key(value)
except GPGProblem as e:
raise ValidateError(str(e)) | def function[gpg_key, parameter[value]]:
constant[
test if value points to a known gpg key
and return that key as a gpg key object.
]
<ast.Try object at 0x7da1b0795720> | keyword[def] identifier[gpg_key] ( identifier[value] ):
literal[string]
keyword[try] :
keyword[return] identifier[crypto] . identifier[get_key] ( identifier[value] )
keyword[except] identifier[GPGProblem] keyword[as] identifier[e] :
keyword[raise] identifier[ValidateError] ( identifier[str] ( identifier[e] )) | def gpg_key(value):
"""
test if value points to a known gpg key
and return that key as a gpg key object.
"""
try:
return crypto.get_key(value) # depends on [control=['try'], data=[]]
except GPGProblem as e:
raise ValidateError(str(e)) # depends on [control=['except'], data=['e']] |
def unpack_kinesis_event(kinesis_event, deserializer=None, unpacker=None,
embed_timestamp=False):
"""Extracts events (a list of dicts) from a Kinesis event."""
records = kinesis_event["Records"]
events = []
shard_ids = set()
for rec in records:
data = rec["kinesis"]["data"]
try:
payload = b64decode(data)
except TypeError:
payload = b64decode(data.encode("utf-8"))
if unpacker:
payload = unpacker(payload)
shard_ids.add(rec["eventID"].split(":")[0])
try:
payload = payload.decode()
except AttributeError:
pass
if deserializer:
try:
payload = deserializer(payload)
except ValueError:
try:
payload = deserializer(payload.replace("\\'", "'"))
except:
logger.error("Invalid searialized payload: {}".format(
payload))
raise
if isinstance(payload, dict) and embed_timestamp:
ts = rec["kinesis"].get("approximateArrivalTimestamp")
if ts:
ts = datetime.fromtimestamp(ts, tz=tz.tzutc())
ts_str = ("{year:04d}-{month:02d}-{day:02d} "
"{hour:02d}:{minute:02d}:{second:02d}").format(
year=ts.year,
month=ts.month,
day=ts.day,
hour=ts.hour,
minute=ts.minute,
second=ts.second)
else:
ts_str = ""
payload[embed_timestamp] = ts_str
events.append(payload)
if len(shard_ids) > 1:
msg = "Kinesis event contains records from several shards: {}".format(
shard_ids)
raise(BadKinesisEventError(msg))
return events, shard_ids.pop() | def function[unpack_kinesis_event, parameter[kinesis_event, deserializer, unpacker, embed_timestamp]]:
constant[Extracts events (a list of dicts) from a Kinesis event.]
variable[records] assign[=] call[name[kinesis_event]][constant[Records]]
variable[events] assign[=] list[[]]
variable[shard_ids] assign[=] call[name[set], parameter[]]
for taget[name[rec]] in starred[name[records]] begin[:]
variable[data] assign[=] call[call[name[rec]][constant[kinesis]]][constant[data]]
<ast.Try object at 0x7da18dc04d30>
if name[unpacker] begin[:]
variable[payload] assign[=] call[name[unpacker], parameter[name[payload]]]
call[name[shard_ids].add, parameter[call[call[call[name[rec]][constant[eventID]].split, parameter[constant[:]]]][constant[0]]]]
<ast.Try object at 0x7da18dc07d30>
if name[deserializer] begin[:]
<ast.Try object at 0x7da18dc05090>
if <ast.BoolOp object at 0x7da18dc049d0> begin[:]
variable[ts] assign[=] call[call[name[rec]][constant[kinesis]].get, parameter[constant[approximateArrivalTimestamp]]]
if name[ts] begin[:]
variable[ts] assign[=] call[name[datetime].fromtimestamp, parameter[name[ts]]]
variable[ts_str] assign[=] call[constant[{year:04d}-{month:02d}-{day:02d} {hour:02d}:{minute:02d}:{second:02d}].format, parameter[]]
call[name[payload]][name[embed_timestamp]] assign[=] name[ts_str]
call[name[events].append, parameter[name[payload]]]
if compare[call[name[len], parameter[name[shard_ids]]] greater[>] constant[1]] begin[:]
variable[msg] assign[=] call[constant[Kinesis event contains records from several shards: {}].format, parameter[name[shard_ids]]]
<ast.Raise object at 0x7da18dc061a0>
return[tuple[[<ast.Name object at 0x7da18dc06c50>, <ast.Call object at 0x7da18dc07b80>]]] | keyword[def] identifier[unpack_kinesis_event] ( identifier[kinesis_event] , identifier[deserializer] = keyword[None] , identifier[unpacker] = keyword[None] ,
identifier[embed_timestamp] = keyword[False] ):
literal[string]
identifier[records] = identifier[kinesis_event] [ literal[string] ]
identifier[events] =[]
identifier[shard_ids] = identifier[set] ()
keyword[for] identifier[rec] keyword[in] identifier[records] :
identifier[data] = identifier[rec] [ literal[string] ][ literal[string] ]
keyword[try] :
identifier[payload] = identifier[b64decode] ( identifier[data] )
keyword[except] identifier[TypeError] :
identifier[payload] = identifier[b64decode] ( identifier[data] . identifier[encode] ( literal[string] ))
keyword[if] identifier[unpacker] :
identifier[payload] = identifier[unpacker] ( identifier[payload] )
identifier[shard_ids] . identifier[add] ( identifier[rec] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ])
keyword[try] :
identifier[payload] = identifier[payload] . identifier[decode] ()
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[if] identifier[deserializer] :
keyword[try] :
identifier[payload] = identifier[deserializer] ( identifier[payload] )
keyword[except] identifier[ValueError] :
keyword[try] :
identifier[payload] = identifier[deserializer] ( identifier[payload] . identifier[replace] ( literal[string] , literal[string] ))
keyword[except] :
identifier[logger] . identifier[error] ( literal[string] . identifier[format] (
identifier[payload] ))
keyword[raise]
keyword[if] identifier[isinstance] ( identifier[payload] , identifier[dict] ) keyword[and] identifier[embed_timestamp] :
identifier[ts] = identifier[rec] [ literal[string] ]. identifier[get] ( literal[string] )
keyword[if] identifier[ts] :
identifier[ts] = identifier[datetime] . identifier[fromtimestamp] ( identifier[ts] , identifier[tz] = identifier[tz] . identifier[tzutc] ())
identifier[ts_str] =( literal[string]
literal[string] ). identifier[format] (
identifier[year] = identifier[ts] . identifier[year] ,
identifier[month] = identifier[ts] . identifier[month] ,
identifier[day] = identifier[ts] . identifier[day] ,
identifier[hour] = identifier[ts] . identifier[hour] ,
identifier[minute] = identifier[ts] . identifier[minute] ,
identifier[second] = identifier[ts] . identifier[second] )
keyword[else] :
identifier[ts_str] = literal[string]
identifier[payload] [ identifier[embed_timestamp] ]= identifier[ts_str]
identifier[events] . identifier[append] ( identifier[payload] )
keyword[if] identifier[len] ( identifier[shard_ids] )> literal[int] :
identifier[msg] = literal[string] . identifier[format] (
identifier[shard_ids] )
keyword[raise] ( identifier[BadKinesisEventError] ( identifier[msg] ))
keyword[return] identifier[events] , identifier[shard_ids] . identifier[pop] () | def unpack_kinesis_event(kinesis_event, deserializer=None, unpacker=None, embed_timestamp=False):
"""Extracts events (a list of dicts) from a Kinesis event."""
records = kinesis_event['Records']
events = []
shard_ids = set()
for rec in records:
data = rec['kinesis']['data']
try:
payload = b64decode(data) # depends on [control=['try'], data=[]]
except TypeError:
payload = b64decode(data.encode('utf-8')) # depends on [control=['except'], data=[]]
if unpacker:
payload = unpacker(payload) # depends on [control=['if'], data=[]]
shard_ids.add(rec['eventID'].split(':')[0])
try:
payload = payload.decode() # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
if deserializer:
try:
payload = deserializer(payload) # depends on [control=['try'], data=[]]
except ValueError:
try:
payload = deserializer(payload.replace("\\'", "'")) # depends on [control=['try'], data=[]]
except:
logger.error('Invalid serialized payload: {}'.format(payload))
raise # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if isinstance(payload, dict) and embed_timestamp:
ts = rec['kinesis'].get('approximateArrivalTimestamp')
if ts:
ts = datetime.fromtimestamp(ts, tz=tz.tzutc())
ts_str = '{year:04d}-{month:02d}-{day:02d} {hour:02d}:{minute:02d}:{second:02d}'.format(year=ts.year, month=ts.month, day=ts.day, hour=ts.hour, minute=ts.minute, second=ts.second) # depends on [control=['if'], data=[]]
else:
ts_str = ''
payload[embed_timestamp] = ts_str # depends on [control=['if'], data=[]]
events.append(payload) # depends on [control=['for'], data=['rec']]
if len(shard_ids) > 1:
msg = 'Kinesis event contains records from several shards: {}'.format(shard_ids)
raise BadKinesisEventError(msg) # depends on [control=['if'], data=[]]
return (events, shard_ids.pop()) |
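A self-contained sketch of a round trip through the unpacker, assuming the function above is importable; the record shape and shard/sequence identifiers are hand-built stand-ins for a real Lambda event:

import json
from base64 import b64encode

event = {"Records": [{
    "eventID": "shardId-000000000000:49590338271490256608559692538361571095",
    "kinesis": {
        "data": b64encode(json.dumps({"user": "ada"}).encode()).decode(),
        "approximateArrivalTimestamp": 1550000000.0,
    },
}]}
events, shard_id = unpack_kinesis_event(event, deserializer=json.loads,
                                        embed_timestamp="received_at")
print(shard_id)   # shardId-000000000000
print(events[0])  # {'user': 'ada', 'received_at': '2019-02-12 19:33:20'}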
def solve(guess_a, guess_b, power, solver='scipy'):
""" Constructs a pyneqsys.symbolic.SymbolicSys instance and returns from its ``solve`` method. """
# The problem is 2 dimensional so we need 2 symbols
x = sp.symbols('x:2', real=True)
# There is a user specified parameter ``p`` in this problem:
p = sp.Symbol('p', real=True, negative=False, integer=True)
# Our system consists of 2-non-linear equations:
f = [x[0] + (x[0] - x[1])**p/2 - 1,
(x[1] - x[0])**p/2 + x[1]]
# We construct our ``SymbolicSys`` instance by passing variables, equations and parameters:
neqsys = SymbolicSys(x, f, [p]) # (this will derive the Jacobian symbolically)
# Finally we solve the system using user-specified ``solver`` choice:
return neqsys.solve([guess_a, guess_b], [power], solver=solver) | def function[solve, parameter[guess_a, guess_b, power, solver]]:
constant[ Constructs a pyneqsys.symbolic.SymbolicSys instance and returns from its ``solve`` method. ]
variable[x] assign[=] call[name[sp].symbols, parameter[constant[x:2]]]
variable[p] assign[=] call[name[sp].Symbol, parameter[constant[p]]]
variable[f] assign[=] list[[<ast.BinOp object at 0x7da1b26afa00>, <ast.BinOp object at 0x7da1b26ac160>]]
variable[neqsys] assign[=] call[name[SymbolicSys], parameter[name[x], name[f], list[[<ast.Name object at 0x7da1b26ac100>]]]]
return[call[name[neqsys].solve, parameter[list[[<ast.Name object at 0x7da1b26af310>, <ast.Name object at 0x7da1b26ae2c0>]], list[[<ast.Name object at 0x7da1b26af370>]]]]] | keyword[def] identifier[solve] ( identifier[guess_a] , identifier[guess_b] , identifier[power] , identifier[solver] = literal[string] ):
literal[string]
identifier[x] = identifier[sp] . identifier[symbols] ( literal[string] , identifier[real] = keyword[True] )
identifier[p] = identifier[sp] . identifier[Symbol] ( literal[string] , identifier[real] = keyword[True] , identifier[negative] = keyword[False] , identifier[integer] = keyword[True] )
identifier[f] =[ identifier[x] [ literal[int] ]+( identifier[x] [ literal[int] ]- identifier[x] [ literal[int] ])** identifier[p] / literal[int] - literal[int] ,
( identifier[x] [ literal[int] ]- identifier[x] [ literal[int] ])** identifier[p] / literal[int] + identifier[x] [ literal[int] ]]
identifier[neqsys] = identifier[SymbolicSys] ( identifier[x] , identifier[f] ,[ identifier[p] ])
keyword[return] identifier[neqsys] . identifier[solve] ([ identifier[guess_a] , identifier[guess_b] ],[ identifier[power] ], identifier[solver] = identifier[solver] ) | def solve(guess_a, guess_b, power, solver='scipy'):
""" Constructs a pyneqsys.symbolic.SymbolicSys instance and returns from its ``solve`` method. """
# The problem is 2 dimensional so we need 2 symbols
x = sp.symbols('x:2', real=True)
# There is a user specified parameter ``p`` in this problem:
p = sp.Symbol('p', real=True, negative=False, integer=True)
# Our system consists of 2-non-linear equations:
f = [x[0] + (x[0] - x[1]) ** p / 2 - 1, (x[1] - x[0]) ** p / 2 + x[1]]
# We construct our ``SymbolicSys`` instance by passing variables, equations and parameters:
neqsys = SymbolicSys(x, f, [p]) # (this will derive the Jacobian symbolically)
# Finally we solve the system using user-specified ``solver`` choice:
return neqsys.solve([guess_a, guess_b], [power], solver=solver) |
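A quick cross-check of the same two-equation system with plain scipy, skipping the symbolic Jacobian (a sketch; assumes scipy is installed and uses p=3 as the example power):

from scipy.optimize import fsolve

def f(x, p):
    return [x[0] + (x[0] - x[1])**p / 2 - 1,
            (x[1] - x[0])**p / 2 + x[1]]

print(fsolve(f, [0.5, -0.5], args=(3,)))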
def discharge_token(self, username):
"""Discharge token for a user.
Raise a ServerError if an error occurs in the request process.
@param username The logged in user.
@return The resulting base64 encoded discharged token.
"""
url = '{}discharge-token-for-user?username={}'.format(
self.url, quote(username))
logging.debug('Sending identity info to {}'.format(url))
response = make_request(url, method='GET', timeout=self.timeout)
try:
macaroon = response['DischargeToken']
json_macaroon = json.dumps(macaroon)
except (KeyError, UnicodeDecodeError) as err:
raise InvalidMacaroon(
'Invalid macaroon from discharger: {}'.format(err.message))
return base64.urlsafe_b64encode("[{}]".format(
json_macaroon).encode('utf-8')) | def function[discharge_token, parameter[self, username]]:
constant[Discharge token for a user.
Raise a ServerError if an error occurs in the request process.
@param username The logged in user.
@return The resulting base64 encoded discharged token.
]
variable[url] assign[=] call[constant[{}discharge-token-for-user?username={}].format, parameter[name[self].url, call[name[quote], parameter[name[username]]]]]
call[name[logging].debug, parameter[call[constant[Sending identity info to {}].format, parameter[name[url]]]]]
variable[response] assign[=] call[name[make_request], parameter[name[url]]]
<ast.Try object at 0x7da1b2597850>
return[call[name[base64].urlsafe_b64encode, parameter[call[call[constant[[{}]].format, parameter[name[json_macaroon]]].encode, parameter[constant[utf-8]]]]]] | keyword[def] identifier[discharge_token] ( identifier[self] , identifier[username] ):
literal[string]
identifier[url] = literal[string] . identifier[format] (
identifier[self] . identifier[url] , identifier[quote] ( identifier[username] ))
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[url] ))
identifier[response] = identifier[make_request] ( identifier[url] , identifier[method] = literal[string] , identifier[timeout] = identifier[self] . identifier[timeout] )
keyword[try] :
identifier[macaroon] = identifier[response] [ literal[string] ]
identifier[json_macaroon] = identifier[json] . identifier[dumps] ( identifier[macaroon] )
keyword[except] ( identifier[KeyError] , identifier[UnicodeDecodeError] ) keyword[as] identifier[err] :
keyword[raise] identifier[InvalidMacaroon] (
literal[string] . identifier[format] ( identifier[err] . identifier[message] ))
keyword[return] identifier[base64] . identifier[urlsafe_b64encode] ( literal[string] . identifier[format] (
identifier[json_macaroon] ). identifier[encode] ( literal[string] )) | def discharge_token(self, username):
"""Discharge token for a user.
Raise a ServerError if an error occurs in the request process.
@param username The logged in user.
@return The resulting base64 encoded discharged token.
"""
url = '{}discharge-token-for-user?username={}'.format(self.url, quote(username))
logging.debug('Sending identity info to {}'.format(url))
response = make_request(url, method='GET', timeout=self.timeout)
try:
macaroon = response['DischargeToken']
json_macaroon = json.dumps(macaroon) # depends on [control=['try'], data=[]]
except (KeyError, UnicodeDecodeError) as err:
raise InvalidMacaroon('Invalid macaroon from discharger: {}'.format(err.message)) # depends on [control=['except'], data=['err']]
return base64.urlsafe_b64encode('[{}]'.format(json_macaroon).encode('utf-8')) |
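What the final line above produces: a URL-safe base64 encoding of a one-element JSON array holding the macaroon. A toy round trip (the macaroon dict is a stand-in, not a real macaroon):

import base64, json

macaroon = {"identifier": "abc", "signature": "f00"}  # toy stand-in
token = base64.urlsafe_b64encode(
    "[{}]".format(json.dumps(macaroon)).encode("utf-8"))
print(json.loads(base64.urlsafe_b64decode(token))[0]["identifier"])  # abc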
def power_status_send(self, Vcc, Vservo, flags, force_mavlink1=False):
'''
Power supply status
Vcc : 5V rail voltage in millivolts (uint16_t)
Vservo : servo rail voltage in millivolts (uint16_t)
flags : power supply status flags (see MAV_POWER_STATUS enum) (uint16_t)
'''
return self.send(self.power_status_encode(Vcc, Vservo, flags), force_mavlink1=force_mavlink1) | def function[power_status_send, parameter[self, Vcc, Vservo, flags, force_mavlink1]]:
constant[
Power supply status
Vcc : 5V rail voltage in millivolts (uint16_t)
Vservo : servo rail voltage in millivolts (uint16_t)
flags : power supply status flags (see MAV_POWER_STATUS enum) (uint16_t)
]
return[call[name[self].send, parameter[call[name[self].power_status_encode, parameter[name[Vcc], name[Vservo], name[flags]]]]]] | keyword[def] identifier[power_status_send] ( identifier[self] , identifier[Vcc] , identifier[Vservo] , identifier[flags] , identifier[force_mavlink1] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[send] ( identifier[self] . identifier[power_status_encode] ( identifier[Vcc] , identifier[Vservo] , identifier[flags] ), identifier[force_mavlink1] = identifier[force_mavlink1] ) | def power_status_send(self, Vcc, Vservo, flags, force_mavlink1=False):
"""
Power supply status
Vcc : 5V rail voltage in millivolts (uint16_t)
Vservo : servo rail voltage in millivolts (uint16_t)
flags : power supply status flags (see MAV_POWER_STATUS enum) (uint16_t)
"""
return self.send(self.power_status_encode(Vcc, Vservo, flags), force_mavlink1=force_mavlink1) |
def get_block_sysfee(self, height, id=None, endpoint=None):
"""
Get the system fee of a block by height. This is used in calculating gas claims
Args:
height: (int) height of the block to look up
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
"""
return self._call_endpoint(GET_BLOCK_SYS_FEE, params=[height], id=id, endpoint=endpoint) | def function[get_block_sysfee, parameter[self, height, id, endpoint]]:
constant[
Get the system fee of a block by height. This is used in calculating gas claims
Args:
height: (int) height of the block to look up
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
]
return[call[name[self]._call_endpoint, parameter[name[GET_BLOCK_SYS_FEE]]]] | keyword[def] identifier[get_block_sysfee] ( identifier[self] , identifier[height] , identifier[id] = keyword[None] , identifier[endpoint] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_call_endpoint] ( identifier[GET_BLOCK_SYS_FEE] , identifier[params] =[ identifier[height] ], identifier[id] = identifier[id] , identifier[endpoint] = identifier[endpoint] ) | def get_block_sysfee(self, height, id=None, endpoint=None):
"""
Get the system fee of a block by height. This is used in calculating gas claims
Args:
height: (int) height of the block to lookup
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
"""
return self._call_endpoint(GET_BLOCK_SYS_FEE, params=[height], id=id, endpoint=endpoint) |
def get_tag_html(tag_id):
"""
Returns the Django HTML to load the tag library and render the tag.
Args:
tag_id (str): The tag id of the tag to return the HTML for.
"""
tag_data = get_lazy_tag_data(tag_id)
tag = tag_data['tag']
args = tag_data['args']
kwargs = tag_data['kwargs']
lib, tag_name = get_lib_and_tag_name(tag)
args_str = ''
if args:
for arg in args:
if isinstance(arg, six.string_types):
args_str += "'{0}' ".format(arg)
else:
args_str += "{0} ".format(arg)
kwargs_str = ''
if kwargs:
for name, value in kwargs.items():
if isinstance(value, six.string_types):
kwargs_str += "{0}='{1}' ".format(name, value)
else:
kwargs_str += "{0}={1} ".format(name, value)
html = '{{% load {lib} %}}{{% {tag_name} {args}{kwargs}%}}'.format(
lib=lib, tag_name=tag_name, args=args_str, kwargs=kwargs_str)
return html | def function[get_tag_html, parameter[tag_id]]:
constant[
Returns the Django HTML to load the tag library and render the tag.
Args:
tag_id (str): The tag id of the tag to return the HTML for.
]
variable[tag_data] assign[=] call[name[get_lazy_tag_data], parameter[name[tag_id]]]
variable[tag] assign[=] call[name[tag_data]][constant[tag]]
variable[args] assign[=] call[name[tag_data]][constant[args]]
variable[kwargs] assign[=] call[name[tag_data]][constant[kwargs]]
<ast.Tuple object at 0x7da1b26773a0> assign[=] call[name[get_lib_and_tag_name], parameter[name[tag]]]
variable[args_str] assign[=] constant[]
if name[args] begin[:]
for taget[name[arg]] in starred[name[args]] begin[:]
if call[name[isinstance], parameter[name[arg], name[six].string_types]] begin[:]
<ast.AugAssign object at 0x7da1b2677c70>
variable[kwargs_str] assign[=] constant[]
if name[kwargs] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b26770a0>, <ast.Name object at 0x7da1b2677f40>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[value], name[six].string_types]] begin[:]
<ast.AugAssign object at 0x7da1b25870d0>
variable[html] assign[=] call[constant[{{% load {lib} %}}{{% {tag_name} {args}{kwargs}%}}].format, parameter[]]
return[name[html]] | keyword[def] identifier[get_tag_html] ( identifier[tag_id] ):
literal[string]
identifier[tag_data] = identifier[get_lazy_tag_data] ( identifier[tag_id] )
identifier[tag] = identifier[tag_data] [ literal[string] ]
identifier[args] = identifier[tag_data] [ literal[string] ]
identifier[kwargs] = identifier[tag_data] [ literal[string] ]
identifier[lib] , identifier[tag_name] = identifier[get_lib_and_tag_name] ( identifier[tag] )
identifier[args_str] = literal[string]
keyword[if] identifier[args] :
keyword[for] identifier[arg] keyword[in] identifier[args] :
keyword[if] identifier[isinstance] ( identifier[arg] , identifier[six] . identifier[string_types] ):
identifier[args_str] += literal[string] . identifier[format] ( identifier[arg] )
keyword[else] :
identifier[args_str] += literal[string] . identifier[format] ( identifier[arg] )
identifier[kwargs_str] = literal[string]
keyword[if] identifier[kwargs] :
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[kwargs] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[string_types] ):
identifier[kwargs_str] += literal[string] . identifier[format] ( identifier[name] , identifier[value] )
keyword[else] :
identifier[kwargs_str] += literal[string] . identifier[format] ( identifier[name] , identifier[value] )
identifier[html] = literal[string] . identifier[format] (
identifier[lib] = identifier[lib] , identifier[tag_name] = identifier[tag_name] , identifier[args] = identifier[args_str] , identifier[kwargs] = identifier[kwargs_str] )
keyword[return] identifier[html] | def get_tag_html(tag_id):
"""
Returns the Django HTML to load the tag library and render the tag.
Args:
tag_id (str): The tag id of the tag to return the HTML for.
"""
tag_data = get_lazy_tag_data(tag_id)
tag = tag_data['tag']
args = tag_data['args']
kwargs = tag_data['kwargs']
(lib, tag_name) = get_lib_and_tag_name(tag)
args_str = ''
if args:
for arg in args:
if isinstance(arg, six.string_types):
args_str += "'{0}' ".format(arg) # depends on [control=['if'], data=[]]
else:
args_str += '{0} '.format(arg) # depends on [control=['for'], data=['arg']] # depends on [control=['if'], data=[]]
kwargs_str = ''
if kwargs:
for (name, value) in kwargs.items():
if isinstance(value, six.string_types):
kwargs_str += "{0}='{1}' ".format(name, value) # depends on [control=['if'], data=[]]
else:
kwargs_str += '{0}={1} '.format(name, value) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
html = '{{% load {lib} %}}{{% {tag_name} {args}{kwargs}%}}'.format(lib=lib, tag_name=tag_name, args=args_str, kwargs=kwargs_str)
return html |
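The string assembly above yields a standard Django load-and-render snippet; for an assumed library "blog_tags", tag "recent_posts", one positional arg and one kwarg it comes out as:

html = '{{% load {lib} %}}{{% {tag_name} {args}{kwargs}%}}'.format(
    lib='blog_tags', tag_name='recent_posts',
    args="'news' ", kwargs='limit=5 ')
print(html)  # {% load blog_tags %}{% recent_posts 'news' limit=5 %}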
def update(self, *names: str) -> 'ListTree':
"""Add all the mailbox names to the tree, filling in any missing nodes.
Args:
names: The names of the mailboxes.
"""
for name in names:
parts = name.split(self._delimiter)
self._root.add(*parts)
return self | def function[update, parameter[self]]:
constant[Add all the mailbox names to the tree, filling in any missing nodes.
Args:
names: The names of the mailboxes.
]
for taget[name[name]] in starred[name[names]] begin[:]
variable[parts] assign[=] call[name[name].split, parameter[name[self]._delimiter]]
call[name[self]._root.add, parameter[<ast.Starred object at 0x7da20e9565f0>]]
return[name[self]] | keyword[def] identifier[update] ( identifier[self] ,* identifier[names] : identifier[str] )-> literal[string] :
literal[string]
keyword[for] identifier[name] keyword[in] identifier[names] :
identifier[parts] = identifier[name] . identifier[split] ( identifier[self] . identifier[_delimiter] )
identifier[self] . identifier[_root] . identifier[add] (* identifier[parts] )
keyword[return] identifier[self] | def update(self, *names: str) -> 'ListTree':
"""Add all the mailbox names to the tree, filling in any missing nodes.
Args:
names: The names of the mailboxes.
"""
for name in names:
parts = name.split(self._delimiter)
self._root.add(*parts) # depends on [control=['for'], data=['name']]
return self |
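The real node type behind _root is not shown here, so the Node below is an assumed stand-in; it illustrates how splitting on the delimiter fills in intermediate nodes:

class Node:
    def __init__(self):
        self.children = {}

    def add(self, part, *rest):
        child = self.children.setdefault(part, Node())
        if rest:
            child.add(*rest)

root = Node()
for name in ('INBOX', 'INBOX/Sent', 'Archive/2023/Q1'):
    root.add(*name.split('/'))
print(sorted(root.children))                      # ['Archive', 'INBOX']
print(sorted(root.children['Archive'].children))  # ['2023']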
def unpack_apply_message(bufs, g=None, copy=True):
"""Unpack f,args,kwargs from buffers packed by pack_apply_message().
Returns: original f,args,kwargs
"""
bufs = list(bufs) # allow us to pop
assert len(bufs) >= 2, "not enough buffers!"
pf = buffer_to_bytes_py2(bufs.pop(0))
f = uncan(pickle.loads(pf), g)
pinfo = buffer_to_bytes_py2(bufs.pop(0))
info = pickle.loads(pinfo)
arg_bufs, kwarg_bufs = bufs[:info['narg_bufs']], bufs[info['narg_bufs']:]
args = []
for i in range(info['nargs']):
arg, arg_bufs = deserialize_object(arg_bufs, g)
args.append(arg)
args = tuple(args)
assert not arg_bufs, "Shouldn't be any arg bufs left over"
kwargs = {}
for key in info['kw_keys']:
kwarg, kwarg_bufs = deserialize_object(kwarg_bufs, g)
kwargs[key] = kwarg
assert not kwarg_bufs, "Shouldn't be any kwarg bufs left over"
return f, args, kwargs | def function[unpack_apply_message, parameter[bufs, g, copy]]:
constant[Unpack f,args,kwargs from buffers packed by pack_apply_message().
Returns: original f,args,kwargs
]
variable[bufs] assign[=] call[name[list], parameter[name[bufs]]]
assert[compare[call[name[len], parameter[name[bufs]]] greater_or_equal[>=] constant[2]]]
variable[pf] assign[=] call[name[buffer_to_bytes_py2], parameter[call[name[bufs].pop, parameter[constant[0]]]]]
variable[f] assign[=] call[name[uncan], parameter[call[name[pickle].loads, parameter[name[pf]]], name[g]]]
variable[pinfo] assign[=] call[name[buffer_to_bytes_py2], parameter[call[name[bufs].pop, parameter[constant[0]]]]]
variable[info] assign[=] call[name[pickle].loads, parameter[name[pinfo]]]
<ast.Tuple object at 0x7da1b014fa60> assign[=] tuple[[<ast.Subscript object at 0x7da1b014f3a0>, <ast.Subscript object at 0x7da1b014d570>]]
variable[args] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[info]][constant[nargs]]]]] begin[:]
<ast.Tuple object at 0x7da1b014fd00> assign[=] call[name[deserialize_object], parameter[name[arg_bufs], name[g]]]
call[name[args].append, parameter[name[arg]]]
variable[args] assign[=] call[name[tuple], parameter[name[args]]]
assert[<ast.UnaryOp object at 0x7da1b014f730>]
variable[kwargs] assign[=] dictionary[[], []]
for taget[name[key]] in starred[call[name[info]][constant[kw_keys]]] begin[:]
<ast.Tuple object at 0x7da1b014f220> assign[=] call[name[deserialize_object], parameter[name[kwarg_bufs], name[g]]]
call[name[kwargs]][name[key]] assign[=] name[kwarg]
assert[<ast.UnaryOp object at 0x7da1b014cc10>]
return[tuple[[<ast.Name object at 0x7da1b015b6d0>, <ast.Name object at 0x7da1b015ae60>, <ast.Name object at 0x7da1b0159270>]]] | keyword[def] identifier[unpack_apply_message] ( identifier[bufs] , identifier[g] = keyword[None] , identifier[copy] = keyword[True] ):
literal[string]
identifier[bufs] = identifier[list] ( identifier[bufs] )
keyword[assert] identifier[len] ( identifier[bufs] )>= literal[int] , literal[string]
identifier[pf] = identifier[buffer_to_bytes_py2] ( identifier[bufs] . identifier[pop] ( literal[int] ))
identifier[f] = identifier[uncan] ( identifier[pickle] . identifier[loads] ( identifier[pf] ), identifier[g] )
identifier[pinfo] = identifier[buffer_to_bytes_py2] ( identifier[bufs] . identifier[pop] ( literal[int] ))
identifier[info] = identifier[pickle] . identifier[loads] ( identifier[pinfo] )
identifier[arg_bufs] , identifier[kwarg_bufs] = identifier[bufs] [: identifier[info] [ literal[string] ]], identifier[bufs] [ identifier[info] [ literal[string] ]:]
identifier[args] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[info] [ literal[string] ]):
identifier[arg] , identifier[arg_bufs] = identifier[deserialize_object] ( identifier[arg_bufs] , identifier[g] )
identifier[args] . identifier[append] ( identifier[arg] )
identifier[args] = identifier[tuple] ( identifier[args] )
keyword[assert] keyword[not] identifier[arg_bufs] , literal[string]
identifier[kwargs] ={}
keyword[for] identifier[key] keyword[in] identifier[info] [ literal[string] ]:
identifier[kwarg] , identifier[kwarg_bufs] = identifier[deserialize_object] ( identifier[kwarg_bufs] , identifier[g] )
identifier[kwargs] [ identifier[key] ]= identifier[kwarg]
keyword[assert] keyword[not] identifier[kwarg_bufs] , literal[string]
keyword[return] identifier[f] , identifier[args] , identifier[kwargs] | def unpack_apply_message(bufs, g=None, copy=True):
"""Unpack f,args,kwargs from buffers packed by pack_apply_message().
Returns: original f,args,kwargs
"""
bufs = list(bufs) # allow us to pop
assert len(bufs) >= 2, 'not enough buffers!'
pf = buffer_to_bytes_py2(bufs.pop(0))
f = uncan(pickle.loads(pf), g)
pinfo = buffer_to_bytes_py2(bufs.pop(0))
info = pickle.loads(pinfo)
(arg_bufs, kwarg_bufs) = (bufs[:info['narg_bufs']], bufs[info['narg_bufs']:])
args = []
for i in range(info['nargs']):
(arg, arg_bufs) = deserialize_object(arg_bufs, g)
args.append(arg) # depends on [control=['for'], data=[]]
args = tuple(args)
assert not arg_bufs, "Shouldn't be any arg bufs left over"
kwargs = {}
for key in info['kw_keys']:
(kwarg, kwarg_bufs) = deserialize_object(kwarg_bufs, g)
kwargs[key] = kwarg # depends on [control=['for'], data=['key']]
assert not kwarg_bufs, "Shouldn't be any kwarg bufs left over"
return (f, args, kwargs) |
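How the header-driven split above partitions the remaining buffers (stand-in strings in place of serialized buffers):

bufs = ['a0', 'a1', 'kw0']   # what's left after popping pf and pinfo
info = {'narg_bufs': 2, 'nargs': 2, 'kw_keys': ['key0']}
arg_bufs, kwarg_bufs = bufs[:info['narg_bufs']], bufs[info['narg_bufs']:]
print(arg_bufs, kwarg_bufs)  # ['a0', 'a1'] ['kw0']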
def signal_to_exception(sig: signal.Signals) -> SignalException:
"""
Convert a ``signal.Signals`` to a ``SignalException``.
This allows for natural, pythonic signal handing with the use of try-except blocks.
.. code-block:: python
import signal
import zproc
zproc.signal_to_exception(signal.SIGTERM)
try:
...
except zproc.SignalException as e:
print("encountered:", e)
finally:
zproc.exception_to_signal(signals.SIGTERM)
"""
signal.signal(sig, _sig_exc_handler)
return SignalException(sig) | def function[signal_to_exception, parameter[sig]]:
constant[
Convert a ``signal.Signals`` to a ``SignalException``.
This allows for natural, pythonic signal handing with the use of try-except blocks.
.. code-block:: python
import signal
import zproc
zproc.signal_to_exception(signal.SIGTERM)
try:
...
except zproc.SignalException as e:
print("encountered:", e)
finally:
zproc.exception_to_signal(signal.SIGTERM)
]
call[name[signal].signal, parameter[name[sig], name[_sig_exc_handler]]]
return[call[name[SignalException], parameter[name[sig]]]] | keyword[def] identifier[signal_to_exception] ( identifier[sig] : identifier[signal] . identifier[Signals] )-> identifier[SignalException] :
literal[string]
identifier[signal] . identifier[signal] ( identifier[sig] , identifier[_sig_exc_handler] )
keyword[return] identifier[SignalException] ( identifier[sig] ) | def signal_to_exception(sig: signal.Signals) -> SignalException:
"""
Convert a ``signal.Signals`` to a ``SignalException``.
This allows for natural, pythonic signal handling with the use of try-except blocks.
.. code-block:: python
import signal
import zproc
zproc.signal_to_exception(signal.SIGTERM)
try:
...
except zproc.SignalException as e:
print("encountered:", e)
finally:
zproc.exception_to_signal(signal.SIGTERM)
"""
signal.signal(sig, _sig_exc_handler)
return SignalException(sig) |
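The `_sig_exc_handler` used above is not shown in this row; a plausible minimal definition (an assumption, not the library's verbatim code) just converts the delivered signal into the exception:

def _sig_exc_handler(sig, frame):
    # Called by the interpreter when the signal fires; surface it as an exception
    # so the try/except pattern from the docstring works.
    raise SignalException(sig)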
def get_kline(self, symbol, period, size=150, _async=False):
"""
Get KLine (candlestick) data.
:param symbol
:param period: allowed values: {1min, 5min, 15min, 30min, 60min, 1day, 1mon, 1week, 1year}
:param size: allowed values: [1, 2000]
:return:
"""
params = {'symbol': symbol, 'period': period, 'size': size}
url = u.MARKET_URL + '/market/history/kline'
return http_get_request(url, params, _async=_async) | def function[get_kline, parameter[self, symbol, period, size, _async]]:
constant[
Get KLine (candlestick) data.
:param symbol
:param period: allowed values: {1min, 5min, 15min, 30min, 60min, 1day, 1mon, 1week, 1year}
:param size: allowed values: [1, 2000]
:return:
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18f09e4d0>, <ast.Constant object at 0x7da18f09fa30>, <ast.Constant object at 0x7da18f09e080>], [<ast.Name object at 0x7da18f09df00>, <ast.Name object at 0x7da18f09c220>, <ast.Name object at 0x7da18f09cdc0>]]
variable[url] assign[=] binary_operation[name[u].MARKET_URL + constant[/market/history/kline]]
return[call[name[http_get_request], parameter[name[url], name[params]]]] | keyword[def] identifier[get_kline] ( identifier[self] , identifier[symbol] , identifier[period] , identifier[size] = literal[int] , identifier[_async] = keyword[False] ):
literal[string]
identifier[params] ={ literal[string] : identifier[symbol] , literal[string] : identifier[period] , literal[string] : identifier[size] }
identifier[url] = identifier[u] . identifier[MARKET_URL] + literal[string]
keyword[return] identifier[http_get_request] ( identifier[url] , identifier[params] , identifier[_async] = identifier[_async] ) | def get_kline(self, symbol, period, size=150, _async=False):
"""
Get KLine (candlestick) data.
:param symbol
:param period: allowed values: {1min, 5min, 15min, 30min, 60min, 1day, 1mon, 1week, 1year}
:param size: allowed values: [1, 2000]
:return:
"""
params = {'symbol': symbol, 'period': period, 'size': size}
url = u.MARKET_URL + '/market/history/kline'
return http_get_request(url, params, _async=_async) |
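A hedged usage sketch; `MarketClient` is a placeholder for whatever class hosts this method, and the endpoint (`u.MARKET_URL + '/market/history/kline'`) looks like the Huobi REST market API, so the symbol below follows that convention:

client = MarketClient()  # hypothetical host class for get_kline
resp = client.get_kline("btcusdt", period="1min", size=10)
# A Huobi-style reply is a dict such as {"status": "ok", "data": [...candles...]}.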
def from_call(cls, call_node):
"""Get a CallSite object from the given Call node."""
callcontext = contextmod.CallContext(call_node.args, call_node.keywords)
return cls(callcontext) | def function[from_call, parameter[cls, call_node]]:
constant[Get a CallSite object from the given Call node.]
variable[callcontext] assign[=] call[name[contextmod].CallContext, parameter[name[call_node].args, name[call_node].keywords]]
return[call[name[cls], parameter[name[callcontext]]]] | keyword[def] identifier[from_call] ( identifier[cls] , identifier[call_node] ):
literal[string]
identifier[callcontext] = identifier[contextmod] . identifier[CallContext] ( identifier[call_node] . identifier[args] , identifier[call_node] . identifier[keywords] )
keyword[return] identifier[cls] ( identifier[callcontext] ) | def from_call(cls, call_node):
"""Get a CallSite object from the given Call node."""
callcontext = contextmod.CallContext(call_node.args, call_node.keywords)
return cls(callcontext) |
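A short driving example, assuming CallSite/CallContext come from the astroid static-analysis library (consistent with the `contextmod` alias above):

import astroid

call_node = astroid.extract_node("max(3, 5, key=abs)")  # an astroid Call node
site = CallSite.from_call(call_node)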
def from_csv(cls, csv_file):
"""
Not implemented. Will provide a route from CSV file.
"""
try:
param_dict = csv.DictReader(csv_file)
return cls(param_dict)
except:
raise NotImplementedError | def function[from_csv, parameter[cls, csv_file]]:
constant[
Not implemented. Will provide a route from CSV file.
]
<ast.Try object at 0x7da1b23ec1c0> | keyword[def] identifier[from_csv] ( identifier[cls] , identifier[csv_file] ):
literal[string]
keyword[try] :
identifier[param_dict] = identifier[csv] . identifier[DictReader] ( identifier[csv_file] )
keyword[return] identifier[cls] ( identifier[param_dict] )
keyword[except] :
keyword[raise] identifier[NotImplementedError] | def from_csv(cls, csv_file):
"""
Not implemented. Will provide a route from CSV file.
"""
try:
param_dict = csv.DictReader(csv_file)
return cls(param_dict) # depends on [control=['try'], data=[]]
except:
raise NotImplementedError # depends on [control=['except'], data=[]] |
def create_error_response(self, in_response_to, destination, info,
sign=False, issuer=None, sign_alg=None,
digest_alg=None, **kwargs):
""" Create a error response.
:param in_response_to: The identifier of the message this is a response
to.
:param destination: The intended recipient of this message
:param info: Either an Exception instance or a 2-tuple consisting of
error code and descriptive text
:param sign: Whether the response should be signed or not
:param issuer: The issuer of the response
:param kwargs: To capture key,value pairs I don't care about
:return: A response instance
"""
status = error_status_factory(info)
return self._response(in_response_to, destination, status, issuer,
sign, sign_alg=sign_alg, digest_alg=digest_alg) | def function[create_error_response, parameter[self, in_response_to, destination, info, sign, issuer, sign_alg, digest_alg]]:
constant[ Create an error response.
:param in_response_to: The identifier of the message this is a response
to.
:param destination: The intended recipient of this message
:param info: Either an Exception instance or a 2-tuple consisting of
error code and descriptive text
:param sign: Whether the response should be signed or not
:param issuer: The issuer of the response
:param kwargs: To capture key,value pairs I don't care about
:return: A response instance
]
variable[status] assign[=] call[name[error_status_factory], parameter[name[info]]]
return[call[name[self]._response, parameter[name[in_response_to], name[destination], name[status], name[issuer], name[sign]]]] | keyword[def] identifier[create_error_response] ( identifier[self] , identifier[in_response_to] , identifier[destination] , identifier[info] ,
identifier[sign] = keyword[False] , identifier[issuer] = keyword[None] , identifier[sign_alg] = keyword[None] ,
identifier[digest_alg] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[status] = identifier[error_status_factory] ( identifier[info] )
keyword[return] identifier[self] . identifier[_response] ( identifier[in_response_to] , identifier[destination] , identifier[status] , identifier[issuer] ,
identifier[sign] , identifier[sign_alg] = identifier[sign_alg] , identifier[digest_alg] = identifier[digest_alg] ) | def create_error_response(self, in_response_to, destination, info, sign=False, issuer=None, sign_alg=None, digest_alg=None, **kwargs):
""" Create a error response.
:param in_response_to: The identifier of the message this is a response
to.
:param destination: The intended recipient of this message
:param info: Either an Exception instance or a 2-tuple consisting of
error code and descriptive text
:param sign: Whether the response should be signed or not
:param issuer: The issuer of the response
:param kwargs: To capture key,value pairs I don't care about
:return: A response instance
"""
status = error_status_factory(info)
return self._response(in_response_to, destination, status, issuer, sign, sign_alg=sign_alg, digest_alg=digest_alg) |
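A hedged call sketch; `server` stands in for the entity (pysaml2-style) that provides this method, and per the docstring `info` may simply be an Exception instance:

resp = server.create_error_response(
    in_response_to="id-2a45b1",
    destination="https://sp.example.org/acs",
    info=ValueError("unknown principal"),  # Exception instance, per the docstring
    sign=False,
)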
def _releaseModifierKeys(self, modifiers):
"""Release given modifier keys (provided in list form).
Parameters: modifiers list
Returns: Unsigned int representing flags to set
"""
modFlags = self._releaseModifiers(modifiers)
# Post the queued keypresses:
self._postQueuedEvents()
return modFlags | def function[_releaseModifierKeys, parameter[self, modifiers]]:
constant[Release given modifier keys (provided in list form).
Parameters: modifiers list
Returns: Unsigned int representing flags to set
]
variable[modFlags] assign[=] call[name[self]._releaseModifiers, parameter[name[modifiers]]]
call[name[self]._postQueuedEvents, parameter[]]
return[name[modFlags]] | keyword[def] identifier[_releaseModifierKeys] ( identifier[self] , identifier[modifiers] ):
literal[string]
identifier[modFlags] = identifier[self] . identifier[_releaseModifiers] ( identifier[modifiers] )
identifier[self] . identifier[_postQueuedEvents] ()
keyword[return] identifier[modFlags] | def _releaseModifierKeys(self, modifiers):
"""Release given modifier keys (provided in list form).
Parameters: modifiers list
Returns: Unsigned int representing flags to set
"""
modFlags = self._releaseModifiers(modifiers)
# Post the queued keypresses:
self._postQueuedEvents()
return modFlags |