Dataset columns (string lengths per column):
    code             75 – 104k characters
    code_sememe      47 – 309k characters
    token_type       215 – 214k characters
    code_dependency  75 – 155k characters
def translate(cls, f=None, output=0, **kwargs):
    """translate(f=None, *, output=TranslateOutput.code, **kwargs)

    Decorator that turns a function into a shellcode emitting function.

    Arguments:
        f(callable): The function to decorate. If ``f`` is ``None`` a
            decorator will be returned instead.
        output(~pwnypack.shellcode.base.BaseEnvironment.TranslateOutput):
            The output format the shellcode function will produce.
        **kwargs: Keyword arguments are passed to shellcode environment
            constructor.

    Returns:
        A decorator that will translate the given function into a
        shellcode generator

    Examples:
        >>> from pwny import *
        >>> @sc.LinuxX86Mutable.translate
        ... def shellcode():
        ...     sys_exit(0)

        >>> @sc.LinuxX86Mutable.translate(output=1)
        ... def shellcode():
        ...     sys_exit(0)
    """

    def decorator(f):
        @functools.wraps(f)
        def proxy(*p_args, **p_kwargs):
            env = cls(**kwargs)
            result = translate(env, f, *p_args, **p_kwargs)
            if output == cls.TranslateOutput.code:
                return env.assemble(result)
            elif output == cls.TranslateOutput.assembly:
                return env.target, env.compile(result)
            else:
                return env, result
        return proxy

    if f is None:
        return decorator
    else:
        return decorator(f)
def function[translate, parameter[cls, f, output]]: constant[translate(f=None, *, output=TranslateOutput.code, **kwargs) Decorator that turns a function into a shellcode emitting function. Arguments: f(callable): The function to decorate. If ``f`` is ``None`` a decorator will be returned instead. output(~pwnypack.shellcode.base.BaseEnvironment.TranslateOutput): The output format the shellcode function will produce. **kwargs: Keyword arguments are passed to shellcode environment constructor. Returns: A decorator that will translate the given function into a shellcode generator Examples: >>> from pwny import * >>> @sc.LinuxX86Mutable.translate ... def shellcode(): ... sys_exit(0) >>> @sc.LinuxX86Mutable.translate(output=1) ... def shellcode(): ... sys_exit(0) ] def function[decorator, parameter[f]]: def function[proxy, parameter[]]: variable[env] assign[=] call[name[cls], parameter[]] variable[result] assign[=] call[name[translate], parameter[name[env], name[f], <ast.Starred object at 0x7da18f721b70>]] if compare[name[output] equal[==] name[cls].TranslateOutput.code] begin[:] return[call[name[env].assemble, parameter[name[result]]]] return[name[proxy]] if compare[name[f] is constant[None]] begin[:] return[name[decorator]]
keyword[def] identifier[translate] ( identifier[cls] , identifier[f] = keyword[None] , identifier[output] = literal[int] ,** identifier[kwargs] ): literal[string] keyword[def] identifier[decorator] ( identifier[f] ): @ identifier[functools] . identifier[wraps] ( identifier[f] ) keyword[def] identifier[proxy] (* identifier[p_args] ,** identifier[p_kwargs] ): identifier[env] = identifier[cls] (** identifier[kwargs] ) identifier[result] = identifier[translate] ( identifier[env] , identifier[f] ,* identifier[p_args] ,** identifier[p_kwargs] ) keyword[if] identifier[output] == identifier[cls] . identifier[TranslateOutput] . identifier[code] : keyword[return] identifier[env] . identifier[assemble] ( identifier[result] ) keyword[elif] identifier[output] == identifier[cls] . identifier[TranslateOutput] . identifier[assembly] : keyword[return] identifier[env] . identifier[target] , identifier[env] . identifier[compile] ( identifier[result] ) keyword[else] : keyword[return] identifier[env] , identifier[result] keyword[return] identifier[proxy] keyword[if] identifier[f] keyword[is] keyword[None] : keyword[return] identifier[decorator] keyword[else] : keyword[return] identifier[decorator] ( identifier[f] )
def translate(cls, f=None, output=0, **kwargs): """translate(f=None, *, output=TranslateOutput.code, **kwargs) Decorator that turns a function into a shellcode emitting function. Arguments: f(callable): The function to decorate. If ``f`` is ``None`` a decorator will be returned instead. output(~pwnypack.shellcode.base.BaseEnvironment.TranslateOutput): The output format the shellcode function will produce. **kwargs: Keyword arguments are passed to shellcode environment constructor. Returns: A decorator that will translate the given function into a shellcode generator Examples: >>> from pwny import * >>> @sc.LinuxX86Mutable.translate ... def shellcode(): ... sys_exit(0) >>> @sc.LinuxX86Mutable.translate(output=1) ... def shellcode(): ... sys_exit(0) """ def decorator(f): @functools.wraps(f) def proxy(*p_args, **p_kwargs): env = cls(**kwargs) result = translate(env, f, *p_args, **p_kwargs) if output == cls.TranslateOutput.code: return env.assemble(result) # depends on [control=['if'], data=[]] elif output == cls.TranslateOutput.assembly: return (env.target, env.compile(result)) # depends on [control=['if'], data=[]] else: return (env, result) return proxy if f is None: return decorator # depends on [control=['if'], data=[]] else: return decorator(f)
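The translate wrapper above uses the common "decorator usable with or without arguments" idiom: when applied directly, f is the decorated function; when called with keyword arguments first, f is None and the inner decorator is returned. A minimal, generic sketch of that idiom follows; the names emit and greet are invented for illustration and are not part of pwnypack.

import functools

def emit(f=None, *, prefix=">>"):
    # Works both as @emit and as @emit(prefix=...), mirroring the
    # `if f is None: return decorator` branching in translate() above.
    def decorator(func):
        @functools.wraps(func)
        def proxy(*args, **kwargs):
            return "%s %s" % (prefix, func(*args, **kwargs))
        return proxy
    if f is None:
        return decorator       # called as @emit(prefix=...): hand back the decorator
    return decorator(f)        # called as bare @emit: decorate immediately

@emit
def greet(name):
    return "hello " + name

@emit(prefix="::")
def shout(name):
    return name.upper()

print(greet("world"))   # ">> hello world"
print(shout("world"))   # ":: WORLD"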
def auto_index(mcs):
    """Builds all indices, listed in model's Meta class.

    >>> class SomeModel(Model)
    ...     class Meta:
    ...         indices = (
    ...             Index('foo'),
    ...         )

    .. note:: this will result in calls to
        :meth:`pymongo.collection.Collection.ensure_index` method at
        import time, so import all your models up front.
    """
    for index in mcs._meta.indices:
        index.ensure(mcs.collection)
def function[auto_index, parameter[mcs]]: constant[Builds all indices, listed in model's Meta class. >>> class SomeModel(Model) ... class Meta: ... indices = ( ... Index('foo'), ... ) .. note:: this will result in calls to :meth:`pymongo.collection.Collection.ensure_index` method at import time, so import all your models up front. ] for taget[name[index]] in starred[name[mcs]._meta.indices] begin[:] call[name[index].ensure, parameter[name[mcs].collection]]
keyword[def] identifier[auto_index] ( identifier[mcs] ): literal[string] keyword[for] identifier[index] keyword[in] identifier[mcs] . identifier[_meta] . identifier[indices] : identifier[index] . identifier[ensure] ( identifier[mcs] . identifier[collection] )
def auto_index(mcs): """Builds all indices, listed in model's Meta class. >>> class SomeModel(Model) ... class Meta: ... indices = ( ... Index('foo'), ... ) .. note:: this will result in calls to :meth:`pymongo.collection.Collection.ensure_index` method at import time, so import all your models up front. """ for index in mcs._meta.indices: index.ensure(mcs.collection) # depends on [control=['for'], data=['index']]
def splitlines(self, keepends=False):
    """
    S.splitlines(keepends=False) -> list of strings

    Return a list of the lines in S, breaking at line boundaries.
    Line breaks are not included in the resulting list unless keepends
    is given and true.
    """
    # Py2 unicode.splitlines() takes keepends as an optional parameter,
    # not as a keyword argument as in Python 3 str.
    parts = super(newstr, self).splitlines(keepends)
    return [newstr(part) for part in parts]
def function[splitlines, parameter[self, keepends]]: constant[ S.splitlines(keepends=False) -> list of strings Return a list of the lines in S, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true. ] variable[parts] assign[=] call[call[name[super], parameter[name[newstr], name[self]]].splitlines, parameter[name[keepends]]] return[<ast.ListComp object at 0x7da18dc9b640>]
keyword[def] identifier[splitlines] ( identifier[self] , identifier[keepends] = keyword[False] ): literal[string] identifier[parts] = identifier[super] ( identifier[newstr] , identifier[self] ). identifier[splitlines] ( identifier[keepends] ) keyword[return] [ identifier[newstr] ( identifier[part] ) keyword[for] identifier[part] keyword[in] identifier[parts] ]
def splitlines(self, keepends=False): """ S.splitlines(keepends=False) -> list of strings Return a list of the lines in S, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true. """ # Py2 unicode.splitlines() takes keepends as an optional parameter, # not as a keyword argument as in Python 3 str. parts = super(newstr, self).splitlines(keepends) return [newstr(part) for part in parts]
def trimUTR(args):
    """
    %prog trimUTR gffile

    Remove UTRs in the annotation set.

    If reference GFF3 is provided, reinstate UTRs from reference transcripts
    after trimming.

    Note: After running trimUTR, it is advised to also run
    `python -m jcvi.formats.gff fixboundaries` on the resultant GFF3 to adjust
    the boundaries of all parent 'gene' features
    """
    import gffutils
    from jcvi.formats.base import SetFile

    p = OptionParser(trimUTR.__doc__)
    p.add_option("--trim5", default=None, type="str",
                 help="File containing gene list for 5' UTR trimming")
    p.add_option("--trim3", default=None, type="str",
                 help="File containing gene list for 3' UTR trimming")
    p.add_option("--trimrange", default=None, type="str",
                 help="File containing gene list for UTR trim back " +
                      "based on suggested (start, stop) coordinate range")
    p.add_option("--refgff", default=None, type="str",
                 help="Reference GFF3 used as fallback to replace UTRs")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    gffile, = args
    gff = make_index(gffile)

    trim_both = False if (opts.trim5 or opts.trim3) else True
    trim5 = SetFile(opts.trim5) if opts.trim5 else set()
    trim3 = SetFile(opts.trim3) if opts.trim3 else set()
    trimrange = dict()
    if opts.trimrange:
        trf = must_open(opts.trimrange)
        for tr in trf:
            assert len(tr.split("\t")) == 3, \
                "Must specify (start, stop) coordinate range"
            id, start, stop = tr.split("\t")
            trimrange[id] = (int(start), int(stop))
        trf.close()

    refgff = make_index(opts.refgff) if opts.refgff else None

    fw = must_open(opts.outfile, "w")
    for feat in gff.iter_by_parent_childs(featuretype="gene",
                                          order_by=("seqid", "start"), level=1):
        for c in feat:
            cid, ctype, cparent = c.id, c.featuretype, \
                c.attributes.get('Parent', [None])[0]
            t5, t3 = False, False
            if ctype == "gene":
                t5 = True if cid in trim5 else False
                t3 = True if cid in trim3 else False
                start, end = get_cds_minmax(gff, cid)
                trim(c, start, end, trim5=t5, trim3=t3, both=trim_both)
                fprint(c, fw)
            elif ctype == "mRNA":
                utr_types, extras = [], set()
                if any(id in trim5 for id in (cid, cparent)):
                    t5 = True
                    trim5.add(cid)
                if any(id in trim3 for id in (cid, cparent)):
                    t3 = True
                    trim3.add(cid)
                refc = None
                if refgff:
                    try:
                        refc = refgff[cid]
                        refctype = refc.featuretype
                        refptype = refgff[refc.attributes['Parent'][0]].featuretype
                        if refctype == "mRNA" and refptype == "gene":
                            if cmp_children(cid, gff, refgff, cftype="CDS"):
                                reinstate(c, refc, trim5=t5, trim3=t3, both=trim_both)
                                if t5:
                                    utr_types.append('five_prime_UTR')
                                if t3:
                                    utr_types.append('three_prime_UTR')
                                for utr_type in utr_types:
                                    for utr in refgff.children(refc, featuretype=utr_type):
                                        extras.add(utr)
                                        for exon in refgff.region(region=utr, featuretype="exon"):
                                            if exon.attributes['Parent'][0] == cid:
                                                extras.add(exon)
                        else:
                            refc = None
                    except gffutils.exceptions.FeatureNotFoundError:
                        pass
                start, end = get_cds_minmax(gff, cid, level=1)
                if cid in trimrange:
                    start, end = range_minmax([trimrange[cid], (start, end)])
                if not refc:
                    trim(c, start, end, trim5=t5, trim3=t3, both=trim_both)
                fprint(c, fw)
                for cc in gff.children(cid, order_by=("start")):
                    _ctype = cc.featuretype
                    if _ctype not in utr_types:
                        if _ctype != "CDS":
                            if _ctype == "exon":
                                eskip = [range_overlap(to_range(cc), to_range(x))
                                         for x in extras if x.featuretype == 'exon']
                                if any(skip for skip in eskip):
                                    continue
                            trim(cc, start, end, trim5=t5, trim3=t3, both=trim_both)
                            fprint(cc, fw)
                        else:
                            fprint(cc, fw)
                for x in extras:
                    fprint(x, fw)

    fw.close()
def function[trimUTR, parameter[args]]: constant[ %prog trimUTR gffile Remove UTRs in the annotation set. If reference GFF3 is provided, reinstate UTRs from reference transcripts after trimming. Note: After running trimUTR, it is advised to also run `python -m jcvi.formats.gff fixboundaries` on the resultant GFF3 to adjust the boundaries of all parent 'gene' features ] import module[gffutils] from relative_module[jcvi.formats.base] import module[SetFile] variable[p] assign[=] call[name[OptionParser], parameter[name[trimUTR].__doc__]] call[name[p].add_option, parameter[constant[--trim5]]] call[name[p].add_option, parameter[constant[--trim3]]] call[name[p].add_option, parameter[constant[--trimrange]]] call[name[p].add_option, parameter[constant[--refgff]]] call[name[p].set_outfile, parameter[]] <ast.Tuple object at 0x7da2041db9d0> assign[=] call[name[p].parse_args, parameter[name[args]]] if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:] call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da2041dbf70>]] <ast.Tuple object at 0x7da20e960610> assign[=] name[args] variable[gff] assign[=] call[name[make_index], parameter[name[gffile]]] variable[trim_both] assign[=] <ast.IfExp object at 0x7da20e963d90> variable[trim5] assign[=] <ast.IfExp object at 0x7da20e9609d0> variable[trim3] assign[=] <ast.IfExp object at 0x7da20e962830> variable[trimrange] assign[=] call[name[dict], parameter[]] if name[opts].trimrange begin[:] variable[trf] assign[=] call[name[must_open], parameter[name[opts].trimrange]] for taget[name[tr]] in starred[name[trf]] begin[:] assert[compare[call[name[len], parameter[call[name[tr].split, parameter[constant[ ]]]]] equal[==] constant[3]]] <ast.Tuple object at 0x7da20e963ac0> assign[=] call[name[tr].split, parameter[constant[ ]]] call[name[trimrange]][name[id]] assign[=] tuple[[<ast.Call object at 0x7da20e961f00>, <ast.Call object at 0x7da20e963370>]] call[name[trf].close, parameter[]] variable[refgff] assign[=] <ast.IfExp object at 0x7da20e960c40> variable[fw] assign[=] call[name[must_open], parameter[name[opts].outfile, constant[w]]] for taget[name[feat]] in starred[call[name[gff].iter_by_parent_childs, parameter[]]] begin[:] for taget[name[c]] in starred[name[feat]] begin[:] <ast.Tuple object at 0x7da2041d8af0> assign[=] tuple[[<ast.Attribute object at 0x7da2041d9bd0>, <ast.Attribute object at 0x7da2041d9db0>, <ast.Subscript object at 0x7da2041dba90>]] <ast.Tuple object at 0x7da2041d86d0> assign[=] tuple[[<ast.Constant object at 0x7da2041db520>, <ast.Constant object at 0x7da2041d9b70>]] if compare[name[ctype] equal[==] constant[gene]] begin[:] variable[t5] assign[=] <ast.IfExp object at 0x7da2041d9d20> variable[t3] assign[=] <ast.IfExp object at 0x7da2041d9f30> <ast.Tuple object at 0x7da2041d9210> assign[=] call[name[get_cds_minmax], parameter[name[gff], name[cid]]] call[name[trim], parameter[name[c], name[start], name[end]]] call[name[fprint], parameter[name[c], name[fw]]] call[name[fw].close, parameter[]]
keyword[def] identifier[trimUTR] ( identifier[args] ): literal[string] keyword[import] identifier[gffutils] keyword[from] identifier[jcvi] . identifier[formats] . identifier[base] keyword[import] identifier[SetFile] identifier[p] = identifier[OptionParser] ( identifier[trimUTR] . identifier[__doc__] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[None] , identifier[type] = literal[string] , identifier[help] = literal[string] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[None] , identifier[type] = literal[string] , identifier[help] = literal[string] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[None] , identifier[type] = literal[string] , identifier[help] = literal[string] + literal[string] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[None] , identifier[type] = literal[string] , identifier[help] = literal[string] ) identifier[p] . identifier[set_outfile] () identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] ) keyword[if] identifier[len] ( identifier[args] )!= literal[int] : identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ()) identifier[gffile] ,= identifier[args] identifier[gff] = identifier[make_index] ( identifier[gffile] ) identifier[trim_both] = keyword[False] keyword[if] ( identifier[opts] . identifier[trim5] keyword[or] identifier[opts] . identifier[trim3] ) keyword[else] keyword[True] identifier[trim5] = identifier[SetFile] ( identifier[opts] . identifier[trim5] ) keyword[if] identifier[opts] . identifier[trim5] keyword[else] identifier[set] () identifier[trim3] = identifier[SetFile] ( identifier[opts] . identifier[trim3] ) keyword[if] identifier[opts] . identifier[trim3] keyword[else] identifier[set] () identifier[trimrange] = identifier[dict] () keyword[if] identifier[opts] . identifier[trimrange] : identifier[trf] = identifier[must_open] ( identifier[opts] . identifier[trimrange] ) keyword[for] identifier[tr] keyword[in] identifier[trf] : keyword[assert] identifier[len] ( identifier[tr] . identifier[split] ( literal[string] ))== literal[int] , literal[string] identifier[id] , identifier[start] , identifier[stop] = identifier[tr] . identifier[split] ( literal[string] ) identifier[trimrange] [ identifier[id] ]=( identifier[int] ( identifier[start] ), identifier[int] ( identifier[stop] )) identifier[trf] . identifier[close] () identifier[refgff] = identifier[make_index] ( identifier[opts] . identifier[refgff] ) keyword[if] identifier[opts] . identifier[refgff] keyword[else] keyword[None] identifier[fw] = identifier[must_open] ( identifier[opts] . identifier[outfile] , literal[string] ) keyword[for] identifier[feat] keyword[in] identifier[gff] . identifier[iter_by_parent_childs] ( identifier[featuretype] = literal[string] , identifier[order_by] =( literal[string] , literal[string] ), identifier[level] = literal[int] ): keyword[for] identifier[c] keyword[in] identifier[feat] : identifier[cid] , identifier[ctype] , identifier[cparent] = identifier[c] . identifier[id] , identifier[c] . identifier[featuretype] , identifier[c] . identifier[attributes] . 
identifier[get] ( literal[string] ,[ keyword[None] ])[ literal[int] ] identifier[t5] , identifier[t3] = keyword[False] , keyword[False] keyword[if] identifier[ctype] == literal[string] : identifier[t5] = keyword[True] keyword[if] identifier[cid] keyword[in] identifier[trim5] keyword[else] keyword[False] identifier[t3] = keyword[True] keyword[if] identifier[cid] keyword[in] identifier[trim3] keyword[else] keyword[False] identifier[start] , identifier[end] = identifier[get_cds_minmax] ( identifier[gff] , identifier[cid] ) identifier[trim] ( identifier[c] , identifier[start] , identifier[end] , identifier[trim5] = identifier[t5] , identifier[trim3] = identifier[t3] , identifier[both] = identifier[trim_both] ) identifier[fprint] ( identifier[c] , identifier[fw] ) keyword[elif] identifier[ctype] == literal[string] : identifier[utr_types] , identifier[extras] =[], identifier[set] () keyword[if] identifier[any] ( identifier[id] keyword[in] identifier[trim5] keyword[for] identifier[id] keyword[in] ( identifier[cid] , identifier[cparent] )): identifier[t5] = keyword[True] identifier[trim5] . identifier[add] ( identifier[cid] ) keyword[if] identifier[any] ( identifier[id] keyword[in] identifier[trim3] keyword[for] identifier[id] keyword[in] ( identifier[cid] , identifier[cparent] )): identifier[t3] = keyword[True] identifier[trim3] . identifier[add] ( identifier[cid] ) identifier[refc] = keyword[None] keyword[if] identifier[refgff] : keyword[try] : identifier[refc] = identifier[refgff] [ identifier[cid] ] identifier[refctype] = identifier[refc] . identifier[featuretype] identifier[refptype] = identifier[refgff] [ identifier[refc] . identifier[attributes] [ literal[string] ][ literal[int] ]]. identifier[featuretype] keyword[if] identifier[refctype] == literal[string] keyword[and] identifier[refptype] == literal[string] : keyword[if] identifier[cmp_children] ( identifier[cid] , identifier[gff] , identifier[refgff] , identifier[cftype] = literal[string] ): identifier[reinstate] ( identifier[c] , identifier[refc] , identifier[trim5] = identifier[t5] , identifier[trim3] = identifier[t3] , identifier[both] = identifier[trim_both] ) keyword[if] identifier[t5] : identifier[utr_types] . identifier[append] ( literal[string] ) keyword[if] identifier[t3] : identifier[utr_types] . identifier[append] ( literal[string] ) keyword[for] identifier[utr_type] keyword[in] identifier[utr_types] : keyword[for] identifier[utr] keyword[in] identifier[refgff] . identifier[children] ( identifier[refc] , identifier[featuretype] = identifier[utr_type] ): identifier[extras] . identifier[add] ( identifier[utr] ) keyword[for] identifier[exon] keyword[in] identifier[refgff] . identifier[region] ( identifier[region] = identifier[utr] , identifier[featuretype] = literal[string] ): keyword[if] identifier[exon] . identifier[attributes] [ literal[string] ][ literal[int] ]== identifier[cid] : identifier[extras] . identifier[add] ( identifier[exon] ) keyword[else] : identifier[refc] = keyword[None] keyword[except] identifier[gffutils] . identifier[exceptions] . 
identifier[FeatureNotFoundError] : keyword[pass] identifier[start] , identifier[end] = identifier[get_cds_minmax] ( identifier[gff] , identifier[cid] , identifier[level] = literal[int] ) keyword[if] identifier[cid] keyword[in] identifier[trimrange] : identifier[start] , identifier[end] = identifier[range_minmax] ([ identifier[trimrange] [ identifier[cid] ],( identifier[start] , identifier[end] )]) keyword[if] keyword[not] identifier[refc] : identifier[trim] ( identifier[c] , identifier[start] , identifier[end] , identifier[trim5] = identifier[t5] , identifier[trim3] = identifier[t3] , identifier[both] = identifier[trim_both] ) identifier[fprint] ( identifier[c] , identifier[fw] ) keyword[for] identifier[cc] keyword[in] identifier[gff] . identifier[children] ( identifier[cid] , identifier[order_by] =( literal[string] )): identifier[_ctype] = identifier[cc] . identifier[featuretype] keyword[if] identifier[_ctype] keyword[not] keyword[in] identifier[utr_types] : keyword[if] identifier[_ctype] != literal[string] : keyword[if] identifier[_ctype] == literal[string] : identifier[eskip] =[ identifier[range_overlap] ( identifier[to_range] ( identifier[cc] ), identifier[to_range] ( identifier[x] )) keyword[for] identifier[x] keyword[in] identifier[extras] keyword[if] identifier[x] . identifier[featuretype] == literal[string] ] keyword[if] identifier[any] ( identifier[skip] keyword[for] identifier[skip] keyword[in] identifier[eskip] ): keyword[continue] identifier[trim] ( identifier[cc] , identifier[start] , identifier[end] , identifier[trim5] = identifier[t5] , identifier[trim3] = identifier[t3] , identifier[both] = identifier[trim_both] ) identifier[fprint] ( identifier[cc] , identifier[fw] ) keyword[else] : identifier[fprint] ( identifier[cc] , identifier[fw] ) keyword[for] identifier[x] keyword[in] identifier[extras] : identifier[fprint] ( identifier[x] , identifier[fw] ) identifier[fw] . identifier[close] ()
def trimUTR(args): """ %prog trimUTR gffile Remove UTRs in the annotation set. If reference GFF3 is provided, reinstate UTRs from reference transcripts after trimming. Note: After running trimUTR, it is advised to also run `python -m jcvi.formats.gff fixboundaries` on the resultant GFF3 to adjust the boundaries of all parent 'gene' features """ import gffutils from jcvi.formats.base import SetFile p = OptionParser(trimUTR.__doc__) p.add_option('--trim5', default=None, type='str', help="File containing gene list for 5' UTR trimming") p.add_option('--trim3', default=None, type='str', help="File containing gene list for 3' UTR trimming") p.add_option('--trimrange', default=None, type='str', help='File containing gene list for UTR trim back' + 'based on suggested (start, stop) coordinate range') p.add_option('--refgff', default=None, type='str', help='Reference GFF3 used as fallback to replace UTRs') p.set_outfile() (opts, args) = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) # depends on [control=['if'], data=[]] (gffile,) = args gff = make_index(gffile) trim_both = False if opts.trim5 or opts.trim3 else True trim5 = SetFile(opts.trim5) if opts.trim5 else set() trim3 = SetFile(opts.trim3) if opts.trim3 else set() trimrange = dict() if opts.trimrange: trf = must_open(opts.trimrange) for tr in trf: assert len(tr.split('\t')) == 3, 'Must specify (start, stop) coordinate range' (id, start, stop) = tr.split('\t') trimrange[id] = (int(start), int(stop)) # depends on [control=['for'], data=['tr']] trf.close() # depends on [control=['if'], data=[]] refgff = make_index(opts.refgff) if opts.refgff else None fw = must_open(opts.outfile, 'w') for feat in gff.iter_by_parent_childs(featuretype='gene', order_by=('seqid', 'start'), level=1): for c in feat: (cid, ctype, cparent) = (c.id, c.featuretype, c.attributes.get('Parent', [None])[0]) (t5, t3) = (False, False) if ctype == 'gene': t5 = True if cid in trim5 else False t3 = True if cid in trim3 else False (start, end) = get_cds_minmax(gff, cid) trim(c, start, end, trim5=t5, trim3=t3, both=trim_both) fprint(c, fw) # depends on [control=['if'], data=[]] elif ctype == 'mRNA': (utr_types, extras) = ([], set()) if any((id in trim5 for id in (cid, cparent))): t5 = True trim5.add(cid) # depends on [control=['if'], data=[]] if any((id in trim3 for id in (cid, cparent))): t3 = True trim3.add(cid) # depends on [control=['if'], data=[]] refc = None if refgff: try: refc = refgff[cid] refctype = refc.featuretype refptype = refgff[refc.attributes['Parent'][0]].featuretype if refctype == 'mRNA' and refptype == 'gene': if cmp_children(cid, gff, refgff, cftype='CDS'): reinstate(c, refc, trim5=t5, trim3=t3, both=trim_both) if t5: utr_types.append('five_prime_UTR') # depends on [control=['if'], data=[]] if t3: utr_types.append('three_prime_UTR') # depends on [control=['if'], data=[]] for utr_type in utr_types: for utr in refgff.children(refc, featuretype=utr_type): extras.add(utr) for exon in refgff.region(region=utr, featuretype='exon'): if exon.attributes['Parent'][0] == cid: extras.add(exon) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['exon']] # depends on [control=['for'], data=['utr']] # depends on [control=['for'], data=['utr_type']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: refc = None # depends on [control=['try'], data=[]] except gffutils.exceptions.FeatureNotFoundError: pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] (start, end) = 
get_cds_minmax(gff, cid, level=1) if cid in trimrange: (start, end) = range_minmax([trimrange[cid], (start, end)]) # depends on [control=['if'], data=['cid', 'trimrange']] if not refc: trim(c, start, end, trim5=t5, trim3=t3, both=trim_both) # depends on [control=['if'], data=[]] fprint(c, fw) for cc in gff.children(cid, order_by='start'): _ctype = cc.featuretype if _ctype not in utr_types: if _ctype != 'CDS': if _ctype == 'exon': eskip = [range_overlap(to_range(cc), to_range(x)) for x in extras if x.featuretype == 'exon'] if any((skip for skip in eskip)): continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] trim(cc, start, end, trim5=t5, trim3=t3, both=trim_both) fprint(cc, fw) # depends on [control=['if'], data=['_ctype']] else: fprint(cc, fw) # depends on [control=['if'], data=['_ctype']] # depends on [control=['for'], data=['cc']] for x in extras: fprint(x, fw) # depends on [control=['for'], data=['x']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']] # depends on [control=['for'], data=['feat']] fw.close()
def setup_matchedfltr_workflow(workflow, science_segs, datafind_outs,
                               tmplt_banks, output_dir=None,
                               injection_file=None, tags=None):
    '''
    This function aims to be the gateway for setting up a set of matched-filter
    jobs in a workflow. This function is intended to support multiple
    different ways/codes that could be used for doing this. For now the only
    supported sub-module is one that runs the matched-filtering by setting up
    a series of matched-filtering jobs, from one executable, to create
    matched-filter triggers covering the full range of science times for
    which there is data and a template bank file.

    Parameters
    -----------
    Workflow : pycbc.workflow.core.Workflow
        The workflow instance that the coincidence jobs will be added to.
    science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances
        The list of times that are being analysed in this workflow.
    datafind_outs : pycbc.workflow.core.FileList
        A FileList of the datafind files that are needed to obtain the
        data used in the analysis.
    tmplt_banks : pycbc.workflow.core.FileList
        A FileList of the template bank files that will serve as input
        in this stage.
    output_dir : path
        The directory in which output will be stored.
    injection_file : pycbc.workflow.core.File, optional (default=None)
        If given the file containing the simulation file to be sent to these
        jobs on the command line. If not given no file will be sent.
    tags : list of strings (optional, default = [])
        A list of the tagging strings that will be used for all jobs created
        by this call to the workflow. An example might be ['BNSINJECTIONS'] or
        ['NOINJECTIONANALYSIS']. This will be used in output names.

    Returns
    -------
    inspiral_outs : pycbc.workflow.core.FileList
        A list of output files written by this stage. This *will not* contain
        any intermediate products produced within this stage of the workflow.
        If you require access to any intermediate products produced at this
        stage you can call the various sub-functions directly.
    '''
    if tags is None:
        tags = []
    logging.info("Entering matched-filtering setup module.")
    make_analysis_dir(output_dir)
    cp = workflow.cp

    # Parse for options in .ini file
    mfltrMethod = cp.get_opt_tags("workflow-matchedfilter",
                                  "matchedfilter-method", tags)

    # Could have a number of choices here
    if mfltrMethod == "WORKFLOW_INDEPENDENT_IFOS":
        logging.info("Adding matched-filter jobs to workflow.")
        if cp.has_option_tags("workflow-matchedfilter",
                              "matchedfilter-link-to-tmpltbank", tags):
            if not cp.has_option_tags("workflow-tmpltbank",
                                      "tmpltbank-link-to-matchedfilter", tags):
                errMsg = "If using matchedfilter-link-to-tmpltbank, you should "
                errMsg += "also use tmpltbank-link-to-matchedfilter."
                logging.warn(errMsg)
            linkToTmpltbank = True
        else:
            linkToTmpltbank = False
        if cp.has_option_tags("workflow-matchedfilter",
                              "matchedfilter-compatibility-mode", tags):
            if not linkToTmpltbank:
                errMsg = "Compatibility mode requires that the "
                errMsg += "matchedfilter-link-to-tmpltbank option is also set."
                raise ValueError(errMsg)
            if not cp.has_option_tags("workflow-tmpltbank",
                                      "tmpltbank-compatibility-mode", tags):
                errMsg = "If using compatibility mode it must be set both in "
                errMsg += "the template bank and matched-filtering stages."
                raise ValueError(errMsg)
            compatibility_mode = True
        else:
            compatibility_mode = False

        inspiral_outs = setup_matchedfltr_dax_generated(
            workflow, science_segs, datafind_outs, tmplt_banks, output_dir,
            injection_file=injection_file, tags=tags,
            link_to_tmpltbank=linkToTmpltbank,
            compatibility_mode=compatibility_mode)
    elif mfltrMethod == "WORKFLOW_MULTIPLE_IFOS":
        logging.info("Adding matched-filter jobs to workflow.")
        inspiral_outs = setup_matchedfltr_dax_generated_multi(
            workflow, science_segs, datafind_outs, tmplt_banks, output_dir,
            injection_file=injection_file, tags=tags)
    else:
        errMsg = "Matched filter method not recognized. Must be one of "
        errMsg += "WORKFLOW_INDEPENDENT_IFOS (currently only one option)."
        raise ValueError(errMsg)

    logging.info("Leaving matched-filtering setup module.")
    return inspiral_outs
def function[setup_matchedfltr_workflow, parameter[workflow, science_segs, datafind_outs, tmplt_banks, output_dir, injection_file, tags]]: constant[ This function aims to be the gateway for setting up a set of matched-filter jobs in a workflow. This function is intended to support multiple different ways/codes that could be used for doing this. For now the only supported sub-module is one that runs the matched-filtering by setting up a serious of matched-filtering jobs, from one executable, to create matched-filter triggers covering the full range of science times for which there is data and a template bank file. Parameters ----------- Workflow : pycbc.workflow.core.Workflow The workflow instance that the coincidence jobs will be added to. science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances The list of times that are being analysed in this workflow. datafind_outs : pycbc.workflow.core.FileList An FileList of the datafind files that are needed to obtain the data used in the analysis. tmplt_banks : pycbc.workflow.core.FileList An FileList of the template bank files that will serve as input in this stage. output_dir : path The directory in which output will be stored. injection_file : pycbc.workflow.core.File, optional (default=None) If given the file containing the simulation file to be sent to these jobs on the command line. If not given no file will be sent. tags : list of strings (optional, default = []) A list of the tagging strings that will be used for all jobs created by this call to the workflow. An example might be ['BNSINJECTIONS'] or ['NOINJECTIONANALYSIS']. This will be used in output names. Returns ------- inspiral_outs : pycbc.workflow.core.FileList A list of output files written by this stage. This *will not* contain any intermediate products produced within this stage of the workflow. If you require access to any intermediate products produced at this stage you can call the various sub-functions directly. 
] if compare[name[tags] is constant[None]] begin[:] variable[tags] assign[=] list[[]] call[name[logging].info, parameter[constant[Entering matched-filtering setup module.]]] call[name[make_analysis_dir], parameter[name[output_dir]]] variable[cp] assign[=] name[workflow].cp variable[mfltrMethod] assign[=] call[name[cp].get_opt_tags, parameter[constant[workflow-matchedfilter], constant[matchedfilter-method], name[tags]]] if compare[name[mfltrMethod] equal[==] constant[WORKFLOW_INDEPENDENT_IFOS]] begin[:] call[name[logging].info, parameter[constant[Adding matched-filter jobs to workflow.]]] if call[name[cp].has_option_tags, parameter[constant[workflow-matchedfilter], constant[matchedfilter-link-to-tmpltbank], name[tags]]] begin[:] if <ast.UnaryOp object at 0x7da18bc70e50> begin[:] variable[errMsg] assign[=] constant[If using matchedfilter-link-to-tmpltbank, you should ] <ast.AugAssign object at 0x7da18bc72ef0> call[name[logging].warn, parameter[name[errMsg]]] variable[linkToTmpltbank] assign[=] constant[True] if call[name[cp].has_option_tags, parameter[constant[workflow-matchedfilter], constant[matchedfilter-compatibility-mode], name[tags]]] begin[:] if <ast.UnaryOp object at 0x7da18bc73520> begin[:] variable[errMsg] assign[=] constant[Compatibility mode requires that the ] <ast.AugAssign object at 0x7da18bc71210> <ast.Raise object at 0x7da18bc71a20> if <ast.UnaryOp object at 0x7da18bc73ac0> begin[:] variable[errMsg] assign[=] constant[If using compatibility mode it must be set both in ] <ast.AugAssign object at 0x7da18bc72440> <ast.Raise object at 0x7da18bc71bd0> variable[compatibility_mode] assign[=] constant[True] variable[inspiral_outs] assign[=] call[name[setup_matchedfltr_dax_generated], parameter[name[workflow], name[science_segs], name[datafind_outs], name[tmplt_banks], name[output_dir]]] call[name[logging].info, parameter[constant[Leaving matched-filtering setup module.]]] return[name[inspiral_outs]]
keyword[def] identifier[setup_matchedfltr_workflow] ( identifier[workflow] , identifier[science_segs] , identifier[datafind_outs] , identifier[tmplt_banks] , identifier[output_dir] = keyword[None] , identifier[injection_file] = keyword[None] , identifier[tags] = keyword[None] ): literal[string] keyword[if] identifier[tags] keyword[is] keyword[None] : identifier[tags] =[] identifier[logging] . identifier[info] ( literal[string] ) identifier[make_analysis_dir] ( identifier[output_dir] ) identifier[cp] = identifier[workflow] . identifier[cp] identifier[mfltrMethod] = identifier[cp] . identifier[get_opt_tags] ( literal[string] , literal[string] , identifier[tags] ) keyword[if] identifier[mfltrMethod] == literal[string] : identifier[logging] . identifier[info] ( literal[string] ) keyword[if] identifier[cp] . identifier[has_option_tags] ( literal[string] , literal[string] , identifier[tags] ): keyword[if] keyword[not] identifier[cp] . identifier[has_option_tags] ( literal[string] , literal[string] , identifier[tags] ): identifier[errMsg] = literal[string] identifier[errMsg] += literal[string] identifier[logging] . identifier[warn] ( identifier[errMsg] ) identifier[linkToTmpltbank] = keyword[True] keyword[else] : identifier[linkToTmpltbank] = keyword[False] keyword[if] identifier[cp] . identifier[has_option_tags] ( literal[string] , literal[string] , identifier[tags] ): keyword[if] keyword[not] identifier[linkToTmpltbank] : identifier[errMsg] = literal[string] identifier[errMsg] += literal[string] keyword[raise] identifier[ValueError] ( identifier[errMsg] ) keyword[if] keyword[not] identifier[cp] . identifier[has_option_tags] ( literal[string] , literal[string] , identifier[tags] ): identifier[errMsg] = literal[string] identifier[errMsg] += literal[string] keyword[raise] identifier[ValueError] ( identifier[errMsg] ) identifier[compatibility_mode] = keyword[True] keyword[else] : identifier[compatibility_mode] = keyword[False] identifier[inspiral_outs] = identifier[setup_matchedfltr_dax_generated] ( identifier[workflow] , identifier[science_segs] , identifier[datafind_outs] , identifier[tmplt_banks] , identifier[output_dir] , identifier[injection_file] = identifier[injection_file] , identifier[tags] = identifier[tags] , identifier[link_to_tmpltbank] = identifier[linkToTmpltbank] , identifier[compatibility_mode] = identifier[compatibility_mode] ) keyword[elif] identifier[mfltrMethod] == literal[string] : identifier[logging] . identifier[info] ( literal[string] ) identifier[inspiral_outs] = identifier[setup_matchedfltr_dax_generated_multi] ( identifier[workflow] , identifier[science_segs] , identifier[datafind_outs] , identifier[tmplt_banks] , identifier[output_dir] , identifier[injection_file] = identifier[injection_file] , identifier[tags] = identifier[tags] ) keyword[else] : identifier[errMsg] = literal[string] identifier[errMsg] += literal[string] keyword[raise] identifier[ValueError] ( identifier[errMsg] ) identifier[logging] . identifier[info] ( literal[string] ) keyword[return] identifier[inspiral_outs]
def setup_matchedfltr_workflow(workflow, science_segs, datafind_outs, tmplt_banks, output_dir=None, injection_file=None, tags=None): """ This function aims to be the gateway for setting up a set of matched-filter jobs in a workflow. This function is intended to support multiple different ways/codes that could be used for doing this. For now the only supported sub-module is one that runs the matched-filtering by setting up a serious of matched-filtering jobs, from one executable, to create matched-filter triggers covering the full range of science times for which there is data and a template bank file. Parameters ----------- Workflow : pycbc.workflow.core.Workflow The workflow instance that the coincidence jobs will be added to. science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances The list of times that are being analysed in this workflow. datafind_outs : pycbc.workflow.core.FileList An FileList of the datafind files that are needed to obtain the data used in the analysis. tmplt_banks : pycbc.workflow.core.FileList An FileList of the template bank files that will serve as input in this stage. output_dir : path The directory in which output will be stored. injection_file : pycbc.workflow.core.File, optional (default=None) If given the file containing the simulation file to be sent to these jobs on the command line. If not given no file will be sent. tags : list of strings (optional, default = []) A list of the tagging strings that will be used for all jobs created by this call to the workflow. An example might be ['BNSINJECTIONS'] or ['NOINJECTIONANALYSIS']. This will be used in output names. Returns ------- inspiral_outs : pycbc.workflow.core.FileList A list of output files written by this stage. This *will not* contain any intermediate products produced within this stage of the workflow. If you require access to any intermediate products produced at this stage you can call the various sub-functions directly. """ if tags is None: tags = [] # depends on [control=['if'], data=['tags']] logging.info('Entering matched-filtering setup module.') make_analysis_dir(output_dir) cp = workflow.cp # Parse for options in .ini file mfltrMethod = cp.get_opt_tags('workflow-matchedfilter', 'matchedfilter-method', tags) # Could have a number of choices here if mfltrMethod == 'WORKFLOW_INDEPENDENT_IFOS': logging.info('Adding matched-filter jobs to workflow.') if cp.has_option_tags('workflow-matchedfilter', 'matchedfilter-link-to-tmpltbank', tags): if not cp.has_option_tags('workflow-tmpltbank', 'tmpltbank-link-to-matchedfilter', tags): errMsg = 'If using matchedfilter-link-to-tmpltbank, you should ' errMsg += 'also use tmpltbank-link-to-matchedfilter.' logging.warn(errMsg) # depends on [control=['if'], data=[]] linkToTmpltbank = True # depends on [control=['if'], data=[]] else: linkToTmpltbank = False if cp.has_option_tags('workflow-matchedfilter', 'matchedfilter-compatibility-mode', tags): if not linkToTmpltbank: errMsg = 'Compatibility mode requires that the ' errMsg += 'matchedfilter-link-to-tmpltbank option is also set.' raise ValueError(errMsg) # depends on [control=['if'], data=[]] if not cp.has_option_tags('workflow-tmpltbank', 'tmpltbank-compatibility-mode', tags): errMsg = 'If using compatibility mode it must be set both in ' errMsg += 'the template bank and matched-filtering stages.' 
raise ValueError(errMsg) # depends on [control=['if'], data=[]] compatibility_mode = True # depends on [control=['if'], data=[]] else: compatibility_mode = False inspiral_outs = setup_matchedfltr_dax_generated(workflow, science_segs, datafind_outs, tmplt_banks, output_dir, injection_file=injection_file, tags=tags, link_to_tmpltbank=linkToTmpltbank, compatibility_mode=compatibility_mode) # depends on [control=['if'], data=[]] elif mfltrMethod == 'WORKFLOW_MULTIPLE_IFOS': logging.info('Adding matched-filter jobs to workflow.') inspiral_outs = setup_matchedfltr_dax_generated_multi(workflow, science_segs, datafind_outs, tmplt_banks, output_dir, injection_file=injection_file, tags=tags) # depends on [control=['if'], data=[]] else: errMsg = 'Matched filter method not recognized. Must be one of ' errMsg += 'WORKFLOW_INDEPENDENT_IFOS (currently only one option).' raise ValueError(errMsg) logging.info('Leaving matched-filtering setup module.') return inspiral_outs
def append(self, event, category=None):
    """
    Adds a new event to the trace store.
    The event may have a category.

    Args:
        event (spade.message.Message): the event to be stored
        category (str, optional): a category to classify the event
            (Default value = None)
    """
    date = datetime.datetime.now()
    self.store.insert(0, (date, event, category))
    if len(self.store) > self.size:
        del self.store[-1]
def function[append, parameter[self, event, category]]: constant[ Adds a new event to the trace store. The event may hava a category Args: event (spade.message.Message): the event to be stored category (str, optional): a category to classify the event (Default value = None) ] variable[date] assign[=] call[name[datetime].datetime.now, parameter[]] call[name[self].store.insert, parameter[constant[0], tuple[[<ast.Name object at 0x7da1b0791420>, <ast.Name object at 0x7da1b0791480>, <ast.Name object at 0x7da1b0791450>]]]] if compare[call[name[len], parameter[name[self].store]] greater[>] name[self].size] begin[:] <ast.Delete object at 0x7da1b0790490>
keyword[def] identifier[append] ( identifier[self] , identifier[event] , identifier[category] = keyword[None] ): literal[string] identifier[date] = identifier[datetime] . identifier[datetime] . identifier[now] () identifier[self] . identifier[store] . identifier[insert] ( literal[int] ,( identifier[date] , identifier[event] , identifier[category] )) keyword[if] identifier[len] ( identifier[self] . identifier[store] )> identifier[self] . identifier[size] : keyword[del] identifier[self] . identifier[store] [- literal[int] ]
def append(self, event, category=None): """ Adds a new event to the trace store. The event may hava a category Args: event (spade.message.Message): the event to be stored category (str, optional): a category to classify the event (Default value = None) """ date = datetime.datetime.now() self.store.insert(0, (date, event, category)) if len(self.store) > self.size: del self.store[-1] # depends on [control=['if'], data=[]]
def get_sysid(self):
    '''get sysid tuple to use for parameters'''
    component = self.target_component
    if component == 0:
        component = 1
    return (self.target_system, component)
def function[get_sysid, parameter[self]]: constant[get sysid tuple to use for parameters] variable[component] assign[=] name[self].target_component if compare[name[component] equal[==] constant[0]] begin[:] variable[component] assign[=] constant[1] return[tuple[[<ast.Attribute object at 0x7da1b162ba60>, <ast.Name object at 0x7da1b1629630>]]]
keyword[def] identifier[get_sysid] ( identifier[self] ): literal[string] identifier[component] = identifier[self] . identifier[target_component] keyword[if] identifier[component] == literal[int] : identifier[component] = literal[int] keyword[return] ( identifier[self] . identifier[target_system] , identifier[component] )
def get_sysid(self): """get sysid tuple to use for parameters""" component = self.target_component if component == 0: component = 1 # depends on [control=['if'], data=['component']] return (self.target_system, component)
def stream(self, handler, whenDone=None):
    """
    Fetches data from river streams and feeds them into the given function.

    :param handler: (function) passed headers [list] and row [list] of the
                    data for one time step, for every row of data
    """
    self._createConfluence()
    headers = ["timestamp"] + self.getStreamIds()
    for row in self._confluence:
        handler(headers, row)
    if whenDone is not None:
        return whenDone()
def function[stream, parameter[self, handler, whenDone]]: constant[ Fetches data from river streams and feeds them into the given function. :param handler: (function) passed headers [list] and row [list] of the data for one time step, for every row of data ] call[name[self]._createConfluence, parameter[]] variable[headers] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b2368e50>]] + call[name[self].getStreamIds, parameter[]]] for taget[name[row]] in starred[name[self]._confluence] begin[:] call[name[handler], parameter[name[headers], name[row]]] if compare[name[whenDone] is_not constant[None]] begin[:] return[call[name[whenDone], parameter[]]]
keyword[def] identifier[stream] ( identifier[self] , identifier[handler] , identifier[whenDone] = keyword[None] ): literal[string] identifier[self] . identifier[_createConfluence] () identifier[headers] =[ literal[string] ]+ identifier[self] . identifier[getStreamIds] () keyword[for] identifier[row] keyword[in] identifier[self] . identifier[_confluence] : identifier[handler] ( identifier[headers] , identifier[row] ) keyword[if] identifier[whenDone] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[whenDone] ()
def stream(self, handler, whenDone=None): """ Fetches data from river streams and feeds them into the given function. :param handler: (function) passed headers [list] and row [list] of the data for one time step, for every row of data """ self._createConfluence() headers = ['timestamp'] + self.getStreamIds() for row in self._confluence: handler(headers, row) # depends on [control=['for'], data=['row']] if whenDone is not None: return whenDone() # depends on [control=['if'], data=['whenDone']]
def static_singleton(*args, **kwargs):
    """
    STATIC Singleton Design Pattern Decorator
    Class is initialized with arguments passed into the decorator.

    :Usage:
        >>> @static_singleton('yop')
        class Bob(Person):
            def __init__(arg1):
                self.info = arg1

            def says(self):
                print self.info

        b1 = Bob  # note that we call it by the name of the class, no instance
                  # created here, kind of static linking to an instance
        b2 = Bob  # here b1 is the same object as b2
        Bob.says()  # it will display 'yop'
    """
    def __static_singleton_wrapper(cls):
        if cls not in __singleton_instances:
            __singleton_instances[cls] = cls(*args, **kwargs)
        return __singleton_instances[cls]

    return __static_singleton_wrapper
def function[static_singleton, parameter[]]: constant[ STATIC Singleton Design Pattern Decorator Class is initialized with arguments passed into the decorator. :Usage: >>> @static_singleton('yop') class Bob(Person): def __init__(arg1): self.info = arg1 def says(self): print self.info b1 = Bob #note that we call it by the name of the class, no instance created here, kind of static linking to an instance b2 = Bob #here b1 is the same object as b2 Bob.says() # it will display 'yop' ] def function[__static_singleton_wrapper, parameter[cls]]: if compare[name[cls] <ast.NotIn object at 0x7da2590d7190> name[__singleton_instances]] begin[:] call[name[__singleton_instances]][name[cls]] assign[=] call[name[cls], parameter[<ast.Starred object at 0x7da18bcca8c0>]] return[call[name[__singleton_instances]][name[cls]]] return[name[__static_singleton_wrapper]]
keyword[def] identifier[static_singleton] (* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[def] identifier[__static_singleton_wrapper] ( identifier[cls] ): keyword[if] identifier[cls] keyword[not] keyword[in] identifier[__singleton_instances] : identifier[__singleton_instances] [ identifier[cls] ]= identifier[cls] (* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[__singleton_instances] [ identifier[cls] ] keyword[return] identifier[__static_singleton_wrapper]
def static_singleton(*args, **kwargs): """ STATIC Singleton Design Pattern Decorator Class is initialized with arguments passed into the decorator. :Usage: >>> @static_singleton('yop') class Bob(Person): def __init__(arg1): self.info = arg1 def says(self): print self.info b1 = Bob #note that we call it by the name of the class, no instance created here, kind of static linking to an instance b2 = Bob #here b1 is the same object as b2 Bob.says() # it will display 'yop' """ def __static_singleton_wrapper(cls): if cls not in __singleton_instances: __singleton_instances[cls] = cls(*args, **kwargs) # depends on [control=['if'], data=['cls', '__singleton_instances']] return __singleton_instances[cls] return __static_singleton_wrapper
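static_singleton above relies on a module-level __singleton_instances registry that is not shown in the snippet. A minimal self-contained sketch of the same pattern, with that registry made explicit (the names here are illustrative, not taken from the original module):

_singleton_instances = {}

def static_singleton(*args, **kwargs):
    def wrapper(cls):
        # Instantiate the class once, with the decorator's arguments,
        # and return that single instance in place of the class object.
        if cls not in _singleton_instances:
            _singleton_instances[cls] = cls(*args, **kwargs)
        return _singleton_instances[cls]
    return wrapper

@static_singleton("yop")
class Bob:
    def __init__(self, info):
        self.info = info

    def says(self):
        print(self.info)

Bob.says()  # prints "yop"; the name Bob is already bound to an instance, not a class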
def retrieve_tx(self, txid):
    """Returns rawtx for <txid>."""
    txid = deserialize.txid(txid)
    tx = self.service.get_tx(txid)
    return serialize.tx(tx)
def function[retrieve_tx, parameter[self, txid]]: constant[Returns rawtx for <txid>.] variable[txid] assign[=] call[name[deserialize].txid, parameter[name[txid]]] variable[tx] assign[=] call[name[self].service.get_tx, parameter[name[txid]]] return[call[name[serialize].tx, parameter[name[tx]]]]
keyword[def] identifier[retrieve_tx] ( identifier[self] , identifier[txid] ): literal[string] identifier[txid] = identifier[deserialize] . identifier[txid] ( identifier[txid] ) identifier[tx] = identifier[self] . identifier[service] . identifier[get_tx] ( identifier[txid] ) keyword[return] identifier[serialize] . identifier[tx] ( identifier[tx] )
def retrieve_tx(self, txid): """Returns rawtx for <txid>.""" txid = deserialize.txid(txid) tx = self.service.get_tx(txid) return serialize.tx(tx)
def startInventory(self, proto=None, force_regen_rospec=False):
    """Add a ROSpec to the reader and enable it."""
    if self.state == LLRPClient.STATE_INVENTORYING:
        logger.warn('ignoring startInventory() while already inventorying')
        return None

    rospec = self.getROSpec(force_new=force_regen_rospec)['ROSpec']

    logger.info('starting inventory')

    # upside-down chain of callbacks: add, enable, start ROSpec
    # started_rospec = defer.Deferred()
    # started_rospec.addCallback(self._setState_wrapper,
    #                            LLRPClient.STATE_INVENTORYING)
    # started_rospec.addErrback(self.panic, 'START_ROSPEC failed')
    # logger.debug('made started_rospec')

    enabled_rospec = defer.Deferred()
    enabled_rospec.addCallback(self._setState_wrapper,
                               LLRPClient.STATE_INVENTORYING)
    # enabled_rospec.addCallback(self.send_START_ROSPEC, rospec,
    #                            onCompletion=started_rospec)
    enabled_rospec.addErrback(self.panic, 'ENABLE_ROSPEC failed')
    logger.debug('made enabled_rospec')

    added_rospec = defer.Deferred()
    added_rospec.addCallback(self.send_ENABLE_ROSPEC, rospec,
                             onCompletion=enabled_rospec)
    added_rospec.addErrback(self.panic, 'ADD_ROSPEC failed')
    logger.debug('made added_rospec')

    self.send_ADD_ROSPEC(rospec, onCompletion=added_rospec)
def function[startInventory, parameter[self, proto, force_regen_rospec]]: constant[Add a ROSpec to the reader and enable it.] if compare[name[self].state equal[==] name[LLRPClient].STATE_INVENTORYING] begin[:] call[name[logger].warn, parameter[constant[ignoring startInventory() while already inventorying]]] return[constant[None]] variable[rospec] assign[=] call[call[name[self].getROSpec, parameter[]]][constant[ROSpec]] call[name[logger].info, parameter[constant[starting inventory]]] variable[enabled_rospec] assign[=] call[name[defer].Deferred, parameter[]] call[name[enabled_rospec].addCallback, parameter[name[self]._setState_wrapper, name[LLRPClient].STATE_INVENTORYING]] call[name[enabled_rospec].addErrback, parameter[name[self].panic, constant[ENABLE_ROSPEC failed]]] call[name[logger].debug, parameter[constant[made enabled_rospec]]] variable[added_rospec] assign[=] call[name[defer].Deferred, parameter[]] call[name[added_rospec].addCallback, parameter[name[self].send_ENABLE_ROSPEC, name[rospec]]] call[name[added_rospec].addErrback, parameter[name[self].panic, constant[ADD_ROSPEC failed]]] call[name[logger].debug, parameter[constant[made added_rospec]]] call[name[self].send_ADD_ROSPEC, parameter[name[rospec]]]
keyword[def] identifier[startInventory] ( identifier[self] , identifier[proto] = keyword[None] , identifier[force_regen_rospec] = keyword[False] ): literal[string] keyword[if] identifier[self] . identifier[state] == identifier[LLRPClient] . identifier[STATE_INVENTORYING] : identifier[logger] . identifier[warn] ( literal[string] ) keyword[return] keyword[None] identifier[rospec] = identifier[self] . identifier[getROSpec] ( identifier[force_new] = identifier[force_regen_rospec] )[ literal[string] ] identifier[logger] . identifier[info] ( literal[string] ) identifier[enabled_rospec] = identifier[defer] . identifier[Deferred] () identifier[enabled_rospec] . identifier[addCallback] ( identifier[self] . identifier[_setState_wrapper] , identifier[LLRPClient] . identifier[STATE_INVENTORYING] ) identifier[enabled_rospec] . identifier[addErrback] ( identifier[self] . identifier[panic] , literal[string] ) identifier[logger] . identifier[debug] ( literal[string] ) identifier[added_rospec] = identifier[defer] . identifier[Deferred] () identifier[added_rospec] . identifier[addCallback] ( identifier[self] . identifier[send_ENABLE_ROSPEC] , identifier[rospec] , identifier[onCompletion] = identifier[enabled_rospec] ) identifier[added_rospec] . identifier[addErrback] ( identifier[self] . identifier[panic] , literal[string] ) identifier[logger] . identifier[debug] ( literal[string] ) identifier[self] . identifier[send_ADD_ROSPEC] ( identifier[rospec] , identifier[onCompletion] = identifier[added_rospec] )
def startInventory(self, proto=None, force_regen_rospec=False): """Add a ROSpec to the reader and enable it.""" if self.state == LLRPClient.STATE_INVENTORYING: logger.warn('ignoring startInventory() while already inventorying') return None # depends on [control=['if'], data=[]] rospec = self.getROSpec(force_new=force_regen_rospec)['ROSpec'] logger.info('starting inventory') # upside-down chain of callbacks: add, enable, start ROSpec # started_rospec = defer.Deferred() # started_rospec.addCallback(self._setState_wrapper, # LLRPClient.STATE_INVENTORYING) # started_rospec.addErrback(self.panic, 'START_ROSPEC failed') # logger.debug('made started_rospec') enabled_rospec = defer.Deferred() enabled_rospec.addCallback(self._setState_wrapper, LLRPClient.STATE_INVENTORYING) # enabled_rospec.addCallback(self.send_START_ROSPEC, rospec, # onCompletion=started_rospec) enabled_rospec.addErrback(self.panic, 'ENABLE_ROSPEC failed') logger.debug('made enabled_rospec') added_rospec = defer.Deferred() added_rospec.addCallback(self.send_ENABLE_ROSPEC, rospec, onCompletion=enabled_rospec) added_rospec.addErrback(self.panic, 'ADD_ROSPEC failed') logger.debug('made added_rospec') self.send_ADD_ROSPEC(rospec, onCompletion=added_rospec)
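startInventory builds its LLRP message sequence as an "upside-down" chain of Twisted Deferreds: the completion handler for a later step is created before the message that triggers it is sent. A minimal sketch of that chaining style, independent of sllurp (the step names and payloads are invented for illustration, and it assumes the twisted package is available):

from twisted.internet import defer

def panic(failure, label):
    print("step failed:", label, failure)

def on_enabled(result):
    print("ROSpec enabled, now inventorying:", result)

# Build the later step first...
enabled = defer.Deferred()
enabled.addCallback(on_enabled)
enabled.addErrback(panic, 'ENABLE failed')

# ...then the earlier step, which fires the later one when it completes.
added = defer.Deferred()
added.addCallback(lambda result: enabled.callback(result))
added.addErrback(panic, 'ADD failed')

# Simulate the reader acknowledging ADD_ROSPEC:
added.callback('rospec-1')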
def __array_op(self, f, x, axis):
    """operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray)

    :param f: operator function
    :param x: array(1D, 2D) or field (2D) or View (2D)
    :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
    :return: dict with result of operation (same form as field.d)
    """
    if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:
        raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')
    d = {}
    #x is a vector (only numpy ndarray)
    if isinstance(axis, int) and isinstance(x, ndarray):
        if len(self.__partition.mesh.bounds[0]) == 3:
            try:
                for i in self.__d:
                    try:
                        ind = self.__indices(self.partition.meta_data[i], self.__mask)
                    except:
                        raise ValueError("Indices geht nicht.")
                    if axis == 0:
                        d[i] = f(self.__d[i], x[ind[0]][:, newaxis, newaxis])
                    elif axis == 1:
                        d[i] = f(self.__d[i], x[ind[1]][:, newaxis])
                    elif axis == 2:
                        d[i] = f(self.__d[i], x[ind[2]])
                    else:
                        raise ValueError('"axis" can only have value 0, 1 or 2 .')
                    self.__d[i][:] = d[i][:]
            except:
                raise ValueError('Vector does not have same length as Field along axis %d.' % axis)
        elif len(self.__partition.mesh.bounds[0]) == 2:
            try:
                for i in self.__d:
                    ind = self.__indices(self.partition.meta_data[i], self.__mask)
                    if axis == 0:
                        d[i] = f(self.__d[i], x[ind[0]][:, newaxis])
                    elif axis == 1:
                        d[i] = f(self.__d[i], x[ind[1]][:])
                    else:
                        raise ValueError('"axis" can only have value 0 or 2 .')
                    self.__d[i][:] = d[i][:]
            except:
                raise ValueError('Vector does not have same length as Field along axis %d.' % axis)
    #x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)
    elif len(axis) == 2:
        #operation for 2D-arrays
        if isinstance(x, ndarray):
            try:
                for i in self.__d:
                    ind = self.__indices(self.partition.meta_data[i], self.__mask)
                    if axis == (0, 1) or axis == (1, 0):
                        d[i] = f(self.__d[i], x[ind[0], ind[1]][:, :, newaxis])
                    elif axis == (1, 2) or axis == (2, 1):
                        d[i] = f(self.__d[i], x[ind[1], ind[2]])
                    elif axis == (0, 2) or axis == (2, 0):
                        d[i] = f(self.__d[i], x[ind[0], ind[2]][:, newaxis, :])
                    else:
                        raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
                    self.__d[i][:] = d[i][:]
            except:
                raise ValueError('2D-Array does not fit to plane %s of Field' % (axis,))
        #operation for 2D Fields or View (Field from same origin mesh but bounds like View has)
        elif isinstance(x, Field) or isinstance(x, View):
            if axis == (0, 1) or axis == (1, 0):
                try:
                    for i in self.__d:
                        d[i] = f(self.__d[i], x.d[(i[0], i[1])][:, :, newaxis])
                except:
                    raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')
            elif axis == (1, 2) or axis == (2, 1):
                try:
                    for i in self.__d:
                        d[i] = f(self.__d[i], x.d[(i[1], i[2])])
                except:
                    raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')
            elif axis == (0, 2) or axis == (2, 0):
                try:
                    for i in self.__d:
                        d[i] = f(self.__d[i], x.d[(i[0], i[2])][:, newaxis, :])
                except:
                    raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')
            else:
                raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
        else:
            raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')
    else:
        raise ValueError('Argument "axis" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')
    return d
def function[__array_op, parameter[self, f, x, axis]]: constant[operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray) :param f: operator function :param x: array(1D, 2D) or field (2D) or View (2D) :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis :return: dict with result of operation (same form as field.d) ] if <ast.BoolOp object at 0x7da1b2873d90> begin[:] <ast.Raise object at 0x7da1b28ddc90> variable[d] assign[=] dictionary[[], []] if <ast.BoolOp object at 0x7da1b28dd390> begin[:] if compare[call[name[len], parameter[call[name[self].__partition.mesh.bounds][constant[0]]]] equal[==] constant[3]] begin[:] <ast.Try object at 0x7da1b28df430> return[name[d]]
keyword[def] identifier[__array_op] ( identifier[self] , identifier[f] , identifier[x] , identifier[axis] ): literal[string] keyword[if] identifier[isinstance] ( identifier[x] , identifier[ndarray] )== keyword[False] keyword[and] identifier[isinstance] ( identifier[x] , identifier[Field] )== keyword[False] keyword[and] identifier[isinstance] ( identifier[x] , identifier[View] )== keyword[False] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[d] ={} keyword[if] identifier[isinstance] ( identifier[axis] , identifier[int] ) keyword[and] identifier[isinstance] ( identifier[x] , identifier[ndarray] ): keyword[if] identifier[len] ( identifier[self] . identifier[__partition] . identifier[mesh] . identifier[bounds] [ literal[int] ])== literal[int] : keyword[try] : keyword[for] identifier[i] keyword[in] identifier[self] . identifier[__d] : keyword[try] : identifier[ind] = identifier[self] . identifier[__indices] ( identifier[self] . identifier[partition] . identifier[meta_data] [ identifier[i] ], identifier[self] . identifier[__mask] ) keyword[except] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[axis] == literal[int] : identifier[d] [ identifier[i] ]= identifier[f] ( identifier[self] . identifier[__d] [ identifier[i] ], identifier[x] [ identifier[ind] [ literal[int] ]][:, identifier[newaxis] , identifier[newaxis] ]) keyword[elif] identifier[axis] == literal[int] : identifier[d] [ identifier[i] ]= identifier[f] ( identifier[self] . identifier[__d] [ identifier[i] ], identifier[x] [ identifier[ind] [ literal[int] ]][:, identifier[newaxis] ]) keyword[elif] identifier[axis] == literal[int] : identifier[d] [ identifier[i] ]= identifier[f] ( identifier[self] . identifier[__d] [ identifier[i] ], identifier[x] [ identifier[ind] [ literal[int] ]]) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[self] . identifier[__d] [ identifier[i] ][:]= identifier[d] [ identifier[i] ][:] keyword[except] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[axis] ) keyword[elif] identifier[len] ( identifier[self] . identifier[__partition] . identifier[mesh] . identifier[bounds] [ literal[int] ])== literal[int] : keyword[try] : keyword[for] identifier[i] keyword[in] identifier[self] . identifier[__d] : identifier[ind] = identifier[self] . identifier[__indices] ( identifier[self] . identifier[partition] . identifier[meta_data] [ identifier[i] ], identifier[self] . identifier[__mask] ) keyword[if] identifier[axis] == literal[int] : identifier[d] [ identifier[i] ]= identifier[f] ( identifier[self] . identifier[__d] [ identifier[i] ], identifier[x] [ identifier[ind] [ literal[int] ]][:, identifier[newaxis] ]) keyword[elif] identifier[axis] == literal[int] : identifier[d] [ identifier[i] ]= identifier[f] ( identifier[self] . identifier[__d] [ identifier[i] ], identifier[x] [ identifier[ind] [ literal[int] ]][:]) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[self] . identifier[__d] [ identifier[i] ][:]= identifier[d] [ identifier[i] ][:] keyword[except] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[axis] ) keyword[elif] identifier[len] ( identifier[axis] )== literal[int] : keyword[if] identifier[isinstance] ( identifier[x] , identifier[ndarray] ): keyword[try] : keyword[for] identifier[i] keyword[in] identifier[self] . identifier[__d] : identifier[ind] = identifier[self] . identifier[__indices] ( identifier[self] . identifier[partition] . 
identifier[meta_data] [ identifier[i] ], identifier[self] . identifier[__mask] ) keyword[if] identifier[axis] ==( literal[int] , literal[int] ) keyword[or] identifier[axis] ==( literal[int] , literal[int] ): identifier[d] [ identifier[i] ]= identifier[f] ( identifier[self] . identifier[__d] [ identifier[i] ], identifier[x] [ identifier[ind] [ literal[int] ], identifier[ind] [ literal[int] ]][:,:, identifier[newaxis] ]) keyword[elif] identifier[axis] ==( literal[int] , literal[int] ) keyword[or] identifier[axis] ==( literal[int] , literal[int] ): identifier[d] [ identifier[i] ]= identifier[f] ( identifier[self] . identifier[__d] [ identifier[i] ], identifier[x] [ identifier[ind] [ literal[int] ], identifier[ind] [ literal[int] ]]) keyword[elif] identifier[axis] ==( literal[int] , literal[int] ) keyword[or] identifier[axis] ==( literal[int] , literal[int] ): identifier[d] [ identifier[i] ]= identifier[f] ( identifier[self] . identifier[__d] [ identifier[i] ], identifier[x] [ identifier[ind] [ literal[int] ], identifier[ind] [ literal[int] ]][:, identifier[newaxis] ,:]) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[self] . identifier[__d] [ identifier[i] ][:]= identifier[d] [ identifier[i] ][:] keyword[except] : keyword[raise] identifier[ValueError] ( literal[string] %( identifier[axis] ,)) keyword[elif] identifier[isinstance] ( identifier[x] , identifier[Field] ) keyword[or] identifier[isinstance] ( identifier[x] , identifier[View] ): keyword[if] identifier[axis] ==( literal[int] , literal[int] ) keyword[or] identifier[axis] ==( literal[int] , literal[int] ): keyword[try] : keyword[for] identifier[i] keyword[in] identifier[self] . identifier[__d] : identifier[d] [ identifier[i] ]= identifier[f] ( identifier[self] . identifier[__d] [ identifier[i] ], identifier[x] . identifier[d] [( identifier[i] [ literal[int] ], identifier[i] [ literal[int] ])][:,:, identifier[newaxis] ]) keyword[except] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[elif] identifier[axis] ==( literal[int] , literal[int] ) keyword[or] identifier[axis] ==( literal[int] , literal[int] ): keyword[try] : keyword[for] identifier[i] keyword[in] identifier[self] . identifier[__d] : identifier[d] [ identifier[i] ]= identifier[f] ( identifier[self] . identifier[__d] [ identifier[i] ], identifier[x] . identifier[d] [( identifier[i] [ literal[int] ], identifier[i] [ literal[int] ])]) keyword[except] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[elif] identifier[axis] ==( literal[int] , literal[int] ) keyword[or] identifier[axis] ==( literal[int] , literal[int] ): keyword[try] : keyword[for] identifier[i] keyword[in] identifier[self] . identifier[__d] : identifier[d] [ identifier[i] ]= identifier[f] ( identifier[self] . identifier[__d] [ identifier[i] ], identifier[x] . identifier[d] [( identifier[i] [ literal[int] ], identifier[i] [ literal[int] ])][:, identifier[newaxis] ,:]) keyword[except] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[d]
def __array_op(self, f, x, axis): """operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray) :param f: operator function :param x: array(1D, 2D) or field (2D) or View (2D) :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis :return: dict with result of operation (same form as field.d) """ if isinstance(x, ndarray) == False and isinstance(x, Field) == False and (isinstance(x, View) == False): raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2') # depends on [control=['if'], data=[]] d = {} #x is a vector (only numpy ndarray) if isinstance(axis, int) and isinstance(x, ndarray): if len(self.__partition.mesh.bounds[0]) == 3: try: for i in self.__d: try: ind = self.__indices(self.partition.meta_data[i], self.__mask) # depends on [control=['try'], data=[]] except: raise ValueError('Indices geht nicht.') # depends on [control=['except'], data=[]] if axis == 0: d[i] = f(self.__d[i], x[ind[0]][:, newaxis, newaxis]) # depends on [control=['if'], data=[]] elif axis == 1: d[i] = f(self.__d[i], x[ind[1]][:, newaxis]) # depends on [control=['if'], data=[]] elif axis == 2: d[i] = f(self.__d[i], x[ind[2]]) # depends on [control=['if'], data=[]] else: raise ValueError('"axis" can only have value 0, 1 or 2 .') self.__d[i][:] = d[i][:] # depends on [control=['for'], data=['i']] # depends on [control=['try'], data=[]] except: raise ValueError('Vector does not have same length as Field along axis %d.' % axis) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] elif len(self.__partition.mesh.bounds[0]) == 2: try: for i in self.__d: ind = self.__indices(self.partition.meta_data[i], self.__mask) if axis == 0: d[i] = f(self.__d[i], x[ind[0]][:, newaxis]) # depends on [control=['if'], data=[]] elif axis == 1: d[i] = f(self.__d[i], x[ind[1]][:]) # depends on [control=['if'], data=[]] else: raise ValueError('"axis" can only have value 0 or 2 .') self.__d[i][:] = d[i][:] # depends on [control=['for'], data=['i']] # depends on [control=['try'], data=[]] except: raise ValueError('Vector does not have same length as Field along axis %d.' 
% axis) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] #x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field) elif len(axis) == 2: #operation for 2D-arrays if isinstance(x, ndarray): try: for i in self.__d: ind = self.__indices(self.partition.meta_data[i], self.__mask) if axis == (0, 1) or axis == (1, 0): d[i] = f(self.__d[i], x[ind[0], ind[1]][:, :, newaxis]) # depends on [control=['if'], data=[]] elif axis == (1, 2) or axis == (2, 1): d[i] = f(self.__d[i], x[ind[1], ind[2]]) # depends on [control=['if'], data=[]] elif axis == (0, 2) or axis == (2, 0): d[i] = f(self.__d[i], x[ind[0], ind[2]][:, newaxis, :]) # depends on [control=['if'], data=[]] else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).') self.__d[i][:] = d[i][:] # depends on [control=['for'], data=['i']] # depends on [control=['try'], data=[]] except: raise ValueError('2D-Array does not fit to plane %s of Field' % (axis,)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] #operation for 2D Fields or View (Field from same origin mesh but bounds like View has) elif isinstance(x, Field) or isinstance(x, View): if axis == (0, 1) or axis == (1, 0): try: for i in self.__d: d[i] = f(self.__d[i], x.d[i[0], i[1]][:, :, newaxis]) # depends on [control=['for'], data=['i']] # depends on [control=['try'], data=[]] except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] elif axis == (1, 2) or axis == (2, 1): try: for i in self.__d: d[i] = f(self.__d[i], x.d[i[1], i[2]]) # depends on [control=['for'], data=['i']] # depends on [control=['try'], data=[]] except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] elif axis == (0, 2) or axis == (2, 0): try: for i in self.__d: d[i] = f(self.__d[i], x.d[i[0], i[2]][:, newaxis, :]) # depends on [control=['for'], data=['i']] # depends on [control=['try'], data=[]] except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).') # depends on [control=['if'], data=[]] else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))') # depends on [control=['if'], data=[]] else: raise ValueError('Argument "axis" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)') return d
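Illustrative note (not part of the dataset row above): the axis handling in __array_op reduces to numpy broadcasting with newaxis. A small standalone check, with arbitrary shapes:

import numpy as np
from operator import add

block = np.zeros((2, 3, 4))            # a 3-D chunk of a field
vec_x = np.array([10., 20.])           # length matches axis 0
plane_yz = np.ones((3, 4))             # matches axes (1, 2)

print(add(block, vec_x[:, np.newaxis, np.newaxis]).shape)  # (2, 3, 4) -- the axis=0 case
print(add(block, plane_yz).shape)                          # (2, 3, 4) -- the axis=(1, 2) case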
def _draw_text(self, pos, text, font, **kw):
    """
    Remember a single drawable tuple to paint later.
    """
    self.drawables.append((pos, text, font, kw))
def function[_draw_text, parameter[self, pos, text, font]]: constant[ Remember a single drawable tuple to paint later. ] call[name[self].drawables.append, parameter[tuple[[<ast.Name object at 0x7da18bc72d10>, <ast.Name object at 0x7da18bc71750>, <ast.Name object at 0x7da18bc73880>, <ast.Name object at 0x7da18bc73a60>]]]]
keyword[def] identifier[_draw_text] ( identifier[self] , identifier[pos] , identifier[text] , identifier[font] ,** identifier[kw] ): literal[string] identifier[self] . identifier[drawables] . identifier[append] (( identifier[pos] , identifier[text] , identifier[font] , identifier[kw] ))
def _draw_text(self, pos, text, font, **kw): """ Remember a single drawable tuple to paint later. """ self.drawables.append((pos, text, font, kw))
def _choose_tuner(self, algorithm_name):
    """
    Parameters
    ----------
    algorithm_name : str
        algorithm_name includes "tpe", "random_search" and anneal"
    """
    if algorithm_name == 'tpe':
        return hp.tpe.suggest
    if algorithm_name == 'random_search':
        return hp.rand.suggest
    if algorithm_name == 'anneal':
        return hp.anneal.suggest
    raise RuntimeError('Not support tuner algorithm in hyperopt.')
def function[_choose_tuner, parameter[self, algorithm_name]]: constant[ Parameters ---------- algorithm_name : str algorithm_name includes "tpe", "random_search" and anneal" ] if compare[name[algorithm_name] equal[==] constant[tpe]] begin[:] return[name[hp].tpe.suggest] if compare[name[algorithm_name] equal[==] constant[random_search]] begin[:] return[name[hp].rand.suggest] if compare[name[algorithm_name] equal[==] constant[anneal]] begin[:] return[name[hp].anneal.suggest] <ast.Raise object at 0x7da1b1fd79a0>
keyword[def] identifier[_choose_tuner] ( identifier[self] , identifier[algorithm_name] ): literal[string] keyword[if] identifier[algorithm_name] == literal[string] : keyword[return] identifier[hp] . identifier[tpe] . identifier[suggest] keyword[if] identifier[algorithm_name] == literal[string] : keyword[return] identifier[hp] . identifier[rand] . identifier[suggest] keyword[if] identifier[algorithm_name] == literal[string] : keyword[return] identifier[hp] . identifier[anneal] . identifier[suggest] keyword[raise] identifier[RuntimeError] ( literal[string] )
def _choose_tuner(self, algorithm_name): """ Parameters ---------- algorithm_name : str algorithm_name includes "tpe", "random_search" and anneal" """ if algorithm_name == 'tpe': return hp.tpe.suggest # depends on [control=['if'], data=[]] if algorithm_name == 'random_search': return hp.rand.suggest # depends on [control=['if'], data=[]] if algorithm_name == 'anneal': return hp.anneal.suggest # depends on [control=['if'], data=[]] raise RuntimeError('Not support tuner algorithm in hyperopt.')
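Illustrative note (not part of the dataset row above): the suggest function returned by _choose_tuner is what hyperopt's fmin takes as its algo argument (the row aliases the hyperopt package itself as hp). A toy usage sketch with an invented objective and search space:

from hyperopt import fmin, hp, tpe

best = fmin(fn=lambda x: (x - 3) ** 2,
            space=hp.uniform('x', -10, 10),
            algo=tpe.suggest,            # what _choose_tuner('tpe') returns
            max_evals=20)
print(best)                              # e.g. {'x': 2.97...}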
def smoothing_window(data, window=[1, 1, 1]):
    """
    This is a smoothing functionality so we can fix misclassifications.
    It will run a sliding window of form [border, smoothing, border] on the signal and
    if the border elements are the same it will change the smooth elements to match the border.
    An example would be for a window of [2, 1, 2] we have the following elements
    [1, 1, 0, 1, 1], this will transform it into [1, 1, 1, 1, 1]. So if the border elements
    match it will transform the middle (smoothing) into the same as the border.

    :param data array: One-dimensional array.
    :param window array: Used to define the [border, smoothing, border] regions.
    :return data array: The smoothed version of the original data.
    """
    for i in range(len(data) - sum(window)):
        start_window_from = i
        start_window_to = i + window[0]
        end_window_from = start_window_to + window[1]
        end_window_to = end_window_from + window[2]
        if np.all(data[start_window_from: start_window_to] == data[end_window_from: end_window_to]):
            data[start_window_from: end_window_to] = data[start_window_from]
    return data
def function[smoothing_window, parameter[data, window]]: constant[ This is a smoothing functionality so we can fix misclassifications. It will run a sliding window of form [border, smoothing, border] on the signal and if the border elements are the same it will change the smooth elements to match the border. An example would be for a window of [2, 1, 2] we have the following elements [1, 1, 0, 1, 1], this will transform it into [1, 1, 1, 1, 1]. So if the border elements match it will transform the middle (smoothing) into the same as the border. :param data array: One-dimensional array. :param window array: Used to define the [border, smoothing, border] regions. :return data array: The smoothed version of the original data. ] for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[data]]] - call[name[sum], parameter[name[window]]]]]]] begin[:] variable[start_window_from] assign[=] name[i] variable[start_window_to] assign[=] binary_operation[name[i] + call[name[window]][constant[0]]] variable[end_window_from] assign[=] binary_operation[name[start_window_to] + call[name[window]][constant[1]]] variable[end_window_to] assign[=] binary_operation[name[end_window_from] + call[name[window]][constant[2]]] if call[name[np].all, parameter[compare[call[name[data]][<ast.Slice object at 0x7da2041d8340>] equal[==] call[name[data]][<ast.Slice object at 0x7da2041d8e80>]]]] begin[:] call[name[data]][<ast.Slice object at 0x7da2041d8ca0>] assign[=] call[name[data]][name[start_window_from]] return[name[data]]
keyword[def] identifier[smoothing_window] ( identifier[data] , identifier[window] =[ literal[int] , literal[int] , literal[int] ]): literal[string] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[data] )- identifier[sum] ( identifier[window] )): identifier[start_window_from] = identifier[i] identifier[start_window_to] = identifier[i] + identifier[window] [ literal[int] ] identifier[end_window_from] = identifier[start_window_to] + identifier[window] [ literal[int] ] identifier[end_window_to] = identifier[end_window_from] + identifier[window] [ literal[int] ] keyword[if] identifier[np] . identifier[all] ( identifier[data] [ identifier[start_window_from] : identifier[start_window_to] ]== identifier[data] [ identifier[end_window_from] : identifier[end_window_to] ]): identifier[data] [ identifier[start_window_from] : identifier[end_window_to] ]= identifier[data] [ identifier[start_window_from] ] keyword[return] identifier[data]
def smoothing_window(data, window=[1, 1, 1]): """ This is a smoothing functionality so we can fix misclassifications. It will run a sliding window of form [border, smoothing, border] on the signal and if the border elements are the same it will change the smooth elements to match the border. An example would be for a window of [2, 1, 2] we have the following elements [1, 1, 0, 1, 1], this will transform it into [1, 1, 1, 1, 1]. So if the border elements match it will transform the middle (smoothing) into the same as the border. :param data array: One-dimensional array. :param window array: Used to define the [border, smoothing, border] regions. :return data array: The smoothed version of the original data. """ for i in range(len(data) - sum(window)): start_window_from = i start_window_to = i + window[0] end_window_from = start_window_to + window[1] end_window_to = end_window_from + window[2] if np.all(data[start_window_from:start_window_to] == data[end_window_from:end_window_to]): data[start_window_from:end_window_to] = data[start_window_from] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] return data
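Illustrative note (not part of the dataset row above): a quick check of the behaviour described in the docstring, assuming the function above is in scope and numpy is imported as np:

import numpy as np

signal = np.array([1, 1, 0, 1, 1, 0, 0, 0])
print(smoothing_window(signal, window=[2, 1, 2]))
# [1 1 1 1 1 0 0 0] -- the lone 0 between matching borders is overwritten in place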
def BGPSessionState_originator_switch_info_switchIpV6Address(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    BGPSessionState = ET.SubElement(config, "BGPSessionState", xmlns="http://brocade.com/ns/brocade-notification-stream")
    originator_switch_info = ET.SubElement(BGPSessionState, "originator-switch-info")
    switchIpV6Address = ET.SubElement(originator_switch_info, "switchIpV6Address")
    switchIpV6Address.text = kwargs.pop('switchIpV6Address')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def function[BGPSessionState_originator_switch_info_switchIpV6Address, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[BGPSessionState] assign[=] call[name[ET].SubElement, parameter[name[config], constant[BGPSessionState]]] variable[originator_switch_info] assign[=] call[name[ET].SubElement, parameter[name[BGPSessionState], constant[originator-switch-info]]] variable[switchIpV6Address] assign[=] call[name[ET].SubElement, parameter[name[originator_switch_info], constant[switchIpV6Address]]] name[switchIpV6Address].text assign[=] call[name[kwargs].pop, parameter[constant[switchIpV6Address]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[BGPSessionState_originator_switch_info_switchIpV6Address] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[BGPSessionState] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] ) identifier[originator_switch_info] = identifier[ET] . identifier[SubElement] ( identifier[BGPSessionState] , literal[string] ) identifier[switchIpV6Address] = identifier[ET] . identifier[SubElement] ( identifier[originator_switch_info] , literal[string] ) identifier[switchIpV6Address] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def BGPSessionState_originator_switch_info_switchIpV6Address(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') BGPSessionState = ET.SubElement(config, 'BGPSessionState', xmlns='http://brocade.com/ns/brocade-notification-stream') originator_switch_info = ET.SubElement(BGPSessionState, 'originator-switch-info') switchIpV6Address = ET.SubElement(originator_switch_info, 'switchIpV6Address') switchIpV6Address.text = kwargs.pop('switchIpV6Address') callback = kwargs.pop('callback', self._callback) return callback(config)
def make_command(command: Callable) -> Callable:
    """Create an command from a method signature."""
    @wraps(command)
    def actualcommand(self, *args, **kwds):  # pylint: disable=missing-docstring
        data = command(self, *args, **kwds)
        name = command.__name__[3:]
        signal = '{uuid}{sep}{event}'.format(
            uuid=self._uuid,  # pylint: disable=protected-access
            sep=SEPARATOR,
            event=name
        )
        if flask.has_request_context():
            emit(signal, {'data': pack(data)})
        else:
            sio = flask.current_app.extensions['socketio']
            sio.emit(signal, {'data': pack(data)})
        eventlet.sleep()
    return actualcommand
def function[make_command, parameter[command]]: constant[Create an command from a method signature.] def function[actualcommand, parameter[self]]: variable[data] assign[=] call[name[command], parameter[name[self], <ast.Starred object at 0x7da18ede4ac0>]] variable[name] assign[=] call[name[command].__name__][<ast.Slice object at 0x7da18ede68f0>] variable[signal] assign[=] call[constant[{uuid}{sep}{event}].format, parameter[]] if call[name[flask].has_request_context, parameter[]] begin[:] call[name[emit], parameter[name[signal], dictionary[[<ast.Constant object at 0x7da18ede7310>], [<ast.Call object at 0x7da18ede7b20>]]]] call[name[eventlet].sleep, parameter[]] return[name[actualcommand]]
keyword[def] identifier[make_command] ( identifier[command] : identifier[Callable] )-> identifier[Callable] : literal[string] @ identifier[wraps] ( identifier[command] ) keyword[def] identifier[actualcommand] ( identifier[self] ,* identifier[args] ,** identifier[kwds] ): identifier[data] = identifier[command] ( identifier[self] ,* identifier[args] ,** identifier[kwds] ) identifier[name] = identifier[command] . identifier[__name__] [ literal[int] :] identifier[signal] = literal[string] . identifier[format] ( identifier[uuid] = identifier[self] . identifier[_uuid] , identifier[sep] = identifier[SEPARATOR] , identifier[event] = identifier[name] ) keyword[if] identifier[flask] . identifier[has_request_context] (): identifier[emit] ( identifier[signal] ,{ literal[string] : identifier[pack] ( identifier[data] )}) keyword[else] : identifier[sio] = identifier[flask] . identifier[current_app] . identifier[extensions] [ literal[string] ] identifier[sio] . identifier[emit] ( identifier[signal] ,{ literal[string] : identifier[pack] ( identifier[data] )}) identifier[eventlet] . identifier[sleep] () keyword[return] identifier[actualcommand]
def make_command(command: Callable) -> Callable: """Create an command from a method signature.""" @wraps(command) def actualcommand(self, *args, **kwds): # pylint: disable=missing-docstring data = command(self, *args, **kwds) name = command.__name__[3:] # pylint: disable=protected-access signal = '{uuid}{sep}{event}'.format(uuid=self._uuid, sep=SEPARATOR, event=name) if flask.has_request_context(): emit(signal, {'data': pack(data)}) # depends on [control=['if'], data=[]] else: sio = flask.current_app.extensions['socketio'] sio.emit(signal, {'data': pack(data)}) eventlet.sleep() return actualcommand
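Illustrative note (not part of the dataset row above): a framework-free sketch of the decorator pattern in make_command, deriving an event name from the wrapped method's name and emitting the packed return value. SEPARATOR, pack and emit are invented stand-ins, not the real flask-socketio objects:

from functools import wraps

SEPARATOR = '#'

def pack(data):                 # stand-in serializer
    return data

def emit(signal, payload):      # stand-in emitter
    print(signal, payload)

def make_command(command):
    @wraps(command)
    def actualcommand(self, *args, **kwds):
        data = command(self, *args, **kwds)
        name = command.__name__[3:]          # strip the 'on_' prefix
        emit('{}{}{}'.format(self._uuid, SEPARATOR, name), {'data': pack(data)})
    return actualcommand

class Widget:
    _uuid = 'abc123'

    @make_command
    def on_click(self, x):
        return {'x': x}

Widget().on_click(7)    # prints: abc123#click {'data': {'x': 7}}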
def _read_stderr(self):
    """Read the stderr file of the kernel."""
    # We need to read stderr_file as bytes to be able to
    # detect its encoding with chardet
    f = open(self.stderr_file, 'rb')
    try:
        stderr_text = f.read()
        # This is needed to avoid showing an empty error message
        # when the kernel takes too much time to start.
        # See issue 8581
        if not stderr_text:
            return ''
        # This is needed since the stderr file could be encoded
        # in something different to utf-8.
        # See issue 4191
        encoding = get_coding(stderr_text)
        stderr_text = to_text_string(stderr_text, encoding)
        return stderr_text
    finally:
        f.close()
def function[_read_stderr, parameter[self]]: constant[Read the stderr file of the kernel.] variable[f] assign[=] call[name[open], parameter[name[self].stderr_file, constant[rb]]] <ast.Try object at 0x7da18eb56470>
keyword[def] identifier[_read_stderr] ( identifier[self] ): literal[string] identifier[f] = identifier[open] ( identifier[self] . identifier[stderr_file] , literal[string] ) keyword[try] : identifier[stderr_text] = identifier[f] . identifier[read] () keyword[if] keyword[not] identifier[stderr_text] : keyword[return] literal[string] identifier[encoding] = identifier[get_coding] ( identifier[stderr_text] ) identifier[stderr_text] = identifier[to_text_string] ( identifier[stderr_text] , identifier[encoding] ) keyword[return] identifier[stderr_text] keyword[finally] : identifier[f] . identifier[close] ()
def _read_stderr(self): """Read the stderr file of the kernel.""" # We need to read stderr_file as bytes to be able to # detect its encoding with chardet f = open(self.stderr_file, 'rb') try: stderr_text = f.read() # This is needed to avoid showing an empty error message # when the kernel takes too much time to start. # See issue 8581 if not stderr_text: return '' # depends on [control=['if'], data=[]] # This is needed since the stderr file could be encoded # in something different to utf-8. # See issue 4191 encoding = get_coding(stderr_text) stderr_text = to_text_string(stderr_text, encoding) return stderr_text # depends on [control=['try'], data=[]] finally: f.close()
def _read_response(self):
    """
    Reads a complete response packet from the server
    """
    result = self.buf.read_line().decode("utf-8")
    if not result:
        raise NoResponseError("No response received from server.")
    msg = self._read_message()
    if result != "ok":
        raise InvalidResponseError(msg)
    return msg
def function[_read_response, parameter[self]]: constant[ Reads a complete response packet from the server ] variable[result] assign[=] call[call[name[self].buf.read_line, parameter[]].decode, parameter[constant[utf-8]]] if <ast.UnaryOp object at 0x7da20c7ca620> begin[:] <ast.Raise object at 0x7da20c7c9ea0> variable[msg] assign[=] call[name[self]._read_message, parameter[]] if compare[name[result] not_equal[!=] constant[ok]] begin[:] <ast.Raise object at 0x7da20c7c9570> return[name[msg]]
keyword[def] identifier[_read_response] ( identifier[self] ): literal[string] identifier[result] = identifier[self] . identifier[buf] . identifier[read_line] (). identifier[decode] ( literal[string] ) keyword[if] keyword[not] identifier[result] : keyword[raise] identifier[NoResponseError] ( literal[string] ) identifier[msg] = identifier[self] . identifier[_read_message] () keyword[if] identifier[result] != literal[string] : keyword[raise] identifier[InvalidResponseError] ( identifier[msg] ) keyword[return] identifier[msg]
def _read_response(self): """ Reads a complete response packet from the server """ result = self.buf.read_line().decode('utf-8') if not result: raise NoResponseError('No response received from server.') # depends on [control=['if'], data=[]] msg = self._read_message() if result != 'ok': raise InvalidResponseError(msg) # depends on [control=['if'], data=[]] return msg
def ping(self, message=_NOTSET, *, encoding=_NOTSET):
    """Ping the server.

    Accept optional echo message.
    """
    if message is not _NOTSET:
        args = (message,)
    else:
        args = ()
    return self.execute('PING', *args, encoding=encoding)
def function[ping, parameter[self, message]]: constant[Ping the server. Accept optional echo message. ] if compare[name[message] is_not name[_NOTSET]] begin[:] variable[args] assign[=] tuple[[<ast.Name object at 0x7da18bc73640>]] return[call[name[self].execute, parameter[constant[PING], <ast.Starred object at 0x7da20c7cb790>]]]
keyword[def] identifier[ping] ( identifier[self] , identifier[message] = identifier[_NOTSET] ,*, identifier[encoding] = identifier[_NOTSET] ): literal[string] keyword[if] identifier[message] keyword[is] keyword[not] identifier[_NOTSET] : identifier[args] =( identifier[message] ,) keyword[else] : identifier[args] =() keyword[return] identifier[self] . identifier[execute] ( literal[string] ,* identifier[args] , identifier[encoding] = identifier[encoding] )
def ping(self, message=_NOTSET, *, encoding=_NOTSET): """Ping the server. Accept optional echo message. """ if message is not _NOTSET: args = (message,) # depends on [control=['if'], data=['message']] else: args = () return self.execute('PING', *args, encoding=encoding)
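Illustrative note (not part of the dataset row above): the _NOTSET default in ping is the sentinel idiom, which distinguishes "argument omitted" from "argument explicitly None". A standalone sketch:

_NOTSET = object()

def ping(message=_NOTSET):
    args = () if message is _NOTSET else (message,)
    return ('PING',) + args

print(ping())        # ('PING',)
print(ping(None))    # ('PING', None) -- None is forwarded, unlike the omitted case
print(ping('hi'))    # ('PING', 'hi')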
def _escape_headers(self, headers):
    """
    :param dict(str,str) headers:
    """
    for key, val in headers.items():
        try:
            val = val.replace('\\', '\\\\').replace('\n', '\\n').replace(':', '\\c').replace('\r', '\\r')
        except:
            pass
        headers[key] = val
def function[_escape_headers, parameter[self, headers]]: constant[ :param dict(str,str) headers: ] for taget[tuple[[<ast.Name object at 0x7da18f722200>, <ast.Name object at 0x7da18f720100>]]] in starred[call[name[headers].items, parameter[]]] begin[:] <ast.Try object at 0x7da18f720d90> call[name[headers]][name[key]] assign[=] name[val]
keyword[def] identifier[_escape_headers] ( identifier[self] , identifier[headers] ): literal[string] keyword[for] identifier[key] , identifier[val] keyword[in] identifier[headers] . identifier[items] (): keyword[try] : identifier[val] = identifier[val] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ) keyword[except] : keyword[pass] identifier[headers] [ identifier[key] ]= identifier[val]
def _escape_headers(self, headers): """ :param dict(str,str) headers: """ for (key, val) in headers.items(): try: val = val.replace('\\', '\\\\').replace('\n', '\\n').replace(':', '\\c').replace('\r', '\\r') # depends on [control=['try'], data=[]] except: pass # depends on [control=['except'], data=[]] headers[key] = val # depends on [control=['for'], data=[]]
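Illustrative note (not part of the dataset row above): the replace chain in _escape_headers performs STOMP-style header escaping; applied to a plain dict it behaves as below (the bare except in the original only skips non-string values, narrowed here to AttributeError):

headers = {'destination': '/queue/a\nb', 'note': 'colon: and \\ backslash', 'retries': 3}
for key, val in headers.items():
    try:
        val = val.replace('\\', '\\\\').replace('\n', '\\n').replace(':', '\\c').replace('\r', '\\r')
    except AttributeError:
        pass                            # the integer value is left untouched
    headers[key] = val
print(headers['destination'])           # /queue/a\nb  (a literal backslash-n, no newline)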
def RecurseKeys(self):
    """Recurses the Windows Registry keys starting with the root key.

    Yields:
      WinRegistryKey: Windows Registry key.
    """
    root_key = self.GetRootKey()
    if root_key:
        for registry_key in root_key.RecurseKeys():
            yield registry_key
def function[RecurseKeys, parameter[self]]: constant[Recurses the Windows Registry keys starting with the root key. Yields: WinRegistryKey: Windows Registry key. ] variable[root_key] assign[=] call[name[self].GetRootKey, parameter[]] if name[root_key] begin[:] for taget[name[registry_key]] in starred[call[name[root_key].RecurseKeys, parameter[]]] begin[:] <ast.Yield object at 0x7da20c6a94e0>
keyword[def] identifier[RecurseKeys] ( identifier[self] ): literal[string] identifier[root_key] = identifier[self] . identifier[GetRootKey] () keyword[if] identifier[root_key] : keyword[for] identifier[registry_key] keyword[in] identifier[root_key] . identifier[RecurseKeys] (): keyword[yield] identifier[registry_key]
def RecurseKeys(self): """Recurses the Windows Registry keys starting with the root key. Yields: WinRegistryKey: Windows Registry key. """ root_key = self.GetRootKey() if root_key: for registry_key in root_key.RecurseKeys(): yield registry_key # depends on [control=['for'], data=['registry_key']] # depends on [control=['if'], data=[]]
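Illustrative note (not part of the dataset row above): RecurseKeys relies on the recursive-generator idiom, where each node yields itself and then delegates to its children. A generic, self-contained sketch with invented classes:

class Key:
    def __init__(self, name, children=()):
        self.name, self.children = name, children

    def RecurseKeys(self):
        yield self
        for child in self.children:
            yield from child.RecurseKeys()

root = Key('HKLM', [Key('Software', [Key('Vendor')]), Key('System')])
print([k.name for k in root.RecurseKeys()])
# ['HKLM', 'Software', 'Vendor', 'System']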
def configure_panel(self):
    """ Configure templates and routing """
    webroot = os.path.dirname(__file__)
    self.template_path = os.path.join(webroot, 'templates')
    aiohttp_jinja2.setup(
        self,
        loader=jinja2.FileSystemLoader(self.template_path),
        filters={'sorted': sorted, 'int': int}
    )
    self['static_root_url'] = '/static'
    self.router.add_view('/', Panel)
    self.router.add_static(
        '/static/',
        path=os.path.join(webroot, 'static'),
        name='static'
    )
def function[configure_panel, parameter[self]]: constant[ Configure templates and routing ] variable[webroot] assign[=] call[name[os].path.dirname, parameter[name[__file__]]] name[self].template_path assign[=] call[name[os].path.join, parameter[name[webroot], constant[templates]]] call[name[aiohttp_jinja2].setup, parameter[name[self]]] call[name[self]][constant[static_root_url]] assign[=] constant[/static] call[name[self].router.add_view, parameter[constant[/], name[Panel]]] call[name[self].router.add_static, parameter[constant[/static/]]]
keyword[def] identifier[configure_panel] ( identifier[self] ): literal[string] identifier[webroot] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ) identifier[self] . identifier[template_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[webroot] , literal[string] ) identifier[aiohttp_jinja2] . identifier[setup] ( identifier[self] , identifier[loader] = identifier[jinja2] . identifier[FileSystemLoader] ( identifier[self] . identifier[template_path] ), identifier[filters] ={ literal[string] : identifier[sorted] , literal[string] : identifier[int] } ) identifier[self] [ literal[string] ]= literal[string] identifier[self] . identifier[router] . identifier[add_view] ( literal[string] , identifier[Panel] ) identifier[self] . identifier[router] . identifier[add_static] ( literal[string] , identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[webroot] , literal[string] ), identifier[name] = literal[string] )
def configure_panel(self): """ Configure templates and routing """ webroot = os.path.dirname(__file__) self.template_path = os.path.join(webroot, 'templates') aiohttp_jinja2.setup(self, loader=jinja2.FileSystemLoader(self.template_path), filters={'sorted': sorted, 'int': int}) self['static_root_url'] = '/static' self.router.add_view('/', Panel) self.router.add_static('/static/', path=os.path.join(webroot, 'static'), name='static')
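Illustrative note (not part of the dataset row above): a standalone sketch of the same aiohttp_jinja2 wiring, using an in-memory DictLoader so it runs without a templates/ directory on disk (names and template content are invented):

from aiohttp import web
import aiohttp_jinja2
import jinja2

app = web.Application()
aiohttp_jinja2.setup(app,
                     loader=jinja2.DictLoader({'index.html': 'Hello {{ name }}'}),
                     filters={'sorted': sorted, 'int': int})

@aiohttp_jinja2.template('index.html')
async def index(request):
    return {'name': 'panel'}

app.router.add_get('/', index)
# web.run_app(app)  # uncomment to serve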
def install(url=None, auth_username=None, auth_password=None,
            submission_interval=None, max_batch_size=None, max_clients=10,
            base_tags=None, max_buffer_size=None, trigger_size=None,
            sample_probability=1.0):
    """Call this to install/setup the InfluxDB client collector.

    All arguments are optional.

    :param str url: The InfluxDB API URL. If URL is not specified, the
        ``INFLUXDB_SCHEME``, ``INFLUXDB_HOST`` and ``INFLUXDB_PORT`` environment
        variables will be used to construct the base URL.
        Default: ``http://localhost:8086/write``
    :param str auth_username: A username to use for InfluxDB authentication. If
        not specified, the ``INFLUXDB_USER`` environment variable will be used.
        Default: ``None``
    :param str auth_password: A password to use for InfluxDB authentication. If
        not specified, the ``INFLUXDB_PASSWORD`` environment variable will be
        used. Default: ``None``
    :param int submission_interval: The maximum number of milliseconds to wait
        after the last batch submission before submitting a batch that is
        smaller than ``trigger_size``. Default: ``60000``
    :param int max_batch_size: The number of measurements to be submitted in a
        single HTTP request. Default: ``10000``
    :param int max_clients: The number of simultaneous batch submissions that
        may be made at any given time. Default: ``10``
    :param dict base_tags: Default tags that are to be submitted with each
        measurement. Default: ``None``
    :param int max_buffer_size: The maximum number of pending measurements in
        the buffer before new measurements are discarded. Default: ``25000``
    :param int trigger_size: The minimum number of measurements that are in the
        buffer before a batch can be submitted. Default: ``5000``
    :param float sample_probability: Value between 0 and 1.0 specifying the
        probability that a batch will be submitted (0.25 == 25%)
    :returns: :data:`True` if the client was installed by this call
        and :data:`False` otherwise.

    If ``INFLUXDB_PASSWORD`` is specified as an environment variable, it will
    be masked in the Python process.

    """
    global _base_tags, _base_url, _credentials, _enabled, _installed, \
        _max_batch_size, _max_buffer_size, _max_clients, \
        _sample_probability, _timeout, _timeout_interval, _trigger_size

    _enabled = os.environ.get('INFLUXDB_ENABLED', 'true') == 'true'
    if not _enabled:
        LOGGER.warning('Disabling InfluxDB support')
        return

    if _installed:
        LOGGER.warning('InfluxDB client already installed')
        return False

    _base_url = url or '{}://{}:{}/write'.format(
        os.environ.get('INFLUXDB_SCHEME', 'http'),
        os.environ.get('INFLUXDB_HOST', 'localhost'),
        os.environ.get('INFLUXDB_PORT', 8086))

    _credentials = (auth_username or os.environ.get('INFLUXDB_USER', None),
                    auth_password or os.environ.get('INFLUXDB_PASSWORD', None))

    # Don't leave the environment variable out there with the password
    if os.environ.get('INFLUXDB_PASSWORD'):
        os.environ['INFLUXDB_PASSWORD'] = \
            'X' * len(os.environ['INFLUXDB_PASSWORD'])

    # Submission related values
    _timeout_interval = submission_interval or \
        int(os.environ.get('INFLUXDB_INTERVAL', _timeout_interval))
    _max_batch_size = max_batch_size or \
        int(os.environ.get('INFLUXDB_MAX_BATCH_SIZE', _max_batch_size))
    _max_clients = max_clients
    _max_buffer_size = max_buffer_size or \
        int(os.environ.get('INFLUXDB_MAX_BUFFER_SIZE', _max_buffer_size))
    _sample_probability = sample_probability or \
        float(os.environ.get('INFLUXDB_SAMPLE_PROBABILITY', _sample_probability))
    _trigger_size = trigger_size or \
        int(os.environ.get('INFLUXDB_TRIGGER_SIZE', _trigger_size))

    # Set the base tags
    if os.environ.get('INFLUXDB_TAG_HOSTNAME', 'true') == 'true':
        _base_tags.setdefault('hostname', socket.gethostname())
    if os.environ.get('ENVIRONMENT'):
        _base_tags.setdefault('environment', os.environ['ENVIRONMENT'])
    _base_tags.update(base_tags or {})

    # Seed the random number generator for batch sampling
    random.seed()

    # Don't let this run multiple times
    _installed = True

    LOGGER.info('sprockets_influxdb v%s installed; %i measurements or %.2f '
                'seconds will trigger batch submission', __version__,
                _trigger_size, _timeout_interval / 1000.0)
    return True
def function[install, parameter[url, auth_username, auth_password, submission_interval, max_batch_size, max_clients, base_tags, max_buffer_size, trigger_size, sample_probability]]: constant[Call this to install/setup the InfluxDB client collector. All arguments are optional. :param str url: The InfluxDB API URL. If URL is not specified, the ``INFLUXDB_SCHEME``, ``INFLUXDB_HOST`` and ``INFLUXDB_PORT`` environment variables will be used to construct the base URL. Default: ``http://localhost:8086/write`` :param str auth_username: A username to use for InfluxDB authentication. If not specified, the ``INFLUXDB_USER`` environment variable will be used. Default: ``None`` :param str auth_password: A password to use for InfluxDB authentication. If not specified, the ``INFLUXDB_PASSWORD`` environment variable will be used. Default: ``None`` :param int submission_interval: The maximum number of milliseconds to wait after the last batch submission before submitting a batch that is smaller than ``trigger_size``. Default: ``60000`` :param int max_batch_size: The number of measurements to be submitted in a single HTTP request. Default: ``10000`` :param int max_clients: The number of simultaneous batch submissions that may be made at any given time. Default: ``10`` :param dict base_tags: Default tags that are to be submitted with each measurement. Default: ``None`` :param int max_buffer_size: The maximum number of pending measurements in the buffer before new measurements are discarded. Default: ``25000`` :param int trigger_size: The minimum number of measurements that are in the buffer before a batch can be submitted. Default: ``5000`` :param float sample_probability: Value between 0 and 1.0 specifying the probability that a batch will be submitted (0.25 == 25%) :returns: :data:`True` if the client was installed by this call and :data:`False` otherwise. If ``INFLUXDB_PASSWORD`` is specified as an environment variable, it will be masked in the Python process. 
] <ast.Global object at 0x7da18eb54eb0> variable[_enabled] assign[=] compare[call[name[os].environ.get, parameter[constant[INFLUXDB_ENABLED], constant[true]]] equal[==] constant[true]] if <ast.UnaryOp object at 0x7da18eb57010> begin[:] call[name[LOGGER].warning, parameter[constant[Disabling InfluxDB support]]] return[None] if name[_installed] begin[:] call[name[LOGGER].warning, parameter[constant[InfluxDB client already installed]]] return[constant[False]] variable[_base_url] assign[=] <ast.BoolOp object at 0x7da18eb55330> variable[_credentials] assign[=] tuple[[<ast.BoolOp object at 0x7da18eb540a0>, <ast.BoolOp object at 0x7da18eb549d0>]] if call[name[os].environ.get, parameter[constant[INFLUXDB_PASSWORD]]] begin[:] call[name[os].environ][constant[INFLUXDB_PASSWORD]] assign[=] binary_operation[constant[X] * call[name[len], parameter[call[name[os].environ][constant[INFLUXDB_PASSWORD]]]]] variable[_timeout_interval] assign[=] <ast.BoolOp object at 0x7da18eb55030> variable[_max_batch_size] assign[=] <ast.BoolOp object at 0x7da18eb57fd0> variable[_max_clients] assign[=] name[max_clients] variable[_max_buffer_size] assign[=] <ast.BoolOp object at 0x7da18eb56020> variable[_sample_probability] assign[=] <ast.BoolOp object at 0x7da18eb56ec0> variable[_trigger_size] assign[=] <ast.BoolOp object at 0x7da18eb550c0> if compare[call[name[os].environ.get, parameter[constant[INFLUXDB_TAG_HOSTNAME], constant[true]]] equal[==] constant[true]] begin[:] call[name[_base_tags].setdefault, parameter[constant[hostname], call[name[socket].gethostname, parameter[]]]] if call[name[os].environ.get, parameter[constant[ENVIRONMENT]]] begin[:] call[name[_base_tags].setdefault, parameter[constant[environment], call[name[os].environ][constant[ENVIRONMENT]]]] call[name[_base_tags].update, parameter[<ast.BoolOp object at 0x7da204346290>]] call[name[random].seed, parameter[]] variable[_installed] assign[=] constant[True] call[name[LOGGER].info, parameter[constant[sprockets_influxdb v%s installed; %i measurements or %.2f seconds will trigger batch submission], name[__version__], name[_trigger_size], binary_operation[name[_timeout_interval] / constant[1000.0]]]] return[constant[True]]
keyword[def] identifier[install] ( identifier[url] = keyword[None] , identifier[auth_username] = keyword[None] , identifier[auth_password] = keyword[None] , identifier[submission_interval] = keyword[None] , identifier[max_batch_size] = keyword[None] , identifier[max_clients] = literal[int] , identifier[base_tags] = keyword[None] , identifier[max_buffer_size] = keyword[None] , identifier[trigger_size] = keyword[None] , identifier[sample_probability] = literal[int] ): literal[string] keyword[global] identifier[_base_tags] , identifier[_base_url] , identifier[_credentials] , identifier[_enabled] , identifier[_installed] , identifier[_max_batch_size] , identifier[_max_buffer_size] , identifier[_max_clients] , identifier[_sample_probability] , identifier[_timeout] , identifier[_timeout_interval] , identifier[_trigger_size] identifier[_enabled] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] )== literal[string] keyword[if] keyword[not] identifier[_enabled] : identifier[LOGGER] . identifier[warning] ( literal[string] ) keyword[return] keyword[if] identifier[_installed] : identifier[LOGGER] . identifier[warning] ( literal[string] ) keyword[return] keyword[False] identifier[_base_url] = identifier[url] keyword[or] literal[string] . identifier[format] ( identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] ), identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] ), identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[int] )) identifier[_credentials] =( identifier[auth_username] keyword[or] identifier[os] . identifier[environ] . identifier[get] ( literal[string] , keyword[None] ), identifier[auth_password] keyword[or] identifier[os] . identifier[environ] . identifier[get] ( literal[string] , keyword[None] )) keyword[if] identifier[os] . identifier[environ] . identifier[get] ( literal[string] ): identifier[os] . identifier[environ] [ literal[string] ]= literal[string] * identifier[len] ( identifier[os] . identifier[environ] [ literal[string] ]) identifier[_timeout_interval] = identifier[submission_interval] keyword[or] identifier[int] ( identifier[os] . identifier[environ] . identifier[get] ( literal[string] , identifier[_timeout_interval] )) identifier[_max_batch_size] = identifier[max_batch_size] keyword[or] identifier[int] ( identifier[os] . identifier[environ] . identifier[get] ( literal[string] , identifier[_max_batch_size] )) identifier[_max_clients] = identifier[max_clients] identifier[_max_buffer_size] = identifier[max_buffer_size] keyword[or] identifier[int] ( identifier[os] . identifier[environ] . identifier[get] ( literal[string] , identifier[_max_buffer_size] )) identifier[_sample_probability] = identifier[sample_probability] keyword[or] identifier[float] ( identifier[os] . identifier[environ] . identifier[get] ( literal[string] , identifier[_sample_probability] )) identifier[_trigger_size] = identifier[trigger_size] keyword[or] identifier[int] ( identifier[os] . identifier[environ] . identifier[get] ( literal[string] , identifier[_trigger_size] )) keyword[if] identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] )== literal[string] : identifier[_base_tags] . identifier[setdefault] ( literal[string] , identifier[socket] . identifier[gethostname] ()) keyword[if] identifier[os] . identifier[environ] . identifier[get] ( literal[string] ): identifier[_base_tags] . 
identifier[setdefault] ( literal[string] , identifier[os] . identifier[environ] [ literal[string] ]) identifier[_base_tags] . identifier[update] ( identifier[base_tags] keyword[or] {}) identifier[random] . identifier[seed] () identifier[_installed] = keyword[True] identifier[LOGGER] . identifier[info] ( literal[string] literal[string] , identifier[__version__] , identifier[_trigger_size] , identifier[_timeout_interval] / literal[int] ) keyword[return] keyword[True]
def install(url=None, auth_username=None, auth_password=None, submission_interval=None, max_batch_size=None, max_clients=10, base_tags=None, max_buffer_size=None, trigger_size=None, sample_probability=1.0): """Call this to install/setup the InfluxDB client collector. All arguments are optional. :param str url: The InfluxDB API URL. If URL is not specified, the ``INFLUXDB_SCHEME``, ``INFLUXDB_HOST`` and ``INFLUXDB_PORT`` environment variables will be used to construct the base URL. Default: ``http://localhost:8086/write`` :param str auth_username: A username to use for InfluxDB authentication. If not specified, the ``INFLUXDB_USER`` environment variable will be used. Default: ``None`` :param str auth_password: A password to use for InfluxDB authentication. If not specified, the ``INFLUXDB_PASSWORD`` environment variable will be used. Default: ``None`` :param int submission_interval: The maximum number of milliseconds to wait after the last batch submission before submitting a batch that is smaller than ``trigger_size``. Default: ``60000`` :param int max_batch_size: The number of measurements to be submitted in a single HTTP request. Default: ``10000`` :param int max_clients: The number of simultaneous batch submissions that may be made at any given time. Default: ``10`` :param dict base_tags: Default tags that are to be submitted with each measurement. Default: ``None`` :param int max_buffer_size: The maximum number of pending measurements in the buffer before new measurements are discarded. Default: ``25000`` :param int trigger_size: The minimum number of measurements that are in the buffer before a batch can be submitted. Default: ``5000`` :param float sample_probability: Value between 0 and 1.0 specifying the probability that a batch will be submitted (0.25 == 25%) :returns: :data:`True` if the client was installed by this call and :data:`False` otherwise. If ``INFLUXDB_PASSWORD`` is specified as an environment variable, it will be masked in the Python process. 
""" global _base_tags, _base_url, _credentials, _enabled, _installed, _max_batch_size, _max_buffer_size, _max_clients, _sample_probability, _timeout, _timeout_interval, _trigger_size _enabled = os.environ.get('INFLUXDB_ENABLED', 'true') == 'true' if not _enabled: LOGGER.warning('Disabling InfluxDB support') return # depends on [control=['if'], data=[]] if _installed: LOGGER.warning('InfluxDB client already installed') return False # depends on [control=['if'], data=[]] _base_url = url or '{}://{}:{}/write'.format(os.environ.get('INFLUXDB_SCHEME', 'http'), os.environ.get('INFLUXDB_HOST', 'localhost'), os.environ.get('INFLUXDB_PORT', 8086)) _credentials = (auth_username or os.environ.get('INFLUXDB_USER', None), auth_password or os.environ.get('INFLUXDB_PASSWORD', None)) # Don't leave the environment variable out there with the password if os.environ.get('INFLUXDB_PASSWORD'): os.environ['INFLUXDB_PASSWORD'] = 'X' * len(os.environ['INFLUXDB_PASSWORD']) # depends on [control=['if'], data=[]] # Submission related values _timeout_interval = submission_interval or int(os.environ.get('INFLUXDB_INTERVAL', _timeout_interval)) _max_batch_size = max_batch_size or int(os.environ.get('INFLUXDB_MAX_BATCH_SIZE', _max_batch_size)) _max_clients = max_clients _max_buffer_size = max_buffer_size or int(os.environ.get('INFLUXDB_MAX_BUFFER_SIZE', _max_buffer_size)) _sample_probability = sample_probability or float(os.environ.get('INFLUXDB_SAMPLE_PROBABILITY', _sample_probability)) _trigger_size = trigger_size or int(os.environ.get('INFLUXDB_TRIGGER_SIZE', _trigger_size)) # Set the base tags if os.environ.get('INFLUXDB_TAG_HOSTNAME', 'true') == 'true': _base_tags.setdefault('hostname', socket.gethostname()) # depends on [control=['if'], data=[]] if os.environ.get('ENVIRONMENT'): _base_tags.setdefault('environment', os.environ['ENVIRONMENT']) # depends on [control=['if'], data=[]] _base_tags.update(base_tags or {}) # Seed the random number generator for batch sampling random.seed() # Don't let this run multiple times _installed = True LOGGER.info('sprockets_influxdb v%s installed; %i measurements or %.2f seconds will trigger batch submission', __version__, _trigger_size, _timeout_interval / 1000.0) return True
def updatePassword(self, user, currentPassword, newPassword):
    """Change the password of a user."""
    return self.__post('/api/updatePassword', data={
        'user': user,
        'currentPassword': currentPassword,
        'newPassword': newPassword
    })
def function[updatePassword, parameter[self, user, currentPassword, newPassword]]: constant[Change the password of a user.] return[call[name[self].__post, parameter[constant[/api/updatePassword]]]]
keyword[def] identifier[updatePassword] ( identifier[self] , identifier[user] , identifier[currentPassword] , identifier[newPassword] ): literal[string] keyword[return] identifier[self] . identifier[__post] ( literal[string] , identifier[data] ={ literal[string] : identifier[user] , literal[string] : identifier[currentPassword] , literal[string] : identifier[newPassword] })
def updatePassword(self, user, currentPassword, newPassword): """Change the password of a user.""" return self.__post('/api/updatePassword', data={'user': user, 'currentPassword': currentPassword, 'newPassword': newPassword})
def _message(self, beacon_config, invert_hello=False): """ Overridden :meth:`.WBeaconGouverneurMessenger._message` method. Appends encoded host group names to requests and responses. :param beacon_config: beacon configuration :return: bytes """ m = WBeaconGouverneurMessenger._message(self, beacon_config, invert_hello=invert_hello) hostgroups = self._message_hostgroup_generate() if len(hostgroups) > 0: m += (WHostgroupBeaconMessenger.__message_groups_splitter__ + hostgroups) return m
def function[_message, parameter[self, beacon_config, invert_hello]]: constant[ Overridden :meth:`.WBeaconGouverneurMessenger._message` method. Appends encoded host group names to requests and responses. :param beacon_config: beacon configuration :return: bytes ] variable[m] assign[=] call[name[WBeaconGouverneurMessenger]._message, parameter[name[self], name[beacon_config]]] variable[hostgroups] assign[=] call[name[self]._message_hostgroup_generate, parameter[]] if compare[call[name[len], parameter[name[hostgroups]]] greater[>] constant[0]] begin[:] <ast.AugAssign object at 0x7da1b2585180> return[name[m]]
keyword[def] identifier[_message] ( identifier[self] , identifier[beacon_config] , identifier[invert_hello] = keyword[False] ): literal[string] identifier[m] = identifier[WBeaconGouverneurMessenger] . identifier[_message] ( identifier[self] , identifier[beacon_config] , identifier[invert_hello] = identifier[invert_hello] ) identifier[hostgroups] = identifier[self] . identifier[_message_hostgroup_generate] () keyword[if] identifier[len] ( identifier[hostgroups] )> literal[int] : identifier[m] +=( identifier[WHostgroupBeaconMessenger] . identifier[__message_groups_splitter__] + identifier[hostgroups] ) keyword[return] identifier[m]
def _message(self, beacon_config, invert_hello=False): """ Overridden :meth:`.WBeaconGouverneurMessenger._message` method. Appends encoded host group names to requests and responses. :param beacon_config: beacon configuration :return: bytes """ m = WBeaconGouverneurMessenger._message(self, beacon_config, invert_hello=invert_hello) hostgroups = self._message_hostgroup_generate() if len(hostgroups) > 0: m += WHostgroupBeaconMessenger.__message_groups_splitter__ + hostgroups # depends on [control=['if'], data=[]] return m
def _reformat(p, buf): """ Apply format of ``p`` to data in 1-d array ``buf``. """ if numpy.ndim(buf) != 1: raise ValueError("Buffer ``buf`` must be 1-d.") if hasattr(p, 'keys'): ans = _gvar.BufferDict(p) if ans.size != len(buf): raise ValueError("p, buf size mismatch: %d, %d"%(ans.size, len(buf))) ans = _gvar.BufferDict(ans, buf=buf) else: if numpy.size(p) != len(buf): raise ValueError("p, buf size mismatch: %d, %d"%(numpy.size(p), len(buf))) ans = numpy.array(buf).reshape(numpy.shape(p)) return ans
def function[_reformat, parameter[p, buf]]: constant[ Apply format of ``p`` to data in 1-d array ``buf``. ] if compare[call[name[numpy].ndim, parameter[name[buf]]] not_equal[!=] constant[1]] begin[:] <ast.Raise object at 0x7da2046239a0> if call[name[hasattr], parameter[name[p], constant[keys]]] begin[:] variable[ans] assign[=] call[name[_gvar].BufferDict, parameter[name[p]]] if compare[name[ans].size not_equal[!=] call[name[len], parameter[name[buf]]]] begin[:] <ast.Raise object at 0x7da2046231f0> variable[ans] assign[=] call[name[_gvar].BufferDict, parameter[name[ans]]] return[name[ans]]
keyword[def] identifier[_reformat] ( identifier[p] , identifier[buf] ): literal[string] keyword[if] identifier[numpy] . identifier[ndim] ( identifier[buf] )!= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[hasattr] ( identifier[p] , literal[string] ): identifier[ans] = identifier[_gvar] . identifier[BufferDict] ( identifier[p] ) keyword[if] identifier[ans] . identifier[size] != identifier[len] ( identifier[buf] ): keyword[raise] identifier[ValueError] ( literal[string] %( identifier[ans] . identifier[size] , identifier[len] ( identifier[buf] ))) identifier[ans] = identifier[_gvar] . identifier[BufferDict] ( identifier[ans] , identifier[buf] = identifier[buf] ) keyword[else] : keyword[if] identifier[numpy] . identifier[size] ( identifier[p] )!= identifier[len] ( identifier[buf] ): keyword[raise] identifier[ValueError] ( literal[string] %( identifier[numpy] . identifier[size] ( identifier[p] ), identifier[len] ( identifier[buf] ))) identifier[ans] = identifier[numpy] . identifier[array] ( identifier[buf] ). identifier[reshape] ( identifier[numpy] . identifier[shape] ( identifier[p] )) keyword[return] identifier[ans]
def _reformat(p, buf): """ Apply format of ``p`` to data in 1-d array ``buf``. """ if numpy.ndim(buf) != 1: raise ValueError('Buffer ``buf`` must be 1-d.') # depends on [control=['if'], data=[]] if hasattr(p, 'keys'): ans = _gvar.BufferDict(p) if ans.size != len(buf): # raise ValueError('p, buf size mismatch: %d, %d' % (ans.size, len(buf))) # depends on [control=['if'], data=[]] ans = _gvar.BufferDict(ans, buf=buf) # depends on [control=['if'], data=[]] else: if numpy.size(p) != len(buf): # raise ValueError('p, buf size mismatch: %d, %d' % (numpy.size(p), len(buf))) # depends on [control=['if'], data=[]] ans = numpy.array(buf).reshape(numpy.shape(p)) return ans
def authorized(resp, remote): """Authorized callback handler for GitHub. :param resp: The response. :param remote: The remote application. """ if resp and 'error' in resp: if resp['error'] == 'bad_verification_code': # See https://developer.github.com/v3/oauth/#bad-verification-code # which recommends starting auth flow again. return redirect(url_for('invenio_oauthclient.login', remote_app='github')) elif resp['error'] in ['incorrect_client_credentials', 'redirect_uri_mismatch']: raise OAuthResponseError( 'Application mis-configuration in GitHub', remote, resp ) return authorized_signup_handler(resp, remote)
def function[authorized, parameter[resp, remote]]: constant[Authorized callback handler for GitHub. :param resp: The response. :param remote: The remote application. ] if <ast.BoolOp object at 0x7da1b251add0> begin[:] if compare[call[name[resp]][constant[error]] equal[==] constant[bad_verification_code]] begin[:] return[call[name[redirect], parameter[call[name[url_for], parameter[constant[invenio_oauthclient.login]]]]]] return[call[name[authorized_signup_handler], parameter[name[resp], name[remote]]]]
keyword[def] identifier[authorized] ( identifier[resp] , identifier[remote] ): literal[string] keyword[if] identifier[resp] keyword[and] literal[string] keyword[in] identifier[resp] : keyword[if] identifier[resp] [ literal[string] ]== literal[string] : keyword[return] identifier[redirect] ( identifier[url_for] ( literal[string] , identifier[remote_app] = literal[string] )) keyword[elif] identifier[resp] [ literal[string] ] keyword[in] [ literal[string] , literal[string] ]: keyword[raise] identifier[OAuthResponseError] ( literal[string] , identifier[remote] , identifier[resp] ) keyword[return] identifier[authorized_signup_handler] ( identifier[resp] , identifier[remote] )
def authorized(resp, remote): """Authorized callback handler for GitHub. :param resp: The response. :param remote: The remote application. """ if resp and 'error' in resp: if resp['error'] == 'bad_verification_code': # See https://developer.github.com/v3/oauth/#bad-verification-code # which recommends starting auth flow again. return redirect(url_for('invenio_oauthclient.login', remote_app='github')) # depends on [control=['if'], data=[]] elif resp['error'] in ['incorrect_client_credentials', 'redirect_uri_mismatch']: raise OAuthResponseError('Application mis-configuration in GitHub', remote, resp) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return authorized_signup_handler(resp, remote)
def replace_zeros(self, val, zero_thresh=0.0): """ Replaces all zeros in the image with a specified value Parameters ---------- val : image dtype value to replace zeros with zero_thresh : float values less than or equal to this threshold are treated as zeros Returns ------- image of the same type with its zeros replaced """ new_data = self.data.copy() new_data[new_data <= zero_thresh] = val return type(self)(new_data.astype(self.data.dtype), frame=self._frame)
def function[replace_zeros, parameter[self, val, zero_thresh]]: constant[ Replaces all zeros in the image with a specified value Returns ------- image dtype value to replace zeros with ] variable[new_data] assign[=] call[name[self].data.copy, parameter[]] call[name[new_data]][compare[name[new_data] less_or_equal[<=] name[zero_thresh]]] assign[=] name[val] return[call[call[name[type], parameter[name[self]]], parameter[call[name[new_data].astype, parameter[name[self].data.dtype]]]]]
keyword[def] identifier[replace_zeros] ( identifier[self] , identifier[val] , identifier[zero_thresh] = literal[int] ): literal[string] identifier[new_data] = identifier[self] . identifier[data] . identifier[copy] () identifier[new_data] [ identifier[new_data] <= identifier[zero_thresh] ]= identifier[val] keyword[return] identifier[type] ( identifier[self] )( identifier[new_data] . identifier[astype] ( identifier[self] . identifier[data] . identifier[dtype] ), identifier[frame] = identifier[self] . identifier[_frame] )
def replace_zeros(self, val, zero_thresh=0.0): """ Replaces all zeros in the image with a specified value Returns ------- image dtype value to replace zeros with """ new_data = self.data.copy() new_data[new_data <= zero_thresh] = val return type(self)(new_data.astype(self.data.dtype), frame=self._frame)
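The heart of replace_zeros() is a boolean-mask assignment on a copy of the pixel array. A self-contained numpy sketch of the same idiom, independent of the surrounding image class; the array and threshold values are made up:

import numpy as np

data = np.array([[0.0, 0.2], [0.01, 1.5]])
zero_thresh = 0.05
new_data = data.copy()
new_data[new_data <= zero_thresh] = -1.0   # values at or below the threshold are replaced
# new_data is now [[-1.0, 0.2], [-1.0, 1.5]] and data itself is untouched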
def make_pkh_output(value, pubkey, witness=False): ''' int, bytearray -> TxOut ''' return _make_output( value=utils.i2le_padded(value, 8), output_script=make_pkh_output_script(pubkey, witness))
def function[make_pkh_output, parameter[value, pubkey, witness]]: constant[ int, bytearray -> TxOut ] return[call[name[_make_output], parameter[]]]
keyword[def] identifier[make_pkh_output] ( identifier[value] , identifier[pubkey] , identifier[witness] = keyword[False] ): literal[string] keyword[return] identifier[_make_output] ( identifier[value] = identifier[utils] . identifier[i2le_padded] ( identifier[value] , literal[int] ), identifier[output_script] = identifier[make_pkh_output_script] ( identifier[pubkey] , identifier[witness] ))
def make_pkh_output(value, pubkey, witness=False): """ int, bytearray -> TxOut """ return _make_output(value=utils.i2le_padded(value, 8), output_script=make_pkh_output_script(pubkey, witness))
def _as_chunk(self): """ Allows reconstructing indefinite length values :return: A unicode string of bits - 1s and 0s """ extra_bits = int_from_bytes(self.contents[0:1]) bit_string = '{0:b}'.format(int_from_bytes(self.contents[1:])) # Ensure we have leading zeros since these chunks may be concatenated together mod_bit_len = len(bit_string) % 8 if mod_bit_len != 0: bit_string = ('0' * (8 - mod_bit_len)) + bit_string if extra_bits > 0: return bit_string[0:0 - extra_bits] return bit_string
def function[_as_chunk, parameter[self]]: constant[ Allows reconstructing indefinite length values :return: A unicode string of bits - 1s and 0s ] variable[extra_bits] assign[=] call[name[int_from_bytes], parameter[call[name[self].contents][<ast.Slice object at 0x7da1b08ae380>]]] variable[bit_string] assign[=] call[constant[{0:b}].format, parameter[call[name[int_from_bytes], parameter[call[name[self].contents][<ast.Slice object at 0x7da1b08acc10>]]]]] variable[mod_bit_len] assign[=] binary_operation[call[name[len], parameter[name[bit_string]]] <ast.Mod object at 0x7da2590d6920> constant[8]] if compare[name[mod_bit_len] not_equal[!=] constant[0]] begin[:] variable[bit_string] assign[=] binary_operation[binary_operation[constant[0] * binary_operation[constant[8] - name[mod_bit_len]]] + name[bit_string]] if compare[name[extra_bits] greater[>] constant[0]] begin[:] return[call[name[bit_string]][<ast.Slice object at 0x7da1b08aef50>]] return[name[bit_string]]
keyword[def] identifier[_as_chunk] ( identifier[self] ): literal[string] identifier[extra_bits] = identifier[int_from_bytes] ( identifier[self] . identifier[contents] [ literal[int] : literal[int] ]) identifier[bit_string] = literal[string] . identifier[format] ( identifier[int_from_bytes] ( identifier[self] . identifier[contents] [ literal[int] :])) identifier[mod_bit_len] = identifier[len] ( identifier[bit_string] )% literal[int] keyword[if] identifier[mod_bit_len] != literal[int] : identifier[bit_string] =( literal[string] *( literal[int] - identifier[mod_bit_len] ))+ identifier[bit_string] keyword[if] identifier[extra_bits] > literal[int] : keyword[return] identifier[bit_string] [ literal[int] : literal[int] - identifier[extra_bits] ] keyword[return] identifier[bit_string]
def _as_chunk(self): """ Allows reconstructing indefinite length values :return: A unicode string of bits - 1s and 0s """ extra_bits = int_from_bytes(self.contents[0:1]) bit_string = '{0:b}'.format(int_from_bytes(self.contents[1:])) # Ensure we have leading zeros since these chunks may be concatenated together mod_bit_len = len(bit_string) % 8 if mod_bit_len != 0: bit_string = '0' * (8 - mod_bit_len) + bit_string # depends on [control=['if'], data=['mod_bit_len']] if extra_bits > 0: return bit_string[0:0 - extra_bits] # depends on [control=['if'], data=['extra_bits']] return bit_string
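_as_chunk() relies on the BER/DER rule that the first content octet of a BIT STRING gives the number of unused trailing bits in the last octet. A standalone sketch of the same arithmetic on a hand-made content string (the byte values are illustrative):

contents = b'\x03\xb6\xe0'   # 3 unused bits, then payload bytes 0xb6 0xe0
extra_bits = contents[0]
bit_string = '{0:b}'.format(int.from_bytes(contents[1:], 'big'))
mod_bit_len = len(bit_string) % 8
if mod_bit_len != 0:
    bit_string = '0' * (8 - mod_bit_len) + bit_string   # restore leading zeros dropped by int formatting
if extra_bits > 0:
    bit_string = bit_string[:-extra_bits]               # trim the unused trailing bits
# bit_string == '1011011011100'  (16 - 3 = 13 bits)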
def evaluate_bound( distribution, x_data, parameters=None, cache=None, ): """ Evaluate lower and upper bounds. Args: distribution (Dist): Distribution to evaluate. x_data (numpy.ndarray): Locations for where evaluate bounds at. Relevant in the case of multivariate distributions where the bounds are affected by the output of other distributions. parameters (:py:data:typing.Any): Collection of parameters to override the default ones in the distribution. cache (:py:data:typing.Any): A collection of previous calculations in case the same distribution turns up on more than one occasion. Returns: The lower and upper bounds of ``distribution`` at location ``x_data`` using parameters ``parameters``. """ assert len(x_data) == len(distribution) assert len(x_data.shape) == 2 cache = cache if cache is not None else {} parameters = load_parameters( distribution, "_bnd", parameters=parameters, cache=cache) out = numpy.zeros((2,) + x_data.shape) lower, upper = distribution._bnd(x_data.copy(), **parameters) out.T[:, :, 0] = numpy.asfarray(lower).T out.T[:, :, 1] = numpy.asfarray(upper).T cache[distribution] = out return out
def function[evaluate_bound, parameter[distribution, x_data, parameters, cache]]: constant[ Evaluate lower and upper bounds. Args: distribution (Dist): Distribution to evaluate. x_data (numpy.ndarray): Locations for where evaluate bounds at. Relevant in the case of multivariate distributions where the bounds are affected by the output of other distributions. parameters (:py:data:typing.Any): Collection of parameters to override the default ones in the distribution. cache (:py:data:typing.Any): A collection of previous calculations in case the same distribution turns up on more than one occasion. Returns: The lower and upper bounds of ``distribution`` at location ``x_data`` using parameters ``parameters``. ] assert[compare[call[name[len], parameter[name[x_data]]] equal[==] call[name[len], parameter[name[distribution]]]]] assert[compare[call[name[len], parameter[name[x_data].shape]] equal[==] constant[2]]] variable[cache] assign[=] <ast.IfExp object at 0x7da2044c3670> variable[parameters] assign[=] call[name[load_parameters], parameter[name[distribution], constant[_bnd]]] variable[out] assign[=] call[name[numpy].zeros, parameter[binary_operation[tuple[[<ast.Constant object at 0x7da2044c3190>]] + name[x_data].shape]]] <ast.Tuple object at 0x7da2044c1960> assign[=] call[name[distribution]._bnd, parameter[call[name[x_data].copy, parameter[]]]] call[name[out].T][tuple[[<ast.Slice object at 0x7da2044c3490>, <ast.Slice object at 0x7da2044c00a0>, <ast.Constant object at 0x7da2044c3310>]]] assign[=] call[name[numpy].asfarray, parameter[name[lower]]].T call[name[out].T][tuple[[<ast.Slice object at 0x7da20eb29d50>, <ast.Slice object at 0x7da20eb2b070>, <ast.Constant object at 0x7da20eb2a140>]]] assign[=] call[name[numpy].asfarray, parameter[name[upper]]].T call[name[cache]][name[distribution]] assign[=] name[out] return[name[out]]
keyword[def] identifier[evaluate_bound] ( identifier[distribution] , identifier[x_data] , identifier[parameters] = keyword[None] , identifier[cache] = keyword[None] , ): literal[string] keyword[assert] identifier[len] ( identifier[x_data] )== identifier[len] ( identifier[distribution] ) keyword[assert] identifier[len] ( identifier[x_data] . identifier[shape] )== literal[int] identifier[cache] = identifier[cache] keyword[if] identifier[cache] keyword[is] keyword[not] keyword[None] keyword[else] {} identifier[parameters] = identifier[load_parameters] ( identifier[distribution] , literal[string] , identifier[parameters] = identifier[parameters] , identifier[cache] = identifier[cache] ) identifier[out] = identifier[numpy] . identifier[zeros] (( literal[int] ,)+ identifier[x_data] . identifier[shape] ) identifier[lower] , identifier[upper] = identifier[distribution] . identifier[_bnd] ( identifier[x_data] . identifier[copy] (),** identifier[parameters] ) identifier[out] . identifier[T] [:,:, literal[int] ]= identifier[numpy] . identifier[asfarray] ( identifier[lower] ). identifier[T] identifier[out] . identifier[T] [:,:, literal[int] ]= identifier[numpy] . identifier[asfarray] ( identifier[upper] ). identifier[T] identifier[cache] [ identifier[distribution] ]= identifier[out] keyword[return] identifier[out]
def evaluate_bound(distribution, x_data, parameters=None, cache=None): """ Evaluate lower and upper bounds. Args: distribution (Dist): Distribution to evaluate. x_data (numpy.ndarray): Locations for where evaluate bounds at. Relevant in the case of multivariate distributions where the bounds are affected by the output of other distributions. parameters (:py:data:typing.Any): Collection of parameters to override the default ones in the distribution. cache (:py:data:typing.Any): A collection of previous calculations in case the same distribution turns up on more than one occasion. Returns: The lower and upper bounds of ``distribution`` at location ``x_data`` using parameters ``parameters``. """ assert len(x_data) == len(distribution) assert len(x_data.shape) == 2 cache = cache if cache is not None else {} parameters = load_parameters(distribution, '_bnd', parameters=parameters, cache=cache) out = numpy.zeros((2,) + x_data.shape) (lower, upper) = distribution._bnd(x_data.copy(), **parameters) out.T[:, :, 0] = numpy.asfarray(lower).T out.T[:, :, 1] = numpy.asfarray(upper).T cache[distribution] = out return out
def priceItems(items): """ Takes a list of Item objects and returns a list of Item objects with respective prices modified Uses the given list of item objects to formulate a query to the item database. Uses the returned results to populate each item in the list with its respective price, then returns the modified list. Parameters: items (list[Item]) -- List of items to price Returns list[Item] - Priced list of items """ sendItems = [item.name for item in items] resp = CodexAPI.searchMany(sendItems) # Map each returned price back onto the matching Item object by name itemsByName = {item.name: item for item in items} for respItem in resp: itemsByName[respItem['name']].price = respItem['price'] return items
def function[priceItems, parameter[items]]: constant[ Takes a list of Item objects and returns a list of Item objects with respective prices modified Uses the given list of item objects to formulate a query to the item database. Uses the returned results to populate each item in the list with its respective price, then returns the modified list. Parameters: items (list[Item]) -- List of items to price Returns list[Item] - Priced list of items ] variable[retItems] assign[=] list[[]] variable[sendItems] assign[=] list[[]] for taget[name[item]] in starred[name[items]] begin[:] call[name[sendItems].append, parameter[name[item].name]] variable[resp] assign[=] call[name[CodexAPI].searchMany, parameter[name[sendItems]]] for taget[name[respItem]] in starred[name[resp]] begin[:] call[name[retItems]][call[name[respItem]][constant[name]]].price assign[=] call[name[respItem]][constant[price]] return[name[retItems]]
keyword[def] identifier[priceItems] ( identifier[items] ): literal[string] identifier[retItems] =[] identifier[sendItems] =[] keyword[for] identifier[item] keyword[in] identifier[items] : identifier[sendItems] . identifier[append] ( identifier[item] . identifier[name] ) identifier[resp] = identifier[CodexAPI] . identifier[searchMany] ( identifier[sendItems] ) keyword[for] identifier[respItem] keyword[in] identifier[resp] : identifier[retItems] [ identifier[respItem] [ literal[string] ]]. identifier[price] = identifier[respItem] [ literal[string] ] keyword[return] identifier[retItems]
def priceItems(items): """ Takes a list of Item objects and returns a list of Item objects with respective prices modified Uses the given list of item objects to formulate a query to the item database. Uses the returned results to populate each item in the list with its respective price, then returns the modified list. Parameters: items (list[Item]) -- List of items to price Returns list[Item] - Priced list of items """ retItems = [] sendItems = [] for item in items: sendItems.append(item.name) # depends on [control=['for'], data=['item']] resp = CodexAPI.searchMany(sendItems) for respItem in resp: retItems[respItem['name']].price = respItem['price'] # depends on [control=['for'], data=['respItem']] return retItems
def find_input_usage(self, full_usage_id): """Check if full usage Id is included in the input reports set Parameters: full_usage_id Full target usage, use get_full_usage_id Returns: Report ID as integer value, or None if report does not exist with target usage. Notice that report ID 0 is a valid report. """ for report_id, report_obj in self.__input_report_templates.items(): if full_usage_id in report_obj: return report_id return None
def function[find_input_usage, parameter[self, full_usage_id]]: constant[Check if full usage Id included in input reports set Parameters: full_usage_id Full target usage, use get_full_usage_id Returns: Report ID as integer value, or None if report does not exist with target usage. Nottice that report ID 0 is a valid report. ] for taget[tuple[[<ast.Name object at 0x7da20c6c52a0>, <ast.Name object at 0x7da20c6c7b20>]]] in starred[call[name[self].__input_report_templates.items, parameter[]]] begin[:] if compare[name[full_usage_id] in name[report_obj]] begin[:] return[name[report_id]] return[constant[None]]
keyword[def] identifier[find_input_usage] ( identifier[self] , identifier[full_usage_id] ): literal[string] keyword[for] identifier[report_id] , identifier[report_obj] keyword[in] identifier[self] . identifier[__input_report_templates] . identifier[items] (): keyword[if] identifier[full_usage_id] keyword[in] identifier[report_obj] : keyword[return] identifier[report_id] keyword[return] keyword[None]
def find_input_usage(self, full_usage_id): """Check if full usage Id included in input reports set Parameters: full_usage_id Full target usage, use get_full_usage_id Returns: Report ID as integer value, or None if report does not exist with target usage. Nottice that report ID 0 is a valid report. """ for (report_id, report_obj) in self.__input_report_templates.items(): if full_usage_id in report_obj: return report_id # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return None
def wait(self, timeout): """ Starts the Timer for timeout seconds, then gives a 5-second grace period to join the thread. :param timeout: Duration for timer. :return: Nothing """ self.timeout = timeout self.start() self.join(timeout=5)
def function[wait, parameter[self, timeout]]: constant[ Starts the Timer for timeout seconds, then gives 5 second grace period to join the thread. :param timeout: Duration for timer. :return: Nothing ] name[self].timeout assign[=] name[timeout] call[name[self].start, parameter[]] call[name[self].join, parameter[]]
keyword[def] identifier[wait] ( identifier[self] , identifier[timeout] ): literal[string] identifier[self] . identifier[timeout] = identifier[timeout] identifier[self] . identifier[start] () identifier[self] . identifier[join] ( identifier[timeout] = literal[int] )
def wait(self, timeout): """ Starts the Timer for timeout seconds, then gives 5 second grace period to join the thread. :param timeout: Duration for timer. :return: Nothing """ self.timeout = timeout self.start() self.join(timeout=5)
def authorize_ip_permission( self, group_name, ip_protocol, from_port, to_port, cidr_ip): """ This is a convenience function that wraps the "authorize ip permission" functionality of the C{authorize_security_group} method. For an explanation of the parameters, see C{authorize_security_group}. """ d = self.authorize_security_group( group_name, ip_protocol=ip_protocol, from_port=from_port, to_port=to_port, cidr_ip=cidr_ip) return d
def function[authorize_ip_permission, parameter[self, group_name, ip_protocol, from_port, to_port, cidr_ip]]: constant[ This is a convenience function that wraps the "authorize ip permission" functionality of the C{authorize_security_group} method. For an explanation of the parameters, see C{authorize_security_group}. ] variable[d] assign[=] call[name[self].authorize_security_group, parameter[name[group_name]]] return[name[d]]
keyword[def] identifier[authorize_ip_permission] ( identifier[self] , identifier[group_name] , identifier[ip_protocol] , identifier[from_port] , identifier[to_port] , identifier[cidr_ip] ): literal[string] identifier[d] = identifier[self] . identifier[authorize_security_group] ( identifier[group_name] , identifier[ip_protocol] = identifier[ip_protocol] , identifier[from_port] = identifier[from_port] , identifier[to_port] = identifier[to_port] , identifier[cidr_ip] = identifier[cidr_ip] ) keyword[return] identifier[d]
def authorize_ip_permission(self, group_name, ip_protocol, from_port, to_port, cidr_ip): """ This is a convenience function that wraps the "authorize ip permission" functionality of the C{authorize_security_group} method. For an explanation of the parameters, see C{authorize_security_group}. """ d = self.authorize_security_group(group_name, ip_protocol=ip_protocol, from_port=from_port, to_port=to_port, cidr_ip=cidr_ip) return d
def get_ordering(self, request, queryset, view): """Return an ordering for a given request. DRF expects a comma separated list, while DREST expects an array. This method overwrites the DRF default so it can parse the array. """ params = view.get_request_feature(view.SORT) if params: fields = [param.strip() for param in params] valid_ordering, invalid_ordering = self.remove_invalid_fields( queryset, fields, view ) # if any of the sort fields are invalid, throw an error. # else return the ordering if invalid_ordering: raise ValidationError( "Invalid filter field: %s" % invalid_ordering ) else: return valid_ordering # No sorting was included return self.get_default_ordering(view)
def function[get_ordering, parameter[self, request, queryset, view]]: constant[Return an ordering for a given request. DRF expects a comma separated list, while DREST expects an array. This method overwrites the DRF default so it can parse the array. ] variable[params] assign[=] call[name[view].get_request_feature, parameter[name[view].SORT]] if name[params] begin[:] variable[fields] assign[=] <ast.ListComp object at 0x7da18eb56e60> <ast.Tuple object at 0x7da18eb56e90> assign[=] call[name[self].remove_invalid_fields, parameter[name[queryset], name[fields], name[view]]] if name[invalid_ordering] begin[:] <ast.Raise object at 0x7da18eb56830> return[call[name[self].get_default_ordering, parameter[name[view]]]]
keyword[def] identifier[get_ordering] ( identifier[self] , identifier[request] , identifier[queryset] , identifier[view] ): literal[string] identifier[params] = identifier[view] . identifier[get_request_feature] ( identifier[view] . identifier[SORT] ) keyword[if] identifier[params] : identifier[fields] =[ identifier[param] . identifier[strip] () keyword[for] identifier[param] keyword[in] identifier[params] ] identifier[valid_ordering] , identifier[invalid_ordering] = identifier[self] . identifier[remove_invalid_fields] ( identifier[queryset] , identifier[fields] , identifier[view] ) keyword[if] identifier[invalid_ordering] : keyword[raise] identifier[ValidationError] ( literal[string] % identifier[invalid_ordering] ) keyword[else] : keyword[return] identifier[valid_ordering] keyword[return] identifier[self] . identifier[get_default_ordering] ( identifier[view] )
def get_ordering(self, request, queryset, view): """Return an ordering for a given request. DRF expects a comma separated list, while DREST expects an array. This method overwrites the DRF default so it can parse the array. """ params = view.get_request_feature(view.SORT) if params: fields = [param.strip() for param in params] (valid_ordering, invalid_ordering) = self.remove_invalid_fields(queryset, fields, view) # if any of the sort fields are invalid, throw an error. # else return the ordering if invalid_ordering: raise ValidationError('Invalid filter field: %s' % invalid_ordering) # depends on [control=['if'], data=[]] else: return valid_ordering # depends on [control=['if'], data=[]] # No sorting was included return self.get_default_ordering(view)
def process_multinest_run(file_root, base_dir, **kwargs): """Loads data from a MultiNest run into the nestcheck dictionary format for analysis. N.B. producing the required output file containing information about the iso-likelihood contours within which points were sampled (where they were "born") requires MultiNest version 3.11 or later. Parameters ---------- file_root: str Root name for output files. When running MultiNest, this is determined by the nest_root parameter. base_dir: str Directory containing output files. When running MultiNest, this is determined by the nest_root parameter. kwargs: dict, optional Passed to ns_run_utils.check_ns_run (via process_samples_array) Returns ------- ns_run: dict Nested sampling run dict (see the module docstring for more details). """ # Load dead and live points dead = np.loadtxt(os.path.join(base_dir, file_root) + '-dead-birth.txt') live = np.loadtxt(os.path.join(base_dir, file_root) + '-phys_live-birth.txt') # Remove unnecessary final columns dead = dead[:, :-2] live = live[:, :-1] assert dead[:, -2].max() < live[:, -2].min(), ( 'final live points should have greater logls than any dead point!', dead, live) ns_run = process_samples_array(np.vstack((dead, live)), **kwargs) assert np.all(ns_run['thread_min_max'][:, 0] == -np.inf), ( 'As MultiNest does not currently perform dynamic nested sampling, all ' 'threads should start by sampling the whole prior.') ns_run['output'] = {} ns_run['output']['file_root'] = file_root ns_run['output']['base_dir'] = base_dir return ns_run
def function[process_multinest_run, parameter[file_root, base_dir]]: constant[Loads data from a MultiNest run into the nestcheck dictionary format for analysis. N.B. producing required output file containing information about the iso-likelihood contours within which points were sampled (where they were "born") requies MultiNest version 3.11 or later. Parameters ---------- file_root: str Root name for output files. When running MultiNest, this is determined by the nest_root parameter. base_dir: str Directory containing output files. When running MultiNest, this is determined by the nest_root parameter. kwargs: dict, optional Passed to ns_run_utils.check_ns_run (via process_samples_array) Returns ------- ns_run: dict Nested sampling run dict (see the module docstring for more details). ] variable[dead] assign[=] call[name[np].loadtxt, parameter[binary_operation[call[name[os].path.join, parameter[name[base_dir], name[file_root]]] + constant[-dead-birth.txt]]]] variable[live] assign[=] call[name[np].loadtxt, parameter[binary_operation[call[name[os].path.join, parameter[name[base_dir], name[file_root]]] + constant[-phys_live-birth.txt]]]] variable[dead] assign[=] call[name[dead]][tuple[[<ast.Slice object at 0x7da2054a6920>, <ast.Slice object at 0x7da2054a5210>]]] variable[live] assign[=] call[name[live]][tuple[[<ast.Slice object at 0x7da2054a55a0>, <ast.Slice object at 0x7da2054a7f70>]]] assert[compare[call[call[name[dead]][tuple[[<ast.Slice object at 0x7da2054a6e30>, <ast.UnaryOp object at 0x7da2054a7670>]]].max, parameter[]] less[<] call[call[name[live]][tuple[[<ast.Slice object at 0x7da2054a5b70>, <ast.UnaryOp object at 0x7da2054a6e90>]]].min, parameter[]]]] variable[ns_run] assign[=] call[name[process_samples_array], parameter[call[name[np].vstack, parameter[tuple[[<ast.Name object at 0x7da2054a6a70>, <ast.Name object at 0x7da2054a4c10>]]]]]] assert[call[name[np].all, parameter[compare[call[call[name[ns_run]][constant[thread_min_max]]][tuple[[<ast.Slice object at 0x7da2054a5c30>, <ast.Constant object at 0x7da2054a7fd0>]]] equal[==] <ast.UnaryOp object at 0x7da2054a6950>]]]] call[name[ns_run]][constant[output]] assign[=] dictionary[[], []] call[call[name[ns_run]][constant[output]]][constant[file_root]] assign[=] name[file_root] call[call[name[ns_run]][constant[output]]][constant[base_dir]] assign[=] name[base_dir] return[name[ns_run]]
keyword[def] identifier[process_multinest_run] ( identifier[file_root] , identifier[base_dir] ,** identifier[kwargs] ): literal[string] identifier[dead] = identifier[np] . identifier[loadtxt] ( identifier[os] . identifier[path] . identifier[join] ( identifier[base_dir] , identifier[file_root] )+ literal[string] ) identifier[live] = identifier[np] . identifier[loadtxt] ( identifier[os] . identifier[path] . identifier[join] ( identifier[base_dir] , identifier[file_root] ) + literal[string] ) identifier[dead] = identifier[dead] [:,:- literal[int] ] identifier[live] = identifier[live] [:,:- literal[int] ] keyword[assert] identifier[dead] [:,- literal[int] ]. identifier[max] ()< identifier[live] [:,- literal[int] ]. identifier[min] (),( literal[string] , identifier[dead] , identifier[live] ) identifier[ns_run] = identifier[process_samples_array] ( identifier[np] . identifier[vstack] (( identifier[dead] , identifier[live] )),** identifier[kwargs] ) keyword[assert] identifier[np] . identifier[all] ( identifier[ns_run] [ literal[string] ][:, literal[int] ]==- identifier[np] . identifier[inf] ),( literal[string] literal[string] ) identifier[ns_run] [ literal[string] ]={} identifier[ns_run] [ literal[string] ][ literal[string] ]= identifier[file_root] identifier[ns_run] [ literal[string] ][ literal[string] ]= identifier[base_dir] keyword[return] identifier[ns_run]
def process_multinest_run(file_root, base_dir, **kwargs): """Loads data from a MultiNest run into the nestcheck dictionary format for analysis. N.B. producing required output file containing information about the iso-likelihood contours within which points were sampled (where they were "born") requies MultiNest version 3.11 or later. Parameters ---------- file_root: str Root name for output files. When running MultiNest, this is determined by the nest_root parameter. base_dir: str Directory containing output files. When running MultiNest, this is determined by the nest_root parameter. kwargs: dict, optional Passed to ns_run_utils.check_ns_run (via process_samples_array) Returns ------- ns_run: dict Nested sampling run dict (see the module docstring for more details). """ # Load dead and live points dead = np.loadtxt(os.path.join(base_dir, file_root) + '-dead-birth.txt') live = np.loadtxt(os.path.join(base_dir, file_root) + '-phys_live-birth.txt') # Remove unnecessary final columns dead = dead[:, :-2] live = live[:, :-1] assert dead[:, -2].max() < live[:, -2].min(), ('final live points should have greater logls than any dead point!', dead, live) ns_run = process_samples_array(np.vstack((dead, live)), **kwargs) assert np.all(ns_run['thread_min_max'][:, 0] == -np.inf), 'As MultiNest does not currently perform dynamic nested sampling, all threads should start by sampling the whole prior.' ns_run['output'] = {} ns_run['output']['file_root'] = file_root ns_run['output']['base_dir'] = base_dir return ns_run
def vq_nearest_neighbor(x, means, soft_em=False, num_samples=10, temperature=None): """Find the nearest element in means to elements in x.""" bottleneck_size = common_layers.shape_list(means)[0] x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True) means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True) scalar_prod = tf.matmul(x, means, transpose_b=True) dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod if soft_em: x_means_idx = tf.multinomial(-dist, num_samples=num_samples) x_means_hot = tf.one_hot( x_means_idx, depth=common_layers.shape_list(means)[0]) x_means_hot = tf.reduce_mean(x_means_hot, axis=1) else: if temperature is None: x_means_idx = tf.argmax(-dist, axis=-1) else: x_means_idx = tf.multinomial(- dist / temperature, 1) x_means_idx = tf.squeeze(x_means_idx, axis=-1) if (common_layers.should_generate_summaries() and not common_layers.is_xla_compiled()): tf.summary.histogram("means_idx", tf.reshape(x_means_idx, [-1])) x_means_hot = tf.one_hot(x_means_idx, bottleneck_size) x_means_hot_flat = tf.reshape(x_means_hot, [-1, bottleneck_size]) x_means = tf.matmul(x_means_hot_flat, means) e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means))) return x_means_hot, e_loss, dist
def function[vq_nearest_neighbor, parameter[x, means, soft_em, num_samples, temperature]]: constant[Find the nearest element in means to elements in x.] variable[bottleneck_size] assign[=] call[call[name[common_layers].shape_list, parameter[name[means]]]][constant[0]] variable[x_norm_sq] assign[=] call[name[tf].reduce_sum, parameter[call[name[tf].square, parameter[name[x]]]]] variable[means_norm_sq] assign[=] call[name[tf].reduce_sum, parameter[call[name[tf].square, parameter[name[means]]]]] variable[scalar_prod] assign[=] call[name[tf].matmul, parameter[name[x], name[means]]] variable[dist] assign[=] binary_operation[binary_operation[name[x_norm_sq] + call[name[tf].transpose, parameter[name[means_norm_sq]]]] - binary_operation[constant[2] * name[scalar_prod]]] if name[soft_em] begin[:] variable[x_means_idx] assign[=] call[name[tf].multinomial, parameter[<ast.UnaryOp object at 0x7da1b1fe0280>]] variable[x_means_hot] assign[=] call[name[tf].one_hot, parameter[name[x_means_idx]]] variable[x_means_hot] assign[=] call[name[tf].reduce_mean, parameter[name[x_means_hot]]] variable[x_means_hot_flat] assign[=] call[name[tf].reshape, parameter[name[x_means_hot], list[[<ast.UnaryOp object at 0x7da1b1f38610>, <ast.Name object at 0x7da1b1f3bd30>]]]] variable[x_means] assign[=] call[name[tf].matmul, parameter[name[x_means_hot_flat], name[means]]] variable[e_loss] assign[=] call[name[tf].reduce_mean, parameter[call[name[tf].squared_difference, parameter[name[x], call[name[tf].stop_gradient, parameter[name[x_means]]]]]]] return[tuple[[<ast.Name object at 0x7da1b1f3b250>, <ast.Name object at 0x7da1b1f394b0>, <ast.Name object at 0x7da1b1f3b640>]]]
keyword[def] identifier[vq_nearest_neighbor] ( identifier[x] , identifier[means] , identifier[soft_em] = keyword[False] , identifier[num_samples] = literal[int] , identifier[temperature] = keyword[None] ): literal[string] identifier[bottleneck_size] = identifier[common_layers] . identifier[shape_list] ( identifier[means] )[ literal[int] ] identifier[x_norm_sq] = identifier[tf] . identifier[reduce_sum] ( identifier[tf] . identifier[square] ( identifier[x] ), identifier[axis] =- literal[int] , identifier[keepdims] = keyword[True] ) identifier[means_norm_sq] = identifier[tf] . identifier[reduce_sum] ( identifier[tf] . identifier[square] ( identifier[means] ), identifier[axis] =- literal[int] , identifier[keepdims] = keyword[True] ) identifier[scalar_prod] = identifier[tf] . identifier[matmul] ( identifier[x] , identifier[means] , identifier[transpose_b] = keyword[True] ) identifier[dist] = identifier[x_norm_sq] + identifier[tf] . identifier[transpose] ( identifier[means_norm_sq] )- literal[int] * identifier[scalar_prod] keyword[if] identifier[soft_em] : identifier[x_means_idx] = identifier[tf] . identifier[multinomial] (- identifier[dist] , identifier[num_samples] = identifier[num_samples] ) identifier[x_means_hot] = identifier[tf] . identifier[one_hot] ( identifier[x_means_idx] , identifier[depth] = identifier[common_layers] . identifier[shape_list] ( identifier[means] )[ literal[int] ]) identifier[x_means_hot] = identifier[tf] . identifier[reduce_mean] ( identifier[x_means_hot] , identifier[axis] = literal[int] ) keyword[else] : keyword[if] identifier[temperature] keyword[is] keyword[None] : identifier[x_means_idx] = identifier[tf] . identifier[argmax] (- identifier[dist] , identifier[axis] =- literal[int] ) keyword[else] : identifier[x_means_idx] = identifier[tf] . identifier[multinomial] (- identifier[dist] / identifier[temperature] , literal[int] ) identifier[x_means_idx] = identifier[tf] . identifier[squeeze] ( identifier[x_means_idx] , identifier[axis] =- literal[int] ) keyword[if] ( identifier[common_layers] . identifier[should_generate_summaries] () keyword[and] keyword[not] identifier[common_layers] . identifier[is_xla_compiled] ()): identifier[tf] . identifier[summary] . identifier[histogram] ( literal[string] , identifier[tf] . identifier[reshape] ( identifier[x_means_idx] ,[- literal[int] ])) identifier[x_means_hot] = identifier[tf] . identifier[one_hot] ( identifier[x_means_idx] , identifier[bottleneck_size] ) identifier[x_means_hot_flat] = identifier[tf] . identifier[reshape] ( identifier[x_means_hot] ,[- literal[int] , identifier[bottleneck_size] ]) identifier[x_means] = identifier[tf] . identifier[matmul] ( identifier[x_means_hot_flat] , identifier[means] ) identifier[e_loss] = identifier[tf] . identifier[reduce_mean] ( identifier[tf] . identifier[squared_difference] ( identifier[x] , identifier[tf] . identifier[stop_gradient] ( identifier[x_means] ))) keyword[return] identifier[x_means_hot] , identifier[e_loss] , identifier[dist]
def vq_nearest_neighbor(x, means, soft_em=False, num_samples=10, temperature=None): """Find the nearest element in means to elements in x.""" bottleneck_size = common_layers.shape_list(means)[0] x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True) means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True) scalar_prod = tf.matmul(x, means, transpose_b=True) dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod if soft_em: x_means_idx = tf.multinomial(-dist, num_samples=num_samples) x_means_hot = tf.one_hot(x_means_idx, depth=common_layers.shape_list(means)[0]) x_means_hot = tf.reduce_mean(x_means_hot, axis=1) # depends on [control=['if'], data=[]] else: if temperature is None: x_means_idx = tf.argmax(-dist, axis=-1) # depends on [control=['if'], data=[]] else: x_means_idx = tf.multinomial(-dist / temperature, 1) x_means_idx = tf.squeeze(x_means_idx, axis=-1) if common_layers.should_generate_summaries() and (not common_layers.is_xla_compiled()): tf.summary.histogram('means_idx', tf.reshape(x_means_idx, [-1])) # depends on [control=['if'], data=[]] x_means_hot = tf.one_hot(x_means_idx, bottleneck_size) x_means_hot_flat = tf.reshape(x_means_hot, [-1, bottleneck_size]) x_means = tf.matmul(x_means_hot_flat, means) e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means))) return (x_means_hot, e_loss, dist)
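vq_nearest_neighbor() never forms the full difference tensor: it expands the squared distance as ||x||^2 + ||m||^2 - 2 x.m^T so a single matrix multiply does the work. A numpy sketch of the hard-assignment path, with made-up shapes:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(5, 4))         # 5 vectors to quantize
means = rng.normal(size=(3, 4))     # codebook with 3 centroids

x_norm_sq = np.sum(x ** 2, axis=-1, keepdims=True)           # (5, 1)
means_norm_sq = np.sum(means ** 2, axis=-1, keepdims=True)   # (3, 1)
dist = x_norm_sq + means_norm_sq.T - 2 * x @ means.T          # (5, 3) squared distances
idx = np.argmax(-dist, axis=-1)                                # index of nearest centroid per row
x_means_hot = np.eye(means.shape[0])[idx]                      # one-hot assignments, as in the TF code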
def active_futures(ticker: str, dt) -> str: """ Active futures contract Args: ticker: futures ticker, i.e., ESA Index, Z A Index, CLA Comdty, etc. dt: date Returns: str: ticker name """ t_info = ticker.split() prefix, asset = ' '.join(t_info[:-1]), t_info[-1] info = const.market_info(f'{prefix[:-1]}1 {asset}') f1, f2 = f'{prefix[:-1]}1 {asset}', f'{prefix[:-1]}2 {asset}' fut_2 = fut_ticker(gen_ticker=f2, dt=dt, freq=info['freq']) fut_1 = fut_ticker(gen_ticker=f1, dt=dt, freq=info['freq']) fut_tk = bdp(tickers=[fut_1, fut_2], flds='Last_Tradeable_Dt', cache=True) if pd.Timestamp(dt).month < pd.Timestamp(fut_tk.last_tradeable_dt[0]).month: return fut_1 d1 = bdib(ticker=f1, dt=dt) d2 = bdib(ticker=f2, dt=dt) return fut_1 if d1[f1].volume.sum() > d2[f2].volume.sum() else fut_2
def function[active_futures, parameter[ticker, dt]]: constant[ Active futures contract Args: ticker: futures ticker, i.e., ESA Index, Z A Index, CLA Comdty, etc. dt: date Returns: str: ticker name ] variable[t_info] assign[=] call[name[ticker].split, parameter[]] <ast.Tuple object at 0x7da1b00537f0> assign[=] tuple[[<ast.Call object at 0x7da1b0052350>, <ast.Subscript object at 0x7da1b00515a0>]] variable[info] assign[=] call[name[const].market_info, parameter[<ast.JoinedStr object at 0x7da1b0051d20>]] <ast.Tuple object at 0x7da1b0050e80> assign[=] tuple[[<ast.JoinedStr object at 0x7da1b0051330>, <ast.JoinedStr object at 0x7da1b0052e30>]] variable[fut_2] assign[=] call[name[fut_ticker], parameter[]] variable[fut_1] assign[=] call[name[fut_ticker], parameter[]] variable[fut_tk] assign[=] call[name[bdp], parameter[]] if compare[call[name[pd].Timestamp, parameter[name[dt]]].month less[<] call[name[pd].Timestamp, parameter[call[name[fut_tk].last_tradeable_dt][constant[0]]]].month] begin[:] return[name[fut_1]] variable[d1] assign[=] call[name[bdib], parameter[]] variable[d2] assign[=] call[name[bdib], parameter[]] return[<ast.IfExp object at 0x7da1b0052590>]
keyword[def] identifier[active_futures] ( identifier[ticker] : identifier[str] , identifier[dt] )-> identifier[str] : literal[string] identifier[t_info] = identifier[ticker] . identifier[split] () identifier[prefix] , identifier[asset] = literal[string] . identifier[join] ( identifier[t_info] [:- literal[int] ]), identifier[t_info] [- literal[int] ] identifier[info] = identifier[const] . identifier[market_info] ( literal[string] ) identifier[f1] , identifier[f2] = literal[string] , literal[string] identifier[fut_2] = identifier[fut_ticker] ( identifier[gen_ticker] = identifier[f2] , identifier[dt] = identifier[dt] , identifier[freq] = identifier[info] [ literal[string] ]) identifier[fut_1] = identifier[fut_ticker] ( identifier[gen_ticker] = identifier[f1] , identifier[dt] = identifier[dt] , identifier[freq] = identifier[info] [ literal[string] ]) identifier[fut_tk] = identifier[bdp] ( identifier[tickers] =[ identifier[fut_1] , identifier[fut_2] ], identifier[flds] = literal[string] , identifier[cache] = keyword[True] ) keyword[if] identifier[pd] . identifier[Timestamp] ( identifier[dt] ). identifier[month] < identifier[pd] . identifier[Timestamp] ( identifier[fut_tk] . identifier[last_tradeable_dt] [ literal[int] ]). identifier[month] : keyword[return] identifier[fut_1] identifier[d1] = identifier[bdib] ( identifier[ticker] = identifier[f1] , identifier[dt] = identifier[dt] ) identifier[d2] = identifier[bdib] ( identifier[ticker] = identifier[f2] , identifier[dt] = identifier[dt] ) keyword[return] identifier[fut_1] keyword[if] identifier[d1] [ identifier[f1] ]. identifier[volume] . identifier[sum] ()> identifier[d2] [ identifier[f2] ]. identifier[volume] . identifier[sum] () keyword[else] identifier[fut_2]
def active_futures(ticker: str, dt) -> str: """ Active futures contract Args: ticker: futures ticker, i.e., ESA Index, Z A Index, CLA Comdty, etc. dt: date Returns: str: ticker name """ t_info = ticker.split() (prefix, asset) = (' '.join(t_info[:-1]), t_info[-1]) info = const.market_info(f'{prefix[:-1]}1 {asset}') (f1, f2) = (f'{prefix[:-1]}1 {asset}', f'{prefix[:-1]}2 {asset}') fut_2 = fut_ticker(gen_ticker=f2, dt=dt, freq=info['freq']) fut_1 = fut_ticker(gen_ticker=f1, dt=dt, freq=info['freq']) fut_tk = bdp(tickers=[fut_1, fut_2], flds='Last_Tradeable_Dt', cache=True) if pd.Timestamp(dt).month < pd.Timestamp(fut_tk.last_tradeable_dt[0]).month: return fut_1 # depends on [control=['if'], data=[]] d1 = bdib(ticker=f1, dt=dt) d2 = bdib(ticker=f2, dt=dt) return fut_1 if d1[f1].volume.sum() > d2[f2].volume.sum() else fut_2
def send_mail(subject, message, from_email, recipient_list, html_message='', scheduled_time=None, headers=None, priority=PRIORITY.medium): """ Add a new message to the mail queue. This is a replacement for Django's ``send_mail`` core email method. """ subject = force_text(subject) status = None if priority == PRIORITY.now else STATUS.queued emails = [] for address in recipient_list: emails.append( Email.objects.create( from_email=from_email, to=address, subject=subject, message=message, html_message=html_message, status=status, headers=headers, priority=priority, scheduled_time=scheduled_time ) ) if priority == PRIORITY.now: for email in emails: email.dispatch() return emails
def function[send_mail, parameter[subject, message, from_email, recipient_list, html_message, scheduled_time, headers, priority]]: constant[ Add a new message to the mail queue. This is a replacement for Django's ``send_mail`` core email method. ] variable[subject] assign[=] call[name[force_text], parameter[name[subject]]] variable[status] assign[=] <ast.IfExp object at 0x7da20e9b1480> variable[emails] assign[=] list[[]] for taget[name[address]] in starred[name[recipient_list]] begin[:] call[name[emails].append, parameter[call[name[Email].objects.create, parameter[]]]] if compare[name[priority] equal[==] name[PRIORITY].now] begin[:] for taget[name[email]] in starred[name[emails]] begin[:] call[name[email].dispatch, parameter[]] return[name[emails]]
keyword[def] identifier[send_mail] ( identifier[subject] , identifier[message] , identifier[from_email] , identifier[recipient_list] , identifier[html_message] = literal[string] , identifier[scheduled_time] = keyword[None] , identifier[headers] = keyword[None] , identifier[priority] = identifier[PRIORITY] . identifier[medium] ): literal[string] identifier[subject] = identifier[force_text] ( identifier[subject] ) identifier[status] = keyword[None] keyword[if] identifier[priority] == identifier[PRIORITY] . identifier[now] keyword[else] identifier[STATUS] . identifier[queued] identifier[emails] =[] keyword[for] identifier[address] keyword[in] identifier[recipient_list] : identifier[emails] . identifier[append] ( identifier[Email] . identifier[objects] . identifier[create] ( identifier[from_email] = identifier[from_email] , identifier[to] = identifier[address] , identifier[subject] = identifier[subject] , identifier[message] = identifier[message] , identifier[html_message] = identifier[html_message] , identifier[status] = identifier[status] , identifier[headers] = identifier[headers] , identifier[priority] = identifier[priority] , identifier[scheduled_time] = identifier[scheduled_time] ) ) keyword[if] identifier[priority] == identifier[PRIORITY] . identifier[now] : keyword[for] identifier[email] keyword[in] identifier[emails] : identifier[email] . identifier[dispatch] () keyword[return] identifier[emails]
def send_mail(subject, message, from_email, recipient_list, html_message='', scheduled_time=None, headers=None, priority=PRIORITY.medium): """ Add a new message to the mail queue. This is a replacement for Django's ``send_mail`` core email method. """ subject = force_text(subject) status = None if priority == PRIORITY.now else STATUS.queued emails = [] for address in recipient_list: emails.append(Email.objects.create(from_email=from_email, to=address, subject=subject, message=message, html_message=html_message, status=status, headers=headers, priority=priority, scheduled_time=scheduled_time)) # depends on [control=['for'], data=['address']] if priority == PRIORITY.now: for email in emails: email.dispatch() # depends on [control=['for'], data=['email']] # depends on [control=['if'], data=[]] return emails
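A hedged usage sketch for send_mail(); it assumes the Django app that defines the Email model and the PRIORITY/STATUS constants is already configured, and all addresses are made up:

emails = send_mail(
    subject='Welcome',
    message='Plain-text body',
    from_email='noreply@example.com',
    recipient_list=['alice@example.com', 'bob@example.com'],
    html_message='<p>HTML body</p>',
    priority=PRIORITY.now)   # dispatch immediately; any other priority only queues the rows
# One Email object is created per recipient and the list of created objects is returned.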
def _read_para_relay_hmac(self, code, cbit, clen, *, desc, length, version): """Read HIP RELAY_HMAC parameter. Structure of HIP RELAY_HMAC parameter [RFC 5770]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | HMAC | / / / +-------------------------------+ | | Padding | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 relay_hmac.type Parameter Type 1 15 relay_hmac.critical Critical Bit 2 16 relay_hmac.length Length of Contents 4 32 relay_hmac.hmac HMAC ? ? - Padding """ _hmac = self._read_fileng(clen) relay_hmac = dict( type=desc, critical=cbit, length=clen, hmac=_hmac, ) _plen = length - clen if _plen: self._read_fileng(_plen) return relay_hmac
def function[_read_para_relay_hmac, parameter[self, code, cbit, clen]]: constant[Read HIP RELAY_HMAC parameter. Structure of HIP RELAY_HMAC parameter [RFC 5770]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | HMAC | / / / +-------------------------------+ | | Padding | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 relay_hmac.type Parameter Type 1 15 relay_hmac.critical Critical Bit 2 16 relay_hmac.length Length of Contents 4 32 relay_hmac.hmac HMAC ? ? - Padding ] variable[_hmac] assign[=] call[name[self]._read_fileng, parameter[name[clen]]] variable[relay_hmac] assign[=] call[name[dict], parameter[]] variable[_plen] assign[=] binary_operation[name[length] - name[clen]] if name[_plen] begin[:] call[name[self]._read_fileng, parameter[name[_plen]]] return[name[relay_hmac]]
keyword[def] identifier[_read_para_relay_hmac] ( identifier[self] , identifier[code] , identifier[cbit] , identifier[clen] ,*, identifier[desc] , identifier[length] , identifier[version] ): literal[string] identifier[_hmac] = identifier[self] . identifier[_read_fileng] ( identifier[clen] ) identifier[relay_hmac] = identifier[dict] ( identifier[type] = identifier[desc] , identifier[critical] = identifier[cbit] , identifier[length] = identifier[clen] , identifier[hmac] = identifier[_hmac] , ) identifier[_plen] = identifier[length] - identifier[clen] keyword[if] identifier[_plen] : identifier[self] . identifier[_read_fileng] ( identifier[_plen] ) keyword[return] identifier[relay_hmac]
def _read_para_relay_hmac(self, code, cbit, clen, *, desc, length, version): """Read HIP RELAY_HMAC parameter. Structure of HIP RELAY_HMAC parameter [RFC 5770]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | HMAC | / / / +-------------------------------+ | | Padding | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 relay_hmac.type Parameter Type 1 15 relay_hmac.critical Critical Bit 2 16 relay_hmac.length Length of Contents 4 32 relay_hmac.hmac HMAC ? ? - Padding """ _hmac = self._read_fileng(clen) relay_hmac = dict(type=desc, critical=cbit, length=clen, hmac=_hmac) _plen = length - clen if _plen: self._read_fileng(_plen) # depends on [control=['if'], data=[]] return relay_hmac
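The reader above consumes clen bytes of HMAC and then length - clen bytes of padding, since HIP parameter contents are padded out to an 8-octet boundary. A standalone sketch of that bookkeeping over an in-memory buffer (the byte values and lengths are illustrative):

import io

clen = 20       # declared HMAC length
length = 24     # contents length rounded up to the 8-octet boundary
stream = io.BytesIO(b'\xaa' * clen + b'\x00' * (length - clen))

hmac = stream.read(clen)         # the HMAC itself
_ = stream.read(length - clen)   # 4 padding bytes, read and discarded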
def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED'
def function[server_enabled, parameter[s_name]]: constant[ Check if a server is enabled globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enabled 'serverName' ] variable[server] assign[=] call[name[_server_get], parameter[name[s_name]]] return[<ast.BoolOp object at 0x7da1b2139780>]
keyword[def] identifier[server_enabled] ( identifier[s_name] ,** identifier[connection_args] ): literal[string] identifier[server] = identifier[_server_get] ( identifier[s_name] ,** identifier[connection_args] ) keyword[return] identifier[server] keyword[is] keyword[not] keyword[None] keyword[and] identifier[server] . identifier[get_state] ()== literal[string]
def server_enabled(s_name, **connection_args): """ Check if a server is enabled globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enabled 'serverName' """ server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED'
def auth(**kwargs): ''' Authorize device synchronization. ''' """ kodrive auth <path> <device_id (client)> 1. make sure path has been added to config.xml, server 2. make sure path is not shared by someone 3. add device_id to folder in config.xml, server 4. add device to devices in config.xml, server """ option = 'add' path = kwargs['path'] key = kwargs['key'] if kwargs['remove']: option = 'remove' if kwargs['yes']: output, err = cli_syncthing_adapter.auth(option, key, path) click.echo("%s" % output, err=err) else: verb = 'authorize' if not kwargs['remove'] else 'de-authorize' if click.confirm("Are you sure you want to %s this device to access %s?" % (verb, path)): output, err = cli_syncthing_adapter.auth(option, key, path) if output: click.echo("%s" % output, err=err)
def function[auth, parameter[]]: constant[ Authorize device synchronization. ] constant[ kodrive auth <path> <device_id (client)> 1. make sure path has been added to config.xml, server 2. make sure path is not shared by someone 3. add device_id to folder in config.xml, server 4. add device to devices in config.xml, server ] variable[option] assign[=] constant[add] variable[path] assign[=] call[name[kwargs]][constant[path]] variable[key] assign[=] call[name[kwargs]][constant[key]] if call[name[kwargs]][constant[remove]] begin[:] variable[option] assign[=] constant[remove] if call[name[kwargs]][constant[yes]] begin[:] <ast.Tuple object at 0x7da1b1f9c1f0> assign[=] call[name[cli_syncthing_adapter].auth, parameter[name[option], name[key], name[path]]] call[name[click].echo, parameter[binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> name[output]]]]
keyword[def] identifier[auth] (** identifier[kwargs] ): literal[string] literal[string] identifier[option] = literal[string] identifier[path] = identifier[kwargs] [ literal[string] ] identifier[key] = identifier[kwargs] [ literal[string] ] keyword[if] identifier[kwargs] [ literal[string] ]: identifier[option] = literal[string] keyword[if] identifier[kwargs] [ literal[string] ]: identifier[output] , identifier[err] = identifier[cli_syncthing_adapter] . identifier[auth] ( identifier[option] , identifier[key] , identifier[path] ) identifier[click] . identifier[echo] ( literal[string] % identifier[output] , identifier[err] = identifier[err] ) keyword[else] : identifier[verb] = literal[string] keyword[if] keyword[not] identifier[kwargs] [ literal[string] ] keyword[else] literal[string] keyword[if] identifier[click] . identifier[confirm] ( literal[string] %( identifier[verb] , identifier[path] )): identifier[output] , identifier[err] = identifier[cli_syncthing_adapter] . identifier[auth] ( identifier[option] , identifier[key] , identifier[path] ) keyword[if] identifier[output] : identifier[click] . identifier[echo] ( literal[string] % identifier[output] , identifier[err] = identifier[err] )
def auth(**kwargs): """ Authorize device synchronization. """ '\n kodrive auth <path> <device_id (client)>\n\n 1. make sure path has been added to config.xml, server\n 2. make sure path is not shared by someone\n 3. add device_id to folder in config.xml, server\n 4. add device to devices in config.xml, server\n\n ' option = 'add' path = kwargs['path'] key = kwargs['key'] if kwargs['remove']: option = 'remove' # depends on [control=['if'], data=[]] if kwargs['yes']: (output, err) = cli_syncthing_adapter.auth(option, key, path) click.echo('%s' % output, err=err) # depends on [control=['if'], data=[]] else: verb = 'authorize' if not kwargs['remove'] else 'de-authorize' if click.confirm('Are you sure you want to %s this device to access %s?' % (verb, path)): (output, err) = cli_syncthing_adapter.auth(option, key, path) if output: click.echo('%s' % output, err=err) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def stats(args):
    """Create stats from the analysis
    """
    logger.info("Reading sequences")
    data = parse_ma_file(args.ma)
    logger.info("Get sequences from sam")
    is_align = _read_sam(args.sam)
    is_json, is_db = _read_json(args.json)
    res = _summarise_sam(data, is_align, is_json, is_db)
    _write_suma(res, os.path.join(args.out, "stats_align.dat"))
    logger.info("Done")
def function[stats, parameter[args]]: constant[Create stats from the analysis ] call[name[logger].info, parameter[constant[Reading sequences]]] variable[data] assign[=] call[name[parse_ma_file], parameter[name[args].ma]] call[name[logger].info, parameter[constant[Get sequences from sam]]] variable[is_align] assign[=] call[name[_read_sam], parameter[name[args].sam]] <ast.Tuple object at 0x7da1b031e170> assign[=] call[name[_read_json], parameter[name[args].json]] variable[res] assign[=] call[name[_summarise_sam], parameter[name[data], name[is_align], name[is_json], name[is_db]]] call[name[_write_suma], parameter[name[res], call[name[os].path.join, parameter[name[args].out, constant[stats_align.dat]]]]] call[name[logger].info, parameter[constant[Done]]]
keyword[def] identifier[stats] ( identifier[args] ): literal[string] identifier[logger] . identifier[info] ( literal[string] ) identifier[data] = identifier[parse_ma_file] ( identifier[args] . identifier[ma] ) identifier[logger] . identifier[info] ( literal[string] ) identifier[is_align] = identifier[_read_sam] ( identifier[args] . identifier[sam] ) identifier[is_json] , identifier[is_db] = identifier[_read_json] ( identifier[args] . identifier[json] ) identifier[res] = identifier[_summarise_sam] ( identifier[data] , identifier[is_align] , identifier[is_json] , identifier[is_db] ) identifier[_write_suma] ( identifier[res] , identifier[os] . identifier[path] . identifier[join] ( identifier[args] . identifier[out] , literal[string] )) identifier[logger] . identifier[info] ( literal[string] )
def stats(args): """Create stats from the analysis """ logger.info('Reading sequences') data = parse_ma_file(args.ma) logger.info('Get sequences from sam') is_align = _read_sam(args.sam) (is_json, is_db) = _read_json(args.json) res = _summarise_sam(data, is_align, is_json, is_db) _write_suma(res, os.path.join(args.out, 'stats_align.dat')) logger.info('Done')
def qqplot(pv, distr = 'log10', alphaLevel = 0.05): """ This script makes a Quantile-Quantile plot of the observed negative log P-value distribution against the theoretical one under the null. Input: pv pvalues (numpy array) distr scale of the distribution (log10 or chi2) alphaLevel significance bounds """ shape_ok = (len(pv.shape)==1) or ((len(pv.shape)==2) and pv.shape[1]==1) assert shape_ok, 'qqplot requires a 1D array of p-values' tests = pv.shape[0] pnull = (0.5 + sp.arange(tests))/tests # pnull = np.sort(np.random.uniform(size = tests)) Ipv = sp.argsort(pv) if distr == 'chi2': qnull = sp.stats.chi2.isf(pnull, 1) qemp = (sp.stats.chi2.isf(pv[Ipv],1)) xl = 'LOD scores' yl = '$\chi^2$ quantiles' if distr == 'log10': qnull = -sp.log10(pnull) qemp = -sp.log10(pv[Ipv]) xl = '-log10(P) observed' yl = '-log10(P) expected' line = plt.plot(qnull, qemp, '.')[0] #plt.plot([0,qemp.m0x()], [0,qemp.max()],'r') plt.plot([0,qnull.max()], [0,qnull.max()],'r') plt.ylabel(xl) plt.xlabel(yl) if alphaLevel is not None: if distr == 'log10': betaUp, betaDown, theoreticalPvals = _qqplot_bar(M=tests,alphaLevel=alphaLevel,distr=distr) lower = -sp.log10(theoreticalPvals-betaDown) upper = -sp.log10(theoreticalPvals+betaUp) plt.fill_between(-sp.log10(theoreticalPvals),lower,upper,color='grey',alpha=0.5) #plt.plot(-sp.log10(theoreticalPvals),lower,'g-.') #plt.plot(-sp.log10(theoreticalPvals),upper,'g-.') return line
def function[qqplot, parameter[pv, distr, alphaLevel]]: constant[ This script makes a Quantile-Quantile plot of the observed negative log P-value distribution against the theoretical one under the null. Input: pv pvalues (numpy array) distr scale of the distribution (log10 or chi2) alphaLevel significance bounds ] variable[shape_ok] assign[=] <ast.BoolOp object at 0x7da2054a5240> assert[name[shape_ok]] variable[tests] assign[=] call[name[pv].shape][constant[0]] variable[pnull] assign[=] binary_operation[binary_operation[constant[0.5] + call[name[sp].arange, parameter[name[tests]]]] / name[tests]] variable[Ipv] assign[=] call[name[sp].argsort, parameter[name[pv]]] if compare[name[distr] equal[==] constant[chi2]] begin[:] variable[qnull] assign[=] call[name[sp].stats.chi2.isf, parameter[name[pnull], constant[1]]] variable[qemp] assign[=] call[name[sp].stats.chi2.isf, parameter[call[name[pv]][name[Ipv]], constant[1]]] variable[xl] assign[=] constant[LOD scores] variable[yl] assign[=] constant[$\chi^2$ quantiles] if compare[name[distr] equal[==] constant[log10]] begin[:] variable[qnull] assign[=] <ast.UnaryOp object at 0x7da2054a7550> variable[qemp] assign[=] <ast.UnaryOp object at 0x7da2054a5180> variable[xl] assign[=] constant[-log10(P) observed] variable[yl] assign[=] constant[-log10(P) expected] variable[line] assign[=] call[call[name[plt].plot, parameter[name[qnull], name[qemp], constant[.]]]][constant[0]] call[name[plt].plot, parameter[list[[<ast.Constant object at 0x7da2054a54e0>, <ast.Call object at 0x7da2054a7790>]], list[[<ast.Constant object at 0x7da2054a76a0>, <ast.Call object at 0x7da2054a5c00>]], constant[r]]] call[name[plt].ylabel, parameter[name[xl]]] call[name[plt].xlabel, parameter[name[yl]]] if compare[name[alphaLevel] is_not constant[None]] begin[:] if compare[name[distr] equal[==] constant[log10]] begin[:] <ast.Tuple object at 0x7da2054a69e0> assign[=] call[name[_qqplot_bar], parameter[]] variable[lower] assign[=] <ast.UnaryOp object at 0x7da20cabc940> variable[upper] assign[=] <ast.UnaryOp object at 0x7da20cabe9b0> call[name[plt].fill_between, parameter[<ast.UnaryOp object at 0x7da204960e50>, name[lower], name[upper]]] return[name[line]]
keyword[def] identifier[qqplot] ( identifier[pv] , identifier[distr] = literal[string] , identifier[alphaLevel] = literal[int] ): literal[string] identifier[shape_ok] =( identifier[len] ( identifier[pv] . identifier[shape] )== literal[int] ) keyword[or] (( identifier[len] ( identifier[pv] . identifier[shape] )== literal[int] ) keyword[and] identifier[pv] . identifier[shape] [ literal[int] ]== literal[int] ) keyword[assert] identifier[shape_ok] , literal[string] identifier[tests] = identifier[pv] . identifier[shape] [ literal[int] ] identifier[pnull] =( literal[int] + identifier[sp] . identifier[arange] ( identifier[tests] ))/ identifier[tests] identifier[Ipv] = identifier[sp] . identifier[argsort] ( identifier[pv] ) keyword[if] identifier[distr] == literal[string] : identifier[qnull] = identifier[sp] . identifier[stats] . identifier[chi2] . identifier[isf] ( identifier[pnull] , literal[int] ) identifier[qemp] =( identifier[sp] . identifier[stats] . identifier[chi2] . identifier[isf] ( identifier[pv] [ identifier[Ipv] ], literal[int] )) identifier[xl] = literal[string] identifier[yl] = literal[string] keyword[if] identifier[distr] == literal[string] : identifier[qnull] =- identifier[sp] . identifier[log10] ( identifier[pnull] ) identifier[qemp] =- identifier[sp] . identifier[log10] ( identifier[pv] [ identifier[Ipv] ]) identifier[xl] = literal[string] identifier[yl] = literal[string] identifier[line] = identifier[plt] . identifier[plot] ( identifier[qnull] , identifier[qemp] , literal[string] )[ literal[int] ] identifier[plt] . identifier[plot] ([ literal[int] , identifier[qnull] . identifier[max] ()],[ literal[int] , identifier[qnull] . identifier[max] ()], literal[string] ) identifier[plt] . identifier[ylabel] ( identifier[xl] ) identifier[plt] . identifier[xlabel] ( identifier[yl] ) keyword[if] identifier[alphaLevel] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[distr] == literal[string] : identifier[betaUp] , identifier[betaDown] , identifier[theoreticalPvals] = identifier[_qqplot_bar] ( identifier[M] = identifier[tests] , identifier[alphaLevel] = identifier[alphaLevel] , identifier[distr] = identifier[distr] ) identifier[lower] =- identifier[sp] . identifier[log10] ( identifier[theoreticalPvals] - identifier[betaDown] ) identifier[upper] =- identifier[sp] . identifier[log10] ( identifier[theoreticalPvals] + identifier[betaUp] ) identifier[plt] . identifier[fill_between] (- identifier[sp] . identifier[log10] ( identifier[theoreticalPvals] ), identifier[lower] , identifier[upper] , identifier[color] = literal[string] , identifier[alpha] = literal[int] ) keyword[return] identifier[line]
def qqplot(pv, distr='log10', alphaLevel=0.05): """ This script makes a Quantile-Quantile plot of the observed negative log P-value distribution against the theoretical one under the null. Input: pv pvalues (numpy array) distr scale of the distribution (log10 or chi2) alphaLevel significance bounds """ shape_ok = len(pv.shape) == 1 or (len(pv.shape) == 2 and pv.shape[1] == 1) assert shape_ok, 'qqplot requires a 1D array of p-values' tests = pv.shape[0] pnull = (0.5 + sp.arange(tests)) / tests # pnull = np.sort(np.random.uniform(size = tests)) Ipv = sp.argsort(pv) if distr == 'chi2': qnull = sp.stats.chi2.isf(pnull, 1) qemp = sp.stats.chi2.isf(pv[Ipv], 1) xl = 'LOD scores' yl = '$\\chi^2$ quantiles' # depends on [control=['if'], data=[]] if distr == 'log10': qnull = -sp.log10(pnull) qemp = -sp.log10(pv[Ipv]) xl = '-log10(P) observed' yl = '-log10(P) expected' # depends on [control=['if'], data=[]] line = plt.plot(qnull, qemp, '.')[0] #plt.plot([0,qemp.m0x()], [0,qemp.max()],'r') plt.plot([0, qnull.max()], [0, qnull.max()], 'r') plt.ylabel(xl) plt.xlabel(yl) if alphaLevel is not None: if distr == 'log10': (betaUp, betaDown, theoreticalPvals) = _qqplot_bar(M=tests, alphaLevel=alphaLevel, distr=distr) lower = -sp.log10(theoreticalPvals - betaDown) upper = -sp.log10(theoreticalPvals + betaUp) plt.fill_between(-sp.log10(theoreticalPvals), lower, upper, color='grey', alpha=0.5) # depends on [control=['if'], data=['distr']] # depends on [control=['if'], data=['alphaLevel']] #plt.plot(-sp.log10(theoreticalPvals),lower,'g-.') #plt.plot(-sp.log10(theoreticalPvals),upper,'g-.') return line
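A minimal usage sketch for the qqplot helper above, assuming the function can be imported into scope (its module path is not shown in this row) and that the p-values come from a null simulation:

import numpy as np
import matplotlib.pyplot as plt

# Under the null, p-values are Uniform(0, 1), so the observed -log10(P)
# quantiles should track the expected ones along the red diagonal.
# `qqplot` is assumed to be the function defined in the row above.
pv = np.random.uniform(size=10000)
line = qqplot(pv, distr='log10', alphaLevel=0.05)
plt.show()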
def doc_children(self, doctype, limiters=[]): """Finds all grand-children of this element's docstrings that match the specified doctype. If 'limiters' is specified, only docstrings with those doctypes are searched. """ result = [] for doc in self.docstring: if len(limiters) == 0 or doc.doctype in limiters: result.extend(doc.children(doctype)) return result
def function[doc_children, parameter[self, doctype, limiters]]: constant[Finds all grand-children of this element's docstrings that match the specified doctype. If 'limiters' is specified, only docstrings with those doctypes are searched. ] variable[result] assign[=] list[[]] for taget[name[doc]] in starred[name[self].docstring] begin[:] if <ast.BoolOp object at 0x7da1b26a4820> begin[:] call[name[result].extend, parameter[call[name[doc].children, parameter[name[doctype]]]]] return[name[result]]
keyword[def] identifier[doc_children] ( identifier[self] , identifier[doctype] , identifier[limiters] =[]): literal[string] identifier[result] =[] keyword[for] identifier[doc] keyword[in] identifier[self] . identifier[docstring] : keyword[if] identifier[len] ( identifier[limiters] )== literal[int] keyword[or] identifier[doc] . identifier[doctype] keyword[in] identifier[limiters] : identifier[result] . identifier[extend] ( identifier[doc] . identifier[children] ( identifier[doctype] )) keyword[return] identifier[result]
def doc_children(self, doctype, limiters=[]): """Finds all grand-children of this element's docstrings that match the specified doctype. If 'limiters' is specified, only docstrings with those doctypes are searched. """ result = [] for doc in self.docstring: if len(limiters) == 0 or doc.doctype in limiters: result.extend(doc.children(doctype)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['doc']] return result
def COSTALD(T, Tc, Vc, omega): r'''Calculate saturation liquid density using the COSTALD CSP method. A popular and accurate estimation method. If possible, fit parameters are used; alternatively critical properties work well. The density of a liquid is given by: .. math:: V_s=V^*V^{(0)}[1-\omega_{SRK}V^{(\delta)}] V^{(0)}=1-1.52816(1-T_r)^{1/3}+1.43907(1-T_r)^{2/3} - 0.81446(1-T_r)+0.190454(1-T_r)^{4/3} V^{(\delta)}=\frac{-0.296123+0.386914T_r-0.0427258T_r^2-0.0480645T_r^3} {T_r-1.00001} Units are that of critical or fit constant volume. Parameters ---------- T : float Temperature of fluid [K] Tc : float Critical temperature of fluid [K] Vc : float Critical volume of fluid [m^3/mol]. This parameter is alternatively a fit parameter omega : float (ideally SRK) Acentric factor for fluid, [-] This parameter is alternatively a fit parameter. Returns ------- Vs : float Saturation liquid volume Notes ----- 196 constants are fit to this function in [1]_. Range: 0.25 < Tr < 0.95, often said to be to 1.0 This function has been checked with the API handbook example problem. Examples -------- Propane, from an example in the API Handbook >>> Vm_to_rho(COSTALD(272.03889, 369.83333, 0.20008161E-3, 0.1532), 44.097) 530.3009967969841 References ---------- .. [1] Hankinson, Risdon W., and George H. Thomson. "A New Correlation for Saturated Densities of Liquids and Their Mixtures." AIChE Journal 25, no. 4 (1979): 653-663. doi:10.1002/aic.690250412 ''' Tr = T/Tc V_delta = (-0.296123 + 0.386914*Tr - 0.0427258*Tr**2 - 0.0480645*Tr**3)/(Tr - 1.00001) V_0 = 1 - 1.52816*(1-Tr)**(1/3.) + 1.43907*(1-Tr)**(2/3.) \ - 0.81446*(1-Tr) + 0.190454*(1-Tr)**(4/3.) return Vc*V_0*(1-omega*V_delta)
def function[COSTALD, parameter[T, Tc, Vc, omega]]: constant[Calculate saturation liquid density using the COSTALD CSP method. A popular and accurate estimation method. If possible, fit parameters are used; alternatively critical properties work well. The density of a liquid is given by: .. math:: V_s=V^*V^{(0)}[1-\omega_{SRK}V^{(\delta)}] V^{(0)}=1-1.52816(1-T_r)^{1/3}+1.43907(1-T_r)^{2/3} - 0.81446(1-T_r)+0.190454(1-T_r)^{4/3} V^{(\delta)}=\frac{-0.296123+0.386914T_r-0.0427258T_r^2-0.0480645T_r^3} {T_r-1.00001} Units are that of critical or fit constant volume. Parameters ---------- T : float Temperature of fluid [K] Tc : float Critical temperature of fluid [K] Vc : float Critical volume of fluid [m^3/mol]. This parameter is alternatively a fit parameter omega : float (ideally SRK) Acentric factor for fluid, [-] This parameter is alternatively a fit parameter. Returns ------- Vs : float Saturation liquid volume Notes ----- 196 constants are fit to this function in [1]_. Range: 0.25 < Tr < 0.95, often said to be to 1.0 This function has been checked with the API handbook example problem. Examples -------- Propane, from an example in the API Handbook >>> Vm_to_rho(COSTALD(272.03889, 369.83333, 0.20008161E-3, 0.1532), 44.097) 530.3009967969841 References ---------- .. [1] Hankinson, Risdon W., and George H. Thomson. "A New Correlation for Saturated Densities of Liquids and Their Mixtures." AIChE Journal 25, no. 4 (1979): 653-663. doi:10.1002/aic.690250412 ] variable[Tr] assign[=] binary_operation[name[T] / name[Tc]] variable[V_delta] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da20e956f50> + binary_operation[constant[0.386914] * name[Tr]]] - binary_operation[constant[0.0427258] * binary_operation[name[Tr] ** constant[2]]]] - binary_operation[constant[0.0480645] * binary_operation[name[Tr] ** constant[3]]]] / binary_operation[name[Tr] - constant[1.00001]]] variable[V_0] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[1] - binary_operation[constant[1.52816] * binary_operation[binary_operation[constant[1] - name[Tr]] ** binary_operation[constant[1] / constant[3.0]]]]] + binary_operation[constant[1.43907] * binary_operation[binary_operation[constant[1] - name[Tr]] ** binary_operation[constant[2] / constant[3.0]]]]] - binary_operation[constant[0.81446] * binary_operation[constant[1] - name[Tr]]]] + binary_operation[constant[0.190454] * binary_operation[binary_operation[constant[1] - name[Tr]] ** binary_operation[constant[4] / constant[3.0]]]]] return[binary_operation[binary_operation[name[Vc] * name[V_0]] * binary_operation[constant[1] - binary_operation[name[omega] * name[V_delta]]]]]
keyword[def] identifier[COSTALD] ( identifier[T] , identifier[Tc] , identifier[Vc] , identifier[omega] ): literal[string] identifier[Tr] = identifier[T] / identifier[Tc] identifier[V_delta] =(- literal[int] + literal[int] * identifier[Tr] - literal[int] * identifier[Tr] ** literal[int] - literal[int] * identifier[Tr] ** literal[int] )/( identifier[Tr] - literal[int] ) identifier[V_0] = literal[int] - literal[int] *( literal[int] - identifier[Tr] )**( literal[int] / literal[int] )+ literal[int] *( literal[int] - identifier[Tr] )**( literal[int] / literal[int] )- literal[int] *( literal[int] - identifier[Tr] )+ literal[int] *( literal[int] - identifier[Tr] )**( literal[int] / literal[int] ) keyword[return] identifier[Vc] * identifier[V_0] *( literal[int] - identifier[omega] * identifier[V_delta] )
def COSTALD(T, Tc, Vc, omega): """Calculate saturation liquid density using the COSTALD CSP method. A popular and accurate estimation method. If possible, fit parameters are used; alternatively critical properties work well. The density of a liquid is given by: .. math:: V_s=V^*V^{(0)}[1-\\omega_{SRK}V^{(\\delta)}] V^{(0)}=1-1.52816(1-T_r)^{1/3}+1.43907(1-T_r)^{2/3} - 0.81446(1-T_r)+0.190454(1-T_r)^{4/3} V^{(\\delta)}=\\frac{-0.296123+0.386914T_r-0.0427258T_r^2-0.0480645T_r^3} {T_r-1.00001} Units are that of critical or fit constant volume. Parameters ---------- T : float Temperature of fluid [K] Tc : float Critical temperature of fluid [K] Vc : float Critical volume of fluid [m^3/mol]. This parameter is alternatively a fit parameter omega : float (ideally SRK) Acentric factor for fluid, [-] This parameter is alternatively a fit parameter. Returns ------- Vs : float Saturation liquid volume Notes ----- 196 constants are fit to this function in [1]_. Range: 0.25 < Tr < 0.95, often said to be to 1.0 This function has been checked with the API handbook example problem. Examples -------- Propane, from an example in the API Handbook >>> Vm_to_rho(COSTALD(272.03889, 369.83333, 0.20008161E-3, 0.1532), 44.097) 530.3009967969841 References ---------- .. [1] Hankinson, Risdon W., and George H. Thomson. "A New Correlation for Saturated Densities of Liquids and Their Mixtures." AIChE Journal 25, no. 4 (1979): 653-663. doi:10.1002/aic.690250412 """ Tr = T / Tc V_delta = (-0.296123 + 0.386914 * Tr - 0.0427258 * Tr ** 2 - 0.0480645 * Tr ** 3) / (Tr - 1.00001) V_0 = 1 - 1.52816 * (1 - Tr) ** (1 / 3.0) + 1.43907 * (1 - Tr) ** (2 / 3.0) - 0.81446 * (1 - Tr) + 0.190454 * (1 - Tr) ** (4 / 3.0) return Vc * V_0 * (1 - omega * V_delta)
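The COSTALD docstring above quotes both the correlation and a propane data point, so a short self-contained sketch can re-derive that number; the helper name costald_vm and the explicit molar-mass conversion (44.097 g/mol for propane) are illustrative assumptions, not part of the library:

def costald_vm(T, Tc, Vc, omega):
    # Saturation molar volume from the COSTALD equations quoted above.
    Tr = T / Tc
    V_delta = (-0.296123 + 0.386914*Tr - 0.0427258*Tr**2
               - 0.0480645*Tr**3) / (Tr - 1.00001)
    V_0 = (1 - 1.52816*(1 - Tr)**(1/3.) + 1.43907*(1 - Tr)**(2/3.)
           - 0.81446*(1 - Tr) + 0.190454*(1 - Tr)**(4/3.))
    return Vc * V_0 * (1 - omega*V_delta)

# Propane check mirroring the docstring example: convert the molar volume
# (m^3/mol) to a mass density (kg/m^3) using M = 44.097 g/mol.
Vm = costald_vm(272.03889, 369.83333, 0.20008161e-3, 0.1532)
print(44.097e-3 / Vm)  # ~530.3 kg/m^3, the value quoted in the docstring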
def run_total_dos(self, sigma=None, freq_min=None, freq_max=None, freq_pitch=None, use_tetrahedron_method=True): """Calculate total DOS from phonons on sampling mesh. Parameters ---------- sigma : float, optional Smearing width for smearing method. Default is None freq_min, freq_max, freq_pitch : float, optional Minimum and maximum frequencies in which range DOS is computed with the specified interval (freq_pitch). Defaults are None and they are automatically determined. use_tetrahedron_method : float, optional Use tetrahedron method when this is True. When sigma is set, smearing method is used. """ if self._mesh is None: msg = "run_mesh has to be done before DOS calculation." raise RuntimeError(msg) total_dos = TotalDos(self._mesh, sigma=sigma, use_tetrahedron_method=use_tetrahedron_method) total_dos.set_draw_area(freq_min, freq_max, freq_pitch) total_dos.run() self._total_dos = total_dos
def function[run_total_dos, parameter[self, sigma, freq_min, freq_max, freq_pitch, use_tetrahedron_method]]: constant[Calculate total DOS from phonons on sampling mesh. Parameters ---------- sigma : float, optional Smearing width for smearing method. Default is None freq_min, freq_max, freq_pitch : float, optional Minimum and maximum frequencies in which range DOS is computed with the specified interval (freq_pitch). Defaults are None and they are automatically determined. use_tetrahedron_method : float, optional Use tetrahedron method when this is True. When sigma is set, smearing method is used. ] if compare[name[self]._mesh is constant[None]] begin[:] variable[msg] assign[=] constant[run_mesh has to be done before DOS calculation.] <ast.Raise object at 0x7da18fe92b00> variable[total_dos] assign[=] call[name[TotalDos], parameter[name[self]._mesh]] call[name[total_dos].set_draw_area, parameter[name[freq_min], name[freq_max], name[freq_pitch]]] call[name[total_dos].run, parameter[]] name[self]._total_dos assign[=] name[total_dos]
keyword[def] identifier[run_total_dos] ( identifier[self] , identifier[sigma] = keyword[None] , identifier[freq_min] = keyword[None] , identifier[freq_max] = keyword[None] , identifier[freq_pitch] = keyword[None] , identifier[use_tetrahedron_method] = keyword[True] ): literal[string] keyword[if] identifier[self] . identifier[_mesh] keyword[is] keyword[None] : identifier[msg] = literal[string] keyword[raise] identifier[RuntimeError] ( identifier[msg] ) identifier[total_dos] = identifier[TotalDos] ( identifier[self] . identifier[_mesh] , identifier[sigma] = identifier[sigma] , identifier[use_tetrahedron_method] = identifier[use_tetrahedron_method] ) identifier[total_dos] . identifier[set_draw_area] ( identifier[freq_min] , identifier[freq_max] , identifier[freq_pitch] ) identifier[total_dos] . identifier[run] () identifier[self] . identifier[_total_dos] = identifier[total_dos]
def run_total_dos(self, sigma=None, freq_min=None, freq_max=None, freq_pitch=None, use_tetrahedron_method=True): """Calculate total DOS from phonons on sampling mesh. Parameters ---------- sigma : float, optional Smearing width for smearing method. Default is None freq_min, freq_max, freq_pitch : float, optional Minimum and maximum frequencies in which range DOS is computed with the specified interval (freq_pitch). Defaults are None and they are automatically determined. use_tetrahedron_method : float, optional Use tetrahedron method when this is True. When sigma is set, smearing method is used. """ if self._mesh is None: msg = 'run_mesh has to be done before DOS calculation.' raise RuntimeError(msg) # depends on [control=['if'], data=[]] total_dos = TotalDos(self._mesh, sigma=sigma, use_tetrahedron_method=use_tetrahedron_method) total_dos.set_draw_area(freq_min, freq_max, freq_pitch) total_dos.run() self._total_dos = total_dos
def _t_normals(self): r""" Update the throat normals from the voronoi vertices """ verts = self['throat.vertices'] value = sp.zeros([len(verts), 3]) for i in range(len(verts)): if len(sp.unique(verts[i][:, 0])) == 1: verts_2d = sp.vstack((verts[i][:, 1], verts[i][:, 2])).T elif len(sp.unique(verts[i][:, 1])) == 1: verts_2d = sp.vstack((verts[i][:, 0], verts[i][:, 2])).T else: verts_2d = sp.vstack((verts[i][:, 0], verts[i][:, 1])).T hull = sptl.ConvexHull(verts_2d, qhull_options='QJ Pp') sorted_verts = verts[i][hull.vertices].astype(float) v1 = sorted_verts[-1]-sorted_verts[0] v2 = sorted_verts[1]-sorted_verts[0] value[i] = tr.unit_vector(sp.cross(v1, v2)) return value
def function[_t_normals, parameter[self]]: constant[ Update the throat normals from the voronoi vertices ] variable[verts] assign[=] call[name[self]][constant[throat.vertices]] variable[value] assign[=] call[name[sp].zeros, parameter[list[[<ast.Call object at 0x7da18eb54fa0>, <ast.Constant object at 0x7da18eb55d80>]]]] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[verts]]]]]] begin[:] if compare[call[name[len], parameter[call[name[sp].unique, parameter[call[call[name[verts]][name[i]]][tuple[[<ast.Slice object at 0x7da18eb56bc0>, <ast.Constant object at 0x7da18eb55b40>]]]]]]] equal[==] constant[1]] begin[:] variable[verts_2d] assign[=] call[name[sp].vstack, parameter[tuple[[<ast.Subscript object at 0x7da18eb57b80>, <ast.Subscript object at 0x7da18eb561a0>]]]].T variable[hull] assign[=] call[name[sptl].ConvexHull, parameter[name[verts_2d]]] variable[sorted_verts] assign[=] call[call[call[name[verts]][name[i]]][name[hull].vertices].astype, parameter[name[float]]] variable[v1] assign[=] binary_operation[call[name[sorted_verts]][<ast.UnaryOp object at 0x7da1b26afc10>] - call[name[sorted_verts]][constant[0]]] variable[v2] assign[=] binary_operation[call[name[sorted_verts]][constant[1]] - call[name[sorted_verts]][constant[0]]] call[name[value]][name[i]] assign[=] call[name[tr].unit_vector, parameter[call[name[sp].cross, parameter[name[v1], name[v2]]]]] return[name[value]]
keyword[def] identifier[_t_normals] ( identifier[self] ): literal[string] identifier[verts] = identifier[self] [ literal[string] ] identifier[value] = identifier[sp] . identifier[zeros] ([ identifier[len] ( identifier[verts] ), literal[int] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[verts] )): keyword[if] identifier[len] ( identifier[sp] . identifier[unique] ( identifier[verts] [ identifier[i] ][:, literal[int] ]))== literal[int] : identifier[verts_2d] = identifier[sp] . identifier[vstack] (( identifier[verts] [ identifier[i] ][:, literal[int] ], identifier[verts] [ identifier[i] ][:, literal[int] ])). identifier[T] keyword[elif] identifier[len] ( identifier[sp] . identifier[unique] ( identifier[verts] [ identifier[i] ][:, literal[int] ]))== literal[int] : identifier[verts_2d] = identifier[sp] . identifier[vstack] (( identifier[verts] [ identifier[i] ][:, literal[int] ], identifier[verts] [ identifier[i] ][:, literal[int] ])). identifier[T] keyword[else] : identifier[verts_2d] = identifier[sp] . identifier[vstack] (( identifier[verts] [ identifier[i] ][:, literal[int] ], identifier[verts] [ identifier[i] ][:, literal[int] ])). identifier[T] identifier[hull] = identifier[sptl] . identifier[ConvexHull] ( identifier[verts_2d] , identifier[qhull_options] = literal[string] ) identifier[sorted_verts] = identifier[verts] [ identifier[i] ][ identifier[hull] . identifier[vertices] ]. identifier[astype] ( identifier[float] ) identifier[v1] = identifier[sorted_verts] [- literal[int] ]- identifier[sorted_verts] [ literal[int] ] identifier[v2] = identifier[sorted_verts] [ literal[int] ]- identifier[sorted_verts] [ literal[int] ] identifier[value] [ identifier[i] ]= identifier[tr] . identifier[unit_vector] ( identifier[sp] . identifier[cross] ( identifier[v1] , identifier[v2] )) keyword[return] identifier[value]
def _t_normals(self): """ Update the throat normals from the voronoi vertices """ verts = self['throat.vertices'] value = sp.zeros([len(verts), 3]) for i in range(len(verts)): if len(sp.unique(verts[i][:, 0])) == 1: verts_2d = sp.vstack((verts[i][:, 1], verts[i][:, 2])).T # depends on [control=['if'], data=[]] elif len(sp.unique(verts[i][:, 1])) == 1: verts_2d = sp.vstack((verts[i][:, 0], verts[i][:, 2])).T # depends on [control=['if'], data=[]] else: verts_2d = sp.vstack((verts[i][:, 0], verts[i][:, 1])).T hull = sptl.ConvexHull(verts_2d, qhull_options='QJ Pp') sorted_verts = verts[i][hull.vertices].astype(float) v1 = sorted_verts[-1] - sorted_verts[0] v2 = sorted_verts[1] - sorted_verts[0] value[i] = tr.unit_vector(sp.cross(v1, v2)) # depends on [control=['for'], data=['i']] return value
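The per-throat normal computed above is just the unit cross product of two edge vectors taken from the hull-ordered vertices; a toy numpy check of that step, using made-up coplanar vertices, is sketched below:

import numpy as np

# Three vertices lying in the z = 0 plane; the normal must point along +/- z.
# The vertex coordinates are invented purely for this check.
verts = np.array([[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0]])
v1 = verts[-1] - verts[0]    # same construction as in _t_normals
v2 = verts[1] - verts[0]
n = np.cross(v1, v2)
n = n / np.linalg.norm(n)    # equivalent of tr.unit_vector
print(n)                     # [ 0.  0. -1.]; the sign depends on vertex order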
def is_initialized(self): """Check if bucket exists. :return: True if initialized, False otherwise """ try: return self.client.head_bucket( Bucket=self.db_path)['ResponseMetadata']['HTTPStatusCode'] \ == 200 except botocore.exceptions.ClientError as e: # If a client error is thrown, then check that it was a 404 error. # If it was a 404 error, then the bucket does not exist. if 'NoSuchBucket' in str(e.response['Error']['Code']): return False raise e
def function[is_initialized, parameter[self]]: constant[Check if bucket exists. :return: True if initialized, False otherwise ] <ast.Try object at 0x7da1b00f7ee0>
keyword[def] identifier[is_initialized] ( identifier[self] ): literal[string] keyword[try] : keyword[return] identifier[self] . identifier[client] . identifier[head_bucket] ( identifier[Bucket] = identifier[self] . identifier[db_path] )[ literal[string] ][ literal[string] ]== literal[int] keyword[except] identifier[botocore] . identifier[exceptions] . identifier[ClientError] keyword[as] identifier[e] : keyword[if] literal[string] keyword[in] identifier[str] ( identifier[e] . identifier[response] [ literal[string] ][ literal[string] ]): keyword[return] keyword[False] keyword[raise] identifier[e]
def is_initialized(self): """Check if bucket exists. :return: True if initialized, False otherwise """ try: return self.client.head_bucket(Bucket=self.db_path)['ResponseMetadata']['HTTPStatusCode'] == 200 # depends on [control=['try'], data=[]] except botocore.exceptions.ClientError as e: # If a client error is thrown, then check that it was a 404 error. # If it was a 404 error, then the bucket does not exist. if 'NoSuchBucket' in str(e.response['Error']['Code']): return False # depends on [control=['if'], data=[]] raise e # depends on [control=['except'], data=['e']]
def _cert_callback(callback, der_cert, reason): """ Constructs an asn1crypto.x509.Certificate object and calls the export callback :param callback: The callback to call :param der_cert: A byte string of the DER-encoded certificate :param reason: None if cert is being exported, or a unicode string of the reason it is not being exported """ if not callback: return callback(x509.Certificate.load(der_cert), reason)
def function[_cert_callback, parameter[callback, der_cert, reason]]: constant[ Constructs an asn1crypto.x509.Certificate object and calls the export callback :param callback: The callback to call :param der_cert: A byte string of the DER-encoded certificate :param reason: None if cert is being exported, or a unicode string of the reason it is not being exported ] if <ast.UnaryOp object at 0x7da1b00da050> begin[:] return[None] call[name[callback], parameter[call[name[x509].Certificate.load, parameter[name[der_cert]]], name[reason]]]
keyword[def] identifier[_cert_callback] ( identifier[callback] , identifier[der_cert] , identifier[reason] ): literal[string] keyword[if] keyword[not] identifier[callback] : keyword[return] identifier[callback] ( identifier[x509] . identifier[Certificate] . identifier[load] ( identifier[der_cert] ), identifier[reason] )
def _cert_callback(callback, der_cert, reason): """ Constructs an asn1crypto.x509.Certificate object and calls the export callback :param callback: The callback to call :param der_cert: A byte string of the DER-encoded certificate :param reason: None if cert is being exported, or a unicode string of the reason it is not being exported """ if not callback: return # depends on [control=['if'], data=[]] callback(x509.Certificate.load(der_cert), reason)
def write(self, file_des, contents): """Write string to file descriptor, returns number of bytes written. Args: file_des: An integer file descriptor for the file object requested. contents: String of bytes to write to file. Returns: Number of bytes written. Raises: OSError: bad file descriptor. TypeError: if file descriptor is not an integer. """ file_handle = self.filesystem.get_open_file(file_des) if isinstance(file_handle, FakeDirWrapper): self.filesystem.raise_os_error(errno.EBADF, file_handle.file_path) if isinstance(file_handle, FakePipeWrapper): return file_handle.write(contents) file_handle.raw_io = True file_handle._sync_io() file_handle.update_flush_pos() file_handle.write(contents) file_handle.flush() return len(contents)
def function[write, parameter[self, file_des, contents]]: constant[Write string to file descriptor, returns number of bytes written. Args: file_des: An integer file descriptor for the file object requested. contents: String of bytes to write to file. Returns: Number of bytes written. Raises: OSError: bad file descriptor. TypeError: if file descriptor is not an integer. ] variable[file_handle] assign[=] call[name[self].filesystem.get_open_file, parameter[name[file_des]]] if call[name[isinstance], parameter[name[file_handle], name[FakeDirWrapper]]] begin[:] call[name[self].filesystem.raise_os_error, parameter[name[errno].EBADF, name[file_handle].file_path]] if call[name[isinstance], parameter[name[file_handle], name[FakePipeWrapper]]] begin[:] return[call[name[file_handle].write, parameter[name[contents]]]] name[file_handle].raw_io assign[=] constant[True] call[name[file_handle]._sync_io, parameter[]] call[name[file_handle].update_flush_pos, parameter[]] call[name[file_handle].write, parameter[name[contents]]] call[name[file_handle].flush, parameter[]] return[call[name[len], parameter[name[contents]]]]
keyword[def] identifier[write] ( identifier[self] , identifier[file_des] , identifier[contents] ): literal[string] identifier[file_handle] = identifier[self] . identifier[filesystem] . identifier[get_open_file] ( identifier[file_des] ) keyword[if] identifier[isinstance] ( identifier[file_handle] , identifier[FakeDirWrapper] ): identifier[self] . identifier[filesystem] . identifier[raise_os_error] ( identifier[errno] . identifier[EBADF] , identifier[file_handle] . identifier[file_path] ) keyword[if] identifier[isinstance] ( identifier[file_handle] , identifier[FakePipeWrapper] ): keyword[return] identifier[file_handle] . identifier[write] ( identifier[contents] ) identifier[file_handle] . identifier[raw_io] = keyword[True] identifier[file_handle] . identifier[_sync_io] () identifier[file_handle] . identifier[update_flush_pos] () identifier[file_handle] . identifier[write] ( identifier[contents] ) identifier[file_handle] . identifier[flush] () keyword[return] identifier[len] ( identifier[contents] )
def write(self, file_des, contents): """Write string to file descriptor, returns number of bytes written. Args: file_des: An integer file descriptor for the file object requested. contents: String of bytes to write to file. Returns: Number of bytes written. Raises: OSError: bad file descriptor. TypeError: if file descriptor is not an integer. """ file_handle = self.filesystem.get_open_file(file_des) if isinstance(file_handle, FakeDirWrapper): self.filesystem.raise_os_error(errno.EBADF, file_handle.file_path) # depends on [control=['if'], data=[]] if isinstance(file_handle, FakePipeWrapper): return file_handle.write(contents) # depends on [control=['if'], data=[]] file_handle.raw_io = True file_handle._sync_io() file_handle.update_flush_pos() file_handle.write(contents) file_handle.flush() return len(contents)
def update(anchor, handle=None): """Update an anchor based on the current contents of its source file. Args: anchor: The `Anchor` to be updated. handle: File-like object containing contents of the anchor's file. If `None`, then this function will open the file and read it. Returns: A new `Anchor`, possibly identical to the input. Raises: ValueError: No alignments could be found between old anchor and new text. AlignmentError: If no anchor could be created. The message of the exception will say what the problem is. """ if handle is None: with anchor.file_path.open(mode='rt') as fp: source_text = fp.read() else: source_text = handle.read() handle.seek(0) ctxt = anchor.context a_score, alignments = align(ctxt.full_text, source_text, score, gap_penalty) # max_score = len(ctxt.full_text) * 3 try: alignment = next(alignments) except StopIteration: raise AlignmentError('No alignments for anchor: {}'.format(anchor)) anchor_offset = ctxt.offset - len(ctxt.before) source_indices = tuple( s_idx for (a_idx, s_idx) in alignment if a_idx is not None if s_idx is not None if _index_in_topic(a_idx + anchor_offset, anchor)) if not source_indices: raise AlignmentError( "Best alignment does not map topic to updated source.") return make_anchor( file_path=anchor.file_path, offset=source_indices[0], width=len(source_indices), context_width=anchor.context.width, metadata=anchor.metadata, handle=handle)
def function[update, parameter[anchor, handle]]: constant[Update an anchor based on the current contents of its source file. Args: anchor: The `Anchor` to be updated. handle: File-like object containing contents of the anchor's file. If `None`, then this function will open the file and read it. Returns: A new `Anchor`, possibly identical to the input. Raises: ValueError: No alignments could be found between old anchor and new text. AlignmentError: If no anchor could be created. The message of the exception will say what the problem is. ] if compare[name[handle] is constant[None]] begin[:] with call[name[anchor].file_path.open, parameter[]] begin[:] variable[source_text] assign[=] call[name[fp].read, parameter[]] variable[ctxt] assign[=] name[anchor].context <ast.Tuple object at 0x7da1b0ac8070> assign[=] call[name[align], parameter[name[ctxt].full_text, name[source_text], name[score], name[gap_penalty]]] <ast.Try object at 0x7da1b0ac9b40> variable[anchor_offset] assign[=] binary_operation[name[ctxt].offset - call[name[len], parameter[name[ctxt].before]]] variable[source_indices] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b0ac9ab0>]] if <ast.UnaryOp object at 0x7da1b0af0160> begin[:] <ast.Raise object at 0x7da1b0af1ea0> return[call[name[make_anchor], parameter[]]]
keyword[def] identifier[update] ( identifier[anchor] , identifier[handle] = keyword[None] ): literal[string] keyword[if] identifier[handle] keyword[is] keyword[None] : keyword[with] identifier[anchor] . identifier[file_path] . identifier[open] ( identifier[mode] = literal[string] ) keyword[as] identifier[fp] : identifier[source_text] = identifier[fp] . identifier[read] () keyword[else] : identifier[source_text] = identifier[handle] . identifier[read] () identifier[handle] . identifier[seek] ( literal[int] ) identifier[ctxt] = identifier[anchor] . identifier[context] identifier[a_score] , identifier[alignments] = identifier[align] ( identifier[ctxt] . identifier[full_text] , identifier[source_text] , identifier[score] , identifier[gap_penalty] ) keyword[try] : identifier[alignment] = identifier[next] ( identifier[alignments] ) keyword[except] identifier[StopIteration] : keyword[raise] identifier[AlignmentError] ( literal[string] . identifier[format] ( identifier[anchor] )) identifier[anchor_offset] = identifier[ctxt] . identifier[offset] - identifier[len] ( identifier[ctxt] . identifier[before] ) identifier[source_indices] = identifier[tuple] ( identifier[s_idx] keyword[for] ( identifier[a_idx] , identifier[s_idx] ) keyword[in] identifier[alignment] keyword[if] identifier[a_idx] keyword[is] keyword[not] keyword[None] keyword[if] identifier[s_idx] keyword[is] keyword[not] keyword[None] keyword[if] identifier[_index_in_topic] ( identifier[a_idx] + identifier[anchor_offset] , identifier[anchor] )) keyword[if] keyword[not] identifier[source_indices] : keyword[raise] identifier[AlignmentError] ( literal[string] ) keyword[return] identifier[make_anchor] ( identifier[file_path] = identifier[anchor] . identifier[file_path] , identifier[offset] = identifier[source_indices] [ literal[int] ], identifier[width] = identifier[len] ( identifier[source_indices] ), identifier[context_width] = identifier[anchor] . identifier[context] . identifier[width] , identifier[metadata] = identifier[anchor] . identifier[metadata] , identifier[handle] = identifier[handle] )
def update(anchor, handle=None): """Update an anchor based on the current contents of its source file. Args: anchor: The `Anchor` to be updated. handle: File-like object containing contents of the anchor's file. If `None`, then this function will open the file and read it. Returns: A new `Anchor`, possibly identical to the input. Raises: ValueError: No alignments could be found between old anchor and new text. AlignmentError: If no anchor could be created. The message of the exception will say what the problem is. """ if handle is None: with anchor.file_path.open(mode='rt') as fp: source_text = fp.read() # depends on [control=['with'], data=['fp']] # depends on [control=['if'], data=[]] else: source_text = handle.read() handle.seek(0) ctxt = anchor.context (a_score, alignments) = align(ctxt.full_text, source_text, score, gap_penalty) # max_score = len(ctxt.full_text) * 3 try: alignment = next(alignments) # depends on [control=['try'], data=[]] except StopIteration: raise AlignmentError('No alignments for anchor: {}'.format(anchor)) # depends on [control=['except'], data=[]] anchor_offset = ctxt.offset - len(ctxt.before) source_indices = tuple((s_idx for (a_idx, s_idx) in alignment if a_idx is not None if s_idx is not None if _index_in_topic(a_idx + anchor_offset, anchor))) if not source_indices: raise AlignmentError('Best alignment does not map topic to updated source.') # depends on [control=['if'], data=[]] return make_anchor(file_path=anchor.file_path, offset=source_indices[0], width=len(source_indices), context_width=anchor.context.width, metadata=anchor.metadata, handle=handle)
def source_address(self):
        """Return the authoritative source of the link.""" # If link is a sender, source is determined by the local # value, else use the remote. if self._pn_link.is_sender: return self._pn_link.source.address else: return self._pn_link.remote_source.address
def function[source_address, parameter[self]]: constant[Return the authoritative source of the link.] if name[self]._pn_link.is_sender begin[:] return[name[self]._pn_link.source.address]
keyword[def] identifier[source_address] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_pn_link] . identifier[is_sender] : keyword[return] identifier[self] . identifier[_pn_link] . identifier[source] . identifier[address] keyword[else] : keyword[return] identifier[self] . identifier[_pn_link] . identifier[remote_source] . identifier[address]
def source_address(self): """Return the authoritative source of the link.""" # If link is a sender, source is determined by the local # value, else use the remote. if self._pn_link.is_sender: return self._pn_link.source.address # depends on [control=['if'], data=[]] else: return self._pn_link.remote_source.address
def verify_sans(amazon_cert: crypto.X509) -> bool: """Verifies Subject Alternative Names (SANs) for Amazon certificate. Args: amazon_cert: Pycrypto X509 Amazon certificate. Returns: result: True if verification was successful, False if not. """ cert_extentions = [amazon_cert.get_extension(i) for i in range(amazon_cert.get_extension_count())] subject_alt_names = '' for extention in cert_extentions: if 'subjectAltName' in str(extention.get_short_name()): subject_alt_names = extention.__str__() break result = 'echo-api.amazon.com' in subject_alt_names return result
def function[verify_sans, parameter[amazon_cert]]: constant[Verifies Subject Alternative Names (SANs) for Amazon certificate. Args: amazon_cert: Pycrypto X509 Amazon certificate. Returns: result: True if verification was successful, False if not. ] variable[cert_extentions] assign[=] <ast.ListComp object at 0x7da1b0354700> variable[subject_alt_names] assign[=] constant[] for taget[name[extention]] in starred[name[cert_extentions]] begin[:] if compare[constant[subjectAltName] in call[name[str], parameter[call[name[extention].get_short_name, parameter[]]]]] begin[:] variable[subject_alt_names] assign[=] call[name[extention].__str__, parameter[]] break variable[result] assign[=] compare[constant[echo-api.amazon.com] in name[subject_alt_names]] return[name[result]]
keyword[def] identifier[verify_sans] ( identifier[amazon_cert] : identifier[crypto] . identifier[X509] )-> identifier[bool] : literal[string] identifier[cert_extentions] =[ identifier[amazon_cert] . identifier[get_extension] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[amazon_cert] . identifier[get_extension_count] ())] identifier[subject_alt_names] = literal[string] keyword[for] identifier[extention] keyword[in] identifier[cert_extentions] : keyword[if] literal[string] keyword[in] identifier[str] ( identifier[extention] . identifier[get_short_name] ()): identifier[subject_alt_names] = identifier[extention] . identifier[__str__] () keyword[break] identifier[result] = literal[string] keyword[in] identifier[subject_alt_names] keyword[return] identifier[result]
def verify_sans(amazon_cert: crypto.X509) -> bool: """Verifies Subject Alternative Names (SANs) for Amazon certificate. Args: amazon_cert: Pycrypto X509 Amazon certificate. Returns: result: True if verification was successful, False if not. """ cert_extentions = [amazon_cert.get_extension(i) for i in range(amazon_cert.get_extension_count())] subject_alt_names = '' for extention in cert_extentions: if 'subjectAltName' in str(extention.get_short_name()): subject_alt_names = extention.__str__() break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['extention']] result = 'echo-api.amazon.com' in subject_alt_names return result
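In practice verify_sans is handed a certificate already parsed with pyOpenSSL; a hedged usage sketch follows (the PEM file path is an assumption, and verify_sans is assumed to be in scope as defined above):

from OpenSSL import crypto

# Load the PEM certificate Amazon serves at the SignatureCertChainUrl
# (downloading and full chain validation are out of scope for this sketch).
with open('echo-api-cert.pem', 'rb') as f:    # path is an assumption
    amazon_cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())

if not verify_sans(amazon_cert):
    raise ValueError('SANs do not include echo-api.amazon.com')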
def read(self, file): """Reads the captions file.""" content = self._read_content(file) self._validate(content) self._parse(content) return self
def function[read, parameter[self, file]]: constant[Reads the captions file.] variable[content] assign[=] call[name[self]._read_content, parameter[name[file]]] call[name[self]._validate, parameter[name[content]]] call[name[self]._parse, parameter[name[content]]] return[name[self]]
keyword[def] identifier[read] ( identifier[self] , identifier[file] ): literal[string] identifier[content] = identifier[self] . identifier[_read_content] ( identifier[file] ) identifier[self] . identifier[_validate] ( identifier[content] ) identifier[self] . identifier[_parse] ( identifier[content] ) keyword[return] identifier[self]
def read(self, file): """Reads the captions file.""" content = self._read_content(file) self._validate(content) self._parse(content) return self
def run(self): """The main routine for a thread's work. The thread pulls tasks from the task queue and executes them until it encounters a death token. The death token is a tuple of two Nones. """ try: quit_request_detected = False while True: function, arguments = self.task_queue.get() if function is None: # this allows us to watch the threads die and identify # threads that may be hanging or deadlocked self.config.logger.info('quits') break if quit_request_detected: continue try: try: args, kwargs = arguments except ValueError: args = arguments kwargs = {} function(*args, **kwargs) # execute the task except Exception: self.config.logger.error("Error in processing a job", exc_info=True) except KeyboardInterrupt: # TODO: can probably go away self.config.logger.info('quit request detected') quit_request_detected = True #thread.interrupt_main() # only needed if signal handler # not registered except Exception: self.config.logger.critical("Failure in task_queue", exc_info=True)
def function[run, parameter[self]]: constant[The main routine for a thread's work. The thread pulls tasks from the task queue and executes them until it encounters a death token. The death token is a tuple of two Nones. ] <ast.Try object at 0x7da20c6e58d0>
keyword[def] identifier[run] ( identifier[self] ): literal[string] keyword[try] : identifier[quit_request_detected] = keyword[False] keyword[while] keyword[True] : identifier[function] , identifier[arguments] = identifier[self] . identifier[task_queue] . identifier[get] () keyword[if] identifier[function] keyword[is] keyword[None] : identifier[self] . identifier[config] . identifier[logger] . identifier[info] ( literal[string] ) keyword[break] keyword[if] identifier[quit_request_detected] : keyword[continue] keyword[try] : keyword[try] : identifier[args] , identifier[kwargs] = identifier[arguments] keyword[except] identifier[ValueError] : identifier[args] = identifier[arguments] identifier[kwargs] ={} identifier[function] (* identifier[args] ,** identifier[kwargs] ) keyword[except] identifier[Exception] : identifier[self] . identifier[config] . identifier[logger] . identifier[error] ( literal[string] , identifier[exc_info] = keyword[True] ) keyword[except] identifier[KeyboardInterrupt] : identifier[self] . identifier[config] . identifier[logger] . identifier[info] ( literal[string] ) identifier[quit_request_detected] = keyword[True] keyword[except] identifier[Exception] : identifier[self] . identifier[config] . identifier[logger] . identifier[critical] ( literal[string] , identifier[exc_info] = keyword[True] )
def run(self): """The main routine for a thread's work. The thread pulls tasks from the task queue and executes them until it encounters a death token. The death token is a tuple of two Nones. """ try: quit_request_detected = False while True: (function, arguments) = self.task_queue.get() if function is None: # this allows us to watch the threads die and identify # threads that may be hanging or deadlocked self.config.logger.info('quits') break # depends on [control=['if'], data=[]] if quit_request_detected: continue # depends on [control=['if'], data=[]] try: try: (args, kwargs) = arguments # depends on [control=['try'], data=[]] except ValueError: args = arguments kwargs = {} # depends on [control=['except'], data=[]] function(*args, **kwargs) # execute the task # depends on [control=['try'], data=[]] except Exception: self.config.logger.error('Error in processing a job', exc_info=True) # depends on [control=['except'], data=[]] except KeyboardInterrupt: # TODO: can probably go away self.config.logger.info('quit request detected') quit_request_detected = True # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]] #thread.interrupt_main() # only needed if signal handler # not registered except Exception: self.config.logger.critical('Failure in task_queue', exc_info=True) # depends on [control=['except'], data=[]]
def delete_security_group(self, name=None, group_id=None): """ Delete a security group from your account. :type name: string :param name: The name of the security group to delete. :type group_id: string :param group_id: The ID of the security group to delete within a VPC. :rtype: bool :return: True if successful. """ params = {} if name is not None: params['GroupName'] = name elif group_id is not None: params['GroupId'] = group_id return self.get_status('DeleteSecurityGroup', params, verb='POST')
def function[delete_security_group, parameter[self, name, group_id]]: constant[ Delete a security group from your account. :type name: string :param name: The name of the security group to delete. :type group_id: string :param group_id: The ID of the security group to delete within a VPC. :rtype: bool :return: True if successful. ] variable[params] assign[=] dictionary[[], []] if compare[name[name] is_not constant[None]] begin[:] call[name[params]][constant[GroupName]] assign[=] name[name] return[call[name[self].get_status, parameter[constant[DeleteSecurityGroup], name[params]]]]
keyword[def] identifier[delete_security_group] ( identifier[self] , identifier[name] = keyword[None] , identifier[group_id] = keyword[None] ): literal[string] identifier[params] ={} keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] : identifier[params] [ literal[string] ]= identifier[name] keyword[elif] identifier[group_id] keyword[is] keyword[not] keyword[None] : identifier[params] [ literal[string] ]= identifier[group_id] keyword[return] identifier[self] . identifier[get_status] ( literal[string] , identifier[params] , identifier[verb] = literal[string] )
def delete_security_group(self, name=None, group_id=None): """ Delete a security group from your account. :type name: string :param name: The name of the security group to delete. :type group_id: string :param group_id: The ID of the security group to delete within a VPC. :rtype: bool :return: True if successful. """ params = {} if name is not None: params['GroupName'] = name # depends on [control=['if'], data=['name']] elif group_id is not None: params['GroupId'] = group_id # depends on [control=['if'], data=['group_id']] return self.get_status('DeleteSecurityGroup', params, verb='POST')
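A short usage sketch for the boto method above; the region, group name, and group ID are assumptions chosen only for illustration:

import boto.ec2

# Credentials are picked up from the environment / boto config files.
conn = boto.ec2.connect_to_region('us-east-1')    # region is an assumption

# EC2-Classic security groups are addressed by name...
conn.delete_security_group(name='legacy-web-sg')
# ...while groups inside a VPC are addressed by their group ID:
# conn.delete_security_group(group_id='sg-0123456789abcdef0')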
def atlas_overlap(dset,atlas=None): '''aligns ``dset`` to the TT_N27 atlas and returns ``(cost,overlap)``''' atlas = find_atlas(atlas) if atlas==None: return None cost_func = 'crM' infile = os.path.abspath(dset) tmpdir = tempfile.mkdtemp() with nl.run_in(tmpdir): o = nl.run(['3dAllineate','-verb','-base',atlas,'-source',infile + '[0]','-NN','-final','NN','-cost',cost_func,'-nmatch','20%','-onepass','-fineblur','2','-cmass','-prefix','test.nii.gz']) m = re.search(r'Final\s+cost = ([\d.]+) ;',o.output) if m: cost = float(m.group(1)) o = nl.run(['3dmaskave','-mask',atlas,'-q','test.nii.gz'],stderr=None) data_thresh = float(o.output) / 4 i = nl.dset_info('test.nii.gz') o = nl.run(['3dmaskave','-q','-mask','SELF','-sum',nl.calc([atlas,'test.nii.gz'],'equals(step(a-10),step(b-%.2f))'%data_thresh)],stderr=None) overlap = 100*float(o.output) / (i.voxel_dims[0]*i.voxel_dims[1]*i.voxel_dims[2]) try: shutil.rmtree(tmpdir) except: pass return (cost,overlap)
def function[atlas_overlap, parameter[dset, atlas]]: constant[aligns ``dset`` to the TT_N27 atlas and returns ``(cost,overlap)``] variable[atlas] assign[=] call[name[find_atlas], parameter[name[atlas]]] if compare[name[atlas] equal[==] constant[None]] begin[:] return[constant[None]] variable[cost_func] assign[=] constant[crM] variable[infile] assign[=] call[name[os].path.abspath, parameter[name[dset]]] variable[tmpdir] assign[=] call[name[tempfile].mkdtemp, parameter[]] with call[name[nl].run_in, parameter[name[tmpdir]]] begin[:] variable[o] assign[=] call[name[nl].run, parameter[list[[<ast.Constant object at 0x7da20c9922f0>, <ast.Constant object at 0x7da20c993220>, <ast.Constant object at 0x7da20c990340>, <ast.Name object at 0x7da20c9912a0>, <ast.Constant object at 0x7da20c993ac0>, <ast.BinOp object at 0x7da20c992140>, <ast.Constant object at 0x7da20c991a80>, <ast.Constant object at 0x7da20c9926e0>, <ast.Constant object at 0x7da20c9906d0>, <ast.Constant object at 0x7da20c992890>, <ast.Name object at 0x7da20c9937c0>, <ast.Constant object at 0x7da20c991870>, <ast.Constant object at 0x7da20c9924a0>, <ast.Constant object at 0x7da20c993880>, <ast.Constant object at 0x7da20c991ab0>, <ast.Constant object at 0x7da20c993640>, <ast.Constant object at 0x7da20c990af0>, <ast.Constant object at 0x7da20c9911e0>, <ast.Constant object at 0x7da20c9936d0>]]]] variable[m] assign[=] call[name[re].search, parameter[constant[Final\s+cost = ([\d.]+) ;], name[o].output]] if name[m] begin[:] variable[cost] assign[=] call[name[float], parameter[call[name[m].group, parameter[constant[1]]]]] variable[o] assign[=] call[name[nl].run, parameter[list[[<ast.Constant object at 0x7da204566170>, <ast.Constant object at 0x7da204565810>, <ast.Name object at 0x7da204565fc0>, <ast.Constant object at 0x7da204567f40>, <ast.Constant object at 0x7da204565840>]]]] variable[data_thresh] assign[=] binary_operation[call[name[float], parameter[name[o].output]] / constant[4]] variable[i] assign[=] call[name[nl].dset_info, parameter[constant[test.nii.gz]]] variable[o] assign[=] call[name[nl].run, parameter[list[[<ast.Constant object at 0x7da204565cf0>, <ast.Constant object at 0x7da2045663e0>, <ast.Constant object at 0x7da204566a10>, <ast.Constant object at 0x7da204564250>, <ast.Constant object at 0x7da204565ea0>, <ast.Call object at 0x7da2045657b0>]]]] variable[overlap] assign[=] binary_operation[binary_operation[constant[100] * call[name[float], parameter[name[o].output]]] / binary_operation[binary_operation[call[name[i].voxel_dims][constant[0]] * call[name[i].voxel_dims][constant[1]]] * call[name[i].voxel_dims][constant[2]]]] <ast.Try object at 0x7da18bcc8f40> return[tuple[[<ast.Name object at 0x7da18bcc95a0>, <ast.Name object at 0x7da204621c60>]]]
keyword[def] identifier[atlas_overlap] ( identifier[dset] , identifier[atlas] = keyword[None] ): literal[string] identifier[atlas] = identifier[find_atlas] ( identifier[atlas] ) keyword[if] identifier[atlas] == keyword[None] : keyword[return] keyword[None] identifier[cost_func] = literal[string] identifier[infile] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[dset] ) identifier[tmpdir] = identifier[tempfile] . identifier[mkdtemp] () keyword[with] identifier[nl] . identifier[run_in] ( identifier[tmpdir] ): identifier[o] = identifier[nl] . identifier[run] ([ literal[string] , literal[string] , literal[string] , identifier[atlas] , literal[string] , identifier[infile] + literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , identifier[cost_func] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]) identifier[m] = identifier[re] . identifier[search] ( literal[string] , identifier[o] . identifier[output] ) keyword[if] identifier[m] : identifier[cost] = identifier[float] ( identifier[m] . identifier[group] ( literal[int] )) identifier[o] = identifier[nl] . identifier[run] ([ literal[string] , literal[string] , identifier[atlas] , literal[string] , literal[string] ], identifier[stderr] = keyword[None] ) identifier[data_thresh] = identifier[float] ( identifier[o] . identifier[output] )/ literal[int] identifier[i] = identifier[nl] . identifier[dset_info] ( literal[string] ) identifier[o] = identifier[nl] . identifier[run] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , identifier[nl] . identifier[calc] ([ identifier[atlas] , literal[string] ], literal[string] % identifier[data_thresh] )], identifier[stderr] = keyword[None] ) identifier[overlap] = literal[int] * identifier[float] ( identifier[o] . identifier[output] )/( identifier[i] . identifier[voxel_dims] [ literal[int] ]* identifier[i] . identifier[voxel_dims] [ literal[int] ]* identifier[i] . identifier[voxel_dims] [ literal[int] ]) keyword[try] : identifier[shutil] . identifier[rmtree] ( identifier[tmpdir] ) keyword[except] : keyword[pass] keyword[return] ( identifier[cost] , identifier[overlap] )
def atlas_overlap(dset, atlas=None): """aligns ``dset`` to the TT_N27 atlas and returns ``(cost,overlap)``""" atlas = find_atlas(atlas) if atlas == None: return None # depends on [control=['if'], data=[]] cost_func = 'crM' infile = os.path.abspath(dset) tmpdir = tempfile.mkdtemp() with nl.run_in(tmpdir): o = nl.run(['3dAllineate', '-verb', '-base', atlas, '-source', infile + '[0]', '-NN', '-final', 'NN', '-cost', cost_func, '-nmatch', '20%', '-onepass', '-fineblur', '2', '-cmass', '-prefix', 'test.nii.gz']) m = re.search('Final\\s+cost = ([\\d.]+) ;', o.output) if m: cost = float(m.group(1)) # depends on [control=['if'], data=[]] o = nl.run(['3dmaskave', '-mask', atlas, '-q', 'test.nii.gz'], stderr=None) data_thresh = float(o.output) / 4 i = nl.dset_info('test.nii.gz') o = nl.run(['3dmaskave', '-q', '-mask', 'SELF', '-sum', nl.calc([atlas, 'test.nii.gz'], 'equals(step(a-10),step(b-%.2f))' % data_thresh)], stderr=None) overlap = 100 * float(o.output) / (i.voxel_dims[0] * i.voxel_dims[1] * i.voxel_dims[2]) # depends on [control=['with'], data=[]] try: shutil.rmtree(tmpdir) # depends on [control=['try'], data=[]] except: pass # depends on [control=['except'], data=[]] return (cost, overlap)
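The `atlas_overlap` row above shells out to AFNI's `3dAllineate` and scrapes the final alignment cost from its verbose output. A minimal, self-contained sketch of just that cost-parsing step is below; it needs no AFNI install, and the sample output string is made up for illustration.

```python
import re

# Hypothetical excerpt of 3dAllineate's verbose output (not captured from a real run).
sample_output = """
 ++ 3dAllineate: aligning source to base
 ++ Final    cost = 0.2731 ;
"""

def parse_final_cost(output):
    """Extract the final alignment cost using the same regex as atlas_overlap."""
    m = re.search(r'Final\s+cost = ([\d.]+) ;', output)
    return float(m.group(1)) if m else None

print(parse_final_cost(sample_output))  # -> 0.2731
```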
def bounding_boxes(self, mode='fraction', output='dict'): """Get the bounding boxes of the labels on a page. Parameters ---------- mode: 'fraction', 'actual' If 'fraction', the bounding boxes are expressed as a fraction of the height and width of the sheet. If 'actual', they are the actual position of the labels in millimetres from the top-left of the sheet. output: 'dict', 'json' If 'dict', a dictionary with label identifier tuples (row, column) as keys and a dictionary with 'left', 'right', 'top', and 'bottom' entries as the values. If 'json', a JSON encoded string which represents a dictionary with keys of the string format 'rowxcolumn' and each value being a bounding box dictionary with 'left', 'right', 'top', and 'bottom' entries. Returns ------- The bounding boxes in the format set by the output parameter. """ boxes = {} # Check the parameters. if mode not in ('fraction', 'actual'): raise ValueError("Unknown mode {0}.".format(mode)) if output not in ('dict', 'json'): raise ValueError("Unknown output {0}.".format(output)) # Iterate over the rows. for row in range(1, self.rows + 1): # Top and bottom of all labels in the row. top = self.top_margin + ((row - 1) * (self.label_height + self.row_gap)) bottom = top + self.label_height # Now iterate over all columns in this row. for column in range(1, self.columns + 1): # Left and right position of this column. left = self.left_margin + ((column - 1) * (self.label_width + self.column_gap)) right = left + self.label_width # Output in the appropriate mode format. if mode == 'fraction': box = { 'top': top / self.sheet_height, 'bottom': bottom / self.sheet_height, 'left': left / self.sheet_width, 'right': right / self.sheet_width, } elif mode == 'actual': box = {'top': top, 'bottom': bottom, 'left': left, 'right': right} # Add to the collection. if output == 'json': boxes['{0:d}x{1:d}'.format(row, column)] = box box['top'] = float(box['top']) box['bottom'] = float(box['bottom']) box['left'] = float(box['left']) box['right'] = float(box['right']) else: boxes[(row, column)] = box # Done. if output == 'json': return json.dumps(boxes) return boxes
def function[bounding_boxes, parameter[self, mode, output]]: constant[Get the bounding boxes of the labels on a page. Parameters ---------- mode: 'fraction', 'actual' If 'fraction', the bounding boxes are expressed as a fraction of the height and width of the sheet. If 'actual', they are the actual position of the labels in millimetres from the top-left of the sheet. output: 'dict', 'json' If 'dict', a dictionary with label identifier tuples (row, column) as keys and a dictionary with 'left', 'right', 'top', and 'bottom' entries as the values. If 'json', a JSON encoded string which represents a dictionary with keys of the string format 'rowxcolumn' and each value being a bounding box dictionary with 'left', 'right', 'top', and 'bottom' entries. Returns ------- The bounding boxes in the format set by the output parameter. ] variable[boxes] assign[=] dictionary[[], []] if compare[name[mode] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da20c6a9390>, <ast.Constant object at 0x7da20c6aa890>]]] begin[:] <ast.Raise object at 0x7da20c6a8820> if compare[name[output] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da20c6aba00>, <ast.Constant object at 0x7da20c6a94b0>]]] begin[:] <ast.Raise object at 0x7da20c6abc70> for taget[name[row]] in starred[call[name[range], parameter[constant[1], binary_operation[name[self].rows + constant[1]]]]] begin[:] variable[top] assign[=] binary_operation[name[self].top_margin + binary_operation[binary_operation[name[row] - constant[1]] * binary_operation[name[self].label_height + name[self].row_gap]]] variable[bottom] assign[=] binary_operation[name[top] + name[self].label_height] for taget[name[column]] in starred[call[name[range], parameter[constant[1], binary_operation[name[self].columns + constant[1]]]]] begin[:] variable[left] assign[=] binary_operation[name[self].left_margin + binary_operation[binary_operation[name[column] - constant[1]] * binary_operation[name[self].label_width + name[self].column_gap]]] variable[right] assign[=] binary_operation[name[left] + name[self].label_width] if compare[name[mode] equal[==] constant[fraction]] begin[:] variable[box] assign[=] dictionary[[<ast.Constant object at 0x7da20c6a8220>, <ast.Constant object at 0x7da20c6aae60>, <ast.Constant object at 0x7da18fe90f40>, <ast.Constant object at 0x7da18fe931f0>], [<ast.BinOp object at 0x7da18fe92ce0>, <ast.BinOp object at 0x7da18fe91c30>, <ast.BinOp object at 0x7da18fe91060>, <ast.BinOp object at 0x7da18fe93ac0>]] if compare[name[output] equal[==] constant[json]] begin[:] call[name[boxes]][call[constant[{0:d}x{1:d}].format, parameter[name[row], name[column]]]] assign[=] name[box] call[name[box]][constant[top]] assign[=] call[name[float], parameter[call[name[box]][constant[top]]]] call[name[box]][constant[bottom]] assign[=] call[name[float], parameter[call[name[box]][constant[bottom]]]] call[name[box]][constant[left]] assign[=] call[name[float], parameter[call[name[box]][constant[left]]]] call[name[box]][constant[right]] assign[=] call[name[float], parameter[call[name[box]][constant[right]]]] if compare[name[output] equal[==] constant[json]] begin[:] return[call[name[json].dumps, parameter[name[boxes]]]] return[name[boxes]]
keyword[def] identifier[bounding_boxes] ( identifier[self] , identifier[mode] = literal[string] , identifier[output] = literal[string] ): literal[string] identifier[boxes] ={} keyword[if] identifier[mode] keyword[not] keyword[in] ( literal[string] , literal[string] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[mode] )) keyword[if] identifier[output] keyword[not] keyword[in] ( literal[string] , literal[string] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[output] )) keyword[for] identifier[row] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[rows] + literal[int] ): identifier[top] = identifier[self] . identifier[top_margin] +(( identifier[row] - literal[int] )*( identifier[self] . identifier[label_height] + identifier[self] . identifier[row_gap] )) identifier[bottom] = identifier[top] + identifier[self] . identifier[label_height] keyword[for] identifier[column] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[columns] + literal[int] ): identifier[left] = identifier[self] . identifier[left_margin] +(( identifier[column] - literal[int] )*( identifier[self] . identifier[label_width] + identifier[self] . identifier[column_gap] )) identifier[right] = identifier[left] + identifier[self] . identifier[label_width] keyword[if] identifier[mode] == literal[string] : identifier[box] ={ literal[string] : identifier[top] / identifier[self] . identifier[sheet_height] , literal[string] : identifier[bottom] / identifier[self] . identifier[sheet_height] , literal[string] : identifier[left] / identifier[self] . identifier[sheet_width] , literal[string] : identifier[right] / identifier[self] . identifier[sheet_width] , } keyword[elif] identifier[mode] == literal[string] : identifier[box] ={ literal[string] : identifier[top] , literal[string] : identifier[bottom] , literal[string] : identifier[left] , literal[string] : identifier[right] } keyword[if] identifier[output] == literal[string] : identifier[boxes] [ literal[string] . identifier[format] ( identifier[row] , identifier[column] )]= identifier[box] identifier[box] [ literal[string] ]= identifier[float] ( identifier[box] [ literal[string] ]) identifier[box] [ literal[string] ]= identifier[float] ( identifier[box] [ literal[string] ]) identifier[box] [ literal[string] ]= identifier[float] ( identifier[box] [ literal[string] ]) identifier[box] [ literal[string] ]= identifier[float] ( identifier[box] [ literal[string] ]) keyword[else] : identifier[boxes] [( identifier[row] , identifier[column] )]= identifier[box] keyword[if] identifier[output] == literal[string] : keyword[return] identifier[json] . identifier[dumps] ( identifier[boxes] ) keyword[return] identifier[boxes]
def bounding_boxes(self, mode='fraction', output='dict'): """Get the bounding boxes of the labels on a page. Parameters ---------- mode: 'fraction', 'actual' If 'fraction', the bounding boxes are expressed as a fraction of the height and width of the sheet. If 'actual', they are the actual position of the labels in millimetres from the top-left of the sheet. output: 'dict', 'json' If 'dict', a dictionary with label identifier tuples (row, column) as keys and a dictionary with 'left', 'right', 'top', and 'bottom' entries as the values. If 'json', a JSON encoded string which represents a dictionary with keys of the string format 'rowxcolumn' and each value being a bounding box dictionary with 'left', 'right', 'top', and 'bottom' entries. Returns ------- The bounding boxes in the format set by the output parameter. """ boxes = {} # Check the parameters. if mode not in ('fraction', 'actual'): raise ValueError('Unknown mode {0}.'.format(mode)) # depends on [control=['if'], data=['mode']] if output not in ('dict', 'json'): raise ValueError('Unknown output {0}.'.format(output)) # depends on [control=['if'], data=['output']] # Iterate over the rows. for row in range(1, self.rows + 1): # Top and bottom of all labels in the row. top = self.top_margin + (row - 1) * (self.label_height + self.row_gap) bottom = top + self.label_height # Now iterate over all columns in this row. for column in range(1, self.columns + 1): # Left and right position of this column. left = self.left_margin + (column - 1) * (self.label_width + self.column_gap) right = left + self.label_width # Output in the appropriate mode format. if mode == 'fraction': box = {'top': top / self.sheet_height, 'bottom': bottom / self.sheet_height, 'left': left / self.sheet_width, 'right': right / self.sheet_width} # depends on [control=['if'], data=[]] elif mode == 'actual': box = {'top': top, 'bottom': bottom, 'left': left, 'right': right} # depends on [control=['if'], data=[]] # Add to the collection. if output == 'json': boxes['{0:d}x{1:d}'.format(row, column)] = box box['top'] = float(box['top']) box['bottom'] = float(box['bottom']) box['left'] = float(box['left']) box['right'] = float(box['right']) # depends on [control=['if'], data=[]] else: boxes[row, column] = box # depends on [control=['for'], data=['column']] # depends on [control=['for'], data=['row']] # Done. if output == 'json': return json.dumps(boxes) # depends on [control=['if'], data=[]] return boxes
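The `bounding_boxes` row computes each label's box from sheet margins, label size, and gaps. A standalone sketch of the same top/left arithmetic is below; the parameter names mirror the attributes the method reads, but the numbers are illustrative only and do not describe any real label sheet.

```python
def label_box(row, column, *, top_margin, left_margin, label_width, label_height,
              row_gap, column_gap):
    """Box for a 1-indexed (row, column) label, in mm from the sheet's top-left."""
    top = top_margin + (row - 1) * (label_height + row_gap)
    left = left_margin + (column - 1) * (label_width + column_gap)
    return {'top': top, 'bottom': top + label_height,
            'left': left, 'right': left + label_width}

# Illustrative geometry only.
print(label_box(2, 3, top_margin=10, left_margin=5, label_width=60,
                label_height=30, row_gap=2, column_gap=4))
# {'top': 42, 'bottom': 72, 'left': 133, 'right': 193}
```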
def command_max_delay(self, event=None): """ CPU burst max running time - self.runtime_cfg.max_delay """ try: max_delay = self.max_delay_var.get() except ValueError: max_delay = self.runtime_cfg.max_delay if max_delay < 0: max_delay = self.runtime_cfg.max_delay if max_delay > 0.1: max_delay = self.runtime_cfg.max_delay self.runtime_cfg.max_delay = max_delay self.max_delay_var.set(self.runtime_cfg.max_delay)
def function[command_max_delay, parameter[self, event]]: constant[ CPU burst max running time - self.runtime_cfg.max_delay ] <ast.Try object at 0x7da1b05e0370> if compare[name[max_delay] less[<] constant[0]] begin[:] variable[max_delay] assign[=] name[self].runtime_cfg.max_delay if compare[name[max_delay] greater[>] constant[0.1]] begin[:] variable[max_delay] assign[=] name[self].runtime_cfg.max_delay name[self].runtime_cfg.max_delay assign[=] name[max_delay] call[name[self].max_delay_var.set, parameter[name[self].runtime_cfg.max_delay]]
keyword[def] identifier[command_max_delay] ( identifier[self] , identifier[event] = keyword[None] ): literal[string] keyword[try] : identifier[max_delay] = identifier[self] . identifier[max_delay_var] . identifier[get] () keyword[except] identifier[ValueError] : identifier[max_delay] = identifier[self] . identifier[runtime_cfg] . identifier[max_delay] keyword[if] identifier[max_delay] < literal[int] : identifier[max_delay] = identifier[self] . identifier[runtime_cfg] . identifier[max_delay] keyword[if] identifier[max_delay] > literal[int] : identifier[max_delay] = identifier[self] . identifier[runtime_cfg] . identifier[max_delay] identifier[self] . identifier[runtime_cfg] . identifier[max_delay] = identifier[max_delay] identifier[self] . identifier[max_delay_var] . identifier[set] ( identifier[self] . identifier[runtime_cfg] . identifier[max_delay] )
def command_max_delay(self, event=None): """ CPU burst max running time - self.runtime_cfg.max_delay """ try: max_delay = self.max_delay_var.get() # depends on [control=['try'], data=[]] except ValueError: max_delay = self.runtime_cfg.max_delay # depends on [control=['except'], data=[]] if max_delay < 0: max_delay = self.runtime_cfg.max_delay # depends on [control=['if'], data=['max_delay']] if max_delay > 0.1: max_delay = self.runtime_cfg.max_delay # depends on [control=['if'], data=['max_delay']] self.runtime_cfg.max_delay = max_delay self.max_delay_var.set(self.runtime_cfg.max_delay)
def clean(self, value): """Cleans and returns the given value, or raises a ParameterNotValidError exception""" if isinstance(value, six.string_types): return value elif isinstance(value, numbers.Number): return str(value) raise ParameterNotValidError
def function[clean, parameter[self, value]]: constant[Cleans and returns the given value, or raises a ParameterNotValidError exception] if call[name[isinstance], parameter[name[value], name[six].string_types]] begin[:] return[name[value]] <ast.Raise object at 0x7da2054a6350>
keyword[def] identifier[clean] ( identifier[self] , identifier[value] ): literal[string] keyword[if] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[string_types] ): keyword[return] identifier[value] keyword[elif] identifier[isinstance] ( identifier[value] , identifier[numbers] . identifier[Number] ): keyword[return] identifier[str] ( identifier[value] ) keyword[raise] identifier[ParameterNotValidError]
def clean(self, value): """Cleans and returns the given value, or raises a ParameterNotValidError exception""" if isinstance(value, six.string_types): return value # depends on [control=['if'], data=[]] elif isinstance(value, numbers.Number): return str(value) # depends on [control=['if'], data=[]] raise ParameterNotValidError
def ldap_server_maprole_group_ad_group(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa") maprole = ET.SubElement(ldap_server, "maprole") group = ET.SubElement(maprole, "group") ad_group = ET.SubElement(group, "ad-group") ad_group.text = kwargs.pop('ad_group') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[ldap_server_maprole_group_ad_group, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[ldap_server] assign[=] call[name[ET].SubElement, parameter[name[config], constant[ldap-server]]] variable[maprole] assign[=] call[name[ET].SubElement, parameter[name[ldap_server], constant[maprole]]] variable[group] assign[=] call[name[ET].SubElement, parameter[name[maprole], constant[group]]] variable[ad_group] assign[=] call[name[ET].SubElement, parameter[name[group], constant[ad-group]]] name[ad_group].text assign[=] call[name[kwargs].pop, parameter[constant[ad_group]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[ldap_server_maprole_group_ad_group] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[ldap_server] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] ) identifier[maprole] = identifier[ET] . identifier[SubElement] ( identifier[ldap_server] , literal[string] ) identifier[group] = identifier[ET] . identifier[SubElement] ( identifier[maprole] , literal[string] ) identifier[ad_group] = identifier[ET] . identifier[SubElement] ( identifier[group] , literal[string] ) identifier[ad_group] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def ldap_server_maprole_group_ad_group(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') ldap_server = ET.SubElement(config, 'ldap-server', xmlns='urn:brocade.com:mgmt:brocade-aaa') maprole = ET.SubElement(ldap_server, 'maprole') group = ET.SubElement(maprole, 'group') ad_group = ET.SubElement(group, 'ad-group') ad_group.text = kwargs.pop('ad_group') callback = kwargs.pop('callback', self._callback) return callback(config)
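The `ldap_server_maprole_group_ad_group` row builds a nested config element by chaining `xml.etree.ElementTree.SubElement` calls. The sketch below reproduces that chaining standalone so the resulting XML can be inspected; the `ad-group` value is a placeholder, not taken from the source.

```python
import xml.etree.ElementTree as ET

config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server",
                            xmlns="urn:brocade.com:mgmt:brocade-aaa")
maprole = ET.SubElement(ldap_server, "maprole")
group = ET.SubElement(maprole, "group")
ad_group = ET.SubElement(group, "ad-group")
ad_group.text = "example-ad-group"  # placeholder value

# Serialize to see the nested structure the callback would receive.
print(ET.tostring(config, encoding="unicode"))
```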
def _skip_to_blank(f, spacegroup, setting): """Read lines from f until a blank line is encountered.""" while True: line = f.readline() if not line: raise SpacegroupNotFoundError( 'invalid spacegroup %s, setting %i not found in data base' % ( spacegroup, setting ) ) if not line.strip(): break
def function[_skip_to_blank, parameter[f, spacegroup, setting]]: constant[Read lines from f until a blank line is encountered.] while constant[True] begin[:] variable[line] assign[=] call[name[f].readline, parameter[]] if <ast.UnaryOp object at 0x7da18dc046d0> begin[:] <ast.Raise object at 0x7da18dc07490> if <ast.UnaryOp object at 0x7da18dc06650> begin[:] break
keyword[def] identifier[_skip_to_blank] ( identifier[f] , identifier[spacegroup] , identifier[setting] ): literal[string] keyword[while] keyword[True] : identifier[line] = identifier[f] . identifier[readline] () keyword[if] keyword[not] identifier[line] : keyword[raise] identifier[SpacegroupNotFoundError] ( literal[string] % ( identifier[spacegroup] , identifier[setting] )) keyword[if] keyword[not] identifier[line] . identifier[strip] (): keyword[break]
def _skip_to_blank(f, spacegroup, setting): """Read lines from f until a blank line is encountered.""" while True: line = f.readline() if not line: raise SpacegroupNotFoundError('invalid spacegroup %s, setting %i not found in data base' % (spacegroup, setting)) # depends on [control=['if'], data=[]] if not line.strip(): break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
def parse_username(username): """ Parses the given username or channel access hash, given a string, username or URL. Returns a tuple consisting of both the stripped, lowercase username and whether it is a joinchat/ hash (in which case is not lowercase'd). Returns ``(None, False)`` if the ``username`` or link is not valid. """ username = username.strip() m = USERNAME_RE.match(username) or TG_JOIN_RE.match(username) if m: username = username[m.end():] is_invite = bool(m.group(1)) if is_invite: return username, True else: username = username.rstrip('/') if VALID_USERNAME_RE.match(username): return username.lower(), False else: return None, False
def function[parse_username, parameter[username]]: constant[ Parses the given username or channel access hash, given a string, username or URL. Returns a tuple consisting of both the stripped, lowercase username and whether it is a joinchat/ hash (in which case is not lowercase'd). Returns ``(None, False)`` if the ``username`` or link is not valid. ] variable[username] assign[=] call[name[username].strip, parameter[]] variable[m] assign[=] <ast.BoolOp object at 0x7da1b1c7ada0> if name[m] begin[:] variable[username] assign[=] call[name[username]][<ast.Slice object at 0x7da1b1c78cd0>] variable[is_invite] assign[=] call[name[bool], parameter[call[name[m].group, parameter[constant[1]]]]] if name[is_invite] begin[:] return[tuple[[<ast.Name object at 0x7da1b1c78ee0>, <ast.Constant object at 0x7da1b1c7b160>]]] if call[name[VALID_USERNAME_RE].match, parameter[name[username]]] begin[:] return[tuple[[<ast.Call object at 0x7da1b2188ca0>, <ast.Constant object at 0x7da1b2189420>]]]
keyword[def] identifier[parse_username] ( identifier[username] ): literal[string] identifier[username] = identifier[username] . identifier[strip] () identifier[m] = identifier[USERNAME_RE] . identifier[match] ( identifier[username] ) keyword[or] identifier[TG_JOIN_RE] . identifier[match] ( identifier[username] ) keyword[if] identifier[m] : identifier[username] = identifier[username] [ identifier[m] . identifier[end] ():] identifier[is_invite] = identifier[bool] ( identifier[m] . identifier[group] ( literal[int] )) keyword[if] identifier[is_invite] : keyword[return] identifier[username] , keyword[True] keyword[else] : identifier[username] = identifier[username] . identifier[rstrip] ( literal[string] ) keyword[if] identifier[VALID_USERNAME_RE] . identifier[match] ( identifier[username] ): keyword[return] identifier[username] . identifier[lower] (), keyword[False] keyword[else] : keyword[return] keyword[None] , keyword[False]
def parse_username(username): """ Parses the given username or channel access hash, given a string, username or URL. Returns a tuple consisting of both the stripped, lowercase username and whether it is a joinchat/ hash (in which case is not lowercase'd). Returns ``(None, False)`` if the ``username`` or link is not valid. """ username = username.strip() m = USERNAME_RE.match(username) or TG_JOIN_RE.match(username) if m: username = username[m.end():] is_invite = bool(m.group(1)) if is_invite: return (username, True) # depends on [control=['if'], data=[]] else: username = username.rstrip('/') # depends on [control=['if'], data=[]] if VALID_USERNAME_RE.match(username): return (username.lower(), False) # depends on [control=['if'], data=[]] else: return (None, False)
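The `parse_username` row strips a leading `@` or t.me/telegram.me prefix, detects `joinchat/` invite links, and validates plain usernames. The sketch below approximates that flow end to end; the two regexes are simplified stand-ins for the module's `USERNAME_RE`/`TG_JOIN_RE`/`VALID_USERNAME_RE` constants and cover fewer URL forms than the real ones.

```python
import re

# Simplified stand-ins for the real regex constants.
USERNAME_RE = re.compile(
    r'@|(?:https?://)?(?:www\.)?(?:telegram\.(?:me|dog)|t\.me)/(joinchat/)?')
VALID_USERNAME_RE = re.compile(r'^[a-z](?:(?!__)\w){3,30}[a-z\d]$', re.IGNORECASE)

def parse_username_sketch(username):
    username = username.strip()
    m = USERNAME_RE.match(username)
    if m:
        username = username[m.end():]
        if m.group(1):                 # matched a joinchat/ invite link
            return username, True
        username = username.rstrip('/')
    if VALID_USERNAME_RE.match(username):
        return username.lower(), False
    return None, False

print(parse_username_sketch('@Telethon'))                      # ('telethon', False)
print(parse_username_sketch('https://t.me/joinchat/AbCdEf'))   # ('AbCdEf', True)
```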
def _set_node_lists(self, new): """ Maintains each edge's list of available nodes. """ for edge in self.edges: edge._nodes = self.nodes
def function[_set_node_lists, parameter[self, new]]: constant[ Maintains each edge's list of available nodes. ] for taget[name[edge]] in starred[name[self].edges] begin[:] name[edge]._nodes assign[=] name[self].nodes
keyword[def] identifier[_set_node_lists] ( identifier[self] , identifier[new] ): literal[string] keyword[for] identifier[edge] keyword[in] identifier[self] . identifier[edges] : identifier[edge] . identifier[_nodes] = identifier[self] . identifier[nodes]
def _set_node_lists(self, new): """ Maintains each edge's list of available nodes. """ for edge in self.edges: edge._nodes = self.nodes # depends on [control=['for'], data=['edge']]
def rebuild_pipelines(*args): """Entry point for rebuilding pipelines. Use to rebuild all pipelines or a specific group. """ rebuild_all = False rebuild_project = os.getenv("REBUILD_PROJECT") if args: LOG.debug('Incoming arguments: %s', args) command_args, *_ = args rebuild_all = command_args.parsed.all rebuild_project = command_args.parsed.project if rebuild_project == 'ALL': rebuild_all = True if rebuild_all: LOG.info('Rebuilding all projects.') elif rebuild_project is None: msg = 'No REBUILD_PROJECT variable found' LOG.fatal(msg) raise SystemExit('Error: {0}'.format(msg)) else: LOG.info('Rebuilding project: %s', rebuild_project) all_apps = utils.get_all_apps() for apps in all_apps: if 'repoProjectKey' not in apps: LOG.info('Skipping %s. No project key found', apps['name']) continue app_name = '{}/{}'.format(apps['repoProjectKey'], apps['repoSlug']) if apps['repoProjectKey'].lower() == rebuild_project.lower() or rebuild_all: os.environ["PROJECT"] = apps['repoProjectKey'] os.environ["GIT_REPO"] = apps['repoSlug'] LOG.info('Rebuilding pipelines for %s', app_name) runner = ForemastRunner() try: runner.write_configs() runner.create_pipeline() runner.cleanup() except Exception: # pylint: disable=broad-except LOG.warning('Error updating pipeline for %s', app_name)
def function[rebuild_pipelines, parameter[]]: constant[Entry point for rebuilding pipelines. Use to rebuild all pipelines or a specific group. ] variable[rebuild_all] assign[=] constant[False] variable[rebuild_project] assign[=] call[name[os].getenv, parameter[constant[REBUILD_PROJECT]]] if name[args] begin[:] call[name[LOG].debug, parameter[constant[Incoming arguments: %s], name[args]]] <ast.Tuple object at 0x7da20c7958d0> assign[=] name[args] variable[rebuild_all] assign[=] name[command_args].parsed.all variable[rebuild_project] assign[=] name[command_args].parsed.project if compare[name[rebuild_project] equal[==] constant[ALL]] begin[:] variable[rebuild_all] assign[=] constant[True] if name[rebuild_all] begin[:] call[name[LOG].info, parameter[constant[Rebuilding all projects.]]] variable[all_apps] assign[=] call[name[utils].get_all_apps, parameter[]] for taget[name[apps]] in starred[name[all_apps]] begin[:] if compare[constant[repoProjectKey] <ast.NotIn object at 0x7da2590d7190> name[apps]] begin[:] call[name[LOG].info, parameter[constant[Skipping %s. No project key found], call[name[apps]][constant[name]]]] continue variable[app_name] assign[=] call[constant[{}/{}].format, parameter[call[name[apps]][constant[repoProjectKey]], call[name[apps]][constant[repoSlug]]]] if <ast.BoolOp object at 0x7da20c794610> begin[:] call[name[os].environ][constant[PROJECT]] assign[=] call[name[apps]][constant[repoProjectKey]] call[name[os].environ][constant[GIT_REPO]] assign[=] call[name[apps]][constant[repoSlug]] call[name[LOG].info, parameter[constant[Rebuilding pipelines for %s], name[app_name]]] variable[runner] assign[=] call[name[ForemastRunner], parameter[]] <ast.Try object at 0x7da20c7951b0>
keyword[def] identifier[rebuild_pipelines] (* identifier[args] ): literal[string] identifier[rebuild_all] = keyword[False] identifier[rebuild_project] = identifier[os] . identifier[getenv] ( literal[string] ) keyword[if] identifier[args] : identifier[LOG] . identifier[debug] ( literal[string] , identifier[args] ) identifier[command_args] ,* identifier[_] = identifier[args] identifier[rebuild_all] = identifier[command_args] . identifier[parsed] . identifier[all] identifier[rebuild_project] = identifier[command_args] . identifier[parsed] . identifier[project] keyword[if] identifier[rebuild_project] == literal[string] : identifier[rebuild_all] = keyword[True] keyword[if] identifier[rebuild_all] : identifier[LOG] . identifier[info] ( literal[string] ) keyword[elif] identifier[rebuild_project] keyword[is] keyword[None] : identifier[msg] = literal[string] identifier[LOG] . identifier[fatal] ( identifier[msg] ) keyword[raise] identifier[SystemExit] ( literal[string] . identifier[format] ( identifier[msg] )) keyword[else] : identifier[LOG] . identifier[info] ( literal[string] , identifier[rebuild_project] ) identifier[all_apps] = identifier[utils] . identifier[get_all_apps] () keyword[for] identifier[apps] keyword[in] identifier[all_apps] : keyword[if] literal[string] keyword[not] keyword[in] identifier[apps] : identifier[LOG] . identifier[info] ( literal[string] , identifier[apps] [ literal[string] ]) keyword[continue] identifier[app_name] = literal[string] . identifier[format] ( identifier[apps] [ literal[string] ], identifier[apps] [ literal[string] ]) keyword[if] identifier[apps] [ literal[string] ]. identifier[lower] ()== identifier[rebuild_project] . identifier[lower] () keyword[or] identifier[rebuild_all] : identifier[os] . identifier[environ] [ literal[string] ]= identifier[apps] [ literal[string] ] identifier[os] . identifier[environ] [ literal[string] ]= identifier[apps] [ literal[string] ] identifier[LOG] . identifier[info] ( literal[string] , identifier[app_name] ) identifier[runner] = identifier[ForemastRunner] () keyword[try] : identifier[runner] . identifier[write_configs] () identifier[runner] . identifier[create_pipeline] () identifier[runner] . identifier[cleanup] () keyword[except] identifier[Exception] : identifier[LOG] . identifier[warning] ( literal[string] , identifier[app_name] )
def rebuild_pipelines(*args): """Entry point for rebuilding pipelines. Use to rebuild all pipelines or a specific group. """ rebuild_all = False rebuild_project = os.getenv('REBUILD_PROJECT') if args: LOG.debug('Incoming arguments: %s', args) (command_args, *_) = args rebuild_all = command_args.parsed.all rebuild_project = command_args.parsed.project # depends on [control=['if'], data=[]] if rebuild_project == 'ALL': rebuild_all = True # depends on [control=['if'], data=[]] if rebuild_all: LOG.info('Rebuilding all projects.') # depends on [control=['if'], data=[]] elif rebuild_project is None: msg = 'No REBUILD_PROJECT variable found' LOG.fatal(msg) raise SystemExit('Error: {0}'.format(msg)) # depends on [control=['if'], data=[]] else: LOG.info('Rebuilding project: %s', rebuild_project) all_apps = utils.get_all_apps() for apps in all_apps: if 'repoProjectKey' not in apps: LOG.info('Skipping %s. No project key found', apps['name']) continue # depends on [control=['if'], data=['apps']] app_name = '{}/{}'.format(apps['repoProjectKey'], apps['repoSlug']) if apps['repoProjectKey'].lower() == rebuild_project.lower() or rebuild_all: os.environ['PROJECT'] = apps['repoProjectKey'] os.environ['GIT_REPO'] = apps['repoSlug'] LOG.info('Rebuilding pipelines for %s', app_name) runner = ForemastRunner() try: runner.write_configs() runner.create_pipeline() runner.cleanup() # depends on [control=['try'], data=[]] except Exception: # pylint: disable=broad-except LOG.warning('Error updating pipeline for %s', app_name) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['apps']]
def PYTHON_VERSION(stats, info): """Python interpreter version. This is a flag you can pass to `Stats.submit()`. """ # Some versions of Python have a \n in sys.version! version = sys.version.replace(' \n', ' ').replace('\n', ' ') python = ';'.join([str(c) for c in sys.version_info] + [version]) info.append(('python', python))
def function[PYTHON_VERSION, parameter[stats, info]]: constant[Python interpreter version. This is a flag you can pass to `Stats.submit()`. ] variable[version] assign[=] call[call[name[sys].version.replace, parameter[constant[ ], constant[ ]]].replace, parameter[constant[ ], constant[ ]]] variable[python] assign[=] call[constant[;].join, parameter[binary_operation[<ast.ListComp object at 0x7da20c991a80> + list[[<ast.Name object at 0x7da20c9926e0>]]]]] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da20c992890>, <ast.Name object at 0x7da20c992c80>]]]]
keyword[def] identifier[PYTHON_VERSION] ( identifier[stats] , identifier[info] ): literal[string] identifier[version] = identifier[sys] . identifier[version] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ) identifier[python] = literal[string] . identifier[join] ([ identifier[str] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[sys] . identifier[version_info] ]+[ identifier[version] ]) identifier[info] . identifier[append] (( literal[string] , identifier[python] ))
def PYTHON_VERSION(stats, info): """Python interpreter version. This is a flag you can pass to `Stats.submit()`. """ # Some versions of Python have a \n in sys.version! version = sys.version.replace(' \n', ' ').replace('\n', ' ') python = ';'.join([str(c) for c in sys.version_info] + [version]) info.append(('python', python))
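The `PYTHON_VERSION` row appends a `('python', ...)` entry whose value joins `sys.version_info` components with the flattened `sys.version` string. A standalone version of that construction, runnable on its own, is sketched below.

```python
import sys

def python_version_flag():
    """Build the 'python' info value the same way PYTHON_VERSION does."""
    # Some Python builds embed a newline in sys.version, so flatten it first.
    version = sys.version.replace(' \n', ' ').replace('\n', ' ')
    return ';'.join([str(c) for c in sys.version_info] + [version])

info = []
info.append(('python', python_version_flag()))
print(info[0][0], '->', info[0][1])
```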
def print_help(self, session, command=None): """ Prints the available methods and their documentation, or the documentation of the given command. """ if command: # Single command mode if command in self._commands: # Argument is a name space self.__print_namespace_help(session, command) was_namespace = True else: was_namespace = False # Also print the name of matching commands try: # Extract command name space and name possibilities = self.get_ns_commands(command) except ValueError as ex: # Unknown command if not was_namespace: # ... and no name space were matching either -> error session.write_line(str(ex)) return False else: # Print the help of the found command if was_namespace: # Give some space session.write_line("\n\n") for namespace, cmd_name in possibilities: self.__print_namespace_help(session, namespace, cmd_name) else: # Get all name spaces namespaces = list(self._commands.keys()) namespaces.remove(DEFAULT_NAMESPACE) namespaces.sort() namespaces.insert(0, DEFAULT_NAMESPACE) first_ns = True for namespace in namespaces: if not first_ns: # Add empty lines session.write_line("\n\n") # Print the help of all commands self.__print_namespace_help(session, namespace) first_ns = False return None
def function[print_help, parameter[self, session, command]]: constant[ Prints the available methods and their documentation, or the documentation of the given command. ] if name[command] begin[:] if compare[name[command] in name[self]._commands] begin[:] call[name[self].__print_namespace_help, parameter[name[session], name[command]]] variable[was_namespace] assign[=] constant[True] <ast.Try object at 0x7da1b039be80> return[constant[None]]
keyword[def] identifier[print_help] ( identifier[self] , identifier[session] , identifier[command] = keyword[None] ): literal[string] keyword[if] identifier[command] : keyword[if] identifier[command] keyword[in] identifier[self] . identifier[_commands] : identifier[self] . identifier[__print_namespace_help] ( identifier[session] , identifier[command] ) identifier[was_namespace] = keyword[True] keyword[else] : identifier[was_namespace] = keyword[False] keyword[try] : identifier[possibilities] = identifier[self] . identifier[get_ns_commands] ( identifier[command] ) keyword[except] identifier[ValueError] keyword[as] identifier[ex] : keyword[if] keyword[not] identifier[was_namespace] : identifier[session] . identifier[write_line] ( identifier[str] ( identifier[ex] )) keyword[return] keyword[False] keyword[else] : keyword[if] identifier[was_namespace] : identifier[session] . identifier[write_line] ( literal[string] ) keyword[for] identifier[namespace] , identifier[cmd_name] keyword[in] identifier[possibilities] : identifier[self] . identifier[__print_namespace_help] ( identifier[session] , identifier[namespace] , identifier[cmd_name] ) keyword[else] : identifier[namespaces] = identifier[list] ( identifier[self] . identifier[_commands] . identifier[keys] ()) identifier[namespaces] . identifier[remove] ( identifier[DEFAULT_NAMESPACE] ) identifier[namespaces] . identifier[sort] () identifier[namespaces] . identifier[insert] ( literal[int] , identifier[DEFAULT_NAMESPACE] ) identifier[first_ns] = keyword[True] keyword[for] identifier[namespace] keyword[in] identifier[namespaces] : keyword[if] keyword[not] identifier[first_ns] : identifier[session] . identifier[write_line] ( literal[string] ) identifier[self] . identifier[__print_namespace_help] ( identifier[session] , identifier[namespace] ) identifier[first_ns] = keyword[False] keyword[return] keyword[None]
def print_help(self, session, command=None): """ Prints the available methods and their documentation, or the documentation of the given command. """ if command: # Single command mode if command in self._commands: # Argument is a name space self.__print_namespace_help(session, command) was_namespace = True # depends on [control=['if'], data=['command']] else: was_namespace = False # Also print the name of matching commands try: # Extract command name space and name possibilities = self.get_ns_commands(command) # depends on [control=['try'], data=[]] except ValueError as ex: # Unknown command if not was_namespace: # ... and no name space were matching either -> error session.write_line(str(ex)) return False # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['ex']] else: # Print the help of the found command if was_namespace: # Give some space session.write_line('\n\n') # depends on [control=['if'], data=[]] for (namespace, cmd_name) in possibilities: self.__print_namespace_help(session, namespace, cmd_name) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] else: # Get all name spaces namespaces = list(self._commands.keys()) namespaces.remove(DEFAULT_NAMESPACE) namespaces.sort() namespaces.insert(0, DEFAULT_NAMESPACE) first_ns = True for namespace in namespaces: if not first_ns: # Add empty lines session.write_line('\n\n') # depends on [control=['if'], data=[]] # Print the help of all commands self.__print_namespace_help(session, namespace) first_ns = False # depends on [control=['for'], data=['namespace']] return None
def parse_from_parent( self, parent, # type: ET.Element state # type: _ProcessorState ): # type: (...) -> Any """Parse the array data from the provided parent XML element.""" item_iter = parent.findall(self._item_path) return self._parse(item_iter, state)
def function[parse_from_parent, parameter[self, parent, state]]: constant[Parse the array data from the provided parent XML element.] variable[item_iter] assign[=] call[name[parent].findall, parameter[name[self]._item_path]] return[call[name[self]._parse, parameter[name[item_iter], name[state]]]]
keyword[def] identifier[parse_from_parent] ( identifier[self] , identifier[parent] , identifier[state] ): literal[string] identifier[item_iter] = identifier[parent] . identifier[findall] ( identifier[self] . identifier[_item_path] ) keyword[return] identifier[self] . identifier[_parse] ( identifier[item_iter] , identifier[state] )
def parse_from_parent(self, parent, state): # type: ET.Element # type: _ProcessorState # type: (...) -> Any 'Parse the array data from the provided parent XML element.' item_iter = parent.findall(self._item_path) return self._parse(item_iter, state)
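The `parse_from_parent` row simply runs `findall` on the parent element with a pre-configured item path and hands the iterator to an internal parser. The sketch below shows the equivalent `findall` plus per-item parsing on a hypothetical document; in the processor, `parent` and the item path come from the surrounding framework rather than being hard-coded.

```python
import xml.etree.ElementTree as ET

# Hypothetical document standing in for the parent element the processor receives.
parent = ET.fromstring(
    "<scores><score>3</score><score>7</score><score>11</score></scores>")
item_path = "score"

# Equivalent of parent.findall(self._item_path) followed by per-item parsing.
values = [int(el.text) for el in parent.findall(item_path)]
print(values)  # [3, 7, 11]
```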
def strictjoin(L, keycols, nullvals=None, renaming=None, Names=None): """ Combine two or more numpy ndarray with structured dtypes on common key column(s). Merge a list (or dictionary) of numpy arrays, given by `L`, on key columns listed in `keycols`. The ``strictjoin`` assumes the following restrictions: * each element of `keycol` must be a valid column name in `X` and each array in `L`, and all of the same data-type. * for each column `col` in `keycols`, and each array `A` in `L`, the values in `A[col]` must be unique, e.g. no repeats of values -- and same for `X[col]`. (Actually, the uniqueness criterion need not hold to the first tabarray in L, but first for all the subsequent ones.) * the *non*-key-column column names in each of the arrays must be disjoint from each other -- or disjoint after a renaming (see below). An error will be thrown if these conditions are not met. For a wrapper that attempts to meet these restrictions, see :func:`tabular.spreadsheet.join`. If you don't provide a value of `keycols`, the algorithm will attempt to infer which columns should be used by trying to find the largest set of common column names that contain unique values in each array and have the same data type. An error will be thrown if no such inference can be made. *Renaming of overlapping columns* If the non-keycol column names of the arrays overlap, ``join`` will by default attempt to rename the columns by using a simple convention: * If `L` is a list, it will append the number in the list to the key associated with the array. * If `L` is a dictionary, the algorithm will append the string representation of the key associated with an array to the overlapping columns from that array. You can override the default renaming scheme using the `renamer` parameter. *Nullvalues for keycolumn differences* If there are regions of the keycolumns that are not overlapping between merged arrays, `join` will fill in the relevant entries with null values chosen by default: * '0' for integer columns * '0.0' for float columns * the empty character ('') for string columns. **Parameters** **L** : list or dictionary Numpy recarrays to merge. If `L` is a dictionary, the keys name each numpy recarray, and the corresponding values are the actual numpy recarrays. **keycols** : list of strings List of the names of the key columns along which to do the merging. **nullvals** : function, optional A function that returns a null value for a numpy format descriptor string, e.g. ``'<i4'`` or ``'|S5'``. See the default function for further documentation: :func:`tabular.spreadsheet.DEFAULT_NULLVALUEFORMAT` **renaming** : dictionary of dictionaries, optional Dictionary mapping each input numpy recarray to a dictionary mapping each original column name to its new name following the convention above. For example, the result returned by: :func:`tabular.spreadsheet.DEFAULT_RENAMER` **Returns** **result** : numpy ndarray with structured dtype Result of the join, e.g. the result of merging the input numpy arrays defined in `L` on the key columns listed in `keycols`. 
**See Also:** :func:`tabular.spreadsheet.join` """ if isinstance(L,dict): Names = L.keys() LL = L.values() else: if Names == None: Names = range(len(L)) else: assert len(Names) == len(L) LL = L if isinstance(keycols,str): keycols = [l.strip() for l in keycols.split(',')] assert all([set(keycols) <= set(l.dtype.names) for l in LL]), \ ('keycols,', str(keycols), ', must be valid column names in all arrays being merged.') assert all([isunique(l[keycols]) for l in LL[1:]]), \ ('values in keycol columns,', str(keycols), ', must be unique in all arrays being merged.') if renaming == None: renaming = {} assert RenamingIsInCorrectFormat(renaming, L, Names=Names), \ 'renaming is not in proper format ... ' L = dict([(k,ll.copy()) for (k,ll) in zip(Names,LL)]) LL = L.values() for i in Names: l = L[i] l.dtype = np.dtype(l.dtype.descr) if i in renaming.keys(): for k in renaming[i].keys(): if k not in keycols: renamecol(L[i], k, renaming[i][k]) l.sort(order = keycols) commons = set(Commons([l.dtype.names for l in LL])).difference(keycols) assert len(commons) == 0, ('The following (non-keycol) column names ' 'appear in more than on array being merged:', str(commons)) Result = colstack([(L[Names[0]][keycols])[0:0]] + [deletecols(L[k][0:0], keycols) \ for k in Names if deletecols(L[k][0:0], keycols) != None]) PL = powerlist(Names) ToGet = utils.listunion([[p for p in PL if len(p) == k] for k in range(1, len(Names))]) + [PL[-1]] for I in ToGet[::-1]: Ref = L[I[0]][keycols] for j in I[1:]: if len(Ref) > 0: Ref = Ref[fast.recarrayisin(Ref, L[j][keycols], weak=True)] else: break if len(Ref) > 0: D = [fast.recarrayisin(L[j][keycols], Ref, weak=True) for j in I] Ref0 = L[I[0]][keycols][D[0]] Reps0 = np.append(np.append([-1], (Ref0[1:] != Ref0[:-1]).nonzero()[0]),[len(Ref0)-1]) Reps0 = Reps0[1:] - Reps0[:-1] NewRows = colstack([Ref0] + [deletecols(L[j][D[i]], keycols).repeat(Reps0) if i > 0 else deletecols(L[j][D[i]], keycols) for (i, j) in enumerate(I) if deletecols(L[j][D[i]], keycols) != None]) for (i,j) in enumerate(I): L[j] = L[j][np.invert(D[i])] Result = rowstack([Result, NewRows], mode='nulls', nullvals=nullvals) return Result
def function[strictjoin, parameter[L, keycols, nullvals, renaming, Names]]: constant[ Combine two or more numpy ndarray with structured dtypes on common key column(s). Merge a list (or dictionary) of numpy arrays, given by `L`, on key columns listed in `keycols`. The ``strictjoin`` assumes the following restrictions: * each element of `keycol` must be a valid column name in `X` and each array in `L`, and all of the same data-type. * for each column `col` in `keycols`, and each array `A` in `L`, the values in `A[col]` must be unique, e.g. no repeats of values -- and same for `X[col]`. (Actually, the uniqueness criterion need not hold to the first tabarray in L, but first for all the subsequent ones.) * the *non*-key-column column names in each of the arrays must be disjoint from each other -- or disjoint after a renaming (see below). An error will be thrown if these conditions are not met. For a wrapper that attempts to meet these restrictions, see :func:`tabular.spreadsheet.join`. If you don't provide a value of `keycols`, the algorithm will attempt to infer which columns should be used by trying to find the largest set of common column names that contain unique values in each array and have the same data type. An error will be thrown if no such inference can be made. *Renaming of overlapping columns* If the non-keycol column names of the arrays overlap, ``join`` will by default attempt to rename the columns by using a simple convention: * If `L` is a list, it will append the number in the list to the key associated with the array. * If `L` is a dictionary, the algorithm will append the string representation of the key associated with an array to the overlapping columns from that array. You can override the default renaming scheme using the `renamer` parameter. *Nullvalues for keycolumn differences* If there are regions of the keycolumns that are not overlapping between merged arrays, `join` will fill in the relevant entries with null values chosen by default: * '0' for integer columns * '0.0' for float columns * the empty character ('') for string columns. **Parameters** **L** : list or dictionary Numpy recarrays to merge. If `L` is a dictionary, the keys name each numpy recarray, and the corresponding values are the actual numpy recarrays. **keycols** : list of strings List of the names of the key columns along which to do the merging. **nullvals** : function, optional A function that returns a null value for a numpy format descriptor string, e.g. ``'<i4'`` or ``'|S5'``. See the default function for further documentation: :func:`tabular.spreadsheet.DEFAULT_NULLVALUEFORMAT` **renaming** : dictionary of dictionaries, optional Dictionary mapping each input numpy recarray to a dictionary mapping each original column name to its new name following the convention above. For example, the result returned by: :func:`tabular.spreadsheet.DEFAULT_RENAMER` **Returns** **result** : numpy ndarray with structured dtype Result of the join, e.g. the result of merging the input numpy arrays defined in `L` on the key columns listed in `keycols`. 
**See Also:** :func:`tabular.spreadsheet.join` ] if call[name[isinstance], parameter[name[L], name[dict]]] begin[:] variable[Names] assign[=] call[name[L].keys, parameter[]] variable[LL] assign[=] call[name[L].values, parameter[]] if call[name[isinstance], parameter[name[keycols], name[str]]] begin[:] variable[keycols] assign[=] <ast.ListComp object at 0x7da18bcc99f0> assert[call[name[all], parameter[<ast.ListComp object at 0x7da18bccb2e0>]]] assert[call[name[all], parameter[<ast.ListComp object at 0x7da18bccab90>]]] if compare[name[renaming] equal[==] constant[None]] begin[:] variable[renaming] assign[=] dictionary[[], []] assert[call[name[RenamingIsInCorrectFormat], parameter[name[renaming], name[L]]]] variable[L] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da18bcc93f0>]] variable[LL] assign[=] call[name[L].values, parameter[]] for taget[name[i]] in starred[name[Names]] begin[:] variable[l] assign[=] call[name[L]][name[i]] name[l].dtype assign[=] call[name[np].dtype, parameter[name[l].dtype.descr]] if compare[name[i] in call[name[renaming].keys, parameter[]]] begin[:] for taget[name[k]] in starred[call[call[name[renaming]][name[i]].keys, parameter[]]] begin[:] if compare[name[k] <ast.NotIn object at 0x7da2590d7190> name[keycols]] begin[:] call[name[renamecol], parameter[call[name[L]][name[i]], name[k], call[call[name[renaming]][name[i]]][name[k]]]] call[name[l].sort, parameter[]] variable[commons] assign[=] call[call[name[set], parameter[call[name[Commons], parameter[<ast.ListComp object at 0x7da18bcc9cf0>]]]].difference, parameter[name[keycols]]] assert[compare[call[name[len], parameter[name[commons]]] equal[==] constant[0]]] variable[Result] assign[=] call[name[colstack], parameter[binary_operation[list[[<ast.Subscript object at 0x7da18bccb7f0>]] + <ast.ListComp object at 0x7da18bcc8c40>]]] variable[PL] assign[=] call[name[powerlist], parameter[name[Names]]] variable[ToGet] assign[=] binary_operation[call[name[utils].listunion, parameter[<ast.ListComp object at 0x7da18f723520>]] + list[[<ast.Subscript object at 0x7da18f722530>]]] for taget[name[I]] in starred[call[name[ToGet]][<ast.Slice object at 0x7da18f721a50>]] begin[:] variable[Ref] assign[=] call[call[name[L]][call[name[I]][constant[0]]]][name[keycols]] for taget[name[j]] in starred[call[name[I]][<ast.Slice object at 0x7da18f720ac0>]] begin[:] if compare[call[name[len], parameter[name[Ref]]] greater[>] constant[0]] begin[:] variable[Ref] assign[=] call[name[Ref]][call[name[fast].recarrayisin, parameter[name[Ref], call[call[name[L]][name[j]]][name[keycols]]]]] if compare[call[name[len], parameter[name[Ref]]] greater[>] constant[0]] begin[:] variable[D] assign[=] <ast.ListComp object at 0x7da18f721210> variable[Ref0] assign[=] call[call[call[name[L]][call[name[I]][constant[0]]]][name[keycols]]][call[name[D]][constant[0]]] variable[Reps0] assign[=] call[name[np].append, parameter[call[name[np].append, parameter[list[[<ast.UnaryOp object at 0x7da18f722140>]], call[call[compare[call[name[Ref0]][<ast.Slice object at 0x7da18f721000>] not_equal[!=] call[name[Ref0]][<ast.Slice object at 0x7da18f720280>]].nonzero, parameter[]]][constant[0]]]], list[[<ast.BinOp object at 0x7da18f721990>]]]] variable[Reps0] assign[=] binary_operation[call[name[Reps0]][<ast.Slice object at 0x7da18f720d90>] - call[name[Reps0]][<ast.Slice object at 0x7da18f723250>]] variable[NewRows] assign[=] call[name[colstack], parameter[binary_operation[list[[<ast.Name object at 0x7da18f7201f0>]] + <ast.ListComp object at 0x7da18f720520>]]] for 
taget[tuple[[<ast.Name object at 0x7da18ede42b0>, <ast.Name object at 0x7da18ede55d0>]]] in starred[call[name[enumerate], parameter[name[I]]]] begin[:] call[name[L]][name[j]] assign[=] call[call[name[L]][name[j]]][call[name[np].invert, parameter[call[name[D]][name[i]]]]] variable[Result] assign[=] call[name[rowstack], parameter[list[[<ast.Name object at 0x7da18ede6260>, <ast.Name object at 0x7da18ede5750>]]]] return[name[Result]]
keyword[def] identifier[strictjoin] ( identifier[L] , identifier[keycols] , identifier[nullvals] = keyword[None] , identifier[renaming] = keyword[None] , identifier[Names] = keyword[None] ): literal[string] keyword[if] identifier[isinstance] ( identifier[L] , identifier[dict] ): identifier[Names] = identifier[L] . identifier[keys] () identifier[LL] = identifier[L] . identifier[values] () keyword[else] : keyword[if] identifier[Names] == keyword[None] : identifier[Names] = identifier[range] ( identifier[len] ( identifier[L] )) keyword[else] : keyword[assert] identifier[len] ( identifier[Names] )== identifier[len] ( identifier[L] ) identifier[LL] = identifier[L] keyword[if] identifier[isinstance] ( identifier[keycols] , identifier[str] ): identifier[keycols] =[ identifier[l] . identifier[strip] () keyword[for] identifier[l] keyword[in] identifier[keycols] . identifier[split] ( literal[string] )] keyword[assert] identifier[all] ([ identifier[set] ( identifier[keycols] )<= identifier[set] ( identifier[l] . identifier[dtype] . identifier[names] ) keyword[for] identifier[l] keyword[in] identifier[LL] ]),( literal[string] , identifier[str] ( identifier[keycols] ), literal[string] ) keyword[assert] identifier[all] ([ identifier[isunique] ( identifier[l] [ identifier[keycols] ]) keyword[for] identifier[l] keyword[in] identifier[LL] [ literal[int] :]]),( literal[string] , identifier[str] ( identifier[keycols] ), literal[string] ) keyword[if] identifier[renaming] == keyword[None] : identifier[renaming] ={} keyword[assert] identifier[RenamingIsInCorrectFormat] ( identifier[renaming] , identifier[L] , identifier[Names] = identifier[Names] ), literal[string] identifier[L] = identifier[dict] ([( identifier[k] , identifier[ll] . identifier[copy] ()) keyword[for] ( identifier[k] , identifier[ll] ) keyword[in] identifier[zip] ( identifier[Names] , identifier[LL] )]) identifier[LL] = identifier[L] . identifier[values] () keyword[for] identifier[i] keyword[in] identifier[Names] : identifier[l] = identifier[L] [ identifier[i] ] identifier[l] . identifier[dtype] = identifier[np] . identifier[dtype] ( identifier[l] . identifier[dtype] . identifier[descr] ) keyword[if] identifier[i] keyword[in] identifier[renaming] . identifier[keys] (): keyword[for] identifier[k] keyword[in] identifier[renaming] [ identifier[i] ]. identifier[keys] (): keyword[if] identifier[k] keyword[not] keyword[in] identifier[keycols] : identifier[renamecol] ( identifier[L] [ identifier[i] ], identifier[k] , identifier[renaming] [ identifier[i] ][ identifier[k] ]) identifier[l] . identifier[sort] ( identifier[order] = identifier[keycols] ) identifier[commons] = identifier[set] ( identifier[Commons] ([ identifier[l] . identifier[dtype] . identifier[names] keyword[for] identifier[l] keyword[in] identifier[LL] ])). 
identifier[difference] ( identifier[keycols] ) keyword[assert] identifier[len] ( identifier[commons] )== literal[int] ,( literal[string] literal[string] , identifier[str] ( identifier[commons] )) identifier[Result] = identifier[colstack] ([( identifier[L] [ identifier[Names] [ literal[int] ]][ identifier[keycols] ])[ literal[int] : literal[int] ]]+ [ identifier[deletecols] ( identifier[L] [ identifier[k] ][ literal[int] : literal[int] ], identifier[keycols] ) keyword[for] identifier[k] keyword[in] identifier[Names] keyword[if] identifier[deletecols] ( identifier[L] [ identifier[k] ][ literal[int] : literal[int] ], identifier[keycols] )!= keyword[None] ]) identifier[PL] = identifier[powerlist] ( identifier[Names] ) identifier[ToGet] = identifier[utils] . identifier[listunion] ([[ identifier[p] keyword[for] identifier[p] keyword[in] identifier[PL] keyword[if] identifier[len] ( identifier[p] )== identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[Names] ))])+[ identifier[PL] [- literal[int] ]] keyword[for] identifier[I] keyword[in] identifier[ToGet] [::- literal[int] ]: identifier[Ref] = identifier[L] [ identifier[I] [ literal[int] ]][ identifier[keycols] ] keyword[for] identifier[j] keyword[in] identifier[I] [ literal[int] :]: keyword[if] identifier[len] ( identifier[Ref] )> literal[int] : identifier[Ref] = identifier[Ref] [ identifier[fast] . identifier[recarrayisin] ( identifier[Ref] , identifier[L] [ identifier[j] ][ identifier[keycols] ], identifier[weak] = keyword[True] )] keyword[else] : keyword[break] keyword[if] identifier[len] ( identifier[Ref] )> literal[int] : identifier[D] =[ identifier[fast] . identifier[recarrayisin] ( identifier[L] [ identifier[j] ][ identifier[keycols] ], identifier[Ref] , identifier[weak] = keyword[True] ) keyword[for] identifier[j] keyword[in] identifier[I] ] identifier[Ref0] = identifier[L] [ identifier[I] [ literal[int] ]][ identifier[keycols] ][ identifier[D] [ literal[int] ]] identifier[Reps0] = identifier[np] . identifier[append] ( identifier[np] . identifier[append] ([- literal[int] ], ( identifier[Ref0] [ literal[int] :]!= identifier[Ref0] [:- literal[int] ]). identifier[nonzero] ()[ literal[int] ]),[ identifier[len] ( identifier[Ref0] )- literal[int] ]) identifier[Reps0] = identifier[Reps0] [ literal[int] :]- identifier[Reps0] [:- literal[int] ] identifier[NewRows] = identifier[colstack] ([ identifier[Ref0] ]+ [ identifier[deletecols] ( identifier[L] [ identifier[j] ][ identifier[D] [ identifier[i] ]], identifier[keycols] ). identifier[repeat] ( identifier[Reps0] ) keyword[if] identifier[i] > literal[int] keyword[else] identifier[deletecols] ( identifier[L] [ identifier[j] ][ identifier[D] [ identifier[i] ]], identifier[keycols] ) keyword[for] ( identifier[i] , identifier[j] ) keyword[in] identifier[enumerate] ( identifier[I] ) keyword[if] identifier[deletecols] ( identifier[L] [ identifier[j] ][ identifier[D] [ identifier[i] ]], identifier[keycols] )!= keyword[None] ]) keyword[for] ( identifier[i] , identifier[j] ) keyword[in] identifier[enumerate] ( identifier[I] ): identifier[L] [ identifier[j] ]= identifier[L] [ identifier[j] ][ identifier[np] . identifier[invert] ( identifier[D] [ identifier[i] ])] identifier[Result] = identifier[rowstack] ([ identifier[Result] , identifier[NewRows] ], identifier[mode] = literal[string] , identifier[nullvals] = identifier[nullvals] ) keyword[return] identifier[Result]
def strictjoin(L, keycols, nullvals=None, renaming=None, Names=None): """ Combine two or more numpy ndarray with structured dtypes on common key column(s). Merge a list (or dictionary) of numpy arrays, given by `L`, on key columns listed in `keycols`. The ``strictjoin`` assumes the following restrictions: * each element of `keycol` must be a valid column name in `X` and each array in `L`, and all of the same data-type. * for each column `col` in `keycols`, and each array `A` in `L`, the values in `A[col]` must be unique, e.g. no repeats of values -- and same for `X[col]`. (Actually, the uniqueness criterion need not hold to the first tabarray in L, but first for all the subsequent ones.) * the *non*-key-column column names in each of the arrays must be disjoint from each other -- or disjoint after a renaming (see below). An error will be thrown if these conditions are not met. For a wrapper that attempts to meet these restrictions, see :func:`tabular.spreadsheet.join`. If you don't provide a value of `keycols`, the algorithm will attempt to infer which columns should be used by trying to find the largest set of common column names that contain unique values in each array and have the same data type. An error will be thrown if no such inference can be made. *Renaming of overlapping columns* If the non-keycol column names of the arrays overlap, ``join`` will by default attempt to rename the columns by using a simple convention: * If `L` is a list, it will append the number in the list to the key associated with the array. * If `L` is a dictionary, the algorithm will append the string representation of the key associated with an array to the overlapping columns from that array. You can override the default renaming scheme using the `renamer` parameter. *Nullvalues for keycolumn differences* If there are regions of the keycolumns that are not overlapping between merged arrays, `join` will fill in the relevant entries with null values chosen by default: * '0' for integer columns * '0.0' for float columns * the empty character ('') for string columns. **Parameters** **L** : list or dictionary Numpy recarrays to merge. If `L` is a dictionary, the keys name each numpy recarray, and the corresponding values are the actual numpy recarrays. **keycols** : list of strings List of the names of the key columns along which to do the merging. **nullvals** : function, optional A function that returns a null value for a numpy format descriptor string, e.g. ``'<i4'`` or ``'|S5'``. See the default function for further documentation: :func:`tabular.spreadsheet.DEFAULT_NULLVALUEFORMAT` **renaming** : dictionary of dictionaries, optional Dictionary mapping each input numpy recarray to a dictionary mapping each original column name to its new name following the convention above. For example, the result returned by: :func:`tabular.spreadsheet.DEFAULT_RENAMER` **Returns** **result** : numpy ndarray with structured dtype Result of the join, e.g. the result of merging the input numpy arrays defined in `L` on the key columns listed in `keycols`. 
**See Also:** :func:`tabular.spreadsheet.join` """ if isinstance(L, dict): Names = L.keys() LL = L.values() # depends on [control=['if'], data=[]] else: if Names == None: Names = range(len(L)) # depends on [control=['if'], data=['Names']] else: assert len(Names) == len(L) LL = L if isinstance(keycols, str): keycols = [l.strip() for l in keycols.split(',')] # depends on [control=['if'], data=[]] assert all([set(keycols) <= set(l.dtype.names) for l in LL]), ('keycols,', str(keycols), ', must be valid column names in all arrays being merged.') assert all([isunique(l[keycols]) for l in LL[1:]]), ('values in keycol columns,', str(keycols), ', must be unique in all arrays being merged.') if renaming == None: renaming = {} # depends on [control=['if'], data=['renaming']] assert RenamingIsInCorrectFormat(renaming, L, Names=Names), 'renaming is not in proper format ... ' L = dict([(k, ll.copy()) for (k, ll) in zip(Names, LL)]) LL = L.values() for i in Names: l = L[i] l.dtype = np.dtype(l.dtype.descr) if i in renaming.keys(): for k in renaming[i].keys(): if k not in keycols: renamecol(L[i], k, renaming[i][k]) # depends on [control=['if'], data=['k']] # depends on [control=['for'], data=['k']] # depends on [control=['if'], data=['i']] l.sort(order=keycols) # depends on [control=['for'], data=['i']] commons = set(Commons([l.dtype.names for l in LL])).difference(keycols) assert len(commons) == 0, ('The following (non-keycol) column names appear in more than on array being merged:', str(commons)) Result = colstack([L[Names[0]][keycols][0:0]] + [deletecols(L[k][0:0], keycols) for k in Names if deletecols(L[k][0:0], keycols) != None]) PL = powerlist(Names) ToGet = utils.listunion([[p for p in PL if len(p) == k] for k in range(1, len(Names))]) + [PL[-1]] for I in ToGet[::-1]: Ref = L[I[0]][keycols] for j in I[1:]: if len(Ref) > 0: Ref = Ref[fast.recarrayisin(Ref, L[j][keycols], weak=True)] # depends on [control=['if'], data=[]] else: break # depends on [control=['for'], data=['j']] if len(Ref) > 0: D = [fast.recarrayisin(L[j][keycols], Ref, weak=True) for j in I] Ref0 = L[I[0]][keycols][D[0]] Reps0 = np.append(np.append([-1], (Ref0[1:] != Ref0[:-1]).nonzero()[0]), [len(Ref0) - 1]) Reps0 = Reps0[1:] - Reps0[:-1] NewRows = colstack([Ref0] + [deletecols(L[j][D[i]], keycols).repeat(Reps0) if i > 0 else deletecols(L[j][D[i]], keycols) for (i, j) in enumerate(I) if deletecols(L[j][D[i]], keycols) != None]) for (i, j) in enumerate(I): L[j] = L[j][np.invert(D[i])] # depends on [control=['for'], data=[]] Result = rowstack([Result, NewRows], mode='nulls', nullvals=nullvals) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['I']] return Result
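A hedged usage sketch for the strictjoin record above. It assumes the function is importable as tabular.spreadsheet.strictjoin (the docstring's cross-references point at that module) and that the tabular package plus numpy are installed; the column names and values below are made up for illustration only.

import numpy as np
import tabular.spreadsheet as sp  # assumed location of strictjoin

# Two structured arrays sharing the key column 'id'; the key values are
# unique and the non-key column names ('color' vs 'weight') are disjoint,
# so the strictjoin preconditions described in the docstring are met.
x = np.rec.fromrecords([(1, 'a'), (2, 'b')], names='id,color')
y = np.rec.fromrecords([(1, 10.0), (2, 20.0)], names='id,weight')

merged = sp.strictjoin([x, y], keycols=['id'])
print(merged.dtype.names)   # expected: ('id', 'color', 'weight')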
def find_first_wt_parent(self, with_ip=False): """ Recursively looks at the part_of parent ancestry line (ignoring pooled_from parents) and returns a parent Biosample ID if its wild_type attribute is True. Args: with_ip: `bool`. True means to restrict the search to the first parental Wild Type that also has an Immunoblot linked to it, which may serve as a control between another immunoblot. For example, it could be useful to compare the target protein bands in Immunoblots between a Wild Type sample and a CRISPR eGFP-tagged gene in a descendent sample. Returns: `False`: There isn't a WT parent, or there is but not one with an Immunoblot linked to it (if the `with_ip` parameter is set to True). `int`: The ID of the WT parent. """ parent_id = self.part_of_id if not parent_id: return False parent = Biosample(parent_id) if parent.wild_type: if with_ip and parent.immunoblot_ids: return parent.id elif not with_ip: return parent.id return parent.find_first_wt_parent(with_ip=with_ip)
def function[find_first_wt_parent, parameter[self, with_ip]]: constant[ Recursively looks at the part_of parent ancestry line (ignoring pooled_from parents) and returns a parent Biosample ID if its wild_type attribute is True. Args: with_ip: `bool`. True means to restrict the search to the first parental Wild Type that also has an Immunoblot linked to it, which may serve as a control between another immunoblot. For example, it could be useful to compare the target protein bands in Immunoblots between a Wild Type sample and a CRISPR eGFP-tagged gene in a descendent sample. Returns: `False`: There isn't a WT parent, or there is but not one with an Immunoblot linked to it (if the `with_ip` parameter is set to True). `int`: The ID of the WT parent. ] variable[parent_id] assign[=] name[self].part_of_id if <ast.UnaryOp object at 0x7da1b10d6140> begin[:] return[constant[False]] variable[parent] assign[=] call[name[Biosample], parameter[name[parent_id]]] if name[parent].wild_type begin[:] if <ast.BoolOp object at 0x7da1b10d7ac0> begin[:] return[name[parent].id] return[call[name[parent].find_first_wt_parent, parameter[]]]
keyword[def] identifier[find_first_wt_parent] ( identifier[self] , identifier[with_ip] = keyword[False] ): literal[string] identifier[parent_id] = identifier[self] . identifier[part_of_id] keyword[if] keyword[not] identifier[parent_id] : keyword[return] keyword[False] identifier[parent] = identifier[Biosample] ( identifier[parent_id] ) keyword[if] identifier[parent] . identifier[wild_type] : keyword[if] identifier[with_ip] keyword[and] identifier[parent] . identifier[immunoblot_ids] : keyword[return] identifier[parent] . identifier[id] keyword[elif] keyword[not] identifier[with_ip] : keyword[return] identifier[parent] . identifier[id] keyword[return] identifier[parent] . identifier[find_first_wt_parent] ( identifier[with_ip] = identifier[with_ip] )
def find_first_wt_parent(self, with_ip=False): """ Recursively looks at the part_of parent ancestry line (ignoring pooled_from parents) and returns a parent Biosample ID if its wild_type attribute is True. Args: with_ip: `bool`. True means to restrict the search to the first parental Wild Type that also has an Immunoblot linked to it, which may serve as a control between another immunoblot. For example, it could be useful to compare the target protein bands in Immunoblots between a Wild Type sample and a CRISPR eGFP-tagged gene in a descendent sample. Returns: `False`: There isn't a WT parent, or there is but not one with an Immunoblot linked to it (if the `with_ip` parameter is set to True). `int`: The ID of the WT parent. """ parent_id = self.part_of_id if not parent_id: return False # depends on [control=['if'], data=[]] parent = Biosample(parent_id) if parent.wild_type: if with_ip and parent.immunoblot_ids: return parent.id # depends on [control=['if'], data=[]] elif not with_ip: return parent.id # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return parent.find_first_wt_parent(with_ip=with_ip)
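The Biosample model used above is not included in this record; below is a minimal, self-contained sketch of the same "walk up the part_of chain until a wild-type ancestor is found" pattern, using a small stand-in class with an in-memory registry instead of the real API.

class StubBiosample:
    registry = {}

    def __init__(self, id, wild_type=False, part_of_id=None, immunoblot_ids=()):
        self.id = id
        self.wild_type = wild_type
        self.part_of_id = part_of_id
        self.immunoblot_ids = list(immunoblot_ids)
        StubBiosample.registry[id] = self

    def find_first_wt_parent(self, with_ip=False):
        # Stop when there is no parent left to inspect.
        if not self.part_of_id:
            return False
        parent = StubBiosample.registry[self.part_of_id]
        if parent.wild_type:
            if with_ip and parent.immunoblot_ids:
                return parent.id
            elif not with_ip:
                return parent.id
        # Otherwise keep climbing the ancestry line.
        return parent.find_first_wt_parent(with_ip=with_ip)

wt_root = StubBiosample(1, wild_type=True, immunoblot_ids=[7])
middle = StubBiosample(2, part_of_id=1)
leaf = StubBiosample(3, part_of_id=2)
print(leaf.find_first_wt_parent())              # 1
print(leaf.find_first_wt_parent(with_ip=True))  # 1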
def stats_for(self, dt): """ Returns stats for the month containing the given datetime """ # TODO - this would be nicer if we formatted the stats if not isinstance(dt, datetime): raise TypeError('stats_for requires a datetime object!') return self._client.get('{}/stats/'.format(dt.strftime('%Y/%m')))
def function[stats_for, parameter[self, dt]]: constant[ Returns stats for the month containing the given datetime ] if <ast.UnaryOp object at 0x7da1b0f078b0> begin[:] <ast.Raise object at 0x7da1b0f07370> return[call[name[self]._client.get, parameter[call[constant[{}/stats/].format, parameter[call[name[dt].strftime, parameter[constant[%Y/%m]]]]]]]]
keyword[def] identifier[stats_for] ( identifier[self] , identifier[dt] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[dt] , identifier[datetime] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[return] identifier[self] . identifier[_client] . identifier[get] ( literal[string] . identifier[format] ( identifier[dt] . identifier[strftime] ( literal[string] )))
def stats_for(self, dt): """ Returns stats for the month containing the given datetime """ # TODO - this would be nicer if we formatted the stats if not isinstance(dt, datetime): raise TypeError('stats_for requires a datetime object!') # depends on [control=['if'], data=[]] return self._client.get('{}/stats/'.format(dt.strftime('%Y/%m')))
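A small, runnable illustration of the path that stats_for derives from its argument; the date below is arbitrary, and the base URL handled by self._client is not shown in this record.

from datetime import datetime

dt = datetime(2021, 5, 3)
path = '{}/stats/'.format(dt.strftime('%Y/%m'))
print(path)  # -> 2021/05/stats/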
def filter_by_attrs(self, **kwargs): """Returns a ``Dataset`` with variables that match specific conditions. Can pass in ``key=value`` or ``key=callable``. A Dataset is returned containing only the variables for which all the filter tests pass. These tests are either ``key=value`` for which the attribute ``key`` has the exact value ``value`` or the callable passed into ``key=callable`` returns True. The callable will be passed a single value, either the value of the attribute ``key`` or ``None`` if the DataArray does not have an attribute with the name ``key``. Parameters ---------- **kwargs : key=value key : str Attribute name. value : callable or obj If value is a callable, it should return a boolean in the form of bool = func(attr) where attr is da.attrs[key]. Otherwise, value will be compared to the each DataArray's attrs[key]. Returns ------- new : Dataset New dataset with variables filtered by attribute. Examples -------- >>> # Create an example dataset: >>> import numpy as np >>> import pandas as pd >>> import xarray as xr >>> temp = 15 + 8 * np.random.randn(2, 2, 3) >>> precip = 10 * np.random.rand(2, 2, 3) >>> lon = [[-99.83, -99.32], [-99.79, -99.23]] >>> lat = [[42.25, 42.21], [42.63, 42.59]] >>> dims = ['x', 'y', 'time'] >>> temp_attr = dict(standard_name='air_potential_temperature') >>> precip_attr = dict(standard_name='convective_precipitation_flux') >>> ds = xr.Dataset({ ... 'temperature': (dims, temp, temp_attr), ... 'precipitation': (dims, precip, precip_attr)}, ... coords={ ... 'lon': (['x', 'y'], lon), ... 'lat': (['x', 'y'], lat), ... 'time': pd.date_range('2014-09-06', periods=3), ... 'reference_time': pd.Timestamp('2014-09-05')}) >>> # Get variables matching a specific standard_name. >>> ds.filter_by_attrs(standard_name='convective_precipitation_flux') <xarray.Dataset> Dimensions: (time: 3, x: 2, y: 2) Coordinates: * x (x) int64 0 1 * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08 lat (x, y) float64 42.25 42.21 42.63 42.59 * y (y) int64 0 1 reference_time datetime64[ns] 2014-09-05 lon (x, y) float64 -99.83 -99.32 -99.79 -99.23 Data variables: precipitation (x, y, time) float64 4.178 2.307 6.041 6.046 0.06648 ... >>> # Get all variables that have a standard_name attribute. >>> standard_name = lambda v: v is not None >>> ds.filter_by_attrs(standard_name=standard_name) <xarray.Dataset> Dimensions: (time: 3, x: 2, y: 2) Coordinates: lon (x, y) float64 -99.83 -99.32 -99.79 -99.23 lat (x, y) float64 42.25 42.21 42.63 42.59 * x (x) int64 0 1 * y (y) int64 0 1 * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08 reference_time datetime64[ns] 2014-09-05 Data variables: temperature (x, y, time) float64 25.86 20.82 6.954 23.13 10.25 11.68 ... precipitation (x, y, time) float64 5.702 0.9422 2.075 1.178 3.284 ... """ # noqa selection = [] for var_name, variable in self.data_vars.items(): has_value_flag = False for attr_name, pattern in kwargs.items(): attr_value = variable.attrs.get(attr_name) if ((callable(pattern) and pattern(attr_value)) or attr_value == pattern): has_value_flag = True else: has_value_flag = False break if has_value_flag is True: selection.append(var_name) return self[selection]
def function[filter_by_attrs, parameter[self]]: constant[Returns a ``Dataset`` with variables that match specific conditions. Can pass in ``key=value`` or ``key=callable``. A Dataset is returned containing only the variables for which all the filter tests pass. These tests are either ``key=value`` for which the attribute ``key`` has the exact value ``value`` or the callable passed into ``key=callable`` returns True. The callable will be passed a single value, either the value of the attribute ``key`` or ``None`` if the DataArray does not have an attribute with the name ``key``. Parameters ---------- **kwargs : key=value key : str Attribute name. value : callable or obj If value is a callable, it should return a boolean in the form of bool = func(attr) where attr is da.attrs[key]. Otherwise, value will be compared to the each DataArray's attrs[key]. Returns ------- new : Dataset New dataset with variables filtered by attribute. Examples -------- >>> # Create an example dataset: >>> import numpy as np >>> import pandas as pd >>> import xarray as xr >>> temp = 15 + 8 * np.random.randn(2, 2, 3) >>> precip = 10 * np.random.rand(2, 2, 3) >>> lon = [[-99.83, -99.32], [-99.79, -99.23]] >>> lat = [[42.25, 42.21], [42.63, 42.59]] >>> dims = ['x', 'y', 'time'] >>> temp_attr = dict(standard_name='air_potential_temperature') >>> precip_attr = dict(standard_name='convective_precipitation_flux') >>> ds = xr.Dataset({ ... 'temperature': (dims, temp, temp_attr), ... 'precipitation': (dims, precip, precip_attr)}, ... coords={ ... 'lon': (['x', 'y'], lon), ... 'lat': (['x', 'y'], lat), ... 'time': pd.date_range('2014-09-06', periods=3), ... 'reference_time': pd.Timestamp('2014-09-05')}) >>> # Get variables matching a specific standard_name. >>> ds.filter_by_attrs(standard_name='convective_precipitation_flux') <xarray.Dataset> Dimensions: (time: 3, x: 2, y: 2) Coordinates: * x (x) int64 0 1 * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08 lat (x, y) float64 42.25 42.21 42.63 42.59 * y (y) int64 0 1 reference_time datetime64[ns] 2014-09-05 lon (x, y) float64 -99.83 -99.32 -99.79 -99.23 Data variables: precipitation (x, y, time) float64 4.178 2.307 6.041 6.046 0.06648 ... >>> # Get all variables that have a standard_name attribute. >>> standard_name = lambda v: v is not None >>> ds.filter_by_attrs(standard_name=standard_name) <xarray.Dataset> Dimensions: (time: 3, x: 2, y: 2) Coordinates: lon (x, y) float64 -99.83 -99.32 -99.79 -99.23 lat (x, y) float64 42.25 42.21 42.63 42.59 * x (x) int64 0 1 * y (y) int64 0 1 * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08 reference_time datetime64[ns] 2014-09-05 Data variables: temperature (x, y, time) float64 25.86 20.82 6.954 23.13 10.25 11.68 ... precipitation (x, y, time) float64 5.702 0.9422 2.075 1.178 3.284 ... 
] variable[selection] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da2041da110>, <ast.Name object at 0x7da2041d8a60>]]] in starred[call[name[self].data_vars.items, parameter[]]] begin[:] variable[has_value_flag] assign[=] constant[False] for taget[tuple[[<ast.Name object at 0x7da2041d86a0>, <ast.Name object at 0x7da2041da2f0>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:] variable[attr_value] assign[=] call[name[variable].attrs.get, parameter[name[attr_name]]] if <ast.BoolOp object at 0x7da2041db6d0> begin[:] variable[has_value_flag] assign[=] constant[True] if compare[name[has_value_flag] is constant[True]] begin[:] call[name[selection].append, parameter[name[var_name]]] return[call[name[self]][name[selection]]]
keyword[def] identifier[filter_by_attrs] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[selection] =[] keyword[for] identifier[var_name] , identifier[variable] keyword[in] identifier[self] . identifier[data_vars] . identifier[items] (): identifier[has_value_flag] = keyword[False] keyword[for] identifier[attr_name] , identifier[pattern] keyword[in] identifier[kwargs] . identifier[items] (): identifier[attr_value] = identifier[variable] . identifier[attrs] . identifier[get] ( identifier[attr_name] ) keyword[if] (( identifier[callable] ( identifier[pattern] ) keyword[and] identifier[pattern] ( identifier[attr_value] )) keyword[or] identifier[attr_value] == identifier[pattern] ): identifier[has_value_flag] = keyword[True] keyword[else] : identifier[has_value_flag] = keyword[False] keyword[break] keyword[if] identifier[has_value_flag] keyword[is] keyword[True] : identifier[selection] . identifier[append] ( identifier[var_name] ) keyword[return] identifier[self] [ identifier[selection] ]
def filter_by_attrs(self, **kwargs): """Returns a ``Dataset`` with variables that match specific conditions. Can pass in ``key=value`` or ``key=callable``. A Dataset is returned containing only the variables for which all the filter tests pass. These tests are either ``key=value`` for which the attribute ``key`` has the exact value ``value`` or the callable passed into ``key=callable`` returns True. The callable will be passed a single value, either the value of the attribute ``key`` or ``None`` if the DataArray does not have an attribute with the name ``key``. Parameters ---------- **kwargs : key=value key : str Attribute name. value : callable or obj If value is a callable, it should return a boolean in the form of bool = func(attr) where attr is da.attrs[key]. Otherwise, value will be compared to the each DataArray's attrs[key]. Returns ------- new : Dataset New dataset with variables filtered by attribute. Examples -------- >>> # Create an example dataset: >>> import numpy as np >>> import pandas as pd >>> import xarray as xr >>> temp = 15 + 8 * np.random.randn(2, 2, 3) >>> precip = 10 * np.random.rand(2, 2, 3) >>> lon = [[-99.83, -99.32], [-99.79, -99.23]] >>> lat = [[42.25, 42.21], [42.63, 42.59]] >>> dims = ['x', 'y', 'time'] >>> temp_attr = dict(standard_name='air_potential_temperature') >>> precip_attr = dict(standard_name='convective_precipitation_flux') >>> ds = xr.Dataset({ ... 'temperature': (dims, temp, temp_attr), ... 'precipitation': (dims, precip, precip_attr)}, ... coords={ ... 'lon': (['x', 'y'], lon), ... 'lat': (['x', 'y'], lat), ... 'time': pd.date_range('2014-09-06', periods=3), ... 'reference_time': pd.Timestamp('2014-09-05')}) >>> # Get variables matching a specific standard_name. >>> ds.filter_by_attrs(standard_name='convective_precipitation_flux') <xarray.Dataset> Dimensions: (time: 3, x: 2, y: 2) Coordinates: * x (x) int64 0 1 * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08 lat (x, y) float64 42.25 42.21 42.63 42.59 * y (y) int64 0 1 reference_time datetime64[ns] 2014-09-05 lon (x, y) float64 -99.83 -99.32 -99.79 -99.23 Data variables: precipitation (x, y, time) float64 4.178 2.307 6.041 6.046 0.06648 ... >>> # Get all variables that have a standard_name attribute. >>> standard_name = lambda v: v is not None >>> ds.filter_by_attrs(standard_name=standard_name) <xarray.Dataset> Dimensions: (time: 3, x: 2, y: 2) Coordinates: lon (x, y) float64 -99.83 -99.32 -99.79 -99.23 lat (x, y) float64 42.25 42.21 42.63 42.59 * x (x) int64 0 1 * y (y) int64 0 1 * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08 reference_time datetime64[ns] 2014-09-05 Data variables: temperature (x, y, time) float64 25.86 20.82 6.954 23.13 10.25 11.68 ... precipitation (x, y, time) float64 5.702 0.9422 2.075 1.178 3.284 ... """ # noqa selection = [] for (var_name, variable) in self.data_vars.items(): has_value_flag = False for (attr_name, pattern) in kwargs.items(): attr_value = variable.attrs.get(attr_name) if callable(pattern) and pattern(attr_value) or attr_value == pattern: has_value_flag = True # depends on [control=['if'], data=[]] else: has_value_flag = False break # depends on [control=['for'], data=[]] if has_value_flag is True: selection.append(var_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return self[selection]
def _extract(self, path, outdir, filter_func=None): """Extract from a zip file, with an optional filter. :param function filter_func: optional filter with the filename as the parameter. Returns True if the file should be extracted.""" with open_zip(path) as archive_file: for name in archive_file.namelist(): # While we're at it, we also perform this safety test. if name.startswith('/') or name.startswith('..'): raise ValueError('Zip file contains unsafe path: {}'.format(name)) if (not filter_func or filter_func(name)): archive_file.extract(name, outdir)
def function[_extract, parameter[self, path, outdir, filter_func]]: constant[Extract from a zip file, with an optional filter. :param function filter_func: optional filter with the filename as the parameter. Returns True if the file should be extracted.] with call[name[open_zip], parameter[name[path]]] begin[:] for taget[name[name]] in starred[call[name[archive_file].namelist, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da1b22f8b80> begin[:] <ast.Raise object at 0x7da1b1ddef50> if <ast.BoolOp object at 0x7da1b1ddcf40> begin[:] call[name[archive_file].extract, parameter[name[name], name[outdir]]]
keyword[def] identifier[_extract] ( identifier[self] , identifier[path] , identifier[outdir] , identifier[filter_func] = keyword[None] ): literal[string] keyword[with] identifier[open_zip] ( identifier[path] ) keyword[as] identifier[archive_file] : keyword[for] identifier[name] keyword[in] identifier[archive_file] . identifier[namelist] (): keyword[if] identifier[name] . identifier[startswith] ( literal[string] ) keyword[or] identifier[name] . identifier[startswith] ( literal[string] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[name] )) keyword[if] ( keyword[not] identifier[filter_func] keyword[or] identifier[filter_func] ( identifier[name] )): identifier[archive_file] . identifier[extract] ( identifier[name] , identifier[outdir] )
def _extract(self, path, outdir, filter_func=None): """Extract from a zip file, with an optional filter. :param function filter_func: optional filter with the filename as the parameter. Returns True if the file should be extracted.""" with open_zip(path) as archive_file: for name in archive_file.namelist(): # While we're at it, we also perform this safety test. if name.startswith('/') or name.startswith('..'): raise ValueError('Zip file contains unsafe path: {}'.format(name)) # depends on [control=['if'], data=[]] if not filter_func or filter_func(name): archive_file.extract(name, outdir) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']] # depends on [control=['with'], data=['archive_file']]
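open_zip is defined elsewhere in the source package and is not part of this record; the sketch below assumes the standard-library zipfile.ZipFile is an acceptable stand-in and shows the same filter-plus-safety-check pattern as a free function.

import zipfile

def extract_filtered(zip_path, outdir, filter_func=None):
    with zipfile.ZipFile(zip_path) as archive_file:
        for name in archive_file.namelist():
            # Reject absolute or parent-relative member names before extracting.
            if name.startswith('/') or name.startswith('..'):
                raise ValueError('Zip file contains unsafe path: {}'.format(name))
            if not filter_func or filter_func(name):
                archive_file.extract(name, outdir)

# e.g. only extract Python sources (the paths are illustrative):
# extract_filtered('bundle.zip', 'out', lambda n: n.endswith('.py'))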
def delete(gandi, fqdn, name, type, force): """Delete record entry for a domain.""" domains = gandi.dns.list() domains = [domain['fqdn'] for domain in domains] if fqdn not in domains: gandi.echo('Sorry domain %s does not exist' % fqdn) gandi.echo('Please use one of the following: %s' % ', '.join(domains)) return if not force: if not name and not type: prompt = ("Are you sure to delete all records for domain %s ?" % fqdn) elif name and not type: prompt = ("Are you sure to delete all '%s' name records for " "domain %s ?" % (name, fqdn)) else: prompt = ("Are you sure to delete all '%s' records of type %s " "for domain %s ?" % (name, type, fqdn)) proceed = click.confirm(prompt) if not proceed: return result = gandi.dns.del_record(fqdn, name, type) gandi.echo('Delete successful.') return result
def function[delete, parameter[gandi, fqdn, name, type, force]]: constant[Delete record entry for a domain.] variable[domains] assign[=] call[name[gandi].dns.list, parameter[]] variable[domains] assign[=] <ast.ListComp object at 0x7da20c6e6c50> if compare[name[fqdn] <ast.NotIn object at 0x7da2590d7190> name[domains]] begin[:] call[name[gandi].echo, parameter[binary_operation[constant[Sorry domain %s does not exist] <ast.Mod object at 0x7da2590d6920> name[fqdn]]]] call[name[gandi].echo, parameter[binary_operation[constant[Please use one of the following: %s] <ast.Mod object at 0x7da2590d6920> call[constant[, ].join, parameter[name[domains]]]]]] return[None] if <ast.UnaryOp object at 0x7da20c6e56c0> begin[:] if <ast.BoolOp object at 0x7da20c6e6f50> begin[:] variable[prompt] assign[=] binary_operation[constant[Are you sure to delete all records for domain %s ?] <ast.Mod object at 0x7da2590d6920> name[fqdn]] variable[proceed] assign[=] call[name[click].confirm, parameter[name[prompt]]] if <ast.UnaryOp object at 0x7da18dc056c0> begin[:] return[None] variable[result] assign[=] call[name[gandi].dns.del_record, parameter[name[fqdn], name[name], name[type]]] call[name[gandi].echo, parameter[constant[Delete successful.]]] return[name[result]]
keyword[def] identifier[delete] ( identifier[gandi] , identifier[fqdn] , identifier[name] , identifier[type] , identifier[force] ): literal[string] identifier[domains] = identifier[gandi] . identifier[dns] . identifier[list] () identifier[domains] =[ identifier[domain] [ literal[string] ] keyword[for] identifier[domain] keyword[in] identifier[domains] ] keyword[if] identifier[fqdn] keyword[not] keyword[in] identifier[domains] : identifier[gandi] . identifier[echo] ( literal[string] % identifier[fqdn] ) identifier[gandi] . identifier[echo] ( literal[string] % literal[string] . identifier[join] ( identifier[domains] )) keyword[return] keyword[if] keyword[not] identifier[force] : keyword[if] keyword[not] identifier[name] keyword[and] keyword[not] identifier[type] : identifier[prompt] =( literal[string] % identifier[fqdn] ) keyword[elif] identifier[name] keyword[and] keyword[not] identifier[type] : identifier[prompt] =( literal[string] literal[string] %( identifier[name] , identifier[fqdn] )) keyword[else] : identifier[prompt] =( literal[string] literal[string] %( identifier[name] , identifier[type] , identifier[fqdn] )) identifier[proceed] = identifier[click] . identifier[confirm] ( identifier[prompt] ) keyword[if] keyword[not] identifier[proceed] : keyword[return] identifier[result] = identifier[gandi] . identifier[dns] . identifier[del_record] ( identifier[fqdn] , identifier[name] , identifier[type] ) identifier[gandi] . identifier[echo] ( literal[string] ) keyword[return] identifier[result]
def delete(gandi, fqdn, name, type, force): """Delete record entry for a domain.""" domains = gandi.dns.list() domains = [domain['fqdn'] for domain in domains] if fqdn not in domains: gandi.echo('Sorry domain %s does not exist' % fqdn) gandi.echo('Please use one of the following: %s' % ', '.join(domains)) return # depends on [control=['if'], data=['fqdn', 'domains']] if not force: if not name and (not type): prompt = 'Are you sure to delete all records for domain %s ?' % fqdn # depends on [control=['if'], data=[]] elif name and (not type): prompt = "Are you sure to delete all '%s' name records for domain %s ?" % (name, fqdn) # depends on [control=['if'], data=[]] else: prompt = "Are you sure to delete all '%s' records of type %s for domain %s ?" % (name, type, fqdn) proceed = click.confirm(prompt) if not proceed: return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] result = gandi.dns.del_record(fqdn, name, type) gandi.echo('Delete successful.') return result
def get_items(self, url, params=None, **kwargs): """Return a generator that GETs and yields individual JSON `items`. Yields individual `items` from Webex Teams's top-level {'items': [...]} JSON objects. Provides native support for RFC5988 Web Linking. The generator will request additional pages as needed until all items have been returned. Args: url(basestring): The URL of the API endpoint. params(dict): The parameters for the HTTP GET request. **kwargs: erc(int): The expected (success) response code for the request. others: Passed on to the requests package. Raises: ApiError: If anything other than the expected response code is returned by the Webex Teams API endpoint. MalformedResponse: If the returned response does not contain a top-level dictionary with an 'items' key. """ # Get generator for pages of JSON data pages = self.get_pages(url, params=params, **kwargs) for json_page in pages: assert isinstance(json_page, dict) items = json_page.get('items') if items is None: error_message = "'items' key not found in JSON data: " \ "{!r}".format(json_page) raise MalformedResponse(error_message) else: for item in items: yield item
def function[get_items, parameter[self, url, params]]: constant[Return a generator that GETs and yields individual JSON `items`. Yields individual `items` from Webex Teams's top-level {'items': [...]} JSON objects. Provides native support for RFC5988 Web Linking. The generator will request additional pages as needed until all items have been returned. Args: url(basestring): The URL of the API endpoint. params(dict): The parameters for the HTTP GET request. **kwargs: erc(int): The expected (success) response code for the request. others: Passed on to the requests package. Raises: ApiError: If anything other than the expected response code is returned by the Webex Teams API endpoint. MalformedResponse: If the returned response does not contain a top-level dictionary with an 'items' key. ] variable[pages] assign[=] call[name[self].get_pages, parameter[name[url]]] for taget[name[json_page]] in starred[name[pages]] begin[:] assert[call[name[isinstance], parameter[name[json_page], name[dict]]]] variable[items] assign[=] call[name[json_page].get, parameter[constant[items]]] if compare[name[items] is constant[None]] begin[:] variable[error_message] assign[=] call[constant['items' key not found in JSON data: {!r}].format, parameter[name[json_page]]] <ast.Raise object at 0x7da18c4ccc40>
keyword[def] identifier[get_items] ( identifier[self] , identifier[url] , identifier[params] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[pages] = identifier[self] . identifier[get_pages] ( identifier[url] , identifier[params] = identifier[params] ,** identifier[kwargs] ) keyword[for] identifier[json_page] keyword[in] identifier[pages] : keyword[assert] identifier[isinstance] ( identifier[json_page] , identifier[dict] ) identifier[items] = identifier[json_page] . identifier[get] ( literal[string] ) keyword[if] identifier[items] keyword[is] keyword[None] : identifier[error_message] = literal[string] literal[string] . identifier[format] ( identifier[json_page] ) keyword[raise] identifier[MalformedResponse] ( identifier[error_message] ) keyword[else] : keyword[for] identifier[item] keyword[in] identifier[items] : keyword[yield] identifier[item]
def get_items(self, url, params=None, **kwargs): """Return a generator that GETs and yields individual JSON `items`. Yields individual `items` from Webex Teams's top-level {'items': [...]} JSON objects. Provides native support for RFC5988 Web Linking. The generator will request additional pages as needed until all items have been returned. Args: url(basestring): The URL of the API endpoint. params(dict): The parameters for the HTTP GET request. **kwargs: erc(int): The expected (success) response code for the request. others: Passed on to the requests package. Raises: ApiError: If anything other than the expected response code is returned by the Webex Teams API endpoint. MalformedResponse: If the returned response does not contain a top-level dictionary with an 'items' key. """ # Get generator for pages of JSON data pages = self.get_pages(url, params=params, **kwargs) for json_page in pages: assert isinstance(json_page, dict) items = json_page.get('items') if items is None: error_message = "'items' key not found in JSON data: {!r}".format(json_page) raise MalformedResponse(error_message) # depends on [control=['if'], data=[]] else: for item in items: yield item # depends on [control=['for'], data=['item']] # depends on [control=['for'], data=['json_page']]
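A minimal, self-contained sketch of the flattening behaviour described above: pages of {'items': [...]} objects are consumed lazily and their items are yielded one by one. Canned dictionaries stand in for the HTTP pagination that get_pages performs in the real class.

def iter_items(pages):
    for json_page in pages:
        items = json_page.get('items')
        if items is None:
            raise ValueError("'items' key not found in JSON data: {!r}".format(json_page))
        for item in items:
            yield item

pages = [{'items': [{'id': 'a'}, {'id': 'b'}]}, {'items': [{'id': 'c'}]}]
print([item['id'] for item in iter_items(pages)])  # ['a', 'b', 'c']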
def bounding_square_polygon(self, inscribed_circle_radius_km=10.0): """ Returns a square polygon (bounding box) that circumscribes the circle having this geopoint as centre and having the specified radius in kilometers. The polygon's points calculation is based on theory exposed by: http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates by Jan Philip Matuschek, owner of the intellectual property of such material. In short: - locally to the geopoint, the Earth's surface is approximated to a sphere with radius = Earth's radius - the calculation works fine also when the bounding box contains the Earth's poles and the 180 deg meridian :param inscribed_circle_radius_km: the radius of the inscribed circle, defaults to 10 kms :type inscribed_circle_radius_km: int or float :return: a `pyowm.utils.geo.Polygon` instance """ assert isinstance(inscribed_circle_radius_km, int) or isinstance(inscribed_circle_radius_km, float) assert inscribed_circle_radius_km > 0., 'Radius must be greater than zero' # turn metric distance to radians on the approximated local sphere rad_distance = float(inscribed_circle_radius_km) / EARTH_RADIUS_KM # calculating min/max lat for bounding box bb_min_lat_deg = self.lat * math.pi/180. - rad_distance bb_max_lat_deg = self.lat * math.pi/180. + rad_distance # now checking for poles... if bb_min_lat_deg > math.radians(-90) and bb_max_lat_deg < math.radians(90): # no poles in the bounding box delta_lon = math.asin(math.sin(rad_distance) / math.cos(math.radians(self.lat))) bb_min_lon_deg = math.radians(self.lon) - delta_lon if bb_min_lon_deg < math.radians(-180): bb_min_lon_deg += 2 * math.pi bb_max_lon_deg = math.radians(self.lon) + delta_lon if bb_max_lon_deg > math.radians(180): bb_max_lon_deg -= 2 * math.pi else: # a pole is contained in the bounding box bb_min_lat_deg = max(bb_min_lat_deg, math.radians(-90)) bb_max_lat_deg = min(bb_max_lat_deg, math.radians(90)) bb_min_lon_deg = math.radians(-180) bb_max_lon_deg = math.radians(180) # turn back from radians to decimal bb_min_lat = bb_min_lat_deg * 180./math.pi bb_max_lat = bb_max_lat_deg * 180./math.pi bb_min_lon = bb_min_lon_deg * 180./math.pi bb_max_lon = bb_max_lon_deg * 180./math.pi return Polygon([[ [bb_min_lon, bb_max_lat], [bb_max_lon, bb_max_lat], [bb_max_lon, bb_min_lat], [bb_min_lon, bb_min_lat], [bb_min_lon, bb_max_lat] ]])
def function[bounding_square_polygon, parameter[self, inscribed_circle_radius_km]]: constant[ Returns a square polygon (bounding box) that circumscribes the circle having this geopoint as centre and having the specified radius in kilometers. The polygon's points calculation is based on theory exposed by: http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates by Jan Philip Matuschek, owner of the intellectual property of such material. In short: - locally to the geopoint, the Earth's surface is approximated to a sphere with radius = Earth's radius - the calculation works fine also when the bounding box contains the Earth's poles and the 180 deg meridian :param inscribed_circle_radius_km: the radius of the inscribed circle, defaults to 10 kms :type inscribed_circle_radius_km: int or float :return: a `pyowm.utils.geo.Polygon` instance ] assert[<ast.BoolOp object at 0x7da18f58cc40>] assert[compare[name[inscribed_circle_radius_km] greater[>] constant[0.0]]] variable[rad_distance] assign[=] binary_operation[call[name[float], parameter[name[inscribed_circle_radius_km]]] / name[EARTH_RADIUS_KM]] variable[bb_min_lat_deg] assign[=] binary_operation[binary_operation[binary_operation[name[self].lat * name[math].pi] / constant[180.0]] - name[rad_distance]] variable[bb_max_lat_deg] assign[=] binary_operation[binary_operation[binary_operation[name[self].lat * name[math].pi] / constant[180.0]] + name[rad_distance]] if <ast.BoolOp object at 0x7da18f58e8c0> begin[:] variable[delta_lon] assign[=] call[name[math].asin, parameter[binary_operation[call[name[math].sin, parameter[name[rad_distance]]] / call[name[math].cos, parameter[call[name[math].radians, parameter[name[self].lat]]]]]]] variable[bb_min_lon_deg] assign[=] binary_operation[call[name[math].radians, parameter[name[self].lon]] - name[delta_lon]] if compare[name[bb_min_lon_deg] less[<] call[name[math].radians, parameter[<ast.UnaryOp object at 0x7da18f58c220>]]] begin[:] <ast.AugAssign object at 0x7da18f58fc10> variable[bb_max_lon_deg] assign[=] binary_operation[call[name[math].radians, parameter[name[self].lon]] + name[delta_lon]] if compare[name[bb_max_lon_deg] greater[>] call[name[math].radians, parameter[constant[180]]]] begin[:] <ast.AugAssign object at 0x7da2044c21d0> variable[bb_min_lat] assign[=] binary_operation[binary_operation[name[bb_min_lat_deg] * constant[180.0]] / name[math].pi] variable[bb_max_lat] assign[=] binary_operation[binary_operation[name[bb_max_lat_deg] * constant[180.0]] / name[math].pi] variable[bb_min_lon] assign[=] binary_operation[binary_operation[name[bb_min_lon_deg] * constant[180.0]] / name[math].pi] variable[bb_max_lon] assign[=] binary_operation[binary_operation[name[bb_max_lon_deg] * constant[180.0]] / name[math].pi] return[call[name[Polygon], parameter[list[[<ast.List object at 0x7da2044c0310>]]]]]
keyword[def] identifier[bounding_square_polygon] ( identifier[self] , identifier[inscribed_circle_radius_km] = literal[int] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[inscribed_circle_radius_km] , identifier[int] ) keyword[or] identifier[isinstance] ( identifier[inscribed_circle_radius_km] , identifier[float] ) keyword[assert] identifier[inscribed_circle_radius_km] > literal[int] , literal[string] identifier[rad_distance] = identifier[float] ( identifier[inscribed_circle_radius_km] )/ identifier[EARTH_RADIUS_KM] identifier[bb_min_lat_deg] = identifier[self] . identifier[lat] * identifier[math] . identifier[pi] / literal[int] - identifier[rad_distance] identifier[bb_max_lat_deg] = identifier[self] . identifier[lat] * identifier[math] . identifier[pi] / literal[int] + identifier[rad_distance] keyword[if] identifier[bb_min_lat_deg] > identifier[math] . identifier[radians] (- literal[int] ) keyword[and] identifier[bb_max_lat_deg] < identifier[math] . identifier[radians] ( literal[int] ): identifier[delta_lon] = identifier[math] . identifier[asin] ( identifier[math] . identifier[sin] ( identifier[rad_distance] )/ identifier[math] . identifier[cos] ( identifier[math] . identifier[radians] ( identifier[self] . identifier[lat] ))) identifier[bb_min_lon_deg] = identifier[math] . identifier[radians] ( identifier[self] . identifier[lon] )- identifier[delta_lon] keyword[if] identifier[bb_min_lon_deg] < identifier[math] . identifier[radians] (- literal[int] ): identifier[bb_min_lon_deg] += literal[int] * identifier[math] . identifier[pi] identifier[bb_max_lon_deg] = identifier[math] . identifier[radians] ( identifier[self] . identifier[lon] )+ identifier[delta_lon] keyword[if] identifier[bb_max_lon_deg] > identifier[math] . identifier[radians] ( literal[int] ): identifier[bb_max_lon_deg] -= literal[int] * identifier[math] . identifier[pi] keyword[else] : identifier[bb_min_lat_deg] = identifier[max] ( identifier[bb_min_lat_deg] , identifier[math] . identifier[radians] (- literal[int] )) identifier[bb_max_lat_deg] = identifier[min] ( identifier[bb_max_lat_deg] , identifier[math] . identifier[radians] ( literal[int] )) identifier[bb_min_lon_deg] = identifier[math] . identifier[radians] (- literal[int] ) identifier[bb_max_lon_deg] = identifier[math] . identifier[radians] ( literal[int] ) identifier[bb_min_lat] = identifier[bb_min_lat_deg] * literal[int] / identifier[math] . identifier[pi] identifier[bb_max_lat] = identifier[bb_max_lat_deg] * literal[int] / identifier[math] . identifier[pi] identifier[bb_min_lon] = identifier[bb_min_lon_deg] * literal[int] / identifier[math] . identifier[pi] identifier[bb_max_lon] = identifier[bb_max_lon_deg] * literal[int] / identifier[math] . identifier[pi] keyword[return] identifier[Polygon] ([[ [ identifier[bb_min_lon] , identifier[bb_max_lat] ], [ identifier[bb_max_lon] , identifier[bb_max_lat] ], [ identifier[bb_max_lon] , identifier[bb_min_lat] ], [ identifier[bb_min_lon] , identifier[bb_min_lat] ], [ identifier[bb_min_lon] , identifier[bb_max_lat] ] ]])
def bounding_square_polygon(self, inscribed_circle_radius_km=10.0): """ Returns a square polygon (bounding box) that circumscribes the circle having this geopoint as centre and having the specified radius in kilometers. The polygon's points calculation is based on theory exposed by: http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates by Jan Philip Matuschek, owner of the intellectual property of such material. In short: - locally to the geopoint, the Earth's surface is approximated to a sphere with radius = Earth's radius - the calculation works fine also when the bounding box contains the Earth's poles and the 180 deg meridian :param inscribed_circle_radius_km: the radius of the inscribed circle, defaults to 10 kms :type inscribed_circle_radius_km: int or float :return: a `pyowm.utils.geo.Polygon` instance """ assert isinstance(inscribed_circle_radius_km, int) or isinstance(inscribed_circle_radius_km, float) assert inscribed_circle_radius_km > 0.0, 'Radius must be greater than zero' # turn metric distance to radians on the approximated local sphere rad_distance = float(inscribed_circle_radius_km) / EARTH_RADIUS_KM # calculating min/max lat for bounding box bb_min_lat_deg = self.lat * math.pi / 180.0 - rad_distance bb_max_lat_deg = self.lat * math.pi / 180.0 + rad_distance # now checking for poles... if bb_min_lat_deg > math.radians(-90) and bb_max_lat_deg < math.radians(90): # no poles in the bounding box delta_lon = math.asin(math.sin(rad_distance) / math.cos(math.radians(self.lat))) bb_min_lon_deg = math.radians(self.lon) - delta_lon if bb_min_lon_deg < math.radians(-180): bb_min_lon_deg += 2 * math.pi # depends on [control=['if'], data=['bb_min_lon_deg']] bb_max_lon_deg = math.radians(self.lon) + delta_lon if bb_max_lon_deg > math.radians(180): bb_max_lon_deg -= 2 * math.pi # depends on [control=['if'], data=['bb_max_lon_deg']] # depends on [control=['if'], data=[]] else: # a pole is contained in the bounding box bb_min_lat_deg = max(bb_min_lat_deg, math.radians(-90)) bb_max_lat_deg = min(bb_max_lat_deg, math.radians(90)) bb_min_lon_deg = math.radians(-180) bb_max_lon_deg = math.radians(180) # turn back from radians to decimal bb_min_lat = bb_min_lat_deg * 180.0 / math.pi bb_max_lat = bb_max_lat_deg * 180.0 / math.pi bb_min_lon = bb_min_lon_deg * 180.0 / math.pi bb_max_lon = bb_max_lon_deg * 180.0 / math.pi return Polygon([[[bb_min_lon, bb_max_lat], [bb_max_lon, bb_max_lat], [bb_max_lon, bb_min_lat], [bb_min_lon, bb_min_lat], [bb_min_lon, bb_max_lat]]])
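A worked numeric check of the non-polar branch above, reproducing the latitude/longitude deltas for the default 10 km radius at a mid-latitude point. EARTH_RADIUS_KM is defined elsewhere in the package and is assumed here to be the usual 6371 km; the coordinates are arbitrary.

import math

EARTH_RADIUS_KM = 6371.0
lat, radius_km = 42.36, 10.0

rad_distance = radius_km / EARTH_RADIUS_KM                      # ~0.00157 rad
delta_lat_deg = math.degrees(rad_distance)                      # ~0.090 deg of latitude
delta_lon_deg = math.degrees(
    math.asin(math.sin(rad_distance) / math.cos(math.radians(lat))))  # ~0.122 deg of longitude

print(round(delta_lat_deg, 3), round(delta_lon_deg, 3))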
def setup_placeholders(self): """ Creates the TensorFlow placeholders, variables, ops and functions for this model. NOTE: Does not add the internal state placeholders and initialization values to the model yet as that requires the model's Network (if any) to be generated first. """ # States for name in sorted(self.states_spec): self.states_input[name] = tf.placeholder( dtype=util.tf_dtype(self.states_spec[name]['type']), shape=(None,) + tuple(self.states_spec[name]['shape']), name=('state-' + name) ) # States preprocessing if self.states_preprocessing_spec is None: for name in sorted(self.states_spec): self.states_spec[name]['unprocessed_shape'] = self.states_spec[name]['shape'] elif not isinstance(self.states_preprocessing_spec, list) and \ all(name in self.states_spec for name in self.states_preprocessing_spec): for name in sorted(self.states_spec): if name in self.states_preprocessing_spec: preprocessing = PreprocessorStack.from_spec( spec=self.states_preprocessing_spec[name], kwargs=dict(shape=self.states_spec[name]['shape']) ) self.states_spec[name]['unprocessed_shape'] = self.states_spec[name]['shape'] self.states_spec[name]['shape'] = preprocessing.processed_shape(shape=self.states_spec[name]['unprocessed_shape']) self.states_preprocessing[name] = preprocessing else: self.states_spec[name]['unprocessed_shape'] = self.states_spec[name]['shape'] # Single preprocessor for all components of our state space elif "type" in self.states_preprocessing_spec: preprocessing = PreprocessorStack.from_spec(spec=self.states_preprocessing_spec, kwargs=dict(shape=self.states_spec[name]['shape'])) for name in sorted(self.states_spec): self.states_spec[name]['unprocessed_shape'] = self.states_spec[name]['shape'] self.states_spec[name]['shape'] = preprocessing.processed_shape(shape=self.states_spec[name]['unprocessed_shape']) self.states_preprocessing[name] = preprocessing else: for name in sorted(self.states_spec): preprocessing = PreprocessorStack.from_spec( spec=self.states_preprocessing_spec, kwargs=dict(shape=self.states_spec[name]['shape']) ) self.states_spec[name]['unprocessed_shape'] = self.states_spec[name]['shape'] self.states_spec[name]['shape'] = preprocessing.processed_shape(shape=self.states_spec[name]['unprocessed_shape']) self.states_preprocessing[name] = preprocessing # Actions for name in sorted(self.actions_spec): self.actions_input[name] = tf.placeholder( dtype=util.tf_dtype(self.actions_spec[name]['type']), shape=(None,) + tuple(self.actions_spec[name]['shape']), name=('action-' + name) ) # Actions exploration if self.actions_exploration_spec is None: pass elif all(name in self.actions_spec for name in self.actions_exploration_spec): for name in sorted(self.actions_spec): if name in self.actions_exploration: self.actions_exploration[name] = Exploration.from_spec(spec=self.actions_exploration_spec[name]) else: for name in sorted(self.actions_spec): self.actions_exploration[name] = Exploration.from_spec(spec=self.actions_exploration_spec) # Terminal self.terminal_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(None,), name='terminal') # Reward self.reward_input = tf.placeholder(dtype=util.tf_dtype('float'), shape=(None,), name='reward') # Reward preprocessing if self.reward_preprocessing_spec is not None: self.reward_preprocessing = PreprocessorStack.from_spec( spec=self.reward_preprocessing_spec, # TODO this can eventually have more complex shapes? 
kwargs=dict(shape=()) ) if self.reward_preprocessing.processed_shape(shape=()) != (): raise TensorForceError("Invalid reward preprocessing!") # Deterministic/independent action flag (should probably be the same) self.deterministic_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(), name='deterministic') self.independent_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(), name='independent')
def function[setup_placeholders, parameter[self]]: constant[ Creates the TensorFlow placeholders, variables, ops and functions for this model. NOTE: Does not add the internal state placeholders and initialization values to the model yet as that requires the model's Network (if any) to be generated first. ] for taget[name[name]] in starred[call[name[sorted], parameter[name[self].states_spec]]] begin[:] call[name[self].states_input][name[name]] assign[=] call[name[tf].placeholder, parameter[]] if compare[name[self].states_preprocessing_spec is constant[None]] begin[:] for taget[name[name]] in starred[call[name[sorted], parameter[name[self].states_spec]]] begin[:] call[call[name[self].states_spec][name[name]]][constant[unprocessed_shape]] assign[=] call[call[name[self].states_spec][name[name]]][constant[shape]] for taget[name[name]] in starred[call[name[sorted], parameter[name[self].actions_spec]]] begin[:] call[name[self].actions_input][name[name]] assign[=] call[name[tf].placeholder, parameter[]] if compare[name[self].actions_exploration_spec is constant[None]] begin[:] pass name[self].terminal_input assign[=] call[name[tf].placeholder, parameter[]] name[self].reward_input assign[=] call[name[tf].placeholder, parameter[]] if compare[name[self].reward_preprocessing_spec is_not constant[None]] begin[:] name[self].reward_preprocessing assign[=] call[name[PreprocessorStack].from_spec, parameter[]] if compare[call[name[self].reward_preprocessing.processed_shape, parameter[]] not_equal[!=] tuple[[]]] begin[:] <ast.Raise object at 0x7da18f813820> name[self].deterministic_input assign[=] call[name[tf].placeholder, parameter[]] name[self].independent_input assign[=] call[name[tf].placeholder, parameter[]]
keyword[def] identifier[setup_placeholders] ( identifier[self] ): literal[string] keyword[for] identifier[name] keyword[in] identifier[sorted] ( identifier[self] . identifier[states_spec] ): identifier[self] . identifier[states_input] [ identifier[name] ]= identifier[tf] . identifier[placeholder] ( identifier[dtype] = identifier[util] . identifier[tf_dtype] ( identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ]), identifier[shape] =( keyword[None] ,)+ identifier[tuple] ( identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ]), identifier[name] =( literal[string] + identifier[name] ) ) keyword[if] identifier[self] . identifier[states_preprocessing_spec] keyword[is] keyword[None] : keyword[for] identifier[name] keyword[in] identifier[sorted] ( identifier[self] . identifier[states_spec] ): identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ]= identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ] keyword[elif] keyword[not] identifier[isinstance] ( identifier[self] . identifier[states_preprocessing_spec] , identifier[list] ) keyword[and] identifier[all] ( identifier[name] keyword[in] identifier[self] . identifier[states_spec] keyword[for] identifier[name] keyword[in] identifier[self] . identifier[states_preprocessing_spec] ): keyword[for] identifier[name] keyword[in] identifier[sorted] ( identifier[self] . identifier[states_spec] ): keyword[if] identifier[name] keyword[in] identifier[self] . identifier[states_preprocessing_spec] : identifier[preprocessing] = identifier[PreprocessorStack] . identifier[from_spec] ( identifier[spec] = identifier[self] . identifier[states_preprocessing_spec] [ identifier[name] ], identifier[kwargs] = identifier[dict] ( identifier[shape] = identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ]) ) identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ]= identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ] identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ]= identifier[preprocessing] . identifier[processed_shape] ( identifier[shape] = identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ]) identifier[self] . identifier[states_preprocessing] [ identifier[name] ]= identifier[preprocessing] keyword[else] : identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ]= identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ] keyword[elif] literal[string] keyword[in] identifier[self] . identifier[states_preprocessing_spec] : identifier[preprocessing] = identifier[PreprocessorStack] . identifier[from_spec] ( identifier[spec] = identifier[self] . identifier[states_preprocessing_spec] , identifier[kwargs] = identifier[dict] ( identifier[shape] = identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ])) keyword[for] identifier[name] keyword[in] identifier[sorted] ( identifier[self] . identifier[states_spec] ): identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ]= identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ] identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ]= identifier[preprocessing] . identifier[processed_shape] ( identifier[shape] = identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ]) identifier[self] . 
identifier[states_preprocessing] [ identifier[name] ]= identifier[preprocessing] keyword[else] : keyword[for] identifier[name] keyword[in] identifier[sorted] ( identifier[self] . identifier[states_spec] ): identifier[preprocessing] = identifier[PreprocessorStack] . identifier[from_spec] ( identifier[spec] = identifier[self] . identifier[states_preprocessing_spec] , identifier[kwargs] = identifier[dict] ( identifier[shape] = identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ]) ) identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ]= identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ] identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ]= identifier[preprocessing] . identifier[processed_shape] ( identifier[shape] = identifier[self] . identifier[states_spec] [ identifier[name] ][ literal[string] ]) identifier[self] . identifier[states_preprocessing] [ identifier[name] ]= identifier[preprocessing] keyword[for] identifier[name] keyword[in] identifier[sorted] ( identifier[self] . identifier[actions_spec] ): identifier[self] . identifier[actions_input] [ identifier[name] ]= identifier[tf] . identifier[placeholder] ( identifier[dtype] = identifier[util] . identifier[tf_dtype] ( identifier[self] . identifier[actions_spec] [ identifier[name] ][ literal[string] ]), identifier[shape] =( keyword[None] ,)+ identifier[tuple] ( identifier[self] . identifier[actions_spec] [ identifier[name] ][ literal[string] ]), identifier[name] =( literal[string] + identifier[name] ) ) keyword[if] identifier[self] . identifier[actions_exploration_spec] keyword[is] keyword[None] : keyword[pass] keyword[elif] identifier[all] ( identifier[name] keyword[in] identifier[self] . identifier[actions_spec] keyword[for] identifier[name] keyword[in] identifier[self] . identifier[actions_exploration_spec] ): keyword[for] identifier[name] keyword[in] identifier[sorted] ( identifier[self] . identifier[actions_spec] ): keyword[if] identifier[name] keyword[in] identifier[self] . identifier[actions_exploration] : identifier[self] . identifier[actions_exploration] [ identifier[name] ]= identifier[Exploration] . identifier[from_spec] ( identifier[spec] = identifier[self] . identifier[actions_exploration_spec] [ identifier[name] ]) keyword[else] : keyword[for] identifier[name] keyword[in] identifier[sorted] ( identifier[self] . identifier[actions_spec] ): identifier[self] . identifier[actions_exploration] [ identifier[name] ]= identifier[Exploration] . identifier[from_spec] ( identifier[spec] = identifier[self] . identifier[actions_exploration_spec] ) identifier[self] . identifier[terminal_input] = identifier[tf] . identifier[placeholder] ( identifier[dtype] = identifier[util] . identifier[tf_dtype] ( literal[string] ), identifier[shape] =( keyword[None] ,), identifier[name] = literal[string] ) identifier[self] . identifier[reward_input] = identifier[tf] . identifier[placeholder] ( identifier[dtype] = identifier[util] . identifier[tf_dtype] ( literal[string] ), identifier[shape] =( keyword[None] ,), identifier[name] = literal[string] ) keyword[if] identifier[self] . identifier[reward_preprocessing_spec] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[reward_preprocessing] = identifier[PreprocessorStack] . identifier[from_spec] ( identifier[spec] = identifier[self] . identifier[reward_preprocessing_spec] , identifier[kwargs] = identifier[dict] ( identifier[shape] =()) ) keyword[if] identifier[self] . 
identifier[reward_preprocessing] . identifier[processed_shape] ( identifier[shape] =())!=(): keyword[raise] identifier[TensorForceError] ( literal[string] ) identifier[self] . identifier[deterministic_input] = identifier[tf] . identifier[placeholder] ( identifier[dtype] = identifier[util] . identifier[tf_dtype] ( literal[string] ), identifier[shape] =(), identifier[name] = literal[string] ) identifier[self] . identifier[independent_input] = identifier[tf] . identifier[placeholder] ( identifier[dtype] = identifier[util] . identifier[tf_dtype] ( literal[string] ), identifier[shape] =(), identifier[name] = literal[string] )
def setup_placeholders(self): """ Creates the TensorFlow placeholders, variables, ops and functions for this model. NOTE: Does not add the internal state placeholders and initialization values to the model yet as that requires the model's Network (if any) to be generated first. """ # States for name in sorted(self.states_spec): self.states_input[name] = tf.placeholder(dtype=util.tf_dtype(self.states_spec[name]['type']), shape=(None,) + tuple(self.states_spec[name]['shape']), name='state-' + name) # depends on [control=['for'], data=['name']] # States preprocessing if self.states_preprocessing_spec is None: for name in sorted(self.states_spec): self.states_spec[name]['unprocessed_shape'] = self.states_spec[name]['shape'] # depends on [control=['for'], data=['name']] # depends on [control=['if'], data=[]] elif not isinstance(self.states_preprocessing_spec, list) and all((name in self.states_spec for name in self.states_preprocessing_spec)): for name in sorted(self.states_spec): if name in self.states_preprocessing_spec: preprocessing = PreprocessorStack.from_spec(spec=self.states_preprocessing_spec[name], kwargs=dict(shape=self.states_spec[name]['shape'])) self.states_spec[name]['unprocessed_shape'] = self.states_spec[name]['shape'] self.states_spec[name]['shape'] = preprocessing.processed_shape(shape=self.states_spec[name]['unprocessed_shape']) self.states_preprocessing[name] = preprocessing # depends on [control=['if'], data=['name']] else: self.states_spec[name]['unprocessed_shape'] = self.states_spec[name]['shape'] # depends on [control=['for'], data=['name']] # depends on [control=['if'], data=[]] # Single preprocessor for all components of our state space elif 'type' in self.states_preprocessing_spec: preprocessing = PreprocessorStack.from_spec(spec=self.states_preprocessing_spec, kwargs=dict(shape=self.states_spec[name]['shape'])) for name in sorted(self.states_spec): self.states_spec[name]['unprocessed_shape'] = self.states_spec[name]['shape'] self.states_spec[name]['shape'] = preprocessing.processed_shape(shape=self.states_spec[name]['unprocessed_shape']) self.states_preprocessing[name] = preprocessing # depends on [control=['for'], data=['name']] # depends on [control=['if'], data=[]] else: for name in sorted(self.states_spec): preprocessing = PreprocessorStack.from_spec(spec=self.states_preprocessing_spec, kwargs=dict(shape=self.states_spec[name]['shape'])) self.states_spec[name]['unprocessed_shape'] = self.states_spec[name]['shape'] self.states_spec[name]['shape'] = preprocessing.processed_shape(shape=self.states_spec[name]['unprocessed_shape']) self.states_preprocessing[name] = preprocessing # depends on [control=['for'], data=['name']] # Actions for name in sorted(self.actions_spec): self.actions_input[name] = tf.placeholder(dtype=util.tf_dtype(self.actions_spec[name]['type']), shape=(None,) + tuple(self.actions_spec[name]['shape']), name='action-' + name) # depends on [control=['for'], data=['name']] # Actions exploration if self.actions_exploration_spec is None: pass # depends on [control=['if'], data=[]] elif all((name in self.actions_spec for name in self.actions_exploration_spec)): for name in sorted(self.actions_spec): if name in self.actions_exploration: self.actions_exploration[name] = Exploration.from_spec(spec=self.actions_exploration_spec[name]) # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=['name']] # depends on [control=['if'], data=[]] else: for name in sorted(self.actions_spec): self.actions_exploration[name] = 
Exploration.from_spec(spec=self.actions_exploration_spec) # depends on [control=['for'], data=['name']] # Terminal self.terminal_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(None,), name='terminal') # Reward self.reward_input = tf.placeholder(dtype=util.tf_dtype('float'), shape=(None,), name='reward') # Reward preprocessing if self.reward_preprocessing_spec is not None: # TODO this can eventually have more complex shapes? self.reward_preprocessing = PreprocessorStack.from_spec(spec=self.reward_preprocessing_spec, kwargs=dict(shape=())) if self.reward_preprocessing.processed_shape(shape=()) != (): raise TensorForceError('Invalid reward preprocessing!') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # Deterministic/independent action flag (should probably be the same) self.deterministic_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(), name='deterministic') self.independent_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(), name='independent')
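A minimal sketch of the spec dictionaries this method consumes; the state and action names, types and shapes below are fabricated for illustration, and only the plain-Python shape arithmetic is shown, so no TensorFlow graph is built.

# Hypothetical specs in the shape setup_placeholders() iterates over; names are made up.
states_spec = {'screen': {'type': 'float', 'shape': (64, 64, 3)}}
actions_spec = {'move': {'type': 'int', 'shape': ()}}

# The placeholders prepend a batch dimension, exactly as in the method above.
for name in sorted(states_spec):
    placeholder_shape = (None,) + tuple(states_spec[name]['shape'])
    print('state-' + name, placeholder_shape)   # state-screen (None, 64, 64, 3)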
def get_elasticache_replication_groups_by_region(self, region):
    ''' Makes an AWS API call to list the ElastiCache replication groups
    in a particular region.'''

    # ElastiCache boto module doesn't provide a get_all_instances method,
    # that's why we need to call describe directly (it would be called by
    # the shorthand method anyway...)
    try:
        conn = elasticache.connect_to_region(region)
        if conn:
            response = conn.describe_replication_groups()

    except boto.exception.BotoServerError as e:
        error = e.reason

        if e.error_code == 'AuthFailure':
            error = self.get_auth_error_message()
        if not e.reason == "Forbidden":
            error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
        self.fail_with_error(error, 'getting ElastiCache clusters')

    try:
        # Boto also doesn't provide wrapper classes to ReplicationGroups
        # Because of that we can't make use of the get_list method in the
        # AWSQueryConnection. Let's do the work manually
        replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']

    except KeyError as e:
        error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
        self.fail_with_error(error, 'getting ElastiCache clusters')

    for replication_group in replication_groups:
        self.add_elasticache_replication_group(replication_group, region)
def function[get_elasticache_replication_groups_by_region, parameter[self, region]]: constant[ Makes an AWS API call to the list of ElastiCache replication groups in a particular region.] <ast.Try object at 0x7da20c7c88e0> <ast.Try object at 0x7da20c7cabf0> for taget[name[replication_group]] in starred[name[replication_groups]] begin[:] call[name[self].add_elasticache_replication_group, parameter[name[replication_group], name[region]]]
keyword[def] identifier[get_elasticache_replication_groups_by_region] ( identifier[self] , identifier[region] ): literal[string] keyword[try] : identifier[conn] = identifier[elasticache] . identifier[connect_to_region] ( identifier[region] ) keyword[if] identifier[conn] : identifier[response] = identifier[conn] . identifier[describe_replication_groups] () keyword[except] identifier[boto] . identifier[exception] . identifier[BotoServerError] keyword[as] identifier[e] : identifier[error] = identifier[e] . identifier[reason] keyword[if] identifier[e] . identifier[error_code] == literal[string] : identifier[error] = identifier[self] . identifier[get_auth_error_message] () keyword[if] keyword[not] identifier[e] . identifier[reason] == literal[string] : identifier[error] = literal[string] % identifier[e] . identifier[message] identifier[self] . identifier[fail_with_error] ( identifier[error] , literal[string] ) keyword[try] : identifier[replication_groups] = identifier[response] [ literal[string] ][ literal[string] ][ literal[string] ] keyword[except] identifier[KeyError] keyword[as] identifier[e] : identifier[error] = literal[string] identifier[self] . identifier[fail_with_error] ( identifier[error] , literal[string] ) keyword[for] identifier[replication_group] keyword[in] identifier[replication_groups] : identifier[self] . identifier[add_elasticache_replication_group] ( identifier[replication_group] , identifier[region] )
def get_elasticache_replication_groups_by_region(self, region): """ Makes an AWS API call to the list of ElastiCache replication groups in a particular region.""" # ElastiCache boto module doesn't provide a get_all_intances method, # that's why we need to call describe directly (it would be called by # the shorthand method anyway...) try: conn = elasticache.connect_to_region(region) if conn: response = conn.describe_replication_groups() # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() # depends on [control=['if'], data=[]] if not e.reason == 'Forbidden': error = 'Looks like AWS ElastiCache [Replication Groups] is down:\n%s' % e.message # depends on [control=['if'], data=[]] self.fail_with_error(error, 'getting ElastiCache clusters') # depends on [control=['except'], data=['e']] try: # Boto also doesn't provide wrapper classes to ReplicationGroups # Because of that wo can't make use of the get_list method in the # AWSQueryConnection. Let's do the work manually replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] # depends on [control=['try'], data=[]] except KeyError as e: error = 'ElastiCache [Replication Groups] query to AWS failed (unexpected format).' self.fail_with_error(error, 'getting ElastiCache clusters') # depends on [control=['except'], data=[]] for replication_group in replication_groups: self.add_elasticache_replication_group(replication_group, region) # depends on [control=['for'], data=['replication_group']]
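A self-contained sketch of how the nested describe_replication_groups payload is unpacked; the response dict below is a fabricated stand-in, but the key path matches the one read in the function above.

# Fabricated response shaped like the boto ElastiCache describe call's return value.
response = {
    'DescribeReplicationGroupsResponse': {
        'DescribeReplicationGroupsResult': {
            'ReplicationGroups': [{'ReplicationGroupId': 'example-group'}],
        }
    }
}

try:
    replication_groups = (response['DescribeReplicationGroupsResponse']
                          ['DescribeReplicationGroupsResult']
                          ['ReplicationGroups'])
except KeyError:
    replication_groups = []   # the real method reports an error here instead

print([g['ReplicationGroupId'] for g in replication_groups])   # ['example-group']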
def islice(self, start, end): """ Returns a new DateTimeIndex, containing a subslice of the timestamps in this index, as specified by the given integer start and end locations. Parameters ---------- start : int The location of the start of the range, inclusive. end : int The location of the end of the range, exclusive. """ jdt_index = self._jdt_index.islice(start, end) return DateTimeIndex(jdt_index=jdt_index)
def function[islice, parameter[self, start, end]]: constant[ Returns a new DateTimeIndex, containing a subslice of the timestamps in this index, as specified by the given integer start and end locations. Parameters ---------- start : int The location of the start of the range, inclusive. end : int The location of the end of the range, exclusive. ] variable[jdt_index] assign[=] call[name[self]._jdt_index.islice, parameter[name[start], name[end]]] return[call[name[DateTimeIndex], parameter[]]]
keyword[def] identifier[islice] ( identifier[self] , identifier[start] , identifier[end] ): literal[string] identifier[jdt_index] = identifier[self] . identifier[_jdt_index] . identifier[islice] ( identifier[start] , identifier[end] ) keyword[return] identifier[DateTimeIndex] ( identifier[jdt_index] = identifier[jdt_index] )
def islice(self, start, end): """ Returns a new DateTimeIndex, containing a subslice of the timestamps in this index, as specified by the given integer start and end locations. Parameters ---------- start : int The location of the start of the range, inclusive. end : int The location of the end of the range, exclusive. """ jdt_index = self._jdt_index.islice(start, end) return DateTimeIndex(jdt_index=jdt_index)
def set_content(self, value: RequestContent, *, content_type: str = None): ''' Sets the content of the request. ''' assert self._attached_files is None, \ 'cannot set content because you already attached files.' guessed_content_type = 'application/octet-stream' if value is None: guessed_content_type = 'text/plain' self._content = b'' elif isinstance(value, str): guessed_content_type = 'text/plain' self._content = value.encode('utf-8') else: guessed_content_type = 'application/octet-stream' self._content = value self.content_type = (content_type if content_type is not None else guessed_content_type)
def function[set_content, parameter[self, value]]: constant[ Sets the content of the request. ] assert[compare[name[self]._attached_files is constant[None]]] variable[guessed_content_type] assign[=] constant[application/octet-stream] if compare[name[value] is constant[None]] begin[:] variable[guessed_content_type] assign[=] constant[text/plain] name[self]._content assign[=] constant[b''] name[self].content_type assign[=] <ast.IfExp object at 0x7da20c76e6e0>
keyword[def] identifier[set_content] ( identifier[self] , identifier[value] : identifier[RequestContent] ,*, identifier[content_type] : identifier[str] = keyword[None] ): literal[string] keyword[assert] identifier[self] . identifier[_attached_files] keyword[is] keyword[None] , literal[string] identifier[guessed_content_type] = literal[string] keyword[if] identifier[value] keyword[is] keyword[None] : identifier[guessed_content_type] = literal[string] identifier[self] . identifier[_content] = literal[string] keyword[elif] identifier[isinstance] ( identifier[value] , identifier[str] ): identifier[guessed_content_type] = literal[string] identifier[self] . identifier[_content] = identifier[value] . identifier[encode] ( literal[string] ) keyword[else] : identifier[guessed_content_type] = literal[string] identifier[self] . identifier[_content] = identifier[value] identifier[self] . identifier[content_type] =( identifier[content_type] keyword[if] identifier[content_type] keyword[is] keyword[not] keyword[None] keyword[else] identifier[guessed_content_type] )
def set_content(self, value: RequestContent, *, content_type: str=None): """ Sets the content of the request. """ assert self._attached_files is None, 'cannot set content because you already attached files.' guessed_content_type = 'application/octet-stream' if value is None: guessed_content_type = 'text/plain' self._content = b'' # depends on [control=['if'], data=[]] elif isinstance(value, str): guessed_content_type = 'text/plain' self._content = value.encode('utf-8') # depends on [control=['if'], data=[]] else: guessed_content_type = 'application/octet-stream' self._content = value self.content_type = content_type if content_type is not None else guessed_content_type
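The content-type guessing in set_content can be exercised on its own; the helper below is a hypothetical restatement of that branching, not part of the original request API.

def guess_content_type(value):
    # Mirrors set_content(): None and str map to text/plain, raw bytes to octet-stream.
    if value is None or isinstance(value, str):
        return 'text/plain'
    return 'application/octet-stream'

assert guess_content_type(None) == 'text/plain'
assert guess_content_type('hello') == 'text/plain'
assert guess_content_type(b'\x00\x01') == 'application/octet-stream'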
def computeNoCall(fileName): """Computes the number of no call. :param fileName: the name of the file :type fileName: str Reads the ``ped`` file created by Plink using the ``recodeA`` options (see :py:func:`createPedChr24UsingPlink`) and computes the number and percentage of no calls on the chromosome ``24``. """ outputFile = None try: outputFile = open(fileName + ".noCall", "w") except IOError: msg = "%s: can't write file" % fileName + ".noCall" raise ProgramError(msg) print >>outputFile, "\t".join(["PED", "ID", "SEX", "nbGeno", "nbNoCall"]) try: toPrint = [] with open(fileName, "r") as inputFile: for i, line in enumerate(inputFile): row = line.rstrip("\r\n").split(" ") if i != 0: # This is data genotypes = np.array(row[6:]) nbMarker = len(genotypes) nbNA = len(np.where(genotypes == "NA")[0]) toPrint.append((row[0], row[1], row[4], str(nbMarker), str(nbNA))) toPrint.sort(reverse=True, key=lambda values: int(values[4])) for row in toPrint: print >>outputFile, "\t".join(row) except IOError: msg = "%(fileName)s: no such file" % locals() raise ProgramError(msg) # Closing the output file outputFile.close()
def function[computeNoCall, parameter[fileName]]: constant[Computes the number of no call. :param fileName: the name of the file :type fileName: str Reads the ``ped`` file created by Plink using the ``recodeA`` options (see :py:func:`createPedChr24UsingPlink`) and computes the number and percentage of no calls on the chromosome ``24``. ] variable[outputFile] assign[=] constant[None] <ast.Try object at 0x7da1b0a1f1f0> tuple[[<ast.BinOp object at 0x7da1b0a1fb20>, <ast.Call object at 0x7da1b0a1dff0>]] <ast.Try object at 0x7da1b0a1e7a0> call[name[outputFile].close, parameter[]]
keyword[def] identifier[computeNoCall] ( identifier[fileName] ): literal[string] identifier[outputFile] = keyword[None] keyword[try] : identifier[outputFile] = identifier[open] ( identifier[fileName] + literal[string] , literal[string] ) keyword[except] identifier[IOError] : identifier[msg] = literal[string] % identifier[fileName] + literal[string] keyword[raise] identifier[ProgramError] ( identifier[msg] ) identifier[print] >> identifier[outputFile] , literal[string] . identifier[join] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]) keyword[try] : identifier[toPrint] =[] keyword[with] identifier[open] ( identifier[fileName] , literal[string] ) keyword[as] identifier[inputFile] : keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[inputFile] ): identifier[row] = identifier[line] . identifier[rstrip] ( literal[string] ). identifier[split] ( literal[string] ) keyword[if] identifier[i] != literal[int] : identifier[genotypes] = identifier[np] . identifier[array] ( identifier[row] [ literal[int] :]) identifier[nbMarker] = identifier[len] ( identifier[genotypes] ) identifier[nbNA] = identifier[len] ( identifier[np] . identifier[where] ( identifier[genotypes] == literal[string] )[ literal[int] ]) identifier[toPrint] . identifier[append] (( identifier[row] [ literal[int] ], identifier[row] [ literal[int] ], identifier[row] [ literal[int] ], identifier[str] ( identifier[nbMarker] ), identifier[str] ( identifier[nbNA] ))) identifier[toPrint] . identifier[sort] ( identifier[reverse] = keyword[True] , identifier[key] = keyword[lambda] identifier[values] : identifier[int] ( identifier[values] [ literal[int] ])) keyword[for] identifier[row] keyword[in] identifier[toPrint] : identifier[print] >> identifier[outputFile] , literal[string] . identifier[join] ( identifier[row] ) keyword[except] identifier[IOError] : identifier[msg] = literal[string] % identifier[locals] () keyword[raise] identifier[ProgramError] ( identifier[msg] ) identifier[outputFile] . identifier[close] ()
def computeNoCall(fileName): """Computes the number of no call. :param fileName: the name of the file :type fileName: str Reads the ``ped`` file created by Plink using the ``recodeA`` options (see :py:func:`createPedChr24UsingPlink`) and computes the number and percentage of no calls on the chromosome ``24``. """ outputFile = None try: outputFile = open(fileName + '.noCall', 'w') # depends on [control=['try'], data=[]] except IOError: msg = "%s: can't write file" % fileName + '.noCall' raise ProgramError(msg) # depends on [control=['except'], data=[]] (print >> outputFile, '\t'.join(['PED', 'ID', 'SEX', 'nbGeno', 'nbNoCall'])) try: toPrint = [] with open(fileName, 'r') as inputFile: for (i, line) in enumerate(inputFile): row = line.rstrip('\r\n').split(' ') if i != 0: # This is data genotypes = np.array(row[6:]) nbMarker = len(genotypes) nbNA = len(np.where(genotypes == 'NA')[0]) toPrint.append((row[0], row[1], row[4], str(nbMarker), str(nbNA))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['inputFile']] toPrint.sort(reverse=True, key=lambda values: int(values[4])) for row in toPrint: (print >> outputFile, '\t'.join(row)) # depends on [control=['for'], data=['row']] # depends on [control=['try'], data=[]] except IOError: msg = '%(fileName)s: no such file' % locals() raise ProgramError(msg) # depends on [control=['except'], data=[]] # Closing the output file outputFile.close()
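The no-call counting itself is a short numpy idiom; a sketch with a fabricated genotype row (the columns after the first six fields of a recodeA ped line):

import numpy as np

genotypes = np.array(['0', '1', 'NA', '2', 'NA'])   # fabricated genotypes, 'NA' = no call
nb_marker = len(genotypes)
nb_na = len(np.where(genotypes == 'NA')[0])
print(nb_marker, nb_na)   # 5 2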
def to_bytes(graph: BELGraph, protocol: int = HIGHEST_PROTOCOL) -> bytes: """Convert a graph to bytes with pickle. Note that the pickle module has some incompatibilities between Python 2 and 3. To export a universally importable pickle, choose 0, 1, or 2. :param graph: A BEL network :param protocol: Pickling protocol to use. Defaults to ``HIGHEST_PROTOCOL``. .. seealso:: https://docs.python.org/3.6/library/pickle.html#data-stream-format """ raise_for_not_bel(graph) return dumps(graph, protocol=protocol)
def function[to_bytes, parameter[graph, protocol]]: constant[Convert a graph to bytes with pickle. Note that the pickle module has some incompatibilities between Python 2 and 3. To export a universally importable pickle, choose 0, 1, or 2. :param graph: A BEL network :param protocol: Pickling protocol to use. Defaults to ``HIGHEST_PROTOCOL``. .. seealso:: https://docs.python.org/3.6/library/pickle.html#data-stream-format ] call[name[raise_for_not_bel], parameter[name[graph]]] return[call[name[dumps], parameter[name[graph]]]]
keyword[def] identifier[to_bytes] ( identifier[graph] : identifier[BELGraph] , identifier[protocol] : identifier[int] = identifier[HIGHEST_PROTOCOL] )-> identifier[bytes] : literal[string] identifier[raise_for_not_bel] ( identifier[graph] ) keyword[return] identifier[dumps] ( identifier[graph] , identifier[protocol] = identifier[protocol] )
def to_bytes(graph: BELGraph, protocol: int=HIGHEST_PROTOCOL) -> bytes: """Convert a graph to bytes with pickle. Note that the pickle module has some incompatibilities between Python 2 and 3. To export a universally importable pickle, choose 0, 1, or 2. :param graph: A BEL network :param protocol: Pickling protocol to use. Defaults to ``HIGHEST_PROTOCOL``. .. seealso:: https://docs.python.org/3.6/library/pickle.html#data-stream-format """ raise_for_not_bel(graph) return dumps(graph, protocol=protocol)
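The protocol note in the docstring can be tried with the standard pickle module alone; the dict below is only a stand-in for a BELGraph.

import pickle

# Protocols 0-2 remain loadable from Python 2; pickle.HIGHEST_PROTOCOL generally does not.
payload = {'nodes': ['p(HGNC:APP)'], 'edges': []}   # stand-in object, not a real BELGraph
blob = pickle.dumps(payload, protocol=2)
assert pickle.loads(blob) == payload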
def get_unused_node_id(graph, initial_guess='unknown', _format='{}<%d>'): """ Finds an unused node id in `graph`. :param graph: A directed graph. :type graph: networkx.classes.digraph.DiGraph :param initial_guess: Initial node id guess. :type initial_guess: str, optional :param _format: Format to generate the new node id if the given is already used. :type _format: str, optional :return: An unused node id. :rtype: str """ has_node = graph.has_node # Namespace shortcut for speed. n = counter() # Counter. node_id_format = _format.format(initial_guess) # Node id format. node_id = initial_guess # Initial guess. while has_node(node_id): # Check if node id is used. node_id = node_id_format % n() # Guess. return node_id
def function[get_unused_node_id, parameter[graph, initial_guess, _format]]: constant[ Finds an unused node id in `graph`. :param graph: A directed graph. :type graph: networkx.classes.digraph.DiGraph :param initial_guess: Initial node id guess. :type initial_guess: str, optional :param _format: Format to generate the new node id if the given is already used. :type _format: str, optional :return: An unused node id. :rtype: str ] variable[has_node] assign[=] name[graph].has_node variable[n] assign[=] call[name[counter], parameter[]] variable[node_id_format] assign[=] call[name[_format].format, parameter[name[initial_guess]]] variable[node_id] assign[=] name[initial_guess] while call[name[has_node], parameter[name[node_id]]] begin[:] variable[node_id] assign[=] binary_operation[name[node_id_format] <ast.Mod object at 0x7da2590d6920> call[name[n], parameter[]]] return[name[node_id]]
keyword[def] identifier[get_unused_node_id] ( identifier[graph] , identifier[initial_guess] = literal[string] , identifier[_format] = literal[string] ): literal[string] identifier[has_node] = identifier[graph] . identifier[has_node] identifier[n] = identifier[counter] () identifier[node_id_format] = identifier[_format] . identifier[format] ( identifier[initial_guess] ) identifier[node_id] = identifier[initial_guess] keyword[while] identifier[has_node] ( identifier[node_id] ): identifier[node_id] = identifier[node_id_format] % identifier[n] () keyword[return] identifier[node_id]
def get_unused_node_id(graph, initial_guess='unknown', _format='{}<%d>'): """ Finds an unused node id in `graph`. :param graph: A directed graph. :type graph: networkx.classes.digraph.DiGraph :param initial_guess: Initial node id guess. :type initial_guess: str, optional :param _format: Format to generate the new node id if the given is already used. :type _format: str, optional :return: An unused node id. :rtype: str """ has_node = graph.has_node # Namespace shortcut for speed. n = counter() # Counter. node_id_format = _format.format(initial_guess) # Node id format. node_id = initial_guess # Initial guess. while has_node(node_id): # Check if node id is used. node_id = node_id_format % n() # Guess. # depends on [control=['while'], data=[]] return node_id
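A self-contained re-sketch of the probing loop, with itertools.count standing in for the external counter() helper on the assumption that it yields 0, 1, 2, ...:

import itertools
import networkx as nx

def pick_unused_node_id(graph, initial_guess='unknown', _format='{}<%d>'):
    # Same probing as get_unused_node_id, minus the counter() dependency.
    node_id = initial_guess
    template = _format.format(initial_guess)
    for n in itertools.count():
        if not graph.has_node(node_id):
            return node_id
        node_id = template % n

g = nx.DiGraph()
g.add_node('unknown')
print(pick_unused_node_id(g))   # 'unknown<0>' because the initial guess is taken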
def unmasked_blurred_image_of_planes_and_galaxies_from_padded_grid_stack_and_psf(planes, padded_grid_stack, psf):
    """For lens data, compute the unmasked blurred image of every unmasked unblurred image of every galaxy in each \
    plane. To do this, the function iterates over all planes and then galaxies to extract their unmasked unblurred \
    images.

    If a galaxy in a plane has a pixelization, the unmasked image of that galaxy in the plane is returned as None, \
    as the inversion's model image cannot be mapped to an unmasked version.

    This relies on using the lens data's padded-grid, which is a grid of (y,x) coordinates that extends over the \
    entire image as opposed to just the masked region.

    This returns a list of lists, where each list index corresponds to [plane_index][galaxy_index].

    Parameters
    ----------
    planes : [plane.Plane]
        The list of planes the unmasked blurred images are computed using.
    padded_grid_stack : grids.GridStack
        A padded-grid_stack, whose padded grid is used for PSF convolution.
    psf : ccd.PSF
        The PSF of the image used for convolution.
    """
    return [plane.unmasked_blurred_image_of_galaxies_from_psf(padded_grid_stack, psf) for plane in planes]
def function[unmasked_blurred_image_of_planes_and_galaxies_from_padded_grid_stack_and_psf, parameter[planes, padded_grid_stack, psf]]: constant[For lens data, compute the unmasked blurred image of every unmasked unblurred image of every galaxy in each plane. To do this, this function iterates over all planes and then galaxies to extract their unmasked unblurred images. If a galaxy in a plane has a pixelization, the unmasked image of that galaxy in the plane is returned as None as as the inversion's model image cannot be mapped to an unmasked version. This relies on using the lens data's padded-grid, which is a grid of (y,x) coordinates which extends over the entire image as opposed to just the masked region. This returns a list of lists, where each list index corresponds to [plane_index][galaxy_index]. Parameters ---------- planes : [plane.Plane] The list of planes the unmasked blurred images are computed using. padded_grid_stack : grids.GridStack A padded-grid_stack, whose padded grid is used for PSF convolution. psf : ccd.PSF The PSF of the image used for convolution. ] return[<ast.ListComp object at 0x7da18f00f580>]
keyword[def] identifier[unmasked_blurred_image_of_planes_and_galaxies_from_padded_grid_stack_and_psf] ( identifier[planes] , identifier[padded_grid_stack] , identifier[psf] ): literal[string] keyword[return] [ identifier[plane] . identifier[unmasked_blurred_image_of_galaxies_from_psf] ( identifier[padded_grid_stack] , identifier[psf] ) keyword[for] identifier[plane] keyword[in] identifier[planes] ]
def unmasked_blurred_image_of_planes_and_galaxies_from_padded_grid_stack_and_psf(planes, padded_grid_stack, psf): """For lens data, compute the unmasked blurred image of every unmasked unblurred image of every galaxy in each plane. To do this, this function iterates over all planes and then galaxies to extract their unmasked unblurred images. If a galaxy in a plane has a pixelization, the unmasked image of that galaxy in the plane is returned as None as as the inversion's model image cannot be mapped to an unmasked version. This relies on using the lens data's padded-grid, which is a grid of (y,x) coordinates which extends over the entire image as opposed to just the masked region. This returns a list of lists, where each list index corresponds to [plane_index][galaxy_index]. Parameters ---------- planes : [plane.Plane] The list of planes the unmasked blurred images are computed using. padded_grid_stack : grids.GridStack A padded-grid_stack, whose padded grid is used for PSF convolution. psf : ccd.PSF The PSF of the image used for convolution. """ return [plane.unmasked_blurred_image_of_galaxies_from_psf(padded_grid_stack, psf) for plane in planes]
def getrecord(**kwargs):
    """Create OAI-PMH response for verb GetRecord."""
    record_dumper = serializer(kwargs['metadataPrefix'])
    pid = OAIIDProvider.get(pid_value=kwargs['identifier']).pid
    record = Record.get_record(pid.object_uuid)
    e_tree, e_getrecord = verb(**kwargs)
    e_record = SubElement(e_getrecord, etree.QName(NS_OAIPMH, 'record'))
    header(
        e_record,
        identifier=pid.pid_value,
        datestamp=record.updated,
        sets=record.get('_oai', {}).get('sets', []),
    )
    e_metadata = SubElement(e_record, etree.QName(NS_OAIPMH, 'metadata'))
    e_metadata.append(record_dumper(pid, {'_source': record}))
    return e_tree
def function[getrecord, parameter[]]: constant[Create OAI-PMH response for verb Identify.] variable[record_dumper] assign[=] call[name[serializer], parameter[call[name[kwargs]][constant[metadataPrefix]]]] variable[pid] assign[=] call[name[OAIIDProvider].get, parameter[]].pid variable[record] assign[=] call[name[Record].get_record, parameter[name[pid].object_uuid]] <ast.Tuple object at 0x7da20c6c4dc0> assign[=] call[name[verb], parameter[]] variable[e_record] assign[=] call[name[SubElement], parameter[name[e_getrecord], call[name[etree].QName, parameter[name[NS_OAIPMH], constant[record]]]]] call[name[header], parameter[name[e_record]]] variable[e_metadata] assign[=] call[name[SubElement], parameter[name[e_record], call[name[etree].QName, parameter[name[NS_OAIPMH], constant[metadata]]]]] call[name[e_metadata].append, parameter[call[name[record_dumper], parameter[name[pid], dictionary[[<ast.Constant object at 0x7da20e956ce0>], [<ast.Name object at 0x7da20e955150>]]]]]] return[name[e_tree]]
keyword[def] identifier[getrecord] (** identifier[kwargs] ): literal[string] identifier[record_dumper] = identifier[serializer] ( identifier[kwargs] [ literal[string] ]) identifier[pid] = identifier[OAIIDProvider] . identifier[get] ( identifier[pid_value] = identifier[kwargs] [ literal[string] ]). identifier[pid] identifier[record] = identifier[Record] . identifier[get_record] ( identifier[pid] . identifier[object_uuid] ) identifier[e_tree] , identifier[e_getrecord] = identifier[verb] (** identifier[kwargs] ) identifier[e_record] = identifier[SubElement] ( identifier[e_getrecord] , identifier[etree] . identifier[QName] ( identifier[NS_OAIPMH] , literal[string] )) identifier[header] ( identifier[e_record] , identifier[identifier] = identifier[pid] . identifier[pid_value] , identifier[datestamp] = identifier[record] . identifier[updated] , identifier[sets] = identifier[record] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] ,[]), ) identifier[e_metadata] = identifier[SubElement] ( identifier[e_record] , identifier[etree] . identifier[QName] ( identifier[NS_OAIPMH] , literal[string] )) identifier[e_metadata] . identifier[append] ( identifier[record_dumper] ( identifier[pid] ,{ literal[string] : identifier[record] })) keyword[return] identifier[e_tree]
def getrecord(**kwargs): """Create OAI-PMH response for verb Identify.""" record_dumper = serializer(kwargs['metadataPrefix']) pid = OAIIDProvider.get(pid_value=kwargs['identifier']).pid record = Record.get_record(pid.object_uuid) (e_tree, e_getrecord) = verb(**kwargs) e_record = SubElement(e_getrecord, etree.QName(NS_OAIPMH, 'record')) header(e_record, identifier=pid.pid_value, datestamp=record.updated, sets=record.get('_oai', {}).get('sets', [])) e_metadata = SubElement(e_record, etree.QName(NS_OAIPMH, 'metadata')) e_metadata.append(record_dumper(pid, {'_source': record})) return e_tree
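The SubElement/QName pattern can be tried in isolation with lxml; the snippet below only builds the element skeleton with the standard OAI-PMH namespace and involves no Invenio objects.

from lxml import etree
from lxml.etree import SubElement

NS_OAIPMH = 'http://www.openarchives.org/OAI/2.0/'

root = etree.Element(etree.QName(NS_OAIPMH, 'GetRecord'), nsmap={None: NS_OAIPMH})
e_record = SubElement(root, etree.QName(NS_OAIPMH, 'record'))
e_metadata = SubElement(e_record, etree.QName(NS_OAIPMH, 'metadata'))
print(etree.tostring(root, pretty_print=True).decode())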
def import_taskfile(self, refobj, taskfileinfo): """Import the given taskfileinfo and update the refobj :param refobj: the refobject :type refobj: refobject :param taskfileinfo: the taskfileinfo to reference :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo` :returns: None :rtype: None :raises: None """ # work in root namespace with common.preserve_namespace(":"): jbfile = JB_File(taskfileinfo) filepath = jbfile.get_fullpath() ns_suggestion = reftrack.get_namespace(taskfileinfo) nodes = cmds.file(filepath, i=True, namespace=ns_suggestion, returnNewNodes=True, preserveReferences=True) # import assert nodes, 'Nothing was imported! this is unusual!' ns = common.get_top_namespace(nodes[0]) # get the actual namespace cmds.setAttr("%s.namespace" % refobj, ns, type="string") nscontent = cmds.namespaceInfo(ns, listOnlyDependencyNodes=True, dagPath=True) # get the content scenenode = self.get_scenenode(nscontent) self.get_refobjinter().connect_reftrack_scenenode(refobj, scenenode) dagcontent = cmds.ls(nodes, ap=True, assemblies=True) # get only the dagnodes so we can group them if not dagcontent: return # no need for a top group if there are not dagnodes to group # group the dagnodes in the new namespace grpname = reftrack.get_groupname(taskfileinfo) reftrack.group_content(dagcontent, ns, grpname, "jb_asset") return
def function[import_taskfile, parameter[self, refobj, taskfileinfo]]: constant[Import the given taskfileinfo and update the refobj :param refobj: the refobject :type refobj: refobject :param taskfileinfo: the taskfileinfo to reference :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo` :returns: None :rtype: None :raises: None ] with call[name[common].preserve_namespace, parameter[constant[:]]] begin[:] variable[jbfile] assign[=] call[name[JB_File], parameter[name[taskfileinfo]]] variable[filepath] assign[=] call[name[jbfile].get_fullpath, parameter[]] variable[ns_suggestion] assign[=] call[name[reftrack].get_namespace, parameter[name[taskfileinfo]]] variable[nodes] assign[=] call[name[cmds].file, parameter[name[filepath]]] assert[name[nodes]] variable[ns] assign[=] call[name[common].get_top_namespace, parameter[call[name[nodes]][constant[0]]]] call[name[cmds].setAttr, parameter[binary_operation[constant[%s.namespace] <ast.Mod object at 0x7da2590d6920> name[refobj]], name[ns]]] variable[nscontent] assign[=] call[name[cmds].namespaceInfo, parameter[name[ns]]] variable[scenenode] assign[=] call[name[self].get_scenenode, parameter[name[nscontent]]] call[call[name[self].get_refobjinter, parameter[]].connect_reftrack_scenenode, parameter[name[refobj], name[scenenode]]] variable[dagcontent] assign[=] call[name[cmds].ls, parameter[name[nodes]]] if <ast.UnaryOp object at 0x7da18fe91300> begin[:] return[None] variable[grpname] assign[=] call[name[reftrack].get_groupname, parameter[name[taskfileinfo]]] call[name[reftrack].group_content, parameter[name[dagcontent], name[ns], name[grpname], constant[jb_asset]]] return[None]
keyword[def] identifier[import_taskfile] ( identifier[self] , identifier[refobj] , identifier[taskfileinfo] ): literal[string] keyword[with] identifier[common] . identifier[preserve_namespace] ( literal[string] ): identifier[jbfile] = identifier[JB_File] ( identifier[taskfileinfo] ) identifier[filepath] = identifier[jbfile] . identifier[get_fullpath] () identifier[ns_suggestion] = identifier[reftrack] . identifier[get_namespace] ( identifier[taskfileinfo] ) identifier[nodes] = identifier[cmds] . identifier[file] ( identifier[filepath] , identifier[i] = keyword[True] , identifier[namespace] = identifier[ns_suggestion] , identifier[returnNewNodes] = keyword[True] , identifier[preserveReferences] = keyword[True] ) keyword[assert] identifier[nodes] , literal[string] identifier[ns] = identifier[common] . identifier[get_top_namespace] ( identifier[nodes] [ literal[int] ]) identifier[cmds] . identifier[setAttr] ( literal[string] % identifier[refobj] , identifier[ns] , identifier[type] = literal[string] ) identifier[nscontent] = identifier[cmds] . identifier[namespaceInfo] ( identifier[ns] , identifier[listOnlyDependencyNodes] = keyword[True] , identifier[dagPath] = keyword[True] ) identifier[scenenode] = identifier[self] . identifier[get_scenenode] ( identifier[nscontent] ) identifier[self] . identifier[get_refobjinter] (). identifier[connect_reftrack_scenenode] ( identifier[refobj] , identifier[scenenode] ) identifier[dagcontent] = identifier[cmds] . identifier[ls] ( identifier[nodes] , identifier[ap] = keyword[True] , identifier[assemblies] = keyword[True] ) keyword[if] keyword[not] identifier[dagcontent] : keyword[return] identifier[grpname] = identifier[reftrack] . identifier[get_groupname] ( identifier[taskfileinfo] ) identifier[reftrack] . identifier[group_content] ( identifier[dagcontent] , identifier[ns] , identifier[grpname] , literal[string] ) keyword[return]
def import_taskfile(self, refobj, taskfileinfo): """Import the given taskfileinfo and update the refobj :param refobj: the refobject :type refobj: refobject :param taskfileinfo: the taskfileinfo to reference :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo` :returns: None :rtype: None :raises: None """ # work in root namespace with common.preserve_namespace(':'): jbfile = JB_File(taskfileinfo) filepath = jbfile.get_fullpath() ns_suggestion = reftrack.get_namespace(taskfileinfo) nodes = cmds.file(filepath, i=True, namespace=ns_suggestion, returnNewNodes=True, preserveReferences=True) # import assert nodes, 'Nothing was imported! this is unusual!' ns = common.get_top_namespace(nodes[0]) # get the actual namespace cmds.setAttr('%s.namespace' % refobj, ns, type='string') nscontent = cmds.namespaceInfo(ns, listOnlyDependencyNodes=True, dagPath=True) # get the content scenenode = self.get_scenenode(nscontent) self.get_refobjinter().connect_reftrack_scenenode(refobj, scenenode) dagcontent = cmds.ls(nodes, ap=True, assemblies=True) # get only the dagnodes so we can group them if not dagcontent: return # no need for a top group if there are not dagnodes to group # depends on [control=['if'], data=[]] # group the dagnodes in the new namespace grpname = reftrack.get_groupname(taskfileinfo) reftrack.group_content(dagcontent, ns, grpname, 'jb_asset') return # depends on [control=['with'], data=[]]
def add_instance_groups(self, jobflow_id, instance_groups): """ Adds instance groups to a running cluster. :type jobflow_id: str :param jobflow_id: The id of the jobflow which will take the new instance groups :type instance_groups: list(boto.emr.InstanceGroup) :param instance_groups: A list of instance groups to add to the job """ if type(instance_groups) != types.ListType: instance_groups = [instance_groups] params = {} params['JobFlowId'] = jobflow_id params.update(self._build_instance_group_list_args(instance_groups)) return self.get_object('AddInstanceGroups', params, AddInstanceGroupsResponse, verb='POST')
def function[add_instance_groups, parameter[self, jobflow_id, instance_groups]]: constant[ Adds instance groups to a running cluster. :type jobflow_id: str :param jobflow_id: The id of the jobflow which will take the new instance groups :type instance_groups: list(boto.emr.InstanceGroup) :param instance_groups: A list of instance groups to add to the job ] if compare[call[name[type], parameter[name[instance_groups]]] not_equal[!=] name[types].ListType] begin[:] variable[instance_groups] assign[=] list[[<ast.Name object at 0x7da1b2661300>]] variable[params] assign[=] dictionary[[], []] call[name[params]][constant[JobFlowId]] assign[=] name[jobflow_id] call[name[params].update, parameter[call[name[self]._build_instance_group_list_args, parameter[name[instance_groups]]]]] return[call[name[self].get_object, parameter[constant[AddInstanceGroups], name[params], name[AddInstanceGroupsResponse]]]]
keyword[def] identifier[add_instance_groups] ( identifier[self] , identifier[jobflow_id] , identifier[instance_groups] ): literal[string] keyword[if] identifier[type] ( identifier[instance_groups] )!= identifier[types] . identifier[ListType] : identifier[instance_groups] =[ identifier[instance_groups] ] identifier[params] ={} identifier[params] [ literal[string] ]= identifier[jobflow_id] identifier[params] . identifier[update] ( identifier[self] . identifier[_build_instance_group_list_args] ( identifier[instance_groups] )) keyword[return] identifier[self] . identifier[get_object] ( literal[string] , identifier[params] , identifier[AddInstanceGroupsResponse] , identifier[verb] = literal[string] )
def add_instance_groups(self, jobflow_id, instance_groups): """ Adds instance groups to a running cluster. :type jobflow_id: str :param jobflow_id: The id of the jobflow which will take the new instance groups :type instance_groups: list(boto.emr.InstanceGroup) :param instance_groups: A list of instance groups to add to the job """ if type(instance_groups) != types.ListType: instance_groups = [instance_groups] # depends on [control=['if'], data=[]] params = {} params['JobFlowId'] = jobflow_id params.update(self._build_instance_group_list_args(instance_groups)) return self.get_object('AddInstanceGroups', params, AddInstanceGroupsResponse, verb='POST')
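The single-item-to-list normalisation at the top of the method is the only logic that does not touch boto; a standalone restatement with the Python 2 types.ListType check replaced by isinstance:

def as_list(value):
    # Wrap a lone instance group in a list, as add_instance_groups does.
    return value if isinstance(value, list) else [value]

assert as_list('ig-only-one') == ['ig-only-one']
assert as_list(['ig-1', 'ig-2']) == ['ig-1', 'ig-2']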
def compare_commits(self, base, head): """Compare two commits. :param str base: (required), base for the comparison :param str head: (required), compare this against base :returns: :class:`Comparison <github3.repos.comparison.Comparison>` if successful, else None """ url = self._build_url('compare', base + '...' + head, base_url=self._api) json = self._json(self._get(url), 200) return Comparison(json) if json else None
def function[compare_commits, parameter[self, base, head]]: constant[Compare two commits. :param str base: (required), base for the comparison :param str head: (required), compare this against base :returns: :class:`Comparison <github3.repos.comparison.Comparison>` if successful, else None ] variable[url] assign[=] call[name[self]._build_url, parameter[constant[compare], binary_operation[binary_operation[name[base] + constant[...]] + name[head]]]] variable[json] assign[=] call[name[self]._json, parameter[call[name[self]._get, parameter[name[url]]], constant[200]]] return[<ast.IfExp object at 0x7da1b0fbb0d0>]
keyword[def] identifier[compare_commits] ( identifier[self] , identifier[base] , identifier[head] ): literal[string] identifier[url] = identifier[self] . identifier[_build_url] ( literal[string] , identifier[base] + literal[string] + identifier[head] , identifier[base_url] = identifier[self] . identifier[_api] ) identifier[json] = identifier[self] . identifier[_json] ( identifier[self] . identifier[_get] ( identifier[url] ), literal[int] ) keyword[return] identifier[Comparison] ( identifier[json] ) keyword[if] identifier[json] keyword[else] keyword[None]
def compare_commits(self, base, head): """Compare two commits. :param str base: (required), base for the comparison :param str head: (required), compare this against base :returns: :class:`Comparison <github3.repos.comparison.Comparison>` if successful, else None """ url = self._build_url('compare', base + '...' + head, base_url=self._api) json = self._json(self._get(url), 200) return Comparison(json) if json else None
def max_pool(x, dim): """Max pooling operation.""" return tf.nn.max_pool( x, ksize=[1, dim, dim, 1], strides=[1, dim, dim, 1], padding='SAME')
def function[max_pool, parameter[x, dim]]: constant[Max pooling operation.] return[call[name[tf].nn.max_pool, parameter[name[x]]]]
keyword[def] identifier[max_pool] ( identifier[x] , identifier[dim] ): literal[string] keyword[return] identifier[tf] . identifier[nn] . identifier[max_pool] ( identifier[x] , identifier[ksize] =[ literal[int] , identifier[dim] , identifier[dim] , literal[int] ], identifier[strides] =[ literal[int] , identifier[dim] , identifier[dim] , literal[int] ], identifier[padding] = literal[string] )
def max_pool(x, dim): """Max pooling operation.""" return tf.nn.max_pool(x, ksize=[1, dim, dim, 1], strides=[1, dim, dim, 1], padding='SAME')
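A quick shape check, assuming TensorFlow 2.x with eager execution and that the max_pool helper above is in scope; the tensor sizes are arbitrary.

import tensorflow as tf

x = tf.random.normal([1, 8, 8, 3])   # NHWC: one 8x8 feature map with 3 channels
y = max_pool(x, 2)                   # 2x2 window and stride halve height and width
print(y.shape)                       # (1, 4, 4, 3)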
def add_feed(url): """add to db""" with Database("feeds") as db: title = feedparser.parse(url).feed.title name = str(title) db[name] = url return name
def function[add_feed, parameter[url]]: constant[add to db] with call[name[Database], parameter[constant[feeds]]] begin[:] variable[title] assign[=] call[name[feedparser].parse, parameter[name[url]]].feed.title variable[name] assign[=] call[name[str], parameter[name[title]]] call[name[db]][name[name]] assign[=] name[url] return[name[name]]
keyword[def] identifier[add_feed] ( identifier[url] ): literal[string] keyword[with] identifier[Database] ( literal[string] ) keyword[as] identifier[db] : identifier[title] = identifier[feedparser] . identifier[parse] ( identifier[url] ). identifier[feed] . identifier[title] identifier[name] = identifier[str] ( identifier[title] ) identifier[db] [ identifier[name] ]= identifier[url] keyword[return] identifier[name]
def add_feed(url): """add to db""" with Database('feeds') as db: title = feedparser.parse(url).feed.title name = str(title) db[name] = url return name # depends on [control=['with'], data=['db']]
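feedparser.parse also accepts a raw XML string, which makes the title lookup easy to try without a network call; the Database wrapper above is left out of this sketch.

import feedparser

rss = '<rss version="2.0"><channel><title>Example Feed</title></channel></rss>'
parsed = feedparser.parse(rss)
name = str(parsed.feed.title)
print(name)   # Example Feed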
def tokenize(self, data): """ Tokenizes the given string. A token is a 4-tuple of the form: (token_type, tag_name, tag_options, token_text) token_type One of: TOKEN_TAG_START, TOKEN_TAG_END, TOKEN_NEWLINE, TOKEN_DATA tag_name The name of the tag if token_type=TOKEN_TAG_*, otherwise None tag_options A dictionary of options specified for TOKEN_TAG_START, otherwise None token_text The original token text """ data = data.replace('\r\n', '\n').replace('\r', '\n') pos = start = end = 0 ld = len(data) tokens = [] while pos < ld: start = data.find(self.tag_opener, pos) if start >= pos: # Check to see if there was data between this start and the last end. if start > pos: tl = self._newline_tokenize(data[pos:start]) tokens.extend(tl) pos = start # Find the extent of this tag, if it's ever closed. end, found_close = self._tag_extent(data, start) if found_close: tag = data[start:end] valid, tag_name, closer, opts = self._parse_tag(tag) # Make sure this is a well-formed, recognized tag, otherwise it's just data. if valid and tag_name in self.recognized_tags: if closer: tokens.append((self.TOKEN_TAG_END, tag_name, None, tag)) else: tokens.append((self.TOKEN_TAG_START, tag_name, opts, tag)) elif valid and self.drop_unrecognized and tag_name not in self.recognized_tags: # If we found a valid (but unrecognized) tag and self.drop_unrecognized is True, just drop it. pass else: tokens.extend(self._newline_tokenize(tag)) else: # We didn't find a closing tag, tack it on as text. tokens.extend(self._newline_tokenize(data[start:end])) pos = end else: # No more tags left to parse. break if pos < ld: tl = self._newline_tokenize(data[pos:]) tokens.extend(tl) return tokens
def function[tokenize, parameter[self, data]]: constant[ Tokenizes the given string. A token is a 4-tuple of the form: (token_type, tag_name, tag_options, token_text) token_type One of: TOKEN_TAG_START, TOKEN_TAG_END, TOKEN_NEWLINE, TOKEN_DATA tag_name The name of the tag if token_type=TOKEN_TAG_*, otherwise None tag_options A dictionary of options specified for TOKEN_TAG_START, otherwise None token_text The original token text ] variable[data] assign[=] call[call[name[data].replace, parameter[constant[ ], constant[ ]]].replace, parameter[constant[ ], constant[ ]]] variable[pos] assign[=] constant[0] variable[ld] assign[=] call[name[len], parameter[name[data]]] variable[tokens] assign[=] list[[]] while compare[name[pos] less[<] name[ld]] begin[:] variable[start] assign[=] call[name[data].find, parameter[name[self].tag_opener, name[pos]]] if compare[name[start] greater_or_equal[>=] name[pos]] begin[:] if compare[name[start] greater[>] name[pos]] begin[:] variable[tl] assign[=] call[name[self]._newline_tokenize, parameter[call[name[data]][<ast.Slice object at 0x7da1b0653d60>]]] call[name[tokens].extend, parameter[name[tl]]] variable[pos] assign[=] name[start] <ast.Tuple object at 0x7da1b06525c0> assign[=] call[name[self]._tag_extent, parameter[name[data], name[start]]] if name[found_close] begin[:] variable[tag] assign[=] call[name[data]][<ast.Slice object at 0x7da1b0651d50>] <ast.Tuple object at 0x7da1b0650970> assign[=] call[name[self]._parse_tag, parameter[name[tag]]] if <ast.BoolOp object at 0x7da1b0653a60> begin[:] if name[closer] begin[:] call[name[tokens].append, parameter[tuple[[<ast.Attribute object at 0x7da1b0650dc0>, <ast.Name object at 0x7da1b06518d0>, <ast.Constant object at 0x7da1b0653f70>, <ast.Name object at 0x7da1b0653280>]]]] variable[pos] assign[=] name[end] if compare[name[pos] less[<] name[ld]] begin[:] variable[tl] assign[=] call[name[self]._newline_tokenize, parameter[call[name[data]][<ast.Slice object at 0x7da1b050c5b0>]]] call[name[tokens].extend, parameter[name[tl]]] return[name[tokens]]
keyword[def] identifier[tokenize] ( identifier[self] , identifier[data] ): literal[string] identifier[data] = identifier[data] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ) identifier[pos] = identifier[start] = identifier[end] = literal[int] identifier[ld] = identifier[len] ( identifier[data] ) identifier[tokens] =[] keyword[while] identifier[pos] < identifier[ld] : identifier[start] = identifier[data] . identifier[find] ( identifier[self] . identifier[tag_opener] , identifier[pos] ) keyword[if] identifier[start] >= identifier[pos] : keyword[if] identifier[start] > identifier[pos] : identifier[tl] = identifier[self] . identifier[_newline_tokenize] ( identifier[data] [ identifier[pos] : identifier[start] ]) identifier[tokens] . identifier[extend] ( identifier[tl] ) identifier[pos] = identifier[start] identifier[end] , identifier[found_close] = identifier[self] . identifier[_tag_extent] ( identifier[data] , identifier[start] ) keyword[if] identifier[found_close] : identifier[tag] = identifier[data] [ identifier[start] : identifier[end] ] identifier[valid] , identifier[tag_name] , identifier[closer] , identifier[opts] = identifier[self] . identifier[_parse_tag] ( identifier[tag] ) keyword[if] identifier[valid] keyword[and] identifier[tag_name] keyword[in] identifier[self] . identifier[recognized_tags] : keyword[if] identifier[closer] : identifier[tokens] . identifier[append] (( identifier[self] . identifier[TOKEN_TAG_END] , identifier[tag_name] , keyword[None] , identifier[tag] )) keyword[else] : identifier[tokens] . identifier[append] (( identifier[self] . identifier[TOKEN_TAG_START] , identifier[tag_name] , identifier[opts] , identifier[tag] )) keyword[elif] identifier[valid] keyword[and] identifier[self] . identifier[drop_unrecognized] keyword[and] identifier[tag_name] keyword[not] keyword[in] identifier[self] . identifier[recognized_tags] : keyword[pass] keyword[else] : identifier[tokens] . identifier[extend] ( identifier[self] . identifier[_newline_tokenize] ( identifier[tag] )) keyword[else] : identifier[tokens] . identifier[extend] ( identifier[self] . identifier[_newline_tokenize] ( identifier[data] [ identifier[start] : identifier[end] ])) identifier[pos] = identifier[end] keyword[else] : keyword[break] keyword[if] identifier[pos] < identifier[ld] : identifier[tl] = identifier[self] . identifier[_newline_tokenize] ( identifier[data] [ identifier[pos] :]) identifier[tokens] . identifier[extend] ( identifier[tl] ) keyword[return] identifier[tokens]
def tokenize(self, data): """ Tokenizes the given string. A token is a 4-tuple of the form: (token_type, tag_name, tag_options, token_text) token_type One of: TOKEN_TAG_START, TOKEN_TAG_END, TOKEN_NEWLINE, TOKEN_DATA tag_name The name of the tag if token_type=TOKEN_TAG_*, otherwise None tag_options A dictionary of options specified for TOKEN_TAG_START, otherwise None token_text The original token text """ data = data.replace('\r\n', '\n').replace('\r', '\n') pos = start = end = 0 ld = len(data) tokens = [] while pos < ld: start = data.find(self.tag_opener, pos) if start >= pos: # Check to see if there was data between this start and the last end. if start > pos: tl = self._newline_tokenize(data[pos:start]) tokens.extend(tl) pos = start # depends on [control=['if'], data=['start', 'pos']] # Find the extent of this tag, if it's ever closed. (end, found_close) = self._tag_extent(data, start) if found_close: tag = data[start:end] (valid, tag_name, closer, opts) = self._parse_tag(tag) # Make sure this is a well-formed, recognized tag, otherwise it's just data. if valid and tag_name in self.recognized_tags: if closer: tokens.append((self.TOKEN_TAG_END, tag_name, None, tag)) # depends on [control=['if'], data=[]] else: tokens.append((self.TOKEN_TAG_START, tag_name, opts, tag)) # depends on [control=['if'], data=[]] elif valid and self.drop_unrecognized and (tag_name not in self.recognized_tags): # If we found a valid (but unrecognized) tag and self.drop_unrecognized is True, just drop it. pass # depends on [control=['if'], data=[]] else: tokens.extend(self._newline_tokenize(tag)) # depends on [control=['if'], data=[]] else: # We didn't find a closing tag, tack it on as text. tokens.extend(self._newline_tokenize(data[start:end])) pos = end # depends on [control=['if'], data=['start', 'pos']] else: # No more tags left to parse. break # depends on [control=['while'], data=['pos']] if pos < ld: tl = self._newline_tokenize(data[pos:]) tokens.extend(tl) # depends on [control=['if'], data=['pos']] return tokens
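The newline normalisation and the documented 4-tuple shape can be illustrated without the full parser class; the expected tokens are described in comments because they depend on the parser's recognized_tags.

data = "[b]hi[/b]\r\nsee you"
normalized = data.replace('\r\n', '\n').replace('\r', '\n')
print(normalized.splitlines())   # ['[b]hi[/b]', 'see you']

# Each emitted token is (token_type, tag_name, tag_options, token_text).  For the input
# above, a parser that recognises 'b' is expected to emit a TOKEN_TAG_START for '[b]',
# a TOKEN_DATA for 'hi', a TOKEN_TAG_END for '[/b]', a TOKEN_NEWLINE, and a final
# TOKEN_DATA for 'see you', each carrying the original text in the last slot.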
def cached_request(self, request): """ Return a cached response if it exists in the cache, otherwise return False. """ cache_url = self.cache_url(request.url) logger.debug('Looking up "%s" in the cache', cache_url) cc = self.parse_cache_control(request.headers) # Bail out if the request insists on fresh data if "no-cache" in cc: logger.debug('Request header has "no-cache", cache bypassed') return False if "max-age" in cc and cc["max-age"] == 0: logger.debug('Request header has "max_age" as 0, cache bypassed') return False # Request allows serving from the cache, let's see if we find something cache_data = self.cache.get(cache_url) if cache_data is None: logger.debug("No cache entry available") return False # Check whether it can be deserialized resp = self.serializer.loads(request, cache_data) if not resp: logger.warning("Cache entry deserialization failed, entry ignored") return False # If we have a cached 301, return it immediately. We don't # need to test our response for other headers b/c it is # intrinsically "cacheable" as it is Permanent. # See: # https://tools.ietf.org/html/rfc7231#section-6.4.2 # # Client can try to refresh the value by repeating the request # with cache busting headers as usual (ie no-cache). if resp.status == 301: msg = ( 'Returning cached "301 Moved Permanently" response ' "(ignoring date and etag information)" ) logger.debug(msg) return resp headers = CaseInsensitiveDict(resp.headers) if not headers or "date" not in headers: if "etag" not in headers: # Without date or etag, the cached response can never be used # and should be deleted. logger.debug("Purging cached response: no date or etag") self.cache.delete(cache_url) logger.debug("Ignoring cached response: no date") return False now = time.time() date = calendar.timegm(parsedate_tz(headers["date"])) current_age = max(0, now - date) logger.debug("Current age based on date: %i", current_age) # TODO: There is an assumption that the result will be a # urllib3 response object. This may not be best since we # could probably avoid instantiating or constructing the # response until we know we need it. resp_cc = self.parse_cache_control(headers) # determine freshness freshness_lifetime = 0 # Check the max-age pragma in the cache control header if "max-age" in resp_cc: freshness_lifetime = resp_cc["max-age"] logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime) # If there isn't a max-age, check for an expires header elif "expires" in headers: expires = parsedate_tz(headers["expires"]) if expires is not None: expire_time = calendar.timegm(expires) - date freshness_lifetime = max(0, expire_time) logger.debug("Freshness lifetime from expires: %i", freshness_lifetime) # Determine if we are setting freshness limit in the # request. Note, this overrides what was in the response. if "max-age" in cc: freshness_lifetime = cc["max-age"] logger.debug( "Freshness lifetime from request max-age: %i", freshness_lifetime ) if "min-fresh" in cc: min_fresh = cc["min-fresh"] # adjust our current age by our min fresh current_age += min_fresh logger.debug("Adjusted current age from min-fresh: %i", current_age) # Return entry if it is fresh enough if freshness_lifetime > current_age: logger.debug('The response is "fresh", returning cached response') logger.debug("%i > %i", freshness_lifetime, current_age) return resp # we're not fresh. 
        # If we don't have an Etag, clear it out
        if "etag" not in headers:
            logger.debug('The cached response is "stale" with no etag, purging')
            self.cache.delete(cache_url)

        # return the original handler
        return False
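The age and freshness arithmetic stands alone; a worked example with a fabricated Date header and a one-hour max-age, using only the standard library:

import calendar
import time
from email.utils import parsedate_tz

date_header = 'Tue, 15 Nov 1994 08:12:31 GMT'        # fabricated response Date header
date = calendar.timegm(parsedate_tz(date_header))
current_age = max(0, time.time() - date)

freshness_lifetime = 3600                             # as if Cache-Control: max-age=3600
print(freshness_lifetime > current_age)               # False: the entry is long stale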
def function[cached_request, parameter[self, request]]: constant[ Return a cached response if it exists in the cache, otherwise return False. ] variable[cache_url] assign[=] call[name[self].cache_url, parameter[name[request].url]] call[name[logger].debug, parameter[constant[Looking up "%s" in the cache], name[cache_url]]] variable[cc] assign[=] call[name[self].parse_cache_control, parameter[name[request].headers]] if compare[constant[no-cache] in name[cc]] begin[:] call[name[logger].debug, parameter[constant[Request header has "no-cache", cache bypassed]]] return[constant[False]] if <ast.BoolOp object at 0x7da1b2344970> begin[:] call[name[logger].debug, parameter[constant[Request header has "max_age" as 0, cache bypassed]]] return[constant[False]] variable[cache_data] assign[=] call[name[self].cache.get, parameter[name[cache_url]]] if compare[name[cache_data] is constant[None]] begin[:] call[name[logger].debug, parameter[constant[No cache entry available]]] return[constant[False]] variable[resp] assign[=] call[name[self].serializer.loads, parameter[name[request], name[cache_data]]] if <ast.UnaryOp object at 0x7da1b23458d0> begin[:] call[name[logger].warning, parameter[constant[Cache entry deserialization failed, entry ignored]]] return[constant[False]] if compare[name[resp].status equal[==] constant[301]] begin[:] variable[msg] assign[=] constant[Returning cached "301 Moved Permanently" response (ignoring date and etag information)] call[name[logger].debug, parameter[name[msg]]] return[name[resp]] variable[headers] assign[=] call[name[CaseInsensitiveDict], parameter[name[resp].headers]] if <ast.BoolOp object at 0x7da1b2346a70> begin[:] if compare[constant[etag] <ast.NotIn object at 0x7da2590d7190> name[headers]] begin[:] call[name[logger].debug, parameter[constant[Purging cached response: no date or etag]]] call[name[self].cache.delete, parameter[name[cache_url]]] call[name[logger].debug, parameter[constant[Ignoring cached response: no date]]] return[constant[False]] variable[now] assign[=] call[name[time].time, parameter[]] variable[date] assign[=] call[name[calendar].timegm, parameter[call[name[parsedate_tz], parameter[call[name[headers]][constant[date]]]]]] variable[current_age] assign[=] call[name[max], parameter[constant[0], binary_operation[name[now] - name[date]]]] call[name[logger].debug, parameter[constant[Current age based on date: %i], name[current_age]]] variable[resp_cc] assign[=] call[name[self].parse_cache_control, parameter[name[headers]]] variable[freshness_lifetime] assign[=] constant[0] if compare[constant[max-age] in name[resp_cc]] begin[:] variable[freshness_lifetime] assign[=] call[name[resp_cc]][constant[max-age]] call[name[logger].debug, parameter[constant[Freshness lifetime from max-age: %i], name[freshness_lifetime]]] if compare[constant[max-age] in name[cc]] begin[:] variable[freshness_lifetime] assign[=] call[name[cc]][constant[max-age]] call[name[logger].debug, parameter[constant[Freshness lifetime from request max-age: %i], name[freshness_lifetime]]] if compare[constant[min-fresh] in name[cc]] begin[:] variable[min_fresh] assign[=] call[name[cc]][constant[min-fresh]] <ast.AugAssign object at 0x7da18ede7220> call[name[logger].debug, parameter[constant[Adjusted current age from min-fresh: %i], name[current_age]]] if compare[name[freshness_lifetime] greater[>] name[current_age]] begin[:] call[name[logger].debug, parameter[constant[The response is "fresh", returning cached response]]] call[name[logger].debug, parameter[constant[%i > %i], name[freshness_lifetime], 
name[current_age]]] return[name[resp]] if compare[constant[etag] <ast.NotIn object at 0x7da2590d7190> name[headers]] begin[:] call[name[logger].debug, parameter[constant[The cached response is "stale" with no etag, purging]]] call[name[self].cache.delete, parameter[name[cache_url]]] return[constant[False]]
keyword[def] identifier[cached_request] ( identifier[self] , identifier[request] ): literal[string] identifier[cache_url] = identifier[self] . identifier[cache_url] ( identifier[request] . identifier[url] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[cache_url] ) identifier[cc] = identifier[self] . identifier[parse_cache_control] ( identifier[request] . identifier[headers] ) keyword[if] literal[string] keyword[in] identifier[cc] : identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] keyword[False] keyword[if] literal[string] keyword[in] identifier[cc] keyword[and] identifier[cc] [ literal[string] ]== literal[int] : identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] keyword[False] identifier[cache_data] = identifier[self] . identifier[cache] . identifier[get] ( identifier[cache_url] ) keyword[if] identifier[cache_data] keyword[is] keyword[None] : identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] keyword[False] identifier[resp] = identifier[self] . identifier[serializer] . identifier[loads] ( identifier[request] , identifier[cache_data] ) keyword[if] keyword[not] identifier[resp] : identifier[logger] . identifier[warning] ( literal[string] ) keyword[return] keyword[False] keyword[if] identifier[resp] . identifier[status] == literal[int] : identifier[msg] =( literal[string] literal[string] ) identifier[logger] . identifier[debug] ( identifier[msg] ) keyword[return] identifier[resp] identifier[headers] = identifier[CaseInsensitiveDict] ( identifier[resp] . identifier[headers] ) keyword[if] keyword[not] identifier[headers] keyword[or] literal[string] keyword[not] keyword[in] identifier[headers] : keyword[if] literal[string] keyword[not] keyword[in] identifier[headers] : identifier[logger] . identifier[debug] ( literal[string] ) identifier[self] . identifier[cache] . identifier[delete] ( identifier[cache_url] ) identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] keyword[False] identifier[now] = identifier[time] . identifier[time] () identifier[date] = identifier[calendar] . identifier[timegm] ( identifier[parsedate_tz] ( identifier[headers] [ literal[string] ])) identifier[current_age] = identifier[max] ( literal[int] , identifier[now] - identifier[date] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[current_age] ) identifier[resp_cc] = identifier[self] . identifier[parse_cache_control] ( identifier[headers] ) identifier[freshness_lifetime] = literal[int] keyword[if] literal[string] keyword[in] identifier[resp_cc] : identifier[freshness_lifetime] = identifier[resp_cc] [ literal[string] ] identifier[logger] . identifier[debug] ( literal[string] , identifier[freshness_lifetime] ) keyword[elif] literal[string] keyword[in] identifier[headers] : identifier[expires] = identifier[parsedate_tz] ( identifier[headers] [ literal[string] ]) keyword[if] identifier[expires] keyword[is] keyword[not] keyword[None] : identifier[expire_time] = identifier[calendar] . identifier[timegm] ( identifier[expires] )- identifier[date] identifier[freshness_lifetime] = identifier[max] ( literal[int] , identifier[expire_time] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[freshness_lifetime] ) keyword[if] literal[string] keyword[in] identifier[cc] : identifier[freshness_lifetime] = identifier[cc] [ literal[string] ] identifier[logger] . 
identifier[debug] ( literal[string] , identifier[freshness_lifetime] ) keyword[if] literal[string] keyword[in] identifier[cc] : identifier[min_fresh] = identifier[cc] [ literal[string] ] identifier[current_age] += identifier[min_fresh] identifier[logger] . identifier[debug] ( literal[string] , identifier[current_age] ) keyword[if] identifier[freshness_lifetime] > identifier[current_age] : identifier[logger] . identifier[debug] ( literal[string] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[freshness_lifetime] , identifier[current_age] ) keyword[return] identifier[resp] keyword[if] literal[string] keyword[not] keyword[in] identifier[headers] : identifier[logger] . identifier[debug] ( literal[string] ) identifier[self] . identifier[cache] . identifier[delete] ( identifier[cache_url] ) keyword[return] keyword[False]
def cached_request(self, request): """ Return a cached response if it exists in the cache, otherwise return False. """ cache_url = self.cache_url(request.url) logger.debug('Looking up "%s" in the cache', cache_url) cc = self.parse_cache_control(request.headers) # Bail out if the request insists on fresh data if 'no-cache' in cc: logger.debug('Request header has "no-cache", cache bypassed') return False # depends on [control=['if'], data=[]] if 'max-age' in cc and cc['max-age'] == 0: logger.debug('Request header has "max_age" as 0, cache bypassed') return False # depends on [control=['if'], data=[]] # Request allows serving from the cache, let's see if we find something cache_data = self.cache.get(cache_url) if cache_data is None: logger.debug('No cache entry available') return False # depends on [control=['if'], data=[]] # Check whether it can be deserialized resp = self.serializer.loads(request, cache_data) if not resp: logger.warning('Cache entry deserialization failed, entry ignored') return False # depends on [control=['if'], data=[]] # If we have a cached 301, return it immediately. We don't # need to test our response for other headers b/c it is # intrinsically "cacheable" as it is Permanent. # See: # https://tools.ietf.org/html/rfc7231#section-6.4.2 # # Client can try to refresh the value by repeating the request # with cache busting headers as usual (ie no-cache). if resp.status == 301: msg = 'Returning cached "301 Moved Permanently" response (ignoring date and etag information)' logger.debug(msg) return resp # depends on [control=['if'], data=[]] headers = CaseInsensitiveDict(resp.headers) if not headers or 'date' not in headers: if 'etag' not in headers: # Without date or etag, the cached response can never be used # and should be deleted. logger.debug('Purging cached response: no date or etag') self.cache.delete(cache_url) # depends on [control=['if'], data=[]] logger.debug('Ignoring cached response: no date') return False # depends on [control=['if'], data=[]] now = time.time() date = calendar.timegm(parsedate_tz(headers['date'])) current_age = max(0, now - date) logger.debug('Current age based on date: %i', current_age) # TODO: There is an assumption that the result will be a # urllib3 response object. This may not be best since we # could probably avoid instantiating or constructing the # response until we know we need it. resp_cc = self.parse_cache_control(headers) # determine freshness freshness_lifetime = 0 # Check the max-age pragma in the cache control header if 'max-age' in resp_cc: freshness_lifetime = resp_cc['max-age'] logger.debug('Freshness lifetime from max-age: %i', freshness_lifetime) # depends on [control=['if'], data=['resp_cc']] # If there isn't a max-age, check for an expires header elif 'expires' in headers: expires = parsedate_tz(headers['expires']) if expires is not None: expire_time = calendar.timegm(expires) - date freshness_lifetime = max(0, expire_time) logger.debug('Freshness lifetime from expires: %i', freshness_lifetime) # depends on [control=['if'], data=['expires']] # depends on [control=['if'], data=['headers']] # Determine if we are setting freshness limit in the # request. Note, this overrides what was in the response. 
if 'max-age' in cc: freshness_lifetime = cc['max-age'] logger.debug('Freshness lifetime from request max-age: %i', freshness_lifetime) # depends on [control=['if'], data=['cc']] if 'min-fresh' in cc: min_fresh = cc['min-fresh'] # adjust our current age by our min fresh current_age += min_fresh logger.debug('Adjusted current age from min-fresh: %i', current_age) # depends on [control=['if'], data=['cc']] # Return entry if it is fresh enough if freshness_lifetime > current_age: logger.debug('The response is "fresh", returning cached response') logger.debug('%i > %i', freshness_lifetime, current_age) return resp # depends on [control=['if'], data=['freshness_lifetime', 'current_age']] # we're not fresh. If we don't have an Etag, clear it out if 'etag' not in headers: logger.debug('The cached response is "stale" with no etag, purging') self.cache.delete(cache_url) # depends on [control=['if'], data=[]] # return the original handler return False
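A minimal, self-contained sketch of the freshness arithmetic that cached_request applies above: the current age comes from the Date header, the freshness lifetime from max-age (or Expires), and min-fresh inflates the age before the comparison. The is_fresh helper and the simplified header dict are illustrative assumptions, not part of the cachecontrol module itself.

import calendar
import time
from email.utils import parsedate_tz

def is_fresh(headers, min_fresh=0, now=None):
    # `headers` is a plain dict standing in for parsed response headers /
    # Cache-Control directives: it must carry "date" and may carry
    # "max-age" (seconds) or "expires" (an HTTP date string).
    now = time.time() if now is None else now
    date = calendar.timegm(parsedate_tz(headers["date"]))
    current_age = max(0, now - date)

    freshness_lifetime = 0
    if "max-age" in headers:
        freshness_lifetime = int(headers["max-age"])
    elif "expires" in headers:
        expires = parsedate_tz(headers["expires"])
        if expires is not None:
            freshness_lifetime = max(0, calendar.timegm(expires) - date)

    current_age += min_fresh  # the request demands this much spare freshness
    return freshness_lifetime > current_age

hdrs = {"date": "Mon, 01 Jan 2024 00:00:00 GMT", "max-age": 3600}
t0 = calendar.timegm(parsedate_tz(hdrs["date"]))
print(is_fresh(hdrs, now=t0 + 100))                  # True: 100s old, 3600s lifetime
print(is_fresh(hdrs, min_fresh=3550, now=t0 + 100))  # False: 100 + 3550 exceeds 3600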
def reboot(at_time=None): ''' Reboot the system at_time The wait time in minutes before the system will be rebooted. CLI Example: .. code-block:: bash salt '*' system.reboot ''' cmd = ['shutdown', '-r', ('{0}'.format(at_time) if at_time else 'now')] ret = __salt__['cmd.run'](cmd, python_shell=False) return ret
def function[reboot, parameter[at_time]]: constant[ Reboot the system at_time The wait time in minutes before the system will be rebooted. CLI Example: .. code-block:: bash salt '*' system.reboot ] variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b1c11d50>, <ast.Constant object at 0x7da1b1c11300>, <ast.IfExp object at 0x7da1b1c111e0>]] variable[ret] assign[=] call[call[name[__salt__]][constant[cmd.run]], parameter[name[cmd]]] return[name[ret]]
keyword[def] identifier[reboot] ( identifier[at_time] = keyword[None] ): literal[string] identifier[cmd] =[ literal[string] , literal[string] ,( literal[string] . identifier[format] ( identifier[at_time] ) keyword[if] identifier[at_time] keyword[else] literal[string] )] identifier[ret] = identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[python_shell] = keyword[False] ) keyword[return] identifier[ret]
def reboot(at_time=None): """ Reboot the system at_time The wait time in minutes before the system will be rebooted. CLI Example: .. code-block:: bash salt '*' system.reboot """ cmd = ['shutdown', '-r', '{0}'.format(at_time) if at_time else 'now'] ret = __salt__['cmd.run'](cmd, python_shell=False) return ret
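The only real logic in reboot above is the argument it hands to shutdown; a tiny standalone illustration follows (the build_cmd helper is hypothetical, added here only to make the expression runnable on its own).

def build_cmd(at_time=None):
    # Mirrors the expression in reboot above: a truthy at_time becomes the
    # minutes argument, anything falsy (None, 0, '') falls back to 'now'.
    return ['shutdown', '-r', ('{0}'.format(at_time) if at_time else 'now')]

print(build_cmd(5))   # ['shutdown', '-r', '5']
print(build_cmd())    # ['shutdown', '-r', 'now']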
def get_data(self, field, function=None, default=None): """ Get data from the striplog. """ f = function or utils.null data = [] for iv in self: d = iv.data.get(field) if d is None: if default is not None: d = default else: d = np.nan data.append(f(d)) return np.array(data)
def function[get_data, parameter[self, field, function, default]]: constant[ Get data from the striplog. ] variable[f] assign[=] <ast.BoolOp object at 0x7da1b26af580> variable[data] assign[=] list[[]] for taget[name[iv]] in starred[name[self]] begin[:] variable[d] assign[=] call[name[iv].data.get, parameter[name[field]]] if compare[name[d] is constant[None]] begin[:] if compare[name[default] is_not constant[None]] begin[:] variable[d] assign[=] name[default] call[name[data].append, parameter[call[name[f], parameter[name[d]]]]] return[call[name[np].array, parameter[name[data]]]]
keyword[def] identifier[get_data] ( identifier[self] , identifier[field] , identifier[function] = keyword[None] , identifier[default] = keyword[None] ): literal[string] identifier[f] = identifier[function] keyword[or] identifier[utils] . identifier[null] identifier[data] =[] keyword[for] identifier[iv] keyword[in] identifier[self] : identifier[d] = identifier[iv] . identifier[data] . identifier[get] ( identifier[field] ) keyword[if] identifier[d] keyword[is] keyword[None] : keyword[if] identifier[default] keyword[is] keyword[not] keyword[None] : identifier[d] = identifier[default] keyword[else] : identifier[d] = identifier[np] . identifier[nan] identifier[data] . identifier[append] ( identifier[f] ( identifier[d] )) keyword[return] identifier[np] . identifier[array] ( identifier[data] )
def get_data(self, field, function=None, default=None): """ Get data from the striplog. """ f = function or utils.null data = [] for iv in self: d = iv.data.get(field) if d is None: if default is not None: d = default # depends on [control=['if'], data=['default']] else: d = np.nan # depends on [control=['if'], data=['d']] data.append(f(d)) # depends on [control=['for'], data=['iv']] return np.array(data)
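A runnable sketch of the lookup-with-fallback pattern get_data uses above, with a stand-in Interval class and an identity function in place of utils.null; the class, field names, and values here are illustrative, not taken from striplog.

import numpy as np

class Interval:
    # Minimal stand-in: a striplog interval only needs a dict-like `data`.
    def __init__(self, data):
        self.data = data

def get_data(intervals, field, function=None, default=None):
    f = function or (lambda x: x)  # utils.null is assumed to behave like identity
    out = []
    for iv in intervals:
        d = iv.data.get(field)
        if d is None:
            d = default if default is not None else np.nan
        out.append(f(d))
    return np.array(out)

ivs = [Interval({"GR": 80.0}), Interval({}), Interval({"GR": 95.0})]
print(get_data(ivs, "GR"))                   # [80. nan 95.]
print(get_data(ivs, "GR", default=-999.0))   # [  80. -999.   95.]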
def pad_line_to_ontonotes(line, domain) -> List[str]:
    """
    Pad line to conform to ontonotes representation.
    """
    word_ind, word = line[:2]
    pos = 'XX'
    oie_tags = line[2:]
    line_num = 0
    parse = "-"
    lemma = "-"
    return [domain, line_num, word_ind, word, pos, parse, lemma,
            '-', '-', '-', '*'] + list(oie_tags) + ['-']
def function[pad_line_to_ontonotes, parameter[line, domain]]: constant[ Pad line to conform to ontonotes representation. ] <ast.Tuple object at 0x7da2054a7a90> assign[=] call[name[line]][<ast.Slice object at 0x7da2054a56f0>] variable[pos] assign[=] constant[XX] variable[oie_tags] assign[=] call[name[line]][<ast.Slice object at 0x7da2054a51b0>] variable[line_num] assign[=] constant[0] variable[parse] assign[=] constant[-] variable[lemma] assign[=] constant[-] return[binary_operation[binary_operation[list[[<ast.Name object at 0x7da2054a49a0>, <ast.Name object at 0x7da2054a7760>, <ast.Name object at 0x7da2054a4a90>, <ast.Name object at 0x7da2054a55a0>, <ast.Name object at 0x7da2054a5d50>, <ast.Name object at 0x7da2054a5030>, <ast.Name object at 0x7da2054a66b0>, <ast.Constant object at 0x7da2054a66e0>, <ast.Constant object at 0x7da2054a5ed0>, <ast.Constant object at 0x7da2054a5d20>, <ast.Constant object at 0x7da2054a47c0>]] + call[name[list], parameter[name[oie_tags]]]] + list[[<ast.Constant object at 0x7da2049603d0>]]]]
keyword[def] identifier[pad_line_to_ontonotes] ( identifier[line] , identifier[domain] )-> identifier[List] [ identifier[str] ]: literal[string] identifier[word_ind] , identifier[word] = identifier[line] [: literal[int] ] identifier[pos] = literal[string] identifier[oie_tags] = identifier[line] [ literal[int] :] identifier[line_num] = literal[int] identifier[parse] = literal[string] identifier[lemma] = literal[string] keyword[return] [ identifier[domain] , identifier[line_num] , identifier[word_ind] , identifier[word] , identifier[pos] , identifier[parse] , identifier[lemma] , literal[string] , literal[string] , literal[string] , literal[string] ]+ identifier[list] ( identifier[oie_tags] )+[ literal[string] ,]
def pad_line_to_ontonotes(line, domain) -> List[str]: """ Pad line to conform to ontonotes representation. """ (word_ind, word) = line[:2] pos = 'XX' oie_tags = line[2:] line_num = 0 parse = '-' lemma = '-' return [domain, line_num, word_ind, word, pos, parse, lemma, '-', '-', '-', '*'] + list(oie_tags) + ['-']
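For reference, the row shape pad_line_to_ontonotes above produces for one made-up input line (word index, word, then OIE tags). The values are invented, and the call assumes the function above is in scope together with `from typing import List`.

line = ["3", "ran", "B-V", "O"]                  # word index, word, OIE tags
row = pad_line_to_ontonotes(line, domain="wsj")
# row == ['wsj', 0, '3', 'ran', 'XX', '-', '-', '-', '-', '-', '*', 'B-V', 'O', '-']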
def calculate_crop_list(full_page_box_list, bounding_box_list, angle_list, page_nums_to_crop): """Given a list of full-page boxes (media boxes) and a list of tight bounding boxes for each page, calculate and return another list giving the list of bounding boxes to crop down to. The parameter `angle_list` is a list of rotation angles which correspond to the pages. The pages selected to crop are in the set `page_nums_to_crop`.""" # Definition: the deltas are the four differences, one for each margin, # between the original full page box and the final, cropped full-page box. # In the usual case where margin sizes decrease these are the same as the # four margin-reduction values (in absolute points). The deltas are # usually positive but they can be negative due to either percentRetain>100 # or a large enough absolute offset (in which case the size of the # corresponding margin will increase). When percentRetain<0 the deltas are # always greater than the absolute difference between the full page and a # tight bounding box, and so part of the text within the tight bounding box # will also be cropped (unless absolute offsets are used to counter that). num_pages = len(bounding_box_list) page_range = range(num_pages) num_pages_to_crop = len(page_nums_to_crop) # Handle the '--samePageSize' option. # Note that this is always done first, even before evenodd is handled. It # is only applied to the pages in the set `page_nums_to_crop`. order_n = 0 if args.samePageSizeOrderStat: args.samePageSize = True order_n = min(args.samePageSizeOrderStat[0], num_pages_to_crop - 1) order_n = max(order_n, 0) if args.samePageSize: if args.verbose: print("\nSetting each page size to the smallest box bounding all the pages.") if order_n != 0: print("But ignoring the largest {} pages in calculating each edge." .format(order_n)) same_size_bounding_box = [ # We want the smallest of the left and bottom edges. sorted(full_page_box_list[pg][0] for pg in page_nums_to_crop), sorted(full_page_box_list[pg][1] for pg in page_nums_to_crop), # We want the largest of the right and top edges. sorted((full_page_box_list[pg][2] for pg in page_nums_to_crop), reverse=True), sorted((full_page_box_list[pg][3] for pg in page_nums_to_crop), reverse=True) ] same_size_bounding_box = [sortlist[order_n] for sortlist in same_size_bounding_box] new_full_page_box_list = [] for p_num, box in enumerate(full_page_box_list): if p_num not in page_nums_to_crop: new_full_page_box_list.append(box) else: new_full_page_box_list.append(same_size_bounding_box) full_page_box_list = new_full_page_box_list # Handle the '--evenodd' option if it was selected. if args.evenodd: even_page_nums_to_crop = {p_num for p_num in page_nums_to_crop if p_num % 2 == 0} odd_page_nums_to_crop = {p_num for p_num in page_nums_to_crop if p_num % 2 != 0} if args.uniform: uniform_set_with_even_odd = True else: uniform_set_with_even_odd = False # Recurse on even and odd pages, after resetting some options. if args.verbose: print("\nRecursively calculating crops for even and odd pages.") args.evenodd = False # Avoid infinite recursion. args.uniform = True # --evenodd implies uniform, just on each separate group even_crop_list = calculate_crop_list(full_page_box_list, bounding_box_list, angle_list, even_page_nums_to_crop) odd_crop_list = calculate_crop_list(full_page_box_list, bounding_box_list, angle_list, odd_page_nums_to_crop) # Recombine the even and odd pages. 
combine_even_odd = [] for p_num in page_range: if p_num % 2 == 0: combine_even_odd.append(even_crop_list[p_num]) else: combine_even_odd.append(odd_crop_list[p_num]) # Handle the case where --uniform was set with --evenodd. if uniform_set_with_even_odd: min_bottom_margin = min(box[1] for p_num, box in enumerate(combine_even_odd) if p_num in page_nums_to_crop) max_top_margin = max(box[3] for p_num, box in enumerate(combine_even_odd) if p_num in page_nums_to_crop) combine_even_odd = [[box[0], min_bottom_margin, box[2], max_top_margin] for box in combine_even_odd] return combine_even_odd # Before calculating the crops we modify the percentRetain and # absoluteOffset values for all the pages according to any specified. # rotations for the pages. This is so, for example, uniform cropping is # relative to what the user actually sees. rotated_percent_retain = [mod_box_for_rotation(args.percentRetain, angle_list[m_val]) for m_val in range(num_pages)] rotated_absolute_offset = [mod_box_for_rotation(args.absoluteOffset, angle_list[m_val]) for m_val in range(num_pages)] # Calculate the list of deltas to be used to modify the original page # sizes. Basically, a delta is the absolute diff between the full and # tight-bounding boxes, scaled according to the user's percentRetain, with # any absolute offset then added (lb) or subtracted (tr) as appropriate. # # The deltas are all positive unless absoluteOffset changes that or # percent>100. They are added (lb) or subtracted (tr) as appropriate. delta_list = [] for p_num, t_box, f_box in zip(list(range(len(full_page_box_list))), bounding_box_list, full_page_box_list): deltas = [abs(t_box[m_val] - f_box[m_val]) for m_val in range(4)] adj_deltas = [deltas[m_val] * (100.0-rotated_percent_retain[p_num][m_val]) / 100.0 for m_val in range(4)] adj_deltas = [adj_deltas[m_val] + rotated_absolute_offset[p_num][m_val] for m_val in range(4)] delta_list.append(adj_deltas) # Handle the '--uniform' options if one was selected. if args.uniformOrderPercent: percent_val = args.uniformOrderPercent[0] if percent_val < 0.0: percent_val = 0.0 if percent_val > 100.0: percent_val = 100.0 args.uniformOrderStat = [int(round(num_pages_to_crop * percent_val / 100.0))] if args.uniform or args.uniformOrderStat or args.uniformOrderStat4: if args.verbose: print("\nAll the selected pages will be uniformly cropped.") # Expand to tuples containing page nums, to better print verbose information. delta_list = [(delta_list[j], j+1) for j in page_range] # Note +1 added here. # Only look at the deltas which correspond to pages selected for cropping. # The values will then be sorted for each margin and selected. crop_delta_list = [delta_list[j] for j in page_range if j in page_nums_to_crop] # Handle order stats; m_vals are the four index values into the sorted # delta lists, one per margin. 
m_vals = [0, 0, 0, 0] if args.uniformOrderStat4: m_vals = args.uniformOrderStat4 elif args.uniformOrderStat: m_vals = [args.uniformOrderStat[0]] * 4 fixed_m_vals = [] for m_val in m_vals: if m_val < 0 or m_val >= num_pages_to_crop: print("\nWarning: The selected order statistic is out of range.", "Setting to closest value.", file=sys.stderr) if m_val >= num_pages_to_crop: m_val = num_pages_to_crop - 1 if m_val < 0: m_val = 0 fixed_m_vals.append(m_val) m_vals = fixed_m_vals if args.verbose and (args.uniformOrderStat or args.uniformOrderPercent or args.uniformOrderStat4): print("\nPer-margin, the", m_vals, "smallest delta values over the selected pages\nwill be ignored" " when choosing common, uniform delta values.") # Get a sorted list of (delta, page_num) tuples for each margin. left_vals = sorted([(box[0][0], box[1]) for box in crop_delta_list]) lower_vals = sorted([(box[0][1], box[1]) for box in crop_delta_list]) right_vals = sorted([(box[0][2], box[1]) for box in crop_delta_list]) upper_vals = sorted([(box[0][3], box[1]) for box in crop_delta_list]) delta_list = [[left_vals[m_vals[0]][0], lower_vals[m_vals[1]][0], right_vals[m_vals[2]][0], upper_vals[m_vals[3]][0]]] * num_pages if args.verbose: delta_page_nums = [left_vals[m_vals[0]][1], lower_vals[m_vals[1]][1], right_vals[m_vals[2]][1], upper_vals[m_vals[3]][1]] print("\nThe smallest delta values actually used to set the uniform" " cropping\namounts (ignoring any '-m' skips and pages in ranges" " not cropped) were\nfound on these pages, numbered from 1:\n ", delta_page_nums) print("\nThe final delta values themselves are:\n ", delta_list[0]) # Apply the delta modifications to the full boxes to get the final sizes. final_crop_list = [] for f_box, deltas in zip(full_page_box_list, delta_list): final_crop_list.append((f_box[0] + deltas[0], f_box[1] + deltas[1], f_box[2] - deltas[2], f_box[3] - deltas[3])) # Set the page ratios if user chose that option. if args.setPageRatios: ratio = args.setPageRatios[0] if args.verbose: print("\nSetting all page width to height ratios to:", ratio) ratio_set_crop_list = [] for left, bottom, right, top in final_crop_list: width = right - left horizontal_center = (right + left) / 2.0 height = top - bottom vertical_center = (top + bottom) / 2.0 new_height = width / ratio if new_height < height: new_width = height * ratio assert new_width >= width ratio_set_crop_list.append((horizontal_center - new_width/2.0, bottom, horizontal_center + new_width/2.0, top)) else: ratio_set_crop_list.append((left, vertical_center - new_height/2.0, right, vertical_center + new_height/2.0)) final_crop_list = ratio_set_crop_list return final_crop_list
def function[calculate_crop_list, parameter[full_page_box_list, bounding_box_list, angle_list, page_nums_to_crop]]: constant[Given a list of full-page boxes (media boxes) and a list of tight bounding boxes for each page, calculate and return another list giving the list of bounding boxes to crop down to. The parameter `angle_list` is a list of rotation angles which correspond to the pages. The pages selected to crop are in the set `page_nums_to_crop`.] variable[num_pages] assign[=] call[name[len], parameter[name[bounding_box_list]]] variable[page_range] assign[=] call[name[range], parameter[name[num_pages]]] variable[num_pages_to_crop] assign[=] call[name[len], parameter[name[page_nums_to_crop]]] variable[order_n] assign[=] constant[0] if name[args].samePageSizeOrderStat begin[:] name[args].samePageSize assign[=] constant[True] variable[order_n] assign[=] call[name[min], parameter[call[name[args].samePageSizeOrderStat][constant[0]], binary_operation[name[num_pages_to_crop] - constant[1]]]] variable[order_n] assign[=] call[name[max], parameter[name[order_n], constant[0]]] if name[args].samePageSize begin[:] if name[args].verbose begin[:] call[name[print], parameter[constant[ Setting each page size to the smallest box bounding all the pages.]]] if compare[name[order_n] not_equal[!=] constant[0]] begin[:] call[name[print], parameter[call[constant[But ignoring the largest {} pages in calculating each edge.].format, parameter[name[order_n]]]]] variable[same_size_bounding_box] assign[=] list[[<ast.Call object at 0x7da1b11b91e0>, <ast.Call object at 0x7da1b11b91b0>, <ast.Call object at 0x7da1b11b86a0>, <ast.Call object at 0x7da1b11b9210>]] variable[same_size_bounding_box] assign[=] <ast.ListComp object at 0x7da1b11b8340> variable[new_full_page_box_list] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b11bbdc0>, <ast.Name object at 0x7da1b11b83d0>]]] in starred[call[name[enumerate], parameter[name[full_page_box_list]]]] begin[:] if compare[name[p_num] <ast.NotIn object at 0x7da2590d7190> name[page_nums_to_crop]] begin[:] call[name[new_full_page_box_list].append, parameter[name[box]]] variable[full_page_box_list] assign[=] name[new_full_page_box_list] if name[args].evenodd begin[:] variable[even_page_nums_to_crop] assign[=] <ast.SetComp object at 0x7da1b1123eb0> variable[odd_page_nums_to_crop] assign[=] <ast.SetComp object at 0x7da1b1123c40> if name[args].uniform begin[:] variable[uniform_set_with_even_odd] assign[=] constant[True] if name[args].verbose begin[:] call[name[print], parameter[constant[ Recursively calculating crops for even and odd pages.]]] name[args].evenodd assign[=] constant[False] name[args].uniform assign[=] constant[True] variable[even_crop_list] assign[=] call[name[calculate_crop_list], parameter[name[full_page_box_list], name[bounding_box_list], name[angle_list], name[even_page_nums_to_crop]]] variable[odd_crop_list] assign[=] call[name[calculate_crop_list], parameter[name[full_page_box_list], name[bounding_box_list], name[angle_list], name[odd_page_nums_to_crop]]] variable[combine_even_odd] assign[=] list[[]] for taget[name[p_num]] in starred[name[page_range]] begin[:] if compare[binary_operation[name[p_num] <ast.Mod object at 0x7da2590d6920> constant[2]] equal[==] constant[0]] begin[:] call[name[combine_even_odd].append, parameter[call[name[even_crop_list]][name[p_num]]]] if name[uniform_set_with_even_odd] begin[:] variable[min_bottom_margin] assign[=] call[name[min], parameter[<ast.GeneratorExp object at 0x7da1b1122c20>]] variable[max_top_margin] assign[=] 
call[name[max], parameter[<ast.GeneratorExp object at 0x7da1b1122890>]] variable[combine_even_odd] assign[=] <ast.ListComp object at 0x7da1b1122560> return[name[combine_even_odd]] variable[rotated_percent_retain] assign[=] <ast.ListComp object at 0x7da1b11221d0> variable[rotated_absolute_offset] assign[=] <ast.ListComp object at 0x7da1b1121ed0> variable[delta_list] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b1121b40>, <ast.Name object at 0x7da1b1121b10>, <ast.Name object at 0x7da1b1121ae0>]]] in starred[call[name[zip], parameter[call[name[list], parameter[call[name[range], parameter[call[name[len], parameter[name[full_page_box_list]]]]]]], name[bounding_box_list], name[full_page_box_list]]]] begin[:] variable[deltas] assign[=] <ast.ListComp object at 0x7da1b1121840> variable[adj_deltas] assign[=] <ast.ListComp object at 0x7da1b11214e0> variable[adj_deltas] assign[=] <ast.ListComp object at 0x7da1b11210c0> call[name[delta_list].append, parameter[name[adj_deltas]]] if name[args].uniformOrderPercent begin[:] variable[percent_val] assign[=] call[name[args].uniformOrderPercent][constant[0]] if compare[name[percent_val] less[<] constant[0.0]] begin[:] variable[percent_val] assign[=] constant[0.0] if compare[name[percent_val] greater[>] constant[100.0]] begin[:] variable[percent_val] assign[=] constant[100.0] name[args].uniformOrderStat assign[=] list[[<ast.Call object at 0x7da1b1234790>]] if <ast.BoolOp object at 0x7da1b1234040> begin[:] if name[args].verbose begin[:] call[name[print], parameter[constant[ All the selected pages will be uniformly cropped.]]] variable[delta_list] assign[=] <ast.ListComp object at 0x7da1b1236bc0> variable[crop_delta_list] assign[=] <ast.ListComp object at 0x7da1b1236e60> variable[m_vals] assign[=] list[[<ast.Constant object at 0x7da1b1234b50>, <ast.Constant object at 0x7da1b1235c00>, <ast.Constant object at 0x7da1b1237310>, <ast.Constant object at 0x7da1b1237fd0>]] if name[args].uniformOrderStat4 begin[:] variable[m_vals] assign[=] name[args].uniformOrderStat4 variable[fixed_m_vals] assign[=] list[[]] for taget[name[m_val]] in starred[name[m_vals]] begin[:] if <ast.BoolOp object at 0x7da1b1237220> begin[:] call[name[print], parameter[constant[ Warning: The selected order statistic is out of range.], constant[Setting to closest value.]]] if compare[name[m_val] greater_or_equal[>=] name[num_pages_to_crop]] begin[:] variable[m_val] assign[=] binary_operation[name[num_pages_to_crop] - constant[1]] if compare[name[m_val] less[<] constant[0]] begin[:] variable[m_val] assign[=] constant[0] call[name[fixed_m_vals].append, parameter[name[m_val]]] variable[m_vals] assign[=] name[fixed_m_vals] if <ast.BoolOp object at 0x7da1b1237a30> begin[:] call[name[print], parameter[constant[ Per-margin, the], name[m_vals], constant[smallest delta values over the selected pages will be ignored when choosing common, uniform delta values.]]] variable[left_vals] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da1b1293eb0>]] variable[lower_vals] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da1b1293430>]] variable[right_vals] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da1b1291ab0>]] variable[upper_vals] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da1b1292ef0>]] variable[delta_list] assign[=] binary_operation[list[[<ast.List object at 0x7da1b1290fd0>]] * name[num_pages]] if name[args].verbose begin[:] variable[delta_page_nums] assign[=] list[[<ast.Subscript object at 0x7da1b1293f40>, 
<ast.Subscript object at 0x7da1b1290280>, <ast.Subscript object at 0x7da1b1293850>, <ast.Subscript object at 0x7da1b12934c0>]] call[name[print], parameter[constant[ The smallest delta values actually used to set the uniform cropping amounts (ignoring any '-m' skips and pages in ranges not cropped) were found on these pages, numbered from 1: ], name[delta_page_nums]]] call[name[print], parameter[constant[ The final delta values themselves are: ], call[name[delta_list]][constant[0]]]] variable[final_crop_list] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b12912d0>, <ast.Name object at 0x7da1b1291a20>]]] in starred[call[name[zip], parameter[name[full_page_box_list], name[delta_list]]]] begin[:] call[name[final_crop_list].append, parameter[tuple[[<ast.BinOp object at 0x7da1b1291b10>, <ast.BinOp object at 0x7da1b1291660>, <ast.BinOp object at 0x7da1b1292a70>, <ast.BinOp object at 0x7da1b1290400>]]]] if name[args].setPageRatios begin[:] variable[ratio] assign[=] call[name[args].setPageRatios][constant[0]] if name[args].verbose begin[:] call[name[print], parameter[constant[ Setting all page width to height ratios to:], name[ratio]]] variable[ratio_set_crop_list] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b12f2b90>, <ast.Name object at 0x7da1b12f0f40>, <ast.Name object at 0x7da1b12f2950>, <ast.Name object at 0x7da1b12f06a0>]]] in starred[name[final_crop_list]] begin[:] variable[width] assign[=] binary_operation[name[right] - name[left]] variable[horizontal_center] assign[=] binary_operation[binary_operation[name[right] + name[left]] / constant[2.0]] variable[height] assign[=] binary_operation[name[top] - name[bottom]] variable[vertical_center] assign[=] binary_operation[binary_operation[name[top] + name[bottom]] / constant[2.0]] variable[new_height] assign[=] binary_operation[name[width] / name[ratio]] if compare[name[new_height] less[<] name[height]] begin[:] variable[new_width] assign[=] binary_operation[name[height] * name[ratio]] assert[compare[name[new_width] greater_or_equal[>=] name[width]]] call[name[ratio_set_crop_list].append, parameter[tuple[[<ast.BinOp object at 0x7da1b12f1ff0>, <ast.Name object at 0x7da1b12f2020>, <ast.BinOp object at 0x7da1b12f1150>, <ast.Name object at 0x7da1b12f1570>]]]] variable[final_crop_list] assign[=] name[ratio_set_crop_list] return[name[final_crop_list]]
keyword[def] identifier[calculate_crop_list] ( identifier[full_page_box_list] , identifier[bounding_box_list] , identifier[angle_list] , identifier[page_nums_to_crop] ): literal[string] identifier[num_pages] = identifier[len] ( identifier[bounding_box_list] ) identifier[page_range] = identifier[range] ( identifier[num_pages] ) identifier[num_pages_to_crop] = identifier[len] ( identifier[page_nums_to_crop] ) identifier[order_n] = literal[int] keyword[if] identifier[args] . identifier[samePageSizeOrderStat] : identifier[args] . identifier[samePageSize] = keyword[True] identifier[order_n] = identifier[min] ( identifier[args] . identifier[samePageSizeOrderStat] [ literal[int] ], identifier[num_pages_to_crop] - literal[int] ) identifier[order_n] = identifier[max] ( identifier[order_n] , literal[int] ) keyword[if] identifier[args] . identifier[samePageSize] : keyword[if] identifier[args] . identifier[verbose] : identifier[print] ( literal[string] ) keyword[if] identifier[order_n] != literal[int] : identifier[print] ( literal[string] . identifier[format] ( identifier[order_n] )) identifier[same_size_bounding_box] =[ identifier[sorted] ( identifier[full_page_box_list] [ identifier[pg] ][ literal[int] ] keyword[for] identifier[pg] keyword[in] identifier[page_nums_to_crop] ), identifier[sorted] ( identifier[full_page_box_list] [ identifier[pg] ][ literal[int] ] keyword[for] identifier[pg] keyword[in] identifier[page_nums_to_crop] ), identifier[sorted] (( identifier[full_page_box_list] [ identifier[pg] ][ literal[int] ] keyword[for] identifier[pg] keyword[in] identifier[page_nums_to_crop] ), identifier[reverse] = keyword[True] ), identifier[sorted] (( identifier[full_page_box_list] [ identifier[pg] ][ literal[int] ] keyword[for] identifier[pg] keyword[in] identifier[page_nums_to_crop] ), identifier[reverse] = keyword[True] ) ] identifier[same_size_bounding_box] =[ identifier[sortlist] [ identifier[order_n] ] keyword[for] identifier[sortlist] keyword[in] identifier[same_size_bounding_box] ] identifier[new_full_page_box_list] =[] keyword[for] identifier[p_num] , identifier[box] keyword[in] identifier[enumerate] ( identifier[full_page_box_list] ): keyword[if] identifier[p_num] keyword[not] keyword[in] identifier[page_nums_to_crop] : identifier[new_full_page_box_list] . identifier[append] ( identifier[box] ) keyword[else] : identifier[new_full_page_box_list] . identifier[append] ( identifier[same_size_bounding_box] ) identifier[full_page_box_list] = identifier[new_full_page_box_list] keyword[if] identifier[args] . identifier[evenodd] : identifier[even_page_nums_to_crop] ={ identifier[p_num] keyword[for] identifier[p_num] keyword[in] identifier[page_nums_to_crop] keyword[if] identifier[p_num] % literal[int] == literal[int] } identifier[odd_page_nums_to_crop] ={ identifier[p_num] keyword[for] identifier[p_num] keyword[in] identifier[page_nums_to_crop] keyword[if] identifier[p_num] % literal[int] != literal[int] } keyword[if] identifier[args] . identifier[uniform] : identifier[uniform_set_with_even_odd] = keyword[True] keyword[else] : identifier[uniform_set_with_even_odd] = keyword[False] keyword[if] identifier[args] . identifier[verbose] : identifier[print] ( literal[string] ) identifier[args] . identifier[evenodd] = keyword[False] identifier[args] . 
identifier[uniform] = keyword[True] identifier[even_crop_list] = identifier[calculate_crop_list] ( identifier[full_page_box_list] , identifier[bounding_box_list] , identifier[angle_list] , identifier[even_page_nums_to_crop] ) identifier[odd_crop_list] = identifier[calculate_crop_list] ( identifier[full_page_box_list] , identifier[bounding_box_list] , identifier[angle_list] , identifier[odd_page_nums_to_crop] ) identifier[combine_even_odd] =[] keyword[for] identifier[p_num] keyword[in] identifier[page_range] : keyword[if] identifier[p_num] % literal[int] == literal[int] : identifier[combine_even_odd] . identifier[append] ( identifier[even_crop_list] [ identifier[p_num] ]) keyword[else] : identifier[combine_even_odd] . identifier[append] ( identifier[odd_crop_list] [ identifier[p_num] ]) keyword[if] identifier[uniform_set_with_even_odd] : identifier[min_bottom_margin] = identifier[min] ( identifier[box] [ literal[int] ] keyword[for] identifier[p_num] , identifier[box] keyword[in] identifier[enumerate] ( identifier[combine_even_odd] ) keyword[if] identifier[p_num] keyword[in] identifier[page_nums_to_crop] ) identifier[max_top_margin] = identifier[max] ( identifier[box] [ literal[int] ] keyword[for] identifier[p_num] , identifier[box] keyword[in] identifier[enumerate] ( identifier[combine_even_odd] ) keyword[if] identifier[p_num] keyword[in] identifier[page_nums_to_crop] ) identifier[combine_even_odd] =[[ identifier[box] [ literal[int] ], identifier[min_bottom_margin] , identifier[box] [ literal[int] ], identifier[max_top_margin] ] keyword[for] identifier[box] keyword[in] identifier[combine_even_odd] ] keyword[return] identifier[combine_even_odd] identifier[rotated_percent_retain] =[ identifier[mod_box_for_rotation] ( identifier[args] . identifier[percentRetain] , identifier[angle_list] [ identifier[m_val] ]) keyword[for] identifier[m_val] keyword[in] identifier[range] ( identifier[num_pages] )] identifier[rotated_absolute_offset] =[ identifier[mod_box_for_rotation] ( identifier[args] . identifier[absoluteOffset] , identifier[angle_list] [ identifier[m_val] ]) keyword[for] identifier[m_val] keyword[in] identifier[range] ( identifier[num_pages] )] identifier[delta_list] =[] keyword[for] identifier[p_num] , identifier[t_box] , identifier[f_box] keyword[in] identifier[zip] ( identifier[list] ( identifier[range] ( identifier[len] ( identifier[full_page_box_list] ))), identifier[bounding_box_list] , identifier[full_page_box_list] ): identifier[deltas] =[ identifier[abs] ( identifier[t_box] [ identifier[m_val] ]- identifier[f_box] [ identifier[m_val] ]) keyword[for] identifier[m_val] keyword[in] identifier[range] ( literal[int] )] identifier[adj_deltas] =[ identifier[deltas] [ identifier[m_val] ]*( literal[int] - identifier[rotated_percent_retain] [ identifier[p_num] ][ identifier[m_val] ])/ literal[int] keyword[for] identifier[m_val] keyword[in] identifier[range] ( literal[int] )] identifier[adj_deltas] =[ identifier[adj_deltas] [ identifier[m_val] ]+ identifier[rotated_absolute_offset] [ identifier[p_num] ][ identifier[m_val] ] keyword[for] identifier[m_val] keyword[in] identifier[range] ( literal[int] )] identifier[delta_list] . identifier[append] ( identifier[adj_deltas] ) keyword[if] identifier[args] . identifier[uniformOrderPercent] : identifier[percent_val] = identifier[args] . 
identifier[uniformOrderPercent] [ literal[int] ] keyword[if] identifier[percent_val] < literal[int] : identifier[percent_val] = literal[int] keyword[if] identifier[percent_val] > literal[int] : identifier[percent_val] = literal[int] identifier[args] . identifier[uniformOrderStat] =[ identifier[int] ( identifier[round] ( identifier[num_pages_to_crop] * identifier[percent_val] / literal[int] ))] keyword[if] identifier[args] . identifier[uniform] keyword[or] identifier[args] . identifier[uniformOrderStat] keyword[or] identifier[args] . identifier[uniformOrderStat4] : keyword[if] identifier[args] . identifier[verbose] : identifier[print] ( literal[string] ) identifier[delta_list] =[( identifier[delta_list] [ identifier[j] ], identifier[j] + literal[int] ) keyword[for] identifier[j] keyword[in] identifier[page_range] ] identifier[crop_delta_list] =[ identifier[delta_list] [ identifier[j] ] keyword[for] identifier[j] keyword[in] identifier[page_range] keyword[if] identifier[j] keyword[in] identifier[page_nums_to_crop] ] identifier[m_vals] =[ literal[int] , literal[int] , literal[int] , literal[int] ] keyword[if] identifier[args] . identifier[uniformOrderStat4] : identifier[m_vals] = identifier[args] . identifier[uniformOrderStat4] keyword[elif] identifier[args] . identifier[uniformOrderStat] : identifier[m_vals] =[ identifier[args] . identifier[uniformOrderStat] [ literal[int] ]]* literal[int] identifier[fixed_m_vals] =[] keyword[for] identifier[m_val] keyword[in] identifier[m_vals] : keyword[if] identifier[m_val] < literal[int] keyword[or] identifier[m_val] >= identifier[num_pages_to_crop] : identifier[print] ( literal[string] , literal[string] , identifier[file] = identifier[sys] . identifier[stderr] ) keyword[if] identifier[m_val] >= identifier[num_pages_to_crop] : identifier[m_val] = identifier[num_pages_to_crop] - literal[int] keyword[if] identifier[m_val] < literal[int] : identifier[m_val] = literal[int] identifier[fixed_m_vals] . identifier[append] ( identifier[m_val] ) identifier[m_vals] = identifier[fixed_m_vals] keyword[if] identifier[args] . identifier[verbose] keyword[and] ( identifier[args] . identifier[uniformOrderStat] keyword[or] identifier[args] . identifier[uniformOrderPercent] keyword[or] identifier[args] . 
identifier[uniformOrderStat4] ): identifier[print] ( literal[string] , identifier[m_vals] , literal[string] literal[string] ) identifier[left_vals] = identifier[sorted] ([( identifier[box] [ literal[int] ][ literal[int] ], identifier[box] [ literal[int] ]) keyword[for] identifier[box] keyword[in] identifier[crop_delta_list] ]) identifier[lower_vals] = identifier[sorted] ([( identifier[box] [ literal[int] ][ literal[int] ], identifier[box] [ literal[int] ]) keyword[for] identifier[box] keyword[in] identifier[crop_delta_list] ]) identifier[right_vals] = identifier[sorted] ([( identifier[box] [ literal[int] ][ literal[int] ], identifier[box] [ literal[int] ]) keyword[for] identifier[box] keyword[in] identifier[crop_delta_list] ]) identifier[upper_vals] = identifier[sorted] ([( identifier[box] [ literal[int] ][ literal[int] ], identifier[box] [ literal[int] ]) keyword[for] identifier[box] keyword[in] identifier[crop_delta_list] ]) identifier[delta_list] =[[ identifier[left_vals] [ identifier[m_vals] [ literal[int] ]][ literal[int] ], identifier[lower_vals] [ identifier[m_vals] [ literal[int] ]][ literal[int] ], identifier[right_vals] [ identifier[m_vals] [ literal[int] ]][ literal[int] ], identifier[upper_vals] [ identifier[m_vals] [ literal[int] ]][ literal[int] ]]]* identifier[num_pages] keyword[if] identifier[args] . identifier[verbose] : identifier[delta_page_nums] =[ identifier[left_vals] [ identifier[m_vals] [ literal[int] ]][ literal[int] ], identifier[lower_vals] [ identifier[m_vals] [ literal[int] ]][ literal[int] ], identifier[right_vals] [ identifier[m_vals] [ literal[int] ]][ literal[int] ], identifier[upper_vals] [ identifier[m_vals] [ literal[int] ]][ literal[int] ]] identifier[print] ( literal[string] literal[string] literal[string] , identifier[delta_page_nums] ) identifier[print] ( literal[string] , identifier[delta_list] [ literal[int] ]) identifier[final_crop_list] =[] keyword[for] identifier[f_box] , identifier[deltas] keyword[in] identifier[zip] ( identifier[full_page_box_list] , identifier[delta_list] ): identifier[final_crop_list] . identifier[append] (( identifier[f_box] [ literal[int] ]+ identifier[deltas] [ literal[int] ], identifier[f_box] [ literal[int] ]+ identifier[deltas] [ literal[int] ], identifier[f_box] [ literal[int] ]- identifier[deltas] [ literal[int] ], identifier[f_box] [ literal[int] ]- identifier[deltas] [ literal[int] ])) keyword[if] identifier[args] . identifier[setPageRatios] : identifier[ratio] = identifier[args] . identifier[setPageRatios] [ literal[int] ] keyword[if] identifier[args] . identifier[verbose] : identifier[print] ( literal[string] , identifier[ratio] ) identifier[ratio_set_crop_list] =[] keyword[for] identifier[left] , identifier[bottom] , identifier[right] , identifier[top] keyword[in] identifier[final_crop_list] : identifier[width] = identifier[right] - identifier[left] identifier[horizontal_center] =( identifier[right] + identifier[left] )/ literal[int] identifier[height] = identifier[top] - identifier[bottom] identifier[vertical_center] =( identifier[top] + identifier[bottom] )/ literal[int] identifier[new_height] = identifier[width] / identifier[ratio] keyword[if] identifier[new_height] < identifier[height] : identifier[new_width] = identifier[height] * identifier[ratio] keyword[assert] identifier[new_width] >= identifier[width] identifier[ratio_set_crop_list] . 
identifier[append] (( identifier[horizontal_center] - identifier[new_width] / literal[int] , identifier[bottom] , identifier[horizontal_center] + identifier[new_width] / literal[int] , identifier[top] )) keyword[else] : identifier[ratio_set_crop_list] . identifier[append] (( identifier[left] , identifier[vertical_center] - identifier[new_height] / literal[int] , identifier[right] , identifier[vertical_center] + identifier[new_height] / literal[int] )) identifier[final_crop_list] = identifier[ratio_set_crop_list] keyword[return] identifier[final_crop_list]
def calculate_crop_list(full_page_box_list, bounding_box_list, angle_list, page_nums_to_crop): """Given a list of full-page boxes (media boxes) and a list of tight bounding boxes for each page, calculate and return another list giving the list of bounding boxes to crop down to. The parameter `angle_list` is a list of rotation angles which correspond to the pages. The pages selected to crop are in the set `page_nums_to_crop`.""" # Definition: the deltas are the four differences, one for each margin, # between the original full page box and the final, cropped full-page box. # In the usual case where margin sizes decrease these are the same as the # four margin-reduction values (in absolute points). The deltas are # usually positive but they can be negative due to either percentRetain>100 # or a large enough absolute offset (in which case the size of the # corresponding margin will increase). When percentRetain<0 the deltas are # always greater than the absolute difference between the full page and a # tight bounding box, and so part of the text within the tight bounding box # will also be cropped (unless absolute offsets are used to counter that). num_pages = len(bounding_box_list) page_range = range(num_pages) num_pages_to_crop = len(page_nums_to_crop) # Handle the '--samePageSize' option. # Note that this is always done first, even before evenodd is handled. It # is only applied to the pages in the set `page_nums_to_crop`. order_n = 0 if args.samePageSizeOrderStat: args.samePageSize = True order_n = min(args.samePageSizeOrderStat[0], num_pages_to_crop - 1) order_n = max(order_n, 0) # depends on [control=['if'], data=[]] if args.samePageSize: if args.verbose: print('\nSetting each page size to the smallest box bounding all the pages.') if order_n != 0: print('But ignoring the largest {} pages in calculating each edge.'.format(order_n)) # depends on [control=['if'], data=['order_n']] # depends on [control=['if'], data=[]] # We want the smallest of the left and bottom edges. # We want the largest of the right and top edges. same_size_bounding_box = [sorted((full_page_box_list[pg][0] for pg in page_nums_to_crop)), sorted((full_page_box_list[pg][1] for pg in page_nums_to_crop)), sorted((full_page_box_list[pg][2] for pg in page_nums_to_crop), reverse=True), sorted((full_page_box_list[pg][3] for pg in page_nums_to_crop), reverse=True)] same_size_bounding_box = [sortlist[order_n] for sortlist in same_size_bounding_box] new_full_page_box_list = [] for (p_num, box) in enumerate(full_page_box_list): if p_num not in page_nums_to_crop: new_full_page_box_list.append(box) # depends on [control=['if'], data=[]] else: new_full_page_box_list.append(same_size_bounding_box) # depends on [control=['for'], data=[]] full_page_box_list = new_full_page_box_list # depends on [control=['if'], data=[]] # Handle the '--evenodd' option if it was selected. if args.evenodd: even_page_nums_to_crop = {p_num for p_num in page_nums_to_crop if p_num % 2 == 0} odd_page_nums_to_crop = {p_num for p_num in page_nums_to_crop if p_num % 2 != 0} if args.uniform: uniform_set_with_even_odd = True # depends on [control=['if'], data=[]] else: uniform_set_with_even_odd = False # Recurse on even and odd pages, after resetting some options. if args.verbose: print('\nRecursively calculating crops for even and odd pages.') # depends on [control=['if'], data=[]] args.evenodd = False # Avoid infinite recursion. 
args.uniform = True # --evenodd implies uniform, just on each separate group even_crop_list = calculate_crop_list(full_page_box_list, bounding_box_list, angle_list, even_page_nums_to_crop) odd_crop_list = calculate_crop_list(full_page_box_list, bounding_box_list, angle_list, odd_page_nums_to_crop) # Recombine the even and odd pages. combine_even_odd = [] for p_num in page_range: if p_num % 2 == 0: combine_even_odd.append(even_crop_list[p_num]) # depends on [control=['if'], data=[]] else: combine_even_odd.append(odd_crop_list[p_num]) # depends on [control=['for'], data=['p_num']] # Handle the case where --uniform was set with --evenodd. if uniform_set_with_even_odd: min_bottom_margin = min((box[1] for (p_num, box) in enumerate(combine_even_odd) if p_num in page_nums_to_crop)) max_top_margin = max((box[3] for (p_num, box) in enumerate(combine_even_odd) if p_num in page_nums_to_crop)) combine_even_odd = [[box[0], min_bottom_margin, box[2], max_top_margin] for box in combine_even_odd] # depends on [control=['if'], data=[]] return combine_even_odd # depends on [control=['if'], data=[]] # Before calculating the crops we modify the percentRetain and # absoluteOffset values for all the pages according to any specified. # rotations for the pages. This is so, for example, uniform cropping is # relative to what the user actually sees. rotated_percent_retain = [mod_box_for_rotation(args.percentRetain, angle_list[m_val]) for m_val in range(num_pages)] rotated_absolute_offset = [mod_box_for_rotation(args.absoluteOffset, angle_list[m_val]) for m_val in range(num_pages)] # Calculate the list of deltas to be used to modify the original page # sizes. Basically, a delta is the absolute diff between the full and # tight-bounding boxes, scaled according to the user's percentRetain, with # any absolute offset then added (lb) or subtracted (tr) as appropriate. # # The deltas are all positive unless absoluteOffset changes that or # percent>100. They are added (lb) or subtracted (tr) as appropriate. delta_list = [] for (p_num, t_box, f_box) in zip(list(range(len(full_page_box_list))), bounding_box_list, full_page_box_list): deltas = [abs(t_box[m_val] - f_box[m_val]) for m_val in range(4)] adj_deltas = [deltas[m_val] * (100.0 - rotated_percent_retain[p_num][m_val]) / 100.0 for m_val in range(4)] adj_deltas = [adj_deltas[m_val] + rotated_absolute_offset[p_num][m_val] for m_val in range(4)] delta_list.append(adj_deltas) # depends on [control=['for'], data=[]] # Handle the '--uniform' options if one was selected. if args.uniformOrderPercent: percent_val = args.uniformOrderPercent[0] if percent_val < 0.0: percent_val = 0.0 # depends on [control=['if'], data=['percent_val']] if percent_val > 100.0: percent_val = 100.0 # depends on [control=['if'], data=['percent_val']] args.uniformOrderStat = [int(round(num_pages_to_crop * percent_val / 100.0))] # depends on [control=['if'], data=[]] if args.uniform or args.uniformOrderStat or args.uniformOrderStat4: if args.verbose: print('\nAll the selected pages will be uniformly cropped.') # depends on [control=['if'], data=[]] # Expand to tuples containing page nums, to better print verbose information. delta_list = [(delta_list[j], j + 1) for j in page_range] # Note +1 added here. # Only look at the deltas which correspond to pages selected for cropping. # The values will then be sorted for each margin and selected. 
crop_delta_list = [delta_list[j] for j in page_range if j in page_nums_to_crop] # Handle order stats; m_vals are the four index values into the sorted # delta lists, one per margin. m_vals = [0, 0, 0, 0] if args.uniformOrderStat4: m_vals = args.uniformOrderStat4 # depends on [control=['if'], data=[]] elif args.uniformOrderStat: m_vals = [args.uniformOrderStat[0]] * 4 # depends on [control=['if'], data=[]] fixed_m_vals = [] for m_val in m_vals: if m_val < 0 or m_val >= num_pages_to_crop: print('\nWarning: The selected order statistic is out of range.', 'Setting to closest value.', file=sys.stderr) if m_val >= num_pages_to_crop: m_val = num_pages_to_crop - 1 # depends on [control=['if'], data=['m_val', 'num_pages_to_crop']] if m_val < 0: m_val = 0 # depends on [control=['if'], data=['m_val']] # depends on [control=['if'], data=[]] fixed_m_vals.append(m_val) # depends on [control=['for'], data=['m_val']] m_vals = fixed_m_vals if args.verbose and (args.uniformOrderStat or args.uniformOrderPercent or args.uniformOrderStat4): print('\nPer-margin, the', m_vals, 'smallest delta values over the selected pages\nwill be ignored when choosing common, uniform delta values.') # depends on [control=['if'], data=[]] # Get a sorted list of (delta, page_num) tuples for each margin. left_vals = sorted([(box[0][0], box[1]) for box in crop_delta_list]) lower_vals = sorted([(box[0][1], box[1]) for box in crop_delta_list]) right_vals = sorted([(box[0][2], box[1]) for box in crop_delta_list]) upper_vals = sorted([(box[0][3], box[1]) for box in crop_delta_list]) delta_list = [[left_vals[m_vals[0]][0], lower_vals[m_vals[1]][0], right_vals[m_vals[2]][0], upper_vals[m_vals[3]][0]]] * num_pages if args.verbose: delta_page_nums = [left_vals[m_vals[0]][1], lower_vals[m_vals[1]][1], right_vals[m_vals[2]][1], upper_vals[m_vals[3]][1]] print("\nThe smallest delta values actually used to set the uniform cropping\namounts (ignoring any '-m' skips and pages in ranges not cropped) were\nfound on these pages, numbered from 1:\n ", delta_page_nums) print('\nThe final delta values themselves are:\n ', delta_list[0]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # Apply the delta modifications to the full boxes to get the final sizes. final_crop_list = [] for (f_box, deltas) in zip(full_page_box_list, delta_list): final_crop_list.append((f_box[0] + deltas[0], f_box[1] + deltas[1], f_box[2] - deltas[2], f_box[3] - deltas[3])) # depends on [control=['for'], data=[]] # Set the page ratios if user chose that option. if args.setPageRatios: ratio = args.setPageRatios[0] if args.verbose: print('\nSetting all page width to height ratios to:', ratio) # depends on [control=['if'], data=[]] ratio_set_crop_list = [] for (left, bottom, right, top) in final_crop_list: width = right - left horizontal_center = (right + left) / 2.0 height = top - bottom vertical_center = (top + bottom) / 2.0 new_height = width / ratio if new_height < height: new_width = height * ratio assert new_width >= width ratio_set_crop_list.append((horizontal_center - new_width / 2.0, bottom, horizontal_center + new_width / 2.0, top)) # depends on [control=['if'], data=['height']] else: ratio_set_crop_list.append((left, vertical_center - new_height / 2.0, right, vertical_center + new_height / 2.0)) # depends on [control=['for'], data=[]] final_crop_list = ratio_set_crop_list # depends on [control=['if'], data=[]] return final_crop_list
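A worked, standalone sketch of the per-margin delta arithmetic described in calculate_crop_list above: each delta is the gap between the full page box and the tight bounding box, scaled by (100 - percentRetain)/100, plus the absolute offset; the deltas are then added on the left/bottom edges and subtracted on the right/top edges. All numbers below are invented for illustration.

full_box  = [0.0, 0.0, 612.0, 792.0]      # US Letter media box, in points
tight_box = [72.0, 90.0, 540.0, 720.0]    # tight bounding box of the page content
percent_retain  = [10.0, 10.0, 10.0, 10.0]
absolute_offset = [0.0, 5.0, 0.0, 5.0]

deltas = [abs(t - f) * (100.0 - p) / 100.0 + o
          for t, f, p, o in zip(tight_box, full_box, percent_retain, absolute_offset)]
# deltas == [64.8, 86.0, 64.8, 69.8]  (left, bottom, right, top)

cropped = (full_box[0] + deltas[0],        # left edge moves right
           full_box[1] + deltas[1],        # bottom edge moves up
           full_box[2] - deltas[2],        # right edge moves left
           full_box[3] - deltas[3])        # top edge moves down
print(cropped)   # roughly (64.8, 86.0, 547.2, 722.2), up to float rounding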
def associate_flavor(self, flavor, body): """Associate a Neutron service flavor with a profile.""" return self.post(self.flavor_profile_bindings_path % (flavor), body=body)
def function[associate_flavor, parameter[self, flavor, body]]: constant[Associate a Neutron service flavor with a profile.] return[call[name[self].post, parameter[binary_operation[name[self].flavor_profile_bindings_path <ast.Mod object at 0x7da2590d6920> name[flavor]]]]]
keyword[def] identifier[associate_flavor] ( identifier[self] , identifier[flavor] , identifier[body] ): literal[string] keyword[return] identifier[self] . identifier[post] ( identifier[self] . identifier[flavor_profile_bindings_path] % ( identifier[flavor] ), identifier[body] = identifier[body] )
def associate_flavor(self, flavor, body): """Associate a Neutron service flavor with a profile.""" return self.post(self.flavor_profile_bindings_path % flavor, body=body)