Dataset schema (all columns are strings; min/max string lengths shown):

  code             75 to 104k characters
  code_sememe      47 to 309k characters
  token_type       215 to 214k characters
  code_dependency  75 to 155k characters
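Each row below pairs a raw Python function (code) with three derived views of the same function: an AST-style sememe rendering (code_sememe), a stream of token-class tags (token_type), and the source re-annotated with control-dependency comments (code_dependency). A minimal sketch of consuming such a four-column dataset with the Hugging Face datasets library follows, assuming the rows are published on the Hub; the repository id "example-org/code-sememe" is a hypothetical placeholder, not this dataset's real name.

# Minimal loading sketch; "example-org/code-sememe" is a hypothetical
# placeholder id, and the column names are taken from the schema above.
from datasets import load_dataset

ds = load_dataset("example-org/code-sememe", split="train")
row = ds[0]
for column in ("code", "code_sememe", "token_type", "code_dependency"):
    # Each field is stored as a single string per row.
    print(column, len(row[column]))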
def write(self, input_str):
    """
    Adds content to the Dockerfile.

    :param input_str: Content.
    :type input_str: unicode | str
    """
    self.check_not_finalized()
    if isinstance(input_str, six.binary_type):
        self.fileobj.write(input_str)
    else:
        self.fileobj.write(input_str.encode('utf-8'))
def function[write, parameter[self, input_str]]: constant[ Adds content to the Dockerfile. :param input_str: Content. :type input_str: unicode | str ] call[name[self].check_not_finalized, parameter[]] if call[name[isinstance], parameter[name[input_str], name[six].binary_type]] begin[:] call[name[self].fileobj.write, parameter[name[input_str]]]
keyword[def] identifier[write] ( identifier[self] , identifier[input_str] ): literal[string] identifier[self] . identifier[check_not_finalized] () keyword[if] identifier[isinstance] ( identifier[input_str] , identifier[six] . identifier[binary_type] ): identifier[self] . identifier[fileobj] . identifier[write] ( identifier[input_str] ) keyword[else] : identifier[self] . identifier[fileobj] . identifier[write] ( identifier[input_str] . identifier[encode] ( literal[string] ))
def write(self, input_str):
    """
    Adds content to the Dockerfile.

    :param input_str: Content.
    :type input_str: unicode | str
    """
    self.check_not_finalized()
    if isinstance(input_str, six.binary_type):
        self.fileobj.write(input_str) # depends on [control=['if'], data=[]]
    else:
        self.fileobj.write(input_str.encode('utf-8'))
def _get_battery(self):
    """
    Get the battery
    """
    try:
        battery = {
            "charge": self._dev.charge(),
            "isCharging": self._dev.isCharging() == 1,
        }
    except Exception:
        return None
    return battery
def function[_get_battery, parameter[self]]: constant[ Get the battery ] <ast.Try object at 0x7da20c795570> return[name[battery]]
keyword[def] identifier[_get_battery] ( identifier[self] ): literal[string] keyword[try] : identifier[battery] ={ literal[string] : identifier[self] . identifier[_dev] . identifier[charge] (), literal[string] : identifier[self] . identifier[_dev] . identifier[isCharging] ()== literal[int] , } keyword[except] identifier[Exception] : keyword[return] keyword[None] keyword[return] identifier[battery]
def _get_battery(self):
    """
    Get the battery
    """
    try:
        battery = {'charge': self._dev.charge(), 'isCharging': self._dev.isCharging() == 1} # depends on [control=['try'], data=[]]
    except Exception:
        return None # depends on [control=['except'], data=[]]
    return battery
def main():
    """Run playbook"""
    for flag in ('--check',):
        if flag not in sys.argv:
            sys.argv.append(flag)
    obj = PlaybookCLI(sys.argv)
    obj.parse()
    obj.run()
def function[main, parameter[]]: constant[Run playbook] for taget[name[flag]] in starred[tuple[[<ast.Constant object at 0x7da1b1605a50>]]] begin[:] if compare[name[flag] <ast.NotIn object at 0x7da2590d7190> name[sys].argv] begin[:] call[name[sys].argv.append, parameter[name[flag]]] variable[obj] assign[=] call[name[PlaybookCLI], parameter[name[sys].argv]] call[name[obj].parse, parameter[]] call[name[obj].run, parameter[]]
keyword[def] identifier[main] (): literal[string] keyword[for] identifier[flag] keyword[in] ( literal[string] ,): keyword[if] identifier[flag] keyword[not] keyword[in] identifier[sys] . identifier[argv] : identifier[sys] . identifier[argv] . identifier[append] ( identifier[flag] ) identifier[obj] = identifier[PlaybookCLI] ( identifier[sys] . identifier[argv] ) identifier[obj] . identifier[parse] () identifier[obj] . identifier[run] ()
def main():
    """Run playbook"""
    for flag in ('--check',):
        if flag not in sys.argv:
            sys.argv.append(flag) # depends on [control=['if'], data=['flag']] # depends on [control=['for'], data=['flag']]
    obj = PlaybookCLI(sys.argv)
    obj.parse()
    obj.run()
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    if hasattr(self, 'environments') and self.environments is not None:
        _dict['environments'] = [x._to_dict() for x in self.environments]
    return _dict
def function[_to_dict, parameter[self]]: constant[Return a json dictionary representing this model.] variable[_dict] assign[=] dictionary[[], []] if <ast.BoolOp object at 0x7da18bcc94e0> begin[:] call[name[_dict]][constant[environments]] assign[=] <ast.ListComp object at 0x7da18bcc9690> return[name[_dict]]
keyword[def] identifier[_to_dict] ( identifier[self] ): literal[string] identifier[_dict] ={} keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[environments] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]=[ identifier[x] . identifier[_to_dict] () keyword[for] identifier[x] keyword[in] identifier[self] . identifier[environments] ] keyword[return] identifier[_dict]
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    if hasattr(self, 'environments') and self.environments is not None:
        _dict['environments'] = [x._to_dict() for x in self.environments] # depends on [control=['if'], data=[]]
    return _dict
def list_message_files(package, suffix=".mo"):
    """Return list of all found message files and their installation paths."""
    for fname in glob.glob("po/*" + suffix):
        # basename (without extension) is a locale name
        localename = os.path.splitext(os.path.basename(fname))[0]
        domainname = "%s.mo" % package.lower()
        yield (fname, os.path.join(
            "share", "locale", localename, "LC_MESSAGES", domainname))
def function[list_message_files, parameter[package, suffix]]: constant[Return list of all found message files and their installation paths.] for taget[name[fname]] in starred[call[name[glob].glob, parameter[binary_operation[constant[po/*] + name[suffix]]]]] begin[:] variable[localename] assign[=] call[call[name[os].path.splitext, parameter[call[name[os].path.basename, parameter[name[fname]]]]]][constant[0]] variable[domainname] assign[=] binary_operation[constant[%s.mo] <ast.Mod object at 0x7da2590d6920> call[name[package].lower, parameter[]]] <ast.Yield object at 0x7da1b0ab9f00>
keyword[def] identifier[list_message_files] ( identifier[package] , identifier[suffix] = literal[string] ): literal[string] keyword[for] identifier[fname] keyword[in] identifier[glob] . identifier[glob] ( literal[string] + identifier[suffix] ): identifier[localename] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[fname] ))[ literal[int] ] identifier[domainname] = literal[string] % identifier[package] . identifier[lower] () keyword[yield] ( identifier[fname] , identifier[os] . identifier[path] . identifier[join] ( literal[string] , literal[string] , identifier[localename] , literal[string] , identifier[domainname] ))
def list_message_files(package, suffix='.mo'):
    """Return list of all found message files and their installation paths."""
    for fname in glob.glob('po/*' + suffix):
        # basename (without extension) is a locale name
        localename = os.path.splitext(os.path.basename(fname))[0]
        domainname = '%s.mo' % package.lower()
        yield (fname, os.path.join('share', 'locale', localename, 'LC_MESSAGES', domainname)) # depends on [control=['for'], data=['fname']]
def concatclusts(outhandle, alignbits):
    """ concatenates sorted aligned cluster tmpfiles and removes them."""
    with gzip.open(outhandle, 'wb') as out:
        for fname in alignbits:
            with open(fname) as infile:
                out.write(infile.read() + "//\n//\n")
def function[concatclusts, parameter[outhandle, alignbits]]: constant[ concatenates sorted aligned cluster tmpfiles and removes them.] with call[name[gzip].open, parameter[name[outhandle], constant[wb]]] begin[:] for taget[name[fname]] in starred[name[alignbits]] begin[:] with call[name[open], parameter[name[fname]]] begin[:] call[name[out].write, parameter[binary_operation[call[name[infile].read, parameter[]] + constant[// // ]]]]
keyword[def] identifier[concatclusts] ( identifier[outhandle] , identifier[alignbits] ): literal[string] keyword[with] identifier[gzip] . identifier[open] ( identifier[outhandle] , literal[string] ) keyword[as] identifier[out] : keyword[for] identifier[fname] keyword[in] identifier[alignbits] : keyword[with] identifier[open] ( identifier[fname] ) keyword[as] identifier[infile] : identifier[out] . identifier[write] ( identifier[infile] . identifier[read] ()+ literal[string] )
def concatclusts(outhandle, alignbits):
    """ concatenates sorted aligned cluster tmpfiles and removes them."""
    with gzip.open(outhandle, 'wb') as out:
        for fname in alignbits:
            with open(fname) as infile:
                out.write(infile.read() + '//\n//\n') # depends on [control=['with'], data=['infile']] # depends on [control=['for'], data=['fname']] # depends on [control=['with'], data=['out']]
def present(name, auth=None, **kwargs):
    '''
    Ensure an role exists

    name
        Name of the role

    description
        An arbitrary description of the role
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)
    __salt__['keystoneng.setup_clouds'](auth)

    kwargs['name'] = name
    role = __salt__['keystoneng.role_get'](**kwargs)

    if not role:
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['comment'] = 'Role will be created.'
            return ret

        role = __salt__['keystoneng.role_create'](**kwargs)
        ret['changes']['id'] = role.id
        ret['changes']['name'] = role.name
        ret['comment'] = 'Created role'
        return ret

    # NOTE(SamYaple): Update support pending https://review.openstack.org/#/c/496992/
    return ret
def function[present, parameter[name, auth]]: constant[ Ensure an role exists name Name of the role description An arbitrary description of the role ] variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b217b9a0>, <ast.Constant object at 0x7da1b217bb50>, <ast.Constant object at 0x7da1b217b970>, <ast.Constant object at 0x7da1b217b430>], [<ast.Name object at 0x7da1b217b460>, <ast.Dict object at 0x7da1b217b280>, <ast.Constant object at 0x7da1b217b310>, <ast.Constant object at 0x7da1b217b670>]] variable[kwargs] assign[=] call[call[name[__utils__]][constant[args.clean_kwargs]], parameter[]] call[call[name[__salt__]][constant[keystoneng.setup_clouds]], parameter[name[auth]]] call[name[kwargs]][constant[name]] assign[=] name[name] variable[role] assign[=] call[call[name[__salt__]][constant[keystoneng.role_get]], parameter[]] if <ast.UnaryOp object at 0x7da1b21a8640> begin[:] if compare[call[name[__opts__]][constant[test]] is constant[True]] begin[:] call[name[ret]][constant[result]] assign[=] constant[None] call[name[ret]][constant[changes]] assign[=] name[kwargs] call[name[ret]][constant[comment]] assign[=] constant[Role will be created.] return[name[ret]] variable[role] assign[=] call[call[name[__salt__]][constant[keystoneng.role_create]], parameter[]] call[call[name[ret]][constant[changes]]][constant[id]] assign[=] name[role].id call[call[name[ret]][constant[changes]]][constant[name]] assign[=] name[role].name call[name[ret]][constant[comment]] assign[=] constant[Created role] return[name[ret]] return[name[ret]]
keyword[def] identifier[present] ( identifier[name] , identifier[auth] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[ret] ={ literal[string] : identifier[name] , literal[string] :{}, literal[string] : keyword[True] , literal[string] : literal[string] } identifier[kwargs] = identifier[__utils__] [ literal[string] ](** identifier[kwargs] ) identifier[__salt__] [ literal[string] ]( identifier[auth] ) identifier[kwargs] [ literal[string] ]= identifier[name] identifier[role] = identifier[__salt__] [ literal[string] ](** identifier[kwargs] ) keyword[if] keyword[not] identifier[role] : keyword[if] identifier[__opts__] [ literal[string] ] keyword[is] keyword[True] : identifier[ret] [ literal[string] ]= keyword[None] identifier[ret] [ literal[string] ]= identifier[kwargs] identifier[ret] [ literal[string] ]= literal[string] keyword[return] identifier[ret] identifier[role] = identifier[__salt__] [ literal[string] ](** identifier[kwargs] ) identifier[ret] [ literal[string] ][ literal[string] ]= identifier[role] . identifier[id] identifier[ret] [ literal[string] ][ literal[string] ]= identifier[role] . identifier[name] identifier[ret] [ literal[string] ]= literal[string] keyword[return] identifier[ret] keyword[return] identifier[ret]
def present(name, auth=None, **kwargs):
    """
    Ensure an role exists

    name
        Name of the role

    description
        An arbitrary description of the role
    """
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    kwargs = __utils__['args.clean_kwargs'](**kwargs)
    __salt__['keystoneng.setup_clouds'](auth)
    kwargs['name'] = name
    role = __salt__['keystoneng.role_get'](**kwargs)
    if not role:
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['comment'] = 'Role will be created.'
            return ret # depends on [control=['if'], data=[]]
        role = __salt__['keystoneng.role_create'](**kwargs)
        ret['changes']['id'] = role.id
        ret['changes']['name'] = role.name
        ret['comment'] = 'Created role'
        return ret # depends on [control=['if'], data=[]]
    # NOTE(SamYaple): Update support pending https://review.openstack.org/#/c/496992/
    return ret
def require(*args, **kwargs):
    '''
    Install a set of packages using pip

    This is designed to be an interface for IPython notebooks that
    replicates the requirements.txt pip format. This lets notebooks
    specify which versions of packages they need inside the notebook
    itself.

    This function is the general-purpose interface that lets the caller
    specify any version string for any package.
    '''
    # If called with no arguments, returns requirements list
    if not args and not kwargs:
        return freeze()

    # Construct array of requirements
    requirements = list(args)
    extra = ['{}{}'.format(kw, kwargs[kw]) for kw in kwargs]
    requirements.extend(extra)

    args = ['install', '-q']
    args.extend(requirements)
    pip.main(args)
def function[require, parameter[]]: constant[ Install a set of packages using pip This is designed to be an interface for IPython notebooks that replicates the requirements.txt pip format. This lets notebooks specify which versions of packages they need inside the notebook itself. This function is the general-purpose interface that lets the caller specify any version string for any package. ] if <ast.BoolOp object at 0x7da18f813070> begin[:] return[call[name[freeze], parameter[]]] variable[requirements] assign[=] call[name[list], parameter[name[args]]] variable[extra] assign[=] <ast.ListComp object at 0x7da20c7cbca0> call[name[requirements].extend, parameter[name[extra]]] variable[args] assign[=] list[[<ast.Constant object at 0x7da18ede7a00>, <ast.Constant object at 0x7da18ede7f10>]] call[name[args].extend, parameter[name[requirements]]] call[name[pip].main, parameter[name[args]]]
keyword[def] identifier[require] (* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[args] keyword[and] keyword[not] identifier[kwargs] : keyword[return] identifier[freeze] () identifier[requirements] = identifier[list] ( identifier[args] ) identifier[extra] =[ literal[string] . identifier[format] ( identifier[kw] , identifier[kwargs] [ identifier[kw] ]) keyword[for] identifier[kw] keyword[in] identifier[kwargs] ] identifier[requirements] . identifier[extend] ( identifier[extra] ) identifier[args] =[ literal[string] , literal[string] ] identifier[args] . identifier[extend] ( identifier[requirements] ) identifier[pip] . identifier[main] ( identifier[args] )
def require(*args, **kwargs):
    """
    Install a set of packages using pip

    This is designed to be an interface for IPython notebooks that
    replicates the requirements.txt pip format. This lets notebooks
    specify which versions of packages they need inside the notebook
    itself.

    This function is the general-purpose interface that lets the caller
    specify any version string for any package.
    """
    # If called with no arguments, returns requirements list
    if not args and (not kwargs):
        return freeze() # depends on [control=['if'], data=[]]
    # Construct array of requirements
    requirements = list(args)
    extra = ['{}{}'.format(kw, kwargs[kw]) for kw in kwargs]
    requirements.extend(extra)
    args = ['install', '-q']
    args.extend(requirements)
    pip.main(args)
def detach_screens(self, screen_ids):
    """Unplugs monitors from the virtual graphics card.

    in screen_ids of type int
    """
    if not isinstance(screen_ids, list):
        raise TypeError("screen_ids can only be an instance of type list")
    for a in screen_ids[:10]:
        if not isinstance(a, baseinteger):
            raise TypeError(
                "array can only contain objects of type baseinteger")
    self._call("detachScreens",
               in_p=[screen_ids])
def function[detach_screens, parameter[self, screen_ids]]: constant[Unplugs monitors from the virtual graphics card. in screen_ids of type int ] if <ast.UnaryOp object at 0x7da20e9b3d00> begin[:] <ast.Raise object at 0x7da20e9b1720> for taget[name[a]] in starred[call[name[screen_ids]][<ast.Slice object at 0x7da20e9b2a40>]] begin[:] if <ast.UnaryOp object at 0x7da204347970> begin[:] <ast.Raise object at 0x7da204346ce0> call[name[self]._call, parameter[constant[detachScreens]]]
keyword[def] identifier[detach_screens] ( identifier[self] , identifier[screen_ids] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[screen_ids] , identifier[list] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[for] identifier[a] keyword[in] identifier[screen_ids] [: literal[int] ]: keyword[if] keyword[not] identifier[isinstance] ( identifier[a] , identifier[baseinteger] ): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[self] . identifier[_call] ( literal[string] , identifier[in_p] =[ identifier[screen_ids] ])
def detach_screens(self, screen_ids):
    """Unplugs monitors from the virtual graphics card.

    in screen_ids of type int
    """
    if not isinstance(screen_ids, list):
        raise TypeError('screen_ids can only be an instance of type list') # depends on [control=['if'], data=[]]
    for a in screen_ids[:10]:
        if not isinstance(a, baseinteger):
            raise TypeError('array can only contain objects of type baseinteger') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']]
    self._call('detachScreens', in_p=[screen_ids])
def _send_frame(self, connection, frame):
    """
    Sends a frame to a specific subscriber connection.

    (This method assumes it is being called from within a lock-guarded public
    method.)

    @param connection: The subscriber connection object to send to.
    @type connection: L{coilmq.server.StompConnection}

    @param frame: The frame to send.
    @type frame: L{stompclient.frame.Frame}
    """
    assert connection is not None
    assert frame is not None

    self.log.debug("Delivering frame %s to connection %s" % (frame, connection))

    if connection.reliable_subscriber:
        if connection in self._pending:
            raise RuntimeError("Connection already has a pending frame.")
        self.log.debug(
            "Tracking frame %s as pending for connection %s" % (frame, connection))
        self._pending[connection] = frame

    connection.send_frame(frame)
def function[_send_frame, parameter[self, connection, frame]]: constant[ Sends a frame to a specific subscriber connection. (This method assumes it is being called from within a lock-guarded public method.) @param connection: The subscriber connection object to send to. @type connection: L{coilmq.server.StompConnection} @param frame: The frame to send. @type frame: L{stompclient.frame.Frame} ] assert[compare[name[connection] is_not constant[None]]] assert[compare[name[frame] is_not constant[None]]] call[name[self].log.debug, parameter[binary_operation[constant[Delivering frame %s to connection %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b193b430>, <ast.Name object at 0x7da1b193b640>]]]]] if name[connection].reliable_subscriber begin[:] if compare[name[connection] in name[self]._pending] begin[:] <ast.Raise object at 0x7da1b193abf0> call[name[self].log.debug, parameter[binary_operation[constant[Tracking frame %s as pending for connection %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1938490>, <ast.Name object at 0x7da1b1938250>]]]]] call[name[self]._pending][name[connection]] assign[=] name[frame] call[name[connection].send_frame, parameter[name[frame]]]
keyword[def] identifier[_send_frame] ( identifier[self] , identifier[connection] , identifier[frame] ): literal[string] keyword[assert] identifier[connection] keyword[is] keyword[not] keyword[None] keyword[assert] identifier[frame] keyword[is] keyword[not] keyword[None] identifier[self] . identifier[log] . identifier[debug] ( literal[string] % ( identifier[frame] , identifier[connection] )) keyword[if] identifier[connection] . identifier[reliable_subscriber] : keyword[if] identifier[connection] keyword[in] identifier[self] . identifier[_pending] : keyword[raise] identifier[RuntimeError] ( literal[string] ) identifier[self] . identifier[log] . identifier[debug] ( literal[string] %( identifier[frame] , identifier[connection] )) identifier[self] . identifier[_pending] [ identifier[connection] ]= identifier[frame] identifier[connection] . identifier[send_frame] ( identifier[frame] )
def _send_frame(self, connection, frame):
    """
    Sends a frame to a specific subscriber connection.

    (This method assumes it is being called from within a lock-guarded public
    method.)

    @param connection: The subscriber connection object to send to.
    @type connection: L{coilmq.server.StompConnection}

    @param frame: The frame to send.
    @type frame: L{stompclient.frame.Frame}
    """
    assert connection is not None
    assert frame is not None
    self.log.debug('Delivering frame %s to connection %s' % (frame, connection))
    if connection.reliable_subscriber:
        if connection in self._pending:
            raise RuntimeError('Connection already has a pending frame.') # depends on [control=['if'], data=[]]
        self.log.debug('Tracking frame %s as pending for connection %s' % (frame, connection))
        self._pending[connection] = frame # depends on [control=['if'], data=[]]
    connection.send_frame(frame)
def compile_to_python(exp, env, done=None):
    '''assemble steps from dao expression to python code'''
    original_exp = exp
    compiler = Compiler()
    if done is None:
        done = il.Done(compiler.new_var(il.ConstLocalVar('v')))
    compiler.exit_block_cont_map = {}
    compiler.continue_block_cont_map = {}
    compiler.protect_cont = done
    if env is None:
        env = Environment()
    exp = element(exp)
    exp = exp.alpha(env, compiler)
    exp = exp.cps(compiler, done)
    exp.analyse(compiler)
    env = Environment()
    exp = exp.optimize(env, compiler)
    #exp = exp.tail_recursive_convert()
    function = compiler.new_var(il.ConstLocalVar('compiled_dao_function'))
    exp = il.Function(function, (), exp)
    exp = il.begin(*exp.pythonize(env, compiler)[0])
    if isinstance(exp, il.Begin):
        exp = exp.statements[0]
    exp.body = exp.body.replace_return_with_yield()
    compiler = Compiler()
    result = exp.to_code(compiler)
    return prelude + result
def function[compile_to_python, parameter[exp, env, done]]: constant[assemble steps from dao expression to python code] variable[original_exp] assign[=] name[exp] variable[compiler] assign[=] call[name[Compiler], parameter[]] if compare[name[done] is constant[None]] begin[:] variable[done] assign[=] call[name[il].Done, parameter[call[name[compiler].new_var, parameter[call[name[il].ConstLocalVar, parameter[constant[v]]]]]]] name[compiler].exit_block_cont_map assign[=] dictionary[[], []] name[compiler].continue_block_cont_map assign[=] dictionary[[], []] name[compiler].protect_cont assign[=] name[done] if compare[name[env] is constant[None]] begin[:] variable[env] assign[=] call[name[Environment], parameter[]] variable[exp] assign[=] call[name[element], parameter[name[exp]]] variable[exp] assign[=] call[name[exp].alpha, parameter[name[env], name[compiler]]] variable[exp] assign[=] call[name[exp].cps, parameter[name[compiler], name[done]]] call[name[exp].analyse, parameter[name[compiler]]] variable[env] assign[=] call[name[Environment], parameter[]] variable[exp] assign[=] call[name[exp].optimize, parameter[name[env], name[compiler]]] variable[function] assign[=] call[name[compiler].new_var, parameter[call[name[il].ConstLocalVar, parameter[constant[compiled_dao_function]]]]] variable[exp] assign[=] call[name[il].Function, parameter[name[function], tuple[[]], name[exp]]] variable[exp] assign[=] call[name[il].begin, parameter[<ast.Starred object at 0x7da207f03340>]] if call[name[isinstance], parameter[name[exp], name[il].Begin]] begin[:] variable[exp] assign[=] call[name[exp].statements][constant[0]] name[exp].body assign[=] call[name[exp].body.replace_return_with_yield, parameter[]] variable[compiler] assign[=] call[name[Compiler], parameter[]] variable[result] assign[=] call[name[exp].to_code, parameter[name[compiler]]] return[binary_operation[name[prelude] + name[result]]]
keyword[def] identifier[compile_to_python] ( identifier[exp] , identifier[env] , identifier[done] = keyword[None] ): literal[string] identifier[original_exp] = identifier[exp] identifier[compiler] = identifier[Compiler] () keyword[if] identifier[done] keyword[is] keyword[None] : identifier[done] = identifier[il] . identifier[Done] ( identifier[compiler] . identifier[new_var] ( identifier[il] . identifier[ConstLocalVar] ( literal[string] ))) identifier[compiler] . identifier[exit_block_cont_map] ={} identifier[compiler] . identifier[continue_block_cont_map] ={} identifier[compiler] . identifier[protect_cont] = identifier[done] keyword[if] identifier[env] keyword[is] keyword[None] : identifier[env] = identifier[Environment] () identifier[exp] = identifier[element] ( identifier[exp] ) identifier[exp] = identifier[exp] . identifier[alpha] ( identifier[env] , identifier[compiler] ) identifier[exp] = identifier[exp] . identifier[cps] ( identifier[compiler] , identifier[done] ) identifier[exp] . identifier[analyse] ( identifier[compiler] ) identifier[env] = identifier[Environment] () identifier[exp] = identifier[exp] . identifier[optimize] ( identifier[env] , identifier[compiler] ) identifier[function] = identifier[compiler] . identifier[new_var] ( identifier[il] . identifier[ConstLocalVar] ( literal[string] )) identifier[exp] = identifier[il] . identifier[Function] ( identifier[function] ,(), identifier[exp] ) identifier[exp] = identifier[il] . identifier[begin] (* identifier[exp] . identifier[pythonize] ( identifier[env] , identifier[compiler] )[ literal[int] ]) keyword[if] identifier[isinstance] ( identifier[exp] , identifier[il] . identifier[Begin] ): identifier[exp] = identifier[exp] . identifier[statements] [ literal[int] ] identifier[exp] . identifier[body] = identifier[exp] . identifier[body] . identifier[replace_return_with_yield] () identifier[compiler] = identifier[Compiler] () identifier[result] = identifier[exp] . identifier[to_code] ( identifier[compiler] ) keyword[return] identifier[prelude] + identifier[result]
def compile_to_python(exp, env, done=None):
    """assemble steps from dao expression to python code"""
    original_exp = exp
    compiler = Compiler()
    if done is None:
        done = il.Done(compiler.new_var(il.ConstLocalVar('v'))) # depends on [control=['if'], data=['done']]
    compiler.exit_block_cont_map = {}
    compiler.continue_block_cont_map = {}
    compiler.protect_cont = done
    if env is None:
        env = Environment() # depends on [control=['if'], data=['env']]
    exp = element(exp)
    exp = exp.alpha(env, compiler)
    exp = exp.cps(compiler, done)
    exp.analyse(compiler)
    env = Environment()
    exp = exp.optimize(env, compiler)
    #exp = exp.tail_recursive_convert()
    function = compiler.new_var(il.ConstLocalVar('compiled_dao_function'))
    exp = il.Function(function, (), exp)
    exp = il.begin(*exp.pythonize(env, compiler)[0])
    if isinstance(exp, il.Begin):
        exp = exp.statements[0] # depends on [control=['if'], data=[]]
    exp.body = exp.body.replace_return_with_yield()
    compiler = Compiler()
    result = exp.to_code(compiler)
    return prelude + result
def num_taps(sample_rate, transitionwidth, gpass, gstop):
    """Returns the number of taps for an FIR filter with the given shape

    Parameters
    ----------
    sample_rate : `float`
        sampling rate of target data

    transitionwidth : `float`
        the width (in the same units as `sample_rate` of the transition
        from stop-band to pass-band

    gpass : `float`
        the maximum loss in the passband (dB)

    gstop : `float`
        the minimum attenuation in the stopband (dB)

    Returns
    -------
    numtaps : `int`
        the number of taps for an FIR filter

    Notes
    -----
    Credit: http://dsp.stackexchange.com/a/31077/8223
    """
    gpass = 10 ** (-gpass / 10.)
    gstop = 10 ** (-gstop / 10.)
    return int(2/3. * log10(1 / (10 * gpass * gstop)) *
               sample_rate / transitionwidth)
def function[num_taps, parameter[sample_rate, transitionwidth, gpass, gstop]]: constant[Returns the number of taps for an FIR filter with the given shape Parameters ---------- sample_rate : `float` sampling rate of target data transitionwidth : `float` the width (in the same units as `sample_rate` of the transition from stop-band to pass-band gpass : `float` the maximum loss in the passband (dB) gstop : `float` the minimum attenuation in the stopband (dB) Returns ------- numtaps : `int` the number of taps for an FIR filter Notes ----- Credit: http://dsp.stackexchange.com/a/31077/8223 ] variable[gpass] assign[=] binary_operation[constant[10] ** binary_operation[<ast.UnaryOp object at 0x7da204566c50> / constant[10.0]]] variable[gstop] assign[=] binary_operation[constant[10] ** binary_operation[<ast.UnaryOp object at 0x7da204566f20> / constant[10.0]]] return[call[name[int], parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[2] / constant[3.0]] * call[name[log10], parameter[binary_operation[constant[1] / binary_operation[binary_operation[constant[10] * name[gpass]] * name[gstop]]]]]] * name[sample_rate]] / name[transitionwidth]]]]]
keyword[def] identifier[num_taps] ( identifier[sample_rate] , identifier[transitionwidth] , identifier[gpass] , identifier[gstop] ): literal[string] identifier[gpass] = literal[int] **(- identifier[gpass] / literal[int] ) identifier[gstop] = literal[int] **(- identifier[gstop] / literal[int] ) keyword[return] identifier[int] ( literal[int] / literal[int] * identifier[log10] ( literal[int] /( literal[int] * identifier[gpass] * identifier[gstop] ))* identifier[sample_rate] / identifier[transitionwidth] )
def num_taps(sample_rate, transitionwidth, gpass, gstop):
    """Returns the number of taps for an FIR filter with the given shape

    Parameters
    ----------
    sample_rate : `float`
        sampling rate of target data

    transitionwidth : `float`
        the width (in the same units as `sample_rate` of the transition
        from stop-band to pass-band

    gpass : `float`
        the maximum loss in the passband (dB)

    gstop : `float`
        the minimum attenuation in the stopband (dB)

    Returns
    -------
    numtaps : `int`
        the number of taps for an FIR filter

    Notes
    -----
    Credit: http://dsp.stackexchange.com/a/31077/8223
    """
    gpass = 10 ** (-gpass / 10.0)
    gstop = 10 ** (-gstop / 10.0)
    return int(2 / 3.0 * log10(1 / (10 * gpass * gstop)) * sample_rate / transitionwidth)
def read(self):
    """Read the config file, if it exists. Using defaults otherwise."""
    for config_file in self.config_file_paths():
        logger.info('Search glances.conf file in {}'.format(config_file))
        if os.path.exists(config_file):
            try:
                with open(config_file, encoding='utf-8') as f:
                    self.parser.read_file(f)
                    self.parser.read(f)
                logger.info("Read configuration file '{}'".format(config_file))
            except UnicodeDecodeError as err:
                logger.error("Can not read configuration file '{}': {}".format(config_file, err))
                sys.exit(1)
            # Save the loaded configuration file path (issue #374)
            self._loaded_config_file = config_file
            break

    # Quicklook
    if not self.parser.has_section('quicklook'):
        self.parser.add_section('quicklook')
    self.set_default_cwc('quicklook', 'cpu')
    self.set_default_cwc('quicklook', 'mem')
    self.set_default_cwc('quicklook', 'swap')

    # CPU
    if not self.parser.has_section('cpu'):
        self.parser.add_section('cpu')
    self.set_default_cwc('cpu', 'user')
    self.set_default_cwc('cpu', 'system')
    self.set_default_cwc('cpu', 'steal')
    # By default I/O wait should be lower than 1/number of CPU cores
    iowait_bottleneck = (1.0 / multiprocessing.cpu_count()) * 100.0
    self.set_default_cwc('cpu', 'iowait',
                         [str(iowait_bottleneck - (iowait_bottleneck * 0.20)),
                          str(iowait_bottleneck - (iowait_bottleneck * 0.10)),
                          str(iowait_bottleneck)])
    # Context switches bottleneck identification #1212
    ctx_switches_bottleneck = (500000 * 0.10) * multiprocessing.cpu_count()
    self.set_default_cwc('cpu', 'ctx_switches',
                         [str(ctx_switches_bottleneck - (ctx_switches_bottleneck * 0.20)),
                          str(ctx_switches_bottleneck - (ctx_switches_bottleneck * 0.10)),
                          str(ctx_switches_bottleneck)])

    # Per-CPU
    if not self.parser.has_section('percpu'):
        self.parser.add_section('percpu')
    self.set_default_cwc('percpu', 'user')
    self.set_default_cwc('percpu', 'system')

    # Load
    if not self.parser.has_section('load'):
        self.parser.add_section('load')
    self.set_default_cwc('load', cwc=['0.7', '1.0', '5.0'])

    # Mem
    if not self.parser.has_section('mem'):
        self.parser.add_section('mem')
    self.set_default_cwc('mem')

    # Swap
    if not self.parser.has_section('memswap'):
        self.parser.add_section('memswap')
    self.set_default_cwc('memswap')

    # NETWORK
    if not self.parser.has_section('network'):
        self.parser.add_section('network')
    self.set_default_cwc('network', 'rx')
    self.set_default_cwc('network', 'tx')

    # FS
    if not self.parser.has_section('fs'):
        self.parser.add_section('fs')
    self.set_default_cwc('fs')

    # Sensors
    if not self.parser.has_section('sensors'):
        self.parser.add_section('sensors')
    self.set_default_cwc('sensors', 'temperature_core', cwc=['60', '70', '80'])
    self.set_default_cwc('sensors', 'temperature_hdd', cwc=['45', '52', '60'])
    self.set_default_cwc('sensors', 'battery', cwc=['80', '90', '95'])

    # Process list
    if not self.parser.has_section('processlist'):
        self.parser.add_section('processlist')
    self.set_default_cwc('processlist', 'cpu')
    self.set_default_cwc('processlist', 'mem')
def function[read, parameter[self]]: constant[Read the config file, if it exists. Using defaults otherwise.] for taget[name[config_file]] in starred[call[name[self].config_file_paths, parameter[]]] begin[:] call[name[logger].info, parameter[call[constant[Search glances.conf file in {}].format, parameter[name[config_file]]]]] if call[name[os].path.exists, parameter[name[config_file]]] begin[:] <ast.Try object at 0x7da2041d8af0> name[self]._loaded_config_file assign[=] name[config_file] break if <ast.UnaryOp object at 0x7da18f09d5d0> begin[:] call[name[self].parser.add_section, parameter[constant[quicklook]]] call[name[self].set_default_cwc, parameter[constant[quicklook], constant[cpu]]] call[name[self].set_default_cwc, parameter[constant[quicklook], constant[mem]]] call[name[self].set_default_cwc, parameter[constant[quicklook], constant[swap]]] if <ast.UnaryOp object at 0x7da18f09e1a0> begin[:] call[name[self].parser.add_section, parameter[constant[cpu]]] call[name[self].set_default_cwc, parameter[constant[cpu], constant[user]]] call[name[self].set_default_cwc, parameter[constant[cpu], constant[system]]] call[name[self].set_default_cwc, parameter[constant[cpu], constant[steal]]] variable[iowait_bottleneck] assign[=] binary_operation[binary_operation[constant[1.0] / call[name[multiprocessing].cpu_count, parameter[]]] * constant[100.0]] call[name[self].set_default_cwc, parameter[constant[cpu], constant[iowait], list[[<ast.Call object at 0x7da18f09d3f0>, <ast.Call object at 0x7da18f09d2a0>, <ast.Call object at 0x7da18f09e650>]]]] variable[ctx_switches_bottleneck] assign[=] binary_operation[binary_operation[constant[500000] * constant[0.1]] * call[name[multiprocessing].cpu_count, parameter[]]] call[name[self].set_default_cwc, parameter[constant[cpu], constant[ctx_switches], list[[<ast.Call object at 0x7da18f09d210>, <ast.Call object at 0x7da18f09ed10>, <ast.Call object at 0x7da18f09f280>]]]] if <ast.UnaryOp object at 0x7da18f09cb20> begin[:] call[name[self].parser.add_section, parameter[constant[percpu]]] call[name[self].set_default_cwc, parameter[constant[percpu], constant[user]]] call[name[self].set_default_cwc, parameter[constant[percpu], constant[system]]] if <ast.UnaryOp object at 0x7da18f09c910> begin[:] call[name[self].parser.add_section, parameter[constant[load]]] call[name[self].set_default_cwc, parameter[constant[load]]] if <ast.UnaryOp object at 0x7da18f09d0c0> begin[:] call[name[self].parser.add_section, parameter[constant[mem]]] call[name[self].set_default_cwc, parameter[constant[mem]]] if <ast.UnaryOp object at 0x7da18ede7bb0> begin[:] call[name[self].parser.add_section, parameter[constant[memswap]]] call[name[self].set_default_cwc, parameter[constant[memswap]]] if <ast.UnaryOp object at 0x7da2044c0820> begin[:] call[name[self].parser.add_section, parameter[constant[network]]] call[name[self].set_default_cwc, parameter[constant[network], constant[rx]]] call[name[self].set_default_cwc, parameter[constant[network], constant[tx]]] if <ast.UnaryOp object at 0x7da2044c12a0> begin[:] call[name[self].parser.add_section, parameter[constant[fs]]] call[name[self].set_default_cwc, parameter[constant[fs]]] if <ast.UnaryOp object at 0x7da2044c0d90> begin[:] call[name[self].parser.add_section, parameter[constant[sensors]]] call[name[self].set_default_cwc, parameter[constant[sensors], constant[temperature_core]]] call[name[self].set_default_cwc, parameter[constant[sensors], constant[temperature_hdd]]] call[name[self].set_default_cwc, parameter[constant[sensors], constant[battery]]] if 
<ast.UnaryOp object at 0x7da2044c2950> begin[:] call[name[self].parser.add_section, parameter[constant[processlist]]] call[name[self].set_default_cwc, parameter[constant[processlist], constant[cpu]]] call[name[self].set_default_cwc, parameter[constant[processlist], constant[mem]]]
keyword[def] identifier[read] ( identifier[self] ): literal[string] keyword[for] identifier[config_file] keyword[in] identifier[self] . identifier[config_file_paths] (): identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[config_file] )) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[config_file] ): keyword[try] : keyword[with] identifier[open] ( identifier[config_file] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] : identifier[self] . identifier[parser] . identifier[read_file] ( identifier[f] ) identifier[self] . identifier[parser] . identifier[read] ( identifier[f] ) identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[config_file] )) keyword[except] identifier[UnicodeDecodeError] keyword[as] identifier[err] : identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[config_file] , identifier[err] )) identifier[sys] . identifier[exit] ( literal[int] ) identifier[self] . identifier[_loaded_config_file] = identifier[config_file] keyword[break] keyword[if] keyword[not] identifier[self] . identifier[parser] . identifier[has_section] ( literal[string] ): identifier[self] . identifier[parser] . identifier[add_section] ( literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] ) keyword[if] keyword[not] identifier[self] . identifier[parser] . identifier[has_section] ( literal[string] ): identifier[self] . identifier[parser] . identifier[add_section] ( literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] ) identifier[iowait_bottleneck] =( literal[int] / identifier[multiprocessing] . identifier[cpu_count] ())* literal[int] identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] , [ identifier[str] ( identifier[iowait_bottleneck] -( identifier[iowait_bottleneck] * literal[int] )), identifier[str] ( identifier[iowait_bottleneck] -( identifier[iowait_bottleneck] * literal[int] )), identifier[str] ( identifier[iowait_bottleneck] )]) identifier[ctx_switches_bottleneck] =( literal[int] * literal[int] )* identifier[multiprocessing] . identifier[cpu_count] () identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] , [ identifier[str] ( identifier[ctx_switches_bottleneck] -( identifier[ctx_switches_bottleneck] * literal[int] )), identifier[str] ( identifier[ctx_switches_bottleneck] -( identifier[ctx_switches_bottleneck] * literal[int] )), identifier[str] ( identifier[ctx_switches_bottleneck] )]) keyword[if] keyword[not] identifier[self] . identifier[parser] . identifier[has_section] ( literal[string] ): identifier[self] . identifier[parser] . identifier[add_section] ( literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] ) keyword[if] keyword[not] identifier[self] . identifier[parser] . identifier[has_section] ( literal[string] ): identifier[self] . identifier[parser] . identifier[add_section] ( literal[string] ) identifier[self] . 
identifier[set_default_cwc] ( literal[string] , identifier[cwc] =[ literal[string] , literal[string] , literal[string] ]) keyword[if] keyword[not] identifier[self] . identifier[parser] . identifier[has_section] ( literal[string] ): identifier[self] . identifier[parser] . identifier[add_section] ( literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] ) keyword[if] keyword[not] identifier[self] . identifier[parser] . identifier[has_section] ( literal[string] ): identifier[self] . identifier[parser] . identifier[add_section] ( literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] ) keyword[if] keyword[not] identifier[self] . identifier[parser] . identifier[has_section] ( literal[string] ): identifier[self] . identifier[parser] . identifier[add_section] ( literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] ) keyword[if] keyword[not] identifier[self] . identifier[parser] . identifier[has_section] ( literal[string] ): identifier[self] . identifier[parser] . identifier[add_section] ( literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] ) keyword[if] keyword[not] identifier[self] . identifier[parser] . identifier[has_section] ( literal[string] ): identifier[self] . identifier[parser] . identifier[add_section] ( literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] , identifier[cwc] =[ literal[string] , literal[string] , literal[string] ]) identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] , identifier[cwc] =[ literal[string] , literal[string] , literal[string] ]) identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] , identifier[cwc] =[ literal[string] , literal[string] , literal[string] ]) keyword[if] keyword[not] identifier[self] . identifier[parser] . identifier[has_section] ( literal[string] ): identifier[self] . identifier[parser] . identifier[add_section] ( literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] ) identifier[self] . identifier[set_default_cwc] ( literal[string] , literal[string] )
def read(self):
    """Read the config file, if it exists. Using defaults otherwise."""
    for config_file in self.config_file_paths():
        logger.info('Search glances.conf file in {}'.format(config_file))
        if os.path.exists(config_file):
            try:
                with open(config_file, encoding='utf-8') as f:
                    self.parser.read_file(f)
                    self.parser.read(f) # depends on [control=['with'], data=['f']]
                logger.info("Read configuration file '{}'".format(config_file)) # depends on [control=['try'], data=[]]
            except UnicodeDecodeError as err:
                logger.error("Can not read configuration file '{}': {}".format(config_file, err))
                sys.exit(1) # depends on [control=['except'], data=['err']]
            # Save the loaded configuration file path (issue #374)
            self._loaded_config_file = config_file
            break # depends on [control=['if'], data=[]]
    # depends on [control=['for'], data=['config_file']]
    # Quicklook
    if not self.parser.has_section('quicklook'):
        self.parser.add_section('quicklook') # depends on [control=['if'], data=[]]
    self.set_default_cwc('quicklook', 'cpu')
    self.set_default_cwc('quicklook', 'mem')
    self.set_default_cwc('quicklook', 'swap')
    # CPU
    if not self.parser.has_section('cpu'):
        self.parser.add_section('cpu') # depends on [control=['if'], data=[]]
    self.set_default_cwc('cpu', 'user')
    self.set_default_cwc('cpu', 'system')
    self.set_default_cwc('cpu', 'steal')
    # By default I/O wait should be lower than 1/number of CPU cores
    iowait_bottleneck = 1.0 / multiprocessing.cpu_count() * 100.0
    self.set_default_cwc('cpu', 'iowait', [str(iowait_bottleneck - iowait_bottleneck * 0.2), str(iowait_bottleneck - iowait_bottleneck * 0.1), str(iowait_bottleneck)])
    # Context switches bottleneck identification #1212
    ctx_switches_bottleneck = 500000 * 0.1 * multiprocessing.cpu_count()
    self.set_default_cwc('cpu', 'ctx_switches', [str(ctx_switches_bottleneck - ctx_switches_bottleneck * 0.2), str(ctx_switches_bottleneck - ctx_switches_bottleneck * 0.1), str(ctx_switches_bottleneck)])
    # Per-CPU
    if not self.parser.has_section('percpu'):
        self.parser.add_section('percpu') # depends on [control=['if'], data=[]]
    self.set_default_cwc('percpu', 'user')
    self.set_default_cwc('percpu', 'system')
    # Load
    if not self.parser.has_section('load'):
        self.parser.add_section('load') # depends on [control=['if'], data=[]]
    self.set_default_cwc('load', cwc=['0.7', '1.0', '5.0'])
    # Mem
    if not self.parser.has_section('mem'):
        self.parser.add_section('mem') # depends on [control=['if'], data=[]]
    self.set_default_cwc('mem')
    # Swap
    if not self.parser.has_section('memswap'):
        self.parser.add_section('memswap') # depends on [control=['if'], data=[]]
    self.set_default_cwc('memswap')
    # NETWORK
    if not self.parser.has_section('network'):
        self.parser.add_section('network') # depends on [control=['if'], data=[]]
    self.set_default_cwc('network', 'rx')
    self.set_default_cwc('network', 'tx')
    # FS
    if not self.parser.has_section('fs'):
        self.parser.add_section('fs') # depends on [control=['if'], data=[]]
    self.set_default_cwc('fs')
    # Sensors
    if not self.parser.has_section('sensors'):
        self.parser.add_section('sensors') # depends on [control=['if'], data=[]]
    self.set_default_cwc('sensors', 'temperature_core', cwc=['60', '70', '80'])
    self.set_default_cwc('sensors', 'temperature_hdd', cwc=['45', '52', '60'])
    self.set_default_cwc('sensors', 'battery', cwc=['80', '90', '95'])
    # Process list
    if not self.parser.has_section('processlist'):
        self.parser.add_section('processlist') # depends on [control=['if'], data=[]]
    self.set_default_cwc('processlist', 'cpu')
    self.set_default_cwc('processlist', 'mem')
def json_data(self, instance, default=None):
    """Get a JSON compatible value
    """
    value = self.get(instance)
    return value or default
def function[json_data, parameter[self, instance, default]]: constant[Get a JSON compatible value ] variable[value] assign[=] call[name[self].get, parameter[name[instance]]] return[<ast.BoolOp object at 0x7da20c6a9960>]
keyword[def] identifier[json_data] ( identifier[self] , identifier[instance] , identifier[default] = keyword[None] ): literal[string] identifier[value] = identifier[self] . identifier[get] ( identifier[instance] ) keyword[return] identifier[value] keyword[or] identifier[default]
def json_data(self, instance, default=None):
    """Get a JSON compatible value
    """
    value = self.get(instance)
    return value or default
def edge(self, from_node, to_node, edge_type="", **args):
    """draw an edge from a node to another.
    """
    self._stream.write(
        '%s%sedge: {sourcename:"%s" targetname:"%s"'
        % (self._indent, edge_type, from_node, to_node)
    )
    self._write_attributes(EDGE_ATTRS, **args)
    self._stream.write("}\n")
def function[edge, parameter[self, from_node, to_node, edge_type]]: constant[draw an edge from a node to another. ] call[name[self]._stream.write, parameter[binary_operation[constant[%s%sedge: {sourcename:"%s" targetname:"%s"] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0317880>, <ast.Name object at 0x7da1b0316800>, <ast.Name object at 0x7da1b0315990>, <ast.Name object at 0x7da1b0317c10>]]]]] call[name[self]._write_attributes, parameter[name[EDGE_ATTRS]]] call[name[self]._stream.write, parameter[constant[} ]]]
keyword[def] identifier[edge] ( identifier[self] , identifier[from_node] , identifier[to_node] , identifier[edge_type] = literal[string] ,** identifier[args] ): literal[string] identifier[self] . identifier[_stream] . identifier[write] ( literal[string] %( identifier[self] . identifier[_indent] , identifier[edge_type] , identifier[from_node] , identifier[to_node] ) ) identifier[self] . identifier[_write_attributes] ( identifier[EDGE_ATTRS] ,** identifier[args] ) identifier[self] . identifier[_stream] . identifier[write] ( literal[string] )
def edge(self, from_node, to_node, edge_type='', **args):
    """draw an edge from a node to another.
    """
    self._stream.write('%s%sedge: {sourcename:"%s" targetname:"%s"' % (self._indent, edge_type, from_node, to_node))
    self._write_attributes(EDGE_ATTRS, **args)
    self._stream.write('}\n')
def array2root(arr, filename, treename='tree', mode='update'):
    """Convert a numpy array into a ROOT TTree and save it in a ROOT TFile.

    Fields of basic types, strings, and fixed-size subarrays of basic types
    are supported. ``np.object`` and ``np.float16`` are currently not
    supported.

    Parameters
    ----------
    arr : array
        A numpy structured array
    filename : str
        Name of the output ROOT TFile. A new file will be created if it
        doesn't already exist.
    treename : str (optional, default='tree')
        Name of the ROOT TTree that will be created. If a TTree with the same
        name already exists in the TFile, it will be extended as documented in
        :func:`array2tree`.
    mode : str (optional, default='update')
        Mode used to open the ROOT TFile ('update' or 'recreate').

    See Also
    --------
    array2tree
    tree2array
    root2array

    Examples
    --------
    >>> from root_numpy import array2root, root2array
    >>> import numpy as np
    >>>
    >>> a = np.array([(1, 2.5, 3.4),
    ...               (4, 5, 6.8)],
    ...              dtype=[('a', np.int32),
    ...                     ('b', np.float32),
    ...                     ('c', np.float64)])
    >>> array2root(a, 'test.root', mode='recreate')
    >>> root2array('test.root')
    array([(1, 2.5, 3.4), (4, 5.0, 6.8)],
          dtype=[('a', '<i4'), ('b', '<f4'), ('c', '<f8')])
    >>>
    >>> a = np.array(['', 'a', 'ab', 'abc', 'xyz', ''],
    ...              dtype=[('string', 'S3')])
    >>> array2root(a, 'test.root', mode='recreate')
    >>> root2array('test.root')
    array([('',), ('a',), ('ab',), ('abc',), ('xyz',), ('',)],
          dtype=[('string', 'S3')])
    >>>
    >>> a = np.array([([1, 2, 3],),
    ...               ([4, 5, 6],)],
    ...              dtype=[('array', np.int32, (3,))])
    >>> array2root(a, 'test.root', mode='recreate')
    >>> root2array('test.root')
    array([([1, 2, 3],), ([4, 5, 6],)],
          dtype=[('array', '<i4', (3,))])
    """
    _librootnumpy.array2root(arr, filename, treename, mode)
def function[array2root, parameter[arr, filename, treename, mode]]: constant[Convert a numpy array into a ROOT TTree and save it in a ROOT TFile. Fields of basic types, strings, and fixed-size subarrays of basic types are supported. ``np.object`` and ``np.float16`` are currently not supported. Parameters ---------- arr : array A numpy structured array filename : str Name of the output ROOT TFile. A new file will be created if it doesn't already exist. treename : str (optional, default='tree') Name of the ROOT TTree that will be created. If a TTree with the same name already exists in the TFile, it will be extended as documented in :func:`array2tree`. mode : str (optional, default='update') Mode used to open the ROOT TFile ('update' or 'recreate'). See Also -------- array2tree tree2array root2array Examples -------- >>> from root_numpy import array2root, root2array >>> import numpy as np >>> >>> a = np.array([(1, 2.5, 3.4), ... (4, 5, 6.8)], ... dtype=[('a', np.int32), ... ('b', np.float32), ... ('c', np.float64)]) >>> array2root(a, 'test.root', mode='recreate') >>> root2array('test.root') array([(1, 2.5, 3.4), (4, 5.0, 6.8)], dtype=[('a', '<i4'), ('b', '<f4'), ('c', '<f8')]) >>> >>> a = np.array(['', 'a', 'ab', 'abc', 'xyz', ''], ... dtype=[('string', 'S3')]) >>> array2root(a, 'test.root', mode='recreate') >>> root2array('test.root') array([('',), ('a',), ('ab',), ('abc',), ('xyz',), ('',)], dtype=[('string', 'S3')]) >>> >>> a = np.array([([1, 2, 3],), ... ([4, 5, 6],)], ... dtype=[('array', np.int32, (3,))]) >>> array2root(a, 'test.root', mode='recreate') >>> root2array('test.root') array([([1, 2, 3],), ([4, 5, 6],)], dtype=[('array', '<i4', (3,))]) ] call[name[_librootnumpy].array2root, parameter[name[arr], name[filename], name[treename], name[mode]]]
keyword[def] identifier[array2root] ( identifier[arr] , identifier[filename] , identifier[treename] = literal[string] , identifier[mode] = literal[string] ): literal[string] identifier[_librootnumpy] . identifier[array2root] ( identifier[arr] , identifier[filename] , identifier[treename] , identifier[mode] )
def array2root(arr, filename, treename='tree', mode='update'):
    """Convert a numpy array into a ROOT TTree and save it in a ROOT TFile.

    Fields of basic types, strings, and fixed-size subarrays of basic types
    are supported. ``np.object`` and ``np.float16`` are currently not
    supported.

    Parameters
    ----------
    arr : array
        A numpy structured array
    filename : str
        Name of the output ROOT TFile. A new file will be created if it
        doesn't already exist.
    treename : str (optional, default='tree')
        Name of the ROOT TTree that will be created. If a TTree with the same
        name already exists in the TFile, it will be extended as documented in
        :func:`array2tree`.
    mode : str (optional, default='update')
        Mode used to open the ROOT TFile ('update' or 'recreate').

    See Also
    --------
    array2tree
    tree2array
    root2array

    Examples
    --------
    >>> from root_numpy import array2root, root2array
    >>> import numpy as np
    >>>
    >>> a = np.array([(1, 2.5, 3.4),
    ...               (4, 5, 6.8)],
    ...              dtype=[('a', np.int32),
    ...                     ('b', np.float32),
    ...                     ('c', np.float64)])
    >>> array2root(a, 'test.root', mode='recreate')
    >>> root2array('test.root')
    array([(1, 2.5, 3.4), (4, 5.0, 6.8)],
          dtype=[('a', '<i4'), ('b', '<f4'), ('c', '<f8')])
    >>>
    >>> a = np.array(['', 'a', 'ab', 'abc', 'xyz', ''],
    ...              dtype=[('string', 'S3')])
    >>> array2root(a, 'test.root', mode='recreate')
    >>> root2array('test.root')
    array([('',), ('a',), ('ab',), ('abc',), ('xyz',), ('',)],
          dtype=[('string', 'S3')])
    >>>
    >>> a = np.array([([1, 2, 3],),
    ...               ([4, 5, 6],)],
    ...              dtype=[('array', np.int32, (3,))])
    >>> array2root(a, 'test.root', mode='recreate')
    >>> root2array('test.root')
    array([([1, 2, 3],), ([4, 5, 6],)],
          dtype=[('array', '<i4', (3,))])
    """
    _librootnumpy.array2root(arr, filename, treename, mode)
def p_new_expr(self, p):
    """new_expr : member_expr
                | NEW new_expr
    """
    if len(p) == 2:
        p[0] = p[1]
    else:
        p[0] = ast.NewExpr(p[2])
def function[p_new_expr, parameter[self, p]]: constant[new_expr : member_expr | NEW new_expr ] if compare[call[name[len], parameter[name[p]]] equal[==] constant[2]] begin[:] call[name[p]][constant[0]] assign[=] call[name[p]][constant[1]]
keyword[def] identifier[p_new_expr] ( identifier[self] , identifier[p] ): literal[string] keyword[if] identifier[len] ( identifier[p] )== literal[int] : identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ] keyword[else] : identifier[p] [ literal[int] ]= identifier[ast] . identifier[NewExpr] ( identifier[p] [ literal[int] ])
def p_new_expr(self, p):
    """new_expr : member_expr
                | NEW new_expr
    """
    if len(p) == 2:
        p[0] = p[1] # depends on [control=['if'], data=[]]
    else:
        p[0] = ast.NewExpr(p[2])
def content(self, contents):
    """The content(s) of the email

    :param contents: The content(s) of the email
    :type contents: Content, list(Content)
    """
    if isinstance(contents, list):
        for c in contents:
            self.add_content(c)
    else:
        self.add_content(contents)
def function[content, parameter[self, contents]]: constant[The content(s) of the email :param contents: The content(s) of the email :type contents: Content, list(Content) ] if call[name[isinstance], parameter[name[contents], name[list]]] begin[:] for taget[name[c]] in starred[name[contents]] begin[:] call[name[self].add_content, parameter[name[c]]]
keyword[def] identifier[content] ( identifier[self] , identifier[contents] ): literal[string] keyword[if] identifier[isinstance] ( identifier[contents] , identifier[list] ): keyword[for] identifier[c] keyword[in] identifier[contents] : identifier[self] . identifier[add_content] ( identifier[c] ) keyword[else] : identifier[self] . identifier[add_content] ( identifier[contents] )
def content(self, contents):
    """The content(s) of the email

    :param contents: The content(s) of the email
    :type contents: Content, list(Content)
    """
    if isinstance(contents, list):
        for c in contents:
            self.add_content(c) # depends on [control=['for'], data=['c']] # depends on [control=['if'], data=[]]
    else:
        self.add_content(contents)
def list_transaction(hostname, username, password, label): ''' A function to connect to a bigip device and list an existing transaction. hostname The host/address of the bigip device username The iControl REST username password The iControl REST password label the label of this transaction stored within the grain: ``bigip_f5_trans:<label>`` CLI Example:: salt '*' bigip.list_transaction bigip admin admin my_transaction ''' #build the session bigip_session = _build_session(username, password) #pull the trans id from the grain trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=label)) if trans_id: #post to REST to get trans id try: response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/transaction/{trans_id}/commands'.format(trans_id=trans_id)) return _load_response(response) except requests.exceptions.ConnectionError as e: return _load_connection_error(hostname, e) else: return 'Error: the label for this transaction was not defined as a grain. Begin a new transaction using the' \ ' bigip.start_transaction function'
def function[list_transaction, parameter[hostname, username, password, label]]: constant[ A function to connect to a bigip device and list an existing transaction. hostname The host/address of the bigip device username The iControl REST username password The iControl REST password label the label of this transaction stored within the grain: ``bigip_f5_trans:<label>`` CLI Example:: salt '*' bigip.list_transaction bigip admin admin my_transaction ] variable[bigip_session] assign[=] call[name[_build_session], parameter[name[username], name[password]]] variable[trans_id] assign[=] call[call[name[__salt__]][constant[grains.get]], parameter[call[constant[bigip_f5_trans:{label}].format, parameter[]]]] if name[trans_id] begin[:] <ast.Try object at 0x7da18f09fe50>
keyword[def] identifier[list_transaction] ( identifier[hostname] , identifier[username] , identifier[password] , identifier[label] ): literal[string] identifier[bigip_session] = identifier[_build_session] ( identifier[username] , identifier[password] ) identifier[trans_id] = identifier[__salt__] [ literal[string] ]( literal[string] . identifier[format] ( identifier[label] = identifier[label] )) keyword[if] identifier[trans_id] : keyword[try] : identifier[response] = identifier[bigip_session] . identifier[get] ( identifier[BIG_IP_URL_BASE] . identifier[format] ( identifier[host] = identifier[hostname] )+ literal[string] . identifier[format] ( identifier[trans_id] = identifier[trans_id] )) keyword[return] identifier[_load_response] ( identifier[response] ) keyword[except] identifier[requests] . identifier[exceptions] . identifier[ConnectionError] keyword[as] identifier[e] : keyword[return] identifier[_load_connection_error] ( identifier[hostname] , identifier[e] ) keyword[else] : keyword[return] literal[string] literal[string]
def list_transaction(hostname, username, password, label): """ A function to connect to a bigip device and list an existing transaction. hostname The host/address of the bigip device username The iControl REST username password The iControl REST password label the label of this transaction stored within the grain: ``bigip_f5_trans:<label>`` CLI Example:: salt '*' bigip.list_transaction bigip admin admin my_transaction """ #build the session bigip_session = _build_session(username, password) #pull the trans id from the grain trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=label)) if trans_id: #post to REST to get trans id try: response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname) + '/transaction/{trans_id}/commands'.format(trans_id=trans_id)) return _load_response(response) # depends on [control=['try'], data=[]] except requests.exceptions.ConnectionError as e: return _load_connection_error(hostname, e) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] else: return 'Error: the label for this transaction was not defined as a grain. Begin a new transaction using the bigip.start_transaction function'
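A small sketch of the grain lookup that gates the REST call (the label is a placeholder; BIG_IP_URL_BASE's actual value is not shown in this snippet):

label = 'my_transaction'
grain_key = 'bigip_f5_trans:{label}'.format(label=label)
print(grain_key)  # 'bigip_f5_trans:my_transaction' -> passed to grains.get;
                  # an empty result takes the 'label ... not defined' error path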
def logout(self, force=False): """ Interactive logout - ensures uid/tid cleared so `cloudgenix.API` object/ requests.Session can be re-used. **Parameters:**: - **force**: Bool, force logout API call, even when using a static AUTH_TOKEN. **Returns:** Bool of whether the operation succeeded. """ # Extract requests session for manipulation. session = self._parent_class.expose_session() # if force = True, or token_session = None/False, call logout API. if force or not self._parent_class.token_session: # Call Logout result = self._parent_class.get.logout() if result.cgx_status: # clear info from session. self._parent_class.tenant_id = None self._parent_class.tenant_name = None self._parent_class.is_esp = None self._parent_class.client_id = None self._parent_class.address_string = None self._parent_class.email = None self._parent_class._user_id = None self._parent_class._password = None self._parent_class.roles = None self._parent_class.token_session = None # Cookies are removed via LOGOUT API call. if X-Auth-Token set, clear. if session.headers.get('X-Auth-Token'): self._parent_class.remove_header('X-Auth-Token') return result.cgx_status else: # Token Session and not forced. api_logger.debug('TOKEN SESSION, LOGOUT API NOT CALLED.') # clear info from session. self._parent_class.tenant_id = None self._parent_class.tenant_name = None self._parent_class.is_esp = None self._parent_class.client_id = None self._parent_class.address_string = None self._parent_class.email = None self._parent_class._user_id = None self._parent_class._password = None self._parent_class.roles = None self._parent_class.token_session = None # if X-Auth-Token set, clear. if session.headers.get('X-Auth-Token'): self._parent_class.remove_header('X-Auth-Token') return True
def function[logout, parameter[self, force]]: constant[ Interactive logout - ensures uid/tid cleared so `cloudgenix.API` object/ requests.Session can be re-used. **Parameters:**: - **force**: Bool, force logout API call, even when using a static AUTH_TOKEN. **Returns:** Bool of whether the operation succeeded. ] variable[session] assign[=] call[name[self]._parent_class.expose_session, parameter[]] if <ast.BoolOp object at 0x7da1b0fb0070> begin[:] variable[result] assign[=] call[name[self]._parent_class.get.logout, parameter[]] if name[result].cgx_status begin[:] name[self]._parent_class.tenant_id assign[=] constant[None] name[self]._parent_class.tenant_name assign[=] constant[None] name[self]._parent_class.is_esp assign[=] constant[None] name[self]._parent_class.client_id assign[=] constant[None] name[self]._parent_class.address_string assign[=] constant[None] name[self]._parent_class.email assign[=] constant[None] name[self]._parent_class._user_id assign[=] constant[None] name[self]._parent_class._password assign[=] constant[None] name[self]._parent_class.roles assign[=] constant[None] name[self]._parent_class.token_session assign[=] constant[None] if call[name[session].headers.get, parameter[constant[X-Auth-Token]]] begin[:] call[name[self]._parent_class.remove_header, parameter[constant[X-Auth-Token]]] return[name[result].cgx_status]
keyword[def] identifier[logout] ( identifier[self] , identifier[force] = keyword[False] ): literal[string] identifier[session] = identifier[self] . identifier[_parent_class] . identifier[expose_session] () keyword[if] identifier[force] keyword[or] keyword[not] identifier[self] . identifier[_parent_class] . identifier[token_session] : identifier[result] = identifier[self] . identifier[_parent_class] . identifier[get] . identifier[logout] () keyword[if] identifier[result] . identifier[cgx_status] : identifier[self] . identifier[_parent_class] . identifier[tenant_id] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[tenant_name] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[is_esp] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[client_id] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[address_string] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[email] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[_user_id] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[_password] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[roles] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[token_session] = keyword[None] keyword[if] identifier[session] . identifier[headers] . identifier[get] ( literal[string] ): identifier[self] . identifier[_parent_class] . identifier[remove_header] ( literal[string] ) keyword[return] identifier[result] . identifier[cgx_status] keyword[else] : identifier[api_logger] . identifier[debug] ( literal[string] ) identifier[self] . identifier[_parent_class] . identifier[tenant_id] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[tenant_name] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[is_esp] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[client_id] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[address_string] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[email] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[_user_id] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[_password] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[roles] = keyword[None] identifier[self] . identifier[_parent_class] . identifier[token_session] = keyword[None] keyword[if] identifier[session] . identifier[headers] . identifier[get] ( literal[string] ): identifier[self] . identifier[_parent_class] . identifier[remove_header] ( literal[string] ) keyword[return] keyword[True]
def logout(self, force=False): """ Interactive logout - ensures uid/tid cleared so `cloudgenix.API` object/ requests.Session can be re-used. **Parameters:**: - **force**: Bool, force logout API call, even when using a static AUTH_TOKEN. **Returns:** Bool of whether the operation succeeded. """ # Extract requests session for manipulation. session = self._parent_class.expose_session() # if force = True, or token_session = None/False, call logout API. if force or not self._parent_class.token_session: # Call Logout result = self._parent_class.get.logout() if result.cgx_status: # clear info from session. self._parent_class.tenant_id = None self._parent_class.tenant_name = None self._parent_class.is_esp = None self._parent_class.client_id = None self._parent_class.address_string = None self._parent_class.email = None self._parent_class._user_id = None self._parent_class._password = None self._parent_class.roles = None self._parent_class.token_session = None # Cookies are removed via LOGOUT API call. if X-Auth-Token set, clear. if session.headers.get('X-Auth-Token'): self._parent_class.remove_header('X-Auth-Token') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return result.cgx_status # depends on [control=['if'], data=[]] else: # Token Session and not forced. api_logger.debug('TOKEN SESSION, LOGOUT API NOT CALLED.') # clear info from session. self._parent_class.tenant_id = None self._parent_class.tenant_name = None self._parent_class.is_esp = None self._parent_class.client_id = None self._parent_class.address_string = None self._parent_class.email = None self._parent_class._user_id = None self._parent_class._password = None self._parent_class.roles = None self._parent_class.token_session = None # if X-Auth-Token set, clear. if session.headers.get('X-Auth-Token'): self._parent_class.remove_header('X-Auth-Token') # depends on [control=['if'], data=[]] return True
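A hedged usage sketch, assuming the cloudgenix SDK the docstring references (the credentials are placeholders):

import cloudgenix

sdk = cloudgenix.API()
sdk.interactive.login('user@example.com', 'password')  # placeholder credentials
# ... issue API calls ...
sdk.interactive.logout()   # clears tenant/session state on the shared object
sdk.interactive.login('user@example.com', 'password')  # same object is reusable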
def close(self): """Close the socket""" if self.is_open(): fd = self._fd self._fd = -1 if self.uses_nanoconfig: wrapper.nc_close(fd) else: _nn_check_positive_rtn(wrapper.nn_close(fd))
def function[close, parameter[self]]: constant[Close the socket] if call[name[self].is_open, parameter[]] begin[:] variable[fd] assign[=] name[self]._fd name[self]._fd assign[=] <ast.UnaryOp object at 0x7da2044c3f10> if name[self].uses_nanoconfig begin[:] call[name[wrapper].nc_close, parameter[name[fd]]]
keyword[def] identifier[close] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[is_open] (): identifier[fd] = identifier[self] . identifier[_fd] identifier[self] . identifier[_fd] =- literal[int] keyword[if] identifier[self] . identifier[uses_nanoconfig] : identifier[wrapper] . identifier[nc_close] ( identifier[fd] ) keyword[else] : identifier[_nn_check_positive_rtn] ( identifier[wrapper] . identifier[nn_close] ( identifier[fd] ))
def close(self): """Close the socket""" if self.is_open(): fd = self._fd self._fd = -1 if self.uses_nanoconfig: wrapper.nc_close(fd) # depends on [control=['if'], data=[]] else: _nn_check_positive_rtn(wrapper.nn_close(fd)) # depends on [control=['if'], data=[]]
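The fd swap makes close() idempotent; a runnable stand-in (FakeSocket and the print are hypothetical, standing in for nn_close/nc_close):

class FakeSocket:
    def __init__(self):
        self._fd = 7

    def is_open(self):
        return self._fd >= 0

    def close(self):
        if self.is_open():
            fd, self._fd = self._fd, -1   # clear before the close call
            print('closing fd', fd)       # real code calls nn_close/nc_close here

s = FakeSocket()
s.close()   # closing fd 7
s.close()   # no-op: _fd is already -1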
def add_slice(self, name, input_name, output_name, axis, start_index = 0, end_index = -1, stride = 1):
        """
        Add a slice layer. Equivalent to the numpy slice [start_index:end_index:stride],
        start_index is included, while end_index is exclusive.

        Parameters
        ----------
        name: str
            The name of this layer.
        input_name: str
            The input blob name of this layer.
        output_name: str
            The output blob name of this layer.
        axis: str
            axis along which input is sliced.
            allowed values: 'channel', 'height', 'width'
        start_index: int
            must be non-negative.
        end_index: int
            negative indexing is supported.
        stride: int
            must be positive.

        See Also
        --------
        add_permute, add_reshape
        """
        spec = self.spec
        nn_spec = self.nn_spec

        # Add a new layer
        spec_layer = nn_spec.layers.add()
        spec_layer.name = name
        spec_layer.input.append(input_name)
        spec_layer.output.append(output_name)
        spec_layer_params = spec_layer.slice

        # Set the parameters
        if start_index < 0:
            raise ValueError("Invalid start_index value %d. Must be non-negative." % start_index)
        if stride < 1:
            raise ValueError("Invalid stride value %d. Must be positive." % stride)

        spec_layer_params.startIndex = start_index
        spec_layer_params.endIndex = end_index
        spec_layer_params.stride = stride

        if axis == 'channel':
            spec_layer_params.axis = \
                    _NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value('CHANNEL_AXIS')
        elif axis == 'height':
            spec_layer_params.axis = \
                    _NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value('HEIGHT_AXIS')
        elif axis == 'width':
            spec_layer_params.axis = \
                    _NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value('WIDTH_AXIS')
        else:
            raise NotImplementedError(
                'Unsupported Slice axis %s ' % axis)
def function[add_slice, parameter[self, name, input_name, output_name, axis, start_index, end_index, stride]]: constant[ Add a slice layer. Equivalent to to numpy slice [start_index:end_index:stride], start_index is included, while end_index is exclusive. Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. axis: str axis along which input is sliced. allowed values: 'channel', 'height', 'width' start_index: int must be non-negative. end_index: int negative indexing is supported. stride: int must be positive. See Also -------- add_permute, add_reshape ] variable[spec] assign[=] name[self].spec variable[nn_spec] assign[=] name[self].nn_spec variable[spec_layer] assign[=] call[name[nn_spec].layers.add, parameter[]] name[spec_layer].name assign[=] name[name] call[name[spec_layer].input.append, parameter[name[input_name]]] call[name[spec_layer].output.append, parameter[name[output_name]]] variable[spec_layer_params] assign[=] name[spec_layer].slice if compare[name[start_index] less[<] constant[0]] begin[:] <ast.Raise object at 0x7da1b2065cc0> if compare[name[stride] less[<] constant[1]] begin[:] <ast.Raise object at 0x7da1b2064af0> name[spec_layer_params].startIndex assign[=] name[start_index] name[spec_layer_params].endIndex assign[=] name[end_index] name[spec_layer_params].stride assign[=] name[stride] if compare[name[axis] equal[==] constant[channel]] begin[:] name[spec_layer_params].axis assign[=] call[name[_NeuralNetwork_pb2].SliceLayerParams.SliceAxis.Value, parameter[constant[CHANNEL_AXIS]]]
keyword[def] identifier[add_slice] ( identifier[self] , identifier[name] , identifier[input_name] , identifier[output_name] , identifier[axis] , identifier[start_index] = literal[int] , identifier[end_index] =- literal[int] , identifier[stride] = literal[int] ): literal[string] identifier[spec] = identifier[self] . identifier[spec] identifier[nn_spec] = identifier[self] . identifier[nn_spec] identifier[spec_layer] = identifier[nn_spec] . identifier[layers] . identifier[add] () identifier[spec_layer] . identifier[name] = identifier[name] identifier[spec_layer] . identifier[input] . identifier[append] ( identifier[input_name] ) identifier[spec_layer] . identifier[output] . identifier[append] ( identifier[output_name] ) identifier[spec_layer_params] = identifier[spec_layer] . identifier[slice] keyword[if] identifier[start_index] < literal[int] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[start_index] ) keyword[if] identifier[stride] < literal[int] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[stride] ) identifier[spec_layer_params] . identifier[startIndex] = identifier[start_index] identifier[spec_layer_params] . identifier[endIndex] = identifier[end_index] identifier[spec_layer_params] . identifier[stride] = identifier[stride] keyword[if] identifier[axis] == literal[string] : identifier[spec_layer_params] . identifier[axis] = identifier[_NeuralNetwork_pb2] . identifier[SliceLayerParams] . identifier[SliceAxis] . identifier[Value] ( literal[string] ) keyword[elif] identifier[axis] == literal[string] : identifier[spec_layer_params] . identifier[axis] = identifier[_NeuralNetwork_pb2] . identifier[SliceLayerParams] . identifier[SliceAxis] . identifier[Value] ( literal[string] ) keyword[elif] identifier[axis] == literal[string] : identifier[spec_layer_params] . identifier[axis] = identifier[_NeuralNetwork_pb2] . identifier[SliceLayerParams] . identifier[SliceAxis] . identifier[Value] ( literal[string] ) keyword[else] : keyword[raise] identifier[NotImplementedError] ( literal[string] % identifier[axis] )
def add_slice(self, name, input_name, output_name, axis, start_index=0, end_index=-1, stride=1): """ Add a slice layer. Equivalent to to numpy slice [start_index:end_index:stride], start_index is included, while end_index is exclusive. Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. axis: str axis along which input is sliced. allowed values: 'channel', 'height', 'width' start_index: int must be non-negative. end_index: int negative indexing is supported. stride: int must be positive. See Also -------- add_permute, add_reshape """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.slice # Set the parameters if start_index < 0: raise ValueError('Invalid start_index value %d. Must be non-negative.' % start_index) # depends on [control=['if'], data=['start_index']] if stride < 1: raise ValueError('Invalid stride value %d. Must be positive.' % stride) # depends on [control=['if'], data=['stride']] spec_layer_params.startIndex = start_index spec_layer_params.endIndex = end_index spec_layer_params.stride = stride if axis == 'channel': spec_layer_params.axis = _NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value('CHANNEL_AXIS') # depends on [control=['if'], data=[]] elif axis == 'height': spec_layer_params.axis = _NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value('HEIGHT_AXIS') # depends on [control=['if'], data=[]] elif axis == 'width': spec_layer_params.axis = _NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value('WIDTH_AXIS') # depends on [control=['if'], data=[]] else: raise NotImplementedError('Unsupported Slice axis %s ' % axis)
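The numpy equivalence the docstring states can be checked directly; a sketch for axis='channel', assuming a (channel, height, width) layout:

import numpy as np

x = np.arange(24).reshape(4, 3, 2)        # (channel, height, width), assumed order
start_index, end_index, stride = 0, 3, 2
print(x[start_index:end_index:stride])    # channels 0 and 2 -- what the layer emits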
def _wrap(func, shape, context=None, axis=(0,), dtype=None, npartitions=None): """ Wrap an existing numpy constructor in a parallelized construction """ if isinstance(shape, int): shape = (shape,) key_shape, value_shape = get_kv_shape(shape, ConstructSpark._format_axes(axis, shape)) split = len(key_shape) # make the keys rdd = context.parallelize(list(product(*[arange(x) for x in key_shape])), npartitions) # use a map to make the arrays in parallel rdd = rdd.map(lambda x: (x, func(value_shape, dtype, order='C'))) return BoltArraySpark(rdd, shape=shape, split=split, dtype=dtype)
def function[_wrap, parameter[func, shape, context, axis, dtype, npartitions]]: constant[ Wrap an existing numpy constructor in a parallelized construction ] if call[name[isinstance], parameter[name[shape], name[int]]] begin[:] variable[shape] assign[=] tuple[[<ast.Name object at 0x7da18eb555a0>]] <ast.Tuple object at 0x7da18eb57a30> assign[=] call[name[get_kv_shape], parameter[name[shape], call[name[ConstructSpark]._format_axes, parameter[name[axis], name[shape]]]]] variable[split] assign[=] call[name[len], parameter[name[key_shape]]] variable[rdd] assign[=] call[name[context].parallelize, parameter[call[name[list], parameter[call[name[product], parameter[<ast.Starred object at 0x7da204620fd0>]]]], name[npartitions]]] variable[rdd] assign[=] call[name[rdd].map, parameter[<ast.Lambda object at 0x7da204622d10>]] return[call[name[BoltArraySpark], parameter[name[rdd]]]]
keyword[def] identifier[_wrap] ( identifier[func] , identifier[shape] , identifier[context] = keyword[None] , identifier[axis] =( literal[int] ,), identifier[dtype] = keyword[None] , identifier[npartitions] = keyword[None] ): literal[string] keyword[if] identifier[isinstance] ( identifier[shape] , identifier[int] ): identifier[shape] =( identifier[shape] ,) identifier[key_shape] , identifier[value_shape] = identifier[get_kv_shape] ( identifier[shape] , identifier[ConstructSpark] . identifier[_format_axes] ( identifier[axis] , identifier[shape] )) identifier[split] = identifier[len] ( identifier[key_shape] ) identifier[rdd] = identifier[context] . identifier[parallelize] ( identifier[list] ( identifier[product] (*[ identifier[arange] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[key_shape] ])), identifier[npartitions] ) identifier[rdd] = identifier[rdd] . identifier[map] ( keyword[lambda] identifier[x] :( identifier[x] , identifier[func] ( identifier[value_shape] , identifier[dtype] , identifier[order] = literal[string] ))) keyword[return] identifier[BoltArraySpark] ( identifier[rdd] , identifier[shape] = identifier[shape] , identifier[split] = identifier[split] , identifier[dtype] = identifier[dtype] )
def _wrap(func, shape, context=None, axis=(0,), dtype=None, npartitions=None): """ Wrap an existing numpy constructor in a parallelized construction """ if isinstance(shape, int): shape = (shape,) # depends on [control=['if'], data=[]] (key_shape, value_shape) = get_kv_shape(shape, ConstructSpark._format_axes(axis, shape)) split = len(key_shape) # make the keys rdd = context.parallelize(list(product(*[arange(x) for x in key_shape])), npartitions) # use a map to make the arrays in parallel rdd = rdd.map(lambda x: (x, func(value_shape, dtype, order='C'))) return BoltArraySpark(rdd, shape=shape, split=split, dtype=dtype)
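How the record keys are built before parallelize, in isolation (the shape and axis values are illustrative):

from itertools import product
from numpy import arange

key_shape = (2,)   # shape=(2, 3) split along axis=(0,) -> value_shape=(3,)
keys = list(product(*[arange(x) for x in key_shape]))
print(keys)        # [(0,), (1,)] -- one record per key, each paired with a
                   # freshly constructed array of value_shape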
def angular_power_spectrum(self):
        """Returns the angular power spectrum for the set of coefficients.

        That is, for each degree n we compute

            c_n = sum_{m=-n}^{n} cnm * conj(cnm)

        Returns:
            power_spectrum (numpy.array, dtype=double) spectrum as a function of n.
        """
        # Added this routine as a result of my discussions with Ajinkya Nene
        # https://github.com/anene
        list_of_modes = self._reshape_m_vecs()
        Nmodes = len(list_of_modes)
        angular_power = np.zeros(Nmodes, dtype=np.double)
        for n in range(0, Nmodes):
            mode = np.array(list_of_modes[n], dtype=np.complex128)
            angular_power[n] = np.sum(np.abs(mode) ** 2)
        return angular_power
def function[angular_power_spectrum, parameter[self]]: constant[Returns the angular power spectrum for the set of coefficients. That is, we compute n c_n = sum cnm * conj( cnm ) m=-n Returns: power_spectrum (numpy.array, dtype=double) spectrum as a function of n. ] variable[list_of_modes] assign[=] call[name[self]._reshape_m_vecs, parameter[]] variable[Nmodes] assign[=] call[name[len], parameter[name[list_of_modes]]] variable[angular_power] assign[=] call[name[np].zeros, parameter[name[Nmodes]]] for taget[name[n]] in starred[call[name[range], parameter[constant[0], name[Nmodes]]]] begin[:] variable[mode] assign[=] call[name[np].array, parameter[call[name[list_of_modes]][name[n]]]] call[name[angular_power]][name[n]] assign[=] call[name[np].sum, parameter[binary_operation[call[name[np].abs, parameter[name[mode]]] ** constant[2]]]] return[name[angular_power]]
keyword[def] identifier[angular_power_spectrum] ( identifier[self] ): literal[string] identifier[list_of_modes] = identifier[self] . identifier[_reshape_m_vecs] () identifier[Nmodes] = identifier[len] ( identifier[list_of_modes] ) identifier[angular_power] = identifier[np] . identifier[zeros] ( identifier[Nmodes] , identifier[dtype] = identifier[np] . identifier[double] ) keyword[for] identifier[n] keyword[in] identifier[range] ( literal[int] , identifier[Nmodes] ): identifier[mode] = identifier[np] . identifier[array] ( identifier[list_of_modes] [ identifier[n] ], identifier[dtype] = identifier[np] . identifier[complex128] ) identifier[angular_power] [ identifier[n] ]= identifier[np] . identifier[sum] ( identifier[np] . identifier[abs] ( identifier[mode] )** literal[int] ) keyword[return] identifier[angular_power]
def angular_power_spectrum(self): """Returns the angular power spectrum for the set of coefficients. That is, we compute n c_n = sum cnm * conj( cnm ) m=-n Returns: power_spectrum (numpy.array, dtype=double) spectrum as a function of n. """ # Added this routine as a result of my discussions with Ajinkya Nene #https://github.com/anene list_of_modes = self._reshape_m_vecs() Nmodes = len(list_of_modes) angular_power = np.zeros(Nmodes, dtype=np.double) for n in range(0, Nmodes): mode = np.array(list_of_modes[n], dtype=np.complex128) angular_power[n] = np.sum(np.abs(mode) ** 2) # depends on [control=['for'], data=['n']] return angular_power
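A direct numeric check of the c_n = sum_m |c_nm|^2 definition for two modes (the coefficient values are made up):

import numpy as np

modes = [[1 + 0j], [0.5j, 1 + 1j, -0.5]]   # hypothetical c_00 and c_{1,-1..1}
power = [np.sum(np.abs(np.array(m, dtype=np.complex128)) ** 2) for m in modes]
print(power)   # [1.0, 2.5]  (0.25 + 2.0 + 0.25 for n=1)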
def _json_path_search(self, json_dict, expr):
        """
        Scan a JSON dictionary using a json-path search string of the format $.element..element1[index] etc.

        *Args:*\n
            _json_dict_ - JSON dictionary;\n
            _expr_ - string of fuzzy search for items within the dictionary;\n

        *Returns:*\n
            List of DatumInContext objects:
            ``[DatumInContext(value=..., path=..., context=[DatumInContext])]``

            - value - found value
            - path - value selector inside context.value (in implementation of jsonpath-rw: class Index or Fields)

        *Raises:*\n
            JsonValidatorError
        """
        path = parse(expr)
        results = path.find(json_dict)
        if len(results) == 0:
            raise JsonValidatorError("Nothing found in the dictionary {0} using the given path {1}".format(
                str(json_dict), str(expr)))
        return results
def function[_json_path_search, parameter[self, json_dict, expr]]: constant[ Scan JSON dictionary with using json-path passed sting of the format of $.element..element1[index] etc. *Args:* _json_dict_ - JSON dictionary; _expr_ - string of fuzzy search for items within the directory; *Returns:* List of DatumInContext objects: ``[DatumInContext(value=..., path=..., context=[DatumInContext])]`` - value - found value - path - value selector inside context.value (in implementation of jsonpath-rw: class Index or Fields) *Raises:* JsonValidatorError ] variable[path] assign[=] call[name[parse], parameter[name[expr]]] variable[results] assign[=] call[name[path].find, parameter[name[json_dict]]] if compare[call[name[len], parameter[name[results]]] is constant[0]] begin[:] <ast.Raise object at 0x7da1b10e7400> return[name[results]]
keyword[def] identifier[_json_path_search] ( identifier[self] , identifier[json_dict] , identifier[expr] ): literal[string] identifier[path] = identifier[parse] ( identifier[expr] ) identifier[results] = identifier[path] . identifier[find] ( identifier[json_dict] ) keyword[if] identifier[len] ( identifier[results] ) keyword[is] literal[int] : keyword[raise] identifier[JsonValidatorError] ( literal[string] . identifier[format] ( identifier[str] ( identifier[json_dict] ), identifier[str] ( identifier[expr] ))) keyword[return] identifier[results]
def _json_path_search(self, json_dict, expr): """ Scan JSON dictionary with using json-path passed sting of the format of $.element..element1[index] etc. *Args:* _json_dict_ - JSON dictionary; _expr_ - string of fuzzy search for items within the directory; *Returns:* List of DatumInContext objects: ``[DatumInContext(value=..., path=..., context=[DatumInContext])]`` - value - found value - path - value selector inside context.value (in implementation of jsonpath-rw: class Index or Fields) *Raises:* JsonValidatorError """ path = parse(expr) results = path.find(json_dict) if len(results) is 0: raise JsonValidatorError('Nothing found in the dictionary {0} using the given path {1}'.format(str(json_dict), str(expr))) # depends on [control=['if'], data=[]] return results
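A minimal standalone use of the jsonpath-rw calls this method wraps:

from jsonpath_rw import parse

data = {'a': [{'b': 1}, {'b': 2}]}
matches = parse('$.a[*].b').find(data)   # list of DatumInContext objects
print([m.value for m in matches])        # [1, 2]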
def mutual_accessibility(graph): """ Mutual-accessibility matrix (strongly connected components). @type graph: graph, digraph @param graph: Graph. @rtype: dictionary @return: Mutual-accessibility information for each node. """ recursionlimit = getrecursionlimit() setrecursionlimit(max(len(graph.nodes())*2,recursionlimit)) mutual_access = {} stack = [] low = {} def visit(node): if node in low: return num = len(low) low[node] = num stack_pos = len(stack) stack.append(node) for successor in graph.neighbors(node): visit(successor) low[node] = min(low[node], low[successor]) if num == low[node]: component = stack[stack_pos:] del stack[stack_pos:] component.sort() for each in component: mutual_access[each] = component for item in component: low[item] = len(graph) for node in graph: visit(node) setrecursionlimit(recursionlimit) return mutual_access
def function[mutual_accessibility, parameter[graph]]: constant[ Mutual-accessibility matrix (strongly connected components). @type graph: graph, digraph @param graph: Graph. @rtype: dictionary @return: Mutual-accessibility information for each node. ] variable[recursionlimit] assign[=] call[name[getrecursionlimit], parameter[]] call[name[setrecursionlimit], parameter[call[name[max], parameter[binary_operation[call[name[len], parameter[call[name[graph].nodes, parameter[]]]] * constant[2]], name[recursionlimit]]]]] variable[mutual_access] assign[=] dictionary[[], []] variable[stack] assign[=] list[[]] variable[low] assign[=] dictionary[[], []] def function[visit, parameter[node]]: if compare[name[node] in name[low]] begin[:] return[None] variable[num] assign[=] call[name[len], parameter[name[low]]] call[name[low]][name[node]] assign[=] name[num] variable[stack_pos] assign[=] call[name[len], parameter[name[stack]]] call[name[stack].append, parameter[name[node]]] for taget[name[successor]] in starred[call[name[graph].neighbors, parameter[name[node]]]] begin[:] call[name[visit], parameter[name[successor]]] call[name[low]][name[node]] assign[=] call[name[min], parameter[call[name[low]][name[node]], call[name[low]][name[successor]]]] if compare[name[num] equal[==] call[name[low]][name[node]]] begin[:] variable[component] assign[=] call[name[stack]][<ast.Slice object at 0x7da1b18a06a0>] <ast.Delete object at 0x7da1b18a0790> call[name[component].sort, parameter[]] for taget[name[each]] in starred[name[component]] begin[:] call[name[mutual_access]][name[each]] assign[=] name[component] for taget[name[item]] in starred[name[component]] begin[:] call[name[low]][name[item]] assign[=] call[name[len], parameter[name[graph]]] for taget[name[node]] in starred[name[graph]] begin[:] call[name[visit], parameter[name[node]]] call[name[setrecursionlimit], parameter[name[recursionlimit]]] return[name[mutual_access]]
keyword[def] identifier[mutual_accessibility] ( identifier[graph] ): literal[string] identifier[recursionlimit] = identifier[getrecursionlimit] () identifier[setrecursionlimit] ( identifier[max] ( identifier[len] ( identifier[graph] . identifier[nodes] ())* literal[int] , identifier[recursionlimit] )) identifier[mutual_access] ={} identifier[stack] =[] identifier[low] ={} keyword[def] identifier[visit] ( identifier[node] ): keyword[if] identifier[node] keyword[in] identifier[low] : keyword[return] identifier[num] = identifier[len] ( identifier[low] ) identifier[low] [ identifier[node] ]= identifier[num] identifier[stack_pos] = identifier[len] ( identifier[stack] ) identifier[stack] . identifier[append] ( identifier[node] ) keyword[for] identifier[successor] keyword[in] identifier[graph] . identifier[neighbors] ( identifier[node] ): identifier[visit] ( identifier[successor] ) identifier[low] [ identifier[node] ]= identifier[min] ( identifier[low] [ identifier[node] ], identifier[low] [ identifier[successor] ]) keyword[if] identifier[num] == identifier[low] [ identifier[node] ]: identifier[component] = identifier[stack] [ identifier[stack_pos] :] keyword[del] identifier[stack] [ identifier[stack_pos] :] identifier[component] . identifier[sort] () keyword[for] identifier[each] keyword[in] identifier[component] : identifier[mutual_access] [ identifier[each] ]= identifier[component] keyword[for] identifier[item] keyword[in] identifier[component] : identifier[low] [ identifier[item] ]= identifier[len] ( identifier[graph] ) keyword[for] identifier[node] keyword[in] identifier[graph] : identifier[visit] ( identifier[node] ) identifier[setrecursionlimit] ( identifier[recursionlimit] ) keyword[return] identifier[mutual_access]
def mutual_accessibility(graph): """ Mutual-accessibility matrix (strongly connected components). @type graph: graph, digraph @param graph: Graph. @rtype: dictionary @return: Mutual-accessibility information for each node. """ recursionlimit = getrecursionlimit() setrecursionlimit(max(len(graph.nodes()) * 2, recursionlimit)) mutual_access = {} stack = [] low = {} def visit(node): if node in low: return # depends on [control=['if'], data=[]] num = len(low) low[node] = num stack_pos = len(stack) stack.append(node) for successor in graph.neighbors(node): visit(successor) low[node] = min(low[node], low[successor]) # depends on [control=['for'], data=['successor']] if num == low[node]: component = stack[stack_pos:] del stack[stack_pos:] component.sort() for each in component: mutual_access[each] = component # depends on [control=['for'], data=['each']] for item in component: low[item] = len(graph) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]] for node in graph: visit(node) # depends on [control=['for'], data=['node']] setrecursionlimit(recursionlimit) return mutual_access
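A hedged usage sketch with the python-graph package this function appears to come from (the pygraph import paths are assumed):

from pygraph.classes.digraph import digraph
from pygraph.algorithms.accessibility import mutual_accessibility

gr = digraph()
gr.add_nodes(['a', 'b', 'c'])
gr.add_edge(('a', 'b'))
gr.add_edge(('b', 'a'))
gr.add_edge(('b', 'c'))
print(mutual_accessibility(gr))
# {'a': ['a', 'b'], 'b': ['a', 'b'], 'c': ['c']} -- two strongly connected components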
def _recursive_get(self, key, dic=None): """ Gets contents of requirement key recursively so users can search for specific keys inside nested requirement dicts. :param key: key or dot separated string of keys to look for. :param dic: Optional dictionary to use in the search. If not provided, self._requirements is used. :return: results of search or None """ return recursive_search(key, dic) if dic else recursive_search(key, self._requirements)
def function[_recursive_get, parameter[self, key, dic]]: constant[ Gets contents of requirement key recursively so users can search for specific keys inside nested requirement dicts. :param key: key or dot separated string of keys to look for. :param dic: Optional dictionary to use in the search. If not provided, self._requirements is used. :return: results of search or None ] return[<ast.IfExp object at 0x7da1b0e162f0>]
keyword[def] identifier[_recursive_get] ( identifier[self] , identifier[key] , identifier[dic] = keyword[None] ): literal[string] keyword[return] identifier[recursive_search] ( identifier[key] , identifier[dic] ) keyword[if] identifier[dic] keyword[else] identifier[recursive_search] ( identifier[key] , identifier[self] . identifier[_requirements] )
def _recursive_get(self, key, dic=None): """ Gets contents of requirement key recursively so users can search for specific keys inside nested requirement dicts. :param key: key or dot separated string of keys to look for. :param dic: Optional dictionary to use in the search. If not provided, self._requirements is used. :return: results of search or None """ return recursive_search(key, dic) if dic else recursive_search(key, self._requirements)
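recursive_search itself is not shown in this snippet; a minimal hypothetical version consistent with the dotted-key docstring (the real helper may also search nested dicts more broadly):

def recursive_search(key, dic):
    # Hypothetical helper: walk 'a.b.c' style keys into nested dicts.
    head, _, rest = key.partition('.')
    if not isinstance(dic, dict) or head not in dic:
        return None
    return recursive_search(rest, dic[head]) if rest else dic[head]

print(recursive_search('a.b', {'a': {'b': 42}}))   # 42
print(recursive_search('a.z', {'a': {'b': 42}}))   # None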
def density_und(CIJ): ''' Density is the fraction of present connections to possible connections. Parameters ---------- CIJ : NxN np.ndarray undirected (weighted/binary) connection matrix Returns ------- kden : float density N : int number of vertices k : int number of edges Notes ----- Assumes CIJ is undirected and has no self-connections. Weight information is discarded. ''' n = len(CIJ) k = np.size(np.where(np.triu(CIJ).flatten())) kden = k / ((n * n - n) / 2) return kden, n, k
def function[density_und, parameter[CIJ]]: constant[ Density is the fraction of present connections to possible connections. Parameters ---------- CIJ : NxN np.ndarray undirected (weighted/binary) connection matrix Returns ------- kden : float density N : int number of vertices k : int number of edges Notes ----- Assumes CIJ is undirected and has no self-connections. Weight information is discarded. ] variable[n] assign[=] call[name[len], parameter[name[CIJ]]] variable[k] assign[=] call[name[np].size, parameter[call[name[np].where, parameter[call[call[name[np].triu, parameter[name[CIJ]]].flatten, parameter[]]]]]] variable[kden] assign[=] binary_operation[name[k] / binary_operation[binary_operation[binary_operation[name[n] * name[n]] - name[n]] / constant[2]]] return[tuple[[<ast.Name object at 0x7da18ede6c20>, <ast.Name object at 0x7da18ede5840>, <ast.Name object at 0x7da18ede5d50>]]]
keyword[def] identifier[density_und] ( identifier[CIJ] ): literal[string] identifier[n] = identifier[len] ( identifier[CIJ] ) identifier[k] = identifier[np] . identifier[size] ( identifier[np] . identifier[where] ( identifier[np] . identifier[triu] ( identifier[CIJ] ). identifier[flatten] ())) identifier[kden] = identifier[k] /(( identifier[n] * identifier[n] - identifier[n] )/ literal[int] ) keyword[return] identifier[kden] , identifier[n] , identifier[k]
def density_und(CIJ): """ Density is the fraction of present connections to possible connections. Parameters ---------- CIJ : NxN np.ndarray undirected (weighted/binary) connection matrix Returns ------- kden : float density N : int number of vertices k : int number of edges Notes ----- Assumes CIJ is undirected and has no self-connections. Weight information is discarded. """ n = len(CIJ) k = np.size(np.where(np.triu(CIJ).flatten())) kden = k / ((n * n - n) / 2) return (kden, n, k)
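A worked check of kden = k / ((n * n - n) / 2) on a 3-node path graph:

import numpy as np

CIJ = np.array([[0, 1, 0],
                [1, 0, 1],
                [0, 1, 0]])          # undirected edges (0,1) and (1,2)
print(density_und(CIJ))              # (0.666..., 3, 2): 2 of 3 possible edges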
def count_matching(self, selector, offset=0): """Count the number of readings matching selector. Args: selector (DataStreamSelector): The selector that we want to count matching readings for. offset (int): The starting offset that we should begin counting at. Returns: int: The number of matching readings. """ if selector.output: data = self.streaming_data elif selector.buffered: data = self.storage_data else: raise ArgumentError("You can only pass a buffered selector to count_matching", selector=selector) count = 0 for i in range(offset, len(data)): reading = data[i] stream = DataStream.FromEncoded(reading.stream) if selector.matches(stream): count += 1 return count
def function[count_matching, parameter[self, selector, offset]]: constant[Count the number of readings matching selector. Args: selector (DataStreamSelector): The selector that we want to count matching readings for. offset (int): The starting offset that we should begin counting at. Returns: int: The number of matching readings. ] if name[selector].output begin[:] variable[data] assign[=] name[self].streaming_data variable[count] assign[=] constant[0] for taget[name[i]] in starred[call[name[range], parameter[name[offset], call[name[len], parameter[name[data]]]]]] begin[:] variable[reading] assign[=] call[name[data]][name[i]] variable[stream] assign[=] call[name[DataStream].FromEncoded, parameter[name[reading].stream]] if call[name[selector].matches, parameter[name[stream]]] begin[:] <ast.AugAssign object at 0x7da20e957700> return[name[count]]
keyword[def] identifier[count_matching] ( identifier[self] , identifier[selector] , identifier[offset] = literal[int] ): literal[string] keyword[if] identifier[selector] . identifier[output] : identifier[data] = identifier[self] . identifier[streaming_data] keyword[elif] identifier[selector] . identifier[buffered] : identifier[data] = identifier[self] . identifier[storage_data] keyword[else] : keyword[raise] identifier[ArgumentError] ( literal[string] , identifier[selector] = identifier[selector] ) identifier[count] = literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[offset] , identifier[len] ( identifier[data] )): identifier[reading] = identifier[data] [ identifier[i] ] identifier[stream] = identifier[DataStream] . identifier[FromEncoded] ( identifier[reading] . identifier[stream] ) keyword[if] identifier[selector] . identifier[matches] ( identifier[stream] ): identifier[count] += literal[int] keyword[return] identifier[count]
def count_matching(self, selector, offset=0): """Count the number of readings matching selector. Args: selector (DataStreamSelector): The selector that we want to count matching readings for. offset (int): The starting offset that we should begin counting at. Returns: int: The number of matching readings. """ if selector.output: data = self.streaming_data # depends on [control=['if'], data=[]] elif selector.buffered: data = self.storage_data # depends on [control=['if'], data=[]] else: raise ArgumentError('You can only pass a buffered selector to count_matching', selector=selector) count = 0 for i in range(offset, len(data)): reading = data[i] stream = DataStream.FromEncoded(reading.stream) if selector.matches(stream): count += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] return count
def total_energy_matrix(self): """ The total energy matrix. Each matrix element (i, j) corresponds to the total interaction energy between site i and site j. Note that this does not include the charged-cell energy, which is only important when the simulation cell is not charge balanced. """ totalenergy = self._recip + self._real for i in range(len(self._point)): totalenergy[i, i] += self._point[i] return totalenergy
def function[total_energy_matrix, parameter[self]]: constant[ The total energy matrix. Each matrix element (i, j) corresponds to the total interaction energy between site i and site j. Note that this does not include the charged-cell energy, which is only important when the simulation cell is not charge balanced. ] variable[totalenergy] assign[=] binary_operation[name[self]._recip + name[self]._real] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self]._point]]]]] begin[:] <ast.AugAssign object at 0x7da18c4ce7a0> return[name[totalenergy]]
keyword[def] identifier[total_energy_matrix] ( identifier[self] ): literal[string] identifier[totalenergy] = identifier[self] . identifier[_recip] + identifier[self] . identifier[_real] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[_point] )): identifier[totalenergy] [ identifier[i] , identifier[i] ]+= identifier[self] . identifier[_point] [ identifier[i] ] keyword[return] identifier[totalenergy]
def total_energy_matrix(self): """ The total energy matrix. Each matrix element (i, j) corresponds to the total interaction energy between site i and site j. Note that this does not include the charged-cell energy, which is only important when the simulation cell is not charge balanced. """ totalenergy = self._recip + self._real for i in range(len(self._point)): totalenergy[i, i] += self._point[i] # depends on [control=['for'], data=['i']] return totalenergy
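The diagonal update in isolation, with toy numbers (the recip/real/point values are made up):

import numpy as np

recip = np.zeros((2, 2))
real = np.ones((2, 2))
point = np.array([-0.5, -1.0])       # per-site self-energy terms
total = recip + real
total[np.diag_indices_from(total)] += point
print(total)   # off-diagonal entries are pair terms; diagonal gains the point terms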
def save(self): """Save the changes to the instance and any related objects.""" # first call save with commit=False for all Forms for form in self._forms: if isinstance(form, BaseForm): form.save(commit=False) # call save on the instance self.instance.save() # call any post-commit hooks that have been stashed on Forms for form in self.forms: if isinstance(form, BaseForm): if hasattr(form, 'save_m2m'): form.save_m2m() if hasattr(form, 'save_related'): form.save_related() # call save on any formsets for form in self._forms: if isinstance(form, BaseFormSet): form.save(commit=True) return self.instance
def function[save, parameter[self]]: constant[Save the changes to the instance and any related objects.] for taget[name[form]] in starred[name[self]._forms] begin[:] if call[name[isinstance], parameter[name[form], name[BaseForm]]] begin[:] call[name[form].save, parameter[]] call[name[self].instance.save, parameter[]] for taget[name[form]] in starred[name[self].forms] begin[:] if call[name[isinstance], parameter[name[form], name[BaseForm]]] begin[:] if call[name[hasattr], parameter[name[form], constant[save_m2m]]] begin[:] call[name[form].save_m2m, parameter[]] if call[name[hasattr], parameter[name[form], constant[save_related]]] begin[:] call[name[form].save_related, parameter[]] for taget[name[form]] in starred[name[self]._forms] begin[:] if call[name[isinstance], parameter[name[form], name[BaseFormSet]]] begin[:] call[name[form].save, parameter[]] return[name[self].instance]
keyword[def] identifier[save] ( identifier[self] ): literal[string] keyword[for] identifier[form] keyword[in] identifier[self] . identifier[_forms] : keyword[if] identifier[isinstance] ( identifier[form] , identifier[BaseForm] ): identifier[form] . identifier[save] ( identifier[commit] = keyword[False] ) identifier[self] . identifier[instance] . identifier[save] () keyword[for] identifier[form] keyword[in] identifier[self] . identifier[forms] : keyword[if] identifier[isinstance] ( identifier[form] , identifier[BaseForm] ): keyword[if] identifier[hasattr] ( identifier[form] , literal[string] ): identifier[form] . identifier[save_m2m] () keyword[if] identifier[hasattr] ( identifier[form] , literal[string] ): identifier[form] . identifier[save_related] () keyword[for] identifier[form] keyword[in] identifier[self] . identifier[_forms] : keyword[if] identifier[isinstance] ( identifier[form] , identifier[BaseFormSet] ): identifier[form] . identifier[save] ( identifier[commit] = keyword[True] ) keyword[return] identifier[self] . identifier[instance]
def save(self): """Save the changes to the instance and any related objects.""" # first call save with commit=False for all Forms for form in self._forms: if isinstance(form, BaseForm): form.save(commit=False) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['form']] # call save on the instance self.instance.save() # call any post-commit hooks that have been stashed on Forms for form in self.forms: if isinstance(form, BaseForm): if hasattr(form, 'save_m2m'): form.save_m2m() # depends on [control=['if'], data=[]] if hasattr(form, 'save_related'): form.save_related() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['form']] # call save on any formsets for form in self._forms: if isinstance(form, BaseFormSet): form.save(commit=True) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['form']] return self.instance
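The commit=False / save / save_m2m sequence in miniature (FakeForm is a hypothetical stand-in for a bound Django ModelForm):

class FakeForm:
    def save(self, commit=True):
        print('build instance' + ('; write row' if commit else ' only'))

    def save_m2m(self):
        print('write many-to-many rows')

form = FakeForm()
form.save(commit=False)   # phase 1: build, defer the DB write
print('instance.save()')  # phase 2: the composite writes the shared instance once
form.save_m2m()           # phase 3: related rows, now that a primary key exists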
def run_commands(*commands, **kwargs): ''' Sends the commands over the transport to the device. This function sends the commands to the device using the nodes transport. This is a lower layer function that shouldn't normally need to be used, preferring instead to use ``config()`` or ``enable()``. transport: ``https`` Specifies the type of connection transport to use. Valid values for the connection are ``socket``, ``http_local``, ``http``, and ``https``. .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. host: ``localhost`` The IP address or DNS host name of the connection device. .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. username: ``admin`` The username to pass to the device to authenticate the eAPI connection. .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. password The password to pass to the device to authenticate the eAPI connection. .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. port The TCP port of the endpoint for the eAPI connection. If this keyword is not specified, the default value is automatically determined by the transport type (``80`` for ``http``, or ``443`` for ``https``). .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. enablepwd The enable mode password if required by the destination node. .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. CLI Example: .. code-block:: bash salt '*' pyeapi.run_commands 'show version' salt '*' pyeapi.run_commands 'show version' encoding=text salt '*' pyeapi.run_commands 'show version' encoding=text host=cr1.thn.lon username=example password=weak Output example: .. code-block:: text veos1: |_ ---------- architecture: i386 bootupTimestamp: 1527541728.53 hardwareRevision: internalBuildId: 63d2e89a-220d-4b8a-a9b3-0524fa8f9c5f internalVersion: 4.18.1F-4591672.4181F isIntlVersion: False memFree: 501468 memTotal: 1893316 modelName: vEOS serialNumber: systemMacAddress: 52:54:00:3f:e6:d0 version: 4.18.1F ''' encoding = kwargs.pop('encoding', 'json') send_enable = kwargs.pop('send_enable', True) output = call('run_commands', commands, encoding=encoding, send_enable=send_enable, **kwargs) if encoding == 'text': ret = [] for res in output: ret.append(res['output']) return ret return output
def function[run_commands, parameter[]]: constant[ Sends the commands over the transport to the device. This function sends the commands to the device using the nodes transport. This is a lower layer function that shouldn't normally need to be used, preferring instead to use ``config()`` or ``enable()``. transport: ``https`` Specifies the type of connection transport to use. Valid values for the connection are ``socket``, ``http_local``, ``http``, and ``https``. .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. host: ``localhost`` The IP address or DNS host name of the connection device. .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. username: ``admin`` The username to pass to the device to authenticate the eAPI connection. .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. password The password to pass to the device to authenticate the eAPI connection. .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. port The TCP port of the endpoint for the eAPI connection. If this keyword is not specified, the default value is automatically determined by the transport type (``80`` for ``http``, or ``443`` for ``https``). .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. enablepwd The enable mode password if required by the destination node. .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. CLI Example: .. code-block:: bash salt '*' pyeapi.run_commands 'show version' salt '*' pyeapi.run_commands 'show version' encoding=text salt '*' pyeapi.run_commands 'show version' encoding=text host=cr1.thn.lon username=example password=weak Output example: .. code-block:: text veos1: |_ ---------- architecture: i386 bootupTimestamp: 1527541728.53 hardwareRevision: internalBuildId: 63d2e89a-220d-4b8a-a9b3-0524fa8f9c5f internalVersion: 4.18.1F-4591672.4181F isIntlVersion: False memFree: 501468 memTotal: 1893316 modelName: vEOS serialNumber: systemMacAddress: 52:54:00:3f:e6:d0 version: 4.18.1F ] variable[encoding] assign[=] call[name[kwargs].pop, parameter[constant[encoding], constant[json]]] variable[send_enable] assign[=] call[name[kwargs].pop, parameter[constant[send_enable], constant[True]]] variable[output] assign[=] call[name[call], parameter[constant[run_commands], name[commands]]] if compare[name[encoding] equal[==] constant[text]] begin[:] variable[ret] assign[=] list[[]] for taget[name[res]] in starred[name[output]] begin[:] call[name[ret].append, parameter[call[name[res]][constant[output]]]] return[name[ret]] return[name[output]]
keyword[def] identifier[run_commands] (* identifier[commands] ,** identifier[kwargs] ): literal[string] identifier[encoding] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] ) identifier[send_enable] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] ) identifier[output] = identifier[call] ( literal[string] , identifier[commands] , identifier[encoding] = identifier[encoding] , identifier[send_enable] = identifier[send_enable] , ** identifier[kwargs] ) keyword[if] identifier[encoding] == literal[string] : identifier[ret] =[] keyword[for] identifier[res] keyword[in] identifier[output] : identifier[ret] . identifier[append] ( identifier[res] [ literal[string] ]) keyword[return] identifier[ret] keyword[return] identifier[output]
def run_commands(*commands, **kwargs): """ Sends the commands over the transport to the device. This function sends the commands to the device using the nodes transport. This is a lower layer function that shouldn't normally need to be used, preferring instead to use ``config()`` or ``enable()``. transport: ``https`` Specifies the type of connection transport to use. Valid values for the connection are ``socket``, ``http_local``, ``http``, and ``https``. .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. host: ``localhost`` The IP address or DNS host name of the connection device. .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. username: ``admin`` The username to pass to the device to authenticate the eAPI connection. .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. password The password to pass to the device to authenticate the eAPI connection. .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. port The TCP port of the endpoint for the eAPI connection. If this keyword is not specified, the default value is automatically determined by the transport type (``80`` for ``http``, or ``443`` for ``https``). .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. enablepwd The enable mode password if required by the destination node. .. note:: This argument does not need to be specified when running in a :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion. CLI Example: .. code-block:: bash salt '*' pyeapi.run_commands 'show version' salt '*' pyeapi.run_commands 'show version' encoding=text salt '*' pyeapi.run_commands 'show version' encoding=text host=cr1.thn.lon username=example password=weak Output example: .. code-block:: text veos1: |_ ---------- architecture: i386 bootupTimestamp: 1527541728.53 hardwareRevision: internalBuildId: 63d2e89a-220d-4b8a-a9b3-0524fa8f9c5f internalVersion: 4.18.1F-4591672.4181F isIntlVersion: False memFree: 501468 memTotal: 1893316 modelName: vEOS serialNumber: systemMacAddress: 52:54:00:3f:e6:d0 version: 4.18.1F """ encoding = kwargs.pop('encoding', 'json') send_enable = kwargs.pop('send_enable', True) output = call('run_commands', commands, encoding=encoding, send_enable=send_enable, **kwargs) if encoding == 'text': ret = [] for res in output: ret.append(res['output']) # depends on [control=['for'], data=['res']] return ret # depends on [control=['if'], data=[]] return output
def download_to_file(url, filepath, resume=False, overwrite=False, chunk_size=1024 * 1024 * 10, loadbar_length=10): """Download a url. prints a simple loading bar [=*loadbar_length] to show progress (in console and notebook) :type url: str :type filepath: str :param filepath: path to download to :param resume: if True resume download from existing file chunk :param overwrite: if True remove any existing filepath :param chunk_size: None or int in bytes :param loadbar_length: int length of load bar :return: """ resume_header = None loaded_size = 0 write_mode = 'wb' if os.path.exists(filepath): if overwrite: os.remove(filepath) elif resume: # if we want to resume, first try and see if the file is already complete loaded_size = os.path.getsize(filepath) clength = requests.head(url).headers.get('content-length') if clength is not None: if int(clength) == loaded_size: return None # give the point to resume at resume_header = {'Range': 'bytes=%s-' % loaded_size} write_mode = 'ab' else: return None stream = False if chunk_size is None else True # start printing with no return character, so that we can have everything on one line print("Downloading {0:s}: ".format(url), end="") response = requests.get(url, stream=stream, headers=resume_header) # raise error if download was unsuccessful response.raise_for_status() # get the size of the file if available total_length = response.headers.get('content-length') if total_length is not None: total_length = float(total_length) + loaded_size print("{0:.2f}Mb/{1:} ".format(total_length / (1024 * 1024), loadbar_length), end="") print("[", end="") parent = os.path.dirname(filepath) if not os.path.exists(parent) and parent: os.makedirs(parent) with io.open(filepath, write_mode) as f: loaded = 0 for chunk in response.iter_content(chunk_size=chunk_size): if chunk: # filter out keep-alive new chunks # print our progress bar if total_length is not None and chunk_size is not None: while loaded < loadbar_length * loaded_size / total_length: print("=", end='') loaded += 1 loaded_size += chunk_size f.write(chunk) if total_length is None: print("=" * loadbar_length, end='') else: while loaded < loadbar_length: print("=", end='') loaded += 1 print("] Finished")
def function[download_to_file, parameter[url, filepath, resume, overwrite, chunk_size, loadbar_length]]: constant[Download a url. prints a simple loading bar [=*loadbar_length] to show progress (in console and notebook) :type url: str :type filepath: str :param filepath: path to download to :param resume: if True resume download from existing file chunk :param overwrite: if True remove any existing filepath :param chunk_size: None or int in bytes :param loadbar_length: int length of load bar :return: ] variable[resume_header] assign[=] constant[None] variable[loaded_size] assign[=] constant[0] variable[write_mode] assign[=] constant[wb] if call[name[os].path.exists, parameter[name[filepath]]] begin[:] if name[overwrite] begin[:] call[name[os].remove, parameter[name[filepath]]] variable[stream] assign[=] <ast.IfExp object at 0x7da20cabdc30> call[name[print], parameter[call[constant[Downloading {0:s}: ].format, parameter[name[url]]]]] variable[response] assign[=] call[name[requests].get, parameter[name[url]]] call[name[response].raise_for_status, parameter[]] variable[total_length] assign[=] call[name[response].headers.get, parameter[constant[content-length]]] if compare[name[total_length] is_not constant[None]] begin[:] variable[total_length] assign[=] binary_operation[call[name[float], parameter[name[total_length]]] + name[loaded_size]] call[name[print], parameter[call[constant[{0:.2f}Mb/{1:} ].format, parameter[binary_operation[name[total_length] / binary_operation[constant[1024] * constant[1024]]], name[loadbar_length]]]]] call[name[print], parameter[constant[[]]] variable[parent] assign[=] call[name[os].path.dirname, parameter[name[filepath]]] if <ast.BoolOp object at 0x7da1b1848cd0> begin[:] call[name[os].makedirs, parameter[name[parent]]] with call[name[io].open, parameter[name[filepath], name[write_mode]]] begin[:] variable[loaded] assign[=] constant[0] for taget[name[chunk]] in starred[call[name[response].iter_content, parameter[]]] begin[:] if name[chunk] begin[:] if <ast.BoolOp object at 0x7da18eb56530> begin[:] while compare[name[loaded] less[<] binary_operation[binary_operation[name[loadbar_length] * name[loaded_size]] / name[total_length]]] begin[:] call[name[print], parameter[constant[=]]] <ast.AugAssign object at 0x7da18eb55bd0> <ast.AugAssign object at 0x7da18eb55720> call[name[f].write, parameter[name[chunk]]] if compare[name[total_length] is constant[None]] begin[:] call[name[print], parameter[binary_operation[constant[=] * name[loadbar_length]]]] call[name[print], parameter[constant[] Finished]]]
keyword[def] identifier[download_to_file] ( identifier[url] , identifier[filepath] , identifier[resume] = keyword[False] , identifier[overwrite] = keyword[False] , identifier[chunk_size] = literal[int] * literal[int] * literal[int] , identifier[loadbar_length] = literal[int] ): literal[string] identifier[resume_header] = keyword[None] identifier[loaded_size] = literal[int] identifier[write_mode] = literal[string] keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[filepath] ): keyword[if] identifier[overwrite] : identifier[os] . identifier[remove] ( identifier[filepath] ) keyword[elif] identifier[resume] : identifier[loaded_size] = identifier[os] . identifier[path] . identifier[getsize] ( identifier[filepath] ) identifier[clength] = identifier[requests] . identifier[head] ( identifier[url] ). identifier[headers] . identifier[get] ( literal[string] ) keyword[if] identifier[clength] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[int] ( identifier[clength] )== identifier[loaded_size] : keyword[return] keyword[None] identifier[resume_header] ={ literal[string] : literal[string] % identifier[loaded_size] } identifier[write_mode] = literal[string] keyword[else] : keyword[return] keyword[None] identifier[stream] = keyword[False] keyword[if] identifier[chunk_size] keyword[is] keyword[None] keyword[else] keyword[True] identifier[print] ( literal[string] . identifier[format] ( identifier[url] ), identifier[end] = literal[string] ) identifier[response] = identifier[requests] . identifier[get] ( identifier[url] , identifier[stream] = identifier[stream] , identifier[headers] = identifier[resume_header] ) identifier[response] . identifier[raise_for_status] () identifier[total_length] = identifier[response] . identifier[headers] . identifier[get] ( literal[string] ) keyword[if] identifier[total_length] keyword[is] keyword[not] keyword[None] : identifier[total_length] = identifier[float] ( identifier[total_length] )+ identifier[loaded_size] identifier[print] ( literal[string] . identifier[format] ( identifier[total_length] /( literal[int] * literal[int] ), identifier[loadbar_length] ), identifier[end] = literal[string] ) identifier[print] ( literal[string] , identifier[end] = literal[string] ) identifier[parent] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[filepath] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[parent] ) keyword[and] identifier[parent] : identifier[os] . identifier[makedirs] ( identifier[parent] ) keyword[with] identifier[io] . identifier[open] ( identifier[filepath] , identifier[write_mode] ) keyword[as] identifier[f] : identifier[loaded] = literal[int] keyword[for] identifier[chunk] keyword[in] identifier[response] . identifier[iter_content] ( identifier[chunk_size] = identifier[chunk_size] ): keyword[if] identifier[chunk] : keyword[if] identifier[total_length] keyword[is] keyword[not] keyword[None] keyword[and] identifier[chunk_size] keyword[is] keyword[not] keyword[None] : keyword[while] identifier[loaded] < identifier[loadbar_length] * identifier[loaded_size] / identifier[total_length] : identifier[print] ( literal[string] , identifier[end] = literal[string] ) identifier[loaded] += literal[int] identifier[loaded_size] += identifier[chunk_size] identifier[f] . identifier[write] ( identifier[chunk] ) keyword[if] identifier[total_length] keyword[is] keyword[None] : identifier[print] ( literal[string] * identifier[loadbar_length] , identifier[end] = literal[string] ) keyword[else] : keyword[while] identifier[loaded] < identifier[loadbar_length] : identifier[print] ( literal[string] , identifier[end] = literal[string] ) identifier[loaded] += literal[int] identifier[print] ( literal[string] )
def download_to_file(url, filepath, resume=False, overwrite=False, chunk_size=1024 * 1024 * 10, loadbar_length=10): """Download a url. prints a simple loading bar [=*loadbar_length] to show progress (in console and notebook) :type url: str :type filepath: str :param filepath: path to download to :param resume: if True resume download from existing file chunk :param overwrite: if True remove any existing filepath :param chunk_size: None or int in bytes :param loadbar_length: int length of load bar :return: """ resume_header = None loaded_size = 0 write_mode = 'wb' if os.path.exists(filepath): if overwrite: os.remove(filepath) # depends on [control=['if'], data=[]] elif resume: # if we want to resume, first try and see if the file is already complete loaded_size = os.path.getsize(filepath) clength = requests.head(url).headers.get('content-length') if clength is not None: if int(clength) == loaded_size: return None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['clength']] # give the point to resume at resume_header = {'Range': 'bytes=%s-' % loaded_size} write_mode = 'ab' # depends on [control=['if'], data=[]] else: return None # depends on [control=['if'], data=[]] stream = False if chunk_size is None else True # start printing with no return character, so that we can have everything on one line print('Downloading {0:s}: '.format(url), end='') response = requests.get(url, stream=stream, headers=resume_header) # raise error if download was unsuccessful response.raise_for_status() # get the size of the file if available total_length = response.headers.get('content-length') if total_length is not None: total_length = float(total_length) + loaded_size print('{0:.2f}Mb/{1:} '.format(total_length / (1024 * 1024), loadbar_length), end='') # depends on [control=['if'], data=['total_length']] print('[', end='') parent = os.path.dirname(filepath) if not os.path.exists(parent) and parent: os.makedirs(parent) # depends on [control=['if'], data=[]] with io.open(filepath, write_mode) as f: loaded = 0 for chunk in response.iter_content(chunk_size=chunk_size): if chunk: # filter out keep-alive new chunks # print our progress bar if total_length is not None and chunk_size is not None: while loaded < loadbar_length * loaded_size / total_length: print('=', end='') loaded += 1 # depends on [control=['while'], data=['loaded']] loaded_size += chunk_size # depends on [control=['if'], data=[]] f.write(chunk) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['chunk']] if total_length is None: print('=' * loadbar_length, end='') # depends on [control=['if'], data=[]] else: while loaded < loadbar_length: print('=', end='') loaded += 1 # depends on [control=['while'], data=['loaded']] # depends on [control=['with'], data=['f']] print('] Finished')
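A usage sketch, assuming the function above is in scope with its `requests`, `io`, and `os` imports; the URL and path are placeholders. The `resume=True` path works by sending an HTTP `Range` header and appending ('ab') to the existing partial file:

download_to_file(
    'https://example.com/data.bin',  # hypothetical endpoint
    '/tmp/data.bin',
    resume=True,             # re-run to pick up where a partial file left off
    chunk_size=1024 * 1024,  # stream in 1 MiB chunks
)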
def safe_compile(self, settings, sourcepath, destination):
        """
        Safe compile

        Instead of raising a compile error, this returns the compile success
        state as a boolean together with a message.

        It creates the needed directory structure first if it contains
        directories that do not already exist.

        Args:
            settings (boussole.conf.model.Settings): Project settings.
            sourcepath (str): Source file path to compile to CSS.
            destination (str): Destination path for compiled CSS.

        Returns:
            tuple: A tuple of (success state, message).

            * success state: a boolean telling whether the compile succeeded;
            * message: a message matching the success state. If the compile
              fails, the message contains the error returned by libsass; on
              success it is just the destination path.
        """
        source_map_destination = None
        if settings.SOURCE_MAP:
            source_map_destination = self.change_extension(destination, "map")

        try:
            content = sass.compile(
                filename=sourcepath,
                output_style=settings.OUTPUT_STYLES,
                source_comments=settings.SOURCE_COMMENTS,
                include_paths=settings.LIBRARY_PATHS,
                custom_import_extensions=settings.CUSTOM_IMPORT_EXTENSIONS,
                # The sourcemap always lives in the same directory as the
                # compiled CSS file
                output_filename_hint=destination,
                source_map_filename=source_map_destination,
            )
        except sass.CompileError as e:
            return False, six.text_type(e)
        else:
            # The compiler returns a tuple (css, map) when the sourcemap is
            # enabled
            sourcemap = None
            if settings.SOURCE_MAP:
                content, sourcemap = content

            self.write_content(content, destination)

            # Write the sourcemap, if any
            if sourcemap:
                self.write_content(sourcemap, source_map_destination)

            return True, destination
def function[safe_compile, parameter[self, settings, sourcepath, destination]]: constant[ Safe compile Instead of raising a compile error, this returns the compile success state as a boolean together with a message. It creates the needed directory structure first if it contains directories that do not already exist. Args: settings (boussole.conf.model.Settings): Project settings. sourcepath (str): Source file path to compile to CSS. destination (str): Destination path for compiled CSS. Returns: tuple: A tuple of (success state, message). * success state: a boolean telling whether the compile succeeded; * message: a message matching the success state. If the compile fails, the message contains the error returned by libsass; on success it is just the destination path. ] variable[source_map_destination] assign[=] constant[None] if name[settings].SOURCE_MAP begin[:] variable[source_map_destination] assign[=] call[name[self].change_extension, parameter[name[destination], constant[map]]] <ast.Try object at 0x7da1b094a8f0>
keyword[def] identifier[safe_compile] ( identifier[self] , identifier[settings] , identifier[sourcepath] , identifier[destination] ): literal[string] identifier[source_map_destination] = keyword[None] keyword[if] identifier[settings] . identifier[SOURCE_MAP] : identifier[source_map_destination] = identifier[self] . identifier[change_extension] ( identifier[destination] , literal[string] ) keyword[try] : identifier[content] = identifier[sass] . identifier[compile] ( identifier[filename] = identifier[sourcepath] , identifier[output_style] = identifier[settings] . identifier[OUTPUT_STYLES] , identifier[source_comments] = identifier[settings] . identifier[SOURCE_COMMENTS] , identifier[include_paths] = identifier[settings] . identifier[LIBRARY_PATHS] , identifier[custom_import_extensions] = identifier[settings] . identifier[CUSTOM_IMPORT_EXTENSIONS] , identifier[output_filename_hint] = identifier[destination] , identifier[source_map_filename] = identifier[source_map_destination] , ) keyword[except] identifier[sass] . identifier[CompileError] keyword[as] identifier[e] : keyword[return] keyword[False] , identifier[six] . identifier[text_type] ( identifier[e] ) keyword[else] : identifier[sourcemap] = keyword[None] keyword[if] identifier[settings] . identifier[SOURCE_MAP] : identifier[content] , identifier[sourcemap] = identifier[content] identifier[self] . identifier[write_content] ( identifier[content] , identifier[destination] ) keyword[if] identifier[sourcemap] : identifier[self] . identifier[write_content] ( identifier[sourcemap] , identifier[source_map_destination] ) keyword[return] keyword[True] , identifier[destination]
def safe_compile(self, settings, sourcepath, destination):
    """
    Safe compile

    Instead of raising a compile error, this returns the compile success
    state as a boolean together with a message.

    It creates the needed directory structure first if it contains
    directories that do not already exist.

    Args:
        settings (boussole.conf.model.Settings): Project settings.
        sourcepath (str): Source file path to compile to CSS.
        destination (str): Destination path for compiled CSS.

    Returns:
        tuple: A tuple of (success state, message).

        * success state: a boolean telling whether the compile succeeded;
        * message: a message matching the success state. If the compile
          fails, the message contains the error returned by libsass; on
          success it is just the destination path.
    """
    source_map_destination = None
    if settings.SOURCE_MAP:
        source_map_destination = self.change_extension(destination, 'map') # depends on [control=['if'], data=[]]
    try:
        # The sourcemap always lives in the same directory as the
        # compiled CSS file
        content = sass.compile(filename=sourcepath, output_style=settings.OUTPUT_STYLES, source_comments=settings.SOURCE_COMMENTS, include_paths=settings.LIBRARY_PATHS, custom_import_extensions=settings.CUSTOM_IMPORT_EXTENSIONS, output_filename_hint=destination, source_map_filename=source_map_destination) # depends on [control=['try'], data=[]]
    except sass.CompileError as e:
        return (False, six.text_type(e)) # depends on [control=['except'], data=['e']]
    else:
        # The compiler returns a tuple (css, map) when the sourcemap is
        # enabled
        sourcemap = None
        if settings.SOURCE_MAP:
            (content, sourcemap) = content # depends on [control=['if'], data=[]]
        self.write_content(content, destination)
        # Write the sourcemap, if any
        if sourcemap:
            self.write_content(sourcemap, source_map_destination) # depends on [control=['if'], data=[]]
        return (True, destination)
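A usage sketch under stated assumptions: libsass (`sass`) is installed, `scss/main.scss` exists, and `compiler` stands in for an instance of the class defining safe_compile (it must also provide `change_extension` and `write_content`). The settings object only needs the attributes the method reads, so a namespace stub shows the call shape:

from types import SimpleNamespace

# Hypothetical settings stub with just the attributes safe_compile reads.
settings = SimpleNamespace(
    SOURCE_MAP=True,
    OUTPUT_STYLES='nested',
    SOURCE_COMMENTS=False,
    LIBRARY_PATHS=['scss/'],
    CUSTOM_IMPORT_EXTENSIONS=None,
)

ok, message = compiler.safe_compile(settings, 'scss/main.scss', 'css/main.css')
if ok:
    print('wrote', message)            # message is the destination path
else:
    print('compile failed:', message)  # message is the libsass error text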
def handle_message(self, response, ignore_subscribe_messages=False): """ Parses a pub/sub message. If the channel or pattern was subscribed to with a message handler, the handler is invoked instead of a parsed message being returned. """ message_type = nativestr(response[0]) if message_type == 'pmessage': message = { 'type': message_type, 'pattern': response[1], 'channel': response[2], 'data': response[3] } elif message_type == 'pong': message = { 'type': message_type, 'pattern': None, 'channel': None, 'data': response[1] } else: message = { 'type': message_type, 'pattern': None, 'channel': response[1], 'data': response[2] } # if this is an unsubscribe message, remove it from memory if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES: if message_type == 'punsubscribe': pattern = response[1] if pattern in self.pending_unsubscribe_patterns: self.pending_unsubscribe_patterns.remove(pattern) self.patterns.pop(pattern, None) else: channel = response[1] if channel in self.pending_unsubscribe_channels: self.pending_unsubscribe_channels.remove(channel) self.channels.pop(channel, None) if message_type in self.PUBLISH_MESSAGE_TYPES: # if there's a message handler, invoke it if message_type == 'pmessage': handler = self.patterns.get(message['pattern'], None) else: handler = self.channels.get(message['channel'], None) if handler: handler(message) return None elif message_type != 'pong': # this is a subscribe/unsubscribe message. ignore if we don't # want them if ignore_subscribe_messages or self.ignore_subscribe_messages: return None return message
def function[handle_message, parameter[self, response, ignore_subscribe_messages]]: constant[ Parses a pub/sub message. If the channel or pattern was subscribed to with a message handler, the handler is invoked instead of a parsed message being returned. ] variable[message_type] assign[=] call[name[nativestr], parameter[call[name[response]][constant[0]]]] if compare[name[message_type] equal[==] constant[pmessage]] begin[:] variable[message] assign[=] dictionary[[<ast.Constant object at 0x7da207f03640>, <ast.Constant object at 0x7da207f02950>, <ast.Constant object at 0x7da207f01c90>, <ast.Constant object at 0x7da207f039d0>], [<ast.Name object at 0x7da207f03070>, <ast.Subscript object at 0x7da207f03af0>, <ast.Subscript object at 0x7da207f02aa0>, <ast.Subscript object at 0x7da207f02710>]] if compare[name[message_type] in name[self].UNSUBSCRIBE_MESSAGE_TYPES] begin[:] if compare[name[message_type] equal[==] constant[punsubscribe]] begin[:] variable[pattern] assign[=] call[name[response]][constant[1]] if compare[name[pattern] in name[self].pending_unsubscribe_patterns] begin[:] call[name[self].pending_unsubscribe_patterns.remove, parameter[name[pattern]]] call[name[self].patterns.pop, parameter[name[pattern], constant[None]]] if compare[name[message_type] in name[self].PUBLISH_MESSAGE_TYPES] begin[:] if compare[name[message_type] equal[==] constant[pmessage]] begin[:] variable[handler] assign[=] call[name[self].patterns.get, parameter[call[name[message]][constant[pattern]], constant[None]]] if name[handler] begin[:] call[name[handler], parameter[name[message]]] return[constant[None]] return[name[message]]
keyword[def] identifier[handle_message] ( identifier[self] , identifier[response] , identifier[ignore_subscribe_messages] = keyword[False] ): literal[string] identifier[message_type] = identifier[nativestr] ( identifier[response] [ literal[int] ]) keyword[if] identifier[message_type] == literal[string] : identifier[message] ={ literal[string] : identifier[message_type] , literal[string] : identifier[response] [ literal[int] ], literal[string] : identifier[response] [ literal[int] ], literal[string] : identifier[response] [ literal[int] ] } keyword[elif] identifier[message_type] == literal[string] : identifier[message] ={ literal[string] : identifier[message_type] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : identifier[response] [ literal[int] ] } keyword[else] : identifier[message] ={ literal[string] : identifier[message_type] , literal[string] : keyword[None] , literal[string] : identifier[response] [ literal[int] ], literal[string] : identifier[response] [ literal[int] ] } keyword[if] identifier[message_type] keyword[in] identifier[self] . identifier[UNSUBSCRIBE_MESSAGE_TYPES] : keyword[if] identifier[message_type] == literal[string] : identifier[pattern] = identifier[response] [ literal[int] ] keyword[if] identifier[pattern] keyword[in] identifier[self] . identifier[pending_unsubscribe_patterns] : identifier[self] . identifier[pending_unsubscribe_patterns] . identifier[remove] ( identifier[pattern] ) identifier[self] . identifier[patterns] . identifier[pop] ( identifier[pattern] , keyword[None] ) keyword[else] : identifier[channel] = identifier[response] [ literal[int] ] keyword[if] identifier[channel] keyword[in] identifier[self] . identifier[pending_unsubscribe_channels] : identifier[self] . identifier[pending_unsubscribe_channels] . identifier[remove] ( identifier[channel] ) identifier[self] . identifier[channels] . identifier[pop] ( identifier[channel] , keyword[None] ) keyword[if] identifier[message_type] keyword[in] identifier[self] . identifier[PUBLISH_MESSAGE_TYPES] : keyword[if] identifier[message_type] == literal[string] : identifier[handler] = identifier[self] . identifier[patterns] . identifier[get] ( identifier[message] [ literal[string] ], keyword[None] ) keyword[else] : identifier[handler] = identifier[self] . identifier[channels] . identifier[get] ( identifier[message] [ literal[string] ], keyword[None] ) keyword[if] identifier[handler] : identifier[handler] ( identifier[message] ) keyword[return] keyword[None] keyword[elif] identifier[message_type] != literal[string] : keyword[if] identifier[ignore_subscribe_messages] keyword[or] identifier[self] . identifier[ignore_subscribe_messages] : keyword[return] keyword[None] keyword[return] identifier[message]
def handle_message(self, response, ignore_subscribe_messages=False): """ Parses a pub/sub message. If the channel or pattern was subscribed to with a message handler, the handler is invoked instead of a parsed message being returned. """ message_type = nativestr(response[0]) if message_type == 'pmessage': message = {'type': message_type, 'pattern': response[1], 'channel': response[2], 'data': response[3]} # depends on [control=['if'], data=['message_type']] elif message_type == 'pong': message = {'type': message_type, 'pattern': None, 'channel': None, 'data': response[1]} # depends on [control=['if'], data=['message_type']] else: message = {'type': message_type, 'pattern': None, 'channel': response[1], 'data': response[2]} # if this is an unsubscribe message, remove it from memory if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES: if message_type == 'punsubscribe': pattern = response[1] if pattern in self.pending_unsubscribe_patterns: self.pending_unsubscribe_patterns.remove(pattern) self.patterns.pop(pattern, None) # depends on [control=['if'], data=['pattern']] # depends on [control=['if'], data=[]] else: channel = response[1] if channel in self.pending_unsubscribe_channels: self.pending_unsubscribe_channels.remove(channel) self.channels.pop(channel, None) # depends on [control=['if'], data=['channel']] # depends on [control=['if'], data=['message_type']] if message_type in self.PUBLISH_MESSAGE_TYPES: # if there's a message handler, invoke it if message_type == 'pmessage': handler = self.patterns.get(message['pattern'], None) # depends on [control=['if'], data=[]] else: handler = self.channels.get(message['channel'], None) if handler: handler(message) return None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['message_type']] elif message_type != 'pong': # this is a subscribe/unsubscribe message. ignore if we don't # want them if ignore_subscribe_messages or self.ignore_subscribe_messages: return None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return message
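A minimal harness showing how a raw 'pmessage' reply is parsed by the method above. `FakePubSub` carries only the attributes handle_message touches, and `nativestr` is stubbed to redis-py's usual behavior; both are assumptions for illustration, and the method is assumed to be in scope:

# Stub for redis-py's nativestr helper (assumed behavior).
def nativestr(x):
    return x if isinstance(x, str) else x.decode('utf-8')

class FakePubSub:
    UNSUBSCRIBE_MESSAGE_TYPES = ('unsubscribe', 'punsubscribe')
    PUBLISH_MESSAGE_TYPES = ('message', 'pmessage')
    ignore_subscribe_messages = False

    def __init__(self):
        self.channels = {}
        self.patterns = {'news.*': None}  # subscribed without a handler
        self.pending_unsubscribe_channels = set()
        self.pending_unsubscribe_patterns = set()

    handle_message = handle_message  # reuse the method defined above

ps = FakePubSub()
msg = ps.handle_message(['pmessage', 'news.*', 'news.tech', 'hello'])
print(msg['channel'], msg['data'])  # -> news.tech hello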
def safe_call(func, *args, **kwargs):
    """
    Call func safely, returning the exception instead of raising it.
    """
    try:
        return func(*args, **kwargs)
    except Exception as e:
        logger.error('exc occur. e: %s, func: %s', e, func, exc_info=True)
        # The caller can use isinstance(e, BaseException) to check whether an exception occurred
        return e
def function[safe_call, parameter[func]]: constant[ Call func safely, returning the exception instead of raising it. ] <ast.Try object at 0x7da18c4ce290>
keyword[def] identifier[safe_call] ( identifier[func] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[try] : keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[logger] . identifier[error] ( literal[string] , identifier[e] , identifier[func] , identifier[exc_info] = keyword[True] ) keyword[return] identifier[e]
def safe_call(func, *args, **kwargs): """ Call func safely, returning the exception instead of raising it. """ try: return func(*args, **kwargs) # depends on [control=['try'], data=[]] except Exception as e: logger.error('exc occur. e: %s, func: %s', e, func, exc_info=True) # The caller can use isinstance(e, BaseException) to check whether an exception occurred return e # depends on [control=['except'], data=['e']]
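A usage sketch; safe_call logs through a module-level `logger`, so one is configured alongside it here:

import logging

logging.basicConfig()
logger = logging.getLogger(__name__)  # the module-level logger safe_call uses

result = safe_call(int, 'not-a-number')
if isinstance(result, Exception):  # the documented failure check
    print('failed:', result)       # -> failed: invalid literal for int() ...
else:
    print('value:', result)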
def calculate_content_length(self): """Returns the content length if available or `None` otherwise.""" try: self._ensure_sequence() except RuntimeError: return None return sum(len(x) for x in self.response)
def function[calculate_content_length, parameter[self]]: constant[Returns the content length if available or `None` otherwise.] <ast.Try object at 0x7da20c6c6e60> return[call[name[sum], parameter[<ast.GeneratorExp object at 0x7da20c6c66b0>]]]
keyword[def] identifier[calculate_content_length] ( identifier[self] ): literal[string] keyword[try] : identifier[self] . identifier[_ensure_sequence] () keyword[except] identifier[RuntimeError] : keyword[return] keyword[None] keyword[return] identifier[sum] ( identifier[len] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[response] )
def calculate_content_length(self): """Returns the content length if available or `None` otherwise.""" try: self._ensure_sequence() # depends on [control=['try'], data=[]] except RuntimeError: return None # depends on [control=['except'], data=[]] return sum((len(x) for x in self.response))
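An illustrative stand-in for a werkzeug-style response object: `_ensure_sequence` raises RuntimeError while the body is still a stream, which is exactly when the length must be reported as None. The method above is assumed to be in scope:

class FakeResponse:
    def __init__(self, response, sequence=True):
        self.response = response
        self._sequence = sequence

    def _ensure_sequence(self):
        if not self._sequence:
            raise RuntimeError('response is streamed')

    calculate_content_length = calculate_content_length  # method from above

print(FakeResponse([b'hello, ', b'world']).calculate_content_length())        # 12
print(FakeResponse(iter([b'x']), sequence=False).calculate_content_length())  # None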
def process_sample(job, inputs, tar_id): """ Converts sample.tar(.gz) into two fastq files. Due to edge conditions... BEWARE: HERE BE DRAGONS :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str tar_id: FileStore ID of sample tar """ job.fileStore.logToMaster('Processing sample into read pairs: {}'.format(inputs.uuid)) work_dir = job.fileStore.getLocalTempDir() # I/O tar_path = job.fileStore.readGlobalFile(tar_id, os.path.join(work_dir, 'sample.tar')) # Untar File and concat subprocess.check_call(['tar', '-xvf', tar_path, '-C', work_dir]) os.remove(os.path.join(work_dir, 'sample.tar')) # Grab files from tarball fastqs = [] for root, subdir, files in os.walk(work_dir): fastqs.extend([os.path.join(root, x) for x in files]) # Check for read 1 and read 2 files r1 = sorted([x for x in fastqs if 'R1' in x]) r2 = sorted([x for x in fastqs if 'R2' in x]) if not r1 or not r2: # Check if using a different standard r1 = sorted([x for x in fastqs if '_1' in x]) r2 = sorted([x for x in fastqs if '_2' in x]) # Prune file name matches from each list if len(r1) > len(r2): r1 = [x for x in r1 if x not in r2] elif len(r2) > len(r1): r2 = [x for x in r2 if x not in r1] # Flag if data is single-ended assert r1 and r2, 'This pipeline does not support single-ended data. R1: {}\nR2:{}'.format(r1, r2) command = 'zcat' if r1[0].endswith('gz') and r2[0].endswith('gz') else 'cat' with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1: p1 = subprocess.Popen([command] + r1, stdout=f1) with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2: p2 = subprocess.Popen([command] + r2, stdout=f2) p1.wait() p2.wait() # Write to fileStore r1_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq')) r2_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq')) job.fileStore.deleteGlobalFile(tar_id) # Start cutadapt step job.addChildJobFn(cutadapt, inputs, r1_id, r2_id, disk='60G').rv()
def function[process_sample, parameter[job, inputs, tar_id]]: constant[ Converts sample.tar(.gz) into two fastq files. Due to edge conditions... BEWARE: HERE BE DRAGONS :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str tar_id: FileStore ID of sample tar ] call[name[job].fileStore.logToMaster, parameter[call[constant[Processing sample into read pairs: {}].format, parameter[name[inputs].uuid]]]] variable[work_dir] assign[=] call[name[job].fileStore.getLocalTempDir, parameter[]] variable[tar_path] assign[=] call[name[job].fileStore.readGlobalFile, parameter[name[tar_id], call[name[os].path.join, parameter[name[work_dir], constant[sample.tar]]]]] call[name[subprocess].check_call, parameter[list[[<ast.Constant object at 0x7da18eb55f00>, <ast.Constant object at 0x7da18eb56740>, <ast.Name object at 0x7da18eb55b40>, <ast.Constant object at 0x7da18eb56d10>, <ast.Name object at 0x7da18eb57a00>]]]] call[name[os].remove, parameter[call[name[os].path.join, parameter[name[work_dir], constant[sample.tar]]]]] variable[fastqs] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da18eb57730>, <ast.Name object at 0x7da18eb54700>, <ast.Name object at 0x7da18eb54ac0>]]] in starred[call[name[os].walk, parameter[name[work_dir]]]] begin[:] call[name[fastqs].extend, parameter[<ast.ListComp object at 0x7da18eb57160>]] variable[r1] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da18eb54fa0>]] variable[r2] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da18eb54fd0>]] if <ast.BoolOp object at 0x7da18eb55900> begin[:] variable[r1] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da18eb55c00>]] variable[r2] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da18eb557e0>]] if compare[call[name[len], parameter[name[r1]]] greater[>] call[name[len], parameter[name[r2]]]] begin[:] variable[r1] assign[=] <ast.ListComp object at 0x7da18eb57880> assert[<ast.BoolOp object at 0x7da18eb56b90>] variable[command] assign[=] <ast.IfExp object at 0x7da18eb56b60> with call[name[open], parameter[call[name[os].path.join, parameter[name[work_dir], constant[R1.fastq]]], constant[w]]] begin[:] variable[p1] assign[=] call[name[subprocess].Popen, parameter[binary_operation[list[[<ast.Name object at 0x7da20e955030>]] + name[r1]]]] with call[name[open], parameter[call[name[os].path.join, parameter[name[work_dir], constant[R2.fastq]]], constant[w]]] begin[:] variable[p2] assign[=] call[name[subprocess].Popen, parameter[binary_operation[list[[<ast.Name object at 0x7da20e957550>]] + name[r2]]]] call[name[p1].wait, parameter[]] call[name[p2].wait, parameter[]] variable[r1_id] assign[=] call[name[job].fileStore.writeGlobalFile, parameter[call[name[os].path.join, parameter[name[work_dir], constant[R1.fastq]]]]] variable[r2_id] assign[=] call[name[job].fileStore.writeGlobalFile, parameter[call[name[os].path.join, parameter[name[work_dir], constant[R2.fastq]]]]] call[name[job].fileStore.deleteGlobalFile, parameter[name[tar_id]]] call[call[name[job].addChildJobFn, parameter[name[cutadapt], name[inputs], name[r1_id], name[r2_id]]].rv, parameter[]]
keyword[def] identifier[process_sample] ( identifier[job] , identifier[inputs] , identifier[tar_id] ): literal[string] identifier[job] . identifier[fileStore] . identifier[logToMaster] ( literal[string] . identifier[format] ( identifier[inputs] . identifier[uuid] )) identifier[work_dir] = identifier[job] . identifier[fileStore] . identifier[getLocalTempDir] () identifier[tar_path] = identifier[job] . identifier[fileStore] . identifier[readGlobalFile] ( identifier[tar_id] , identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] )) identifier[subprocess] . identifier[check_call] ([ literal[string] , literal[string] , identifier[tar_path] , literal[string] , identifier[work_dir] ]) identifier[os] . identifier[remove] ( identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] )) identifier[fastqs] =[] keyword[for] identifier[root] , identifier[subdir] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[work_dir] ): identifier[fastqs] . identifier[extend] ([ identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[files] ]) identifier[r1] = identifier[sorted] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[fastqs] keyword[if] literal[string] keyword[in] identifier[x] ]) identifier[r2] = identifier[sorted] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[fastqs] keyword[if] literal[string] keyword[in] identifier[x] ]) keyword[if] keyword[not] identifier[r1] keyword[or] keyword[not] identifier[r2] : identifier[r1] = identifier[sorted] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[fastqs] keyword[if] literal[string] keyword[in] identifier[x] ]) identifier[r2] = identifier[sorted] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[fastqs] keyword[if] literal[string] keyword[in] identifier[x] ]) keyword[if] identifier[len] ( identifier[r1] )> identifier[len] ( identifier[r2] ): identifier[r1] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[r1] keyword[if] identifier[x] keyword[not] keyword[in] identifier[r2] ] keyword[elif] identifier[len] ( identifier[r2] )> identifier[len] ( identifier[r1] ): identifier[r2] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[r2] keyword[if] identifier[x] keyword[not] keyword[in] identifier[r1] ] keyword[assert] identifier[r1] keyword[and] identifier[r2] , literal[string] . identifier[format] ( identifier[r1] , identifier[r2] ) identifier[command] = literal[string] keyword[if] identifier[r1] [ literal[int] ]. identifier[endswith] ( literal[string] ) keyword[and] identifier[r2] [ literal[int] ]. identifier[endswith] ( literal[string] ) keyword[else] literal[string] keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] ), literal[string] ) keyword[as] identifier[f1] : identifier[p1] = identifier[subprocess] . identifier[Popen] ([ identifier[command] ]+ identifier[r1] , identifier[stdout] = identifier[f1] ) keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] ), literal[string] ) keyword[as] identifier[f2] : identifier[p2] = identifier[subprocess] . identifier[Popen] ([ identifier[command] ]+ identifier[r2] , identifier[stdout] = identifier[f2] ) identifier[p1] . identifier[wait] () identifier[p2] . identifier[wait] () identifier[r1_id] = identifier[job] . identifier[fileStore] . identifier[writeGlobalFile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] )) identifier[r2_id] = identifier[job] . identifier[fileStore] . identifier[writeGlobalFile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] )) identifier[job] . identifier[fileStore] . identifier[deleteGlobalFile] ( identifier[tar_id] ) identifier[job] . identifier[addChildJobFn] ( identifier[cutadapt] , identifier[inputs] , identifier[r1_id] , identifier[r2_id] , identifier[disk] = literal[string] ). identifier[rv] ()
def process_sample(job, inputs, tar_id): """ Converts sample.tar(.gz) into two fastq files. Due to edge conditions... BEWARE: HERE BE DRAGONS :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str tar_id: FileStore ID of sample tar """ job.fileStore.logToMaster('Processing sample into read pairs: {}'.format(inputs.uuid)) work_dir = job.fileStore.getLocalTempDir() # I/O tar_path = job.fileStore.readGlobalFile(tar_id, os.path.join(work_dir, 'sample.tar')) # Untar File and concat subprocess.check_call(['tar', '-xvf', tar_path, '-C', work_dir]) os.remove(os.path.join(work_dir, 'sample.tar')) # Grab files from tarball fastqs = [] for (root, subdir, files) in os.walk(work_dir): fastqs.extend([os.path.join(root, x) for x in files]) # depends on [control=['for'], data=[]] # Check for read 1 and read 2 files r1 = sorted([x for x in fastqs if 'R1' in x]) r2 = sorted([x for x in fastqs if 'R2' in x]) if not r1 or not r2: # Check if using a different standard r1 = sorted([x for x in fastqs if '_1' in x]) r2 = sorted([x for x in fastqs if '_2' in x]) # depends on [control=['if'], data=[]] # Prune file name matches from each list if len(r1) > len(r2): r1 = [x for x in r1 if x not in r2] # depends on [control=['if'], data=[]] elif len(r2) > len(r1): r2 = [x for x in r2 if x not in r1] # depends on [control=['if'], data=[]] # Flag if data is single-ended assert r1 and r2, 'This pipeline does not support single-ended data. R1: {}\nR2:{}'.format(r1, r2) command = 'zcat' if r1[0].endswith('gz') and r2[0].endswith('gz') else 'cat' with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1: p1 = subprocess.Popen([command] + r1, stdout=f1) # depends on [control=['with'], data=['f1']] with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2: p2 = subprocess.Popen([command] + r2, stdout=f2) # depends on [control=['with'], data=['f2']] p1.wait() p2.wait() # Write to fileStore r1_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq')) r2_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq')) job.fileStore.deleteGlobalFile(tar_id) # Start cutadapt step job.addChildJobFn(cutadapt, inputs, r1_id, r2_id, disk='60G').rv()
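The read-pair discovery logic from the middle of process_sample, extracted and run on a synthetic file list so the R1/R2 vs _1/_2 fallback is visible in isolation:

fastqs = ['sample_1.fastq.gz', 'sample_2.fastq.gz', 'notes.txt']

r1 = sorted(x for x in fastqs if 'R1' in x)
r2 = sorted(x for x in fastqs if 'R2' in x)
if not r1 or not r2:
    # Fall back to the _1/_2 naming convention
    r1 = sorted(x for x in fastqs if '_1' in x)
    r2 = sorted(x for x in fastqs if '_2' in x)
# Prune file names that matched both lists
if len(r1) > len(r2):
    r1 = [x for x in r1 if x not in r2]
elif len(r2) > len(r1):
    r2 = [x for x in r2 if x not in r1]

print(r1, r2)  # -> ['sample_1.fastq.gz'] ['sample_2.fastq.gz']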
def write_json_file(self, path): """ Serialize this VariantCollection to a JSON representation and write it out to a text file. """ with open(path, "w") as f: f.write(self.to_json())
def function[write_json_file, parameter[self, path]]: constant[ Serialize this VariantCollection to a JSON representation and write it out to a text file. ] with call[name[open], parameter[name[path], constant[w]]] begin[:] call[name[f].write, parameter[call[name[self].to_json, parameter[]]]]
keyword[def] identifier[write_json_file] ( identifier[self] , identifier[path] ): literal[string] keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( identifier[self] . identifier[to_json] ())
def write_json_file(self, path): """ Serialize this VariantCollection to a JSON representation and write it out to a text file. """ with open(path, 'w') as f: f.write(self.to_json()) # depends on [control=['with'], data=['f']]
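A usage sketch; in Python 3 the method can be exercised as a plain function with any object providing the to_json() it expects:

class FakeCollection:
    def to_json(self):
        return '{"variants": []}'  # stand-in payload

write_json_file(FakeCollection(), '/tmp/variants.json')
with open('/tmp/variants.json') as f:
    print(f.read())  # -> {"variants": []}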
def parse_crop(crop, xy_image, xy_window):
    """
    Returns x, y offsets for cropping. The window area should fit inside
    the image, but out-of-range values are clamped so it works out anyway
    """
    x_alias_percent = {
        'left': '0%',
        'center': '50%',
        'right': '100%',
    }
    y_alias_percent = {
        'top': '0%',
        'center': '50%',
        'bottom': '100%',
    }
    xy_crop = crop.split(' ')
    if len(xy_crop) == 1:
        if crop in x_alias_percent:
            x_crop = x_alias_percent[crop]
            y_crop = '50%'
        elif crop in y_alias_percent:
            y_crop = y_alias_percent[crop]
            x_crop = '50%'
        else:
            x_crop, y_crop = crop, crop
    elif len(xy_crop) == 2:
        x_crop, y_crop = xy_crop
        x_crop = x_alias_percent.get(x_crop, x_crop)
        y_crop = y_alias_percent.get(y_crop, y_crop)
    else:
        raise ThumbnailParseError('Unrecognized crop option: %s' % crop)

    def get_offset(crop, epsilon):
        m = bgpos_pat.match(crop)
        if not m:
            raise ThumbnailParseError('Unrecognized crop option: %s' % crop)
        value = int(m.group('value'))  # we only take ints in the regexp
        unit = m.group('unit')
        if unit == '%':
            value = epsilon * value / 100.0
        # return ∈ [0, epsilon]
        return int(max(0, min(value, epsilon)))
    offset_x = get_offset(x_crop, xy_image[0] - xy_window[0])
    offset_y = get_offset(y_crop, xy_image[1] - xy_window[1])
    return offset_x, offset_y
def function[parse_crop, parameter[crop, xy_image, xy_window]]: constant[ Returns x, y offsets for cropping. The window area should fit inside the image, but out-of-range values are clamped so it works out anyway ] variable[x_alias_percent] assign[=] dictionary[[<ast.Constant object at 0x7da18bc71180>, <ast.Constant object at 0x7da18bc73310>, <ast.Constant object at 0x7da18bc71b10>], [<ast.Constant object at 0x7da18bc70e80>, <ast.Constant object at 0x7da18bc71240>, <ast.Constant object at 0x7da18bc730a0>]] variable[y_alias_percent] assign[=] dictionary[[<ast.Constant object at 0x7da18bc72f80>, <ast.Constant object at 0x7da18bc712a0>, <ast.Constant object at 0x7da18bc70ee0>], [<ast.Constant object at 0x7da18bc728c0>, <ast.Constant object at 0x7da18bc70280>, <ast.Constant object at 0x7da18bc720e0>]] variable[xy_crop] assign[=] call[name[crop].split, parameter[constant[ ]]] if compare[call[name[len], parameter[name[xy_crop]]] equal[==] constant[1]] begin[:] if compare[name[crop] in name[x_alias_percent]] begin[:] variable[x_crop] assign[=] call[name[x_alias_percent]][name[crop]] variable[y_crop] assign[=] constant[50%] def function[get_offset, parameter[crop, epsilon]]: variable[m] assign[=] call[name[bgpos_pat].match, parameter[name[crop]]] if <ast.UnaryOp object at 0x7da18bc70ca0> begin[:] <ast.Raise object at 0x7da18bc70ac0> variable[value] assign[=] call[name[int], parameter[call[name[m].group, parameter[constant[value]]]]] variable[unit] assign[=] call[name[m].group, parameter[constant[unit]]] if compare[name[unit] equal[==] constant[%]] begin[:] variable[value] assign[=] binary_operation[binary_operation[name[epsilon] * name[value]] / constant[100.0]] return[call[name[int], parameter[call[name[max], parameter[constant[0], call[name[min], parameter[name[value], name[epsilon]]]]]]]] variable[offset_x] assign[=] call[name[get_offset], parameter[name[x_crop], binary_operation[call[name[xy_image]][constant[0]] - call[name[xy_window]][constant[0]]]]] variable[offset_y] assign[=] call[name[get_offset], parameter[name[y_crop], binary_operation[call[name[xy_image]][constant[1]] - call[name[xy_window]][constant[1]]]]] return[tuple[[<ast.Name object at 0x7da18bc72c50>, <ast.Name object at 0x7da18bc72620>]]]
keyword[def] identifier[parse_crop] ( identifier[crop] , identifier[xy_image] , identifier[xy_window] ): literal[string] identifier[x_alias_percent] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , } identifier[y_alias_percent] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , } identifier[xy_crop] = identifier[crop] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[xy_crop] )== literal[int] : keyword[if] identifier[crop] keyword[in] identifier[x_alias_percent] : identifier[x_crop] = identifier[x_alias_percent] [ identifier[crop] ] identifier[y_crop] = literal[string] keyword[elif] identifier[crop] keyword[in] identifier[y_alias_percent] : identifier[y_crop] = identifier[y_alias_percent] [ identifier[crop] ] identifier[x_crop] = literal[string] keyword[else] : identifier[x_crop] , identifier[y_crop] = identifier[crop] , identifier[crop] keyword[elif] identifier[len] ( identifier[xy_crop] )== literal[int] : identifier[x_crop] , identifier[y_crop] = identifier[xy_crop] identifier[x_crop] = identifier[x_alias_percent] . identifier[get] ( identifier[x_crop] , identifier[x_crop] ) identifier[y_crop] = identifier[y_alias_percent] . identifier[get] ( identifier[y_crop] , identifier[y_crop] ) keyword[else] : keyword[raise] identifier[ThumbnailParseError] ( literal[string] % identifier[crop] ) keyword[def] identifier[get_offset] ( identifier[crop] , identifier[epsilon] ): identifier[m] = identifier[bgpos_pat] . identifier[match] ( identifier[crop] ) keyword[if] keyword[not] identifier[m] : keyword[raise] identifier[ThumbnailParseError] ( literal[string] % identifier[crop] ) identifier[value] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] )) identifier[unit] = identifier[m] . identifier[group] ( literal[string] ) keyword[if] identifier[unit] == literal[string] : identifier[value] = identifier[epsilon] * identifier[value] / literal[int] keyword[return] identifier[int] ( identifier[max] ( literal[int] , identifier[min] ( identifier[value] , identifier[epsilon] ))) identifier[offset_x] = identifier[get_offset] ( identifier[x_crop] , identifier[xy_image] [ literal[int] ]- identifier[xy_window] [ literal[int] ]) identifier[offset_y] = identifier[get_offset] ( identifier[y_crop] , identifier[xy_image] [ literal[int] ]- identifier[xy_window] [ literal[int] ]) keyword[return] identifier[offset_x] , identifier[offset_y]
def parse_crop(crop, xy_image, xy_window): """ Returns x, y offsets for cropping. The window area should fit inside the image, but out-of-range values are clamped so it works out anyway """ x_alias_percent = {'left': '0%', 'center': '50%', 'right': '100%'} y_alias_percent = {'top': '0%', 'center': '50%', 'bottom': '100%'} xy_crop = crop.split(' ') if len(xy_crop) == 1: if crop in x_alias_percent: x_crop = x_alias_percent[crop] y_crop = '50%' # depends on [control=['if'], data=['crop', 'x_alias_percent']] elif crop in y_alias_percent: y_crop = y_alias_percent[crop] x_crop = '50%' # depends on [control=['if'], data=['crop', 'y_alias_percent']] else: (x_crop, y_crop) = (crop, crop) # depends on [control=['if'], data=[]] elif len(xy_crop) == 2: (x_crop, y_crop) = xy_crop x_crop = x_alias_percent.get(x_crop, x_crop) y_crop = y_alias_percent.get(y_crop, y_crop) # depends on [control=['if'], data=[]] else: raise ThumbnailParseError('Unrecognized crop option: %s' % crop) def get_offset(crop, epsilon): m = bgpos_pat.match(crop) if not m: raise ThumbnailParseError('Unrecognized crop option: %s' % crop) # depends on [control=['if'], data=[]] value = int(m.group('value')) # we only take ints in the regexp unit = m.group('unit') if unit == '%': value = epsilon * value / 100.0 # depends on [control=['if'], data=[]] # return ∈ [0, epsilon] return int(max(0, min(value, epsilon))) offset_x = get_offset(x_crop, xy_image[0] - xy_window[0]) offset_y = get_offset(y_crop, xy_image[1] - xy_window[1]) return (offset_x, offset_y)
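A sketch of calling parse_crop, assuming the module-level names it relies on look roughly like the stubs below; the real bgpos_pat in sorl-thumbnail may differ slightly:

import re

# Assumed shape of the module-level pattern and error class.
bgpos_pat = re.compile(r'^(?P<value>\d+)(?P<unit>%|px)$')

class ThumbnailParseError(Exception):
    pass

# Crop a 100x100 window out of an 800x600 image:
print(parse_crop('center', (800, 600), (100, 100)))        # -> (350, 250)
print(parse_crop('right bottom', (800, 600), (100, 100)))  # -> (700, 500)
print(parse_crop('25% 10px', (800, 600), (100, 100)))      # -> (175, 10)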
def print_subcommands(data, nested_content, markDownHelp=False, settings=None): """ Each subcommand is a dictionary with the following keys: ['usage', 'action_groups', 'bare_usage', 'name', 'help'] In essence, this is all tossed in a new section with the title 'name'. Apparently there can also be a 'description' entry. """ definitions = map_nested_definitions(nested_content) items = [] if 'children' in data: subCommands = nodes.section(ids=["Sub-commands:"]) subCommands += nodes.title('Sub-commands:', 'Sub-commands:') for child in data['children']: sec = nodes.section(ids=[child['name']]) sec += nodes.title(child['name'], child['name']) if 'description' in child and child['description']: desc = [child['description']] elif child['help']: desc = [child['help']] else: desc = ['Undocumented'] # Handle nested content subContent = [] if child['name'] in definitions: classifier, s, subContent = definitions[child['name']] if classifier == '@replace': desc = [s] elif classifier == '@after': desc.append(s) elif classifier == '@before': desc.insert(0, s) for element in renderList(desc, markDownHelp): sec += element sec += nodes.literal_block(text=child['bare_usage']) for x in print_action_groups(child, nested_content + subContent, markDownHelp, settings=settings): sec += x for x in print_subcommands(child, nested_content + subContent, markDownHelp, settings=settings): sec += x if 'epilog' in child and child['epilog']: for element in renderList([child['epilog']], markDownHelp): sec += element subCommands += sec items.append(subCommands) return items
def function[print_subcommands, parameter[data, nested_content, markDownHelp, settings]]: constant[ Each subcommand is a dictionary with the following keys: ['usage', 'action_groups', 'bare_usage', 'name', 'help'] In essence, this is all tossed in a new section with the title 'name'. Apparently there can also be a 'description' entry. ] variable[definitions] assign[=] call[name[map_nested_definitions], parameter[name[nested_content]]] variable[items] assign[=] list[[]] if compare[constant[children] in name[data]] begin[:] variable[subCommands] assign[=] call[name[nodes].section, parameter[]] <ast.AugAssign object at 0x7da18bccadd0> for taget[name[child]] in starred[call[name[data]][constant[children]]] begin[:] variable[sec] assign[=] call[name[nodes].section, parameter[]] <ast.AugAssign object at 0x7da18bcc9900> if <ast.BoolOp object at 0x7da18bcc98d0> begin[:] variable[desc] assign[=] list[[<ast.Subscript object at 0x7da18bcc8250>]] variable[subContent] assign[=] list[[]] if compare[call[name[child]][constant[name]] in name[definitions]] begin[:] <ast.Tuple object at 0x7da18bccabc0> assign[=] call[name[definitions]][call[name[child]][constant[name]]] if compare[name[classifier] equal[==] constant[@replace]] begin[:] variable[desc] assign[=] list[[<ast.Name object at 0x7da18bcc86a0>]] for taget[name[element]] in starred[call[name[renderList], parameter[name[desc], name[markDownHelp]]]] begin[:] <ast.AugAssign object at 0x7da18bcc90c0> <ast.AugAssign object at 0x7da18bcc8ac0> for taget[name[x]] in starred[call[name[print_action_groups], parameter[name[child], binary_operation[name[nested_content] + name[subContent]], name[markDownHelp]]]] begin[:] <ast.AugAssign object at 0x7da18bcca620> for taget[name[x]] in starred[call[name[print_subcommands], parameter[name[child], binary_operation[name[nested_content] + name[subContent]], name[markDownHelp]]]] begin[:] <ast.AugAssign object at 0x7da18bcc8e20> if <ast.BoolOp object at 0x7da18bcca680> begin[:] for taget[name[element]] in starred[call[name[renderList], parameter[list[[<ast.Subscript object at 0x7da18bccb0d0>]], name[markDownHelp]]]] begin[:] <ast.AugAssign object at 0x7da18bcca380> <ast.AugAssign object at 0x7da18bcca8f0> call[name[items].append, parameter[name[subCommands]]] return[name[items]]
keyword[def] identifier[print_subcommands] ( identifier[data] , identifier[nested_content] , identifier[markDownHelp] = keyword[False] , identifier[settings] = keyword[None] ): literal[string] identifier[definitions] = identifier[map_nested_definitions] ( identifier[nested_content] ) identifier[items] =[] keyword[if] literal[string] keyword[in] identifier[data] : identifier[subCommands] = identifier[nodes] . identifier[section] ( identifier[ids] =[ literal[string] ]) identifier[subCommands] += identifier[nodes] . identifier[title] ( literal[string] , literal[string] ) keyword[for] identifier[child] keyword[in] identifier[data] [ literal[string] ]: identifier[sec] = identifier[nodes] . identifier[section] ( identifier[ids] =[ identifier[child] [ literal[string] ]]) identifier[sec] += identifier[nodes] . identifier[title] ( identifier[child] [ literal[string] ], identifier[child] [ literal[string] ]) keyword[if] literal[string] keyword[in] identifier[child] keyword[and] identifier[child] [ literal[string] ]: identifier[desc] =[ identifier[child] [ literal[string] ]] keyword[elif] identifier[child] [ literal[string] ]: identifier[desc] =[ identifier[child] [ literal[string] ]] keyword[else] : identifier[desc] =[ literal[string] ] identifier[subContent] =[] keyword[if] identifier[child] [ literal[string] ] keyword[in] identifier[definitions] : identifier[classifier] , identifier[s] , identifier[subContent] = identifier[definitions] [ identifier[child] [ literal[string] ]] keyword[if] identifier[classifier] == literal[string] : identifier[desc] =[ identifier[s] ] keyword[elif] identifier[classifier] == literal[string] : identifier[desc] . identifier[append] ( identifier[s] ) keyword[elif] identifier[classifier] == literal[string] : identifier[desc] . identifier[insert] ( literal[int] , identifier[s] ) keyword[for] identifier[element] keyword[in] identifier[renderList] ( identifier[desc] , identifier[markDownHelp] ): identifier[sec] += identifier[element] identifier[sec] += identifier[nodes] . identifier[literal_block] ( identifier[text] = identifier[child] [ literal[string] ]) keyword[for] identifier[x] keyword[in] identifier[print_action_groups] ( identifier[child] , identifier[nested_content] + identifier[subContent] , identifier[markDownHelp] , identifier[settings] = identifier[settings] ): identifier[sec] += identifier[x] keyword[for] identifier[x] keyword[in] identifier[print_subcommands] ( identifier[child] , identifier[nested_content] + identifier[subContent] , identifier[markDownHelp] , identifier[settings] = identifier[settings] ): identifier[sec] += identifier[x] keyword[if] literal[string] keyword[in] identifier[child] keyword[and] identifier[child] [ literal[string] ]: keyword[for] identifier[element] keyword[in] identifier[renderList] ([ identifier[child] [ literal[string] ]], identifier[markDownHelp] ): identifier[sec] += identifier[element] identifier[subCommands] += identifier[sec] identifier[items] . identifier[append] ( identifier[subCommands] ) keyword[return] identifier[items]
def print_subcommands(data, nested_content, markDownHelp=False, settings=None): """ Each subcommand is a dictionary with the following keys: ['usage', 'action_groups', 'bare_usage', 'name', 'help'] In essence, this is all tossed in a new section with the title 'name'. Apparently there can also be a 'description' entry. """ definitions = map_nested_definitions(nested_content) items = [] if 'children' in data: subCommands = nodes.section(ids=['Sub-commands:']) subCommands += nodes.title('Sub-commands:', 'Sub-commands:') for child in data['children']: sec = nodes.section(ids=[child['name']]) sec += nodes.title(child['name'], child['name']) if 'description' in child and child['description']: desc = [child['description']] # depends on [control=['if'], data=[]] elif child['help']: desc = [child['help']] # depends on [control=['if'], data=[]] else: desc = ['Undocumented'] # Handle nested content subContent = [] if child['name'] in definitions: (classifier, s, subContent) = definitions[child['name']] if classifier == '@replace': desc = [s] # depends on [control=['if'], data=[]] elif classifier == '@after': desc.append(s) # depends on [control=['if'], data=[]] elif classifier == '@before': desc.insert(0, s) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['definitions']] for element in renderList(desc, markDownHelp): sec += element # depends on [control=['for'], data=['element']] sec += nodes.literal_block(text=child['bare_usage']) for x in print_action_groups(child, nested_content + subContent, markDownHelp, settings=settings): sec += x # depends on [control=['for'], data=['x']] for x in print_subcommands(child, nested_content + subContent, markDownHelp, settings=settings): sec += x # depends on [control=['for'], data=['x']] if 'epilog' in child and child['epilog']: for element in renderList([child['epilog']], markDownHelp): sec += element # depends on [control=['for'], data=['element']] # depends on [control=['if'], data=[]] subCommands += sec # depends on [control=['for'], data=['child']] items.append(subCommands) # depends on [control=['if'], data=['data']] return items
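The shape of the `data` dict the function walks, with illustrative values only. Actually executing the call additionally requires docutils and the module's own helpers (map_nested_definitions, renderList, print_action_groups) to be in scope, so treat it as a sketch:

data = {
    'children': [{
        'name': 'build',                   # becomes the section title
        'help': 'Compile the project',     # used when description is absent
        'description': '',                 # falsy, so it falls back to help
        'bare_usage': 'prog build [-h] [--fast]',
        'usage': 'prog build [-h] [--fast]',
        'action_groups': [],
    }],
}

sections = print_subcommands(data, nested_content=[])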
def gen_radio_list(sig_dic):
    '''
    Generate the list-view HTML snippet for a RADIO field,
    emitting one branch per item.
    '''
    view_zuoxiang = '''<span class="iga_pd_val">'''
    dic_tmp = sig_dic['dic']
    for key in dic_tmp.keys():
        tmp_str = '''{{% if postinfo.extinfo['{0}'][0] == "{1}" %}} {2} {{% end %}}
        '''.format(sig_dic['en'], key, dic_tmp[key])
        view_zuoxiang += tmp_str
    view_zuoxiang += '''</span>'''
    return view_zuoxiang
def function[gen_radio_list, parameter[sig_dic]]:
    constant[
    Generate the list-view HTML snippet for a RADIO field, one entry per item.
    ]
    variable[view_zuoxiang] assign[=] constant[<span class="iga_pd_val">]
    variable[dic_tmp] assign[=] call[name[sig_dic]][constant[dic]]
    for taget[name[key]] in starred[call[name[dic_tmp].keys, parameter[]]] begin[:]
        variable[tmp_str] assign[=] call[constant[{{% if postinfo.extinfo['{0}'][0] == "{1}" %}} {2} {{% end %}}
        ].format, parameter[call[name[sig_dic]][constant[en]], name[key], call[name[dic_tmp]][name[key]]]]
        <ast.AugAssign object at 0x7da1b04679d0>
        <ast.AugAssign object at 0x7da1b0467820>
    return[name[view_zuoxiang]]
keyword[def] identifier[gen_radio_list] ( identifier[sig_dic] ): literal[string] identifier[view_zuoxiang] = literal[string] identifier[dic_tmp] = identifier[sig_dic] [ literal[string] ] keyword[for] identifier[key] keyword[in] identifier[dic_tmp] . identifier[keys] (): identifier[tmp_str] = literal[string] . identifier[format] ( identifier[sig_dic] [ literal[string] ], identifier[key] , identifier[dic_tmp] [ identifier[key] ]) identifier[view_zuoxiang] += identifier[tmp_str] identifier[view_zuoxiang] += literal[string] keyword[return] identifier[view_zuoxiang]
def gen_radio_list(sig_dic):
    """
    Generate the list-view HTML snippet for a RADIO field, one entry per item.
    """
    view_zuoxiang = '<span class="iga_pd_val">'
    dic_tmp = sig_dic['dic']
    for key in dic_tmp.keys():
        tmp_str = '{{% if postinfo.extinfo[\'{0}\'][0] == "{1}" %}} {2} {{% end %}}\n        '.format(sig_dic['en'], key, dic_tmp[key])
        view_zuoxiang += tmp_str # depends on [control=['for'], data=['key']]
    view_zuoxiang += '</span>'
    return view_zuoxiang
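Usage sketch for gen_radio_list above — the function is pure string building, so this runs as-is; the field definition is an illustrative assumption, not taken from the surrounding application.

sig_dic = {'en': 'color', 'dic': {'red': 'Red', 'blue': 'Blue'}}  # hypothetical RADIO field
print(gen_radio_list(sig_dic))
# Prints roughly:
# <span class="iga_pd_val">{% if postinfo.extinfo['color'][0] == "red" %} Red {% end %}
# {% if postinfo.extinfo['color'][0] == "blue" %} Blue {% end %}
# </span>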
def set_trace(self, frame=None): """Remember starting frame. This is used with pytest, which does not use pdb.set_trace(). """ if hasattr(local, '_pdbpp_completing'): # Handle set_trace being called during completion, e.g. with # fancycompleter's attr_matches. return if frame is None: frame = sys._getframe().f_back self._via_set_trace_frame = frame return super(Pdb, self).set_trace(frame)
def function[set_trace, parameter[self, frame]]: constant[Remember starting frame. This is used with pytest, which does not use pdb.set_trace(). ] if call[name[hasattr], parameter[name[local], constant[_pdbpp_completing]]] begin[:] return[None] if compare[name[frame] is constant[None]] begin[:] variable[frame] assign[=] call[name[sys]._getframe, parameter[]].f_back name[self]._via_set_trace_frame assign[=] name[frame] return[call[call[name[super], parameter[name[Pdb], name[self]]].set_trace, parameter[name[frame]]]]
keyword[def] identifier[set_trace] ( identifier[self] , identifier[frame] = keyword[None] ): literal[string] keyword[if] identifier[hasattr] ( identifier[local] , literal[string] ): keyword[return] keyword[if] identifier[frame] keyword[is] keyword[None] : identifier[frame] = identifier[sys] . identifier[_getframe] (). identifier[f_back] identifier[self] . identifier[_via_set_trace_frame] = identifier[frame] keyword[return] identifier[super] ( identifier[Pdb] , identifier[self] ). identifier[set_trace] ( identifier[frame] )
def set_trace(self, frame=None): """Remember starting frame. This is used with pytest, which does not use pdb.set_trace(). """ if hasattr(local, '_pdbpp_completing'): # Handle set_trace being called during completion, e.g. with # fancycompleter's attr_matches. return # depends on [control=['if'], data=[]] if frame is None: frame = sys._getframe().f_back # depends on [control=['if'], data=['frame']] self._via_set_trace_frame = frame return super(Pdb, self).set_trace(frame)
def p_statement(self, p): """statement : OPTION_AND_VALUE """ p[0] = ['statement', p[1][0], p[1][1]] if self.options.get('lowercasenames'): p[0][1] = p[0][1].lower() if (not self.options.get('nostripvalues') and not hasattr(p[0][2], 'is_single_quoted') and not hasattr(p[0][2], 'is_double_quoted')): p[0][2] = p[0][2].rstrip()
def function[p_statement, parameter[self, p]]: constant[statement : OPTION_AND_VALUE ] call[name[p]][constant[0]] assign[=] list[[<ast.Constant object at 0x7da20c7c8220>, <ast.Subscript object at 0x7da20c7cbc10>, <ast.Subscript object at 0x7da20c7c8970>]] if call[name[self].options.get, parameter[constant[lowercasenames]]] begin[:] call[call[name[p]][constant[0]]][constant[1]] assign[=] call[call[call[name[p]][constant[0]]][constant[1]].lower, parameter[]] if <ast.BoolOp object at 0x7da20c7c9ab0> begin[:] call[call[name[p]][constant[0]]][constant[2]] assign[=] call[call[call[name[p]][constant[0]]][constant[2]].rstrip, parameter[]]
keyword[def] identifier[p_statement] ( identifier[self] , identifier[p] ): literal[string] identifier[p] [ literal[int] ]=[ literal[string] , identifier[p] [ literal[int] ][ literal[int] ], identifier[p] [ literal[int] ][ literal[int] ]] keyword[if] identifier[self] . identifier[options] . identifier[get] ( literal[string] ): identifier[p] [ literal[int] ][ literal[int] ]= identifier[p] [ literal[int] ][ literal[int] ]. identifier[lower] () keyword[if] ( keyword[not] identifier[self] . identifier[options] . identifier[get] ( literal[string] ) keyword[and] keyword[not] identifier[hasattr] ( identifier[p] [ literal[int] ][ literal[int] ], literal[string] ) keyword[and] keyword[not] identifier[hasattr] ( identifier[p] [ literal[int] ][ literal[int] ], literal[string] )): identifier[p] [ literal[int] ][ literal[int] ]= identifier[p] [ literal[int] ][ literal[int] ]. identifier[rstrip] ()
def p_statement(self, p): """statement : OPTION_AND_VALUE """ p[0] = ['statement', p[1][0], p[1][1]] if self.options.get('lowercasenames'): p[0][1] = p[0][1].lower() # depends on [control=['if'], data=[]] if not self.options.get('nostripvalues') and (not hasattr(p[0][2], 'is_single_quoted')) and (not hasattr(p[0][2], 'is_double_quoted')): p[0][2] = p[0][2].rstrip() # depends on [control=['if'], data=[]]
def get_grid(grid_id): """ Return the specified grid. :param grid_id: The grid identification in h2o :returns: an :class:`H2OGridSearch` instance. """ assert_is_type(grid_id, str) grid_json = api("GET /99/Grids/%s" % grid_id) models = [get_model(key["name"]) for key in grid_json["model_ids"]] # get first model returned in list of models from grid search to get model class (binomial, multinomial, etc) first_model_json = api("GET /3/Models/%s" % grid_json["model_ids"][0]["name"])["models"][0] gs = H2OGridSearch(None, {}, grid_id) gs._resolve_grid(grid_id, grid_json, first_model_json) gs.models = models hyper_params = {param: set() for param in gs.hyper_names} for param in gs.hyper_names: for model in models: if isinstance(model.full_parameters[param]["actual_value"], list): hyper_params[param].add(model.full_parameters[param]["actual_value"][0]) else: hyper_params[param].add(model.full_parameters[param]["actual_value"]) hyper_params = {str(param): list(vals) for param, vals in hyper_params.items()} gs.hyper_params = hyper_params gs.model = model.__class__() return gs
def function[get_grid, parameter[grid_id]]: constant[ Return the specified grid. :param grid_id: The grid identification in h2o :returns: an :class:`H2OGridSearch` instance. ] call[name[assert_is_type], parameter[name[grid_id], name[str]]] variable[grid_json] assign[=] call[name[api], parameter[binary_operation[constant[GET /99/Grids/%s] <ast.Mod object at 0x7da2590d6920> name[grid_id]]]] variable[models] assign[=] <ast.ListComp object at 0x7da1b2346230> variable[first_model_json] assign[=] call[call[call[name[api], parameter[binary_operation[constant[GET /3/Models/%s] <ast.Mod object at 0x7da2590d6920> call[call[call[name[grid_json]][constant[model_ids]]][constant[0]]][constant[name]]]]]][constant[models]]][constant[0]] variable[gs] assign[=] call[name[H2OGridSearch], parameter[constant[None], dictionary[[], []], name[grid_id]]] call[name[gs]._resolve_grid, parameter[name[grid_id], name[grid_json], name[first_model_json]]] name[gs].models assign[=] name[models] variable[hyper_params] assign[=] <ast.DictComp object at 0x7da1b2346260> for taget[name[param]] in starred[name[gs].hyper_names] begin[:] for taget[name[model]] in starred[name[models]] begin[:] if call[name[isinstance], parameter[call[call[name[model].full_parameters][name[param]]][constant[actual_value]], name[list]]] begin[:] call[call[name[hyper_params]][name[param]].add, parameter[call[call[call[name[model].full_parameters][name[param]]][constant[actual_value]]][constant[0]]]] variable[hyper_params] assign[=] <ast.DictComp object at 0x7da1b2345270> name[gs].hyper_params assign[=] name[hyper_params] name[gs].model assign[=] call[name[model].__class__, parameter[]] return[name[gs]]
keyword[def] identifier[get_grid] ( identifier[grid_id] ): literal[string] identifier[assert_is_type] ( identifier[grid_id] , identifier[str] ) identifier[grid_json] = identifier[api] ( literal[string] % identifier[grid_id] ) identifier[models] =[ identifier[get_model] ( identifier[key] [ literal[string] ]) keyword[for] identifier[key] keyword[in] identifier[grid_json] [ literal[string] ]] identifier[first_model_json] = identifier[api] ( literal[string] % identifier[grid_json] [ literal[string] ][ literal[int] ][ literal[string] ])[ literal[string] ][ literal[int] ] identifier[gs] = identifier[H2OGridSearch] ( keyword[None] ,{}, identifier[grid_id] ) identifier[gs] . identifier[_resolve_grid] ( identifier[grid_id] , identifier[grid_json] , identifier[first_model_json] ) identifier[gs] . identifier[models] = identifier[models] identifier[hyper_params] ={ identifier[param] : identifier[set] () keyword[for] identifier[param] keyword[in] identifier[gs] . identifier[hyper_names] } keyword[for] identifier[param] keyword[in] identifier[gs] . identifier[hyper_names] : keyword[for] identifier[model] keyword[in] identifier[models] : keyword[if] identifier[isinstance] ( identifier[model] . identifier[full_parameters] [ identifier[param] ][ literal[string] ], identifier[list] ): identifier[hyper_params] [ identifier[param] ]. identifier[add] ( identifier[model] . identifier[full_parameters] [ identifier[param] ][ literal[string] ][ literal[int] ]) keyword[else] : identifier[hyper_params] [ identifier[param] ]. identifier[add] ( identifier[model] . identifier[full_parameters] [ identifier[param] ][ literal[string] ]) identifier[hyper_params] ={ identifier[str] ( identifier[param] ): identifier[list] ( identifier[vals] ) keyword[for] identifier[param] , identifier[vals] keyword[in] identifier[hyper_params] . identifier[items] ()} identifier[gs] . identifier[hyper_params] = identifier[hyper_params] identifier[gs] . identifier[model] = identifier[model] . identifier[__class__] () keyword[return] identifier[gs]
def get_grid(grid_id): """ Return the specified grid. :param grid_id: The grid identification in h2o :returns: an :class:`H2OGridSearch` instance. """ assert_is_type(grid_id, str) grid_json = api('GET /99/Grids/%s' % grid_id) models = [get_model(key['name']) for key in grid_json['model_ids']] # get first model returned in list of models from grid search to get model class (binomial, multinomial, etc) first_model_json = api('GET /3/Models/%s' % grid_json['model_ids'][0]['name'])['models'][0] gs = H2OGridSearch(None, {}, grid_id) gs._resolve_grid(grid_id, grid_json, first_model_json) gs.models = models hyper_params = {param: set() for param in gs.hyper_names} for param in gs.hyper_names: for model in models: if isinstance(model.full_parameters[param]['actual_value'], list): hyper_params[param].add(model.full_parameters[param]['actual_value'][0]) # depends on [control=['if'], data=[]] else: hyper_params[param].add(model.full_parameters[param]['actual_value']) # depends on [control=['for'], data=['model']] # depends on [control=['for'], data=['param']] hyper_params = {str(param): list(vals) for (param, vals) in hyper_params.items()} gs.hyper_params = hyper_params gs.model = model.__class__() return gs
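A hedged usage sketch for get_grid: it mirrors the h2o Python client's module-level API, but it needs a live H2O cluster holding a previously trained grid, and the grid id below is hypothetical.

import h2o

h2o.init()                        # attach to (or start) a local H2O cluster
grid = h2o.get_grid('gbm_grid1')  # 'gbm_grid1' is a hypothetical grid id
print(grid.hyper_params)          # hyper-parameter values recovered from the grid's models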
def fastaParseSgd(header):
    """Custom parser for fasta headers in the SGD format, see www.yeastgenome.org.

    :param header: str, protein entry header from a fasta file

    :returns: dict, parsed header
    """
    # Raw string: '\S' in a plain string literal is an invalid escape
    # sequence and raises a warning on recent Python versions.
    rePattern = r'([\S]+)\s([\S]+).+(".+")'
    ID, name, description = re.match(rePattern, header).groups()
    info = {'id':ID, 'name':name, 'description':description}
    return info
def function[fastaParseSgd, parameter[header]]: constant[Custom parser for fasta headers in the SGD format, see www.yeastgenome.org. :param header: str, protein entry header from a fasta file :returns: dict, parsed header ] variable[rePattern] assign[=] constant[([\S]+)\s([\S]+).+(".+")] <ast.Tuple object at 0x7da18dc999f0> assign[=] call[call[name[re].match, parameter[name[rePattern], name[header]]].groups, parameter[]] variable[info] assign[=] dictionary[[<ast.Constant object at 0x7da18dc9a470>, <ast.Constant object at 0x7da18dc988e0>, <ast.Constant object at 0x7da18dc98d60>], [<ast.Name object at 0x7da18dc99750>, <ast.Name object at 0x7da18dc9b3a0>, <ast.Name object at 0x7da18dc98730>]] return[name[info]]
keyword[def] identifier[fastaParseSgd] ( identifier[header] ): literal[string] identifier[rePattern] = literal[string] identifier[ID] , identifier[name] , identifier[description] = identifier[re] . identifier[match] ( identifier[rePattern] , identifier[header] ). identifier[groups] () identifier[info] ={ literal[string] : identifier[ID] , literal[string] : identifier[name] , literal[string] : identifier[description] } keyword[return] identifier[info]
def fastaParseSgd(header): """Custom parser for fasta headers in the SGD format, see www.yeastgenome.org. :param header: str, protein entry header from a fasta file :returns: dict, parsed header """ rePattern = '([\\S]+)\\s([\\S]+).+(".+")' (ID, name, description) = re.match(rePattern, header).groups() info = {'id': ID, 'name': name, 'description': description} return info
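Example call for fastaParseSgd; the header below is an illustrative SGD-style line rather than a real database entry, and the function assumes re is imported at module level.

import re  # fastaParseSgd relies on this module-level import

header = ('YAL001C TFC3 SGDID:S000000001, Chr I, '
          '"Largest of six subunits of the RNA polymerase III '
          'transcription initiation factor complex (TFIIIC)"')
print(fastaParseSgd(header))
# {'id': 'YAL001C', 'name': 'TFC3', 'description': '"Largest of six subunits ..."'}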
def find_all(cls, vid=None, pid=None): """ Returns all FTDI devices matching our vendor and product IDs. :returns: list of devices :raises: :py:class:`~alarmdecoder.util.CommError` """ if not have_pyftdi: raise ImportError('The USBDevice class has been disabled due to missing requirement: pyftdi or pyusb.') cls.__devices = [] query = cls.PRODUCT_IDS if vid and pid: query = [(vid, pid)] try: cls.__devices = Ftdi.find_all(query, nocache=True) except (usb.core.USBError, FtdiError) as err: raise CommError('Error enumerating AD2USB devices: {0}'.format(str(err)), err) return cls.__devices
def function[find_all, parameter[cls, vid, pid]]: constant[ Returns all FTDI devices matching our vendor and product IDs. :returns: list of devices :raises: :py:class:`~alarmdecoder.util.CommError` ] if <ast.UnaryOp object at 0x7da1b2766830> begin[:] <ast.Raise object at 0x7da1b27649d0> name[cls].__devices assign[=] list[[]] variable[query] assign[=] name[cls].PRODUCT_IDS if <ast.BoolOp object at 0x7da1b2767490> begin[:] variable[query] assign[=] list[[<ast.Tuple object at 0x7da1b2764040>]] <ast.Try object at 0x7da1b27678b0> return[name[cls].__devices]
keyword[def] identifier[find_all] ( identifier[cls] , identifier[vid] = keyword[None] , identifier[pid] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[have_pyftdi] : keyword[raise] identifier[ImportError] ( literal[string] ) identifier[cls] . identifier[__devices] =[] identifier[query] = identifier[cls] . identifier[PRODUCT_IDS] keyword[if] identifier[vid] keyword[and] identifier[pid] : identifier[query] =[( identifier[vid] , identifier[pid] )] keyword[try] : identifier[cls] . identifier[__devices] = identifier[Ftdi] . identifier[find_all] ( identifier[query] , identifier[nocache] = keyword[True] ) keyword[except] ( identifier[usb] . identifier[core] . identifier[USBError] , identifier[FtdiError] ) keyword[as] identifier[err] : keyword[raise] identifier[CommError] ( literal[string] . identifier[format] ( identifier[str] ( identifier[err] )), identifier[err] ) keyword[return] identifier[cls] . identifier[__devices]
def find_all(cls, vid=None, pid=None): """ Returns all FTDI devices matching our vendor and product IDs. :returns: list of devices :raises: :py:class:`~alarmdecoder.util.CommError` """ if not have_pyftdi: raise ImportError('The USBDevice class has been disabled due to missing requirement: pyftdi or pyusb.') # depends on [control=['if'], data=[]] cls.__devices = [] query = cls.PRODUCT_IDS if vid and pid: query = [(vid, pid)] # depends on [control=['if'], data=[]] try: cls.__devices = Ftdi.find_all(query, nocache=True) # depends on [control=['try'], data=[]] except (usb.core.USBError, FtdiError) as err: raise CommError('Error enumerating AD2USB devices: {0}'.format(str(err)), err) # depends on [control=['except'], data=['err']] return cls.__devices
def grace_period(msg='', seconds=10): """ Gives user a window to stop a process before it happens """ import time print(msg) override = util_arg.get_argflag(('--yes', '--y', '-y')) print('starting grace period') if override: print('ending based on command line flag') return True for count in reversed(range(1, seconds + 1)): time.sleep(1) print('%d' % (count,)) print('%d' % (0,)) print('grace period is over') return True
def function[grace_period, parameter[msg, seconds]]: constant[ Gives user a window to stop a process before it happens ] import module[time] call[name[print], parameter[name[msg]]] variable[override] assign[=] call[name[util_arg].get_argflag, parameter[tuple[[<ast.Constant object at 0x7da1b24ae590>, <ast.Constant object at 0x7da1b24ae650>, <ast.Constant object at 0x7da1b24afdf0>]]]] call[name[print], parameter[constant[starting grace period]]] if name[override] begin[:] call[name[print], parameter[constant[ending based on command line flag]]] return[constant[True]] for taget[name[count]] in starred[call[name[reversed], parameter[call[name[range], parameter[constant[1], binary_operation[name[seconds] + constant[1]]]]]]] begin[:] call[name[time].sleep, parameter[constant[1]]] call[name[print], parameter[binary_operation[constant[%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b24aee60>]]]]] call[name[print], parameter[binary_operation[constant[%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Constant object at 0x7da1b24aef80>]]]]] call[name[print], parameter[constant[grace period is over]]] return[constant[True]]
keyword[def] identifier[grace_period] ( identifier[msg] = literal[string] , identifier[seconds] = literal[int] ): literal[string] keyword[import] identifier[time] identifier[print] ( identifier[msg] ) identifier[override] = identifier[util_arg] . identifier[get_argflag] (( literal[string] , literal[string] , literal[string] )) identifier[print] ( literal[string] ) keyword[if] identifier[override] : identifier[print] ( literal[string] ) keyword[return] keyword[True] keyword[for] identifier[count] keyword[in] identifier[reversed] ( identifier[range] ( literal[int] , identifier[seconds] + literal[int] )): identifier[time] . identifier[sleep] ( literal[int] ) identifier[print] ( literal[string] %( identifier[count] ,)) identifier[print] ( literal[string] %( literal[int] ,)) identifier[print] ( literal[string] ) keyword[return] keyword[True]
def grace_period(msg='', seconds=10): """ Gives user a window to stop a process before it happens """ import time print(msg) override = util_arg.get_argflag(('--yes', '--y', '-y')) print('starting grace period') if override: print('ending based on command line flag') return True # depends on [control=['if'], data=[]] for count in reversed(range(1, seconds + 1)): time.sleep(1) print('%d' % (count,)) # depends on [control=['for'], data=['count']] print('%d' % (0,)) print('grace period is over') return True
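Usage sketch for grace_period; it references a sibling util_arg module for the --yes flag, so it only runs inside its own utility library, and the follow-up call below is hypothetical.

grace_period('About to rebuild the cache...', seconds=3)
rebuild_cache()  # hypothetical follow-up action; the countdown gives time to Ctrl-C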
def _bed_iter(bed): """ Given an open BED file, yield tuples of (`chrom`, `chrom_iter`) where `chrom_iter` yields tuples of (`start`, `stop`). """ records = (line.split()[:3] for line in bed if not (line.startswith('browser') or line.startswith('track'))) for chrom, chrom_iter in itertools.groupby(records, lambda x: x[0]): yield chrom, ((int(start), int(stop)) for _, start, stop in chrom_iter)
def function[_bed_iter, parameter[bed]]: constant[ Given an open BED file, yield tuples of (`chrom`, `chrom_iter`) where `chrom_iter` yields tuples of (`start`, `stop`). ] variable[records] assign[=] <ast.GeneratorExp object at 0x7da1b2347220> for taget[tuple[[<ast.Name object at 0x7da1b23452a0>, <ast.Name object at 0x7da1b2347e20>]]] in starred[call[name[itertools].groupby, parameter[name[records], <ast.Lambda object at 0x7da1b2346b00>]]] begin[:] <ast.Yield object at 0x7da1b2344a30>
keyword[def] identifier[_bed_iter] ( identifier[bed] ): literal[string] identifier[records] =( identifier[line] . identifier[split] ()[: literal[int] ] keyword[for] identifier[line] keyword[in] identifier[bed] keyword[if] keyword[not] ( identifier[line] . identifier[startswith] ( literal[string] ) keyword[or] identifier[line] . identifier[startswith] ( literal[string] ))) keyword[for] identifier[chrom] , identifier[chrom_iter] keyword[in] identifier[itertools] . identifier[groupby] ( identifier[records] , keyword[lambda] identifier[x] : identifier[x] [ literal[int] ]): keyword[yield] identifier[chrom] ,(( identifier[int] ( identifier[start] ), identifier[int] ( identifier[stop] )) keyword[for] identifier[_] , identifier[start] , identifier[stop] keyword[in] identifier[chrom_iter] )
def _bed_iter(bed): """ Given an open BED file, yield tuples of (`chrom`, `chrom_iter`) where `chrom_iter` yields tuples of (`start`, `stop`). """ records = (line.split()[:3] for line in bed if not (line.startswith('browser') or line.startswith('track'))) for (chrom, chrom_iter) in itertools.groupby(records, lambda x: x[0]): yield (chrom, ((int(start), int(stop)) for (_, start, stop) in chrom_iter)) # depends on [control=['for'], data=[]]
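Self-contained example for _bed_iter; the in-memory BED content is illustrative, and itertools must be importable since the function itself uses itertools.groupby.

import io
import itertools  # required by _bed_iter itself

bed = io.StringIO(
    'track name=example\n'
    'chr1\t100\t200\n'
    'chr1\t300\t400\n'
    'chr2\t50\t80\n'
)
for chrom, intervals in _bed_iter(bed):
    print(chrom, list(intervals))
# chr1 [(100, 200), (300, 400)]
# chr2 [(50, 80)]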
def decrypt_ecb_cts(self, data): """ Return an iterator that decrypts `data` using the Electronic Codebook with Ciphertext Stealing (ECB-CTS) mode of operation. ECB-CTS mode can only operate on `data` that is greater than 8 bytes in length. Each iteration, except the last, always returns a block-sized :obj:`bytes` object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object with a length less than the block-size, if `data` is not a multiple of the block-size in length. `data` should be a :obj:`bytes`-like object that is greater than 8 bytes in length. If it is not, a :exc:`ValueError` exception is raised. """ data_len = len(data) if data_len <= 8: raise ValueError("data is not greater than 8 bytes in length") S1, S2, S3, S4 = self.S P = self.P u4_1_pack = self._u4_1_pack u1_4_unpack = self._u1_4_unpack u4_2_pack = self._u4_2_pack u4_2_unpack = self._u4_2_unpack decrypt = self._decrypt extra_bytes = data_len % 8 last_block_stop_i = data_len - extra_bytes cipher_L, cipher_R = u4_2_unpack(data[0:8]) plain_block = u4_2_pack( *decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack) ) for cipher_L, cipher_R in self._u4_2_iter_unpack(data[8:last_block_stop_i]): yield plain_block plain_block = u4_2_pack( *decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack) ) cipher_L, cipher_R = u4_2_unpack( data[last_block_stop_i:] + plain_block[extra_bytes:] ) yield u4_2_pack( *decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack) ) yield plain_block[:extra_bytes]
def function[decrypt_ecb_cts, parameter[self, data]]: constant[ Return an iterator that decrypts `data` using the Electronic Codebook with Ciphertext Stealing (ECB-CTS) mode of operation. ECB-CTS mode can only operate on `data` that is greater than 8 bytes in length. Each iteration, except the last, always returns a block-sized :obj:`bytes` object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object with a length less than the block-size, if `data` is not a multiple of the block-size in length. `data` should be a :obj:`bytes`-like object that is greater than 8 bytes in length. If it is not, a :exc:`ValueError` exception is raised. ] variable[data_len] assign[=] call[name[len], parameter[name[data]]] if compare[name[data_len] less_or_equal[<=] constant[8]] begin[:] <ast.Raise object at 0x7da18bcc8160> <ast.Tuple object at 0x7da18bcc9300> assign[=] name[self].S variable[P] assign[=] name[self].P variable[u4_1_pack] assign[=] name[self]._u4_1_pack variable[u1_4_unpack] assign[=] name[self]._u1_4_unpack variable[u4_2_pack] assign[=] name[self]._u4_2_pack variable[u4_2_unpack] assign[=] name[self]._u4_2_unpack variable[decrypt] assign[=] name[self]._decrypt variable[extra_bytes] assign[=] binary_operation[name[data_len] <ast.Mod object at 0x7da2590d6920> constant[8]] variable[last_block_stop_i] assign[=] binary_operation[name[data_len] - name[extra_bytes]] <ast.Tuple object at 0x7da18bccb5e0> assign[=] call[name[u4_2_unpack], parameter[call[name[data]][<ast.Slice object at 0x7da18bcca3b0>]]] variable[plain_block] assign[=] call[name[u4_2_pack], parameter[<ast.Starred object at 0x7da18bcca800>]] for taget[tuple[[<ast.Name object at 0x7da18bccbfa0>, <ast.Name object at 0x7da18bcc8490>]]] in starred[call[name[self]._u4_2_iter_unpack, parameter[call[name[data]][<ast.Slice object at 0x7da18bccbbb0>]]]] begin[:] <ast.Yield object at 0x7da18bcc9900> variable[plain_block] assign[=] call[name[u4_2_pack], parameter[<ast.Starred object at 0x7da18bccbd90>]] <ast.Tuple object at 0x7da18bccb0d0> assign[=] call[name[u4_2_unpack], parameter[binary_operation[call[name[data]][<ast.Slice object at 0x7da18bcc8760>] + call[name[plain_block]][<ast.Slice object at 0x7da18bcca1d0>]]]] <ast.Yield object at 0x7da18bccbeb0> <ast.Yield object at 0x7da18bcc8c70>
keyword[def] identifier[decrypt_ecb_cts] ( identifier[self] , identifier[data] ): literal[string] identifier[data_len] = identifier[len] ( identifier[data] ) keyword[if] identifier[data_len] <= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[S1] , identifier[S2] , identifier[S3] , identifier[S4] = identifier[self] . identifier[S] identifier[P] = identifier[self] . identifier[P] identifier[u4_1_pack] = identifier[self] . identifier[_u4_1_pack] identifier[u1_4_unpack] = identifier[self] . identifier[_u1_4_unpack] identifier[u4_2_pack] = identifier[self] . identifier[_u4_2_pack] identifier[u4_2_unpack] = identifier[self] . identifier[_u4_2_unpack] identifier[decrypt] = identifier[self] . identifier[_decrypt] identifier[extra_bytes] = identifier[data_len] % literal[int] identifier[last_block_stop_i] = identifier[data_len] - identifier[extra_bytes] identifier[cipher_L] , identifier[cipher_R] = identifier[u4_2_unpack] ( identifier[data] [ literal[int] : literal[int] ]) identifier[plain_block] = identifier[u4_2_pack] ( * identifier[decrypt] ( identifier[cipher_L] , identifier[cipher_R] , identifier[P] , identifier[S1] , identifier[S2] , identifier[S3] , identifier[S4] , identifier[u4_1_pack] , identifier[u1_4_unpack] ) ) keyword[for] identifier[cipher_L] , identifier[cipher_R] keyword[in] identifier[self] . identifier[_u4_2_iter_unpack] ( identifier[data] [ literal[int] : identifier[last_block_stop_i] ]): keyword[yield] identifier[plain_block] identifier[plain_block] = identifier[u4_2_pack] ( * identifier[decrypt] ( identifier[cipher_L] , identifier[cipher_R] , identifier[P] , identifier[S1] , identifier[S2] , identifier[S3] , identifier[S4] , identifier[u4_1_pack] , identifier[u1_4_unpack] ) ) identifier[cipher_L] , identifier[cipher_R] = identifier[u4_2_unpack] ( identifier[data] [ identifier[last_block_stop_i] :]+ identifier[plain_block] [ identifier[extra_bytes] :] ) keyword[yield] identifier[u4_2_pack] ( * identifier[decrypt] ( identifier[cipher_L] , identifier[cipher_R] , identifier[P] , identifier[S1] , identifier[S2] , identifier[S3] , identifier[S4] , identifier[u4_1_pack] , identifier[u1_4_unpack] ) ) keyword[yield] identifier[plain_block] [: identifier[extra_bytes] ]
def decrypt_ecb_cts(self, data): """ Return an iterator that decrypts `data` using the Electronic Codebook with Ciphertext Stealing (ECB-CTS) mode of operation. ECB-CTS mode can only operate on `data` that is greater than 8 bytes in length. Each iteration, except the last, always returns a block-sized :obj:`bytes` object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object with a length less than the block-size, if `data` is not a multiple of the block-size in length. `data` should be a :obj:`bytes`-like object that is greater than 8 bytes in length. If it is not, a :exc:`ValueError` exception is raised. """ data_len = len(data) if data_len <= 8: raise ValueError('data is not greater than 8 bytes in length') # depends on [control=['if'], data=[]] (S1, S2, S3, S4) = self.S P = self.P u4_1_pack = self._u4_1_pack u1_4_unpack = self._u1_4_unpack u4_2_pack = self._u4_2_pack u4_2_unpack = self._u4_2_unpack decrypt = self._decrypt extra_bytes = data_len % 8 last_block_stop_i = data_len - extra_bytes (cipher_L, cipher_R) = u4_2_unpack(data[0:8]) plain_block = u4_2_pack(*decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)) for (cipher_L, cipher_R) in self._u4_2_iter_unpack(data[8:last_block_stop_i]): yield plain_block plain_block = u4_2_pack(*decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)) # depends on [control=['for'], data=[]] (cipher_L, cipher_R) = u4_2_unpack(data[last_block_stop_i:] + plain_block[extra_bytes:]) yield u4_2_pack(*decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)) yield plain_block[:extra_bytes]
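A round-trip sketch for decrypt_ecb_cts; the method's signature matches the pure-Python blowfish package's Cipher class, which is assumed to be its home here, and the key and message are arbitrary.

import blowfish  # assumed provider of the Cipher class this method belongs to

cipher = blowfish.Cipher(b'an arbitrary key')           # any 4..56 byte key
message = b'seventeen bytes!!'                          # 17 bytes: > 8 and not a multiple of 8
ciphertext = b''.join(cipher.encrypt_ecb_cts(message))  # CTS keeps the 17-byte length
assert b''.join(cipher.decrypt_ecb_cts(ciphertext)) == message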
def connection(self): """ A property to retrieve the sampler connection information. """ return {'host': self.host, 'namespace': self.namespace, 'username': self.username, 'password': self.password}
def function[connection, parameter[self]]: constant[ A property to retrieve the sampler connection information. ] return[dictionary[[<ast.Constant object at 0x7da18f09c400>, <ast.Constant object at 0x7da18f09ce50>, <ast.Constant object at 0x7da18f09e5c0>, <ast.Constant object at 0x7da18f09cbb0>], [<ast.Attribute object at 0x7da18f09da20>, <ast.Attribute object at 0x7da18f09cb80>, <ast.Attribute object at 0x7da18f09f310>, <ast.Attribute object at 0x7da18f09d240>]]]
keyword[def] identifier[connection] ( identifier[self] ): literal[string] keyword[return] { literal[string] : identifier[self] . identifier[host] , literal[string] : identifier[self] . identifier[namespace] , literal[string] : identifier[self] . identifier[username] , literal[string] : identifier[self] . identifier[password] }
def connection(self): """ A property to retrieve the sampler connection information. """ return {'host': self.host, 'namespace': self.namespace, 'username': self.username, 'password': self.password}
def open_hierarchy(self, path, relative_to_object_id, object_id, create_file_type=0): """ CreateFileType 0 - Creates no new object. 1 - Creates a notebook with the specified name at the specified location. 2 - Creates a section group with the specified name at the specified location. 3 - Creates a section with the specified name at the specified location. """ try: return(self.process.OpenHierarchy(path, relative_to_object_id, "", create_file_type)) except Exception as e: print(e) print("Could not Open Hierarchy")
def function[open_hierarchy, parameter[self, path, relative_to_object_id, object_id, create_file_type]]: constant[ CreateFileType 0 - Creates no new object. 1 - Creates a notebook with the specified name at the specified location. 2 - Creates a section group with the specified name at the specified location. 3 - Creates a section with the specified name at the specified location. ] <ast.Try object at 0x7da1b26ac460>
keyword[def] identifier[open_hierarchy] ( identifier[self] , identifier[path] , identifier[relative_to_object_id] , identifier[object_id] , identifier[create_file_type] = literal[int] ): literal[string] keyword[try] : keyword[return] ( identifier[self] . identifier[process] . identifier[OpenHierarchy] ( identifier[path] , identifier[relative_to_object_id] , literal[string] , identifier[create_file_type] )) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[print] ( identifier[e] ) identifier[print] ( literal[string] )
def open_hierarchy(self, path, relative_to_object_id, object_id, create_file_type=0): """ CreateFileType 0 - Creates no new object. 1 - Creates a notebook with the specified name at the specified location. 2 - Creates a section group with the specified name at the specified location. 3 - Creates a section with the specified name at the specified location. """ try: return self.process.OpenHierarchy(path, relative_to_object_id, '', create_file_type) # depends on [control=['try'], data=[]] except Exception as e: print(e) print('Could not Open Hierarchy') # depends on [control=['except'], data=['e']]
def added(self): ''' Returns all keys that have been added. If the keys are in child dictionaries they will be represented with . notation ''' def _added(diffs, prefix): keys = [] for key in diffs.keys(): if isinstance(diffs[key], dict) and 'old' not in diffs[key]: keys.extend(_added(diffs[key], prefix='{0}{1}.'.format(prefix, key))) elif diffs[key]['old'] == self.NONE_VALUE: if isinstance(diffs[key]['new'], dict): keys.extend( _added(diffs[key]['new'], prefix='{0}{1}.'.format(prefix, key))) else: keys.append('{0}{1}'.format(prefix, key)) return keys return sorted(_added(self._diffs, prefix=''))
def function[added, parameter[self]]: constant[ Returns all keys that have been added. If the keys are in child dictionaries they will be represented with . notation ] def function[_added, parameter[diffs, prefix]]: variable[keys] assign[=] list[[]] for taget[name[key]] in starred[call[name[diffs].keys, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da1b1c1b2e0> begin[:] call[name[keys].extend, parameter[call[name[_added], parameter[call[name[diffs]][name[key]]]]]] return[name[keys]] return[call[name[sorted], parameter[call[name[_added], parameter[name[self]._diffs]]]]]
keyword[def] identifier[added] ( identifier[self] ): literal[string] keyword[def] identifier[_added] ( identifier[diffs] , identifier[prefix] ): identifier[keys] =[] keyword[for] identifier[key] keyword[in] identifier[diffs] . identifier[keys] (): keyword[if] identifier[isinstance] ( identifier[diffs] [ identifier[key] ], identifier[dict] ) keyword[and] literal[string] keyword[not] keyword[in] identifier[diffs] [ identifier[key] ]: identifier[keys] . identifier[extend] ( identifier[_added] ( identifier[diffs] [ identifier[key] ], identifier[prefix] = literal[string] . identifier[format] ( identifier[prefix] , identifier[key] ))) keyword[elif] identifier[diffs] [ identifier[key] ][ literal[string] ]== identifier[self] . identifier[NONE_VALUE] : keyword[if] identifier[isinstance] ( identifier[diffs] [ identifier[key] ][ literal[string] ], identifier[dict] ): identifier[keys] . identifier[extend] ( identifier[_added] ( identifier[diffs] [ identifier[key] ][ literal[string] ], identifier[prefix] = literal[string] . identifier[format] ( identifier[prefix] , identifier[key] ))) keyword[else] : identifier[keys] . identifier[append] ( literal[string] . identifier[format] ( identifier[prefix] , identifier[key] )) keyword[return] identifier[keys] keyword[return] identifier[sorted] ( identifier[_added] ( identifier[self] . identifier[_diffs] , identifier[prefix] = literal[string] ))
def added(self): """ Returns all keys that have been added. If the keys are in child dictionaries they will be represented with . notation """ def _added(diffs, prefix): keys = [] for key in diffs.keys(): if isinstance(diffs[key], dict) and 'old' not in diffs[key]: keys.extend(_added(diffs[key], prefix='{0}{1}.'.format(prefix, key))) # depends on [control=['if'], data=[]] elif diffs[key]['old'] == self.NONE_VALUE: if isinstance(diffs[key]['new'], dict): keys.extend(_added(diffs[key]['new'], prefix='{0}{1}.'.format(prefix, key))) # depends on [control=['if'], data=[]] else: keys.append('{0}{1}'.format(prefix, key)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] return keys return sorted(_added(self._diffs, prefix=''))
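A simplified standalone illustration of the dotted-key walk in added; NONE_VALUE and the {'old': ..., 'new': ...} leaf shape are assumptions mirroring the diff class this method belongs to, and the nested-new-dict branch is omitted for brevity.

NONE_VALUE = '<not set>'  # assumed sentinel, standing in for self.NONE_VALUE

diffs = {
    'a': {'old': NONE_VALUE, 'new': 1},         # added at the top level
    'b': {'c': {'old': NONE_VALUE, 'new': 2}},  # added inside a child dict
    'd': {'old': 3, 'new': 4},                  # changed, so not reported
}

def _added(diffs, prefix=''):
    keys = []
    for key, value in diffs.items():
        if 'old' not in value:                  # descend into child dictionaries
            keys.extend(_added(value, prefix='{0}{1}.'.format(prefix, key)))
        elif value['old'] == NONE_VALUE:
            keys.append('{0}{1}'.format(prefix, key))
    return keys

print(sorted(_added(diffs)))  # ['a', 'b.c']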
def list(gandi, only_data, only_snapshot, attached, detached, type, id, vm, snapshotprofile, datacenter, limit): """ List disks. """ options = { 'items_per_page': limit, } if attached and detached: raise UsageError('You cannot use both --attached and --detached.') if only_data: options.setdefault('type', []).append('data') if only_snapshot: options.setdefault('type', []).append('snapshot') if datacenter: options['datacenter_id'] = gandi.datacenter.usable_id(datacenter) output_keys = ['name', 'state', 'size'] if type: output_keys.append('type') if id: output_keys.append('id') if vm: output_keys.append('vm') profiles = [] if snapshotprofile: output_keys.append('profile') profiles = gandi.snapshotprofile.list() result = gandi.disk.list(options) vms = dict([(vm_['id'], vm_) for vm_ in gandi.iaas.list()]) # filter results per attached/detached disks = [] for disk in result: if attached and not disk['vms_id']: continue if detached and disk['vms_id']: continue disks.append(disk) for num, disk in enumerate(disks): if num: gandi.separator_line() output_disk(gandi, disk, [], vms, profiles, output_keys) return result
def function[list, parameter[gandi, only_data, only_snapshot, attached, detached, type, id, vm, snapshotprofile, datacenter, limit]]: constant[ List disks. ] variable[options] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cf280>], [<ast.Name object at 0x7da18c4cef50>]] if <ast.BoolOp object at 0x7da18c4cca00> begin[:] <ast.Raise object at 0x7da18c4ce920> if name[only_data] begin[:] call[call[name[options].setdefault, parameter[constant[type], list[[]]]].append, parameter[constant[data]]] if name[only_snapshot] begin[:] call[call[name[options].setdefault, parameter[constant[type], list[[]]]].append, parameter[constant[snapshot]]] if name[datacenter] begin[:] call[name[options]][constant[datacenter_id]] assign[=] call[name[gandi].datacenter.usable_id, parameter[name[datacenter]]] variable[output_keys] assign[=] list[[<ast.Constant object at 0x7da18c4cffd0>, <ast.Constant object at 0x7da18c4cdc00>, <ast.Constant object at 0x7da18c4cd630>]] if name[type] begin[:] call[name[output_keys].append, parameter[constant[type]]] if name[id] begin[:] call[name[output_keys].append, parameter[constant[id]]] if name[vm] begin[:] call[name[output_keys].append, parameter[constant[vm]]] variable[profiles] assign[=] list[[]] if name[snapshotprofile] begin[:] call[name[output_keys].append, parameter[constant[profile]]] variable[profiles] assign[=] call[name[gandi].snapshotprofile.list, parameter[]] variable[result] assign[=] call[name[gandi].disk.list, parameter[name[options]]] variable[vms] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da18c4cecb0>]] variable[disks] assign[=] list[[]] for taget[name[disk]] in starred[name[result]] begin[:] if <ast.BoolOp object at 0x7da18c4ce710> begin[:] continue if <ast.BoolOp object at 0x7da18c4ce7d0> begin[:] continue call[name[disks].append, parameter[name[disk]]] for taget[tuple[[<ast.Name object at 0x7da18c4cfa00>, <ast.Name object at 0x7da18c4cef20>]]] in starred[call[name[enumerate], parameter[name[disks]]]] begin[:] if name[num] begin[:] call[name[gandi].separator_line, parameter[]] call[name[output_disk], parameter[name[gandi], name[disk], list[[]], name[vms], name[profiles], name[output_keys]]] return[name[result]]
keyword[def] identifier[list] ( identifier[gandi] , identifier[only_data] , identifier[only_snapshot] , identifier[attached] , identifier[detached] , identifier[type] , identifier[id] , identifier[vm] , identifier[snapshotprofile] , identifier[datacenter] , identifier[limit] ): literal[string] identifier[options] ={ literal[string] : identifier[limit] , } keyword[if] identifier[attached] keyword[and] identifier[detached] : keyword[raise] identifier[UsageError] ( literal[string] ) keyword[if] identifier[only_data] : identifier[options] . identifier[setdefault] ( literal[string] ,[]). identifier[append] ( literal[string] ) keyword[if] identifier[only_snapshot] : identifier[options] . identifier[setdefault] ( literal[string] ,[]). identifier[append] ( literal[string] ) keyword[if] identifier[datacenter] : identifier[options] [ literal[string] ]= identifier[gandi] . identifier[datacenter] . identifier[usable_id] ( identifier[datacenter] ) identifier[output_keys] =[ literal[string] , literal[string] , literal[string] ] keyword[if] identifier[type] : identifier[output_keys] . identifier[append] ( literal[string] ) keyword[if] identifier[id] : identifier[output_keys] . identifier[append] ( literal[string] ) keyword[if] identifier[vm] : identifier[output_keys] . identifier[append] ( literal[string] ) identifier[profiles] =[] keyword[if] identifier[snapshotprofile] : identifier[output_keys] . identifier[append] ( literal[string] ) identifier[profiles] = identifier[gandi] . identifier[snapshotprofile] . identifier[list] () identifier[result] = identifier[gandi] . identifier[disk] . identifier[list] ( identifier[options] ) identifier[vms] = identifier[dict] ([( identifier[vm_] [ literal[string] ], identifier[vm_] ) keyword[for] identifier[vm_] keyword[in] identifier[gandi] . identifier[iaas] . identifier[list] ()]) identifier[disks] =[] keyword[for] identifier[disk] keyword[in] identifier[result] : keyword[if] identifier[attached] keyword[and] keyword[not] identifier[disk] [ literal[string] ]: keyword[continue] keyword[if] identifier[detached] keyword[and] identifier[disk] [ literal[string] ]: keyword[continue] identifier[disks] . identifier[append] ( identifier[disk] ) keyword[for] identifier[num] , identifier[disk] keyword[in] identifier[enumerate] ( identifier[disks] ): keyword[if] identifier[num] : identifier[gandi] . identifier[separator_line] () identifier[output_disk] ( identifier[gandi] , identifier[disk] ,[], identifier[vms] , identifier[profiles] , identifier[output_keys] ) keyword[return] identifier[result]
def list(gandi, only_data, only_snapshot, attached, detached, type, id, vm, snapshotprofile, datacenter, limit): """ List disks. """ options = {'items_per_page': limit} if attached and detached: raise UsageError('You cannot use both --attached and --detached.') # depends on [control=['if'], data=[]] if only_data: options.setdefault('type', []).append('data') # depends on [control=['if'], data=[]] if only_snapshot: options.setdefault('type', []).append('snapshot') # depends on [control=['if'], data=[]] if datacenter: options['datacenter_id'] = gandi.datacenter.usable_id(datacenter) # depends on [control=['if'], data=[]] output_keys = ['name', 'state', 'size'] if type: output_keys.append('type') # depends on [control=['if'], data=[]] if id: output_keys.append('id') # depends on [control=['if'], data=[]] if vm: output_keys.append('vm') # depends on [control=['if'], data=[]] profiles = [] if snapshotprofile: output_keys.append('profile') profiles = gandi.snapshotprofile.list() # depends on [control=['if'], data=[]] result = gandi.disk.list(options) vms = dict([(vm_['id'], vm_) for vm_ in gandi.iaas.list()]) # filter results per attached/detached disks = [] for disk in result: if attached and (not disk['vms_id']): continue # depends on [control=['if'], data=[]] if detached and disk['vms_id']: continue # depends on [control=['if'], data=[]] disks.append(disk) # depends on [control=['for'], data=['disk']] for (num, disk) in enumerate(disks): if num: gandi.separator_line() # depends on [control=['if'], data=[]] output_disk(gandi, disk, [], vms, profiles, output_keys) # depends on [control=['for'], data=[]] return result
def resolve(self, pubID, sysID): """Do a complete resolution lookup of an External Identifier """ ret = libxml2mod.xmlACatalogResolve(self._o, pubID, sysID) return ret
def function[resolve, parameter[self, pubID, sysID]]: constant[Do a complete resolution lookup of an External Identifier ] variable[ret] assign[=] call[name[libxml2mod].xmlACatalogResolve, parameter[name[self]._o, name[pubID], name[sysID]]] return[name[ret]]
keyword[def] identifier[resolve] ( identifier[self] , identifier[pubID] , identifier[sysID] ): literal[string] identifier[ret] = identifier[libxml2mod] . identifier[xmlACatalogResolve] ( identifier[self] . identifier[_o] , identifier[pubID] , identifier[sysID] ) keyword[return] identifier[ret]
def resolve(self, pubID, sysID): """Do a complete resolution lookup of an External Identifier """ ret = libxml2mod.xmlACatalogResolve(self._o, pubID, sysID) return ret
def get_name(tags_or_instance_or_id):
  """Helper utility to extract name out of tags dictionary or instance.

  [{'Key': 'Name', 'Value': 'nexus'}] -> 'nexus'

  Assert fails if there's more than one name.
  Returns '' if there's less than one name.
  """
  ec2 = get_ec2_resource()
  if hasattr(tags_or_instance_or_id, 'tags'):
    tags = tags_or_instance_or_id.tags
  elif isinstance(tags_or_instance_or_id, str):
    tags = ec2.Instance(tags_or_instance_or_id).tags
  elif tags_or_instance_or_id is None:
    return EMPTY_NAME
  else:
    assert isinstance(tags_or_instance_or_id, Iterable), "expected iterable of tags"
    tags = tags_or_instance_or_id

  if not tags:
    return EMPTY_NAME
  names = [entry['Value'] for entry in tags if entry['Key'] == 'Name']
  if not names:
    return ''
  if len(names) > 1:
    assert False, "have more than one name: " + str(names)
  return names[0]
def function[get_name, parameter[tags_or_instance_or_id]]:
    constant[Helper utility to extract name out of tags dictionary or instance.

  [{'Key': 'Name', 'Value': 'nexus'}] -> 'nexus'

  Assert fails if there's more than one name.
  Returns '' if there's less than one name.
  ]
    variable[ec2] assign[=] call[name[get_ec2_resource], parameter[]]
    if call[name[hasattr], parameter[name[tags_or_instance_or_id], constant[tags]]] begin[:]
        variable[tags] assign[=] name[tags_or_instance_or_id].tags
    if <ast.UnaryOp object at 0x7da207f00460> begin[:]
        return[name[EMPTY_NAME]]
    variable[names] assign[=] <ast.ListComp object at 0x7da207f02650>
    if <ast.UnaryOp object at 0x7da207f01390> begin[:]
        return[constant[]]
    if compare[call[name[len], parameter[name[names]]] greater[>] constant[1]] begin[:]
        assert[constant[False]]
    return[call[name[names]][constant[0]]]
keyword[def] identifier[get_name] ( identifier[tags_or_instance_or_id] ): literal[string] identifier[ec2] = identifier[get_ec2_resource] () keyword[if] identifier[hasattr] ( identifier[tags_or_instance_or_id] , literal[string] ): identifier[tags] = identifier[tags_or_instance_or_id] . identifier[tags] keyword[elif] identifier[isinstance] ( identifier[tags_or_instance_or_id] , identifier[str] ): identifier[tags] = identifier[ec2] . identifier[Instance] ( identifier[tags_or_instance_or_id] ). identifier[tags] keyword[elif] identifier[tags_or_instance_or_id] keyword[is] keyword[None] : keyword[return] identifier[EMPTY_NAME] keyword[else] : keyword[assert] identifier[isinstance] ( identifier[tags_or_instance_or_id] , identifier[Iterable] ), literal[string] identifier[tags] = identifier[tags_or_instance_or_id] keyword[if] keyword[not] identifier[tags] : keyword[return] identifier[EMPTY_NAME] identifier[names] =[ identifier[entry] [ literal[string] ] keyword[for] identifier[entry] keyword[in] identifier[tags] keyword[if] identifier[entry] [ literal[string] ]== literal[string] ] keyword[if] keyword[not] identifier[names] : keyword[return] literal[string] keyword[if] identifier[len] ( identifier[names] )> literal[int] : keyword[assert] keyword[False] , literal[string] + identifier[str] ( identifier[names] ) keyword[return] identifier[names] [ literal[int] ]
def get_name(tags_or_instance_or_id):
    """Helper utility to extract name out of tags dictionary or instance.

  [{'Key': 'Name', 'Value': 'nexus'}] -> 'nexus'

  Assert fails if there's more than one name.
  Returns '' if there's less than one name.
  """
    ec2 = get_ec2_resource()
    if hasattr(tags_or_instance_or_id, 'tags'):
        tags = tags_or_instance_or_id.tags # depends on [control=['if'], data=[]]
    elif isinstance(tags_or_instance_or_id, str):
        tags = ec2.Instance(tags_or_instance_or_id).tags # depends on [control=['if'], data=[]]
    elif tags_or_instance_or_id is None:
        return EMPTY_NAME # depends on [control=['if'], data=[]]
    else:
        assert isinstance(tags_or_instance_or_id, Iterable), 'expected iterable of tags'
        tags = tags_or_instance_or_id
    if not tags:
        return EMPTY_NAME # depends on [control=['if'], data=[]]
    names = [entry['Value'] for entry in tags if entry['Key'] == 'Name']
    if not names:
        return '' # depends on [control=['if'], data=[]]
    if len(names) > 1:
        assert False, 'have more than one name: ' + str(names) # depends on [control=['if'], data=[]]
    return names[0]
def visitPrefixDecl(self, ctx: ShExDocParser.PrefixDeclContext): """ prefixDecl: KW_PREFIX PNAME_NS IRIREF """ iri = self.context.iriref_to_shexj_iriref(ctx.IRIREF()) prefix = ctx.PNAME_NS().getText() if iri not in self.context.ld_prefixes: self.context.prefixes.setdefault(prefix, iri.val)
def function[visitPrefixDecl, parameter[self, ctx]]: constant[ prefixDecl: KW_PREFIX PNAME_NS IRIREF ] variable[iri] assign[=] call[name[self].context.iriref_to_shexj_iriref, parameter[call[name[ctx].IRIREF, parameter[]]]] variable[prefix] assign[=] call[call[name[ctx].PNAME_NS, parameter[]].getText, parameter[]] if compare[name[iri] <ast.NotIn object at 0x7da2590d7190> name[self].context.ld_prefixes] begin[:] call[name[self].context.prefixes.setdefault, parameter[name[prefix], name[iri].val]]
keyword[def] identifier[visitPrefixDecl] ( identifier[self] , identifier[ctx] : identifier[ShExDocParser] . identifier[PrefixDeclContext] ): literal[string] identifier[iri] = identifier[self] . identifier[context] . identifier[iriref_to_shexj_iriref] ( identifier[ctx] . identifier[IRIREF] ()) identifier[prefix] = identifier[ctx] . identifier[PNAME_NS] (). identifier[getText] () keyword[if] identifier[iri] keyword[not] keyword[in] identifier[self] . identifier[context] . identifier[ld_prefixes] : identifier[self] . identifier[context] . identifier[prefixes] . identifier[setdefault] ( identifier[prefix] , identifier[iri] . identifier[val] )
def visitPrefixDecl(self, ctx: ShExDocParser.PrefixDeclContext): """ prefixDecl: KW_PREFIX PNAME_NS IRIREF """ iri = self.context.iriref_to_shexj_iriref(ctx.IRIREF()) prefix = ctx.PNAME_NS().getText() if iri not in self.context.ld_prefixes: self.context.prefixes.setdefault(prefix, iri.val) # depends on [control=['if'], data=['iri']]
async def sync_recent_conversations( self, sync_recent_conversations_request ): """Return info on recent conversations and their events.""" response = hangouts_pb2.SyncRecentConversationsResponse() await self._pb_request('conversations/syncrecentconversations', sync_recent_conversations_request, response) return response
<ast.AsyncFunctionDef object at 0x7da18f00dc90>
keyword[async] keyword[def] identifier[sync_recent_conversations] ( identifier[self] , identifier[sync_recent_conversations_request] ): literal[string] identifier[response] = identifier[hangouts_pb2] . identifier[SyncRecentConversationsResponse] () keyword[await] identifier[self] . identifier[_pb_request] ( literal[string] , identifier[sync_recent_conversations_request] , identifier[response] ) keyword[return] identifier[response]
async def sync_recent_conversations(self, sync_recent_conversations_request): """Return info on recent conversations and their events.""" response = hangouts_pb2.SyncRecentConversationsResponse() await self._pb_request('conversations/syncrecentconversations', sync_recent_conversations_request, response) return response
def wait_for_operation_completion(self, operation, timeout): """Waits until the given operation is done with a given timeout in milliseconds; specify -1 for an indefinite wait. See :py:func:`wait_for_completion` for event queue considerations. in operation of type int Number of the operation to wait for. Must be less than :py:func:`operation_count` . in timeout of type int Maximum time in milliseconds to wait or -1 to wait indefinitely. raises :class:`VBoxErrorIprtError` Failed to wait for operation completion. """ if not isinstance(operation, baseinteger): raise TypeError("operation can only be an instance of type baseinteger") if not isinstance(timeout, baseinteger): raise TypeError("timeout can only be an instance of type baseinteger") self._call("waitForOperationCompletion", in_p=[operation, timeout])
def function[wait_for_operation_completion, parameter[self, operation, timeout]]: constant[Waits until the given operation is done with a given timeout in milliseconds; specify -1 for an indefinite wait. See :py:func:`wait_for_completion` for event queue considerations. in operation of type int Number of the operation to wait for. Must be less than :py:func:`operation_count` . in timeout of type int Maximum time in milliseconds to wait or -1 to wait indefinitely. raises :class:`VBoxErrorIprtError` Failed to wait for operation completion. ] if <ast.UnaryOp object at 0x7da20e9b3a00> begin[:] <ast.Raise object at 0x7da20e9b29e0> if <ast.UnaryOp object at 0x7da20e9b13c0> begin[:] <ast.Raise object at 0x7da20e9b3490> call[name[self]._call, parameter[constant[waitForOperationCompletion]]]
keyword[def] identifier[wait_for_operation_completion] ( identifier[self] , identifier[operation] , identifier[timeout] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[operation] , identifier[baseinteger] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[timeout] , identifier[baseinteger] ): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[self] . identifier[_call] ( literal[string] , identifier[in_p] =[ identifier[operation] , identifier[timeout] ])
def wait_for_operation_completion(self, operation, timeout): """Waits until the given operation is done with a given timeout in milliseconds; specify -1 for an indefinite wait. See :py:func:`wait_for_completion` for event queue considerations. in operation of type int Number of the operation to wait for. Must be less than :py:func:`operation_count` . in timeout of type int Maximum time in milliseconds to wait or -1 to wait indefinitely. raises :class:`VBoxErrorIprtError` Failed to wait for operation completion. """ if not isinstance(operation, baseinteger): raise TypeError('operation can only be an instance of type baseinteger') # depends on [control=['if'], data=[]] if not isinstance(timeout, baseinteger): raise TypeError('timeout can only be an instance of type baseinteger') # depends on [control=['if'], data=[]] self._call('waitForOperationCompletion', in_p=[operation, timeout])
def merge(self, other): # type: (TentativeType) -> None """ Merge two TentativeType instances """ for hashables in other.types_hashable: self.add(hashables) for non_hashbles in other.types: self.add(non_hashbles)
def function[merge, parameter[self, other]]: constant[ Merge two TentativeType instances ] for taget[name[hashables]] in starred[name[other].types_hashable] begin[:] call[name[self].add, parameter[name[hashables]]] for taget[name[non_hashbles]] in starred[name[other].types] begin[:] call[name[self].add, parameter[name[non_hashbles]]]
keyword[def] identifier[merge] ( identifier[self] , identifier[other] ): literal[string] keyword[for] identifier[hashables] keyword[in] identifier[other] . identifier[types_hashable] : identifier[self] . identifier[add] ( identifier[hashables] ) keyword[for] identifier[non_hashbles] keyword[in] identifier[other] . identifier[types] : identifier[self] . identifier[add] ( identifier[non_hashbles] )
def merge(self, other): # type: (TentativeType) -> None '\n Merge two TentativeType instances\n ' for hashables in other.types_hashable: self.add(hashables) # depends on [control=['for'], data=['hashables']] for non_hashbles in other.types: self.add(non_hashbles) # depends on [control=['for'], data=['non_hashbles']]
def add_phenotype(self, ind_obj, phenotype_id): """Add a phenotype term to the case.""" if phenotype_id.startswith('HP:') or len(phenotype_id) == 7: logger.debug('querying on HPO term') hpo_results = phizz.query_hpo([phenotype_id]) else: logger.debug('querying on OMIM term') hpo_results = phizz.query_disease([phenotype_id]) added_terms = [] if hpo_results else None existing_ids = set(term.phenotype_id for term in ind_obj.phenotypes) for result in hpo_results: if result['hpo_term'] not in existing_ids: term = PhenotypeTerm(phenotype_id=result['hpo_term'], description=result['description']) logger.info('adding new HPO term: %s', term.phenotype_id) ind_obj.phenotypes.append(term) added_terms.append(term) logger.debug('storing new HPO terms') self.save() if added_terms is not None and len(added_terms) > 0: for case_obj in ind_obj.cases: self.update_hpolist(case_obj) return added_terms
def function[add_phenotype, parameter[self, ind_obj, phenotype_id]]: constant[Add a phenotype term to the case.] if <ast.BoolOp object at 0x7da18f811b40> begin[:] call[name[logger].debug, parameter[constant[querying on HPO term]]] variable[hpo_results] assign[=] call[name[phizz].query_hpo, parameter[list[[<ast.Name object at 0x7da18f811480>]]]] variable[added_terms] assign[=] <ast.IfExp object at 0x7da18f811180> variable[existing_ids] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da18f813340>]] for taget[name[result]] in starred[name[hpo_results]] begin[:] if compare[call[name[result]][constant[hpo_term]] <ast.NotIn object at 0x7da2590d7190> name[existing_ids]] begin[:] variable[term] assign[=] call[name[PhenotypeTerm], parameter[]] call[name[logger].info, parameter[constant[adding new HPO term: %s], name[term].phenotype_id]] call[name[ind_obj].phenotypes.append, parameter[name[term]]] call[name[added_terms].append, parameter[name[term]]] call[name[logger].debug, parameter[constant[storing new HPO terms]]] call[name[self].save, parameter[]] if <ast.BoolOp object at 0x7da2044c3550> begin[:] for taget[name[case_obj]] in starred[name[ind_obj].cases] begin[:] call[name[self].update_hpolist, parameter[name[case_obj]]] return[name[added_terms]]
keyword[def] identifier[add_phenotype] ( identifier[self] , identifier[ind_obj] , identifier[phenotype_id] ): literal[string] keyword[if] identifier[phenotype_id] . identifier[startswith] ( literal[string] ) keyword[or] identifier[len] ( identifier[phenotype_id] )== literal[int] : identifier[logger] . identifier[debug] ( literal[string] ) identifier[hpo_results] = identifier[phizz] . identifier[query_hpo] ([ identifier[phenotype_id] ]) keyword[else] : identifier[logger] . identifier[debug] ( literal[string] ) identifier[hpo_results] = identifier[phizz] . identifier[query_disease] ([ identifier[phenotype_id] ]) identifier[added_terms] =[] keyword[if] identifier[hpo_results] keyword[else] keyword[None] identifier[existing_ids] = identifier[set] ( identifier[term] . identifier[phenotype_id] keyword[for] identifier[term] keyword[in] identifier[ind_obj] . identifier[phenotypes] ) keyword[for] identifier[result] keyword[in] identifier[hpo_results] : keyword[if] identifier[result] [ literal[string] ] keyword[not] keyword[in] identifier[existing_ids] : identifier[term] = identifier[PhenotypeTerm] ( identifier[phenotype_id] = identifier[result] [ literal[string] ], identifier[description] = identifier[result] [ literal[string] ]) identifier[logger] . identifier[info] ( literal[string] , identifier[term] . identifier[phenotype_id] ) identifier[ind_obj] . identifier[phenotypes] . identifier[append] ( identifier[term] ) identifier[added_terms] . identifier[append] ( identifier[term] ) identifier[logger] . identifier[debug] ( literal[string] ) identifier[self] . identifier[save] () keyword[if] identifier[added_terms] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[added_terms] )> literal[int] : keyword[for] identifier[case_obj] keyword[in] identifier[ind_obj] . identifier[cases] : identifier[self] . identifier[update_hpolist] ( identifier[case_obj] ) keyword[return] identifier[added_terms]
def add_phenotype(self, ind_obj, phenotype_id): """Add a phenotype term to the case.""" if phenotype_id.startswith('HP:') or len(phenotype_id) == 7: logger.debug('querying on HPO term') hpo_results = phizz.query_hpo([phenotype_id]) # depends on [control=['if'], data=[]] else: logger.debug('querying on OMIM term') hpo_results = phizz.query_disease([phenotype_id]) added_terms = [] if hpo_results else None existing_ids = set((term.phenotype_id for term in ind_obj.phenotypes)) for result in hpo_results: if result['hpo_term'] not in existing_ids: term = PhenotypeTerm(phenotype_id=result['hpo_term'], description=result['description']) logger.info('adding new HPO term: %s', term.phenotype_id) ind_obj.phenotypes.append(term) added_terms.append(term) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['result']] logger.debug('storing new HPO terms') self.save() if added_terms is not None and len(added_terms) > 0: for case_obj in ind_obj.cases: self.update_hpolist(case_obj) # depends on [control=['for'], data=['case_obj']] # depends on [control=['if'], data=[]] return added_terms
def broadcast_long(self): """ Broadcast address, as long. >>> localnet = Network('127.0.0.1/8') >>> print(localnet.broadcast_long()) 2147483647 """ if self.version() == 4: return self.network_long() | (MAX_IPV4 - self.netmask_long()) else: return self.network_long() \ | (MAX_IPV6 - self.netmask_long())
def function[broadcast_long, parameter[self]]: constant[ Broadcast address, as long. >>> localnet = Network('127.0.0.1/8') >>> print(localnet.broadcast_long()) 2147483647 ] if compare[call[name[self].version, parameter[]] equal[==] constant[4]] begin[:] return[binary_operation[call[name[self].network_long, parameter[]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[name[MAX_IPV4] - call[name[self].netmask_long, parameter[]]]]]
keyword[def] identifier[broadcast_long] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[version] ()== literal[int] : keyword[return] identifier[self] . identifier[network_long] ()|( identifier[MAX_IPV4] - identifier[self] . identifier[netmask_long] ()) keyword[else] : keyword[return] identifier[self] . identifier[network_long] ()|( identifier[MAX_IPV6] - identifier[self] . identifier[netmask_long] ())
def broadcast_long(self): """ Broadcast address, as long. >>> localnet = Network('127.0.0.1/8') >>> print(localnet.broadcast_long()) 2147483647 """ if self.version() == 4: return self.network_long() | MAX_IPV4 - self.netmask_long() # depends on [control=['if'], data=[]] else: return self.network_long() | MAX_IPV6 - self.netmask_long()
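The same broadcast computation cross-checked with the standard-library ipaddress module (strict=False is needed because the doctest builds the network from a host address, 127.0.0.1/8):

import ipaddress

net = ipaddress.ip_network('127.0.0.1/8', strict=False)
print(int(net.broadcast_address))  # 2147483647, matching broadcast_long() above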
def connect_outgoing(self, outgoing_task, outgoing_task_node, sequence_flow_node, is_default): """ Connects this task to the indicating outgoing task, with the details in the sequence flow. A subclass can override this method to get extra information from the node. """ self.task.connect_outgoing( outgoing_task, sequence_flow_node.get('id'), sequence_flow_node.get( 'name', None), self.parser._parse_documentation(sequence_flow_node, task_parser=self))
def function[connect_outgoing, parameter[self, outgoing_task, outgoing_task_node, sequence_flow_node, is_default]]: constant[ Connects this task to the indicating outgoing task, with the details in the sequence flow. A subclass can override this method to get extra information from the node. ] call[name[self].task.connect_outgoing, parameter[name[outgoing_task], call[name[sequence_flow_node].get, parameter[constant[id]]], call[name[sequence_flow_node].get, parameter[constant[name], constant[None]]], call[name[self].parser._parse_documentation, parameter[name[sequence_flow_node]]]]]
keyword[def] identifier[connect_outgoing] ( identifier[self] , identifier[outgoing_task] , identifier[outgoing_task_node] , identifier[sequence_flow_node] , identifier[is_default] ): literal[string] identifier[self] . identifier[task] . identifier[connect_outgoing] ( identifier[outgoing_task] , identifier[sequence_flow_node] . identifier[get] ( literal[string] ), identifier[sequence_flow_node] . identifier[get] ( literal[string] , keyword[None] ), identifier[self] . identifier[parser] . identifier[_parse_documentation] ( identifier[sequence_flow_node] , identifier[task_parser] = identifier[self] ))
def connect_outgoing(self, outgoing_task, outgoing_task_node, sequence_flow_node, is_default): """ Connects this task to the indicating outgoing task, with the details in the sequence flow. A subclass can override this method to get extra information from the node. """ self.task.connect_outgoing(outgoing_task, sequence_flow_node.get('id'), sequence_flow_node.get('name', None), self.parser._parse_documentation(sequence_flow_node, task_parser=self))
def stats(self, node_id=None, params=None): """ The Cluster Stats API allows to retrieve statistics from a cluster wide perspective. The API returns basic index metrics and information about the current nodes that form the cluster. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html>`_ :arg node_id: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes :arg flat_settings: Return settings in flat format (default: false) :arg timeout: Explicit operation timeout """ url = '/_cluster/stats' if node_id: url = _make_path('_cluster/stats/nodes', node_id) return self.transport.perform_request('GET', url, params=params)
def function[stats, parameter[self, node_id, params]]: constant[ The Cluster Stats API allows to retrieve statistics from a cluster wide perspective. The API returns basic index metrics and information about the current nodes that form the cluster. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html>`_ :arg node_id: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes :arg flat_settings: Return settings in flat format (default: false) :arg timeout: Explicit operation timeout ] variable[url] assign[=] constant[/_cluster/stats] if name[node_id] begin[:] variable[url] assign[=] call[name[_make_path], parameter[constant[_cluster/stats/nodes], name[node_id]]] return[call[name[self].transport.perform_request, parameter[constant[GET], name[url]]]]
keyword[def] identifier[stats] ( identifier[self] , identifier[node_id] = keyword[None] , identifier[params] = keyword[None] ): literal[string] identifier[url] = literal[string] keyword[if] identifier[node_id] : identifier[url] = identifier[_make_path] ( literal[string] , identifier[node_id] ) keyword[return] identifier[self] . identifier[transport] . identifier[perform_request] ( literal[string] , identifier[url] , identifier[params] = identifier[params] )
def stats(self, node_id=None, params=None): """ The Cluster Stats API allows to retrieve statistics from a cluster wide perspective. The API returns basic index metrics and information about the current nodes that form the cluster. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html>`_ :arg node_id: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes :arg flat_settings: Return settings in flat format (default: false) :arg timeout: Explicit operation timeout """ url = '/_cluster/stats' if node_id: url = _make_path('_cluster/stats/nodes', node_id) # depends on [control=['if'], data=[]] return self.transport.perform_request('GET', url, params=params)
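A hedged usage sketch through elasticsearch-py's public client, which ultimately issues the same perform_request as above. The localhost URL is an assumption for a local test cluster:

from elasticsearch import Elasticsearch

es = Elasticsearch(['http://localhost:9200'])   # assumed local test cluster
stats = es.cluster.stats(node_id='_local')      # GET /_cluster/stats/nodes/_local
print(stats['nodes']['count'])                  # e.g. {'total': 1, ...}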
def _get_struct_gradient(self, shape_number): """Get the values for the GRADIENT record.""" obj = _make_object("Gradient") bc = BitConsumer(self._src) obj.SpreadMode = bc.u_get(2) obj.InterpolationMode = bc.u_get(2) obj.NumGradients = bc.u_get(4) obj.GradientRecords = gradient_records = [] for _ in range(obj.NumGradients): record = _make_object("GradRecord") gradient_records.append(record) record.Ratio = unpack_ui8(self._src) if shape_number <= 2: record.Color = self._get_struct_rgb() else: record.Color = self._get_struct_rgba() return obj
def function[_get_struct_gradient, parameter[self, shape_number]]: constant[Get the values for the GRADIENT record.] variable[obj] assign[=] call[name[_make_object], parameter[constant[Gradient]]] variable[bc] assign[=] call[name[BitConsumer], parameter[name[self]._src]] name[obj].SpreadMode assign[=] call[name[bc].u_get, parameter[constant[2]]] name[obj].InterpolationMode assign[=] call[name[bc].u_get, parameter[constant[2]]] name[obj].NumGradients assign[=] call[name[bc].u_get, parameter[constant[4]]] name[obj].GradientRecords assign[=] list[[]] for taget[name[_]] in starred[call[name[range], parameter[name[obj].NumGradients]]] begin[:] variable[record] assign[=] call[name[_make_object], parameter[constant[GradRecord]]] call[name[gradient_records].append, parameter[name[record]]] name[record].Ratio assign[=] call[name[unpack_ui8], parameter[name[self]._src]] if compare[name[shape_number] less_or_equal[<=] constant[2]] begin[:] name[record].Color assign[=] call[name[self]._get_struct_rgb, parameter[]] return[name[obj]]
keyword[def] identifier[_get_struct_gradient] ( identifier[self] , identifier[shape_number] ): literal[string] identifier[obj] = identifier[_make_object] ( literal[string] ) identifier[bc] = identifier[BitConsumer] ( identifier[self] . identifier[_src] ) identifier[obj] . identifier[SpreadMode] = identifier[bc] . identifier[u_get] ( literal[int] ) identifier[obj] . identifier[InterpolationMode] = identifier[bc] . identifier[u_get] ( literal[int] ) identifier[obj] . identifier[NumGradients] = identifier[bc] . identifier[u_get] ( literal[int] ) identifier[obj] . identifier[GradientRecords] = identifier[gradient_records] =[] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[obj] . identifier[NumGradients] ): identifier[record] = identifier[_make_object] ( literal[string] ) identifier[gradient_records] . identifier[append] ( identifier[record] ) identifier[record] . identifier[Ratio] = identifier[unpack_ui8] ( identifier[self] . identifier[_src] ) keyword[if] identifier[shape_number] <= literal[int] : identifier[record] . identifier[Color] = identifier[self] . identifier[_get_struct_rgb] () keyword[else] : identifier[record] . identifier[Color] = identifier[self] . identifier[_get_struct_rgba] () keyword[return] identifier[obj]
def _get_struct_gradient(self, shape_number): """Get the values for the GRADIENT record.""" obj = _make_object('Gradient') bc = BitConsumer(self._src) obj.SpreadMode = bc.u_get(2) obj.InterpolationMode = bc.u_get(2) obj.NumGradients = bc.u_get(4) obj.GradientRecords = gradient_records = [] for _ in range(obj.NumGradients): record = _make_object('GradRecord') gradient_records.append(record) record.Ratio = unpack_ui8(self._src) if shape_number <= 2: record.Color = self._get_struct_rgb() # depends on [control=['if'], data=[]] else: record.Color = self._get_struct_rgba() # depends on [control=['for'], data=[]] return obj
def insertBlock(self, businput):
    """
    Input dictionary has to have the following keys:
    blockname
    It may have: open_for_writing, origin_site(name), block_size, file_count,
    creation_date, create_by, last_modification_date, last_modified_by.
    It builds the correct dictionary for dao input and executes the dao.
    NEED to validate there are no extra keys in the businput
    """
    if not ("block_name" in businput and "origin_site_name" in businput):
        dbsExceptionHandler('dbsException-invalid-input', "business/DBSBlock/insertBlock must have block_name and origin_site_name as input")
    conn = self.dbi.connection()
    tran = conn.begin()
    try:
        blkinput = {
            "last_modification_date": businput.get("last_modification_date", dbsUtils().getTime()),
            #"last_modified_by":businput.get("last_modified_by", dbsUtils().getCreateBy()),
            "last_modified_by": dbsUtils().getCreateBy(),
            #"create_by":businput.get("create_by", dbsUtils().getCreateBy()),
            "create_by": dbsUtils().getCreateBy(),
            "creation_date": businput.get("creation_date", dbsUtils().getTime()),
            "open_for_writing": businput.get("open_for_writing", 1),
            "block_size": businput.get("block_size", 0),
            "file_count": businput.get("file_count", 0),
            "block_name": businput.get("block_name"),
            "origin_site_name": businput.get("origin_site_name")
        }
        ds_name = businput["block_name"].split('#')[0]
        blkinput["dataset_id"] = self.datasetid.execute(conn, ds_name, tran)
        if blkinput["dataset_id"] == -1:
            msg = "DBSBlock/insertBlock. Dataset %s does not exist" % ds_name
            dbsExceptionHandler('dbsException-missing-data', msg)
        blkinput["block_id"] = self.sm.increment(conn, "SEQ_BK", tran)
        self.blockin.execute(conn, blkinput, tran)
        tran.commit()
        tran = None
    except Exception as e:
        if str(e).lower().find("unique constraint") != -1 or str(e).lower().find("duplicate") != -1:
            pass
        else:
            if tran:
                tran.rollback()
            if conn:
                conn.close()
            raise
    finally:
        if tran:
            tran.rollback()
        if conn:
            conn.close()
def function[insertBlock, parameter[self, businput]]: constant[ Input dictionary has to have the following keys: blockname It may have: open_for_writing, origin_site(name), block_size, file_count, creation_date, create_by, last_modification_date, last_modified_by it builds the correct dictionary for dao input and executes the dao NEED to validate there are no extra keys in the businput ] if <ast.UnaryOp object at 0x7da20c6e59c0> begin[:] call[name[dbsExceptionHandler], parameter[constant[dbsException-invalid-input], constant[business/DBSBlock/insertBlock must have block_name and origin_site_name as input]]] variable[conn] assign[=] call[name[self].dbi.connection, parameter[]] variable[tran] assign[=] call[name[conn].begin, parameter[]] <ast.Try object at 0x7da20c6e53f0>
keyword[def] identifier[insertBlock] ( identifier[self] , identifier[businput] ): literal[string] keyword[if] keyword[not] ( literal[string] keyword[in] identifier[businput] keyword[and] literal[string] keyword[in] identifier[businput] ): identifier[dbsExceptionHandler] ( literal[string] , literal[string] ) identifier[conn] = identifier[self] . identifier[dbi] . identifier[connection] () identifier[tran] = identifier[conn] . identifier[begin] () keyword[try] : identifier[blkinput] ={ literal[string] : identifier[businput] . identifier[get] ( literal[string] , identifier[dbsUtils] (). identifier[getTime] ()), literal[string] : identifier[dbsUtils] (). identifier[getCreateBy] (), literal[string] : identifier[dbsUtils] (). identifier[getCreateBy] (), literal[string] : identifier[businput] . identifier[get] ( literal[string] , identifier[dbsUtils] (). identifier[getTime] ()), literal[string] : identifier[businput] . identifier[get] ( literal[string] , literal[int] ), literal[string] : identifier[businput] . identifier[get] ( literal[string] , literal[int] ), literal[string] : identifier[businput] . identifier[get] ( literal[string] , literal[int] ), literal[string] : identifier[businput] . identifier[get] ( literal[string] ), literal[string] : identifier[businput] . identifier[get] ( literal[string] ) } identifier[ds_name] = identifier[businput] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ] identifier[blkinput] [ literal[string] ]= identifier[self] . identifier[datasetid] . identifier[execute] ( identifier[conn] , identifier[ds_name] , identifier[tran] ) keyword[if] identifier[blkinput] [ literal[string] ]==- literal[int] : identifier[msg] = literal[string] % identifier[ds_name] identifier[dbsExceptionHandler] ( literal[string] , identifier[msg] ) identifier[blkinput] [ literal[string] ]= identifier[self] . identifier[sm] . identifier[increment] ( identifier[conn] , literal[string] , identifier[tran] ) identifier[self] . identifier[blockin] . identifier[execute] ( identifier[conn] , identifier[blkinput] , identifier[tran] ) identifier[tran] . identifier[commit] () identifier[tran] = keyword[None] keyword[except] identifier[Exception] keyword[as] identifier[e] : keyword[if] identifier[str] ( identifier[e] ). identifier[lower] (). identifier[find] ( literal[string] )!=- literal[int] keyword[or] identifier[str] ( identifier[e] ). identifier[lower] (). identifier[find] ( literal[string] )!=- literal[int] : keyword[pass] keyword[else] : keyword[if] identifier[tran] : identifier[tran] . identifier[rollback] () keyword[if] identifier[conn] : identifier[conn] . identifier[close] () keyword[raise] keyword[finally] : keyword[if] identifier[tran] : identifier[tran] . identifier[rollback] () keyword[if] identifier[conn] : identifier[conn] . identifier[close] ()
def insertBlock(self, businput):
    """
    Input dictionary has to have the following keys:
    blockname
    It may have: open_for_writing, origin_site(name), block_size, file_count,
    creation_date, create_by, last_modification_date, last_modified_by.
    It builds the correct dictionary for dao input and executes the dao.
    NEED to validate there are no extra keys in the businput
    """
    if not ('block_name' in businput and 'origin_site_name' in businput):
        dbsExceptionHandler('dbsException-invalid-input', 'business/DBSBlock/insertBlock must have block_name and origin_site_name as input') # depends on [control=['if'], data=[]]
    conn = self.dbi.connection()
    tran = conn.begin()
    try:
        #"last_modified_by":businput.get("last_modified_by", dbsUtils().getCreateBy()),
        #"create_by":businput.get("create_by", dbsUtils().getCreateBy()),
        blkinput = {'last_modification_date': businput.get('last_modification_date', dbsUtils().getTime()), 'last_modified_by': dbsUtils().getCreateBy(), 'create_by': dbsUtils().getCreateBy(), 'creation_date': businput.get('creation_date', dbsUtils().getTime()), 'open_for_writing': businput.get('open_for_writing', 1), 'block_size': businput.get('block_size', 0), 'file_count': businput.get('file_count', 0), 'block_name': businput.get('block_name'), 'origin_site_name': businput.get('origin_site_name')}
        ds_name = businput['block_name'].split('#')[0]
        blkinput['dataset_id'] = self.datasetid.execute(conn, ds_name, tran)
        if blkinput['dataset_id'] == -1:
            msg = 'DBSBlock/insertBlock. Dataset %s does not exist' % ds_name
            dbsExceptionHandler('dbsException-missing-data', msg) # depends on [control=['if'], data=[]]
        blkinput['block_id'] = self.sm.increment(conn, 'SEQ_BK', tran)
        self.blockin.execute(conn, blkinput, tran)
        tran.commit()
        tran = None # depends on [control=['try'], data=[]]
    except Exception as e:
        if str(e).lower().find('unique constraint') != -1 or str(e).lower().find('duplicate') != -1:
            pass # depends on [control=['if'], data=[]]
        else:
            if tran:
                tran.rollback() # depends on [control=['if'], data=[]]
            if conn:
                conn.close() # depends on [control=['if'], data=[]]
            raise # depends on [control=['except'], data=['e']]
    finally:
        if tran:
            tran.rollback() # depends on [control=['if'], data=[]]
        if conn:
            conn.close() # depends on [control=['if'], data=[]]
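One detail worth calling out: the dataset name is recovered by splitting the block name on '#', so block names must follow the /Primary/Processed/Tier#suffix convention. A one-line illustration with a made-up block name:

block_name = '/PrimaryDS/ProcessedDS/TIER#0123-abcd'   # hypothetical DBS block name
ds_name = block_name.split('#')[0]
print(ds_name)  # /PrimaryDS/ProcessedDS/TIER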
def convert_ensembl_to_entrez(self, ensembl):
    """Convert Ensembl Id to Entrez Gene Id"""
    if 'ENST' in ensembl:
        pass
    else:
        raise IndexError
    # Submit request to NCBI eutils/Gene database
    server = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?" + self.options + "&db=gene&term={0}".format(
        ensembl)
    r = requests.get(server, headers={"Content-Type": "text/xml"})
    if not r.ok:
        r.raise_for_status()
        sys.exit()
    # Process request
    response = r.text
    info = xmltodict.parse(response)
    try:
        geneId = info['eSearchResult']['IdList']['Id']
    except TypeError:
        raise TypeError
    return geneId
def function[convert_ensembl_to_entrez, parameter[self, ensembl]]: constant[Convert Ensembl Id to Entrez Gene Id] if compare[constant[ENST] in name[ensembl]] begin[:] pass variable[server] assign[=] binary_operation[binary_operation[constant[http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?] + name[self].options] + call[constant[&db=gene&term={0}].format, parameter[name[ensembl]]]] variable[r] assign[=] call[name[requests].get, parameter[name[server]]] if <ast.UnaryOp object at 0x7da207f9ad40> begin[:] call[name[r].raise_for_status, parameter[]] call[name[sys].exit, parameter[]] variable[response] assign[=] name[r].text variable[info] assign[=] call[name[xmltodict].parse, parameter[name[response]]] <ast.Try object at 0x7da207f9a1d0> return[name[geneId]]
keyword[def] identifier[convert_ensembl_to_entrez] ( identifier[self] , identifier[ensembl] ): literal[string] keyword[if] literal[string] keyword[in] identifier[ensembl] : keyword[pass] keyword[else] : keyword[raise] ( identifier[IndexError] ) identifier[server] = literal[string] + identifier[self] . identifier[options] + literal[string] . identifier[format] ( identifier[ensembl] ) identifier[r] = identifier[requests] . identifier[get] ( identifier[server] , identifier[headers] ={ literal[string] : literal[string] }) keyword[if] keyword[not] identifier[r] . identifier[ok] : identifier[r] . identifier[raise_for_status] () identifier[sys] . identifier[exit] () identifier[response] = identifier[r] . identifier[text] identifier[info] = identifier[xmltodict] . identifier[parse] ( identifier[response] ) keyword[try] : identifier[geneId] = identifier[info] [ literal[string] ][ literal[string] ][ literal[string] ] keyword[except] identifier[TypeError] : keyword[raise] ( identifier[TypeError] ) keyword[return] identifier[geneId]
def convert_ensembl_to_entrez(self, ensembl):
    """Convert Ensembl Id to Entrez Gene Id"""
    if 'ENST' in ensembl:
        pass # depends on [control=['if'], data=[]]
    else:
        raise IndexError
    # Submit request to NCBI eutils/Gene database
    server = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?' + self.options + '&db=gene&term={0}'.format(ensembl)
    r = requests.get(server, headers={'Content-Type': 'text/xml'})
    if not r.ok:
        r.raise_for_status()
        sys.exit() # depends on [control=['if'], data=[]]
    # Process request
    response = r.text
    info = xmltodict.parse(response)
    try:
        geneId = info['eSearchResult']['IdList']['Id'] # depends on [control=['try'], data=[]]
    except TypeError:
        raise TypeError # depends on [control=['except'], data=[]]
    return geneId
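How the xmltodict lookup above behaves on a canned esearch envelope (hand-written XML, not a live NCBI reply):

import xmltodict

canned = '<eSearchResult><IdList><Id>7157</Id></IdList></eSearchResult>'
info = xmltodict.parse(canned)
print(info['eSearchResult']['IdList']['Id'])  # '7157'
# With several <Id> children this value becomes a list, not a string,
# which callers of convert_ensembl_to_entrez would need to handle.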
def complement(color): r"""Calculates polar opposite of color This isn't guaranteed to look good >_> (especially with brighter, higher intensity colors.) This will be replaced with a formula that produces better looking colors in the future. >>> complement('red') (0, 255, 76) >>> complement((0, 100, 175)) (175, 101, 0) """ (r, g, b) = parse_color(color) gcolor = grapefruit.Color((r / 255.0, g / 255.0, b / 255.0)) complement = gcolor.ComplementaryColor() (r, g, b) = [int(c * 255.0) for c in complement.rgb] return (r, g, b)
def function[complement, parameter[color]]: constant[Calculates polar opposite of color This isn't guaranteed to look good >_> (especially with brighter, higher intensity colors.) This will be replaced with a formula that produces better looking colors in the future. >>> complement('red') (0, 255, 76) >>> complement((0, 100, 175)) (175, 101, 0) ] <ast.Tuple object at 0x7da204961000> assign[=] call[name[parse_color], parameter[name[color]]] variable[gcolor] assign[=] call[name[grapefruit].Color, parameter[tuple[[<ast.BinOp object at 0x7da2049634c0>, <ast.BinOp object at 0x7da204963d30>, <ast.BinOp object at 0x7da204961f00>]]]] variable[complement] assign[=] call[name[gcolor].ComplementaryColor, parameter[]] <ast.Tuple object at 0x7da204961e70> assign[=] <ast.ListComp object at 0x7da204961ba0> return[tuple[[<ast.Name object at 0x7da204960040>, <ast.Name object at 0x7da204962c50>, <ast.Name object at 0x7da204963070>]]]
keyword[def] identifier[complement] ( identifier[color] ): literal[string] ( identifier[r] , identifier[g] , identifier[b] )= identifier[parse_color] ( identifier[color] ) identifier[gcolor] = identifier[grapefruit] . identifier[Color] (( identifier[r] / literal[int] , identifier[g] / literal[int] , identifier[b] / literal[int] )) identifier[complement] = identifier[gcolor] . identifier[ComplementaryColor] () ( identifier[r] , identifier[g] , identifier[b] )=[ identifier[int] ( identifier[c] * literal[int] ) keyword[for] identifier[c] keyword[in] identifier[complement] . identifier[rgb] ] keyword[return] ( identifier[r] , identifier[g] , identifier[b] )
def complement(color): """Calculates polar opposite of color This isn't guaranteed to look good >_> (especially with brighter, higher intensity colors.) This will be replaced with a formula that produces better looking colors in the future. >>> complement('red') (0, 255, 76) >>> complement((0, 100, 175)) (175, 101, 0) """ (r, g, b) = parse_color(color) gcolor = grapefruit.Color((r / 255.0, g / 255.0, b / 255.0)) complement = gcolor.ComplementaryColor() (r, g, b) = [int(c * 255.0) for c in complement.rgb] return (r, g, b)
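For comparison, a dependency-free complement via plain hue rotation in the stdlib colorsys module. Note it yields the optical complement (cyan for red), not the (0, 255, 76) the grapefruit doctest shows -- the two libraries define "complementary" differently:

import colorsys

def complement_rgb(r, g, b):
    # Rotate hue by 180 degrees in HLS space.
    h, l, s = colorsys.rgb_to_hls(r / 255.0, g / 255.0, b / 255.0)
    r2, g2, b2 = colorsys.hls_to_rgb((h + 0.5) % 1.0, l, s)
    return tuple(int(round(c * 255)) for c in (r2, g2, b2))

print(complement_rgb(255, 0, 0))  # (0, 255, 255)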
def _predict(self, X, method='fprop'): """ Get model predictions. See pylearn2.scripts.mlp.predict_csv and http://fastml.com/how-to-get-predictions-from-pylearn2/. Parameters ---------- X : array_like Test dataset. method : str Model method to call for prediction. """ import theano X_sym = self.trainer.model.get_input_space().make_theano_batch() y_sym = getattr(self.trainer.model, method)(X_sym) f = theano.function([X_sym], y_sym, allow_input_downcast=True) return f(X)
def function[_predict, parameter[self, X, method]]: constant[ Get model predictions. See pylearn2.scripts.mlp.predict_csv and http://fastml.com/how-to-get-predictions-from-pylearn2/. Parameters ---------- X : array_like Test dataset. method : str Model method to call for prediction. ] import module[theano] variable[X_sym] assign[=] call[call[name[self].trainer.model.get_input_space, parameter[]].make_theano_batch, parameter[]] variable[y_sym] assign[=] call[call[name[getattr], parameter[name[self].trainer.model, name[method]]], parameter[name[X_sym]]] variable[f] assign[=] call[name[theano].function, parameter[list[[<ast.Name object at 0x7da1afff4880>]], name[y_sym]]] return[call[name[f], parameter[name[X]]]]
keyword[def] identifier[_predict] ( identifier[self] , identifier[X] , identifier[method] = literal[string] ): literal[string] keyword[import] identifier[theano] identifier[X_sym] = identifier[self] . identifier[trainer] . identifier[model] . identifier[get_input_space] (). identifier[make_theano_batch] () identifier[y_sym] = identifier[getattr] ( identifier[self] . identifier[trainer] . identifier[model] , identifier[method] )( identifier[X_sym] ) identifier[f] = identifier[theano] . identifier[function] ([ identifier[X_sym] ], identifier[y_sym] , identifier[allow_input_downcast] = keyword[True] ) keyword[return] identifier[f] ( identifier[X] )
def _predict(self, X, method='fprop'): """ Get model predictions. See pylearn2.scripts.mlp.predict_csv and http://fastml.com/how-to-get-predictions-from-pylearn2/. Parameters ---------- X : array_like Test dataset. method : str Model method to call for prediction. """ import theano X_sym = self.trainer.model.get_input_space().make_theano_batch() y_sym = getattr(self.trainer.model, method)(X_sym) f = theano.function([X_sym], y_sym, allow_input_downcast=True) return f(X)
def get_taf_remarks(txt: str) -> (str, str): # type: ignore """ Returns report and remarks separated if found """ remarks_start = find_first_in_list(txt, TAF_RMK) if remarks_start == -1: return txt, '' remarks = txt[remarks_start:] txt = txt[:remarks_start].strip() return txt, remarks
def function[get_taf_remarks, parameter[txt]]: constant[ Returns report and remarks separated if found ] variable[remarks_start] assign[=] call[name[find_first_in_list], parameter[name[txt], name[TAF_RMK]]] if compare[name[remarks_start] equal[==] <ast.UnaryOp object at 0x7da207f021a0>] begin[:] return[tuple[[<ast.Name object at 0x7da207f02d40>, <ast.Constant object at 0x7da207f00910>]]] variable[remarks] assign[=] call[name[txt]][<ast.Slice object at 0x7da207f03be0>] variable[txt] assign[=] call[call[name[txt]][<ast.Slice object at 0x7da207f03760>].strip, parameter[]] return[tuple[[<ast.Name object at 0x7da207f01150>, <ast.Name object at 0x7da207f03670>]]]
keyword[def] identifier[get_taf_remarks] ( identifier[txt] : identifier[str] )->( identifier[str] , identifier[str] ): literal[string] identifier[remarks_start] = identifier[find_first_in_list] ( identifier[txt] , identifier[TAF_RMK] ) keyword[if] identifier[remarks_start] ==- literal[int] : keyword[return] identifier[txt] , literal[string] identifier[remarks] = identifier[txt] [ identifier[remarks_start] :] identifier[txt] = identifier[txt] [: identifier[remarks_start] ]. identifier[strip] () keyword[return] identifier[txt] , identifier[remarks]
def get_taf_remarks(txt: str) -> (str, str): # type: ignore '\n Returns report and remarks separated if found\n ' remarks_start = find_first_in_list(txt, TAF_RMK) if remarks_start == -1: return (txt, '') # depends on [control=['if'], data=[]] remarks = txt[remarks_start:] txt = txt[:remarks_start].strip() return (txt, remarks)
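A self-contained sketch of the same split; TAF_RMK and find_first_in_list below are assumed stand-ins, not avwx's real constants or helpers:

TAF_RMK = ('RMK ', 'AUTOMATED ', 'COR ')  # assumed remark markers

def find_first_in_list(txt, items):
    hits = [txt.find(i) for i in items if i in txt]
    return min(hits) if hits else -1

raw = 'TAF KJFK 1212/1312 18010KT P6SM RMK NXT FCST BY 18Z'
start = find_first_in_list(raw, TAF_RMK)
report, remarks = (raw, '') if start == -1 else (raw[:start].strip(), raw[start:])
print(report)   # TAF KJFK 1212/1312 18010KT P6SM
print(remarks)  # RMK NXT FCST BY 18Z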
def _get_child_mock(self, **kw): """Create the child mocks for attributes and return value. By default child mocks will be the same type as the parent. Subclasses of Mock may want to override this to customize the way child mocks are made. For non-callable mocks the callable variant will be used (rather than any custom subclass).""" _type = type(self) if not issubclass(_type, CallableMixin): if issubclass(_type, NonCallableMagicMock): klass = MagicMock elif issubclass(_type, NonCallableMock) : klass = Mock else: klass = _type.__mro__[1] return klass(**kw)
def function[_get_child_mock, parameter[self]]: constant[Create the child mocks for attributes and return value. By default child mocks will be the same type as the parent. Subclasses of Mock may want to override this to customize the way child mocks are made. For non-callable mocks the callable variant will be used (rather than any custom subclass).] variable[_type] assign[=] call[name[type], parameter[name[self]]] if <ast.UnaryOp object at 0x7da1b08bae00> begin[:] if call[name[issubclass], parameter[name[_type], name[NonCallableMagicMock]]] begin[:] variable[klass] assign[=] name[MagicMock] return[call[name[klass], parameter[]]]
keyword[def] identifier[_get_child_mock] ( identifier[self] ,** identifier[kw] ): literal[string] identifier[_type] = identifier[type] ( identifier[self] ) keyword[if] keyword[not] identifier[issubclass] ( identifier[_type] , identifier[CallableMixin] ): keyword[if] identifier[issubclass] ( identifier[_type] , identifier[NonCallableMagicMock] ): identifier[klass] = identifier[MagicMock] keyword[elif] identifier[issubclass] ( identifier[_type] , identifier[NonCallableMock] ): identifier[klass] = identifier[Mock] keyword[else] : identifier[klass] = identifier[_type] . identifier[__mro__] [ literal[int] ] keyword[return] identifier[klass] (** identifier[kw] )
def _get_child_mock(self, **kw): """Create the child mocks for attributes and return value. By default child mocks will be the same type as the parent. Subclasses of Mock may want to override this to customize the way child mocks are made. For non-callable mocks the callable variant will be used (rather than any custom subclass).""" _type = type(self) if not issubclass(_type, CallableMixin): if issubclass(_type, NonCallableMagicMock): klass = MagicMock # depends on [control=['if'], data=[]] elif issubclass(_type, NonCallableMock): klass = Mock # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: klass = _type.__mro__[1] return klass(**kw)
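The "callable variant" rule from the docstring, observed through the public unittest.mock API (attribute access on a mock routes through _get_child_mock):

from unittest.mock import NonCallableMagicMock, MagicMock

parent = NonCallableMagicMock()
child = parent.some_attr           # built via _get_child_mock
print(type(child) is MagicMock)    # True: non-callable parent, callable child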
def run_gmsh(self): """ Makes the mesh using gmsh. """ argiope.utils.run_gmsh(gmsh_path = self.gmsh_path, gmsh_space = self.gmsh_space, gmsh_options = self.gmsh_options, name = self.file_name + ".geo", workdir = self.workdir) self.mesh = argiope.mesh.read_msh(self.workdir + self.file_name + ".msh")
def function[run_gmsh, parameter[self]]: constant[ Makes the mesh using gmsh. ] call[name[argiope].utils.run_gmsh, parameter[]] name[self].mesh assign[=] call[name[argiope].mesh.read_msh, parameter[binary_operation[binary_operation[name[self].workdir + name[self].file_name] + constant[.msh]]]]
keyword[def] identifier[run_gmsh] ( identifier[self] ): literal[string] identifier[argiope] . identifier[utils] . identifier[run_gmsh] ( identifier[gmsh_path] = identifier[self] . identifier[gmsh_path] , identifier[gmsh_space] = identifier[self] . identifier[gmsh_space] , identifier[gmsh_options] = identifier[self] . identifier[gmsh_options] , identifier[name] = identifier[self] . identifier[file_name] + literal[string] , identifier[workdir] = identifier[self] . identifier[workdir] ) identifier[self] . identifier[mesh] = identifier[argiope] . identifier[mesh] . identifier[read_msh] ( identifier[self] . identifier[workdir] + identifier[self] . identifier[file_name] + literal[string] )
def run_gmsh(self): """ Makes the mesh using gmsh. """ argiope.utils.run_gmsh(gmsh_path=self.gmsh_path, gmsh_space=self.gmsh_space, gmsh_options=self.gmsh_options, name=self.file_name + '.geo', workdir=self.workdir) self.mesh = argiope.mesh.read_msh(self.workdir + self.file_name + '.msh')
def parse_nodes_coords(osm_response): """ Parse node coordinates from OSM response. Some nodes are standalone points of interest, others are vertices in polygonal (areal) POIs. Parameters ---------- osm_response : string OSM response JSON string Returns ------- coords : dict dict of node IDs and their lat, lon coordinates """ coords = {} for result in osm_response['elements']: if 'type' in result and result['type'] == 'node': coords[result['id']] = {'lat': result['lat'], 'lon': result['lon']} return coords
def function[parse_nodes_coords, parameter[osm_response]]: constant[ Parse node coordinates from OSM response. Some nodes are standalone points of interest, others are vertices in polygonal (areal) POIs. Parameters ---------- osm_response : string OSM response JSON string Returns ------- coords : dict dict of node IDs and their lat, lon coordinates ] variable[coords] assign[=] dictionary[[], []] for taget[name[result]] in starred[call[name[osm_response]][constant[elements]]] begin[:] if <ast.BoolOp object at 0x7da1b1b286a0> begin[:] call[name[coords]][call[name[result]][constant[id]]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1b28400>, <ast.Constant object at 0x7da1b1b29ed0>], [<ast.Subscript object at 0x7da1b1b29ab0>, <ast.Subscript object at 0x7da1b1b2bbb0>]] return[name[coords]]
keyword[def] identifier[parse_nodes_coords] ( identifier[osm_response] ): literal[string] identifier[coords] ={} keyword[for] identifier[result] keyword[in] identifier[osm_response] [ literal[string] ]: keyword[if] literal[string] keyword[in] identifier[result] keyword[and] identifier[result] [ literal[string] ]== literal[string] : identifier[coords] [ identifier[result] [ literal[string] ]]={ literal[string] : identifier[result] [ literal[string] ], literal[string] : identifier[result] [ literal[string] ]} keyword[return] identifier[coords]
def parse_nodes_coords(osm_response): """ Parse node coordinates from OSM response. Some nodes are standalone points of interest, others are vertices in polygonal (areal) POIs. Parameters ---------- osm_response : string OSM response JSON string Returns ------- coords : dict dict of node IDs and their lat, lon coordinates """ coords = {} for result in osm_response['elements']: if 'type' in result and result['type'] == 'node': coords[result['id']] = {'lat': result['lat'], 'lon': result['lon']} # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['result']] return coords
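The loop above is equivalent to a dict comprehension; a two-element hand-made response (coordinates invented) shows that way elements are skipped:

osm_response = {'elements': [
    {'type': 'node', 'id': 1, 'lat': 52.52, 'lon': 13.405},
    {'type': 'way', 'id': 2, 'nodes': [1]},
]}
coords = {e['id']: {'lat': e['lat'], 'lon': e['lon']}
          for e in osm_response['elements'] if e.get('type') == 'node'}
print(coords)  # {1: {'lat': 52.52, 'lon': 13.405}}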
def preprocess(self):
    """Preprocessing.

    Strips leading and trailing whitespace from the firstName and lastName
    fields (defaulting them to an empty string when missing).

    Adds a 'name' field joining both names into one string.
    """
    super(MambuUser, self).preprocess()

    try:
        self['firstName'] = self['firstName'].strip()
    except Exception:
        self['firstName'] = ""
    try:
        self['lastName'] = self['lastName'].strip()
    except Exception:
        self['lastName'] = ""

    self['name'] = self['firstName'] + " " + self['lastName']
def function[preprocess, parameter[self]]: constant[Preprocessing. Removes repeated chars from firstName and lastName fields. Adds a 'name' field joining all names in to one string. ] call[call[name[super], parameter[name[MambuUser], name[self]]].preprocess, parameter[]] <ast.Try object at 0x7da18dc9a410> <ast.Try object at 0x7da18dc9a140> call[name[self]][constant[name]] assign[=] binary_operation[binary_operation[call[name[self]][constant[firstName]] + constant[ ]] + call[name[self]][constant[lastName]]]
keyword[def] identifier[preprocess] ( identifier[self] ): literal[string] identifier[super] ( identifier[MambuUser] , identifier[self] ). identifier[preprocess] () keyword[try] : identifier[self] [ literal[string] ]= identifier[self] [ literal[string] ]. identifier[strip] () keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[self] [ literal[string] ]= literal[string] keyword[try] : identifier[self] [ literal[string] ]= identifier[self] [ literal[string] ]. identifier[strip] () keyword[except] identifier[Exception] keyword[as] identifier[ex] : identifier[self] [ literal[string] ]= literal[string] identifier[self] [ literal[string] ]= identifier[self] [ literal[string] ]+ literal[string] + identifier[self] [ literal[string] ]
def preprocess(self):
    """Preprocessing.

    Strips leading and trailing whitespace from the firstName and lastName
    fields (defaulting them to an empty string when missing).

    Adds a 'name' field joining both names into one string.
    """
    super(MambuUser, self).preprocess()
    try:
        self['firstName'] = self['firstName'].strip() # depends on [control=['try'], data=[]]
    except Exception:
        self['firstName'] = '' # depends on [control=['except'], data=[]]
    try:
        self['lastName'] = self['lastName'].strip() # depends on [control=['try'], data=[]]
    except Exception:
        self['lastName'] = '' # depends on [control=['except'], data=[]]
    self['name'] = self['firstName'] + ' ' + self['lastName']
def rollback(awsclient, function_name, alias_name=ALIAS_NAME, version=None): """Rollback a lambda function to a given version. :param awsclient: :param function_name: :param alias_name: :param version: :return: exit_code """ if version: log.info('rolling back to version {}'.format(version)) else: log.info('rolling back to previous version') version = _get_previous_version(awsclient, function_name, alias_name) if version == '0': log.error('unable to find previous version of lambda function') return 1 log.info('new version is %s' % str(version)) _update_alias(awsclient, function_name, version, alias_name) return 0
def function[rollback, parameter[awsclient, function_name, alias_name, version]]: constant[Rollback a lambda function to a given version. :param awsclient: :param function_name: :param alias_name: :param version: :return: exit_code ] if name[version] begin[:] call[name[log].info, parameter[call[constant[rolling back to version {}].format, parameter[name[version]]]]] call[name[_update_alias], parameter[name[awsclient], name[function_name], name[version], name[alias_name]]] return[constant[0]]
keyword[def] identifier[rollback] ( identifier[awsclient] , identifier[function_name] , identifier[alias_name] = identifier[ALIAS_NAME] , identifier[version] = keyword[None] ): literal[string] keyword[if] identifier[version] : identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[version] )) keyword[else] : identifier[log] . identifier[info] ( literal[string] ) identifier[version] = identifier[_get_previous_version] ( identifier[awsclient] , identifier[function_name] , identifier[alias_name] ) keyword[if] identifier[version] == literal[string] : identifier[log] . identifier[error] ( literal[string] ) keyword[return] literal[int] identifier[log] . identifier[info] ( literal[string] % identifier[str] ( identifier[version] )) identifier[_update_alias] ( identifier[awsclient] , identifier[function_name] , identifier[version] , identifier[alias_name] ) keyword[return] literal[int]
def rollback(awsclient, function_name, alias_name=ALIAS_NAME, version=None): """Rollback a lambda function to a given version. :param awsclient: :param function_name: :param alias_name: :param version: :return: exit_code """ if version: log.info('rolling back to version {}'.format(version)) # depends on [control=['if'], data=[]] else: log.info('rolling back to previous version') version = _get_previous_version(awsclient, function_name, alias_name) if version == '0': log.error('unable to find previous version of lambda function') return 1 # depends on [control=['if'], data=[]] log.info('new version is %s' % str(version)) _update_alias(awsclient, function_name, version, alias_name) return 0
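_update_alias is not shown in this row; a plausible sketch of what it wraps is the Lambda UpdateAlias call in boto3 (the boto3 method and parameters are real, but treating this as the function's actual implementation is an assumption):

import boto3

def _update_alias_sketch(function_name, version, alias_name):
    # Point the alias at the given published version of the function.
    client = boto3.client('lambda')
    client.update_alias(FunctionName=function_name,
                        Name=alias_name,
                        FunctionVersion=str(version))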
def from_dict(data, ctx): """ Instantiate a new ClientConfigureRejectTransaction from a dict (generally from loading a JSON response). The data used to instantiate the ClientConfigureRejectTransaction is a shallow copy of the dict passed in, with any complex child types instantiated appropriately. """ data = data.copy() if data.get('marginRate') is not None: data['marginRate'] = ctx.convert_decimal_number( data.get('marginRate') ) return ClientConfigureRejectTransaction(**data)
def function[from_dict, parameter[data, ctx]]: constant[ Instantiate a new ClientConfigureRejectTransaction from a dict (generally from loading a JSON response). The data used to instantiate the ClientConfigureRejectTransaction is a shallow copy of the dict passed in, with any complex child types instantiated appropriately. ] variable[data] assign[=] call[name[data].copy, parameter[]] if compare[call[name[data].get, parameter[constant[marginRate]]] is_not constant[None]] begin[:] call[name[data]][constant[marginRate]] assign[=] call[name[ctx].convert_decimal_number, parameter[call[name[data].get, parameter[constant[marginRate]]]]] return[call[name[ClientConfigureRejectTransaction], parameter[]]]
keyword[def] identifier[from_dict] ( identifier[data] , identifier[ctx] ): literal[string] identifier[data] = identifier[data] . identifier[copy] () keyword[if] identifier[data] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] : identifier[data] [ literal[string] ]= identifier[ctx] . identifier[convert_decimal_number] ( identifier[data] . identifier[get] ( literal[string] ) ) keyword[return] identifier[ClientConfigureRejectTransaction] (** identifier[data] )
def from_dict(data, ctx): """ Instantiate a new ClientConfigureRejectTransaction from a dict (generally from loading a JSON response). The data used to instantiate the ClientConfigureRejectTransaction is a shallow copy of the dict passed in, with any complex child types instantiated appropriately. """ data = data.copy() if data.get('marginRate') is not None: data['marginRate'] = ctx.convert_decimal_number(data.get('marginRate')) # depends on [control=['if'], data=[]] return ClientConfigureRejectTransaction(**data)
def remove(self, option): """ Removes an option from a Config instance IN: option (type: Option) """ if option.__class__ == Option: if option in self.options: del self.options[self.options.index(option)] else: raise OptionNotFoundError(option.name) else: raise TypeError("invalid type supplied")
def function[remove, parameter[self, option]]: constant[ Removes an option from a Config instance IN: option (type: Option) ] if compare[name[option].__class__ equal[==] name[Option]] begin[:] if compare[name[option] in name[self].options] begin[:] <ast.Delete object at 0x7da1b0853eb0>
keyword[def] identifier[remove] ( identifier[self] , identifier[option] ): literal[string] keyword[if] identifier[option] . identifier[__class__] == identifier[Option] : keyword[if] identifier[option] keyword[in] identifier[self] . identifier[options] : keyword[del] identifier[self] . identifier[options] [ identifier[self] . identifier[options] . identifier[index] ( identifier[option] )] keyword[else] : keyword[raise] identifier[OptionNotFoundError] ( identifier[option] . identifier[name] ) keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] )
def remove(self, option): """ Removes an option from a Config instance IN: option (type: Option) """ if option.__class__ == Option: if option in self.options: del self.options[self.options.index(option)] # depends on [control=['if'], data=['option']] else: raise OptionNotFoundError(option.name) # depends on [control=['if'], data=[]] else: raise TypeError('invalid type supplied')
def h5fmem(**kwargs): """Create an in-memory HDF5 file.""" # need a file name even tho nothing is ever written fn = tempfile.mktemp() # file creation args kwargs['mode'] = 'w' kwargs['driver'] = 'core' kwargs['backing_store'] = False # open HDF5 file h5f = h5py.File(fn, **kwargs) return h5f
def function[h5fmem, parameter[]]: constant[Create an in-memory HDF5 file.] variable[fn] assign[=] call[name[tempfile].mktemp, parameter[]] call[name[kwargs]][constant[mode]] assign[=] constant[w] call[name[kwargs]][constant[driver]] assign[=] constant[core] call[name[kwargs]][constant[backing_store]] assign[=] constant[False] variable[h5f] assign[=] call[name[h5py].File, parameter[name[fn]]] return[name[h5f]]
keyword[def] identifier[h5fmem] (** identifier[kwargs] ): literal[string] identifier[fn] = identifier[tempfile] . identifier[mktemp] () identifier[kwargs] [ literal[string] ]= literal[string] identifier[kwargs] [ literal[string] ]= literal[string] identifier[kwargs] [ literal[string] ]= keyword[False] identifier[h5f] = identifier[h5py] . identifier[File] ( identifier[fn] ,** identifier[kwargs] ) keyword[return] identifier[h5f]
def h5fmem(**kwargs): """Create an in-memory HDF5 file.""" # need a file name even tho nothing is ever written fn = tempfile.mktemp() # file creation args kwargs['mode'] = 'w' kwargs['driver'] = 'core' kwargs['backing_store'] = False # open HDF5 file h5f = h5py.File(fn, **kwargs) return h5f
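Usage: the returned handle behaves like any h5py.File but lives entirely in RAM, since driver='core' with backing_store=False never flushes to the temp path:

h5f = h5fmem()
h5f.create_dataset('x', data=[1, 2, 3])
print(list(h5f['x'][:]))  # [1, 2, 3]
h5f.close()               # nothing is written to disk on close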
def _fast_permalink(self): """Return the short permalink to the comment.""" if hasattr(self, 'link_id'): # from /r or /u comments page sid = self.link_id.split('_')[1] else: # from user's /message page sid = self.context.split('/')[4] return urljoin(self.reddit_session.config['comments'], '{0}/_/{1}' .format(sid, self.id))
def function[_fast_permalink, parameter[self]]: constant[Return the short permalink to the comment.] if call[name[hasattr], parameter[name[self], constant[link_id]]] begin[:] variable[sid] assign[=] call[call[name[self].link_id.split, parameter[constant[_]]]][constant[1]] return[call[name[urljoin], parameter[call[name[self].reddit_session.config][constant[comments]], call[constant[{0}/_/{1}].format, parameter[name[sid], name[self].id]]]]]
keyword[def] identifier[_fast_permalink] ( identifier[self] ): literal[string] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[sid] = identifier[self] . identifier[link_id] . identifier[split] ( literal[string] )[ literal[int] ] keyword[else] : identifier[sid] = identifier[self] . identifier[context] . identifier[split] ( literal[string] )[ literal[int] ] keyword[return] identifier[urljoin] ( identifier[self] . identifier[reddit_session] . identifier[config] [ literal[string] ], literal[string] . identifier[format] ( identifier[sid] , identifier[self] . identifier[id] ))
def _fast_permalink(self): """Return the short permalink to the comment.""" if hasattr(self, 'link_id'): # from /r or /u comments page sid = self.link_id.split('_')[1] # depends on [control=['if'], data=[]] else: # from user's /message page sid = self.context.split('/')[4] return urljoin(self.reddit_session.config['comments'], '{0}/_/{1}'.format(sid, self.id))
def generate_numeric_range(items, lower_bound, upper_bound): """Generate postgresql numeric range and label for insertion. Parameters ---------- items: iterable labels for ranges. lower_bound: numeric lower bound upper_bound: numeric upper bound """ quantile_grid = create_quantiles(items, lower_bound, upper_bound) labels, bounds = (zip(*quantile_grid)) ranges = ((label, NumericRange(*bound)) for label, bound in zip(labels, bounds)) return ranges
def function[generate_numeric_range, parameter[items, lower_bound, upper_bound]]: constant[Generate postgresql numeric range and label for insertion. Parameters ---------- items: iterable labels for ranges. lower_bound: numeric lower bound upper_bound: numeric upper bound ] variable[quantile_grid] assign[=] call[name[create_quantiles], parameter[name[items], name[lower_bound], name[upper_bound]]] <ast.Tuple object at 0x7da18dc986a0> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da18dc9a8f0>]] variable[ranges] assign[=] <ast.GeneratorExp object at 0x7da18dc981c0> return[name[ranges]]
keyword[def] identifier[generate_numeric_range] ( identifier[items] , identifier[lower_bound] , identifier[upper_bound] ): literal[string] identifier[quantile_grid] = identifier[create_quantiles] ( identifier[items] , identifier[lower_bound] , identifier[upper_bound] ) identifier[labels] , identifier[bounds] =( identifier[zip] (* identifier[quantile_grid] )) identifier[ranges] =(( identifier[label] , identifier[NumericRange] (* identifier[bound] )) keyword[for] identifier[label] , identifier[bound] keyword[in] identifier[zip] ( identifier[labels] , identifier[bounds] )) keyword[return] identifier[ranges]
def generate_numeric_range(items, lower_bound, upper_bound): """Generate postgresql numeric range and label for insertion. Parameters ---------- items: iterable labels for ranges. lower_bound: numeric lower bound upper_bound: numeric upper bound """ quantile_grid = create_quantiles(items, lower_bound, upper_bound) (labels, bounds) = zip(*quantile_grid) ranges = ((label, NumericRange(*bound)) for (label, bound) in zip(labels, bounds)) return ranges
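Consuming the generator, with a hypothetical create_quantiles (the real helper is not shown in this row; an even split is assumed) and psycopg2's NumericRange. This assumes generate_numeric_range can resolve both names in its module:

from psycopg2.extras import NumericRange

def create_quantiles(items, lo, hi):
    # Hypothetical even split into len(items) bins.
    items = list(items)
    step = (hi - lo) / float(len(items))
    return [(it, (lo + i * step, lo + (i + 1) * step))
            for i, it in enumerate(items)]

for label, rng in generate_numeric_range(['low', 'high'], 0.0, 100.0):
    print(label, rng)
# low NumericRange(0.0, 50.0, '[)')
# high NumericRange(50.0, 100.0, '[)')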
def make_energies_hdu(self, extname="ENERGIES"):
    """ Builds and returns a FITS HDU with the energy bin boundaries

    extname   : The HDU extension name
    """
    if self._evals is None:
        return None
    cols = [fits.Column("ENERGY", "1E", unit='MeV', array=self._evals)]
    hdu = fits.BinTableHDU.from_columns(
        cols, self.make_header(), name=extname)
    return hdu
def function[make_energies_hdu, parameter[self, extname]]: constant[ Builds and returns a FITs HDU with the energy bin boundries extname : The HDU extension name ] if compare[name[self]._evals is constant[None]] begin[:] return[constant[None]] variable[cols] assign[=] list[[<ast.Call object at 0x7da18f00f640>]] variable[hdu] assign[=] call[name[fits].BinTableHDU.from_columns, parameter[name[cols], call[name[self].make_header, parameter[]]]] return[name[hdu]]
keyword[def] identifier[make_energies_hdu] ( identifier[self] , identifier[extname] = literal[string] ): literal[string] keyword[if] identifier[self] . identifier[_evals] keyword[is] keyword[None] : keyword[return] keyword[None] identifier[cols] =[ identifier[fits] . identifier[Column] ( literal[string] , literal[string] , identifier[unit] = literal[string] , identifier[array] = identifier[self] . identifier[_evals] )] identifier[hdu] = identifier[fits] . identifier[BinTableHDU] . identifier[from_columns] ( identifier[cols] , identifier[self] . identifier[make_header] (), identifier[name] = identifier[extname] ) keyword[return] identifier[hdu]
def make_energies_hdu(self, extname='ENERGIES'):
    """ Builds and returns a FITS HDU with the energy bin boundaries

    extname   : The HDU extension name
    """
    if self._evals is None:
        return None # depends on [control=['if'], data=[]]
    cols = [fits.Column('ENERGY', '1E', unit='MeV', array=self._evals)]
    hdu = fits.BinTableHDU.from_columns(cols, self.make_header(), name=extname)
    return hdu
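Round-tripping the column with astropy directly, using a made-up two-point energy array (the real header comes from make_header, omitted here):

import numpy as np
from astropy.io import fits

cols = [fits.Column('ENERGY', '1E', unit='MeV', array=np.array([100.0, 1000.0]))]
hdu = fits.BinTableHDU.from_columns(cols, name='ENERGIES')
print(hdu.data['ENERGY'])           # [ 100. 1000.]
print(hdu.columns['ENERGY'].unit)   # MeV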
def add_upsert(self, action, meta_action, doc_source, update_spec):
    """
    Stores sources for "insert" actions and decides whether an
    "update" action needs its document source fetched into the
    buffer first.
    """
    # Whenever update_spec is provided to this method
    # it means that the doc source needs to be retrieved
    # from Elasticsearch. It also means that the source
    # is not stored in the local buffer.
    if update_spec:
        self.bulk_index(action, meta_action)

        # -1 -> to get latest index number
        # -1 -> to get action instead of meta_action
        # Update document based on source retrieved from ES
        self.add_doc_to_update(action, update_spec,
                               len(self.action_buffer) - 2)
    else:
        # Insert and update operations provide source.
        # Store it in the local buffer and use it for coming updates
        # inside the same buffer.
        # add_to_sources will not be called for a delete operation
        # as it does not provide doc_source.
        if doc_source:
            self.add_to_sources(action, doc_source)
        self.bulk_index(action, meta_action)
def function[add_upsert, parameter[self, action, meta_action, doc_source, update_spec]]: constant[ Function which stores sources for "insert" actions and decide if for "update" action has to add docs to get source buffer ] if name[update_spec] begin[:] call[name[self].bulk_index, parameter[name[action], name[meta_action]]] call[name[self].add_doc_to_update, parameter[name[action], name[update_spec], binary_operation[call[name[len], parameter[name[self].action_buffer]] - constant[2]]]]
keyword[def] identifier[add_upsert] ( identifier[self] , identifier[action] , identifier[meta_action] , identifier[doc_source] , identifier[update_spec] ): literal[string] keyword[if] identifier[update_spec] : identifier[self] . identifier[bulk_index] ( identifier[action] , identifier[meta_action] ) identifier[self] . identifier[add_doc_to_update] ( identifier[action] , identifier[update_spec] , identifier[len] ( identifier[self] . identifier[action_buffer] )- literal[int] ) keyword[else] : keyword[if] identifier[doc_source] : identifier[self] . identifier[add_to_sources] ( identifier[action] , identifier[doc_source] ) identifier[self] . identifier[bulk_index] ( identifier[action] , identifier[meta_action] )
def add_upsert(self, action, meta_action, doc_source, update_spec):
    """
    Stores sources for "insert" actions and decides whether an
    "update" action needs its document source fetched into the
    buffer first.
    """
    # Whenever update_spec is provided to this method
    # it means that the doc source needs to be retrieved
    # from Elasticsearch. It also means that the source
    # is not stored in the local buffer.
    if update_spec:
        self.bulk_index(action, meta_action)
        # -1 -> to get latest index number
        # -1 -> to get action instead of meta_action
        # Update document based on source retrieved from ES
        self.add_doc_to_update(action, update_spec, len(self.action_buffer) - 2) # depends on [control=['if'], data=[]]
    else:
        # Insert and update operations provide source.
        # Store it in the local buffer and use it for coming updates
        # inside the same buffer.
        # add_to_sources will not be called for a delete operation
        # as it does not provide doc_source.
        if doc_source:
            self.add_to_sources(action, doc_source) # depends on [control=['if'], data=[]]
        self.bulk_index(action, meta_action)
def _join_chemical(query, cas_rn, chemical_id, chemical_name, chemical_definition): """helper function to add a query join to Chemical model :param `sqlalchemy.orm.query.Query` query: SQL Alchemy query :param cas_rn: :param chemical_id: :param chemical_name: :param chemical_definition: :return: `sqlalchemy.orm.query.Query` object """ if cas_rn or chemical_id or chemical_name or chemical_definition: query = query.join(models.Chemical) if cas_rn: query = query.filter(models.Chemical.cas_rn.like(cas_rn)) if chemical_id: query = query.filter(models.Chemical.chemical_id == chemical_id) if chemical_name: query = query.filter(models.Chemical.chemical_name.like(chemical_name)) if chemical_definition: query = query.filter(models.Chemical.definition.like(chemical_definition)) return query
def function[_join_chemical, parameter[query, cas_rn, chemical_id, chemical_name, chemical_definition]]: constant[helper function to add a query join to Chemical model :param `sqlalchemy.orm.query.Query` query: SQL Alchemy query :param cas_rn: :param chemical_id: :param chemical_name: :param chemical_definition: :return: `sqlalchemy.orm.query.Query` object ] if <ast.BoolOp object at 0x7da1b0bcf4f0> begin[:] variable[query] assign[=] call[name[query].join, parameter[name[models].Chemical]] if name[cas_rn] begin[:] variable[query] assign[=] call[name[query].filter, parameter[call[name[models].Chemical.cas_rn.like, parameter[name[cas_rn]]]]] if name[chemical_id] begin[:] variable[query] assign[=] call[name[query].filter, parameter[compare[name[models].Chemical.chemical_id equal[==] name[chemical_id]]]] if name[chemical_name] begin[:] variable[query] assign[=] call[name[query].filter, parameter[call[name[models].Chemical.chemical_name.like, parameter[name[chemical_name]]]]] if name[chemical_definition] begin[:] variable[query] assign[=] call[name[query].filter, parameter[call[name[models].Chemical.definition.like, parameter[name[chemical_definition]]]]] return[name[query]]
keyword[def] identifier[_join_chemical] ( identifier[query] , identifier[cas_rn] , identifier[chemical_id] , identifier[chemical_name] , identifier[chemical_definition] ): literal[string] keyword[if] identifier[cas_rn] keyword[or] identifier[chemical_id] keyword[or] identifier[chemical_name] keyword[or] identifier[chemical_definition] : identifier[query] = identifier[query] . identifier[join] ( identifier[models] . identifier[Chemical] ) keyword[if] identifier[cas_rn] : identifier[query] = identifier[query] . identifier[filter] ( identifier[models] . identifier[Chemical] . identifier[cas_rn] . identifier[like] ( identifier[cas_rn] )) keyword[if] identifier[chemical_id] : identifier[query] = identifier[query] . identifier[filter] ( identifier[models] . identifier[Chemical] . identifier[chemical_id] == identifier[chemical_id] ) keyword[if] identifier[chemical_name] : identifier[query] = identifier[query] . identifier[filter] ( identifier[models] . identifier[Chemical] . identifier[chemical_name] . identifier[like] ( identifier[chemical_name] )) keyword[if] identifier[chemical_definition] : identifier[query] = identifier[query] . identifier[filter] ( identifier[models] . identifier[Chemical] . identifier[definition] . identifier[like] ( identifier[chemical_definition] )) keyword[return] identifier[query]
def _join_chemical(query, cas_rn, chemical_id, chemical_name, chemical_definition): """helper function to add a query join to Chemical model :param `sqlalchemy.orm.query.Query` query: SQL Alchemy query :param cas_rn: :param chemical_id: :param chemical_name: :param chemical_definition: :return: `sqlalchemy.orm.query.Query` object """ if cas_rn or chemical_id or chemical_name or chemical_definition: query = query.join(models.Chemical) if cas_rn: query = query.filter(models.Chemical.cas_rn.like(cas_rn)) # depends on [control=['if'], data=[]] if chemical_id: query = query.filter(models.Chemical.chemical_id == chemical_id) # depends on [control=['if'], data=[]] if chemical_name: query = query.filter(models.Chemical.chemical_name.like(chemical_name)) # depends on [control=['if'], data=[]] if chemical_definition: query = query.filter(models.Chemical.definition.like(chemical_definition)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return query
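The same build-filters-only-when-given pattern, sketched against the stdlib sqlite3 module instead of SQLAlchemy (table and column names are invented for the demo):

import sqlite3

def query_chemicals(conn, cas_rn=None, chemical_id=None, chemical_name=None):
    # Start from an always-true base and append one predicate per supplied
    # argument, mirroring the chained .filter() calls above.
    sql, params = 'SELECT * FROM chemical WHERE 1=1', []
    if cas_rn:
        sql += ' AND cas_rn LIKE ?'
        params.append(cas_rn)
    if chemical_id:
        sql += ' AND chemical_id = ?'
        params.append(chemical_id)
    if chemical_name:
        sql += ' AND chemical_name LIKE ?'
        params.append(chemical_name)
    return conn.execute(sql, params).fetchall()

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE chemical (cas_rn TEXT, chemical_id INT, chemical_name TEXT)')
conn.execute("INSERT INTO chemical VALUES ('64-17-5', 1, 'ethanol')")
print(query_chemicals(conn, chemical_name='eth%'))  # [('64-17-5', 1, 'ethanol')]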
def extend(dict_, *dicts, **kwargs):
    """Extend a dictionary with keys and values from other dictionaries.

    :param dict_: Dictionary to extend

    Optional keyword arguments allow one to control the exact way
    in which ``dict_`` will be extended.

    :param overwrite:

        Whether repeated keys should have their values overwritten,
        retaining the last value, as per the given order of dictionaries.
        This is the default behavior (equivalent to ``overwrite=True``).
        If ``overwrite=False``, repeated keys are simply ignored.

        Example::

            >> foo = {'a': 1}
            >> extend(foo, {'a': 10, 'b': 2}, overwrite=True)
            {'a': 10, 'b': 2}
            >> foo = {'a': 1}
            >> extend(foo, {'a': 10, 'b': 2}, overwrite=False)
            {'a': 1, 'b': 2}

    :param deep:

        Whether extending should proceed recursively, and cause
        corresponding subdictionaries to be merged into each other.
        By default, this does not happen (equivalent to ``deep=False``).

        Example::

            >> foo = {'a': {'b': 1}}
            >> extend(foo, {'a': {'c': 2}}, deep=False)
            {'a': {'c': 2}}
            >> foo = {'a': {'b': 1}}
            >> extend(foo, {'a': {'c': 2}}, deep=True)
            {'a': {'b': 1, 'c': 2}}

    :return: Extended ``dict_``

    .. versionadded:: 0.0.2
    """
    ensure_mapping(dict_)
    dicts = list(imap(ensure_mapping, dicts))
    ensure_keyword_args(kwargs, optional=('deep', 'overwrite'))

    return _nary_dict_update([dict_] + dicts, copy=False,
                             deep=kwargs.get('deep', False),
                             overwrite=kwargs.get('overwrite', True))
def function[extend, parameter[dict_]]: constant[Extend a dictionary with keys and values from other dictionaries. :param dict_: Dictionary to extend Optional keyword arguments allow to control the exact way in which ``dict_`` will be extended. :param overwrite: Whether repeated keys should have their values overwritten, retaining the last value, as per given order of dictionaries. This is the default behavior (equivalent to ``overwrite=True``). If ``overwrite=False``, repeated keys are simply ignored. Example:: >> foo = {'a': 1} >> extend(foo, {'a': 10, 'b': 2}, overwrite=True) {'a': 10, 'b': 2} >> foo = {'a': 1} >> extend(foo, {'a': 10, 'b': 2}, overwrite=False) {'a': 1, 'b': 2} :param deep: Whether extending should proceed recursively, and cause corresponding subdictionaries to be merged into each other. By default, this does not happen (equivalent to ``deep=False``). Example:: >> foo = {'a': {'b': 1}} >> extend(foo, {'a': {'c': 2}}, deep=False) {'a': {'c': 2}} >> foo = {'a': {'b': 1}} >> extend(foo, {'a': {'c': 2}}, deep=True) {'a': {'b': 1, 'c': 2}} :return: Extended ``dict_`` .. versionadded:: 0.0.2 ] call[name[ensure_mapping], parameter[name[dict_]]] variable[dicts] assign[=] call[name[list], parameter[call[name[imap], parameter[name[ensure_mapping], name[dicts]]]]] call[name[ensure_keyword_args], parameter[name[kwargs]]] return[call[name[_nary_dict_update], parameter[binary_operation[list[[<ast.Name object at 0x7da1b1faad40>]] + name[dicts]]]]]
keyword[def] identifier[extend] ( identifier[dict_] ,* identifier[dicts] ,** identifier[kwargs] ): literal[string] identifier[ensure_mapping] ( identifier[dict_] ) identifier[dicts] = identifier[list] ( identifier[imap] ( identifier[ensure_mapping] , identifier[dicts] )) identifier[ensure_keyword_args] ( identifier[kwargs] , identifier[optional] =( literal[string] , literal[string] )) keyword[return] identifier[_nary_dict_update] ([ identifier[dict_] ]+ identifier[dicts] , identifier[copy] = keyword[False] , identifier[deep] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ), identifier[overwrite] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[True] ))
def extend(dict_, *dicts, **kwargs):
    """Extend a dictionary with keys and values from other dictionaries.

    :param dict_: Dictionary to extend

    Optional keyword arguments allow one to control the exact way
    in which ``dict_`` will be extended.

    :param overwrite:

        Whether repeated keys should have their values overwritten,
        retaining the last value, as per the given order of dictionaries.
        This is the default behavior (equivalent to ``overwrite=True``).
        If ``overwrite=False``, repeated keys are simply ignored.

        Example::

            >> foo = {'a': 1}
            >> extend(foo, {'a': 10, 'b': 2}, overwrite=True)
            {'a': 10, 'b': 2}
            >> foo = {'a': 1}
            >> extend(foo, {'a': 10, 'b': 2}, overwrite=False)
            {'a': 1, 'b': 2}

    :param deep:

        Whether extending should proceed recursively, and cause
        corresponding subdictionaries to be merged into each other.
        By default, this does not happen (equivalent to ``deep=False``).

        Example::

            >> foo = {'a': {'b': 1}}
            >> extend(foo, {'a': {'c': 2}}, deep=False)
            {'a': {'c': 2}}
            >> foo = {'a': {'b': 1}}
            >> extend(foo, {'a': {'c': 2}}, deep=True)
            {'a': {'b': 1, 'c': 2}}

    :return: Extended ``dict_``

    .. versionadded:: 0.0.2
    """
    ensure_mapping(dict_)
    dicts = list(imap(ensure_mapping, dicts))
    ensure_keyword_args(kwargs, optional=('deep', 'overwrite'))
    return _nary_dict_update([dict_] + dicts, copy=False, deep=kwargs.get('deep', False), overwrite=kwargs.get('overwrite', True))
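A compact sketch of the merge semantics that `_nary_dict_update` is documented to provide for the overwrite/deep switches (this reimplementation is an assumption based on the docstring, not the library's actual code):

def merge_into(target, other, deep=False, overwrite=True):
    # Mutates and returns target, following extend()'s documented behavior.
    for key, value in other.items():
        if deep and isinstance(target.get(key), dict) and isinstance(value, dict):
            merge_into(target[key], value, deep=deep, overwrite=overwrite)
        elif overwrite or key not in target:
            target[key] = value
    return target

print(merge_into({'a': 1}, {'a': 10, 'b': 2}, overwrite=False))  # {'a': 1, 'b': 2}
print(merge_into({'a': {'b': 1}}, {'a': {'c': 2}}, deep=True))   # {'a': {'b': 1, 'c': 2}}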
def Diguilio_Teja(T, xs, sigmas_Tb, Tbs, Tcs): r'''Calculates surface tension of a liquid mixture according to mixing rules in [1]_. .. math:: \sigma = 1.002855(T^*)^{1.118091} \frac{T}{T_b} \sigma_r T^* = \frac{(T_c/T)-1}{(T_c/T_b)-1} \sigma_r = \sum x_i \sigma_i T_b = \sum x_i T_{b,i} T_c = \sum x_i T_{c,i} Parameters ---------- T : float Temperature of fluid [K] xs : array-like Mole fractions of all components sigmas_Tb : array-like Surface tensions of all components at the boiling point, [N/m] Tbs : array-like Boiling temperatures of all components, [K] Tcs : array-like Critical temperatures of all components, [K] Returns ------- sigma : float Air-liquid surface tension of mixture, [N/m] Notes ----- Simple model, however it has 0 citations. Gives similar results to the `Winterfeld_Scriven_Davis` model. Raises a ValueError if temperature is greater than the mixture's critical temperature or if the given temperature is negative, or if the mixture's boiling temperature is higher than its critical temperature. [1]_ claims a 4.63 percent average absolute error on 21 binary and 4 ternary non-aqueous systems. [1]_ also considered Van der Waals mixing rules for `Tc`, but found it provided a higher error of 5.58% Examples -------- >>> Diguilio_Teja(T=298.15, xs=[0.1606, 0.8394], ... sigmas_Tb=[0.01424, 0.02530], Tbs=[309.21, 312.95], Tcs=[469.7, 508.0]) 0.025716823875045505 References ---------- .. [1] Diguilio, Ralph, and Amyn S. Teja. "Correlation and Prediction of the Surface Tensions of Mixtures." The Chemical Engineering Journal 38, no. 3 (July 1988): 205-8. doi:10.1016/0300-9467(88)80079-0. ''' if not none_and_length_check([xs, sigmas_Tb, Tbs, Tcs]): raise Exception('Function inputs are incorrect format') Tc = mixing_simple(xs, Tcs) if T > Tc: raise ValueError('T > Tc according to Kays rule - model is not valid in this range.') Tb = mixing_simple(xs, Tbs) sigmar = mixing_simple(xs, sigmas_Tb) Tst = (Tc/T - 1.)/(Tc/Tb - 1) return 1.002855*Tst**1.118091*(T/Tb)*sigmar
def function[Diguilio_Teja, parameter[T, xs, sigmas_Tb, Tbs, Tcs]]: constant[Calculates surface tension of a liquid mixture according to mixing rules in [1]_. .. math:: \sigma = 1.002855(T^*)^{1.118091} \frac{T}{T_b} \sigma_r T^* = \frac{(T_c/T)-1}{(T_c/T_b)-1} \sigma_r = \sum x_i \sigma_i T_b = \sum x_i T_{b,i} T_c = \sum x_i T_{c,i} Parameters ---------- T : float Temperature of fluid [K] xs : array-like Mole fractions of all components sigmas_Tb : array-like Surface tensions of all components at the boiling point, [N/m] Tbs : array-like Boiling temperatures of all components, [K] Tcs : array-like Critical temperatures of all components, [K] Returns ------- sigma : float Air-liquid surface tension of mixture, [N/m] Notes ----- Simple model, however it has 0 citations. Gives similar results to the `Winterfeld_Scriven_Davis` model. Raises a ValueError if temperature is greater than the mixture's critical temperature or if the given temperature is negative, or if the mixture's boiling temperature is higher than its critical temperature. [1]_ claims a 4.63 percent average absolute error on 21 binary and 4 ternary non-aqueous systems. [1]_ also considered Van der Waals mixing rules for `Tc`, but found it provided a higher error of 5.58% Examples -------- >>> Diguilio_Teja(T=298.15, xs=[0.1606, 0.8394], ... sigmas_Tb=[0.01424, 0.02530], Tbs=[309.21, 312.95], Tcs=[469.7, 508.0]) 0.025716823875045505 References ---------- .. [1] Diguilio, Ralph, and Amyn S. Teja. "Correlation and Prediction of the Surface Tensions of Mixtures." The Chemical Engineering Journal 38, no. 3 (July 1988): 205-8. doi:10.1016/0300-9467(88)80079-0. ] if <ast.UnaryOp object at 0x7da1b021e3e0> begin[:] <ast.Raise object at 0x7da1b021c790> variable[Tc] assign[=] call[name[mixing_simple], parameter[name[xs], name[Tcs]]] if compare[name[T] greater[>] name[Tc]] begin[:] <ast.Raise object at 0x7da1b021ceb0> variable[Tb] assign[=] call[name[mixing_simple], parameter[name[xs], name[Tbs]]] variable[sigmar] assign[=] call[name[mixing_simple], parameter[name[xs], name[sigmas_Tb]]] variable[Tst] assign[=] binary_operation[binary_operation[binary_operation[name[Tc] / name[T]] - constant[1.0]] / binary_operation[binary_operation[name[Tc] / name[Tb]] - constant[1]]] return[binary_operation[binary_operation[binary_operation[constant[1.002855] * binary_operation[name[Tst] ** constant[1.118091]]] * binary_operation[name[T] / name[Tb]]] * name[sigmar]]]
keyword[def] identifier[Diguilio_Teja] ( identifier[T] , identifier[xs] , identifier[sigmas_Tb] , identifier[Tbs] , identifier[Tcs] ): literal[string] keyword[if] keyword[not] identifier[none_and_length_check] ([ identifier[xs] , identifier[sigmas_Tb] , identifier[Tbs] , identifier[Tcs] ]): keyword[raise] identifier[Exception] ( literal[string] ) identifier[Tc] = identifier[mixing_simple] ( identifier[xs] , identifier[Tcs] ) keyword[if] identifier[T] > identifier[Tc] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[Tb] = identifier[mixing_simple] ( identifier[xs] , identifier[Tbs] ) identifier[sigmar] = identifier[mixing_simple] ( identifier[xs] , identifier[sigmas_Tb] ) identifier[Tst] =( identifier[Tc] / identifier[T] - literal[int] )/( identifier[Tc] / identifier[Tb] - literal[int] ) keyword[return] literal[int] * identifier[Tst] ** literal[int] *( identifier[T] / identifier[Tb] )* identifier[sigmar]
def Diguilio_Teja(T, xs, sigmas_Tb, Tbs, Tcs): """Calculates surface tension of a liquid mixture according to mixing rules in [1]_. .. math:: \\sigma = 1.002855(T^*)^{1.118091} \\frac{T}{T_b} \\sigma_r T^* = \\frac{(T_c/T)-1}{(T_c/T_b)-1} \\sigma_r = \\sum x_i \\sigma_i T_b = \\sum x_i T_{b,i} T_c = \\sum x_i T_{c,i} Parameters ---------- T : float Temperature of fluid [K] xs : array-like Mole fractions of all components sigmas_Tb : array-like Surface tensions of all components at the boiling point, [N/m] Tbs : array-like Boiling temperatures of all components, [K] Tcs : array-like Critical temperatures of all components, [K] Returns ------- sigma : float Air-liquid surface tension of mixture, [N/m] Notes ----- Simple model, however it has 0 citations. Gives similar results to the `Winterfeld_Scriven_Davis` model. Raises a ValueError if temperature is greater than the mixture's critical temperature or if the given temperature is negative, or if the mixture's boiling temperature is higher than its critical temperature. [1]_ claims a 4.63 percent average absolute error on 21 binary and 4 ternary non-aqueous systems. [1]_ also considered Van der Waals mixing rules for `Tc`, but found it provided a higher error of 5.58% Examples -------- >>> Diguilio_Teja(T=298.15, xs=[0.1606, 0.8394], ... sigmas_Tb=[0.01424, 0.02530], Tbs=[309.21, 312.95], Tcs=[469.7, 508.0]) 0.025716823875045505 References ---------- .. [1] Diguilio, Ralph, and Amyn S. Teja. "Correlation and Prediction of the Surface Tensions of Mixtures." The Chemical Engineering Journal 38, no. 3 (July 1988): 205-8. doi:10.1016/0300-9467(88)80079-0. """ if not none_and_length_check([xs, sigmas_Tb, Tbs, Tcs]): raise Exception('Function inputs are incorrect format') # depends on [control=['if'], data=[]] Tc = mixing_simple(xs, Tcs) if T > Tc: raise ValueError('T > Tc according to Kays rule - model is not valid in this range.') # depends on [control=['if'], data=[]] Tb = mixing_simple(xs, Tbs) sigmar = mixing_simple(xs, sigmas_Tb) Tst = (Tc / T - 1.0) / (Tc / Tb - 1) return 1.002855 * Tst ** 1.118091 * (T / Tb) * sigmar
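A standalone numeric check of the docstring example; `mixing_simple` is assumed here to be the plain mole-fraction-weighted average that its usage implies:

def mixing_simple(xs, ys):
    # Assumed linear mixing rule: sum of x_i * y_i.
    return sum(x * y for x, y in zip(xs, ys))

T, xs = 298.15, [0.1606, 0.8394]
Tc = mixing_simple(xs, [469.7, 508.0])          # ~501.8 K (Kay's rule)
Tb = mixing_simple(xs, [309.21, 312.95])        # ~312.3 K
sigmar = mixing_simple(xs, [0.01424, 0.02530])  # ~0.0235 N/m
Tst = (Tc/T - 1.)/(Tc/Tb - 1.)
sigma = 1.002855*Tst**1.118091*(T/Tb)*sigmar
print(round(sigma, 9))  # 0.025716824, matching the docstring example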
def path(self, name):
        """
        Look for files in a subdirectory of MEDIA_ROOT using the tenant's
        domain_url value as the specifier.
        """
        if name is None:
            name = ''
        try:
            location = safe_join(self.location, connection.tenant.domain_url)
        except AttributeError:
            location = self.location
        try:
            path = safe_join(location, name)
        except ValueError:
            raise SuspiciousOperation(
                "Attempted access to '%s' denied." % name)
        return os.path.normpath(path)
def function[path, parameter[self, name]]:
    constant[
        Look for files in a subdirectory of MEDIA_ROOT using the tenant's
        domain_url value as the specifier.
        ]
    if compare[name[name] is constant[None]] begin[:]
        variable[name] assign[=] constant[]
    <ast.Try object at 0x7da1b18a9000>
    <ast.Try object at 0x7da1b18a97e0>
    return[call[name[os].path.normpath, parameter[name[path]]]]
keyword[def] identifier[path] ( identifier[self] , identifier[name] ): literal[string] keyword[if] identifier[name] keyword[is] keyword[None] : identifier[name] = literal[string] keyword[try] : identifier[location] = identifier[safe_join] ( identifier[self] . identifier[location] , identifier[connection] . identifier[tenant] . identifier[domain_url] ) keyword[except] identifier[AttributeError] : identifier[location] = identifier[self] . identifier[location] keyword[try] : identifier[path] = identifier[safe_join] ( identifier[location] , identifier[name] ) keyword[except] identifier[ValueError] : keyword[raise] identifier[SuspiciousOperation] ( literal[string] % identifier[name] ) keyword[return] identifier[os] . identifier[path] . identifier[normpath] ( identifier[path] )
def path(self, name):
    """
    Look for files in a subdirectory of MEDIA_ROOT using the tenant's
    domain_url value as the specifier.
    """
    if name is None:
        name = '' # depends on [control=['if'], data=['name']]
    try:
        location = safe_join(self.location, connection.tenant.domain_url) # depends on [control=['try'], data=[]]
    except AttributeError:
        location = self.location # depends on [control=['except'], data=[]]
    try:
        path = safe_join(location, name) # depends on [control=['try'], data=[]]
    except ValueError:
        raise SuspiciousOperation("Attempted access to '%s' denied." % name) # depends on [control=['except'], data=[]]
    return os.path.normpath(path)
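A stdlib sketch of the safe_join idea this storage backend leans on: joining must never escape the base location (Django's real safe_join is stricter about edge cases):

import os

def safe_join(base, *paths):
    base = os.path.abspath(base)
    final = os.path.abspath(os.path.join(base, *paths))
    if final != base and not final.startswith(base + os.sep):
        raise ValueError('the joined path would escape %r' % base)
    return final

print(safe_join('/media', 'tenant.example.com', 'avatar.png'))
# /media/tenant.example.com/avatar.png (the tenant domain is the subdirectory)
try:
    safe_join('/media', '../etc/passwd')
except ValueError as exc:
    print(exc)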
def convertStateToIndex(population, fire): """Convert state parameters to transition probability matrix index. Parameters ---------- population : int The population abundance class of the threatened species. fire : int The time in years since last fire. Returns ------- index : int The index into the transition probability matrix that corresponds to the state parameters. """ assert 0 <= population < POPULATION_CLASSES, "'population' must be in " \ "(0, 1...%s)" % str(POPULATION_CLASSES - 1) assert 0 <= fire < FIRE_CLASSES, "'fire' must be in " \ "(0, 1...%s) " % str(FIRE_CLASSES - 1) return(population * FIRE_CLASSES + fire)
def function[convertStateToIndex, parameter[population, fire]]: constant[Convert state parameters to transition probability matrix index. Parameters ---------- population : int The population abundance class of the threatened species. fire : int The time in years since last fire. Returns ------- index : int The index into the transition probability matrix that corresponds to the state parameters. ] assert[compare[constant[0] less_or_equal[<=] name[population]]] assert[compare[constant[0] less_or_equal[<=] name[fire]]] return[binary_operation[binary_operation[name[population] * name[FIRE_CLASSES]] + name[fire]]]
keyword[def] identifier[convertStateToIndex] ( identifier[population] , identifier[fire] ): literal[string] keyword[assert] literal[int] <= identifier[population] < identifier[POPULATION_CLASSES] , literal[string] literal[string] % identifier[str] ( identifier[POPULATION_CLASSES] - literal[int] ) keyword[assert] literal[int] <= identifier[fire] < identifier[FIRE_CLASSES] , literal[string] literal[string] % identifier[str] ( identifier[FIRE_CLASSES] - literal[int] ) keyword[return] ( identifier[population] * identifier[FIRE_CLASSES] + identifier[fire] )
def convertStateToIndex(population, fire): """Convert state parameters to transition probability matrix index. Parameters ---------- population : int The population abundance class of the threatened species. fire : int The time in years since last fire. Returns ------- index : int The index into the transition probability matrix that corresponds to the state parameters. """ assert 0 <= population < POPULATION_CLASSES, "'population' must be in (0, 1...%s)" % str(POPULATION_CLASSES - 1) assert 0 <= fire < FIRE_CLASSES, "'fire' must be in (0, 1...%s) " % str(FIRE_CLASSES - 1) return population * FIRE_CLASSES + fire
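Since the encoding is plain row-major indexing, the inverse is a single divmod. A quick round-trip check with illustrative class counts (the real POPULATION_CLASSES and FIRE_CLASSES constants live elsewhere in the module; the values below are assumed for the demo):

POPULATION_CLASSES, FIRE_CLASSES = 7, 13  # assumed sizes

def convert_state_to_index(population, fire):
    return population * FIRE_CLASSES + fire

def convert_index_to_state(index):
    # Inverse of the row-major encoding above.
    return divmod(index, FIRE_CLASSES)

for p in range(POPULATION_CLASSES):
    for f in range(FIRE_CLASSES):
        assert convert_index_to_state(convert_state_to_index(p, f)) == (p, f)
print('round-trip holds for all', POPULATION_CLASSES * FIRE_CLASSES, 'states')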
def listdir_nohidden(path):
    """List non-hidden files or directories under path"""
    for f in os.listdir(path):
        if isinstance(f, str):
            f = unicode(f, "utf-8")
        if not f.startswith('.'):
            yield f
def function[listdir_nohidden, parameter[path]]:
    constant[List non-hidden files or directories under path]
    for taget[name[f]] in starred[call[name[os].listdir, parameter[name[path]]]] begin[:]
        if call[name[isinstance], parameter[name[f], name[str]]] begin[:]
            variable[f] assign[=] call[name[unicode], parameter[name[f], constant[utf-8]]]
        if <ast.UnaryOp object at 0x7da20e9639d0> begin[:]
            <ast.Yield object at 0x7da20e9633a0>
keyword[def] identifier[listdir_nohidden] ( identifier[path] ): literal[string] keyword[for] identifier[f] keyword[in] identifier[os] . identifier[listdir] ( identifier[path] ): keyword[if] identifier[isinstance] ( identifier[f] , identifier[str] ): identifier[f] = identifier[unicode] ( identifier[f] , literal[string] ) keyword[if] keyword[not] identifier[f] . identifier[startswith] ( literal[string] ): keyword[yield] identifier[f]
def listdir_nohidden(path):
    """List non-hidden files or directories under path"""
    for f in os.listdir(path):
        if isinstance(f, str):
            f = unicode(f, 'utf-8') # depends on [control=['if'], data=[]]
        if not f.startswith('.'):
            yield f # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
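The unicode() call pins this helper to Python 2, where os.listdir() on a byte path returned byte strings. A Python 3 equivalent, sketched with pathlib:

from pathlib import Path

def listdir_nohidden(path):
    """Yield names of non-hidden entries under path."""
    for entry in Path(path).iterdir():
        if not entry.name.startswith('.'):
            yield entry.name

print(sorted(listdir_nohidden('.')))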
def journey_options(origin: str, destination: str, via: t.Optional[str]=None, before: t.Optional[int]=None, after: t.Optional[int]=None, time: t.Optional[datetime]=None, hsl: t.Optional[bool]=None, year_card: t.Optional[bool]=None) -> ( snug.Query[t.List[Journey]]): """journey recommendations from an origin to a destination station""" return snug.GET('treinplanner', params={ 'fromStation': origin, 'toStation': destination, 'viaStation': via, 'previousAdvices': before, 'nextAdvices': after, 'dateTime': time, 'hslAllowed': hsl, 'yearCard': year_card, })
def function[journey_options, parameter[origin, destination, via, before, after, time, hsl, year_card]]: constant[journey recommendations from an origin to a destination station] return[call[name[snug].GET, parameter[constant[treinplanner]]]]
keyword[def] identifier[journey_options] ( identifier[origin] : identifier[str] , identifier[destination] : identifier[str] , identifier[via] : identifier[t] . identifier[Optional] [ identifier[str] ]= keyword[None] , identifier[before] : identifier[t] . identifier[Optional] [ identifier[int] ]= keyword[None] , identifier[after] : identifier[t] . identifier[Optional] [ identifier[int] ]= keyword[None] , identifier[time] : identifier[t] . identifier[Optional] [ identifier[datetime] ]= keyword[None] , identifier[hsl] : identifier[t] . identifier[Optional] [ identifier[bool] ]= keyword[None] , identifier[year_card] : identifier[t] . identifier[Optional] [ identifier[bool] ]= keyword[None] )->( identifier[snug] . identifier[Query] [ identifier[t] . identifier[List] [ identifier[Journey] ]]): literal[string] keyword[return] identifier[snug] . identifier[GET] ( literal[string] , identifier[params] ={ literal[string] : identifier[origin] , literal[string] : identifier[destination] , literal[string] : identifier[via] , literal[string] : identifier[before] , literal[string] : identifier[after] , literal[string] : identifier[time] , literal[string] : identifier[hsl] , literal[string] : identifier[year_card] , })
def journey_options(origin: str, destination: str, via: t.Optional[str]=None, before: t.Optional[int]=None, after: t.Optional[int]=None, time: t.Optional[datetime]=None, hsl: t.Optional[bool]=None, year_card: t.Optional[bool]=None) -> snug.Query[t.List[Journey]]: """journey recommendations from an origin to a destination station""" return snug.GET('treinplanner', params={'fromStation': origin, 'toStation': destination, 'viaStation': via, 'previousAdvices': before, 'nextAdvices': after, 'dateTime': time, 'hslAllowed': hsl, 'yearCard': year_card})
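All of the optional parameters default to None; the usual convention (and an assumption here about what snug does with this params dict) is that unset entries are dropped before URL-encoding. A stdlib sketch of that filtering:

from urllib.parse import urlencode

def build_query(params):
    # Keep only the parameters the caller actually supplied.
    return urlencode({k: v for k, v in params.items() if v is not None})

print(build_query({
    'fromStation': 'Amsterdam Centraal',
    'toStation': 'Utrecht Centraal',
    'viaStation': None,      # omitted from the query string
    'previousAdvices': 1,
}))
# fromStation=Amsterdam+Centraal&toStation=Utrecht+Centraal&previousAdvices=1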
def convex_hull(labels, indexes=None, fast=True):
    """Given a labeled image, return a list of points per object ordered by
    angle from an interior point, representing the convex hull.

    labels - the label matrix
    indexes - an array of label #s to be processed, defaults to all non-zero
              labels

    Returns a matrix and a vector. The matrix consists of one row per
    point in the convex hull. Each row has three columns, the label #,
    the i coordinate of the point and the j coordinate of the point. The
    result is organized first by label, then the points are arranged
    counter-clockwise around the perimeter.
    The vector is a vector of #s of points in the convex hull per label
    """
    if indexes is None:
        indexes = np.unique(labels)
        indexes.sort()
        indexes=indexes[indexes!=0]
    else:
        indexes=np.array(indexes)
    if len(indexes) == 0:
        return np.zeros((0,2),int),np.zeros((0,),int)
    #
    # Reduce the # of points to consider
    #
    outlines = outline(labels)
    coords = np.argwhere(outlines > 0).astype(np.int32)
    if len(coords)==0:
        # Every outline of every image is blank
        return (np.zeros((0,3),int),
                np.zeros((len(indexes),),int))
    i = coords[:,0]
    j = coords[:,1]
    labels_per_point = labels[i,j]
    pixel_labels = np.column_stack((i,j,labels_per_point))
    return convex_hull_ijv(pixel_labels, indexes, fast)
def function[convex_hull, parameter[labels, indexes, fast]]: constant[Given a labeled image, return a list of points per object ordered by angle from an interior point, representing the convex hull.s labels - the label matrix indexes - an array of label #s to be processed, defaults to all non-zero labels Returns a matrix and a vector. The matrix consists of one row per point in the convex hull. Each row has three columns, the label #, the i coordinate of the point and the j coordinate of the point. The result is organized first by label, then the points are arranged counter-clockwise around the perimeter. The vector is a vector of #s of points in the convex hull per label ] if compare[name[indexes] is constant[None]] begin[:] variable[indexes] assign[=] call[name[np].unique, parameter[name[labels]]] call[name[indexes].sort, parameter[]] variable[indexes] assign[=] call[name[indexes]][compare[name[indexes] not_equal[!=] constant[0]]] if compare[call[name[len], parameter[name[indexes]]] equal[==] constant[0]] begin[:] return[tuple[[<ast.Call object at 0x7da20c7ca5c0>, <ast.Call object at 0x7da20c7c8e50>]]] variable[outlines] assign[=] call[name[outline], parameter[name[labels]]] variable[coords] assign[=] call[call[name[np].argwhere, parameter[compare[name[outlines] greater[>] constant[0]]]].astype, parameter[name[np].int32]] if compare[call[name[len], parameter[name[coords]]] equal[==] constant[0]] begin[:] return[tuple[[<ast.Call object at 0x7da20c7ca7d0>, <ast.Call object at 0x7da20c7cbc70>]]] variable[i] assign[=] call[name[coords]][tuple[[<ast.Slice object at 0x7da20c7c9ea0>, <ast.Constant object at 0x7da20c7c8700>]]] variable[j] assign[=] call[name[coords]][tuple[[<ast.Slice object at 0x7da20c7cb0a0>, <ast.Constant object at 0x7da20c7ca2c0>]]] variable[labels_per_point] assign[=] call[name[labels]][tuple[[<ast.Name object at 0x7da20c7ca9b0>, <ast.Name object at 0x7da20c7cb880>]]] variable[pixel_labels] assign[=] call[name[np].column_stack, parameter[tuple[[<ast.Name object at 0x7da20c7c9a20>, <ast.Name object at 0x7da20c7cbca0>, <ast.Name object at 0x7da20c7c95a0>]]]] return[call[name[convex_hull_ijv], parameter[name[pixel_labels], name[indexes], name[fast]]]]
keyword[def] identifier[convex_hull] ( identifier[labels] , identifier[indexes] = keyword[None] , identifier[fast] = keyword[True] ): literal[string] keyword[if] identifier[indexes] keyword[is] keyword[None] : identifier[indexes] = identifier[np] . identifier[unique] ( identifier[labels] ) identifier[indexes] . identifier[sort] () identifier[indexes] = identifier[indexes] [ identifier[indexes] != literal[int] ] keyword[else] : identifier[indexes] = identifier[np] . identifier[array] ( identifier[indexes] ) keyword[if] identifier[len] ( identifier[indexes] )== literal[int] : keyword[return] identifier[np] . identifier[zeros] (( literal[int] , literal[int] ), identifier[int] ), identifier[np] . identifier[zeros] (( literal[int] ,), identifier[int] ) identifier[outlines] = identifier[outline] ( identifier[labels] ) identifier[coords] = identifier[np] . identifier[argwhere] ( identifier[outlines] > literal[int] ). identifier[astype] ( identifier[np] . identifier[int32] ) keyword[if] identifier[len] ( identifier[coords] )== literal[int] : keyword[return] ( identifier[np] . identifier[zeros] (( literal[int] , literal[int] ), identifier[int] ), identifier[np] . identifier[zeros] (( identifier[len] ( identifier[indexes] ),), identifier[int] )) identifier[i] = identifier[coords] [:, literal[int] ] identifier[j] = identifier[coords] [:, literal[int] ] identifier[labels_per_point] = identifier[labels] [ identifier[i] , identifier[j] ] identifier[pixel_labels] = identifier[np] . identifier[column_stack] (( identifier[i] , identifier[j] , identifier[labels_per_point] )) keyword[return] identifier[convex_hull_ijv] ( identifier[pixel_labels] , identifier[indexes] , identifier[fast] )
def convex_hull(labels, indexes=None, fast=True):
    """Given a labeled image, return a list of points per object ordered by
    angle from an interior point, representing the convex hull.

    labels - the label matrix
    indexes - an array of label #s to be processed, defaults to all non-zero
              labels

    Returns a matrix and a vector. The matrix consists of one row per
    point in the convex hull. Each row has three columns, the label #,
    the i coordinate of the point and the j coordinate of the point. The
    result is organized first by label, then the points are arranged
    counter-clockwise around the perimeter.
    The vector is a vector of #s of points in the convex hull per label
    """
    if indexes is None:
        indexes = np.unique(labels)
        indexes.sort()
        indexes = indexes[indexes != 0] # depends on [control=['if'], data=['indexes']]
    else:
        indexes = np.array(indexes)
    if len(indexes) == 0:
        return (np.zeros((0, 2), int), np.zeros((0,), int)) # depends on [control=['if'], data=[]]
    #
    # Reduce the # of points to consider
    #
    outlines = outline(labels)
    coords = np.argwhere(outlines > 0).astype(np.int32)
    if len(coords) == 0:
        # Every outline of every image is blank
        return (np.zeros((0, 3), int), np.zeros((len(indexes),), int)) # depends on [control=['if'], data=[]]
    i = coords[:, 0]
    j = coords[:, 1]
    labels_per_point = labels[i, j]
    pixel_labels = np.column_stack((i, j, labels_per_point))
    return convex_hull_ijv(pixel_labels, indexes, fast)
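A small numpy-only demonstration of the preprocessing step: turning a label matrix into the (i, j, label) rows handed to convex_hull_ijv, with a simple 4-neighbour boundary test standing in for the module's outline() helper:

import numpy as np

labels = np.zeros((5, 5), np.int32)
labels[1:4, 1:4] = 1  # one 3x3 object labelled 1

# Stand-in for outline(): keep pixels where some 4-neighbour differs.
padded = np.pad(labels, 1, mode='constant')
center = padded[1:-1, 1:-1]
interior = ((center == padded[:-2, 1:-1]) & (center == padded[2:, 1:-1]) &
            (center == padded[1:-1, :-2]) & (center == padded[1:-1, 2:]))
outlines = np.where(interior, 0, labels)

coords = np.argwhere(outlines > 0).astype(np.int32)
i, j = coords[:, 0], coords[:, 1]
pixel_labels = np.column_stack((i, j, labels[i, j]))
print(pixel_labels)  # eight (i, j, 1) rows: the border of the square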
def markdown_single_text(self, catalog, cdli_number):
        """
        Prints a single text to a file in markdown.
        :param catalog: text ingested by cdli_corpus
        :param cdli_number: text you wish to print
        :return: output in filename.md
        """
        if cdli_number in catalog:
            pnum = catalog[cdli_number]['pnum']
            edition = catalog[cdli_number]['edition']
            metadata = '\n\t'.join(catalog[cdli_number]['metadata'])
            transliteration = '\n\t'.join(catalog[cdli_number]['transliteration'])
            normalization = '\n\t'.join(catalog[cdli_number]['normalization'])
            translation = '\n\t'.join(catalog[cdli_number]['translation'])
            m_d = """{edition}
{pnum}
---
### metadata
	{metadata}
### transliteration
	{trans}
### normalization
	{norm}
### translation
	{translation}
""".format(pnum=pnum, edition=edition,
           metadata=metadata, trans=transliteration,
           norm=normalization, translation=translation)
            self.markdown_text = m_d
def function[markdown_single_text, parameter[self, catalog, cdli_number]]:
    constant[
        Prints a single text to a file in markdown.
        :param catalog: text ingested by cdli_corpus
        :param cdli_number: text you wish to print
        :return: output in filename.md
        ]
    if compare[name[cdli_number] in name[catalog]] begin[:]
        variable[pnum] assign[=] call[call[name[catalog]][name[cdli_number]]][constant[pnum]]
        variable[edition] assign[=] call[call[name[catalog]][name[cdli_number]]][constant[edition]]
        variable[metadata] assign[=] call[constant[
	].join, parameter[call[call[name[catalog]][name[cdli_number]]][constant[metadata]]]]
        variable[transliteration] assign[=] call[constant[
	].join, parameter[call[call[name[catalog]][name[cdli_number]]][constant[transliteration]]]]
        variable[normalization] assign[=] call[constant[
	].join, parameter[call[call[name[catalog]][name[cdli_number]]][constant[normalization]]]]
        variable[translation] assign[=] call[constant[
	].join, parameter[call[call[name[catalog]][name[cdli_number]]][constant[translation]]]]
        variable[m_d] assign[=] call[constant[{edition} {pnum} --- ### metadata {metadata} ### transliteration {trans} ### normalization {norm} ### translation {translation} ].format, parameter[]]
        name[self].markdown_text assign[=] name[m_d]
keyword[def] identifier[markdown_single_text] ( identifier[self] , identifier[catalog] , identifier[cdli_number] ): literal[string] keyword[if] identifier[cdli_number] keyword[in] identifier[catalog] : identifier[pnum] = identifier[catalog] [ identifier[cdli_number] ][ literal[string] ] identifier[edition] = identifier[catalog] [ identifier[cdli_number] ][ literal[string] ] identifier[metadata] = literal[string] . identifier[join] ( identifier[catalog] [ identifier[cdli_number] ][ literal[string] ]) identifier[transliteration] = literal[string] . identifier[join] ( identifier[catalog] [ identifier[cdli_number] ][ literal[string] ]) identifier[normalization] = literal[string] . identifier[join] ( identifier[catalog] [ identifier[cdli_number] ][ literal[string] ]) identifier[translation] = literal[string] . identifier[join] ( identifier[catalog] [ identifier[cdli_number] ][ literal[string] ]) identifier[m_d] = literal[string] . identifier[format] ( identifier[pnum] = identifier[pnum] , identifier[edition] = identifier[edition] , identifier[metadata] = identifier[metadata] , identifier[trans] = identifier[transliteration] , identifier[norm] = identifier[normalization] , identifier[translation] = identifier[translation] ) identifier[self] . identifier[markdown_text] = identifier[m_d]
def markdown_single_text(self, catalog, cdli_number):
    """
    Prints a single text to a file in markdown.
    :param catalog: text ingested by cdli_corpus
    :param cdli_number: text you wish to print
    :return: output in filename.md
    """
    if cdli_number in catalog:
        pnum = catalog[cdli_number]['pnum']
        edition = catalog[cdli_number]['edition']
        metadata = '\n\t'.join(catalog[cdli_number]['metadata'])
        transliteration = '\n\t'.join(catalog[cdli_number]['transliteration'])
        normalization = '\n\t'.join(catalog[cdli_number]['normalization'])
        translation = '\n\t'.join(catalog[cdli_number]['translation'])
        m_d = '{edition}\n{pnum}\n---\n### metadata\n\t{metadata}\n### transliteration\n\t{trans}\n### normalization\n\t{norm}\n### translation\n\t{translation} \n'.format(pnum=pnum, edition=edition, metadata=metadata, trans=transliteration, norm=normalization, translation=translation)
        self.markdown_text = m_d # depends on [control=['if'], data=['cdli_number', 'catalog']]
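The '\n\t' joins drive the page layout: every line after the first lands on its own tab-indented row beneath its heading. A toy rendering of one section (content invented):

lines = ['1. a-na', '2. szum-ma', '3. i-du']
section = '### transliteration\n\t' + '\n\t'.join(lines)
print(section)
# ### transliteration
#     1. a-na
#     2. szum-ma
#     3. i-du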
def slamdunkUtrRatesPlot (self): """ Generate the UTR rates plot """ cats = OrderedDict() keys = ['T>C', 'A>T', 'A>G', 'A>C', 'T>A', 'T>G', 'G>A', 'G>T', 'G>C', 'C>A', 'C>T', 'C>G'] for i, v in enumerate(keys): cats[v] = { 'color': self.plot_cols[i] } pconfig = { 'id': 'slamdunk_utrratesplot', 'title': 'Slamdunk: Overall conversion rates per UTR', 'cpswitch': False, 'cpswitch_c_active': False, 'ylab': 'Number of conversions', 'stacking': 'normal', 'tt_decimals': 2, 'tt_suffix': '%', 'tt_percentages': False, 'hide_zero_cats': False } self.add_section ( name = 'Conversion rates per UTR', anchor = 'slamdunk_utr_rates', description = """This plot shows the individual conversion rates for all UTRs (see the <a href="http://t-neumann.github.io/slamdunk/docs.html#utrrates" target="_blank">slamdunk docs</a>).""", plot = bargraph.plot(self.utrates_data, cats, pconfig) )
def function[slamdunkUtrRatesPlot, parameter[self]]: constant[ Generate the UTR rates plot ] variable[cats] assign[=] call[name[OrderedDict], parameter[]] variable[keys] assign[=] list[[<ast.Constant object at 0x7da204564a30>, <ast.Constant object at 0x7da204565990>, <ast.Constant object at 0x7da204567a30>, <ast.Constant object at 0x7da204566710>, <ast.Constant object at 0x7da204564d00>, <ast.Constant object at 0x7da204566020>, <ast.Constant object at 0x7da204566920>, <ast.Constant object at 0x7da204566980>, <ast.Constant object at 0x7da204566890>, <ast.Constant object at 0x7da204564760>, <ast.Constant object at 0x7da204566d40>, <ast.Constant object at 0x7da204567760>]] for taget[tuple[[<ast.Name object at 0x7da204567190>, <ast.Name object at 0x7da204564520>]]] in starred[call[name[enumerate], parameter[name[keys]]]] begin[:] call[name[cats]][name[v]] assign[=] dictionary[[<ast.Constant object at 0x7da204564100>], [<ast.Subscript object at 0x7da204564d60>]] variable[pconfig] assign[=] dictionary[[<ast.Constant object at 0x7da204566aa0>, <ast.Constant object at 0x7da204564730>, <ast.Constant object at 0x7da2045663b0>, <ast.Constant object at 0x7da204564df0>, <ast.Constant object at 0x7da204566d10>, <ast.Constant object at 0x7da204565ae0>, <ast.Constant object at 0x7da204566fb0>, <ast.Constant object at 0x7da204567df0>, <ast.Constant object at 0x7da204565180>, <ast.Constant object at 0x7da18bc70f10>], [<ast.Constant object at 0x7da18bc73d60>, <ast.Constant object at 0x7da18bc701f0>, <ast.Constant object at 0x7da18bc70c70>, <ast.Constant object at 0x7da18bc71270>, <ast.Constant object at 0x7da18bc70340>, <ast.Constant object at 0x7da18bc725c0>, <ast.Constant object at 0x7da18bc72860>, <ast.Constant object at 0x7da18bc73040>, <ast.Constant object at 0x7da18bc72380>, <ast.Constant object at 0x7da18bc71c60>]] call[name[self].add_section, parameter[]]
keyword[def] identifier[slamdunkUtrRatesPlot] ( identifier[self] ): literal[string] identifier[cats] = identifier[OrderedDict] () identifier[keys] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[keys] ): identifier[cats] [ identifier[v] ]={ literal[string] : identifier[self] . identifier[plot_cols] [ identifier[i] ]} identifier[pconfig] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[False] , literal[string] : keyword[False] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[int] , literal[string] : literal[string] , literal[string] : keyword[False] , literal[string] : keyword[False] } identifier[self] . identifier[add_section] ( identifier[name] = literal[string] , identifier[anchor] = literal[string] , identifier[description] = literal[string] , identifier[plot] = identifier[bargraph] . identifier[plot] ( identifier[self] . identifier[utrates_data] , identifier[cats] , identifier[pconfig] ) )
def slamdunkUtrRatesPlot(self): """ Generate the UTR rates plot """ cats = OrderedDict() keys = ['T>C', 'A>T', 'A>G', 'A>C', 'T>A', 'T>G', 'G>A', 'G>T', 'G>C', 'C>A', 'C>T', 'C>G'] for (i, v) in enumerate(keys): cats[v] = {'color': self.plot_cols[i]} # depends on [control=['for'], data=[]] pconfig = {'id': 'slamdunk_utrratesplot', 'title': 'Slamdunk: Overall conversion rates per UTR', 'cpswitch': False, 'cpswitch_c_active': False, 'ylab': 'Number of conversions', 'stacking': 'normal', 'tt_decimals': 2, 'tt_suffix': '%', 'tt_percentages': False, 'hide_zero_cats': False} self.add_section(name='Conversion rates per UTR', anchor='slamdunk_utr_rates', description='This plot shows the individual conversion rates for all UTRs\n (see the <a href="http://t-neumann.github.io/slamdunk/docs.html#utrrates" target="_blank">slamdunk docs</a>).', plot=bargraph.plot(self.utrates_data, cats, pconfig))
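The cats mapping simply pairs each conversion type with a fixed colour from the module's palette; a standalone sketch of that construction (palette values invented):

from collections import OrderedDict

plot_cols = ['#D7301F', '#FC8D59', '#FDCC8A']  # stand-in palette
keys = ['T>C', 'A>T', 'A>G']

cats = OrderedDict((key, {'color': plot_cols[i]}) for i, key in enumerate(keys))
print(cats['T>C'])  # {'color': '#D7301F'}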
def term(self): """ term: atom (('*' | '/' | '//') atom)* """ node = self.atom() while self.token.nature in (Nature.MUL, Nature.DIV, Nature.INT_DIV): token = self.token if token.nature == Nature.MUL: self._process(Nature.MUL) elif token.nature == Nature.DIV: self._process(Nature.DIV) elif token.nature == Nature.INT_DIV: self._process(Nature.INT_DIV) else: self._error() node = BinaryOperation(left=node, op=token, right=self.atom()) return node
def function[term, parameter[self]]: constant[ term: atom (('*' | '/' | '//') atom)* ] variable[node] assign[=] call[name[self].atom, parameter[]] while compare[name[self].token.nature in tuple[[<ast.Attribute object at 0x7da1b09eba60>, <ast.Attribute object at 0x7da1b09e87f0>, <ast.Attribute object at 0x7da1b09e8cd0>]]] begin[:] variable[token] assign[=] name[self].token if compare[name[token].nature equal[==] name[Nature].MUL] begin[:] call[name[self]._process, parameter[name[Nature].MUL]] variable[node] assign[=] call[name[BinaryOperation], parameter[]] return[name[node]]
keyword[def] identifier[term] ( identifier[self] ): literal[string] identifier[node] = identifier[self] . identifier[atom] () keyword[while] identifier[self] . identifier[token] . identifier[nature] keyword[in] ( identifier[Nature] . identifier[MUL] , identifier[Nature] . identifier[DIV] , identifier[Nature] . identifier[INT_DIV] ): identifier[token] = identifier[self] . identifier[token] keyword[if] identifier[token] . identifier[nature] == identifier[Nature] . identifier[MUL] : identifier[self] . identifier[_process] ( identifier[Nature] . identifier[MUL] ) keyword[elif] identifier[token] . identifier[nature] == identifier[Nature] . identifier[DIV] : identifier[self] . identifier[_process] ( identifier[Nature] . identifier[DIV] ) keyword[elif] identifier[token] . identifier[nature] == identifier[Nature] . identifier[INT_DIV] : identifier[self] . identifier[_process] ( identifier[Nature] . identifier[INT_DIV] ) keyword[else] : identifier[self] . identifier[_error] () identifier[node] = identifier[BinaryOperation] ( identifier[left] = identifier[node] , identifier[op] = identifier[token] , identifier[right] = identifier[self] . identifier[atom] ()) keyword[return] identifier[node]
def term(self): """ term: atom (('*' | '/' | '//') atom)* """ node = self.atom() while self.token.nature in (Nature.MUL, Nature.DIV, Nature.INT_DIV): token = self.token if token.nature == Nature.MUL: self._process(Nature.MUL) # depends on [control=['if'], data=[]] elif token.nature == Nature.DIV: self._process(Nature.DIV) # depends on [control=['if'], data=[]] elif token.nature == Nature.INT_DIV: self._process(Nature.INT_DIV) # depends on [control=['if'], data=[]] else: self._error() node = BinaryOperation(left=node, op=token, right=self.atom()) # depends on [control=['while'], data=[]] return node
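A self-contained toy version of the same grammar rule over integer atoms, evaluating directly instead of building BinaryOperation nodes (the tokenizer and names are invented for the demo):

import re

def tokenize(text):
    # '//' must be tried before '/' so it lexes as one token.
    return re.findall(r'\d+|//|[*/]', text)

def parse_term(tokens):
    # term: atom (('*' | '/' | '//') atom)*
    value = int(tokens.pop(0))  # atom: integer literal only, here
    while tokens and tokens[0] in ('*', '/', '//'):
        op = tokens.pop(0)
        right = int(tokens.pop(0))
        if op == '*':
            value = value * right
        elif op == '//':
            value = value // right
        else:
            value = value / right
    return value

print(parse_term(tokenize('7 // 2 * 3')))  # 9, left-associative as the loop implies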
def user_present(name, password, email, tenant=None, enabled=True, roles=None, profile=None, password_reset=True, project=None, **connection_args): ''' Ensure that the keystone user is present with the specified properties. name The name of the user to manage password The password to use for this user. .. note:: If the user already exists and a different password was set for the user than the one specified here, the password for the user will be updated. Please set the ``password_reset`` option to ``False`` if this is not the desired behavior. password_reset Whether or not to reset password after initial set. Defaults to ``True``. email The email address for this user tenant The tenant (name) for this user project The project (name) for this user (overrides tenant in api v3) enabled Availability state for this user roles The roles the user should have under given tenants. Passed as a dictionary mapping tenant names to a list of roles in this tenant, i.e.:: roles: admin: # tenant - admin # role service: - admin - Member ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'User "{0}" will be updated'.format(name)} _api_version(profile=profile, **connection_args) if project and not tenant: tenant = project # Validate tenant if set if tenant is not None: tenantdata = __salt__['keystone.tenant_get'](name=tenant, profile=profile, **connection_args) if 'Error' in tenantdata: ret['result'] = False ret['comment'] = 'Tenant / project "{0}" does not exist'.format(tenant) return ret tenant_id = tenantdata[tenant]['id'] else: tenant_id = None # Check if user is already present user = __salt__['keystone.user_get'](name=name, profile=profile, **connection_args) if 'Error' not in user: change_email = False change_enabled = False change_tenant = False change_password = False if user[name].get('email', None) != email: change_email = True if user[name].get('enabled', None) != enabled: change_enabled = True if tenant and (_TENANT_ID not in user[name] or user[name].get(_TENANT_ID, None) != tenant_id): change_tenant = True if (password_reset is True and not __salt__['keystone.user_verify_password'](name=name, password=password, profile=profile, **connection_args)): change_password = True if __opts__.get('test') and (change_email or change_enabled or change_tenant or change_password): ret['result'] = None ret['comment'] = 'User "{0}" will be updated'.format(name) if change_email is True: ret['changes']['Email'] = 'Will be updated' if change_enabled is True: ret['changes']['Enabled'] = 'Will be True' if change_tenant is True: ret['changes']['Tenant'] = 'Will be added to "{0}" tenant'.format(tenant) if change_password is True: ret['changes']['Password'] = 'Will be updated' return ret ret['comment'] = 'User "{0}" is already present'.format(name) if change_email: __salt__['keystone.user_update'](name=name, email=email, profile=profile, **connection_args) ret['comment'] = 'User "{0}" has been updated'.format(name) ret['changes']['Email'] = 'Updated' if change_enabled: __salt__['keystone.user_update'](name=name, enabled=enabled, profile=profile, **connection_args) ret['comment'] = 'User "{0}" has been updated'.format(name) ret['changes']['Enabled'] = 'Now {0}'.format(enabled) if change_tenant: __salt__['keystone.user_update'](name=name, tenant=tenant, profile=profile, **connection_args) ret['comment'] = 'User "{0}" has been updated'.format(name) ret['changes']['Tenant'] = 'Added to "{0}" tenant'.format(tenant) if change_password: __salt__['keystone.user_password_update'](name=name, password=password, 
                                                      profile=profile,
                                                      **connection_args)
            ret['comment'] = 'User "{0}" has been updated'.format(name)
            ret['changes']['Password'] = 'Updated'

        if roles:
            for tenant in roles:
                args = dict({'user_name': name, 'tenant_name':
                             tenant, 'profile': profile}, **connection_args)
                tenant_roles = __salt__['keystone.user_role_list'](**args)
                for role in roles[tenant]:
                    if role not in tenant_roles:
                        if __opts__.get('test'):
                            ret['result'] = None
                            ret['comment'] = 'User roles "{0}" will be updated'.format(name)
                            return ret
                        addargs = dict({'user': name, 'role': role,
                                        'tenant': tenant, 'profile': profile},
                                       **connection_args)
                        newrole = __salt__['keystone.user_role_add'](**addargs)
                        if 'roles' in ret['changes']:
                            ret['changes']['roles'].append(newrole)
                        else:
                            ret['changes']['roles'] = [newrole]
                roles_to_remove = list(set(tenant_roles) - set(roles[tenant]))
                for role in roles_to_remove:
                    if __opts__.get('test'):
                        ret['result'] = None
                        ret['comment'] = 'User roles "{0}" will be updated'.format(name)
                        return ret
                    addargs = dict({'user': name, 'role': role,
                                    'tenant': tenant, 'profile': profile},
                                   **connection_args)
                    oldrole = __salt__['keystone.user_role_remove'](**addargs)
                    if 'roles' in ret['changes']:
                        ret['changes']['roles'].append(oldrole)
                    else:
                        ret['changes']['roles'] = [oldrole]
    else:
        # Create that user!
        if __opts__.get('test'):
            ret['result'] = None
            ret['comment'] = 'Keystone user "{0}" will be added'.format(name)
            ret['changes']['User'] = 'Will be created'
            return ret
        __salt__['keystone.user_create'](name=name,
                                         password=password,
                                         email=email,
                                         tenant_id=tenant_id,
                                         enabled=enabled,
                                         profile=profile,
                                         **connection_args)
        if roles:
            for tenant in roles:
                for role in roles[tenant]:
                    __salt__['keystone.user_role_add'](user=name,
                                                       role=role,
                                                       tenant=tenant,
                                                       profile=profile,
                                                       **connection_args)
        ret['comment'] = 'Keystone user {0} has been added'.format(name)
        ret['changes']['User'] = 'Created'
    return ret
def function[user_present, parameter[name, password, email, tenant, enabled, roles, profile, password_reset, project]]: constant[ Ensure that the keystone user is present with the specified properties. name The name of the user to manage password The password to use for this user. .. note:: If the user already exists and a different password was set for the user than the one specified here, the password for the user will be updated. Please set the ``password_reset`` option to ``False`` if this is not the desired behavior. password_reset Whether or not to reset password after initial set. Defaults to ``True``. email The email address for this user tenant The tenant (name) for this user project The project (name) for this user (overrides tenant in api v3) enabled Availability state for this user roles The roles the user should have under given tenants. Passed as a dictionary mapping tenant names to a list of roles in this tenant, i.e.:: roles: admin: # tenant - admin # role service: - admin - Member ] variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b2137c70>, <ast.Constant object at 0x7da1b21379d0>, <ast.Constant object at 0x7da1b2136e60>, <ast.Constant object at 0x7da1b2134c40>], [<ast.Name object at 0x7da1b21345b0>, <ast.Dict object at 0x7da1b21373a0>, <ast.Constant object at 0x7da1b21370d0>, <ast.Call object at 0x7da1b2137310>]] call[name[_api_version], parameter[]] if <ast.BoolOp object at 0x7da1b2134580> begin[:] variable[tenant] assign[=] name[project] if compare[name[tenant] is_not constant[None]] begin[:] variable[tenantdata] assign[=] call[call[name[__salt__]][constant[keystone.tenant_get]], parameter[]] if compare[constant[Error] in name[tenantdata]] begin[:] call[name[ret]][constant[result]] assign[=] constant[False] call[name[ret]][constant[comment]] assign[=] call[constant[Tenant / project "{0}" does not exist].format, parameter[name[tenant]]] return[name[ret]] variable[tenant_id] assign[=] call[call[name[tenantdata]][name[tenant]]][constant[id]] variable[user] assign[=] call[call[name[__salt__]][constant[keystone.user_get]], parameter[]] if compare[constant[Error] <ast.NotIn object at 0x7da2590d7190> name[user]] begin[:] variable[change_email] assign[=] constant[False] variable[change_enabled] assign[=] constant[False] variable[change_tenant] assign[=] constant[False] variable[change_password] assign[=] constant[False] if compare[call[call[name[user]][name[name]].get, parameter[constant[email], constant[None]]] not_equal[!=] name[email]] begin[:] variable[change_email] assign[=] constant[True] if compare[call[call[name[user]][name[name]].get, parameter[constant[enabled], constant[None]]] not_equal[!=] name[enabled]] begin[:] variable[change_enabled] assign[=] constant[True] if <ast.BoolOp object at 0x7da1b21f8ca0> begin[:] variable[change_tenant] assign[=] constant[True] if <ast.BoolOp object at 0x7da1b21f9b10> begin[:] variable[change_password] assign[=] constant[True] if <ast.BoolOp object at 0x7da1b21fae30> begin[:] call[name[ret]][constant[result]] assign[=] constant[None] call[name[ret]][constant[comment]] assign[=] call[constant[User "{0}" will be updated].format, parameter[name[name]]] if compare[name[change_email] is constant[True]] begin[:] call[call[name[ret]][constant[changes]]][constant[Email]] assign[=] constant[Will be updated] if compare[name[change_enabled] is constant[True]] begin[:] call[call[name[ret]][constant[changes]]][constant[Enabled]] assign[=] constant[Will be True] if compare[name[change_tenant] is constant[True]] begin[:] 
call[call[name[ret]][constant[changes]]][constant[Tenant]] assign[=] call[constant[Will be added to "{0}" tenant].format, parameter[name[tenant]]] if compare[name[change_password] is constant[True]] begin[:] call[call[name[ret]][constant[changes]]][constant[Password]] assign[=] constant[Will be updated] return[name[ret]] call[name[ret]][constant[comment]] assign[=] call[constant[User "{0}" is already present].format, parameter[name[name]]] if name[change_email] begin[:] call[call[name[__salt__]][constant[keystone.user_update]], parameter[]] call[name[ret]][constant[comment]] assign[=] call[constant[User "{0}" has been updated].format, parameter[name[name]]] call[call[name[ret]][constant[changes]]][constant[Email]] assign[=] constant[Updated] if name[change_enabled] begin[:] call[call[name[__salt__]][constant[keystone.user_update]], parameter[]] call[name[ret]][constant[comment]] assign[=] call[constant[User "{0}" has been updated].format, parameter[name[name]]] call[call[name[ret]][constant[changes]]][constant[Enabled]] assign[=] call[constant[Now {0}].format, parameter[name[enabled]]] if name[change_tenant] begin[:] call[call[name[__salt__]][constant[keystone.user_update]], parameter[]] call[name[ret]][constant[comment]] assign[=] call[constant[User "{0}" has been updated].format, parameter[name[name]]] call[call[name[ret]][constant[changes]]][constant[Tenant]] assign[=] call[constant[Added to "{0}" tenant].format, parameter[name[tenant]]] if name[change_password] begin[:] call[call[name[__salt__]][constant[keystone.user_password_update]], parameter[]] call[name[ret]][constant[comment]] assign[=] call[constant[User "{0}" has been updated].format, parameter[name[name]]] call[call[name[ret]][constant[changes]]][constant[Password]] assign[=] constant[Updated] if name[roles] begin[:] for taget[name[tenant]] in starred[name[roles]] begin[:] variable[args] assign[=] call[name[dict], parameter[dictionary[[<ast.Constant object at 0x7da20c990df0>, <ast.Constant object at 0x7da20c9906d0>, <ast.Constant object at 0x7da20c993f40>], [<ast.Name object at 0x7da20c9933a0>, <ast.Name object at 0x7da20c991600>, <ast.Name object at 0x7da20c991bd0>]]]] variable[tenant_roles] assign[=] call[call[name[__salt__]][constant[keystone.user_role_list]], parameter[]] for taget[name[role]] in starred[call[name[roles]][name[tenant]]] begin[:] if compare[name[role] <ast.NotIn object at 0x7da2590d7190> name[tenant_roles]] begin[:] if call[name[__opts__].get, parameter[constant[test]]] begin[:] call[name[ret]][constant[result]] assign[=] constant[None] call[name[ret]][constant[comment]] assign[=] call[constant[User roles "{0}" will been updated].format, parameter[name[name]]] return[name[ret]] variable[addargs] assign[=] call[name[dict], parameter[dictionary[[<ast.Constant object at 0x7da20c992530>, <ast.Constant object at 0x7da20c9902e0>, <ast.Constant object at 0x7da20c991e70>, <ast.Constant object at 0x7da20c993010>], [<ast.Name object at 0x7da20c992d10>, <ast.Name object at 0x7da20c993af0>, <ast.Name object at 0x7da20c992410>, <ast.Name object at 0x7da20c992890>]]]] variable[newrole] assign[=] call[call[name[__salt__]][constant[keystone.user_role_add]], parameter[]] if compare[constant[roles] in call[name[ret]][constant[changes]]] begin[:] call[call[call[name[ret]][constant[changes]]][constant[roles]].append, parameter[name[newrole]]] variable[roles_to_remove] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[name[tenant_roles]]] - call[name[set], 
parameter[call[name[roles]][name[tenant]]]]]]] for taget[name[role]] in starred[name[roles_to_remove]] begin[:] if call[name[__opts__].get, parameter[constant[test]]] begin[:] call[name[ret]][constant[result]] assign[=] constant[None] call[name[ret]][constant[comment]] assign[=] call[constant[User roles "{0}" will been updated].format, parameter[name[name]]] return[name[ret]] variable[addargs] assign[=] call[name[dict], parameter[dictionary[[<ast.Constant object at 0x7da20c991a20>, <ast.Constant object at 0x7da20c992860>, <ast.Constant object at 0x7da20c991c30>, <ast.Constant object at 0x7da20c992fb0>], [<ast.Name object at 0x7da20c991ea0>, <ast.Name object at 0x7da20c993370>, <ast.Name object at 0x7da1b21e2890>, <ast.Name object at 0x7da1b21e3430>]]]] variable[oldrole] assign[=] call[call[name[__salt__]][constant[keystone.user_role_remove]], parameter[]] if compare[constant[roles] in call[name[ret]][constant[changes]]] begin[:] call[call[call[name[ret]][constant[changes]]][constant[roles]].append, parameter[name[oldrole]]] return[name[ret]]
keyword[def] identifier[user_present] ( identifier[name] , identifier[password] , identifier[email] , identifier[tenant] = keyword[None] , identifier[enabled] = keyword[True] , identifier[roles] = keyword[None] , identifier[profile] = keyword[None] , identifier[password_reset] = keyword[True] , identifier[project] = keyword[None] , ** identifier[connection_args] ): literal[string] identifier[ret] ={ literal[string] : identifier[name] , literal[string] :{}, literal[string] : keyword[True] , literal[string] : literal[string] . identifier[format] ( identifier[name] )} identifier[_api_version] ( identifier[profile] = identifier[profile] ,** identifier[connection_args] ) keyword[if] identifier[project] keyword[and] keyword[not] identifier[tenant] : identifier[tenant] = identifier[project] keyword[if] identifier[tenant] keyword[is] keyword[not] keyword[None] : identifier[tenantdata] = identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[tenant] , identifier[profile] = identifier[profile] , ** identifier[connection_args] ) keyword[if] literal[string] keyword[in] identifier[tenantdata] : identifier[ret] [ literal[string] ]= keyword[False] identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[tenant] ) keyword[return] identifier[ret] identifier[tenant_id] = identifier[tenantdata] [ identifier[tenant] ][ literal[string] ] keyword[else] : identifier[tenant_id] = keyword[None] identifier[user] = identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] , identifier[profile] = identifier[profile] , ** identifier[connection_args] ) keyword[if] literal[string] keyword[not] keyword[in] identifier[user] : identifier[change_email] = keyword[False] identifier[change_enabled] = keyword[False] identifier[change_tenant] = keyword[False] identifier[change_password] = keyword[False] keyword[if] identifier[user] [ identifier[name] ]. identifier[get] ( literal[string] , keyword[None] )!= identifier[email] : identifier[change_email] = keyword[True] keyword[if] identifier[user] [ identifier[name] ]. identifier[get] ( literal[string] , keyword[None] )!= identifier[enabled] : identifier[change_enabled] = keyword[True] keyword[if] identifier[tenant] keyword[and] ( identifier[_TENANT_ID] keyword[not] keyword[in] identifier[user] [ identifier[name] ] keyword[or] identifier[user] [ identifier[name] ]. identifier[get] ( identifier[_TENANT_ID] , keyword[None] )!= identifier[tenant_id] ): identifier[change_tenant] = keyword[True] keyword[if] ( identifier[password_reset] keyword[is] keyword[True] keyword[and] keyword[not] identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] , identifier[password] = identifier[password] , identifier[profile] = identifier[profile] , ** identifier[connection_args] )): identifier[change_password] = keyword[True] keyword[if] identifier[__opts__] . identifier[get] ( literal[string] ) keyword[and] ( identifier[change_email] keyword[or] identifier[change_enabled] keyword[or] identifier[change_tenant] keyword[or] identifier[change_password] ): identifier[ret] [ literal[string] ]= keyword[None] identifier[ret] [ literal[string] ]= literal[string] . 
identifier[format] ( identifier[name] ) keyword[if] identifier[change_email] keyword[is] keyword[True] : identifier[ret] [ literal[string] ][ literal[string] ]= literal[string] keyword[if] identifier[change_enabled] keyword[is] keyword[True] : identifier[ret] [ literal[string] ][ literal[string] ]= literal[string] keyword[if] identifier[change_tenant] keyword[is] keyword[True] : identifier[ret] [ literal[string] ][ literal[string] ]= literal[string] . identifier[format] ( identifier[tenant] ) keyword[if] identifier[change_password] keyword[is] keyword[True] : identifier[ret] [ literal[string] ][ literal[string] ]= literal[string] keyword[return] identifier[ret] identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] ) keyword[if] identifier[change_email] : identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] , identifier[email] = identifier[email] , identifier[profile] = identifier[profile] ,** identifier[connection_args] ) identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] ) identifier[ret] [ literal[string] ][ literal[string] ]= literal[string] keyword[if] identifier[change_enabled] : identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] , identifier[enabled] = identifier[enabled] , identifier[profile] = identifier[profile] ,** identifier[connection_args] ) identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] ) identifier[ret] [ literal[string] ][ literal[string] ]= literal[string] . identifier[format] ( identifier[enabled] ) keyword[if] identifier[change_tenant] : identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] , identifier[tenant] = identifier[tenant] , identifier[profile] = identifier[profile] ,** identifier[connection_args] ) identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] ) identifier[ret] [ literal[string] ][ literal[string] ]= literal[string] . identifier[format] ( identifier[tenant] ) keyword[if] identifier[change_password] : identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] , identifier[password] = identifier[password] , identifier[profile] = identifier[profile] , ** identifier[connection_args] ) identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] ) identifier[ret] [ literal[string] ][ literal[string] ]= literal[string] keyword[if] identifier[roles] : keyword[for] identifier[tenant] keyword[in] identifier[roles] : identifier[args] = identifier[dict] ({ literal[string] : identifier[name] , literal[string] : identifier[tenant] , literal[string] : identifier[profile] },** identifier[connection_args] ) identifier[tenant_roles] = identifier[__salt__] [ literal[string] ](** identifier[args] ) keyword[for] identifier[role] keyword[in] identifier[roles] [ identifier[tenant] ]: keyword[if] identifier[role] keyword[not] keyword[in] identifier[tenant_roles] : keyword[if] identifier[__opts__] . identifier[get] ( literal[string] ): identifier[ret] [ literal[string] ]= keyword[None] identifier[ret] [ literal[string] ]= literal[string] . 
identifier[format] ( identifier[name] ) keyword[return] identifier[ret] identifier[addargs] = identifier[dict] ({ literal[string] : identifier[name] , literal[string] : identifier[role] , literal[string] : identifier[tenant] , literal[string] : identifier[profile] }, ** identifier[connection_args] ) identifier[newrole] = identifier[__salt__] [ literal[string] ](** identifier[addargs] ) keyword[if] literal[string] keyword[in] identifier[ret] [ literal[string] ]: identifier[ret] [ literal[string] ][ literal[string] ]. identifier[append] ( identifier[newrole] ) keyword[else] : identifier[ret] [ literal[string] ][ literal[string] ]=[ identifier[newrole] ] identifier[roles_to_remove] = identifier[list] ( identifier[set] ( identifier[tenant_roles] )- identifier[set] ( identifier[roles] [ identifier[tenant] ])) keyword[for] identifier[role] keyword[in] identifier[roles_to_remove] : keyword[if] identifier[__opts__] . identifier[get] ( literal[string] ): identifier[ret] [ literal[string] ]= keyword[None] identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] ) keyword[return] identifier[ret] identifier[addargs] = identifier[dict] ({ literal[string] : identifier[name] , literal[string] : identifier[role] , literal[string] : identifier[tenant] , literal[string] : identifier[profile] }, ** identifier[connection_args] ) identifier[oldrole] = identifier[__salt__] [ literal[string] ](** identifier[addargs] ) keyword[if] literal[string] keyword[in] identifier[ret] [ literal[string] ]: identifier[ret] [ literal[string] ][ literal[string] ]. identifier[append] ( identifier[oldrole] ) keyword[else] : identifier[ret] [ literal[string] ][ literal[string] ]=[ identifier[oldrole] ] keyword[else] : keyword[if] identifier[__opts__] . identifier[get] ( literal[string] ): identifier[ret] [ literal[string] ]= keyword[None] identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] ) identifier[ret] [ literal[string] ][ literal[string] ]= literal[string] keyword[return] identifier[ret] identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] , identifier[password] = identifier[password] , identifier[email] = identifier[email] , identifier[tenant_id] = identifier[tenant_id] , identifier[enabled] = identifier[enabled] , identifier[profile] = identifier[profile] , ** identifier[connection_args] ) keyword[if] identifier[roles] : keyword[for] identifier[tenant] keyword[in] identifier[roles] : keyword[for] identifier[role] keyword[in] identifier[roles] [ identifier[tenant] ]: identifier[__salt__] [ literal[string] ]( identifier[user] = identifier[name] , identifier[role] = identifier[role] , identifier[tenant] = identifier[tenant] , identifier[profile] = identifier[profile] , ** identifier[connection_args] ) identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] ) identifier[ret] [ literal[string] ][ literal[string] ]= literal[string] keyword[return] identifier[ret]
def user_present(name, password, email, tenant=None, enabled=True, roles=None, profile=None, password_reset=True, project=None, **connection_args):
    """
    Ensure that the keystone user is present with the specified properties.

    name
        The name of the user to manage

    password
        The password to use for this user.

        .. note::
            If the user already exists and a different password was set for
            the user than the one specified here, the password for the user
            will be updated. Please set the ``password_reset`` option to
            ``False`` if this is not the desired behavior.

    password_reset
        Whether or not to reset password after initial set. Defaults to
        ``True``.

    email
        The email address for this user

    tenant
        The tenant (name) for this user

    project
        The project (name) for this user (overrides tenant in api v3)

    enabled
        Availability state for this user

    roles
        The roles the user should have under given tenants.
        Passed as a dictionary mapping tenant names to a list of roles in
        this tenant, i.e.::

            roles:
                admin:   # tenant
                    - admin   # role
                service:
                    - admin
                    - Member
    """
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'User "{0}" will be updated'.format(name)}
    _api_version(profile=profile, **connection_args)
    if project and (not tenant):
        tenant = project # depends on [control=['if'], data=[]]
    # Validate tenant if set
    if tenant is not None:
        tenantdata = __salt__['keystone.tenant_get'](name=tenant, profile=profile, **connection_args)
        if 'Error' in tenantdata:
            ret['result'] = False
            ret['comment'] = 'Tenant / project "{0}" does not exist'.format(tenant)
            return ret # depends on [control=['if'], data=[]]
        tenant_id = tenantdata[tenant]['id'] # depends on [control=['if'], data=['tenant']]
    else:
        tenant_id = None
    # Check if user is already present
    user = __salt__['keystone.user_get'](name=name, profile=profile, **connection_args)
    if 'Error' not in user:
        change_email = False
        change_enabled = False
        change_tenant = False
        change_password = False
        if user[name].get('email', None) != email:
            change_email = True # depends on [control=['if'], data=[]]
        if user[name].get('enabled', None) != enabled:
            change_enabled = True # depends on [control=['if'], data=[]]
        if tenant and (_TENANT_ID not in user[name] or user[name].get(_TENANT_ID, None) != tenant_id):
            change_tenant = True # depends on [control=['if'], data=[]]
        if password_reset is True and (not __salt__['keystone.user_verify_password'](name=name, password=password, profile=profile, **connection_args)):
            change_password = True # depends on [control=['if'], data=[]]
        if __opts__.get('test') and (change_email or change_enabled or change_tenant or change_password):
            ret['result'] = None
            ret['comment'] = 'User "{0}" will be updated'.format(name)
            if change_email is True:
                ret['changes']['Email'] = 'Will be updated' # depends on [control=['if'], data=[]]
            if change_enabled is True:
                ret['changes']['Enabled'] = 'Will be True' # depends on [control=['if'], data=[]]
            if change_tenant is True:
                ret['changes']['Tenant'] = 'Will be added to "{0}" tenant'.format(tenant) # depends on [control=['if'], data=[]]
            if change_password is True:
                ret['changes']['Password'] = 'Will be updated' # depends on [control=['if'], data=[]]
            return ret # depends on [control=['if'], data=[]]
        ret['comment'] = 'User "{0}" is already present'.format(name)
        if change_email:
            __salt__['keystone.user_update'](name=name, email=email, profile=profile, **connection_args)
            ret['comment'] = 'User "{0}" has been updated'.format(name)
            ret['changes']['Email'] = 'Updated' # depends on [control=['if'], data=[]]
        if change_enabled:
            __salt__['keystone.user_update'](name=name, enabled=enabled, profile=profile, **connection_args)
            ret['comment'] = 'User "{0}" has been updated'.format(name)
            ret['changes']['Enabled'] = 'Now {0}'.format(enabled) # depends on [control=['if'], data=[]]
        if change_tenant:
            __salt__['keystone.user_update'](name=name, tenant=tenant, profile=profile, **connection_args)
            ret['comment'] = 'User "{0}" has been updated'.format(name)
            ret['changes']['Tenant'] = 'Added to "{0}" tenant'.format(tenant) # depends on [control=['if'], data=[]]
        if change_password:
            __salt__['keystone.user_password_update'](name=name, password=password, profile=profile, **connection_args)
            ret['comment'] = 'User "{0}" has been updated'.format(name)
            ret['changes']['Password'] = 'Updated' # depends on [control=['if'], data=[]]
        if roles:
            for tenant in roles:
                args = dict({'user_name': name, 'tenant_name': tenant, 'profile': profile}, **connection_args)
                tenant_roles = __salt__['keystone.user_role_list'](**args)
                for role in roles[tenant]:
                    if role not in tenant_roles:
                        if __opts__.get('test'):
                            ret['result'] = None
                            ret['comment'] = 'User roles "{0}" will be updated'.format(name)
                            return ret # depends on [control=['if'], data=[]]
                        addargs = dict({'user': name, 'role': role, 'tenant': tenant, 'profile': profile}, **connection_args)
                        newrole = __salt__['keystone.user_role_add'](**addargs)
                        if 'roles' in ret['changes']:
                            ret['changes']['roles'].append(newrole) # depends on [control=['if'], data=[]]
                        else:
                            ret['changes']['roles'] = [newrole] # depends on [control=['if'], data=['role']] # depends on [control=['for'], data=['role']]
                roles_to_remove = list(set(tenant_roles) - set(roles[tenant]))
                for role in roles_to_remove:
                    if __opts__.get('test'):
                        ret['result'] = None
                        ret['comment'] = 'User roles "{0}" will be updated'.format(name)
                        return ret # depends on [control=['if'], data=[]]
                    addargs = dict({'user': name, 'role': role, 'tenant': tenant, 'profile': profile}, **connection_args)
                    oldrole = __salt__['keystone.user_role_remove'](**addargs)
                    if 'roles' in ret['changes']:
                        ret['changes']['roles'].append(oldrole) # depends on [control=['if'], data=[]]
                    else:
                        ret['changes']['roles'] = [oldrole] # depends on [control=['for'], data=['role']] # depends on [control=['for'], data=['tenant']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['user']]
    else:
        # Create that user!
        if __opts__.get('test'):
            ret['result'] = None
            ret['comment'] = 'Keystone user "{0}" will be added'.format(name)
            ret['changes']['User'] = 'Will be created'
            return ret # depends on [control=['if'], data=[]]
        __salt__['keystone.user_create'](name=name, password=password, email=email, tenant_id=tenant_id, enabled=enabled, profile=profile, **connection_args)
        if roles:
            for tenant in roles:
                for role in roles[tenant]:
                    __salt__['keystone.user_role_add'](user=name, role=role, tenant=tenant, profile=profile, **connection_args) # depends on [control=['for'], data=['role']] # depends on [control=['for'], data=['tenant']] # depends on [control=['if'], data=[]]
        ret['comment'] = 'Keystone user {0} has been added'.format(name)
        ret['changes']['User'] = 'Created'
    return ret
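The role handling in user_present boils down to a set reconciliation per tenant. A standalone sketch of just that step, with made-up role names rather than real Keystone output:

# Current grants would come from keystone.user_role_list; desired ones from
# the state's `roles` argument. Additions and removals follow the loops above.
tenant_roles = ['Member', 'admin', 'heat_stack_owner']  # currently granted
desired = ['Member', 'admin']                           # roles[tenant]

roles_to_add = [role for role in desired if role not in tenant_roles]
roles_to_remove = list(set(tenant_roles) - set(desired))

print(roles_to_add)     # []
print(roles_to_remove)  # ['heat_stack_owner']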
def command(*args, **kwargs):
    """Decorator to define a command.

    The arguments to this decorator are those of the
    `ArgumentParser <https://docs.python.org/3/library/argparse.html#argumentparser-objects>`_
    object constructor.
    """
    def decorator(f):
        if 'description' not in kwargs:
            kwargs['description'] = f.__doc__
        if 'parents' in kwargs:
            if not hasattr(f, '_argnames'):  # pragma: no cover
                f._argnames = []
            for p in kwargs['parents']:
                f._argnames += p._argnames if hasattr(p, '_argnames') else []
            kwargs['parents'] = [p.parser for p in kwargs['parents']]
        f.parser = argparse.ArgumentParser(*args, **kwargs)
        f.climax = True
        for arg in getattr(f, '_arguments', []):
            f.parser.add_argument(*arg[0], **arg[1])

        @wraps(f)
        def wrapper(args=None):
            kwargs = f.parser.parse_args(args)
            return f(**vars(kwargs))
        wrapper.func = f
        return wrapper
    return decorator
def function[command, parameter[]]: constant[Decorator to define a command. The arguments to this decorator are those of the `ArgumentParser <https://docs.python.org/3/library/argparse.html#argumentparser-objects>`_ object constructor. ] def function[decorator, parameter[f]]: if compare[constant[description] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:] call[name[kwargs]][constant[description]] assign[=] name[f].__doc__ if compare[constant[parents] in name[kwargs]] begin[:] if <ast.UnaryOp object at 0x7da18dc04c10> begin[:] name[f]._argnames assign[=] list[[]] for taget[name[p]] in starred[call[name[kwargs]][constant[parents]]] begin[:] <ast.AugAssign object at 0x7da18dc07f70> call[name[kwargs]][constant[parents]] assign[=] <ast.ListComp object at 0x7da18dc07af0> name[f].parser assign[=] call[name[argparse].ArgumentParser, parameter[<ast.Starred object at 0x7da18dc07010>]] name[f].climax assign[=] constant[True] for taget[name[arg]] in starred[call[name[getattr], parameter[name[f], constant[_arguments], list[[]]]]] begin[:] call[name[f].parser.add_argument, parameter[<ast.Starred object at 0x7da18f09dae0>]] def function[wrapper, parameter[args]]: variable[kwargs] assign[=] call[name[f].parser.parse_args, parameter[name[args]]] return[call[name[f], parameter[]]] name[wrapper].func assign[=] name[f] return[name[wrapper]] return[name[decorator]]
keyword[def] identifier[command] (* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[def] identifier[decorator] ( identifier[f] ): keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] : identifier[kwargs] [ literal[string] ]= identifier[f] . identifier[__doc__] keyword[if] literal[string] keyword[in] identifier[kwargs] : keyword[if] keyword[not] identifier[hasattr] ( identifier[f] , literal[string] ): identifier[f] . identifier[_argnames] =[] keyword[for] identifier[p] keyword[in] identifier[kwargs] [ literal[string] ]: identifier[f] . identifier[_argnames] += identifier[p] . identifier[_argnames] keyword[if] identifier[hasattr] ( identifier[p] , literal[string] ) keyword[else] [] identifier[kwargs] [ literal[string] ]=[ identifier[p] . identifier[parser] keyword[for] identifier[p] keyword[in] identifier[kwargs] [ literal[string] ]] identifier[f] . identifier[parser] = identifier[argparse] . identifier[ArgumentParser] (* identifier[args] ,** identifier[kwargs] ) identifier[f] . identifier[climax] = keyword[True] keyword[for] identifier[arg] keyword[in] identifier[getattr] ( identifier[f] , literal[string] ,[]): identifier[f] . identifier[parser] . identifier[add_argument] (* identifier[arg] [ literal[int] ],** identifier[arg] [ literal[int] ]) @ identifier[wraps] ( identifier[f] ) keyword[def] identifier[wrapper] ( identifier[args] = keyword[None] ): identifier[kwargs] = identifier[f] . identifier[parser] . identifier[parse_args] ( identifier[args] ) keyword[return] identifier[f] (** identifier[vars] ( identifier[kwargs] )) identifier[wrapper] . identifier[func] = identifier[f] keyword[return] identifier[wrapper] keyword[return] identifier[decorator]
def command(*args, **kwargs):
    """Decorator to define a command.

    The arguments to this decorator are those of the
    `ArgumentParser <https://docs.python.org/3/library/argparse.html#argumentparser-objects>`_
    object constructor.
    """
    def decorator(f):
        if 'description' not in kwargs:
            kwargs['description'] = f.__doc__ # depends on [control=['if'], data=['kwargs']]
        if 'parents' in kwargs:
            if not hasattr(f, '_argnames'): # pragma: no cover
                f._argnames = [] # depends on [control=['if'], data=[]]
            for p in kwargs['parents']:
                f._argnames += p._argnames if hasattr(p, '_argnames') else [] # depends on [control=['for'], data=['p']]
            kwargs['parents'] = [p.parser for p in kwargs['parents']] # depends on [control=['if'], data=['kwargs']]
        f.parser = argparse.ArgumentParser(*args, **kwargs)
        f.climax = True
        for arg in getattr(f, '_arguments', []):
            f.parser.add_argument(*arg[0], **arg[1]) # depends on [control=['for'], data=['arg']]

        @wraps(f)
        def wrapper(args=None):
            kwargs = f.parser.parse_args(args)
            return f(**vars(kwargs))
        wrapper.func = f
        return wrapper
    return decorator
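A usage sketch for the decorator above. The `f.climax = True` marker suggests this is the climax package's command decorator; the companion `climax.argument` decorator, which would populate `f._arguments`, is an assumption here and is not shown above.

import climax

@climax.command(description='Greet someone.')
@climax.argument('name', help='who to greet')
def greet(name):
    print('Hello, {}!'.format(name))

if __name__ == '__main__':
    greet()  # parses sys.argv[1:]; greet(['world']) parses an explicit list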
def cast_problem(problem):
    """
    Casts problem object with known interface as OptProblem.

    Parameters
    ----------
    problem : Object
    """

    # Optproblem
    if isinstance(problem, OptProblem):
        return problem

    # Other
    else:

        # Type Base
        if (not hasattr(problem, 'G') or
            (problem.G.shape[0] == problem.G.shape[1] and
             problem.G.shape[0] == problem.G.nnz and
             np.all(problem.G.row == problem.G.col) and
             np.all(problem.G.data == 1.))):
            return create_problem_from_type_base(problem)

        # Type A
        else:
            return create_problem_from_type_A(problem)
def function[cast_problem, parameter[problem]]: constant[ Casts problem object with known interface as OptProblem. Parameters ---------- problem : Object ] if call[name[isinstance], parameter[name[problem], name[OptProblem]]] begin[:] return[name[problem]]
keyword[def] identifier[cast_problem] ( identifier[problem] ): literal[string] keyword[if] identifier[isinstance] ( identifier[problem] , identifier[OptProblem] ): keyword[return] identifier[problem] keyword[else] : keyword[if] ( keyword[not] identifier[hasattr] ( identifier[problem] , literal[string] ) keyword[or] ( identifier[problem] . identifier[G] . identifier[shape] [ literal[int] ]== identifier[problem] . identifier[G] . identifier[shape] [ literal[int] ] keyword[and] identifier[problem] . identifier[G] . identifier[shape] [ literal[int] ]== identifier[problem] . identifier[G] . identifier[nnz] keyword[and] identifier[np] . identifier[all] ( identifier[problem] . identifier[G] . identifier[row] == identifier[problem] . identifier[G] . identifier[col] ) keyword[and] identifier[np] . identifier[all] ( identifier[problem] . identifier[G] . identifier[data] == literal[int] ))): keyword[return] identifier[create_problem_from_type_base] ( identifier[problem] ) keyword[else] : keyword[return] identifier[create_problem_from_type_A] ( identifier[problem] )
def cast_problem(problem):
    """
    Casts problem object with known interface as OptProblem.

    Parameters
    ----------
    problem : Object
    """
    # Optproblem
    if isinstance(problem, OptProblem):
        return problem # depends on [control=['if'], data=[]]
    # Other
    # Type Base
    elif not hasattr(problem, 'G') or (problem.G.shape[0] == problem.G.shape[1] and problem.G.shape[0] == problem.G.nnz and np.all(problem.G.row == problem.G.col) and np.all(problem.G.data == 1.0)):
        return create_problem_from_type_base(problem) # depends on [control=['if'], data=[]]
    else:
        # Type A
        return create_problem_from_type_A(problem)
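The "type base" branch above fires when G is effectively an identity matrix in COO form. A minimal sketch of just that test using scipy.sparse; no problem object is constructed here:

import numpy as np
from scipy.sparse import identity

G = identity(4, format='coo')  # stands in for problem.G
is_base = (G.shape[0] == G.shape[1] and
           G.shape[0] == G.nnz and
           np.all(G.row == G.col) and
           np.all(G.data == 1.))
print(is_base)  # True -> create_problem_from_type_base would be chosen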
def _partition_runs_by_day(self):
    """Split the runs by day, so we can display them grouped that way."""
    run_infos = self._get_all_run_infos()
    for x in run_infos:
        ts = float(x['timestamp'])
        x['time_of_day_text'] = datetime.fromtimestamp(ts).strftime('%H:%M:%S')

    def date_text(dt):
        delta_days = (date.today() - dt).days
        if delta_days == 0:
            return 'Today'
        elif delta_days == 1:
            return 'Yesterday'
        elif delta_days < 7:
            return dt.strftime('%A')  # Weekday name.
        else:
            d = dt.day % 10
            # Ordinal suffix; the 11th-13th always take 'th'.
            suffix = 'th' if dt.day in (11, 12, 13) else 'st' if d == 1 else 'nd' if d == 2 else 'rd' if d == 3 else 'th'
            return dt.strftime('%B %d') + suffix  # E.g., October 30th.

    keyfunc = lambda x: datetime.fromtimestamp(float(x['timestamp']))
    sorted_run_infos = sorted(run_infos, key=keyfunc, reverse=True)
    return [{'date_text': date_text(dt), 'run_infos': [x for x in infos]}
            for dt, infos in itertools.groupby(sorted_run_infos, lambda x: keyfunc(x).date())]
def function[_partition_runs_by_day, parameter[self]]: constant[Split the runs by day, so we can display them grouped that way.] variable[run_infos] assign[=] call[name[self]._get_all_run_infos, parameter[]] for taget[name[x]] in starred[name[run_infos]] begin[:] variable[ts] assign[=] call[name[float], parameter[call[name[x]][constant[timestamp]]]] call[name[x]][constant[time_of_day_text]] assign[=] call[call[name[datetime].fromtimestamp, parameter[name[ts]]].strftime, parameter[constant[%H:%M:%S]]] def function[date_text, parameter[dt]]: variable[delta_days] assign[=] binary_operation[call[name[date].today, parameter[]] - name[dt]].days if compare[name[delta_days] equal[==] constant[0]] begin[:] return[constant[Today]] variable[keyfunc] assign[=] <ast.Lambda object at 0x7da1b1eaea10> variable[sorted_run_infos] assign[=] call[name[sorted], parameter[name[run_infos]]] return[<ast.ListComp object at 0x7da1b1eade70>]
keyword[def] identifier[_partition_runs_by_day] ( identifier[self] ): literal[string] identifier[run_infos] = identifier[self] . identifier[_get_all_run_infos] () keyword[for] identifier[x] keyword[in] identifier[run_infos] : identifier[ts] = identifier[float] ( identifier[x] [ literal[string] ]) identifier[x] [ literal[string] ]= identifier[datetime] . identifier[fromtimestamp] ( identifier[ts] ). identifier[strftime] ( literal[string] ) keyword[def] identifier[date_text] ( identifier[dt] ): identifier[delta_days] =( identifier[date] . identifier[today] ()- identifier[dt] ). identifier[days] keyword[if] identifier[delta_days] == literal[int] : keyword[return] literal[string] keyword[elif] identifier[delta_days] == literal[int] : keyword[return] literal[string] keyword[elif] identifier[delta_days] < literal[int] : keyword[return] identifier[dt] . identifier[strftime] ( literal[string] ) keyword[else] : identifier[d] = identifier[dt] . identifier[day] % literal[int] identifier[suffix] = literal[string] keyword[if] identifier[d] == literal[int] keyword[else] literal[string] keyword[if] identifier[d] == literal[int] keyword[else] literal[string] keyword[if] identifier[d] == literal[int] keyword[else] literal[string] keyword[return] identifier[dt] . identifier[strftime] ( literal[string] )+ identifier[suffix] identifier[keyfunc] = keyword[lambda] identifier[x] : identifier[datetime] . identifier[fromtimestamp] ( identifier[float] ( identifier[x] [ literal[string] ])) identifier[sorted_run_infos] = identifier[sorted] ( identifier[run_infos] , identifier[key] = identifier[keyfunc] , identifier[reverse] = keyword[True] ) keyword[return] [{ literal[string] : identifier[date_text] ( identifier[dt] ), literal[string] :[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[infos] ]} keyword[for] identifier[dt] , identifier[infos] keyword[in] identifier[itertools] . identifier[groupby] ( identifier[sorted_run_infos] , keyword[lambda] identifier[x] : identifier[keyfunc] ( identifier[x] ). identifier[date] ())]
def _partition_runs_by_day(self):
    """Split the runs by day, so we can display them grouped that way."""
    run_infos = self._get_all_run_infos()
    for x in run_infos:
        ts = float(x['timestamp'])
        x['time_of_day_text'] = datetime.fromtimestamp(ts).strftime('%H:%M:%S') # depends on [control=['for'], data=['x']]

    def date_text(dt):
        delta_days = (date.today() - dt).days
        if delta_days == 0:
            return 'Today' # depends on [control=['if'], data=[]]
        elif delta_days == 1:
            return 'Yesterday' # depends on [control=['if'], data=[]]
        elif delta_days < 7:
            return dt.strftime('%A') # Weekday name. # depends on [control=['if'], data=[]]
        else:
            d = dt.day % 10
            # Ordinal suffix; the 11th-13th always take 'th'.
            suffix = 'th' if dt.day in (11, 12, 13) else 'st' if d == 1 else 'nd' if d == 2 else 'rd' if d == 3 else 'th'
            return dt.strftime('%B %d') + suffix # E.g., October 30th.
    keyfunc = lambda x: datetime.fromtimestamp(float(x['timestamp']))
    sorted_run_infos = sorted(run_infos, key=keyfunc, reverse=True)
    return [{'date_text': date_text(dt), 'run_infos': [x for x in infos]} for (dt, infos) in itertools.groupby(sorted_run_infos, lambda x: keyfunc(x).date())]
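A self-contained sketch of the sort-then-group step above, with made-up timestamps:

import itertools
from datetime import datetime

run_infos = [{'timestamp': '1700000000.0'},
             {'timestamp': '1700003600.0'},
             {'timestamp': '1699900000.0'}]

keyfunc = lambda x: datetime.fromtimestamp(float(x['timestamp']))
sorted_run_infos = sorted(run_infos, key=keyfunc, reverse=True)
for day, infos in itertools.groupby(sorted_run_infos, lambda x: keyfunc(x).date()):
    print(day, len(list(infos)))  # one bucket per local calendar day, newest first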
def build(self):
    """Run the build commands specified in index.yaml."""
    for cmd in self.build_cmds:
        log.info('building command: {}'.format(cmd))
        full_cmd = 'cd {}; {}'.format(self.analyses_path, cmd)
        log.debug('full command: {}'.format(full_cmd))
        subprocess.call(full_cmd, shell=True)
    log.info('build done')
def function[build, parameter[self]]: constant[Run the build commands specified in index.yaml.] for taget[name[cmd]] in starred[name[self].build_cmds] begin[:] call[name[log].info, parameter[call[constant[building command: {}].format, parameter[name[cmd]]]]] variable[full_cmd] assign[=] call[constant[cd {}; {}].format, parameter[name[self].analyses_path, name[cmd]]] call[name[log].debug, parameter[call[constant[full command: {}].format, parameter[name[full_cmd]]]]] call[name[subprocess].call, parameter[name[full_cmd]]] call[name[log].info, parameter[constant[build done]]]
keyword[def] identifier[build] ( identifier[self] ): literal[string] keyword[for] identifier[cmd] keyword[in] identifier[self] . identifier[build_cmds] : identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[cmd] )) identifier[full_cmd] = literal[string] . identifier[format] ( identifier[self] . identifier[analyses_path] , identifier[cmd] ) identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[full_cmd] )) identifier[subprocess] . identifier[call] ( identifier[full_cmd] , identifier[shell] = keyword[True] ) identifier[log] . identifier[info] ( literal[string] )
def build(self):
    """Run the build commands specified in index.yaml."""
    for cmd in self.build_cmds:
        log.info('building command: {}'.format(cmd))
        full_cmd = 'cd {}; {}'.format(self.analyses_path, cmd)
        log.debug('full command: {}'.format(full_cmd))
        subprocess.call(full_cmd, shell=True)
    log.info('build done') # depends on [control=['for'], data=['cmd']]
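The same shell-out pattern in isolation; the path and commands below are placeholders. Passing cwd= to subprocess.call would avoid splicing `cd` into the command string, at the cost of diverging from the code above:

import subprocess

analyses_path = '/tmp'               # placeholder
for cmd in ['echo building', 'ls']:  # placeholder commands
    subprocess.call('cd {}; {}'.format(analyses_path, cmd), shell=True)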
def to_textgrid(self, filtin=[], filtex=[], regex=False):
    """Convert the object to a :class:`pympi.Praat.TextGrid` object.

    :param list filtin: Include only tiers in this list, if empty all tiers
                        are included.
    :param list filtex: Exclude all tiers in this list.
    :param bool regex: If this flag is set the filters are seen as regexes.
    :returns: :class:`pympi.Praat.TextGrid` representation.
    :raises ImportError: If the pympi.Praat module can't be loaded.
    """
    from pympi.Praat import TextGrid
    _, end = self.get_full_time_interval()
    tgout = TextGrid(xmax=end/1000.0)
    func = (lambda x, y: re.match(x, y)) if regex else lambda x, y: x == y
    for tier in self.tiers:
        if (filtin and not any(func(f, tier) for f in filtin)) or\
                (filtex and any(func(f, tier) for f in filtex)):
            continue
        ctier = tgout.add_tier(tier)
        for intv in self.get_annotation_data_for_tier(tier):
            try:
                ctier.add_interval(intv[0]/1000.0, intv[1]/1000.0, intv[2])
            except Exception:
                pass
    return tgout
def function[to_textgrid, parameter[self, filtin, filtex, regex]]: constant[Convert the object to a :class:`pympi.Praat.TextGrid` object. :param list filtin: Include only tiers in this list, if empty all tiers are included. :param list filtex: Exclude all tiers in this list. :param bool regex: If this flag is set the filters are seen as regexes. :returns: :class:`pympi.Praat.TextGrid` representation. :raises ImportError: If the pympi.Praat module can't be loaded. ] from relative_module[pympi.Praat] import module[TextGrid] <ast.Tuple object at 0x7da1b033f1f0> assign[=] call[name[self].get_full_time_interval, parameter[]] variable[tgout] assign[=] call[name[TextGrid], parameter[]] variable[func] assign[=] <ast.IfExp object at 0x7da1b033dea0> for taget[name[tier]] in starred[name[self].tiers] begin[:] if <ast.BoolOp object at 0x7da1b033c850> begin[:] continue variable[ctier] assign[=] call[name[tgout].add_tier, parameter[name[tier]]] for taget[name[intv]] in starred[call[name[self].get_annotation_data_for_tier, parameter[name[tier]]]] begin[:] <ast.Try object at 0x7da1b0212980> return[name[tgout]]
keyword[def] identifier[to_textgrid] ( identifier[self] , identifier[filtin] =[], identifier[filtex] =[], identifier[regex] = keyword[False] ): literal[string] keyword[from] identifier[pympi] . identifier[Praat] keyword[import] identifier[TextGrid] identifier[_] , identifier[end] = identifier[self] . identifier[get_full_time_interval] () identifier[tgout] = identifier[TextGrid] ( identifier[xmax] = identifier[end] / literal[int] ) identifier[func] =( keyword[lambda] identifier[x] , identifier[y] : identifier[re] . identifier[match] ( identifier[x] , identifier[y] )) keyword[if] identifier[regex] keyword[else] keyword[lambda] identifier[x] , identifier[y] : identifier[x] == identifier[y] keyword[for] identifier[tier] keyword[in] identifier[self] . identifier[tiers] : keyword[if] ( identifier[filtin] keyword[and] keyword[not] identifier[any] ( identifier[func] ( identifier[f] , identifier[tier] ) keyword[for] identifier[f] keyword[in] identifier[filtin] )) keyword[or] ( identifier[filtex] keyword[and] identifier[any] ( identifier[func] ( identifier[f] , identifier[tier] ) keyword[for] identifier[f] keyword[in] identifier[filtex] )): keyword[continue] identifier[ctier] = identifier[tgout] . identifier[add_tier] ( identifier[tier] ) keyword[for] identifier[intv] keyword[in] identifier[self] . identifier[get_annotation_data_for_tier] ( identifier[tier] ): keyword[try] : identifier[ctier] . identifier[add_interval] ( identifier[intv] [ literal[int] ]/ literal[int] , identifier[intv] [ literal[int] ]/ literal[int] , identifier[intv] [ literal[int] ]) keyword[except] : keyword[pass] keyword[return] identifier[tgout]
def to_textgrid(self, filtin=[], filtex=[], regex=False):
    """Convert the object to a :class:`pympi.Praat.TextGrid` object.

    :param list filtin: Include only tiers in this list, if empty all tiers
                        are included.
    :param list filtex: Exclude all tiers in this list.
    :param bool regex: If this flag is set the filters are seen as regexes.
    :returns: :class:`pympi.Praat.TextGrid` representation.
    :raises ImportError: If the pympi.Praat module can't be loaded.
    """
    from pympi.Praat import TextGrid
    (_, end) = self.get_full_time_interval()
    tgout = TextGrid(xmax=end / 1000.0)
    func = (lambda x, y: re.match(x, y)) if regex else lambda x, y: x == y
    for tier in self.tiers:
        if filtin and (not any((func(f, tier) for f in filtin))) or (filtex and any((func(f, tier) for f in filtex))):
            continue # depends on [control=['if'], data=[]]
        ctier = tgout.add_tier(tier)
        for intv in self.get_annotation_data_for_tier(tier):
            try:
                ctier.add_interval(intv[0] / 1000.0, intv[1] / 1000.0, intv[2]) # depends on [control=['try'], data=[]]
            except Exception:
                pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['intv']] # depends on [control=['for'], data=['tier']]
    return tgout
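A hedged usage sketch, assuming pympi's usual entry points (`pympi.Eaf` to load an ELAN file, `TextGrid.to_file` to write the result); the file names are placeholders:

from pympi import Eaf

eaf = Eaf('recording.eaf')
tg = eaf.to_textgrid(filtin=['^speaker.*'], regex=True)  # keep matching tiers only
tg.to_file('recording.TextGrid')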
def parse_duration(string):
    ''' Parses duration/period stamp expressed in a subset of ISO8601
        duration specification formats and returns a dictionary of time
        components (as integers). Accepted formats are:

        * ``PnYnMnDTnHnMnS``

          Note: n is positive integer! Floats are NOT supported. Any
          component (nX) is optional, but if any time component is
          specified, ``T`` separator is mandatory. Also, ``P`` is always
          mandatory.

        * ``PnW``

          Note: n is positive integer representing number of weeks.

        * ``P<date>T<time>``

          Note: This format is basically standard ISO8601 timestamp format,
          without the time zone and with ``P`` prepended. We support both
          basic and extended format, which translates into following two
          subformats:

          - ``PYYYYMMDDThhmmss`` for basic, and
          - ``PYYYY-MM-DDThh:mm:ss`` for extended format.

          Note that all subfields are mandatory.

        Note: whitespaces are ignored.

        Examples::

            from datetime import datetime
            from dateutil.relativedelta import relativedelta

            rel = parse_duration('P1m')  # +1 month
            rel = parse_duration('P 1y 1m T 2m 1s')
            rel = parse_duration('P12w')  # +12 weeks
            rel = parse_duration('P 0001-02-03 T 03:02:01')
            rel = parse_duration('P00010203T030201')

            future = datetime.now() + relativedelta(**rel)

        Returns:
            dictionary with (some of the) fields: ``years``, ``months``,
            ``weeks``, ``days``, ``hours``, ``minutes`` or ``seconds``.
            If nothing is matched, an empty dict is returned.
    '''
    string = string.replace(' ', '').upper()

    # try `PnYnMnDTnHnMnS` form
    match = re.match(
        "^P(?:(?:(?P<years>\d+)Y)?(?:(?P<months>\d+)M)?(?:(?P<days>\d+)D)?)?" \
        "(?:T(?:(?P<hours>\d+)H)?(?:(?P<minutes>\d+)M)?(?:(?P<seconds>\d+)S)?)?$",
        string
    )
    if match:
        d = match.groupdict(0)
        return dict(zip(d.keys(), map(int, d.values())))

    # try `PnW` form
    match = re.match(
        "^P(?P<weeks>\d+)W$",
        string
    )
    if match:
        d = match.groupdict(0)
        return dict(zip(d.keys(), map(int, d.values())))

    # try `P<date>T<time>` form, subforms `PYYYYMMDDThhmmss` and `PYYYY-MM-DDThh:mm:ss`
    match = re.match(
        "^P(?P<years>\d{4})(-)?(?P<months>\d{2})(?(2)-)(?P<days>\d{2})T(?P<hours>\d{2})(?(2):)(?P<minutes>\d{2})(?(2):)(?P<seconds>\d{2})$",
        string
    )
    if match:
        d = match.groupdict(0)
        return dict(zip(d.keys(), map(int, d.values())))

    return {}
def function[parse_duration, parameter[string]]: constant[ Parses duration/period stamp expressed in a subset of ISO8601 duration specification formats and returns a dictionary of time components (as integers). Accepted formats are: * ``PnYnMnDTnHnMnS`` Note: n is positive integer! Floats are NOT supported. Any component (nX) is optional, but if any time component is specified, ``T`` separator is mandatory. Also, ``P`` is always mandatory. * ``PnW`` Note: n is positive integer representing number of weeks. * ``P<date>T<time>`` Note: This format is basically standard ISO8601 timestamp format, without the time zone and with ``P`` prepended. We support both basic and extended format, which translates into following two subformats: - ``PYYYYMMDDThhmmss`` for basic, and - ``PYYYY-MM-DDThh:mm:ss`` for extended format. Note that all subfields are mandatory. Note: whitespaces are ignored. Examples:: from datetime import datetime from dateutil.relativedelta import relativedelta rel = parse_duration('P1m') # +1 month rel = parse_duration('P 1y 1m T 2m 1s') rel = parse_duration('P12w') # +12 weeks rel = parse_duration('P 0001-02-03 T 03:02:01') rel = parse_duration('P00010203T030201') future = datetime.now() + relativedelta(**rel) Returns: dictionary with (some of the) fields: ``years``, ``months``, ``weeks``, ``days``, ``hours``, ``minutes`` or ``seconds``. If nothing is matched, an empty dict is returned. ] variable[string] assign[=] call[call[name[string].replace, parameter[constant[ ], constant[]]].upper, parameter[]] variable[match] assign[=] call[name[re].match, parameter[constant[^P(?:(?:(?P<years>\d+)Y)?(?:(?P<months>\d+)M)?(?:(?P<days>\d+)D)?)?(?:T(?:(?P<hours>\d+)H)?(?:(?P<minutes>\d+)M)?(?:(?P<seconds>\d+)S)?)?$], name[string]]] if name[match] begin[:] variable[d] assign[=] call[name[match].groupdict, parameter[constant[0]]] return[call[name[dict], parameter[call[name[zip], parameter[call[name[d].keys, parameter[]], call[name[map], parameter[name[int], call[name[d].values, parameter[]]]]]]]]] variable[match] assign[=] call[name[re].match, parameter[constant[^P(?P<weeks>\d+)W$], name[string]]] if name[match] begin[:] variable[d] assign[=] call[name[match].groupdict, parameter[constant[0]]] return[call[name[dict], parameter[call[name[zip], parameter[call[name[d].keys, parameter[]], call[name[map], parameter[name[int], call[name[d].values, parameter[]]]]]]]]] variable[match] assign[=] call[name[re].match, parameter[constant[^P(?P<years>\d{4})(-)?(?P<months>\d{2})(?(2)-)(?P<days>\d{2})T(?P<hours>\d{2})(?(2):)(?P<minutes>\d{2})(?(2):)(?P<seconds>\d{2})$], name[string]]] if name[match] begin[:] variable[d] assign[=] call[name[match].groupdict, parameter[constant[0]]] return[call[name[dict], parameter[call[name[zip], parameter[call[name[d].keys, parameter[]], call[name[map], parameter[name[int], call[name[d].values, parameter[]]]]]]]]] return[dictionary[[], []]]
keyword[def] identifier[parse_duration] ( identifier[string] ): literal[string] identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ). identifier[upper] () identifier[match] = identifier[re] . identifier[match] ( literal[string] literal[string] , identifier[string] ) keyword[if] identifier[match] : identifier[d] = identifier[match] . identifier[groupdict] ( literal[int] ) keyword[return] identifier[dict] ( identifier[zip] ( identifier[d] . identifier[keys] (), identifier[map] ( identifier[int] , identifier[d] . identifier[values] ()))) identifier[match] = identifier[re] . identifier[match] ( literal[string] , identifier[string] ) keyword[if] identifier[match] : identifier[d] = identifier[match] . identifier[groupdict] ( literal[int] ) keyword[return] identifier[dict] ( identifier[zip] ( identifier[d] . identifier[keys] (), identifier[map] ( identifier[int] , identifier[d] . identifier[values] ()))) identifier[match] = identifier[re] . identifier[match] ( literal[string] , identifier[string] ) keyword[if] identifier[match] : identifier[d] = identifier[match] . identifier[groupdict] ( literal[int] ) keyword[return] identifier[dict] ( identifier[zip] ( identifier[d] . identifier[keys] (), identifier[map] ( identifier[int] , identifier[d] . identifier[values] ()))) keyword[return] {}
def parse_duration(string):
    """ Parses duration/period stamp expressed in a subset of ISO8601
        duration specification formats and returns a dictionary of time
        components (as integers). Accepted formats are:

        * ``PnYnMnDTnHnMnS``

          Note: n is positive integer! Floats are NOT supported. Any
          component (nX) is optional, but if any time component is
          specified, ``T`` separator is mandatory. Also, ``P`` is always
          mandatory.

        * ``PnW``

          Note: n is positive integer representing number of weeks.

        * ``P<date>T<time>``

          Note: This format is basically standard ISO8601 timestamp format,
          without the time zone and with ``P`` prepended. We support both
          basic and extended format, which translates into following two
          subformats:

          - ``PYYYYMMDDThhmmss`` for basic, and
          - ``PYYYY-MM-DDThh:mm:ss`` for extended format.

          Note that all subfields are mandatory.

        Note: whitespaces are ignored.

        Examples::

            from datetime import datetime
            from dateutil.relativedelta import relativedelta

            rel = parse_duration('P1m')  # +1 month
            rel = parse_duration('P 1y 1m T 2m 1s')
            rel = parse_duration('P12w')  # +12 weeks
            rel = parse_duration('P 0001-02-03 T 03:02:01')
            rel = parse_duration('P00010203T030201')

            future = datetime.now() + relativedelta(**rel)

        Returns:
            dictionary with (some of the) fields: ``years``, ``months``,
            ``weeks``, ``days``, ``hours``, ``minutes`` or ``seconds``.
            If nothing is matched, an empty dict is returned.
    """
    string = string.replace(' ', '').upper()
    # try `PnYnMnDTnHnMnS` form
    match = re.match('^P(?:(?:(?P<years>\\d+)Y)?(?:(?P<months>\\d+)M)?(?:(?P<days>\\d+)D)?)?(?:T(?:(?P<hours>\\d+)H)?(?:(?P<minutes>\\d+)M)?(?:(?P<seconds>\\d+)S)?)?$', string)
    if match:
        d = match.groupdict(0)
        return dict(zip(d.keys(), map(int, d.values()))) # depends on [control=['if'], data=[]]
    # try `PnW` form
    match = re.match('^P(?P<weeks>\\d+)W$', string)
    if match:
        d = match.groupdict(0)
        return dict(zip(d.keys(), map(int, d.values()))) # depends on [control=['if'], data=[]]
    # try `P<date>T<time>` form, subforms `PYYYYMMDDThhmmss` and `PYYYY-MM-DDThh:mm:ss`
    match = re.match('^P(?P<years>\\d{4})(-)?(?P<months>\\d{2})(?(2)-)(?P<days>\\d{2})T(?P<hours>\\d{2})(?(2):)(?P<minutes>\\d{2})(?(2):)(?P<seconds>\\d{2})$', string)
    if match:
        d = match.groupdict(0)
        return dict(zip(d.keys(), map(int, d.values()))) # depends on [control=['if'], data=[]]
    return {}
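A quick demonstration of the three accepted forms, assuming parse_duration is in scope. Because of groupdict(0), every named component missing from a matched form comes back as 0:

print(parse_duration('P1Y2M T3H'))
# {'years': 1, 'months': 2, 'days': 0, 'hours': 3, 'minutes': 0, 'seconds': 0}
print(parse_duration('P12W'))
# {'weeks': 12}
print(parse_duration('P0001-02-03T04:05:06'))
# {'years': 1, 'months': 2, 'days': 3, 'hours': 4, 'minutes': 5, 'seconds': 6}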