Columns: code (string, 75-104k chars), code_sememe (string, 47-309k chars), token_type (string, 215-214k chars), code_dependency (string, 75-155k chars)
def add_to_filemenu():
    """Add Pyblish to file-menu

    .. note:: We're going a bit hacky here, probably due to my lack
        of understanding for `evalDeferred` or `executeDeferred`,
        so if you can think of a better solution, feel free to edit.

    """

    if hasattr(cmds, 'about') and not cmds.about(batch=True):
        # As Maya builds its menus dynamically upon being accessed,
        # we force its build here prior to adding our entry using its
        # native mel function call.
        mel.eval("evalDeferred buildFileMenu")

        # Serialise function into string
        script = inspect.getsource(_add_to_filemenu)
        script += "\n_add_to_filemenu()"

        # If cmds doesn't have any members, we're most likely in an
        # uninitialized batch-mode. If it does exist, ensure we
        # really aren't in batch mode.
        cmds.evalDeferred(script)
def function[add_to_filemenu, parameter[]]: constant[Add Pyblish to file-menu .. note:: We're going a bit hacky here, probably due to my lack of understanding for `evalDeferred` or `executeDeferred`, so if you can think of a better solution, feel free to edit. ] if <ast.BoolOp object at 0x7da1b0216ec0> begin[:] call[name[mel].eval, parameter[constant[evalDeferred buildFileMenu]]] variable[script] assign[=] call[name[inspect].getsource, parameter[name[_add_to_filemenu]]] <ast.AugAssign object at 0x7da1b0217610> call[name[cmds].evalDeferred, parameter[name[script]]]
keyword[def] identifier[add_to_filemenu] (): literal[string] keyword[if] identifier[hasattr] ( identifier[cmds] , literal[string] ) keyword[and] keyword[not] identifier[cmds] . identifier[about] ( identifier[batch] = keyword[True] ): identifier[mel] . identifier[eval] ( literal[string] ) identifier[script] = identifier[inspect] . identifier[getsource] ( identifier[_add_to_filemenu] ) identifier[script] += literal[string] identifier[cmds] . identifier[evalDeferred] ( identifier[script] )
def add_to_filemenu(): """Add Pyblish to file-menu .. note:: We're going a bit hacky here, probably due to my lack of understanding for `evalDeferred` or `executeDeferred`, so if you can think of a better solution, feel free to edit. """ if hasattr(cmds, 'about') and (not cmds.about(batch=True)): # As Maya builds its menus dynamically upon being accessed, # we force its build here prior to adding our entry using it's # native mel function call. mel.eval('evalDeferred buildFileMenu') # Serialise function into string script = inspect.getsource(_add_to_filemenu) script += '\n_add_to_filemenu()' # If cmds doesn't have any members, we're most likely in an # uninitialized batch-mode. It it does exists, ensure we # really aren't in batch mode. cmds.evalDeferred(script) # depends on [control=['if'], data=[]]
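The core trick in this sample -- serialising a function's source with `inspect.getsource` and re-executing it later -- can be exercised outside Maya. A minimal runnable sketch, with `exec` standing in for `cmds.evalDeferred` (the menu building and batch-mode checks are Maya-specific and omitted):

import inspect

def _add_to_filemenu():
    print("menu entry installed")

# Serialise the function into a string and append a call to it,
# exactly as the sample does before handing it to evalDeferred.
script = inspect.getsource(_add_to_filemenu)
script += "\n_add_to_filemenu()"

# Evaluate it in a fresh namespace, as Maya would do later
# inside its main loop.
exec(script, {})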
def determine_target_roots(self, goal_name):
    """Helper for tasks that scan for default target roots.

    :param string goal_name: The goal name to use for any warning emissions.
    """
    if not self.context.target_roots:
        print('WARNING: No targets were matched in goal `{}`.'.format(goal_name),
              file=sys.stderr)

    # For the v2 path, e.g. `./pants list` is a functional no-op. This matches the v2 mode behavior
    # of e.g. `./pants --changed-parent=HEAD list` (w/ no changes) returning an empty result.
    return self.context.target_roots
def function[determine_target_roots, parameter[self, goal_name]]: constant[Helper for tasks that scan for default target roots. :param string goal_name: The goal name to use for any warning emissions. ] if <ast.UnaryOp object at 0x7da1b227beb0> begin[:] call[name[print], parameter[call[constant[WARNING: No targets were matched in goal `{}`.].format, parameter[name[goal_name]]]]] return[name[self].context.target_roots]
keyword[def] identifier[determine_target_roots] ( identifier[self] , identifier[goal_name] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[context] . identifier[target_roots] : identifier[print] ( literal[string] . identifier[format] ( identifier[goal_name] ), identifier[file] = identifier[sys] . identifier[stderr] ) keyword[return] identifier[self] . identifier[context] . identifier[target_roots]
def determine_target_roots(self, goal_name): """Helper for tasks that scan for default target roots. :param string goal_name: The goal name to use for any warning emissions. """ if not self.context.target_roots: print('WARNING: No targets were matched in goal `{}`.'.format(goal_name), file=sys.stderr) # depends on [control=['if'], data=[]] # For the v2 path, e.g. `./pants list` is a functional no-op. This matches the v2 mode behavior # of e.g. `./pants --changed-parent=HEAD list` (w/ no changes) returning an empty result. return self.context.target_roots
def keyStats(symbol, token='', version=''):
    '''Key Stats about company

    https://iexcloud.io/docs/api/#key-stats
    8am, 9am ET

    Args:
        symbol (string); Ticker to request
        token (string); Access token
        version (string); API version

    Returns:
        dict: result
    '''
    _raiseIfNotStr(symbol)
    return _getJson('stock/' + symbol + '/stats', token, version)
def function[keyStats, parameter[symbol, token, version]]: constant[Key Stats about company https://iexcloud.io/docs/api/#key-stats 8am, 9am ET Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: dict: result ] call[name[_raiseIfNotStr], parameter[name[symbol]]] return[call[name[_getJson], parameter[binary_operation[binary_operation[constant[stock/] + name[symbol]] + constant[/stats]], name[token], name[version]]]]
keyword[def] identifier[keyStats] ( identifier[symbol] , identifier[token] = literal[string] , identifier[version] = literal[string] ): literal[string] identifier[_raiseIfNotStr] ( identifier[symbol] ) keyword[return] identifier[_getJson] ( literal[string] + identifier[symbol] + literal[string] , identifier[token] , identifier[version] )
def keyStats(symbol, token='', version=''): """Key Stats about company https://iexcloud.io/docs/api/#key-stats 8am, 9am ET Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: dict: result """ _raiseIfNotStr(symbol) return _getJson('stock/' + symbol + '/stats', token, version)
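A quick sketch of the endpoint construction, with a hypothetical stub standing in for the real `_getJson` HTTP layer (the actual client presumably builds the full IEX Cloud URL from the version and token):

def _getJson(path, token='', version=''):
    # Hypothetical stub: echo the request instead of issuing HTTP.
    return {'endpoint': path, 'token': token, 'version': version}

print(_getJson('stock/' + 'AAPL' + '/stats'))
# {'endpoint': 'stock/AAPL/stats', 'token': '', 'version': ''}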
def parseCmdline(rh):
    """
    Parse the request command input.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter getHost.parseCmdline")

    rh.userid = ''

    if rh.totalParms >= 2:
        rh.subfunction = rh.request[1].upper()

    # Verify the subfunction is valid.
    if rh.subfunction not in subfuncHandler:
        # Subfunction is missing.
        subList = ', '.join(sorted(subfuncHandler.keys()))
        msg = msgs.msg['0011'][1] % (modId, subList)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0011'][0])

    # Parse the rest of the command line.
    if rh.results['overallRC'] == 0:
        rh.argPos = 2  # Begin Parsing at 3rd operand
        generalUtils.parseCmdline(rh, posOpsList, keyOpsList)

    rh.printSysLog("Exit getHost.parseCmdLine, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def function[parseCmdline, parameter[rh]]: constant[ Parse the request command input. Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error ] call[name[rh].printSysLog, parameter[constant[Enter getHost.parseCmdline]]] name[rh].userid assign[=] constant[] if compare[name[rh].totalParms greater_or_equal[>=] constant[2]] begin[:] name[rh].subfunction assign[=] call[call[name[rh].request][constant[1]].upper, parameter[]] if compare[name[rh].subfunction <ast.NotIn object at 0x7da2590d7190> name[subfuncHandler]] begin[:] variable[subList] assign[=] call[constant[, ].join, parameter[call[name[sorted], parameter[call[name[subfuncHandler].keys, parameter[]]]]]] variable[msg] assign[=] binary_operation[call[call[name[msgs].msg][constant[0011]]][constant[1]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c9926b0>, <ast.Name object at 0x7da20c992770>]]] call[name[rh].printLn, parameter[constant[ES], name[msg]]] call[name[rh].updateResults, parameter[call[call[name[msgs].msg][constant[0011]]][constant[0]]]] if compare[call[name[rh].results][constant[overallRC]] equal[==] constant[0]] begin[:] name[rh].argPos assign[=] constant[2] call[name[generalUtils].parseCmdline, parameter[name[rh], name[posOpsList], name[keyOpsList]]] call[name[rh].printSysLog, parameter[binary_operation[constant[Exit getHost.parseCmdLine, rc: ] + call[name[str], parameter[call[name[rh].results][constant[overallRC]]]]]]] return[call[name[rh].results][constant[overallRC]]]
keyword[def] identifier[parseCmdline] ( identifier[rh] ): literal[string] identifier[rh] . identifier[printSysLog] ( literal[string] ) identifier[rh] . identifier[userid] = literal[string] keyword[if] identifier[rh] . identifier[totalParms] >= literal[int] : identifier[rh] . identifier[subfunction] = identifier[rh] . identifier[request] [ literal[int] ]. identifier[upper] () keyword[if] identifier[rh] . identifier[subfunction] keyword[not] keyword[in] identifier[subfuncHandler] : identifier[subList] = literal[string] . identifier[join] ( identifier[sorted] ( identifier[subfuncHandler] . identifier[keys] ())) identifier[msg] = identifier[msgs] . identifier[msg] [ literal[string] ][ literal[int] ]%( identifier[modId] , identifier[subList] ) identifier[rh] . identifier[printLn] ( literal[string] , identifier[msg] ) identifier[rh] . identifier[updateResults] ( identifier[msgs] . identifier[msg] [ literal[string] ][ literal[int] ]) keyword[if] identifier[rh] . identifier[results] [ literal[string] ]== literal[int] : identifier[rh] . identifier[argPos] = literal[int] identifier[generalUtils] . identifier[parseCmdline] ( identifier[rh] , identifier[posOpsList] , identifier[keyOpsList] ) identifier[rh] . identifier[printSysLog] ( literal[string] + identifier[str] ( identifier[rh] . identifier[results] [ literal[string] ])) keyword[return] identifier[rh] . identifier[results] [ literal[string] ]
def parseCmdline(rh): """ Parse the request command input. Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error """ rh.printSysLog('Enter getHost.parseCmdline') rh.userid = '' if rh.totalParms >= 2: rh.subfunction = rh.request[1].upper() # depends on [control=['if'], data=[]] # Verify the subfunction is valid. if rh.subfunction not in subfuncHandler: # Subfunction is missing. subList = ', '.join(sorted(subfuncHandler.keys())) msg = msgs.msg['0011'][1] % (modId, subList) rh.printLn('ES', msg) rh.updateResults(msgs.msg['0011'][0]) # depends on [control=['if'], data=['subfuncHandler']] # Parse the rest of the command line. if rh.results['overallRC'] == 0: rh.argPos = 2 # Begin Parsing at 3rd operand generalUtils.parseCmdline(rh, posOpsList, keyOpsList) # depends on [control=['if'], data=[]] rh.printSysLog('Exit getHost.parseCmdLine, rc: ' + str(rh.results['overallRC'])) return rh.results['overallRC']
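The subfunction check in this sample follows a common dispatch-table pattern: look the name up in a handler dict and, on a miss, report the sorted list of valid names. A standalone sketch with hypothetical handlers:

subfuncHandler = {'DISKPOOLNAMES': lambda: 'pool list',
                  'GENERAL': lambda: 'general info'}

def dispatch(subfunction):
    subfunction = subfunction.upper()
    if subfunction not in subfuncHandler:
        # Same error style as the sample: join the sorted valid names.
        valid = ', '.join(sorted(subfuncHandler.keys()))
        raise ValueError('unknown subfunction; expected one of: ' + valid)
    return subfuncHandler[subfunction]()

print(dispatch('general'))   # 'general info'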
def objectAtCursor(self):
    """
    Returns the python object that the text is representing.

    :return     <object> || None
    """
    # determine the text block
    cursor = self.textCursor()
    text = projex.text.nativestring(cursor.block().text())
    position = cursor.positionInBlock() - 1

    if not text:
        return (None, '')

    symbol = ''
    for match in re.finditer('[\w\.]+', text):
        if match.start() <= position <= match.end():
            symbol = match.group()
            break

    if not symbol:
        return (None, '')

    parts = symbol.split('.')
    if len(parts) == 1:
        return (self.scope(), parts[0])

    part = parts[0]
    obj = self.scope().get(part)
    for part in parts[1:-1]:
        try:
            obj = getattr(obj, part)
        except AttributeError:
            return (None, '')

    return (obj, parts[-1])
def function[objectAtCursor, parameter[self]]: constant[ Returns the python object that the text is representing. :return <object> || None ] variable[cursor] assign[=] call[name[self].textCursor, parameter[]] variable[text] assign[=] call[name[projex].text.nativestring, parameter[call[call[name[cursor].block, parameter[]].text, parameter[]]]] variable[position] assign[=] binary_operation[call[name[cursor].positionInBlock, parameter[]] - constant[1]] if <ast.UnaryOp object at 0x7da18bcc8c40> begin[:] return[tuple[[<ast.Constant object at 0x7da18bccae60>, <ast.Constant object at 0x7da18bccae90>]]] variable[symbol] assign[=] constant[] for taget[name[match]] in starred[call[name[re].finditer, parameter[constant[[\w\.]+], name[text]]]] begin[:] if compare[call[name[match].start, parameter[]] less_or_equal[<=] name[position]] begin[:] variable[symbol] assign[=] call[name[match].group, parameter[]] break if <ast.UnaryOp object at 0x7da18bcc9a50> begin[:] return[tuple[[<ast.Constant object at 0x7da18bccbc10>, <ast.Constant object at 0x7da18bccb0a0>]]] variable[parts] assign[=] call[name[symbol].split, parameter[constant[.]]] if compare[call[name[len], parameter[name[parts]]] equal[==] constant[1]] begin[:] return[tuple[[<ast.Call object at 0x7da18bccbb80>, <ast.Subscript object at 0x7da18bccadd0>]]] variable[part] assign[=] call[name[parts]][constant[0]] variable[obj] assign[=] call[call[name[self].scope, parameter[]].get, parameter[name[part]]] for taget[name[part]] in starred[call[name[parts]][<ast.Slice object at 0x7da18bcc8e80>]] begin[:] <ast.Try object at 0x7da18bccbd00> return[tuple[[<ast.Name object at 0x7da18bccae30>, <ast.Subscript object at 0x7da18bccb310>]]]
keyword[def] identifier[objectAtCursor] ( identifier[self] ): literal[string] identifier[cursor] = identifier[self] . identifier[textCursor] () identifier[text] = identifier[projex] . identifier[text] . identifier[nativestring] ( identifier[cursor] . identifier[block] (). identifier[text] ()) identifier[position] = identifier[cursor] . identifier[positionInBlock] ()- literal[int] keyword[if] keyword[not] identifier[text] : keyword[return] ( keyword[None] , literal[string] ) identifier[symbol] = literal[string] keyword[for] identifier[match] keyword[in] identifier[re] . identifier[finditer] ( literal[string] , identifier[text] ): keyword[if] identifier[match] . identifier[start] ()<= identifier[position] <= identifier[match] . identifier[end] (): identifier[symbol] = identifier[match] . identifier[group] () keyword[break] keyword[if] keyword[not] identifier[symbol] : keyword[return] ( keyword[None] , literal[string] ) identifier[parts] = identifier[symbol] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[parts] )== literal[int] : keyword[return] ( identifier[self] . identifier[scope] (), identifier[parts] [ literal[int] ]) identifier[part] = identifier[parts] [ literal[int] ] identifier[obj] = identifier[self] . identifier[scope] (). identifier[get] ( identifier[part] ) keyword[for] identifier[part] keyword[in] identifier[parts] [ literal[int] :- literal[int] ]: keyword[try] : identifier[obj] = identifier[getattr] ( identifier[obj] , identifier[part] ) keyword[except] identifier[AttributeError] : keyword[return] ( keyword[None] , literal[string] ) keyword[return] ( identifier[obj] , identifier[parts] [- literal[int] ])
def objectAtCursor(self): """ Returns the python object that the text is representing. :return <object> || None """ # determine the text block cursor = self.textCursor() text = projex.text.nativestring(cursor.block().text()) position = cursor.positionInBlock() - 1 if not text: return (None, '') # depends on [control=['if'], data=[]] symbol = '' for match in re.finditer('[\\w\\.]+', text): if match.start() <= position <= match.end(): symbol = match.group() break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['match']] if not symbol: return (None, '') # depends on [control=['if'], data=[]] parts = symbol.split('.') if len(parts) == 1: return (self.scope(), parts[0]) # depends on [control=['if'], data=[]] part = parts[0] obj = self.scope().get(part) for part in parts[1:-1]: try: obj = getattr(obj, part) # depends on [control=['try'], data=[]] except AttributeError: return (None, '') # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['part']] return (obj, parts[-1])
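The symbol-extraction step can be tried on its own, outside Qt: scan a line for dotted identifiers and keep the match whose span contains the cursor column. A minimal sketch:

import re

def symbol_at(text, position):
    # Find the dotted identifier under the cursor column, as the
    # sample does with the block text and positionInBlock().
    for match in re.finditer(r'[\w.]+', text):
        if match.start() <= position <= match.end():
            return match.group()
    return ''

line = "result = os.path.join(a, b)"
print(symbol_at(line, line.index('path')))  # os.path.join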
def new(self, page_name, **dict):
    '''
    Create a new item with the provided dict information
    at the given page_name. Returns the new item.

    As of version 2.2 of Redmine, this doesn't seem to function.
    '''
    self._item_new_path = '/projects/%s/wiki/%s.json' % \
                          (self._project.identifier, page_name)

    # Call the base class new method
    return super(Redmine_Wiki_Pages_Manager, self).new(**dict)
def function[new, parameter[self, page_name]]: constant[ Create a new item with the provided dict information at the given page_name. Returns the new item. As of version 2.2 of Redmine, this doesn't seem to function. ] name[self]._item_new_path assign[=] binary_operation[constant[/projects/%s/wiki/%s.json] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0facac0>, <ast.Name object at 0x7da1b0fae980>]]] return[call[call[name[super], parameter[name[Redmine_Wiki_Pages_Manager], name[self]]].new, parameter[]]]
keyword[def] identifier[new] ( identifier[self] , identifier[page_name] ,** identifier[dict] ): literal[string] identifier[self] . identifier[_item_new_path] = literal[string] %( identifier[self] . identifier[_project] . identifier[identifier] , identifier[page_name] ) keyword[return] identifier[super] ( identifier[Redmine_Wiki_Pages_Manager] , identifier[self] ). identifier[new] (** identifier[dict] )
def new(self, page_name, **dict): """ Create a new item with the provided dict information at the given page_name. Returns the new item. As of version 2.2 of Redmine, this doesn't seem to function. """ self._item_new_path = '/projects/%s/wiki/%s.json' % (self._project.identifier, page_name) # Call the base class new method return super(Redmine_Wiki_Pages_Manager, self).new(**dict)
def CreateEvent(self, EventId, Caption, Hint):
    """Creates a custom event displayed in Skype client's events pane.

    :Parameters:
      EventId : unicode
        Unique identifier for the event.
      Caption : unicode
        Caption text.
      Hint : unicode
        Hint text. Shown when mouse hovers over the event.

    :return: Event object.
    :rtype: `PluginEvent`
    """
    self._Skype._DoCommand('CREATE EVENT %s CAPTION %s HINT %s' % (
        tounicode(EventId), quote(tounicode(Caption)), quote(tounicode(Hint))))
    return PluginEvent(self._Skype, EventId)
def function[CreateEvent, parameter[self, EventId, Caption, Hint]]: constant[Creates a custom event displayed in Skype client's events pane. :Parameters: EventId : unicode Unique identifier for the event. Caption : unicode Caption text. Hint : unicode Hint text. Shown when mouse hoovers over the event. :return: Event object. :rtype: `PluginEvent` ] call[name[self]._Skype._DoCommand, parameter[binary_operation[constant[CREATE EVENT %s CAPTION %s HINT %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da20c76edd0>, <ast.Call object at 0x7da20c76f340>, <ast.Call object at 0x7da20c76e950>]]]]] return[call[name[PluginEvent], parameter[name[self]._Skype, name[EventId]]]]
keyword[def] identifier[CreateEvent] ( identifier[self] , identifier[EventId] , identifier[Caption] , identifier[Hint] ): literal[string] identifier[self] . identifier[_Skype] . identifier[_DoCommand] ( literal[string] %( identifier[tounicode] ( identifier[EventId] ), identifier[quote] ( identifier[tounicode] ( identifier[Caption] )), identifier[quote] ( identifier[tounicode] ( identifier[Hint] )))) keyword[return] identifier[PluginEvent] ( identifier[self] . identifier[_Skype] , identifier[EventId] )
def CreateEvent(self, EventId, Caption, Hint): """Creates a custom event displayed in Skype client's events pane. :Parameters: EventId : unicode Unique identifier for the event. Caption : unicode Caption text. Hint : unicode Hint text. Shown when mouse hoovers over the event. :return: Event object. :rtype: `PluginEvent` """ self._Skype._DoCommand('CREATE EVENT %s CAPTION %s HINT %s' % (tounicode(EventId), quote(tounicode(Caption)), quote(tounicode(Hint)))) return PluginEvent(self._Skype, EventId)
def home(request):
    """This view generates the data for the home page.

    This login restricted view passes dictionaries containing the current
    cages, animals and strains as well as the totals for each. This data
    is passed to the template home.html"""
    cage_list = Animal.objects.values("Cage").distinct()
    cage_list_current = cage_list.filter(Alive=True)
    animal_list = Animal.objects.all()
    animal_list_current = animal_list.filter(Alive=True)
    strain_list = animal_list.values("Strain").distinct()
    strain_list_current = animal_list_current.values("Strain").distinct()
    return render(request, 'home.html', {
        'animal_list': animal_list,
        'animal_list_current': animal_list_current,
        'strain_list': strain_list,
        'strain_list_current': strain_list_current,
        'cage_list': cage_list,
        'cage_list_current': cage_list_current})
def function[home, parameter[request]]: constant[This view generates the data for the home page. This login restricted view passes dictionaries containing the current cages, animals and strains as well as the totals for each. This data is passed to the template home.html] variable[cage_list] assign[=] call[call[name[Animal].objects.values, parameter[constant[Cage]]].distinct, parameter[]] variable[cage_list_current] assign[=] call[name[cage_list].filter, parameter[]] variable[animal_list] assign[=] call[name[Animal].objects.all, parameter[]] variable[animal_list_current] assign[=] call[name[animal_list].filter, parameter[]] variable[strain_list] assign[=] call[call[name[animal_list].values, parameter[constant[Strain]]].distinct, parameter[]] variable[strain_list_current] assign[=] call[call[name[animal_list_current].values, parameter[constant[Strain]]].distinct, parameter[]] return[call[name[render], parameter[name[request], constant[home.html], dictionary[[<ast.Constant object at 0x7da18dc068f0>, <ast.Constant object at 0x7da18dc06950>, <ast.Constant object at 0x7da18dc05420>, <ast.Constant object at 0x7da18dc05780>, <ast.Constant object at 0x7da18dc07a30>, <ast.Constant object at 0x7da18dc05c90>], [<ast.Name object at 0x7da18dc07d90>, <ast.Name object at 0x7da18dc05090>, <ast.Name object at 0x7da18dc055d0>, <ast.Name object at 0x7da18dc045b0>, <ast.Name object at 0x7da18dc04c70>, <ast.Name object at 0x7da18dc05030>]]]]]
keyword[def] identifier[home] ( identifier[request] ): literal[string] identifier[cage_list] = identifier[Animal] . identifier[objects] . identifier[values] ( literal[string] ). identifier[distinct] () identifier[cage_list_current] = identifier[cage_list] . identifier[filter] ( identifier[Alive] = keyword[True] ) identifier[animal_list] = identifier[Animal] . identifier[objects] . identifier[all] () identifier[animal_list_current] = identifier[animal_list] . identifier[filter] ( identifier[Alive] = keyword[True] ) identifier[strain_list] = identifier[animal_list] . identifier[values] ( literal[string] ). identifier[distinct] () identifier[strain_list_current] = identifier[animal_list_current] . identifier[values] ( literal[string] ). identifier[distinct] () keyword[return] identifier[render] ( identifier[request] , literal[string] ,{ literal[string] : identifier[animal_list] , literal[string] : identifier[animal_list_current] , literal[string] : identifier[strain_list] , literal[string] : identifier[strain_list_current] , literal[string] : identifier[cage_list] , literal[string] : identifier[cage_list_current] })
def home(request): """This view generates the data for the home page. This login restricted view passes dictionaries containing the current cages, animals and strains as well as the totals for each. This data is passed to the template home.html""" cage_list = Animal.objects.values('Cage').distinct() cage_list_current = cage_list.filter(Alive=True) animal_list = Animal.objects.all() animal_list_current = animal_list.filter(Alive=True) strain_list = animal_list.values('Strain').distinct() strain_list_current = animal_list_current.values('Strain').distinct() return render(request, 'home.html', {'animal_list': animal_list, 'animal_list_current': animal_list_current, 'strain_list': strain_list, 'strain_list_current': strain_list_current, 'cage_list': cage_list, 'cage_list_current': cage_list_current})
def init_log(*handlers, **kwargs):
    """
    :param handlers:
    :return:
    """
    disable_existing_loggers = kwargs.get('disable_existing_loggers', False)

    handlers_config = [t.get_handler() for t in handlers]
    new_handlers_config = {}
    for t in handlers_config:
        new_handlers_config.update(t)

    formatter_config = [t.get_formatter() for t in handlers]
    new_formatter_config = {}
    for t in formatter_config:
        new_formatter_config.update(t)

    handler_name_list = [t.get_formatter_name() for t in handlers]

    dict_config = {
        'version': 1,
        'disable_existing_loggers': disable_existing_loggers,
        'formatters': new_formatter_config,
        'handlers': new_handlers_config,
        'loggers': {
            '': {
                'handlers': handler_name_list,
                'level': 'DEBUG',
            }
        }
    }

    logging.config.dictConfig(dict_config)
def function[init_log, parameter[]]: constant[ :param handlers: :return: ] variable[disable_existing_loggers] assign[=] call[name[kwargs].get, parameter[constant[disable_existing_loggers], constant[False]]] variable[handlers_config] assign[=] <ast.ListComp object at 0x7da1b242fbb0> variable[new_handlers_config] assign[=] dictionary[[], []] for taget[name[t]] in starred[name[handlers_config]] begin[:] call[name[new_handlers_config].update, parameter[name[t]]] variable[formatter_config] assign[=] <ast.ListComp object at 0x7da1b242ec50> variable[new_formatter_config] assign[=] dictionary[[], []] for taget[name[t]] in starred[name[formatter_config]] begin[:] call[name[new_formatter_config].update, parameter[name[t]]] variable[handler_name_list] assign[=] <ast.ListComp object at 0x7da1b242c7f0> variable[dict_config] assign[=] dictionary[[<ast.Constant object at 0x7da1b242f130>, <ast.Constant object at 0x7da1b242e650>, <ast.Constant object at 0x7da1b242d6c0>, <ast.Constant object at 0x7da1b242e590>, <ast.Constant object at 0x7da1b242dab0>], [<ast.Constant object at 0x7da1b242e800>, <ast.Name object at 0x7da1b242f0d0>, <ast.Name object at 0x7da1b242c070>, <ast.Name object at 0x7da1b242c550>, <ast.Dict object at 0x7da1b242e770>]] call[name[logging].config.dictConfig, parameter[name[dict_config]]]
keyword[def] identifier[init_log] (* identifier[handlers] ,** identifier[kwargs] ): literal[string] identifier[disable_existing_loggers] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ) identifier[handlers_config] =[ identifier[t] . identifier[get_handler] () keyword[for] identifier[t] keyword[in] identifier[handlers] ] identifier[new_handlers_config] ={} keyword[for] identifier[t] keyword[in] identifier[handlers_config] : identifier[new_handlers_config] . identifier[update] ( identifier[t] ) identifier[formatter_config] =[ identifier[t] . identifier[get_formatter] () keyword[for] identifier[t] keyword[in] identifier[handlers] ] identifier[new_formatter_config] ={} keyword[for] identifier[t] keyword[in] identifier[formatter_config] : identifier[new_formatter_config] . identifier[update] ( identifier[t] ) identifier[handler_name_list] =[ identifier[t] . identifier[get_formatter_name] () keyword[for] identifier[t] keyword[in] identifier[handlers] ] identifier[dict_config] ={ literal[string] : literal[int] , literal[string] : identifier[disable_existing_loggers] , literal[string] : identifier[new_formatter_config] , literal[string] : identifier[new_handlers_config] , literal[string] :{ literal[string] :{ literal[string] : identifier[handler_name_list] , literal[string] : literal[string] , } } } identifier[logging] . identifier[config] . identifier[dictConfig] ( identifier[dict_config] )
def init_log(*handlers, **kwargs): """ :param handlers: :return: """ disable_existing_loggers = kwargs.get('disable_existing_loggers', False) handlers_config = [t.get_handler() for t in handlers] new_handlers_config = {} for t in handlers_config: new_handlers_config.update(t) # depends on [control=['for'], data=['t']] formatter_config = [t.get_formatter() for t in handlers] new_formatter_config = {} for t in formatter_config: new_formatter_config.update(t) # depends on [control=['for'], data=['t']] handler_name_list = [t.get_formatter_name() for t in handlers] dict_config = {'version': 1, 'disable_existing_loggers': disable_existing_loggers, 'formatters': new_formatter_config, 'handlers': new_handlers_config, 'loggers': {'': {'handlers': handler_name_list, 'level': 'DEBUG'}}} logging.config.dictConfig(dict_config)
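What the assembled `dict_config` looks like in practice: a self-contained sketch feeding `logging.config.dictConfig` a single console handler (the handler and formatter names here are illustrative, not the ones the real handler objects would produce):

import logging
import logging.config

dict_config = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {'format': '%(asctime)s %(levelname)s %(message)s'},
    },
    'handlers': {
        'console': {'class': 'logging.StreamHandler',
                    'formatter': 'simple'},
    },
    'loggers': {
        # Root logger, as in the sample's '' key.
        '': {'handlers': ['console'], 'level': 'DEBUG'},
    },
}

logging.config.dictConfig(dict_config)
logging.getLogger(__name__).debug('configured')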
def _parseCounters(self, data):
    """Parse simple stats list of key, value pairs.

    @param data: Multiline data with one key-value pair in each line.
    @return:     Dictionary of stats.
    """
    info_dict = util.NestedDict()
    for line in data.splitlines():
        mobj = re.match('^\s*([\w\.]+)\s*=\s*(\S.*)$', line)
        if mobj:
            (key, value) = mobj.groups()
            klist = key.split('.')
            info_dict.set_nested(klist, parse_value(value))
    return info_dict
def function[_parseCounters, parameter[self, data]]: constant[Parse simple stats list of key, value pairs. @param data: Multiline data with one key-value pair in each line. @return: Dictionary of stats. ] variable[info_dict] assign[=] call[name[util].NestedDict, parameter[]] for taget[name[line]] in starred[call[name[data].splitlines, parameter[]]] begin[:] variable[mobj] assign[=] call[name[re].match, parameter[constant[^\s*([\w\.]+)\s*=\s*(\S.*)$], name[line]]] if name[mobj] begin[:] <ast.Tuple object at 0x7da1b0fe8a00> assign[=] call[name[mobj].groups, parameter[]] variable[klist] assign[=] call[name[key].split, parameter[constant[.]]] call[name[info_dict].set_nested, parameter[name[klist], call[name[parse_value], parameter[name[value]]]]] return[name[info_dict]]
keyword[def] identifier[_parseCounters] ( identifier[self] , identifier[data] ): literal[string] identifier[info_dict] = identifier[util] . identifier[NestedDict] () keyword[for] identifier[line] keyword[in] identifier[data] . identifier[splitlines] (): identifier[mobj] = identifier[re] . identifier[match] ( literal[string] , identifier[line] ) keyword[if] identifier[mobj] : ( identifier[key] , identifier[value] )= identifier[mobj] . identifier[groups] () identifier[klist] = identifier[key] . identifier[split] ( literal[string] ) identifier[info_dict] . identifier[set_nested] ( identifier[klist] , identifier[parse_value] ( identifier[value] )) keyword[return] identifier[info_dict]
def _parseCounters(self, data): """Parse simple stats list of key, value pairs. @param data: Multiline data with one key-value pair in each line. @return: Dictionary of stats. """ info_dict = util.NestedDict() for line in data.splitlines(): mobj = re.match('^\\s*([\\w\\.]+)\\s*=\\s*(\\S.*)$', line) if mobj: (key, value) = mobj.groups() klist = key.split('.') info_dict.set_nested(klist, parse_value(value)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] return info_dict
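The same parse can be shown with a plain nested dict in place of `util.NestedDict`, and a naive `parse_value` that just tries int():

import re

def parse_counters(data):
    info = {}
    for line in data.splitlines():
        mobj = re.match(r'^\s*([\w.]+)\s*=\s*(\S.*)$', line)
        if not mobj:
            continue
        key, value = mobj.groups()
        # Walk/create nested dicts for the dotted key path.
        node = info
        klist = key.split('.')
        for part in klist[:-1]:
            node = node.setdefault(part, {})
        try:
            value = int(value)
        except ValueError:
            pass
        node[klist[-1]] = value
    return info

sample = "net.rx.bytes = 1024\nnet.tx.bytes = 2048\nstatus = up"
print(parse_counters(sample))
# {'net': {'rx': {'bytes': 1024}, 'tx': {'bytes': 2048}}, 'status': 'up'}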
def set_video_config(self, video_config):
    '''
    .. versionchanged:: 0.6.1
        Log video source process ID.
    '''
    self.video_config = video_config
    if video_config is None:
        self.disable_video()
        return

    py_exe = sys.executable
    port = self.canvas_slave.video_sink.socket_info['port']
    transport = self.canvas_slave.video_sink.socket_info['transport']
    host = (self.canvas_slave.video_sink.socket_info['host']
            .replace('*', 'localhost'))

    # Terminate existing process (if running).
    self.cleanup_video()

    # Launch new video source process using JSON serialized video
    # configuration.
    command = [py_exe, '-m', 'pygst_utils.video_view.video_source',
               'fromjson', '-p', str(port), transport, host,
               video_config.to_json()]
    logger.info(' '.join(command))
    self.video_source_process = sp.Popen(command)
    logger.info('Launched video source process: %s',
                self.video_source_process.pid)
    self.canvas_slave.enable()
def function[set_video_config, parameter[self, video_config]]: constant[ .. versionchanged:: 0.6.1 Log video source process ID. ] name[self].video_config assign[=] name[video_config] if compare[name[video_config] is constant[None]] begin[:] call[name[self].disable_video, parameter[]] return[None] variable[py_exe] assign[=] name[sys].executable variable[port] assign[=] call[name[self].canvas_slave.video_sink.socket_info][constant[port]] variable[transport] assign[=] call[name[self].canvas_slave.video_sink.socket_info][constant[transport]] variable[host] assign[=] call[call[name[self].canvas_slave.video_sink.socket_info][constant[host]].replace, parameter[constant[*], constant[localhost]]] call[name[self].cleanup_video, parameter[]] variable[command] assign[=] list[[<ast.Name object at 0x7da1b2717970>, <ast.Constant object at 0x7da1b2716a40>, <ast.Constant object at 0x7da1b2714d60>, <ast.Constant object at 0x7da1b2716230>, <ast.Constant object at 0x7da1b2715420>, <ast.Call object at 0x7da1b2716d40>, <ast.Name object at 0x7da1b27168c0>, <ast.Name object at 0x7da1b2716e60>, <ast.Call object at 0x7da1b2717850>]] call[name[logger].info, parameter[call[constant[ ].join, parameter[name[command]]]]] name[self].video_source_process assign[=] call[name[sp].Popen, parameter[name[command]]] call[name[logger].info, parameter[constant[Launched video source process: %s], name[self].video_source_process.pid]] call[name[self].canvas_slave.enable, parameter[]]
keyword[def] identifier[set_video_config] ( identifier[self] , identifier[video_config] ): literal[string] identifier[self] . identifier[video_config] = identifier[video_config] keyword[if] identifier[video_config] keyword[is] keyword[None] : identifier[self] . identifier[disable_video] () keyword[return] identifier[py_exe] = identifier[sys] . identifier[executable] identifier[port] = identifier[self] . identifier[canvas_slave] . identifier[video_sink] . identifier[socket_info] [ literal[string] ] identifier[transport] = identifier[self] . identifier[canvas_slave] . identifier[video_sink] . identifier[socket_info] [ literal[string] ] identifier[host] =( identifier[self] . identifier[canvas_slave] . identifier[video_sink] . identifier[socket_info] [ literal[string] ] . identifier[replace] ( literal[string] , literal[string] )) identifier[self] . identifier[cleanup_video] () identifier[command] =[ identifier[py_exe] , literal[string] , literal[string] , literal[string] , literal[string] , identifier[str] ( identifier[port] ), identifier[transport] , identifier[host] , identifier[video_config] . identifier[to_json] ()] identifier[logger] . identifier[info] ( literal[string] . identifier[join] ( identifier[command] )) identifier[self] . identifier[video_source_process] = identifier[sp] . identifier[Popen] ( identifier[command] ) identifier[logger] . identifier[info] ( literal[string] , identifier[self] . identifier[video_source_process] . identifier[pid] ) identifier[self] . identifier[canvas_slave] . identifier[enable] ()
def set_video_config(self, video_config): """ .. versionchanged:: 0.6.1 Log video source process ID. """ self.video_config = video_config if video_config is None: self.disable_video() return # depends on [control=['if'], data=[]] py_exe = sys.executable port = self.canvas_slave.video_sink.socket_info['port'] transport = self.canvas_slave.video_sink.socket_info['transport'] host = self.canvas_slave.video_sink.socket_info['host'].replace('*', 'localhost') # Terminate existing process (if running). self.cleanup_video() # Launch new video source process using JSON serialized video # configuration. command = [py_exe, '-m', 'pygst_utils.video_view.video_source', 'fromjson', '-p', str(port), transport, host, video_config.to_json()] logger.info(' '.join(command)) self.video_source_process = sp.Popen(command) logger.info('Launched video source process: %s', self.video_source_process.pid) self.canvas_slave.enable()
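The launch pattern here -- hand a child Python process its configuration as a JSON argument -- works in isolation too. A minimal sketch with an inline child script (the module path and flags of the real pygst_utils video source are omitted):

import json
import subprocess
import sys

config = {'host': 'localhost', 'port': 5000, 'transport': 'tcp'}

# The child parses the JSON config from argv, as the video source
# process above does with video_config.to_json().
child = ("import json, sys; "
         "cfg = json.loads(sys.argv[1]); "
         "print('source starting on %(host)s:%(port)d' % cfg)")

proc = subprocess.Popen([sys.executable, '-c', child, json.dumps(config)])
proc.wait()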
def _to_rest_includes(models, includes):
    """ Fetch the models to be included

    The includes should follow a few basic rules:

        * the include MUST not already be an array
          member of the included array (no dupes)

        * the include MUST not be the same as the primary
          data if the primary data is a single resource
          object (no dupes)

        * the include MUST not be an array member of the
          primary data if the primary data is an array of
          resource objects (no dupes)

    Basically, each included array member should be the only
    instance of that resource object in the entire restified
    data.
    """
    included = []
    includes = includes or []

    if not isinstance(models, list):
        models = [models]

    for include in includes:
        for model in models:
            rel = getattr(model, include)

            if hasattr(rel, 'model') and rel.model:
                rel_models = [rel.model]
            elif hasattr(rel, 'models') and rel.models:
                rel_models = rel.models
            else:
                # Guard against an empty relationship, which would
                # otherwise leave rel_models unbound (or stale from a
                # previous iteration).
                rel_models = []

            for rel_model in rel_models:
                if rel_model in models or rel_model in included:
                    continue
                else:
                    included.append(rel_model)

    for idx, val in enumerate(included):
        included[idx] = _to_rest(val)

    return included
def function[_to_rest_includes, parameter[models, includes]]: constant[ Fetch the models to be included The includes should follow a few basic rules: * the include MUST not already be an array member of the included array (no dupes) * the include MUST not be the same as the primary data if the primary data is a single resource object (no dupes) * the include MUST not be an array member of the primary data if the primary data an array of resource objects (no dupes) Basically, each included array member should be the only instance of that resource object in the entire restified data. ] variable[included] assign[=] list[[]] variable[includes] assign[=] <ast.BoolOp object at 0x7da20e9550c0> if <ast.UnaryOp object at 0x7da20e955fc0> begin[:] variable[models] assign[=] list[[<ast.Name object at 0x7da20e955120>]] for taget[name[include]] in starred[name[includes]] begin[:] for taget[name[model]] in starred[name[models]] begin[:] variable[rel] assign[=] call[name[getattr], parameter[name[model], name[include]]] if <ast.BoolOp object at 0x7da20e955b40> begin[:] variable[rel_models] assign[=] list[[<ast.Attribute object at 0x7da20e956530>]] for taget[name[rel_model]] in starred[name[rel_models]] begin[:] if <ast.BoolOp object at 0x7da1b14d1360> begin[:] continue for taget[tuple[[<ast.Name object at 0x7da1b14d2aa0>, <ast.Name object at 0x7da1b14d2a40>]]] in starred[call[name[enumerate], parameter[name[included]]]] begin[:] call[name[included]][name[idx]] assign[=] call[name[_to_rest], parameter[name[val]]] return[name[included]]
keyword[def] identifier[_to_rest_includes] ( identifier[models] , identifier[includes] ): literal[string] identifier[included] =[] identifier[includes] = identifier[includes] keyword[or] [] keyword[if] keyword[not] identifier[isinstance] ( identifier[models] , identifier[list] ): identifier[models] =[ identifier[models] ] keyword[for] identifier[include] keyword[in] identifier[includes] : keyword[for] identifier[model] keyword[in] identifier[models] : identifier[rel] = identifier[getattr] ( identifier[model] , identifier[include] ) keyword[if] identifier[hasattr] ( identifier[rel] , literal[string] ) keyword[and] identifier[rel] . identifier[model] : identifier[rel_models] =[ identifier[rel] . identifier[model] ] keyword[elif] identifier[hasattr] ( identifier[rel] , literal[string] ) keyword[and] identifier[rel] . identifier[models] : identifier[rel_models] = identifier[rel] . identifier[models] keyword[for] identifier[rel_model] keyword[in] identifier[rel_models] : keyword[if] identifier[rel_model] keyword[in] identifier[models] keyword[or] identifier[rel_model] keyword[in] identifier[included] : keyword[continue] keyword[else] : identifier[included] . identifier[append] ( identifier[rel_model] ) keyword[for] identifier[idx] , identifier[val] keyword[in] identifier[enumerate] ( identifier[included] ): identifier[included] [ identifier[idx] ]= identifier[_to_rest] ( identifier[val] ) keyword[return] identifier[included]
def _to_rest_includes(models, includes): """ Fetch the models to be included The includes should follow a few basic rules: * the include MUST not already be an array member of the included array (no dupes) * the include MUST not be the same as the primary data if the primary data is a single resource object (no dupes) * the include MUST not be an array member of the primary data if the primary data an array of resource objects (no dupes) Basically, each included array member should be the only instance of that resource object in the entire restified data. """ included = [] includes = includes or [] if not isinstance(models, list): models = [models] # depends on [control=['if'], data=[]] for include in includes: for model in models: rel = getattr(model, include) if hasattr(rel, 'model') and rel.model: rel_models = [rel.model] # depends on [control=['if'], data=[]] elif hasattr(rel, 'models') and rel.models: rel_models = rel.models # depends on [control=['if'], data=[]] for rel_model in rel_models: if rel_model in models or rel_model in included: continue # depends on [control=['if'], data=[]] else: included.append(rel_model) # depends on [control=['for'], data=['rel_model']] # depends on [control=['for'], data=['model']] # depends on [control=['for'], data=['include']] for (idx, val) in enumerate(included): included[idx] = _to_rest(val) # depends on [control=['for'], data=[]] return included
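The de-duplication rules can be checked with minimal stand-in objects; `Rel` and `Model` below are hypothetical stubs mirroring the relationship attributes the function reads, and the final `_to_rest` serialization step is skipped:

class Rel(object):
    def __init__(self, models):
        self.models = models

class Model(object):
    def __init__(self, name, **rels):
        self.name = name
        for attr, rel in rels.items():
            setattr(self, attr, rel)

shared = Model('shared')
a = Model('a', author=Rel([shared]))
b = Model('b', author=Rel([shared, a]))  # 'a' is primary data, 'shared' a dupe

primary = [a, b]
included = []
for include in ['author']:
    for model in primary:
        for rel_model in getattr(model, include).models:
            # Skip anything already in the primary data or already included.
            if rel_model in primary or rel_model in included:
                continue
            included.append(rel_model)

print([m.name for m in included])  # ['shared'] -- one instance per object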
def clear_line(mode=2):
    '''Clear the current line.

    Arguments:
        mode:  | 0 | 'forward'  | 'right'   - Clear cursor to end of line.
               | 1 | 'backward' | 'left'    - Clear cursor to beginning of line.
               | 2 | 'full'                 - Clear entire line.

    Note:
        Cursor position does not change.
    '''
    text = sc.erase_line(_mode_map.get(mode, mode))
    _write(text)
    return text
def function[clear_line, parameter[mode]]: constant[ Clear the current line. Arguments: mode: | 0 | 'forward' | 'right' - Clear cursor to end of line. | 1 | 'backward' | 'left' - Clear cursor to beginning of line. | 2 | 'full' - Clear entire line. Note: Cursor position does not change. ] variable[text] assign[=] call[name[sc].erase_line, parameter[call[name[_mode_map].get, parameter[name[mode], name[mode]]]]] call[name[_write], parameter[name[text]]] return[name[text]]
keyword[def] identifier[clear_line] ( identifier[mode] = literal[int] ): literal[string] identifier[text] = identifier[sc] . identifier[erase_line] ( identifier[_mode_map] . identifier[get] ( identifier[mode] , identifier[mode] )) identifier[_write] ( identifier[text] ) keyword[return] identifier[text]
def clear_line(mode=2): """ Clear the current line. Arguments: mode: | 0 | 'forward' | 'right' - Clear cursor to end of line. | 1 | 'backward' | 'left' - Clear cursor to beginning of line. | 2 | 'full' - Clear entire line. Note: Cursor position does not change. """ text = sc.erase_line(_mode_map.get(mode, mode)) _write(text) return text
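Under the hood these are the ANSI CSI "erase in line" sequences (ESC [ <mode> K); a dependency-free sketch of the same behaviour, with `sys.stdout.write` standing in for the sample's `_write`:

import sys

ERASE_LINE = '\x1b[{}K'  # 0 = cursor to end, 1 = start to cursor, 2 = full line
_mode_map = {'forward': 0, 'right': 0, 'backward': 1, 'left': 1, 'full': 2}

def clear_line(mode=2):
    text = ERASE_LINE.format(_mode_map.get(mode, mode))
    sys.stdout.write(text)
    return text

sys.stdout.write('hello world')
clear_line('full')            # erases the line; cursor stays in place
sys.stdout.write('\rdone\n')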
def media(self, uri):
    """Play a media file."""
    try:
        local_path, _ = urllib.request.urlretrieve(uri)
        metadata = mutagen.File(local_path, easy=True)
        if metadata.tags:
            self._tags = metadata.tags
        title = self._tags.get(TAG_TITLE, [])
        self._manager[ATTR_TITLE] = title[0] if len(title) else ''
        artist = self._tags.get(TAG_ARTIST, [])
        self._manager[ATTR_ARTIST] = artist[0] if len(artist) else ''
        album = self._tags.get(TAG_ALBUM, [])
        self._manager[ATTR_ALBUM] = album[0] if len(album) else ''
        local_uri = 'file://{}'.format(local_path)
    # urllib.error.HTTPError
    except Exception:  # pylint: disable=broad-except
        local_uri = uri
    self._player.set_state(Gst.State.NULL)
    self._player.set_property(PROP_URI, local_uri)
    self._player.set_state(Gst.State.PLAYING)
    self.state = STATE_PLAYING
    self._manager[ATTR_URI] = uri
    self._manager[ATTR_DURATION] = self._duration()
    self._manager[ATTR_VOLUME] = self._player.get_property(PROP_VOLUME)
    _LOGGER.info('playing %s (as %s)', uri, local_uri)
def function[media, parameter[self, uri]]: constant[Play a media file.] <ast.Try object at 0x7da2054a51e0> call[name[self]._player.set_state, parameter[name[Gst].State.NULL]] call[name[self]._player.set_property, parameter[name[PROP_URI], name[local_uri]]] call[name[self]._player.set_state, parameter[name[Gst].State.PLAYING]] name[self].state assign[=] name[STATE_PLAYING] call[name[self]._manager][name[ATTR_URI]] assign[=] name[uri] call[name[self]._manager][name[ATTR_DURATION]] assign[=] call[name[self]._duration, parameter[]] call[name[self]._manager][name[ATTR_VOLUME]] assign[=] call[name[self]._player.get_property, parameter[name[PROP_VOLUME]]] call[name[_LOGGER].info, parameter[constant[playing %s (as %s)], name[uri], name[local_uri]]]
keyword[def] identifier[media] ( identifier[self] , identifier[uri] ): literal[string] keyword[try] : identifier[local_path] , identifier[_] = identifier[urllib] . identifier[request] . identifier[urlretrieve] ( identifier[uri] ) identifier[metadata] = identifier[mutagen] . identifier[File] ( identifier[local_path] , identifier[easy] = keyword[True] ) keyword[if] identifier[metadata] . identifier[tags] : identifier[self] . identifier[_tags] = identifier[metadata] . identifier[tags] identifier[title] = identifier[self] . identifier[_tags] . identifier[get] ( identifier[TAG_TITLE] ,[]) identifier[self] . identifier[_manager] [ identifier[ATTR_TITLE] ]= identifier[title] [ literal[int] ] keyword[if] identifier[len] ( identifier[title] ) keyword[else] literal[string] identifier[artist] = identifier[self] . identifier[_tags] . identifier[get] ( identifier[TAG_ARTIST] ,[]) identifier[self] . identifier[_manager] [ identifier[ATTR_ARTIST] ]= identifier[artist] [ literal[int] ] keyword[if] identifier[len] ( identifier[artist] ) keyword[else] literal[string] identifier[album] = identifier[self] . identifier[_tags] . identifier[get] ( identifier[TAG_ALBUM] ,[]) identifier[self] . identifier[_manager] [ identifier[ATTR_ALBUM] ]= identifier[album] [ literal[int] ] keyword[if] identifier[len] ( identifier[album] ) keyword[else] literal[string] identifier[local_uri] = literal[string] . identifier[format] ( identifier[local_path] ) keyword[except] identifier[Exception] : identifier[local_uri] = identifier[uri] identifier[self] . identifier[_player] . identifier[set_state] ( identifier[Gst] . identifier[State] . identifier[NULL] ) identifier[self] . identifier[_player] . identifier[set_property] ( identifier[PROP_URI] , identifier[local_uri] ) identifier[self] . identifier[_player] . identifier[set_state] ( identifier[Gst] . identifier[State] . identifier[PLAYING] ) identifier[self] . identifier[state] = identifier[STATE_PLAYING] identifier[self] . identifier[_manager] [ identifier[ATTR_URI] ]= identifier[uri] identifier[self] . identifier[_manager] [ identifier[ATTR_DURATION] ]= identifier[self] . identifier[_duration] () identifier[self] . identifier[_manager] [ identifier[ATTR_VOLUME] ]= identifier[self] . identifier[_player] . identifier[get_property] ( identifier[PROP_VOLUME] ) identifier[_LOGGER] . identifier[info] ( literal[string] , identifier[uri] , identifier[local_uri] )
def media(self, uri): """Play a media file.""" try: (local_path, _) = urllib.request.urlretrieve(uri) metadata = mutagen.File(local_path, easy=True) if metadata.tags: self._tags = metadata.tags # depends on [control=['if'], data=[]] title = self._tags.get(TAG_TITLE, []) self._manager[ATTR_TITLE] = title[0] if len(title) else '' artist = self._tags.get(TAG_ARTIST, []) self._manager[ATTR_ARTIST] = artist[0] if len(artist) else '' album = self._tags.get(TAG_ALBUM, []) self._manager[ATTR_ALBUM] = album[0] if len(album) else '' local_uri = 'file://{}'.format(local_path) # depends on [control=['try'], data=[]] # urllib.error.HTTPError except Exception: # pylint: disable=broad-except local_uri = uri # depends on [control=['except'], data=[]] self._player.set_state(Gst.State.NULL) self._player.set_property(PROP_URI, local_uri) self._player.set_state(Gst.State.PLAYING) self.state = STATE_PLAYING self._manager[ATTR_URI] = uri self._manager[ATTR_DURATION] = self._duration() self._manager[ATTR_VOLUME] = self._player.get_property(PROP_VOLUME) _LOGGER.info('playing %s (as %s)', uri, local_uri)
def list_apps(self):
    """List the applications under the current account.

    Lists the applications whose owner is the current requesting party.

    Returns:
        a tuple of the form (<result>, <ResponseInfo>)
        - result        the application list on success, None on failure
        - ResponseInfo  the Response information for the request
    """
    url = '{0}/v3/apps'.format(self.host)
    return http._get_with_qiniu_mac(url, None, self.auth)
def function[list_apps, parameter[self]]: constant[获得当前账号的应用列表 列出所属应用为当前请求方的应用列表。 Returns: 返回一个tuple对象,其格式为(<result>, <ResponseInfo>) - result 成功返回应用列表,失败返回None - ResponseInfo 请求的Response信息 ] variable[url] assign[=] call[constant[{0}/v3/apps].format, parameter[name[self].host]] return[call[name[http]._get_with_qiniu_mac, parameter[name[url], constant[None], name[self].auth]]]
keyword[def] identifier[list_apps] ( identifier[self] ): literal[string] identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[host] ) keyword[return] identifier[http] . identifier[_get_with_qiniu_mac] ( identifier[url] , keyword[None] , identifier[self] . identifier[auth] )
def list_apps(self): """获得当前账号的应用列表 列出所属应用为当前请求方的应用列表。 Returns: 返回一个tuple对象,其格式为(<result>, <ResponseInfo>) - result 成功返回应用列表,失败返回None - ResponseInfo 请求的Response信息 """ url = '{0}/v3/apps'.format(self.host) return http._get_with_qiniu_mac(url, None, self.auth)
def delete_task(self, task_name):
    """ Deletes the named Task in this Job. """

    logger.debug('Deleting task {0}'.format(task_name))

    if not self.state.allow_change_graph:
        raise DagobahError("job's graph is immutable in its current state: %s"
                           % self.state.status)

    if task_name not in self.tasks:
        raise DagobahError('task %s does not exist' % task_name)

    self.tasks.pop(task_name)
    self.delete_node(task_name)
    self.commit()
def function[delete_task, parameter[self, task_name]]: constant[ Deletes the named Task in this Job. ] call[name[logger].debug, parameter[call[constant[Deleting task {0}].format, parameter[name[task_name]]]]] if <ast.UnaryOp object at 0x7da1b0bdae90> begin[:] <ast.Raise object at 0x7da1b0bd9f60> if compare[name[task_name] <ast.NotIn object at 0x7da2590d7190> name[self].tasks] begin[:] <ast.Raise object at 0x7da1b0cf79d0> call[name[self].tasks.pop, parameter[name[task_name]]] call[name[self].delete_node, parameter[name[task_name]]] call[name[self].commit, parameter[]]
keyword[def] identifier[delete_task] ( identifier[self] , identifier[task_name] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[task_name] )) keyword[if] keyword[not] identifier[self] . identifier[state] . identifier[allow_change_graph] : keyword[raise] identifier[DagobahError] ( literal[string] % identifier[self] . identifier[state] . identifier[status] ) keyword[if] identifier[task_name] keyword[not] keyword[in] identifier[self] . identifier[tasks] : keyword[raise] identifier[DagobahError] ( literal[string] % identifier[task_name] ) identifier[self] . identifier[tasks] . identifier[pop] ( identifier[task_name] ) identifier[self] . identifier[delete_node] ( identifier[task_name] ) identifier[self] . identifier[commit] ()
def delete_task(self, task_name): """ Deletes the named Task in this Job. """ logger.debug('Deleting task {0}'.format(task_name)) if not self.state.allow_change_graph: raise DagobahError("job's graph is immutable in its current state: %s" % self.state.status) # depends on [control=['if'], data=[]] if task_name not in self.tasks: raise DagobahError('task %s does not exist' % task_name) # depends on [control=['if'], data=['task_name']] self.tasks.pop(task_name) self.delete_node(task_name) self.commit()
def get_thermostability(self, at_temp):
    """Run the thermostability calculator using either the Dill or Oobatake methods.

    Stores calculated (dG, Keq) tuple in the ``annotations`` attribute,
    under the key `thermostability_<TEMP>-<METHOD_USED>`.

    See :func:`ssbio.protein.sequence.properties.thermostability.get_dG_at_T`
    for instructions and details.
    """
    import ssbio.protein.sequence.properties.thermostability as ts
    dG = ts.get_dG_at_T(seq=self, temp=at_temp)
    self.annotations['thermostability_{}_C-{}'.format(at_temp, dG[2].lower())] = (dG[0], dG[1])
def function[get_thermostability, parameter[self, at_temp]]: constant[Run the thermostability calculator using either the Dill or Oobatake methods. Stores calculated (dG, Keq) tuple in the ``annotations`` attribute, under the key `thermostability_<TEMP>-<METHOD_USED>`. See :func:`ssbio.protein.sequence.properties.thermostability.get_dG_at_T` for instructions and details. ] import module[ssbio.protein.sequence.properties.thermostability] as alias[ts] variable[dG] assign[=] call[name[ts].get_dG_at_T, parameter[]] call[name[self].annotations][call[constant[thermostability_{}_C-{}].format, parameter[name[at_temp], call[call[name[dG]][constant[2]].lower, parameter[]]]]] assign[=] tuple[[<ast.Subscript object at 0x7da204622da0>, <ast.Subscript object at 0x7da204621b70>]]
keyword[def] identifier[get_thermostability] ( identifier[self] , identifier[at_temp] ): literal[string] keyword[import] identifier[ssbio] . identifier[protein] . identifier[sequence] . identifier[properties] . identifier[thermostability] keyword[as] identifier[ts] identifier[dG] = identifier[ts] . identifier[get_dG_at_T] ( identifier[seq] = identifier[self] , identifier[temp] = identifier[at_temp] ) identifier[self] . identifier[annotations] [ literal[string] . identifier[format] ( identifier[at_temp] , identifier[dG] [ literal[int] ]. identifier[lower] ())]=( identifier[dG] [ literal[int] ], identifier[dG] [ literal[int] ])
def get_thermostability(self, at_temp): """Run the thermostability calculator using either the Dill or Oobatake methods. Stores calculated (dG, Keq) tuple in the ``annotations`` attribute, under the key `thermostability_<TEMP>-<METHOD_USED>`. See :func:`ssbio.protein.sequence.properties.thermostability.get_dG_at_T` for instructions and details. """ import ssbio.protein.sequence.properties.thermostability as ts dG = ts.get_dG_at_T(seq=self, temp=at_temp) self.annotations['thermostability_{}_C-{}'.format(at_temp, dG[2].lower())] = (dG[0], dG[1])
def from_array_list(required_type, result, list_level, is_builtin):
    """
    Tries to parse the `result` as type given in `required_type`, while traversing
    into lists as often as specified in `list_level`.

    :param required_type: What it should be parsed as
    :type  required_type: class

    :param result: The result to parse

    :param list_level: "list of" * list_level
    :type  list_level: int

    :param is_builtin: if it is a builtin python type like :class:`int`, :class:`bool`, etc.
    :type  is_builtin: bool

    :return: the result as `required_type` type
    """
    logger.debug("Trying parsing as {type}, list_level={list_level}, is_builtin={is_builtin}".format(
        type=required_type.__name__, list_level=list_level, is_builtin=is_builtin
    ))
    if list_level > 0:
        assert isinstance(result, (list, tuple))
        return [from_array_list(required_type, obj, list_level - 1, is_builtin) for obj in result]
    # end if

    if is_builtin:
        if isinstance(result, required_type):
            logger.debug("Already is correct type.")
            return required_type(result)
        elif issubclass(required_type, unicode_type):
            # handle str, so emojis work for py2.
            # (issubclass, not isinstance: required_type is a class, and the
            # intent is to check whether a string type was requested)
            return u(result)
        else:
            import ast
            logger.warn("Trying parsing with ast.literal_eval()...")
            return ast.literal_eval(str(result))  # raises ValueError if it could not parse
        # end if
    else:
        return required_type.from_array(result)
def function[from_array_list, parameter[required_type, result, list_level, is_builtin]]: constant[ Tries to parse the `result` as type given in `required_type`, while traversing into lists as often as specified in `list_level`. :param required_type: What it should be parsed as :type required_type: class :param result: The result to parse :param list_level: "list of" * list_level :type list_level: int :param is_builtin: if it is a builtin python type like :class:`int`, :class:`bool`, etc. :type is_builtin: bool :return: the result as `required_type` type ] call[name[logger].debug, parameter[call[constant[Trying parsing as {type}, list_level={list_level}, is_builtin={is_builtin}].format, parameter[]]]] if compare[name[list_level] greater[>] constant[0]] begin[:] assert[call[name[isinstance], parameter[name[result], tuple[[<ast.Name object at 0x7da1b0472b90>, <ast.Name object at 0x7da1b0472aa0>]]]]] return[<ast.ListComp object at 0x7da1b0473370>] if name[is_builtin] begin[:] if call[name[isinstance], parameter[name[result], name[required_type]]] begin[:] call[name[logger].debug, parameter[constant[Already is correct type.]]] return[call[name[required_type], parameter[name[result]]]]
keyword[def] identifier[from_array_list] ( identifier[required_type] , identifier[result] , identifier[list_level] , identifier[is_builtin] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[type] = identifier[required_type] . identifier[__name__] , identifier[list_level] = identifier[list_level] , identifier[is_builtin] = identifier[is_builtin] )) keyword[if] identifier[list_level] > literal[int] : keyword[assert] identifier[isinstance] ( identifier[result] ,( identifier[list] , identifier[tuple] )) keyword[return] [ identifier[from_array_list] ( identifier[required_type] , identifier[obj] , identifier[list_level] - literal[int] , identifier[is_builtin] ) keyword[for] identifier[obj] keyword[in] identifier[result] ] keyword[if] identifier[is_builtin] : keyword[if] identifier[isinstance] ( identifier[result] , identifier[required_type] ): identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] identifier[required_type] ( identifier[result] ) keyword[elif] identifier[isinstance] ( identifier[required_type] , identifier[unicode_type] ): keyword[return] identifier[u] ( identifier[result] ) keyword[else] : keyword[import] identifier[ast] identifier[logger] . identifier[warn] ( literal[string] ) keyword[return] identifier[ast] . identifier[literal_eval] ( identifier[str] ( identifier[result] )) keyword[else] : keyword[return] identifier[required_type] . identifier[from_array] ( identifier[result] )
def from_array_list(required_type, result, list_level, is_builtin): """ Tries to parse the `result` as type given in `required_type`, while traversing into lists as often as specified in `list_level`. :param required_type: What it should be parsed as :type required_type: class :param result: The result to parse :param list_level: "list of" * list_level :type list_level: int :param is_builtin: if it is a builtin python type like :class:`int`, :class:`bool`, etc. :type is_builtin: bool :return: the result as `required_type` type """ logger.debug('Trying parsing as {type}, list_level={list_level}, is_builtin={is_builtin}'.format(type=required_type.__name__, list_level=list_level, is_builtin=is_builtin)) if list_level > 0: assert isinstance(result, (list, tuple)) return [from_array_list(required_type, obj, list_level - 1, is_builtin) for obj in result] # depends on [control=['if'], data=['list_level']] # end if if is_builtin: if isinstance(result, required_type): logger.debug('Already is correct type.') return required_type(result) # depends on [control=['if'], data=[]] elif isinstance(required_type, unicode_type): # handle str, so emojis work for py2. return u(result) # depends on [control=['if'], data=[]] else: import ast logger.warn('Trying parsing with ast.literal_eval()...') return ast.literal_eval(str(result)) # raises ValueError if it could not parse # depends on [control=['if'], data=[]] else: # end if return required_type.from_array(result)
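The list_level parameter drives a simple recursive descent: each level peels one layer of list nesting before the scalar conversion applies. A minimal self-contained sketch of just that recursion (parse_nested is an illustrative name, not the library's API):

def parse_nested(required_type, result, list_level):
    # Peel one list layer per recursion step, then convert the scalar.
    if list_level > 0:
        return [parse_nested(required_type, obj, list_level - 1) for obj in result]
    return required_type(result)

assert parse_nested(int, [["1", "2"], ["3"]], 2) == [[1, 2], [3]]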
def btc_tx_sighash_segwit(tx, i, prevout_amount, prevout_script, hashcode=SIGHASH_ALL): """ Calculate the sighash for a segwit transaction, according to bip143 """ txobj = btc_tx_deserialize(tx) hash_prevouts = encoding.encode(0, 256, 32) hash_sequence = encoding.encode(0, 256, 32) hash_outputs = encoding.encode(0, 256, 32) if (hashcode & SIGHASH_ANYONECANPAY) == 0: prevouts = '' for inp in txobj['ins']: prevouts += hashing.reverse_hash(inp['outpoint']['hash']) prevouts += encoding.encode(inp['outpoint']['index'], 256, 4)[::-1].encode('hex') hash_prevouts = hashing.bin_double_sha256(prevouts.decode('hex')) # print 'prevouts: {}'.format(prevouts) if (hashcode & SIGHASH_ANYONECANPAY) == 0 and (hashcode & 0x1f) != SIGHASH_SINGLE and (hashcode & 0x1f) != SIGHASH_NONE: sequences = '' for inp in txobj['ins']: sequences += encoding.encode(inp['sequence'], 256, 4)[::-1].encode('hex') hash_sequence = hashing.bin_double_sha256(sequences.decode('hex')) # print 'sequences: {}'.format(sequences) if (hashcode & 0x1f) != SIGHASH_SINGLE and (hashcode & 0x1f) != SIGHASH_NONE: outputs = '' for out in txobj['outs']: outputs += encoding.encode(out['value'], 256, 8)[::-1].encode('hex') outputs += make_var_string(out['script']) hash_outputs = hashing.bin_double_sha256(outputs.decode('hex')) # print 'outputs: {}'.format(outputs) elif (hashcode & 0x1f) == SIGHASH_SINGLE and i < len(txobj['outs']): outputs = '' outputs += encoding.encode(txobj['outs'][i]['value'], 256, 8)[::-1].encode('hex') outputs += make_var_string(txobj['outs'][i]['script']) hash_outputs = hashing.bin_double_sha256(outputs.decode('hex')) # print 'outputs: {}'.format(outputs) # print 'hash_prevouts: {}'.format(hash_prevouts.encode('hex')) # print 'hash_sequence: {}'.format(hash_sequence.encode('hex')) # print 'hash_outputs: {}'.format(hash_outputs.encode('hex')) # print 'prevout_script: {}'.format(prevout_script) # print 'prevout_amount: {}'.format(prevout_amount) sighash_preimage = '' sighash_preimage += encoding.encode(txobj['version'], 256, 4)[::-1].encode('hex') sighash_preimage += hash_prevouts.encode('hex') sighash_preimage += hash_sequence.encode('hex') # this input's prevout, script, amount, and sequence sighash_preimage += hashing.reverse_hash(txobj['ins'][i]['outpoint']['hash']) sighash_preimage += encoding.encode(txobj['ins'][i]['outpoint']['index'], 256, 4)[::-1].encode('hex') sighash_preimage += make_var_string(prevout_script) sighash_preimage += encoding.encode(prevout_amount, 256, 8)[::-1].encode('hex') sighash_preimage += encoding.encode(txobj['ins'][i]['sequence'], 256, 4)[::-1].encode('hex') sighash_preimage += hash_outputs.encode('hex') sighash_preimage += encoding.encode(txobj['locktime'], 256, 4)[::-1].encode('hex') sighash_preimage += encoding.encode(hashcode, 256, 4)[::-1].encode('hex') sighash = hashing.bin_double_sha256(sighash_preimage.decode('hex')).encode('hex') # print 'sighash_preimage: {}'.format(sighash_preimage) # print 'sighash: {}'.format(sighash) return sighash
def function[btc_tx_sighash_segwit, parameter[tx, i, prevout_amount, prevout_script, hashcode]]: constant[ Calculate the sighash for a segwit transaction, according to bip143 ] variable[txobj] assign[=] call[name[btc_tx_deserialize], parameter[name[tx]]] variable[hash_prevouts] assign[=] call[name[encoding].encode, parameter[constant[0], constant[256], constant[32]]] variable[hash_sequence] assign[=] call[name[encoding].encode, parameter[constant[0], constant[256], constant[32]]] variable[hash_outputs] assign[=] call[name[encoding].encode, parameter[constant[0], constant[256], constant[32]]] if compare[binary_operation[name[hashcode] <ast.BitAnd object at 0x7da2590d6b60> name[SIGHASH_ANYONECANPAY]] equal[==] constant[0]] begin[:] variable[prevouts] assign[=] constant[] for taget[name[inp]] in starred[call[name[txobj]][constant[ins]]] begin[:] <ast.AugAssign object at 0x7da1b26ca560> <ast.AugAssign object at 0x7da1b26c9fc0> variable[hash_prevouts] assign[=] call[name[hashing].bin_double_sha256, parameter[call[name[prevouts].decode, parameter[constant[hex]]]]] if <ast.BoolOp object at 0x7da1b26c9d20> begin[:] variable[sequences] assign[=] constant[] for taget[name[inp]] in starred[call[name[txobj]][constant[ins]]] begin[:] <ast.AugAssign object at 0x7da1b26c98a0> variable[hash_sequence] assign[=] call[name[hashing].bin_double_sha256, parameter[call[name[sequences].decode, parameter[constant[hex]]]]] if <ast.BoolOp object at 0x7da1b26cabf0> begin[:] variable[outputs] assign[=] constant[] for taget[name[out]] in starred[call[name[txobj]][constant[outs]]] begin[:] <ast.AugAssign object at 0x7da1b26ca1d0> <ast.AugAssign object at 0x7da1b26cb490> variable[hash_outputs] assign[=] call[name[hashing].bin_double_sha256, parameter[call[name[outputs].decode, parameter[constant[hex]]]]] variable[sighash_preimage] assign[=] constant[] <ast.AugAssign object at 0x7da1b26c89a0> <ast.AugAssign object at 0x7da1b26c98d0> <ast.AugAssign object at 0x7da1b26c92d0> <ast.AugAssign object at 0x7da1b26ca080> <ast.AugAssign object at 0x7da1b28d7040> <ast.AugAssign object at 0x7da1b28d6530> <ast.AugAssign object at 0x7da1b28d4d60> <ast.AugAssign object at 0x7da1b28d4af0> <ast.AugAssign object at 0x7da1b28d7f40> <ast.AugAssign object at 0x7da1b28d6470> <ast.AugAssign object at 0x7da1b28d6740> variable[sighash] assign[=] call[call[name[hashing].bin_double_sha256, parameter[call[name[sighash_preimage].decode, parameter[constant[hex]]]]].encode, parameter[constant[hex]]] return[name[sighash]]
keyword[def] identifier[btc_tx_sighash_segwit] ( identifier[tx] , identifier[i] , identifier[prevout_amount] , identifier[prevout_script] , identifier[hashcode] = identifier[SIGHASH_ALL] ): literal[string] identifier[txobj] = identifier[btc_tx_deserialize] ( identifier[tx] ) identifier[hash_prevouts] = identifier[encoding] . identifier[encode] ( literal[int] , literal[int] , literal[int] ) identifier[hash_sequence] = identifier[encoding] . identifier[encode] ( literal[int] , literal[int] , literal[int] ) identifier[hash_outputs] = identifier[encoding] . identifier[encode] ( literal[int] , literal[int] , literal[int] ) keyword[if] ( identifier[hashcode] & identifier[SIGHASH_ANYONECANPAY] )== literal[int] : identifier[prevouts] = literal[string] keyword[for] identifier[inp] keyword[in] identifier[txobj] [ literal[string] ]: identifier[prevouts] += identifier[hashing] . identifier[reverse_hash] ( identifier[inp] [ literal[string] ][ literal[string] ]) identifier[prevouts] += identifier[encoding] . identifier[encode] ( identifier[inp] [ literal[string] ][ literal[string] ], literal[int] , literal[int] )[::- literal[int] ]. identifier[encode] ( literal[string] ) identifier[hash_prevouts] = identifier[hashing] . identifier[bin_double_sha256] ( identifier[prevouts] . identifier[decode] ( literal[string] )) keyword[if] ( identifier[hashcode] & identifier[SIGHASH_ANYONECANPAY] )== literal[int] keyword[and] ( identifier[hashcode] & literal[int] )!= identifier[SIGHASH_SINGLE] keyword[and] ( identifier[hashcode] & literal[int] )!= identifier[SIGHASH_NONE] : identifier[sequences] = literal[string] keyword[for] identifier[inp] keyword[in] identifier[txobj] [ literal[string] ]: identifier[sequences] += identifier[encoding] . identifier[encode] ( identifier[inp] [ literal[string] ], literal[int] , literal[int] )[::- literal[int] ]. identifier[encode] ( literal[string] ) identifier[hash_sequence] = identifier[hashing] . identifier[bin_double_sha256] ( identifier[sequences] . identifier[decode] ( literal[string] )) keyword[if] ( identifier[hashcode] & literal[int] )!= identifier[SIGHASH_SINGLE] keyword[and] ( identifier[hashcode] & literal[int] )!= identifier[SIGHASH_NONE] : identifier[outputs] = literal[string] keyword[for] identifier[out] keyword[in] identifier[txobj] [ literal[string] ]: identifier[outputs] += identifier[encoding] . identifier[encode] ( identifier[out] [ literal[string] ], literal[int] , literal[int] )[::- literal[int] ]. identifier[encode] ( literal[string] ) identifier[outputs] += identifier[make_var_string] ( identifier[out] [ literal[string] ]) identifier[hash_outputs] = identifier[hashing] . identifier[bin_double_sha256] ( identifier[outputs] . identifier[decode] ( literal[string] )) keyword[elif] ( identifier[hashcode] & literal[int] )== identifier[SIGHASH_SINGLE] keyword[and] identifier[i] < identifier[len] ( identifier[txobj] [ literal[string] ]): identifier[outputs] = literal[string] identifier[outputs] += identifier[encoding] . identifier[encode] ( identifier[txobj] [ literal[string] ][ identifier[i] ][ literal[string] ], literal[int] , literal[int] )[::- literal[int] ]. identifier[encode] ( literal[string] ) identifier[outputs] += identifier[make_var_string] ( identifier[txobj] [ literal[string] ][ identifier[i] ][ literal[string] ]) identifier[hash_outputs] = identifier[hashing] . identifier[bin_double_sha256] ( identifier[outputs] . identifier[decode] ( literal[string] )) identifier[sighash_preimage] = literal[string] identifier[sighash_preimage] += identifier[encoding] . identifier[encode] ( identifier[txobj] [ literal[string] ], literal[int] , literal[int] )[::- literal[int] ]. identifier[encode] ( literal[string] ) identifier[sighash_preimage] += identifier[hash_prevouts] . identifier[encode] ( literal[string] ) identifier[sighash_preimage] += identifier[hash_sequence] . identifier[encode] ( literal[string] ) identifier[sighash_preimage] += identifier[hashing] . identifier[reverse_hash] ( identifier[txobj] [ literal[string] ][ identifier[i] ][ literal[string] ][ literal[string] ]) identifier[sighash_preimage] += identifier[encoding] . identifier[encode] ( identifier[txobj] [ literal[string] ][ identifier[i] ][ literal[string] ][ literal[string] ], literal[int] , literal[int] )[::- literal[int] ]. identifier[encode] ( literal[string] ) identifier[sighash_preimage] += identifier[make_var_string] ( identifier[prevout_script] ) identifier[sighash_preimage] += identifier[encoding] . identifier[encode] ( identifier[prevout_amount] , literal[int] , literal[int] )[::- literal[int] ]. identifier[encode] ( literal[string] ) identifier[sighash_preimage] += identifier[encoding] . identifier[encode] ( identifier[txobj] [ literal[string] ][ identifier[i] ][ literal[string] ], literal[int] , literal[int] )[::- literal[int] ]. identifier[encode] ( literal[string] ) identifier[sighash_preimage] += identifier[hash_outputs] . identifier[encode] ( literal[string] ) identifier[sighash_preimage] += identifier[encoding] . identifier[encode] ( identifier[txobj] [ literal[string] ], literal[int] , literal[int] )[::- literal[int] ]. identifier[encode] ( literal[string] ) identifier[sighash_preimage] += identifier[encoding] . identifier[encode] ( identifier[hashcode] , literal[int] , literal[int] )[::- literal[int] ]. identifier[encode] ( literal[string] ) identifier[sighash] = identifier[hashing] . identifier[bin_double_sha256] ( identifier[sighash_preimage] . identifier[decode] ( literal[string] )). identifier[encode] ( literal[string] ) keyword[return] identifier[sighash]
def btc_tx_sighash_segwit(tx, i, prevout_amount, prevout_script, hashcode=SIGHASH_ALL): """ Calculate the sighash for a segwit transaction, according to bip143 """ txobj = btc_tx_deserialize(tx) hash_prevouts = encoding.encode(0, 256, 32) hash_sequence = encoding.encode(0, 256, 32) hash_outputs = encoding.encode(0, 256, 32) if hashcode & SIGHASH_ANYONECANPAY == 0: prevouts = '' for inp in txobj['ins']: prevouts += hashing.reverse_hash(inp['outpoint']['hash']) prevouts += encoding.encode(inp['outpoint']['index'], 256, 4)[::-1].encode('hex') # depends on [control=['for'], data=['inp']] hash_prevouts = hashing.bin_double_sha256(prevouts.decode('hex')) # depends on [control=['if'], data=[]] # print 'prevouts: {}'.format(prevouts) if hashcode & SIGHASH_ANYONECANPAY == 0 and hashcode & 31 != SIGHASH_SINGLE and (hashcode & 31 != SIGHASH_NONE): sequences = '' for inp in txobj['ins']: sequences += encoding.encode(inp['sequence'], 256, 4)[::-1].encode('hex') # depends on [control=['for'], data=['inp']] hash_sequence = hashing.bin_double_sha256(sequences.decode('hex')) # depends on [control=['if'], data=[]] # print 'sequences: {}'.format(sequences) if hashcode & 31 != SIGHASH_SINGLE and hashcode & 31 != SIGHASH_NONE: outputs = '' for out in txobj['outs']: outputs += encoding.encode(out['value'], 256, 8)[::-1].encode('hex') outputs += make_var_string(out['script']) # depends on [control=['for'], data=['out']] hash_outputs = hashing.bin_double_sha256(outputs.decode('hex')) # depends on [control=['if'], data=[]] # print 'outputs: {}'.format(outputs) elif hashcode & 31 == SIGHASH_SINGLE and i < len(txobj['outs']): outputs = '' outputs += encoding.encode(txobj['outs'][i]['value'], 256, 8)[::-1].encode('hex') outputs += make_var_string(txobj['outs'][i]['script']) hash_outputs = hashing.bin_double_sha256(outputs.decode('hex')) # depends on [control=['if'], data=[]] # print 'outputs: {}'.format(outputs) # print 'hash_prevouts: {}'.format(hash_prevouts.encode('hex')) # print 'hash_sequence: {}'.format(hash_sequence.encode('hex')) # print 'hash_outputs: {}'.format(hash_outputs.encode('hex')) # print 'prevout_script: {}'.format(prevout_script) # print 'prevout_amount: {}'.format(prevout_amount) sighash_preimage = '' sighash_preimage += encoding.encode(txobj['version'], 256, 4)[::-1].encode('hex') sighash_preimage += hash_prevouts.encode('hex') sighash_preimage += hash_sequence.encode('hex') # this input's prevout, script, amount, and sequence sighash_preimage += hashing.reverse_hash(txobj['ins'][i]['outpoint']['hash']) sighash_preimage += encoding.encode(txobj['ins'][i]['outpoint']['index'], 256, 4)[::-1].encode('hex') sighash_preimage += make_var_string(prevout_script) sighash_preimage += encoding.encode(prevout_amount, 256, 8)[::-1].encode('hex') sighash_preimage += encoding.encode(txobj['ins'][i]['sequence'], 256, 4)[::-1].encode('hex') sighash_preimage += hash_outputs.encode('hex') sighash_preimage += encoding.encode(txobj['locktime'], 256, 4)[::-1].encode('hex') sighash_preimage += encoding.encode(hashcode, 256, 4)[::-1].encode('hex') sighash = hashing.bin_double_sha256(sighash_preimage.decode('hex')).encode('hex') # print 'sighash_preimage: {}'.format(sighash_preimage) # print 'sighash: {}'.format(sighash) return sighash
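For the non-ANYONECANPAY case above, hashPrevouts is the double SHA-256 over every input's reversed txid followed by its output index as a little-endian uint32, matching BIP143. A hashlib-only sketch of that one step (the (txid_hex, vout) input shape is an assumption made for illustration):

import hashlib
import struct

def dsha256(data):
    # Bitcoin's double SHA-256.
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def hash_prevouts(inputs):
    # txid in little-endian byte order, then the vout as little-endian uint32,
    # mirroring reverse_hash(...) + encode(index, 256, 4)[::-1] in the loop above.
    data = b"".join(bytes.fromhex(txid)[::-1] + struct.pack("<I", vout)
                    for txid, vout in inputs)
    return dsha256(data)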
def run_job(self, job_id, array_id = None): """Overwrites the run-job command from the manager to extract the correct job id before calling base class implementation.""" # get the unique job id from the given grid id self.lock() jobs = list(self.session.query(Job).filter(Job.id == job_id)) if len(jobs) != 1: self.unlock() raise ValueError("Could not find job id '%d' in the database'" % job_id) job_id = jobs[0].unique self.unlock() # call base class implementation with the corrected job id return JobManager.run_job(self, job_id, array_id)
def function[run_job, parameter[self, job_id, array_id]]: constant[Overwrites the run-job command from the manager to extract the correct job id before calling base class implementation.] call[name[self].lock, parameter[]] variable[jobs] assign[=] call[name[list], parameter[call[call[name[self].session.query, parameter[name[Job]]].filter, parameter[compare[name[Job].id equal[==] name[job_id]]]]]] if compare[call[name[len], parameter[name[jobs]]] not_equal[!=] constant[1]] begin[:] call[name[self].unlock, parameter[]] <ast.Raise object at 0x7da2054a7190> variable[job_id] assign[=] call[name[jobs]][constant[0]].unique call[name[self].unlock, parameter[]] return[call[name[JobManager].run_job, parameter[name[self], name[job_id], name[array_id]]]]
keyword[def] identifier[run_job] ( identifier[self] , identifier[job_id] , identifier[array_id] = keyword[None] ): literal[string] identifier[self] . identifier[lock] () identifier[jobs] = identifier[list] ( identifier[self] . identifier[session] . identifier[query] ( identifier[Job] ). identifier[filter] ( identifier[Job] . identifier[id] == identifier[job_id] )) keyword[if] identifier[len] ( identifier[jobs] )!= literal[int] : identifier[self] . identifier[unlock] () keyword[raise] identifier[ValueError] ( literal[string] % identifier[job_id] ) identifier[job_id] = identifier[jobs] [ literal[int] ]. identifier[unique] identifier[self] . identifier[unlock] () keyword[return] identifier[JobManager] . identifier[run_job] ( identifier[self] , identifier[job_id] , identifier[array_id] )
def run_job(self, job_id, array_id=None): """Overwrites the run-job command from the manager to extract the correct job id before calling base class implementation.""" # get the unique job id from the given grid id self.lock() jobs = list(self.session.query(Job).filter(Job.id == job_id)) if len(jobs) != 1: self.unlock() raise ValueError("Could not find job id '%d' in the database'" % job_id) # depends on [control=['if'], data=[]] job_id = jobs[0].unique self.unlock() # call base class implementation with the corrected job id return JobManager.run_job(self, job_id, array_id)
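The len(jobs) != 1 guard is the classic "expect exactly one row" pattern. With SQLAlchemy the same guarantee is available from Query.one(), which raises NoResultFound or MultipleResultsFound instead of the manual check (a sketch reusing the session and Job model from above):

job = self.session.query(Job).filter(Job.id == job_id).one()
job_id = job.unique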
def _get_path_pattern_tornado45(self, router=None): """Return the path pattern used when routing a request. (Tornado>=4.5) :param tornado.routing.Router router: (Optional) The router to scan. Defaults to the application's router. :rtype: str """ if router is None: router = self.application.default_router for rule in router.rules: if rule.matcher.match(self.request) is not None: if isinstance(rule.matcher, routing.PathMatches): return rule.matcher.regex.pattern elif isinstance(rule.target, routing.Router): return self._get_path_pattern_tornado45(rule.target)
def function[_get_path_pattern_tornado45, parameter[self, router]]: constant[Return the path pattern used when routing a request. (Tornado>=4.5) :param tornado.routing.Router router: (Optional) The router to scan. Defaults to the application's router. :rtype: str ] if compare[name[router] is constant[None]] begin[:] variable[router] assign[=] name[self].application.default_router for taget[name[rule]] in starred[name[router].rules] begin[:] if compare[call[name[rule].matcher.match, parameter[name[self].request]] is_not constant[None]] begin[:] if call[name[isinstance], parameter[name[rule].matcher, name[routing].PathMatches]] begin[:] return[name[rule].matcher.regex.pattern]
keyword[def] identifier[_get_path_pattern_tornado45] ( identifier[self] , identifier[router] = keyword[None] ): literal[string] keyword[if] identifier[router] keyword[is] keyword[None] : identifier[router] = identifier[self] . identifier[application] . identifier[default_router] keyword[for] identifier[rule] keyword[in] identifier[router] . identifier[rules] : keyword[if] identifier[rule] . identifier[matcher] . identifier[match] ( identifier[self] . identifier[request] ) keyword[is] keyword[not] keyword[None] : keyword[if] identifier[isinstance] ( identifier[rule] . identifier[matcher] , identifier[routing] . identifier[PathMatches] ): keyword[return] identifier[rule] . identifier[matcher] . identifier[regex] . identifier[pattern] keyword[elif] identifier[isinstance] ( identifier[rule] . identifier[target] , identifier[routing] . identifier[Router] ): keyword[return] identifier[self] . identifier[_get_path_pattern_tornado45] ( identifier[rule] . identifier[target] )
def _get_path_pattern_tornado45(self, router=None): """Return the path pattern used when routing a request. (Tornado>=4.5) :param tornado.routing.Router router: (Optional) The router to scan. Defaults to the application's router. :rtype: str """ if router is None: router = self.application.default_router # depends on [control=['if'], data=['router']] for rule in router.rules: if rule.matcher.match(self.request) is not None: if isinstance(rule.matcher, routing.PathMatches): return rule.matcher.regex.pattern # depends on [control=['if'], data=[]] elif isinstance(rule.target, routing.Router): return self._get_path_pattern_tornado45(rule.target) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rule']]
def dumpf(obj, path, encoding=None): """Serialize obj to path in ARPA format (.arpa, .gz).""" path = str(path) if path.endswith('.gz'): with gzip.open(path, mode='wt', encoding=encoding) as f: return dump(obj, f) else: with open(path, mode='wt', encoding=encoding) as f: dump(obj, f)
def function[dumpf, parameter[obj, path, encoding]]: constant[Serialize obj to path in ARPA format (.arpa, .gz).] variable[path] assign[=] call[name[str], parameter[name[path]]] if call[name[path].endswith, parameter[constant[.gz]]] begin[:] with call[name[gzip].open, parameter[name[path]]] begin[:] return[call[name[dump], parameter[name[obj], name[f]]]]
keyword[def] identifier[dumpf] ( identifier[obj] , identifier[path] , identifier[encoding] = keyword[None] ): literal[string] identifier[path] = identifier[str] ( identifier[path] ) keyword[if] identifier[path] . identifier[endswith] ( literal[string] ): keyword[with] identifier[gzip] . identifier[open] ( identifier[path] , identifier[mode] = literal[string] , identifier[encoding] = identifier[encoding] ) keyword[as] identifier[f] : keyword[return] identifier[dump] ( identifier[obj] , identifier[f] ) keyword[else] : keyword[with] identifier[open] ( identifier[path] , identifier[mode] = literal[string] , identifier[encoding] = identifier[encoding] ) keyword[as] identifier[f] : identifier[dump] ( identifier[obj] , identifier[f] )
def dumpf(obj, path, encoding=None): """Serialize obj to path in ARPA format (.arpa, .gz).""" path = str(path) if path.endswith('.gz'): with gzip.open(path, mode='wt', encoding=encoding) as f: return dump(obj, f) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]] else: with open(path, mode='wt', encoding=encoding) as f: dump(obj, f) # depends on [control=['with'], data=['f']]
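Usage sketch for the suffix-based dispatch; the loadf counterpart reader is an assumption here:

lm = loadf("model.arpa")        # assumed counterpart reader
dumpf(lm, "model.arpa.gz")      # a ".gz" suffix selects gzip-compressed output
dumpf(lm, "model.arpa")         # any other path is written as plain text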
def split_date(value): """ This method splits a date in a tuple. value: valid iso date ex: 2016-01-31: ('2016','01','01') 2016-01: ('2016','01','') 2016: ('2016','','') """ if not is_valid_date(value): return ('', '', '') splited = value.split('-') try: year = splited[0] except IndexError: year = '' try: month = splited[1] except IndexError: month = '' try: day = splited[2] except IndexError: day = '' return (year, month, day)
def function[split_date, parameter[value]]: constant[ This method splits a date in a tuple. value: valid iso date ex: 2016-01-31: ('2016','01','01') 2016-01: ('2016','01','') 2016: ('2016','','') ] if <ast.UnaryOp object at 0x7da204962e30> begin[:] return[tuple[[<ast.Constant object at 0x7da204961bd0>, <ast.Constant object at 0x7da2049615a0>, <ast.Constant object at 0x7da204961cc0>]]] variable[splited] assign[=] call[name[value].split, parameter[constant[-]]] <ast.Try object at 0x7da204961840> <ast.Try object at 0x7da1b0b39de0> <ast.Try object at 0x7da1b0b3b430> return[tuple[[<ast.Name object at 0x7da1b0b3b7c0>, <ast.Name object at 0x7da1b0b3a800>, <ast.Name object at 0x7da1b0b3b850>]]]
keyword[def] identifier[split_date] ( identifier[value] ): literal[string] keyword[if] keyword[not] identifier[is_valid_date] ( identifier[value] ): keyword[return] ( literal[string] , literal[string] , literal[string] ) identifier[splited] = identifier[value] . identifier[split] ( literal[string] ) keyword[try] : identifier[year] = identifier[splited] [ literal[int] ] keyword[except] identifier[IndexError] : identifier[year] = literal[string] keyword[try] : identifier[month] = identifier[splited] [ literal[int] ] keyword[except] identifier[IndexError] : identifier[month] = literal[string] keyword[try] : identifier[day] = identifier[splited] [ literal[int] ] keyword[except] identifier[IndexError] : identifier[day] = literal[string] keyword[return] ( identifier[year] , identifier[month] , identifier[day] )
def split_date(value): """ This method splits a date in a tuple. value: valid iso date ex: 2016-01-31: ('2016','01','01') 2016-01: ('2016','01','') 2016: ('2016','','') """ if not is_valid_date(value): return ('', '', '') # depends on [control=['if'], data=[]] splited = value.split('-') try: year = splited[0] # depends on [control=['try'], data=[]] except IndexError: year = '' # depends on [control=['except'], data=[]] try: month = splited[1] # depends on [control=['try'], data=[]] except IndexError: month = '' # depends on [control=['except'], data=[]] try: day = splited[2] # depends on [control=['try'], data=[]] except IndexError: day = '' # depends on [control=['except'], data=[]] return (year, month, day)
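Expected behavior, assuming is_valid_date() accepts these ISO-style strings:

split_date("2016-01-31")   # -> ('2016', '01', '31')
split_date("2016-01")      # -> ('2016', '01', '')
split_date("2016")         # -> ('2016', '', '')
split_date("31/01/2016")   # -> ('', '', '') for anything that fails validation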
def enable_cloud_password( self, password: str, hint: str = "", email: str = None ) -> bool: """Use this method to enable the Two-Step Verification security feature (Cloud Password) on your account. This password will be asked when you log-in on a new device in addition to the SMS code. Args: password (``str``): Your password. hint (``str``, *optional*): A password hint. email (``str``, *optional*): Recovery e-mail. Returns: True on success. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. ``ValueError`` in case there is already a cloud password enabled. """ r = self.send(functions.account.GetPassword()) if r.has_password: raise ValueError("There is already a cloud password enabled") r.new_algo.salt1 += os.urandom(32) new_hash = btoi(compute_hash(r.new_algo, password)) new_hash = itob(pow(r.new_algo.g, new_hash, btoi(r.new_algo.p))) self.send( functions.account.UpdatePasswordSettings( password=types.InputCheckPasswordEmpty(), new_settings=types.account.PasswordInputSettings( new_algo=r.new_algo, new_password_hash=new_hash, hint=hint, email=email ) ) ) return True
def function[enable_cloud_password, parameter[self, password, hint, email]]: constant[Use this method to enable the Two-Step Verification security feature (Cloud Password) on your account. This password will be asked when you log-in on a new device in addition to the SMS code. Args: password (``str``): Your password. hint (``str``, *optional*): A password hint. email (``str``, *optional*): Recovery e-mail. Returns: True on success. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. ``ValueError`` in case there is already a cloud password enabled. ] variable[r] assign[=] call[name[self].send, parameter[call[name[functions].account.GetPassword, parameter[]]]] if name[r].has_password begin[:] <ast.Raise object at 0x7da18c4cf7f0> <ast.AugAssign object at 0x7da18c4cc640> variable[new_hash] assign[=] call[name[btoi], parameter[call[name[compute_hash], parameter[name[r].new_algo, name[password]]]]] variable[new_hash] assign[=] call[name[itob], parameter[call[name[pow], parameter[name[r].new_algo.g, name[new_hash], call[name[btoi], parameter[name[r].new_algo.p]]]]]] call[name[self].send, parameter[call[name[functions].account.UpdatePasswordSettings, parameter[]]]] return[constant[True]]
keyword[def] identifier[enable_cloud_password] ( identifier[self] , identifier[password] : identifier[str] , identifier[hint] : identifier[str] = literal[string] , identifier[email] : identifier[str] = keyword[None] )-> identifier[bool] : literal[string] identifier[r] = identifier[self] . identifier[send] ( identifier[functions] . identifier[account] . identifier[GetPassword] ()) keyword[if] identifier[r] . identifier[has_password] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[r] . identifier[new_algo] . identifier[salt1] += identifier[os] . identifier[urandom] ( literal[int] ) identifier[new_hash] = identifier[btoi] ( identifier[compute_hash] ( identifier[r] . identifier[new_algo] , identifier[password] )) identifier[new_hash] = identifier[itob] ( identifier[pow] ( identifier[r] . identifier[new_algo] . identifier[g] , identifier[new_hash] , identifier[btoi] ( identifier[r] . identifier[new_algo] . identifier[p] ))) identifier[self] . identifier[send] ( identifier[functions] . identifier[account] . identifier[UpdatePasswordSettings] ( identifier[password] = identifier[types] . identifier[InputCheckPasswordEmpty] (), identifier[new_settings] = identifier[types] . identifier[account] . identifier[PasswordInputSettings] ( identifier[new_algo] = identifier[r] . identifier[new_algo] , identifier[new_password_hash] = identifier[new_hash] , identifier[hint] = identifier[hint] , identifier[email] = identifier[email] ) ) ) keyword[return] keyword[True]
def enable_cloud_password(self, password: str, hint: str='', email: str=None) -> bool: """Use this method to enable the Two-Step Verification security feature (Cloud Password) on your account. This password will be asked when you log-in on a new device in addition to the SMS code. Args: password (``str``): Your password. hint (``str``, *optional*): A password hint. email (``str``, *optional*): Recovery e-mail. Returns: True on success. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. ``ValueError`` in case there is already a cloud password enabled. """ r = self.send(functions.account.GetPassword()) if r.has_password: raise ValueError('There is already a cloud password enabled') # depends on [control=['if'], data=[]] r.new_algo.salt1 += os.urandom(32) new_hash = btoi(compute_hash(r.new_algo, password)) new_hash = itob(pow(r.new_algo.g, new_hash, btoi(r.new_algo.p))) self.send(functions.account.UpdatePasswordSettings(password=types.InputCheckPasswordEmpty(), new_settings=types.account.PasswordInputSettings(new_algo=r.new_algo, new_password_hash=new_hash, hint=hint, email=email))) return True
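The new_hash computation above boils down to a modular exponentiation v = g^x mod p over the server-supplied group, which Python's three-argument pow() performs without materializing g**x. A toy sketch of that step (all numbers illustrative, not Telegram's real parameters):

g = 3
p = 2**127 - 1
x = 123456789          # stands in for btoi(compute_hash(r.new_algo, password))
v = pow(g, x, p)       # modular exponentiation in one call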
def course(self): """ Course this node belongs to """ course = self.parent while course.parent: course = course.parent return course
def function[course, parameter[self]]: constant[ Course this node belongs to ] variable[course] assign[=] name[self].parent while name[course].parent begin[:] variable[course] assign[=] name[course].parent return[name[course]]
keyword[def] identifier[course] ( identifier[self] ): literal[string] identifier[course] = identifier[self] . identifier[parent] keyword[while] identifier[course] . identifier[parent] : identifier[course] = identifier[course] . identifier[parent] keyword[return] identifier[course]
def course(self): """ Course this node belongs to """ course = self.parent while course.parent: course = course.parent # depends on [control=['while'], data=[]] return course
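The property simply walks parent links until it reaches the parentless root. A self-contained sketch with a stand-in node class (the real class and its parent wiring are assumptions here):

class Node:
    def __init__(self, parent=None):
        self.parent = parent

root = Node()
leaf = Node(parent=Node(parent=root))
node = leaf
while node.parent:
    node = node.parent
assert node is root   # the walk always terminates at the root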
def fields(*fields, **keys): """ Factory for for L{MessageType} and L{ActionType} field definitions. @param *fields: A L{tuple} of L{Field} instances. @param **keys: A L{dict} mapping key names to the expected type of the field's values. @return: A L{list} of L{Field} instances. """ return list(fields) + [ Field.forTypes(key, [value], "") for key, value in keys.items()]
def function[fields, parameter[]]: constant[ Factory for for L{MessageType} and L{ActionType} field definitions. @param *fields: A L{tuple} of L{Field} instances. @param **keys: A L{dict} mapping key names to the expected type of the field's values. @return: A L{list} of L{Field} instances. ] return[binary_operation[call[name[list], parameter[name[fields]]] + <ast.ListComp object at 0x7da1b0861690>]]
keyword[def] identifier[fields] (* identifier[fields] ,** identifier[keys] ): literal[string] keyword[return] identifier[list] ( identifier[fields] )+[ identifier[Field] . identifier[forTypes] ( identifier[key] ,[ identifier[value] ], literal[string] ) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[keys] . identifier[items] ()]
def fields(*fields, **keys): """ Factory for for L{MessageType} and L{ActionType} field definitions. @param *fields: A L{tuple} of L{Field} instances. @param **keys: A L{dict} mapping key names to the expected type of the field's values. @return: A L{list} of L{Field} instances. """ return list(fields) + [Field.forTypes(key, [value], '') for (key, value) in keys.items()]
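Usage sketch mixing an explicit Field with the keyword shorthand (key names illustrative):

fs = fields(Field.forTypes("job_id", [int], "Job identifier"), duration=float)
# fs is a list of two Field instances: the explicit one, plus one generated
# from the duration=float keyword with an empty description.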
def query_tissue_specificity(): """ Returns list of tissue specificity by query parameters --- tags: - Query functions parameters: - name: comment in: query type: string required: false description: Comment to tissue specificity default: '%APP695%' - name: entry_name in: query type: string required: false description: reference identifier default: A4_HUMAN - name: limit in: query type: integer required: false description: limit of results numbers default: 10 """ args = get_args( request_args=request.args, allowed_str_args=['comment', 'entry_name'], allowed_int_args=['limit'] ) return jsonify(query.tissue_specificity(**args))
def function[query_tissue_specificity, parameter[]]: constant[ Returns list of tissue specificity by query parameters --- tags: - Query functions parameters: - name: comment in: query type: string required: false description: Comment to tissue specificity default: '%APP695%' - name: entry_name in: query type: string required: false description: reference identifier default: A4_HUMAN - name: limit in: query type: integer required: false description: limit of results numbers default: 10 ] variable[args] assign[=] call[name[get_args], parameter[]] return[call[name[jsonify], parameter[call[name[query].tissue_specificity, parameter[]]]]]
keyword[def] identifier[query_tissue_specificity] (): literal[string] identifier[args] = identifier[get_args] ( identifier[request_args] = identifier[request] . identifier[args] , identifier[allowed_str_args] =[ literal[string] , literal[string] ], identifier[allowed_int_args] =[ literal[string] ] ) keyword[return] identifier[jsonify] ( identifier[query] . identifier[tissue_specificity] (** identifier[args] ))
def query_tissue_specificity(): """ Returns list of tissue specificity by query parameters --- tags: - Query functions parameters: - name: comment in: query type: string required: false description: Comment to tissue specificity default: '%APP695%' - name: entry_name in: query type: string required: false description: reference identifier default: A4_HUMAN - name: limit in: query type: integer required: false description: limit of results numbers default: 10 """ args = get_args(request_args=request.args, allowed_str_args=['comment', 'entry_name'], allowed_int_args=['limit']) return jsonify(query.tissue_specificity(**args))
def groupIntoChoices(splitsOnWord, wordWidth: int, origin: OneOfTransaction): """ :param splitsOnWord: list of lists of parts (fields splited on word boundaries) :return: generators of ChoicesOfFrameParts for each word which are not crossing word boundaries """ def cmpWordIndex(a, b): return a.startOfPart // wordWidth < b.startOfPart // wordWidth actual = None itCnt = len(splitsOnWord) for i, item in iterSort(splitsOnWord, cmpWordIndex): _actualW = item.startOfPart // wordWidth if actual is None: # first pass actual = ChoicesOfFrameParts(item.startOfPart, origin) actual.extend( ChoiceOfFrameParts(actual, origin.possibleTransactions[_i]) for _i in range(itCnt)) actualW = _actualW elif _actualW > actualW: actual.resolveEnd() yield actual actual = ChoicesOfFrameParts(item.startOfPart, origin) actual.extend( ChoiceOfFrameParts(actual, origin.possibleTransactions[_i]) for _i in range(itCnt)) actualW = _actualW actual[i].append(item) if actual is not None: actual.setIsLast(True) actual.resolveEnd() yield actual
def function[groupIntoChoices, parameter[splitsOnWord, wordWidth, origin]]: constant[ :param splitsOnWord: list of lists of parts (fields splited on word boundaries) :return: generators of ChoicesOfFrameParts for each word which are not crossing word boundaries ] def function[cmpWordIndex, parameter[a, b]]: return[compare[binary_operation[name[a].startOfPart <ast.FloorDiv object at 0x7da2590d6bc0> name[wordWidth]] less[<] binary_operation[name[b].startOfPart <ast.FloorDiv object at 0x7da2590d6bc0> name[wordWidth]]]] variable[actual] assign[=] constant[None] variable[itCnt] assign[=] call[name[len], parameter[name[splitsOnWord]]] for taget[tuple[[<ast.Name object at 0x7da18c4ce2c0>, <ast.Name object at 0x7da18c4cf070>]]] in starred[call[name[iterSort], parameter[name[splitsOnWord], name[cmpWordIndex]]]] begin[:] variable[_actualW] assign[=] binary_operation[name[item].startOfPart <ast.FloorDiv object at 0x7da2590d6bc0> name[wordWidth]] if compare[name[actual] is constant[None]] begin[:] variable[actual] assign[=] call[name[ChoicesOfFrameParts], parameter[name[item].startOfPart, name[origin]]] call[name[actual].extend, parameter[<ast.GeneratorExp object at 0x7da18c4cc100>]] variable[actualW] assign[=] name[_actualW] call[call[name[actual]][name[i]].append, parameter[name[item]]] if compare[name[actual] is_not constant[None]] begin[:] call[name[actual].setIsLast, parameter[constant[True]]] call[name[actual].resolveEnd, parameter[]] <ast.Yield object at 0x7da1b05c4d30>
keyword[def] identifier[groupIntoChoices] ( identifier[splitsOnWord] , identifier[wordWidth] : identifier[int] , identifier[origin] : identifier[OneOfTransaction] ): literal[string] keyword[def] identifier[cmpWordIndex] ( identifier[a] , identifier[b] ): keyword[return] identifier[a] . identifier[startOfPart] // identifier[wordWidth] < identifier[b] . identifier[startOfPart] // identifier[wordWidth] identifier[actual] = keyword[None] identifier[itCnt] = identifier[len] ( identifier[splitsOnWord] ) keyword[for] identifier[i] , identifier[item] keyword[in] identifier[iterSort] ( identifier[splitsOnWord] , identifier[cmpWordIndex] ): identifier[_actualW] = identifier[item] . identifier[startOfPart] // identifier[wordWidth] keyword[if] identifier[actual] keyword[is] keyword[None] : identifier[actual] = identifier[ChoicesOfFrameParts] ( identifier[item] . identifier[startOfPart] , identifier[origin] ) identifier[actual] . identifier[extend] ( identifier[ChoiceOfFrameParts] ( identifier[actual] , identifier[origin] . identifier[possibleTransactions] [ identifier[_i] ]) keyword[for] identifier[_i] keyword[in] identifier[range] ( identifier[itCnt] )) identifier[actualW] = identifier[_actualW] keyword[elif] identifier[_actualW] > identifier[actualW] : identifier[actual] . identifier[resolveEnd] () keyword[yield] identifier[actual] identifier[actual] = identifier[ChoicesOfFrameParts] ( identifier[item] . identifier[startOfPart] , identifier[origin] ) identifier[actual] . identifier[extend] ( identifier[ChoiceOfFrameParts] ( identifier[actual] , identifier[origin] . identifier[possibleTransactions] [ identifier[_i] ]) keyword[for] identifier[_i] keyword[in] identifier[range] ( identifier[itCnt] )) identifier[actualW] = identifier[_actualW] identifier[actual] [ identifier[i] ]. identifier[append] ( identifier[item] ) keyword[if] identifier[actual] keyword[is] keyword[not] keyword[None] : identifier[actual] . identifier[setIsLast] ( keyword[True] ) identifier[actual] . identifier[resolveEnd] () keyword[yield] identifier[actual]
def groupIntoChoices(splitsOnWord, wordWidth: int, origin: OneOfTransaction): """ :param splitsOnWord: list of lists of parts (fields splited on word boundaries) :return: generators of ChoicesOfFrameParts for each word which are not crossing word boundaries """ def cmpWordIndex(a, b): return a.startOfPart // wordWidth < b.startOfPart // wordWidth actual = None itCnt = len(splitsOnWord) for (i, item) in iterSort(splitsOnWord, cmpWordIndex): _actualW = item.startOfPart // wordWidth if actual is None: # first pass actual = ChoicesOfFrameParts(item.startOfPart, origin) actual.extend((ChoiceOfFrameParts(actual, origin.possibleTransactions[_i]) for _i in range(itCnt))) actualW = _actualW # depends on [control=['if'], data=['actual']] elif _actualW > actualW: actual.resolveEnd() yield actual actual = ChoicesOfFrameParts(item.startOfPart, origin) actual.extend((ChoiceOfFrameParts(actual, origin.possibleTransactions[_i]) for _i in range(itCnt))) actualW = _actualW # depends on [control=['if'], data=['_actualW', 'actualW']] actual[i].append(item) # depends on [control=['for'], data=[]] if actual is not None: actual.setIsLast(True) actual.resolveEnd() yield actual # depends on [control=['if'], data=['actual']]
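The grouping logic is equivalent to bucketing parts by startOfPart // wordWidth; a plain-tuple sketch of just that bucketing, with the ChoicesOfFrameParts wrappers omitted:

from itertools import groupby

word_width = 32
parts = [(0, "a"), (8, "b"), (40, "c")]   # (startOfPart, payload) stand-ins
for word, grp in groupby(sorted(parts), key=lambda p: p[0] // word_width):
    print(word, [name for _, name in grp])
# word 0 -> ['a', 'b']; word 1 -> ['c']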
def update(self, **kwargs): """ Update the project :param kwargs: Project properties """ old_json = self.__json__() for prop in kwargs: setattr(self, prop, kwargs[prop]) # We send notif only if object has changed if old_json != self.__json__(): self.controller.notification.emit("project.updated", self.__json__()) self.dump()
def function[update, parameter[self]]: constant[ Update the project :param kwargs: Project properties ] variable[old_json] assign[=] call[name[self].__json__, parameter[]] for taget[name[prop]] in starred[name[kwargs]] begin[:] call[name[setattr], parameter[name[self], name[prop], call[name[kwargs]][name[prop]]]] if compare[name[old_json] not_equal[!=] call[name[self].__json__, parameter[]]] begin[:] call[name[self].controller.notification.emit, parameter[constant[project.updated], call[name[self].__json__, parameter[]]]] call[name[self].dump, parameter[]]
keyword[def] identifier[update] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[old_json] = identifier[self] . identifier[__json__] () keyword[for] identifier[prop] keyword[in] identifier[kwargs] : identifier[setattr] ( identifier[self] , identifier[prop] , identifier[kwargs] [ identifier[prop] ]) keyword[if] identifier[old_json] != identifier[self] . identifier[__json__] (): identifier[self] . identifier[controller] . identifier[notification] . identifier[emit] ( literal[string] , identifier[self] . identifier[__json__] ()) identifier[self] . identifier[dump] ()
def update(self, **kwargs): """ Update the project :param kwargs: Project properties """ old_json = self.__json__() for prop in kwargs: setattr(self, prop, kwargs[prop]) # depends on [control=['for'], data=['prop']] # We send notif only if object has changed if old_json != self.__json__(): self.controller.notification.emit('project.updated', self.__json__()) self.dump() # depends on [control=['if'], data=[]]
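Because the notification fires only when the serialized state differs, repeated updates with identical values emit nothing. A hypothetical usage sketch (property names illustrative):

project.update(name="demo", auto_close=False)   # emits "project.updated" if changed
project.update(name="demo", auto_close=False)   # same state: no emit, still dumps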
def get_new_term_doc_mat(self, doc_domains): ''' Combines documents together that are in the same domain Parameters ---------- doc_domains : array-like Returns ------- scipy.sparse.csr_matrix ''' assert len(doc_domains) == self.term_doc_matrix.get_num_docs() doc_domain_set = set(doc_domains) num_terms = self.term_doc_matrix.get_num_terms() num_domains = len(doc_domain_set) domain_mat = lil_matrix((num_domains, num_terms), dtype=int) X = self.term_doc_matrix.get_term_doc_mat() for i, domain in enumerate(doc_domain_set): domain_mat[i, :] = X[np.array(doc_domains == domain)].sum(axis=0) return domain_mat.tocsr()
def function[get_new_term_doc_mat, parameter[self, doc_domains]]: constant[ Combines documents together that are in the same domain Parameters ---------- doc_domains : array-like Returns ------- scipy.sparse.csr_matrix ] assert[compare[call[name[len], parameter[name[doc_domains]]] equal[==] call[name[self].term_doc_matrix.get_num_docs, parameter[]]]] variable[doc_domain_set] assign[=] call[name[set], parameter[name[doc_domains]]] variable[num_terms] assign[=] call[name[self].term_doc_matrix.get_num_terms, parameter[]] variable[num_domains] assign[=] call[name[len], parameter[name[doc_domain_set]]] variable[domain_mat] assign[=] call[name[lil_matrix], parameter[tuple[[<ast.Name object at 0x7da1b1b1ab90>, <ast.Name object at 0x7da1b1b190f0>]]]] variable[X] assign[=] call[name[self].term_doc_matrix.get_term_doc_mat, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b1b18b20>, <ast.Name object at 0x7da1b1b19180>]]] in starred[call[name[enumerate], parameter[name[doc_domain_set]]]] begin[:] call[name[domain_mat]][tuple[[<ast.Name object at 0x7da1b1bfb400>, <ast.Slice object at 0x7da1b1bfa6e0>]]] assign[=] call[call[name[X]][call[name[np].array, parameter[compare[name[doc_domains] equal[==] name[domain]]]]].sum, parameter[]] return[call[name[domain_mat].tocsr, parameter[]]]
keyword[def] identifier[get_new_term_doc_mat] ( identifier[self] , identifier[doc_domains] ): literal[string] keyword[assert] identifier[len] ( identifier[doc_domains] )== identifier[self] . identifier[term_doc_matrix] . identifier[get_num_docs] () identifier[doc_domain_set] = identifier[set] ( identifier[doc_domains] ) identifier[num_terms] = identifier[self] . identifier[term_doc_matrix] . identifier[get_num_terms] () identifier[num_domains] = identifier[len] ( identifier[doc_domain_set] ) identifier[domain_mat] = identifier[lil_matrix] (( identifier[num_domains] , identifier[num_terms] ), identifier[dtype] = identifier[int] ) identifier[X] = identifier[self] . identifier[term_doc_matrix] . identifier[get_term_doc_mat] () keyword[for] identifier[i] , identifier[domain] keyword[in] identifier[enumerate] ( identifier[doc_domain_set] ): identifier[domain_mat] [ identifier[i] ,:]= identifier[X] [ identifier[np] . identifier[array] ( identifier[doc_domains] == identifier[domain] )]. identifier[sum] ( identifier[axis] = literal[int] ) keyword[return] identifier[domain_mat] . identifier[tocsr] ()
def get_new_term_doc_mat(self, doc_domains): """ Combines documents together that are in the same domain Parameters ---------- doc_domains : array-like Returns ------- scipy.sparse.csr_matrix """ assert len(doc_domains) == self.term_doc_matrix.get_num_docs() doc_domain_set = set(doc_domains) num_terms = self.term_doc_matrix.get_num_terms() num_domains = len(doc_domain_set) domain_mat = lil_matrix((num_domains, num_terms), dtype=int) X = self.term_doc_matrix.get_term_doc_mat() for (i, domain) in enumerate(doc_domain_set): domain_mat[i, :] = X[np.array(doc_domains == domain)].sum(axis=0) # depends on [control=['for'], data=[]] return domain_mat.tocsr()
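A standalone sketch of the per-domain row summation with toy data so the shapes are visible, mirroring the lil-to-csr flow above:

import numpy as np
from scipy.sparse import csr_matrix, lil_matrix

X = csr_matrix(np.array([[1, 0], [0, 2], [3, 0]]))
doc_domains = np.array(["a", "b", "a"])
domains = sorted(set(doc_domains))
out = lil_matrix((len(domains), X.shape[1]), dtype=int)
for i, d in enumerate(domains):
    out[i, :] = X[doc_domains == d].sum(axis=0)   # sum the rows of each domain
print(out.toarray())   # [[4 0]
                       #  [0 2]]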
def get_coordinates(filename, fmt): """ Get coordinates from filename in format fmt. Supports XYZ and PDB. Parameters ---------- filename : string Filename to read fmt : string Format of filename. Either xyz or pdb. Returns ------- atoms : list List of atomic types V : array (N,3) where N is number of atoms """ if fmt == "xyz": get_func = get_coordinates_xyz elif fmt == "pdb": get_func = get_coordinates_pdb else: exit("Could not recognize file format: {:s}".format(fmt)) return get_func(filename)
def function[get_coordinates, parameter[filename, fmt]]: constant[ Get coordinates from filename in format fmt. Supports XYZ and PDB. Parameters ---------- filename : string Filename to read fmt : string Format of filename. Either xyz or pdb. Returns ------- atoms : list List of atomic types V : array (N,3) where N is number of atoms ] if compare[name[fmt] equal[==] constant[xyz]] begin[:] variable[get_func] assign[=] name[get_coordinates_xyz] return[call[name[get_func], parameter[name[filename]]]]
keyword[def] identifier[get_coordinates] ( identifier[filename] , identifier[fmt] ): literal[string] keyword[if] identifier[fmt] == literal[string] : identifier[get_func] = identifier[get_coordinates_xyz] keyword[elif] identifier[fmt] == literal[string] : identifier[get_func] = identifier[get_coordinates_pdb] keyword[else] : identifier[exit] ( literal[string] . identifier[format] ( identifier[fmt] )) keyword[return] identifier[get_func] ( identifier[filename] )
def get_coordinates(filename, fmt): """ Get coordinates from filename in format fmt. Supports XYZ and PDB. Parameters ---------- filename : string Filename to read fmt : string Format of filename. Either xyz or pdb. Returns ------- atoms : list List of atomic types V : array (N,3) where N is number of atoms """ if fmt == 'xyz': get_func = get_coordinates_xyz # depends on [control=['if'], data=[]] elif fmt == 'pdb': get_func = get_coordinates_pdb # depends on [control=['if'], data=[]] else: exit('Could not recognize file format: {:s}'.format(fmt)) return get_func(filename)
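The if/elif chain can equally be written as a table-driven dispatch; a behavior-preserving sketch:

def get_coordinates(filename, fmt):
    readers = {"xyz": get_coordinates_xyz, "pdb": get_coordinates_pdb}
    try:
        get_func = readers[fmt]
    except KeyError:
        exit("Could not recognize file format: {:s}".format(fmt))
    return get_func(filename)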
def _vec_lnqmed_residuals(self, catchments): """ Return ln(QMED) model errors for a list of catchments :param catchments: List of gauged catchments :type catchments: list of :class:`Catchment` :return: Model errors :rtype: list of float """ result = np.empty(len(catchments)) for index, donor in enumerate(catchments): result[index] = self._lnqmed_residual(donor) return result
def function[_vec_lnqmed_residuals, parameter[self, catchments]]: constant[ Return ln(QMED) model errors for a list of catchments :param catchments: List of gauged catchments :type catchments: list of :class:`Catchment` :return: Model errors :rtype: list of float ] variable[result] assign[=] call[name[np].empty, parameter[call[name[len], parameter[name[catchments]]]]] for taget[tuple[[<ast.Name object at 0x7da18f58c6a0>, <ast.Name object at 0x7da18f58ea10>]]] in starred[call[name[enumerate], parameter[name[catchments]]]] begin[:] call[name[result]][name[index]] assign[=] call[name[self]._lnqmed_residual, parameter[name[donor]]] return[name[result]]
keyword[def] identifier[_vec_lnqmed_residuals] ( identifier[self] , identifier[catchments] ): literal[string] identifier[result] = identifier[np] . identifier[empty] ( identifier[len] ( identifier[catchments] )) keyword[for] identifier[index] , identifier[donor] keyword[in] identifier[enumerate] ( identifier[catchments] ): identifier[result] [ identifier[index] ]= identifier[self] . identifier[_lnqmed_residual] ( identifier[donor] ) keyword[return] identifier[result]
def _vec_lnqmed_residuals(self, catchments): """ Return ln(QMED) model errors for a list of catchments :param catchments: List of gauged catchments :type catchments: list of :class:`Catchment` :return: Model errors :rtype: list of float """ result = np.empty(len(catchments)) for (index, donor) in enumerate(catchments): result[index] = self._lnqmed_residual(donor) # depends on [control=['for'], data=[]] return result
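Since each residual is computed independently, the preallocated loop is equivalent to a one-line comprehension (assuming _lnqmed_residual returns a single float per catchment):

result = np.array([self._lnqmed_residual(donor) for donor in catchments])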
def upload_activity(self, activity_file, data_type, name=None, description=None, activity_type=None, private=None, external_id=None): """ Uploads a GPS file (tcx, gpx) to create a new activity for current athlete. http://strava.github.io/api/v3/athlete/#get-details :param activity_file: The file object to upload or file contents. :type activity_file: file or str :param data_type: File format for upload. Possible values: fit, fit.gz, tcx, tcx.gz, gpx, gpx.gz :type data_type: str :param name: (optional) if not provided, will be populated using start date and location, if available :type name: str :param description: (optional) The description for the activity :type name: str :param activity_type: (optional) case-insensitive type of activity. possible values: ride, run, swim, workout, hike, walk, nordicski, alpineski, backcountryski, iceskate, inlineskate, kitesurf, rollerski, windsurf, workout, snowboard, snowshoe Type detected from file overrides, uses athlete's default type if not specified :type activity_type: str :param private: (optional) set to True to mark the resulting activity as private, 'view_private' permissions will be necessary to view the activity :type private: bool :param external_id: (optional) An arbitrary unique identifier may be specified which will be included in status responses. :type external_id: str """ if not hasattr(activity_file, 'read'): if isinstance(activity_file, six.string_types): activity_file = BytesIO(activity_file.encode('utf-8')) elif isinstance(activity_file, str): activity_file = BytesIO(activity_file) else: raise TypeError("Invalid type specified for activity_file: {0}".format(type(activity_file))) valid_data_types = ('fit', 'fit.gz', 'tcx', 'tcx.gz', 'gpx', 'gpx.gz') if not data_type in valid_data_types: raise ValueError("Invalid data type {0}. Possible values {1!r}".format(data_type, valid_data_types)) params = {'data_type': data_type} if name is not None: params['name'] = name if description is not None: params['description'] = description if activity_type is not None: if not activity_type.lower() in [t.lower() for t in model.Activity.TYPES]: raise ValueError("Invalid activity type: {0}. Possible values: {1!r}".format(activity_type, model.Activity.TYPES)) params['activity_type'] = activity_type if private is not None: params['private'] = int(private) if external_id is not None: params['external_id'] = external_id initial_response = self.protocol.post('/uploads', files={'file': activity_file}, check_for_errors=False, **params) return ActivityUploader(self, response=initial_response)
def function[upload_activity, parameter[self, activity_file, data_type, name, description, activity_type, private, external_id]]: constant[ Uploads a GPS file (tcx, gpx) to create a new activity for current athlete. http://strava.github.io/api/v3/athlete/#get-details :param activity_file: The file object to upload or file contents. :type activity_file: file or str :param data_type: File format for upload. Possible values: fit, fit.gz, tcx, tcx.gz, gpx, gpx.gz :type data_type: str :param name: (optional) if not provided, will be populated using start date and location, if available :type name: str :param description: (optional) The description for the activity :type name: str :param activity_type: (optional) case-insensitive type of activity. possible values: ride, run, swim, workout, hike, walk, nordicski, alpineski, backcountryski, iceskate, inlineskate, kitesurf, rollerski, windsurf, workout, snowboard, snowshoe Type detected from file overrides, uses athlete's default type if not specified :type activity_type: str :param private: (optional) set to True to mark the resulting activity as private, 'view_private' permissions will be necessary to view the activity :type private: bool :param external_id: (optional) An arbitrary unique identifier may be specified which will be included in status responses. :type external_id: str ] if <ast.UnaryOp object at 0x7da1b07b20e0> begin[:] if call[name[isinstance], parameter[name[activity_file], name[six].string_types]] begin[:] variable[activity_file] assign[=] call[name[BytesIO], parameter[call[name[activity_file].encode, parameter[constant[utf-8]]]]] variable[valid_data_types] assign[=] tuple[[<ast.Constant object at 0x7da1b07b1870>, <ast.Constant object at 0x7da1b07b2e60>, <ast.Constant object at 0x7da1b07b31c0>, <ast.Constant object at 0x7da1b07b0cd0>, <ast.Constant object at 0x7da1b07b3fa0>, <ast.Constant object at 0x7da1b07b2590>]] if <ast.UnaryOp object at 0x7da1b07b3130> begin[:] <ast.Raise object at 0x7da1b07b08b0> variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b07b0700>], [<ast.Name object at 0x7da1b07b12d0>]] if compare[name[name] is_not constant[None]] begin[:] call[name[params]][constant[name]] assign[=] name[name] if compare[name[description] is_not constant[None]] begin[:] call[name[params]][constant[description]] assign[=] name[description] if compare[name[activity_type] is_not constant[None]] begin[:] if <ast.UnaryOp object at 0x7da1b07b3b20> begin[:] <ast.Raise object at 0x7da1b07b1720> call[name[params]][constant[activity_type]] assign[=] name[activity_type] if compare[name[private] is_not constant[None]] begin[:] call[name[params]][constant[private]] assign[=] call[name[int], parameter[name[private]]] if compare[name[external_id] is_not constant[None]] begin[:] call[name[params]][constant[external_id]] assign[=] name[external_id] variable[initial_response] assign[=] call[name[self].protocol.post, parameter[constant[/uploads]]] return[call[name[ActivityUploader], parameter[name[self]]]]
keyword[def] identifier[upload_activity] ( identifier[self] , identifier[activity_file] , identifier[data_type] , identifier[name] = keyword[None] , identifier[description] = keyword[None] , identifier[activity_type] = keyword[None] , identifier[private] = keyword[None] , identifier[external_id] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[hasattr] ( identifier[activity_file] , literal[string] ): keyword[if] identifier[isinstance] ( identifier[activity_file] , identifier[six] . identifier[string_types] ): identifier[activity_file] = identifier[BytesIO] ( identifier[activity_file] . identifier[encode] ( literal[string] )) keyword[elif] identifier[isinstance] ( identifier[activity_file] , identifier[str] ): identifier[activity_file] = identifier[BytesIO] ( identifier[activity_file] ) keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[activity_file] ))) identifier[valid_data_types] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ) keyword[if] keyword[not] identifier[data_type] keyword[in] identifier[valid_data_types] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[data_type] , identifier[valid_data_types] )) identifier[params] ={ literal[string] : identifier[data_type] } keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] : identifier[params] [ literal[string] ]= identifier[name] keyword[if] identifier[description] keyword[is] keyword[not] keyword[None] : identifier[params] [ literal[string] ]= identifier[description] keyword[if] identifier[activity_type] keyword[is] keyword[not] keyword[None] : keyword[if] keyword[not] identifier[activity_type] . identifier[lower] () keyword[in] [ identifier[t] . identifier[lower] () keyword[for] identifier[t] keyword[in] identifier[model] . identifier[Activity] . identifier[TYPES] ]: keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[activity_type] , identifier[model] . identifier[Activity] . identifier[TYPES] )) identifier[params] [ literal[string] ]= identifier[activity_type] keyword[if] identifier[private] keyword[is] keyword[not] keyword[None] : identifier[params] [ literal[string] ]= identifier[int] ( identifier[private] ) keyword[if] identifier[external_id] keyword[is] keyword[not] keyword[None] : identifier[params] [ literal[string] ]= identifier[external_id] identifier[initial_response] = identifier[self] . identifier[protocol] . identifier[post] ( literal[string] , identifier[files] ={ literal[string] : identifier[activity_file] }, identifier[check_for_errors] = keyword[False] , ** identifier[params] ) keyword[return] identifier[ActivityUploader] ( identifier[self] , identifier[response] = identifier[initial_response] )
def upload_activity(self, activity_file, data_type, name=None, description=None, activity_type=None, private=None, external_id=None):
    """
    Uploads a GPS file (tcx, gpx) to create a new activity for current athlete.

    http://strava.github.io/api/v3/athlete/#get-details

    :param activity_file: The file object to upload or file contents.
    :type activity_file: file or str

    :param data_type: File format for upload. Possible values: fit, fit.gz, tcx, tcx.gz, gpx, gpx.gz
    :type data_type: str

    :param name: (optional) if not provided, will be populated using start date and location, if available
    :type name: str

    :param description: (optional) The description for the activity
    :type description: str

    :param activity_type: (optional) case-insensitive type of activity.
                          possible values: ride, run, swim, workout, hike, walk,
                          nordicski, alpineski, backcountryski, iceskate, inlineskate,
                          kitesurf, rollerski, windsurf, snowboard, snowshoe
                          Type detected from file overrides, uses athlete's default type if not specified
    :type activity_type: str

    :param private: (optional) set to True to mark the resulting activity as private, 'view_private' permissions will be necessary to view the activity
    :type private: bool

    :param external_id: (optional) An arbitrary unique identifier may be specified which will be included in status responses.
    :type external_id: str
    """
    if not hasattr(activity_file, 'read'):
        if isinstance(activity_file, six.string_types):
            activity_file = BytesIO(activity_file.encode('utf-8')) # depends on [control=['if'], data=[]]
        elif isinstance(activity_file, str):
            activity_file = BytesIO(activity_file) # depends on [control=['if'], data=[]]
        else:
            raise TypeError('Invalid type specified for activity_file: {0}'.format(type(activity_file))) # depends on [control=['if'], data=[]]
    valid_data_types = ('fit', 'fit.gz', 'tcx', 'tcx.gz', 'gpx', 'gpx.gz')
    if not data_type in valid_data_types:
        raise ValueError('Invalid data type {0}. Possible values {1!r}'.format(data_type, valid_data_types)) # depends on [control=['if'], data=[]]
    params = {'data_type': data_type}
    if name is not None:
        params['name'] = name # depends on [control=['if'], data=['name']]
    if description is not None:
        params['description'] = description # depends on [control=['if'], data=['description']]
    if activity_type is not None:
        if not activity_type.lower() in [t.lower() for t in model.Activity.TYPES]:
            raise ValueError('Invalid activity type: {0}. Possible values: {1!r}'.format(activity_type, model.Activity.TYPES)) # depends on [control=['if'], data=[]]
        params['activity_type'] = activity_type # depends on [control=['if'], data=['activity_type']]
    if private is not None:
        params['private'] = int(private) # depends on [control=['if'], data=['private']]
    if external_id is not None:
        params['external_id'] = external_id # depends on [control=['if'], data=['external_id']]
    initial_response = self.protocol.post('/uploads', files={'file': activity_file}, check_for_errors=False, **params)
    return ActivityUploader(self, response=initial_response)
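A usage sketch, assuming the method above is exposed on a stravalib-style Client; the import path, the placeholder token, and the uploader.wait() polling helper are assumptions rather than facts shown in this row:

from stravalib.client import Client  # assumed host class for upload_activity

client = Client(access_token='REPLACE_WITH_TOKEN')  # placeholder credential
with open('morning_ride.gpx', 'rb') as fp:
    uploader = client.upload_activity(
        activity_file=fp,
        data_type='gpx',
        name='Morning ride',
        private=True,
    )
activity = uploader.wait()  # assumed helper: polls until Strava finishes processing
print(activity.id)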
def list_all(self): ''' Return a list of the reactors ''' if isinstance(self.minion.opts['reactor'], six.string_types): log.debug('Reading reactors from yaml %s', self.opts['reactor']) try: with salt.utils.files.fopen(self.opts['reactor']) as fp_: react_map = salt.utils.yaml.safe_load(fp_) except (OSError, IOError): log.error('Failed to read reactor map: "%s"', self.opts['reactor']) except Exception: log.error( 'Failed to parse YAML in reactor map: "%s"', self.opts['reactor'] ) else: log.debug('Not reading reactors from yaml') react_map = self.minion.opts['reactor'] return react_map
def function[list_all, parameter[self]]: constant[ Return a list of the reactors ] if call[name[isinstance], parameter[call[name[self].minion.opts][constant[reactor]], name[six].string_types]] begin[:] call[name[log].debug, parameter[constant[Reading reactors from yaml %s], call[name[self].opts][constant[reactor]]]] <ast.Try object at 0x7da20e960220> return[name[react_map]]
keyword[def] identifier[list_all] ( identifier[self] ): literal[string] keyword[if] identifier[isinstance] ( identifier[self] . identifier[minion] . identifier[opts] [ literal[string] ], identifier[six] . identifier[string_types] ): identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[opts] [ literal[string] ]) keyword[try] : keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[self] . identifier[opts] [ literal[string] ]) keyword[as] identifier[fp_] : identifier[react_map] = identifier[salt] . identifier[utils] . identifier[yaml] . identifier[safe_load] ( identifier[fp_] ) keyword[except] ( identifier[OSError] , identifier[IOError] ): identifier[log] . identifier[error] ( literal[string] , identifier[self] . identifier[opts] [ literal[string] ]) keyword[except] identifier[Exception] : identifier[log] . identifier[error] ( literal[string] , identifier[self] . identifier[opts] [ literal[string] ] ) keyword[else] : identifier[log] . identifier[debug] ( literal[string] ) identifier[react_map] = identifier[self] . identifier[minion] . identifier[opts] [ literal[string] ] keyword[return] identifier[react_map]
def list_all(self): """ Return a list of the reactors """ if isinstance(self.minion.opts['reactor'], six.string_types): log.debug('Reading reactors from yaml %s', self.opts['reactor']) try: with salt.utils.files.fopen(self.opts['reactor']) as fp_: react_map = salt.utils.yaml.safe_load(fp_) # depends on [control=['with'], data=['fp_']] # depends on [control=['try'], data=[]] except (OSError, IOError): log.error('Failed to read reactor map: "%s"', self.opts['reactor']) # depends on [control=['except'], data=[]] except Exception: log.error('Failed to parse YAML in reactor map: "%s"', self.opts['reactor']) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: log.debug('Not reading reactors from yaml') react_map = self.minion.opts['reactor'] return react_map
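For reference, a plausible reactor-map shape this method returns, parsed here with plain PyYAML rather than salt.utils.yaml; the event tags and SLS paths are illustrative:

import yaml  # PyYAML stand-in for salt.utils.yaml.safe_load

react_map = yaml.safe_load("""
- 'salt/minion/*/start':
  - /srv/reactor/start.sls
- 'salt/cloud/*/destroyed':
  - /srv/reactor/destroy.sls
""")
print(react_map[0])  # {'salt/minion/*/start': ['/srv/reactor/start.sls']}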
def always_iterable(item): """ Given an object, always return an iterable. If the item is not already iterable, return a tuple containing only the item. If item is None, an empty iterable is returned. >>> always_iterable([1,2,3]) <list_iterator...> >>> always_iterable('foo') <tuple_iterator...> >>> always_iterable(None) <tuple_iterator...> >>> always_iterable(range(10)) <range_iterator...> >>> def _test_func(): yield "I'm iterable" >>> print(next(always_iterable(_test_func()))) I'm iterable Although mappings are iterable, treat each like a singleton, as it's more like an object than a sequence. >>> next(always_iterable(dict(a=1))) {'a': 1} """ base_types = six.text_type, bytes, collections.abc.Mapping return more_itertools.always_iterable(item, base_type=base_types)
def function[always_iterable, parameter[item]]: constant[ Given an object, always return an iterable. If the item is not already iterable, return a tuple containing only the item. If item is None, an empty iterable is returned. >>> always_iterable([1,2,3]) <list_iterator...> >>> always_iterable('foo') <tuple_iterator...> >>> always_iterable(None) <tuple_iterator...> >>> always_iterable(range(10)) <range_iterator...> >>> def _test_func(): yield "I'm iterable" >>> print(next(always_iterable(_test_func()))) I'm iterable Although mappings are iterable, treat each like a singleton, as it's more like an object than a sequence. >>> next(always_iterable(dict(a=1))) {'a': 1} ] variable[base_types] assign[=] tuple[[<ast.Attribute object at 0x7da18ede6620>, <ast.Name object at 0x7da18ede6770>, <ast.Attribute object at 0x7da18ede6260>]] return[call[name[more_itertools].always_iterable, parameter[name[item]]]]
keyword[def] identifier[always_iterable] ( identifier[item] ): literal[string] identifier[base_types] = identifier[six] . identifier[text_type] , identifier[bytes] , identifier[collections] . identifier[abc] . identifier[Mapping] keyword[return] identifier[more_itertools] . identifier[always_iterable] ( identifier[item] , identifier[base_type] = identifier[base_types] )
def always_iterable(item): """ Given an object, always return an iterable. If the item is not already iterable, return a tuple containing only the item. If item is None, an empty iterable is returned. >>> always_iterable([1,2,3]) <list_iterator...> >>> always_iterable('foo') <tuple_iterator...> >>> always_iterable(None) <tuple_iterator...> >>> always_iterable(range(10)) <range_iterator...> >>> def _test_func(): yield "I'm iterable" >>> print(next(always_iterable(_test_func()))) I'm iterable Although mappings are iterable, treat each like a singleton, as it's more like an object than a sequence. >>> next(always_iterable(dict(a=1))) {'a': 1} """ base_types = (six.text_type, bytes, collections.abc.Mapping) return more_itertools.always_iterable(item, base_type=base_types)
def add_args(p):
    """
    Update parser with tool specific arguments.

    This overwrites what is done in utils.uns_args.
    """
    # dictionary for adding arguments
    dadd_args = {
        '--opfile': {
            'default': '',
            'metavar': 'f',
            'type': str,
            'help': 'Specify a parameter file '
                    '(default: "sim/${exkey}_params.txt")'}}
    p = utils.add_args(p, dadd_args)
    return p
def function[add_args, parameter[p]]: constant[ Update parser with tool specific arguments. This overwrites was is done in utils.uns_args. ] variable[dadd_args] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c5720>], [<ast.Dict object at 0x7da20c6c4370>]] variable[p] assign[=] call[name[utils].add_args, parameter[name[p], name[dadd_args]]] return[name[p]]
keyword[def] identifier[add_args] ( identifier[p] ): literal[string] identifier[dadd_args] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : identifier[str] , literal[string] : literal[string] literal[string] }} identifier[p] = identifier[utils] . identifier[add_args] ( identifier[p] , identifier[dadd_args] ) keyword[return] identifier[p]
def add_args(p):
    """
    Update parser with tool specific arguments.

    This overwrites what is done in utils.uns_args.
    """
    # dictionary for adding arguments
    dadd_args = {'--opfile': {'default': '', 'metavar': 'f', 'type': str, 'help': 'Specify a parameter file (default: "sim/${exkey}_params.txt")'}}
    p = utils.add_args(p, dadd_args)
    return p
def pop_all(self): """ NON-BLOCKING POP ALL IN QUEUE, IF ANY """ with self.lock: output = list(self.queue) self.queue.clear() return output
def function[pop_all, parameter[self]]: constant[ NON-BLOCKING POP ALL IN QUEUE, IF ANY ] with name[self].lock begin[:] variable[output] assign[=] call[name[list], parameter[name[self].queue]] call[name[self].queue.clear, parameter[]] return[name[output]]
keyword[def] identifier[pop_all] ( identifier[self] ): literal[string] keyword[with] identifier[self] . identifier[lock] : identifier[output] = identifier[list] ( identifier[self] . identifier[queue] ) identifier[self] . identifier[queue] . identifier[clear] () keyword[return] identifier[output]
def pop_all(self): """ NON-BLOCKING POP ALL IN QUEUE, IF ANY """ with self.lock: output = list(self.queue) self.queue.clear() # depends on [control=['with'], data=[]] return output
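A minimal self-contained sketch of the drain-all pattern pop_all() implements, with illustrative class and attribute names:

from collections import deque
from threading import Lock

class DrainableQueue:
    def __init__(self):
        self.lock = Lock()
        self.queue = deque()

    def add(self, item):
        with self.lock:
            self.queue.append(item)

    def pop_all(self):
        # snapshot everything under the lock, then clear in the same critical section
        with self.lock:
            output = list(self.queue)
            self.queue.clear()
        return output

q = DrainableQueue()
for i in range(3):
    q.add(i)
print(q.pop_all())  # [0, 1, 2]
print(q.pop_all())  # [] -- non-blocking, returns empty when nothing is queued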
def generate_jsapi_signature(self, timestamp, noncestr, url, jsapi_ticket=None):
    """
    Sign a URL with the jsapi_ticket.

    :param timestamp: timestamp
    :param noncestr: random nonce string
    :param url: the URL to sign, excluding the '#' fragment and everything after it
    :param jsapi_ticket: (optional) the jsapi_ticket value (fetched automatically
                         via appid and appsecret if not provided)
    :return: the hexdigest of the SHA-1 signature
    """
    if not jsapi_ticket:
        jsapi_ticket = self.conf.jsapi_ticket
    data = {
        'jsapi_ticket': jsapi_ticket,
        'noncestr': noncestr,
        'timestamp': timestamp,
        'url': url,
    }
    keys = list(data.keys())
    keys.sort()
    data_str = '&'.join(['%s=%s' % (key, data[key]) for key in keys])
    signature = hashlib.sha1(data_str.encode('utf-8')).hexdigest()
    return signature
def function[generate_jsapi_signature, parameter[self, timestamp, noncestr, url, jsapi_ticket]]: constant[ 使用 jsapi_ticket 对 url 进行签名 :param timestamp: 时间戳 :param noncestr: 随机数 :param url: 要签名的 url,不包含 # 及其后面部分 :param jsapi_ticket: (可选参数) jsapi_ticket 值 (如不提供将自动通过 appid 和 appsecret 获取) :return: 返回sha1签名的hexdigest值 ] if <ast.UnaryOp object at 0x7da1b008cf70> begin[:] variable[jsapi_ticket] assign[=] name[self].conf.jsapi_ticket variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b008c4c0>, <ast.Constant object at 0x7da1b008d900>, <ast.Constant object at 0x7da1b008ce50>, <ast.Constant object at 0x7da1b008d810>], [<ast.Name object at 0x7da1b008e110>, <ast.Name object at 0x7da1b008c730>, <ast.Name object at 0x7da1b008d420>, <ast.Name object at 0x7da1b008c640>]] variable[keys] assign[=] call[name[list], parameter[call[name[data].keys, parameter[]]]] call[name[keys].sort, parameter[]] variable[data_str] assign[=] call[constant[&].join, parameter[<ast.ListComp object at 0x7da1b008cc70>]] variable[signature] assign[=] call[call[name[hashlib].sha1, parameter[call[name[data_str].encode, parameter[constant[utf-8]]]]].hexdigest, parameter[]] return[name[signature]]
keyword[def] identifier[generate_jsapi_signature] ( identifier[self] , identifier[timestamp] , identifier[noncestr] , identifier[url] , identifier[jsapi_ticket] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[jsapi_ticket] : identifier[jsapi_ticket] = identifier[self] . identifier[conf] . identifier[jsapi_ticket] identifier[data] ={ literal[string] : identifier[jsapi_ticket] , literal[string] : identifier[noncestr] , literal[string] : identifier[timestamp] , literal[string] : identifier[url] , } identifier[keys] = identifier[list] ( identifier[data] . identifier[keys] ()) identifier[keys] . identifier[sort] () identifier[data_str] = literal[string] . identifier[join] ([ literal[string] %( identifier[key] , identifier[data] [ identifier[key] ]) keyword[for] identifier[key] keyword[in] identifier[keys] ]) identifier[signature] = identifier[hashlib] . identifier[sha1] ( identifier[data_str] . identifier[encode] ( literal[string] )). identifier[hexdigest] () keyword[return] identifier[signature]
def generate_jsapi_signature(self, timestamp, noncestr, url, jsapi_ticket=None):
    """
    Sign a URL with the jsapi_ticket.

    :param timestamp: timestamp
    :param noncestr: random nonce string
    :param url: the URL to sign, excluding the '#' fragment and everything after it
    :param jsapi_ticket: (optional) the jsapi_ticket value (fetched automatically via appid and appsecret if not provided)
    :return: the hexdigest of the SHA-1 signature
    """
    if not jsapi_ticket:
        jsapi_ticket = self.conf.jsapi_ticket # depends on [control=['if'], data=[]]
    data = {'jsapi_ticket': jsapi_ticket, 'noncestr': noncestr, 'timestamp': timestamp, 'url': url}
    keys = list(data.keys())
    keys.sort()
    data_str = '&'.join(['%s=%s' % (key, data[key]) for key in keys])
    signature = hashlib.sha1(data_str.encode('utf-8')).hexdigest()
    return signature
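The same signing scheme as a standalone, runnable function: sort the four fields by key, join them as a query string, and SHA-1 the result. The ticket, nonce, timestamp, and URL values below are made up for illustration:

import hashlib

def jsapi_signature(jsapi_ticket, noncestr, timestamp, url):
    data = {
        'jsapi_ticket': jsapi_ticket,
        'noncestr': noncestr,
        'timestamp': timestamp,
        'url': url,
    }
    # keys joined in sorted order, exactly as the method above does
    data_str = '&'.join('%s=%s' % (k, data[k]) for k in sorted(data))
    return hashlib.sha1(data_str.encode('utf-8')).hexdigest()

print(jsapi_signature('ticket-value', 'abc123', 1414587457,
                      'http://example.com/page'))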
def data(self, ctx=None): """Returns a copy of this parameter on one context. Must have been initialized on this context before. Parameters ---------- ctx : Context Desired context. Returns ------- NDArray on ctx """ d = self._check_and_get(self._data, ctx) if self._rate: d = nd.Dropout(d, self._rate, self._mode, self._axes) return d
def function[data, parameter[self, ctx]]: constant[Returns a copy of this parameter on one context. Must have been initialized on this context before. Parameters ---------- ctx : Context Desired context. Returns ------- NDArray on ctx ] variable[d] assign[=] call[name[self]._check_and_get, parameter[name[self]._data, name[ctx]]] if name[self]._rate begin[:] variable[d] assign[=] call[name[nd].Dropout, parameter[name[d], name[self]._rate, name[self]._mode, name[self]._axes]] return[name[d]]
keyword[def] identifier[data] ( identifier[self] , identifier[ctx] = keyword[None] ): literal[string] identifier[d] = identifier[self] . identifier[_check_and_get] ( identifier[self] . identifier[_data] , identifier[ctx] ) keyword[if] identifier[self] . identifier[_rate] : identifier[d] = identifier[nd] . identifier[Dropout] ( identifier[d] , identifier[self] . identifier[_rate] , identifier[self] . identifier[_mode] , identifier[self] . identifier[_axes] ) keyword[return] identifier[d]
def data(self, ctx=None): """Returns a copy of this parameter on one context. Must have been initialized on this context before. Parameters ---------- ctx : Context Desired context. Returns ------- NDArray on ctx """ d = self._check_and_get(self._data, ctx) if self._rate: d = nd.Dropout(d, self._rate, self._mode, self._axes) # depends on [control=['if'], data=[]] return d
def mid_pt(self): """Midpoint of this interval product.""" midp = (self.max_pt + self.min_pt) / 2. midp[~self.nondegen_byaxis] = self.min_pt[~self.nondegen_byaxis] return midp
def function[mid_pt, parameter[self]]: constant[Midpoint of this interval product.] variable[midp] assign[=] binary_operation[binary_operation[name[self].max_pt + name[self].min_pt] / constant[2.0]] call[name[midp]][<ast.UnaryOp object at 0x7da1b20faf50>] assign[=] call[name[self].min_pt][<ast.UnaryOp object at 0x7da1b20faf20>] return[name[midp]]
keyword[def] identifier[mid_pt] ( identifier[self] ): literal[string] identifier[midp] =( identifier[self] . identifier[max_pt] + identifier[self] . identifier[min_pt] )/ literal[int] identifier[midp] [~ identifier[self] . identifier[nondegen_byaxis] ]= identifier[self] . identifier[min_pt] [~ identifier[self] . identifier[nondegen_byaxis] ] keyword[return] identifier[midp]
def mid_pt(self): """Midpoint of this interval product.""" midp = (self.max_pt + self.min_pt) / 2.0 midp[~self.nondegen_byaxis] = self.min_pt[~self.nondegen_byaxis] return midp
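A numeric illustration of mid_pt(): the midpoint along each axis, except that degenerate axes (min == max) keep min_pt. Here nondegen_byaxis is assumed to be a boolean mask marking axes with positive extent:

import numpy as np

min_pt = np.array([0.0, 1.0, 5.0])
max_pt = np.array([4.0, 1.0, 9.0])  # second axis is degenerate
nondegen_byaxis = max_pt > min_pt

midp = (max_pt + min_pt) / 2.0
midp[~nondegen_byaxis] = min_pt[~nondegen_byaxis]  # degenerate axes fall back to min_pt
print(midp)  # [2. 1. 7.]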
def assistant_fallback_actions(self): """ Access the assistant_fallback_actions :returns: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsList :rtype: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsList """ if self._assistant_fallback_actions is None: self._assistant_fallback_actions = AssistantFallbackActionsList( self._version, assistant_sid=self._solution['sid'], ) return self._assistant_fallback_actions
def function[assistant_fallback_actions, parameter[self]]: constant[ Access the assistant_fallback_actions :returns: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsList :rtype: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsList ] if compare[name[self]._assistant_fallback_actions is constant[None]] begin[:] name[self]._assistant_fallback_actions assign[=] call[name[AssistantFallbackActionsList], parameter[name[self]._version]] return[name[self]._assistant_fallback_actions]
keyword[def] identifier[assistant_fallback_actions] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_assistant_fallback_actions] keyword[is] keyword[None] : identifier[self] . identifier[_assistant_fallback_actions] = identifier[AssistantFallbackActionsList] ( identifier[self] . identifier[_version] , identifier[assistant_sid] = identifier[self] . identifier[_solution] [ literal[string] ], ) keyword[return] identifier[self] . identifier[_assistant_fallback_actions]
def assistant_fallback_actions(self): """ Access the assistant_fallback_actions :returns: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsList :rtype: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsList """ if self._assistant_fallback_actions is None: self._assistant_fallback_actions = AssistantFallbackActionsList(self._version, assistant_sid=self._solution['sid']) # depends on [control=['if'], data=[]] return self._assistant_fallback_actions
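The property above follows a common create-on-first-access pattern: the wrapped list object is built once and cached on the instance. A generic, self-contained sketch (class and attribute names are illustrative, not Twilio's):

class Resource:
    def __init__(self):
        self._actions = None

    @property
    def actions(self):
        if self._actions is None:
            self._actions = ['fallback-action']  # stand-in for the real list object
        return self._actions

r = Resource()
print(r.actions is r.actions)  # True -- the same cached object on every access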
def parse_services(config, services): """Parse configuration to return number of enabled service checks. Arguments: config (obj): A configparser object with the configuration of anycast-healthchecker. services (list): A list of section names which holds configuration for each service check Returns: A number (int) of enabled service checks. """ enabled = 0 for service in services: check_disabled = config.getboolean(service, 'check_disabled') if not check_disabled: enabled += 1 return enabled
def function[parse_services, parameter[config, services]]: constant[Parse configuration to return number of enabled service checks. Arguments: config (obj): A configparser object with the configuration of anycast-healthchecker. services (list): A list of section names which holds configuration for each service check Returns: A number (int) of enabled service checks. ] variable[enabled] assign[=] constant[0] for taget[name[service]] in starred[name[services]] begin[:] variable[check_disabled] assign[=] call[name[config].getboolean, parameter[name[service], constant[check_disabled]]] if <ast.UnaryOp object at 0x7da1b0d0c0d0> begin[:] <ast.AugAssign object at 0x7da1b0d0e3e0> return[name[enabled]]
keyword[def] identifier[parse_services] ( identifier[config] , identifier[services] ): literal[string] identifier[enabled] = literal[int] keyword[for] identifier[service] keyword[in] identifier[services] : identifier[check_disabled] = identifier[config] . identifier[getboolean] ( identifier[service] , literal[string] ) keyword[if] keyword[not] identifier[check_disabled] : identifier[enabled] += literal[int] keyword[return] identifier[enabled]
def parse_services(config, services): """Parse configuration to return number of enabled service checks. Arguments: config (obj): A configparser object with the configuration of anycast-healthchecker. services (list): A list of section names which holds configuration for each service check Returns: A number (int) of enabled service checks. """ enabled = 0 for service in services: check_disabled = config.getboolean(service, 'check_disabled') if not check_disabled: enabled += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['service']] return enabled
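A self-contained check of the counting logic against an in-memory config. The original may target Python 2's ConfigParser; this sketch uses the Python 3 configparser module, and the section names are illustrative:

import configparser

config = configparser.ConfigParser()
config.read_string("""
[foo.bar.com]
check_disabled = false

[baz.example.com]
check_disabled = true
""")

# same rule as parse_services(): count sections whose check is not disabled
enabled = sum(1 for s in config.sections()
              if not config.getboolean(s, 'check_disabled'))
print(enabled)  # 1 -- only foo.bar.com has its check enabled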
def convert_trees(self, ptb_trees, representation='basic', include_punct=True, include_erased=False, universal=True, debug=False):
    """Convert a list of Penn Treebank formatted trees (ptb_trees)
    into Stanford Dependencies. The dependencies are represented as
    a list of sentences, where each sentence is itself a list of
    Token objects.

    Currently supported representations are 'basic', 'collapsed',
    'CCprocessed', and 'collapsedTree' which behave the same as they
    do in the CoreNLP command line tools. (note that in the online
    CoreNLP demo, 'collapsed' is called 'enhanced')

    Setting debug=True will cause debugging information (including
    the java command run) to be printed."""
    self._raise_on_bad_representation(representation)

    input_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        for ptb_tree in ptb_trees:
            self._raise_on_bad_input(ptb_tree)
            tree_with_line_break = ptb_tree + "\n"
            input_file.write(tree_with_line_break.encode("utf-8"))
        input_file.flush()
        input_file.close()

        command = [self.java_command,
                   '-ea',
                   '-cp', self.jar_filename,
                   JAVA_CLASS_NAME,
                   '-' + representation,
                   '-treeFile', input_file.name]
        # if we're including erased, we want to include punctuation
        # since otherwise we won't know what SD considers punctuation
        if include_punct or include_erased:
            command.append('-keepPunct')
        if not universal:
            command.append('-originalDependencies')
        if debug:
            print('Command:', ' '.join(command))
        sd_process = subprocess.Popen(command, stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE,
                                      universal_newlines=True)
        return_code = sd_process.wait()
        stderr = sd_process.stderr.read()
        stdout = sd_process.stdout.read()
        if debug:
            print("stdout: {%s}" % stdout)
            print("stderr: {%s}" % stderr)
            print('Exit code:', return_code)
        self._raise_on_bad_exit_or_output(return_code, stderr)
    finally:
        os.remove(input_file.name)

    try:
        sentences = Corpus.from_stanford_dependencies(stdout.splitlines(),
                                                      ptb_trees,
                                                      include_erased,
                                                      include_punct)
        for sentence, ptb_tree in zip(sentences, ptb_trees):
            if len(sentence) == 0:
                raise ValueError("Invalid PTB tree: %r" % ptb_tree)
    except:
        print("Error during conversion")
        if not debug:
            print("stdout: {%s}" % stdout)
            print("stderr: {%s}" % stderr)
        raise
    assert len(sentences) == len(ptb_trees), \
        "Only got %d sentences from Stanford Dependencies when " \
        "given %d trees." % (len(sentences), len(ptb_trees))
    return sentences
def function[convert_trees, parameter[self, ptb_trees, representation, include_punct, include_erased, universal, debug]]: constant[Convert a list of Penn Treebank formatted trees (ptb_trees) into Stanford Dependencies. The dependencies are represented as a list of sentences, where each sentence is itself a list of Token objects. Currently supported representations are 'basic', 'collapsed', 'CCprocessed', and 'collapsedTree' which behave the same as they in the CoreNLP command line tools. (note that in the online CoreNLP demo, 'collapsed' is called 'enhanced') Setting debug=True will cause debugging information (including the java command run to be printed.] call[name[self]._raise_on_bad_representation, parameter[name[representation]]] variable[input_file] assign[=] call[name[tempfile].NamedTemporaryFile, parameter[]] <ast.Try object at 0x7da1b1a2ae90> <ast.Try object at 0x7da1b1b9d720> assert[compare[call[name[len], parameter[name[sentences]]] equal[==] call[name[len], parameter[name[ptb_trees]]]]] return[name[sentences]]
keyword[def] identifier[convert_trees] ( identifier[self] , identifier[ptb_trees] , identifier[representation] = literal[string] , identifier[include_punct] = keyword[True] , identifier[include_erased] = keyword[False] , identifier[universal] = keyword[True] , identifier[debug] = keyword[False] ): literal[string] identifier[self] . identifier[_raise_on_bad_representation] ( identifier[representation] ) identifier[input_file] = identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[delete] = keyword[False] ) keyword[try] : keyword[for] identifier[ptb_tree] keyword[in] identifier[ptb_trees] : identifier[self] . identifier[_raise_on_bad_input] ( identifier[ptb_tree] ) identifier[tree_with_line_break] = identifier[ptb_tree] + literal[string] identifier[input_file] . identifier[write] ( identifier[tree_with_line_break] . identifier[encode] ( literal[string] )) identifier[input_file] . identifier[flush] () identifier[input_file] . identifier[close] () identifier[command] =[ identifier[self] . identifier[java_command] , literal[string] , literal[string] , identifier[self] . identifier[jar_filename] , identifier[JAVA_CLASS_NAME] , literal[string] + identifier[representation] , literal[string] , identifier[input_file] . identifier[name] ] keyword[if] identifier[include_punct] keyword[or] identifier[include_erased] : identifier[command] . identifier[append] ( literal[string] ) keyword[if] keyword[not] identifier[universal] : identifier[command] . identifier[append] ( literal[string] ) keyword[if] identifier[debug] : identifier[print] ( literal[string] , literal[string] . identifier[join] ( identifier[command] )) identifier[sd_process] = identifier[subprocess] . identifier[Popen] ( identifier[command] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] , identifier[universal_newlines] = keyword[True] ) identifier[return_code] = identifier[sd_process] . identifier[wait] () identifier[stderr] = identifier[sd_process] . identifier[stderr] . identifier[read] () identifier[stdout] = identifier[sd_process] . identifier[stdout] . identifier[read] () keyword[if] identifier[debug] : identifier[print] ( literal[string] % identifier[stdout] ) identifier[print] ( literal[string] % identifier[stderr] ) identifier[print] ( literal[string] , identifier[return_code] ) identifier[self] . identifier[_raise_on_bad_exit_or_output] ( identifier[return_code] , identifier[stderr] ) keyword[finally] : identifier[os] . identifier[remove] ( identifier[input_file] . identifier[name] ) keyword[try] : identifier[sentences] = identifier[Corpus] . identifier[from_stanford_dependencies] ( identifier[stdout] . identifier[splitlines] (), identifier[ptb_trees] , identifier[include_erased] , identifier[include_punct] ) keyword[for] identifier[sentence] , identifier[ptb_tree] keyword[in] identifier[zip] ( identifier[sentences] , identifier[ptb_trees] ): keyword[if] identifier[len] ( identifier[sentence] )== literal[int] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[ptb_tree] ) keyword[except] : identifier[print] ( literal[string] ) keyword[if] keyword[not] identifier[debug] : identifier[print] ( literal[string] % identifier[stdout] ) identifier[print] ( literal[string] % identifier[stderr] ) keyword[raise] keyword[assert] identifier[len] ( identifier[sentences] )== identifier[len] ( identifier[ptb_trees] ), literal[string] literal[string] %( identifier[len] ( identifier[sentences] ), identifier[len] ( identifier[ptb_trees] )) keyword[return] identifier[sentences]
def convert_trees(self, ptb_trees, representation='basic', include_punct=True, include_erased=False, universal=True, debug=False):
    """Convert a list of Penn Treebank formatted trees (ptb_trees)
    into Stanford Dependencies. The dependencies are represented as
    a list of sentences, where each sentence is itself a list of
    Token objects.

    Currently supported representations are 'basic', 'collapsed',
    'CCprocessed', and 'collapsedTree' which behave the same as they
    do in the CoreNLP command line tools. (note that in the online
    CoreNLP demo, 'collapsed' is called 'enhanced')

    Setting debug=True will cause debugging information (including
    the java command run) to be printed."""
    self._raise_on_bad_representation(representation)
    input_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        for ptb_tree in ptb_trees:
            self._raise_on_bad_input(ptb_tree)
            tree_with_line_break = ptb_tree + '\n'
            input_file.write(tree_with_line_break.encode('utf-8')) # depends on [control=['for'], data=['ptb_tree']]
        input_file.flush()
        input_file.close()
        command = [self.java_command, '-ea', '-cp', self.jar_filename, JAVA_CLASS_NAME, '-' + representation, '-treeFile', input_file.name]
        # if we're including erased, we want to include punctuation
        # since otherwise we won't know what SD considers punctuation
        if include_punct or include_erased:
            command.append('-keepPunct') # depends on [control=['if'], data=[]]
        if not universal:
            command.append('-originalDependencies') # depends on [control=['if'], data=[]]
        if debug:
            print('Command:', ' '.join(command)) # depends on [control=['if'], data=[]]
        sd_process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        return_code = sd_process.wait()
        stderr = sd_process.stderr.read()
        stdout = sd_process.stdout.read()
        if debug:
            print('stdout: {%s}' % stdout)
            print('stderr: {%s}' % stderr)
            print('Exit code:', return_code) # depends on [control=['if'], data=[]]
        self._raise_on_bad_exit_or_output(return_code, stderr) # depends on [control=['try'], data=[]]
    finally:
        os.remove(input_file.name)
    try:
        sentences = Corpus.from_stanford_dependencies(stdout.splitlines(), ptb_trees, include_erased, include_punct)
        for (sentence, ptb_tree) in zip(sentences, ptb_trees):
            if len(sentence) == 0:
                raise ValueError('Invalid PTB tree: %r' % ptb_tree) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
    except:
        print('Error during conversion')
        if not debug:
            print('stdout: {%s}' % stdout)
            print('stderr: {%s}' % stderr) # depends on [control=['if'], data=[]]
        raise # depends on [control=['except'], data=[]]
    assert len(sentences) == len(ptb_trees), 'Only got %d sentences from Stanford Dependencies when given %d trees.' % (len(sentences), len(ptb_trees))
    return sentences
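A minimal sketch of the write-temp-file / run-subprocess / clean-up pattern convert_trees() follows, with POSIX `cat` standing in for the Java converter (so this only runs on POSIX systems):

import os
import subprocess
import tempfile

input_file = tempfile.NamedTemporaryFile(delete=False)
try:
    input_file.write(b'(ROOT (NP (NN example)))\n')
    input_file.flush()
    input_file.close()
    # `cat` echoes the file back, mimicking a converter reading -treeFile input
    proc = subprocess.Popen(['cat', input_file.name],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            universal_newlines=True)
    return_code = proc.wait()
    stdout, stderr = proc.stdout.read(), proc.stderr.read()
finally:
    os.remove(input_file.name)  # the temp file is deleted even on failure
print(return_code, stdout.strip())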
def get_files_map(self): """stub""" files_map = {} if self.has_files(): for label in self.my_osid_object._my_map['fileIds']: asset_content = self._get_asset_content( Id(self.my_osid_object._my_map['fileIds'][label]['assetId']), Type(self.my_osid_object._my_map['fileIds'][label]['assetContentTypeId'])) try: files_map[label] = asset_content._my_map['base64'] except KeyError: files_map[label] = base64.b64encode(asset_content.get_data().read()) return files_map raise IllegalState('no files_map')
def function[get_files_map, parameter[self]]: constant[stub] variable[files_map] assign[=] dictionary[[], []] if call[name[self].has_files, parameter[]] begin[:] for taget[name[label]] in starred[call[name[self].my_osid_object._my_map][constant[fileIds]]] begin[:] variable[asset_content] assign[=] call[name[self]._get_asset_content, parameter[call[name[Id], parameter[call[call[call[name[self].my_osid_object._my_map][constant[fileIds]]][name[label]]][constant[assetId]]]], call[name[Type], parameter[call[call[call[name[self].my_osid_object._my_map][constant[fileIds]]][name[label]]][constant[assetContentTypeId]]]]]] <ast.Try object at 0x7da20c7ca560> return[name[files_map]] <ast.Raise object at 0x7da20c7cb160>
keyword[def] identifier[get_files_map] ( identifier[self] ): literal[string] identifier[files_map] ={} keyword[if] identifier[self] . identifier[has_files] (): keyword[for] identifier[label] keyword[in] identifier[self] . identifier[my_osid_object] . identifier[_my_map] [ literal[string] ]: identifier[asset_content] = identifier[self] . identifier[_get_asset_content] ( identifier[Id] ( identifier[self] . identifier[my_osid_object] . identifier[_my_map] [ literal[string] ][ identifier[label] ][ literal[string] ]), identifier[Type] ( identifier[self] . identifier[my_osid_object] . identifier[_my_map] [ literal[string] ][ identifier[label] ][ literal[string] ])) keyword[try] : identifier[files_map] [ identifier[label] ]= identifier[asset_content] . identifier[_my_map] [ literal[string] ] keyword[except] identifier[KeyError] : identifier[files_map] [ identifier[label] ]= identifier[base64] . identifier[b64encode] ( identifier[asset_content] . identifier[get_data] (). identifier[read] ()) keyword[return] identifier[files_map] keyword[raise] identifier[IllegalState] ( literal[string] )
def get_files_map(self): """stub""" files_map = {} if self.has_files(): for label in self.my_osid_object._my_map['fileIds']: asset_content = self._get_asset_content(Id(self.my_osid_object._my_map['fileIds'][label]['assetId']), Type(self.my_osid_object._my_map['fileIds'][label]['assetContentTypeId'])) try: files_map[label] = asset_content._my_map['base64'] # depends on [control=['try'], data=[]] except KeyError: files_map[label] = base64.b64encode(asset_content.get_data().read()) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['label']] return files_map # depends on [control=['if'], data=[]] raise IllegalState('no files_map')
def get_obj(self, name): """ Returns the AWS object associated with a given option. The heuristics used are a bit lame. If the option name contains the word 'bucket' it is assumed to be an S3 bucket, if the name contains the word 'queue' it is assumed to be an SQS queue and if it contains the word 'domain' it is assumed to be a SimpleDB domain. If the option name specified does not exist in the config file or if the AWS object cannot be retrieved this returns None. """ val = self.get(name) if not val: return None if name.find('queue') >= 0: obj = boto.lookup('sqs', val) if obj: obj.set_message_class(ServiceMessage) elif name.find('bucket') >= 0: obj = boto.lookup('s3', val) elif name.find('domain') >= 0: obj = boto.lookup('sdb', val) else: obj = None return obj
def function[get_obj, parameter[self, name]]: constant[ Returns the AWS object associated with a given option. The heuristics used are a bit lame. If the option name contains the word 'bucket' it is assumed to be an S3 bucket, if the name contains the word 'queue' it is assumed to be an SQS queue and if it contains the word 'domain' it is assumed to be a SimpleDB domain. If the option name specified does not exist in the config file or if the AWS object cannot be retrieved this returns None. ] variable[val] assign[=] call[name[self].get, parameter[name[name]]] if <ast.UnaryOp object at 0x7da1b2652e90> begin[:] return[constant[None]] if compare[call[name[name].find, parameter[constant[queue]]] greater_or_equal[>=] constant[0]] begin[:] variable[obj] assign[=] call[name[boto].lookup, parameter[constant[sqs], name[val]]] if name[obj] begin[:] call[name[obj].set_message_class, parameter[name[ServiceMessage]]] return[name[obj]]
keyword[def] identifier[get_obj] ( identifier[self] , identifier[name] ): literal[string] identifier[val] = identifier[self] . identifier[get] ( identifier[name] ) keyword[if] keyword[not] identifier[val] : keyword[return] keyword[None] keyword[if] identifier[name] . identifier[find] ( literal[string] )>= literal[int] : identifier[obj] = identifier[boto] . identifier[lookup] ( literal[string] , identifier[val] ) keyword[if] identifier[obj] : identifier[obj] . identifier[set_message_class] ( identifier[ServiceMessage] ) keyword[elif] identifier[name] . identifier[find] ( literal[string] )>= literal[int] : identifier[obj] = identifier[boto] . identifier[lookup] ( literal[string] , identifier[val] ) keyword[elif] identifier[name] . identifier[find] ( literal[string] )>= literal[int] : identifier[obj] = identifier[boto] . identifier[lookup] ( literal[string] , identifier[val] ) keyword[else] : identifier[obj] = keyword[None] keyword[return] identifier[obj]
def get_obj(self, name): """ Returns the AWS object associated with a given option. The heuristics used are a bit lame. If the option name contains the word 'bucket' it is assumed to be an S3 bucket, if the name contains the word 'queue' it is assumed to be an SQS queue and if it contains the word 'domain' it is assumed to be a SimpleDB domain. If the option name specified does not exist in the config file or if the AWS object cannot be retrieved this returns None. """ val = self.get(name) if not val: return None # depends on [control=['if'], data=[]] if name.find('queue') >= 0: obj = boto.lookup('sqs', val) if obj: obj.set_message_class(ServiceMessage) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif name.find('bucket') >= 0: obj = boto.lookup('s3', val) # depends on [control=['if'], data=[]] elif name.find('domain') >= 0: obj = boto.lookup('sdb', val) # depends on [control=['if'], data=[]] else: obj = None return obj
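The service-picking heuristic reduced to a standalone sketch (the function name is illustrative):

def service_for(name):
    # substring checks decide which AWS service a config option refers to
    if 'queue' in name:
        return 'sqs'
    if 'bucket' in name:
        return 's3'
    if 'domain' in name:
        return 'sdb'
    return None

print(service_for('input_queue'))    # sqs
print(service_for('output_bucket'))  # s3
print(service_for('log_level'))      # None -- no matching heuristic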
def get_subject_without_validation(jwt_bu64):
    """Extract subject from the JWT without validating the JWT.

    - The extracted subject cannot be trusted for authn or authz.

    Args:
      jwt_bu64: bytes
        JWT, encoded using a URL safe flavor of Base64.

    Returns:
      str: The subject contained in the JWT.

    """
    try:
        jwt_dict = get_jwt_dict(jwt_bu64)
    except JwtException as e:
        return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)
    try:
        return jwt_dict['sub']
    except LookupError:
        log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict)
def function[get_subject_without_validation, parameter[jwt_bu64]]: constant[Extract subject from the JWT without validating the JWT. - The extracted subject cannot be trusted for authn or authz. Args: jwt_bu64: bytes JWT, encoded using a a URL safe flavor of Base64. Returns: str: The subject contained in the JWT. ] <ast.Try object at 0x7da1b19b9c90> <ast.Try object at 0x7da1b1b69000>
keyword[def] identifier[get_subject_without_validation] ( identifier[jwt_bu64] ): literal[string] keyword[try] : identifier[jwt_dict] = identifier[get_jwt_dict] ( identifier[jwt_bu64] ) keyword[except] identifier[JwtException] keyword[as] identifier[e] : keyword[return] identifier[log_jwt_bu64_info] ( identifier[logging] . identifier[error] , identifier[str] ( identifier[e] ), identifier[jwt_bu64] ) keyword[try] : keyword[return] identifier[jwt_dict] [ literal[string] ] keyword[except] identifier[LookupError] : identifier[log_jwt_dict_info] ( identifier[logging] . identifier[error] , literal[string] , identifier[jwt_dict] )
def get_subject_without_validation(jwt_bu64):
    """Extract subject from the JWT without validating the JWT.

    - The extracted subject cannot be trusted for authn or authz.

    Args:
      jwt_bu64: bytes
        JWT, encoded using a URL safe flavor of Base64.

    Returns:
      str: The subject contained in the JWT.

    """
    try:
        jwt_dict = get_jwt_dict(jwt_bu64) # depends on [control=['try'], data=[]]
    except JwtException as e:
        return log_jwt_bu64_info(logging.error, str(e), jwt_bu64) # depends on [control=['except'], data=['e']]
    try:
        return jwt_dict['sub'] # depends on [control=['try'], data=[]]
    except LookupError:
        log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict) # depends on [control=['except'], data=[]]
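To see why the extracted subject cannot be trusted: a JWT payload is plain URL-safe Base64, so anyone can read or forge "sub" without a key. A self-contained demonstration with a hand-built unsigned token (the subject string is made up):

import base64
import json

def b64url_decode(segment):
    padded = segment + '=' * (-len(segment) % 4)  # restore stripped padding
    return base64.urlsafe_b64decode(padded)

header = base64.urlsafe_b64encode(b'{"alg":"none"}').rstrip(b'=').decode()
payload = base64.urlsafe_b64encode(b'{"sub":"CN=demo,DC=example"}').rstrip(b'=').decode()
token = '%s.%s.' % (header, payload)  # empty signature segment

claims = json.loads(b64url_decode(token.split('.')[1]))
print(claims['sub'])  # CN=demo,DC=example -- readable without any validation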
def update_share(self, share_id, **kwargs): """Updates a given share :param share_id: (int) Share ID :param perms: (int) update permissions (see share_file_with_user() below) :param password: (string) updated password for public link Share :param public_upload: (boolean) enable/disable public upload for public shares :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned """ perms = kwargs.get('perms', None) password = kwargs.get('password', None) public_upload = kwargs.get('public_upload', None) if (isinstance(perms, int)) and (perms > self.OCS_PERMISSION_ALL): perms = None if not (perms or password or (public_upload is not None)): return False if not isinstance(share_id, int): return False data = {} if perms: data['permissions'] = perms if isinstance(password, six.string_types): data['password'] = password if (public_upload is not None) and (isinstance(public_upload, bool)): data['publicUpload'] = str(public_upload).lower() res = self._make_ocs_request( 'PUT', self.OCS_SERVICE_SHARE, 'shares/' + str(share_id), data=data ) if res.status_code == 200: return True raise HTTPResponseError(res)
def function[update_share, parameter[self, share_id]]: constant[Updates a given share :param share_id: (int) Share ID :param perms: (int) update permissions (see share_file_with_user() below) :param password: (string) updated password for public link Share :param public_upload: (boolean) enable/disable public upload for public shares :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned ] variable[perms] assign[=] call[name[kwargs].get, parameter[constant[perms], constant[None]]] variable[password] assign[=] call[name[kwargs].get, parameter[constant[password], constant[None]]] variable[public_upload] assign[=] call[name[kwargs].get, parameter[constant[public_upload], constant[None]]] if <ast.BoolOp object at 0x7da1b0fcd930> begin[:] variable[perms] assign[=] constant[None] if <ast.UnaryOp object at 0x7da1b0fcdf90> begin[:] return[constant[False]] if <ast.UnaryOp object at 0x7da1b0fccb80> begin[:] return[constant[False]] variable[data] assign[=] dictionary[[], []] if name[perms] begin[:] call[name[data]][constant[permissions]] assign[=] name[perms] if call[name[isinstance], parameter[name[password], name[six].string_types]] begin[:] call[name[data]][constant[password]] assign[=] name[password] if <ast.BoolOp object at 0x7da18eb54f10> begin[:] call[name[data]][constant[publicUpload]] assign[=] call[call[name[str], parameter[name[public_upload]]].lower, parameter[]] variable[res] assign[=] call[name[self]._make_ocs_request, parameter[constant[PUT], name[self].OCS_SERVICE_SHARE, binary_operation[constant[shares/] + call[name[str], parameter[name[share_id]]]]]] if compare[name[res].status_code equal[==] constant[200]] begin[:] return[constant[True]] <ast.Raise object at 0x7da1b0fcfbe0>
keyword[def] identifier[update_share] ( identifier[self] , identifier[share_id] ,** identifier[kwargs] ): literal[string] identifier[perms] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] ) identifier[password] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] ) identifier[public_upload] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] ) keyword[if] ( identifier[isinstance] ( identifier[perms] , identifier[int] )) keyword[and] ( identifier[perms] > identifier[self] . identifier[OCS_PERMISSION_ALL] ): identifier[perms] = keyword[None] keyword[if] keyword[not] ( identifier[perms] keyword[or] identifier[password] keyword[or] ( identifier[public_upload] keyword[is] keyword[not] keyword[None] )): keyword[return] keyword[False] keyword[if] keyword[not] identifier[isinstance] ( identifier[share_id] , identifier[int] ): keyword[return] keyword[False] identifier[data] ={} keyword[if] identifier[perms] : identifier[data] [ literal[string] ]= identifier[perms] keyword[if] identifier[isinstance] ( identifier[password] , identifier[six] . identifier[string_types] ): identifier[data] [ literal[string] ]= identifier[password] keyword[if] ( identifier[public_upload] keyword[is] keyword[not] keyword[None] ) keyword[and] ( identifier[isinstance] ( identifier[public_upload] , identifier[bool] )): identifier[data] [ literal[string] ]= identifier[str] ( identifier[public_upload] ). identifier[lower] () identifier[res] = identifier[self] . identifier[_make_ocs_request] ( literal[string] , identifier[self] . identifier[OCS_SERVICE_SHARE] , literal[string] + identifier[str] ( identifier[share_id] ), identifier[data] = identifier[data] ) keyword[if] identifier[res] . identifier[status_code] == literal[int] : keyword[return] keyword[True] keyword[raise] identifier[HTTPResponseError] ( identifier[res] )
def update_share(self, share_id, **kwargs): """Updates a given share :param share_id: (int) Share ID :param perms: (int) update permissions (see share_file_with_user() below) :param password: (string) updated password for public link Share :param public_upload: (boolean) enable/disable public upload for public shares :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned """ perms = kwargs.get('perms', None) password = kwargs.get('password', None) public_upload = kwargs.get('public_upload', None) if isinstance(perms, int) and perms > self.OCS_PERMISSION_ALL: perms = None # depends on [control=['if'], data=[]] if not (perms or password or public_upload is not None): return False # depends on [control=['if'], data=[]] if not isinstance(share_id, int): return False # depends on [control=['if'], data=[]] data = {} if perms: data['permissions'] = perms # depends on [control=['if'], data=[]] if isinstance(password, six.string_types): data['password'] = password # depends on [control=['if'], data=[]] if public_upload is not None and isinstance(public_upload, bool): data['publicUpload'] = str(public_upload).lower() # depends on [control=['if'], data=[]] res = self._make_ocs_request('PUT', self.OCS_SERVICE_SHARE, 'shares/' + str(share_id), data=data) if res.status_code == 200: return True # depends on [control=['if'], data=[]] raise HTTPResponseError(res)
def run(): """ Runs flake8 lint :return: A bool - if flake8 did not find any errors """ print('Running flake8 %s' % flake8.__version__) flake8_style = get_style_guide(config_file=os.path.join(package_root, 'tox.ini')) paths = [] for _dir in [package_name, 'dev', 'tests']: for root, _, filenames in os.walk(_dir): for filename in filenames: if not filename.endswith('.py'): continue paths.append(os.path.join(root, filename)) report = flake8_style.check_files(paths) success = report.total_errors == 0 if success: print('OK') return success
def function[run, parameter[]]: constant[ Runs flake8 lint :return: A bool - if flake8 did not find any errors ] call[name[print], parameter[binary_operation[constant[Running flake8 %s] <ast.Mod object at 0x7da2590d6920> name[flake8].__version__]]] variable[flake8_style] assign[=] call[name[get_style_guide], parameter[]] variable[paths] assign[=] list[[]] for taget[name[_dir]] in starred[list[[<ast.Name object at 0x7da1b08aca30>, <ast.Constant object at 0x7da1b08aed70>, <ast.Constant object at 0x7da1b08aead0>]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b08ad540>, <ast.Name object at 0x7da1b08afcd0>, <ast.Name object at 0x7da1b08adba0>]]] in starred[call[name[os].walk, parameter[name[_dir]]]] begin[:] for taget[name[filename]] in starred[name[filenames]] begin[:] if <ast.UnaryOp object at 0x7da1b08ad6f0> begin[:] continue call[name[paths].append, parameter[call[name[os].path.join, parameter[name[root], name[filename]]]]] variable[report] assign[=] call[name[flake8_style].check_files, parameter[name[paths]]] variable[success] assign[=] compare[name[report].total_errors equal[==] constant[0]] if name[success] begin[:] call[name[print], parameter[constant[OK]]] return[name[success]]
keyword[def] identifier[run] (): literal[string] identifier[print] ( literal[string] % identifier[flake8] . identifier[__version__] ) identifier[flake8_style] = identifier[get_style_guide] ( identifier[config_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[package_root] , literal[string] )) identifier[paths] =[] keyword[for] identifier[_dir] keyword[in] [ identifier[package_name] , literal[string] , literal[string] ]: keyword[for] identifier[root] , identifier[_] , identifier[filenames] keyword[in] identifier[os] . identifier[walk] ( identifier[_dir] ): keyword[for] identifier[filename] keyword[in] identifier[filenames] : keyword[if] keyword[not] identifier[filename] . identifier[endswith] ( literal[string] ): keyword[continue] identifier[paths] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[filename] )) identifier[report] = identifier[flake8_style] . identifier[check_files] ( identifier[paths] ) identifier[success] = identifier[report] . identifier[total_errors] == literal[int] keyword[if] identifier[success] : identifier[print] ( literal[string] ) keyword[return] identifier[success]
def run(): """ Runs flake8 lint :return: A bool - if flake8 did not find any errors """ print('Running flake8 %s' % flake8.__version__) flake8_style = get_style_guide(config_file=os.path.join(package_root, 'tox.ini')) paths = [] for _dir in [package_name, 'dev', 'tests']: for (root, _, filenames) in os.walk(_dir): for filename in filenames: if not filename.endswith('.py'): continue # depends on [control=['if'], data=[]] paths.append(os.path.join(root, filename)) # depends on [control=['for'], data=['filename']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['_dir']] report = flake8_style.check_files(paths) success = report.total_errors == 0 if success: print('OK') # depends on [control=['if'], data=[]] return success
def to_dense(self): """Convert sparse Dataset to dense matrix.""" if hasattr(self._X_train, 'todense'): self._X_train = self._X_train.todense() self._X_test = self._X_test.todense()
def function[to_dense, parameter[self]]: constant[Convert sparse Dataset to dense matrix.] if call[name[hasattr], parameter[name[self]._X_train, constant[todense]]] begin[:] name[self]._X_train assign[=] call[name[self]._X_train.todense, parameter[]] name[self]._X_test assign[=] call[name[self]._X_test.todense, parameter[]]
keyword[def] identifier[to_dense] ( identifier[self] ): literal[string] keyword[if] identifier[hasattr] ( identifier[self] . identifier[_X_train] , literal[string] ): identifier[self] . identifier[_X_train] = identifier[self] . identifier[_X_train] . identifier[todense] () identifier[self] . identifier[_X_test] = identifier[self] . identifier[_X_test] . identifier[todense] ()
def to_dense(self): """Convert sparse Dataset to dense matrix.""" if hasattr(self._X_train, 'todense'): self._X_train = self._X_train.todense() self._X_test = self._X_test.todense() # depends on [control=['if'], data=[]]
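A quick demonstration of the hasattr(..., 'todense') guard this method relies on: SciPy sparse matrices expose todense(), while plain NumPy arrays do not:

import numpy as np
from scipy import sparse

X = sparse.csr_matrix(np.eye(3))
if hasattr(X, 'todense'):
    X = X.todense()
print(type(X))  # <class 'numpy.matrix'>

Y = np.eye(3)
print(hasattr(Y, 'todense'))  # False -- already dense, nothing to convert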
def results(self): """Saved results from the nested sampling run. If bounding distributions were saved, those are also returned.""" # Add all saved samples to the results. if self.save_samples: with warnings.catch_warnings(): warnings.simplefilter("ignore") results = [('nlive', self.nlive), ('niter', self.it - 1), ('ncall', np.array(self.saved_nc)), ('eff', self.eff), ('samples', np.array(self.saved_v)), ('samples_id', np.array(self.saved_id)), ('samples_it', np.array(self.saved_it)), ('samples_u', np.array(self.saved_u)), ('logwt', np.array(self.saved_logwt)), ('logl', np.array(self.saved_logl)), ('logvol', np.array(self.saved_logvol)), ('logz', np.array(self.saved_logz)), ('logzerr', np.sqrt(np.array(self.saved_logzvar))), ('information', np.array(self.saved_h))] else: raise ValueError("You didn't save any samples!") # Add any saved bounds (and ancillary quantities) to the results. if self.save_bounds: results.append(('bound', copy.deepcopy(self.bound))) results.append(('bound_iter', np.array(self.saved_bounditer, dtype='int'))) results.append(('samples_bound', np.array(self.saved_boundidx, dtype='int'))) results.append(('scale', np.array(self.saved_scale))) return Results(results)
def function[results, parameter[self]]: constant[Saved results from the nested sampling run. If bounding distributions were saved, those are also returned.] if name[self].save_samples begin[:] with call[name[warnings].catch_warnings, parameter[]] begin[:] call[name[warnings].simplefilter, parameter[constant[ignore]]] variable[results] assign[=] list[[<ast.Tuple object at 0x7da1b1eae9e0>, <ast.Tuple object at 0x7da1b1eac9d0>, <ast.Tuple object at 0x7da1b1eae230>, <ast.Tuple object at 0x7da1b1eadba0>, <ast.Tuple object at 0x7da1b1eaca00>, <ast.Tuple object at 0x7da1b1eacbb0>, <ast.Tuple object at 0x7da20c7ca230>, <ast.Tuple object at 0x7da20c7ca8f0>, <ast.Tuple object at 0x7da20c7c8e80>, <ast.Tuple object at 0x7da20c7cb4f0>, <ast.Tuple object at 0x7da20c7c9f60>, <ast.Tuple object at 0x7da20c7c9720>, <ast.Tuple object at 0x7da1b1d47280>, <ast.Tuple object at 0x7da1b1d452a0>]] if name[self].save_bounds begin[:] call[name[results].append, parameter[tuple[[<ast.Constant object at 0x7da1b1d46800>, <ast.Call object at 0x7da1b1d47b50>]]]] call[name[results].append, parameter[tuple[[<ast.Constant object at 0x7da1b1d44af0>, <ast.Call object at 0x7da1b1d47340>]]]] call[name[results].append, parameter[tuple[[<ast.Constant object at 0x7da1b1d47670>, <ast.Call object at 0x7da1b1d47f40>]]]] call[name[results].append, parameter[tuple[[<ast.Constant object at 0x7da1b1d445b0>, <ast.Call object at 0x7da1b1d47550>]]]] return[call[name[Results], parameter[name[results]]]]
keyword[def] identifier[results] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[save_samples] : keyword[with] identifier[warnings] . identifier[catch_warnings] (): identifier[warnings] . identifier[simplefilter] ( literal[string] ) identifier[results] =[( literal[string] , identifier[self] . identifier[nlive] ), ( literal[string] , identifier[self] . identifier[it] - literal[int] ), ( literal[string] , identifier[np] . identifier[array] ( identifier[self] . identifier[saved_nc] )), ( literal[string] , identifier[self] . identifier[eff] ), ( literal[string] , identifier[np] . identifier[array] ( identifier[self] . identifier[saved_v] )), ( literal[string] , identifier[np] . identifier[array] ( identifier[self] . identifier[saved_id] )), ( literal[string] , identifier[np] . identifier[array] ( identifier[self] . identifier[saved_it] )), ( literal[string] , identifier[np] . identifier[array] ( identifier[self] . identifier[saved_u] )), ( literal[string] , identifier[np] . identifier[array] ( identifier[self] . identifier[saved_logwt] )), ( literal[string] , identifier[np] . identifier[array] ( identifier[self] . identifier[saved_logl] )), ( literal[string] , identifier[np] . identifier[array] ( identifier[self] . identifier[saved_logvol] )), ( literal[string] , identifier[np] . identifier[array] ( identifier[self] . identifier[saved_logz] )), ( literal[string] , identifier[np] . identifier[sqrt] ( identifier[np] . identifier[array] ( identifier[self] . identifier[saved_logzvar] ))), ( literal[string] , identifier[np] . identifier[array] ( identifier[self] . identifier[saved_h] ))] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[self] . identifier[save_bounds] : identifier[results] . identifier[append] (( literal[string] , identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[bound] ))) identifier[results] . identifier[append] (( literal[string] , identifier[np] . identifier[array] ( identifier[self] . identifier[saved_bounditer] , identifier[dtype] = literal[string] ))) identifier[results] . identifier[append] (( literal[string] , identifier[np] . identifier[array] ( identifier[self] . identifier[saved_boundidx] , identifier[dtype] = literal[string] ))) identifier[results] . identifier[append] (( literal[string] , identifier[np] . identifier[array] ( identifier[self] . identifier[saved_scale] ))) keyword[return] identifier[Results] ( identifier[results] )
def results(self): """Saved results from the nested sampling run. If bounding distributions were saved, those are also returned.""" # Add all saved samples to the results. if self.save_samples: with warnings.catch_warnings(): warnings.simplefilter('ignore') results = [('nlive', self.nlive), ('niter', self.it - 1), ('ncall', np.array(self.saved_nc)), ('eff', self.eff), ('samples', np.array(self.saved_v)), ('samples_id', np.array(self.saved_id)), ('samples_it', np.array(self.saved_it)), ('samples_u', np.array(self.saved_u)), ('logwt', np.array(self.saved_logwt)), ('logl', np.array(self.saved_logl)), ('logvol', np.array(self.saved_logvol)), ('logz', np.array(self.saved_logz)), ('logzerr', np.sqrt(np.array(self.saved_logzvar))), ('information', np.array(self.saved_h))] # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]] else: raise ValueError("You didn't save any samples!") # Add any saved bounds (and ancillary quantities) to the results. if self.save_bounds: results.append(('bound', copy.deepcopy(self.bound))) results.append(('bound_iter', np.array(self.saved_bounditer, dtype='int'))) results.append(('samples_bound', np.array(self.saved_boundidx, dtype='int'))) results.append(('scale', np.array(self.saved_scale))) # depends on [control=['if'], data=[]] return Results(results)
def prepare_queues(queues, lock): """Replaces queue._put() method in order to notify the waiting Condition.""" for queue in queues: queue._pebble_lock = lock with queue.mutex: queue._pebble_old_method = queue._put queue._put = MethodType(new_method, queue)
def function[prepare_queues, parameter[queues, lock]]: constant[Replaces queue._put() method in order to notify the waiting Condition.] for taget[name[queue]] in starred[name[queues]] begin[:] name[queue]._pebble_lock assign[=] name[lock] with name[queue].mutex begin[:] name[queue]._pebble_old_method assign[=] name[queue]._put name[queue]._put assign[=] call[name[MethodType], parameter[name[new_method], name[queue]]]
keyword[def] identifier[prepare_queues] ( identifier[queues] , identifier[lock] ): literal[string] keyword[for] identifier[queue] keyword[in] identifier[queues] : identifier[queue] . identifier[_pebble_lock] = identifier[lock] keyword[with] identifier[queue] . identifier[mutex] : identifier[queue] . identifier[_pebble_old_method] = identifier[queue] . identifier[_put] identifier[queue] . identifier[_put] = identifier[MethodType] ( identifier[new_method] , identifier[queue] )
def prepare_queues(queues, lock): """Replaces queue._put() method in order to notify the waiting Condition.""" for queue in queues: queue._pebble_lock = lock with queue.mutex: queue._pebble_old_method = queue._put queue._put = MethodType(new_method, queue) # depends on [control=['with'], data=[]] # depends on [control=['for'], data=['queue']]
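prepare_queues leans on a module-level new_method that is not shown here. A minimal sketch of what it presumably does (chain to the saved _put, then wake waiters on the shared Condition), followed by a usage line; the body of new_method is an assumption, not the library's confirmed implementation.

from queue import Queue
from threading import Condition

def new_method(self, *args):
    self._pebble_old_method(*args)    # run the original Queue._put
    with self._pebble_lock:           # shared Condition installed by prepare_queues
        self._pebble_lock.notify_all()

lock = Condition()
queues = [Queue(), Queue()]
prepare_queues(queues, lock)
queues[0].put('item')                 # waiters on `lock` are now notified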
def validation_error_message(cls, spec, backends=None): """ Returns an options validation error message if there are any invalid keywords. Otherwise returns None. """ try: cls.validate_spec(spec, backends=backends) except OptionError as e: return e.format_options_error()
def function[validation_error_message, parameter[cls, spec, backends]]: constant[ Returns an options validation error message if there are any invalid keywords. Otherwise returns None. ] <ast.Try object at 0x7da20c990be0>
keyword[def] identifier[validation_error_message] ( identifier[cls] , identifier[spec] , identifier[backends] = keyword[None] ): literal[string] keyword[try] : identifier[cls] . identifier[validate_spec] ( identifier[spec] , identifier[backends] = identifier[backends] ) keyword[except] identifier[OptionError] keyword[as] identifier[e] : keyword[return] identifier[e] . identifier[format_options_error] ()
def validation_error_message(cls, spec, backends=None): """ Returns an options validation error message if there are any invalid keywords. Otherwise returns None. """ try: cls.validate_spec(spec, backends=backends) # depends on [control=['try'], data=[]] except OptionError as e: return e.format_options_error() # depends on [control=['except'], data=['e']]
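A short hedged usage sketch; the owning Options-style class and the offending keyword are hypothetical stand-ins. Note the method returns None implicitly when validate_spec passes.

msg = Options.validation_error_message({'not_a_real_keyword': 10})
if msg is not None:
    print(msg)    # formatted text from OptionError.format_options_error()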
def allow_event_stream(self, **kwargs): """ Allow the user of this token to access their event stream. """ scope = ScopeURI('stream', 'subscribe', {'path': '/2010-04-01/Events'}) if kwargs: scope.add_param('params', urlencode(kwargs, doseq=True)) self.capabilities["events"] = scope
def function[allow_event_stream, parameter[self]]: constant[ Allow the user of this token to access their event stream. ] variable[scope] assign[=] call[name[ScopeURI], parameter[constant[stream], constant[subscribe], dictionary[[<ast.Constant object at 0x7da20e749ab0>], [<ast.Constant object at 0x7da20e74af80>]]]] if name[kwargs] begin[:] call[name[scope].add_param, parameter[constant[params], call[name[urlencode], parameter[name[kwargs]]]]] call[name[self].capabilities][constant[events]] assign[=] name[scope]
keyword[def] identifier[allow_event_stream] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[scope] = identifier[ScopeURI] ( literal[string] , literal[string] ,{ literal[string] : literal[string] }) keyword[if] identifier[kwargs] : identifier[scope] . identifier[add_param] ( literal[string] , identifier[urlencode] ( identifier[kwargs] , identifier[doseq] = keyword[True] )) identifier[self] . identifier[capabilities] [ literal[string] ]= identifier[scope]
def allow_event_stream(self, **kwargs): """ Allow the user of this token to access their event stream. """ scope = ScopeURI('stream', 'subscribe', {'path': '/2010-04-01/Events'}) if kwargs: scope.add_param('params', urlencode(kwargs, doseq=True)) # depends on [control=['if'], data=[]] self.capabilities['events'] = scope
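A hedged usage sketch showing how the kwargs end up urlencoded into the scope's 'params' entry; the capability-token instance is assumed.

# `capability` construction (Twilio-style capability token) is assumed.
capability.allow_event_stream(path='/2010-04-01/Events', status='in-progress')
# kwargs are urlencoded into the scope's 'params' entry, roughly:
# 'path=%2F2010-04-01%2FEvents&status=in-progress'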
def _send(data):
    """Send data to the Clowder API.

    :param data: Dictionary of API data
    :type data: dict

    """
    url = data.get('url', CLOWDER_API_URL)

    _validate_data(data)
    if api_key is not None:
        data['api_key'] = api_key

    if 'value' not in data:
        data['value'] = data.get('status', 1)

    if 'frequency' in data:
        data['frequency'] = _clean_frequency(data['frequency'])

    try:
        requests.post(url, data=data, timeout=TIMEOUT).text

    # Accessing .text confirms that the request reached the server
    # and that the request body was fully sent.
    # Because we don't care about the response, we set the timeout
    # value to be low and ignore read timeouts
    except requests.exceptions.ReadTimeout as err:
        pass

    # Allow a wildcard exception for any other type of processing error
    except requests.exceptions.RequestException as err:
        logging.error('Clowder exception %s', err)
def function[_send, parameter[data]]: constant[Send data to the Clowder API. :param data: Dictionary of API data :type data: dict ] variable[url] assign[=] call[name[data].get, parameter[constant[url], name[CLOWDER_API_URL]]] call[name[_validate_data], parameter[name[data]]] if compare[name[api_key] is_not constant[None]] begin[:] call[name[data]][constant[api_key]] assign[=] name[api_key] if compare[constant[value] <ast.NotIn object at 0x7da2590d7190> name[data]] begin[:] call[name[data]][constant[value]] assign[=] call[name[data].get, parameter[constant[status], constant[1]]] if compare[constant[frequency] in name[data]] begin[:] call[name[data]][constant[frequency]] assign[=] call[name[_clean_frequency], parameter[call[name[data]][constant[frequency]]]] <ast.Try object at 0x7da18ede47f0>
keyword[def] identifier[_send] ( identifier[data] ): literal[string] identifier[url] = identifier[data] . identifier[get] ( literal[string] , identifier[CLOWDER_API_URL] ) identifier[_validate_data] ( identifier[data] ) keyword[if] identifier[api_key] keyword[is] keyword[not] keyword[None] : identifier[data] [ literal[string] ]= identifier[api_key] keyword[if] literal[string] keyword[not] keyword[in] identifier[data] : identifier[data] [ literal[string] ]= identifier[data] . identifier[get] ( literal[string] , literal[int] ) keyword[if] literal[string] keyword[in] identifier[data] : identifier[data] [ literal[string] ]= identifier[_clean_frequency] ( identifier[data] [ literal[string] ]) keyword[try] : identifier[requests] . identifier[post] ( identifier[url] , identifier[data] = identifier[data] , identifier[timeout] = identifier[TIMEOUT] ). identifier[text] keyword[except] identifier[requests] . identifier[exceptions] . identifier[ReadTimeout] keyword[as] identifier[err] : keyword[pass] keyword[except] identifier[requests] . identifier[exceptions] . identifier[RequestException] keyword[as] identifier[err] : identifier[logging] . identifier[error] ( literal[string] , identifier[err] )
def _send(data):
    """Send data to the Clowder API.

    :param data: Dictionary of API data
    :type data: dict

    """
    url = data.get('url', CLOWDER_API_URL)
    _validate_data(data)
    if api_key is not None:
        data['api_key'] = api_key # depends on [control=['if'], data=['api_key']]
    if 'value' not in data:
        data['value'] = data.get('status', 1) # depends on [control=['if'], data=['data']]
    if 'frequency' in data:
        data['frequency'] = _clean_frequency(data['frequency']) # depends on [control=['if'], data=['data']]
    try:
        requests.post(url, data=data, timeout=TIMEOUT).text # depends on [control=['try'], data=[]]
    # Accessing .text confirms that the request reached the server
    # and that the request body was fully sent.
    # Because we don't care about the response, we set the timeout
    # value to be low and ignore read timeouts
    except requests.exceptions.ReadTimeout as err:
        pass # depends on [control=['except'], data=[]]
    # Allow a wildcard exception for any other type of processing error
    except requests.exceptions.RequestException as err:
        logging.error('Clowder exception %s', err) # depends on [control=['except'], data=['err']]
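A hedged example of a payload shape _send() accepts; which fields _validate_data actually requires is not visible here, so every key below is an assumption.

_send({
    'name': 'nightly-backup',   # hypothetical monitor name
    'status': 1,                # copied into 'value' because 'value' is absent
    'frequency': '1h',          # normalized by _clean_frequency before posting
})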
def _elsed_block_range(self, lineno, orelse, last=None): """handle block line numbers range for try/finally, for, if and while statements """ if lineno == self.fromlineno: return lineno, lineno if orelse: if lineno >= orelse[0].fromlineno: return lineno, orelse[-1].tolineno return lineno, orelse[0].fromlineno - 1 return lineno, last or self.tolineno
def function[_elsed_block_range, parameter[self, lineno, orelse, last]]: constant[handle block line numbers range for try/finally, for, if and while statements ] if compare[name[lineno] equal[==] name[self].fromlineno] begin[:] return[tuple[[<ast.Name object at 0x7da1b1ec1f30>, <ast.Name object at 0x7da1b1ec2860>]]] if name[orelse] begin[:] if compare[name[lineno] greater_or_equal[>=] call[name[orelse]][constant[0]].fromlineno] begin[:] return[tuple[[<ast.Name object at 0x7da1b1ec39d0>, <ast.Attribute object at 0x7da1b1ec2c50>]]] return[tuple[[<ast.Name object at 0x7da1b1ec35e0>, <ast.BinOp object at 0x7da1b1ec2ad0>]]] return[tuple[[<ast.Name object at 0x7da1b1ec1c60>, <ast.BoolOp object at 0x7da1b1ec2a40>]]]
keyword[def] identifier[_elsed_block_range] ( identifier[self] , identifier[lineno] , identifier[orelse] , identifier[last] = keyword[None] ): literal[string] keyword[if] identifier[lineno] == identifier[self] . identifier[fromlineno] : keyword[return] identifier[lineno] , identifier[lineno] keyword[if] identifier[orelse] : keyword[if] identifier[lineno] >= identifier[orelse] [ literal[int] ]. identifier[fromlineno] : keyword[return] identifier[lineno] , identifier[orelse] [- literal[int] ]. identifier[tolineno] keyword[return] identifier[lineno] , identifier[orelse] [ literal[int] ]. identifier[fromlineno] - literal[int] keyword[return] identifier[lineno] , identifier[last] keyword[or] identifier[self] . identifier[tolineno]
def _elsed_block_range(self, lineno, orelse, last=None): """handle block line numbers range for try/finally, for, if and while statements """ if lineno == self.fromlineno: return (lineno, lineno) # depends on [control=['if'], data=['lineno']] if orelse: if lineno >= orelse[0].fromlineno: return (lineno, orelse[-1].tolineno) # depends on [control=['if'], data=['lineno']] return (lineno, orelse[0].fromlineno - 1) # depends on [control=['if'], data=[]] return (lineno, last or self.tolineno)
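A worked illustration of the three return branches, derived directly from the code above; the astroid-style layout, where `orelse` holds the else-body statements, is the only assumption.

# Suppose the statement spans these source lines:
#
#   1  if cond:        # self.fromlineno == 1
#   2      a()
#   3  else:
#   4      b()         # orelse[0].fromlineno == 4, orelse[-1].tolineno == 4
#
# _elsed_block_range(1, orelse) -> (1, 1)   the header line alone
# _elsed_block_range(2, orelse) -> (2, 3)   body ends just before the else body
# _elsed_block_range(4, orelse) -> (4, 4)   inside the else block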
def fillstats(args): """ %prog fillstats genome.fill Build stats on .fill file from GapCloser. """ from jcvi.utils.cbook import SummaryStats, percentage, thousands p = OptionParser(fillstats.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fillfile, = args fp = open(fillfile) scaffolds = 0 gaps = [] for row in fp: if row[0] == ">": scaffolds += 1 continue fl = FillLine(row) gaps.append(fl) print("{0} scaffolds in total".format(scaffolds), file=sys.stderr) closed = [x for x in gaps if x.closed] closedbp = sum(x.before for x in closed) notClosed = [x for x in gaps if not x.closed] notClosedbp = sum(x.before for x in notClosed) totalgaps = len(closed) + len(notClosed) print("Closed gaps: {0} size: {1} bp".\ format(percentage(len(closed), totalgaps), thousands(closedbp)), file=sys.stderr) ss = SummaryStats([x.after for x in closed]) print(ss, file=sys.stderr) ss = SummaryStats([x.delta for x in closed]) print("Delta:", ss, file=sys.stderr) print("Remaining gaps: {0} size: {1} bp".\ format(percentage(len(notClosed), totalgaps), thousands(notClosedbp)), file=sys.stderr) ss = SummaryStats([x.after for x in notClosed]) print(ss, file=sys.stderr)
def function[fillstats, parameter[args]]: constant[ %prog fillstats genome.fill Build stats on .fill file from GapCloser. ] from relative_module[jcvi.utils.cbook] import module[SummaryStats], module[percentage], module[thousands] variable[p] assign[=] call[name[OptionParser], parameter[name[fillstats].__doc__]] <ast.Tuple object at 0x7da1b084d720> assign[=] call[name[p].parse_args, parameter[name[args]]] if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:] call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b084e410>]] <ast.Tuple object at 0x7da1b084d6f0> assign[=] name[args] variable[fp] assign[=] call[name[open], parameter[name[fillfile]]] variable[scaffolds] assign[=] constant[0] variable[gaps] assign[=] list[[]] for taget[name[row]] in starred[name[fp]] begin[:] if compare[call[name[row]][constant[0]] equal[==] constant[>]] begin[:] <ast.AugAssign object at 0x7da1b0962380> continue variable[fl] assign[=] call[name[FillLine], parameter[name[row]]] call[name[gaps].append, parameter[name[fl]]] call[name[print], parameter[call[constant[{0} scaffolds in total].format, parameter[name[scaffolds]]]]] variable[closed] assign[=] <ast.ListComp object at 0x7da1b09615d0> variable[closedbp] assign[=] call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b0963d30>]] variable[notClosed] assign[=] <ast.ListComp object at 0x7da1b0961000> variable[notClosedbp] assign[=] call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b0976140>]] variable[totalgaps] assign[=] binary_operation[call[name[len], parameter[name[closed]]] + call[name[len], parameter[name[notClosed]]]] call[name[print], parameter[call[constant[Closed gaps: {0} size: {1} bp].format, parameter[call[name[percentage], parameter[call[name[len], parameter[name[closed]]], name[totalgaps]]], call[name[thousands], parameter[name[closedbp]]]]]]] variable[ss] assign[=] call[name[SummaryStats], parameter[<ast.ListComp object at 0x7da1b0977190>]] call[name[print], parameter[name[ss]]] variable[ss] assign[=] call[name[SummaryStats], parameter[<ast.ListComp object at 0x7da1b0976290>]] call[name[print], parameter[constant[Delta:], name[ss]]] call[name[print], parameter[call[constant[Remaining gaps: {0} size: {1} bp].format, parameter[call[name[percentage], parameter[call[name[len], parameter[name[notClosed]]], name[totalgaps]]], call[name[thousands], parameter[name[notClosedbp]]]]]]] variable[ss] assign[=] call[name[SummaryStats], parameter[<ast.ListComp object at 0x7da1b094c3a0>]] call[name[print], parameter[name[ss]]]
keyword[def] identifier[fillstats] ( identifier[args] ): literal[string] keyword[from] identifier[jcvi] . identifier[utils] . identifier[cbook] keyword[import] identifier[SummaryStats] , identifier[percentage] , identifier[thousands] identifier[p] = identifier[OptionParser] ( identifier[fillstats] . identifier[__doc__] ) identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] ) keyword[if] identifier[len] ( identifier[args] )!= literal[int] : identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ()) identifier[fillfile] ,= identifier[args] identifier[fp] = identifier[open] ( identifier[fillfile] ) identifier[scaffolds] = literal[int] identifier[gaps] =[] keyword[for] identifier[row] keyword[in] identifier[fp] : keyword[if] identifier[row] [ literal[int] ]== literal[string] : identifier[scaffolds] += literal[int] keyword[continue] identifier[fl] = identifier[FillLine] ( identifier[row] ) identifier[gaps] . identifier[append] ( identifier[fl] ) identifier[print] ( literal[string] . identifier[format] ( identifier[scaffolds] ), identifier[file] = identifier[sys] . identifier[stderr] ) identifier[closed] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[gaps] keyword[if] identifier[x] . identifier[closed] ] identifier[closedbp] = identifier[sum] ( identifier[x] . identifier[before] keyword[for] identifier[x] keyword[in] identifier[closed] ) identifier[notClosed] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[gaps] keyword[if] keyword[not] identifier[x] . identifier[closed] ] identifier[notClosedbp] = identifier[sum] ( identifier[x] . identifier[before] keyword[for] identifier[x] keyword[in] identifier[notClosed] ) identifier[totalgaps] = identifier[len] ( identifier[closed] )+ identifier[len] ( identifier[notClosed] ) identifier[print] ( literal[string] . identifier[format] ( identifier[percentage] ( identifier[len] ( identifier[closed] ), identifier[totalgaps] ), identifier[thousands] ( identifier[closedbp] )), identifier[file] = identifier[sys] . identifier[stderr] ) identifier[ss] = identifier[SummaryStats] ([ identifier[x] . identifier[after] keyword[for] identifier[x] keyword[in] identifier[closed] ]) identifier[print] ( identifier[ss] , identifier[file] = identifier[sys] . identifier[stderr] ) identifier[ss] = identifier[SummaryStats] ([ identifier[x] . identifier[delta] keyword[for] identifier[x] keyword[in] identifier[closed] ]) identifier[print] ( literal[string] , identifier[ss] , identifier[file] = identifier[sys] . identifier[stderr] ) identifier[print] ( literal[string] . identifier[format] ( identifier[percentage] ( identifier[len] ( identifier[notClosed] ), identifier[totalgaps] ), identifier[thousands] ( identifier[notClosedbp] )), identifier[file] = identifier[sys] . identifier[stderr] ) identifier[ss] = identifier[SummaryStats] ([ identifier[x] . identifier[after] keyword[for] identifier[x] keyword[in] identifier[notClosed] ]) identifier[print] ( identifier[ss] , identifier[file] = identifier[sys] . identifier[stderr] )
def fillstats(args): """ %prog fillstats genome.fill Build stats on .fill file from GapCloser. """ from jcvi.utils.cbook import SummaryStats, percentage, thousands p = OptionParser(fillstats.__doc__) (opts, args) = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) # depends on [control=['if'], data=[]] (fillfile,) = args fp = open(fillfile) scaffolds = 0 gaps = [] for row in fp: if row[0] == '>': scaffolds += 1 continue # depends on [control=['if'], data=[]] fl = FillLine(row) gaps.append(fl) # depends on [control=['for'], data=['row']] print('{0} scaffolds in total'.format(scaffolds), file=sys.stderr) closed = [x for x in gaps if x.closed] closedbp = sum((x.before for x in closed)) notClosed = [x for x in gaps if not x.closed] notClosedbp = sum((x.before for x in notClosed)) totalgaps = len(closed) + len(notClosed) print('Closed gaps: {0} size: {1} bp'.format(percentage(len(closed), totalgaps), thousands(closedbp)), file=sys.stderr) ss = SummaryStats([x.after for x in closed]) print(ss, file=sys.stderr) ss = SummaryStats([x.delta for x in closed]) print('Delta:', ss, file=sys.stderr) print('Remaining gaps: {0} size: {1} bp'.format(percentage(len(notClosed), totalgaps), thousands(notClosedbp)), file=sys.stderr) ss = SummaryStats([x.after for x in notClosed]) print(ss, file=sys.stderr)
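fillstats depends on a FillLine class defined elsewhere; here is a hedged interface sketch of the four attributes it reads. The actual column layout of a GapCloser .fill row is an assumption and may well differ.

class FillLine(object):
    def __init__(self, row):
        atoms = row.split()
        self.before = int(atoms[3])       # gap size before closing (column assumed)
        self.after = int(atoms[4])        # gap size after closing (column assumed)
        self.delta = self.before - self.after
        self.closed = (self.after == 0)   # closure criterion assumed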
def download(self, bucket_name, object_name, filename=None):
    """
    Get a file from Google Cloud Storage.

    :param bucket_name: The bucket to fetch from.
    :type bucket_name: str
    :param object_name: The object to fetch.
    :type object_name: str
    :param filename: If set, a local file path where the file should be written.
    :type filename: str
    """
    client = self.get_conn()
    bucket = client.get_bucket(bucket_name)
    blob = bucket.blob(blob_name=object_name)

    if filename:
        blob.download_to_filename(filename)
        self.log.info('File downloaded to %s', filename)

    return blob.download_as_string()
def function[download, parameter[self, bucket_name, object_name, filename]]: constant[ Get a file from Google Cloud Storage. :param bucket_name: The bucket to fetch from. :type bucket_name: str :param object_name: The object to fetch. :type object_name: str :param filename: If set, a local file path where the file should be written. :type filename: str ] variable[client] assign[=] call[name[self].get_conn, parameter[]] variable[bucket] assign[=] call[name[client].get_bucket, parameter[name[bucket_name]]] variable[blob] assign[=] call[name[bucket].blob, parameter[]] if name[filename] begin[:] call[name[blob].download_to_filename, parameter[name[filename]]] call[name[self].log.info, parameter[constant[File downloaded to %s], name[filename]]] return[call[name[blob].download_as_string, parameter[]]]
keyword[def] identifier[download] ( identifier[self] , identifier[bucket_name] , identifier[object_name] , identifier[filename] = keyword[None] ): literal[string] identifier[client] = identifier[self] . identifier[get_conn] () identifier[bucket] = identifier[client] . identifier[get_bucket] ( identifier[bucket_name] ) identifier[blob] = identifier[bucket] . identifier[blob] ( identifier[blob_name] = identifier[object_name] ) keyword[if] identifier[filename] : identifier[blob] . identifier[download_to_filename] ( identifier[filename] ) identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[filename] ) keyword[return] identifier[blob] . identifier[download_as_string] ()
def download(self, bucket_name, object_name, filename=None): """ Get a file from Google Cloud Storage. :param bucket_name: The bucket to fetch from. :type bucket_name: str :param object_name: The object to fetch. :type object_name: str :param filename: If set, a local file path where the file should be written. :type filename: str """ client = self.get_conn() bucket = client.get_bucket(bucket_name) blob = bucket.blob(blob_name=object_name) if filename: blob.download_to_filename(filename) self.log.info('File downloaded to %s', filename) # depends on [control=['if'], data=[]] return blob.download_as_string()
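A hedged usage sketch; the hook construction (Airflow-style GCS hook) is assumed. Note that, as written above, download_as_string() fetches the object again even when `filename` was given, so the blob is transferred twice in that case.

hook = GoogleCloudStorageHook()   # class name assumed
data = hook.download('my-bucket', 'path/to/object.txt')                 # bytes in memory
data = hook.download('my-bucket', 'path/to/object.txt', '/tmp/o.txt')   # also writes the file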
def p_expr_end(p): "end : END_EXPR" p[0] = node.expr( op="end", args=node.expr_list([node.number(0), node.number(0)]))
def function[p_expr_end, parameter[p]]: constant[end : END_EXPR] call[name[p]][constant[0]] assign[=] call[name[node].expr, parameter[]]
keyword[def] identifier[p_expr_end] ( identifier[p] ): literal[string] identifier[p] [ literal[int] ]= identifier[node] . identifier[expr] ( identifier[op] = literal[string] , identifier[args] = identifier[node] . identifier[expr_list] ([ identifier[node] . identifier[number] ( literal[int] ), identifier[node] . identifier[number] ( literal[int] )]))
def p_expr_end(p): """end : END_EXPR""" p[0] = node.expr(op='end', args=node.expr_list([node.number(0), node.number(0)]))
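For context, PLY/yacc reads the production from the rule's docstring and returns the reduction result via p[0]. A sibling rule consuming the `end` nonterminal might look like the sketch below; the production and node.arrayref are hypothetical.

def p_expr_index(p):
    """expr : ident LBRACKET end RBRACKET"""
    p[0] = node.arrayref(p[1], p[3])   # hypothetical node constructor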
def get_data_metadata(self): """Gets the metadata for the content data. return: (osid.Metadata) - metadata for the content data *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template metadata = dict(self._mdata['data']) metadata.update({'existing_object_values': self._my_map['data']}) return Metadata(**metadata)
def function[get_data_metadata, parameter[self]]: constant[Gets the metadata for the content data. return: (osid.Metadata) - metadata for the content data *compliance: mandatory -- This method must be implemented.* ] variable[metadata] assign[=] call[name[dict], parameter[call[name[self]._mdata][constant[data]]]] call[name[metadata].update, parameter[dictionary[[<ast.Constant object at 0x7da18f58e7a0>], [<ast.Subscript object at 0x7da18f58d720>]]]] return[call[name[Metadata], parameter[]]]
keyword[def] identifier[get_data_metadata] ( identifier[self] ): literal[string] identifier[metadata] = identifier[dict] ( identifier[self] . identifier[_mdata] [ literal[string] ]) identifier[metadata] . identifier[update] ({ literal[string] : identifier[self] . identifier[_my_map] [ literal[string] ]}) keyword[return] identifier[Metadata] (** identifier[metadata] )
def get_data_metadata(self): """Gets the metadata for the content data. return: (osid.Metadata) - metadata for the content data *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template metadata = dict(self._mdata['data']) metadata.update({'existing_object_values': self._my_map['data']}) return Metadata(**metadata)
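The copy-then-update pattern above matters because _mdata is a shared template; mutating it in place would leak one form's value into every other form. A small self-contained illustration:

template = {'element_label': 'data'}             # hypothetical template entry
md = dict(template)                              # shallow copy first
md.update({'existing_object_values': b'...'})
assert 'existing_object_values' not in template  # template is untouched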
def wikibase_item_engine_factory(cls, mediawiki_api_url, sparql_endpoint_url, name='LocalItemEngine'): """ Helper function for creating a WDItemEngine class with arguments set for a different Wikibase instance than Wikidata. :param mediawiki_api_url: Mediawiki api url. For wikidata, this is: 'https://www.wikidata.org/w/api.php' :param sparql_endpoint_url: sparql endpoint url. For wikidata, this is: 'https://query.wikidata.org/sparql' :param name: name of the resulting class :return: a subclass of WDItemEngine with the mediawiki_api_url and sparql_endpoint_url arguments set """ class SubCls(cls): def __init__(self, *args, **kwargs): kwargs['mediawiki_api_url'] = mediawiki_api_url kwargs['sparql_endpoint_url'] = sparql_endpoint_url super(SubCls, self).__init__(*args, **kwargs) SubCls.__name__ = name return SubCls
def function[wikibase_item_engine_factory, parameter[cls, mediawiki_api_url, sparql_endpoint_url, name]]: constant[ Helper function for creating a WDItemEngine class with arguments set for a different Wikibase instance than Wikidata. :param mediawiki_api_url: Mediawiki api url. For wikidata, this is: 'https://www.wikidata.org/w/api.php' :param sparql_endpoint_url: sparql endpoint url. For wikidata, this is: 'https://query.wikidata.org/sparql' :param name: name of the resulting class :return: a subclass of WDItemEngine with the mediawiki_api_url and sparql_endpoint_url arguments set ] class class[SubCls, parameter[]] begin[:] def function[__init__, parameter[self]]: call[name[kwargs]][constant[mediawiki_api_url]] assign[=] name[mediawiki_api_url] call[name[kwargs]][constant[sparql_endpoint_url]] assign[=] name[sparql_endpoint_url] call[call[name[super], parameter[name[SubCls], name[self]]].__init__, parameter[<ast.Starred object at 0x7da207f01720>]] name[SubCls].__name__ assign[=] name[name] return[name[SubCls]]
keyword[def] identifier[wikibase_item_engine_factory] ( identifier[cls] , identifier[mediawiki_api_url] , identifier[sparql_endpoint_url] , identifier[name] = literal[string] ): literal[string] keyword[class] identifier[SubCls] ( identifier[cls] ): keyword[def] identifier[__init__] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): identifier[kwargs] [ literal[string] ]= identifier[mediawiki_api_url] identifier[kwargs] [ literal[string] ]= identifier[sparql_endpoint_url] identifier[super] ( identifier[SubCls] , identifier[self] ). identifier[__init__] (* identifier[args] ,** identifier[kwargs] ) identifier[SubCls] . identifier[__name__] = identifier[name] keyword[return] identifier[SubCls]
def wikibase_item_engine_factory(cls, mediawiki_api_url, sparql_endpoint_url, name='LocalItemEngine'): """ Helper function for creating a WDItemEngine class with arguments set for a different Wikibase instance than Wikidata. :param mediawiki_api_url: Mediawiki api url. For wikidata, this is: 'https://www.wikidata.org/w/api.php' :param sparql_endpoint_url: sparql endpoint url. For wikidata, this is: 'https://query.wikidata.org/sparql' :param name: name of the resulting class :return: a subclass of WDItemEngine with the mediawiki_api_url and sparql_endpoint_url arguments set """ class SubCls(cls): def __init__(self, *args, **kwargs): kwargs['mediawiki_api_url'] = mediawiki_api_url kwargs['sparql_endpoint_url'] = sparql_endpoint_url super(SubCls, self).__init__(*args, **kwargs) SubCls.__name__ = name return SubCls
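A hedged usage sketch; the owning class is taken to be WDItemEngine, as the docstring implies, and the URLs and constructor kwarg are placeholders.

LocalItemEngine = WDItemEngine.wikibase_item_engine_factory(
    mediawiki_api_url='http://localhost:8181/w/api.php',
    sparql_endpoint_url='http://localhost:8282/sparql')
item = LocalItemEngine(wd_item_id='Q42')   # remaining kwargs pass through unchanged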
def message_proxy(self, work_dir):
    """
    drone_data_inbound is for data coming from drones
    drone_data_outbound is for commands to the drones, topic must be either
    a drone ID or 'all' for sending a broadcast message to all drones
    """
    public_keys_dir = os.path.join(work_dir, 'certificates', 'public_keys')
    secret_keys_dir = os.path.join(work_dir, 'certificates', 'private_keys')

    # start and configure auth worker
    auth = IOLoopAuthenticator()
    auth.start()
    auth.allow('127.0.0.1')
    auth.configure_curve(domain='*', location=public_keys_dir)

    # external interfaces for communicating with drones
    server_secret_file = os.path.join(secret_keys_dir, 'beeswarm_server.pri')
    server_public, server_secret = load_certificate(server_secret_file)
    drone_data_inbound = beeswarm.shared.zmq_context.socket(zmq.PULL)
    drone_data_inbound.curve_secretkey = server_secret
    drone_data_inbound.curve_publickey = server_public
    drone_data_inbound.curve_server = True
    drone_data_inbound.bind('tcp://*:{0}'.format(self.config['network']['zmq_port']))

    drone_data_outbound = beeswarm.shared.zmq_context.socket(zmq.PUB)
    drone_data_outbound.curve_secretkey = server_secret
    drone_data_outbound.curve_publickey = server_public
    drone_data_outbound.curve_server = True
    drone_data_outbound.bind('tcp://*:{0}'.format(self.config['network']['zmq_command_port']))

    # internal interfaces
    # all inbound session data from drones will be replayed on this socket
    drone_data_socket = beeswarm.shared.zmq_context.socket(zmq.PUB)
    drone_data_socket.bind(SocketNames.DRONE_DATA.value)

    # all commands received on this will be published on the external interface
    drone_command_socket = beeswarm.shared.zmq_context.socket(zmq.PULL)
    drone_command_socket.bind(SocketNames.DRONE_COMMANDS.value)

    poller = zmq.Poller()
    poller.register(drone_data_inbound, zmq.POLLIN)
    poller.register(drone_command_socket, zmq.POLLIN)
    while True:
        # .recv() gives no context switch - why not? using poller with timeout instead
        socks = dict(poller.poll(100))
        gevent.sleep()

        if drone_command_socket in socks and socks[drone_command_socket] == zmq.POLLIN:
            data = drone_command_socket.recv()
            drone_id, _ = data.split(' ', 1)
            logger.debug("Sending drone command to: {0}".format(drone_id))
            # pub socket takes care of filtering
            drone_data_outbound.send(data)
        elif drone_data_inbound in socks and socks[drone_data_inbound] == zmq.POLLIN:
            raw_msg = drone_data_inbound.recv()
            split_data = raw_msg.split(' ', 2)
            if len(split_data) == 3:
                topic, drone_id, data = split_data
            else:
                data = None
                topic, drone_id, = split_data
            logger.debug("Received {0} message from {1}.".format(topic, drone_id))
            # relay message on internal socket
            drone_data_socket.send(raw_msg)
def function[message_proxy, parameter[self, work_dir]]: constant[ drone_data_inboud is for data comming from drones drone_data_outbound is for commands to the drones, topic must either be a drone ID or all for sending a broadcast message to all drones ] variable[public_keys_dir] assign[=] call[name[os].path.join, parameter[name[work_dir], constant[certificates], constant[public_keys]]] variable[secret_keys_dir] assign[=] call[name[os].path.join, parameter[name[work_dir], constant[certificates], constant[private_keys]]] variable[auth] assign[=] call[name[IOLoopAuthenticator], parameter[]] call[name[auth].start, parameter[]] call[name[auth].allow, parameter[constant[127.0.0.1]]] call[name[auth].configure_curve, parameter[]] variable[server_secret_file] assign[=] call[name[os].path.join, parameter[name[secret_keys_dir], constant[beeswarm_server.pri]]] <ast.Tuple object at 0x7da1b10a7bb0> assign[=] call[name[load_certificate], parameter[name[server_secret_file]]] variable[drone_data_inbound] assign[=] call[name[beeswarm].shared.zmq_context.socket, parameter[name[zmq].PULL]] name[drone_data_inbound].curve_secretkey assign[=] name[server_secret] name[drone_data_inbound].curve_publickey assign[=] name[server_public] name[drone_data_inbound].curve_server assign[=] constant[True] call[name[drone_data_inbound].bind, parameter[call[constant[tcp://*:{0}].format, parameter[call[call[name[self].config][constant[network]]][constant[zmq_port]]]]]] variable[drone_data_outbound] assign[=] call[name[beeswarm].shared.zmq_context.socket, parameter[name[zmq].PUB]] name[drone_data_outbound].curve_secretkey assign[=] name[server_secret] name[drone_data_outbound].curve_publickey assign[=] name[server_public] name[drone_data_outbound].curve_server assign[=] constant[True] call[name[drone_data_outbound].bind, parameter[call[constant[tcp://*:{0}].format, parameter[call[call[name[self].config][constant[network]]][constant[zmq_command_port]]]]]] variable[drone_data_socket] assign[=] call[name[beeswarm].shared.zmq_context.socket, parameter[name[zmq].PUB]] call[name[drone_data_socket].bind, parameter[name[SocketNames].DRONE_DATA.value]] variable[drone_command_socket] assign[=] call[name[beeswarm].shared.zmq_context.socket, parameter[name[zmq].PULL]] call[name[drone_command_socket].bind, parameter[name[SocketNames].DRONE_COMMANDS.value]] variable[poller] assign[=] call[name[zmq].Poller, parameter[]] call[name[poller].register, parameter[name[drone_data_inbound], name[zmq].POLLIN]] call[name[poller].register, parameter[name[drone_command_socket], name[zmq].POLLIN]] while constant[True] begin[:] variable[socks] assign[=] call[name[dict], parameter[call[name[poller].poll, parameter[constant[100]]]]] call[name[gevent].sleep, parameter[]] if <ast.BoolOp object at 0x7da1b11ef130> begin[:] variable[data] assign[=] call[name[drone_command_socket].recv, parameter[]] <ast.Tuple object at 0x7da1b11efc70> assign[=] call[name[data].split, parameter[constant[ ], constant[1]]] call[name[logger].debug, parameter[call[constant[Sending drone command to: {0}].format, parameter[name[drone_id]]]]] call[name[drone_data_outbound].send, parameter[name[data]]]
keyword[def] identifier[message_proxy] ( identifier[self] , identifier[work_dir] ): literal[string] identifier[public_keys_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] , literal[string] ) identifier[secret_keys_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] , literal[string] ) identifier[auth] = identifier[IOLoopAuthenticator] () identifier[auth] . identifier[start] () identifier[auth] . identifier[allow] ( literal[string] ) identifier[auth] . identifier[configure_curve] ( identifier[domain] = literal[string] , identifier[location] = identifier[public_keys_dir] ) identifier[server_secret_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[secret_keys_dir] , literal[string] ) identifier[server_public] , identifier[server_secret] = identifier[load_certificate] ( identifier[server_secret_file] ) identifier[drone_data_inbound] = identifier[beeswarm] . identifier[shared] . identifier[zmq_context] . identifier[socket] ( identifier[zmq] . identifier[PULL] ) identifier[drone_data_inbound] . identifier[curve_secretkey] = identifier[server_secret] identifier[drone_data_inbound] . identifier[curve_publickey] = identifier[server_public] identifier[drone_data_inbound] . identifier[curve_server] = keyword[True] identifier[drone_data_inbound] . identifier[bind] ( literal[string] . identifier[format] ( identifier[self] . identifier[config] [ literal[string] ][ literal[string] ])) identifier[drone_data_outbound] = identifier[beeswarm] . identifier[shared] . identifier[zmq_context] . identifier[socket] ( identifier[zmq] . identifier[PUB] ) identifier[drone_data_outbound] . identifier[curve_secretkey] = identifier[server_secret] identifier[drone_data_outbound] . identifier[curve_publickey] = identifier[server_public] identifier[drone_data_outbound] . identifier[curve_server] = keyword[True] identifier[drone_data_outbound] . identifier[bind] ( literal[string] . identifier[format] ( identifier[self] . identifier[config] [ literal[string] ][ literal[string] ])) identifier[drone_data_socket] = identifier[beeswarm] . identifier[shared] . identifier[zmq_context] . identifier[socket] ( identifier[zmq] . identifier[PUB] ) identifier[drone_data_socket] . identifier[bind] ( identifier[SocketNames] . identifier[DRONE_DATA] . identifier[value] ) identifier[drone_command_socket] = identifier[beeswarm] . identifier[shared] . identifier[zmq_context] . identifier[socket] ( identifier[zmq] . identifier[PULL] ) identifier[drone_command_socket] . identifier[bind] ( identifier[SocketNames] . identifier[DRONE_COMMANDS] . identifier[value] ) identifier[poller] = identifier[zmq] . identifier[Poller] () identifier[poller] . identifier[register] ( identifier[drone_data_inbound] , identifier[zmq] . identifier[POLLIN] ) identifier[poller] . identifier[register] ( identifier[drone_command_socket] , identifier[zmq] . identifier[POLLIN] ) keyword[while] keyword[True] : identifier[socks] = identifier[dict] ( identifier[poller] . identifier[poll] ( literal[int] )) identifier[gevent] . identifier[sleep] () keyword[if] identifier[drone_command_socket] keyword[in] identifier[socks] keyword[and] identifier[socks] [ identifier[drone_command_socket] ]== identifier[zmq] . identifier[POLLIN] : identifier[data] = identifier[drone_command_socket] . identifier[recv] () identifier[drone_id] , identifier[_] = identifier[data] . identifier[split] ( literal[string] , literal[int] ) identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[drone_id] )) identifier[drone_data_outbound] . identifier[send] ( identifier[data] ) keyword[elif] identifier[drone_data_inbound] keyword[in] identifier[socks] keyword[and] identifier[socks] [ identifier[drone_data_inbound] ]== identifier[zmq] . identifier[POLLIN] : identifier[raw_msg] = identifier[drone_data_inbound] . identifier[recv] () identifier[split_data] = identifier[raw_msg] . identifier[split] ( literal[string] , literal[int] ) keyword[if] identifier[len] ( identifier[split_data] )== literal[int] : identifier[topic] , identifier[drone_id] , identifier[data] = identifier[split_data] keyword[else] : identifier[data] = keyword[None] identifier[topic] , identifier[drone_id] ,= identifier[split_data] identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[topic] , identifier[drone_id] )) identifier[drone_data_socket] . identifier[send] ( identifier[raw_msg] )
def message_proxy(self, work_dir): """ drone_data_inbound is for data coming from drones drone_data_outbound is for commands to the drones, topic must be either a drone ID or 'all' for sending a broadcast message to all drones """ public_keys_dir = os.path.join(work_dir, 'certificates', 'public_keys') secret_keys_dir = os.path.join(work_dir, 'certificates', 'private_keys') # start and configure auth worker auth = IOLoopAuthenticator() auth.start() auth.allow('127.0.0.1') auth.configure_curve(domain='*', location=public_keys_dir) # external interfaces for communicating with drones server_secret_file = os.path.join(secret_keys_dir, 'beeswarm_server.pri') (server_public, server_secret) = load_certificate(server_secret_file) drone_data_inbound = beeswarm.shared.zmq_context.socket(zmq.PULL) drone_data_inbound.curve_secretkey = server_secret drone_data_inbound.curve_publickey = server_public drone_data_inbound.curve_server = True drone_data_inbound.bind('tcp://*:{0}'.format(self.config['network']['zmq_port'])) drone_data_outbound = beeswarm.shared.zmq_context.socket(zmq.PUB) drone_data_outbound.curve_secretkey = server_secret drone_data_outbound.curve_publickey = server_public drone_data_outbound.curve_server = True drone_data_outbound.bind('tcp://*:{0}'.format(self.config['network']['zmq_command_port'])) # internal interfaces # all inbound session data from drones will be replayed on this socket drone_data_socket = beeswarm.shared.zmq_context.socket(zmq.PUB) drone_data_socket.bind(SocketNames.DRONE_DATA.value) # all commands received on this will be published on the external interface drone_command_socket = beeswarm.shared.zmq_context.socket(zmq.PULL) drone_command_socket.bind(SocketNames.DRONE_COMMANDS.value) poller = zmq.Poller() poller.register(drone_data_inbound, zmq.POLLIN) poller.register(drone_command_socket, zmq.POLLIN) while True: # .recv() gives no context switch - why not? using poller with timeout instead socks = dict(poller.poll(100)) gevent.sleep() if drone_command_socket in socks and socks[drone_command_socket] == zmq.POLLIN: data = drone_command_socket.recv() (drone_id, _) = data.split(' ', 1) logger.debug('Sending drone command to: {0}'.format(drone_id)) # pub socket takes care of filtering drone_data_outbound.send(data) # depends on [control=['if'], data=[]] elif drone_data_inbound in socks and socks[drone_data_inbound] == zmq.POLLIN: raw_msg = drone_data_inbound.recv() split_data = raw_msg.split(' ', 2) if len(split_data) == 3: (topic, drone_id, data) = split_data # depends on [control=['if'], data=[]] else: data = None (topic, drone_id) = split_data logger.debug('Received {0} message from {1}.'.format(topic, drone_id)) # relay message on internal socket drone_data_socket.send(raw_msg) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
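A hedged sketch of the framing conventions the proxy assumes on its internal command socket: producers PUSH "<drone_id> <payload>" and the proxy republishes it on the external PUB socket, where drones subscribe by their own id. The drone id and payload below are hypothetical.

cmd = beeswarm.shared.zmq_context.socket(zmq.PUSH)
cmd.connect(SocketNames.DRONE_COMMANDS.value)     # proxy side binds, so we connect
cmd.send('{0} {1}'.format('drone-7', 'config_changed'))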
def get_set_voltage(self, channel):
    """ channel: 1=OP1, 2=OP2, AUX is not supported"""
    ret = self.ask("V%d?" % channel)
    if ret[:3] != "V%d " % channel:
        print("ttiQl355tp.get_set_voltage() format error", ret)
        return None
    return float(ret[3:])
def function[get_set_voltage, parameter[self, channel]]: constant[ channel: 1=OP1, 2=OP2, AUX is not supported] variable[ret] assign[=] call[name[self].ask, parameter[binary_operation[constant[V%d?] <ast.Mod object at 0x7da2590d6920> name[channel]]]] if compare[call[name[ret]][<ast.Slice object at 0x7da1b0507490>] not_equal[!=] binary_operation[constant[V%d ] <ast.Mod object at 0x7da2590d6920> name[channel]]] begin[:] call[name[print], parameter[constant[ttiQl355tp.get_set_voltage() format error], name[ret]]] return[constant[None]] return[call[name[float], parameter[call[name[ret]][<ast.Slice object at 0x7da1b053abc0>]]]]
keyword[def] identifier[get_set_voltage] ( identifier[self] , identifier[channel] ): literal[string] identifier[ret] = identifier[self] . identifier[ask] ( literal[string] % identifier[channel] ) keyword[if] identifier[ret] [: literal[int] ]!= literal[string] % identifier[channel] : identifier[print] ( literal[string] , identifier[ret] ) keyword[return] keyword[None] keyword[return] identifier[float] ( identifier[ret] [ literal[int] :])
def get_set_voltage(self, channel): """ channel: 1=OP1, 2=OP2, AUX is not supported""" ret = self.ask('V%d?' % channel) if ret[:3] != 'V%d ' % channel: print('ttiQl355tp.get_set_voltage() format error', ret) return None # depends on [control=['if'], data=[]] return float(ret[3:])
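A hedged example of the query/reply exchange this method performs; the `psu` instance is assumed.

#   query "V1?"  ->  reply "V1 12.000"  ->  returns 12.0
#   a malformed reply prints the format error and returns None
voltage = psu.get_set_voltage(1)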
def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
    """Parses the sections in memory and returns a list of them"""
    sections = []
    optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
    offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading past the DOS and NT headers
    image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
    for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
        ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
        if parse_header_only:
            raw = None
            bytes_ = bytearray()
        else:
            size = ishdr.SizeOfRawData
            raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
            bytes_ = bytearray(raw)
        sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw))
        offset += image_section_header_size
    return sections
def function[_parseSections, parameter[self, data, imageDosHeader, imageNtHeaders, parse_header_only]]: constant[Parses the sections in memory and returns a list of them] variable[sections] assign[=] list[[]] variable[optional_header_offset] assign[=] binary_operation[binary_operation[name[imageDosHeader].header.e_lfanew + constant[4]] + call[name[sizeof], parameter[name[IMAGE_FILE_HEADER]]]] variable[offset] assign[=] binary_operation[name[optional_header_offset] + name[imageNtHeaders].header.FileHeader.SizeOfOptionalHeader] variable[image_section_header_size] assign[=] call[name[sizeof], parameter[name[IMAGE_SECTION_HEADER]]] for taget[name[sectionNo]] in starred[call[name[range], parameter[name[imageNtHeaders].header.FileHeader.NumberOfSections]]] begin[:] variable[ishdr] assign[=] call[name[IMAGE_SECTION_HEADER].from_buffer, parameter[name[data], name[offset]]] if name[parse_header_only] begin[:] variable[raw] assign[=] constant[None] variable[bytes_] assign[=] call[name[bytearray], parameter[]] call[name[sections].append, parameter[call[name[SectionData], parameter[]]]] <ast.AugAssign object at 0x7da1b26ae7d0> return[name[sections]]
keyword[def] identifier[_parseSections] ( identifier[self] , identifier[data] , identifier[imageDosHeader] , identifier[imageNtHeaders] , identifier[parse_header_only] = keyword[False] ): literal[string] identifier[sections] =[] identifier[optional_header_offset] = identifier[imageDosHeader] . identifier[header] . identifier[e_lfanew] + literal[int] + identifier[sizeof] ( identifier[IMAGE_FILE_HEADER] ) identifier[offset] = identifier[optional_header_offset] + identifier[imageNtHeaders] . identifier[header] . identifier[FileHeader] . identifier[SizeOfOptionalHeader] identifier[image_section_header_size] = identifier[sizeof] ( identifier[IMAGE_SECTION_HEADER] ) keyword[for] identifier[sectionNo] keyword[in] identifier[range] ( identifier[imageNtHeaders] . identifier[header] . identifier[FileHeader] . identifier[NumberOfSections] ): identifier[ishdr] = identifier[IMAGE_SECTION_HEADER] . identifier[from_buffer] ( identifier[data] , identifier[offset] ) keyword[if] identifier[parse_header_only] : identifier[raw] = keyword[None] identifier[bytes_] = identifier[bytearray] () keyword[else] : identifier[size] = identifier[ishdr] . identifier[SizeOfRawData] identifier[raw] =( identifier[c_ubyte] * identifier[size] ). identifier[from_buffer] ( identifier[data] , identifier[ishdr] . identifier[PointerToRawData] ) identifier[bytes_] = identifier[bytearray] ( identifier[raw] ) identifier[sections] . identifier[append] ( identifier[SectionData] ( identifier[header] = identifier[ishdr] , identifier[name] = identifier[ishdr] . identifier[Name] . identifier[decode] ( literal[string] , identifier[errors] = literal[string] ), identifier[bytes] = identifier[bytes_] , identifier[raw] = identifier[raw] )) identifier[offset] += identifier[image_section_header_size] keyword[return] identifier[sections]
def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False): """Parses the sections in memory and returns a list of them""" sections = [] optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER) offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading past the DOS and NT headers image_section_header_size = sizeof(IMAGE_SECTION_HEADER) for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections): ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset) if parse_header_only: raw = None bytes_ = bytearray() # depends on [control=['if'], data=[]] else: size = ishdr.SizeOfRawData raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData) bytes_ = bytearray(raw) sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw)) offset += image_section_header_size # depends on [control=['for'], data=[]] return sections
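A hedged usage sketch over the returned SectionData records; the `pe` handle and the buffer/header setup are out of scope here.

for sec in pe._parseSections(data, dos_header, nt_headers, parse_header_only=True):
    print(sec.name, hex(sec.header.VirtualAddress), sec.header.SizeOfRawData)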
def div(self, y): r"""Compute the divergence of a signal defined on the edges. The divergence :math:`z` of a signal :math:`y` is defined as .. math:: z = \operatorname{div}_\mathcal{G} y = D y, where :math:`D` is the differential operator :attr:`D`. The value of the divergence on the vertex :math:`v_i` is .. math:: z[i] = \sum_k D[i, k] y[k] = \sum_{\{k,j | e_k=(v_j, v_i) \in \mathcal{E}\}} \sqrt{\frac{W[j, i]}{2}} y[k] - \sum_{\{k,j | e_k=(v_i, v_j) \in \mathcal{E}\}} \sqrt{\frac{W[i, j]}{2}} y[k] for the combinatorial Laplacian, and .. math:: z[i] = \sum_k D[i, k] y[k] = \sum_{\{k,j | e_k=(v_j, v_i) \in \mathcal{E}\}} \sqrt{\frac{W[j, i]}{2 d[i]}} y[k] - \sum_{\{k,j | e_k=(v_i, v_j) \in \mathcal{E}\}} \sqrt{\frac{W[i, j]}{2 d[i]}} y[k] for the normalized Laplacian. For undirected graphs, only half the edges are kept and the :math:`1/\sqrt{2}` factor disappears from the above equations. See :meth:`compute_differential_operator` for details. Parameters ---------- y : array_like Signal of length :attr:`n_edges` living on the edges. Returns ------- z : ndarray Divergence signal of length :attr:`n_vertices` living on the vertices. See Also -------- compute_differential_operator grad : compute the gradient of a vertex signal Examples -------- Non-directed graph and combinatorial Laplacian: >>> graph = graphs.Path(4, directed=False, lap_type='combinatorial') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-2., 4., -2., 0.]) Directed graph and combinatorial Laplacian: >>> graph = graphs.Path(4, directed=True, lap_type='combinatorial') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-1.41421356, 2.82842712, -1.41421356, 0. ]) Non-directed graph and normalized Laplacian: >>> graph = graphs.Path(4, directed=False, lap_type='normalized') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-2. , 2.82842712, -1.41421356, 0. ]) Directed graph and normalized Laplacian: >>> graph = graphs.Path(4, directed=True, lap_type='normalized') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-2. , 2.82842712, -1.41421356, 0. ]) """ y = np.asanyarray(y) if y.shape[0] != self.Ne: raise ValueError('First dimension must be the number of edges ' 'G.Ne = {}, got {}.'.format(self.Ne, y.shape)) return self.D.dot(y)
def function[div, parameter[self, y]]: constant[Compute the divergence of a signal defined on the edges. The divergence :math:`z` of a signal :math:`y` is defined as .. math:: z = \operatorname{div}_\mathcal{G} y = D y, where :math:`D` is the differential operator :attr:`D`. The value of the divergence on the vertex :math:`v_i` is .. math:: z[i] = \sum_k D[i, k] y[k] = \sum_{\{k,j | e_k=(v_j, v_i) \in \mathcal{E}\}} \sqrt{\frac{W[j, i]}{2}} y[k] - \sum_{\{k,j | e_k=(v_i, v_j) \in \mathcal{E}\}} \sqrt{\frac{W[i, j]}{2}} y[k] for the combinatorial Laplacian, and .. math:: z[i] = \sum_k D[i, k] y[k] = \sum_{\{k,j | e_k=(v_j, v_i) \in \mathcal{E}\}} \sqrt{\frac{W[j, i]}{2 d[i]}} y[k] - \sum_{\{k,j | e_k=(v_i, v_j) \in \mathcal{E}\}} \sqrt{\frac{W[i, j]}{2 d[i]}} y[k] for the normalized Laplacian. For undirected graphs, only half the edges are kept and the :math:`1/\sqrt{2}` factor disappears from the above equations. See :meth:`compute_differential_operator` for details. Parameters ---------- y : array_like Signal of length :attr:`n_edges` living on the edges. Returns ------- z : ndarray Divergence signal of length :attr:`n_vertices` living on the vertices. See Also -------- compute_differential_operator grad : compute the gradient of a vertex signal Examples -------- Non-directed graph and combinatorial Laplacian: >>> graph = graphs.Path(4, directed=False, lap_type='combinatorial') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-2., 4., -2., 0.]) Directed graph and combinatorial Laplacian: >>> graph = graphs.Path(4, directed=True, lap_type='combinatorial') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-1.41421356, 2.82842712, -1.41421356, 0. ]) Non-directed graph and normalized Laplacian: >>> graph = graphs.Path(4, directed=False, lap_type='normalized') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-2. , 2.82842712, -1.41421356, 0. ]) Directed graph and normalized Laplacian: >>> graph = graphs.Path(4, directed=True, lap_type='normalized') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-2. , 2.82842712, -1.41421356, 0. ]) ] variable[y] assign[=] call[name[np].asanyarray, parameter[name[y]]] if compare[call[name[y].shape][constant[0]] not_equal[!=] name[self].Ne] begin[:] <ast.Raise object at 0x7da20e961e40> return[call[name[self].D.dot, parameter[name[y]]]]
keyword[def] identifier[div] ( identifier[self] , identifier[y] ): literal[string] identifier[y] = identifier[np] . identifier[asanyarray] ( identifier[y] ) keyword[if] identifier[y] . identifier[shape] [ literal[int] ]!= identifier[self] . identifier[Ne] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] . identifier[format] ( identifier[self] . identifier[Ne] , identifier[y] . identifier[shape] )) keyword[return] identifier[self] . identifier[D] . identifier[dot] ( identifier[y] )
def div(self, y): """Compute the divergence of a signal defined on the edges. The divergence :math:`z` of a signal :math:`y` is defined as .. math:: z = \\operatorname{div}_\\mathcal{G} y = D y, where :math:`D` is the differential operator :attr:`D`. The value of the divergence on the vertex :math:`v_i` is .. math:: z[i] = \\sum_k D[i, k] y[k] = \\sum_{\\{k,j | e_k=(v_j, v_i) \\in \\mathcal{E}\\}} \\sqrt{\\frac{W[j, i]}{2}} y[k] - \\sum_{\\{k,j | e_k=(v_i, v_j) \\in \\mathcal{E}\\}} \\sqrt{\\frac{W[i, j]}{2}} y[k] for the combinatorial Laplacian, and .. math:: z[i] = \\sum_k D[i, k] y[k] = \\sum_{\\{k,j | e_k=(v_j, v_i) \\in \\mathcal{E}\\}} \\sqrt{\\frac{W[j, i]}{2 d[i]}} y[k] - \\sum_{\\{k,j | e_k=(v_i, v_j) \\in \\mathcal{E}\\}} \\sqrt{\\frac{W[i, j]}{2 d[i]}} y[k] for the normalized Laplacian. For undirected graphs, only half the edges are kept and the :math:`1/\\sqrt{2}` factor disappears from the above equations. See :meth:`compute_differential_operator` for details. Parameters ---------- y : array_like Signal of length :attr:`n_edges` living on the edges. Returns ------- z : ndarray Divergence signal of length :attr:`n_vertices` living on the vertices. See Also -------- compute_differential_operator grad : compute the gradient of a vertex signal Examples -------- Non-directed graph and combinatorial Laplacian: >>> graph = graphs.Path(4, directed=False, lap_type='combinatorial') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-2., 4., -2., 0.]) Directed graph and combinatorial Laplacian: >>> graph = graphs.Path(4, directed=True, lap_type='combinatorial') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-1.41421356, 2.82842712, -1.41421356, 0. ]) Non-directed graph and normalized Laplacian: >>> graph = graphs.Path(4, directed=False, lap_type='normalized') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-2. , 2.82842712, -1.41421356, 0. ]) Directed graph and normalized Laplacian: >>> graph = graphs.Path(4, directed=True, lap_type='normalized') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-2. , 2.82842712, -1.41421356, 0. ]) """ y = np.asanyarray(y) if y.shape[0] != self.Ne: raise ValueError('First dimension must be the number of edges G.Ne = {}, got {}.'.format(self.Ne, y.shape)) # depends on [control=['if'], data=[]] return self.D.dot(y)
def read_eeprom_calibration(self, temperature=False): # use default values for temperature, EEPROM values are usually not calibrated and random '''Reading EEPROM calibration for power regulators and temperature ''' header = self.get_format() if header == self.HEADER_V2: data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_V2_FORMAT)) for idx, channel in enumerate(self._ch_cal.iterkeys()): ch_data = data[idx * calcsize(self.CAL_DATA_CH_V2_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_V2_FORMAT)] values = unpack_from(self.CAL_DATA_CH_V2_FORMAT, ch_data) self._ch_cal[channel]['name'] = "".join([c for c in values[0] if (c in string.printable)]) # values[0].strip() self._ch_cal[channel]['default'] = values[1] self._ch_cal[channel]['ADCI']['gain'] = values[2] self._ch_cal[channel]['ADCI']['offset'] = values[3] self._ch_cal[channel]['ADCI']['iq_gain'] = values[4] self._ch_cal[channel]['ADCI']['iq_offset'] = values[5] self._ch_cal[channel]['ADCV']['gain'] = values[6] self._ch_cal[channel]['ADCV']['offset'] = values[7] self._ch_cal[channel]['DACV']['gain'] = values[8] self._ch_cal[channel]['DACV']['offset'] = values[9] if temperature: self._ch_cal[channel]['NTC']['B_NTC'] = values[10] self._ch_cal[channel]['NTC']['R1'] = values[11] self._ch_cal[channel]['NTC']['R2'] = values[12] self._ch_cal[channel]['NTC']['R4'] = values[13] self._ch_cal[channel]['NTC']['R_NTC_25'] = values[14] self._ch_cal[channel]['NTC']['VREF'] = values[15] else: raise ValueError('EEPROM data format not supported (header: %s)' % header)
def function[read_eeprom_calibration, parameter[self, temperature]]: constant[Reading EEPROM calibration for power regulators and temperature ] variable[header] assign[=] call[name[self].get_format, parameter[]] if compare[name[header] equal[==] name[self].HEADER_V2] begin[:] variable[data] assign[=] call[name[self]._read_eeprom, parameter[name[self].CAL_DATA_ADDR]] for taget[tuple[[<ast.Name object at 0x7da1b050c040>, <ast.Name object at 0x7da1b050df30>]]] in starred[call[name[enumerate], parameter[call[name[self]._ch_cal.iterkeys, parameter[]]]]] begin[:] variable[ch_data] assign[=] call[name[data]][<ast.Slice object at 0x7da1b050d150>] variable[values] assign[=] call[name[unpack_from], parameter[name[self].CAL_DATA_CH_V2_FORMAT, name[ch_data]]] call[call[name[self]._ch_cal][name[channel]]][constant[name]] assign[=] call[constant[].join, parameter[<ast.ListComp object at 0x7da1b050f580>]] call[call[name[self]._ch_cal][name[channel]]][constant[default]] assign[=] call[name[values]][constant[1]] call[call[call[name[self]._ch_cal][name[channel]]][constant[ADCI]]][constant[gain]] assign[=] call[name[values]][constant[2]] call[call[call[name[self]._ch_cal][name[channel]]][constant[ADCI]]][constant[offset]] assign[=] call[name[values]][constant[3]] call[call[call[name[self]._ch_cal][name[channel]]][constant[ADCI]]][constant[iq_gain]] assign[=] call[name[values]][constant[4]] call[call[call[name[self]._ch_cal][name[channel]]][constant[ADCI]]][constant[iq_offset]] assign[=] call[name[values]][constant[5]] call[call[call[name[self]._ch_cal][name[channel]]][constant[ADCV]]][constant[gain]] assign[=] call[name[values]][constant[6]] call[call[call[name[self]._ch_cal][name[channel]]][constant[ADCV]]][constant[offset]] assign[=] call[name[values]][constant[7]] call[call[call[name[self]._ch_cal][name[channel]]][constant[DACV]]][constant[gain]] assign[=] call[name[values]][constant[8]] call[call[call[name[self]._ch_cal][name[channel]]][constant[DACV]]][constant[offset]] assign[=] call[name[values]][constant[9]] if name[temperature] begin[:] call[call[call[name[self]._ch_cal][name[channel]]][constant[NTC]]][constant[B_NTC]] assign[=] call[name[values]][constant[10]] call[call[call[name[self]._ch_cal][name[channel]]][constant[NTC]]][constant[R1]] assign[=] call[name[values]][constant[11]] call[call[call[name[self]._ch_cal][name[channel]]][constant[NTC]]][constant[R2]] assign[=] call[name[values]][constant[12]] call[call[call[name[self]._ch_cal][name[channel]]][constant[NTC]]][constant[R4]] assign[=] call[name[values]][constant[13]] call[call[call[name[self]._ch_cal][name[channel]]][constant[NTC]]][constant[R_NTC_25]] assign[=] call[name[values]][constant[14]] call[call[call[name[self]._ch_cal][name[channel]]][constant[NTC]]][constant[VREF]] assign[=] call[name[values]][constant[15]]
keyword[def] identifier[read_eeprom_calibration] ( identifier[self] , identifier[temperature] = keyword[False] ): literal[string] identifier[header] = identifier[self] . identifier[get_format] () keyword[if] identifier[header] == identifier[self] . identifier[HEADER_V2] : identifier[data] = identifier[self] . identifier[_read_eeprom] ( identifier[self] . identifier[CAL_DATA_ADDR] , identifier[size] = identifier[calcsize] ( identifier[self] . identifier[CAL_DATA_V2_FORMAT] )) keyword[for] identifier[idx] , identifier[channel] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_ch_cal] . identifier[iterkeys] ()): identifier[ch_data] = identifier[data] [ identifier[idx] * identifier[calcsize] ( identifier[self] . identifier[CAL_DATA_CH_V2_FORMAT] ):( identifier[idx] + literal[int] )* identifier[calcsize] ( identifier[self] . identifier[CAL_DATA_CH_V2_FORMAT] )] identifier[values] = identifier[unpack_from] ( identifier[self] . identifier[CAL_DATA_CH_V2_FORMAT] , identifier[ch_data] ) identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ]= literal[string] . identifier[join] ([ identifier[c] keyword[for] identifier[c] keyword[in] identifier[values] [ literal[int] ] keyword[if] ( identifier[c] keyword[in] identifier[string] . identifier[printable] )]) identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ]= identifier[values] [ literal[int] ] identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ][ literal[string] ]= identifier[values] [ literal[int] ] identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ][ literal[string] ]= identifier[values] [ literal[int] ] identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ][ literal[string] ]= identifier[values] [ literal[int] ] identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ][ literal[string] ]= identifier[values] [ literal[int] ] identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ][ literal[string] ]= identifier[values] [ literal[int] ] identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ][ literal[string] ]= identifier[values] [ literal[int] ] identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ][ literal[string] ]= identifier[values] [ literal[int] ] identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ][ literal[string] ]= identifier[values] [ literal[int] ] keyword[if] identifier[temperature] : identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ][ literal[string] ]= identifier[values] [ literal[int] ] identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ][ literal[string] ]= identifier[values] [ literal[int] ] identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ][ literal[string] ]= identifier[values] [ literal[int] ] identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ][ literal[string] ]= identifier[values] [ literal[int] ] identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ][ literal[string] ]= identifier[values] [ literal[int] ] identifier[self] . identifier[_ch_cal] [ identifier[channel] ][ literal[string] ][ literal[string] ]= identifier[values] [ literal[int] ] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[header] )
def read_eeprom_calibration(self, temperature=False): # use default values for temperature, EEPROM values are usually not calibrated and random 'Reading EEPROM calibration for power regulators and temperature\n ' header = self.get_format() if header == self.HEADER_V2: data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_V2_FORMAT)) for (idx, channel) in enumerate(self._ch_cal.iterkeys()): ch_data = data[idx * calcsize(self.CAL_DATA_CH_V2_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_V2_FORMAT)] values = unpack_from(self.CAL_DATA_CH_V2_FORMAT, ch_data) self._ch_cal[channel]['name'] = ''.join([c for c in values[0] if c in string.printable]) # values[0].strip() self._ch_cal[channel]['default'] = values[1] self._ch_cal[channel]['ADCI']['gain'] = values[2] self._ch_cal[channel]['ADCI']['offset'] = values[3] self._ch_cal[channel]['ADCI']['iq_gain'] = values[4] self._ch_cal[channel]['ADCI']['iq_offset'] = values[5] self._ch_cal[channel]['ADCV']['gain'] = values[6] self._ch_cal[channel]['ADCV']['offset'] = values[7] self._ch_cal[channel]['DACV']['gain'] = values[8] self._ch_cal[channel]['DACV']['offset'] = values[9] if temperature: self._ch_cal[channel]['NTC']['B_NTC'] = values[10] self._ch_cal[channel]['NTC']['R1'] = values[11] self._ch_cal[channel]['NTC']['R2'] = values[12] self._ch_cal[channel]['NTC']['R4'] = values[13] self._ch_cal[channel]['NTC']['R_NTC_25'] = values[14] self._ch_cal[channel]['NTC']['VREF'] = values[15] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] else: raise ValueError('EEPROM data format not supported (header: %s)' % header)
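The part of read_eeprom_calibration worth internalizing is the fixed-size record arithmetic: each channel occupies calcsize(fmt) bytes, so record idx lives at blob[idx*size:(idx+1)*size]. A minimal self-contained sketch of the same pattern, using an invented two-field record format rather than the board's real calibration layout:

from struct import calcsize, pack, unpack_from

RECORD_FMT = '<8sf'  # hypothetical per-channel record: 8-byte name + float gain
records = [(b'VDDA1', 1.01), (b'VDDD1', 0.98)]
blob = b''.join(pack(RECORD_FMT, name, gain) for name, gain in records)

size = calcsize(RECORD_FMT)
for idx in range(len(records)):
    # same idx*size:(idx+1)*size slicing used in read_eeprom_calibration above
    chunk = blob[idx * size:(idx + 1) * size]
    name, gain = unpack_from(RECORD_FMT, chunk)
    print(name.rstrip(b'\x00'), gain)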
def alter_columns(op, name, *columns, **kwargs): """Alter columns from a table. Parameters ---------- name : str The name of the table. *columns The new columns to have. selection_string : str, optional The string to use in the selection. If not provided, it will select all of the new columns from the old table. Notes ----- The columns are passed explicitly because this should only be used in a downgrade where ``zipline.assets.asset_db_schema`` could change. """ selection_string = kwargs.pop('selection_string', None) if kwargs: raise TypeError( 'alter_columns received extra arguments: %r' % sorted(kwargs), ) if selection_string is None: selection_string = ', '.join(column.name for column in columns) tmp_name = '_alter_columns_' + name op.rename_table(name, tmp_name) for column in columns: # Clear any indices that already exist on this table, otherwise we will # fail to create the table because the indices will already be present. # When we create the table below, the indices that we want to preserve # will just get recreated. for table in name, tmp_name: try: op.drop_index('ix_%s_%s' % (table, column.name)) except sa.exc.OperationalError: pass op.create_table(name, *columns) op.execute( 'insert into %s select %s from %s' % ( name, selection_string, tmp_name, ), ) op.drop_table(tmp_name)
def function[alter_columns, parameter[op, name]]: constant[Alter columns from a table. Parameters ---------- name : str The name of the table. *columns The new columns to have. selection_string : str, optional The string to use in the selection. If not provided, it will select all of the new columns from the old table. Notes ----- The columns are passed explicitly because this should only be used in a downgrade where ``zipline.assets.asset_db_schema`` could change. ] variable[selection_string] assign[=] call[name[kwargs].pop, parameter[constant[selection_string], constant[None]]] if name[kwargs] begin[:] <ast.Raise object at 0x7da1b2024c70> if compare[name[selection_string] is constant[None]] begin[:] variable[selection_string] assign[=] call[constant[, ].join, parameter[<ast.GeneratorExp object at 0x7da1b2025930>]] variable[tmp_name] assign[=] binary_operation[constant[_alter_columns_] + name[name]] call[name[op].rename_table, parameter[name[name], name[tmp_name]]] for taget[name[column]] in starred[name[columns]] begin[:] for taget[name[table]] in starred[tuple[[<ast.Name object at 0x7da1b2005c60>, <ast.Name object at 0x7da1b2004100>]]] begin[:] <ast.Try object at 0x7da1b2004eb0> call[name[op].create_table, parameter[name[name], <ast.Starred object at 0x7da1b20063b0>]] call[name[op].execute, parameter[binary_operation[constant[insert into %s select %s from %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2005240>, <ast.Name object at 0x7da1b2006b60>, <ast.Name object at 0x7da1b2004be0>]]]]] call[name[op].drop_table, parameter[name[tmp_name]]]
keyword[def] identifier[alter_columns] ( identifier[op] , identifier[name] ,* identifier[columns] ,** identifier[kwargs] ): literal[string] identifier[selection_string] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] ) keyword[if] identifier[kwargs] : keyword[raise] identifier[TypeError] ( literal[string] % identifier[sorted] ( identifier[kwargs] ), ) keyword[if] identifier[selection_string] keyword[is] keyword[None] : identifier[selection_string] = literal[string] . identifier[join] ( identifier[column] . identifier[name] keyword[for] identifier[column] keyword[in] identifier[columns] ) identifier[tmp_name] = literal[string] + identifier[name] identifier[op] . identifier[rename_table] ( identifier[name] , identifier[tmp_name] ) keyword[for] identifier[column] keyword[in] identifier[columns] : keyword[for] identifier[table] keyword[in] identifier[name] , identifier[tmp_name] : keyword[try] : identifier[op] . identifier[drop_index] ( literal[string] %( identifier[table] , identifier[column] . identifier[name] )) keyword[except] identifier[sa] . identifier[exc] . identifier[OperationalError] : keyword[pass] identifier[op] . identifier[create_table] ( identifier[name] ,* identifier[columns] ) identifier[op] . identifier[execute] ( literal[string] %( identifier[name] , identifier[selection_string] , identifier[tmp_name] , ), ) identifier[op] . identifier[drop_table] ( identifier[tmp_name] )
def alter_columns(op, name, *columns, **kwargs): """Alter columns from a table. Parameters ---------- name : str The name of the table. *columns The new columns to have. selection_string : str, optional The string to use in the selection. If not provided, it will select all of the new columns from the old table. Notes ----- The columns are passed explicitly because this should only be used in a downgrade where ``zipline.assets.asset_db_schema`` could change. """ selection_string = kwargs.pop('selection_string', None) if kwargs: raise TypeError('alter_columns received extra arguments: %r' % sorted(kwargs)) # depends on [control=['if'], data=[]] if selection_string is None: selection_string = ', '.join((column.name for column in columns)) # depends on [control=['if'], data=['selection_string']] tmp_name = '_alter_columns_' + name op.rename_table(name, tmp_name) for column in columns: # Clear any indices that already exist on this table, otherwise we will # fail to create the table because the indices will already be present. # When we create the table below, the indices that we want to preserve # will just get recreated. for table in (name, tmp_name): try: op.drop_index('ix_%s_%s' % (table, column.name)) # depends on [control=['try'], data=[]] except sa.exc.OperationalError: pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['table']] # depends on [control=['for'], data=['column']] op.create_table(name, *columns) op.execute('insert into %s select %s from %s' % (name, selection_string, tmp_name)) op.drop_table(tmp_name)
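A hedged usage sketch for alter_columns, as it might appear in an Alembic downgrade, where op is Alembic's migration operations proxy; the table and column names here are invented for illustration:

import sqlalchemy as sa

def downgrade():
    # Rebuild the hypothetical 'equities' table with start_date stored as TEXT again.
    # selection_string entries must line up with the new column order.
    alter_columns(
        op,
        'equities',
        sa.Column('sid', sa.Integer, primary_key=True),
        sa.Column('start_date', sa.Text),
        selection_string='sid, CAST(start_date AS TEXT)',
    )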
def peak_interval(self, name, alpha=_alpha, npoints=_npoints, **kwargs): """ Calculate peak interval for parameter. """ data = self.get(name, **kwargs) return peak_interval(data,alpha,npoints)
def function[peak_interval, parameter[self, name, alpha, npoints]]: constant[ Calculate peak interval for parameter. ] variable[data] assign[=] call[name[self].get, parameter[name[name]]] return[call[name[peak_interval], parameter[name[data], name[alpha], name[npoints]]]]
keyword[def] identifier[peak_interval] ( identifier[self] , identifier[name] , identifier[alpha] = identifier[_alpha] , identifier[npoints] = identifier[_npoints] ,** identifier[kwargs] ): literal[string] identifier[data] = identifier[self] . identifier[get] ( identifier[name] ,** identifier[kwargs] ) keyword[return] identifier[peak_interval] ( identifier[data] , identifier[alpha] , identifier[npoints] )
def peak_interval(self, name, alpha=_alpha, npoints=_npoints, **kwargs): """ Calculate peak interval for parameter. """ data = self.get(name, **kwargs) return peak_interval(data, alpha, npoints)
def fingerprint_helper(egg, permute=False, n_perms=1000, match='exact',
                       distance='euclidean', features=None):
    """
    Computes clustering along a set of feature dimensions

    Parameters
    ----------
    egg : quail.Egg
        Data to analyze

    permute : bool
        If True, estimate the weights by permutation (n_perms iterations)

    n_perms : int
        Number of permutations to run when permute is True

    match : str
        Matching scheme passed through to the weight computation (default 'exact')

    distance : str
        Distance metric for feature clustering (default 'euclidean')

    features : list of str, optional
        Feature dimensions to cluster on; defaults to the keys of egg.dist_funcs

    Returns
    ----------
    probabilities : Numpy array
        Each number represents clustering along a different feature dimension

    """

    if features is None:
        features = egg.dist_funcs.keys()

    inds = egg.pres.index.tolist()
    slices = [egg.crack(subjects=[i], lists=[j]) for i, j in inds]
    # distdict is expected to be defined at module level alongside this helper
    weights = _get_weights(slices, features, distdict, permute, n_perms, match, distance)
    return np.nanmean(weights, axis=0)
def function[fingerprint_helper, parameter[egg, permute, n_perms, match, distance, features]]: constant[ Computes clustering along a set of feature dimensions Parameters ---------- egg : quail.Egg Data to analyze dist_funcs : dict Dictionary of distance functions for feature clustering analyses Returns ---------- probabilities : Numpy array Each number represents clustering along a different feature dimension ] if compare[name[features] is constant[None]] begin[:] variable[features] assign[=] call[name[egg].dist_funcs.keys, parameter[]] variable[inds] assign[=] call[name[egg].pres.index.tolist, parameter[]] variable[slices] assign[=] <ast.ListComp object at 0x7da1b0febca0> variable[weights] assign[=] call[name[_get_weights], parameter[name[slices], name[features], name[distdict], name[permute], name[n_perms], name[match], name[distance]]] return[call[name[np].nanmean, parameter[name[weights]]]]
keyword[def] identifier[fingerprint_helper] ( identifier[egg] , identifier[permute] = keyword[False] , identifier[n_perms] = literal[int] , identifier[match] = literal[string] , identifier[distance] = literal[string] , identifier[features] = keyword[None] ): literal[string] keyword[if] identifier[features] keyword[is] keyword[None] : identifier[features] = identifier[egg] . identifier[dist_funcs] . identifier[keys] () identifier[inds] = identifier[egg] . identifier[pres] . identifier[index] . identifier[tolist] () identifier[slices] =[ identifier[egg] . identifier[crack] ( identifier[subjects] =[ identifier[i] ], identifier[lists] =[ identifier[j] ]) keyword[for] identifier[i] , identifier[j] keyword[in] identifier[inds] ] identifier[weights] = identifier[_get_weights] ( identifier[slices] , identifier[features] , identifier[distdict] , identifier[permute] , identifier[n_perms] , identifier[match] , identifier[distance] ) keyword[return] identifier[np] . identifier[nanmean] ( identifier[weights] , identifier[axis] = literal[int] )
def fingerprint_helper(egg, permute=False, n_perms=1000, match='exact', distance='euclidean', features=None): """ Computes clustering along a set of feature dimensions Parameters ---------- egg : quail.Egg Data to analyze dist_funcs : dict Dictionary of distance functions for feature clustering analyses Returns ---------- probabilities : Numpy array Each number represents clustering along a different feature dimension """ if features is None: features = egg.dist_funcs.keys() # depends on [control=['if'], data=['features']] inds = egg.pres.index.tolist() slices = [egg.crack(subjects=[i], lists=[j]) for (i, j) in inds] weights = _get_weights(slices, features, distdict, permute, n_perms, match, distance) return np.nanmean(weights, axis=0)
def extendsTree( self ): """ Returns whether or not the grid lines should extend through the tree \ area or not. :return <bool> """ delegate = self.itemDelegate() if ( isinstance(delegate, XTreeWidgetDelegate) ): return delegate.extendsTree() return False
def function[extendsTree, parameter[self]]: constant[ Returns whether or not the grid lines should extend through the tree area or not. :return <bool> ] variable[delegate] assign[=] call[name[self].itemDelegate, parameter[]] if call[name[isinstance], parameter[name[delegate], name[XTreeWidgetDelegate]]] begin[:] return[call[name[delegate].extendsTree, parameter[]]] return[constant[False]]
keyword[def] identifier[extendsTree] ( identifier[self] ): literal[string] identifier[delegate] = identifier[self] . identifier[itemDelegate] () keyword[if] ( identifier[isinstance] ( identifier[delegate] , identifier[XTreeWidgetDelegate] )): keyword[return] identifier[delegate] . identifier[extendsTree] () keyword[return] keyword[False]
def extendsTree(self): """ Returns whether or not the grid lines should extend through the tree area or not. :return <bool> """ delegate = self.itemDelegate() if isinstance(delegate, XTreeWidgetDelegate): return delegate.extendsTree() # depends on [control=['if'], data=[]] return False
def clean_value(self):
    """
    Populates json serialization ready data.
    This is the method used to serialize and store the object data into the DB.

    Returns:
        List of dicts.
    """
    result = []
    for mdl in self:
        result.append(super(ListNode, mdl).clean_value())
    return result
def function[clean_value, parameter[self]]: constant[ Populates json serialization ready data. This is the method used to serialize and store the object data in to DB Returns: List of dicts. ] variable[result] assign[=] list[[]] for taget[name[mdl]] in starred[name[self]] begin[:] call[name[result].append, parameter[call[call[name[super], parameter[name[ListNode], name[mdl]]].clean_value, parameter[]]]] return[name[result]]
keyword[def] identifier[clean_value] ( identifier[self] ): literal[string] identifier[result] =[] keyword[for] identifier[mdl] keyword[in] identifier[self] : identifier[result] . identifier[append] ( identifier[super] ( identifier[ListNode] , identifier[mdl] ). identifier[clean_value] ()) keyword[return] identifier[result]
def clean_value(self): """ Populates json serialization ready data. This is the method used to serialize and store the object data in to DB Returns: List of dicts. """ result = [] for mdl in self: result.append(super(ListNode, mdl).clean_value()) # depends on [control=['for'], data=['mdl']] return result
def get_all_enclave_tags(self, enclave_ids=None): """ Retrieves all tags present in the given enclaves. If the enclave list is empty, the tags returned include all tags for all enclaves the user has access to. :param (string) list enclave_ids: list of enclave IDs :return: The list of |Tag| objects. """ params = {'enclaveIds': enclave_ids} resp = self._client.get("reports/tags", params=params) return [Tag.from_dict(indicator) for indicator in resp.json()]
def function[get_all_enclave_tags, parameter[self, enclave_ids]]: constant[ Retrieves all tags present in the given enclaves. If the enclave list is empty, the tags returned include all tags for all enclaves the user has access to. :param (string) list enclave_ids: list of enclave IDs :return: The list of |Tag| objects. ] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20e9551b0>], [<ast.Name object at 0x7da20e9562f0>]] variable[resp] assign[=] call[name[self]._client.get, parameter[constant[reports/tags]]] return[<ast.ListComp object at 0x7da20e957790>]
keyword[def] identifier[get_all_enclave_tags] ( identifier[self] , identifier[enclave_ids] = keyword[None] ): literal[string] identifier[params] ={ literal[string] : identifier[enclave_ids] } identifier[resp] = identifier[self] . identifier[_client] . identifier[get] ( literal[string] , identifier[params] = identifier[params] ) keyword[return] [ identifier[Tag] . identifier[from_dict] ( identifier[indicator] ) keyword[for] identifier[indicator] keyword[in] identifier[resp] . identifier[json] ()]
def get_all_enclave_tags(self, enclave_ids=None): """ Retrieves all tags present in the given enclaves. If the enclave list is empty, the tags returned include all tags for all enclaves the user has access to. :param (string) list enclave_ids: list of enclave IDs :return: The list of |Tag| objects. """ params = {'enclaveIds': enclave_ids} resp = self._client.get('reports/tags', params=params) return [Tag.from_dict(indicator) for indicator in resp.json()]
def trimmed_mean(self, p1, p2):
    """
    Computes the mean of the distribution between the two percentiles p1 and p2.

    This is a modified version of the algorithm presented in the original
    t-Digest paper.
    """
    if not (p1 < p2):
        raise ValueError("p1 must be between 0 and 100 and less than p2.")

    min_count = p1 / 100. * self.n
    max_count = p2 / 100. * self.n

    trimmed_sum = trimmed_count = curr_count = 0
    for i, c in enumerate(self.C.values()):
        next_count = curr_count + c.count

        if next_count <= min_count:
            curr_count = next_count
            continue

        count = c.count
        if curr_count < min_count:
            count = next_count - min_count
        if next_count > max_count:
            count -= next_count - max_count

        trimmed_sum += count * c.mean
        trimmed_count += count

        if next_count >= max_count:
            break

        curr_count = next_count

    if trimmed_count == 0:
        return 0
    return trimmed_sum / trimmed_count
def function[trimmed_mean, parameter[self, p1, p2]]: constant[ Computes the mean of the distribution between the two percentiles p1 and p2. This is a modified algorithm than the one presented in the original t-Digest paper. ] if <ast.UnaryOp object at 0x7da1b0786e30> begin[:] <ast.Raise object at 0x7da1b07860e0> variable[min_count] assign[=] binary_operation[binary_operation[name[p1] / constant[100.0]] * name[self].n] variable[max_count] assign[=] binary_operation[binary_operation[name[p2] / constant[100.0]] * name[self].n] variable[trimmed_sum] assign[=] constant[0] for taget[tuple[[<ast.Name object at 0x7da1b0716b30>, <ast.Name object at 0x7da1b0714910>]]] in starred[call[name[enumerate], parameter[call[name[self].C.values, parameter[]]]]] begin[:] variable[next_count] assign[=] binary_operation[name[curr_count] + name[c].count] if compare[name[next_count] less_or_equal[<=] name[min_count]] begin[:] variable[curr_count] assign[=] name[next_count] continue variable[count] assign[=] name[c].count if compare[name[curr_count] less[<] name[min_count]] begin[:] variable[count] assign[=] binary_operation[name[next_count] - name[min_count]] if compare[name[next_count] greater[>] name[max_count]] begin[:] <ast.AugAssign object at 0x7da1b0716680> <ast.AugAssign object at 0x7da1b0716ef0> <ast.AugAssign object at 0x7da1b0714bb0> if compare[name[next_count] greater_or_equal[>=] name[max_count]] begin[:] break variable[curr_count] assign[=] name[next_count] if compare[name[trimmed_count] equal[==] constant[0]] begin[:] return[constant[0]] return[binary_operation[name[trimmed_sum] / name[trimmed_count]]]
keyword[def] identifier[trimmed_mean] ( identifier[self] , identifier[p1] , identifier[p2] ): literal[string] keyword[if] keyword[not] ( identifier[p1] < identifier[p2] ): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[min_count] = identifier[p1] / literal[int] * identifier[self] . identifier[n] identifier[max_count] = identifier[p2] / literal[int] * identifier[self] . identifier[n] identifier[trimmed_sum] = identifier[trimmed_count] = identifier[curr_count] = literal[int] keyword[for] identifier[i] , identifier[c] keyword[in] identifier[enumerate] ( identifier[self] . identifier[C] . identifier[values] ()): identifier[next_count] = identifier[curr_count] + identifier[c] . identifier[count] keyword[if] identifier[next_count] <= identifier[min_count] : identifier[curr_count] = identifier[next_count] keyword[continue] identifier[count] = identifier[c] . identifier[count] keyword[if] identifier[curr_count] < identifier[min_count] : identifier[count] = identifier[next_count] - identifier[min_count] keyword[if] identifier[next_count] > identifier[max_count] : identifier[count] -= identifier[next_count] - identifier[max_count] identifier[trimmed_sum] += identifier[count] * identifier[c] . identifier[mean] identifier[trimmed_count] += identifier[count] keyword[if] identifier[next_count] >= identifier[max_count] : keyword[break] identifier[curr_count] = identifier[next_count] keyword[if] identifier[trimmed_count] == literal[int] : keyword[return] literal[int] keyword[return] identifier[trimmed_sum] / identifier[trimmed_count]
def trimmed_mean(self, p1, p2): """ Computes the mean of the distribution between the two percentiles p1 and p2. This is a modified algorithm than the one presented in the original t-Digest paper. """ if not p1 < p2: raise ValueError('p1 must be between 0 and 100 and less than p2.') # depends on [control=['if'], data=[]] min_count = p1 / 100.0 * self.n max_count = p2 / 100.0 * self.n trimmed_sum = trimmed_count = curr_count = 0 for (i, c) in enumerate(self.C.values()): next_count = curr_count + c.count if next_count <= min_count: curr_count = next_count continue # depends on [control=['if'], data=['next_count']] count = c.count if curr_count < min_count: count = next_count - min_count # depends on [control=['if'], data=['min_count']] if next_count > max_count: count -= next_count - max_count # depends on [control=['if'], data=['next_count', 'max_count']] trimmed_sum += count * c.mean trimmed_count += count if next_count >= max_count: break # depends on [control=['if'], data=[]] curr_count = next_count # depends on [control=['for'], data=[]] if trimmed_count == 0: return 0 # depends on [control=['if'], data=[]] return trimmed_sum / trimmed_count
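To see the trimming arithmetic in isolation, here is a small standalone re-implementation over plain (mean, count) pairs, assuming, as above, that centroids are sorted by mean and p1/p2 are percentages:

def trimmed_mean_demo(centroids, n, p1, p2):
    # centroids: sorted list of (mean, count); n: total observation count
    min_count, max_count = p1 / 100.0 * n, p2 / 100.0 * n
    trimmed_sum = trimmed_count = curr = 0.0
    for mean, count in centroids:
        nxt = curr + count
        if nxt <= min_count:       # centroid entirely below the window
            curr = nxt
            continue
        take = count
        if curr < min_count:       # clip the part below p1
            take = nxt - min_count
        if nxt > max_count:        # clip the part above p2
            take -= nxt - max_count
        trimmed_sum += take * mean
        trimmed_count += take
        if nxt >= max_count:
            break
        curr = nxt
    return trimmed_sum / trimmed_count if trimmed_count else 0.0

# middle 50% of three equal-weight centroids is symmetric, so the result is 2.0:
print(trimmed_mean_demo([(1.0, 10), (2.0, 10), (3.0, 10)], 30, 25, 75))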
async def new_messages(self, poll_period=30, *, fromid=None):
    """New messages on the Regional Message Board::

        tnp = region('The North Pacific')
        async for post in tnp.new_messages():
            # Your processing code here
            print(post.text)  # As an example

    Guarantees that:

    * Every post is generated from the moment the generator is started;
    * No post is generated more than once;
    * Posts are generated in order from oldest to newest.

    Parameters
    ----------
    poll_period : int
        How long to wait between requesting the next bunch of posts,
        in seconds.  Ignored while catching up to the end of the
        Message Board, meaning that no matter how long of a period you
        set you will never encounter a situation where posts are made
        faster than the generator can deliver them.

        Note that, regardless of the ``poll_period`` you set, all of
        the code in your loop body still has to execute (possibly
        several times) before a new bunch of posts can be requested.
        Consider wrapping your post-processing code in a coroutine and
        launching it as a task from the loop body if you suspect this
        might be an issue.
    fromid : int
        Request posts starting with the one with this id, as opposed
        to the last one at the time.  Useful if you need to avoid
        losing posts between restarts.  Set to `1` to request the
        entire RMB history chronologically.

    Returns
    -------
    an asynchronous generator that yields :class:`Post`
    """
    if fromid is not None:
        # fromid of 0 gets ignored by NS
        fromid = 1 if fromid == 0 else fromid
    else:
        try:
            # We only need the posts from this point forwards
            fromid = (await self._get_messages(limit=1))[0].id + 1
        except IndexError:
            # Empty RMB
            fromid = 1

    # Sleep before the loop body to avoid wasting the first request.
    # We only want to apply this "optimization" if fromid was not
    # specified, as only then we know for sure we're at the end of the
    # RMB.
    await sleep(poll_period)

    while True:
        posts = await self._get_messages(fromid=fromid)
        with suppress(IndexError):
            fromid = posts[-1].id + 1
        for post in posts:
            yield post
        if len(posts) < 100:
            await sleep(poll_period)
<ast.AsyncFunctionDef object at 0x7da18c4ccd90>
keyword[async] keyword[def] identifier[new_messages] ( identifier[self] , identifier[poll_period] = literal[int] ,*, identifier[fromid] = keyword[None] ): literal[string] keyword[if] identifier[fromid] keyword[is] keyword[not] keyword[None] : identifier[fromid] = literal[int] keyword[if] identifier[fromid] == literal[int] keyword[else] identifier[fromid] keyword[else] : keyword[try] : identifier[fromid] =( keyword[await] identifier[self] . identifier[_get_messages] ( identifier[limit] = literal[int] ))[ literal[int] ]. identifier[id] + literal[int] keyword[except] identifier[IndexError] : identifier[fromid] = literal[int] keyword[await] identifier[sleep] ( identifier[poll_period] ) keyword[while] keyword[True] : identifier[posts] = keyword[await] identifier[self] . identifier[_get_messages] ( identifier[fromid] = identifier[fromid] ) keyword[with] identifier[suppress] ( identifier[IndexError] ): identifier[fromid] = identifier[posts] [- literal[int] ]. identifier[id] + literal[int] keyword[for] identifier[post] keyword[in] identifier[posts] : keyword[yield] identifier[post] keyword[if] identifier[len] ( identifier[posts] )< literal[int] : keyword[await] identifier[sleep] ( identifier[poll_period] )
async def new_messages(self, poll_period=30, *, fromid=None): """New messages on the Regional Message Board:: tnp = region('The North Pacific') async for post in tnp.new_messages(): # Your processing code here print(post.text) # As an example Guarantees that: * Every post is generated from the moment the generator is started; * No post is generated more than once; * Posts are generated in order from oldest to newest. Parameters ---------- poll_period : int How long to wait between requesting the next bunch of posts, in seconds. Ignored while catching up to the end of the Message Board, meaning that no matter how long of a period you set you will never encounter a situation where posts are made faster than the generator can deliver them. Note that, regardless of the ``poll_period`` you set, all of the code in your loop body still has to execute (possibly several times) before a new bunch of posts can be requested. Consider wrapping your post-processing code in a coroutine and launching it as a task from the loop body if you suspect this might be an issue. fromid : int Request posts starting with the one with this id, as as opposed to the last one at the time. Useful if you need to avoid losing posts between restarts. Set to `1` to request the entire RMB history chronologically. Returns ------- an asynchronous generator that yields :class:`Post` """ if fromid is not None: # fromid of 0 gets ignored by NS fromid = 1 if fromid == 0 else fromid # depends on [control=['if'], data=['fromid']] else: try: # We only need the posts from this point forwards fromid = (await self._get_messages(limit=1))[0].id + 1 # depends on [control=['try'], data=[]] except IndexError: # Empty RMB fromid = 1 # depends on [control=['except'], data=[]] # Sleep before the loop body to avoid wasting the first request. # We only want to apply this "optimization" if fromid was not # specified, as only then we know for sure we're at the end of the # RMB. await sleep(poll_period) while True: posts = await self._get_messages(fromid=fromid) with suppress(IndexError): fromid = posts[-1].id + 1 # depends on [control=['with'], data=[]] for post in posts: yield post # depends on [control=['for'], data=['post']] if len(posts) < 100: await sleep(poll_period) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
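A hedged driver sketch for the generator above. It assumes this library's region() factory and a Post object with id/text attributes, as the docstring example suggests; the loop runs until the task is cancelled:

import asyncio

async def watch_rmb():
    tnp = region('The North Pacific')  # assumed factory from this library
    # fromid=1 replays the whole RMB history before switching to polling
    async for post in tnp.new_messages(poll_period=60, fromid=1):
        print(post.id, post.text)

asyncio.run(watch_rmb())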
def get_top_edge_depth(self): """ Return minimum depth of surface's top edge. :returns: Float value, the vertical distance between the earth surface and the shallowest point in surface's top edge in km. """ top_edge = self.mesh[0:1] if top_edge.depths is None: return 0 else: return numpy.min(top_edge.depths)
def function[get_top_edge_depth, parameter[self]]: constant[ Return minimum depth of surface's top edge. :returns: Float value, the vertical distance between the earth surface and the shallowest point in surface's top edge in km. ] variable[top_edge] assign[=] call[name[self].mesh][<ast.Slice object at 0x7da18ede79d0>] if compare[name[top_edge].depths is constant[None]] begin[:] return[constant[0]]
keyword[def] identifier[get_top_edge_depth] ( identifier[self] ): literal[string] identifier[top_edge] = identifier[self] . identifier[mesh] [ literal[int] : literal[int] ] keyword[if] identifier[top_edge] . identifier[depths] keyword[is] keyword[None] : keyword[return] literal[int] keyword[else] : keyword[return] identifier[numpy] . identifier[min] ( identifier[top_edge] . identifier[depths] )
def get_top_edge_depth(self): """ Return minimum depth of surface's top edge. :returns: Float value, the vertical distance between the earth surface and the shallowest point in surface's top edge in km. """ top_edge = self.mesh[0:1] if top_edge.depths is None: return 0 # depends on [control=['if'], data=[]] else: return numpy.min(top_edge.depths)
def connect(self): """Create new connection unless we already have one.""" if not getattr(self._local, 'conn', None): try: server = self._servers.get() logger.debug('Connecting to %s', server) self._local.conn = ClientTransport(server, self._framed_transport, self._timeout, self._recycle) except (Thrift.TException, socket.timeout, socket.error): logger.warning('Connection to %s failed.', server) self._servers.mark_dead(server) return self.connect() return self._local.conn
def function[connect, parameter[self]]: constant[Create new connection unless we already have one.] if <ast.UnaryOp object at 0x7da1b0e6cf40> begin[:] <ast.Try object at 0x7da18c4cf370> return[name[self]._local.conn]
keyword[def] identifier[connect] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[getattr] ( identifier[self] . identifier[_local] , literal[string] , keyword[None] ): keyword[try] : identifier[server] = identifier[self] . identifier[_servers] . identifier[get] () identifier[logger] . identifier[debug] ( literal[string] , identifier[server] ) identifier[self] . identifier[_local] . identifier[conn] = identifier[ClientTransport] ( identifier[server] , identifier[self] . identifier[_framed_transport] , identifier[self] . identifier[_timeout] , identifier[self] . identifier[_recycle] ) keyword[except] ( identifier[Thrift] . identifier[TException] , identifier[socket] . identifier[timeout] , identifier[socket] . identifier[error] ): identifier[logger] . identifier[warning] ( literal[string] , identifier[server] ) identifier[self] . identifier[_servers] . identifier[mark_dead] ( identifier[server] ) keyword[return] identifier[self] . identifier[connect] () keyword[return] identifier[self] . identifier[_local] . identifier[conn]
def connect(self): """Create new connection unless we already have one.""" if not getattr(self._local, 'conn', None): try: server = self._servers.get() logger.debug('Connecting to %s', server) self._local.conn = ClientTransport(server, self._framed_transport, self._timeout, self._recycle) # depends on [control=['try'], data=[]] except (Thrift.TException, socket.timeout, socket.error): logger.warning('Connection to %s failed.', server) self._servers.mark_dead(server) return self.connect() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] return self._local.conn
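The shape of connect() above, a per-thread cached connection with dead-server failover via recursion, generalizes beyond Thrift. A minimal sketch under invented names: open_connection is a placeholder transport factory, and a real pool would bound the retries instead of recursing until a server answers:

import threading

class PooledClient:
    def __init__(self, servers):
        self._servers = list(servers)
        self._local = threading.local()  # one cached connection per thread

    def connect(self):
        conn = getattr(self._local, 'conn', None)
        if conn is None:
            server = self._servers[0]
            try:
                conn = open_connection(server)  # hypothetical factory
            except OSError:
                # rotate the dead server to the back and retry on the next one
                self._servers.append(self._servers.pop(0))
                return self.connect()
            self._local.conn = conn
        return conn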
def to_python(self, value): """ Overrides standard to_python method from django models to allow correct translation of Mongo array to a python list. """ if value is None: return value assert isinstance(value, list) ret = [] for mdl_dict in value: if isinstance(mdl_dict, self.model_container): ret.append(mdl_dict) continue mdl = make_mdl(self.model_container, mdl_dict) ret.append(mdl) return ret
def function[to_python, parameter[self, value]]: constant[ Overrides standard to_python method from django models to allow correct translation of Mongo array to a python list. ] if compare[name[value] is constant[None]] begin[:] return[name[value]] assert[call[name[isinstance], parameter[name[value], name[list]]]] variable[ret] assign[=] list[[]] for taget[name[mdl_dict]] in starred[name[value]] begin[:] if call[name[isinstance], parameter[name[mdl_dict], name[self].model_container]] begin[:] call[name[ret].append, parameter[name[mdl_dict]]] continue variable[mdl] assign[=] call[name[make_mdl], parameter[name[self].model_container, name[mdl_dict]]] call[name[ret].append, parameter[name[mdl]]] return[name[ret]]
keyword[def] identifier[to_python] ( identifier[self] , identifier[value] ): literal[string] keyword[if] identifier[value] keyword[is] keyword[None] : keyword[return] identifier[value] keyword[assert] identifier[isinstance] ( identifier[value] , identifier[list] ) identifier[ret] =[] keyword[for] identifier[mdl_dict] keyword[in] identifier[value] : keyword[if] identifier[isinstance] ( identifier[mdl_dict] , identifier[self] . identifier[model_container] ): identifier[ret] . identifier[append] ( identifier[mdl_dict] ) keyword[continue] identifier[mdl] = identifier[make_mdl] ( identifier[self] . identifier[model_container] , identifier[mdl_dict] ) identifier[ret] . identifier[append] ( identifier[mdl] ) keyword[return] identifier[ret]
def to_python(self, value): """ Overrides standard to_python method from django models to allow correct translation of Mongo array to a python list. """ if value is None: return value # depends on [control=['if'], data=['value']] assert isinstance(value, list) ret = [] for mdl_dict in value: if isinstance(mdl_dict, self.model_container): ret.append(mdl_dict) continue # depends on [control=['if'], data=[]] mdl = make_mdl(self.model_container, mdl_dict) ret.append(mdl) # depends on [control=['for'], data=['mdl_dict']] return ret
def _serialize(self): """ Serialize the ResponseObject. Returns a webob `Response` object. """ # Do something appropriate if the response object is unbound if self._defcode is None: raise exceptions.UnboundResponse() # Build the response resp = self.response_class(request=self.req, status=self.code, headerlist=self._headers.items()) # Do we have a body? if self.result: resp.content_type = self.content_type resp.body = self.serializer(self.result) # Return the response return resp
def function[_serialize, parameter[self]]: constant[ Serialize the ResponseObject. Returns a webob `Response` object. ] if compare[name[self]._defcode is constant[None]] begin[:] <ast.Raise object at 0x7da18bcc9a20> variable[resp] assign[=] call[name[self].response_class, parameter[]] if name[self].result begin[:] name[resp].content_type assign[=] name[self].content_type name[resp].body assign[=] call[name[self].serializer, parameter[name[self].result]] return[name[resp]]
keyword[def] identifier[_serialize] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_defcode] keyword[is] keyword[None] : keyword[raise] identifier[exceptions] . identifier[UnboundResponse] () identifier[resp] = identifier[self] . identifier[response_class] ( identifier[request] = identifier[self] . identifier[req] , identifier[status] = identifier[self] . identifier[code] , identifier[headerlist] = identifier[self] . identifier[_headers] . identifier[items] ()) keyword[if] identifier[self] . identifier[result] : identifier[resp] . identifier[content_type] = identifier[self] . identifier[content_type] identifier[resp] . identifier[body] = identifier[self] . identifier[serializer] ( identifier[self] . identifier[result] ) keyword[return] identifier[resp]
def _serialize(self): """ Serialize the ResponseObject. Returns a webob `Response` object. """ # Do something appropriate if the response object is unbound if self._defcode is None: raise exceptions.UnboundResponse() # depends on [control=['if'], data=[]] # Build the response resp = self.response_class(request=self.req, status=self.code, headerlist=self._headers.items()) # Do we have a body? if self.result: resp.content_type = self.content_type resp.body = self.serializer(self.result) # depends on [control=['if'], data=[]] # Return the response return resp
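For context on the serializer contract _serialize relies on, a hedged sketch of building the equivalent webob Response by hand; the header and payload values are invented:

import json
from webob import Response

resp = Response(status=200, headerlist=[('X-Request-Id', 'demo')])
resp.content_type = 'application/json'
resp.body = json.dumps({'ok': True}).encode('utf-8')  # body must be bytes
print(resp.status, resp.content_type)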
def get_plugin_icon(self): """Return widget icon""" path = osp.join(self.PLUGIN_PATH, self.IMG_PATH) return ima.icon('pylint', icon_path=path)
def function[get_plugin_icon, parameter[self]]: constant[Return widget icon] variable[path] assign[=] call[name[osp].join, parameter[name[self].PLUGIN_PATH, name[self].IMG_PATH]] return[call[name[ima].icon, parameter[constant[pylint]]]]
keyword[def] identifier[get_plugin_icon] ( identifier[self] ): literal[string] identifier[path] = identifier[osp] . identifier[join] ( identifier[self] . identifier[PLUGIN_PATH] , identifier[self] . identifier[IMG_PATH] ) keyword[return] identifier[ima] . identifier[icon] ( literal[string] , identifier[icon_path] = identifier[path] )
def get_plugin_icon(self): """Return widget icon""" path = osp.join(self.PLUGIN_PATH, self.IMG_PATH) return ima.icon('pylint', icon_path=path)
def write_vcf(tree_dict, file_name):#, compress=False):
    """
    Writes out a VCF-style file (which seems to be minimally handleable by
    vcftools and pyvcf) of the alignment. This is created from a dict in a
    similar format to what's created by :py:meth:`treetime.vcf_utils.read_vcf`

    Positions of variable sites are transformed to start at 1 to match VCF convention.

    Parameters
    ----------
    tree_dict: nested dict
        A nested dict with keys 'sequence' 'reference' and 'positions',
        as is created by :py:meth:`treetime.TreeAnc.get_tree_dict`

    file_name: str
        File to which the new VCF should be written out. File names ending with
        '.gz' will result in the VCF automatically being gzipped.

    """

    # Programming Logic Note:
    #
    # For a sequence like:
    # Pos     1 2 3 4 5 6
    # Ref     A C T T A C
    # Seq1    A C - - - G
    #
    # In a dict it is stored:
    # Seq1:{3:'-', 4:'-', 5:'-', 6:'G'}  (Numbering from 1 for simplicity)
    #
    # In a VCF it needs to be:
    # POS REF     ALT     Seq1
    # 2   CTTA    C       1/1
    # 6   C       G       1/1
    #
    # If a position is deleted (pos 3), need to get invariable position preceding it
    #
    # However, in alternative case, the base before a deletion is mutant, so need to check
    # that next position isn't a deletion (as otherwise won't be found until after the
    # current single bp mutation is written out)
    #
    # When deleted position found, need to gather up all adjacent mutant positions with deletions,
    # but not include adjacent mutant positions that aren't deletions (pos 6)
    #
    # Don't run off the 'end' of the position list if deletion is the last thing to be included
    # in the VCF file

    sequences = tree_dict['sequences']
    ref = tree_dict['reference']
    positions = tree_dict['positions']

    def handleDeletions(i, pi, pos, ref, delete, pattern):
        refb = ref[pi]
        if delete: #Need to get the position before
            i-=1    #As we'll next go to this position again
            pi-=1
            pos = pi+1
            refb = ref[pi]
            #re-get pattern
            pattern = []
            for k,v in sequences.items():
                try:
                    pattern.append(sequences[k][pi])
                except KeyError:
                    pattern.append(ref[pi])
            pattern = np.array(pattern)

        sites = []
        sites.append(pattern)

        #Gather all positions affected by deletion - but don't run off end of position list
        while (i+1) < len(positions) and positions[i+1] == pi+1:
            i+=1
            pi = positions[i]
            pattern = []
            for k,v in sequences.items():
                try:
                    pattern.append(sequences[k][pi])
                except KeyError:
                    pattern.append(ref[pi])
            pattern = np.array(pattern)

            #Stops 'greedy' behaviour from adding mutations adjacent to deletions
            if any(pattern == '-'): #if part of deletion, append
                sites.append(pattern)
                refb = refb+ref[pi]
            else: #this is another mutation next to the deletion!
                i-=1    #don't append, break this loop

        #Rotate them into 'calls'
        sites = np.asarray(sites)
        align = np.rot90(sites)
        align = np.flipud(align)

        #Get rid of '-', and put '.' for calls that match ref
        #Only removes trailing '-'. This breaks VCF convention, but the standard
        #VCF way of handling this* is really complicated, and the situation is rare.
        #(*deletions and mutations at the same locations)
        fullpat = []
        for pt in align:
            gp = len(pt)-1
            while pt[gp] == '-':
                pt[gp] = ''
                gp-=1
            pat = "".join(pt)
            if pat == refb:
                fullpat.append('.')
            else:
                fullpat.append(pat)

        pattern = np.array(fullpat)

        return i, pi, pos, refb, pattern


    #prepare the header of the VCF & write out
    header=["#CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT"]+list(sequences.keys())
    with open(file_name, 'w') as the_file:
        the_file.write( "##fileformat=VCFv4.2\n"+
                        "##source=NextStrain\n"+
                        "##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\n")
        the_file.write("\t".join(header)+"\n")

    vcfWrite = []
    errorPositions = []
    explainedErrors = 0

    #Why so basic? Because we sometimes have to back up a position!
    i=0
    while i < len(positions):
        #Get the 'pattern' of all calls at this position.
        #Look out specifically for current (this pos) or upcoming (next pos) deletions
        #But also distinguish these two, as handled differently.

        pi = positions[i]
        pos = pi+1 #change numbering to match VCF, not python, for output
        refb = ref[pi] #reference base at this position

        delete = False #deletion at this position - need to grab previous base (invariable)
        deleteGroup = False #deletion at next position (mutation at this pos) - do not need to get prev base

        #try/except is much more efficient than 'if' statements for constructing patterns,
        #as on average a 'variable' location will not be variable for any given sequence
        pattern = []
        #pattern2 gets the pattern at next position to check for upcoming deletions
        #it's more efficient to get both here rather than loop through sequences twice!
        pattern2 = []
        for k,v in sequences.items():
            try:
                pattern.append(sequences[k][pi])
            except KeyError:
                pattern.append(ref[pi])

            try:
                pattern2.append(sequences[k][pi+1])
            except KeyError:
                pattern2.append(ref[pi+1])

        pattern = np.array(pattern)
        pattern2 = np.array(pattern2)

        #If a deletion here, need to gather up all bases, and position before
        if any(pattern == '-'):
            if pos != 1:
                deleteGroup = True
                delete = True
            else:
                #If there's a deletion in 1st pos, VCF files do not handle this well.
                #Proceed keeping it as '-' for alt (violates VCF), but warn user to check output.
                #(This is rare)
                print ("WARNING: You have a deletion in the first position of your alignment. VCF format does not handle this well. Please check the output to ensure it is correct.")
        else:
            #If a deletion in next pos, need to gather up all bases
            if any(pattern2 == '-'):
                deleteGroup = True

        #If deletion, treat affected bases as 1 'call':
        if delete or deleteGroup:
            i, pi, pos, refb, pattern = handleDeletions(i, pi, pos, ref, delete, pattern)
        #If no deletion, replace ref with '.', as in VCF format
        else:
            pattern[pattern==refb] = '.'

        #Get the list of ALTs - minus any '.'!
        uniques = np.unique(pattern)
        uniques = uniques[np.where(uniques!='.')]

        #Convert bases to the number that matches the ALT
        j=1
        for u in uniques:
            pattern[np.where(pattern==u)[0]] = str(j)
            j+=1

        #Now convert these calls to #/# (VCF format)
        calls = [ j+"/"+j if j!='.' else '.' for j in pattern ]

        #What if there's no variation at a variable site??
        #This can happen when sites are modified by TreeTime - see below.
        printPos = True
        if len(uniques)==0:
            #If we expect it (it was made constant by TreeTime), it's fine.
            if 'inferred_const_sites' in tree_dict and pi in tree_dict['inferred_const_sites']:
                explainedErrors += 1
                printPos = False #and don't output position to the VCF
            else:
                #If we don't expect, raise an error
                errorPositions.append(str(pi))

        #Write it out - Increment positions by 1 so it's in VCF numbering
        #If no longer variable, and explained, don't write it out
        if printPos:
            output = ["MTB_anc", str(pos), ".", refb, ",".join(uniques), ".", "PASS", ".", "GT"] + calls
            vcfWrite.append("\t".join(output))

        i+=1

    #Note: The number of 'inferred_const_sites' passed back by TreeTime will often be longer
    #than the number of 'sites that were made constant' that prints below. This is because given the site:
    # Ref   Alt     Seq
    # G     A       AANAA
    #This will be converted to 'AAAAA' and listed as an 'inferred_const_sites'. However, for VCF
    #purposes, because the site is 'variant' against the ref, it is variant, as expected, and so
    #won't be counted in the below list, which is only sites removed from the VCF.

    if 'inferred_const_sites' in tree_dict and explainedErrors != 0:
        print ( "Sites that were constant except for ambiguous bases were made constant by TreeTime. This happened {} times. These sites are now excluded from the VCF.".format(explainedErrors))

    if len(errorPositions) != 0:
        print ("\n***WARNING: vcf_utils.py"
            "\n{} sites were found that had no alternative bases. If this data has been "
            "run through TreeTime and contains ambiguous bases, try calling get_tree_dict with "
            "var_ambigs=True to see if this clears the error."
            "\n\nAlternative causes:"
            "\n- Not all sequences in your alignment are in the tree (if you are running TreeTime via commandline "
            "this is most likely)"
            "\n- In TreeTime, can be caused by overwriting variants in tips with small branch lengths (debug)"
            "\n\nThese are the positions affected (numbering starts at 0):".format(str(len(errorPositions))))
        print (",".join(errorPositions))

    with open(file_name, 'a') as the_file:
        the_file.write("\n".join(vcfWrite))

    if file_name.endswith(('.gz', '.GZ')):
        import os
        #must temporarily remove .gz ending, or gzip won't zip it!
        os.rename(file_name, file_name[:-3])
        call = ["gzip", file_name[:-3]]
        os.system(" ".join(call))
def function[write_vcf, parameter[tree_dict, file_name]]: constant[ Writes out a VCF-style file (which seems to be minimally handleable by vcftools and pyvcf) of the alignment. This is created from a dict in a similar format to what's created by :py:meth:`treetime.vcf_utils.read_vcf` Positions of variable sites are transformed to start at 1 to match VCF convention. Parameters ---------- tree_dict: nested dict A nested dict with keys 'sequence' 'reference' and 'positions', as is created by :py:meth:`treetime.TreeAnc.get_tree_dict` file_name: str File to which the new VCF should be written out. File names ending with '.gz' will result in the VCF automatically being gzipped. ] variable[sequences] assign[=] call[name[tree_dict]][constant[sequences]] variable[ref] assign[=] call[name[tree_dict]][constant[reference]] variable[positions] assign[=] call[name[tree_dict]][constant[positions]] def function[handleDeletions, parameter[i, pi, pos, ref, delete, pattern]]: variable[refb] assign[=] call[name[ref]][name[pi]] if name[delete] begin[:] <ast.AugAssign object at 0x7da1b23454e0> <ast.AugAssign object at 0x7da1b2345990> variable[pos] assign[=] binary_operation[name[pi] + constant[1]] variable[refb] assign[=] call[name[ref]][name[pi]] variable[pattern] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b2344820>, <ast.Name object at 0x7da1b2346080>]]] in starred[call[name[sequences].items, parameter[]]] begin[:] <ast.Try object at 0x7da1b2346e60> variable[pattern] assign[=] call[name[np].array, parameter[name[pattern]]] variable[sites] assign[=] list[[]] call[name[sites].append, parameter[name[pattern]]] while <ast.BoolOp object at 0x7da1b2346560> begin[:] <ast.AugAssign object at 0x7da1b23474c0> variable[pi] assign[=] call[name[positions]][name[i]] variable[pattern] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b2344220>, <ast.Name object at 0x7da1b2345db0>]]] in starred[call[name[sequences].items, parameter[]]] begin[:] <ast.Try object at 0x7da1b2344250> variable[pattern] assign[=] call[name[np].array, parameter[name[pattern]]] if call[name[any], parameter[compare[name[pattern] equal[==] constant[-]]]] begin[:] call[name[sites].append, parameter[name[pattern]]] variable[refb] assign[=] binary_operation[name[refb] + call[name[ref]][name[pi]]] variable[sites] assign[=] call[name[np].asarray, parameter[name[sites]]] variable[align] assign[=] call[name[np].rot90, parameter[name[sites]]] variable[align] assign[=] call[name[np].flipud, parameter[name[align]]] variable[fullpat] assign[=] list[[]] for taget[name[pt]] in starred[name[align]] begin[:] variable[gp] assign[=] binary_operation[call[name[len], parameter[name[pt]]] - constant[1]] while compare[call[name[pt]][name[gp]] equal[==] constant[-]] begin[:] call[name[pt]][name[gp]] assign[=] constant[] <ast.AugAssign object at 0x7da1b2344160> variable[pat] assign[=] call[constant[].join, parameter[name[pt]]] if compare[name[pat] equal[==] name[refb]] begin[:] call[name[fullpat].append, parameter[constant[.]]] variable[pattern] assign[=] call[name[np].array, parameter[name[fullpat]]] return[tuple[[<ast.Name object at 0x7da1b23470d0>, <ast.Name object at 0x7da1b2347d30>, <ast.Name object at 0x7da1b2345390>, <ast.Name object at 0x7da1b23442b0>, <ast.Name object at 0x7da1b23462c0>]]] variable[header] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b2344e50>, <ast.Constant object at 0x7da1b2346980>, <ast.Constant object at 0x7da1b23478e0>, <ast.Constant object at 0x7da1b2347070>, <ast.Constant object at 
0x7da1b2345930>, <ast.Constant object at 0x7da1b2346b90>, <ast.Constant object at 0x7da1b23449a0>, <ast.Constant object at 0x7da1b23451b0>, <ast.Constant object at 0x7da1b2344a30>]] + call[name[list], parameter[call[name[sequences].keys, parameter[]]]]] with call[name[open], parameter[name[file_name], constant[w]]] begin[:] call[name[the_file].write, parameter[binary_operation[binary_operation[constant[##fileformat=VCFv4.2 ] + constant[##source=NextStrain ]] + constant[##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype"> ]]]] call[name[the_file].write, parameter[binary_operation[call[constant[ ].join, parameter[name[header]]] + constant[ ]]]] variable[vcfWrite] assign[=] list[[]] variable[errorPositions] assign[=] list[[]] variable[explainedErrors] assign[=] constant[0] variable[i] assign[=] constant[0] while compare[name[i] less[<] call[name[len], parameter[name[positions]]]] begin[:] variable[pi] assign[=] call[name[positions]][name[i]] variable[pos] assign[=] binary_operation[name[pi] + constant[1]] variable[refb] assign[=] call[name[ref]][name[pi]] variable[delete] assign[=] constant[False] variable[deleteGroup] assign[=] constant[False] variable[pattern] assign[=] list[[]] variable[pattern2] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da20c6e6770>, <ast.Name object at 0x7da20c6e76a0>]]] in starred[call[name[sequences].items, parameter[]]] begin[:] <ast.Try object at 0x7da20c6e5b10> <ast.Try object at 0x7da20c6e5300> variable[pattern] assign[=] call[name[np].array, parameter[name[pattern]]] variable[pattern2] assign[=] call[name[np].array, parameter[name[pattern2]]] if call[name[any], parameter[compare[name[pattern] equal[==] constant[-]]]] begin[:] if compare[name[pos] not_equal[!=] constant[1]] begin[:] variable[deleteGroup] assign[=] constant[True] variable[delete] assign[=] constant[True] if <ast.BoolOp object at 0x7da20c6e7850> begin[:] <ast.Tuple object at 0x7da20c6e4bb0> assign[=] call[name[handleDeletions], parameter[name[i], name[pi], name[pos], name[ref], name[delete], name[pattern]]] variable[uniques] assign[=] call[name[np].unique, parameter[name[pattern]]] variable[uniques] assign[=] call[name[uniques]][call[name[np].where, parameter[compare[name[uniques] not_equal[!=] constant[.]]]]] variable[j] assign[=] constant[1] for taget[name[u]] in starred[name[uniques]] begin[:] call[name[pattern]][call[call[name[np].where, parameter[compare[name[pattern] equal[==] name[u]]]]][constant[0]]] assign[=] call[name[str], parameter[name[j]]] <ast.AugAssign object at 0x7da20c6e6590> variable[calls] assign[=] <ast.ListComp object at 0x7da20c6e6080> variable[printPos] assign[=] constant[True] if compare[call[name[len], parameter[name[uniques]]] equal[==] constant[0]] begin[:] if <ast.BoolOp object at 0x7da20c6e7af0> begin[:] <ast.AugAssign object at 0x7da20c6e72e0> variable[printPos] assign[=] constant[False] if name[printPos] begin[:] variable[output] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b02c7040>, <ast.Call object at 0x7da1b02c69b0>, <ast.Constant object at 0x7da1b02c7760>, <ast.Name object at 0x7da1b02c49a0>, <ast.Call object at 0x7da1b02c4ac0>, <ast.Constant object at 0x7da1b02c7490>, <ast.Constant object at 0x7da1b02c4670>, <ast.Constant object at 0x7da1b02c6c20>, <ast.Constant object at 0x7da1b02c6710>]] + name[calls]] call[name[vcfWrite].append, parameter[call[constant[ ].join, parameter[name[output]]]]] <ast.AugAssign object at 0x7da1b02c4fa0> if <ast.BoolOp object at 0x7da1b02c62c0> begin[:] call[name[print], 
parameter[call[constant[Sites that were constant except for ambiguous bases were made constant by TreeTime. This happened {} times. These sites are now excluded from the VCF.].format, parameter[name[explainedErrors]]]]] if compare[call[name[len], parameter[name[errorPositions]]] not_equal[!=] constant[0]] begin[:] call[name[print], parameter[call[constant[ ***WARNING: vcf_utils.py {} sites were found that had no alternative bases. If this data has been run through TreeTime and contains ambiguous bases, try calling get_tree_dict with var_ambigs=True to see if this clears the error. Alternative causes: - Not all sequences in your alignment are in the tree (if you are running TreeTime via commandline this is most likely) - In TreeTime, can be caused by overwriting variants in tips with small branch lengths (debug) These are the positions affected (numbering starts at 0):].format, parameter[call[name[str], parameter[call[name[len], parameter[name[errorPositions]]]]]]]]] call[name[print], parameter[call[constant[,].join, parameter[name[errorPositions]]]]] with call[name[open], parameter[name[file_name], constant[a]]] begin[:] call[name[the_file].write, parameter[call[constant[ ].join, parameter[name[vcfWrite]]]]] if call[name[file_name].endswith, parameter[tuple[[<ast.Constant object at 0x7da1b02c5f60>, <ast.Constant object at 0x7da1b02c4e50>]]]] begin[:] import module[os] call[name[os].rename, parameter[name[file_name], call[name[file_name]][<ast.Slice object at 0x7da1b02c4df0>]]] variable[call] assign[=] list[[<ast.Constant object at 0x7da1b02c4a30>, <ast.Subscript object at 0x7da1b02c5ea0>]] call[name[os].system, parameter[call[constant[ ].join, parameter[name[call]]]]]
keyword[def] identifier[write_vcf] ( identifier[tree_dict] , identifier[file_name] ): literal[string] identifier[sequences] = identifier[tree_dict] [ literal[string] ] identifier[ref] = identifier[tree_dict] [ literal[string] ] identifier[positions] = identifier[tree_dict] [ literal[string] ] keyword[def] identifier[handleDeletions] ( identifier[i] , identifier[pi] , identifier[pos] , identifier[ref] , identifier[delete] , identifier[pattern] ): identifier[refb] = identifier[ref] [ identifier[pi] ] keyword[if] identifier[delete] : identifier[i] -= literal[int] identifier[pi] -= literal[int] identifier[pos] = identifier[pi] + literal[int] identifier[refb] = identifier[ref] [ identifier[pi] ] identifier[pattern] =[] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[sequences] . identifier[items] (): keyword[try] : identifier[pattern] . identifier[append] ( identifier[sequences] [ identifier[k] ][ identifier[pi] ]) keyword[except] identifier[KeyError] : identifier[pattern] . identifier[append] ( identifier[ref] [ identifier[pi] ]) identifier[pattern] = identifier[np] . identifier[array] ( identifier[pattern] ) identifier[sites] =[] identifier[sites] . identifier[append] ( identifier[pattern] ) keyword[while] ( identifier[i] + literal[int] )< identifier[len] ( identifier[positions] ) keyword[and] identifier[positions] [ identifier[i] + literal[int] ]== identifier[pi] + literal[int] : identifier[i] += literal[int] identifier[pi] = identifier[positions] [ identifier[i] ] identifier[pattern] =[] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[sequences] . identifier[items] (): keyword[try] : identifier[pattern] . identifier[append] ( identifier[sequences] [ identifier[k] ][ identifier[pi] ]) keyword[except] identifier[KeyError] : identifier[pattern] . identifier[append] ( identifier[ref] [ identifier[pi] ]) identifier[pattern] = identifier[np] . identifier[array] ( identifier[pattern] ) keyword[if] identifier[any] ( identifier[pattern] == literal[string] ): identifier[sites] . identifier[append] ( identifier[pattern] ) identifier[refb] = identifier[refb] + identifier[ref] [ identifier[pi] ] keyword[else] : identifier[i] -= literal[int] identifier[sites] = identifier[np] . identifier[asarray] ( identifier[sites] ) identifier[align] = identifier[np] . identifier[rot90] ( identifier[sites] ) identifier[align] = identifier[np] . identifier[flipud] ( identifier[align] ) identifier[fullpat] =[] keyword[for] identifier[pt] keyword[in] identifier[align] : identifier[gp] = identifier[len] ( identifier[pt] )- literal[int] keyword[while] identifier[pt] [ identifier[gp] ]== literal[string] : identifier[pt] [ identifier[gp] ]= literal[string] identifier[gp] -= literal[int] identifier[pat] = literal[string] . identifier[join] ( identifier[pt] ) keyword[if] identifier[pat] == identifier[refb] : identifier[fullpat] . identifier[append] ( literal[string] ) keyword[else] : identifier[fullpat] . identifier[append] ( identifier[pat] ) identifier[pattern] = identifier[np] . identifier[array] ( identifier[fullpat] ) keyword[return] identifier[i] , identifier[pi] , identifier[pos] , identifier[refb] , identifier[pattern] identifier[header] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]+ identifier[list] ( identifier[sequences] . 
identifier[keys] ()) keyword[with] identifier[open] ( identifier[file_name] , literal[string] ) keyword[as] identifier[the_file] : identifier[the_file] . identifier[write] ( literal[string] + literal[string] + literal[string] ) identifier[the_file] . identifier[write] ( literal[string] . identifier[join] ( identifier[header] )+ literal[string] ) identifier[vcfWrite] =[] identifier[errorPositions] =[] identifier[explainedErrors] = literal[int] identifier[i] = literal[int] keyword[while] identifier[i] < identifier[len] ( identifier[positions] ): identifier[pi] = identifier[positions] [ identifier[i] ] identifier[pos] = identifier[pi] + literal[int] identifier[refb] = identifier[ref] [ identifier[pi] ] identifier[delete] = keyword[False] identifier[deleteGroup] = keyword[False] identifier[pattern] =[] identifier[pattern2] =[] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[sequences] . identifier[items] (): keyword[try] : identifier[pattern] . identifier[append] ( identifier[sequences] [ identifier[k] ][ identifier[pi] ]) keyword[except] identifier[KeyError] : identifier[pattern] . identifier[append] ( identifier[ref] [ identifier[pi] ]) keyword[try] : identifier[pattern2] . identifier[append] ( identifier[sequences] [ identifier[k] ][ identifier[pi] + literal[int] ]) keyword[except] identifier[KeyError] : identifier[pattern2] . identifier[append] ( identifier[ref] [ identifier[pi] + literal[int] ]) identifier[pattern] = identifier[np] . identifier[array] ( identifier[pattern] ) identifier[pattern2] = identifier[np] . identifier[array] ( identifier[pattern2] ) keyword[if] identifier[any] ( identifier[pattern] == literal[string] ): keyword[if] identifier[pos] != literal[int] : identifier[deleteGroup] = keyword[True] identifier[delete] = keyword[True] keyword[else] : identifier[print] ( literal[string] ) keyword[else] : keyword[if] identifier[any] ( identifier[pattern2] == literal[string] ): identifier[deleteGroup] = keyword[True] keyword[if] identifier[delete] keyword[or] identifier[deleteGroup] : identifier[i] , identifier[pi] , identifier[pos] , identifier[refb] , identifier[pattern] = identifier[handleDeletions] ( identifier[i] , identifier[pi] , identifier[pos] , identifier[ref] , identifier[delete] , identifier[pattern] ) keyword[else] : identifier[pattern] [ identifier[pattern] == identifier[refb] ]= literal[string] identifier[uniques] = identifier[np] . identifier[unique] ( identifier[pattern] ) identifier[uniques] = identifier[uniques] [ identifier[np] . identifier[where] ( identifier[uniques] != literal[string] )] identifier[j] = literal[int] keyword[for] identifier[u] keyword[in] identifier[uniques] : identifier[pattern] [ identifier[np] . identifier[where] ( identifier[pattern] == identifier[u] )[ literal[int] ]]= identifier[str] ( identifier[j] ) identifier[j] += literal[int] identifier[calls] =[ identifier[j] + literal[string] + identifier[j] keyword[if] identifier[j] != literal[string] keyword[else] literal[string] keyword[for] identifier[j] keyword[in] identifier[pattern] ] identifier[printPos] = keyword[True] keyword[if] identifier[len] ( identifier[uniques] )== literal[int] : keyword[if] literal[string] keyword[in] identifier[tree_dict] keyword[and] identifier[pi] keyword[in] identifier[tree_dict] [ literal[string] ]: identifier[explainedErrors] += literal[int] identifier[printPos] = keyword[False] keyword[else] : identifier[errorPositions] . 
identifier[append] ( identifier[str] ( identifier[pi] )) keyword[if] identifier[printPos] : identifier[output] =[ literal[string] , identifier[str] ( identifier[pos] ), literal[string] , identifier[refb] , literal[string] . identifier[join] ( identifier[uniques] ), literal[string] , literal[string] , literal[string] , literal[string] ]+ identifier[calls] identifier[vcfWrite] . identifier[append] ( literal[string] . identifier[join] ( identifier[output] )) identifier[i] += literal[int] keyword[if] literal[string] keyword[in] identifier[tree_dict] keyword[and] identifier[explainedErrors] != literal[int] : identifier[print] ( literal[string] . identifier[format] ( identifier[explainedErrors] )) keyword[if] identifier[len] ( identifier[errorPositions] )!= literal[int] : identifier[print] ( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] . identifier[format] ( identifier[str] ( identifier[len] ( identifier[errorPositions] )))) identifier[print] ( literal[string] . identifier[join] ( identifier[errorPositions] )) keyword[with] identifier[open] ( identifier[file_name] , literal[string] ) keyword[as] identifier[the_file] : identifier[the_file] . identifier[write] ( literal[string] . identifier[join] ( identifier[vcfWrite] )) keyword[if] identifier[file_name] . identifier[endswith] (( literal[string] , literal[string] )): keyword[import] identifier[os] identifier[os] . identifier[rename] ( identifier[file_name] , identifier[file_name] [:- literal[int] ]) identifier[call] =[ literal[string] , identifier[file_name] [:- literal[int] ]] identifier[os] . identifier[system] ( literal[string] . identifier[join] ( identifier[call] ))
def write_vcf(tree_dict, file_name): #, compress=False): "\n Writes out a VCF-style file (which seems to be minimally handleable\n by vcftools and pyvcf) of the alignment. This is created from a dict\n in a similar format to what's created by :py:meth:`treetime.vcf_utils.read_vcf`\n\n Positions of variable sites are transformed to start at 1 to match\n VCF convention.\n\n Parameters\n ----------\n tree_dict: nested dict\n A nested dict with keys 'sequence' 'reference' and 'positions',\n as is created by :py:meth:`treetime.TreeAnc.get_tree_dict`\n\n file_name: str\n File to which the new VCF should be written out. File names ending with\n '.gz' will result in the VCF automatically being gzipped.\n\n " # Programming Logic Note: # # For a sequence like: # Pos 1 2 3 4 5 6 # Ref A C T T A C # Seq1 A C - - - G # # In a dict it is stored: # Seq1:{3:'-', 4:'-', 5:'-', 6:'G'} (Numbering from 1 for simplicity) # # In a VCF it needs to be: # POS REF ALT Seq1 # 2 CTTA C 1/1 # 6 C G 1/1 # # If a position is deleted (pos 3), need to get invariable position preceeding it # # However, in alternative case, the base before a deletion is mutant, so need to check # that next position isn't a deletion (as otherwise won't be found until after the # current single bp mutation is written out) # # When deleted position found, need to gather up all adjacent mutant positions with deletions, # but not include adjacent mutant positions that aren't deletions (pos 6) # # Don't run off the 'end' of the position list if deletion is the last thing to be included # in the VCF file sequences = tree_dict['sequences'] ref = tree_dict['reference'] positions = tree_dict['positions'] def handleDeletions(i, pi, pos, ref, delete, pattern): refb = ref[pi] if delete: #Need to get the position before i -= 1 #As we'll next go to this position again pi -= 1 pos = pi + 1 refb = ref[pi] #re-get pattern pattern = [] for (k, v) in sequences.items(): try: pattern.append(sequences[k][pi]) # depends on [control=['try'], data=[]] except KeyError: pattern.append(ref[pi]) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] pattern = np.array(pattern) # depends on [control=['if'], data=[]] sites = [] sites.append(pattern) #Gather all positions affected by deletion - but don't run off end of position list while i + 1 < len(positions) and positions[i + 1] == pi + 1: i += 1 pi = positions[i] pattern = [] for (k, v) in sequences.items(): try: pattern.append(sequences[k][pi]) # depends on [control=['try'], data=[]] except KeyError: pattern.append(ref[pi]) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] pattern = np.array(pattern) #Stops 'greedy' behaviour from adding mutations adjacent to deletions if any(pattern == '-'): #if part of deletion, append sites.append(pattern) refb = refb + ref[pi] # depends on [control=['if'], data=[]] else: #this is another mutation next to the deletion! i -= 1 #don't append, break this loop # depends on [control=['while'], data=[]] #Rotate them into 'calls' sites = np.asarray(sites) align = np.rot90(sites) align = np.flipud(align) #Get rid of '-', and put '.' for calls that match ref #Only removes trailing '-'. This breaks VCF convension, but the standard #VCF way of handling this* is really complicated, and the situation is rare. 
#(*deletions and mutations at the same locations) fullpat = [] for pt in align: gp = len(pt) - 1 while pt[gp] == '-': pt[gp] = '' gp -= 1 # depends on [control=['while'], data=[]] pat = ''.join(pt) if pat == refb: fullpat.append('.') # depends on [control=['if'], data=[]] else: fullpat.append(pat) # depends on [control=['for'], data=['pt']] pattern = np.array(fullpat) return (i, pi, pos, refb, pattern) #prepare the header of the VCF & write out header = ['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT'] + list(sequences.keys()) with open(file_name, 'w') as the_file: the_file.write('##fileformat=VCFv4.2\n' + '##source=NextStrain\n' + '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n') the_file.write('\t'.join(header) + '\n') # depends on [control=['with'], data=['the_file']] vcfWrite = [] errorPositions = [] explainedErrors = 0 #Why so basic? Because we sometimes have to back up a position! i = 0 while i < len(positions): #Get the 'pattern' of all calls at this position. #Look out specifically for current (this pos) or upcoming (next pos) deletions #But also distinguish these two, as handled differently. pi = positions[i] pos = pi + 1 #change numbering to match VCF, not python, for output refb = ref[pi] #reference base at this position delete = False #deletion at this position - need to grab previous base (invariable) deleteGroup = False #deletion at next position (mutation at this pos) - do not need to get prev base #try/except is much more efficient than 'if' statements for constructing patterns, #as on average a 'variable' location will not be variable for any given sequence pattern = [] #pattern2 gets the pattern at next position to check for upcoming deletions #it's more efficient to get both here rather than loop through sequences twice! pattern2 = [] for (k, v) in sequences.items(): try: pattern.append(sequences[k][pi]) # depends on [control=['try'], data=[]] except KeyError: pattern.append(ref[pi]) # depends on [control=['except'], data=[]] try: pattern2.append(sequences[k][pi + 1]) # depends on [control=['try'], data=[]] except KeyError: pattern2.append(ref[pi + 1]) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] pattern = np.array(pattern) pattern2 = np.array(pattern2) #If a deletion here, need to gather up all bases, and position before if any(pattern == '-'): if pos != 1: deleteGroup = True delete = True # depends on [control=['if'], data=[]] else: #If theres a deletion in 1st pos, VCF files do not handle this well. #Proceed keeping it as '-' for alt (violates VCF), but warn user to check output. #(This is rare) print('WARNING: You have a deletion in the first position of your alignment. VCF format does not handle this well. Please check the output to ensure it is correct.') # depends on [control=['if'], data=[]] #If a deletion in next pos, need to gather up all bases elif any(pattern2 == '-'): deleteGroup = True # depends on [control=['if'], data=[]] #If deletion, treat affected bases as 1 'call': if delete or deleteGroup: (i, pi, pos, refb, pattern) = handleDeletions(i, pi, pos, ref, delete, pattern) # depends on [control=['if'], data=[]] else: #If no deletion, replace ref with '.', as in VCF format pattern[pattern == refb] = '.' #Get the list of ALTs - minus any '.'! 
uniques = np.unique(pattern) uniques = uniques[np.where(uniques != '.')] #Convert bases to the number that matches the ALT j = 1 for u in uniques: pattern[np.where(pattern == u)[0]] = str(j) j += 1 # depends on [control=['for'], data=['u']] #Now convert these calls to #/# (VCF format) calls = [j + '/' + j if j != '.' else '.' for j in pattern] #What if there's no variation at a variable site?? #This can happen when sites are modified by TreeTime - see below. printPos = True if len(uniques) == 0: #If we expect it (it was made constant by TreeTime), it's fine. if 'inferred_const_sites' in tree_dict and pi in tree_dict['inferred_const_sites']: explainedErrors += 1 printPos = False #and don't output position to the VCF # depends on [control=['if'], data=[]] else: #If we don't expect, raise an error errorPositions.append(str(pi)) # depends on [control=['if'], data=[]] #Write it out - Increment positions by 1 so it's in VCF numbering #If no longer variable, and explained, don't write it out if printPos: output = ['MTB_anc', str(pos), '.', refb, ','.join(uniques), '.', 'PASS', '.', 'GT'] + calls vcfWrite.append('\t'.join(output)) # depends on [control=['if'], data=[]] i += 1 # depends on [control=['while'], data=['i']] #Note: The number of 'inferred_const_sites' passed back by TreeTime will often be longer #than the number of 'site that were made constant' that prints below. This is because given the site: # Ref Alt Seq # G A AANAA #This will be converted to 'AAAAA' and listed as an 'inferred_const_sites'. However, for VCF #purposes, because the site is 'variant' against the ref, it is variant, as expected, and so #won't be counted in the below list, which is only sites removed from the VCF. if 'inferred_const_sites' in tree_dict and explainedErrors != 0: print('Sites that were constant except for ambiguous bases were made constant by TreeTime. This happened {} times. These sites are now excluded from the VCF.'.format(explainedErrors)) # depends on [control=['if'], data=[]] if len(errorPositions) != 0: print('\n***WARNING: vcf_utils.py\n{} sites were found that had no alternative bases. If this data has been run through TreeTime and contains ambiguous bases, try calling get_tree_dict with var_ambigs=True to see if this clears the error.\n\nAlternative causes:\n- Not all sequences in your alignment are in the tree (if you are running TreeTime via commandline this is most likely)\n- In TreeTime, can be caused by overwriting variants in tips with small branch lengths (debug)\n\nThese are the positions affected (numbering starts at 0):'.format(str(len(errorPositions)))) print(','.join(errorPositions)) # depends on [control=['if'], data=[]] with open(file_name, 'a') as the_file: the_file.write('\n'.join(vcfWrite)) # depends on [control=['with'], data=['the_file']] if file_name.endswith(('.gz', '.GZ')): import os #must temporarily remove .gz ending, or gzip won't zip it! os.rename(file_name, file_name[:-3]) call = ['gzip', file_name[:-3]] os.system(' '.join(call)) # depends on [control=['if'], data=[]]
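A minimal usage sketch for write_vcf above, assuming a tree_dict shaped like treetime's get_tree_dict output (the keys 'sequences', 'reference', and 'positions' come from the function body; the toy sequences here are invented for illustration, and numpy is assumed imported as np in the module):

# Invented toy input: 0-based variable positions, per-sample {position: base} dicts.
tree_dict = {
    'reference': 'ACTTAC',
    'positions': [1, 3],
    'sequences': {
        'Seq1': {1: 'G'},   # C -> G at position 1
        'Seq2': {3: 'A'},   # T -> A at position 3
    },
}
write_vcf(tree_dict, 'out.vcf')   # a name ending in '.gz' would gzip the result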
def _reinit_daq_daemons(sender, instance, **kwargs):
    """
    Update the DAQ daemon configuration when changes are applied to the models.
    """
    if type(instance) is SMbusDevice:
        post_save.send_robust(sender=Device, instance=instance.smbus_device)
    elif type(instance) is SMbusVariable:
        post_save.send_robust(sender=Variable, instance=instance.smbus_variable)
    elif type(instance) is ExtendedSMbusVariable:
        post_save.send_robust(sender=Variable, instance=Variable.objects.get(pk=instance.pk))
    elif type(instance) is ExtendedSMBusDevice:
        post_save.send_robust(sender=Device, instance=Device.objects.get(pk=instance.pk))
def function[_reinit_daq_daemons, parameter[sender, instance]]: constant[ update the daq daemon configuration when changes be applied in the models ] if compare[call[name[type], parameter[name[instance]]] is name[SMbusDevice]] begin[:] call[name[post_save].send_robust, parameter[]]
keyword[def] identifier[_reinit_daq_daemons] ( identifier[sender] , identifier[instance] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[type] ( identifier[instance] ) keyword[is] identifier[SMbusDevice] : identifier[post_save] . identifier[send_robust] ( identifier[sender] = identifier[Device] , identifier[instance] = identifier[instance] . identifier[smbus_device] ) keyword[elif] identifier[type] ( identifier[instance] ) keyword[is] identifier[SMbusVariable] : identifier[post_save] . identifier[send_robust] ( identifier[sender] = identifier[Variable] , identifier[instance] = identifier[instance] . identifier[smbus_variable] ) keyword[elif] identifier[type] ( identifier[instance] ) keyword[is] identifier[ExtendedSMbusVariable] : identifier[post_save] . identifier[send_robust] ( identifier[sender] = identifier[Variable] , identifier[instance] = identifier[Variable] . identifier[objects] . identifier[get] ( identifier[pk] = identifier[instance] . identifier[pk] )) keyword[elif] identifier[type] ( identifier[instance] ) keyword[is] identifier[ExtendedSMBusDevice] : identifier[post_save] . identifier[send_robust] ( identifier[sender] = identifier[Device] , identifier[instance] = identifier[Device] . identifier[objects] . identifier[get] ( identifier[pk] = identifier[instance] . identifier[pk] ))
def _reinit_daq_daemons(sender, instance, **kwargs):
    """
    Update the DAQ daemon configuration when changes are applied to the models.
    """
    if type(instance) is SMbusDevice:
        post_save.send_robust(sender=Device, instance=instance.smbus_device) # depends on [control=['if'], data=[]]
    elif type(instance) is SMbusVariable:
        post_save.send_robust(sender=Variable, instance=instance.smbus_variable) # depends on [control=['if'], data=[]]
    elif type(instance) is ExtendedSMbusVariable:
        post_save.send_robust(sender=Variable, instance=Variable.objects.get(pk=instance.pk)) # depends on [control=['if'], data=[]]
    elif type(instance) is ExtendedSMBusDevice:
        post_save.send_robust(sender=Device, instance=Device.objects.get(pk=instance.pk)) # depends on [control=['if'], data=[]]
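A plausible wiring sketch: the handler reads like a Django post_save receiver, so it would be connected for the models it inspects (the model classes are assumed importable from the app that defines them):

from django.db.models.signals import post_save

# Re-dispatch saves of the SMBus-specific models as generic Device/Variable saves.
for model in (SMbusDevice, SMbusVariable, ExtendedSMbusVariable, ExtendedSMBusDevice):
    post_save.connect(_reinit_daq_daemons, sender=model)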
def invalid_content_type(self, request=None, response=None): """Returns the content type that should be used by default on validation errors""" if callable(self.invalid_outputs.content_type): return self.invalid_outputs.content_type(request=request, response=response) else: return self.invalid_outputs.content_type
def function[invalid_content_type, parameter[self, request, response]]: constant[Returns the content type that should be used by default on validation errors] if call[name[callable], parameter[name[self].invalid_outputs.content_type]] begin[:] return[call[name[self].invalid_outputs.content_type, parameter[]]]
keyword[def] identifier[invalid_content_type] ( identifier[self] , identifier[request] = keyword[None] , identifier[response] = keyword[None] ): literal[string] keyword[if] identifier[callable] ( identifier[self] . identifier[invalid_outputs] . identifier[content_type] ): keyword[return] identifier[self] . identifier[invalid_outputs] . identifier[content_type] ( identifier[request] = identifier[request] , identifier[response] = identifier[response] ) keyword[else] : keyword[return] identifier[self] . identifier[invalid_outputs] . identifier[content_type]
def invalid_content_type(self, request=None, response=None): """Returns the content type that should be used by default on validation errors""" if callable(self.invalid_outputs.content_type): return self.invalid_outputs.content_type(request=request, response=response) # depends on [control=['if'], data=[]] else: return self.invalid_outputs.content_type
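A stub demonstration of both branches; the _Handler/_Static/_Dynamic names are invented for illustration, and only the invalid_outputs attribute is assumed from the method body:

class _Handler:
    invalid_content_type = invalid_content_type  # bind the function above

class _Static:
    content_type = 'application/json'            # plain value: returned as-is

class _Dynamic:
    @staticmethod
    def content_type(request=None, response=None):
        return 'text/plain'                      # callable: invoked per request

h = _Handler()
h.invalid_outputs = _Static()
assert h.invalid_content_type() == 'application/json'
h.invalid_outputs = _Dynamic()
assert h.invalid_content_type() == 'text/plain'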
def init_poolmanager(self, connections, maxsize, block=requests.adapters.DEFAULT_POOLBLOCK, **pool_kwargs):
    """Initialize the pool manager with the cipher list and TLSv1."""
    context = create_urllib3_context(ciphers=self.CIPHERS, ssl_version=ssl.PROTOCOL_TLSv1)
    pool_kwargs['ssl_context'] = context
    return super(TLSv1Adapter, self).init_poolmanager(connections, maxsize, block, **pool_kwargs)
def function[init_poolmanager, parameter[self, connections, maxsize, block]]: constant[Initialize poolmanager with cipher and Tlsv1] variable[context] assign[=] call[name[create_urllib3_context], parameter[]] call[name[pool_kwargs]][constant[ssl_context]] assign[=] name[context] return[call[call[name[super], parameter[name[TLSv1Adapter], name[self]]].init_poolmanager, parameter[name[connections], name[maxsize], name[block]]]]
keyword[def] identifier[init_poolmanager] ( identifier[self] , identifier[connections] , identifier[maxsize] , identifier[block] = identifier[requests] . identifier[adapters] . identifier[DEFAULT_POOLBLOCK] , ** identifier[pool_kwargs] ): literal[string] identifier[context] = identifier[create_urllib3_context] ( identifier[ciphers] = identifier[self] . identifier[CIPHERS] , identifier[ssl_version] = identifier[ssl] . identifier[PROTOCOL_TLSv1] ) identifier[pool_kwargs] [ literal[string] ]= identifier[context] keyword[return] identifier[super] ( identifier[TLSv1Adapter] , identifier[self] ). identifier[init_poolmanager] ( identifier[connections] , identifier[maxsize] , identifier[block] ,** identifier[pool_kwargs] )
def init_poolmanager(self, connections, maxsize, block=requests.adapters.DEFAULT_POOLBLOCK, **pool_kwargs):
    """Initialize the pool manager with the cipher list and TLSv1."""
    context = create_urllib3_context(ciphers=self.CIPHERS, ssl_version=ssl.PROTOCOL_TLSv1)
    pool_kwargs['ssl_context'] = context
    return super(TLSv1Adapter, self).init_poolmanager(connections, maxsize, block, **pool_kwargs)
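Typical use, assuming TLSv1Adapter subclasses requests.adapters.HTTPAdapter (as its super() call suggests): mount it on a Session so HTTPS connections negotiate TLSv1 with the class's cipher list.

import requests

session = requests.Session()
session.mount('https://', TLSv1Adapter())   # for legacy hosts that only speak TLSv1
# response = session.get('https://legacy-host.example.com/')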
def __write_filter_dic(wk_sheet, column):
    ''' Return the filter dict for a certain column. '''
    row1_val = wk_sheet['{0}1'.format(column)].value
    row2_val = wk_sheet['{0}2'.format(column)].value
    row3_val = wk_sheet['{0}3'.format(column)].value
    row4_val = wk_sheet['{0}4'.format(column)].value
    if row1_val and row1_val.strip() != '':
        row2_val = row2_val.strip()
        slug_name = row1_val.strip()
        c_name = row2_val.strip()
        tags1 = [x.strip() for x in row3_val.split(',')]
        tags_dic = {}
        # If there is only one tag, row 2 encodes the control type (and unit).
        if len(tags1) == 1:
            xx_1 = row2_val.split(':')
            # Default to 'text' (HTML text input control) for unknown types.
            if xx_1[0].lower() in INPUT_ARR:
                xx_1[0] = xx_1[0].lower()
            else:
                xx_1[0] = 'text'
            if len(xx_1) == 2:
                ctr_type, unit = xx_1
            else:
                ctr_type = xx_1[0]
                unit = ''
            tags_dic[1] = unit
        else:
            ctr_type = 'select'  # HTML selection control.
            for index, tag_val in enumerate(tags1):
                # The index of tags_dic starts from 1.
                tags_dic[index + 1] = tag_val.strip()
        outkey = 'html_{0}'.format(slug_name)
        outval = {
            'en': slug_name,
            'zh': c_name,
            'dic': tags_dic,
            'type': ctr_type,
            'display': row4_val,
        }
        return (outkey, outval)
    else:
        return (None, None)
def function[__write_filter_dic, parameter[wk_sheet, column]]: constant[ return filter dic for certain column ] variable[row1_val] assign[=] call[name[wk_sheet]][call[constant[{0}1].format, parameter[name[column]]]].value variable[row2_val] assign[=] call[name[wk_sheet]][call[constant[{0}2].format, parameter[name[column]]]].value variable[row3_val] assign[=] call[name[wk_sheet]][call[constant[{0}3].format, parameter[name[column]]]].value variable[row4_val] assign[=] call[name[wk_sheet]][call[constant[{0}4].format, parameter[name[column]]]].value if <ast.BoolOp object at 0x7da1b0673850> begin[:] variable[row2_val] assign[=] call[name[row2_val].strip, parameter[]] variable[slug_name] assign[=] call[name[row1_val].strip, parameter[]] variable[c_name] assign[=] call[name[row2_val].strip, parameter[]] variable[tags1] assign[=] <ast.ListComp object at 0x7da1b06687f0> variable[tags_dic] assign[=] dictionary[[], []] if compare[call[name[len], parameter[name[tags1]]] equal[==] constant[1]] begin[:] variable[xx_1] assign[=] call[name[row2_val].split, parameter[constant[:]]] if compare[call[call[name[xx_1]][constant[0]].lower, parameter[]] in name[INPUT_ARR]] begin[:] call[name[xx_1]][constant[0]] assign[=] call[call[name[xx_1]][constant[0]].lower, parameter[]] if compare[call[name[len], parameter[name[xx_1]]] equal[==] constant[2]] begin[:] <ast.Tuple object at 0x7da1b06693f0> assign[=] name[xx_1] call[name[tags_dic]][constant[1]] assign[=] name[unit] variable[outkey] assign[=] call[constant[html_{0}].format, parameter[name[slug_name]]] variable[outval] assign[=] dictionary[[<ast.Constant object at 0x7da1b06688e0>, <ast.Constant object at 0x7da1b0668df0>, <ast.Constant object at 0x7da1b06691b0>, <ast.Constant object at 0x7da1b06691e0>, <ast.Constant object at 0x7da1b0668640>], [<ast.Name object at 0x7da1b06695d0>, <ast.Name object at 0x7da1b06685e0>, <ast.Name object at 0x7da1b0668610>, <ast.Name object at 0x7da1b0668760>, <ast.Name object at 0x7da1b0668cd0>]] return[tuple[[<ast.Name object at 0x7da1b0668f40>, <ast.Name object at 0x7da1b0668910>]]]
keyword[def] identifier[__write_filter_dic] ( identifier[wk_sheet] , identifier[column] ): literal[string] identifier[row1_val] = identifier[wk_sheet] [ literal[string] . identifier[format] ( identifier[column] )]. identifier[value] identifier[row2_val] = identifier[wk_sheet] [ literal[string] . identifier[format] ( identifier[column] )]. identifier[value] identifier[row3_val] = identifier[wk_sheet] [ literal[string] . identifier[format] ( identifier[column] )]. identifier[value] identifier[row4_val] = identifier[wk_sheet] [ literal[string] . identifier[format] ( identifier[column] )]. identifier[value] keyword[if] identifier[row1_val] keyword[and] identifier[row1_val] . identifier[strip] ()!= literal[string] : identifier[row2_val] = identifier[row2_val] . identifier[strip] () identifier[slug_name] = identifier[row1_val] . identifier[strip] () identifier[c_name] = identifier[row2_val] . identifier[strip] () identifier[tags1] =[ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[row3_val] . identifier[split] ( literal[string] )] identifier[tags_dic] ={} keyword[if] identifier[len] ( identifier[tags1] )== literal[int] : identifier[xx_1] = identifier[row2_val] . identifier[split] ( literal[string] ) keyword[if] identifier[xx_1] [ literal[int] ]. identifier[lower] () keyword[in] identifier[INPUT_ARR] : identifier[xx_1] [ literal[int] ]= identifier[xx_1] [ literal[int] ]. identifier[lower] () keyword[else] : identifier[xx_1] [ literal[int] ]= literal[string] keyword[if] identifier[len] ( identifier[xx_1] )== literal[int] : identifier[ctr_type] , identifier[unit] = identifier[xx_1] keyword[else] : identifier[ctr_type] = identifier[xx_1] [ literal[int] ] identifier[unit] = literal[string] identifier[tags_dic] [ literal[int] ]= identifier[unit] keyword[else] : identifier[ctr_type] = literal[string] keyword[for] identifier[index] , identifier[tag_val] keyword[in] identifier[enumerate] ( identifier[tags1] ): identifier[tags_dic] [ identifier[index] + literal[int] ]= identifier[tag_val] . identifier[strip] () identifier[outkey] = literal[string] . identifier[format] ( identifier[slug_name] ) identifier[outval] ={ literal[string] : identifier[slug_name] , literal[string] : identifier[c_name] , literal[string] : identifier[tags_dic] , literal[string] : identifier[ctr_type] , literal[string] : identifier[row4_val] , } keyword[return] ( identifier[outkey] , identifier[outval] ) keyword[else] : keyword[return] ( keyword[None] , keyword[None] )
def __write_filter_dic(wk_sheet, column): """ return filter dic for certain column """ row1_val = wk_sheet['{0}1'.format(column)].value row2_val = wk_sheet['{0}2'.format(column)].value row3_val = wk_sheet['{0}3'.format(column)].value row4_val = wk_sheet['{0}4'.format(column)].value if row1_val and row1_val.strip() != '': row2_val = row2_val.strip() slug_name = row1_val.strip() c_name = row2_val.strip() tags1 = [x.strip() for x in row3_val.split(',')] tags_dic = {} # if only one tag, if len(tags1) == 1: xx_1 = row2_val.split(':') # 'text' # HTML text input control. if xx_1[0].lower() in INPUT_ARR: xx_1[0] = xx_1[0].lower() # depends on [control=['if'], data=[]] else: xx_1[0] = 'text' if len(xx_1) == 2: (ctr_type, unit) = xx_1 # depends on [control=['if'], data=[]] else: ctr_type = xx_1[0] unit = '' tags_dic[1] = unit # depends on [control=['if'], data=[]] else: ctr_type = 'select' # HTML selectiom control. for (index, tag_val) in enumerate(tags1): # the index of tags_dic starts from 1. tags_dic[index + 1] = tag_val.strip() # depends on [control=['for'], data=[]] outkey = 'html_{0}'.format(slug_name) outval = {'en': slug_name, 'zh': c_name, 'dic': tags_dic, 'type': ctr_type, 'display': row4_val} return (outkey, outval) # depends on [control=['if'], data=[]] else: return (None, None)
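A hedged sketch with an in-memory openpyxl sheet; the row layout (slug, label, tags, display flag) is inferred from the cells the function reads, and INPUT_ARR is a module-level list assumed from the body:

from openpyxl import Workbook

wb = Workbook()
ws = wb.active
ws['A1'] = 'price'       # row 1: slug name
ws['A2'] = 'number:USD'  # row 2: display name, or control-type:unit for single-tag columns
ws['A3'] = 'any'         # row 3: comma-separated tags; one tag means an input control
ws['A4'] = 1             # row 4: display flag
key, val = __write_filter_dic(ws, 'A')
# key == 'html_price'; val['type'] is 'number' only if 'number' is in INPUT_ARR,
# otherwise it falls back to 'text' (and unit 'USD' lands in val['dic'][1]).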
def autoload(self, state=True): """ Begins the process for autoloading this item when it becomes visible within the tree. :param state | <bool> """ if state and not self._timer: self._timer = QtCore.QTimer() self._timer.setInterval(500) self._timer.timeout.connect(self.testAutoload) if state and self._timer and not self._timer.isActive(): self._timer.start() elif not state and self._timer and self._timer.isActive(): self._timer.stop() del self._timer self._timer = None
def function[autoload, parameter[self, state]]: constant[ Begins the process for autoloading this item when it becomes visible within the tree. :param state | <bool> ] if <ast.BoolOp object at 0x7da18f09d600> begin[:] name[self]._timer assign[=] call[name[QtCore].QTimer, parameter[]] call[name[self]._timer.setInterval, parameter[constant[500]]] call[name[self]._timer.timeout.connect, parameter[name[self].testAutoload]] if <ast.BoolOp object at 0x7da18f09f6d0> begin[:] call[name[self]._timer.start, parameter[]]
keyword[def] identifier[autoload] ( identifier[self] , identifier[state] = keyword[True] ): literal[string] keyword[if] identifier[state] keyword[and] keyword[not] identifier[self] . identifier[_timer] : identifier[self] . identifier[_timer] = identifier[QtCore] . identifier[QTimer] () identifier[self] . identifier[_timer] . identifier[setInterval] ( literal[int] ) identifier[self] . identifier[_timer] . identifier[timeout] . identifier[connect] ( identifier[self] . identifier[testAutoload] ) keyword[if] identifier[state] keyword[and] identifier[self] . identifier[_timer] keyword[and] keyword[not] identifier[self] . identifier[_timer] . identifier[isActive] (): identifier[self] . identifier[_timer] . identifier[start] () keyword[elif] keyword[not] identifier[state] keyword[and] identifier[self] . identifier[_timer] keyword[and] identifier[self] . identifier[_timer] . identifier[isActive] (): identifier[self] . identifier[_timer] . identifier[stop] () keyword[del] identifier[self] . identifier[_timer] identifier[self] . identifier[_timer] = keyword[None]
def autoload(self, state=True): """ Begins the process for autoloading this item when it becomes visible within the tree. :param state | <bool> """ if state and (not self._timer): self._timer = QtCore.QTimer() self._timer.setInterval(500) self._timer.timeout.connect(self.testAutoload) # depends on [control=['if'], data=[]] if state and self._timer and (not self._timer.isActive()): self._timer.start() # depends on [control=['if'], data=[]] elif not state and self._timer and self._timer.isActive(): self._timer.stop() del self._timer self._timer = None # depends on [control=['if'], data=[]]
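Usage is a simple toggle; the item below is a hypothetical tree item exposing this method, and a running QApplication is required for the QTimer to fire:

item.autoload(True)    # poll testAutoload() every 500 ms while the item may be visible
# ... later, e.g. when the item is collapsed or destroyed:
item.autoload(False)   # stop polling and discard the timer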
def get_server_model(snmp_client): """Get server model of the node. :param snmp_client: an SNMP client object. :raises: SNMPFailure if SNMP operation failed. :returns: a string of server model. """ try: server_model = snmp_client.get(SERVER_MODEL_OID) return six.text_type(server_model) except SNMPFailure as e: raise SNMPServerModelFailure( SNMP_FAILURE_MSG % ("GET SERVER MODEL", e))
def function[get_server_model, parameter[snmp_client]]: constant[Get server model of the node. :param snmp_client: an SNMP client object. :raises: SNMPFailure if SNMP operation failed. :returns: a string of server model. ] <ast.Try object at 0x7da1b19d05e0>
keyword[def] identifier[get_server_model] ( identifier[snmp_client] ): literal[string] keyword[try] : identifier[server_model] = identifier[snmp_client] . identifier[get] ( identifier[SERVER_MODEL_OID] ) keyword[return] identifier[six] . identifier[text_type] ( identifier[server_model] ) keyword[except] identifier[SNMPFailure] keyword[as] identifier[e] : keyword[raise] identifier[SNMPServerModelFailure] ( identifier[SNMP_FAILURE_MSG] %( literal[string] , identifier[e] ))
def get_server_model(snmp_client): """Get server model of the node. :param snmp_client: an SNMP client object. :raises: SNMPFailure if SNMP operation failed. :returns: a string of server model. """ try: server_model = snmp_client.get(SERVER_MODEL_OID) return six.text_type(server_model) # depends on [control=['try'], data=[]] except SNMPFailure as e: raise SNMPServerModelFailure(SNMP_FAILURE_MSG % ('GET SERVER MODEL', e)) # depends on [control=['except'], data=['e']]
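A stub client is enough to exercise the happy path (SERVER_MODEL_OID and SNMPFailure are assumed to be module-level names, as the body implies):

class FakeSNMPClient(object):
    def get(self, oid):
        # A real client would query the BMC at SERVER_MODEL_OID over SNMP.
        return 'PRIMERGY RX300 S8'

print(get_server_model(FakeSNMPClient()))   # -> u'PRIMERGY RX300 S8'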
def open(self, url): """ Open a WSDL schema at the specified I{URL}. First, the WSDL schema is looked up in the I{object cache}. If not found, a new one constructed using the I{fn} factory function and the result is cached for the next open(). @param url: A WSDL URL. @type url: str. @return: The WSDL object. @rtype: I{Definitions} """ cache = self.__cache() id = self.mangle(url, "wsdl") wsdl = cache.get(id) if wsdl is None: wsdl = self.fn(url, self.options) cache.put(id, wsdl) else: # Cached WSDL Definitions objects may have been created with # different options so we update them here with our current ones. wsdl.options = self.options for imp in wsdl.imports: imp.imported.options = self.options return wsdl
def function[open, parameter[self, url]]: constant[ Open a WSDL schema at the specified I{URL}. First, the WSDL schema is looked up in the I{object cache}. If not found, a new one constructed using the I{fn} factory function and the result is cached for the next open(). @param url: A WSDL URL. @type url: str. @return: The WSDL object. @rtype: I{Definitions} ] variable[cache] assign[=] call[name[self].__cache, parameter[]] variable[id] assign[=] call[name[self].mangle, parameter[name[url], constant[wsdl]]] variable[wsdl] assign[=] call[name[cache].get, parameter[name[id]]] if compare[name[wsdl] is constant[None]] begin[:] variable[wsdl] assign[=] call[name[self].fn, parameter[name[url], name[self].options]] call[name[cache].put, parameter[name[id], name[wsdl]]] return[name[wsdl]]
keyword[def] identifier[open] ( identifier[self] , identifier[url] ): literal[string] identifier[cache] = identifier[self] . identifier[__cache] () identifier[id] = identifier[self] . identifier[mangle] ( identifier[url] , literal[string] ) identifier[wsdl] = identifier[cache] . identifier[get] ( identifier[id] ) keyword[if] identifier[wsdl] keyword[is] keyword[None] : identifier[wsdl] = identifier[self] . identifier[fn] ( identifier[url] , identifier[self] . identifier[options] ) identifier[cache] . identifier[put] ( identifier[id] , identifier[wsdl] ) keyword[else] : identifier[wsdl] . identifier[options] = identifier[self] . identifier[options] keyword[for] identifier[imp] keyword[in] identifier[wsdl] . identifier[imports] : identifier[imp] . identifier[imported] . identifier[options] = identifier[self] . identifier[options] keyword[return] identifier[wsdl]
def open(self, url): """ Open a WSDL schema at the specified I{URL}. First, the WSDL schema is looked up in the I{object cache}. If not found, a new one constructed using the I{fn} factory function and the result is cached for the next open(). @param url: A WSDL URL. @type url: str. @return: The WSDL object. @rtype: I{Definitions} """ cache = self.__cache() id = self.mangle(url, 'wsdl') wsdl = cache.get(id) if wsdl is None: wsdl = self.fn(url, self.options) cache.put(id, wsdl) # depends on [control=['if'], data=['wsdl']] else: # Cached WSDL Definitions objects may have been created with # different options so we update them here with our current ones. wsdl.options = self.options for imp in wsdl.imports: imp.imported.options = self.options # depends on [control=['for'], data=['imp']] return wsdl
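A usage sketch of the caching behavior; `reader` stands in for whatever suds-style reader class this method belongs to (the class name is not visible in this snippet):

url = 'http://example.com/service?wsdl'
first = reader.open(url)    # parsed via self.fn and stored in the object cache
second = reader.open(url)   # cache hit: same Definitions object, options refreshed
assert first is second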
def mutateString(original, n, replacements='acgt'):
    """
    Mutate C{original} in C{n} places with chars chosen from C{replacements}.

    @param original: The original C{str} to mutate.
    @param n: The C{int} number of locations to mutate.
    @param replacements: The C{str} of replacement letters.
    @return: A new C{str} with C{n} places of C{original} mutated.
    @raises ValueError: if C{n} is too high, or C{replacements} contains
        duplicates, or if no replacement can be made at a certain locus
        because C{replacements} is of length one, or if C{original} is of
        zero length.
    """
    if not original:
        raise ValueError('Empty original string passed.')
    if n > len(original):
        raise ValueError('Cannot make %d mutations in a string of length %d'
                         % (n, len(original)))
    if len(replacements) != len(set(replacements)):
        raise ValueError('Replacement string contains duplicates')
    if len(replacements) == 1 and original.find(replacements) != -1:
        raise ValueError('Impossible replacement')
    result = list(original)
    length = len(original)
    for offset in range(length):
        if uniform(0.0, 1.0) < float(n) / (length - offset):
            # Mutate.
            while True:
                new = choice(replacements)
                if new != result[offset]:
                    result[offset] = new
                    break
            n -= 1
            if n == 0:
                break
    return ''.join(result)
def function[mutateString, parameter[original, n, replacements]]: constant[ Mutate C{original} in C{n} places with chars chosen from C{replacements}. @param original: The original C{str} to mutate. @param n: The C{int} number of locations to mutate. @param replacements: The C{str} of replacement letters. @return: A new C{str} with C{n} places of C{original} mutated. @raises ValueError: if C{n} is too high, or C{replacement} contains duplicates, or if no replacement can be made at a certain locus because C{replacements} is of length one, or if C{original} is of zero length. ] if <ast.UnaryOp object at 0x7da1b0ca5e40> begin[:] <ast.Raise object at 0x7da1b0ca7610> if compare[name[n] greater[>] call[name[len], parameter[name[original]]]] begin[:] <ast.Raise object at 0x7da1b0ca5a20> if compare[call[name[len], parameter[name[replacements]]] not_equal[!=] call[name[len], parameter[call[name[set], parameter[name[replacements]]]]]] begin[:] <ast.Raise object at 0x7da1b0ca7340> if <ast.BoolOp object at 0x7da1b0ca73a0> begin[:] <ast.Raise object at 0x7da20c796740> variable[result] assign[=] call[name[list], parameter[name[original]]] variable[length] assign[=] call[name[len], parameter[name[original]]] for taget[name[offset]] in starred[call[name[range], parameter[name[length]]]] begin[:] if compare[call[name[uniform], parameter[constant[0.0], constant[1.0]]] less[<] binary_operation[call[name[float], parameter[name[n]]] / binary_operation[name[length] - name[offset]]]] begin[:] while constant[True] begin[:] variable[new] assign[=] call[name[choice], parameter[name[replacements]]] if compare[name[new] not_equal[!=] call[name[result]][name[offset]]] begin[:] call[name[result]][name[offset]] assign[=] name[new] break <ast.AugAssign object at 0x7da20c794550> if compare[name[n] equal[==] constant[0]] begin[:] break return[call[constant[].join, parameter[name[result]]]]
keyword[def] identifier[mutateString] ( identifier[original] , identifier[n] , identifier[replacements] = literal[string] ): literal[string] keyword[if] keyword[not] identifier[original] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[n] > identifier[len] ( identifier[original] ): keyword[raise] identifier[ValueError] ( literal[string] % ( identifier[n] , identifier[len] ( identifier[original] ))) keyword[if] identifier[len] ( identifier[replacements] )!= identifier[len] ( identifier[set] ( identifier[replacements] )): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[len] ( identifier[replacements] )== literal[int] keyword[and] identifier[original] . identifier[find] ( identifier[replacements] )!=- literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[result] = identifier[list] ( identifier[original] ) identifier[length] = identifier[len] ( identifier[original] ) keyword[for] identifier[offset] keyword[in] identifier[range] ( identifier[length] ): keyword[if] identifier[uniform] ( literal[int] , literal[int] )< identifier[float] ( identifier[n] )/( identifier[length] - identifier[offset] ): keyword[while] keyword[True] : identifier[new] = identifier[choice] ( identifier[replacements] ) keyword[if] identifier[new] != identifier[result] [ identifier[offset] ]: identifier[result] [ identifier[offset] ]= identifier[new] keyword[break] identifier[n] -= literal[int] keyword[if] identifier[n] == literal[int] : keyword[break] keyword[return] literal[string] . identifier[join] ( identifier[result] )
def mutateString(original, n, replacements='acgt'): """ Mutate C{original} in C{n} places with chars chosen from C{replacements}. @param original: The original C{str} to mutate. @param n: The C{int} number of locations to mutate. @param replacements: The C{str} of replacement letters. @return: A new C{str} with C{n} places of C{original} mutated. @raises ValueError: if C{n} is too high, or C{replacement} contains duplicates, or if no replacement can be made at a certain locus because C{replacements} is of length one, or if C{original} is of zero length. """ if not original: raise ValueError('Empty original string passed.') # depends on [control=['if'], data=[]] if n > len(original): raise ValueError('Cannot make %d mutations in a string of length %d' % (n, len(original))) # depends on [control=['if'], data=['n']] if len(replacements) != len(set(replacements)): raise ValueError('Replacement string contains duplicates') # depends on [control=['if'], data=[]] if len(replacements) == 1 and original.find(replacements) != -1: raise ValueError('Impossible replacement') # depends on [control=['if'], data=[]] result = list(original) length = len(original) for offset in range(length): if uniform(0.0, 1.0) < float(n) / (length - offset): # Mutate. while True: new = choice(replacements) if new != result[offset]: result[offset] = new break # depends on [control=['if'], data=['new']] # depends on [control=['while'], data=[]] n -= 1 if n == 0: break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['offset']] return ''.join(result)
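A deterministic demo, assuming uniform and choice are the functions from the random module (so seeding makes the run reproducible). The loop is selection sampling, so exactly n positions always change:

import random

random.seed(42)
original = 'acgtacgt'
mutant = mutateString(original, 3)
assert sum(a != b for a, b in zip(original, mutant)) == 3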
def virtual(cls, **options): """ Allows for defining virtual columns and collectors on models -- these are objects that are defined in code and not directly in a data store. :param cls: :param options: :return: """ def wrapped(func): param_name = inflection.underscore(func.__name__) options.setdefault('name', param_name) if 'flags' in options: if isinstance(options['flags'], set): options['flags'].add('Virtual') options['flags'].add('ReadOnly') else: options['flags'] |= (cls.Flags.Virtual | cls.Flags.ReadOnly) else: options['flags'] = {'Virtual', 'ReadOnly'} def define_setter(): def setter_wrapped(setter_func): func.__orb__.setFlags(func.__orb__.flags() & ~cls.Flags.ReadOnly) func.__orb__.setter()(setter_func) return setter_func return setter_wrapped def define_query_filter(): def shortcut_wrapped(shortcut_func): func.__orb__.queryFilter(shortcut_func) return shortcut_func return shortcut_wrapped func.__orb__ = cls(**options) func.__orb__.getter()(func) func.setter = define_setter func.queryFilter = define_query_filter return func return wrapped
def function[virtual, parameter[cls]]: constant[ Allows for defining virtual columns and collectors on models -- these are objects that are defined in code and not directly in a data store. :param cls: :param options: :return: ] def function[wrapped, parameter[func]]: variable[param_name] assign[=] call[name[inflection].underscore, parameter[name[func].__name__]] call[name[options].setdefault, parameter[constant[name], name[param_name]]] if compare[constant[flags] in name[options]] begin[:] if call[name[isinstance], parameter[call[name[options]][constant[flags]], name[set]]] begin[:] call[call[name[options]][constant[flags]].add, parameter[constant[Virtual]]] call[call[name[options]][constant[flags]].add, parameter[constant[ReadOnly]]] def function[define_setter, parameter[]]: def function[setter_wrapped, parameter[setter_func]]: call[name[func].__orb__.setFlags, parameter[binary_operation[call[name[func].__orb__.flags, parameter[]] <ast.BitAnd object at 0x7da2590d6b60> <ast.UnaryOp object at 0x7da18bcca2c0>]]] call[call[name[func].__orb__.setter, parameter[]], parameter[name[setter_func]]] return[name[setter_func]] return[name[setter_wrapped]] def function[define_query_filter, parameter[]]: def function[shortcut_wrapped, parameter[shortcut_func]]: call[name[func].__orb__.queryFilter, parameter[name[shortcut_func]]] return[name[shortcut_func]] return[name[shortcut_wrapped]] name[func].__orb__ assign[=] call[name[cls], parameter[]] call[call[name[func].__orb__.getter, parameter[]], parameter[name[func]]] name[func].setter assign[=] name[define_setter] name[func].queryFilter assign[=] name[define_query_filter] return[name[func]] return[name[wrapped]]
keyword[def] identifier[virtual] ( identifier[cls] ,** identifier[options] ): literal[string] keyword[def] identifier[wrapped] ( identifier[func] ): identifier[param_name] = identifier[inflection] . identifier[underscore] ( identifier[func] . identifier[__name__] ) identifier[options] . identifier[setdefault] ( literal[string] , identifier[param_name] ) keyword[if] literal[string] keyword[in] identifier[options] : keyword[if] identifier[isinstance] ( identifier[options] [ literal[string] ], identifier[set] ): identifier[options] [ literal[string] ]. identifier[add] ( literal[string] ) identifier[options] [ literal[string] ]. identifier[add] ( literal[string] ) keyword[else] : identifier[options] [ literal[string] ]|=( identifier[cls] . identifier[Flags] . identifier[Virtual] | identifier[cls] . identifier[Flags] . identifier[ReadOnly] ) keyword[else] : identifier[options] [ literal[string] ]={ literal[string] , literal[string] } keyword[def] identifier[define_setter] (): keyword[def] identifier[setter_wrapped] ( identifier[setter_func] ): identifier[func] . identifier[__orb__] . identifier[setFlags] ( identifier[func] . identifier[__orb__] . identifier[flags] ()&~ identifier[cls] . identifier[Flags] . identifier[ReadOnly] ) identifier[func] . identifier[__orb__] . identifier[setter] ()( identifier[setter_func] ) keyword[return] identifier[setter_func] keyword[return] identifier[setter_wrapped] keyword[def] identifier[define_query_filter] (): keyword[def] identifier[shortcut_wrapped] ( identifier[shortcut_func] ): identifier[func] . identifier[__orb__] . identifier[queryFilter] ( identifier[shortcut_func] ) keyword[return] identifier[shortcut_func] keyword[return] identifier[shortcut_wrapped] identifier[func] . identifier[__orb__] = identifier[cls] (** identifier[options] ) identifier[func] . identifier[__orb__] . identifier[getter] ()( identifier[func] ) identifier[func] . identifier[setter] = identifier[define_setter] identifier[func] . identifier[queryFilter] = identifier[define_query_filter] keyword[return] identifier[func] keyword[return] identifier[wrapped]
def virtual(cls, **options): """ Allows for defining virtual columns and collectors on models -- these are objects that are defined in code and not directly in a data store. :param cls: :param options: :return: """ def wrapped(func): param_name = inflection.underscore(func.__name__) options.setdefault('name', param_name) if 'flags' in options: if isinstance(options['flags'], set): options['flags'].add('Virtual') options['flags'].add('ReadOnly') # depends on [control=['if'], data=[]] else: options['flags'] |= cls.Flags.Virtual | cls.Flags.ReadOnly # depends on [control=['if'], data=['options']] else: options['flags'] = {'Virtual', 'ReadOnly'} def define_setter(): def setter_wrapped(setter_func): func.__orb__.setFlags(func.__orb__.flags() & ~cls.Flags.ReadOnly) func.__orb__.setter()(setter_func) return setter_func return setter_wrapped def define_query_filter(): def shortcut_wrapped(shortcut_func): func.__orb__.queryFilter(shortcut_func) return shortcut_func return shortcut_wrapped func.__orb__ = cls(**options) func.__orb__.getter()(func) func.setter = define_setter func.queryFilter = define_query_filter return func return wrapped
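A hedged usage sketch for an orb-style model; orb.Table, orb.StringColumn, and the get/set accessors are assumptions about the host library, not taken from this snippet:

class Employee(orb.Table):
    first_name = orb.StringColumn()
    last_name = orb.StringColumn()

    @orb.virtual(orb.StringColumn)
    def display_name(self, **context):
        return '{0} {1}'.format(self.get('first_name'), self.get('last_name'))

    # Defining a setter clears the ReadOnly flag set by the decorator.
    @display_name.setter()
    def set_display_name(self, value, **context):
        first, _, last = value.partition(' ')
        self.set('first_name', first)
        self.set('last_name', last)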
def get_course_video_image_url(course_id, edx_video_id): """ Returns course video image url or None if no image found """ try: video_image = CourseVideo.objects.select_related('video_image').get( course_id=course_id, video__edx_video_id=edx_video_id ).video_image return video_image.image_url() except ObjectDoesNotExist: return None
def function[get_course_video_image_url, parameter[course_id, edx_video_id]]: constant[ Returns course video image url or None if no image found ] <ast.Try object at 0x7da1b0334e50>
keyword[def] identifier[get_course_video_image_url] ( identifier[course_id] , identifier[edx_video_id] ): literal[string] keyword[try] : identifier[video_image] = identifier[CourseVideo] . identifier[objects] . identifier[select_related] ( literal[string] ). identifier[get] ( identifier[course_id] = identifier[course_id] , identifier[video__edx_video_id] = identifier[edx_video_id] ). identifier[video_image] keyword[return] identifier[video_image] . identifier[image_url] () keyword[except] identifier[ObjectDoesNotExist] : keyword[return] keyword[None]
def get_course_video_image_url(course_id, edx_video_id): """ Returns course video image url or None if no image found """ try: video_image = CourseVideo.objects.select_related('video_image').get(course_id=course_id, video__edx_video_id=edx_video_id).video_image return video_image.image_url() # depends on [control=['try'], data=[]] except ObjectDoesNotExist: return None # depends on [control=['except'], data=[]]
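Usage inside Django code with edxval's models available; the IDs and the fallback URL are placeholders, not values from the source:

image_url = get_course_video_image_url(
    course_id='course-v1:edX+DemoX+Demo_Course',
    edx_video_id='example-video-id',
)
if image_url is None:
    image_url = '/static/images/default-thumbnail.png'  # hypothetical fallback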
def use_federated_family_view(self): """Pass through to provider RelationshipLookupSession.use_federated_family_view""" self._family_view = FEDERATED # self._get_provider_session('relationship_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_federated_family_view() except AttributeError: pass
def function[use_federated_family_view, parameter[self]]: constant[Pass through to provider RelationshipLookupSession.use_federated_family_view] name[self]._family_view assign[=] name[FEDERATED] for taget[name[session]] in starred[call[name[self]._get_provider_sessions, parameter[]]] begin[:] <ast.Try object at 0x7da20c7cbfd0>
keyword[def] identifier[use_federated_family_view] ( identifier[self] ): literal[string] identifier[self] . identifier[_family_view] = identifier[FEDERATED] keyword[for] identifier[session] keyword[in] identifier[self] . identifier[_get_provider_sessions] (): keyword[try] : identifier[session] . identifier[use_federated_family_view] () keyword[except] identifier[AttributeError] : keyword[pass]
def use_federated_family_view(self): """Pass through to provider RelationshipLookupSession.use_federated_family_view""" self._family_view = FEDERATED # self._get_provider_session('relationship_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_federated_family_view() # depends on [control=['try'], data=[]] except AttributeError: pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['session']]
def atexit_rmglob(path, glob=glob.glob, isdir=os.path.isdir, isfile=os.path.isfile, remove=os.remove, rmtree=shutil.rmtree): # pragma: no cover """Ensure removal of multiple files at interpreter exit.""" for p in glob(path): if isfile(p): remove(p) elif isdir(p): rmtree(p)
def function[atexit_rmglob, parameter[path, glob, isdir, isfile, remove, rmtree]]: constant[Ensure removal of multiple files at interpreter exit.] for taget[name[p]] in starred[call[name[glob], parameter[name[path]]]] begin[:] if call[name[isfile], parameter[name[p]]] begin[:] call[name[remove], parameter[name[p]]]
keyword[def] identifier[atexit_rmglob] ( identifier[path] , identifier[glob] = identifier[glob] . identifier[glob] , identifier[isdir] = identifier[os] . identifier[path] . identifier[isdir] , identifier[isfile] = identifier[os] . identifier[path] . identifier[isfile] , identifier[remove] = identifier[os] . identifier[remove] , identifier[rmtree] = identifier[shutil] . identifier[rmtree] ): literal[string] keyword[for] identifier[p] keyword[in] identifier[glob] ( identifier[path] ): keyword[if] identifier[isfile] ( identifier[p] ): identifier[remove] ( identifier[p] ) keyword[elif] identifier[isdir] ( identifier[p] ): identifier[rmtree] ( identifier[p] )
def atexit_rmglob(path, glob=glob.glob, isdir=os.path.isdir, isfile=os.path.isfile, remove=os.remove, rmtree=shutil.rmtree): # pragma: no cover 'Ensure removal of multiple files at interpreter exit.' for p in glob(path): if isfile(p): remove(p) # depends on [control=['if'], data=[]] elif isdir(p): rmtree(p) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']]
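Registering the helper with atexit; the default arguments pin the glob/os/shutil callables at definition time so cleanup still works during interpreter teardown:

import atexit
import tempfile

scratch = tempfile.mkdtemp()
# The directory itself matches the pattern, so rmtree removes it on exit.
atexit.register(atexit_rmglob, scratch)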
def unmajority(p, a, b, c): """Unmajority gate.""" p.ccx(a, b, c) p.cx(c, a) p.cx(a, b)
def function[unmajority, parameter[p, a, b, c]]: constant[Unmajority gate.] call[name[p].ccx, parameter[name[a], name[b], name[c]]] call[name[p].cx, parameter[name[c], name[a]]] call[name[p].cx, parameter[name[a], name[b]]]
keyword[def] identifier[unmajority] ( identifier[p] , identifier[a] , identifier[b] , identifier[c] ): literal[string] identifier[p] . identifier[ccx] ( identifier[a] , identifier[b] , identifier[c] ) identifier[p] . identifier[cx] ( identifier[c] , identifier[a] ) identifier[p] . identifier[cx] ( identifier[a] , identifier[b] )
def unmajority(p, a, b, c): """Unmajority gate.""" p.ccx(a, b, c) p.cx(c, a) p.cx(a, b)
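This is the UMA gate of the Cuccaro ripple-carry adder; its mirror-image MAJ gate is sketched below, assuming p exposes the same cx/ccx interface as in the snippet. Applied back to back, majority followed by unmajority leaves b holding the sum bit a XOR b XOR c:

def majority(p, a, b, c):
    """Majority gate: the mirror of unmajority, computing the carry into c."""
    p.cx(c, b)
    p.cx(c, a)
    p.ccx(a, b, c)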
def get_init_argspec(klass): """Wrapper around inspect.getargspec(klass.__init__) which, for cython classes uses an auxiliary '_init_argspec' method, since they don't play nice with the inspect module. By convention, a cython class should define the classmethod _init_argspec that, when called, returns what ``inspect.getargspec`` would be expected to return when called on that class's __init__ method. """ if hasattr(klass, '_init_argspec'): return _shim_argspec(klass._init_argspec()) elif PY2: return _shim_argspec(inspect.getargspec(klass.__init__)) else: return inspect.signature(klass.__init__)
def function[get_init_argspec, parameter[klass]]: constant[Wrapper around inspect.getargspec(klass.__init__) which, for cython classes uses an auxiliary '_init_argspec' method, since they don't play nice with the inspect module. By convention, a cython class should define the classmethod _init_argspec that, when called, returns what ``inspect.getargspec`` would be expected to return when called on that class's __init__ method. ] if call[name[hasattr], parameter[name[klass], constant[_init_argspec]]] begin[:] return[call[name[_shim_argspec], parameter[call[name[klass]._init_argspec, parameter[]]]]]
keyword[def] identifier[get_init_argspec] ( identifier[klass] ): literal[string] keyword[if] identifier[hasattr] ( identifier[klass] , literal[string] ): keyword[return] identifier[_shim_argspec] ( identifier[klass] . identifier[_init_argspec] ()) keyword[elif] identifier[PY2] : keyword[return] identifier[_shim_argspec] ( identifier[inspect] . identifier[getargspec] ( identifier[klass] . identifier[__init__] )) keyword[else] : keyword[return] identifier[inspect] . identifier[signature] ( identifier[klass] . identifier[__init__] )
def get_init_argspec(klass): """Wrapper around inspect.getargspec(klass.__init__) which, for cython classes uses an auxiliary '_init_argspec' method, since they don't play nice with the inspect module. By convention, a cython class should define the classmethod _init_argspec that, when called, returns what ``inspect.getargspec`` would be expected to return when called on that class's __init__ method. """ if hasattr(klass, '_init_argspec'): return _shim_argspec(klass._init_argspec()) # depends on [control=['if'], data=[]] elif PY2: return _shim_argspec(inspect.getargspec(klass.__init__)) # depends on [control=['if'], data=[]] else: return inspect.signature(klass.__init__)
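An illustrative call, run where get_init_argspec and its PY2 flag live; the sample class is made up. On Python 3 the helper returns an inspect.Signature.

class PlainClass:
    def __init__(self, x, y=0):
        pass

sig = get_init_argspec(PlainClass)   # inspect.Signature on Python 3
print(list(sig.parameters))          # ['self', 'x', 'y']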
def collapsedintervals(table, start='start', stop='stop', key=None): """ Utility function to collapse intervals in a table. If no facet `key` is given, returns an iterator over `(start, stop)` tuples. If facet `key` is given, returns an iterator over `(key, start, stop)` tuples. """ if key is None: table = sort(table, key=start) for iv in _collapse(values(table, (start, stop))): yield iv else: table = sort(table, key=(key, start)) for k, g in rowgroupby(table, key=key, value=(start, stop)): for iv in _collapse(g): yield (k,) + iv
def function[collapsedintervals, parameter[table, start, stop, key]]: constant[ Utility function to collapse intervals in a table. If no facet `key` is given, returns an iterator over `(start, stop)` tuples. If facet `key` is given, returns an iterator over `(key, start, stop)` tuples. ] if compare[name[key] is constant[None]] begin[:] variable[table] assign[=] call[name[sort], parameter[name[table]]] for taget[name[iv]] in starred[call[name[_collapse], parameter[call[name[values], parameter[name[table], tuple[[<ast.Name object at 0x7da1b08e44c0>, <ast.Name object at 0x7da1b08e61a0>]]]]]]] begin[:] <ast.Yield object at 0x7da1b08e4610>
keyword[def] identifier[collapsedintervals] ( identifier[table] , identifier[start] = literal[string] , identifier[stop] = literal[string] , identifier[key] = keyword[None] ): literal[string] keyword[if] identifier[key] keyword[is] keyword[None] : identifier[table] = identifier[sort] ( identifier[table] , identifier[key] = identifier[start] ) keyword[for] identifier[iv] keyword[in] identifier[_collapse] ( identifier[values] ( identifier[table] ,( identifier[start] , identifier[stop] ))): keyword[yield] identifier[iv] keyword[else] : identifier[table] = identifier[sort] ( identifier[table] , identifier[key] =( identifier[key] , identifier[start] )) keyword[for] identifier[k] , identifier[g] keyword[in] identifier[rowgroupby] ( identifier[table] , identifier[key] = identifier[key] , identifier[value] =( identifier[start] , identifier[stop] )): keyword[for] identifier[iv] keyword[in] identifier[_collapse] ( identifier[g] ): keyword[yield] ( identifier[k] ,)+ identifier[iv]
def collapsedintervals(table, start='start', stop='stop', key=None): """ Utility function to collapse intervals in a table. If no facet `key` is given, returns an iterator over `(start, stop)` tuples. If facet `key` is given, returns an iterator over `(key, start, stop)` tuples. """ if key is None: table = sort(table, key=start) for iv in _collapse(values(table, (start, stop))): yield iv # depends on [control=['for'], data=['iv']] # depends on [control=['if'], data=[]] else: table = sort(table, key=(key, start)) for (k, g) in rowgroupby(table, key=key, value=(start, stop)): for iv in _collapse(g): yield ((k,) + iv) # depends on [control=['for'], data=['iv']] # depends on [control=['for'], data=[]]
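A hedged usage sketch assuming a petl-style table (header row followed by data rows), since the function leans on sort/rowgroupby helpers of that flavor; the sample intervals are made up, and the overlapping pair should presumably collapse.

table = [('start', 'stop'),
         (1, 4),
         (3, 7),    # overlaps the previous interval
         (9, 10)]

# With no facet key, expect the merged intervals (1, 7) and (9, 10).
for iv in collapsedintervals(table):
    print(iv)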
def tag_id(self, name): """ Get the unique tag identifier for a given tag. :param name: The tag :type name: str :rtype: str """ return self._store.get(self.tag_key(name)) or self.reset_tag(name)
def function[tag_id, parameter[self, name]]: constant[ Get the unique tag identifier for a given tag. :param name: The tag :type name: str :rtype: str ] return[<ast.BoolOp object at 0x7da1b1932f20>]
keyword[def] identifier[tag_id] ( identifier[self] , identifier[name] ): literal[string] keyword[return] identifier[self] . identifier[_store] . identifier[get] ( identifier[self] . identifier[tag_key] ( identifier[name] )) keyword[or] identifier[self] . identifier[reset_tag] ( identifier[name] )
def tag_id(self, name): """ Get the unique tag identifier for a given tag. :param name: The tag :type name: str :rtype: str """ return self._store.get(self.tag_key(name)) or self.reset_tag(name)
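A minimal sketch of the get-or-reset idiom the method relies on; the TagSet class, key format, and dict-like store below are hypothetical stand-ins for the real cache backend.

import uuid

class TagSet:
    """Illustrative tag registry using the same lazy-initialization idiom."""
    def __init__(self, store):
        self._store = store  # any dict-like backing store

    def tag_key(self, name):
        return 'tag:%s:key' % name

    def reset_tag(self, name):
        # Mint a fresh identifier, invalidating entries cached under the old one.
        new_id = uuid.uuid4().hex
        self._store[self.tag_key(name)] = new_id
        return new_id

    def tag_id(self, name):
        return self._store.get(self.tag_key(name)) or self.reset_tag(name)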
def to_unicode(value): """Converts bytes, unicode, and C char arrays to unicode strings. Bytes and C char arrays are decoded from UTF-8. """ if isinstance(value, ffi.CData): return ffi.string(value).decode('utf-8') elif isinstance(value, binary_type): return value.decode('utf-8') elif isinstance(value, text_type): return value else: raise ValueError('Value must be text, bytes, or char[]')
def function[to_unicode, parameter[value]]: constant[Converts bytes, unicode, and C char arrays to unicode strings. Bytes and C char arrays are decoded from UTF-8. ] if call[name[isinstance], parameter[name[value], name[ffi].CData]] begin[:] return[call[call[name[ffi].string, parameter[name[value]]].decode, parameter[constant[utf-8]]]]
keyword[def] identifier[to_unicode] ( identifier[value] ): literal[string] keyword[if] identifier[isinstance] ( identifier[value] , identifier[ffi] . identifier[CData] ): keyword[return] identifier[ffi] . identifier[string] ( identifier[value] ). identifier[decode] ( literal[string] ) keyword[elif] identifier[isinstance] ( identifier[value] , identifier[binary_type] ): keyword[return] identifier[value] . identifier[decode] ( literal[string] ) keyword[elif] identifier[isinstance] ( identifier[value] , identifier[text_type] ): keyword[return] identifier[value] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] )
def to_unicode(value): """Converts bytes, unicode, and C char arrays to unicode strings. Bytes and C char arrays are decoded from UTF-8. """ if isinstance(value, ffi.CData): return ffi.string(value).decode('utf-8') # depends on [control=['if'], data=[]] elif isinstance(value, binary_type): return value.decode('utf-8') # depends on [control=['if'], data=[]] elif isinstance(value, text_type): return value # depends on [control=['if'], data=[]] else: raise ValueError('Value must be text, bytes, or char[]')
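A usage sketch; the bytes and text cases run as-is, while the cffi char[] case assumes an ffi instance like the one the module imports.

# Bytes and text both normalize to unicode text:
assert to_unicode(b'caf\xc3\xa9') == u'café'
assert to_unicode(u'café') == u'café'

# A cffi char[] decodes the same way (hypothetical buffer):
# buf = ffi.new('char[]', b'hello')
# assert to_unicode(buf) == u'hello'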
def purgeCache(self, *args, **kwargs): """ Purge Worker Cache Publish a purge-cache message to purge caches named `cacheName` with `provisionerId` and `workerType` in the routing-key. Workers should be listening for this message and purge caches when they see it. This method takes input: ``v1/purge-cache-request.json#`` This method is ``stable`` """ return self._makeApiCall(self.funcinfo["purgeCache"], *args, **kwargs)
def function[purgeCache, parameter[self]]: constant[ Purge Worker Cache Publish a purge-cache message to purge caches named `cacheName` with `provisionerId` and `workerType` in the routing-key. Workers should be listening for this message and purge caches when they see it. This method takes input: ``v1/purge-cache-request.json#`` This method is ``stable`` ] return[call[name[self]._makeApiCall, parameter[call[name[self].funcinfo][constant[purgeCache]], <ast.Starred object at 0x7da18f723190>]]]
keyword[def] identifier[purgeCache] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[self] . identifier[_makeApiCall] ( identifier[self] . identifier[funcinfo] [ literal[string] ],* identifier[args] ,** identifier[kwargs] )
def purgeCache(self, *args, **kwargs): """ Purge Worker Cache Publish a purge-cache message to purge caches named `cacheName` with `provisionerId` and `workerType` in the routing-key. Workers should be listening for this message and purge caches when they see it. This method takes input: ``v1/purge-cache-request.json#`` This method is ``stable`` """ return self._makeApiCall(self.funcinfo['purgeCache'], *args, **kwargs)
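A hedged call sketch for this generated Taskcluster client method; the routing-args-then-payload order follows the generated-client convention but is an assumption here, and the provisioner, worker type, and cache name are illustrative, with the payload shape taken from the referenced v1/purge-cache-request.json schema.

# client is an instance of the class that defines purgeCache above.
client.purgeCache('aws-provisioner-v1', 'tutorial',
                  {'cacheName': 'my-level-1-checkouts'})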
async def checked_run(*cmd): """Run the given subprocess command in a coroutine. Args: *cmd: the command to run and its arguments. Returns: The output that the command wrote to stdout. Raises: RuntimeError: if the command returns a non-zero result. """ # Start the subprocess. logging.info('Running: %s', expand_cmd_str(cmd)) with logged_timer('{} finished'.format(get_cmd_name(cmd))): p = await asyncio.create_subprocess_exec( *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT) # Stream output from the process stdout. chunks = [] while True: chunk = await p.stdout.read(16 * 1024) if not chunk: break chunks.append(chunk) # Wait for the process to finish, check it was successful & build stdout. await p.wait() stdout = b''.join(chunks).decode()[:-1] if p.returncode: raise RuntimeError('Return code {} from process: {}\n{}'.format( p.returncode, expand_cmd_str(cmd), stdout)) return stdout
<ast.AsyncFunctionDef object at 0x7da1b21efd30>
keyword[async] keyword[def] identifier[checked_run] (* identifier[cmd] ): literal[string] identifier[logging] . identifier[info] ( literal[string] , identifier[expand_cmd_str] ( identifier[cmd] )) keyword[with] identifier[logged_timer] ( literal[string] . identifier[format] ( identifier[get_cmd_name] ( identifier[cmd] ))): identifier[p] = keyword[await] identifier[asyncio] . identifier[create_subprocess_exec] ( * identifier[cmd] , identifier[stdout] = identifier[asyncio] . identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[asyncio] . identifier[subprocess] . identifier[STDOUT] ) identifier[chunks] =[] keyword[while] keyword[True] : identifier[chunk] = keyword[await] identifier[p] . identifier[stdout] . identifier[read] ( literal[int] * literal[int] ) keyword[if] keyword[not] identifier[chunk] : keyword[break] identifier[chunks] . identifier[append] ( identifier[chunk] ) keyword[await] identifier[p] . identifier[wait] () identifier[stdout] = literal[string] . identifier[join] ( identifier[chunks] ). identifier[decode] ()[:- literal[int] ] keyword[if] identifier[p] . identifier[returncode] : keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[p] . identifier[returncode] , identifier[expand_cmd_str] ( identifier[cmd] ), identifier[stdout] )) keyword[return] identifier[stdout]
async def checked_run(*cmd): """Run the given subprocess command in a coroutine. Args: *cmd: the command to run and its arguments. Returns: The output that the command wrote to stdout. Raises: RuntimeError: if the command returns a non-zero result. """ # Start the subprocess. logging.info('Running: %s', expand_cmd_str(cmd)) with logged_timer('{} finished'.format(get_cmd_name(cmd))): p = await asyncio.create_subprocess_exec(*cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT) # Stream output from the process stdout. chunks = [] while True: chunk = await p.stdout.read(16 * 1024) if not chunk: break # depends on [control=['if'], data=[]] chunks.append(chunk) # depends on [control=['while'], data=[]] # Wait for the process to finish, check it was successful & build stdout. await p.wait() stdout = b''.join(chunks).decode()[:-1] if p.returncode: raise RuntimeError('Return code {} from process: {}\n{}'.format(p.returncode, expand_cmd_str(cmd), stdout)) # depends on [control=['if'], data=[]] return stdout # depends on [control=['with'], data=[]]
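A usage sketch; an event loop must drive the coroutine, and the command is illustrative.

import asyncio

async def main():
    # Streams stdout while the process runs; raises on a non-zero exit code.
    out = await checked_run('echo', 'hello')
    print(out)

asyncio.get_event_loop().run_until_complete(main())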
def confirm(prompt, default=None, show_default=True, abort=False, input_function=None): '''Prompts for confirmation from the user. ''' valid = { 'yes': True, 'y': True, 'no': False, 'n': False } input_function = get_input_fn(input_function) if default not in ['yes', 'no', None]: default = None if show_default: prompt = '{} [{}/{}]: '.format(prompt, 'Y' if default == 'yes' else 'y', 'N' if default == 'no' else 'n') while True: choice = prompt_fn(input_function, prompt, default).lower() if choice in valid: if valid[choice] == False and abort: raise_abort() return valid[choice] else: echo('Please respond with "yes" or "no" (or "y" or "n").')
def function[confirm, parameter[prompt, default, show_default, abort, input_function]]: constant[Prompts for confirmation from the user. ] variable[valid] assign[=] dictionary[[<ast.Constant object at 0x7da18ede6b60>, <ast.Constant object at 0x7da18ede74f0>, <ast.Constant object at 0x7da18ede73a0>, <ast.Constant object at 0x7da18ede66b0>], [<ast.Constant object at 0x7da18ede4af0>, <ast.Constant object at 0x7da18ede5870>, <ast.Constant object at 0x7da18ede52a0>, <ast.Constant object at 0x7da18ede5bd0>]] variable[input_function] assign[=] call[name[get_input_fn], parameter[name[input_function]]] if compare[name[default] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da204623820>, <ast.Constant object at 0x7da204621210>, <ast.Constant object at 0x7da2046213f0>]]] begin[:] variable[default] assign[=] constant[None] if name[show_default] begin[:] variable[prompt] assign[=] call[constant[{} [{}/{}]: ].format, parameter[name[prompt], <ast.IfExp object at 0x7da2046223e0>, <ast.IfExp object at 0x7da20c6a8a00>]] while constant[True] begin[:] variable[choice] assign[=] call[call[name[prompt_fn], parameter[name[input_function], name[prompt], name[default]]].lower, parameter[]] if compare[name[choice] in name[valid]] begin[:] if <ast.BoolOp object at 0x7da20c6a8eb0> begin[:] call[name[raise_abort], parameter[]] return[call[name[valid]][name[choice]]]
keyword[def] identifier[confirm] ( identifier[prompt] , identifier[default] = keyword[None] , identifier[show_default] = keyword[True] , identifier[abort] = keyword[False] , identifier[input_function] = keyword[None] ): literal[string] identifier[valid] ={ literal[string] : keyword[True] , literal[string] : keyword[True] , literal[string] : keyword[False] , literal[string] : keyword[False] } identifier[input_function] = identifier[get_input_fn] ( identifier[input_function] ) keyword[if] identifier[default] keyword[not] keyword[in] [ literal[string] , literal[string] , keyword[None] ]: identifier[default] = keyword[None] keyword[if] identifier[show_default] : identifier[prompt] = literal[string] . identifier[format] ( identifier[prompt] , literal[string] keyword[if] identifier[default] == literal[string] keyword[else] literal[string] , literal[string] keyword[if] identifier[default] == literal[string] keyword[else] literal[string] ) keyword[while] keyword[True] : identifier[choice] = identifier[prompt_fn] ( identifier[input_function] , identifier[prompt] , identifier[default] ). identifier[lower] () keyword[if] identifier[choice] keyword[in] identifier[valid] : keyword[if] identifier[valid] [ identifier[choice] ]== keyword[False] keyword[and] identifier[abort] : identifier[raise_abort] () keyword[return] identifier[valid] [ identifier[choice] ] keyword[else] : identifier[echo] ( literal[string] )
def confirm(prompt, default=None, show_default=True, abort=False, input_function=None): """Prompts for confirmation from the user. """ valid = {'yes': True, 'y': True, 'no': False, 'n': False} input_function = get_input_fn(input_function) if default not in ['yes', 'no', None]: default = None # depends on [control=['if'], data=['default']] if show_default: prompt = '{} [{}/{}]: '.format(prompt, 'Y' if default == 'yes' else 'y', 'N' if default == 'no' else 'n') # depends on [control=['if'], data=[]] while True: choice = prompt_fn(input_function, prompt, default).lower() if choice in valid: if valid[choice] == False and abort: raise_abort() # depends on [control=['if'], data=[]] return valid[choice] # depends on [control=['if'], data=['choice', 'valid']] else: echo('Please respond with "yes" or "no" (or "y" or "n").') # depends on [control=['while'], data=[]]
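A hedged usage sketch assuming get_input_fn returns the callable unchanged and prompt_fn invokes it with the rendered prompt; the scripted answers stand in for interactive input.

# The first answer is invalid, so the loop re-prompts before accepting 'y'.
answers = iter(['maybe', 'y'])
result = confirm('Proceed?', default='no',
                 input_function=lambda prompt: next(answers))
assert result is True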
def collect_episodes(local_evaluator=None, remote_evaluators=[], timeout_seconds=180): """Gathers new episodes metrics tuples from the given evaluators.""" pending = [ a.apply.remote(lambda ev: ev.get_metrics()) for a in remote_evaluators ] collected, _ = ray.wait( pending, num_returns=len(pending), timeout=timeout_seconds * 1.0) num_metric_batches_dropped = len(pending) - len(collected) if pending and len(collected) == 0: raise ValueError( "Timed out waiting for metrics from workers. You can configure " "this timeout with `collect_metrics_timeout`.") metric_lists = ray_get_and_free(collected) if local_evaluator: metric_lists.append(local_evaluator.get_metrics()) episodes = [] for metrics in metric_lists: episodes.extend(metrics) return episodes, num_metric_batches_dropped
def function[collect_episodes, parameter[local_evaluator, remote_evaluators, timeout_seconds]]: constant[Gathers new episodes metrics tuples from the given evaluators.] variable[pending] assign[=] <ast.ListComp object at 0x7da18eb54b80> <ast.Tuple object at 0x7da18f09d7e0> assign[=] call[name[ray].wait, parameter[name[pending]]] variable[num_metric_batches_dropped] assign[=] binary_operation[call[name[len], parameter[name[pending]]] - call[name[len], parameter[name[collected]]]] if <ast.BoolOp object at 0x7da18f09c1f0> begin[:] <ast.Raise object at 0x7da18f09e320> variable[metric_lists] assign[=] call[name[ray_get_and_free], parameter[name[collected]]] if name[local_evaluator] begin[:] call[name[metric_lists].append, parameter[call[name[local_evaluator].get_metrics, parameter[]]]] variable[episodes] assign[=] list[[]] for taget[name[metrics]] in starred[name[metric_lists]] begin[:] call[name[episodes].extend, parameter[name[metrics]]] return[tuple[[<ast.Name object at 0x7da18f09d120>, <ast.Name object at 0x7da18f09d150>]]]
keyword[def] identifier[collect_episodes] ( identifier[local_evaluator] = keyword[None] , identifier[remote_evaluators] =[], identifier[timeout_seconds] = literal[int] ): literal[string] identifier[pending] =[ identifier[a] . identifier[apply] . identifier[remote] ( keyword[lambda] identifier[ev] : identifier[ev] . identifier[get_metrics] ()) keyword[for] identifier[a] keyword[in] identifier[remote_evaluators] ] identifier[collected] , identifier[_] = identifier[ray] . identifier[wait] ( identifier[pending] , identifier[num_returns] = identifier[len] ( identifier[pending] ), identifier[timeout] = identifier[timeout_seconds] * literal[int] ) identifier[num_metric_batches_dropped] = identifier[len] ( identifier[pending] )- identifier[len] ( identifier[collected] ) keyword[if] identifier[pending] keyword[and] identifier[len] ( identifier[collected] )== literal[int] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) identifier[metric_lists] = identifier[ray_get_and_free] ( identifier[collected] ) keyword[if] identifier[local_evaluator] : identifier[metric_lists] . identifier[append] ( identifier[local_evaluator] . identifier[get_metrics] ()) identifier[episodes] =[] keyword[for] identifier[metrics] keyword[in] identifier[metric_lists] : identifier[episodes] . identifier[extend] ( identifier[metrics] ) keyword[return] identifier[episodes] , identifier[num_metric_batches_dropped]
def collect_episodes(local_evaluator=None, remote_evaluators=[], timeout_seconds=180): """Gathers new episodes metrics tuples from the given evaluators.""" pending = [a.apply.remote(lambda ev: ev.get_metrics()) for a in remote_evaluators] (collected, _) = ray.wait(pending, num_returns=len(pending), timeout=timeout_seconds * 1.0) num_metric_batches_dropped = len(pending) - len(collected) if pending and len(collected) == 0: raise ValueError('Timed out waiting for metrics from workers. You can configure this timeout with `collect_metrics_timeout`.') # depends on [control=['if'], data=[]] metric_lists = ray_get_and_free(collected) if local_evaluator: metric_lists.append(local_evaluator.get_metrics()) # depends on [control=['if'], data=[]] episodes = [] for metrics in metric_lists: episodes.extend(metrics) # depends on [control=['for'], data=['metrics']] return (episodes, num_metric_batches_dropped)
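A hedged call sketch in RLlib style; the local_ev and remote_evs evaluator objects are assumed to come from elsewhere in the library.

# Gather episode metrics from the driver plus remote workers,
# tolerating stragglers via the timeout.
episodes, dropped = collect_episodes(
    local_evaluator=local_ev,
    remote_evaluators=remote_evs,
    timeout_seconds=60)
if dropped:
    print('Dropped %d metric batches from slow workers' % dropped)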