code: string, lengths 75 to 104k
code_sememe: string, lengths 47 to 309k
token_type: string, lengths 215 to 214k
code_dependency: string, lengths 75 to 155k
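Each row pairs a raw Python function (code) with three derived views of the same source: an AST-style sememe string (code_sememe), a keyword/identifier/literal token stream (token_type), and the source re-annotated with control-dependency comments (code_dependency). Below is a minimal sketch of loading and inspecting one row, assuming the dump is published via the Hugging Face datasets library; the repository id "user/code-views" is a hypothetical placeholder, not the real dataset name.

# A minimal sketch, assuming a Hugging Face hosted dump;
# "user/code-views" is a hypothetical repository id.
from datasets import load_dataset

ds = load_dataset("user/code-views", split="train")
row = ds[0]
for field in ("code", "code_sememe", "token_type", "code_dependency"):
    # Every field is a single string; per the schema, lengths run up to ~300k characters.
    print(field, len(row[field]))
    print(row[field][:200])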
def append_dynamic(self, t, dynamic, canvas=0, color='blue'): """! @brief Append single dynamic to specified canvas (by default to the first with index '0'). @param[in] t (list): Time points that corresponds to dynamic values and considered on a X axis. @param[in] dynamic (list): Value points of dynamic that are considered on an Y axis. @param[in] canvas (uint): Canvas where dynamic should be displayed. @param[in] color (string): Color that is used for drawing dynamic on the canvas. """ description = dynamic_descr(canvas, t, dynamic, False, color); self.__dynamic_storage.append(description); self.__update_canvas_xlim(description.time, description.separate);
def function[append_dynamic, parameter[self, t, dynamic, canvas, color]]: constant[! @brief Append single dynamic to specified canvas (by default to the first with index '0'). @param[in] t (list): Time points that corresponds to dynamic values and considered on a X axis. @param[in] dynamic (list): Value points of dynamic that are considered on an Y axis. @param[in] canvas (uint): Canvas where dynamic should be displayed. @param[in] color (string): Color that is used for drawing dynamic on the canvas. ] variable[description] assign[=] call[name[dynamic_descr], parameter[name[canvas], name[t], name[dynamic], constant[False], name[color]]] call[name[self].__dynamic_storage.append, parameter[name[description]]] call[name[self].__update_canvas_xlim, parameter[name[description].time, name[description].separate]]
keyword[def] identifier[append_dynamic] ( identifier[self] , identifier[t] , identifier[dynamic] , identifier[canvas] = literal[int] , identifier[color] = literal[string] ): literal[string] identifier[description] = identifier[dynamic_descr] ( identifier[canvas] , identifier[t] , identifier[dynamic] , keyword[False] , identifier[color] ); identifier[self] . identifier[__dynamic_storage] . identifier[append] ( identifier[description] ); identifier[self] . identifier[__update_canvas_xlim] ( identifier[description] . identifier[time] , identifier[description] . identifier[separate] );
def append_dynamic(self, t, dynamic, canvas=0, color='blue'): """! @brief Append single dynamic to specified canvas (by default to the first with index '0'). @param[in] t (list): Time points that corresponds to dynamic values and considered on a X axis. @param[in] dynamic (list): Value points of dynamic that are considered on an Y axis. @param[in] canvas (uint): Canvas where dynamic should be displayed. @param[in] color (string): Color that is used for drawing dynamic on the canvas. """ description = dynamic_descr(canvas, t, dynamic, False, color) self.__dynamic_storage.append(description) self.__update_canvas_xlim(description.time, description.separate)
def pretty_print(node): """Pretty-print a node Args: node (TYPE): Description """ for pre, _, node in RenderTree(node): print('{}{}'.format(pre, node.name))
def function[pretty_print, parameter[node]]: constant[Pretty-print a node Args: node (TYPE): Description ] for taget[tuple[[<ast.Name object at 0x7da204622080>, <ast.Name object at 0x7da204620b20>, <ast.Name object at 0x7da204622b30>]]] in starred[call[name[RenderTree], parameter[name[node]]]] begin[:] call[name[print], parameter[call[constant[{}{}].format, parameter[name[pre], name[node].name]]]]
keyword[def] identifier[pretty_print] ( identifier[node] ): literal[string] keyword[for] identifier[pre] , identifier[_] , identifier[node] keyword[in] identifier[RenderTree] ( identifier[node] ): identifier[print] ( literal[string] . identifier[format] ( identifier[pre] , identifier[node] . identifier[name] ))
def pretty_print(node): """Pretty-print a node Args: node (TYPE): Description """ for (pre, _, node) in RenderTree(node): print('{}{}'.format(pre, node.name)) # depends on [control=['for'], data=[]]
def set_state(self, entity_id, new_state, **kwargs): "Updates or creates the current state of an entity." return remote.set_state(self.api, new_state, **kwargs)
def function[set_state, parameter[self, entity_id, new_state]]: constant[Updates or creates the current state of an entity.] return[call[name[remote].set_state, parameter[name[self].api, name[new_state]]]]
keyword[def] identifier[set_state] ( identifier[self] , identifier[entity_id] , identifier[new_state] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[remote] . identifier[set_state] ( identifier[self] . identifier[api] , identifier[new_state] ,** identifier[kwargs] )
def set_state(self, entity_id, new_state, **kwargs): """Updates or creates the current state of an entity.""" return remote.set_state(self.api, new_state, **kwargs)
def gateway_by_type(self, type=None, on_network=None): # @ReservedAssignment """ Return gateways for the specified node. You can also specify type to find only gateways of a specific type. Valid types are: bgp_peering, netlink, ospfv2_area. :param RoutingNode self: the routing node to check :param str type: bgp_peering, netlink, ospfv2_area :param str on_network: if network is specified, should be CIDR and specifies a filter to only return gateways on that network when an interface has multiple :return: tuple of RoutingNode(interface,network,gateway) :rtype: list """ gateways = route_level(self, 'gateway') if not type: for gw in gateways: yield gw else: for node in gateways: #TODO: Change to type == node.related_element_type when # only supporting SMC >= 6.4 if type == node.routing_node_element.typeof: # If the parent is level interface, this is a tunnel interface # where the gateway is bound to interface versus network parent = node._parent if parent.level == 'interface': interface = parent network = None else: network = parent interface = network._parent if on_network is not None: if network and network.ip == on_network: yield (interface, network, node) else: yield (interface, network, node)
def function[gateway_by_type, parameter[self, type, on_network]]: constant[ Return gateways for the specified node. You can also specify type to find only gateways of a specific type. Valid types are: bgp_peering, netlink, ospfv2_area. :param RoutingNode self: the routing node to check :param str type: bgp_peering, netlink, ospfv2_area :param str on_network: if network is specified, should be CIDR and specifies a filter to only return gateways on that network when an interface has multiple :return: tuple of RoutingNode(interface,network,gateway) :rtype: list ] variable[gateways] assign[=] call[name[route_level], parameter[name[self], constant[gateway]]] if <ast.UnaryOp object at 0x7da1b1be44f0> begin[:] for taget[name[gw]] in starred[name[gateways]] begin[:] <ast.Yield object at 0x7da1b1be6d10>
keyword[def] identifier[gateway_by_type] ( identifier[self] , identifier[type] = keyword[None] , identifier[on_network] = keyword[None] ): literal[string] identifier[gateways] = identifier[route_level] ( identifier[self] , literal[string] ) keyword[if] keyword[not] identifier[type] : keyword[for] identifier[gw] keyword[in] identifier[gateways] : keyword[yield] identifier[gw] keyword[else] : keyword[for] identifier[node] keyword[in] identifier[gateways] : keyword[if] identifier[type] == identifier[node] . identifier[routing_node_element] . identifier[typeof] : identifier[parent] = identifier[node] . identifier[_parent] keyword[if] identifier[parent] . identifier[level] == literal[string] : identifier[interface] = identifier[parent] identifier[network] = keyword[None] keyword[else] : identifier[network] = identifier[parent] identifier[interface] = identifier[network] . identifier[_parent] keyword[if] identifier[on_network] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[network] keyword[and] identifier[network] . identifier[ip] == identifier[on_network] : keyword[yield] ( identifier[interface] , identifier[network] , identifier[node] ) keyword[else] : keyword[yield] ( identifier[interface] , identifier[network] , identifier[node] )
def gateway_by_type(self, type=None, on_network=None): # @ReservedAssignment '\n Return gateways for the specified node. You can also\n specify type to find only gateways of a specific type.\n Valid types are: bgp_peering, netlink, ospfv2_area.\n \n :param RoutingNode self: the routing node to check\n :param str type: bgp_peering, netlink, ospfv2_area\n :param str on_network: if network is specified, should be CIDR and\n specifies a filter to only return gateways on that network when\n an interface has multiple\n :return: tuple of RoutingNode(interface,network,gateway)\n :rtype: list\n ' gateways = route_level(self, 'gateway') if not type: for gw in gateways: yield gw # depends on [control=['for'], data=['gw']] # depends on [control=['if'], data=[]] else: for node in gateways: #TODO: Change to type == node.related_element_type when # only supporting SMC >= 6.4 if type == node.routing_node_element.typeof: # If the parent is level interface, this is a tunnel interface # where the gateway is bound to interface versus network parent = node._parent if parent.level == 'interface': interface = parent network = None # depends on [control=['if'], data=[]] else: network = parent interface = network._parent if on_network is not None: if network and network.ip == on_network: yield (interface, network, node) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['on_network']] else: yield (interface, network, node) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
def attributes(self): """ A dictionary mapping names of attributes to BiomartAttribute instances. This causes overwriting errors if there are diffferent pages which use the same attribute names, but is kept for backward compatibility. """ if not self._attribute_pages: self.fetch_attributes() result = {} for page in self._attribute_pages.values(): result.update(page.attributes) return result
def function[attributes, parameter[self]]: constant[ A dictionary mapping names of attributes to BiomartAttribute instances. This causes overwriting errors if there are diffferent pages which use the same attribute names, but is kept for backward compatibility. ] if <ast.UnaryOp object at 0x7da20cabd6f0> begin[:] call[name[self].fetch_attributes, parameter[]] variable[result] assign[=] dictionary[[], []] for taget[name[page]] in starred[call[name[self]._attribute_pages.values, parameter[]]] begin[:] call[name[result].update, parameter[name[page].attributes]] return[name[result]]
keyword[def] identifier[attributes] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_attribute_pages] : identifier[self] . identifier[fetch_attributes] () identifier[result] ={} keyword[for] identifier[page] keyword[in] identifier[self] . identifier[_attribute_pages] . identifier[values] (): identifier[result] . identifier[update] ( identifier[page] . identifier[attributes] ) keyword[return] identifier[result]
def attributes(self): """ A dictionary mapping names of attributes to BiomartAttribute instances. This causes overwriting errors if there are diffferent pages which use the same attribute names, but is kept for backward compatibility. """ if not self._attribute_pages: self.fetch_attributes() # depends on [control=['if'], data=[]] result = {} for page in self._attribute_pages.values(): result.update(page.attributes) # depends on [control=['for'], data=['page']] return result
def present(name, deployment_id, metric_name, alert_config, api_key=None, profile='telemetry'): ''' Ensure the telemetry alert exists. name An optional description of the alarm (not currently supported by telemetry API) deployment_id Specifies the ID of the root deployment resource (replica set cluster or sharded cluster) to which this alert definition is attached metric_name Specifies the unique ID of the metric to whose values these thresholds will be applied alert_config: Is a list of dictionaries where each dict contains the following fields: filter By default the alert will apply to the deployment and all its constituent resources. If the alert only applies to a subset of those resources, a filter may be specified to narrow this scope. min the smallest "ok" value the metric may take on; if missing or null, no minimum is enforced. max the largest "ok" value the metric may take on; if missing or null, no maximum is enforced. notify_all Used to indicate if you want to alert both onCallEngineer and apiNotifications api_key Telemetry api key for the user profile A dict of telemetry config information. If present, will be used instead of api_key. ''' ret = {'name': metric_name, 'result': True, 'comment': '', 'changes': {}} saved_alert_config = __salt__['telemetry.get_alert_config']( deployment_id, metric_name, api_key, profile) post_body = { "deployment": deployment_id, "filter": alert_config.get('filter'), "notificationChannel": __salt__['telemetry.get_notification_channel_id'](alert_config.get('escalate_to')).split(), "condition": { "metric": metric_name, "max": alert_config.get('max'), "min": alert_config.get('min') } } # Diff the alert config with the passed-in attributes difference = [] if saved_alert_config: #del saved_alert_config["_id"] for k, v in post_body.items(): if k not in saved_alert_config: difference.append("{0}={1} (new)".format(k, v)) continue v2 = saved_alert_config[k] if v == v2: continue if isinstance(v, string_types) and six.text_type(v) == six.text_type(v2): continue if isinstance(v, float) and v == float(v2): continue if isinstance(v, int) and v == int(v2): continue difference.append("{0}='{1}' was: '{2}'".format(k, v, v2)) else: difference.append("new alert config") create_or_update_args = ( deployment_id, metric_name, alert_config, api_key, profile, ) if saved_alert_config: # alert config is present. update, or do nothing # check to see if attributes matches is_present. If so, do nothing. if not difference: ret['comment'] = "alert config {0} present and matching".format(metric_name) return ret if __opts__['test']: msg = 'alert config {0} is to be updated.'.format(metric_name) ret['comment'] = msg ret['result'] = "\n".join(difference) return ret result, msg = __salt__['telemetry.update_alarm'](*create_or_update_args) if result: ret['changes']['diff'] = difference ret['comment'] = "Alert updated." else: ret['result'] = False ret['comment'] = 'Failed to update {0} alert config: {1}'.format(metric_name, msg) else: # alert config is absent. create it. if __opts__['test']: msg = 'alert config {0} is to be created.'.format(metric_name) ret['comment'] = msg ret['result'] = None return ret result, msg = __salt__['telemetry.create_alarm'](*create_or_update_args) if result: ret['changes']['new'] = msg else: ret['result'] = False ret['comment'] = 'Failed to create {0} alert config: {1}'.format(metric_name, msg) return ret
def function[present, parameter[name, deployment_id, metric_name, alert_config, api_key, profile]]: constant[ Ensure the telemetry alert exists. name An optional description of the alarm (not currently supported by telemetry API) deployment_id Specifies the ID of the root deployment resource (replica set cluster or sharded cluster) to which this alert definition is attached metric_name Specifies the unique ID of the metric to whose values these thresholds will be applied alert_config: Is a list of dictionaries where each dict contains the following fields: filter By default the alert will apply to the deployment and all its constituent resources. If the alert only applies to a subset of those resources, a filter may be specified to narrow this scope. min the smallest "ok" value the metric may take on; if missing or null, no minimum is enforced. max the largest "ok" value the metric may take on; if missing or null, no maximum is enforced. notify_all Used to indicate if you want to alert both onCallEngineer and apiNotifications api_key Telemetry api key for the user profile A dict of telemetry config information. If present, will be used instead of api_key. ] variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b26ac970>, <ast.Constant object at 0x7da1b26adea0>, <ast.Constant object at 0x7da1b26aca60>, <ast.Constant object at 0x7da1b26af6a0>], [<ast.Name object at 0x7da1b26adc90>, <ast.Constant object at 0x7da1b26ae770>, <ast.Constant object at 0x7da1b26ac100>, <ast.Dict object at 0x7da1b26aece0>]] variable[saved_alert_config] assign[=] call[call[name[__salt__]][constant[telemetry.get_alert_config]], parameter[name[deployment_id], name[metric_name], name[api_key], name[profile]]] variable[post_body] assign[=] dictionary[[<ast.Constant object at 0x7da1b26ae530>, <ast.Constant object at 0x7da1b26af5b0>, <ast.Constant object at 0x7da1b26af940>, <ast.Constant object at 0x7da1b26aed70>], [<ast.Name object at 0x7da1b26ac0a0>, <ast.Call object at 0x7da1b26aead0>, <ast.Call object at 0x7da1b26ac160>, <ast.Dict object at 0x7da1b26ad300>]] variable[difference] assign[=] list[[]] if name[saved_alert_config] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b26ac580>, <ast.Name object at 0x7da1b26ae4a0>]]] in starred[call[name[post_body].items, parameter[]]] begin[:] if compare[name[k] <ast.NotIn object at 0x7da2590d7190> name[saved_alert_config]] begin[:] call[name[difference].append, parameter[call[constant[{0}={1} (new)].format, parameter[name[k], name[v]]]]] continue variable[v2] assign[=] call[name[saved_alert_config]][name[k]] if compare[name[v] equal[==] name[v2]] begin[:] continue if <ast.BoolOp object at 0x7da1b26afb20> begin[:] continue if <ast.BoolOp object at 0x7da1b26ad7e0> begin[:] continue if <ast.BoolOp object at 0x7da1b26ad480> begin[:] continue call[name[difference].append, parameter[call[constant[{0}='{1}' was: '{2}'].format, parameter[name[k], name[v], name[v2]]]]] variable[create_or_update_args] assign[=] tuple[[<ast.Name object at 0x7da1b26af670>, <ast.Name object at 0x7da1b26acac0>, <ast.Name object at 0x7da1b26ade40>, <ast.Name object at 0x7da1b26adc00>, <ast.Name object at 0x7da1b26afd00>]] if name[saved_alert_config] begin[:] if <ast.UnaryOp object at 0x7da1b26aed40> begin[:] call[name[ret]][constant[comment]] assign[=] call[constant[alert config {0} present and matching].format, parameter[name[metric_name]]] return[name[ret]] if call[name[__opts__]][constant[test]] begin[:] variable[msg] assign[=] call[constant[alert config {0} is to be updated.].format, parameter[name[metric_name]]] call[name[ret]][constant[comment]] assign[=] name[msg] call[name[ret]][constant[result]] assign[=] call[constant[ ].join, parameter[name[difference]]] return[name[ret]] <ast.Tuple object at 0x7da1b26af0a0> assign[=] call[call[name[__salt__]][constant[telemetry.update_alarm]], parameter[<ast.Starred object at 0x7da1b26ac5b0>]] if name[result] begin[:] call[call[name[ret]][constant[changes]]][constant[diff]] assign[=] name[difference] call[name[ret]][constant[comment]] assign[=] constant[Alert updated.] return[name[ret]]
keyword[def] identifier[present] ( identifier[name] , identifier[deployment_id] , identifier[metric_name] , identifier[alert_config] , identifier[api_key] = keyword[None] , identifier[profile] = literal[string] ): literal[string] identifier[ret] ={ literal[string] : identifier[metric_name] , literal[string] : keyword[True] , literal[string] : literal[string] , literal[string] :{}} identifier[saved_alert_config] = identifier[__salt__] [ literal[string] ]( identifier[deployment_id] , identifier[metric_name] , identifier[api_key] , identifier[profile] ) identifier[post_body] ={ literal[string] : identifier[deployment_id] , literal[string] : identifier[alert_config] . identifier[get] ( literal[string] ), literal[string] : identifier[__salt__] [ literal[string] ]( identifier[alert_config] . identifier[get] ( literal[string] )). identifier[split] (), literal[string] :{ literal[string] : identifier[metric_name] , literal[string] : identifier[alert_config] . identifier[get] ( literal[string] ), literal[string] : identifier[alert_config] . identifier[get] ( literal[string] ) } } identifier[difference] =[] keyword[if] identifier[saved_alert_config] : keyword[for] identifier[k] , identifier[v] keyword[in] identifier[post_body] . identifier[items] (): keyword[if] identifier[k] keyword[not] keyword[in] identifier[saved_alert_config] : identifier[difference] . identifier[append] ( literal[string] . identifier[format] ( identifier[k] , identifier[v] )) keyword[continue] identifier[v2] = identifier[saved_alert_config] [ identifier[k] ] keyword[if] identifier[v] == identifier[v2] : keyword[continue] keyword[if] identifier[isinstance] ( identifier[v] , identifier[string_types] ) keyword[and] identifier[six] . identifier[text_type] ( identifier[v] )== identifier[six] . identifier[text_type] ( identifier[v2] ): keyword[continue] keyword[if] identifier[isinstance] ( identifier[v] , identifier[float] ) keyword[and] identifier[v] == identifier[float] ( identifier[v2] ): keyword[continue] keyword[if] identifier[isinstance] ( identifier[v] , identifier[int] ) keyword[and] identifier[v] == identifier[int] ( identifier[v2] ): keyword[continue] identifier[difference] . identifier[append] ( literal[string] . identifier[format] ( identifier[k] , identifier[v] , identifier[v2] )) keyword[else] : identifier[difference] . identifier[append] ( literal[string] ) identifier[create_or_update_args] =( identifier[deployment_id] , identifier[metric_name] , identifier[alert_config] , identifier[api_key] , identifier[profile] , ) keyword[if] identifier[saved_alert_config] : keyword[if] keyword[not] identifier[difference] : identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[metric_name] ) keyword[return] identifier[ret] keyword[if] identifier[__opts__] [ literal[string] ]: identifier[msg] = literal[string] . identifier[format] ( identifier[metric_name] ) identifier[ret] [ literal[string] ]= identifier[msg] identifier[ret] [ literal[string] ]= literal[string] . identifier[join] ( identifier[difference] ) keyword[return] identifier[ret] identifier[result] , identifier[msg] = identifier[__salt__] [ literal[string] ](* identifier[create_or_update_args] ) keyword[if] identifier[result] : identifier[ret] [ literal[string] ][ literal[string] ]= identifier[difference] identifier[ret] [ literal[string] ]= literal[string] keyword[else] : identifier[ret] [ literal[string] ]= keyword[False] identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[metric_name] , identifier[msg] ) keyword[else] : keyword[if] identifier[__opts__] [ literal[string] ]: identifier[msg] = literal[string] . identifier[format] ( identifier[metric_name] ) identifier[ret] [ literal[string] ]= identifier[msg] identifier[ret] [ literal[string] ]= keyword[None] keyword[return] identifier[ret] identifier[result] , identifier[msg] = identifier[__salt__] [ literal[string] ](* identifier[create_or_update_args] ) keyword[if] identifier[result] : identifier[ret] [ literal[string] ][ literal[string] ]= identifier[msg] keyword[else] : identifier[ret] [ literal[string] ]= keyword[False] identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[metric_name] , identifier[msg] ) keyword[return] identifier[ret]
def present(name, deployment_id, metric_name, alert_config, api_key=None, profile='telemetry'): """ Ensure the telemetry alert exists. name An optional description of the alarm (not currently supported by telemetry API) deployment_id Specifies the ID of the root deployment resource (replica set cluster or sharded cluster) to which this alert definition is attached metric_name Specifies the unique ID of the metric to whose values these thresholds will be applied alert_config: Is a list of dictionaries where each dict contains the following fields: filter By default the alert will apply to the deployment and all its constituent resources. If the alert only applies to a subset of those resources, a filter may be specified to narrow this scope. min the smallest "ok" value the metric may take on; if missing or null, no minimum is enforced. max the largest "ok" value the metric may take on; if missing or null, no maximum is enforced. notify_all Used to indicate if you want to alert both onCallEngineer and apiNotifications api_key Telemetry api key for the user profile A dict of telemetry config information. If present, will be used instead of api_key. """ ret = {'name': metric_name, 'result': True, 'comment': '', 'changes': {}} saved_alert_config = __salt__['telemetry.get_alert_config'](deployment_id, metric_name, api_key, profile) post_body = {'deployment': deployment_id, 'filter': alert_config.get('filter'), 'notificationChannel': __salt__['telemetry.get_notification_channel_id'](alert_config.get('escalate_to')).split(), 'condition': {'metric': metric_name, 'max': alert_config.get('max'), 'min': alert_config.get('min')}} # Diff the alert config with the passed-in attributes difference = [] if saved_alert_config: #del saved_alert_config["_id"] for (k, v) in post_body.items(): if k not in saved_alert_config: difference.append('{0}={1} (new)'.format(k, v)) continue # depends on [control=['if'], data=['k']] v2 = saved_alert_config[k] if v == v2: continue # depends on [control=['if'], data=[]] if isinstance(v, string_types) and six.text_type(v) == six.text_type(v2): continue # depends on [control=['if'], data=[]] if isinstance(v, float) and v == float(v2): continue # depends on [control=['if'], data=[]] if isinstance(v, int) and v == int(v2): continue # depends on [control=['if'], data=[]] difference.append("{0}='{1}' was: '{2}'".format(k, v, v2)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] else: difference.append('new alert config') create_or_update_args = (deployment_id, metric_name, alert_config, api_key, profile) if saved_alert_config: # alert config is present. update, or do nothing # check to see if attributes matches is_present. If so, do nothing. if not difference: ret['comment'] = 'alert config {0} present and matching'.format(metric_name) return ret # depends on [control=['if'], data=[]] if __opts__['test']: msg = 'alert config {0} is to be updated.'.format(metric_name) ret['comment'] = msg ret['result'] = '\n'.join(difference) return ret # depends on [control=['if'], data=[]] (result, msg) = __salt__['telemetry.update_alarm'](*create_or_update_args) if result: ret['changes']['diff'] = difference ret['comment'] = 'Alert updated.' # depends on [control=['if'], data=[]] else: ret['result'] = False ret['comment'] = 'Failed to update {0} alert config: {1}'.format(metric_name, msg) # depends on [control=['if'], data=[]] else: # alert config is absent. create it. if __opts__['test']: msg = 'alert config {0} is to be created.'.format(metric_name) ret['comment'] = msg ret['result'] = None return ret # depends on [control=['if'], data=[]] (result, msg) = __salt__['telemetry.create_alarm'](*create_or_update_args) if result: ret['changes']['new'] = msg # depends on [control=['if'], data=[]] else: ret['result'] = False ret['comment'] = 'Failed to create {0} alert config: {1}'.format(metric_name, msg) return ret
def pos(self): """ Lazy-loads the part of speech tag for this word :getter: Returns the plain string value of the POS tag for the word :type: str """ if self._pos is None: poses = self._element.xpath('POS/text()') if len(poses) > 0: self._pos = poses[0] return self._pos
def function[pos, parameter[self]]: constant[ Lazy-loads the part of speech tag for this word :getter: Returns the plain string value of the POS tag for the word :type: str ] if compare[name[self]._pos is constant[None]] begin[:] variable[poses] assign[=] call[name[self]._element.xpath, parameter[constant[POS/text()]]] if compare[call[name[len], parameter[name[poses]]] greater[>] constant[0]] begin[:] name[self]._pos assign[=] call[name[poses]][constant[0]] return[name[self]._pos]
keyword[def] identifier[pos] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_pos] keyword[is] keyword[None] : identifier[poses] = identifier[self] . identifier[_element] . identifier[xpath] ( literal[string] ) keyword[if] identifier[len] ( identifier[poses] )> literal[int] : identifier[self] . identifier[_pos] = identifier[poses] [ literal[int] ] keyword[return] identifier[self] . identifier[_pos]
def pos(self): """ Lazy-loads the part of speech tag for this word :getter: Returns the plain string value of the POS tag for the word :type: str """ if self._pos is None: poses = self._element.xpath('POS/text()') if len(poses) > 0: self._pos = poses[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return self._pos
async def handle_client_request(self, req, res): """ Entry point for the request + response middleware chain. This is called by growler.HTTPResponder (the default responder) after the headers have been processed in the begin_application method. This iterates over all middleware in the middleware list which matches the client's method and path. It executes the middleware and continues iterating until the res.has_ended property is true. If the middleware raises a GrowlerStopIteration exception, this method immediately returns None, breaking the loop and leaving res without sending any information back to the client. Be *sure* that you have another coroutine scheduled that will take over handling client data. If a middleware function raises any other exception, the exception is forwarded to the middleware generator, which changes behavior to generating any error handlers it had encountered. This method then calls the handle_server_error method which *should* handle the error and notify the user. If after the chain is exhausted, either with an exception raised or not, res.has_ended does not evaluate to true, the response is sent a simple server error message in text. Args: req (growler.HTTPRequest): The incoming request, containing all information about the client. res (growler.HTTPResponse): The outgoing response, containing methods for sending headers and data back to the client. """ # create a middleware generator mw_generator = self.middleware(req.method, req.path) # loop through middleware for mw in mw_generator: # try calling the function try: ret_val = mw(req, res) if inspect.isawaitable(ret_val): await ret_val # special exception - immediately stop the loop # - do not check if res has sent except GrowlerStopIteration: return None # on an unhandled exception - notify the generator of the error except Exception as error: mw_generator.throw(error) await self.handle_server_error(req, res, mw_generator, error) return if res.has_ended: break if not res.has_ended: self.handle_response_not_sent(req, res)
<ast.AsyncFunctionDef object at 0x7da18bcc8070>
keyword[async] keyword[def] identifier[handle_client_request] ( identifier[self] , identifier[req] , identifier[res] ): literal[string] identifier[mw_generator] = identifier[self] . identifier[middleware] ( identifier[req] . identifier[method] , identifier[req] . identifier[path] ) keyword[for] identifier[mw] keyword[in] identifier[mw_generator] : keyword[try] : identifier[ret_val] = identifier[mw] ( identifier[req] , identifier[res] ) keyword[if] identifier[inspect] . identifier[isawaitable] ( identifier[ret_val] ): keyword[await] identifier[ret_val] keyword[except] identifier[GrowlerStopIteration] : keyword[return] keyword[None] keyword[except] identifier[Exception] keyword[as] identifier[error] : identifier[mw_generator] . identifier[throw] ( identifier[error] ) keyword[await] identifier[self] . identifier[handle_server_error] ( identifier[req] , identifier[res] , identifier[mw_generator] , identifier[error] ) keyword[return] keyword[if] identifier[res] . identifier[has_ended] : keyword[break] keyword[if] keyword[not] identifier[res] . identifier[has_ended] : identifier[self] . identifier[handle_response_not_sent] ( identifier[req] , identifier[res] )
async def handle_client_request(self, req, res): """ Entry point for the request + response middleware chain. This is called by growler.HTTPResponder (the default responder) after the headers have been processed in the begin_application method. This iterates over all middleware in the middleware list which matches the client's method and path. It executes the middleware and continues iterating until the res.has_ended property is true. If the middleware raises a GrowlerStopIteration exception, this method immediately returns None, breaking the loop and leaving res without sending any information back to the client. Be *sure* that you have another coroutine scheduled that will take over handling client data. If a middleware function raises any other exception, the exception is forwarded to the middleware generator, which changes behavior to generating any error handlers it had encountered. This method then calls the handle_server_error method which *should* handle the error and notify the user. If after the chain is exhausted, either with an exception raised or not, res.has_ended does not evaluate to true, the response is sent a simple server error message in text. Args: req (growler.HTTPRequest): The incoming request, containing all information about the client. res (growler.HTTPResponse): The outgoing response, containing methods for sending headers and data back to the client. """ # create a middleware generator mw_generator = self.middleware(req.method, req.path) # loop through middleware for mw in mw_generator: # try calling the function try: ret_val = mw(req, res) if inspect.isawaitable(ret_val): await ret_val # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] # special exception - immediately stop the loop # - do not check if res has sent except GrowlerStopIteration: return None # depends on [control=['except'], data=[]] # on an unhandled exception - notify the generator of the error except Exception as error: mw_generator.throw(error) await self.handle_server_error(req, res, mw_generator, error) return # depends on [control=['except'], data=['error']] if res.has_ended: break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['mw']] if not res.has_ended: self.handle_response_not_sent(req, res) # depends on [control=['if'], data=[]]
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'key') and self.key is not None: _dict['key'] = self.key if hasattr(self, 'matching_results') and self.matching_results is not None: _dict['matching_results'] = self.matching_results if hasattr(self, 'event_rate') and self.event_rate is not None: _dict['event_rate'] = self.event_rate return _dict
def function[_to_dict, parameter[self]]: constant[Return a json dictionary representing this model.] variable[_dict] assign[=] dictionary[[], []] if <ast.BoolOp object at 0x7da2044c2920> begin[:] call[name[_dict]][constant[key]] assign[=] name[self].key if <ast.BoolOp object at 0x7da2044c0340> begin[:] call[name[_dict]][constant[matching_results]] assign[=] name[self].matching_results if <ast.BoolOp object at 0x7da18bccb4c0> begin[:] call[name[_dict]][constant[event_rate]] assign[=] name[self].event_rate return[name[_dict]]
keyword[def] identifier[_to_dict] ( identifier[self] ): literal[string] identifier[_dict] ={} keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[key] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[key] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[matching_results] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[matching_results] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[event_rate] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[event_rate] keyword[return] identifier[_dict]
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'key') and self.key is not None: _dict['key'] = self.key # depends on [control=['if'], data=[]] if hasattr(self, 'matching_results') and self.matching_results is not None: _dict['matching_results'] = self.matching_results # depends on [control=['if'], data=[]] if hasattr(self, 'event_rate') and self.event_rate is not None: _dict['event_rate'] = self.event_rate # depends on [control=['if'], data=[]] return _dict
def device_unmounted(self, device): """Show unmount notification for specified device object.""" if not self._mounter.is_handleable(device): return self._show_notification( 'device_unmounted', _('Device unmounted'), _('{0.ui_label} unmounted', device), device.icon_name)
def function[device_unmounted, parameter[self, device]]: constant[Show unmount notification for specified device object.] if <ast.UnaryOp object at 0x7da20c6e5300> begin[:] return[None] call[name[self]._show_notification, parameter[constant[device_unmounted], call[name[_], parameter[constant[Device unmounted]]], call[name[_], parameter[constant[{0.ui_label} unmounted], name[device]]], name[device].icon_name]]
keyword[def] identifier[device_unmounted] ( identifier[self] , identifier[device] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_mounter] . identifier[is_handleable] ( identifier[device] ): keyword[return] identifier[self] . identifier[_show_notification] ( literal[string] , identifier[_] ( literal[string] ), identifier[_] ( literal[string] , identifier[device] ), identifier[device] . identifier[icon_name] )
def device_unmounted(self, device): """Show unmount notification for specified device object.""" if not self._mounter.is_handleable(device): return # depends on [control=['if'], data=[]] self._show_notification('device_unmounted', _('Device unmounted'), _('{0.ui_label} unmounted', device), device.icon_name)
def update(self, play): """ Update the accumulator with the current play :returns: new tally :rtype: dict, ``{ 'period': per, 'time': clock, 'team': cumul, 'play': play }`` """ new_tally = { } #if any(isinstance(play.event, te) for te in self.trigger_event_types): if self._count_play(play): # the team who made the play / triggered the event team = self._get_team(play) try: self.total[team] += 1 except: self.total[team] = 1 self.teams.append(team) for i in range(len(self.tally)): self.tally[i][team] = 0 try: new_tally = { k:v for k,v in self.tally[len(self.tally)-1].items() } new_tally['period'] = play.period new_tally['time'] = play.time new_tally[team] += 1 new_tally['play'] = play except: new_tally = { 'period': play.period, 'time': play.time, team: 1, 'play': play } self.tally.append(new_tally) return new_tally
def function[update, parameter[self, play]]: constant[ Update the accumulator with the current play :returns: new tally :rtype: dict, ``{ 'period': per, 'time': clock, 'team': cumul, 'play': play }`` ] variable[new_tally] assign[=] dictionary[[], []] if call[name[self]._count_play, parameter[name[play]]] begin[:] variable[team] assign[=] call[name[self]._get_team, parameter[name[play]]] <ast.Try object at 0x7da1b10c2350> <ast.Try object at 0x7da1b10c15a0> call[name[self].tally.append, parameter[name[new_tally]]] return[name[new_tally]]
keyword[def] identifier[update] ( identifier[self] , identifier[play] ): literal[string] identifier[new_tally] ={} keyword[if] identifier[self] . identifier[_count_play] ( identifier[play] ): identifier[team] = identifier[self] . identifier[_get_team] ( identifier[play] ) keyword[try] : identifier[self] . identifier[total] [ identifier[team] ]+= literal[int] keyword[except] : identifier[self] . identifier[total] [ identifier[team] ]= literal[int] identifier[self] . identifier[teams] . identifier[append] ( identifier[team] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[tally] )): identifier[self] . identifier[tally] [ identifier[i] ][ identifier[team] ]= literal[int] keyword[try] : identifier[new_tally] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[tally] [ identifier[len] ( identifier[self] . identifier[tally] )- literal[int] ]. identifier[items] ()} identifier[new_tally] [ literal[string] ]= identifier[play] . identifier[period] identifier[new_tally] [ literal[string] ]= identifier[play] . identifier[time] identifier[new_tally] [ identifier[team] ]+= literal[int] identifier[new_tally] [ literal[string] ]= identifier[play] keyword[except] : identifier[new_tally] ={ literal[string] : identifier[play] . identifier[period] , literal[string] : identifier[play] . identifier[time] , identifier[team] : literal[int] , literal[string] : identifier[play] } identifier[self] . identifier[tally] . identifier[append] ( identifier[new_tally] ) keyword[return] identifier[new_tally]
def update(self, play): """ Update the accumulator with the current play :returns: new tally :rtype: dict, ``{ 'period': per, 'time': clock, 'team': cumul, 'play': play }`` """ new_tally = {} #if any(isinstance(play.event, te) for te in self.trigger_event_types): if self._count_play(play): # the team who made the play / triggered the event team = self._get_team(play) try: self.total[team] += 1 # depends on [control=['try'], data=[]] except: self.total[team] = 1 self.teams.append(team) for i in range(len(self.tally)): self.tally[i][team] = 0 # depends on [control=['for'], data=['i']] # depends on [control=['except'], data=[]] try: new_tally = {k: v for (k, v) in self.tally[len(self.tally) - 1].items()} new_tally['period'] = play.period new_tally['time'] = play.time new_tally[team] += 1 new_tally['play'] = play # depends on [control=['try'], data=[]] except: new_tally = {'period': play.period, 'time': play.time, team: 1, 'play': play} # depends on [control=['except'], data=[]] self.tally.append(new_tally) # depends on [control=['if'], data=[]] return new_tally
def reject_source(ident, comment): '''Reject a source for automatic harvesting''' source = get_source(ident) source.validation.on = datetime.now() source.validation.comment = comment source.validation.state = VALIDATION_REFUSED if current_user.is_authenticated: source.validation.by = current_user._get_current_object() source.save() return source
def function[reject_source, parameter[ident, comment]]: constant[Reject a source for automatic harvesting] variable[source] assign[=] call[name[get_source], parameter[name[ident]]] name[source].validation.on assign[=] call[name[datetime].now, parameter[]] name[source].validation.comment assign[=] name[comment] name[source].validation.state assign[=] name[VALIDATION_REFUSED] if name[current_user].is_authenticated begin[:] name[source].validation.by assign[=] call[name[current_user]._get_current_object, parameter[]] call[name[source].save, parameter[]] return[name[source]]
keyword[def] identifier[reject_source] ( identifier[ident] , identifier[comment] ): literal[string] identifier[source] = identifier[get_source] ( identifier[ident] ) identifier[source] . identifier[validation] . identifier[on] = identifier[datetime] . identifier[now] () identifier[source] . identifier[validation] . identifier[comment] = identifier[comment] identifier[source] . identifier[validation] . identifier[state] = identifier[VALIDATION_REFUSED] keyword[if] identifier[current_user] . identifier[is_authenticated] : identifier[source] . identifier[validation] . identifier[by] = identifier[current_user] . identifier[_get_current_object] () identifier[source] . identifier[save] () keyword[return] identifier[source]
def reject_source(ident, comment): """Reject a source for automatic harvesting""" source = get_source(ident) source.validation.on = datetime.now() source.validation.comment = comment source.validation.state = VALIDATION_REFUSED if current_user.is_authenticated: source.validation.by = current_user._get_current_object() # depends on [control=['if'], data=[]] source.save() return source
def expandService(service_element): """Take a service element and expand it into an iterator of: ([type_uri], uri, service_element) """ uris = sortedURIs(service_element) if not uris: uris = [None] expanded = [] for uri in uris: type_uris = getTypeURIs(service_element) expanded.append((type_uris, uri, service_element)) return expanded
def function[expandService, parameter[service_element]]: constant[Take a service element and expand it into an iterator of: ([type_uri], uri, service_element) ] variable[uris] assign[=] call[name[sortedURIs], parameter[name[service_element]]] if <ast.UnaryOp object at 0x7da18dc07af0> begin[:] variable[uris] assign[=] list[[<ast.Constant object at 0x7da18dc05360>]] variable[expanded] assign[=] list[[]] for taget[name[uri]] in starred[name[uris]] begin[:] variable[type_uris] assign[=] call[name[getTypeURIs], parameter[name[service_element]]] call[name[expanded].append, parameter[tuple[[<ast.Name object at 0x7da2054a4430>, <ast.Name object at 0x7da2054a5c30>, <ast.Name object at 0x7da2054a7a00>]]]] return[name[expanded]]
keyword[def] identifier[expandService] ( identifier[service_element] ): literal[string] identifier[uris] = identifier[sortedURIs] ( identifier[service_element] ) keyword[if] keyword[not] identifier[uris] : identifier[uris] =[ keyword[None] ] identifier[expanded] =[] keyword[for] identifier[uri] keyword[in] identifier[uris] : identifier[type_uris] = identifier[getTypeURIs] ( identifier[service_element] ) identifier[expanded] . identifier[append] (( identifier[type_uris] , identifier[uri] , identifier[service_element] )) keyword[return] identifier[expanded]
def expandService(service_element): """Take a service element and expand it into an iterator of: ([type_uri], uri, service_element) """ uris = sortedURIs(service_element) if not uris: uris = [None] # depends on [control=['if'], data=[]] expanded = [] for uri in uris: type_uris = getTypeURIs(service_element) expanded.append((type_uris, uri, service_element)) # depends on [control=['for'], data=['uri']] return expanded
def euler_angles(self): """:obj:`tuple` of float: The three euler angles for the rotation. """ q_wxyz = self.quaternion q_xyzw = np.roll(q_wxyz, -1) return transformations.euler_from_quaternion(q_xyzw)
def function[euler_angles, parameter[self]]: constant[:obj:`tuple` of float: The three euler angles for the rotation. ] variable[q_wxyz] assign[=] name[self].quaternion variable[q_xyzw] assign[=] call[name[np].roll, parameter[name[q_wxyz], <ast.UnaryOp object at 0x7da1b12b4be0>]] return[call[name[transformations].euler_from_quaternion, parameter[name[q_xyzw]]]]
keyword[def] identifier[euler_angles] ( identifier[self] ): literal[string] identifier[q_wxyz] = identifier[self] . identifier[quaternion] identifier[q_xyzw] = identifier[np] . identifier[roll] ( identifier[q_wxyz] ,- literal[int] ) keyword[return] identifier[transformations] . identifier[euler_from_quaternion] ( identifier[q_xyzw] )
def euler_angles(self): """:obj:`tuple` of float: The three euler angles for the rotation. """ q_wxyz = self.quaternion q_xyzw = np.roll(q_wxyz, -1) return transformations.euler_from_quaternion(q_xyzw)
def plot_survival(self, on, how="os", survival_units="Days", strata=None, ax=None, ci_show=False, with_condition_color="#B38600", no_condition_color="#A941AC", with_condition_label=None, no_condition_label=None, color_map=None, label_map=None, color_palette="Set2", threshold=None, **kwargs): """Plot a Kaplan Meier survival curve by splitting the cohort into two groups Parameters ---------- on : str or function or list or dict See `cohort.load.as_dataframe` how : {"os", "pfs"}, optional Whether to plot OS (overall survival) or PFS (progression free survival) survival_units : str Unit of time for the survival measure, i.e. Days or Months strata : str (optional) column name of stratifying variable ci_show : bool Display the confidence interval around the survival curve threshold : int, "median", "median-per-strata" or None (optional) Threshold of `col` on which to split the cohort """ assert how in ["os", "pfs"], "Invalid choice of survival plot type %s" % how cols, df = self.as_dataframe(on, return_cols=True, **kwargs) plot_col = self.plot_col_from_cols(cols=cols, only_allow_one=True) df = filter_not_null(df, plot_col) results = plot_kmf( df=df, condition_col=plot_col, xlabel=survival_units, ylabel="Overall Survival (%)" if how == "os" else "Progression-Free Survival (%)", censor_col="deceased" if how == "os" else "progressed_or_deceased", survival_col=how, strata_col=strata, threshold=threshold, ax=ax, ci_show=ci_show, with_condition_color=with_condition_color, no_condition_color=no_condition_color, with_condition_label=with_condition_label, no_condition_label=no_condition_label, color_palette=color_palette, label_map=label_map, color_map=color_map, ) return results
def function[plot_survival, parameter[self, on, how, survival_units, strata, ax, ci_show, with_condition_color, no_condition_color, with_condition_label, no_condition_label, color_map, label_map, color_palette, threshold]]: constant[Plot a Kaplan Meier survival curve by splitting the cohort into two groups Parameters ---------- on : str or function or list or dict See `cohort.load.as_dataframe` how : {"os", "pfs"}, optional Whether to plot OS (overall survival) or PFS (progression free survival) survival_units : str Unit of time for the survival measure, i.e. Days or Months strata : str (optional) column name of stratifying variable ci_show : bool Display the confidence interval around the survival curve threshold : int, "median", "median-per-strata" or None (optional) Threshold of `col` on which to split the cohort ] assert[compare[name[how] in list[[<ast.Constant object at 0x7da18eb562f0>, <ast.Constant object at 0x7da18eb55990>]]]] <ast.Tuple object at 0x7da18eb56da0> assign[=] call[name[self].as_dataframe, parameter[name[on]]] variable[plot_col] assign[=] call[name[self].plot_col_from_cols, parameter[]] variable[df] assign[=] call[name[filter_not_null], parameter[name[df], name[plot_col]]] variable[results] assign[=] call[name[plot_kmf], parameter[]] return[name[results]]
keyword[def] identifier[plot_survival] ( identifier[self] , identifier[on] , identifier[how] = literal[string] , identifier[survival_units] = literal[string] , identifier[strata] = keyword[None] , identifier[ax] = keyword[None] , identifier[ci_show] = keyword[False] , identifier[with_condition_color] = literal[string] , identifier[no_condition_color] = literal[string] , identifier[with_condition_label] = keyword[None] , identifier[no_condition_label] = keyword[None] , identifier[color_map] = keyword[None] , identifier[label_map] = keyword[None] , identifier[color_palette] = literal[string] , identifier[threshold] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[assert] identifier[how] keyword[in] [ literal[string] , literal[string] ], literal[string] % identifier[how] identifier[cols] , identifier[df] = identifier[self] . identifier[as_dataframe] ( identifier[on] , identifier[return_cols] = keyword[True] ,** identifier[kwargs] ) identifier[plot_col] = identifier[self] . identifier[plot_col_from_cols] ( identifier[cols] = identifier[cols] , identifier[only_allow_one] = keyword[True] ) identifier[df] = identifier[filter_not_null] ( identifier[df] , identifier[plot_col] ) identifier[results] = identifier[plot_kmf] ( identifier[df] = identifier[df] , identifier[condition_col] = identifier[plot_col] , identifier[xlabel] = identifier[survival_units] , identifier[ylabel] = literal[string] keyword[if] identifier[how] == literal[string] keyword[else] literal[string] , identifier[censor_col] = literal[string] keyword[if] identifier[how] == literal[string] keyword[else] literal[string] , identifier[survival_col] = identifier[how] , identifier[strata_col] = identifier[strata] , identifier[threshold] = identifier[threshold] , identifier[ax] = identifier[ax] , identifier[ci_show] = identifier[ci_show] , identifier[with_condition_color] = identifier[with_condition_color] , identifier[no_condition_color] = identifier[no_condition_color] , identifier[with_condition_label] = identifier[with_condition_label] , identifier[no_condition_label] = identifier[no_condition_label] , identifier[color_palette] = identifier[color_palette] , identifier[label_map] = identifier[label_map] , identifier[color_map] = identifier[color_map] , ) keyword[return] identifier[results]
def plot_survival(self, on, how='os', survival_units='Days', strata=None, ax=None, ci_show=False, with_condition_color='#B38600', no_condition_color='#A941AC', with_condition_label=None, no_condition_label=None, color_map=None, label_map=None, color_palette='Set2', threshold=None, **kwargs): """Plot a Kaplan Meier survival curve by splitting the cohort into two groups Parameters ---------- on : str or function or list or dict See `cohort.load.as_dataframe` how : {"os", "pfs"}, optional Whether to plot OS (overall survival) or PFS (progression free survival) survival_units : str Unit of time for the survival measure, i.e. Days or Months strata : str (optional) column name of stratifying variable ci_show : bool Display the confidence interval around the survival curve threshold : int, "median", "median-per-strata" or None (optional) Threshold of `col` on which to split the cohort """ assert how in ['os', 'pfs'], 'Invalid choice of survival plot type %s' % how (cols, df) = self.as_dataframe(on, return_cols=True, **kwargs) plot_col = self.plot_col_from_cols(cols=cols, only_allow_one=True) df = filter_not_null(df, plot_col) results = plot_kmf(df=df, condition_col=plot_col, xlabel=survival_units, ylabel='Overall Survival (%)' if how == 'os' else 'Progression-Free Survival (%)', censor_col='deceased' if how == 'os' else 'progressed_or_deceased', survival_col=how, strata_col=strata, threshold=threshold, ax=ax, ci_show=ci_show, with_condition_color=with_condition_color, no_condition_color=no_condition_color, with_condition_label=with_condition_label, no_condition_label=no_condition_label, color_palette=color_palette, label_map=label_map, color_map=color_map) return results
def change_cell(self, x, y, ch, fg, bg): """Change cell in position (x;y). """ self.console.draw_char(x, y, ch, fg, bg)
def function[change_cell, parameter[self, x, y, ch, fg, bg]]: constant[Change cell in position (x;y). ] call[name[self].console.draw_char, parameter[name[x], name[y], name[ch], name[fg], name[bg]]]
keyword[def] identifier[change_cell] ( identifier[self] , identifier[x] , identifier[y] , identifier[ch] , identifier[fg] , identifier[bg] ): literal[string] identifier[self] . identifier[console] . identifier[draw_char] ( identifier[x] , identifier[y] , identifier[ch] , identifier[fg] , identifier[bg] )
def change_cell(self, x, y, ch, fg, bg): """Change cell in position (x;y). """ self.console.draw_char(x, y, ch, fg, bg)
def _emit_internal(self, sid, event, data, namespace=None, id=None): """Send a message to a client.""" if six.PY2 and not self.binary: binary = False # pragma: nocover else: binary = None # tuples are expanded to multiple arguments, everything else is sent # as a single argument if isinstance(data, tuple): data = list(data) else: data = [data] self._send_packet(sid, packet.Packet(packet.EVENT, namespace=namespace, data=[event] + data, id=id, binary=binary))
def function[_emit_internal, parameter[self, sid, event, data, namespace, id]]: constant[Send a message to a client.] if <ast.BoolOp object at 0x7da1b21ba290> begin[:] variable[binary] assign[=] constant[False] if call[name[isinstance], parameter[name[data], name[tuple]]] begin[:] variable[data] assign[=] call[name[list], parameter[name[data]]] call[name[self]._send_packet, parameter[name[sid], call[name[packet].Packet, parameter[name[packet].EVENT]]]]
keyword[def] identifier[_emit_internal] ( identifier[self] , identifier[sid] , identifier[event] , identifier[data] , identifier[namespace] = keyword[None] , identifier[id] = keyword[None] ): literal[string] keyword[if] identifier[six] . identifier[PY2] keyword[and] keyword[not] identifier[self] . identifier[binary] : identifier[binary] = keyword[False] keyword[else] : identifier[binary] = keyword[None] keyword[if] identifier[isinstance] ( identifier[data] , identifier[tuple] ): identifier[data] = identifier[list] ( identifier[data] ) keyword[else] : identifier[data] =[ identifier[data] ] identifier[self] . identifier[_send_packet] ( identifier[sid] , identifier[packet] . identifier[Packet] ( identifier[packet] . identifier[EVENT] , identifier[namespace] = identifier[namespace] , identifier[data] =[ identifier[event] ]+ identifier[data] , identifier[id] = identifier[id] , identifier[binary] = identifier[binary] ))
def _emit_internal(self, sid, event, data, namespace=None, id=None): """Send a message to a client.""" if six.PY2 and (not self.binary): binary = False # pragma: nocover # depends on [control=['if'], data=[]] else: binary = None # tuples are expanded to multiple arguments, everything else is sent # as a single argument if isinstance(data, tuple): data = list(data) # depends on [control=['if'], data=[]] else: data = [data] self._send_packet(sid, packet.Packet(packet.EVENT, namespace=namespace, data=[event] + data, id=id, binary=binary))
def routeByMonthAbbr(self, request, year, monthAbbr): """Route a request with a month abbreviation to the monthly view.""" month = (DatePictures['Mon'].index(monthAbbr.lower()) // 4) + 1 return self.serveMonth(request, year, month)
def function[routeByMonthAbbr, parameter[self, request, year, monthAbbr]]: constant[Route a request with a month abbreviation to the monthly view.] variable[month] assign[=] binary_operation[binary_operation[call[call[name[DatePictures]][constant[Mon]].index, parameter[call[name[monthAbbr].lower, parameter[]]]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[4]] + constant[1]] return[call[name[self].serveMonth, parameter[name[request], name[year], name[month]]]]
keyword[def] identifier[routeByMonthAbbr] ( identifier[self] , identifier[request] , identifier[year] , identifier[monthAbbr] ): literal[string] identifier[month] =( identifier[DatePictures] [ literal[string] ]. identifier[index] ( identifier[monthAbbr] . identifier[lower] ())// literal[int] )+ literal[int] keyword[return] identifier[self] . identifier[serveMonth] ( identifier[request] , identifier[year] , identifier[month] )
def routeByMonthAbbr(self, request, year, monthAbbr): """Route a request with a month abbreviation to the monthly view.""" month = DatePictures['Mon'].index(monthAbbr.lower()) // 4 + 1 return self.serveMonth(request, year, month)
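A minimal sketch of the abbreviation-to-month arithmetic in routeByMonthAbbr, assuming DatePictures['Mon'] is a pipe-separated pattern of three-letter abbreviations so each entry occupies four characters; the MON table here is a hypothetical stand-in, not taken from the source:

# Hypothetical stand-in for DatePictures['Mon']: every month starts at a
# multiple of 4, so index // 4 recovers the zero-based month number.
MON = 'jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec'

def month_from_abbr(abbr):
    return (MON.index(abbr.lower()) // 4) + 1

assert month_from_abbr('Jan') == 1   # index 0  -> month 1
assert month_from_abbr('feb') == 2   # index 4  -> month 2
assert month_from_abbr('DEC') == 12  # index 44 -> month 12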
def _extents(self): """ A (cx, cy) 2-tuple representing the effective rendering area for text within this text frame when margins are taken into account. """ return ( self._parent.width - self.margin_left - self.margin_right, self._parent.height - self.margin_top - self.margin_bottom )
def function[_extents, parameter[self]]: constant[ A (cx, cy) 2-tuple representing the effective rendering area for text within this text frame when margins are taken into account. ] return[tuple[[<ast.BinOp object at 0x7da20c6ab8b0>, <ast.BinOp object at 0x7da20c6a9b40>]]]
keyword[def] identifier[_extents] ( identifier[self] ): literal[string] keyword[return] ( identifier[self] . identifier[_parent] . identifier[width] - identifier[self] . identifier[margin_left] - identifier[self] . identifier[margin_right] , identifier[self] . identifier[_parent] . identifier[height] - identifier[self] . identifier[margin_top] - identifier[self] . identifier[margin_bottom] )
def _extents(self): """ A (cx, cy) 2-tuple representing the effective rendering area for text within this text frame when margins are taken into account. """ return (self._parent.width - self.margin_left - self.margin_right, self._parent.height - self.margin_top - self.margin_bottom)
def _batch_gather_with_broadcast(params, indices, axis): """Like batch_gather, but broadcasts to the left of axis.""" # batch_gather assumes... # params.shape = [A1,...,AN, B1,...,BM] # indices.shape = [A1,...,AN, C] # which gives output of shape # [A1,...,AN, C, B1,...,BM] # Here we broadcast dims of each to the left of `axis` in params, and left of # the rightmost dim in indices, e.g. we can # have # params.shape = [A1,...,AN, B1,...,BM] # indices.shape = [a1,...,aN, C], # where ai broadcasts with Ai. # leading_bcast_shape is the broadcast of [A1,...,AN] and [a1,...,aN]. leading_bcast_shape = tf.broadcast_dynamic_shape( tf.shape(input=params)[:axis], tf.shape(input=indices)[:-1]) params += tf.zeros( tf.concat((leading_bcast_shape, tf.shape(input=params)[axis:]), axis=0), dtype=params.dtype) indices += tf.zeros( tf.concat((leading_bcast_shape, tf.shape(input=indices)[-1:]), axis=0), dtype=indices.dtype) return tf.compat.v1.batch_gather(params, indices)
def function[_batch_gather_with_broadcast, parameter[params, indices, axis]]: constant[Like batch_gather, but broadcasts to the left of axis.] variable[leading_bcast_shape] assign[=] call[name[tf].broadcast_dynamic_shape, parameter[call[call[name[tf].shape, parameter[]]][<ast.Slice object at 0x7da1b03e36a0>], call[call[name[tf].shape, parameter[]]][<ast.Slice object at 0x7da1b03e3970>]]] <ast.AugAssign object at 0x7da1b03e32e0> <ast.AugAssign object at 0x7da1b03e3130> return[call[name[tf].compat.v1.batch_gather, parameter[name[params], name[indices]]]]
keyword[def] identifier[_batch_gather_with_broadcast] ( identifier[params] , identifier[indices] , identifier[axis] ): literal[string] identifier[leading_bcast_shape] = identifier[tf] . identifier[broadcast_dynamic_shape] ( identifier[tf] . identifier[shape] ( identifier[input] = identifier[params] )[: identifier[axis] ], identifier[tf] . identifier[shape] ( identifier[input] = identifier[indices] )[:- literal[int] ]) identifier[params] += identifier[tf] . identifier[zeros] ( identifier[tf] . identifier[concat] (( identifier[leading_bcast_shape] , identifier[tf] . identifier[shape] ( identifier[input] = identifier[params] )[ identifier[axis] :]), identifier[axis] = literal[int] ), identifier[dtype] = identifier[params] . identifier[dtype] ) identifier[indices] += identifier[tf] . identifier[zeros] ( identifier[tf] . identifier[concat] (( identifier[leading_bcast_shape] , identifier[tf] . identifier[shape] ( identifier[input] = identifier[indices] )[- literal[int] :]), identifier[axis] = literal[int] ), identifier[dtype] = identifier[indices] . identifier[dtype] ) keyword[return] identifier[tf] . identifier[compat] . identifier[v1] . identifier[batch_gather] ( identifier[params] , identifier[indices] )
def _batch_gather_with_broadcast(params, indices, axis): """Like batch_gather, but broadcasts to the left of axis.""" # batch_gather assumes... # params.shape = [A1,...,AN, B1,...,BM] # indices.shape = [A1,...,AN, C] # which gives output of shape # [A1,...,AN, C, B1,...,BM] # Here we broadcast dims of each to the left of `axis` in params, and left of # the rightmost dim in indices, e.g. we can # have # params.shape = [A1,...,AN, B1,...,BM] # indices.shape = [a1,...,aN, C], # where ai broadcasts with Ai. # leading_bcast_shape is the broadcast of [A1,...,AN] and [a1,...,aN]. leading_bcast_shape = tf.broadcast_dynamic_shape(tf.shape(input=params)[:axis], tf.shape(input=indices)[:-1]) params += tf.zeros(tf.concat((leading_bcast_shape, tf.shape(input=params)[axis:]), axis=0), dtype=params.dtype) indices += tf.zeros(tf.concat((leading_bcast_shape, tf.shape(input=indices)[-1:]), axis=0), dtype=indices.dtype) return tf.compat.v1.batch_gather(params, indices)
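To make the shape bookkeeping in _batch_gather_with_broadcast concrete, here is a small NumPy illustration of the plain batch-gather semantics it builds on (out[a, c] = params[a, indices[a, c]]); this mirrors tf.compat.v1.batch_gather on a 2-D case and is only a sketch of the semantics, not the TensorFlow code path:

import numpy as np

params = np.array([[10., 11., 12.],
                   [20., 21., 22.]])  # shape [A=2, B=3]
indices = np.array([[2, 0],
                    [1, 1]])          # shape [A=2, C=2]

# Gather along the last axis of params, batched over the first axis.
out = np.take_along_axis(params, indices, axis=1)
print(out)  # [[12. 10.]
            #  [21. 21.]]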
def get_library_name(database='Human'): """return enrichr active enrichr library name. :param str database: Select one from { 'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' } """ # make a get request to get the gmt names and meta data from Enrichr # old code # response = requests.get('http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=meta') # gmt_data = response.json() # # generate list of lib names # libs = [] # # get library names # for inst_gmt in gmt_data['libraries']: # # only include active gmts # if inst_gmt['isActive'] == True: # libs.append(inst_gmt['libraryName']) if database not in ['Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm']: sys.stderr.write("""No supported database. Please input one of these: 'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' """) return if database in ['Human', 'Mouse']: database='' lib_url='http://amp.pharm.mssm.edu/%sEnrichr/datasetStatistics'%database libs_json = json.loads(requests.get(lib_url).text) libs = [lib['libraryName'] for lib in libs_json['statistics']] return sorted(libs)
def function[get_library_name, parameter[database]]: constant[return enrichr active enrichr library name. :param str database: Select one from { 'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' } ] if compare[name[database] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da20c6aa320>, <ast.Constant object at 0x7da20c6aa0e0>, <ast.Constant object at 0x7da20c6a95d0>, <ast.Constant object at 0x7da20c6aaec0>, <ast.Constant object at 0x7da20c6aa530>, <ast.Constant object at 0x7da20c6a9030>]]] begin[:] call[name[sys].stderr.write, parameter[constant[No supported database. Please input one of these: 'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' ]]] return[None] if compare[name[database] in list[[<ast.Constant object at 0x7da20c6aa650>, <ast.Constant object at 0x7da20c6a97e0>]]] begin[:] variable[database] assign[=] constant[] variable[lib_url] assign[=] binary_operation[constant[http://amp.pharm.mssm.edu/%sEnrichr/datasetStatistics] <ast.Mod object at 0x7da2590d6920> name[database]] variable[libs_json] assign[=] call[name[json].loads, parameter[call[name[requests].get, parameter[name[lib_url]]].text]] variable[libs] assign[=] <ast.ListComp object at 0x7da20c6aa200> return[call[name[sorted], parameter[name[libs]]]]
keyword[def] identifier[get_library_name] ( identifier[database] = literal[string] ): literal[string] keyword[if] identifier[database] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]: identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] ) keyword[return] keyword[if] identifier[database] keyword[in] [ literal[string] , literal[string] ]: identifier[database] = literal[string] identifier[lib_url] = literal[string] % identifier[database] identifier[libs_json] = identifier[json] . identifier[loads] ( identifier[requests] . identifier[get] ( identifier[lib_url] ). identifier[text] ) identifier[libs] =[ identifier[lib] [ literal[string] ] keyword[for] identifier[lib] keyword[in] identifier[libs_json] [ literal[string] ]] keyword[return] identifier[sorted] ( identifier[libs] )
def get_library_name(database='Human'): """return enrichr active enrichr library name. :param str database: Select one from { 'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' } """ # make a get request to get the gmt names and meta data from Enrichr # old code # response = requests.get('http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=meta') # gmt_data = response.json() # # generate list of lib names # libs = [] # # get library names # for inst_gmt in gmt_data['libraries']: # # only include active gmts # if inst_gmt['isActive'] == True: # libs.append(inst_gmt['libraryName']) if database not in ['Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm']: sys.stderr.write("No supported database. Please input one of these:\n 'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' ") return # depends on [control=['if'], data=[]] if database in ['Human', 'Mouse']: database = '' # depends on [control=['if'], data=['database']] lib_url = 'http://amp.pharm.mssm.edu/%sEnrichr/datasetStatistics' % database libs_json = json.loads(requests.get(lib_url).text) libs = [lib['libraryName'] for lib in libs_json['statistics']] return sorted(libs)
def parse(self): """ Parse the vmstat file :return: status of the metric parse """ file_status = True for input_file in self.infile_list: file_status = file_status and naarad.utils.is_valid_file(input_file) if not file_status: return False status = True data = {} # stores the data of each column for input_file in self.infile_list: logger.info('Processing : %s', input_file) timestamp_format = None with open(input_file) as fh: for line in fh: words = line.split() # [0] is day; [1] is seconds; [2] is field name:; [3] is value [4] is unit if len(words) < 3: continue ts = words[0] + " " + words[1] if not timestamp_format or timestamp_format == 'unknown': timestamp_format = naarad.utils.detect_timestamp_format(ts) if timestamp_format == 'unknown': continue ts = naarad.utils.get_standardized_timestamp(ts, timestamp_format) if self.ts_out_of_range(ts): continue col = words[2].strip(':') # only process sub_metrics specified in config. if self.sub_metrics and col not in self.sub_metrics: continue # add unit to metric description; most of the metrics have 'KB'; a few others do not have unit, they are in number of pages if len(words) > 4 and words[4]: unit = words[4] else: unit = 'pages' self.sub_metric_unit[col] = unit # stores the values in data[] before finally writing out if col in self.column_csv_map: out_csv = self.column_csv_map[col] else: out_csv = self.get_csv(col) # column_csv_map[] is assigned in get_csv() data[out_csv] = [] data[out_csv].append(ts + "," + words[3]) # post processing, putting data in csv files; for csv in data.keys(): self.csv_files.append(csv) with open(csv, 'w') as fh: fh.write('\n'.join(sorted(data[csv]))) return status
def function[parse, parameter[self]]: constant[ Parse the vmstat file :return: status of the metric parse ] variable[file_status] assign[=] constant[True] for taget[name[input_file]] in starred[name[self].infile_list] begin[:] variable[file_status] assign[=] <ast.BoolOp object at 0x7da1b00daa40> if <ast.UnaryOp object at 0x7da1b00db610> begin[:] return[constant[False]] variable[status] assign[=] constant[True] variable[data] assign[=] dictionary[[], []] for taget[name[input_file]] in starred[name[self].infile_list] begin[:] call[name[logger].info, parameter[constant[Processing : %s], name[input_file]]] variable[timestamp_format] assign[=] constant[None] with call[name[open], parameter[name[input_file]]] begin[:] for taget[name[line]] in starred[name[fh]] begin[:] variable[words] assign[=] call[name[line].split, parameter[]] if compare[call[name[len], parameter[name[words]]] less[<] constant[3]] begin[:] continue variable[ts] assign[=] binary_operation[binary_operation[call[name[words]][constant[0]] + constant[ ]] + call[name[words]][constant[1]]] if <ast.BoolOp object at 0x7da1b00dac80> begin[:] variable[timestamp_format] assign[=] call[name[naarad].utils.detect_timestamp_format, parameter[name[ts]]] if compare[name[timestamp_format] equal[==] constant[unknown]] begin[:] continue variable[ts] assign[=] call[name[naarad].utils.get_standardized_timestamp, parameter[name[ts], name[timestamp_format]]] if call[name[self].ts_out_of_range, parameter[name[ts]]] begin[:] continue variable[col] assign[=] call[call[name[words]][constant[2]].strip, parameter[constant[:]]] if <ast.BoolOp object at 0x7da1aff75390> begin[:] continue if <ast.BoolOp object at 0x7da1aff755a0> begin[:] variable[unit] assign[=] call[name[words]][constant[4]] call[name[self].sub_metric_unit][name[col]] assign[=] name[unit] if compare[name[col] in name[self].column_csv_map] begin[:] variable[out_csv] assign[=] call[name[self].column_csv_map][name[col]] call[call[name[data]][name[out_csv]].append, parameter[binary_operation[binary_operation[name[ts] + constant[,]] + call[name[words]][constant[3]]]]] for taget[name[csv]] in starred[call[name[data].keys, parameter[]]] begin[:] call[name[self].csv_files.append, parameter[name[csv]]] with call[name[open], parameter[name[csv], constant[w]]] begin[:] call[name[fh].write, parameter[call[constant[ ].join, parameter[call[name[sorted], parameter[call[name[data]][name[csv]]]]]]]] return[name[status]]
keyword[def] identifier[parse] ( identifier[self] ): literal[string] identifier[file_status] = keyword[True] keyword[for] identifier[input_file] keyword[in] identifier[self] . identifier[infile_list] : identifier[file_status] = identifier[file_status] keyword[and] identifier[naarad] . identifier[utils] . identifier[is_valid_file] ( identifier[input_file] ) keyword[if] keyword[not] identifier[file_status] : keyword[return] keyword[False] identifier[status] = keyword[True] identifier[data] ={} keyword[for] identifier[input_file] keyword[in] identifier[self] . identifier[infile_list] : identifier[logger] . identifier[info] ( literal[string] , identifier[input_file] ) identifier[timestamp_format] = keyword[None] keyword[with] identifier[open] ( identifier[input_file] ) keyword[as] identifier[fh] : keyword[for] identifier[line] keyword[in] identifier[fh] : identifier[words] = identifier[line] . identifier[split] () keyword[if] identifier[len] ( identifier[words] )< literal[int] : keyword[continue] identifier[ts] = identifier[words] [ literal[int] ]+ literal[string] + identifier[words] [ literal[int] ] keyword[if] keyword[not] identifier[timestamp_format] keyword[or] identifier[timestamp_format] == literal[string] : identifier[timestamp_format] = identifier[naarad] . identifier[utils] . identifier[detect_timestamp_format] ( identifier[ts] ) keyword[if] identifier[timestamp_format] == literal[string] : keyword[continue] identifier[ts] = identifier[naarad] . identifier[utils] . identifier[get_standardized_timestamp] ( identifier[ts] , identifier[timestamp_format] ) keyword[if] identifier[self] . identifier[ts_out_of_range] ( identifier[ts] ): keyword[continue] identifier[col] = identifier[words] [ literal[int] ]. identifier[strip] ( literal[string] ) keyword[if] identifier[self] . identifier[sub_metrics] keyword[and] identifier[col] keyword[not] keyword[in] identifier[self] . identifier[sub_metrics] : keyword[continue] keyword[if] identifier[len] ( identifier[words] )> literal[int] keyword[and] identifier[words] [ literal[int] ]: identifier[unit] = identifier[words] [ literal[int] ] keyword[else] : identifier[unit] = literal[string] identifier[self] . identifier[sub_metric_unit] [ identifier[col] ]= identifier[unit] keyword[if] identifier[col] keyword[in] identifier[self] . identifier[column_csv_map] : identifier[out_csv] = identifier[self] . identifier[column_csv_map] [ identifier[col] ] keyword[else] : identifier[out_csv] = identifier[self] . identifier[get_csv] ( identifier[col] ) identifier[data] [ identifier[out_csv] ]=[] identifier[data] [ identifier[out_csv] ]. identifier[append] ( identifier[ts] + literal[string] + identifier[words] [ literal[int] ]) keyword[for] identifier[csv] keyword[in] identifier[data] . identifier[keys] (): identifier[self] . identifier[csv_files] . identifier[append] ( identifier[csv] ) keyword[with] identifier[open] ( identifier[csv] , literal[string] ) keyword[as] identifier[fh] : identifier[fh] . identifier[write] ( literal[string] . identifier[join] ( identifier[sorted] ( identifier[data] [ identifier[csv] ]))) keyword[return] identifier[status]
def parse(self): """ Parse the vmstat file :return: status of the metric parse """ file_status = True for input_file in self.infile_list: file_status = file_status and naarad.utils.is_valid_file(input_file) if not file_status: return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['input_file']] status = True data = {} # stores the data of each column for input_file in self.infile_list: logger.info('Processing : %s', input_file) timestamp_format = None with open(input_file) as fh: for line in fh: words = line.split() # [0] is day; [1] is seconds; [2] is field name:; [3] is value [4] is unit if len(words) < 3: continue # depends on [control=['if'], data=[]] ts = words[0] + ' ' + words[1] if not timestamp_format or timestamp_format == 'unknown': timestamp_format = naarad.utils.detect_timestamp_format(ts) # depends on [control=['if'], data=[]] if timestamp_format == 'unknown': continue # depends on [control=['if'], data=[]] ts = naarad.utils.get_standardized_timestamp(ts, timestamp_format) if self.ts_out_of_range(ts): continue # depends on [control=['if'], data=[]] col = words[2].strip(':') # only process sub_metrics specified in config. if self.sub_metrics and col not in self.sub_metrics: continue # depends on [control=['if'], data=[]] # add unit to metric description; most of the metrics have 'KB'; a few others do not have unit, they are in number of pages if len(words) > 4 and words[4]: unit = words[4] # depends on [control=['if'], data=[]] else: unit = 'pages' self.sub_metric_unit[col] = unit # stores the values in data[] before finally writing out if col in self.column_csv_map: out_csv = self.column_csv_map[col] # depends on [control=['if'], data=['col']] else: out_csv = self.get_csv(col) # column_csv_map[] is assigned in get_csv() data[out_csv] = [] data[out_csv].append(ts + ',' + words[3]) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['fh']] # depends on [control=['for'], data=['input_file']] # post processing, putting data in csv files; for csv in data.keys(): self.csv_files.append(csv) with open(csv, 'w') as fh: fh.write('\n'.join(sorted(data[csv]))) # depends on [control=['with'], data=['fh']] # depends on [control=['for'], data=['csv']] return status
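To illustrate the per-line layout the vmstat parser above assumes (day, seconds, field name with a trailing colon, value, optional unit), here is a standalone split of one hypothetical input line; the timestamp helpers from naarad.utils are not reproduced:

line = '2017-05-01 13:45:02 MemFree: 1630824 kB'
words = line.split()

ts = words[0] + ' ' + words[1]    # '2017-05-01 13:45:02'
col = words[2].strip(':')         # 'MemFree'
value, unit = words[3], words[4]  # '1630824', 'kB'
print(ts, col, value, unit)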
def remove_role_from_user(self, user, role): """Removes a role from a user. :param user: The user to manipulate :param role: The role to remove from the user """ rv = False user, role = self._prepare_role_modify_args(user, role) if role in user.roles: rv = True user.roles.remove(role) self.put(user) return rv
def function[remove_role_from_user, parameter[self, user, role]]: constant[Removes a role from a user. :param user: The user to manipulate :param role: The role to remove from the user ] variable[rv] assign[=] constant[False] <ast.Tuple object at 0x7da18fe91870> assign[=] call[name[self]._prepare_role_modify_args, parameter[name[user], name[role]]] if compare[name[role] in name[user].roles] begin[:] variable[rv] assign[=] constant[True] call[name[user].roles.remove, parameter[name[role]]] call[name[self].put, parameter[name[user]]] return[name[rv]]
keyword[def] identifier[remove_role_from_user] ( identifier[self] , identifier[user] , identifier[role] ): literal[string] identifier[rv] = keyword[False] identifier[user] , identifier[role] = identifier[self] . identifier[_prepare_role_modify_args] ( identifier[user] , identifier[role] ) keyword[if] identifier[role] keyword[in] identifier[user] . identifier[roles] : identifier[rv] = keyword[True] identifier[user] . identifier[roles] . identifier[remove] ( identifier[role] ) identifier[self] . identifier[put] ( identifier[user] ) keyword[return] identifier[rv]
def remove_role_from_user(self, user, role): """Removes a role from a user. :param user: The user to manipulate :param role: The role to remove from the user """ rv = False (user, role) = self._prepare_role_modify_args(user, role) if role in user.roles: rv = True user.roles.remove(role) self.put(user) # depends on [control=['if'], data=['role']] return rv
def set_fft_params(func): """Decorate a method to automatically convert quantities to samples """ @wraps(func) def wrapped_func(series, method_func, *args, **kwargs): """Wrap function to normalize FFT params before execution """ if isinstance(series, tuple): data = series[0] else: data = series # normalise FFT parmeters for all libraries normalize_fft_params(data, kwargs=kwargs, func=method_func) return func(series, method_func, *args, **kwargs) return wrapped_func
def function[set_fft_params, parameter[func]]: constant[Decorate a method to automatically convert quantities to samples ] def function[wrapped_func, parameter[series, method_func]]: constant[Wrap function to normalize FFT params before execution ] if call[name[isinstance], parameter[name[series], name[tuple]]] begin[:] variable[data] assign[=] call[name[series]][constant[0]] call[name[normalize_fft_params], parameter[name[data]]] return[call[name[func], parameter[name[series], name[method_func], <ast.Starred object at 0x7da20e955270>]]] return[name[wrapped_func]]
keyword[def] identifier[set_fft_params] ( identifier[func] ): literal[string] @ identifier[wraps] ( identifier[func] ) keyword[def] identifier[wrapped_func] ( identifier[series] , identifier[method_func] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[isinstance] ( identifier[series] , identifier[tuple] ): identifier[data] = identifier[series] [ literal[int] ] keyword[else] : identifier[data] = identifier[series] identifier[normalize_fft_params] ( identifier[data] , identifier[kwargs] = identifier[kwargs] , identifier[func] = identifier[method_func] ) keyword[return] identifier[func] ( identifier[series] , identifier[method_func] ,* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[wrapped_func]
def set_fft_params(func): """Decorate a method to automatically convert quantities to samples """ @wraps(func) def wrapped_func(series, method_func, *args, **kwargs): """Wrap function to normalize FFT params before execution """ if isinstance(series, tuple): data = series[0] # depends on [control=['if'], data=[]] else: data = series # normalise FFT parmeters for all libraries normalize_fft_params(data, kwargs=kwargs, func=method_func) return func(series, method_func, *args, **kwargs) return wrapped_func
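A stripped-down, runnable sketch of the decorator pattern used by set_fft_params, with a hypothetical stand-in for normalize_fft_params so the wrapping behaviour can be exercised in isolation (the real normalizer lives elsewhere in the package):

from functools import wraps

def normalize_kwargs(data, kwargs):
    # Hypothetical stand-in: convert an 'fftlength' in seconds into a
    # sample count before the wrapped method runs.
    if 'fftlength' in kwargs:
        kwargs['nfft'] = int(kwargs.pop('fftlength') * data['sample_rate'])

def set_params(func):
    @wraps(func)
    def wrapped(data, *args, **kwargs):
        normalize_kwargs(data, kwargs)
        return func(data, *args, **kwargs)
    return wrapped

@set_params
def spectrum(data, nfft=None):
    return nfft

print(spectrum({'sample_rate': 256}, fftlength=2))  # 512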
def nlargest(n, iterable, key=None): """Find the n largest elements in a dataset. Equivalent to: sorted(iterable, key=key, reverse=True)[:n] """ # Short-cut for n==1 is to use max() if n == 1: it = iter(iterable) sentinel = object() if key is None: result = max(it, default=sentinel) else: result = max(it, default=sentinel, key=key) return [] if result is sentinel else [result] # When n>=size, it's faster to use sorted() try: size = len(iterable) except (TypeError, AttributeError): pass else: if n >= size: return sorted(iterable, key=key, reverse=True)[:n] # When key is none, use simpler decoration if key is None: it = iter(iterable) result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)] if not result: return result heapify(result) top = result[0][0] order = -n _heapreplace = heapreplace for elem in it: if top < elem: _heapreplace(result, (elem, order)) top = result[0][0] order -= 1 result.sort(reverse=True) return [r[0] for r in result] # General case, slowest method it = iter(iterable) result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)] if not result: return result heapify(result) top = result[0][0] order = -n _heapreplace = heapreplace for elem in it: k = key(elem) if top < k: _heapreplace(result, (k, order, elem)) top = result[0][0] order -= 1 result.sort(reverse=True) return [r[2] for r in result]
def function[nlargest, parameter[n, iterable, key]]: constant[Find the n largest elements in a dataset. Equivalent to: sorted(iterable, key=key, reverse=True)[:n] ] if compare[name[n] equal[==] constant[1]] begin[:] variable[it] assign[=] call[name[iter], parameter[name[iterable]]] variable[sentinel] assign[=] call[name[object], parameter[]] if compare[name[key] is constant[None]] begin[:] variable[result] assign[=] call[name[max], parameter[name[it]]] return[<ast.IfExp object at 0x7da1b20a8190>] <ast.Try object at 0x7da1b20aac80> if compare[name[key] is constant[None]] begin[:] variable[it] assign[=] call[name[iter], parameter[name[iterable]]] variable[result] assign[=] <ast.ListComp object at 0x7da1b20a94b0> if <ast.UnaryOp object at 0x7da1b20a8640> begin[:] return[name[result]] call[name[heapify], parameter[name[result]]] variable[top] assign[=] call[call[name[result]][constant[0]]][constant[0]] variable[order] assign[=] <ast.UnaryOp object at 0x7da1b20a88e0> variable[_heapreplace] assign[=] name[heapreplace] for taget[name[elem]] in starred[name[it]] begin[:] if compare[name[top] less[<] name[elem]] begin[:] call[name[_heapreplace], parameter[name[result], tuple[[<ast.Name object at 0x7da1b20a80a0>, <ast.Name object at 0x7da1b20a8670>]]]] variable[top] assign[=] call[call[name[result]][constant[0]]][constant[0]] <ast.AugAssign object at 0x7da1b20a8070> call[name[result].sort, parameter[]] return[<ast.ListComp object at 0x7da1b20a8460>] variable[it] assign[=] call[name[iter], parameter[name[iterable]]] variable[result] assign[=] <ast.ListComp object at 0x7da20e954310> if <ast.UnaryOp object at 0x7da20e957850> begin[:] return[name[result]] call[name[heapify], parameter[name[result]]] variable[top] assign[=] call[call[name[result]][constant[0]]][constant[0]] variable[order] assign[=] <ast.UnaryOp object at 0x7da20e956440> variable[_heapreplace] assign[=] name[heapreplace] for taget[name[elem]] in starred[name[it]] begin[:] variable[k] assign[=] call[name[key], parameter[name[elem]]] if compare[name[top] less[<] name[k]] begin[:] call[name[_heapreplace], parameter[name[result], tuple[[<ast.Name object at 0x7da20e9568c0>, <ast.Name object at 0x7da20e9572b0>, <ast.Name object at 0x7da20e954ee0>]]]] variable[top] assign[=] call[call[name[result]][constant[0]]][constant[0]] <ast.AugAssign object at 0x7da20e9549a0> call[name[result].sort, parameter[]] return[<ast.ListComp object at 0x7da20e957250>]
keyword[def] identifier[nlargest] ( identifier[n] , identifier[iterable] , identifier[key] = keyword[None] ): literal[string] keyword[if] identifier[n] == literal[int] : identifier[it] = identifier[iter] ( identifier[iterable] ) identifier[sentinel] = identifier[object] () keyword[if] identifier[key] keyword[is] keyword[None] : identifier[result] = identifier[max] ( identifier[it] , identifier[default] = identifier[sentinel] ) keyword[else] : identifier[result] = identifier[max] ( identifier[it] , identifier[default] = identifier[sentinel] , identifier[key] = identifier[key] ) keyword[return] [] keyword[if] identifier[result] keyword[is] identifier[sentinel] keyword[else] [ identifier[result] ] keyword[try] : identifier[size] = identifier[len] ( identifier[iterable] ) keyword[except] ( identifier[TypeError] , identifier[AttributeError] ): keyword[pass] keyword[else] : keyword[if] identifier[n] >= identifier[size] : keyword[return] identifier[sorted] ( identifier[iterable] , identifier[key] = identifier[key] , identifier[reverse] = keyword[True] )[: identifier[n] ] keyword[if] identifier[key] keyword[is] keyword[None] : identifier[it] = identifier[iter] ( identifier[iterable] ) identifier[result] =[( identifier[elem] , identifier[i] ) keyword[for] identifier[i] , identifier[elem] keyword[in] identifier[zip] ( identifier[range] ( literal[int] ,- identifier[n] ,- literal[int] ), identifier[it] )] keyword[if] keyword[not] identifier[result] : keyword[return] identifier[result] identifier[heapify] ( identifier[result] ) identifier[top] = identifier[result] [ literal[int] ][ literal[int] ] identifier[order] =- identifier[n] identifier[_heapreplace] = identifier[heapreplace] keyword[for] identifier[elem] keyword[in] identifier[it] : keyword[if] identifier[top] < identifier[elem] : identifier[_heapreplace] ( identifier[result] ,( identifier[elem] , identifier[order] )) identifier[top] = identifier[result] [ literal[int] ][ literal[int] ] identifier[order] -= literal[int] identifier[result] . identifier[sort] ( identifier[reverse] = keyword[True] ) keyword[return] [ identifier[r] [ literal[int] ] keyword[for] identifier[r] keyword[in] identifier[result] ] identifier[it] = identifier[iter] ( identifier[iterable] ) identifier[result] =[( identifier[key] ( identifier[elem] ), identifier[i] , identifier[elem] ) keyword[for] identifier[i] , identifier[elem] keyword[in] identifier[zip] ( identifier[range] ( literal[int] ,- identifier[n] ,- literal[int] ), identifier[it] )] keyword[if] keyword[not] identifier[result] : keyword[return] identifier[result] identifier[heapify] ( identifier[result] ) identifier[top] = identifier[result] [ literal[int] ][ literal[int] ] identifier[order] =- identifier[n] identifier[_heapreplace] = identifier[heapreplace] keyword[for] identifier[elem] keyword[in] identifier[it] : identifier[k] = identifier[key] ( identifier[elem] ) keyword[if] identifier[top] < identifier[k] : identifier[_heapreplace] ( identifier[result] ,( identifier[k] , identifier[order] , identifier[elem] )) identifier[top] = identifier[result] [ literal[int] ][ literal[int] ] identifier[order] -= literal[int] identifier[result] . identifier[sort] ( identifier[reverse] = keyword[True] ) keyword[return] [ identifier[r] [ literal[int] ] keyword[for] identifier[r] keyword[in] identifier[result] ]
def nlargest(n, iterable, key=None): """Find the n largest elements in a dataset. Equivalent to: sorted(iterable, key=key, reverse=True)[:n] """ # Short-cut for n==1 is to use max() if n == 1: it = iter(iterable) sentinel = object() if key is None: result = max(it, default=sentinel) # depends on [control=['if'], data=[]] else: result = max(it, default=sentinel, key=key) return [] if result is sentinel else [result] # depends on [control=['if'], data=[]] # When n>=size, it's faster to use sorted() try: size = len(iterable) # depends on [control=['try'], data=[]] except (TypeError, AttributeError): pass # depends on [control=['except'], data=[]] else: if n >= size: return sorted(iterable, key=key, reverse=True)[:n] # depends on [control=['if'], data=['n']] # When key is none, use simpler decoration if key is None: it = iter(iterable) result = [(elem, i) for (i, elem) in zip(range(0, -n, -1), it)] if not result: return result # depends on [control=['if'], data=[]] heapify(result) top = result[0][0] order = -n _heapreplace = heapreplace for elem in it: if top < elem: _heapreplace(result, (elem, order)) top = result[0][0] order -= 1 # depends on [control=['if'], data=['top', 'elem']] # depends on [control=['for'], data=['elem']] result.sort(reverse=True) return [r[0] for r in result] # depends on [control=['if'], data=[]] # General case, slowest method it = iter(iterable) result = [(key(elem), i, elem) for (i, elem) in zip(range(0, -n, -1), it)] if not result: return result # depends on [control=['if'], data=[]] heapify(result) top = result[0][0] order = -n _heapreplace = heapreplace for elem in it: k = key(elem) if top < k: _heapreplace(result, (k, order, elem)) top = result[0][0] order -= 1 # depends on [control=['if'], data=['top', 'k']] # depends on [control=['for'], data=['elem']] result.sort(reverse=True) return [r[2] for r in result]
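A quick usage sketch for the nlargest implementation above; it assumes the heapq names the function body relies on have been imported into the same namespace:

from heapq import heapify, heapreplace  # required by nlargest above

prices = {'apple': 3, 'pear': 1, 'plum': 4, 'fig': 2}

print(nlargest(2, prices.values()))                 # [4, 3]
print(nlargest(2, prices, key=prices.__getitem__))  # ['plum', 'apple']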
def _parse_response(resp): """ Get xmlrpc response from scgi response """ # Assume they care for standards and send us CRLF (not just LF) try: headers, payload = resp.split("\r\n\r\n", 1) except (TypeError, ValueError) as exc: raise SCGIException("No header delimiter in SCGI response of length %d (%s)" % (len(resp), exc,)) headers = _parse_headers(headers) clen = headers.get("Content-Length") if clen is not None: # Check length, just in case the transport is bogus assert len(payload) == int(clen) return payload, headers
def function[_parse_response, parameter[resp]]: constant[ Get xmlrpc response from scgi response ] <ast.Try object at 0x7da18bcc9720> variable[headers] assign[=] call[name[_parse_headers], parameter[name[headers]]] variable[clen] assign[=] call[name[headers].get, parameter[constant[Content-Length]]] if compare[name[clen] is_not constant[None]] begin[:] assert[compare[call[name[len], parameter[name[payload]]] equal[==] call[name[int], parameter[name[clen]]]]] return[tuple[[<ast.Name object at 0x7da18bccb010>, <ast.Name object at 0x7da18bcca8f0>]]]
keyword[def] identifier[_parse_response] ( identifier[resp] ): literal[string] keyword[try] : identifier[headers] , identifier[payload] = identifier[resp] . identifier[split] ( literal[string] , literal[int] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ) keyword[as] identifier[exc] : keyword[raise] identifier[SCGIException] ( literal[string] %( identifier[len] ( identifier[resp] ), identifier[exc] ,)) identifier[headers] = identifier[_parse_headers] ( identifier[headers] ) identifier[clen] = identifier[headers] . identifier[get] ( literal[string] ) keyword[if] identifier[clen] keyword[is] keyword[not] keyword[None] : keyword[assert] identifier[len] ( identifier[payload] )== identifier[int] ( identifier[clen] ) keyword[return] identifier[payload] , identifier[headers]
def _parse_response(resp): """ Get xmlrpc response from scgi response """ # Assume they care for standards and send us CRLF (not just LF) try: (headers, payload) = resp.split('\r\n\r\n', 1) # depends on [control=['try'], data=[]] except (TypeError, ValueError) as exc: raise SCGIException('No header delimiter in SCGI response of length %d (%s)' % (len(resp), exc)) # depends on [control=['except'], data=['exc']] headers = _parse_headers(headers) clen = headers.get('Content-Length') if clen is not None: # Check length, just in case the transport is bogus assert len(payload) == int(clen) # depends on [control=['if'], data=['clen']] return (payload, headers)
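A worked example of the header/payload split performed by _parse_response, using a made-up SCGI-style response; the inline dict construction is only an assumed stand-in for the module's _parse_headers helper:

resp = ('Content-Length: 11\r\n'
        'Content-Type: text/xml\r\n'
        '\r\n'
        '<response/>')

head, payload = resp.split('\r\n\r\n', 1)

# Assumed header format: one 'Name: value' pair per CRLF-terminated line.
headers = dict(line.split(': ', 1) for line in head.split('\r\n'))

assert payload == '<response/>'
assert int(headers['Content-Length']) == len(payload)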
def sessions(status, access_key, id_only, all): ''' List and manage compute sessions. ''' fields = [ ('Session ID', 'sess_id'), ] with Session() as session: if is_admin(session): fields.append(('Owner', 'access_key')) if not id_only: fields.extend([ ('Image', 'image'), ('Tag', 'tag'), ('Created At', 'created_at',), ('Terminated At', 'terminated_at'), ('Status', 'status'), ('Occupied Resource', 'occupied_slots'), ('Used Memory (MiB)', 'mem_cur_bytes'), ('Max Used Memory (MiB)', 'mem_max_bytes'), ('CPU Using (%)', 'cpu_using'), ]) if is_legacy_server(): del fields[2] def execute_paginated_query(limit, offset): q = ''' query($limit:Int!, $offset:Int!, $ak:String, $status:String) { compute_session_list( limit:$limit, offset:$offset, access_key:$ak, status:$status) { items { $fields } total_count } }''' q = textwrap.dedent(q).strip() q = q.replace('$fields', ' '.join(item[1] for item in fields)) v = { 'limit': limit, 'offset': offset, 'status': status if status != 'ALL' else None, 'ak': access_key, } try: resp = session.Admin.query(q, v) except Exception as e: print_error(e) sys.exit(1) return resp['compute_session_list'] def round_mem(items): for item in items: if 'mem_cur_bytes' in item: item['mem_cur_bytes'] = round(item['mem_cur_bytes'] / 2 ** 20, 1) if 'mem_max_bytes' in item: item['mem_max_bytes'] = round(item['mem_max_bytes'] / 2 ** 20, 1) return items def _generate_paginated_results(interval): offset = 0 is_first = True total_count = -1 while True: limit = (interval if is_first else min(interval, total_count - offset)) try: result = execute_paginated_query(limit, offset) except Exception as e: print_error(e) sys.exit(1) offset += interval total_count = result['total_count'] items = result['items'] items = round_mem(items) if id_only: yield '\n'.join([item['sess_id'] for item in items]) + '\n' else: table = tabulate([item.values() for item in items], headers=(item[0] for item in fields)) if not is_first: table_rows = table.split('\n') table = '\n'.join(table_rows[2:]) yield table + '\n' if is_first: is_first = False if not offset < total_count: break with Session() as session: paginating_interval = 10 if all: click.echo_via_pager(_generate_paginated_results(paginating_interval)) else: result = execute_paginated_query(paginating_interval, offset=0) total_count = result['total_count'] if total_count == 0: print('There are no compute sessions currently {0}.' .format(status.lower())) return items = result['items'] items = round_mem(items) if id_only: for item in items: print(item['sess_id']) else: print(tabulate([item.values() for item in items], headers=(item[0] for item in fields))) if total_count > paginating_interval: print("More sessions can be displayed by using --all option.")
def function[sessions, parameter[status, access_key, id_only, all]]: constant[ List and manage compute sessions. ] variable[fields] assign[=] list[[<ast.Tuple object at 0x7da207f9aec0>]] with call[name[Session], parameter[]] begin[:] if call[name[is_admin], parameter[name[session]]] begin[:] call[name[fields].append, parameter[tuple[[<ast.Constant object at 0x7da207f9b8e0>, <ast.Constant object at 0x7da207f98cd0>]]]] if <ast.UnaryOp object at 0x7da207f98100> begin[:] call[name[fields].extend, parameter[list[[<ast.Tuple object at 0x7da207f9b1f0>, <ast.Tuple object at 0x7da207f9a050>, <ast.Tuple object at 0x7da207f9bd90>, <ast.Tuple object at 0x7da207f9a800>, <ast.Tuple object at 0x7da207f9a8f0>, <ast.Tuple object at 0x7da207f986d0>, <ast.Tuple object at 0x7da207f99a50>, <ast.Tuple object at 0x7da207f9a2c0>, <ast.Tuple object at 0x7da1b26ad660>]]]] if call[name[is_legacy_server], parameter[]] begin[:] <ast.Delete object at 0x7da1b26ae4a0> def function[execute_paginated_query, parameter[limit, offset]]: variable[q] assign[=] constant[ query($limit:Int!, $offset:Int!, $ak:String, $status:String) { compute_session_list( limit:$limit, offset:$offset, access_key:$ak, status:$status) { items { $fields } total_count } }] variable[q] assign[=] call[call[name[textwrap].dedent, parameter[name[q]]].strip, parameter[]] variable[q] assign[=] call[name[q].replace, parameter[constant[$fields], call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da207f98520>]]]] variable[v] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c63e0>, <ast.Constant object at 0x7da20c6c6110>, <ast.Constant object at 0x7da20c6c4e20>, <ast.Constant object at 0x7da20c6c48e0>], [<ast.Name object at 0x7da20c6c4430>, <ast.Name object at 0x7da20c6c40d0>, <ast.IfExp object at 0x7da20c6c7d00>, <ast.Name object at 0x7da20c6c7640>]] <ast.Try object at 0x7da20c6c77f0> return[call[name[resp]][constant[compute_session_list]]] def function[round_mem, parameter[items]]: for taget[name[item]] in starred[name[items]] begin[:] if compare[constant[mem_cur_bytes] in name[item]] begin[:] call[name[item]][constant[mem_cur_bytes]] assign[=] call[name[round], parameter[binary_operation[call[name[item]][constant[mem_cur_bytes]] / binary_operation[constant[2] ** constant[20]]], constant[1]]] if compare[constant[mem_max_bytes] in name[item]] begin[:] call[name[item]][constant[mem_max_bytes]] assign[=] call[name[round], parameter[binary_operation[call[name[item]][constant[mem_max_bytes]] / binary_operation[constant[2] ** constant[20]]], constant[1]]] return[name[items]] def function[_generate_paginated_results, parameter[interval]]: variable[offset] assign[=] constant[0] variable[is_first] assign[=] constant[True] variable[total_count] assign[=] <ast.UnaryOp object at 0x7da20c6c5b40> while constant[True] begin[:] variable[limit] assign[=] <ast.IfExp object at 0x7da20c6c7370> <ast.Try object at 0x7da20c6c7d30> <ast.AugAssign object at 0x7da20c6c4250> variable[total_count] assign[=] call[name[result]][constant[total_count]] variable[items] assign[=] call[name[result]][constant[items]] variable[items] assign[=] call[name[round_mem], parameter[name[items]]] if name[id_only] begin[:] <ast.Yield object at 0x7da20c6c6bf0> if name[is_first] begin[:] variable[is_first] assign[=] constant[False] if <ast.UnaryOp object at 0x7da20c6c6320> begin[:] break with call[name[Session], parameter[]] begin[:] variable[paginating_interval] assign[=] constant[10] if name[all] begin[:] call[name[click].echo_via_pager, parameter[call[name[_generate_paginated_results], parameter[name[paginating_interval]]]]]
keyword[def] identifier[sessions] ( identifier[status] , identifier[access_key] , identifier[id_only] , identifier[all] ): literal[string] identifier[fields] =[ ( literal[string] , literal[string] ), ] keyword[with] identifier[Session] () keyword[as] identifier[session] : keyword[if] identifier[is_admin] ( identifier[session] ): identifier[fields] . identifier[append] (( literal[string] , literal[string] )) keyword[if] keyword[not] identifier[id_only] : identifier[fields] . identifier[extend] ([ ( literal[string] , literal[string] ), ( literal[string] , literal[string] ), ( literal[string] , literal[string] ,), ( literal[string] , literal[string] ), ( literal[string] , literal[string] ), ( literal[string] , literal[string] ), ( literal[string] , literal[string] ), ( literal[string] , literal[string] ), ( literal[string] , literal[string] ), ]) keyword[if] identifier[is_legacy_server] (): keyword[del] identifier[fields] [ literal[int] ] keyword[def] identifier[execute_paginated_query] ( identifier[limit] , identifier[offset] ): identifier[q] = literal[string] identifier[q] = identifier[textwrap] . identifier[dedent] ( identifier[q] ). identifier[strip] () identifier[q] = identifier[q] . identifier[replace] ( literal[string] , literal[string] . identifier[join] ( identifier[item] [ literal[int] ] keyword[for] identifier[item] keyword[in] identifier[fields] )) identifier[v] ={ literal[string] : identifier[limit] , literal[string] : identifier[offset] , literal[string] : identifier[status] keyword[if] identifier[status] != literal[string] keyword[else] keyword[None] , literal[string] : identifier[access_key] , } keyword[try] : identifier[resp] = identifier[session] . identifier[Admin] . identifier[query] ( identifier[q] , identifier[v] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[print_error] ( identifier[e] ) identifier[sys] . identifier[exit] ( literal[int] ) keyword[return] identifier[resp] [ literal[string] ] keyword[def] identifier[round_mem] ( identifier[items] ): keyword[for] identifier[item] keyword[in] identifier[items] : keyword[if] literal[string] keyword[in] identifier[item] : identifier[item] [ literal[string] ]= identifier[round] ( identifier[item] [ literal[string] ]/ literal[int] ** literal[int] , literal[int] ) keyword[if] literal[string] keyword[in] identifier[item] : identifier[item] [ literal[string] ]= identifier[round] ( identifier[item] [ literal[string] ]/ literal[int] ** literal[int] , literal[int] ) keyword[return] identifier[items] keyword[def] identifier[_generate_paginated_results] ( identifier[interval] ): identifier[offset] = literal[int] identifier[is_first] = keyword[True] identifier[total_count] =- literal[int] keyword[while] keyword[True] : identifier[limit] =( identifier[interval] keyword[if] identifier[is_first] keyword[else] identifier[min] ( identifier[interval] , identifier[total_count] - identifier[offset] )) keyword[try] : identifier[result] = identifier[execute_paginated_query] ( identifier[limit] , identifier[offset] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[print_error] ( identifier[e] ) identifier[sys] . identifier[exit] ( literal[int] ) identifier[offset] += identifier[interval] identifier[total_count] = identifier[result] [ literal[string] ] identifier[items] = identifier[result] [ literal[string] ] identifier[items] = identifier[round_mem] ( identifier[items] ) keyword[if] identifier[id_only] : keyword[yield] literal[string] . identifier[join] ([ identifier[item] [ literal[string] ] keyword[for] identifier[item] keyword[in] identifier[items] ])+ literal[string] keyword[else] : identifier[table] = identifier[tabulate] ([ identifier[item] . identifier[values] () keyword[for] identifier[item] keyword[in] identifier[items] ], identifier[headers] =( identifier[item] [ literal[int] ] keyword[for] identifier[item] keyword[in] identifier[fields] )) keyword[if] keyword[not] identifier[is_first] : identifier[table_rows] = identifier[table] . identifier[split] ( literal[string] ) identifier[table] = literal[string] . identifier[join] ( identifier[table_rows] [ literal[int] :]) keyword[yield] identifier[table] + literal[string] keyword[if] identifier[is_first] : identifier[is_first] = keyword[False] keyword[if] keyword[not] identifier[offset] < identifier[total_count] : keyword[break] keyword[with] identifier[Session] () keyword[as] identifier[session] : identifier[paginating_interval] = literal[int] keyword[if] identifier[all] : identifier[click] . identifier[echo_via_pager] ( identifier[_generate_paginated_results] ( identifier[paginating_interval] )) keyword[else] : identifier[result] = identifier[execute_paginated_query] ( identifier[paginating_interval] , identifier[offset] = literal[int] ) identifier[total_count] = identifier[result] [ literal[string] ] keyword[if] identifier[total_count] == literal[int] : identifier[print] ( literal[string] . identifier[format] ( identifier[status] . identifier[lower] ())) keyword[return] identifier[items] = identifier[result] [ literal[string] ] identifier[items] = identifier[round_mem] ( identifier[items] ) keyword[if] identifier[id_only] : keyword[for] identifier[item] keyword[in] identifier[items] : identifier[print] ( identifier[item] [ literal[string] ]) keyword[else] : identifier[print] ( identifier[tabulate] ([ identifier[item] . identifier[values] () keyword[for] identifier[item] keyword[in] identifier[items] ], identifier[headers] =( identifier[item] [ literal[int] ] keyword[for] identifier[item] keyword[in] identifier[fields] ))) keyword[if] identifier[total_count] > identifier[paginating_interval] : identifier[print] ( literal[string] )
def sessions(status, access_key, id_only, all):
    """ List and manage compute sessions. """
    fields = [('Session ID', 'sess_id')]
    with Session() as session:
        if is_admin(session):
            fields.append(('Owner', 'access_key')) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['session']]
    if not id_only:
        fields.extend([('Image', 'image'), ('Tag', 'tag'), ('Created At', 'created_at'), ('Terminated At', 'terminated_at'), ('Status', 'status'), ('Occupied Resource', 'occupied_slots'), ('Used Memory (MiB)', 'mem_cur_bytes'), ('Max Used Memory (MiB)', 'mem_max_bytes'), ('CPU Using (%)', 'cpu_using')])
        if is_legacy_server():
            del fields[2] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]

    def execute_paginated_query(limit, offset):
        q = '\n        query($limit:Int!, $offset:Int!, $ak:String, $status:String) {\n            compute_session_list(\n                limit:$limit, offset:$offset, access_key:$ak, status:$status) {\n                items { $fields }\n                total_count\n            }\n        }'
        q = textwrap.dedent(q).strip()
        q = q.replace('$fields', ' '.join((item[1] for item in fields)))
        v = {'limit': limit, 'offset': offset, 'status': status if status != 'ALL' else None, 'ak': access_key}
        try:
            resp = session.Admin.query(q, v) # depends on [control=['try'], data=[]]
        except Exception as e:
            print_error(e)
            sys.exit(1) # depends on [control=['except'], data=['e']]
        return resp['compute_session_list']

    def round_mem(items):
        for item in items:
            if 'mem_cur_bytes' in item:
                item['mem_cur_bytes'] = round(item['mem_cur_bytes'] / 2 ** 20, 1) # depends on [control=['if'], data=['item']]
            if 'mem_max_bytes' in item:
                item['mem_max_bytes'] = round(item['mem_max_bytes'] / 2 ** 20, 1) # depends on [control=['if'], data=['item']] # depends on [control=['for'], data=['item']]
        return items

    def _generate_paginated_results(interval):
        offset = 0
        is_first = True
        total_count = -1
        while True:
            limit = interval if is_first else min(interval, total_count - offset)
            try:
                result = execute_paginated_query(limit, offset) # depends on [control=['try'], data=[]]
            except Exception as e:
                print_error(e)
                sys.exit(1) # depends on [control=['except'], data=['e']]
            offset += interval
            total_count = result['total_count']
            items = result['items']
            items = round_mem(items)
            if id_only:
                yield ('\n'.join([item['sess_id'] for item in items]) + '\n') # depends on [control=['if'], data=[]]
            else:
                table = tabulate([item.values() for item in items], headers=(item[0] for item in fields))
                if not is_first:
                    table_rows = table.split('\n')
                    table = '\n'.join(table_rows[2:]) # depends on [control=['if'], data=[]]
                yield (table + '\n')
            if is_first:
                is_first = False # depends on [control=['if'], data=[]]
            if not offset < total_count:
                break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
    with Session() as session:
        paginating_interval = 10
        if all:
            click.echo_via_pager(_generate_paginated_results(paginating_interval)) # depends on [control=['if'], data=[]]
        else:
            result = execute_paginated_query(paginating_interval, offset=0)
            total_count = result['total_count']
            if total_count == 0:
                print('There are no compute sessions currently {0}.'.format(status.lower()))
                return # depends on [control=['if'], data=[]]
            items = result['items']
            items = round_mem(items)
            if id_only:
                for item in items:
                    print(item['sess_id']) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]]
            else:
                print(tabulate([item.values() for item in items], headers=(item[0] for item in fields)))
            if total_count > paginating_interval:
                print('More sessions can be displayed by using --all option.') # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
def cosmological_quantity_from_redshift(z, quantity, strip_unit=True, **kwargs): r"""Returns the value of a cosmological quantity (e.g., age) at a redshift. Parameters ---------- z : float The redshift. quantity : str The name of the quantity to get. The name may be any attribute of :py:class:`astropy.cosmology.FlatLambdaCDM`. strip_unit : bool, optional Just return the value of the quantity, sans units. Default is True. \**kwargs : All other keyword args are passed to :py:func:`get_cosmology` to select a cosmology. If none provided, will use :py:attr:`DEFAULT_COSMOLOGY`. Returns ------- float or astropy.units.quantity : The value of the quantity at the requested value. If ``strip_unit`` is ``True``, will return the value. Otherwise, will return the value with units. """ cosmology = get_cosmology(**kwargs) val = getattr(cosmology, quantity)(z) if strip_unit: val = val.value return val
def function[cosmological_quantity_from_redshift, parameter[z, quantity, strip_unit]]: constant[Returns the value of a cosmological quantity (e.g., age) at a redshift. Parameters ---------- z : float The redshift. quantity : str The name of the quantity to get. The name may be any attribute of :py:class:`astropy.cosmology.FlatLambdaCDM`. strip_unit : bool, optional Just return the value of the quantity, sans units. Default is True. \**kwargs : All other keyword args are passed to :py:func:`get_cosmology` to select a cosmology. If none provided, will use :py:attr:`DEFAULT_COSMOLOGY`. Returns ------- float or astropy.units.quantity : The value of the quantity at the requested value. If ``strip_unit`` is ``True``, will return the value. Otherwise, will return the value with units. ] variable[cosmology] assign[=] call[name[get_cosmology], parameter[]] variable[val] assign[=] call[call[name[getattr], parameter[name[cosmology], name[quantity]]], parameter[name[z]]] if name[strip_unit] begin[:] variable[val] assign[=] name[val].value return[name[val]]
keyword[def] identifier[cosmological_quantity_from_redshift] ( identifier[z] , identifier[quantity] , identifier[strip_unit] = keyword[True] , ** identifier[kwargs] ): literal[string] identifier[cosmology] = identifier[get_cosmology] (** identifier[kwargs] ) identifier[val] = identifier[getattr] ( identifier[cosmology] , identifier[quantity] )( identifier[z] ) keyword[if] identifier[strip_unit] : identifier[val] = identifier[val] . identifier[value] keyword[return] identifier[val]
def cosmological_quantity_from_redshift(z, quantity, strip_unit=True, **kwargs): """Returns the value of a cosmological quantity (e.g., age) at a redshift. Parameters ---------- z : float The redshift. quantity : str The name of the quantity to get. The name may be any attribute of :py:class:`astropy.cosmology.FlatLambdaCDM`. strip_unit : bool, optional Just return the value of the quantity, sans units. Default is True. \\**kwargs : All other keyword args are passed to :py:func:`get_cosmology` to select a cosmology. If none provided, will use :py:attr:`DEFAULT_COSMOLOGY`. Returns ------- float or astropy.units.quantity : The value of the quantity at the requested value. If ``strip_unit`` is ``True``, will return the value. Otherwise, will return the value with units. """ cosmology = get_cosmology(**kwargs) val = getattr(cosmology, quantity)(z) if strip_unit: val = val.value # depends on [control=['if'], data=[]] return val
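A usage sketch for cosmological_quantity_from_redshift, assuming get_cosmology() with no keyword arguments falls back to the module's DEFAULT_COSMOLOGY (an astropy FlatLambdaCDM instance whose age(z) returns a Quantity in Gyr):

# Age of the universe at z=1, as a bare float.
age = cosmological_quantity_from_redshift(1.0, 'age')

# The same quantity with its astropy unit attached.
age_q = cosmological_quantity_from_redshift(1.0, 'age', strip_unit=False)
print(age, age_q.unit)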
def message_user(self, username, domain, subject, message): """Currently use send_message_chat and discard subject, because headline messages are not stored by mod_offline.""" kwargs = { 'body': message, 'from': domain, 'to': '%s@%s' % (username, domain), } if self.api_version <= (14, 7): # TODO: it's unclear when send_message was introduced command = 'send_message_chat' else: command = 'send_message' kwargs['subject'] = subject kwargs['type'] = 'normal' result = self.rpc(command, **kwargs) if result['res'] == 0: return else: raise BackendError(result.get('text', 'Unknown Error'))
def function[message_user, parameter[self, username, domain, subject, message]]: constant[Currently use send_message_chat and discard subject, because headline messages are not stored by mod_offline.] variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b10eb0>, <ast.Constant object at 0x7da1b0b12e60>, <ast.Constant object at 0x7da1b0b10310>], [<ast.Name object at 0x7da1b0b12080>, <ast.Name object at 0x7da1b0b10ac0>, <ast.BinOp object at 0x7da1b0b12dd0>]] if compare[name[self].api_version less_or_equal[<=] tuple[[<ast.Constant object at 0x7da1b0b12350>, <ast.Constant object at 0x7da1b0b11d20>]]] begin[:] variable[command] assign[=] constant[send_message_chat] variable[result] assign[=] call[name[self].rpc, parameter[name[command]]] if compare[call[name[result]][constant[res]] equal[==] constant[0]] begin[:] return[None]
keyword[def] identifier[message_user] ( identifier[self] , identifier[username] , identifier[domain] , identifier[subject] , identifier[message] ): literal[string] identifier[kwargs] ={ literal[string] : identifier[message] , literal[string] : identifier[domain] , literal[string] : literal[string] %( identifier[username] , identifier[domain] ), } keyword[if] identifier[self] . identifier[api_version] <=( literal[int] , literal[int] ): identifier[command] = literal[string] keyword[else] : identifier[command] = literal[string] identifier[kwargs] [ literal[string] ]= identifier[subject] identifier[kwargs] [ literal[string] ]= literal[string] identifier[result] = identifier[self] . identifier[rpc] ( identifier[command] ,** identifier[kwargs] ) keyword[if] identifier[result] [ literal[string] ]== literal[int] : keyword[return] keyword[else] : keyword[raise] identifier[BackendError] ( identifier[result] . identifier[get] ( literal[string] , literal[string] ))
def message_user(self, username, domain, subject, message): """Currently use send_message_chat and discard subject, because headline messages are not stored by mod_offline.""" kwargs = {'body': message, 'from': domain, 'to': '%s@%s' % (username, domain)} if self.api_version <= (14, 7): # TODO: it's unclear when send_message was introduced command = 'send_message_chat' # depends on [control=['if'], data=[]] else: command = 'send_message' kwargs['subject'] = subject kwargs['type'] = 'normal' result = self.rpc(command, **kwargs) if result['res'] == 0: return # depends on [control=['if'], data=[]] else: raise BackendError(result.get('text', 'Unknown Error'))
def create_view(self, query_criteria=None, uid='_all_users'):

    ''' a method to add a view to a design document of a uid

    :param query_criteria: dictionary with valid jsonmodel query criteria
    :param uid: [optional] string with uid of design document to update
    :return: integer with status of operation

    an example of how to construct the query_criteria argument:

    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'discrete_values': [ 'pond', 'lake', 'stream', 'brook' ]
        }
    }

    NOTE: only fields specified in the document schema at class initialization
    can be used as fields in query_criteria. otherwise, an error will be thrown.
    uid is automatically added to all document schemas at initialization

    NOTE: the full list of all criteria are found in the reference page for the
    jsonmodel module as well as the query-rules.json file included in the module.
    http://collectiveacuity.github.io/jsonModel/reference/#query-criteria
    '''

    # https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/query/put__db___design__ddoc_
    # https://developer.couchbase.com/documentation/server/3.x/admin/Views/views-writing.html

    title = '%s.create_view' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'uid': uid
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)

    # validate inputs
    if query_criteria:
        if not self.model:
            raise ValueError('%s(query_criteria={...}) requires a document_schema.' % title)
        self.model.query(query_criteria)
    else:
        query_criteria = {}
    if uid != '_all_users' and self.public:
        raise ValueError('%s(uid="%s") user ids are not applicable for a public bucket.' % (title, uid))

    # catch missing args
    if not query_criteria and not uid:
        raise IndexError('%s requires either a uid or query_criteria argument.' % title)

    # create a view of all user documents
    else:

        # retrieve the design document for the uid
        url = self.bucket_url + '/_design/%s' % uid
        design_details = {
            'views': {}
        }
        response = requests.get(url)
        if response.status_code in (200, 201):
            design_details = response.json()
            design_details['views'] = self._clean_views(design_details['views'])

        # create a view of all docs for the uid
        if not query_criteria:
            if uid == '_all_users':
                return response.status_code
            else:
                function_string = 'function(doc, meta) { if (doc.uid == "%s") { emit(null, null); } }' % uid
                design_details['views']['_all_docs'] = { 'map': function_string }

        # construct a view for a query criteria
        else:

            # determine hashed key for criteria
            import hashlib
            import json
            from collections import OrderedDict
            ordered_criteria = OrderedDict(**query_criteria)
            hashed_criteria = hashlib.md5(json.dumps(query_criteria, sort_keys=True).encode('utf-8')).hexdigest()

            # determine function string for criteria
            uid_insert = 'emit();'
            if uid != '_all_users':
                uid_insert = 'if (doc.uid == "%s") { emit(); }' % uid
            function_string = 'function(doc, meta) { %s }' % uid_insert
            emit_insert = 'emit(null, ['
            count = 0
            for key in ordered_criteria.keys():
                if count:
                    emit_insert += ','
                emit_insert += 'doc%s' % key
                count += 1
            emit_insert += ']);'
            function_string = function_string.replace('emit();', emit_insert)

            # construct updated design details
            design_details['views'][hashed_criteria] = { 'map': function_string }

        # send update of design document
        response = requests.put(url, json=design_details)

        return response.status_code
def function[create_view, parameter[self, query_criteria, uid]]: constant[ a method to add a view to a design document of a uid :param query_criteria: dictionary with valid jsonmodel query criteria :param uid: [optional] string with uid of design document to update :return: integer with status of operation an example of how to construct the query_criteria argument: query_criteria = { '.path.to.number': { 'min_value': 4.5 }, '.path.to.string': { 'discrete_values': [ 'pond', 'lake', 'stream', 'brook' ] } } NOTE: only fields specified in the document schema at class initialization can be used as fields in query_criteria. otherwise, an error will be thrown. uid is automatically added to all document schemas at initialization NOTE: the full list of all criteria are found in the reference page for the jsonmodel module as well as the query-rules.json file included in the module. http://collectiveacuity.github.io/jsonModel/reference/#query-criteria ] variable[title] assign[=] binary_operation[constant[%s.create_view] <ast.Mod object at 0x7da2590d6920> name[self].__class__.__name__] variable[input_fields] assign[=] dictionary[[<ast.Constant object at 0x7da20e9633a0>], [<ast.Name object at 0x7da20e963d30>]] for taget[tuple[[<ast.Name object at 0x7da20e962020>, <ast.Name object at 0x7da20e9639a0>]]] in starred[call[name[input_fields].items, parameter[]]] begin[:] if name[value] begin[:] variable[object_title] assign[=] binary_operation[constant[%s(%s=%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e962920>, <ast.Name object at 0x7da20e962aa0>, <ast.Call object at 0x7da20e9629b0>]]] call[name[self].fields.validate, parameter[name[value], binary_operation[constant[.%s] <ast.Mod object at 0x7da2590d6920> name[key]], name[object_title]]] if name[query_criteria] begin[:] if <ast.UnaryOp object at 0x7da20e962500> begin[:] <ast.Raise object at 0x7da20e963580> call[name[self].model.query, parameter[name[query_criteria]]] if <ast.BoolOp object at 0x7da1b1335d80> begin[:] <ast.Raise object at 0x7da1b1335cc0> if <ast.BoolOp object at 0x7da1b1335a80> begin[:] <ast.Raise object at 0x7da1b1334160> return[name[response].status_code]
keyword[def] identifier[create_view] ( identifier[self] , identifier[query_criteria] = keyword[None] , identifier[uid] = literal[string] ): literal[string] identifier[title] = literal[string] % identifier[self] . identifier[__class__] . identifier[__name__] identifier[input_fields] ={ literal[string] : identifier[uid] } keyword[for] identifier[key] , identifier[value] keyword[in] identifier[input_fields] . identifier[items] (): keyword[if] identifier[value] : identifier[object_title] = literal[string] %( identifier[title] , identifier[key] , identifier[str] ( identifier[value] )) identifier[self] . identifier[fields] . identifier[validate] ( identifier[value] , literal[string] % identifier[key] , identifier[object_title] ) keyword[if] identifier[query_criteria] : keyword[if] keyword[not] identifier[self] . identifier[model] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[title] ) identifier[self] . identifier[model] . identifier[query] ( identifier[query_criteria] ) keyword[else] : identifier[query_criteria] ={} keyword[if] identifier[uid] != literal[string] keyword[and] identifier[self] . identifier[public] : keyword[raise] identifier[ValueError] ( literal[string] %( identifier[title] , identifier[uid] )) keyword[if] keyword[not] identifier[query_criteria] keyword[and] keyword[not] identifier[uid] : keyword[raise] identifier[IndexError] ( literal[string] % identifier[title] ) keyword[else] : identifier[url] = identifier[self] . identifier[bucket_url] + literal[string] % identifier[uid] identifier[design_details] ={ literal[string] :{} } identifier[response] = identifier[requests] . identifier[get] ( identifier[url] ) keyword[if] identifier[response] . identifier[status_code] keyword[in] ( literal[int] , literal[int] ): identifier[design_details] = identifier[response] . identifier[json] () identifier[design_details] [ literal[string] ]= identifier[self] . identifier[_clean_views] ( identifier[design_details] [ literal[string] ]) keyword[if] keyword[not] identifier[query_criteria] : keyword[if] identifier[uid] == literal[string] : keyword[return] identifier[response] . identifier[status_code] keyword[else] : identifier[function_string] = literal[string] % identifier[uid] identifier[design_details] [ literal[string] ][ literal[string] ]={ literal[string] : identifier[function_string] } keyword[else] : keyword[import] identifier[hashlib] keyword[import] identifier[json] keyword[from] identifier[collections] keyword[import] identifier[OrderedDict] identifier[ordered_criteria] = identifier[OrderedDict] (** identifier[query_criteria] ) identifier[hashed_criteria] = identifier[hashlib] . identifier[md5] ( identifier[json] . identifier[dumps] ( identifier[query_criteria] , identifier[sort_keys] = keyword[True] ). identifier[encode] ( literal[string] )). identifier[hexdigest] () identifier[uid_insert] = literal[string] keyword[if] identifier[uid] != literal[string] : identifier[uid_insert] = literal[string] % identifier[uid] identifier[function_string] = literal[string] % identifier[uid_insert] identifier[emit_insert] = literal[string] identifier[count] = literal[int] keyword[for] identifier[key] keyword[in] identifier[ordered_criteria] . identifier[keys] (): keyword[if] identifier[count] : identifier[emit_insert] += literal[string] identifier[emit_insert] += literal[string] % identifier[key] identifier[count] += literal[int] identifier[emit_insert] += literal[string] identifier[function_string] = identifier[function_string] . identifier[replace] ( literal[string] , identifier[emit_insert] ) identifier[design_details] [ literal[string] ][ identifier[hashed_criteria] ]={ literal[string] : identifier[function_string] } identifier[response] = identifier[requests] . identifier[put] ( identifier[url] , identifier[json] = identifier[design_details] ) keyword[return] identifier[response] . identifier[status_code]
def create_view(self, query_criteria=None, uid='_all_users'):
    """ a method to add a view to a design document of a uid

    :param query_criteria: dictionary with valid jsonmodel query criteria
    :param uid: [optional] string with uid of design document to update
    :return: integer with status of operation

    an example of how to construct the query_criteria argument:

    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'discrete_values': [ 'pond', 'lake', 'stream', 'brook' ]
        }
    }

    NOTE: only fields specified in the document schema at class initialization
    can be used as fields in query_criteria. otherwise, an error will be thrown.
    uid is automatically added to all document schemas at initialization

    NOTE: the full list of all criteria are found in the reference page for the
    jsonmodel module as well as the query-rules.json file included in the module.
    http://collectiveacuity.github.io/jsonModel/reference/#query-criteria
    """
    # https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/query/put__db___design__ddoc_
    # https://developer.couchbase.com/documentation/server/3.x/admin/Views/views-writing.html
    title = '%s.create_view' % self.__class__.__name__
    # validate inputs
    input_fields = {'uid': uid}
    for (key, value) in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
    # validate inputs
    if query_criteria:
        if not self.model:
            raise ValueError('%s(query_criteria={...}) requires a document_schema.' % title) # depends on [control=['if'], data=[]]
        self.model.query(query_criteria) # depends on [control=['if'], data=['query_criteria']]
    else:
        query_criteria = {}
    if uid != '_all_users' and self.public:
        raise ValueError('%s(uid="%s") user ids are not applicable for a public bucket.' % (title, uid)) # depends on [control=['if'], data=[]]
    # catch missing args
    if not query_criteria and (not uid):
        raise IndexError('%s requires either a uid or query_criteria argument.' % title) # depends on [control=['if'], data=[]]
    else:
        # create a view of all user documents
        # retrieve the design document for the uid
        url = self.bucket_url + '/_design/%s' % uid
        design_details = {'views': {}}
        response = requests.get(url)
        if response.status_code in (200, 201):
            design_details = response.json()
            design_details['views'] = self._clean_views(design_details['views']) # depends on [control=['if'], data=[]]
        # create a view of all docs for the uid
        if not query_criteria:
            if uid == '_all_users':
                return response.status_code # depends on [control=['if'], data=[]]
            else:
                function_string = 'function(doc, meta) { if (doc.uid == "%s") { emit(null, null); } }' % uid
                design_details['views']['_all_docs'] = {'map': function_string} # depends on [control=['if'], data=[]]
        else:
            # construct a view for a query criteria
            # determine hashed key for criteria
            import hashlib
            import json
            from collections import OrderedDict
            ordered_criteria = OrderedDict(**query_criteria)
            hashed_criteria = hashlib.md5(json.dumps(query_criteria, sort_keys=True).encode('utf-8')).hexdigest()
            # determine function string for criteria
            uid_insert = 'emit();'
            if uid != '_all_users':
                uid_insert = 'if (doc.uid == "%s") { emit(); }' % uid # depends on [control=['if'], data=['uid']]
            function_string = 'function(doc, meta) { %s }' % uid_insert
            emit_insert = 'emit(null, ['
            count = 0
            for key in ordered_criteria.keys():
                if count:
                    emit_insert += ',' # depends on [control=['if'], data=[]]
                emit_insert += 'doc%s' % key
                count += 1 # depends on [control=['for'], data=['key']]
            emit_insert += ']);'
            function_string = function_string.replace('emit();', emit_insert)
            # construct updated design details
            design_details['views'][hashed_criteria] = {'map': function_string}
        # send update of design document
        response = requests.put(url, json=design_details)
        return response.status_code
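A standalone sketch of how the record above assembles its Couchbase map function, using two made-up criteria fields:

from collections import OrderedDict

criteria = OrderedDict([
    ('.rank', {'min_value': 2}),               # hypothetical criteria fields
    ('.color', {'discrete_values': ['red']}),
])
emit_insert = 'emit(null, ['
for count, key in enumerate(criteria):
    if count:
        emit_insert += ','
    emit_insert += 'doc%s' % key
emit_insert += ']);'
print('function(doc, meta) { %s }' % emit_insert)
# function(doc, meta) { emit(null, [doc.rank,doc.color]); }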
def tx_hash( cls, tx ):
    """
    Calculate the hash of a transaction structure given by bitcoind
    """
    tx_hex = bits.btc_bitcoind_tx_serialize( tx )
    tx_hash = hashing.bin_double_sha256(tx_hex.decode('hex'))[::-1].encode('hex')
    return tx_hash
def function[tx_hash, parameter[cls, tx]]: constant[ Calculate the hash of a transaction structure given by bitcoind ] variable[tx_hex] assign[=] call[name[bits].btc_bitcoind_tx_serialize, parameter[name[tx]]] variable[tx_hash] assign[=] call[call[call[name[hashing].bin_double_sha256, parameter[call[name[tx_hex].decode, parameter[constant[hex]]]]]][<ast.Slice object at 0x7da1b2840fa0>].encode, parameter[constant[hex]]] return[name[tx_hash]]
keyword[def] identifier[tx_hash] ( identifier[cls] , identifier[tx] ): literal[string] identifier[tx_hex] = identifier[bits] . identifier[btc_bitcoind_tx_serialize] ( identifier[tx] ) identifier[tx_hash] = identifier[hashing] . identifier[bin_double_sha256] ( identifier[tx_hex] . identifier[decode] ( literal[string] ))[::- literal[int] ]. identifier[encode] ( literal[string] ) keyword[return] identifier[tx_hash]
def tx_hash(cls, tx): """ Calculate the hash of a transaction structure given by bitcoind """ tx_hex = bits.btc_bitcoind_tx_serialize(tx) tx_hash = hashing.bin_double_sha256(tx_hex.decode('hex'))[::-1].encode('hex') return tx_hash
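The record above relies on Python 2's str.decode('hex') and an external hashing helper; a self-contained Python 3 sketch of the same double-SHA256 step, with a toy payload:

import hashlib

def double_sha256_txid(tx_hex):
    # SHA-256 applied twice to the raw serialized transaction, with the
    # digest byte-reversed before hex encoding (Bitcoin's txid convention).
    raw = bytes.fromhex(tx_hex)
    digest = hashlib.sha256(hashlib.sha256(raw).digest()).digest()
    return digest[::-1].hex()

print(double_sha256_txid('0100'))  # toy bytes, not a real transaction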
def _from_dict(cls, _dict): """Initialize a ListCollectionFieldsResponse object from a json dictionary.""" args = {} if 'fields' in _dict: args['fields'] = [ Field._from_dict(x) for x in (_dict.get('fields')) ] return cls(**args)
def function[_from_dict, parameter[cls, _dict]]: constant[Initialize a ListCollectionFieldsResponse object from a json dictionary.] variable[args] assign[=] dictionary[[], []] if compare[constant[fields] in name[_dict]] begin[:] call[name[args]][constant[fields]] assign[=] <ast.ListComp object at 0x7da18bcca140> return[call[name[cls], parameter[]]]
keyword[def] identifier[_from_dict] ( identifier[cls] , identifier[_dict] ): literal[string] identifier[args] ={} keyword[if] literal[string] keyword[in] identifier[_dict] : identifier[args] [ literal[string] ]=[ identifier[Field] . identifier[_from_dict] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[_dict] . identifier[get] ( literal[string] )) ] keyword[return] identifier[cls] (** identifier[args] )
def _from_dict(cls, _dict): """Initialize a ListCollectionFieldsResponse object from a json dictionary.""" args = {} if 'fields' in _dict: args['fields'] = [Field._from_dict(x) for x in _dict.get('fields')] # depends on [control=['if'], data=['_dict']] return cls(**args)
def check_py_version():
    """Check if a proper Python version is used."""
    try:
        if sys.version_info >= (2, 7):
            return
    except:
        pass
    print(" ")
    print(" ERROR - memtop needs python version at least 2.7")
    print(("Chances are that you can install a newer version from your "
           "repositories, or even that you already have some newer version "
           "installed."))
    print("(one way to find out which versions are installed is to try "
          "following: 'which python2.7' , 'which python3' and so...)")
    print(" ")
    sys.exit(-1)
def function[check_py_version, parameter[]]: constant[Check if a proper Python version is used.] <ast.Try object at 0x7da1b10a4100> call[name[print], parameter[constant[ ]]] call[name[print], parameter[constant[ ERROR - memtop needs python version at least 2.7]]] call[name[print], parameter[constant[Chances are that you can install a newer version from your repositories, or even that you already have some newer version installed.]]] call[name[print], parameter[constant[(one way to find out which versions are installed is to try following: 'which python2.7' , 'which python3' and so...)]]] call[name[print], parameter[constant[ ]]] call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b10a4c70>]]
keyword[def] identifier[check_py_version] (): literal[string] keyword[try] : keyword[if] identifier[sys] . identifier[version_info] >=( literal[int] , literal[int] ): keyword[return] keyword[except] : keyword[pass] identifier[print] ( literal[string] ) identifier[print] ( literal[string] ) identifier[print] (( literal[string] literal[string] literal[string] )) identifier[print] ( literal[string] literal[string] ) identifier[print] ( literal[string] ) identifier[sys] . identifier[exit] (- literal[int] )
def check_py_version(): """Check if a proper Python version is used.""" try: if sys.version_info >= (2, 7): return # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except: pass # depends on [control=['except'], data=[]] print(' ') print(' ERROR - memtop needs python version at least 2.7') print('Chances are that you can install a newer version from your repositories, or even that you already have some newer version installed.') print("(one way to find out which versions are installed is to try following: 'which python2.7' , 'which python3' and so...)") print(' ') sys.exit(-1)
def save_task_info(self, res, mem_gb=0):
    """
    :param self: an object with attributes .hdf5, .argnames, .sent
    :param res: a :class:`Result` object
    :param mem_gb: memory consumption at save time (optional)
    """
    mon = res.mon
    name = mon.operation[6:]  # strip 'total '
    if self.hdf5:
        mon.hdf5 = self.hdf5  # needed for the flush below
        t = (mon.task_no, mon.weight, mon.duration, len(res.pik), mem_gb)
        data = numpy.array([t], task_info_dt)
        hdf5.extend3(self.hdf5.filename, 'task_info/' + name, data,
                     argnames=self.argnames, sent=self.sent)
    mon.flush()
def function[save_task_info, parameter[self, res, mem_gb]]: constant[ :param self: an object with attributes .hdf5, .argnames, .sent :param res: a :class:`Result` object :param mem_gb: memory consumption at save time (optional) ] variable[mon] assign[=] name[res].mon variable[name] assign[=] call[name[mon].operation][<ast.Slice object at 0x7da20c7c9510>] if name[self].hdf5 begin[:] name[mon].hdf5 assign[=] name[self].hdf5 variable[t] assign[=] tuple[[<ast.Attribute object at 0x7da20c7c9060>, <ast.Attribute object at 0x7da20c7c9b40>, <ast.Attribute object at 0x7da20c7cbd30>, <ast.Call object at 0x7da20c7c8730>, <ast.Name object at 0x7da20c7c9330>]] variable[data] assign[=] call[name[numpy].array, parameter[list[[<ast.Name object at 0x7da20c7c8e80>]], name[task_info_dt]]] call[name[hdf5].extend3, parameter[name[self].hdf5.filename, binary_operation[constant[task_info/] + name[name]], name[data]]] call[name[mon].flush, parameter[]]
keyword[def] identifier[save_task_info] ( identifier[self] , identifier[res] , identifier[mem_gb] = literal[int] ): literal[string] identifier[mon] = identifier[res] . identifier[mon] identifier[name] = identifier[mon] . identifier[operation] [ literal[int] :] keyword[if] identifier[self] . identifier[hdf5] : identifier[mon] . identifier[hdf5] = identifier[self] . identifier[hdf5] identifier[t] =( identifier[mon] . identifier[task_no] , identifier[mon] . identifier[weight] , identifier[mon] . identifier[duration] , identifier[len] ( identifier[res] . identifier[pik] ), identifier[mem_gb] ) identifier[data] = identifier[numpy] . identifier[array] ([ identifier[t] ], identifier[task_info_dt] ) identifier[hdf5] . identifier[extend3] ( identifier[self] . identifier[hdf5] . identifier[filename] , literal[string] + identifier[name] , identifier[data] , identifier[argnames] = identifier[self] . identifier[argnames] , identifier[sent] = identifier[self] . identifier[sent] ) identifier[mon] . identifier[flush] ()
def save_task_info(self, res, mem_gb=0):
    """
    :param self: an object with attributes .hdf5, .argnames, .sent
    :param res: a :class:`Result` object
    :param mem_gb: memory consumption at save time (optional)
    """
    mon = res.mon
    name = mon.operation[6:]  # strip 'total '
    if self.hdf5:
        mon.hdf5 = self.hdf5  # needed for the flush below
        t = (mon.task_no, mon.weight, mon.duration, len(res.pik), mem_gb)
        data = numpy.array([t], task_info_dt)
        hdf5.extend3(self.hdf5.filename, 'task_info/' + name, data, argnames=self.argnames, sent=self.sent) # depends on [control=['if'], data=[]]
    mon.flush()
def is_redundant_multiplicon(self, value):
    """ Returns True if the passed multiplicon ID is redundant, False otherwise.

    - value, (int) multiplicon ID
    """
    if not hasattr(self, '_redundant_multiplicon_cache'):
        sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
        cur = self._dbconn.cursor()
        cur.execute(sql)
        result = [int(r[0]) for r in cur.fetchall()]
        self._redundant_multiplicon_cache = set(result)
    if value in self._redundant_multiplicon_cache:
        return True
    else:
        return False
def function[is_redundant_multiplicon, parameter[self, value]]: constant[ Returns True if the passed multiplicon ID is redundant, False otherwise. - value, (int) multiplicon ID ] if <ast.UnaryOp object at 0x7da1b0ae1000> begin[:] variable[sql] assign[=] constant[SELECT id FROM multiplicons WHERE is_redundant="-1"] variable[cur] assign[=] call[name[self]._dbconn.cursor, parameter[]] call[name[cur].execute, parameter[name[sql]]] variable[result] assign[=] <ast.ListComp object at 0x7da1b09c5180> name[self]._redundant_multiplicon_cache assign[=] call[name[set], parameter[name[result]]] if compare[name[value] in name[self]._redundant_multiplicon_cache] begin[:] return[constant[True]]
keyword[def] identifier[is_redundant_multiplicon] ( identifier[self] , identifier[value] ): literal[string] keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[sql] = literal[string] identifier[cur] = identifier[self] . identifier[_dbconn] . identifier[cursor] () identifier[cur] . identifier[execute] ( identifier[sql] ) identifier[result] =[ identifier[int] ( identifier[r] [ literal[int] ]) keyword[for] identifier[r] keyword[in] identifier[cur] . identifier[fetchall] ()] identifier[self] . identifier[_redundant_multiplicon_cache] = identifier[set] ( identifier[result] ) keyword[if] identifier[value] keyword[in] identifier[self] . identifier[_redundant_multiplicon_cache] : keyword[return] keyword[True] keyword[else] : keyword[return] keyword[False]
def is_redundant_multiplicon(self, value):
    """ Returns True if the passed multiplicon ID is redundant, False otherwise.

    - value, (int) multiplicon ID
    """
    if not hasattr(self, '_redundant_multiplicon_cache'):
        sql = 'SELECT id FROM multiplicons WHERE is_redundant="-1"'
        cur = self._dbconn.cursor()
        cur.execute(sql)
        result = [int(r[0]) for r in cur.fetchall()]
        self._redundant_multiplicon_cache = set(result) # depends on [control=['if'], data=[]]
    if value in self._redundant_multiplicon_cache:
        return True # depends on [control=['if'], data=[]]
    else:
        return False
def _ConvertManagedPropertyType(self, propType): """ Convert vmodl.reflect.DynamicTypeManager.PropertyTypeInfo to pyVmomi managed property definition """ if propType: name = propType.name version = propType.version aType = propType.type flags = self._ConvertAnnotations(propType.annotation) privId = propType.privId prop = (name, aType, version, flags, privId) else: prop = None return prop
def function[_ConvertManagedPropertyType, parameter[self, propType]]: constant[ Convert vmodl.reflect.DynamicTypeManager.PropertyTypeInfo to pyVmomi managed property definition ] if name[propType] begin[:] variable[name] assign[=] name[propType].name variable[version] assign[=] name[propType].version variable[aType] assign[=] name[propType].type variable[flags] assign[=] call[name[self]._ConvertAnnotations, parameter[name[propType].annotation]] variable[privId] assign[=] name[propType].privId variable[prop] assign[=] tuple[[<ast.Name object at 0x7da18ede5810>, <ast.Name object at 0x7da18ede7df0>, <ast.Name object at 0x7da18ede6c50>, <ast.Name object at 0x7da18ede7040>, <ast.Name object at 0x7da18ede5480>]] return[name[prop]]
keyword[def] identifier[_ConvertManagedPropertyType] ( identifier[self] , identifier[propType] ): literal[string] keyword[if] identifier[propType] : identifier[name] = identifier[propType] . identifier[name] identifier[version] = identifier[propType] . identifier[version] identifier[aType] = identifier[propType] . identifier[type] identifier[flags] = identifier[self] . identifier[_ConvertAnnotations] ( identifier[propType] . identifier[annotation] ) identifier[privId] = identifier[propType] . identifier[privId] identifier[prop] =( identifier[name] , identifier[aType] , identifier[version] , identifier[flags] , identifier[privId] ) keyword[else] : identifier[prop] = keyword[None] keyword[return] identifier[prop]
def _ConvertManagedPropertyType(self, propType): """ Convert vmodl.reflect.DynamicTypeManager.PropertyTypeInfo to pyVmomi managed property definition """ if propType: name = propType.name version = propType.version aType = propType.type flags = self._ConvertAnnotations(propType.annotation) privId = propType.privId prop = (name, aType, version, flags, privId) # depends on [control=['if'], data=[]] else: prop = None return prop
def reverse_timezone(self, query, timeout=DEFAULT_SENTINEL): """ Find the timezone for a point in `query`. GeoNames always returns a timezone: if the point being queried doesn't have an assigned Olson timezone id, a ``pytz.FixedOffset`` timezone is used to produce the :class:`geopy.timezone.Timezone`. .. versionadded:: 1.18.0 :param query: The coordinates for which you want a timezone. :type query: :class:`geopy.point.Point`, list or tuple of (latitude, longitude), or string as "%(latitude)s, %(longitude)s" :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: :class:`geopy.timezone.Timezone` """ ensure_pytz_is_installed() try: lat, lng = self._coerce_point_to_string(query).split(',') except ValueError: raise ValueError("Must be a coordinate pair or Point") params = { "lat": lat, "lng": lng, "username": self.username, } url = "?".join((self.api_timezone, urlencode(params))) logger.debug("%s.reverse_timezone: %s", self.__class__.__name__, url) return self._parse_json_timezone( self._call_geocoder(url, timeout=timeout) )
def function[reverse_timezone, parameter[self, query, timeout]]: constant[ Find the timezone for a point in `query`. GeoNames always returns a timezone: if the point being queried doesn't have an assigned Olson timezone id, a ``pytz.FixedOffset`` timezone is used to produce the :class:`geopy.timezone.Timezone`. .. versionadded:: 1.18.0 :param query: The coordinates for which you want a timezone. :type query: :class:`geopy.point.Point`, list or tuple of (latitude, longitude), or string as "%(latitude)s, %(longitude)s" :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: :class:`geopy.timezone.Timezone` ] call[name[ensure_pytz_is_installed], parameter[]] <ast.Try object at 0x7da20c6e54e0> variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e7e20>, <ast.Constant object at 0x7da20c6e7e50>, <ast.Constant object at 0x7da20c6e4430>], [<ast.Name object at 0x7da20c6e7460>, <ast.Name object at 0x7da20c6e6410>, <ast.Attribute object at 0x7da20c6e6f50>]] variable[url] assign[=] call[constant[?].join, parameter[tuple[[<ast.Attribute object at 0x7da20c6e5840>, <ast.Call object at 0x7da20c6e62f0>]]]] call[name[logger].debug, parameter[constant[%s.reverse_timezone: %s], name[self].__class__.__name__, name[url]]] return[call[name[self]._parse_json_timezone, parameter[call[name[self]._call_geocoder, parameter[name[url]]]]]]
keyword[def] identifier[reverse_timezone] ( identifier[self] , identifier[query] , identifier[timeout] = identifier[DEFAULT_SENTINEL] ): literal[string] identifier[ensure_pytz_is_installed] () keyword[try] : identifier[lat] , identifier[lng] = identifier[self] . identifier[_coerce_point_to_string] ( identifier[query] ). identifier[split] ( literal[string] ) keyword[except] identifier[ValueError] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[params] ={ literal[string] : identifier[lat] , literal[string] : identifier[lng] , literal[string] : identifier[self] . identifier[username] , } identifier[url] = literal[string] . identifier[join] (( identifier[self] . identifier[api_timezone] , identifier[urlencode] ( identifier[params] ))) identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[__class__] . identifier[__name__] , identifier[url] ) keyword[return] identifier[self] . identifier[_parse_json_timezone] ( identifier[self] . identifier[_call_geocoder] ( identifier[url] , identifier[timeout] = identifier[timeout] ) )
def reverse_timezone(self, query, timeout=DEFAULT_SENTINEL): """ Find the timezone for a point in `query`. GeoNames always returns a timezone: if the point being queried doesn't have an assigned Olson timezone id, a ``pytz.FixedOffset`` timezone is used to produce the :class:`geopy.timezone.Timezone`. .. versionadded:: 1.18.0 :param query: The coordinates for which you want a timezone. :type query: :class:`geopy.point.Point`, list or tuple of (latitude, longitude), or string as "%(latitude)s, %(longitude)s" :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: :class:`geopy.timezone.Timezone` """ ensure_pytz_is_installed() try: (lat, lng) = self._coerce_point_to_string(query).split(',') # depends on [control=['try'], data=[]] except ValueError: raise ValueError('Must be a coordinate pair or Point') # depends on [control=['except'], data=[]] params = {'lat': lat, 'lng': lng, 'username': self.username} url = '?'.join((self.api_timezone, urlencode(params))) logger.debug('%s.reverse_timezone: %s', self.__class__.__name__, url) return self._parse_json_timezone(self._call_geocoder(url, timeout=timeout))
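For reference, the URL assembly step from the record in isolation; the endpoint and username below are assumptions, not values taken from the record:

from urllib.parse import urlencode

api_timezone = 'http://api.geonames.org/timezoneJSON'  # assumed endpoint
params = {'lat': '52.50', 'lng': '13.40', 'username': 'demo'}
print('?'.join((api_timezone, urlencode(params))))
# http://api.geonames.org/timezoneJSON?lat=52.50&lng=13.40&username=demo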
def add_transcription(self, gene: Gene, rna: Union[Rna, MicroRna]) -> str: """Add a transcription relation from a gene to an RNA or miRNA node. :param gene: A gene node :param rna: An RNA or microRNA node """ return self.add_unqualified_edge(gene, rna, TRANSCRIBED_TO)
def function[add_transcription, parameter[self, gene, rna]]: constant[Add a transcription relation from a gene to an RNA or miRNA node. :param gene: A gene node :param rna: An RNA or microRNA node ] return[call[name[self].add_unqualified_edge, parameter[name[gene], name[rna], name[TRANSCRIBED_TO]]]]
keyword[def] identifier[add_transcription] ( identifier[self] , identifier[gene] : identifier[Gene] , identifier[rna] : identifier[Union] [ identifier[Rna] , identifier[MicroRna] ])-> identifier[str] : literal[string] keyword[return] identifier[self] . identifier[add_unqualified_edge] ( identifier[gene] , identifier[rna] , identifier[TRANSCRIBED_TO] )
def add_transcription(self, gene: Gene, rna: Union[Rna, MicroRna]) -> str: """Add a transcription relation from a gene to an RNA or miRNA node. :param gene: A gene node :param rna: An RNA or microRNA node """ return self.add_unqualified_edge(gene, rna, TRANSCRIBED_TO)
def fetch_and_parse(method, uri, params_prefix=None, **params):
    """Fetch the given uri and return a Python dictionary with parsed data-types."""
    response = fetch(method, uri, params_prefix, **params)
    return _parse(json.loads(response.text))
def function[fetch_and_parse, parameter[method, uri, params_prefix]]: constant[Fetch the given uri and return a Python dictionary with parsed data-types.] variable[response] assign[=] call[name[fetch], parameter[name[method], name[uri], name[params_prefix]]] return[call[name[_parse], parameter[call[name[json].loads, parameter[name[response].text]]]]]
keyword[def] identifier[fetch_and_parse] ( identifier[method] , identifier[uri] , identifier[params_prefix] = keyword[None] ,** identifier[params] ): literal[string] identifier[response] = identifier[fetch] ( identifier[method] , identifier[uri] , identifier[params_prefix] ,** identifier[params] ) keyword[return] identifier[_parse] ( identifier[json] . identifier[loads] ( identifier[response] . identifier[text] ))
def fetch_and_parse(method, uri, params_prefix=None, **params): """Fetch the given uri and return a Python dictionary with parsed data-types.""" response = fetch(method, uri, params_prefix, **params) return _parse(json.loads(response.text))
def memoize(func): """ Memoization decorator for a function taking one or more arguments. """ class Memodict(dict): """ just a dict""" def __getitem__(self, *key): return dict.__getitem__(self, key) def __missing__(self, key): """ this makes it faster """ ret = self[key] = func(*key) return ret return Memodict().__getitem__
def function[memoize, parameter[func]]: constant[ Memoization decorator for a function taking one or more arguments. ] class class[Memodict, parameter[]] begin[:] constant[ just a dict] def function[__getitem__, parameter[self]]: return[call[name[dict].__getitem__, parameter[name[self], name[key]]]] def function[__missing__, parameter[self, key]]: constant[ this makes it faster ] variable[ret] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da18ede7190>]] return[name[ret]] return[call[name[Memodict], parameter[]].__getitem__]
keyword[def] identifier[memoize] ( identifier[func] ): literal[string] keyword[class] identifier[Memodict] ( identifier[dict] ): literal[string] keyword[def] identifier[__getitem__] ( identifier[self] ,* identifier[key] ): keyword[return] identifier[dict] . identifier[__getitem__] ( identifier[self] , identifier[key] ) keyword[def] identifier[__missing__] ( identifier[self] , identifier[key] ): literal[string] identifier[ret] = identifier[self] [ identifier[key] ]= identifier[func] (* identifier[key] ) keyword[return] identifier[ret] keyword[return] identifier[Memodict] (). identifier[__getitem__]
def memoize(func): """ Memoization decorator for a function taking one or more arguments. """ class Memodict(dict): """ just a dict""" def __getitem__(self, *key): return dict.__getitem__(self, key) def __missing__(self, key): """ this makes it faster """ ret = self[key] = func(*key) return ret return Memodict().__getitem__
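A usage sketch for the decorator above: positional arguments become the tuple key, and __missing__ computes and caches on the first lookup, so recursive calls hit the cache:

@memoize
def fib(n):
    # The recursive calls go through the memo dict, so each n is computed once.
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(100))  # 354224848179261915075, returned without exponential blowup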
def _set_pseudotime(self): """Return pseudotime with respect to root point. """ self.pseudotime = self.distances_dpt[self.iroot].copy() self.pseudotime /= np.max(self.pseudotime[self.pseudotime < np.inf])
def function[_set_pseudotime, parameter[self]]: constant[Return pseudotime with respect to root point. ] name[self].pseudotime assign[=] call[call[name[self].distances_dpt][name[self].iroot].copy, parameter[]] <ast.AugAssign object at 0x7da18f58faf0>
keyword[def] identifier[_set_pseudotime] ( identifier[self] ): literal[string] identifier[self] . identifier[pseudotime] = identifier[self] . identifier[distances_dpt] [ identifier[self] . identifier[iroot] ]. identifier[copy] () identifier[self] . identifier[pseudotime] /= identifier[np] . identifier[max] ( identifier[self] . identifier[pseudotime] [ identifier[self] . identifier[pseudotime] < identifier[np] . identifier[inf] ])
def _set_pseudotime(self): """Return pseudotime with respect to root point. """ self.pseudotime = self.distances_dpt[self.iroot].copy() self.pseudotime /= np.max(self.pseudotime[self.pseudotime < np.inf])
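A toy numpy illustration of the normalization above: distances are scaled by the largest finite value, so unreachable points stay at inf:

import numpy as np

d = np.array([0.0, 1.5, 3.0, np.inf])  # made-up DPT distances from a root cell
pseudotime = d / np.max(d[d < np.inf])
print(pseudotime)  # [0.  0.5 1.  inf]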
def resetCanvasDimensions(self, windowHeight, windowWidth): 'sets total available canvas dimensions to (windowHeight, windowWidth) (in char cells)' self.plotwidth = windowWidth*2 self.plotheight = (windowHeight-1)*4 # exclude status line # pixels[y][x] = { attr: list(rows), ... } self.pixels = [[defaultdict(list) for x in range(self.plotwidth)] for y in range(self.plotheight)]
def function[resetCanvasDimensions, parameter[self, windowHeight, windowWidth]]: constant[sets total available canvas dimensions to (windowHeight, windowWidth) (in char cells)] name[self].plotwidth assign[=] binary_operation[name[windowWidth] * constant[2]] name[self].plotheight assign[=] binary_operation[binary_operation[name[windowHeight] - constant[1]] * constant[4]] name[self].pixels assign[=] <ast.ListComp object at 0x7da20e9b2b00>
keyword[def] identifier[resetCanvasDimensions] ( identifier[self] , identifier[windowHeight] , identifier[windowWidth] ): literal[string] identifier[self] . identifier[plotwidth] = identifier[windowWidth] * literal[int] identifier[self] . identifier[plotheight] =( identifier[windowHeight] - literal[int] )* literal[int] identifier[self] . identifier[pixels] =[[ identifier[defaultdict] ( identifier[list] ) keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[self] . identifier[plotwidth] )] keyword[for] identifier[y] keyword[in] identifier[range] ( identifier[self] . identifier[plotheight] )]
def resetCanvasDimensions(self, windowHeight, windowWidth): """sets total available canvas dimensions to (windowHeight, windowWidth) (in char cells)""" self.plotwidth = windowWidth * 2 self.plotheight = (windowHeight - 1) * 4 # exclude status line # pixels[y][x] = { attr: list(rows), ... } self.pixels = [[defaultdict(list) for x in range(self.plotwidth)] for y in range(self.plotheight)]
def verify_words(self): """Verify the fields source, imagery_used and comment of the changeset for some suspect words. """ if self.comment: if find_words(self.comment, self.suspect_words, self.excluded_words): self.label_suspicious('suspect_word') if self.source: for word in self.illegal_sources: if word in self.source.lower(): self.label_suspicious('suspect_word') break if self.imagery_used: for word in self.illegal_sources: if word in self.imagery_used.lower(): self.label_suspicious('suspect_word') break self.suspicion_reasons = list(set(self.suspicion_reasons))
def function[verify_words, parameter[self]]: constant[Verify the fields source, imagery_used and comment of the changeset for some suspect words. ] if name[self].comment begin[:] if call[name[find_words], parameter[name[self].comment, name[self].suspect_words, name[self].excluded_words]] begin[:] call[name[self].label_suspicious, parameter[constant[suspect_word]]] if name[self].source begin[:] for taget[name[word]] in starred[name[self].illegal_sources] begin[:] if compare[name[word] in call[name[self].source.lower, parameter[]]] begin[:] call[name[self].label_suspicious, parameter[constant[suspect_word]]] break if name[self].imagery_used begin[:] for taget[name[word]] in starred[name[self].illegal_sources] begin[:] if compare[name[word] in call[name[self].imagery_used.lower, parameter[]]] begin[:] call[name[self].label_suspicious, parameter[constant[suspect_word]]] break name[self].suspicion_reasons assign[=] call[name[list], parameter[call[name[set], parameter[name[self].suspicion_reasons]]]]
keyword[def] identifier[verify_words] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[comment] : keyword[if] identifier[find_words] ( identifier[self] . identifier[comment] , identifier[self] . identifier[suspect_words] , identifier[self] . identifier[excluded_words] ): identifier[self] . identifier[label_suspicious] ( literal[string] ) keyword[if] identifier[self] . identifier[source] : keyword[for] identifier[word] keyword[in] identifier[self] . identifier[illegal_sources] : keyword[if] identifier[word] keyword[in] identifier[self] . identifier[source] . identifier[lower] (): identifier[self] . identifier[label_suspicious] ( literal[string] ) keyword[break] keyword[if] identifier[self] . identifier[imagery_used] : keyword[for] identifier[word] keyword[in] identifier[self] . identifier[illegal_sources] : keyword[if] identifier[word] keyword[in] identifier[self] . identifier[imagery_used] . identifier[lower] (): identifier[self] . identifier[label_suspicious] ( literal[string] ) keyword[break] identifier[self] . identifier[suspicion_reasons] = identifier[list] ( identifier[set] ( identifier[self] . identifier[suspicion_reasons] ))
def verify_words(self): """Verify the fields source, imagery_used and comment of the changeset for some suspect words. """ if self.comment: if find_words(self.comment, self.suspect_words, self.excluded_words): self.label_suspicious('suspect_word') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if self.source: for word in self.illegal_sources: if word in self.source.lower(): self.label_suspicious('suspect_word') break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['word']] # depends on [control=['if'], data=[]] if self.imagery_used: for word in self.illegal_sources: if word in self.imagery_used.lower(): self.label_suspicious('suspect_word') break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['word']] # depends on [control=['if'], data=[]] self.suspicion_reasons = list(set(self.suspicion_reasons))
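The record depends on a find_words helper that is not shown; a hypothetical minimal stand-in, assuming whole-word matching with an exclusion list:

import re

def find_words(text, suspect_words, excluded_words=()):
    # Hypothetical: flag the text when any suspect word occurs as a whole
    # word and no excluded word does.
    words = set(re.findall(r'\w+', text.lower()))
    if words & set(excluded_words):
        return False
    return bool(words & set(suspect_words))

print(find_words('fixed map traced from google', ['google'], []))  # True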
def get_left_ngrams(mention, window=3, attrib="words", n_min=1, n_max=1, lower=True): """Get the ngrams within a window to the *left* from the sentence Context. For higher-arity Candidates, defaults to the *first* argument. :param mention: The Mention to evaluate. If a candidate is given, default to its first Mention. :param window: The number of tokens to the left of the first argument to return. :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams """ span = _to_span(mention) i = span.get_word_start_index() for ngram in tokens_to_ngrams( getattr(span.sentence, attrib)[max(0, i - window) : i], n_min=n_min, n_max=n_max, lower=lower, ): yield ngram
def function[get_left_ngrams, parameter[mention, window, attrib, n_min, n_max, lower]]: constant[Get the ngrams within a window to the *left* from the sentence Context. For higher-arity Candidates, defaults to the *first* argument. :param mention: The Mention to evaluate. If a candidate is given, default to its first Mention. :param window: The number of tokens to the left of the first argument to return. :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams ] variable[span] assign[=] call[name[_to_span], parameter[name[mention]]] variable[i] assign[=] call[name[span].get_word_start_index, parameter[]] for taget[name[ngram]] in starred[call[name[tokens_to_ngrams], parameter[call[call[name[getattr], parameter[name[span].sentence, name[attrib]]]][<ast.Slice object at 0x7da18ede6f80>]]]] begin[:] <ast.Yield object at 0x7da18ede5930>
keyword[def] identifier[get_left_ngrams] ( identifier[mention] , identifier[window] = literal[int] , identifier[attrib] = literal[string] , identifier[n_min] = literal[int] , identifier[n_max] = literal[int] , identifier[lower] = keyword[True] ): literal[string] identifier[span] = identifier[_to_span] ( identifier[mention] ) identifier[i] = identifier[span] . identifier[get_word_start_index] () keyword[for] identifier[ngram] keyword[in] identifier[tokens_to_ngrams] ( identifier[getattr] ( identifier[span] . identifier[sentence] , identifier[attrib] )[ identifier[max] ( literal[int] , identifier[i] - identifier[window] ): identifier[i] ], identifier[n_min] = identifier[n_min] , identifier[n_max] = identifier[n_max] , identifier[lower] = identifier[lower] , ): keyword[yield] identifier[ngram]
def get_left_ngrams(mention, window=3, attrib='words', n_min=1, n_max=1, lower=True): """Get the ngrams within a window to the *left* from the sentence Context. For higher-arity Candidates, defaults to the *first* argument. :param mention: The Mention to evaluate. If a candidate is given, default to its first Mention. :param window: The number of tokens to the left of the first argument to return. :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams """ span = _to_span(mention) i = span.get_word_start_index() for ngram in tokens_to_ngrams(getattr(span.sentence, attrib)[max(0, i - window):i], n_min=n_min, n_max=n_max, lower=lower): yield ngram # depends on [control=['for'], data=['ngram']]
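tokens_to_ngrams is assumed by the record but not shown; a hypothetical minimal version plus the same left-window slicing:

def tokens_to_ngrams(tokens, n_min=1, n_max=1, lower=True):
    # Hypothetical stand-in: emit all n-grams for n in [n_min, n_max].
    for n in range(n_min, n_max + 1):
        for i in range(len(tokens) - n + 1):
            ngram = ' '.join(tokens[i:i + n])
            yield ngram.lower() if lower else ngram

words = ['the', 'quick', 'brown', 'fox', 'jumps']
i, window = 4, 3  # up to three tokens left of word index 4
print(list(tokens_to_ngrams(words[max(0, i - window):i], n_min=1, n_max=2)))
# ['quick', 'brown', 'fox', 'quick brown', 'brown fox']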
def error(bot, update, error): """Log Errors caused by Updates.""" logger.error('Update {} caused error {}'.format(update, error), extra={"tag": "err"})
def function[error, parameter[bot, update, error]]: constant[Log Errors caused by Updates.] call[name[logger].error, parameter[call[constant[Update {} caused error {}].format, parameter[name[update], name[error]]]]]
keyword[def] identifier[error] ( identifier[bot] , identifier[update] , identifier[error] ): literal[string] identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[update] , identifier[error] ), identifier[extra] ={ literal[string] : literal[string] })
def error(bot, update, error): """Log Errors caused by Updates.""" logger.error('Update {} caused error {}'.format(update, error), extra={'tag': 'err'})
def run_via_binary(self, run_command_instance=None, command=None, volumes=None,
                   additional_opts=None, **kwargs):
    """
    create a container using this image and run it in background;
    this method is useful to test real user scenarios when users invoke containers using binary

    :param run_command_instance: instance of DockerRunBuilder
    :param command: list of str, command to run in the container, examples:
        - ["ls", "/"]
        - ["bash", "-c", "ls / | grep bin"]
    :param volumes: tuple or list of tuples in the form:

        * `("/path/to/directory", )`
        * `("/host/path", "/container/path")`
        * `("/host/path", "/container/path", "mode")`
        * `(conu.Directory('/host/path'), "/container/path")`
          (source can be also Directory instance)

    :param additional_opts: list of str, additional options for `docker run`
    :return: instance of DockerContainer
    """
    logger.info("run container via binary in background")

    if (command is not None or additional_opts is not None) \
            and run_command_instance is not None:
        raise ConuException(
            "run_command_instance and command parameters cannot be passed "
            "into method at same time")

    if run_command_instance is None:
        command = command or []
        additional_opts = additional_opts or []

        if (isinstance(command, (list, tuple))
                and isinstance(additional_opts, (list, tuple))):
            run_command_instance = DockerRunBuilder(
                command=command, additional_opts=additional_opts)
        else:
            raise ConuException("command and additional_opts need to be list of str or None")
    else:
        run_command_instance = run_command_instance or DockerRunBuilder()
        if not isinstance(run_command_instance, DockerRunBuilder):
            raise ConuException(
                "run_command_instance needs to be an instance of DockerRunBuilder")

    run_command_instance.image_name = self.get_id()
    run_command_instance.options += ["-d"]

    if volumes:
        run_command_instance.options += self.get_volume_options(volumes=volumes)

    def callback():
        try:
            # FIXME: catch std{out,err}, print stdout to logger.debug, stderr to logger.error
            run_cmd(run_command_instance.build())
        except subprocess.CalledProcessError as ex:
            raise ConuException("Container exited with an error: %s" % ex.returncode)

    container_id, _ = self._run_container(run_command_instance, callback)

    container_name = self.d.inspect_container(container_id)['Name'][1:]
    return DockerContainer(self, container_id, name=container_name)
def function[run_via_binary, parameter[self, run_command_instance, command, volumes, additional_opts]]: constant[ create a container using this image and run it in background; this method is useful to test real user scenarios when users invoke containers using binary :param run_command_instance: instance of DockerRunBuilder :param command: list of str, command to run in the container, examples: - ["ls", "/"] - ["bash", "-c", "ls / | grep bin"] :param volumes: tuple or list of tuples in the form: * `("/path/to/directory", )` * `("/host/path", "/container/path")` * `("/host/path", "/container/path", "mode")` * `(conu.Directory('/host/path'), "/container/path")` (source can be also Directory instance) :param additional_opts: list of str, additional options for `docker run` :return: instance of DockerContainer ] call[name[logger].info, parameter[constant[run container via binary in background]]] if <ast.BoolOp object at 0x7da1b121b670> begin[:] <ast.Raise object at 0x7da1b1188190> if compare[name[run_command_instance] is constant[None]] begin[:] variable[command] assign[=] <ast.BoolOp object at 0x7da1b11881f0> variable[additional_opts] assign[=] <ast.BoolOp object at 0x7da1b13218d0> if <ast.BoolOp object at 0x7da1b12c11e0> begin[:] variable[run_command_instance] assign[=] call[name[DockerRunBuilder], parameter[]] name[run_command_instance].image_name assign[=] call[name[self].get_id, parameter[]] <ast.AugAssign object at 0x7da1b11b8340> if name[volumes] begin[:] <ast.AugAssign object at 0x7da1b11bac50> def function[callback, parameter[]]: <ast.Try object at 0x7da1b11f8820> <ast.Tuple object at 0x7da1b11fbe20> assign[=] call[name[self]._run_container, parameter[name[run_command_instance], name[callback]]] variable[container_name] assign[=] call[call[call[name[self].d.inspect_container, parameter[name[container_id]]]][constant[Name]]][<ast.Slice object at 0x7da1b12c27a0>] return[call[name[DockerContainer], parameter[name[self], name[container_id]]]]
keyword[def] identifier[run_via_binary] ( identifier[self] , identifier[run_command_instance] = keyword[None] , identifier[command] = keyword[None] , identifier[volumes] = keyword[None] , identifier[additional_opts] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[logger] . identifier[info] ( literal[string] ) keyword[if] ( identifier[command] keyword[is] keyword[not] keyword[None] keyword[or] identifier[additional_opts] keyword[is] keyword[not] keyword[None] ) keyword[and] identifier[run_command_instance] keyword[is] keyword[not] keyword[None] : keyword[raise] identifier[ConuException] ( literal[string] literal[string] ) keyword[if] identifier[run_command_instance] keyword[is] keyword[None] : identifier[command] = identifier[command] keyword[or] [] identifier[additional_opts] = identifier[additional_opts] keyword[or] [] keyword[if] ( identifier[isinstance] ( identifier[command] , identifier[list] ) keyword[or] identifier[isinstance] ( identifier[command] , identifier[tuple] ) keyword[and] identifier[isinstance] ( identifier[additional_opts] , identifier[list] ) keyword[or] identifier[isinstance] ( identifier[additional_opts] , identifier[tuple] )): identifier[run_command_instance] = identifier[DockerRunBuilder] ( identifier[command] = identifier[command] , identifier[additional_opts] = identifier[additional_opts] ) keyword[else] : keyword[raise] identifier[ConuException] ( literal[string] ) keyword[else] : identifier[run_command_instance] = identifier[run_command_instance] keyword[or] identifier[DockerRunBuilder] () keyword[if] keyword[not] identifier[isinstance] ( identifier[run_command_instance] , identifier[DockerRunBuilder] ): keyword[raise] identifier[ConuException] ( literal[string] ) identifier[run_command_instance] . identifier[image_name] = identifier[self] . identifier[get_id] () identifier[run_command_instance] . identifier[options] +=[ literal[string] ] keyword[if] identifier[volumes] : identifier[run_command_instance] . identifier[options] += identifier[self] . identifier[get_volume_options] ( identifier[volumes] = identifier[volumes] ) keyword[def] identifier[callback] (): keyword[try] : identifier[run_cmd] ( identifier[run_command_instance] . identifier[build] ()) keyword[except] identifier[subprocess] . identifier[CalledProcessError] keyword[as] identifier[ex] : keyword[raise] identifier[ConuException] ( literal[string] % identifier[ex] . identifier[returncode] ) identifier[container_id] , identifier[_] = identifier[self] . identifier[_run_container] ( identifier[run_command_instance] , identifier[callback] ) identifier[container_name] = identifier[self] . identifier[d] . identifier[inspect_container] ( identifier[container_id] )[ literal[string] ][ literal[int] :] keyword[return] identifier[DockerContainer] ( identifier[self] , identifier[container_id] , identifier[name] = identifier[container_name] )
def run_via_binary(self, run_command_instance=None, command=None, volumes=None, additional_opts=None, **kwargs):
    """
    create a container using this image and run it in background;
    this method is useful to test real user scenarios when users invoke containers using binary

    :param run_command_instance: instance of DockerRunBuilder
    :param command: list of str, command to run in the container, examples:
        - ["ls", "/"]
        - ["bash", "-c", "ls / | grep bin"]
    :param volumes: tuple or list of tuples in the form:

        * `("/path/to/directory", )`
        * `("/host/path", "/container/path")`
        * `("/host/path", "/container/path", "mode")`
        * `(conu.Directory('/host/path'), "/container/path")`
          (source can be also Directory instance)

    :param additional_opts: list of str, additional options for `docker run`
    :return: instance of DockerContainer
    """
    logger.info('run container via binary in background')
    if (command is not None or additional_opts is not None) and run_command_instance is not None:
        raise ConuException('run_command_instance and command parameters cannot be passed into method at same time') # depends on [control=['if'], data=[]]
    if run_command_instance is None:
        command = command or []
        additional_opts = additional_opts or []
        if isinstance(command, (list, tuple)) and isinstance(additional_opts, (list, tuple)):
            run_command_instance = DockerRunBuilder(command=command, additional_opts=additional_opts) # depends on [control=['if'], data=[]]
        else:
            raise ConuException('command and additional_opts need to be list of str or None') # depends on [control=['if'], data=['run_command_instance']]
    else:
        run_command_instance = run_command_instance or DockerRunBuilder()
        if not isinstance(run_command_instance, DockerRunBuilder):
            raise ConuException('run_command_instance needs to be an instance of DockerRunBuilder') # depends on [control=['if'], data=[]]
    run_command_instance.image_name = self.get_id()
    run_command_instance.options += ['-d']
    if volumes:
        run_command_instance.options += self.get_volume_options(volumes=volumes) # depends on [control=['if'], data=[]]

    def callback():
        try:
            # FIXME: catch std{out,err}, print stdout to logger.debug, stderr to logger.error
            run_cmd(run_command_instance.build()) # depends on [control=['try'], data=[]]
        except subprocess.CalledProcessError as ex:
            raise ConuException('Container exited with an error: %s' % ex.returncode) # depends on [control=['except'], data=['ex']]
    (container_id, _) = self._run_container(run_command_instance, callback)
    container_name = self.d.inspect_container(container_id)['Name'][1:]
    return DockerContainer(self, container_id, name=container_name)
def suggestion_list(inp, options): """ Given an invalid input string and a list of valid options, returns a filtered list of valid options sorted based on their similarity with the input. """ options_by_distance = OrderedDict() input_threshold = len(inp) / 2 for option in options: distance = lexical_distance(inp, option) threshold = max(input_threshold, len(option) / 2, 1) if distance <= threshold: options_by_distance[option] = distance return sorted( list(options_by_distance.keys()), key=lambda k: options_by_distance[k] )
def function[suggestion_list, parameter[inp, options]]: constant[ Given an invalid input string and a list of valid options, returns a filtered list of valid options sorted based on their similarity with the input. ] variable[options_by_distance] assign[=] call[name[OrderedDict], parameter[]] variable[input_threshold] assign[=] binary_operation[call[name[len], parameter[name[inp]]] / constant[2]] for taget[name[option]] in starred[name[options]] begin[:] variable[distance] assign[=] call[name[lexical_distance], parameter[name[inp], name[option]]] variable[threshold] assign[=] call[name[max], parameter[name[input_threshold], binary_operation[call[name[len], parameter[name[option]]] / constant[2]], constant[1]]] if compare[name[distance] less_or_equal[<=] name[threshold]] begin[:] call[name[options_by_distance]][name[option]] assign[=] name[distance] return[call[name[sorted], parameter[call[name[list], parameter[call[name[options_by_distance].keys, parameter[]]]]]]]
keyword[def] identifier[suggestion_list] ( identifier[inp] , identifier[options] ): literal[string] identifier[options_by_distance] = identifier[OrderedDict] () identifier[input_threshold] = identifier[len] ( identifier[inp] )/ literal[int] keyword[for] identifier[option] keyword[in] identifier[options] : identifier[distance] = identifier[lexical_distance] ( identifier[inp] , identifier[option] ) identifier[threshold] = identifier[max] ( identifier[input_threshold] , identifier[len] ( identifier[option] )/ literal[int] , literal[int] ) keyword[if] identifier[distance] <= identifier[threshold] : identifier[options_by_distance] [ identifier[option] ]= identifier[distance] keyword[return] identifier[sorted] ( identifier[list] ( identifier[options_by_distance] . identifier[keys] ()), identifier[key] = keyword[lambda] identifier[k] : identifier[options_by_distance] [ identifier[k] ] )
def suggestion_list(inp, options): """ Given an invalid input string and a list of valid options, returns a filtered list of valid options sorted based on their similarity with the input. """ options_by_distance = OrderedDict() input_threshold = len(inp) / 2 for option in options: distance = lexical_distance(inp, option) threshold = max(input_threshold, len(option) / 2, 1) if distance <= threshold: options_by_distance[option] = distance # depends on [control=['if'], data=['distance']] # depends on [control=['for'], data=['option']] return sorted(list(options_by_distance.keys()), key=lambda k: options_by_distance[k])
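An illustrative driver for suggestion_list. The real lexical_distance helper is not shown in this record, so it is stubbed here with a plain dynamic-programming Levenshtein distance; the exact metric is an assumption.

from collections import OrderedDict  # required by suggestion_list above

def lexical_distance(a, b):
    # single-row Levenshtein: d[j] holds the distance from a-so-far to b[:j]
    d = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        prev, d[0] = d[0], i
        for j, cb in enumerate(b, 1):
            prev, d[j] = d[j], min(d[j] + 1, d[j - 1] + 1, prev + (ca != cb))
    return d[-1]

print(suggestion_list('GreetInput', ['GreetingInput', 'Greeting', 'Farewell']))
# closer options come back first; 'Farewell' is filtered out by the threshold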
def getroot(self): """Return the root element of the figure. The root element is a group of elements after stripping the toplevel ``<svg>`` tag. Returns ------- GroupElement All elements of the figure without the ``<svg>`` tag. """ if 'class' in self.root.attrib: attrib = {'class': self.root.attrib['class']} else: attrib = None return GroupElement(self.root.getchildren(), attrib=attrib)
def function[getroot, parameter[self]]: constant[Return the root element of the figure. The root element is a group of elements after stripping the toplevel ``<svg>`` tag. Returns ------- GroupElement All elements of the figure without the ``<svg>`` tag. ] if compare[constant[class] in name[self].root.attrib] begin[:] variable[attrib] assign[=] dictionary[[<ast.Constant object at 0x7da1b11a3970>], [<ast.Subscript object at 0x7da1b11a0df0>]] return[call[name[GroupElement], parameter[call[name[self].root.getchildren, parameter[]]]]]
keyword[def] identifier[getroot] ( identifier[self] ): literal[string] keyword[if] literal[string] keyword[in] identifier[self] . identifier[root] . identifier[attrib] : identifier[attrib] ={ literal[string] : identifier[self] . identifier[root] . identifier[attrib] [ literal[string] ]} keyword[else] : identifier[attrib] = keyword[None] keyword[return] identifier[GroupElement] ( identifier[self] . identifier[root] . identifier[getchildren] (), identifier[attrib] = identifier[attrib] )
def getroot(self): """Return the root element of the figure. The root element is a group of elements after stripping the toplevel ``<svg>`` tag. Returns ------- GroupElement All elements of the figure without the ``<svg>`` tag. """ if 'class' in self.root.attrib: attrib = {'class': self.root.attrib['class']} # depends on [control=['if'], data=[]] else: attrib = None return GroupElement(self.root.getchildren(), attrib=attrib)
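The canonical way getroot is used in svgutils: strip the <svg> wrapper from two figures, transform the resulting groups, and compose them onto a new canvas. The file names are placeholders.

import svgutils.transform as sg

fig = sg.SVGFigure('16cm', '8cm')
left = sg.fromfile('panel_a.svg').getroot()   # <svg> tag stripped, group kept
right = sg.fromfile('panel_b.svg').getroot()
right.moveto(280, 0)                          # shift the second panel
fig.append([left, right])
fig.save('combined.svg')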
def parseFASTAFilteringCommandLineOptions(args, reads): """ Examine parsed FASTA filtering command-line options and return filtered reads. @param args: An argparse namespace, as returned by the argparse C{parse_args} function. @param reads: A C{Reads} instance to filter. @return: The filtered C{Reads} instance. """ keepSequences = ( parseRangeString(args.keepSequences, convertToZeroBased=True) if args.keepSequences else None) removeSequences = ( parseRangeString(args.removeSequences, convertToZeroBased=True) if args.removeSequences else None) return reads.filter( minLength=args.minLength, maxLength=args.maxLength, whitelist=set(args.whitelist) if args.whitelist else None, blacklist=set(args.blacklist) if args.blacklist else None, whitelistFile=args.whitelistFile, blacklistFile=args.blacklistFile, titleRegex=args.titleRegex, negativeTitleRegex=args.negativeTitleRegex, keepSequences=keepSequences, removeSequences=removeSequences, head=args.head, removeDuplicates=args.removeDuplicates, removeDuplicatesById=args.removeDuplicatesById, randomSubset=args.randomSubset, trueLength=args.trueLength, sampleFraction=args.sampleFraction, sequenceNumbersFile=args.sequenceNumbersFile)
def function[parseFASTAFilteringCommandLineOptions, parameter[args, reads]]: constant[ Examine parsed FASTA filtering command-line options and return filtered reads. @param args: An argparse namespace, as returned by the argparse C{parse_args} function. @param reads: A C{Reads} instance to filter. @return: The filtered C{Reads} instance. ] variable[keepSequences] assign[=] <ast.IfExp object at 0x7da1b0c65240> variable[removeSequences] assign[=] <ast.IfExp object at 0x7da1b0c64130> return[call[name[reads].filter, parameter[]]]
keyword[def] identifier[parseFASTAFilteringCommandLineOptions] ( identifier[args] , identifier[reads] ): literal[string] identifier[keepSequences] =( identifier[parseRangeString] ( identifier[args] . identifier[keepSequences] , identifier[convertToZeroBased] = keyword[True] ) keyword[if] identifier[args] . identifier[keepSequences] keyword[else] keyword[None] ) identifier[removeSequences] =( identifier[parseRangeString] ( identifier[args] . identifier[removeSequences] , identifier[convertToZeroBased] = keyword[True] ) keyword[if] identifier[args] . identifier[removeSequences] keyword[else] keyword[None] ) keyword[return] identifier[reads] . identifier[filter] ( identifier[minLength] = identifier[args] . identifier[minLength] , identifier[maxLength] = identifier[args] . identifier[maxLength] , identifier[whitelist] = identifier[set] ( identifier[args] . identifier[whitelist] ) keyword[if] identifier[args] . identifier[whitelist] keyword[else] keyword[None] , identifier[blacklist] = identifier[set] ( identifier[args] . identifier[blacklist] ) keyword[if] identifier[args] . identifier[blacklist] keyword[else] keyword[None] , identifier[whitelistFile] = identifier[args] . identifier[whitelistFile] , identifier[blacklistFile] = identifier[args] . identifier[blacklistFile] , identifier[titleRegex] = identifier[args] . identifier[titleRegex] , identifier[negativeTitleRegex] = identifier[args] . identifier[negativeTitleRegex] , identifier[keepSequences] = identifier[keepSequences] , identifier[removeSequences] = identifier[removeSequences] , identifier[head] = identifier[args] . identifier[head] , identifier[removeDuplicates] = identifier[args] . identifier[removeDuplicates] , identifier[removeDuplicatesById] = identifier[args] . identifier[removeDuplicatesById] , identifier[randomSubset] = identifier[args] . identifier[randomSubset] , identifier[trueLength] = identifier[args] . identifier[trueLength] , identifier[sampleFraction] = identifier[args] . identifier[sampleFraction] , identifier[sequenceNumbersFile] = identifier[args] . identifier[sequenceNumbersFile] )
def parseFASTAFilteringCommandLineOptions(args, reads): """ Examine parsed FASTA filtering command-line options and return filtered reads. @param args: An argparse namespace, as returned by the argparse C{parse_args} function. @param reads: A C{Reads} instance to filter. @return: The filtered C{Reads} instance. """ keepSequences = parseRangeString(args.keepSequences, convertToZeroBased=True) if args.keepSequences else None removeSequences = parseRangeString(args.removeSequences, convertToZeroBased=True) if args.removeSequences else None return reads.filter(minLength=args.minLength, maxLength=args.maxLength, whitelist=set(args.whitelist) if args.whitelist else None, blacklist=set(args.blacklist) if args.blacklist else None, whitelistFile=args.whitelistFile, blacklistFile=args.blacklistFile, titleRegex=args.titleRegex, negativeTitleRegex=args.negativeTitleRegex, keepSequences=keepSequences, removeSequences=removeSequences, head=args.head, removeDuplicates=args.removeDuplicates, removeDuplicatesById=args.removeDuplicatesById, randomSubset=args.randomSubset, trueLength=args.trueLength, sampleFraction=args.sampleFraction, sequenceNumbersFile=args.sequenceNumbersFile)
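A hypothetical call site. The function only reads attributes off the parsed-args namespace, so a SimpleNamespace carrying the attribute names used in the body works for illustration; `reads` is assumed to be a dark-matter Reads instance, and the "keep the first ten, drop short and duplicate reads" values are assumptions.

from types import SimpleNamespace

args = SimpleNamespace(
    minLength=50, maxLength=None, whitelist=None, blacklist=None,
    whitelistFile=None, blacklistFile=None, titleRegex=None,
    negativeTitleRegex=None, keepSequences='1-10', removeSequences=None,
    head=None, removeDuplicates=True, removeDuplicatesById=False,
    randomSubset=None, trueLength=False, sampleFraction=None,
    sequenceNumbersFile=None)

filtered = parseFASTAFilteringCommandLineOptions(args, reads)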
async def get_dnssec_validation(cls) -> DNSSEC:
        """Return the DNSSEC validation setting for upstream zones.

        Only used when MAAS is running its own DNS server. This value is
        used as the value of 'dnssec_validation' in the DNS server config.
        """
        data = await cls.get_config("dnssec_validation")
        return cls.DNSSEC.lookup(data)
<ast.AsyncFunctionDef object at 0x7da20c992230>
keyword[async] keyword[def] identifier[get_dnssec_validation] ( identifier[cls] )-> identifier[DNSSEC] : literal[string] identifier[data] = keyword[await] identifier[cls] . identifier[get_config] ( literal[string] ) keyword[return] identifier[cls] . identifier[DNSSEC] . identifier[lookup] ( identifier[data] )
async def get_dnssec_validation(cls) -> DNSSEC:
    """Return the DNSSEC validation setting for upstream zones.

    Only used when MAAS is running its own DNS server. This value is
    used as the value of 'dnssec_validation' in the DNS server config.
    """
    data = await cls.get_config('dnssec_validation')
    return cls.DNSSEC.lookup(data)
def set_right_margin(self, right_margin): """ Set the right margin of the menu. This will determine the number of spaces between the right edge of the screen and the right menu border. :param right_margin: an integer value """ self.__header.style.margins.right = right_margin self.__prologue.style.margins.right = right_margin self.__items_section.style.margins.right = right_margin self.__epilogue.style.margins.right = right_margin self.__footer.style.margins.right = right_margin self.__prompt.style.margins.right = right_margin return self
def function[set_right_margin, parameter[self, right_margin]]: constant[ Set the right margin of the menu. This will determine the number of spaces between the right edge of the screen and the right menu border. :param right_margin: an integer value ] name[self].__header.style.margins.right assign[=] name[right_margin] name[self].__prologue.style.margins.right assign[=] name[right_margin] name[self].__items_section.style.margins.right assign[=] name[right_margin] name[self].__epilogue.style.margins.right assign[=] name[right_margin] name[self].__footer.style.margins.right assign[=] name[right_margin] name[self].__prompt.style.margins.right assign[=] name[right_margin] return[name[self]]
keyword[def] identifier[set_right_margin] ( identifier[self] , identifier[right_margin] ): literal[string] identifier[self] . identifier[__header] . identifier[style] . identifier[margins] . identifier[right] = identifier[right_margin] identifier[self] . identifier[__prologue] . identifier[style] . identifier[margins] . identifier[right] = identifier[right_margin] identifier[self] . identifier[__items_section] . identifier[style] . identifier[margins] . identifier[right] = identifier[right_margin] identifier[self] . identifier[__epilogue] . identifier[style] . identifier[margins] . identifier[right] = identifier[right_margin] identifier[self] . identifier[__footer] . identifier[style] . identifier[margins] . identifier[right] = identifier[right_margin] identifier[self] . identifier[__prompt] . identifier[style] . identifier[margins] . identifier[right] = identifier[right_margin] keyword[return] identifier[self]
def set_right_margin(self, right_margin): """ Set the right margin of the menu. This will determine the number of spaces between the right edge of the screen and the right menu border. :param right_margin: an integer value """ self.__header.style.margins.right = right_margin self.__prologue.style.margins.right = right_margin self.__items_section.style.margins.right = right_margin self.__epilogue.style.margins.right = right_margin self.__footer.style.margins.right = right_margin self.__prompt.style.margins.right = right_margin return self
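Because the setter returns self, margins can be configured fluently. The builder class name and the sibling set_left_margin call below are assumptions for illustration.

fmt = (MenuFormatBuilder()
       .set_left_margin(4)    # assumed symmetric counterpart
       .set_right_margin(2))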
def make_transformer(self, decompose='svd', decompose_by=50, tsne_kwargs={}):
        """
        Creates an internal transformer pipeline to project the data set into
        2D space using TSNE, applying a pre-decomposition technique ahead of
        embedding if necessary. This method will reset the transformer on the
        class, and can be used to explore different decompositions.

        Parameters
        ----------
        decompose : string or None, default: ``'svd'``
            A preliminary decomposition is often used prior to TSNE to make
            the projection faster. Specify ``"svd"`` for sparse data or
            ``"pca"`` for dense data. If decompose is None, the original
            data set will be used.

        decompose_by : int, default: 50
            Specify the number of components for preliminary decomposition, by
            default this is 50; the more components, the slower TSNE will be.

        Returns
        -------
        transformer : Pipeline
            Pipelined transformer for TSNE projections
        """
        # TODO: detect decompose by inferring from sparse matrix or dense or
        # If number of features > 50 etc.
        decompositions = {
            'svd': TruncatedSVD,
            'pca': PCA,
        }

        if decompose and decompose.lower() not in decompositions:
            raise YellowbrickValueError(
                "'{}' is not a valid decomposition, use {}, or None".format(
                    decompose, ", ".join(decompositions.keys())
                )
            )

        # Create the pipeline steps
        steps = []

        # Add the pre-decomposition
        if decompose:
            klass = decompositions[decompose]
            steps.append((decompose, klass(
                n_components=decompose_by, random_state=self.random_state)))

        # Add the TSNE manifold
        steps.append(('tsne', TSNE(
            n_components=2, random_state=self.random_state, **tsne_kwargs)))

        # return the pipeline
        return Pipeline(steps)
def function[make_transformer, parameter[self, decompose, decompose_by, tsne_kwargs]]: constant[ Creates an internal transformer pipeline to project the data set into 2D space using TSNE, applying a pre-decomposition technique ahead of embedding if necessary. This method will reset the transformer on the class, and can be used to explore different decompositions. Parameters ---------- decompose : string or None, default: ``'svd'`` A preliminary decomposition is often used prior to TSNE to make the projection faster. Specify ``"svd"`` for sparse data or ``"pca"`` for dense data. If decompose is None, the original data set will be used. decompose_by : int, default: 50 Specify the number of components for preliminary decomposition, by default this is 50; the more components, the slower TSNE will be. Returns ------- transformer : Pipeline Pipelined transformer for TSNE projections ] variable[decompositions] assign[=] dictionary[[<ast.Constant object at 0x7da18bcc9990>, <ast.Constant object at 0x7da18bccbb20>], [<ast.Name object at 0x7da18bcc9330>, <ast.Name object at 0x7da18bccb3a0>]] if <ast.BoolOp object at 0x7da18bcc9780> begin[:] <ast.Raise object at 0x7da18bccaf50> variable[steps] assign[=] list[[]] if name[decompose] begin[:] variable[klass] assign[=] call[name[decompositions]][name[decompose]] call[name[steps].append, parameter[tuple[[<ast.Name object at 0x7da20cabd2a0>, <ast.Call object at 0x7da20cabfd90>]]]] call[name[steps].append, parameter[tuple[[<ast.Constant object at 0x7da20cabe920>, <ast.Call object at 0x7da20cabc0a0>]]]] return[call[name[Pipeline], parameter[name[steps]]]]
keyword[def] identifier[make_transformer] ( identifier[self] , identifier[decompose] = literal[string] , identifier[decompose_by] = literal[int] , identifier[tsne_kwargs] ={}): literal[string] identifier[decompositions] ={ literal[string] : identifier[TruncatedSVD] , literal[string] : identifier[PCA] , } keyword[if] identifier[decompose] keyword[and] identifier[decompose] . identifier[lower] () keyword[not] keyword[in] identifier[decompositions] : keyword[raise] identifier[YellowbrickValueError] ( literal[string] . identifier[format] ( identifier[decompose] , literal[string] . identifier[join] ( identifier[decompositions] . identifier[keys] ()) ) ) identifier[steps] =[] keyword[if] identifier[decompose] : identifier[klass] = identifier[decompositions] [ identifier[decompose] ] identifier[steps] . identifier[append] (( identifier[decompose] , identifier[klass] ( identifier[n_components] = identifier[decompose_by] , identifier[random_state] = identifier[self] . identifier[random_state] ))) identifier[steps] . identifier[append] (( literal[string] , identifier[TSNE] ( identifier[n_components] = literal[int] , identifier[random_state] = identifier[self] . identifier[random_state] ,** identifier[tsne_kwargs] ))) keyword[return] identifier[Pipeline] ( identifier[steps] )
def make_transformer(self, decompose='svd', decompose_by=50, tsne_kwargs={}):
    """
        Creates an internal transformer pipeline to project the data set into
        2D space using TSNE, applying a pre-decomposition technique ahead of
        embedding if necessary. This method will reset the transformer on the
        class, and can be used to explore different decompositions.

        Parameters
        ----------
        decompose : string or None, default: ``'svd'``
            A preliminary decomposition is often used prior to TSNE to make
            the projection faster. Specify ``"svd"`` for sparse data or
            ``"pca"`` for dense data. If decompose is None, the original
            data set will be used.

        decompose_by : int, default: 50
            Specify the number of components for preliminary decomposition, by
            default this is 50; the more components, the slower TSNE will be.

        Returns
        -------
        transformer : Pipeline
            Pipelined transformer for TSNE projections
        """
    # TODO: detect decompose by inferring from sparse matrix or dense or
    # If number of features > 50 etc.
    decompositions = {'svd': TruncatedSVD, 'pca': PCA}
    if decompose and decompose.lower() not in decompositions:
        raise YellowbrickValueError("'{}' is not a valid decomposition, use {}, or None".format(decompose, ', '.join(decompositions.keys()))) # depends on [control=['if'], data=[]]
    # Create the pipeline steps
    steps = []
    # Add the pre-decomposition
    if decompose:
        klass = decompositions[decompose]
        steps.append((decompose, klass(n_components=decompose_by, random_state=self.random_state))) # depends on [control=['if'], data=[]]
    # Add the TSNE manifold
    steps.append(('tsne', TSNE(n_components=2, random_state=self.random_state, **tsne_kwargs)))
    # return the pipeline
    return Pipeline(steps)
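For reference, the pipeline that make_transformer(decompose='pca', decompose_by=50) assembles can be written directly with scikit-learn; this standalone sketch uses a fixed random_state in place of self.random_state.

from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.pipeline import Pipeline

transformer = Pipeline([
    ('pca', PCA(n_components=50, random_state=0)),   # pre-decomposition step
    ('tsne', TSNE(n_components=2, random_state=0)),  # 2D manifold embedding
])
# embedding = transformer.fit_transform(X)  # X: (n_samples, n_features)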
def column_families(self): """List[:class:`~.external_config.BigtableColumnFamily`]: List of column families to expose in the table schema along with their types. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).bigtableOptions.columnFamilies https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.bigtableOptions.columnFamilies """ prop = self._properties.get("columnFamilies", []) return [BigtableColumnFamily.from_api_repr(cf) for cf in prop]
def function[column_families, parameter[self]]: constant[List[:class:`~.external_config.BigtableColumnFamily`]: List of column families to expose in the table schema along with their types. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).bigtableOptions.columnFamilies https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.bigtableOptions.columnFamilies ] variable[prop] assign[=] call[name[self]._properties.get, parameter[constant[columnFamilies], list[[]]]] return[<ast.ListComp object at 0x7da20c76f850>]
keyword[def] identifier[column_families] ( identifier[self] ): literal[string] identifier[prop] = identifier[self] . identifier[_properties] . identifier[get] ( literal[string] ,[]) keyword[return] [ identifier[BigtableColumnFamily] . identifier[from_api_repr] ( identifier[cf] ) keyword[for] identifier[cf] keyword[in] identifier[prop] ]
def column_families(self): """List[:class:`~.external_config.BigtableColumnFamily`]: List of column families to expose in the table schema along with their types. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).bigtableOptions.columnFamilies https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.bigtableOptions.columnFamilies """ prop = self._properties.get('columnFamilies', []) return [BigtableColumnFamily.from_api_repr(cf) for cf in prop]
def extract_view(view, decorators=None): """ Extract a view object out of any wrapping decorators. """ # http://stackoverflow.com/questions/9222129/python-inspect-getmembers-does-not-return-the-actual-function-when-used-with-dec if decorators is None: decorators = [] if getattr(view, 'func_closure', None) is not None: decorators.append(view) for closure in view.func_closure: if callable(closure.cell_contents): return extract_view(closure.cell_contents, decorators) if inspect.isfunction(view) or inspect.ismethod(view): pass elif inspect.isclass(view): pass else: view = view.__class__ return view, decorators
def function[extract_view, parameter[view, decorators]]: constant[ Extract a view object out of any wrapping decorators. ] if compare[name[decorators] is constant[None]] begin[:] variable[decorators] assign[=] list[[]] if compare[call[name[getattr], parameter[name[view], constant[func_closure], constant[None]]] is_not constant[None]] begin[:] call[name[decorators].append, parameter[name[view]]] for taget[name[closure]] in starred[name[view].func_closure] begin[:] if call[name[callable], parameter[name[closure].cell_contents]] begin[:] return[call[name[extract_view], parameter[name[closure].cell_contents, name[decorators]]]] if <ast.BoolOp object at 0x7da1b0a83220> begin[:] pass return[tuple[[<ast.Name object at 0x7da1b0a82050>, <ast.Name object at 0x7da1b0a83b80>]]]
keyword[def] identifier[extract_view] ( identifier[view] , identifier[decorators] = keyword[None] ): literal[string] keyword[if] identifier[decorators] keyword[is] keyword[None] : identifier[decorators] =[] keyword[if] identifier[getattr] ( identifier[view] , literal[string] , keyword[None] ) keyword[is] keyword[not] keyword[None] : identifier[decorators] . identifier[append] ( identifier[view] ) keyword[for] identifier[closure] keyword[in] identifier[view] . identifier[func_closure] : keyword[if] identifier[callable] ( identifier[closure] . identifier[cell_contents] ): keyword[return] identifier[extract_view] ( identifier[closure] . identifier[cell_contents] , identifier[decorators] ) keyword[if] identifier[inspect] . identifier[isfunction] ( identifier[view] ) keyword[or] identifier[inspect] . identifier[ismethod] ( identifier[view] ): keyword[pass] keyword[elif] identifier[inspect] . identifier[isclass] ( identifier[view] ): keyword[pass] keyword[else] : identifier[view] = identifier[view] . identifier[__class__] keyword[return] identifier[view] , identifier[decorators]
def extract_view(view, decorators=None): """ Extract a view object out of any wrapping decorators. """ # http://stackoverflow.com/questions/9222129/python-inspect-getmembers-does-not-return-the-actual-function-when-used-with-dec if decorators is None: decorators = [] # depends on [control=['if'], data=['decorators']] if getattr(view, 'func_closure', None) is not None: decorators.append(view) for closure in view.func_closure: if callable(closure.cell_contents): return extract_view(closure.cell_contents, decorators) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['closure']] # depends on [control=['if'], data=[]] if inspect.isfunction(view) or inspect.ismethod(view): pass # depends on [control=['if'], data=[]] elif inspect.isclass(view): pass # depends on [control=['if'], data=[]] else: view = view.__class__ return (view, decorators)
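A small demonstration of the closure walk. Note that func_closure is the Python 2 spelling; on Python 3 the attribute is __closure__, so there the getattr returns None and extract_view hands back the wrapper unchanged.

def require_login(view):           # classic closure-based decorator
    def wrapper(request):
        return view(request)
    return wrapper

@require_login
def profile(request):
    return 'profile page'

inner, decs = extract_view(profile)
# Python 2: inner is the undecorated profile, decs holds the wrappers.
# Python 3: inner is the wrapper itself (no func_closure attribute).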
def device(self, idx): """Get a specific GPU device Args: idx: index of device Returns: NvidiaDevice: single GPU device """ class GpuDevice(Structure): pass c_nvmlDevice_t = POINTER(GpuDevice) c_index = c_uint(idx) device = c_nvmlDevice_t() _check_return(_NVML.get_function( "nvmlDeviceGetHandleByIndex_v2")(c_index, byref(device))) return NvidiaDevice(device)
def function[device, parameter[self, idx]]: constant[Get a specific GPU device Args: idx: index of device Returns: NvidiaDevice: single GPU device ] class class[GpuDevice, parameter[]] begin[:] pass variable[c_nvmlDevice_t] assign[=] call[name[POINTER], parameter[name[GpuDevice]]] variable[c_index] assign[=] call[name[c_uint], parameter[name[idx]]] variable[device] assign[=] call[name[c_nvmlDevice_t], parameter[]] call[name[_check_return], parameter[call[call[name[_NVML].get_function, parameter[constant[nvmlDeviceGetHandleByIndex_v2]]], parameter[name[c_index], call[name[byref], parameter[name[device]]]]]]] return[call[name[NvidiaDevice], parameter[name[device]]]]
keyword[def] identifier[device] ( identifier[self] , identifier[idx] ): literal[string] keyword[class] identifier[GpuDevice] ( identifier[Structure] ): keyword[pass] identifier[c_nvmlDevice_t] = identifier[POINTER] ( identifier[GpuDevice] ) identifier[c_index] = identifier[c_uint] ( identifier[idx] ) identifier[device] = identifier[c_nvmlDevice_t] () identifier[_check_return] ( identifier[_NVML] . identifier[get_function] ( literal[string] )( identifier[c_index] , identifier[byref] ( identifier[device] ))) keyword[return] identifier[NvidiaDevice] ( identifier[device] )
def device(self, idx): """Get a specific GPU device Args: idx: index of device Returns: NvidiaDevice: single GPU device """ class GpuDevice(Structure): pass c_nvmlDevice_t = POINTER(GpuDevice) c_index = c_uint(idx) device = c_nvmlDevice_t() _check_return(_NVML.get_function('nvmlDeviceGetHandleByIndex_v2')(c_index, byref(device))) return NvidiaDevice(device)
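For comparison, the same handle lookup through the official pynvml bindings; this is a separate package, shown only to clarify what the ctypes call above does.

import pynvml

pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(0)   # same NVML entry point
print(pynvml.nvmlDeviceGetName(handle))
pynvml.nvmlShutdown()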
def removeApplicationManifest(self, pchApplicationManifestFullPath): """Removes an application manifest from the list to load when building the list of installed applications.""" fn = self.function_table.removeApplicationManifest result = fn(pchApplicationManifestFullPath) return result
def function[removeApplicationManifest, parameter[self, pchApplicationManifestFullPath]]: constant[Removes an application manifest from the list to load when building the list of installed applications.] variable[fn] assign[=] name[self].function_table.removeApplicationManifest variable[result] assign[=] call[name[fn], parameter[name[pchApplicationManifestFullPath]]] return[name[result]]
keyword[def] identifier[removeApplicationManifest] ( identifier[self] , identifier[pchApplicationManifestFullPath] ): literal[string] identifier[fn] = identifier[self] . identifier[function_table] . identifier[removeApplicationManifest] identifier[result] = identifier[fn] ( identifier[pchApplicationManifestFullPath] ) keyword[return] identifier[result]
def removeApplicationManifest(self, pchApplicationManifestFullPath): """Removes an application manifest from the list to load when building the list of installed applications.""" fn = self.function_table.removeApplicationManifest result = fn(pchApplicationManifestFullPath) return result
def set_server(self, server_pos, key, value): """Set the key to the value for the server_pos (position in the list).""" if zeroconf_tag and self.zeroconf_enable_tag: self.listener.set_server(server_pos, key, value)
def function[set_server, parameter[self, server_pos, key, value]]: constant[Set the key to the value for the server_pos (position in the list).] if <ast.BoolOp object at 0x7da1b1c3d120> begin[:] call[name[self].listener.set_server, parameter[name[server_pos], name[key], name[value]]]
keyword[def] identifier[set_server] ( identifier[self] , identifier[server_pos] , identifier[key] , identifier[value] ): literal[string] keyword[if] identifier[zeroconf_tag] keyword[and] identifier[self] . identifier[zeroconf_enable_tag] : identifier[self] . identifier[listener] . identifier[set_server] ( identifier[server_pos] , identifier[key] , identifier[value] )
def set_server(self, server_pos, key, value): """Set the key to the value for the server_pos (position in the list).""" if zeroconf_tag and self.zeroconf_enable_tag: self.listener.set_server(server_pos, key, value) # depends on [control=['if'], data=[]]
async def update_bucket(self, *,
                            chat: typing.Union[str, int, None] = None,
                            user: typing.Union[str, int, None] = None,
                            bucket: typing.Dict = None,
                            **kwargs):
        """
        Update the bucket for a user in a chat.

        You can pass the new data via the bucket parameter, via kwargs, or both.

        Either chat or user is always required. If one of them is not provided,
        the implementation must derive the missing value from the one given.

        :param bucket:
        :param chat:
        :param user:
        :param kwargs:
        :return:
        """
        raise NotImplementedError
<ast.AsyncFunctionDef object at 0x7da1b18450c0>
keyword[async] keyword[def] identifier[update_bucket] ( identifier[self] ,*, identifier[chat] : identifier[typing] . identifier[Union] [ identifier[str] , identifier[int] , keyword[None] ]= keyword[None] , identifier[user] : identifier[typing] . identifier[Union] [ identifier[str] , identifier[int] , keyword[None] ]= keyword[None] , identifier[bucket] : identifier[typing] . identifier[Dict] = keyword[None] , ** identifier[kwargs] ): literal[string] keyword[raise] identifier[NotImplementedError]
async def update_bucket(self, *, chat: typing.Union[str, int, None]=None, user: typing.Union[str, int, None]=None, bucket: typing.Dict=None, **kwargs):
    """
        Update the bucket for a user in a chat.

        You can pass the new data via the bucket parameter, via kwargs, or both.

        Either chat or user is always required. If one of them is not provided,
        the implementation must derive the missing value from the one given.

        :param bucket:
        :param chat:
        :param user:
        :param kwargs:
        :return:
        """
    raise NotImplementedError
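One possible in-memory implementation of this abstract contract, including the derive-the-missing-key rule the docstring describes; the class name and storage layout are illustrative only.

class MemoryStorage:
    def __init__(self):
        self._buckets = {}

    async def update_bucket(self, *, chat=None, user=None, bucket=None, **kwargs):
        if chat is None:
            chat = user    # derive the missing key from the one provided
        if user is None:
            user = chat
        data = self._buckets.setdefault((chat, user), {})
        data.update(bucket or {})
        data.update(kwargs)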
def get_viscosity(medium="CellCarrier", channel_width=20.0, flow_rate=0.16,
                  temperature=23.0):
    """Returns the viscosity for RT-DC-specific media

    Parameters
    ----------
    medium: str
        The medium to compute the viscosity for.
        One of ["CellCarrier", "CellCarrier B", "water"].
    channel_width: float
        The channel width in µm
    flow_rate: float
        Flow rate in µl/s
    temperature: float or ndarray
        Temperature in °C

    Returns
    -------
    viscosity: float or ndarray
        Viscosity in mPa*s

    Notes
    -----
    - CellCarrier and CellCarrier B media are optimized for
      RT-DC measurements.
    - Values for the viscosity of water are computed using
      equation (15) from :cite:`Kestin_1978`.
    """
    if medium.lower() not in ["cellcarrier", "cellcarrier b", "water"]:
        raise ValueError("Invalid medium: {}".format(medium))

    # convert flow_rate from µl/s to m³/s
    # convert channel_width from µm to m
    term1 = 1.1856 * 6 * flow_rate * 1e-9 / (channel_width * 1e-6)**3 * 2 / 3

    if medium == "CellCarrier":
        temp_corr = (temperature / 23.2)**-0.866
        term2 = 0.6771 / 0.5928 + 0.2121 / (0.5928 * 0.677)
        eta = 0.179 * (term1 * term2)**(0.677 - 1) * temp_corr * 1e3
    elif medium == "CellCarrier B":
        temp_corr = (temperature / 23.6)**-0.866
        term2 = 0.6771 / 0.5928 + 0.2121 / (0.5928 * 0.634)
        eta = 0.360 * (term1 * term2)**(0.634 - 1) * temp_corr * 1e3
    elif medium == "water":
        # see equation (15) in Kestin et al, J. Phys. Chem. 7(3) 1978
        if np.min(temperature) < 0 or np.max(temperature) > 40:
            msg = "For water, the temperature must be in [0, 40] degC! " \
                  "Got min/max values of '{}' and '{}'.".format(np.min(temperature),
                                                                np.max(temperature))
            raise ValueError(msg)
        eta0 = 1.002  # [mPa]
        right = (20-temperature) / (temperature + 96) \
            * (+ 1.2364
               - 1.37e-3 * (20 - temperature)
               + 5.7e-6 * (20 - temperature)**2
               )
        eta = eta0 * 10**right
    return eta
def function[get_viscosity, parameter[medium, channel_width, flow_rate, temperature]]: constant[Returns the viscosity for RT-DC-specific media Parameters ---------- medium: str The medium to compute the viscosity for. One of ["CellCarrier", "CellCarrier B", "water"]. channel_width: float The channel width in µm flow_rate: float Flow rate in µl/s temperature: float or ndarray Temperature in °C Returns ------- viscosity: float or ndarray Viscosity in mPa*s Notes ----- - CellCarrier and CellCarrier B media are optimized for RT-DC measurements. - Values for the viscosity of water are computed using equation (15) from :cite:`Kestin_1978`. ] if compare[call[name[medium].lower, parameter[]] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b1834d00>, <ast.Constant object at 0x7da1b1834ac0>, <ast.Constant object at 0x7da1b18369e0>]]] begin[:] <ast.Raise object at 0x7da1b1834f70> variable[term1] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[1.1856] * constant[6]] * name[flow_rate]] * constant[1e-09]] / binary_operation[binary_operation[name[channel_width] * constant[1e-06]] ** constant[3]]] * constant[2]] / constant[3]] if compare[name[medium] equal[==] constant[CellCarrier]] begin[:] variable[temp_corr] assign[=] binary_operation[binary_operation[name[temperature] / constant[23.2]] ** <ast.UnaryOp object at 0x7da1b1836530>] variable[term2] assign[=] binary_operation[binary_operation[constant[0.6771] / constant[0.5928]] + binary_operation[constant[0.2121] / binary_operation[constant[0.5928] * constant[0.677]]]] variable[eta] assign[=] binary_operation[binary_operation[binary_operation[constant[0.179] * binary_operation[binary_operation[name[term1] * name[term2]] ** binary_operation[constant[0.677] - constant[1]]]] * name[temp_corr]] * constant[1000.0]] return[name[eta]]
keyword[def] identifier[get_viscosity] ( identifier[medium] = literal[string] , identifier[channel_width] = literal[int] , identifier[flow_rate] = literal[int] , identifier[temperature] = literal[int] ): literal[string] keyword[if] identifier[medium] . identifier[lower] () keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] ]: keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[medium] )) identifier[term1] = literal[int] * literal[int] * identifier[flow_rate] * literal[int] /( identifier[channel_width] * literal[int] )** literal[int] * literal[int] / literal[int] keyword[if] identifier[medium] == literal[string] : identifier[temp_corr] =( identifier[temperature] / literal[int] )**- literal[int] identifier[term2] = literal[int] / literal[int] + literal[int] /( literal[int] * literal[int] ) identifier[eta] = literal[int] *( identifier[term1] * identifier[term2] )**( literal[int] - literal[int] )* identifier[temp_corr] * literal[int] keyword[elif] identifier[medium] == literal[string] : identifier[temp_corr] =( identifier[temperature] / literal[int] )**- literal[int] identifier[term2] = literal[int] / literal[int] + literal[int] /( literal[int] * literal[int] ) identifier[eta] = literal[int] *( identifier[term1] * identifier[term2] )**( literal[int] - literal[int] )* identifier[temp_corr] * literal[int] keyword[elif] identifier[medium] == literal[string] : keyword[if] identifier[np] . identifier[min] ( identifier[temperature] )< literal[int] keyword[or] identifier[np] . identifier[max] ( identifier[temperature] )> literal[int] : identifier[msg] = literal[string] literal[string] . identifier[format] ( identifier[np] . identifier[min] ( identifier[temperature] ), identifier[np] . identifier[max] ( identifier[temperature] )) keyword[raise] identifier[ValueError] ( identifier[msg] ) identifier[eta0] = literal[int] identifier[right] =( literal[int] - identifier[temperature] )/( identifier[temperature] + literal[int] )*(+ literal[int] - literal[int] *( literal[int] - identifier[temperature] ) + literal[int] *( literal[int] - identifier[temperature] )** literal[int] ) identifier[eta] = identifier[eta0] * literal[int] ** identifier[right] keyword[return] identifier[eta]
def get_viscosity(medium='CellCarrier', channel_width=20.0, flow_rate=0.16, temperature=23.0):
    """Returns the viscosity for RT-DC-specific media

    Parameters
    ----------
    medium: str
        The medium to compute the viscosity for.
        One of ["CellCarrier", "CellCarrier B", "water"].
    channel_width: float
        The channel width in µm
    flow_rate: float
        Flow rate in µl/s
    temperature: float or ndarray
        Temperature in °C

    Returns
    -------
    viscosity: float or ndarray
        Viscosity in mPa*s

    Notes
    -----
    - CellCarrier and CellCarrier B media are optimized for
      RT-DC measurements.
    - Values for the viscosity of water are computed using
      equation (15) from :cite:`Kestin_1978`.
    """
    if medium.lower() not in ['cellcarrier', 'cellcarrier b', 'water']:
        raise ValueError('Invalid medium: {}'.format(medium)) # depends on [control=['if'], data=[]]
    # convert flow_rate from µl/s to m³/s
    # convert channel_width from µm to m
    term1 = 1.1856 * 6 * flow_rate * 1e-09 / (channel_width * 1e-06) ** 3 * 2 / 3
    if medium == 'CellCarrier':
        temp_corr = (temperature / 23.2) ** (-0.866)
        term2 = 0.6771 / 0.5928 + 0.2121 / (0.5928 * 0.677)
        eta = 0.179 * (term1 * term2) ** (0.677 - 1) * temp_corr * 1000.0 # depends on [control=['if'], data=[]]
    elif medium == 'CellCarrier B':
        temp_corr = (temperature / 23.6) ** (-0.866)
        term2 = 0.6771 / 0.5928 + 0.2121 / (0.5928 * 0.634)
        eta = 0.36 * (term1 * term2) ** (0.634 - 1) * temp_corr * 1000.0 # depends on [control=['if'], data=[]]
    elif medium == 'water':
        # see equation (15) in Kestin et al, J. Phys. Chem. 7(3) 1978
        if np.min(temperature) < 0 or np.max(temperature) > 40:
            msg = "For water, the temperature must be in [0, 40] degC! Got min/max values of '{}' and '{}'.".format(np.min(temperature), np.max(temperature))
            raise ValueError(msg) # depends on [control=['if'], data=[]]
        eta0 = 1.002 # [mPa]
        right = (20 - temperature) / (temperature + 96) * (+1.2364 - 0.00137 * (20 - temperature) + 5.7e-06 * (20 - temperature) ** 2)
        eta = eta0 * 10 ** right # depends on [control=['if'], data=[]]
    return eta
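A worked example for the water branch (it relies on the module-level numpy import this function assumes): at 24 degC the Kestin correlation gives approximately the tabulated 0.911 mPa*s.

eta = get_viscosity(medium='water', temperature=24.0)
print(round(float(eta), 3), 'mPa*s')   # -> 0.911 mPa*s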
def dancing_links(size_universe, sets): """Exact set cover by the dancing links algorithm :param size_universe: universe = {0, 1, ..., size_universe - 1} :param sets: list of sets :returns: list of set indices partitioning the universe, or None :complexity: huge """ header = Cell(None, None, 0, None) # building the cell structure col = [] for j in range(size_universe): col.append(Cell(header, None, 0, None)) for i in range(len(sets)): row = None for j in sets[i]: col[j].S += 1 # one more entry in this column row = Cell(row, col[j], i, col[j]) sol = [] if solve(header, sol): return sol else: return None
def function[dancing_links, parameter[size_universe, sets]]: constant[Exact set cover by the dancing links algorithm :param size_universe: universe = {0, 1, ..., size_universe - 1} :param sets: list of sets :returns: list of set indices partitioning the universe, or None :complexity: huge ] variable[header] assign[=] call[name[Cell], parameter[constant[None], constant[None], constant[0], constant[None]]] variable[col] assign[=] list[[]] for taget[name[j]] in starred[call[name[range], parameter[name[size_universe]]]] begin[:] call[name[col].append, parameter[call[name[Cell], parameter[name[header], constant[None], constant[0], constant[None]]]]] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[sets]]]]]] begin[:] variable[row] assign[=] constant[None] for taget[name[j]] in starred[call[name[sets]][name[i]]] begin[:] <ast.AugAssign object at 0x7da1b07ceb60> variable[row] assign[=] call[name[Cell], parameter[name[row], call[name[col]][name[j]], name[i], call[name[col]][name[j]]]] variable[sol] assign[=] list[[]] if call[name[solve], parameter[name[header], name[sol]]] begin[:] return[name[sol]]
keyword[def] identifier[dancing_links] ( identifier[size_universe] , identifier[sets] ): literal[string] identifier[header] = identifier[Cell] ( keyword[None] , keyword[None] , literal[int] , keyword[None] ) identifier[col] =[] keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[size_universe] ): identifier[col] . identifier[append] ( identifier[Cell] ( identifier[header] , keyword[None] , literal[int] , keyword[None] )) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[sets] )): identifier[row] = keyword[None] keyword[for] identifier[j] keyword[in] identifier[sets] [ identifier[i] ]: identifier[col] [ identifier[j] ]. identifier[S] += literal[int] identifier[row] = identifier[Cell] ( identifier[row] , identifier[col] [ identifier[j] ], identifier[i] , identifier[col] [ identifier[j] ]) identifier[sol] =[] keyword[if] identifier[solve] ( identifier[header] , identifier[sol] ): keyword[return] identifier[sol] keyword[else] : keyword[return] keyword[None]
def dancing_links(size_universe, sets): """Exact set cover by the dancing links algorithm :param size_universe: universe = {0, 1, ..., size_universe - 1} :param sets: list of sets :returns: list of set indices partitioning the universe, or None :complexity: huge """ header = Cell(None, None, 0, None) # building the cell structure col = [] for j in range(size_universe): col.append(Cell(header, None, 0, None)) # depends on [control=['for'], data=[]] for i in range(len(sets)): row = None for j in sets[i]: col[j].S += 1 # one more entry in this column row = Cell(row, col[j], i, col[j]) # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']] sol = [] if solve(header, sol): return sol # depends on [control=['if'], data=[]] else: return None
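Knuth's classic exact-cover instance as a quick check; the Cell class and solve() helper used above are assumed to come from the same module.

sets = [{2, 4, 5}, {0, 3, 6}, {1, 2, 5}, {0, 3}, {1, 6}, {3, 4, 6}]
print(dancing_links(7, sets))
# the unique cover is sets 0, 3 and 4 ({2,4,5} + {0,3} + {1,6}),
# returned in whatever order solve() discovers them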
def _enforce_no_overlap(self, start_at=0): """Enforce that no ranges overlap in internal storage.""" i = start_at while i+1 < len(self.data): if self.data[i][1] >= self.data[i+1][0]: # beginning of i+1-th range is contained in i-th range if self.data[i][1] < self.data[i+1][1]: # i+1-th range is longer, thus enlarge i-th range self.data[i][1] = self.data[i+1][1] # removed contained range del self.data[i+1] i += 1
def function[_enforce_no_overlap, parameter[self, start_at]]: constant[Enforce that no ranges overlap in internal storage.] variable[i] assign[=] name[start_at] while compare[binary_operation[name[i] + constant[1]] less[<] call[name[len], parameter[name[self].data]]] begin[:] if compare[call[call[name[self].data][name[i]]][constant[1]] greater_or_equal[>=] call[call[name[self].data][binary_operation[name[i] + constant[1]]]][constant[0]]] begin[:] if compare[call[call[name[self].data][name[i]]][constant[1]] less[<] call[call[name[self].data][binary_operation[name[i] + constant[1]]]][constant[1]]] begin[:] call[call[name[self].data][name[i]]][constant[1]] assign[=] call[call[name[self].data][binary_operation[name[i] + constant[1]]]][constant[1]] <ast.Delete object at 0x7da18bc71990> <ast.AugAssign object at 0x7da18bc70730>
keyword[def] identifier[_enforce_no_overlap] ( identifier[self] , identifier[start_at] = literal[int] ): literal[string] identifier[i] = identifier[start_at] keyword[while] identifier[i] + literal[int] < identifier[len] ( identifier[self] . identifier[data] ): keyword[if] identifier[self] . identifier[data] [ identifier[i] ][ literal[int] ]>= identifier[self] . identifier[data] [ identifier[i] + literal[int] ][ literal[int] ]: keyword[if] identifier[self] . identifier[data] [ identifier[i] ][ literal[int] ]< identifier[self] . identifier[data] [ identifier[i] + literal[int] ][ literal[int] ]: identifier[self] . identifier[data] [ identifier[i] ][ literal[int] ]= identifier[self] . identifier[data] [ identifier[i] + literal[int] ][ literal[int] ] keyword[del] identifier[self] . identifier[data] [ identifier[i] + literal[int] ] identifier[i] += literal[int]
def _enforce_no_overlap(self, start_at=0): """Enforce that no ranges overlap in internal storage.""" i = start_at while i + 1 < len(self.data): if self.data[i][1] >= self.data[i + 1][0]: # beginning of i+1-th range is contained in i-th range if self.data[i][1] < self.data[i + 1][1]: # i+1-th range is longer, thus enlarge i-th range self.data[i][1] = self.data[i + 1][1] # depends on [control=['if'], data=[]] # removed contained range del self.data[i + 1] # depends on [control=['if'], data=[]] i += 1 # depends on [control=['while'], data=[]]
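A behaviour sketch on a minimal container; the method assumes self.data holds [start, end] pairs sorted by start, and merges overlapping neighbours in place.

class Ranges:
    def __init__(self, data):
        self.data = data

Ranges._enforce_no_overlap = _enforce_no_overlap   # reuse the method above

r = Ranges([[0, 5], [3, 7], [9, 12], [11, 11]])
r._enforce_no_overlap()
print(r.data)   # [[0, 7], [9, 12]]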
def match_rules_context_multi(tree, rules, parent_context={}):
    """Recursively matches a Tree structure with rules and returns context

    Args:
        tree (Tree): Parsed tree structure
        rules (dict): See match_rules
        parent_context (dict): Context of parent call

    Returns:
        list: Context dicts, one per way the rules match (empty if no match)
    """
    all_contexts = []
    for template, match_rules in rules.items():
        context = parent_context.copy()
        if match_template(tree, template, context):
            child_contextss = []
            if not match_rules:
                all_contexts += [context]
            else:
                for key, child_rules in match_rules.items():
                    child_contextss.append(match_rules_context_multi(context[key], child_rules, context))
                all_contexts += cross_context(child_contextss)
    return all_contexts
def function[match_rules_context_multi, parameter[tree, rules, parent_context]]: constant[Recursively matches a Tree structure with rules and returns context Args: tree (Tree): Parsed tree structure rules (dict): See match_rules parent_context (dict): Context of parent call Returns: list: Context dicts, one per way the rules match (empty if no match) ] variable[all_contexts] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da20c993e50>, <ast.Name object at 0x7da20c9931f0>]]] in starred[call[name[rules].items, parameter[]]] begin[:] variable[context] assign[=] call[name[parent_context].copy, parameter[]] if call[name[match_template], parameter[name[tree], name[template], name[context]]] begin[:] variable[child_contextss] assign[=] list[[]] if <ast.UnaryOp object at 0x7da20c991870> begin[:] <ast.AugAssign object at 0x7da20c9910f0> return[name[all_contexts]]
keyword[def] identifier[match_rules_context_multi] ( identifier[tree] , identifier[rules] , identifier[parent_context] ={}): literal[string] identifier[all_contexts] =[] keyword[for] identifier[template] , identifier[match_rules] keyword[in] identifier[rules] . identifier[items] (): identifier[context] = identifier[parent_context] . identifier[copy] () keyword[if] identifier[match_template] ( identifier[tree] , identifier[template] , identifier[context] ): identifier[child_contextss] =[] keyword[if] keyword[not] identifier[match_rules] : identifier[all_contexts] +=[ identifier[context] ] keyword[else] : keyword[for] identifier[key] , identifier[child_rules] keyword[in] identifier[match_rules] . identifier[items] (): identifier[child_contextss] . identifier[append] ( identifier[match_rules_context_multi] ( identifier[context] [ identifier[key] ], identifier[child_rules] , identifier[context] )) identifier[all_contexts] += identifier[cross_context] ( identifier[child_contextss] ) keyword[return] identifier[all_contexts]
def match_rules_context_multi(tree, rules, parent_context={}):
    """Recursively matches a Tree structure with rules and returns context

    Args:
        tree (Tree): Parsed tree structure
        rules (dict): See match_rules
        parent_context (dict): Context of parent call

    Returns:
        list: Context dicts, one per way the rules match (empty if no match)
    """
    all_contexts = []
    for (template, match_rules) in rules.items():
        context = parent_context.copy()
        if match_template(tree, template, context):
            child_contextss = []
            if not match_rules:
                all_contexts += [context] # depends on [control=['if'], data=[]]
            else:
                for (key, child_rules) in match_rules.items():
                    child_contextss.append(match_rules_context_multi(context[key], child_rules, context)) # depends on [control=['for'], data=[]]
                all_contexts += cross_context(child_contextss) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
    return all_contexts
def user_list(profile=None, **connection_args): ''' Return a list of available users (keystone user-list) CLI Example: .. code-block:: bash salt '*' keystone.user_list ''' kstone = auth(profile, **connection_args) ret = {} for user in kstone.users.list(): ret[user.name] = dict((value, getattr(user, value, None)) for value in dir(user) if not value.startswith('_') and isinstance(getattr(user, value, None), (six.string_types, dict, bool))) tenant_id = getattr(user, 'tenantId', None) if tenant_id: ret[user.name]['tenant_id'] = tenant_id return ret
def function[user_list, parameter[profile]]: constant[ Return a list of available users (keystone user-list) CLI Example: .. code-block:: bash salt '*' keystone.user_list ] variable[kstone] assign[=] call[name[auth], parameter[name[profile]]] variable[ret] assign[=] dictionary[[], []] for taget[name[user]] in starred[call[name[kstone].users.list, parameter[]]] begin[:] call[name[ret]][name[user].name] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da18bc70fa0>]] variable[tenant_id] assign[=] call[name[getattr], parameter[name[user], constant[tenantId], constant[None]]] if name[tenant_id] begin[:] call[call[name[ret]][name[user].name]][constant[tenant_id]] assign[=] name[tenant_id] return[name[ret]]
keyword[def] identifier[user_list] ( identifier[profile] = keyword[None] ,** identifier[connection_args] ): literal[string] identifier[kstone] = identifier[auth] ( identifier[profile] ,** identifier[connection_args] ) identifier[ret] ={} keyword[for] identifier[user] keyword[in] identifier[kstone] . identifier[users] . identifier[list] (): identifier[ret] [ identifier[user] . identifier[name] ]= identifier[dict] (( identifier[value] , identifier[getattr] ( identifier[user] , identifier[value] , keyword[None] )) keyword[for] identifier[value] keyword[in] identifier[dir] ( identifier[user] ) keyword[if] keyword[not] identifier[value] . identifier[startswith] ( literal[string] ) keyword[and] identifier[isinstance] ( identifier[getattr] ( identifier[user] , identifier[value] , keyword[None] ),( identifier[six] . identifier[string_types] , identifier[dict] , identifier[bool] ))) identifier[tenant_id] = identifier[getattr] ( identifier[user] , literal[string] , keyword[None] ) keyword[if] identifier[tenant_id] : identifier[ret] [ identifier[user] . identifier[name] ][ literal[string] ]= identifier[tenant_id] keyword[return] identifier[ret]
def user_list(profile=None, **connection_args): """ Return a list of available users (keystone user-list) CLI Example: .. code-block:: bash salt '*' keystone.user_list """ kstone = auth(profile, **connection_args) ret = {} for user in kstone.users.list(): ret[user.name] = dict(((value, getattr(user, value, None)) for value in dir(user) if not value.startswith('_') and isinstance(getattr(user, value, None), (six.string_types, dict, bool)))) tenant_id = getattr(user, 'tenantId', None) if tenant_id: ret[user.name]['tenant_id'] = tenant_id # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['user']] return ret
def from_str(self, instr):
        '''Undo the effect of __str__'''
        if not instr:
            return FlexiDate()
        out = self.our_re.match(instr)
        if out is None: # no match TODO: raise Exception?
            return None
        else:
            return FlexiDate(
                    out.group('year'),
                    out.group('month'),
                    out.group('day'),
                    out.group('hour'),
                    out.group('minute'),
                    out.group('second'),
                    out.group('microsecond'),
                    qualifier=out.group('qualifier')
                    )
def function[from_str, parameter[self, instr]]: constant[Undo the effect of __str__] if <ast.UnaryOp object at 0x7da1b0c15420> begin[:] return[call[name[FlexiDate], parameter[]]] variable[out] assign[=] call[name[self].our_re.match, parameter[name[instr]]] if compare[name[out] is constant[None]] begin[:] return[constant[None]]
keyword[def] identifier[from_str] ( identifier[self] , identifier[instr] ): literal[string] keyword[if] keyword[not] identifier[instr] : keyword[return] identifier[FlexiDate] () identifier[out] = identifier[self] . identifier[our_re] . identifier[match] ( identifier[instr] ) keyword[if] identifier[out] keyword[is] keyword[None] : keyword[return] keyword[None] keyword[else] : keyword[return] identifier[FlexiDate] ( identifier[out] . identifier[group] ( literal[string] ), identifier[out] . identifier[group] ( literal[string] ), identifier[out] . identifier[group] ( literal[string] ), identifier[out] . identifier[group] ( literal[string] ), identifier[out] . identifier[group] ( literal[string] ), identifier[out] . identifier[group] ( literal[string] ), identifier[out] . identifier[group] ( literal[string] ), identifier[qualifier] = identifier[out] . identifier[group] ( literal[string] ) )
def from_str(self, instr): """Undo the effect of __str__""" if not instr: return FlexiDate() # depends on [control=['if'], data=[]] out = self.our_re.match(instr) if out is None: # no match TODO: raise Exception? return None # depends on [control=['if'], data=[]] else: return FlexiDate(out.group('year'), out.group('month'), out.group('day'), out.group('hour'), out.group('minute'), out.group('second'), out.group('microsecond'), qualifier=out.group('qualifier'))
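The mechanism from_str relies on, in miniature: a compiled pattern with named groups feeds the constructor, and group() yields None for optional parts that did not match. The pattern below is illustrative, not FlexiDate's actual our_re.

import re

our_re = re.compile(
    r'(?P<year>\d{4})(?:-(?P<month>\d{2}))?(?:-(?P<day>\d{2}))?')
m = our_re.match('1890-01')
print(m.group('year'), m.group('month'), m.group('day'))   # 1890 01 None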
def weld_iloc_indices_with_missing(array, weld_type, indices):
    """Retrieve the values at indices. Indices at or beyond the array length
    get replaced with a corresponding-type missing value literal.

    Parameters
    ----------
    array : numpy.ndarray or WeldObject
        Input data.
    weld_type : WeldType
        The WeldType of the array data.
    indices : numpy.ndarray or WeldObject
        The indices to lookup.

    Returns
    -------
    WeldObject
        Representation of this computation.

    """
    weld_obj = create_empty_weld_object()
    weld_obj_id_array = get_weld_obj_id(weld_obj, array)
    weld_obj_id_indices = get_weld_obj_id(weld_obj, indices)

    missing_literal = default_missing_data_literal(weld_type)
    if weld_type == WeldVec(WeldChar()):
        missing_literal = get_weld_obj_id(weld_obj, missing_literal)

    weld_template = """let len_array = len({array});
result(
    for({indices},
        appender[{type}],
        |b: appender[{type}], i: i64, e: i64|
            if(e >= len_array,
                merge(b, {missing}),
                merge(b, lookup({array}, e))
            )
    )
)"""

    weld_obj.weld_code = weld_template.format(array=weld_obj_id_array,
                                              indices=weld_obj_id_indices,
                                              type=weld_type,
                                              missing=missing_literal)

    return weld_obj
def function[weld_iloc_indices_with_missing, parameter[array, weld_type, indices]]: constant[Retrieve the values at indices. Indices at or beyond the array length get replaced with a corresponding-type missing value literal. Parameters ---------- array : numpy.ndarray or WeldObject Input data. weld_type : WeldType The WeldType of the array data. indices : numpy.ndarray or WeldObject The indices to lookup. Returns ------- WeldObject Representation of this computation. ] variable[weld_obj] assign[=] call[name[create_empty_weld_object], parameter[]] variable[weld_obj_id_array] assign[=] call[name[get_weld_obj_id], parameter[name[weld_obj], name[array]]] variable[weld_obj_id_indices] assign[=] call[name[get_weld_obj_id], parameter[name[weld_obj], name[indices]]] variable[missing_literal] assign[=] call[name[default_missing_data_literal], parameter[name[weld_type]]] if compare[name[weld_type] equal[==] call[name[WeldVec], parameter[call[name[WeldChar], parameter[]]]]] begin[:] variable[missing_literal] assign[=] call[name[get_weld_obj_id], parameter[name[weld_obj], name[missing_literal]]] variable[weld_template] assign[=] constant[let len_array = len({array});
result(
    for({indices},
        appender[{type}],
        |b: appender[{type}], i: i64, e: i64|
            if(e >= len_array,
                merge(b, {missing}),
                merge(b, lookup({array}, e))
            )
    )
)] name[weld_obj].weld_code assign[=] call[name[weld_template].format, parameter[]] return[name[weld_obj]]
keyword[def] identifier[weld_iloc_indices_with_missing] ( identifier[array] , identifier[weld_type] , identifier[indices] ): literal[string] identifier[weld_obj] = identifier[create_empty_weld_object] () identifier[weld_obj_id_array] = identifier[get_weld_obj_id] ( identifier[weld_obj] , identifier[array] ) identifier[weld_obj_id_indices] = identifier[get_weld_obj_id] ( identifier[weld_obj] , identifier[indices] ) identifier[missing_literal] = identifier[default_missing_data_literal] ( identifier[weld_type] ) keyword[if] identifier[weld_type] == identifier[WeldVec] ( identifier[WeldChar] ()): identifier[missing_literal] = identifier[get_weld_obj_id] ( identifier[weld_obj] , identifier[missing_literal] ) identifier[weld_template] = literal[string] identifier[weld_obj] . identifier[weld_code] = identifier[weld_template] . identifier[format] ( identifier[array] = identifier[weld_obj_id_array] , identifier[indices] = identifier[weld_obj_id_indices] , identifier[type] = identifier[weld_type] , identifier[missing] = identifier[missing_literal] ) keyword[return] identifier[weld_obj]
def weld_iloc_indices_with_missing(array, weld_type, indices): """Retrieve the values at indices. Indices greater than array length get replaced with a corresponding-type missing value literal. Parameters ---------- array : numpy.ndarray or WeldObject Input data. Assumed to be bool data. weld_type : WeldType The WeldType of the array data. indices : numpy.ndarray or WeldObject The indices to lookup. Returns ------- WeldObject Representation of this computation. """ weld_obj = create_empty_weld_object() weld_obj_id_array = get_weld_obj_id(weld_obj, array) weld_obj_id_indices = get_weld_obj_id(weld_obj, indices) missing_literal = default_missing_data_literal(weld_type) if weld_type == WeldVec(WeldChar()): missing_literal = get_weld_obj_id(weld_obj, missing_literal) # depends on [control=['if'], data=[]] weld_template = 'let len_array = len({array});\nresult(\n for({indices},\n appender[{type}],\n |b: appender[{type}], i: i64, e: i64|\n if(e >= len_array,\n merge(b, {missing}),\n merge(b, lookup({array}, e))\n )\n )\n)' weld_obj.weld_code = weld_template.format(array=weld_obj_id_array, indices=weld_obj_id_indices, type=weld_type, missing=missing_literal) return weld_obj
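A minimal usage sketch for weld_iloc_indices_with_missing. The helper and WeldLong are assumed to come from the surrounding weld-utility module (a baloo-style setup); only the call shape is illustrated, not evaluation.

import numpy as np

# Assumed imports from the surrounding module (not shown above):
# weld_iloc_indices_with_missing, WeldLong

data = np.array([10, 20, 30], dtype=np.int64)
indices = np.array([0, 2, 5], dtype=np.int64)   # 5 is past the end of data

weld_obj = weld_iloc_indices_with_missing(data, WeldLong(), indices)
# weld_obj.weld_code now holds the lazy Weld program; once evaluated it
# would yield [10, 30, <int64 missing literal>], the out-of-range index
# having been replaced by default_missing_data_literal(WeldLong()).
print(weld_obj.weld_code)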
def move_datetime_year(dt, direction, num_shifts):
    """
    Move the datetime ``num_shifts`` years in the chosen direction.
    Years need no ``unit`` argument, unlike the day-based helper whose
    API this mirrors.
    """
    delta = relativedelta(years=+num_shifts)
    return _move_datetime(dt, direction, delta)
def function[move_datetime_year, parameter[dt, direction, num_shifts]]: constant[ Move datetime 1 year in the chosen direction. unit is a no-op, to keep the API the same as the day case ] variable[delta] assign[=] call[name[relativedelta], parameter[]] return[call[name[_move_datetime], parameter[name[dt], name[direction], name[delta]]]]
keyword[def] identifier[move_datetime_year] ( identifier[dt] , identifier[direction] , identifier[num_shifts] ): literal[string] identifier[delta] = identifier[relativedelta] ( identifier[years] =+ identifier[num_shifts] ) keyword[return] identifier[_move_datetime] ( identifier[dt] , identifier[direction] , identifier[delta] )
def move_datetime_year(dt, direction, num_shifts): """ Move datetime 1 year in the chosen direction. unit is a no-op, to keep the API the same as the day case """ delta = relativedelta(years=+num_shifts) return _move_datetime(dt, direction, delta)
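A short sketch of the behaviour, assuming _move_datetime (defined elsewhere in the module) simply applies the relativedelta in the given direction; the set of accepted direction tokens is also defined elsewhere, so 'up' below is illustrative.

from datetime import datetime
from dateutil.relativedelta import relativedelta

dt = datetime(2020, 2, 29)
# relativedelta clamps invalid dates, so moving a leap day forward one
# year lands on 2021-02-28 rather than raising.
print(dt + relativedelta(years=+1))   # 2021-02-28 00:00:00
# Via the helper (direction token is hypothetical):
# move_datetime_year(dt, 'up', 1)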
def get_fno_lot_sizes(self, cached=True, as_json=False):
        """
        Returns a dictionary with the stock code as key and its F&O lot
        size as value. It also implements cache functionality and hits
        the server only if the user insists or the cache is empty.
        :return: dict
        """
        url = self.fno_lot_size_url
        req = Request(url, None, self.headers)
        res_dict = {}
        if cached is not True or self.__CODECACHE__ is None:
            # raises HTTPError and URLError
            res = self.opener.open(req)
            if res is not None:
                # for py3 compat convert the byte file-like object to
                # a string file-like object
                res = byte_adaptor(res)
                for line in res.read().split('\n'):
                    if line != '' and re.search(',', line) and (line.casefold().find('symbol') == -1):
                        (code, name) = [x.strip() for x in line.split(',')[1:3]]
                        res_dict[code] = int(name)
                    # else just skip the evaluation, line may not be a valid csv
            else:
                raise Exception('no response received')
            self.__CODECACHE__ = res_dict
        return self.render_response(self.__CODECACHE__, as_json)
def function[get_fno_lot_sizes, parameter[self, cached, as_json]]: constant[ returns a dictionary with key as stock code and value as stock name. It also implements cache functionality and hits the server only if user insists or cache is empty :return: dict ] variable[url] assign[=] name[self].fno_lot_size_url variable[req] assign[=] call[name[Request], parameter[name[url], constant[None], name[self].headers]] variable[res_dict] assign[=] dictionary[[], []] if <ast.BoolOp object at 0x7da1b1d80a60> begin[:] variable[res] assign[=] call[name[self].opener.open, parameter[name[req]]] if compare[name[res] is_not constant[None]] begin[:] variable[res] assign[=] call[name[byte_adaptor], parameter[name[res]]] for taget[name[line]] in starred[call[call[name[res].read, parameter[]].split, parameter[constant[ ]]]] begin[:] if <ast.BoolOp object at 0x7da1b1d81de0> begin[:] <ast.Tuple object at 0x7da1b1d81840> assign[=] <ast.ListComp object at 0x7da1b1d80bb0> call[name[res_dict]][name[code]] assign[=] call[name[int], parameter[name[name]]] name[self].__CODECACHE__ assign[=] name[res_dict] return[call[name[self].render_response, parameter[name[self].__CODECACHE__, name[as_json]]]]
keyword[def] identifier[get_fno_lot_sizes] ( identifier[self] , identifier[cached] = keyword[True] , identifier[as_json] = keyword[False] ): literal[string] identifier[url] = identifier[self] . identifier[fno_lot_size_url] identifier[req] = identifier[Request] ( identifier[url] , keyword[None] , identifier[self] . identifier[headers] ) identifier[res_dict] ={} keyword[if] identifier[cached] keyword[is] keyword[not] keyword[True] keyword[or] identifier[self] . identifier[__CODECACHE__] keyword[is] keyword[None] : identifier[res] = identifier[self] . identifier[opener] . identifier[open] ( identifier[req] ) keyword[if] identifier[res] keyword[is] keyword[not] keyword[None] : identifier[res] = identifier[byte_adaptor] ( identifier[res] ) keyword[for] identifier[line] keyword[in] identifier[res] . identifier[read] (). identifier[split] ( literal[string] ): keyword[if] identifier[line] != literal[string] keyword[and] identifier[re] . identifier[search] ( literal[string] , identifier[line] ) keyword[and] ( identifier[line] . identifier[casefold] (). identifier[find] ( literal[string] )==- literal[int] ): ( identifier[code] , identifier[name] )=[ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[line] . identifier[split] ( literal[string] )[ literal[int] : literal[int] ]] identifier[res_dict] [ identifier[code] ]= identifier[int] ( identifier[name] ) keyword[else] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[self] . identifier[__CODECACHE__] = identifier[res_dict] keyword[return] identifier[self] . identifier[render_response] ( identifier[self] . identifier[__CODECACHE__] , identifier[as_json] )
def get_fno_lot_sizes(self, cached=True, as_json=False): """ returns a dictionary with key as stock code and value as stock name. It also implements cache functionality and hits the server only if user insists or cache is empty :return: dict """ url = self.fno_lot_size_url req = Request(url, None, self.headers) res_dict = {} if cached is not True or self.__CODECACHE__ is None: # raises HTTPError and URLError res = self.opener.open(req) if res is not None: # for py3 compat covert byte file like object to # string file like object res = byte_adaptor(res) for line in res.read().split('\n'): if line != '' and re.search(',', line) and (line.casefold().find('symbol') == -1): (code, name) = [x.strip() for x in line.split(',')[1:3]] res_dict[code] = int(name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=['res']] else: # else just skip the evaluation, line may not be a valid csv raise Exception('no response received') self.__CODECACHE__ = res_dict # depends on [control=['if'], data=[]] return self.render_response(self.__CODECACHE__, as_json)
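A hedged usage sketch; Nse is assumed to be the nsetools-style class this method belongs to, and the first call needs network access to the NSE endpoint.

nse = Nse()                      # the class defining this method (assumed)
lots = nse.get_fno_lot_sizes()   # first call hits the server, later calls reuse __CODECACHE__
print(lots.get('SBIN'))          # the lot size as an int (value illustrative)
fresh = nse.get_fno_lot_sizes(cached=False, as_json=True)  # force a refetch, JSON output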
def list_deelgemeenten(self, gewest=2): ''' List all `deelgemeenten` in a `gewest`. :param gewest: The :class:`Gewest` for which the \ `deelgemeenten` are wanted. Currently only Flanders is supported. :rtype: A :class:`list` of :class:`Deelgemeente`. ''' try: gewest_id = gewest.id except AttributeError: gewest_id = gewest if gewest_id != 2: raise ValueError('Currently only deelgemeenten in Flanders are known.') def creator(): return [Deelgemeente(dg['id'], dg['naam'], dg['gemeente_niscode']) for dg in self.deelgemeenten.values()] if self.caches['permanent'].is_configured: key = 'ListDeelgemeentenByGewestId#%s' % gewest_id deelgemeenten = self.caches['permanent'].get_or_create(key, creator) else: deelgemeenten = creator() for dg in deelgemeenten: dg.set_gateway(self) return deelgemeenten
def function[list_deelgemeenten, parameter[self, gewest]]: constant[ List all `deelgemeenten` in a `gewest`. :param gewest: The :class:`Gewest` for which the `deelgemeenten` are wanted. Currently only Flanders is supported. :rtype: A :class:`list` of :class:`Deelgemeente`. ] <ast.Try object at 0x7da1b0aa4070> if compare[name[gewest_id] not_equal[!=] constant[2]] begin[:] <ast.Raise object at 0x7da1b0aa6620> def function[creator, parameter[]]: return[<ast.ListComp object at 0x7da1b0aa6740>] if call[name[self].caches][constant[permanent]].is_configured begin[:] variable[key] assign[=] binary_operation[constant[ListDeelgemeentenByGewestId#%s] <ast.Mod object at 0x7da2590d6920> name[gewest_id]] variable[deelgemeenten] assign[=] call[call[name[self].caches][constant[permanent]].get_or_create, parameter[name[key], name[creator]]] for taget[name[dg]] in starred[name[deelgemeenten]] begin[:] call[name[dg].set_gateway, parameter[name[self]]] return[name[deelgemeenten]]
keyword[def] identifier[list_deelgemeenten] ( identifier[self] , identifier[gewest] = literal[int] ): literal[string] keyword[try] : identifier[gewest_id] = identifier[gewest] . identifier[id] keyword[except] identifier[AttributeError] : identifier[gewest_id] = identifier[gewest] keyword[if] identifier[gewest_id] != literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[def] identifier[creator] (): keyword[return] [ identifier[Deelgemeente] ( identifier[dg] [ literal[string] ], identifier[dg] [ literal[string] ], identifier[dg] [ literal[string] ]) keyword[for] identifier[dg] keyword[in] identifier[self] . identifier[deelgemeenten] . identifier[values] ()] keyword[if] identifier[self] . identifier[caches] [ literal[string] ]. identifier[is_configured] : identifier[key] = literal[string] % identifier[gewest_id] identifier[deelgemeenten] = identifier[self] . identifier[caches] [ literal[string] ]. identifier[get_or_create] ( identifier[key] , identifier[creator] ) keyword[else] : identifier[deelgemeenten] = identifier[creator] () keyword[for] identifier[dg] keyword[in] identifier[deelgemeenten] : identifier[dg] . identifier[set_gateway] ( identifier[self] ) keyword[return] identifier[deelgemeenten]
def list_deelgemeenten(self, gewest=2): """ List all `deelgemeenten` in a `gewest`. :param gewest: The :class:`Gewest` for which the `deelgemeenten` are wanted. Currently only Flanders is supported. :rtype: A :class:`list` of :class:`Deelgemeente`. """ try: gewest_id = gewest.id # depends on [control=['try'], data=[]] except AttributeError: gewest_id = gewest # depends on [control=['except'], data=[]] if gewest_id != 2: raise ValueError('Currently only deelgemeenten in Flanders are known.') # depends on [control=['if'], data=[]] def creator(): return [Deelgemeente(dg['id'], dg['naam'], dg['gemeente_niscode']) for dg in self.deelgemeenten.values()] if self.caches['permanent'].is_configured: key = 'ListDeelgemeentenByGewestId#%s' % gewest_id deelgemeenten = self.caches['permanent'].get_or_create(key, creator) # depends on [control=['if'], data=[]] else: deelgemeenten = creator() for dg in deelgemeenten: dg.set_gateway(self) # depends on [control=['for'], data=['dg']] return deelgemeenten
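A sketch of calling this through a configured gateway object; the gateway setup is hypothetical (in crabpy-style usage it wraps the CRAB service and owns the caches), and the attribute name below is inferred from the Deelgemeente constructor arguments seen above.

deelgemeenten = gateway.list_deelgemeenten(2)   # 2 = Flanders, the only supported gewest
print(len(deelgemeenten))
for dg in deelgemeenten[:3]:
    print(dg.id)   # attribute name assumed from Deelgemeente(dg['id'], ...)

try:
    gateway.list_deelgemeenten(1)   # any other gewest raises
except ValueError as err:
    print(err)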
def input(self, field):
        """Gets user input for the given field. Can be interrupted with ^C.

        :field: Field name.
        :returns: User input.
        """
        try:
            desc = Get.TYPES[field]
            return input("{}|{}[{}]> ".format(
                field,
                "-" * (Get._LEN - len(field) - len(desc)),
                desc
            ))
        except KeyboardInterrupt:
            print()
            exit(0)
def function[input, parameter[self, field]]: constant[Gets user input for given field. Can be interrupted with ^C. :field: Field name. :returns: User input. ] <ast.Try object at 0x7da1b0b71720>
keyword[def] identifier[input] ( identifier[self] , identifier[field] ): literal[string] keyword[try] : identifier[desc] = identifier[Get] . identifier[TYPES] [ identifier[field] ] keyword[return] identifier[input] ( literal[string] . identifier[format] ( identifier[field] , literal[string] *( identifier[Get] . identifier[_LEN] - identifier[len] ( identifier[field] )- identifier[len] ( identifier[desc] )), identifier[desc] )) keyword[except] identifier[KeyboardInterrupt] : identifier[print] () identifier[exit] ( literal[int] )
def input(self, field): """Gets user input for given field. Can be interrupted with ^C. :field: Field name. :returns: User input. """ try: desc = Get.TYPES[field] return input('{}|{}[{}]> '.format(field, '-' * (Get._LEN - len(field) - len(desc)), desc)) # depends on [control=['try'], data=[]] except KeyboardInterrupt: print() exit(0) # depends on [control=['except'], data=[]]
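What the rendered prompt looks like, assuming Get.TYPES maps field names to short type descriptions and Get._LEN fixes the total prompt width (both class attributes are defined elsewhere).

# With Get.TYPES == {'title': 'str'} and Get._LEN == 20, the prompt pads
# 'title' (5 chars) and '[str]' with 20 - 5 - 3 = 12 dashes:
#
#   title|------------[str]>
#
value = get.input('title')   # get: an instance of the surrounding Get class
# Ctrl-C prints a newline and exits with status 0 instead of raising.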
def profile(self, content, accept, content_language=None, accept_language=None, raw_scores=None, csv_headers=None, consumption_preferences=None, content_type=None, **kwargs):
        """
        Get profile.

        Generates a personality profile for the author of the input text. The
        service accepts a maximum of 20 MB of input content, but it requires
        much less text to produce an accurate profile. The service can analyze
        text in Arabic, English, Japanese, Korean, or Spanish. It can return
        its results in a variety of languages.

        **See also:**
        * [Requesting a profile](https://cloud.ibm.com/docs/services/personality-insights/input.html)
        * [Providing sufficient input](https://cloud.ibm.com/docs/services/personality-insights/input.html#sufficient)

        ### Content types

        You can provide input content as plain text (`text/plain`), HTML
        (`text/html`), or JSON (`application/json`) by specifying the
        **Content-Type** parameter. The default is `text/plain`.
        * Per the JSON specification, the default character encoding for JSON
        content is effectively always UTF-8.
        * Per the HTTP specification, the default encoding for plain text and
        HTML is ISO-8859-1 (effectively, the ASCII character set).

        When specifying a content type of plain text or HTML, include the
        `charset` parameter to indicate the character encoding of the input
        text; for example, `Content-Type: text/plain;charset=utf-8`.
        **See also:** [Specifying request and response formats](https://cloud.ibm.com/docs/services/personality-insights/input.html#formats)

        ### Accept types

        You must request a response as JSON (`application/json`) or
        comma-separated values (`text/csv`) by specifying the **Accept**
        parameter. CSV output includes a fixed number of columns. Set the
        **csv_headers** parameter to `true` to request optional column headers
        for CSV output.
        **See also:**
        * [Understanding a JSON profile](https://cloud.ibm.com/docs/services/personality-insights/output.html)
        * [Understanding a CSV profile](https://cloud.ibm.com/docs/services/personality-insights/output-csv.html).

        :param Content content: A maximum of 20 MB of content to analyze,
        though the service requires much less text; for more information, see
        [Providing sufficient input](https://cloud.ibm.com/docs/services/personality-insights/input.html#sufficient).
        For JSON input, provide an object of type `Content`.
        :param str accept: The type of the response. For more information, see
        **Accept types** in the method description.
        :param str content_language: The language of the input text for the
        request: Arabic, English, Japanese, Korean, or Spanish. Regional
        variants are treated as their parent language; for example, `en-US` is
        interpreted as `en`. The effect of the **Content-Language** parameter
        depends on the **Content-Type** parameter. When **Content-Type** is
        `text/plain` or `text/html`, **Content-Language** is the only way to
        specify the language. When **Content-Type** is `application/json`,
        **Content-Language** overrides a language specified with the
        `language` parameter of a `ContentItem` object, and content items that
        specify a different language are ignored; omit this parameter to base
        the language on the specification of the content items. You can
        specify any combination of languages for **Content-Language** and
        **Accept-Language**.
        :param str accept_language: The desired language of the response. For
        two-character arguments, regional variants are treated as their parent
        language; for example, `en-US` is interpreted as `en`. You can specify
        any combination of languages for the input and response content.
:param bool raw_scores: Indicates whether a raw score in addition to a normalized percentile is returned for each characteristic; raw scores are not compared with a sample population. By default, only normalized percentiles are returned. :param bool csv_headers: Indicates whether column labels are returned with a CSV response. By default, no column labels are returned. Applies only when the response type is CSV (`text/csv`). :param bool consumption_preferences: Indicates whether consumption preferences are returned with the results. By default, no consumption preferences are returned. :param str content_type: The type of the input. For more information, see **Content types** in the method description. Default: `text/plain`. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ if content is None: raise ValueError('content must be provided') if accept is None: raise ValueError('accept must be provided') if isinstance(content, Content): content = self._convert_model(content, Content) headers = { 'Accept': accept, 'Content-Language': content_language, 'Accept-Language': accept_language, 'Content-Type': content_type } if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('personality_insights', 'V3', 'profile') headers.update(sdk_headers) params = { 'version': self.version, 'raw_scores': raw_scores, 'csv_headers': csv_headers, 'consumption_preferences': consumption_preferences } if content_type == 'application/json' and isinstance(content, dict): data = json.dumps(content) else: data = content url = '/v3/profile' response = self.request( method='POST', url=url, headers=headers, params=params, data=data, accept_json=(accept is None or accept == 'application/json')) return response
def function[profile, parameter[self, content, accept, content_language, accept_language, raw_scores, csv_headers, consumption_preferences, content_type]]:
constant[
        Get profile.

        Generates a personality profile for the author of the input text. The
        service accepts a maximum of 20 MB of input content, but it requires
        much less text to produce an accurate profile. The service can analyze
        text in Arabic, English, Japanese, Korean, or Spanish. It can return
        its results in a variety of languages.
        **See also:**
        * [Requesting a profile](https://cloud.ibm.com/docs/services/personality-insights/input.html)
        * [Providing sufficient input](https://cloud.ibm.com/docs/services/personality-insights/input.html#sufficient)
        ### Content types
        You can provide input content as plain text (`text/plain`), HTML
        (`text/html`), or JSON (`application/json`) by specifying the
        **Content-Type** parameter. The default is `text/plain`.
        * Per the JSON specification, the default character encoding for JSON
        content is effectively always UTF-8.
        * Per the HTTP specification, the default encoding for plain text and
        HTML is ISO-8859-1 (effectively, the ASCII character set).
        When specifying a content type of plain text or HTML, include the
        `charset` parameter to indicate the character encoding of the input
        text; for example, `Content-Type: text/plain;charset=utf-8`.
        **See also:** [Specifying request and response formats](https://cloud.ibm.com/docs/services/personality-insights/input.html#formats)
        ### Accept types
        You must request a response as JSON (`application/json`) or
        comma-separated values (`text/csv`) by specifying the **Accept**
        parameter. CSV output includes a fixed number of columns. Set the
        **csv_headers** parameter to `true` to request optional column headers
        for CSV output.
        **See also:**
        * [Understanding a JSON profile](https://cloud.ibm.com/docs/services/personality-insights/output.html)
        * [Understanding a CSV profile](https://cloud.ibm.com/docs/services/personality-insights/output-csv.html).
        :param Content content: A maximum of 20 MB of content to analyze,
        though the service requires much less text; for more information, see
        [Providing sufficient input](https://cloud.ibm.com/docs/services/personality-insights/input.html#sufficient).
        For JSON input, provide an object of type `Content`.
        :param str accept: The type of the response. For more information, see
        **Accept types** in the method description.
        :param str content_language: The language of the input text for the
        request: Arabic, English, Japanese, Korean, or Spanish. Regional
        variants are treated as their parent language; for example, `en-US` is
        interpreted as `en`. The effect of the **Content-Language** parameter
        depends on the **Content-Type** parameter. When **Content-Type** is
        `text/plain` or `text/html`, **Content-Language** is the only way to
        specify the language. When **Content-Type** is `application/json`,
        **Content-Language** overrides a language specified with the
        `language` parameter of a `ContentItem` object, and content items that
        specify a different language are ignored; omit this parameter to base
        the language on the specification of the content items. You can
        specify any combination of languages for **Content-Language** and
        **Accept-Language**.
        :param str accept_language: The desired language of the response. For
        two-character arguments, regional variants are treated as their parent
        language; for example, `en-US` is interpreted as `en`. You can specify
        any combination of languages for the input and response content.
:param bool raw_scores: Indicates whether a raw score in addition to a normalized percentile is returned for each characteristic; raw scores are not compared with a sample population. By default, only normalized percentiles are returned. :param bool csv_headers: Indicates whether column labels are returned with a CSV response. By default, no column labels are returned. Applies only when the response type is CSV (`text/csv`). :param bool consumption_preferences: Indicates whether consumption preferences are returned with the results. By default, no consumption preferences are returned. :param str content_type: The type of the input. For more information, see **Content types** in the method description. Default: `text/plain`. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse ] if compare[name[content] is constant[None]] begin[:] <ast.Raise object at 0x7da18dc074c0> if compare[name[accept] is constant[None]] begin[:] <ast.Raise object at 0x7da18dc04cd0> if call[name[isinstance], parameter[name[content], name[Content]]] begin[:] variable[content] assign[=] call[name[self]._convert_model, parameter[name[content], name[Content]]] variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cd9f0>, <ast.Constant object at 0x7da18c4cf640>, <ast.Constant object at 0x7da18c4cc160>, <ast.Constant object at 0x7da18c4ce740>], [<ast.Name object at 0x7da18c4ce020>, <ast.Name object at 0x7da18c4cd8d0>, <ast.Name object at 0x7da18c4cd990>, <ast.Name object at 0x7da18c4ccb50>]] if compare[constant[headers] in name[kwargs]] begin[:] call[name[headers].update, parameter[call[name[kwargs].get, parameter[constant[headers]]]]] variable[sdk_headers] assign[=] call[name[get_sdk_headers], parameter[constant[personality_insights], constant[V3], constant[profile]]] call[name[headers].update, parameter[name[sdk_headers]]] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cf400>, <ast.Constant object at 0x7da18c4cc2b0>, <ast.Constant object at 0x7da18c4cef20>, <ast.Constant object at 0x7da18c4cefe0>], [<ast.Attribute object at 0x7da18c4cd420>, <ast.Name object at 0x7da18c4cc6a0>, <ast.Name object at 0x7da18c4cf010>, <ast.Name object at 0x7da18c4cca90>]] if <ast.BoolOp object at 0x7da18c4ccf40> begin[:] variable[data] assign[=] call[name[json].dumps, parameter[name[content]]] variable[url] assign[=] constant[/v3/profile] variable[response] assign[=] call[name[self].request, parameter[]] return[name[response]]
keyword[def] identifier[profile] ( identifier[self] , identifier[content] , identifier[accept] , identifier[content_language] = keyword[None] , identifier[accept_language] = keyword[None] , identifier[raw_scores] = keyword[None] , identifier[csv_headers] = keyword[None] , identifier[consumption_preferences] = keyword[None] , identifier[content_type] = keyword[None] , ** identifier[kwargs] ): literal[string] keyword[if] identifier[content] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[accept] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[isinstance] ( identifier[content] , identifier[Content] ): identifier[content] = identifier[self] . identifier[_convert_model] ( identifier[content] , identifier[Content] ) identifier[headers] ={ literal[string] : identifier[accept] , literal[string] : identifier[content_language] , literal[string] : identifier[accept_language] , literal[string] : identifier[content_type] } keyword[if] literal[string] keyword[in] identifier[kwargs] : identifier[headers] . identifier[update] ( identifier[kwargs] . identifier[get] ( literal[string] )) identifier[sdk_headers] = identifier[get_sdk_headers] ( literal[string] , literal[string] , literal[string] ) identifier[headers] . identifier[update] ( identifier[sdk_headers] ) identifier[params] ={ literal[string] : identifier[self] . identifier[version] , literal[string] : identifier[raw_scores] , literal[string] : identifier[csv_headers] , literal[string] : identifier[consumption_preferences] } keyword[if] identifier[content_type] == literal[string] keyword[and] identifier[isinstance] ( identifier[content] , identifier[dict] ): identifier[data] = identifier[json] . identifier[dumps] ( identifier[content] ) keyword[else] : identifier[data] = identifier[content] identifier[url] = literal[string] identifier[response] = identifier[self] . identifier[request] ( identifier[method] = literal[string] , identifier[url] = identifier[url] , identifier[headers] = identifier[headers] , identifier[params] = identifier[params] , identifier[data] = identifier[data] , identifier[accept_json] =( identifier[accept] keyword[is] keyword[None] keyword[or] identifier[accept] == literal[string] )) keyword[return] identifier[response]
def profile(self, content, accept, content_language=None, accept_language=None, raw_scores=None, csv_headers=None, consumption_preferences=None, content_type=None, **kwargs):
        """
        Get profile.

        Generates a personality profile for the author of the input text. The
        service accepts a maximum of 20 MB of input content, but it requires
        much less text to produce an accurate profile. The service can analyze
        text in Arabic, English, Japanese, Korean, or Spanish. It can return
        its results in a variety of languages.
        **See also:**
        * [Requesting a profile](https://cloud.ibm.com/docs/services/personality-insights/input.html)
        * [Providing sufficient input](https://cloud.ibm.com/docs/services/personality-insights/input.html#sufficient)
        ### Content types
        You can provide input content as plain text (`text/plain`), HTML
        (`text/html`), or JSON (`application/json`) by specifying the
        **Content-Type** parameter. The default is `text/plain`.
        * Per the JSON specification, the default character encoding for JSON
        content is effectively always UTF-8.
        * Per the HTTP specification, the default encoding for plain text and
        HTML is ISO-8859-1 (effectively, the ASCII character set).
        When specifying a content type of plain text or HTML, include the
        `charset` parameter to indicate the character encoding of the input
        text; for example, `Content-Type: text/plain;charset=utf-8`.
        **See also:** [Specifying request and response formats](https://cloud.ibm.com/docs/services/personality-insights/input.html#formats)
        ### Accept types
        You must request a response as JSON (`application/json`) or
        comma-separated values (`text/csv`) by specifying the **Accept**
        parameter. CSV output includes a fixed number of columns. Set the
        **csv_headers** parameter to `true` to request optional column headers
        for CSV output.
        **See also:**
        * [Understanding a JSON profile](https://cloud.ibm.com/docs/services/personality-insights/output.html)
        * [Understanding a CSV profile](https://cloud.ibm.com/docs/services/personality-insights/output-csv.html).
        :param Content content: A maximum of 20 MB of content to analyze,
        though the service requires much less text; for more information, see
        [Providing sufficient input](https://cloud.ibm.com/docs/services/personality-insights/input.html#sufficient).
        For JSON input, provide an object of type `Content`.
        :param str accept: The type of the response. For more information, see
        **Accept types** in the method description.
        :param str content_language: The language of the input text for the
        request: Arabic, English, Japanese, Korean, or Spanish. Regional
        variants are treated as their parent language; for example, `en-US` is
        interpreted as `en`. The effect of the **Content-Language** parameter
        depends on the **Content-Type** parameter. When **Content-Type** is
        `text/plain` or `text/html`, **Content-Language** is the only way to
        specify the language. When **Content-Type** is `application/json`,
        **Content-Language** overrides a language specified with the
        `language` parameter of a `ContentItem` object, and content items that
        specify a different language are ignored; omit this parameter to base
        the language on the specification of the content items. You can
        specify any combination of languages for **Content-Language** and
        **Accept-Language**.
        :param str accept_language: The desired language of the response. For
        two-character arguments, regional variants are treated as their parent
        language; for example, `en-US` is interpreted as `en`. You can specify
        any combination of languages for the input and response content.
:param bool raw_scores: Indicates whether a raw score in addition to a normalized percentile is returned for each characteristic; raw scores are not compared with a sample population. By default, only normalized percentiles are returned. :param bool csv_headers: Indicates whether column labels are returned with a CSV response. By default, no column labels are returned. Applies only when the response type is CSV (`text/csv`). :param bool consumption_preferences: Indicates whether consumption preferences are returned with the results. By default, no consumption preferences are returned. :param str content_type: The type of the input. For more information, see **Content types** in the method description. Default: `text/plain`. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ if content is None: raise ValueError('content must be provided') # depends on [control=['if'], data=[]] if accept is None: raise ValueError('accept must be provided') # depends on [control=['if'], data=[]] if isinstance(content, Content): content = self._convert_model(content, Content) # depends on [control=['if'], data=[]] headers = {'Accept': accept, 'Content-Language': content_language, 'Accept-Language': accept_language, 'Content-Type': content_type} if 'headers' in kwargs: headers.update(kwargs.get('headers')) # depends on [control=['if'], data=['kwargs']] sdk_headers = get_sdk_headers('personality_insights', 'V3', 'profile') headers.update(sdk_headers) params = {'version': self.version, 'raw_scores': raw_scores, 'csv_headers': csv_headers, 'consumption_preferences': consumption_preferences} if content_type == 'application/json' and isinstance(content, dict): data = json.dumps(content) # depends on [control=['if'], data=[]] else: data = content url = '/v3/profile' response = self.request(method='POST', url=url, headers=headers, params=params, data=data, accept_json=accept is None or accept == 'application/json') return response
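A hedged call sketch: `service` stands for an already-authenticated instance of the Watson SDK class defining profile() (constructor and credentials omitted), and get_result() assumes the SDK's DetailedResponse interface; the input file and printed field are illustrative.

with open('profile.txt') as fh:
    text = fh.read()   # sufficient author text, per the input guidelines above

response = service.profile(
    text,
    accept='application/json',
    content_type='text/plain',
    content_language='en',
    raw_scores=True,
    consumption_preferences=True)
result = response.get_result()   # parsed JSON personality profile
print(result['word_count'])      # one field of the profile, for illustration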
def login(self, verbose=False):
        '''
        Authenticate with Opsview

        :param verbose: Verbose output mode
        :type verbose: bool
        :return: The authentication token
        :rtype: str or unicode
        '''
        url = '{}/{}'.format(self.rest_url, 'login')
        logger.debug('POST: {}'.format(url))

        r = requests.post(url,
                          json={
                              'username': self.username,
                              'password': self.password
                          },
                          verify=self.verify_ssl
                          )

        j = r.json()
        logger.debug('Request response:')
        logger.debug(pformat(vars(r)))
        logger.debug('JSON:')
        logger.debug(pformat(j))

        if 'token' not in j:
            raise OpsviewLoginException("Failed to retrieve token. "
                                        "Please check your credentials")

        self.headers['X-Opsview-Token'] = j['token']
        self._token_age = datetime.datetime.now()
        return j['token']
def function[login, parameter[self, verbose]]: constant[ Authenticate with Opsview :param verbose: Verbose output mode :type verbose: bool :return: The authentification token :rtype: str or unicode ] variable[url] assign[=] call[constant[{}/{}].format, parameter[name[self].rest_url, constant[login]]] call[name[logger].debug, parameter[call[constant[POST: {}].format, parameter[name[url]]]]] variable[r] assign[=] call[name[requests].post, parameter[name[url]]] variable[j] assign[=] call[name[r].json, parameter[]] call[name[logger].debug, parameter[constant[Request response:]]] call[name[logger].debug, parameter[call[name[pformat], parameter[call[name[vars], parameter[name[r]]]]]]] call[name[logger].debug, parameter[constant[JSON:]]] call[name[logger].debug, parameter[call[name[pformat], parameter[name[j]]]]] if compare[constant[token] <ast.NotIn object at 0x7da2590d7190> name[j]] begin[:] <ast.Raise object at 0x7da18ede7d90> call[name[self].headers][constant[X-Opsview-Token]] assign[=] call[name[j]][constant[token]] name[self]._token_age assign[=] call[name[datetime].datetime.now, parameter[]] return[call[name[j]][constant[token]]]
keyword[def] identifier[login] ( identifier[self] , identifier[verbose] = keyword[False] ): literal[string] identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[rest_url] , literal[string] ) identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[url] )) identifier[r] = identifier[requests] . identifier[post] ( identifier[url] , identifier[json] ={ literal[string] : identifier[self] . identifier[username] , literal[string] : identifier[self] . identifier[password] }, identifier[verify] = identifier[self] . identifier[verify_ssl] ) identifier[j] = identifier[r] . identifier[json] () identifier[logger] . identifier[debug] ( literal[string] ) identifier[logger] . identifier[debug] ( identifier[pformat] ( identifier[vars] ( identifier[r] ))) identifier[logger] . identifier[debug] ( literal[string] ) identifier[logger] . identifier[debug] ( identifier[pformat] ( identifier[j] )) keyword[if] literal[string] keyword[not] keyword[in] identifier[j] : keyword[raise] identifier[OpsviewLoginException] ( literal[string] literal[string] ) identifier[self] . identifier[headers] [ literal[string] ]= identifier[j] [ literal[string] ] identifier[self] . identifier[_token_age] = identifier[datetime] . identifier[datetime] . identifier[now] () keyword[return] identifier[j] [ literal[string] ]
def login(self, verbose=False): """ Authenticate with Opsview :param verbose: Verbose output mode :type verbose: bool :return: The authentification token :rtype: str or unicode """ url = '{}/{}'.format(self.rest_url, 'login') logger.debug('POST: {}'.format(url)) r = requests.post(url, json={'username': self.username, 'password': self.password}, verify=self.verify_ssl) j = r.json() logger.debug('Request response:') logger.debug(pformat(vars(r))) logger.debug('JSON:') logger.debug(pformat(j)) if 'token' not in j: raise OpsviewLoginException('Failed to retrieve token. Please check your credentials') # depends on [control=['if'], data=[]] self.headers['X-Opsview-Token'] = j['token'] self._token_age = datetime.datetime.now() return j['token']
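Hypothetical usage; the client class name, its constructor, and where OpsviewLoginException is importable from all belong to the surrounding module.

client = OpsviewClient('https://opsview.example.com/rest',
                       username='admin', password='secret')   # constructor assumed
try:
    token = client.login()
except OpsviewLoginException:
    raise SystemExit('bad credentials')
# The token is also stashed on the client for later calls:
assert client.headers['X-Opsview-Token'] == token
# client._token_age records when it was obtained, so the client can decide
# when a re-login is due.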
def linkcode_resolve(domain, info): """ Determine the URL corresponding to Python object """ if domain != 'py': return None modname = info['module'] fullname = info['fullname'] submod = sys.modules.get(modname) if submod is None: return None obj = submod for part in fullname.split('.'): try: obj = getattr(obj, part) except: return None try: fn = inspect.getsourcefile(obj) except: fn = None if not fn: return None try: source, lineno = inspect.findsource(obj) except: lineno = None if lineno: linespec = "#L%d" % (lineno + 1) else: linespec = "" fn = relpath(fn, start='..') return "https://github.com/mithrandi/txacme/blob/%s/%s%s" % ( txacme_version_info['full-revisionid'], fn, linespec)
def function[linkcode_resolve, parameter[domain, info]]: constant[ Determine the URL corresponding to Python object ] if compare[name[domain] not_equal[!=] constant[py]] begin[:] return[constant[None]] variable[modname] assign[=] call[name[info]][constant[module]] variable[fullname] assign[=] call[name[info]][constant[fullname]] variable[submod] assign[=] call[name[sys].modules.get, parameter[name[modname]]] if compare[name[submod] is constant[None]] begin[:] return[constant[None]] variable[obj] assign[=] name[submod] for taget[name[part]] in starred[call[name[fullname].split, parameter[constant[.]]]] begin[:] <ast.Try object at 0x7da18ede4190> <ast.Try object at 0x7da1b0e0fe20> if <ast.UnaryOp object at 0x7da20c76cf40> begin[:] return[constant[None]] <ast.Try object at 0x7da20c76e0b0> if name[lineno] begin[:] variable[linespec] assign[=] binary_operation[constant[#L%d] <ast.Mod object at 0x7da2590d6920> binary_operation[name[lineno] + constant[1]]] variable[fn] assign[=] call[name[relpath], parameter[name[fn]]] return[binary_operation[constant[https://github.com/mithrandi/txacme/blob/%s/%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da18f58d600>, <ast.Name object at 0x7da18f58fac0>, <ast.Name object at 0x7da18f58f250>]]]]
keyword[def] identifier[linkcode_resolve] ( identifier[domain] , identifier[info] ): literal[string] keyword[if] identifier[domain] != literal[string] : keyword[return] keyword[None] identifier[modname] = identifier[info] [ literal[string] ] identifier[fullname] = identifier[info] [ literal[string] ] identifier[submod] = identifier[sys] . identifier[modules] . identifier[get] ( identifier[modname] ) keyword[if] identifier[submod] keyword[is] keyword[None] : keyword[return] keyword[None] identifier[obj] = identifier[submod] keyword[for] identifier[part] keyword[in] identifier[fullname] . identifier[split] ( literal[string] ): keyword[try] : identifier[obj] = identifier[getattr] ( identifier[obj] , identifier[part] ) keyword[except] : keyword[return] keyword[None] keyword[try] : identifier[fn] = identifier[inspect] . identifier[getsourcefile] ( identifier[obj] ) keyword[except] : identifier[fn] = keyword[None] keyword[if] keyword[not] identifier[fn] : keyword[return] keyword[None] keyword[try] : identifier[source] , identifier[lineno] = identifier[inspect] . identifier[findsource] ( identifier[obj] ) keyword[except] : identifier[lineno] = keyword[None] keyword[if] identifier[lineno] : identifier[linespec] = literal[string] %( identifier[lineno] + literal[int] ) keyword[else] : identifier[linespec] = literal[string] identifier[fn] = identifier[relpath] ( identifier[fn] , identifier[start] = literal[string] ) keyword[return] literal[string] %( identifier[txacme_version_info] [ literal[string] ], identifier[fn] , identifier[linespec] )
def linkcode_resolve(domain, info): """ Determine the URL corresponding to Python object """ if domain != 'py': return None # depends on [control=['if'], data=[]] modname = info['module'] fullname = info['fullname'] submod = sys.modules.get(modname) if submod is None: return None # depends on [control=['if'], data=[]] obj = submod for part in fullname.split('.'): try: obj = getattr(obj, part) # depends on [control=['try'], data=[]] except: return None # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['part']] try: fn = inspect.getsourcefile(obj) # depends on [control=['try'], data=[]] except: fn = None # depends on [control=['except'], data=[]] if not fn: return None # depends on [control=['if'], data=[]] try: (source, lineno) = inspect.findsource(obj) # depends on [control=['try'], data=[]] except: lineno = None # depends on [control=['except'], data=[]] if lineno: linespec = '#L%d' % (lineno + 1) # depends on [control=['if'], data=[]] else: linespec = '' fn = relpath(fn, start='..') return 'https://github.com/mithrandi/txacme/blob/%s/%s%s' % (txacme_version_info['full-revisionid'], fn, linespec)
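This is the resolver hook for sphinx.ext.linkcode, living in the project's conf.py (which is also where sys, inspect, relpath, and txacme_version_info are imported). A sketch of the wiring plus a hypothetical direct call; the module/fullname pair and the resulting path are illustrative.

# In conf.py:
extensions = ['sphinx.ext.linkcode']
# Sphinx then calls linkcode_resolve (as defined above) for every
# documented object.

info = {'module': 'txacme.service', 'fullname': 'AcmeIssuingService'}
url = linkcode_resolve('py', info)
# e.g. 'https://github.com/mithrandi/txacme/blob/<full-revisionid>/<relpath>#L<n>',
# or None for non-Python domains and objects whose source cannot be found.
print(url)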
def create_policy(policy_name, policy_document, path=None, description=None,
                  region=None, key=None, keyid=None, profile=None):
    '''
    Create a policy.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iam.create_policy mypolicy '{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": ["s3:Get*", "s3:List*"], "Resource": ["arn:aws:s3:::my-bucket/shared/*"]}]}'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if not isinstance(policy_document, six.string_types):
        policy_document = salt.utils.json.dumps(policy_document)
    params = {}
    for arg in 'path', 'description':
        if locals()[arg] is not None:
            params[arg] = locals()[arg]
    if policy_exists(policy_name, region, key, keyid, profile):
        return True
    try:
        conn.create_policy(policy_name, policy_document, **params)
        log.info('Created IAM policy %s.', policy_name)
    except boto.exception.BotoServerError as e:
        log.debug(e)
        log.error('Failed to create IAM policy %s.', policy_name)
        return False
    return True
def function[create_policy, parameter[policy_name, policy_document, path, description, region, key, keyid, profile]]: constant[ Create a policy. CLI Example: .. code-block:: bash salt myminios boto_iam.create_policy mypolicy '{"Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Action": ["s3:Get*", "s3:List*"], "Resource": ["arn:aws:s3:::my-bucket/shared/*"]},]}' ] variable[conn] assign[=] call[name[_get_conn], parameter[]] if <ast.UnaryOp object at 0x7da1b1f398a0> begin[:] variable[policy_document] assign[=] call[name[salt].utils.json.dumps, parameter[name[policy_document]]] variable[params] assign[=] dictionary[[], []] for taget[name[arg]] in starred[tuple[[<ast.Constant object at 0x7da1b1f39b10>, <ast.Constant object at 0x7da1b1f39d50>]]] begin[:] if compare[call[call[name[locals], parameter[]]][name[arg]] is_not constant[None]] begin[:] call[name[params]][name[arg]] assign[=] call[call[name[locals], parameter[]]][name[arg]] if call[name[policy_exists], parameter[name[policy_name], name[region], name[key], name[keyid], name[profile]]] begin[:] return[constant[True]] <ast.Try object at 0x7da1b1fcace0> return[constant[True]]
keyword[def] identifier[create_policy] ( identifier[policy_name] , identifier[policy_document] , identifier[path] = keyword[None] , identifier[description] = keyword[None] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ): literal[string] identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[policy_document] , identifier[six] . identifier[string_types] ): identifier[policy_document] = identifier[salt] . identifier[utils] . identifier[json] . identifier[dumps] ( identifier[policy_document] ) identifier[params] ={} keyword[for] identifier[arg] keyword[in] literal[string] , literal[string] : keyword[if] identifier[locals] ()[ identifier[arg] ] keyword[is] keyword[not] keyword[None] : identifier[params] [ identifier[arg] ]= identifier[locals] ()[ identifier[arg] ] keyword[if] identifier[policy_exists] ( identifier[policy_name] , identifier[region] , identifier[key] , identifier[keyid] , identifier[profile] ): keyword[return] keyword[True] keyword[try] : identifier[conn] . identifier[create_policy] ( identifier[policy_name] , identifier[policy_document] ,** identifier[params] ) identifier[log] . identifier[info] ( literal[string] , identifier[policy_name] ) keyword[except] identifier[boto] . identifier[exception] . identifier[BotoServerError] keyword[as] identifier[e] : identifier[log] . identifier[debug] ( identifier[e] ) identifier[log] . identifier[error] ( literal[string] , identifier[policy_name] ) keyword[return] keyword[False] keyword[return] keyword[True]
def create_policy(policy_name, policy_document, path=None, description=None, region=None, key=None, keyid=None, profile=None): """ Create a policy. CLI Example: .. code-block:: bash salt myminios boto_iam.create_policy mypolicy '{"Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Action": ["s3:Get*", "s3:List*"], "Resource": ["arn:aws:s3:::my-bucket/shared/*"]},]}' """ conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not isinstance(policy_document, six.string_types): policy_document = salt.utils.json.dumps(policy_document) # depends on [control=['if'], data=[]] params = {} for arg in ('path', 'description'): if locals()[arg] is not None: params[arg] = locals()[arg] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['arg']] if policy_exists(policy_name, region, key, keyid, profile): return True # depends on [control=['if'], data=[]] try: conn.create_policy(policy_name, policy_document, **params) log.info('Created IAM policy %s.', policy_name) # depends on [control=['try'], data=[]] except boto.exception.BotoServerError as e: log.debug(e) log.error('Failed to create IAM policy %s.', policy_name) return False # depends on [control=['except'], data=['e']] return True
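A hedged sketch of calling this execution-module function directly with a dict (inside Salt you would normally use the CLI form shown in the docstring); it assumes boto credentials and region are resolvable by _get_conn.

policy = {
    'Version': '2012-10-17',
    'Statement': [{'Effect': 'Allow',
                   'Action': ['s3:Get*', 's3:List*'],
                   'Resource': ['arn:aws:s3:::my-bucket/shared/*']}],
}
ok = create_policy('mypolicy', policy,
                   description='Read-only access to the shared prefix',
                   region='us-east-1')
# Returns True if the policy already exists or was created; the dict is
# JSON-encoded internally via salt.utils.json.dumps.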
def global_add(self, key: str, value: Any) -> None: """ Adds a key and value to the global dictionary """ self.global_context[key] = value
def function[global_add, parameter[self, key, value]]: constant[ Adds a key and value to the global dictionary ] call[name[self].global_context][name[key]] assign[=] name[value]
keyword[def] identifier[global_add] ( identifier[self] , identifier[key] : identifier[str] , identifier[value] : identifier[Any] )-> keyword[None] : literal[string] identifier[self] . identifier[global_context] [ identifier[key] ]= identifier[value]
def global_add(self, key: str, value: Any) -> None: """ Adds a key and value to the global dictionary """ self.global_context[key] = value
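Trivial by design; a one-line demonstration (ctx stands for any instance of the surrounding class, which owns a global_context dict).

ctx.global_add('site_name', 'Example')
assert ctx.global_context['site_name'] == 'Example'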
def is_model_mpttmeta_subclass(node):
    """Checks that the node is an ``MPTTMeta`` class declared inside a
    Django model or form class."""
    if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef):
        return False

    parents = ('django.db.models.base.Model',
               '.Model',  # for the transformed version used in this plugin
               'django.forms.forms.Form',
               '.Form',
               'django.forms.models.ModelForm',
               '.ModelForm')
    return node_is_subclass(node.parent, *parents)
def function[is_model_mpttmeta_subclass, parameter[node]]: constant[Checks that node is derivative of MPTTMeta class.] if <ast.BoolOp object at 0x7da18bcc8610> begin[:] return[constant[False]] variable[parents] assign[=] tuple[[<ast.Constant object at 0x7da18bcc8d90>, <ast.Constant object at 0x7da18bccbaf0>, <ast.Constant object at 0x7da18bcc9c60>, <ast.Constant object at 0x7da18bcc86a0>, <ast.Constant object at 0x7da18bcc9270>, <ast.Constant object at 0x7da18bccae00>]] return[call[name[node_is_subclass], parameter[name[node].parent, <ast.Starred object at 0x7da18bcca170>]]]
keyword[def] identifier[is_model_mpttmeta_subclass] ( identifier[node] ): literal[string] keyword[if] identifier[node] . identifier[name] != literal[string] keyword[or] keyword[not] identifier[isinstance] ( identifier[node] . identifier[parent] , identifier[ClassDef] ): keyword[return] keyword[False] identifier[parents] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ) keyword[return] identifier[node_is_subclass] ( identifier[node] . identifier[parent] ,* identifier[parents] )
def is_model_mpttmeta_subclass(node): """Checks that node is derivative of MPTTMeta class.""" if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef): return False # depends on [control=['if'], data=[]] # for the transformed version used in this plugin parents = ('django.db.models.base.Model', '.Model', 'django.forms.forms.Form', '.Form', 'django.forms.models.ModelForm', '.ModelForm') return node_is_subclass(node.parent, *parents)
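An illustrative astroid snippet this checker is meant to accept; astroid is the AST library pylint plugins are built on, and the final result still depends on node_is_subclass (defined elsewhere) recognising Profile as a Django Model.

import astroid

module = astroid.parse('''
from django.db import models

class Profile(models.Model):
    class MPTTMeta:
        order_insertion_by = ['name']
''')
meta_node = module.body[1].body[0]   # the nested MPTTMeta ClassDef
# is_model_mpttmeta_subclass(meta_node) returns True provided
# node_is_subclass resolves Profile to django.db.models.base.Model.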
def merge_mutect(job, perchrom_rvs):
    """
    This module will merge the per-chromosome mutect files created by spawn_mutect into a genome
    vcf. It will make 2 vcfs, one for PASSing non-germline calls, and one for all calls.

    ARGUMENTS
    1. perchrom_rvs: REFER RETURN VALUE of spawn_mutect()

    RETURN VALUES
    1. output_files: <JSid for mutect_passing_calls.vcf>

    This module corresponds to node 11 on the tree
    """
    job.fileStore.logToMaster('Running merge_mutect')
    work_dir = job.fileStore.getLocalTempDir()
    # We need to squash the input dict of dicts to a single dict such that it can be passed to
    # get_files_from_filestore
    input_files = {filename: jsid for perchrom_files in perchrom_rvs.values()
                   for filename, jsid in perchrom_files.items()}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
    # chr1..chr22 plus the sex chromosomes; list(...) keeps the concatenation
    # valid on python 3, where range() is no longer a list
    chromosomes = [''.join(['chr', str(x)]) for x in list(range(1, 23)) + ['X', 'Y']]
    with open('/'.join([work_dir, 'mutect_calls.vcf']), 'w') as mutvcf, \
            open('/'.join([work_dir, 'mutect_calls.out']), 'w') as mutout, \
            open('/'.join([work_dir, 'mutect_passing_calls.vcf']), 'w') as mutpassvcf:
        out_header_not_printed = True
        for chrom in chromosomes:
            with open(input_files[''.join(['mutect_', chrom, '.vcf'])], 'r') as mutfile:
                for line in mutfile:
                    line = line.strip()
                    if line.startswith('#'):
                        if chrom == 'chr1':
                            print(line, file=mutvcf)
                            print(line, file=mutpassvcf)
                        continue
                    else:
                        print(line, file=mutvcf)
                        line = line.split('\t')
                        if line[6] != 'REJECT':
                            print('\t'.join(line), file=mutpassvcf)
            with open(input_files[''.join(['mutect_', chrom, '.out'])], 'r') as mutfile:
                for line in mutfile:
                    line = line.strip()
                    if line.startswith('#'):
                        if chrom == 'chr1':
                            print(line, file=mutout)
                        continue
                    elif out_header_not_printed:
                        print(line, file=mutout)
                        out_header_not_printed = False
                    else:
                        print(line, file=mutout)
    output_file = job.fileStore.writeGlobalFile(mutpassvcf.name)
    return output_file
def function[merge_mutect, parameter[job, perchrom_rvs]]: constant[ This module will merge the per-chromosome mutect files created by spawn_mutect into a genome vcf. It will make 2 vcfs, one for PASSing non-germline calls, and one for all calls. ARGUMENTS 1. perchrom_rvs: REFER RETURN VALUE of spawn_mutect() RETURN VALUES 1. output_files: <JSid for mutect_passing_calls.vcf> This module corresponds to node 11 on the tree ] call[name[job].fileStore.logToMaster, parameter[constant[Running merge_mutect]]] variable[work_dir] assign[=] call[name[job].fileStore.getLocalTempDir, parameter[]] variable[input_files] assign[=] <ast.DictComp object at 0x7da18f8107f0> variable[input_files] assign[=] call[name[get_files_from_filestore], parameter[name[job], name[input_files], name[work_dir]]] variable[chromosomes] assign[=] <ast.ListComp object at 0x7da18f812560> with call[name[open], parameter[call[constant[/].join, parameter[list[[<ast.Name object at 0x7da18f812d40>, <ast.Constant object at 0x7da18f810a00>]]]], constant[w]]] begin[:] variable[out_header_not_printed] assign[=] constant[True] for taget[name[chrom]] in starred[name[chromosomes]] begin[:] with call[name[open], parameter[call[name[input_files]][call[constant[].join, parameter[list[[<ast.Constant object at 0x7da18f810520>, <ast.Name object at 0x7da18f813220>, <ast.Constant object at 0x7da18f811a80>]]]]], constant[r]]] begin[:] for taget[name[line]] in starred[name[mutfile]] begin[:] variable[line] assign[=] call[name[line].strip, parameter[]] if call[name[line].startswith, parameter[constant[#]]] begin[:] if compare[name[chrom] equal[==] constant[chr1]] begin[:] call[name[print], parameter[name[line]]] call[name[print], parameter[name[line]]] continue with call[name[open], parameter[call[name[input_files]][call[constant[].join, parameter[list[[<ast.Constant object at 0x7da18f812260>, <ast.Name object at 0x7da18f812ce0>, <ast.Constant object at 0x7da18f810190>]]]]], constant[r]]] begin[:] for taget[name[line]] in starred[name[mutfile]] begin[:] variable[line] assign[=] call[name[line].strip, parameter[]] if call[name[line].startswith, parameter[constant[#]]] begin[:] if compare[name[chrom] equal[==] constant[chr1]] begin[:] call[name[print], parameter[name[line]]] continue variable[output_file] assign[=] call[name[job].fileStore.writeGlobalFile, parameter[name[mutpassvcf].name]] return[name[output_file]]
keyword[def] identifier[merge_mutect] ( identifier[job] , identifier[perchrom_rvs] ): literal[string] identifier[job] . identifier[fileStore] . identifier[logToMaster] ( literal[string] ) identifier[work_dir] = identifier[job] . identifier[fileStore] . identifier[getLocalTempDir] () identifier[input_files] ={ identifier[filename] : identifier[jsid] keyword[for] identifier[perchrom_files] keyword[in] identifier[perchrom_rvs] . identifier[values] () keyword[for] identifier[filename] , identifier[jsid] keyword[in] identifier[perchrom_files] . identifier[items] ()} identifier[input_files] = identifier[get_files_from_filestore] ( identifier[job] , identifier[input_files] , identifier[work_dir] , identifier[docker] = keyword[False] ) identifier[chromosomes] =[ literal[string] . identifier[join] ([ literal[string] , identifier[str] ( identifier[x] )]) keyword[for] identifier[x] keyword[in] identifier[range] ( literal[int] , literal[int] )+[ literal[string] , literal[string] ]] keyword[with] identifier[open] ( literal[string] . identifier[join] ([ identifier[work_dir] , literal[string] ]), literal[string] ) keyword[as] identifier[mutvcf] , identifier[open] ( literal[string] . identifier[join] ([ identifier[work_dir] , literal[string] ]), literal[string] ) keyword[as] identifier[mutout] , identifier[open] ( literal[string] . identifier[join] ([ identifier[work_dir] , literal[string] ]), literal[string] ) keyword[as] identifier[mutpassvcf] : identifier[out_header_not_printed] = keyword[True] keyword[for] identifier[chrom] keyword[in] identifier[chromosomes] : keyword[with] identifier[open] ( identifier[input_files] [ literal[string] . identifier[join] ([ literal[string] , identifier[chrom] , literal[string] ])], literal[string] ) keyword[as] identifier[mutfile] : keyword[for] identifier[line] keyword[in] identifier[mutfile] : identifier[line] = identifier[line] . identifier[strip] () keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): keyword[if] identifier[chrom] == literal[string] : identifier[print] ( identifier[line] , identifier[file] = identifier[mutvcf] ) identifier[print] ( identifier[line] , identifier[file] = identifier[mutpassvcf] ) keyword[continue] keyword[else] : identifier[print] ( identifier[line] , identifier[file] = identifier[mutvcf] ) identifier[line] = identifier[line] . identifier[split] ( literal[string] ) keyword[if] identifier[line] [ literal[int] ]!= literal[string] : identifier[print] ( literal[string] . identifier[join] ( identifier[line] ), identifier[file] = identifier[mutpassvcf] ) keyword[with] identifier[open] ( identifier[input_files] [ literal[string] . identifier[join] ([ literal[string] , identifier[chrom] , literal[string] ])], literal[string] ) keyword[as] identifier[mutfile] : keyword[for] identifier[line] keyword[in] identifier[mutfile] : identifier[line] = identifier[line] . identifier[strip] () keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): keyword[if] identifier[chrom] == literal[string] : identifier[print] ( identifier[line] , identifier[file] = identifier[mutout] ) keyword[continue] keyword[elif] identifier[out_header_not_printed] : identifier[print] ( identifier[line] , identifier[file] = identifier[mutout] ) identifier[out_header_not_printed] = keyword[False] keyword[else] : identifier[print] ( identifier[line] , identifier[file] = identifier[mutout] ) identifier[output_file] = identifier[job] . identifier[fileStore] . identifier[writeGlobalFile] ( identifier[mutpassvcf] . 
identifier[name] ) keyword[return] identifier[output_file]
def merge_mutect(job, perchrom_rvs): """ This module will merge the per-chromosome mutect files created by spawn_mutect into a genome vcf. It will make 2 vcfs, one for PASSing non-germline calls, and one for all calls. ARGUMENTS 1. perchrom_rvs: REFER RETURN VALUE of spawn_mutect() RETURN VALUES 1. output_files: <JSid for mutect_passing_calls.vcf> This module corresponds to node 11 on the tree """ job.fileStore.logToMaster('Running merge_mutect') work_dir = job.fileStore.getLocalTempDir() # We need to squash the input dict of dicts to a single dict such that it can be passed to # get_files_from_filestore input_files = {filename: jsid for perchrom_files in perchrom_rvs.values() for (filename, jsid) in perchrom_files.items()} input_files = get_files_from_filestore(job, input_files, work_dir, docker=False) chromosomes = [''.join(['chr', str(x)]) for x in range(1, 23) + ['X', 'Y']] with open('/'.join([work_dir, 'mutect_calls.vcf']), 'w') as mutvcf, open('/'.join([work_dir, 'mutect_calls.out']), 'w') as mutout, open('/'.join([work_dir, 'mutect_passing_calls.vcf']), 'w') as mutpassvcf: out_header_not_printed = True for chrom in chromosomes: with open(input_files[''.join(['mutect_', chrom, '.vcf'])], 'r') as mutfile: for line in mutfile: line = line.strip() if line.startswith('#'): if chrom == 'chr1': print(line, file=mutvcf) print(line, file=mutpassvcf) # depends on [control=['if'], data=[]] continue # depends on [control=['if'], data=[]] else: print(line, file=mutvcf) line = line.split('\t') if line[6] != 'REJECT': print('\t'.join(line), file=mutpassvcf) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['mutfile']] with open(input_files[''.join(['mutect_', chrom, '.out'])], 'r') as mutfile: for line in mutfile: line = line.strip() if line.startswith('#'): if chrom == 'chr1': print(line, file=mutout) # depends on [control=['if'], data=[]] continue # depends on [control=['if'], data=[]] elif out_header_not_printed: print(line, file=mutout) out_header_not_printed = False # depends on [control=['if'], data=[]] else: print(line, file=mutout) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['mutfile']] # depends on [control=['for'], data=['chrom']] # depends on [control=['with'], data=['open', 'mutvcf']] output_file = job.fileStore.writeGlobalFile(mutpassvcf.name) return output_file
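A hedged sketch of where this sits in the Toil workflow (ProTECT-style); spawn_mutect and its arguments here are stand-ins for the real upstream step.

from toil.job import Job

root = Job.wrapJobFn(spawn_mutect, tumor_bam, normal_bam, univ_options)  # args hypothetical
merge = Job.wrapJobFn(merge_mutect, root.rv())
root.addChild(merge)
# merge.rv() resolves to the file-store ID of mutect_passing_calls.vcf,
# which downstream jobs can fetch with job.fileStore.readGlobalFile().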
def dot_product(self, other): """ Return the dot product of the given vectors. """ return self.x * other.x + self.y * other.y
def function[dot_product, parameter[self, other]]: constant[ Return the dot product of the given vectors. ] return[binary_operation[binary_operation[name[self].x * name[other].x] + binary_operation[name[self].y * name[other].y]]]
keyword[def] identifier[dot_product] ( identifier[self] , identifier[other] ): literal[string] keyword[return] identifier[self] . identifier[x] * identifier[other] . identifier[x] + identifier[self] . identifier[y] * identifier[other] . identifier[y]
def dot_product(self, other): """ Return the dot product of the given vectors. """ return self.x * other.x + self.y * other.y
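The method only reads x and y on both operands, so a tiny stand-in class is enough to exercise it; Vec2 is hypothetical and simply reuses the function above as a method.

class Vec2:
    def __init__(self, x, y):
        self.x = x
        self.y = y
    dot_product = dot_product  # bind the module-level function as a method

print(Vec2(1, 2).dot_product(Vec2(3, 4)))  # 1*3 + 2*4 == 11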
def add_program_dir(self, directory): """Hack in program directory""" dirs = list(self.PROGRAM_DIRS) dirs.append(directory) self.PROGRAM_DIRS = dirs
def function[add_program_dir, parameter[self, directory]]: constant[Hack in program directory] variable[dirs] assign[=] call[name[list], parameter[name[self].PROGRAM_DIRS]] call[name[dirs].append, parameter[name[directory]]] name[self].PROGRAM_DIRS assign[=] name[dirs]
keyword[def] identifier[add_program_dir] ( identifier[self] , identifier[directory] ): literal[string] identifier[dirs] = identifier[list] ( identifier[self] . identifier[PROGRAM_DIRS] ) identifier[dirs] . identifier[append] ( identifier[directory] ) identifier[self] . identifier[PROGRAM_DIRS] = identifier[dirs]
def add_program_dir(self, directory): """Hack in program directory""" dirs = list(self.PROGRAM_DIRS) dirs.append(directory) self.PROGRAM_DIRS = dirs
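A sketch of why the "hack" copies the class tuple into a list: assigning back creates an instance attribute that shadows the class-level PROGRAM_DIRS, leaving other instances untouched. Finder and its default directories are assumptions, not the original host class.

class Finder:
    PROGRAM_DIRS = ('/usr/bin', '/usr/local/bin')  # hypothetical defaults
    add_program_dir = add_program_dir

f = Finder()
f.add_program_dir('/opt/tools/bin')
print(f.PROGRAM_DIRS)       # ['/usr/bin', '/usr/local/bin', '/opt/tools/bin']
print(Finder.PROGRAM_DIRS)  # ('/usr/bin', '/usr/local/bin') -- class untouched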
def search_datasets(self): """ Returns an iterator over the Datasets on the server. :return: An iterator over the :class:`ga4gh.protocol.Dataset` objects on the server. """ request = protocol.SearchDatasetsRequest() request.page_size = pb.int(self._page_size) return self._run_search_request( request, "datasets", protocol.SearchDatasetsResponse)
def function[search_datasets, parameter[self]]: constant[ Returns an iterator over the Datasets on the server. :return: An iterator over the :class:`ga4gh.protocol.Dataset` objects on the server. ] variable[request] assign[=] call[name[protocol].SearchDatasetsRequest, parameter[]] name[request].page_size assign[=] call[name[pb].int, parameter[name[self]._page_size]] return[call[name[self]._run_search_request, parameter[name[request], constant[datasets], name[protocol].SearchDatasetsResponse]]]
keyword[def] identifier[search_datasets] ( identifier[self] ): literal[string] identifier[request] = identifier[protocol] . identifier[SearchDatasetsRequest] () identifier[request] . identifier[page_size] = identifier[pb] . identifier[int] ( identifier[self] . identifier[_page_size] ) keyword[return] identifier[self] . identifier[_run_search_request] ( identifier[request] , literal[string] , identifier[protocol] . identifier[SearchDatasetsResponse] )
def search_datasets(self): """ Returns an iterator over the Datasets on the server. :return: An iterator over the :class:`ga4gh.protocol.Dataset` objects on the server. """ request = protocol.SearchDatasetsRequest() request.page_size = pb.int(self._page_size) return self._run_search_request(request, 'datasets', protocol.SearchDatasetsResponse)
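A hedged usage sketch based on the GA4GH reference client; the import path, HttpClient class, and server URL are assumptions rather than verified details.

from ga4gh.client import client  # assumed import path for the reference client

c = client.HttpClient("http://1kgenomes.ga4gh.org")  # hypothetical endpoint
for dataset in c.search_datasets():  # pages lazily via _run_search_request
    print(dataset.id, dataset.name)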
def is_domterm(cls): """ :return: whether we are inside DomTerm :rtype: bool """ import os if cls._is_domterm is not None: return cls._is_domterm if not os.environ.get("DOMTERM"): cls._is_domterm = False return False cls._is_domterm = True return True
def function[is_domterm, parameter[cls]]: constant[ :return: whether we are inside DomTerm :rtype: bool ] import module[os] if compare[name[cls]._is_domterm is_not constant[None]] begin[:] return[name[cls]._is_domterm] if <ast.UnaryOp object at 0x7da1b24ef7c0> begin[:] name[cls]._is_domterm assign[=] constant[False] return[constant[False]] name[cls]._is_domterm assign[=] constant[True] return[constant[True]]
keyword[def] identifier[is_domterm] ( identifier[cls] ): literal[string] keyword[import] identifier[os] keyword[if] identifier[cls] . identifier[_is_domterm] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[cls] . identifier[_is_domterm] keyword[if] keyword[not] identifier[os] . identifier[environ] . identifier[get] ( literal[string] ): identifier[cls] . identifier[_is_domterm] = keyword[False] keyword[return] keyword[False] identifier[cls] . identifier[_is_domterm] = keyword[True] keyword[return] keyword[True]
def is_domterm(cls): """ :return: whether we are inside DomTerm :rtype: bool """ import os if cls._is_domterm is not None: return cls._is_domterm # depends on [control=['if'], data=[]] if not os.environ.get('DOMTERM'): cls._is_domterm = False return False # depends on [control=['if'], data=[]] cls._is_domterm = True return True
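The cached check reduces to a single environment lookup; here is a functional sketch without the class-level _is_domterm cache.

import os

def is_domterm_uncached():
    # True only when the DOMTERM environment variable is set and non-empty
    return bool(os.environ.get("DOMTERM"))

print(is_domterm_uncached())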
def generate(self, x, **kwargs): """ Generate symbolic graph for adversarial examples and return. :param x: The model's symbolic inputs. :param kwargs: See `parse_params` """ # Parse and save attack-specific parameters assert self.parse_params(**kwargs) return vatm( self.model, x, self.model.get_logits(x), eps=self.eps, num_iterations=self.num_iterations, xi=self.xi, clip_min=self.clip_min, clip_max=self.clip_max)
def function[generate, parameter[self, x]]: constant[ Generate symbolic graph for adversarial examples and return. :param x: The model's symbolic inputs. :param kwargs: See `parse_params` ] assert[call[name[self].parse_params, parameter[]]] return[call[name[vatm], parameter[name[self].model, name[x], call[name[self].model.get_logits, parameter[name[x]]]]]]
keyword[def] identifier[generate] ( identifier[self] , identifier[x] ,** identifier[kwargs] ): literal[string] keyword[assert] identifier[self] . identifier[parse_params] (** identifier[kwargs] ) keyword[return] identifier[vatm] ( identifier[self] . identifier[model] , identifier[x] , identifier[self] . identifier[model] . identifier[get_logits] ( identifier[x] ), identifier[eps] = identifier[self] . identifier[eps] , identifier[num_iterations] = identifier[self] . identifier[num_iterations] , identifier[xi] = identifier[self] . identifier[xi] , identifier[clip_min] = identifier[self] . identifier[clip_min] , identifier[clip_max] = identifier[self] . identifier[clip_max] )
def generate(self, x, **kwargs): """ Generate symbolic graph for adversarial examples and return. :param x: The model's symbolic inputs. :param kwargs: See `parse_params` """ # Parse and save attack-specific parameters assert self.parse_params(**kwargs) return vatm(self.model, x, self.model.get_logits(x), eps=self.eps, num_iterations=self.num_iterations, xi=self.xi, clip_min=self.clip_min, clip_max=self.clip_max)
def pop_arguments(instr, stack):
    """
    Pop instructions off `stack` until we pop all instructions that will
    produce values popped by `instr`.
    """
    needed = instr.stack_effect
    if needed >= 0:
        raise DecompilationError(
            "%s does not have a negative stack effect" % instr
        )

    for popcount, to_pop in enumerate(reversed(stack), start=1):
        needed += to_pop.stack_effect
        if not needed:
            break
    else:
        raise DecompilationError(
            "Reached end of stack without finding inputs to %s" % instr,
        )

    popped = stack[-popcount:]
    stack[:] = stack[:-popcount]

    return popped
def function[pop_arguments, parameter[instr, stack]]: constant[ Pop instructions off `stack` until we pop all instructions that will produce values popped by `instr`. ] variable[needed] assign[=] name[instr].stack_effect if compare[name[needed] greater_or_equal[>=] constant[0]] begin[:] <ast.Raise object at 0x7da1b05b63e0> for taget[tuple[[<ast.Name object at 0x7da1b05b42e0>, <ast.Name object at 0x7da1b05b71f0>]]] in starred[call[name[enumerate], parameter[call[name[reversed], parameter[name[stack]]]]]] begin[:] <ast.AugAssign object at 0x7da1b05b45b0> if <ast.UnaryOp object at 0x7da1b05b6a70> begin[:] break variable[popped] assign[=] call[name[stack]][<ast.Slice object at 0x7da1b0464ca0>] call[name[stack]][<ast.Slice object at 0x7da1b0466980>] assign[=] call[name[stack]][<ast.Slice object at 0x7da1b0467d00>] return[name[popped]]
keyword[def] identifier[pop_arguments] ( identifier[instr] , identifier[stack] ): literal[string] identifier[needed] = identifier[instr] . identifier[stack_effect] keyword[if] identifier[needed] >= literal[int] : keyword[raise] identifier[DecompilationError] ( literal[string] % identifier[instr] ) keyword[for] identifier[popcount] , identifier[to_pop] keyword[in] identifier[enumerate] ( identifier[reversed] ( identifier[stack] ), identifier[start] = literal[int] ): identifier[needed] += identifier[to_pop] . identifier[stack_effect] keyword[if] keyword[not] identifier[needed] : keyword[break] keyword[else] : keyword[raise] identifier[DecompilationError] ( literal[string] % identifier[instr] , ) identifier[popped] = identifier[stack] [- identifier[popcount] :] identifier[stack] [:]= identifier[stack] [:- identifier[popcount] ] keyword[return] identifier[popped]
def pop_arguments(instr, stack): """ Pop instructions off `stack` until we pop all instructions that will produce values popped by `instr`. """ needed = instr.stack_effect if needed >= 0: raise DecompilationError('%s does not have a negative stack effect' % instr) # depends on [control=['if'], data=[]] for (popcount, to_pop) in enumerate(reversed(stack), start=1): needed += to_pop.stack_effect if not needed: break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] else: raise DecompilationError('Reached end of stack without finding inputs to %s' % instr) popped = stack[-popcount:] stack[:] = stack[:-popcount] return popped
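A self-contained walk-through, assuming pop_arguments and DecompilationError are importable from the same module. Instr is a hypothetical stand-in: only a stack_effect attribute is required. An instruction with a net effect of -2 (MAP_ADD-style, popping two values) cancels the two most recent +1 producers.

from collections import namedtuple

Instr = namedtuple('Instr', ['name', 'stack_effect'])  # hypothetical stand-in

stack = [Instr('LOAD_A', 1), Instr('LOAD_B', 1), Instr('LOAD_C', 1)]
map_add = Instr('MAP_ADD', -2)  # net effect -2: consumes the last two pushes

popped = pop_arguments(map_add, stack)
print([i.name for i in popped])  # ['LOAD_B', 'LOAD_C']
print([i.name for i in stack])   # ['LOAD_A'] -- producers removed in place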
def do_chan_log_all(self, line): """Set the channel log level to ALL_COMMS. Command syntax is: chan_log_all""" self.application.channel.SetLogFilters(openpal.LogFilters(opendnp3.levels.ALL_COMMS)) print('Channel log filtering level is now: {0}'.format(opendnp3.levels.ALL_COMMS))
def function[do_chan_log_all, parameter[self, line]]: constant[Set the channel log level to ALL_COMMS. Command syntax is: chan_log_all] call[name[self].application.channel.SetLogFilters, parameter[call[name[openpal].LogFilters, parameter[name[opendnp3].levels.ALL_COMMS]]]] call[name[print], parameter[call[constant[Channel log filtering level is now: {0}].format, parameter[name[opendnp3].levels.ALL_COMMS]]]]
keyword[def] identifier[do_chan_log_all] ( identifier[self] , identifier[line] ): literal[string] identifier[self] . identifier[application] . identifier[channel] . identifier[SetLogFilters] ( identifier[openpal] . identifier[LogFilters] ( identifier[opendnp3] . identifier[levels] . identifier[ALL_COMMS] )) identifier[print] ( literal[string] . identifier[format] ( identifier[opendnp3] . identifier[levels] . identifier[ALL_COMMS] ))
def do_chan_log_all(self, line): """Set the channel log level to ALL_COMMS. Command syntax is: chan_log_all""" self.application.channel.SetLogFilters(openpal.LogFilters(opendnp3.levels.ALL_COMMS)) print('Channel log filtering level is now: {0}'.format(opendnp3.levels.ALL_COMMS))
def get_delete_branch_command(self, branch_name, message, author): """Get the command to delete or close a branch in the local repository.""" tokens = ['hg update --rev=%s && hg commit' % quote(branch_name)] if author: tokens.append('--user=%s' % quote(author.combined)) tokens.append('--message=%s' % quote(message)) tokens.append('--close-branch') return [' '.join(tokens)]
def function[get_delete_branch_command, parameter[self, branch_name, message, author]]: constant[Get the command to delete or close a branch in the local repository.] variable[tokens] assign[=] list[[<ast.BinOp object at 0x7da1b0af1180>]] if name[author] begin[:] call[name[tokens].append, parameter[binary_operation[constant[--user=%s] <ast.Mod object at 0x7da2590d6920> call[name[quote], parameter[name[author].combined]]]]] call[name[tokens].append, parameter[binary_operation[constant[--message=%s] <ast.Mod object at 0x7da2590d6920> call[name[quote], parameter[name[message]]]]]] call[name[tokens].append, parameter[constant[--close-branch]]] return[list[[<ast.Call object at 0x7da1b0a20d30>]]]
keyword[def] identifier[get_delete_branch_command] ( identifier[self] , identifier[branch_name] , identifier[message] , identifier[author] ): literal[string] identifier[tokens] =[ literal[string] % identifier[quote] ( identifier[branch_name] )] keyword[if] identifier[author] : identifier[tokens] . identifier[append] ( literal[string] % identifier[quote] ( identifier[author] . identifier[combined] )) identifier[tokens] . identifier[append] ( literal[string] % identifier[quote] ( identifier[message] )) identifier[tokens] . identifier[append] ( literal[string] ) keyword[return] [ literal[string] . identifier[join] ( identifier[tokens] )]
def get_delete_branch_command(self, branch_name, message, author): """Get the command to delete or close a branch in the local repository.""" tokens = ['hg update --rev=%s && hg commit' % quote(branch_name)] if author: tokens.append('--user=%s' % quote(author.combined)) # depends on [control=['if'], data=[]] tokens.append('--message=%s' % quote(message)) tokens.append('--close-branch') return [' '.join(tokens)]
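A standalone call, assuming the bare quote used above is shlex.quote (pipes.quote on Python 2) in the function's module. Author is a hypothetical stand-in since only its combined field is read, and self is unused beyond method dispatch.

from collections import namedtuple

Author = namedtuple('Author', ['combined'])  # stand-in for the real author type

cmd = get_delete_branch_command(None, 'feature-x', 'Close stale branch',
                                Author('Jane Doe <jane@example.com>'))
print(cmd[0])
# hg update --rev=feature-x && hg commit --user='Jane Doe <jane@example.com>' --message='Close stale branch' --close-branch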
def register_all_add_grad( add_grad_function, arg_types, exclude=(), ignore_existing=False): """Register a gradient adder for all combinations of given types. This is a convenience shorthand for calling register_add_grad when registering gradient adders for multiple types that can be interchanged for the purpose of addition. Args: add_grad_function: A gradient adder, see register_add_grad. arg_types: List of Python type objects. The gradient adder will be registered for all pairs of these types. exclude: Optional list of type tuples to exclude. ignore_existing: Boolean. Whether to silently skip argument pairs that were already registered. """ for t1 in arg_types: for t2 in arg_types: if (t1, t2) in exclude: continue if ignore_existing and (t1, t2) in grad_adders: continue register_add_grad(t1, t2, add_grad_function)
def function[register_all_add_grad, parameter[add_grad_function, arg_types, exclude, ignore_existing]]: constant[Register a gradient adder for all combinations of given types. This is a convenience shorthand for calling register_add_grad when registering gradient adders for multiple types that can be interchanged for the purpose of addition. Args: add_grad_function: A gradient adder, see register_add_grad. arg_types: List of Python type objects. The gradient adder will be registered for all pairs of these types. exclude: Optional list of type tuples to exclude. ignore_existing: Boolean. Whether to silently skip argument pairs that were already registered. ] for taget[name[t1]] in starred[name[arg_types]] begin[:] for taget[name[t2]] in starred[name[arg_types]] begin[:] if compare[tuple[[<ast.Name object at 0x7da1b1ddf730>, <ast.Name object at 0x7da1b1ddcf70>]] in name[exclude]] begin[:] continue if <ast.BoolOp object at 0x7da1b1ddeb30> begin[:] continue call[name[register_add_grad], parameter[name[t1], name[t2], name[add_grad_function]]]
keyword[def] identifier[register_all_add_grad] ( identifier[add_grad_function] , identifier[arg_types] , identifier[exclude] =(), identifier[ignore_existing] = keyword[False] ): literal[string] keyword[for] identifier[t1] keyword[in] identifier[arg_types] : keyword[for] identifier[t2] keyword[in] identifier[arg_types] : keyword[if] ( identifier[t1] , identifier[t2] ) keyword[in] identifier[exclude] : keyword[continue] keyword[if] identifier[ignore_existing] keyword[and] ( identifier[t1] , identifier[t2] ) keyword[in] identifier[grad_adders] : keyword[continue] identifier[register_add_grad] ( identifier[t1] , identifier[t2] , identifier[add_grad_function] )
def register_all_add_grad(add_grad_function, arg_types, exclude=(), ignore_existing=False): """Register a gradient adder for all combinations of given types. This is a convenience shorthand for calling register_add_grad when registering gradient adders for multiple types that can be interchanged for the purpose of addition. Args: add_grad_function: A gradient adder, see register_add_grad. arg_types: List of Python type objects. The gradient adder will be registered for all pairs of these types. exclude: Optional list of type tuples to exclude. ignore_existing: Boolean. Whether to silently skip argument pairs that were already registered. """ for t1 in arg_types: for t2 in arg_types: if (t1, t2) in exclude: continue # depends on [control=['if'], data=[]] if ignore_existing and (t1, t2) in grad_adders: continue # depends on [control=['if'], data=[]] register_add_grad(t1, t2, add_grad_function) # depends on [control=['for'], data=['t2']] # depends on [control=['for'], data=['t1']]
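A standalone sketch: grad_adders and register_add_grad are module-level names in the original, so minimal stand-ins are recreated here (they would need to live in the same module as register_all_add_grad for a real run).

grad_adders = {}

def register_add_grad(t1, t2, fn):
    grad_adders[(t1, t2)] = fn

register_all_add_grad(lambda a, b: a + b, [float, int], exclude=[(int, int)])
print((int, int) in grad_adders)    # False: explicitly excluded
print((float, int) in grad_adders)  # True
print(len(grad_adders))             # 3 of the 4 type pairs registered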
def reorg_crawl_tasks(tasks, concurrency, logger=None):
    """ Extract content returned by the crawler `iter_crawl_tasks`
    member method.

    :return: tuple made of the sub-tasks to execute, the epilogue task to
    execute or `None` if none was specified by the crawler, and the proper
    tasks concurrency level.
    :rtype: tuple (sub-tasks, epilogue, concurrent)
    """
    futures = tasks['tasks']
    epilogue = tasks.get('epilogue')
    custom_concurrency = tasks.get('max_concurrent_tasks', concurrency)
    check_custom_concurrency(concurrency, custom_concurrency, logger)
    futures = list(futures)
    return futures, epilogue, concurrency
def function[reorg_crawl_tasks, parameter[tasks, concurrency, logger]]: constant[ Extract content returned by the crawler `iter_crawl_tasks` member method. :return: tuple made of the sub-tasks to execute, the epilogue task to execute or `None` if none was specified by the crawler, and the proper tasks concurrency level. :rtype: tuple (sub-tasks, epilogue, concurrent) ] variable[futures] assign[=] call[name[tasks]][constant[tasks]] variable[epilogue] assign[=] call[name[tasks].get, parameter[constant[epilogue]]] variable[custom_concurrency] assign[=] call[name[tasks].get, parameter[constant[max_concurrent_tasks], name[concurrency]]] call[name[check_custom_concurrency], parameter[name[concurrency], name[custom_concurrency], name[logger]]] variable[futures] assign[=] call[name[list], parameter[name[futures]]] return[tuple[[<ast.Name object at 0x7da1b1450e50>, <ast.Name object at 0x7da1b1450c10>, <ast.Name object at 0x7da1b1450910>]]]
keyword[def] identifier[reorg_crawl_tasks] ( identifier[tasks] , identifier[concurrency] , identifier[logger] = keyword[None] ): literal[string] identifier[futures] = identifier[tasks] [ literal[string] ] identifier[epilogue] = identifier[tasks] . identifier[get] ( literal[string] ) identifier[custom_concurrency] = identifier[tasks] . identifier[get] ( literal[string] , identifier[concurrency] ) identifier[check_custom_concurrency] ( identifier[concurrency] , identifier[custom_concurrency] , identifier[logger] ) identifier[futures] = identifier[list] ( identifier[futures] ) keyword[return] identifier[futures] , identifier[epilogue] , identifier[concurrency]
def reorg_crawl_tasks(tasks, concurrency, logger=None):
    """ Extract content returned by the crawler `iter_crawl_tasks`
    member method.

    :return: tuple made of the sub-tasks to execute, the epilogue task to
    execute or `None` if none was specified by the crawler, and the proper
    tasks concurrency level.
    :rtype: tuple (sub-tasks, epilogue, concurrent)
    """
    futures = tasks['tasks']
    epilogue = tasks.get('epilogue')
    custom_concurrency = tasks.get('max_concurrent_tasks', concurrency)
    check_custom_concurrency(concurrency, custom_concurrency, logger)
    futures = list(futures)
    return (futures, epilogue, concurrency)
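A minimal sketch, assuming check_custom_concurrency only warns through the logger. Note that the caller's concurrency is what gets returned; the crawler's max_concurrent_tasks is validated but not propagated.

tasks = {'tasks': iter(['crawl-a', 'crawl-b']),   # any iterable works
         'epilogue': 'finalize',
         'max_concurrent_tasks': 8}

futures, epilogue, concurrency = reorg_crawl_tasks(tasks, concurrency=4)
print(futures)      # ['crawl-a', 'crawl-b'] -- iterator materialized to a list
print(epilogue)     # finalize
print(concurrency)  # 4, not 8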
def parse_plist(entry):
    """Parse an XML dictionary entry."""
    if is_leaf(entry):
        url = entry[KEY_URLSTRING]
        title = entry[KEY_URIDICTIONARY].get('title', url)
        yield (url, title)
    elif has_children(entry):
        for child in entry[KEY_CHILDREN]:
            for item in parse_plist(child):
                yield item
def function[parse_plist, parameter[entry]]: constant[Parse an XML dictionary entry.] if call[name[is_leaf], parameter[name[entry]]] begin[:] variable[url] assign[=] call[name[entry]][name[KEY_URLSTRING]] variable[title] assign[=] call[call[name[entry]][name[KEY_URIDICTIONARY]].get, parameter[constant[title], name[url]]] <ast.Yield object at 0x7da18c4cde40>
keyword[def] identifier[parse_plist] ( identifier[entry] ): literal[string] keyword[if] identifier[is_leaf] ( identifier[entry] ): identifier[url] = identifier[entry] [ identifier[KEY_URLSTRING] ] identifier[title] = identifier[entry] [ identifier[KEY_URIDICTIONARY] ]. identifier[get] ( literal[string] , identifier[url] ) keyword[yield] ( identifier[url] , identifier[title] ) keyword[elif] identifier[has_children] ( identifier[entry] ): keyword[for] identifier[child] keyword[in] identifier[entry] [ identifier[KEY_CHILDREN] ]: keyword[for] identifier[item] keyword[in] identifier[parse_plist] ( identifier[child] ): keyword[yield] identifier[item]
def parse_plist(entry): """Parse an XML dictionary entry.""" if is_leaf(entry): url = entry[KEY_URLSTRING] title = entry[KEY_URIDICTIONARY].get('title', url) yield (url, title) # depends on [control=['if'], data=[]] elif has_children(entry): for child in entry[KEY_CHILDREN]: for item in parse_plist(child): yield item # depends on [control=['for'], data=['item']] # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=[]]
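A self-contained sketch: the KEY_* constants and the is_leaf/has_children helpers are module-level names in the original; Safari-bookmarks-style stand-ins are assumed here and would need to live in the same module as parse_plist for a real run.

KEY_URLSTRING = 'URLString'          # assumed values for the module constants
KEY_URIDICTIONARY = 'URIDictionary'
KEY_CHILDREN = 'Children'

def is_leaf(entry):
    return KEY_URLSTRING in entry

def has_children(entry):
    return KEY_CHILDREN in entry

bookmarks = {KEY_CHILDREN: [
    {KEY_URLSTRING: 'https://example.org',
     KEY_URIDICTIONARY: {'title': 'Example'}},
]}
print(list(parse_plist(bookmarks)))  # [('https://example.org', 'Example')]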
def tokenize_middle_english_words(text): """Tokenizes ME text: >>> tokenize_middle_english_words("And then, went I fastyr!") ['And', 'then', ',', 'went', 'I', 'fastyr', '!'] """ assert isinstance(text, str) text = re.sub(r'\n', r' ', text) text = re.sub(r'(?<=.)(?=[\.\";\,\:\-\[\]\(\)!&?])',r' ', text) text = re.sub(r'(?<=[\.\";\-\,\:\[\]\(\)!&?])(?=.)',r' ', text) text = re.sub(r'\s+',r' ', text) text = str.split(text) return text
def function[tokenize_middle_english_words, parameter[text]]: constant[Tokenizes ME text: >>> tokenize_middle_english_words("And then, went I fastyr!") ['And', 'then', ',', 'went', 'I', 'fastyr', '!'] ] assert[call[name[isinstance], parameter[name[text], name[str]]]] variable[text] assign[=] call[name[re].sub, parameter[constant[\n], constant[ ], name[text]]] variable[text] assign[=] call[name[re].sub, parameter[constant[(?<=.)(?=[\.\";\,\:\-\[\]\(\)!&?])], constant[ ], name[text]]] variable[text] assign[=] call[name[re].sub, parameter[constant[(?<=[\.\";\-\,\:\[\]\(\)!&?])(?=.)], constant[ ], name[text]]] variable[text] assign[=] call[name[re].sub, parameter[constant[\s+], constant[ ], name[text]]] variable[text] assign[=] call[name[str].split, parameter[name[text]]] return[name[text]]
keyword[def] identifier[tokenize_middle_english_words] ( identifier[text] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[text] , identifier[str] ) identifier[text] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[text] ) identifier[text] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[text] ) identifier[text] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[text] ) identifier[text] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[text] ) identifier[text] = identifier[str] . identifier[split] ( identifier[text] ) keyword[return] identifier[text]
def tokenize_middle_english_words(text): """Tokenizes ME text: >>> tokenize_middle_english_words("And then, went I fastyr!") ['And', 'then', ',', 'went', 'I', 'fastyr', '!'] """ assert isinstance(text, str) text = re.sub('\\n', ' ', text) text = re.sub('(?<=.)(?=[\\.\\";\\,\\:\\-\\[\\]\\(\\)!&?])', ' ', text) text = re.sub('(?<=[\\.\\";\\-\\,\\:\\[\\]\\(\\)!&?])(?=.)', ' ', text) text = re.sub('\\s+', ' ', text) text = str.split(text) return text
def allsame(list_, strict=True):
    """
    Checks whether all items in the list are equal

    Args:
        list_ (list):

    Returns:
        True if all items in the list are equal
    """
    if len(list_) == 0:
        return True
    first_item = list_[0]
    return list_all_eq_to(list_, first_item, strict)
def function[allsame, parameter[list_, strict]]: constant[ Checks whether all items in the list are equal Args: list_ (list): Returns: True if all items in the list are equal ] if compare[call[name[len], parameter[name[list_]]] equal[==] constant[0]] begin[:] return[constant[True]] variable[first_item] assign[=] call[name[list_]][constant[0]] return[call[name[list_all_eq_to], parameter[name[list_], name[first_item], name[strict]]]]
keyword[def] identifier[allsame] ( identifier[list_] , identifier[strict] = keyword[True] ): literal[string] keyword[if] identifier[len] ( identifier[list_] )== literal[int] : keyword[return] keyword[True] identifier[first_item] = identifier[list_] [ literal[int] ] keyword[return] identifier[list_all_eq_to] ( identifier[list_] , identifier[first_item] , identifier[strict] )
def allsame(list_, strict=True): """ Checks whether all items in the list are equal Args: list_ (list): Returns: True if all items in the list are equal """ if len(list_) == 0: return True # depends on [control=['if'], data=[]] first_item = list_[0] return list_all_eq_to(list_, first_item, strict)
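Quick checks, assuming list_all_eq_to is importable from the same module and compares every element against the first:

print(allsame([]))         # True: an empty list is vacuously all-equal
print(allsame([3, 3, 3]))  # True
print(allsame([3, 4, 3]))  # False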
def _get_crud_params(compiler, stmt, **kw): """ extract values from crud parameters taken from SQLAlchemy's crud module (since 1.0.x) and adapted for Crate dialect""" compiler.postfetch = [] compiler.insert_prefetch = [] compiler.update_prefetch = [] compiler.returning = [] # no parameters in the statement, no parameters in the # compiled params - return binds for all columns if compiler.column_keys is None and stmt.parameters is None: return [(c, crud._create_bind_param(compiler, c, None, required=True)) for c in stmt.table.columns] if stmt._has_multi_parameters: stmt_parameters = stmt.parameters[0] else: stmt_parameters = stmt.parameters # getters - these are normally just column.key, # but in the case of mysql multi-table update, the rules for # .key must conditionally take tablename into account if SA_VERSION >= SA_1_1: _column_as_key, _getattr_col_key, _col_bind_name = \ crud._key_getters_for_crud_column(compiler, stmt) else: _column_as_key, _getattr_col_key, _col_bind_name = \ crud._key_getters_for_crud_column(compiler) # if we have statement parameters - set defaults in the # compiled params if compiler.column_keys is None: parameters = {} else: parameters = dict((_column_as_key(key), crud.REQUIRED) for key in compiler.column_keys if not stmt_parameters or key not in stmt_parameters) # create a list of column assignment clauses as tuples values = [] if stmt_parameters is not None: crud._get_stmt_parameters_params( compiler, parameters, stmt_parameters, _column_as_key, values, kw) check_columns = {} crud._scan_cols(compiler, stmt, parameters, _getattr_col_key, _column_as_key, _col_bind_name, check_columns, values, kw) if stmt._has_multi_parameters: values = crud._extend_values_for_multiparams(compiler, stmt, values, kw) return values
def function[_get_crud_params, parameter[compiler, stmt]]: constant[ extract values from crud parameters taken from SQLAlchemy's crud module (since 1.0.x) and adapted for Crate dialect] name[compiler].postfetch assign[=] list[[]] name[compiler].insert_prefetch assign[=] list[[]] name[compiler].update_prefetch assign[=] list[[]] name[compiler].returning assign[=] list[[]] if <ast.BoolOp object at 0x7da1b2346aa0> begin[:] return[<ast.ListComp object at 0x7da1b23459f0>] if name[stmt]._has_multi_parameters begin[:] variable[stmt_parameters] assign[=] call[name[stmt].parameters][constant[0]] if compare[name[SA_VERSION] greater_or_equal[>=] name[SA_1_1]] begin[:] <ast.Tuple object at 0x7da1b2346fb0> assign[=] call[name[crud]._key_getters_for_crud_column, parameter[name[compiler], name[stmt]]] if compare[name[compiler].column_keys is constant[None]] begin[:] variable[parameters] assign[=] dictionary[[], []] variable[values] assign[=] list[[]] if compare[name[stmt_parameters] is_not constant[None]] begin[:] call[name[crud]._get_stmt_parameters_params, parameter[name[compiler], name[parameters], name[stmt_parameters], name[_column_as_key], name[values], name[kw]]] variable[check_columns] assign[=] dictionary[[], []] call[name[crud]._scan_cols, parameter[name[compiler], name[stmt], name[parameters], name[_getattr_col_key], name[_column_as_key], name[_col_bind_name], name[check_columns], name[values], name[kw]]] if name[stmt]._has_multi_parameters begin[:] variable[values] assign[=] call[name[crud]._extend_values_for_multiparams, parameter[name[compiler], name[stmt], name[values], name[kw]]] return[name[values]]
keyword[def] identifier[_get_crud_params] ( identifier[compiler] , identifier[stmt] ,** identifier[kw] ): literal[string] identifier[compiler] . identifier[postfetch] =[] identifier[compiler] . identifier[insert_prefetch] =[] identifier[compiler] . identifier[update_prefetch] =[] identifier[compiler] . identifier[returning] =[] keyword[if] identifier[compiler] . identifier[column_keys] keyword[is] keyword[None] keyword[and] identifier[stmt] . identifier[parameters] keyword[is] keyword[None] : keyword[return] [( identifier[c] , identifier[crud] . identifier[_create_bind_param] ( identifier[compiler] , identifier[c] , keyword[None] , identifier[required] = keyword[True] )) keyword[for] identifier[c] keyword[in] identifier[stmt] . identifier[table] . identifier[columns] ] keyword[if] identifier[stmt] . identifier[_has_multi_parameters] : identifier[stmt_parameters] = identifier[stmt] . identifier[parameters] [ literal[int] ] keyword[else] : identifier[stmt_parameters] = identifier[stmt] . identifier[parameters] keyword[if] identifier[SA_VERSION] >= identifier[SA_1_1] : identifier[_column_as_key] , identifier[_getattr_col_key] , identifier[_col_bind_name] = identifier[crud] . identifier[_key_getters_for_crud_column] ( identifier[compiler] , identifier[stmt] ) keyword[else] : identifier[_column_as_key] , identifier[_getattr_col_key] , identifier[_col_bind_name] = identifier[crud] . identifier[_key_getters_for_crud_column] ( identifier[compiler] ) keyword[if] identifier[compiler] . identifier[column_keys] keyword[is] keyword[None] : identifier[parameters] ={} keyword[else] : identifier[parameters] = identifier[dict] (( identifier[_column_as_key] ( identifier[key] ), identifier[crud] . identifier[REQUIRED] ) keyword[for] identifier[key] keyword[in] identifier[compiler] . identifier[column_keys] keyword[if] keyword[not] identifier[stmt_parameters] keyword[or] identifier[key] keyword[not] keyword[in] identifier[stmt_parameters] ) identifier[values] =[] keyword[if] identifier[stmt_parameters] keyword[is] keyword[not] keyword[None] : identifier[crud] . identifier[_get_stmt_parameters_params] ( identifier[compiler] , identifier[parameters] , identifier[stmt_parameters] , identifier[_column_as_key] , identifier[values] , identifier[kw] ) identifier[check_columns] ={} identifier[crud] . identifier[_scan_cols] ( identifier[compiler] , identifier[stmt] , identifier[parameters] , identifier[_getattr_col_key] , identifier[_column_as_key] , identifier[_col_bind_name] , identifier[check_columns] , identifier[values] , identifier[kw] ) keyword[if] identifier[stmt] . identifier[_has_multi_parameters] : identifier[values] = identifier[crud] . identifier[_extend_values_for_multiparams] ( identifier[compiler] , identifier[stmt] , identifier[values] , identifier[kw] ) keyword[return] identifier[values]
def _get_crud_params(compiler, stmt, **kw): """ extract values from crud parameters taken from SQLAlchemy's crud module (since 1.0.x) and adapted for Crate dialect""" compiler.postfetch = [] compiler.insert_prefetch = [] compiler.update_prefetch = [] compiler.returning = [] # no parameters in the statement, no parameters in the # compiled params - return binds for all columns if compiler.column_keys is None and stmt.parameters is None: return [(c, crud._create_bind_param(compiler, c, None, required=True)) for c in stmt.table.columns] # depends on [control=['if'], data=[]] if stmt._has_multi_parameters: stmt_parameters = stmt.parameters[0] # depends on [control=['if'], data=[]] else: stmt_parameters = stmt.parameters # getters - these are normally just column.key, # but in the case of mysql multi-table update, the rules for # .key must conditionally take tablename into account if SA_VERSION >= SA_1_1: (_column_as_key, _getattr_col_key, _col_bind_name) = crud._key_getters_for_crud_column(compiler, stmt) # depends on [control=['if'], data=[]] else: (_column_as_key, _getattr_col_key, _col_bind_name) = crud._key_getters_for_crud_column(compiler) # if we have statement parameters - set defaults in the # compiled params if compiler.column_keys is None: parameters = {} # depends on [control=['if'], data=[]] else: parameters = dict(((_column_as_key(key), crud.REQUIRED) for key in compiler.column_keys if not stmt_parameters or key not in stmt_parameters)) # create a list of column assignment clauses as tuples values = [] if stmt_parameters is not None: crud._get_stmt_parameters_params(compiler, parameters, stmt_parameters, _column_as_key, values, kw) # depends on [control=['if'], data=['stmt_parameters']] check_columns = {} crud._scan_cols(compiler, stmt, parameters, _getattr_col_key, _column_as_key, _col_bind_name, check_columns, values, kw) if stmt._has_multi_parameters: values = crud._extend_values_for_multiparams(compiler, stmt, values, kw) # depends on [control=['if'], data=[]] return values
def on_prev_button(self, event):
    """
    Update figures and text when the 'previous' button is selected
    """
    if 'saved' not in self.Data[self.s]['pars'] or self.Data[self.s]['pars']['saved'] != True:
        # check preferences
        if self.auto_save.GetValue():
            self.on_save_interpretation_button(None)
        else:
            del self.Data[self.s]['pars']
            self.Data[self.s]['pars'] = {}
            self.Data[self.s]['pars']['lab_dc_field'] = self.Data[self.s]['lab_dc_field']
            self.Data[self.s]['pars']['er_specimen_name'] = self.Data[self.s]['er_specimen_name']
            self.Data[self.s]['pars']['er_sample_name'] = self.Data[self.s]['er_sample_name']
            # return to the last saved interpretation, if one exists
            if 'er_specimen_name' in list(self.last_saved_pars.keys()) and self.last_saved_pars['er_specimen_name'] == self.s:
                for key in list(self.last_saved_pars.keys()):
                    self.Data[self.s]['pars'][key] = self.last_saved_pars[key]
                self.last_saved_pars = {}
    index = self.specimens.index(self.s)
    if index == 0:
        index = len(self.specimens)
    index -= 1
    self.s = self.specimens[index]
    self.specimens_box.SetStringSelection(self.s)
    self.update_selection()
def function[on_prev_button, parameter[self, event]]: constant[ update figures and text when a previous button is selected ] if <ast.BoolOp object at 0x7da1b05115a0> begin[:] if call[name[self].auto_save.GetValue, parameter[]] begin[:] call[name[self].on_save_interpretation_button, parameter[constant[None]]] variable[index] assign[=] call[name[self].specimens.index, parameter[name[self].s]] if compare[name[index] equal[==] constant[0]] begin[:] variable[index] assign[=] call[name[len], parameter[name[self].specimens]] <ast.AugAssign object at 0x7da1b055c850> name[self].s assign[=] call[name[self].specimens][name[index]] call[name[self].specimens_box.SetStringSelection, parameter[name[self].s]] call[name[self].update_selection, parameter[]]
keyword[def] identifier[on_prev_button] ( identifier[self] , identifier[event] ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ] keyword[or] identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ][ literal[string] ]!= keyword[True] : keyword[if] identifier[self] . identifier[auto_save] . identifier[GetValue] (): identifier[self] . identifier[on_save_interpretation_button] ( keyword[None] ) keyword[else] : keyword[del] identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ] identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ]={} identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ][ literal[string] ]= identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ] identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ][ literal[string] ]= identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ] identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ][ literal[string] ]= identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ] keyword[if] literal[string] keyword[in] identifier[list] ( identifier[self] . identifier[last_saved_pars] . identifier[keys] ()) keyword[and] identifier[self] . identifier[last_saved_pars] [ literal[string] ]== identifier[self] . identifier[s] : keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[self] . identifier[last_saved_pars] . identifier[keys] ()): identifier[self] . identifier[Data] [ identifier[self] . identifier[s] ][ literal[string] ][ identifier[key] ]= identifier[self] . identifier[last_saved_pars] [ identifier[key] ] identifier[self] . identifier[last_saved_pars] ={} identifier[index] = identifier[self] . identifier[specimens] . identifier[index] ( identifier[self] . identifier[s] ) keyword[if] identifier[index] == literal[int] : identifier[index] = identifier[len] ( identifier[self] . identifier[specimens] ) identifier[index] -= literal[int] identifier[self] . identifier[s] = identifier[self] . identifier[specimens] [ identifier[index] ] identifier[self] . identifier[specimens_box] . identifier[SetStringSelection] ( identifier[self] . identifier[s] ) identifier[self] . identifier[update_selection] ()
def on_prev_button(self, event):
    """
    Update figures and text when the 'previous' button is selected
    """
    if 'saved' not in self.Data[self.s]['pars'] or self.Data[self.s]['pars']['saved'] != True:
        # check preferences
        if self.auto_save.GetValue():
            self.on_save_interpretation_button(None) # depends on [control=['if'], data=[]]
        else:
            del self.Data[self.s]['pars']
            self.Data[self.s]['pars'] = {}
            self.Data[self.s]['pars']['lab_dc_field'] = self.Data[self.s]['lab_dc_field']
            self.Data[self.s]['pars']['er_specimen_name'] = self.Data[self.s]['er_specimen_name']
            self.Data[self.s]['pars']['er_sample_name'] = self.Data[self.s]['er_sample_name']
            # return to the last saved interpretation, if one exists
            if 'er_specimen_name' in list(self.last_saved_pars.keys()) and self.last_saved_pars['er_specimen_name'] == self.s:
                for key in list(self.last_saved_pars.keys()):
                    self.Data[self.s]['pars'][key] = self.last_saved_pars[key] # depends on [control=['for'], data=['key']]
                self.last_saved_pars = {} # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
    index = self.specimens.index(self.s)
    if index == 0:
        index = len(self.specimens) # depends on [control=['if'], data=['index']]
    index -= 1
    self.s = self.specimens[index]
    self.specimens_box.SetStringSelection(self.s)
    self.update_selection()
def conditional_entropy(data, precision, min_period, max_period, xbins=10, ybins=5, period_jobs=1): """ Returns the period of *data* by minimizing conditional entropy. See `link <http://arxiv.org/pdf/1306.6664v2.pdf>`_ [GDDMD] for details. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. precision : number Distance between contiguous frequencies in search-space. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. xbins : int, optional Number of phase bins for each trial period (default 10). ybins : int, optional Number of magnitude bins for each trial period (default 5). period_jobs : int, optional Number of simultaneous processes to use while searching. Only one process will ever be used, but argument is included to conform to *periodogram* standards of :func:`find_period` (default 1). **Returns** period : number The period of *data*. **Citations** .. [GDDMD] Graham, Matthew J. ; Drake, Andrew J. ; Djorgovski, S. G. ; Mahabal, Ashish A. ; Donalek, Ciro, 2013, Monthly Notices of the Royal Astronomical Society, Volume 434, Issue 3, p.2629-2635 """ periods = np.arange(min_period, max_period, precision) copy = np.ma.copy(data) copy[:,1] = (copy[:,1] - np.min(copy[:,1])) \ / (np.max(copy[:,1]) - np.min(copy[:,1])) partial_job = partial(CE, data=copy, xbins=xbins, ybins=ybins) m = map if period_jobs <= 1 else Pool(period_jobs).map entropies = list(m(partial_job, periods)) return periods[np.argmin(entropies)]
def function[conditional_entropy, parameter[data, precision, min_period, max_period, xbins, ybins, period_jobs]]: constant[ Returns the period of *data* by minimizing conditional entropy. See `link <http://arxiv.org/pdf/1306.6664v2.pdf>`_ [GDDMD] for details. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. precision : number Distance between contiguous frequencies in search-space. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. xbins : int, optional Number of phase bins for each trial period (default 10). ybins : int, optional Number of magnitude bins for each trial period (default 5). period_jobs : int, optional Number of simultaneous processes to use while searching. Only one process will ever be used, but argument is included to conform to *periodogram* standards of :func:`find_period` (default 1). **Returns** period : number The period of *data*. **Citations** .. [GDDMD] Graham, Matthew J. ; Drake, Andrew J. ; Djorgovski, S. G. ; Mahabal, Ashish A. ; Donalek, Ciro, 2013, Monthly Notices of the Royal Astronomical Society, Volume 434, Issue 3, p.2629-2635 ] variable[periods] assign[=] call[name[np].arange, parameter[name[min_period], name[max_period], name[precision]]] variable[copy] assign[=] call[name[np].ma.copy, parameter[name[data]]] call[name[copy]][tuple[[<ast.Slice object at 0x7da1b0aee620>, <ast.Constant object at 0x7da1b0aee2f0>]]] assign[=] binary_operation[binary_operation[call[name[copy]][tuple[[<ast.Slice object at 0x7da1b0aee6b0>, <ast.Constant object at 0x7da1b0aeeaa0>]]] - call[name[np].min, parameter[call[name[copy]][tuple[[<ast.Slice object at 0x7da1b0aee770>, <ast.Constant object at 0x7da1b0aefd30>]]]]]] / binary_operation[call[name[np].max, parameter[call[name[copy]][tuple[[<ast.Slice object at 0x7da1b0aef8e0>, <ast.Constant object at 0x7da1b0aed390>]]]]] - call[name[np].min, parameter[call[name[copy]][tuple[[<ast.Slice object at 0x7da1b0aee1a0>, <ast.Constant object at 0x7da1b0aee7a0>]]]]]]] variable[partial_job] assign[=] call[name[partial], parameter[name[CE]]] variable[m] assign[=] <ast.IfExp object at 0x7da1b0aece80> variable[entropies] assign[=] call[name[list], parameter[call[name[m], parameter[name[partial_job], name[periods]]]]] return[call[name[periods]][call[name[np].argmin, parameter[name[entropies]]]]]
keyword[def] identifier[conditional_entropy] ( identifier[data] , identifier[precision] , identifier[min_period] , identifier[max_period] , identifier[xbins] = literal[int] , identifier[ybins] = literal[int] , identifier[period_jobs] = literal[int] ): literal[string] identifier[periods] = identifier[np] . identifier[arange] ( identifier[min_period] , identifier[max_period] , identifier[precision] ) identifier[copy] = identifier[np] . identifier[ma] . identifier[copy] ( identifier[data] ) identifier[copy] [:, literal[int] ]=( identifier[copy] [:, literal[int] ]- identifier[np] . identifier[min] ( identifier[copy] [:, literal[int] ]))/( identifier[np] . identifier[max] ( identifier[copy] [:, literal[int] ])- identifier[np] . identifier[min] ( identifier[copy] [:, literal[int] ])) identifier[partial_job] = identifier[partial] ( identifier[CE] , identifier[data] = identifier[copy] , identifier[xbins] = identifier[xbins] , identifier[ybins] = identifier[ybins] ) identifier[m] = identifier[map] keyword[if] identifier[period_jobs] <= literal[int] keyword[else] identifier[Pool] ( identifier[period_jobs] ). identifier[map] identifier[entropies] = identifier[list] ( identifier[m] ( identifier[partial_job] , identifier[periods] )) keyword[return] identifier[periods] [ identifier[np] . identifier[argmin] ( identifier[entropies] )]
def conditional_entropy(data, precision, min_period, max_period, xbins=10, ybins=5, period_jobs=1): """ Returns the period of *data* by minimizing conditional entropy. See `link <http://arxiv.org/pdf/1306.6664v2.pdf>`_ [GDDMD] for details. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. precision : number Distance between contiguous frequencies in search-space. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. xbins : int, optional Number of phase bins for each trial period (default 10). ybins : int, optional Number of magnitude bins for each trial period (default 5). period_jobs : int, optional Number of simultaneous processes to use while searching. Only one process will ever be used, but argument is included to conform to *periodogram* standards of :func:`find_period` (default 1). **Returns** period : number The period of *data*. **Citations** .. [GDDMD] Graham, Matthew J. ; Drake, Andrew J. ; Djorgovski, S. G. ; Mahabal, Ashish A. ; Donalek, Ciro, 2013, Monthly Notices of the Royal Astronomical Society, Volume 434, Issue 3, p.2629-2635 """ periods = np.arange(min_period, max_period, precision) copy = np.ma.copy(data) copy[:, 1] = (copy[:, 1] - np.min(copy[:, 1])) / (np.max(copy[:, 1]) - np.min(copy[:, 1])) partial_job = partial(CE, data=copy, xbins=xbins, ybins=ybins) m = map if period_jobs <= 1 else Pool(period_jobs).map entropies = list(m(partial_job, periods)) return periods[np.argmin(entropies)]
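A hedged demo on a synthetic sinusoid, assuming CE (the per-period entropy helper) is importable from the same module; with the grid below the minimizer should land near, not exactly on, the injected 2.5-unit period.

import numpy as np

rng = np.random.RandomState(0)
t = np.sort(rng.uniform(0, 50, 300))
mag = np.sin(2 * np.pi * t / 2.5) + 0.05 * rng.randn(300)  # true period 2.5
data = np.column_stack([t, mag])

period = conditional_entropy(data, precision=0.001,
                             min_period=2.0, max_period=3.0)
print(round(period, 2))  # expected close to 2.5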
def _watch_folder(folder, destination, compiler_args): """Compares "modified" timestamps against the "compiled" dict, calls compiler if necessary.""" for dirpath, dirnames, filenames in os.walk(folder): for filename in filenames: # Ignore filenames starting with ".#" for Emacs compatibility if watched_extension(filename) and not filename.startswith('.#'): fullpath = os.path.join(dirpath, filename) subfolder = os.path.relpath(dirpath, folder) mtime = os.stat(fullpath).st_mtime # Create subfolders in target directory if they don't exist compiled_folder = os.path.join(destination, subfolder) if not os.path.exists(compiled_folder): os.makedirs(compiled_folder) compiled_path = _compiled_path(compiled_folder, filename) if (not fullpath in compiled or compiled[fullpath] < mtime or not os.path.isfile(compiled_path)): compile_file(fullpath, compiled_path, compiler_args) compiled[fullpath] = mtime
def function[_watch_folder, parameter[folder, destination, compiler_args]]: constant[Compares "modified" timestamps against the "compiled" dict, calls compiler if necessary.] for taget[tuple[[<ast.Name object at 0x7da20c6ab2e0>, <ast.Name object at 0x7da20c6aa170>, <ast.Name object at 0x7da20c6a82b0>]]] in starred[call[name[os].walk, parameter[name[folder]]]] begin[:] for taget[name[filename]] in starred[name[filenames]] begin[:] if <ast.BoolOp object at 0x7da20c6ab490> begin[:] variable[fullpath] assign[=] call[name[os].path.join, parameter[name[dirpath], name[filename]]] variable[subfolder] assign[=] call[name[os].path.relpath, parameter[name[dirpath], name[folder]]] variable[mtime] assign[=] call[name[os].stat, parameter[name[fullpath]]].st_mtime variable[compiled_folder] assign[=] call[name[os].path.join, parameter[name[destination], name[subfolder]]] if <ast.UnaryOp object at 0x7da20c6aaaa0> begin[:] call[name[os].makedirs, parameter[name[compiled_folder]]] variable[compiled_path] assign[=] call[name[_compiled_path], parameter[name[compiled_folder], name[filename]]] if <ast.BoolOp object at 0x7da1b2345960> begin[:] call[name[compile_file], parameter[name[fullpath], name[compiled_path], name[compiler_args]]] call[name[compiled]][name[fullpath]] assign[=] name[mtime]
keyword[def] identifier[_watch_folder] ( identifier[folder] , identifier[destination] , identifier[compiler_args] ): literal[string] keyword[for] identifier[dirpath] , identifier[dirnames] , identifier[filenames] keyword[in] identifier[os] . identifier[walk] ( identifier[folder] ): keyword[for] identifier[filename] keyword[in] identifier[filenames] : keyword[if] identifier[watched_extension] ( identifier[filename] ) keyword[and] keyword[not] identifier[filename] . identifier[startswith] ( literal[string] ): identifier[fullpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[dirpath] , identifier[filename] ) identifier[subfolder] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[dirpath] , identifier[folder] ) identifier[mtime] = identifier[os] . identifier[stat] ( identifier[fullpath] ). identifier[st_mtime] identifier[compiled_folder] = identifier[os] . identifier[path] . identifier[join] ( identifier[destination] , identifier[subfolder] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[compiled_folder] ): identifier[os] . identifier[makedirs] ( identifier[compiled_folder] ) identifier[compiled_path] = identifier[_compiled_path] ( identifier[compiled_folder] , identifier[filename] ) keyword[if] ( keyword[not] identifier[fullpath] keyword[in] identifier[compiled] keyword[or] identifier[compiled] [ identifier[fullpath] ]< identifier[mtime] keyword[or] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[compiled_path] )): identifier[compile_file] ( identifier[fullpath] , identifier[compiled_path] , identifier[compiler_args] ) identifier[compiled] [ identifier[fullpath] ]= identifier[mtime]
def _watch_folder(folder, destination, compiler_args): """Compares "modified" timestamps against the "compiled" dict, calls compiler if necessary.""" for (dirpath, dirnames, filenames) in os.walk(folder): for filename in filenames: # Ignore filenames starting with ".#" for Emacs compatibility if watched_extension(filename) and (not filename.startswith('.#')): fullpath = os.path.join(dirpath, filename) subfolder = os.path.relpath(dirpath, folder) mtime = os.stat(fullpath).st_mtime # Create subfolders in target directory if they don't exist compiled_folder = os.path.join(destination, subfolder) if not os.path.exists(compiled_folder): os.makedirs(compiled_folder) # depends on [control=['if'], data=[]] compiled_path = _compiled_path(compiled_folder, filename) if not fullpath in compiled or compiled[fullpath] < mtime or (not os.path.isfile(compiled_path)): compile_file(fullpath, compiled_path, compiler_args) compiled[fullpath] = mtime # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filename']] # depends on [control=['for'], data=[]]
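A hedged usage sketch: _watch_folder and its module-level collaborators (watched_extension, compile_file, _compiled_path, and the compiled dict) are assumed importable; a polling wrapper is all that is needed on top.

import time

def watch_forever(folder, destination, compiler_args, interval=1.0):
    # Re-scan the tree periodically; only files with newer mtimes recompile.
    while True:
        _watch_folder(folder, destination, compiler_args)
        time.sleep(interval)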
def dot_product_single_head(q, k, v, gates_q, gates_k, bi):
  """Perform a dot product attention on a single sequence on a single head.

  This function dispatches the q, k, v and loops over the buckets to compute
  the attention dot product on each subsequence.

  Args:
    q (tf.Tensor): [length_q, depth_q]
    k (tf.Tensor): [length_k, depth_q]
    v (tf.Tensor): [length_k, depth_v]
    gates_q (tf.Tensor): One-hot vector of shape [length_q, nb_buckets]
    gates_k (tf.Tensor): One-hot vector of shape [length_k, nb_buckets]
    bi (BatchInfo): Contains the batch coordinates and sequence order

  Returns:
    tf.Tensor: [length_q, depth_v]
  """
  nb_buckets = gates_q.get_shape().as_list()[-1]

  q_dispatcher = expert_utils.SparseDispatcher(nb_buckets, gates_q)
  k_dispatcher = expert_utils.SparseDispatcher(nb_buckets, gates_k)

  def eventually_dispatch(dispatcher, value):
    if value is not None:
      return dispatcher.dispatch(value)
    return [None] * nb_buckets

  # Iterate over every dispatched group
  list_v_out = []
  for (
      q_i,
      k_i,
      v_i,
      qbc,
      qbo,
      kbc,
      kbo,
  ) in zip(
      # Dispatch queries, keys and values
      q_dispatcher.dispatch(q),
      k_dispatcher.dispatch(k),
      k_dispatcher.dispatch(v),
      # Also dispatch the sequence positions and batch coordinates
      eventually_dispatch(q_dispatcher, bi.coordinates),
      eventually_dispatch(q_dispatcher, bi.order),
      eventually_dispatch(k_dispatcher, bi.coordinates),
      eventually_dispatch(k_dispatcher, bi.order),
  ):
    list_v_out.append(
        expert_dot_product(
            q_i,
            k_i,
            v_i,
            info_q=BatchInfo(coordinates=qbc, order=qbo),
            info_k=BatchInfo(coordinates=kbc, order=kbo)))

  # Combine all buckets together to restore the original length
  return q_dispatcher.combine(list_v_out)
def function[dot_product_single_head, parameter[q, k, v, gates_q, gates_k, bi]]: constant[Perform a dot product attention on a single sequence on a single head. This function dispatches the q, k, v and loops over the buckets to compute the attention dot product on each subsequence. Args: q (tf.Tensor): [length_q, depth_q] k (tf.Tensor): [length_k, depth_q] v (tf.Tensor): [length_k, depth_v] gates_q (tf.Tensor): One-hot vector of shape [length_q, nb_buckets] gates_k (tf.Tensor): One-hot vector of shape [length_k, nb_buckets] bi (BatchInfo): Contains the batch coordinates and sequence order Returns: tf.Tensor: [length_q, depth_v] ] variable[nb_buckets] assign[=] call[call[call[name[gates_q].get_shape, parameter[]].as_list, parameter[]]][<ast.UnaryOp object at 0x7da1b1e15270>] variable[q_dispatcher] assign[=] call[name[expert_utils].SparseDispatcher, parameter[name[nb_buckets], name[gates_q]]] variable[k_dispatcher] assign[=] call[name[expert_utils].SparseDispatcher, parameter[name[nb_buckets], name[gates_k]]] def function[eventually_dispatch, parameter[dispatcher, value]]: if compare[name[value] is_not constant[None]] begin[:] return[call[name[dispatcher].dispatch, parameter[name[value]]]] return[binary_operation[list[[<ast.Constant object at 0x7da1b1e14190>]] * name[nb_buckets]]] variable[list_v_out] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b1e17250>, <ast.Name object at 0x7da1b1e161d0>, <ast.Name object at 0x7da1b1e17a60>, <ast.Name object at 0x7da1b1e14040>, <ast.Name object at 0x7da1b1e16f50>, <ast.Name object at 0x7da1b1e16cb0>, <ast.Name object at 0x7da1b1e164a0>]]] in starred[call[name[zip], parameter[call[name[q_dispatcher].dispatch, parameter[name[q]]], call[name[k_dispatcher].dispatch, parameter[name[k]]], call[name[k_dispatcher].dispatch, parameter[name[v]]], call[name[eventually_dispatch], parameter[name[q_dispatcher], name[bi].coordinates]], call[name[eventually_dispatch], parameter[name[q_dispatcher], name[bi].order]], call[name[eventually_dispatch], parameter[name[k_dispatcher], name[bi].coordinates]], call[name[eventually_dispatch], parameter[name[k_dispatcher], name[bi].order]]]]] begin[:] call[name[list_v_out].append, parameter[call[name[expert_dot_product], parameter[name[q_i], name[k_i], name[v_i]]]]] return[call[name[q_dispatcher].combine, parameter[name[list_v_out]]]]
keyword[def] identifier[dot_product_single_head] ( identifier[q] , identifier[k] , identifier[v] , identifier[gates_q] , identifier[gates_k] , identifier[bi] ): literal[string] identifier[nb_buckets] = identifier[gates_q] . identifier[get_shape] (). identifier[as_list] ()[- literal[int] ] identifier[q_dispatcher] = identifier[expert_utils] . identifier[SparseDispatcher] ( identifier[nb_buckets] , identifier[gates_q] ) identifier[k_dispatcher] = identifier[expert_utils] . identifier[SparseDispatcher] ( identifier[nb_buckets] , identifier[gates_k] ) keyword[def] identifier[eventually_dispatch] ( identifier[dispatcher] , identifier[value] ): keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[dispatcher] . identifier[dispatch] ( identifier[value] ) keyword[return] [ keyword[None] ]* identifier[nb_buckets] identifier[list_v_out] =[] keyword[for] ( identifier[q_i] , identifier[k_i] , identifier[v_i] , identifier[qbc] , identifier[qbo] , identifier[kbc] , identifier[kbo] , ) keyword[in] identifier[zip] ( identifier[q_dispatcher] . identifier[dispatch] ( identifier[q] ), identifier[k_dispatcher] . identifier[dispatch] ( identifier[k] ), identifier[k_dispatcher] . identifier[dispatch] ( identifier[v] ), identifier[eventually_dispatch] ( identifier[q_dispatcher] , identifier[bi] . identifier[coordinates] ), identifier[eventually_dispatch] ( identifier[q_dispatcher] , identifier[bi] . identifier[order] ), identifier[eventually_dispatch] ( identifier[k_dispatcher] , identifier[bi] . identifier[coordinates] ), identifier[eventually_dispatch] ( identifier[k_dispatcher] , identifier[bi] . identifier[order] ), ): identifier[list_v_out] . identifier[append] ( identifier[expert_dot_product] ( identifier[q_i] , identifier[k_i] , identifier[v_i] , identifier[info_q] = identifier[BatchInfo] ( identifier[coordinates] = identifier[qbc] , identifier[order] = identifier[qbo] ), identifier[info_k] = identifier[BatchInfo] ( identifier[coordinates] = identifier[kbc] , identifier[order] = identifier[kbo] ))) keyword[return] identifier[q_dispatcher] . identifier[combine] ( identifier[list_v_out] )
def dot_product_single_head(q, k, v, gates_q, gates_k, bi):
    """Perform dot-product attention on a single sequence with a single head.

    This function dispatches q, k and v and loops over the buckets to compute
    the attention dot product on each subsequence.

    Args:
      q (tf.Tensor): [length_q, depth_q]
      k (tf.Tensor): [length_k, depth_q]
      v (tf.Tensor): [length_k, depth_v]
      gates_q (tf.Tensor): One-hot vector of shape [length_q, nb_buckets]
      gates_k (tf.Tensor): One-hot vector of shape [length_k, nb_buckets]
      bi (BatchInfo): Contains the batch coordinates and sequence order

    Returns:
      tf.Tensor: [length_q, depth_v]
    """
    nb_buckets = gates_q.get_shape().as_list()[-1]

    q_dispatcher = expert_utils.SparseDispatcher(nb_buckets, gates_q)
    k_dispatcher = expert_utils.SparseDispatcher(nb_buckets, gates_k)

    def eventually_dispatch(dispatcher, value):
        if value is not None:
            return dispatcher.dispatch(value) # depends on [control=['if'], data=['value']]
        return [None] * nb_buckets

    # Iterate over every dispatched group
    list_v_out = []
    for (q_i, k_i, v_i, qbc, qbo, kbc, kbo) in zip(
            # Dispatch queries, keys and values
            q_dispatcher.dispatch(q),
            k_dispatcher.dispatch(k),
            k_dispatcher.dispatch(v),
            # Also dispatch the sequence positions and batch coordinates
            eventually_dispatch(q_dispatcher, bi.coordinates),
            eventually_dispatch(q_dispatcher, bi.order),
            eventually_dispatch(k_dispatcher, bi.coordinates),
            eventually_dispatch(k_dispatcher, bi.order)):
        list_v_out.append(expert_dot_product(q_i, k_i, v_i, info_q=BatchInfo(coordinates=qbc, order=qbo), info_k=BatchInfo(coordinates=kbc, order=kbo))) # depends on [control=['for'], data=[]]
    # Combine all buckets together to restore the original length
    return q_dispatcher.combine(list_v_out)
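For intuition, the dispatch/combine round trip that SparseDispatcher performs can be sketched in plain numpy. This is an illustrative toy, not the tensor2tensor API: the helper names, the toy shapes, and the dense boolean masking are assumptions made for clarity.

import numpy as np

def dispatch(x, gates):
    # Split rows of x into per-bucket groups using the one-hot gates.
    return [x[gates[:, b].astype(bool)] for b in range(gates.shape[1])]

def combine(parts, gates):
    # Scatter the per-bucket results back into the original row order.
    out = np.zeros((gates.shape[0], parts[0].shape[1]))
    for b, part in enumerate(parts):
        out[gates[:, b].astype(bool)] = part
    return out

q = np.random.rand(6, 4)                          # [length_q, depth]
gates = np.eye(2)[np.array([0, 1, 0, 0, 1, 1])]   # one-hot [length_q, 2]
buckets = dispatch(q, gates)                      # two row groups
assert np.allclose(combine(buckets, gates), q)    # round trip restores order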
def httpResponse_bodyParse(self, **kwargs):
    """
    Returns the *body* from an HTTP response.

    :param kwargs: response = <string>
    :return: the <body> from the HTTP <string>
    """
    str_response = ''
    for k, v in kwargs.items():
        if k == 'response':
            str_response = v
    try:
        # The body follows the blank line that terminates the headers.
        str_body = str_response.split('\r\n\r\n')[1]
        d_body = yaml.safe_load(str_body)
        str_body = json.dumps(d_body)
    except Exception:
        str_body = str_response
    return str_body
def function[httpResponse_bodyParse, parameter[self]]: constant[ Returns the *body* from a http response. :param kwargs: response = <string> :return: the <body> from the http <string> ] variable[str_response] assign[=] constant[] for taget[tuple[[<ast.Name object at 0x7da1b234b160>, <ast.Name object at 0x7da1b23481c0>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:] if compare[name[k] equal[==] constant[response]] begin[:] variable[str_response] assign[=] name[v] <ast.Try object at 0x7da1b2348490> return[name[str_body]]
keyword[def] identifier[httpResponse_bodyParse] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[str_response] = literal[string] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] (): keyword[if] identifier[k] == literal[string] : identifier[str_response] = identifier[v] keyword[try] : identifier[str_body] = identifier[str_response] . identifier[split] ( literal[string] )[ literal[int] ] identifier[d_body] = identifier[yaml] . identifier[load] ( identifier[str_body] ) identifier[str_body] = identifier[json] . identifier[dumps] ( identifier[d_body] ) keyword[except] : identifier[str_body] = identifier[str_response] keyword[return] identifier[str_body]
def httpResponse_bodyParse(self, **kwargs):
    """
    Returns the *body* from an HTTP response.

    :param kwargs: response = <string>
    :return: the <body> from the HTTP <string>
    """
    str_response = ''
    for (k, v) in kwargs.items():
        if k == 'response':
            str_response = v # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
    try:
        # The body follows the blank line that terminates the headers.
        str_body = str_response.split('\r\n\r\n')[1]
        d_body = yaml.safe_load(str_body)
        str_body = json.dumps(d_body) # depends on [control=['try'], data=[]]
    except Exception:
        str_body = str_response # depends on [control=['except'], data=[]]
    return str_body
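A minimal usage sketch, assuming a toy raw HTTP response string and a hypothetical `client` object carrying the method; the body here is YAML, so it round-trips to JSON:

raw = 'HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\n{msg: hello}'
print(client.httpResponse_bodyParse(response=raw))
# -> {"msg": "hello"}   (YAML body re-serialized as JSON)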
def cf_data_to_bytes(value): """ Extracts a bytestring from a CFData object :param value: A CFData object :return: A byte string """ start = CoreFoundation.CFDataGetBytePtr(value) num_bytes = CoreFoundation.CFDataGetLength(value) return ffi.buffer(start, num_bytes)[:]
def function[cf_data_to_bytes, parameter[value]]: constant[ Extracts a bytestring from a CFData object :param value: A CFData object :return: A byte string ] variable[start] assign[=] call[name[CoreFoundation].CFDataGetBytePtr, parameter[name[value]]] variable[num_bytes] assign[=] call[name[CoreFoundation].CFDataGetLength, parameter[name[value]]] return[call[call[name[ffi].buffer, parameter[name[start], name[num_bytes]]]][<ast.Slice object at 0x7da1aff0ed10>]]
keyword[def] identifier[cf_data_to_bytes] ( identifier[value] ): literal[string] identifier[start] = identifier[CoreFoundation] . identifier[CFDataGetBytePtr] ( identifier[value] ) identifier[num_bytes] = identifier[CoreFoundation] . identifier[CFDataGetLength] ( identifier[value] ) keyword[return] identifier[ffi] . identifier[buffer] ( identifier[start] , identifier[num_bytes] )[:]
def cf_data_to_bytes(value): """ Extracts a bytestring from a CFData object :param value: A CFData object :return: A byte string """ start = CoreFoundation.CFDataGetBytePtr(value) num_bytes = CoreFoundation.CFDataGetLength(value) return ffi.buffer(start, num_bytes)[:]
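The same copy-out-of-native-memory pattern can be written with the standard library's ctypes; a rough equivalent for illustration only (the real code above goes through an FFI wrapper around CoreFoundation on macOS):

import ctypes

def c_buffer_to_bytes(ptr, num_bytes):
    # Copy num_bytes from a raw C pointer into an owned Python bytes object,
    # so the result outlives the native buffer it came from.
    return ctypes.string_at(ptr, num_bytes)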
def ids(cls, values, itype=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/ids-filter.html Filters documents that only have the provided ids. Note, this filter does not require the _id field to be indexed since it works using the _uid field. ''' instance = cls(ids={'values': values}) if itype is not None: instance['ids']['type'] = itype return instance
def function[ids, parameter[cls, values, itype]]: constant[ http://www.elasticsearch.org/guide/reference/query-dsl/ids-filter.html Filters documents that only have the provided ids. Note, this filter does not require the _id field to be indexed since it works using the _uid field. ] variable[instance] assign[=] call[name[cls], parameter[]] if compare[name[itype] is_not constant[None]] begin[:] call[call[name[instance]][constant[ids]]][constant[type]] assign[=] name[itype] return[name[instance]]
keyword[def] identifier[ids] ( identifier[cls] , identifier[values] , identifier[itype] = keyword[None] ): literal[string] identifier[instance] = identifier[cls] ( identifier[ids] ={ literal[string] : identifier[values] }) keyword[if] identifier[itype] keyword[is] keyword[not] keyword[None] : identifier[instance] [ literal[string] ][ literal[string] ]= identifier[itype] keyword[return] identifier[instance]
def ids(cls, values, itype=None): """ http://www.elasticsearch.org/guide/reference/query-dsl/ids-filter.html Filters documents that only have the provided ids. Note, this filter does not require the _id field to be indexed since it works using the _uid field. """ instance = cls(ids={'values': values}) if itype is not None: instance['ids']['type'] = itype # depends on [control=['if'], data=['itype']] return instance
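Usage sketch; `Filter` stands in for whatever dict-like class carries this classmethod (the mapping behavior is implied by the `instance['ids']['type']` assignment above):

f = Filter.ids([1, 4, 100], itype='my_type')
# f == {'ids': {'values': [1, 4, 100], 'type': 'my_type'}}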
def asarray(self, *args, **kwargs): """Read image data from file and return as numpy array.""" # TODO: fix TypeError on Python 2 # "TypeError: unbound method asarray() must be called with TiffPage # instance as first argument (got TiffFrame instance instead)" if self._keyframe is None: raise RuntimeError('keyframe not set') kwargs['validate'] = False return TiffPage.asarray(self, *args, **kwargs)
def function[asarray, parameter[self]]: constant[Read image data from file and return as numpy array.] if compare[name[self]._keyframe is constant[None]] begin[:] <ast.Raise object at 0x7da1b1836050> call[name[kwargs]][constant[validate]] assign[=] constant[False] return[call[name[TiffPage].asarray, parameter[name[self], <ast.Starred object at 0x7da1b18377c0>]]]
keyword[def] identifier[asarray] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[self] . identifier[_keyframe] keyword[is] keyword[None] : keyword[raise] identifier[RuntimeError] ( literal[string] ) identifier[kwargs] [ literal[string] ]= keyword[False] keyword[return] identifier[TiffPage] . identifier[asarray] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
def asarray(self, *args, **kwargs): """Read image data from file and return as numpy array.""" # TODO: fix TypeError on Python 2 # "TypeError: unbound method asarray() must be called with TiffPage # instance as first argument (got TiffFrame instance instead)" if self._keyframe is None: raise RuntimeError('keyframe not set') # depends on [control=['if'], data=[]] kwargs['validate'] = False return TiffPage.asarray(self, *args, **kwargs)
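A hedged usage sketch with tifffile, where pages that share a keyframe are lightweight TiffFrame objects and asarray() decodes through that keyframe; the file path is illustrative:

import tifffile

with tifffile.TiffFile('stack.tif') as tif:
    frame = tif.pages[1]      # may be a TiffFrame rather than a full TiffPage
    image = frame.asarray()   # decoded numpy array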
def _handle_offset_error(self, failure):
    """
    Retry the offset fetch request if appropriate.

    Once the :attr:`.retry_delay` reaches our :attr:`.retry_max_delay`, we
    log a warning.  This should perhaps be extended to abort sooner on
    certain errors.
    """
    # outstanding request got errback'd, clear it
    self._request_d = None

    if self._stopping and failure.check(CancelledError):
        # Not really an error
        return
    # Do we need to abort?
    if (self.request_retry_max_attempts != 0 and
            self._fetch_attempt_count >= self.request_retry_max_attempts):
        log.debug(
            "%r: Exhausted attempts: %d fetching offset from kafka: %r",
            self, self.request_retry_max_attempts, failure)
        self._start_d.errback(failure)
        return
    # Decide how to log this failure: once we have retried enough times to
    # reach retry_max_delay, log at warning on every other attempt, and at
    # debug otherwise.
    if (self.retry_delay < self.retry_max_delay or
            0 == (self._fetch_attempt_count % 2)):
        log.debug("%r: Failure fetching offset from kafka: %r",
                  self, failure)
    else:
        # We've retried until we hit the max delay, log at warning
        log.warning("%r: Still failing fetching offset from kafka: %r",
                    self, failure)
    self._retry_fetch()
def function[_handle_offset_error, parameter[self, failure]]: constant[ Retry the offset fetch request if appropriate. Once the :attr:`.retry_delay` reaches our :attr:`.retry_max_delay`, we log a warning. This should perhaps be extended to abort sooner on certain errors. ] name[self]._request_d assign[=] constant[None] if <ast.BoolOp object at 0x7da1b04d80a0> begin[:] return[None] if <ast.BoolOp object at 0x7da1b04d8940> begin[:] call[name[log].debug, parameter[constant[%r: Exhausted attempts: %d fetching offset from kafka: %r], name[self], name[self].request_retry_max_attempts, name[failure]]] call[name[self]._start_d.errback, parameter[name[failure]]] return[None] if <ast.BoolOp object at 0x7da1b04da1d0> begin[:] call[name[log].debug, parameter[constant[%r: Failure fetching offset from kafka: %r], name[self], name[failure]]] call[name[self]._retry_fetch, parameter[]]
keyword[def] identifier[_handle_offset_error] ( identifier[self] , identifier[failure] ): literal[string] identifier[self] . identifier[_request_d] = keyword[None] keyword[if] identifier[self] . identifier[_stopping] keyword[and] identifier[failure] . identifier[check] ( identifier[CancelledError] ): keyword[return] keyword[if] ( identifier[self] . identifier[request_retry_max_attempts] != literal[int] keyword[and] identifier[self] . identifier[_fetch_attempt_count] >= identifier[self] . identifier[request_retry_max_attempts] ): identifier[log] . identifier[debug] ( literal[string] , identifier[self] , identifier[self] . identifier[request_retry_max_attempts] , identifier[failure] ) identifier[self] . identifier[_start_d] . identifier[errback] ( identifier[failure] ) keyword[return] keyword[if] ( identifier[self] . identifier[retry_delay] < identifier[self] . identifier[retry_max_delay] keyword[or] literal[int] ==( identifier[self] . identifier[_fetch_attempt_count] % literal[int] )): identifier[log] . identifier[debug] ( literal[string] , identifier[self] , identifier[failure] ) keyword[else] : identifier[log] . identifier[warning] ( literal[string] , identifier[self] , identifier[failure] ) identifier[self] . identifier[_retry_fetch] ()
def _handle_offset_error(self, failure):
    """
    Retry the offset fetch request if appropriate.

    Once the :attr:`.retry_delay` reaches our :attr:`.retry_max_delay`, we
    log a warning.  This should perhaps be extended to abort sooner on
    certain errors.
    """
    # outstanding request got errback'd, clear it
    self._request_d = None
    if self._stopping and failure.check(CancelledError):
        # Not really an error
        return # depends on [control=['if'], data=[]]
    # Do we need to abort?
    if self.request_retry_max_attempts != 0 and self._fetch_attempt_count >= self.request_retry_max_attempts:
        log.debug('%r: Exhausted attempts: %d fetching offset from kafka: %r', self, self.request_retry_max_attempts, failure)
        self._start_d.errback(failure)
        return # depends on [control=['if'], data=[]]
    # Decide how to log this failure: once we have retried enough times to
    # reach retry_max_delay, log at warning on every other attempt, and at
    # debug otherwise.
    if self.retry_delay < self.retry_max_delay or 0 == self._fetch_attempt_count % 2:
        log.debug('%r: Failure fetching offset from kafka: %r', self, failure) # depends on [control=['if'], data=[]]
    else:
        # We've retried until we hit the max delay, log at warning
        log.warning('%r: Still failing fetching offset from kafka: %r', self, failure)
    self._retry_fetch()
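The retry_delay / retry_max_delay pairing suggests a capped exponential backoff; a standalone sketch of that policy, where the growth factor and jitter are assumptions rather than afkak's actual constants:

import random

def next_delay(current, max_delay, factor=2.0, jitter=0.1):
    # Grow the delay geometrically up to a cap, with a little jitter
    # so that many consumers do not retry in lockstep.
    delay = min(current * factor, max_delay)
    return delay * (1.0 + random.uniform(-jitter, jitter))

delay = 0.5
for attempt in range(6):
    delay = next_delay(delay, max_delay=30.0)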
def graph_type(self, graph): """What type of graph is this?""" graph = self.pack(graph) return self.sql('graph_type', graph).fetchone()[0]
def function[graph_type, parameter[self, graph]]: constant[What type of graph is this?] variable[graph] assign[=] call[name[self].pack, parameter[name[graph]]] return[call[call[call[name[self].sql, parameter[constant[graph_type], name[graph]]].fetchone, parameter[]]][constant[0]]]
keyword[def] identifier[graph_type] ( identifier[self] , identifier[graph] ): literal[string] identifier[graph] = identifier[self] . identifier[pack] ( identifier[graph] ) keyword[return] identifier[self] . identifier[sql] ( literal[string] , identifier[graph] ). identifier[fetchone] ()[ literal[int] ]
def graph_type(self, graph): """What type of graph is this?""" graph = self.pack(graph) return self.sql('graph_type', graph).fetchone()[0]
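Usage sketch; the engine object and graph name are illustrative, and the return value is whatever type string the 'graph_type' query stored for that graph:

kind = engine.graph_type('social')   # e.g. 'Graph' or 'DiGraph'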