Dataset Viewer
code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
---|---|---|---|
def append_dynamic(self, t, dynamic, canvas=0, color='blue'):
"""!
@brief Append single dynamic to specified canvas (by default to the first with index '0').
@param[in] t (list): Time points that corresponds to dynamic values and considered on a X axis.
@param[in] dynamic (list): Value points of dynamic that are considered on an Y axis.
@param[in] canvas (uint): Canvas where dynamic should be displayed.
@param[in] color (string): Color that is used for drawing dynamic on the canvas.
"""
description = dynamic_descr(canvas, t, dynamic, False, color);
self.__dynamic_storage.append(description);
self.__update_canvas_xlim(description.time, description.separate); | def function[append_dynamic, parameter[self, t, dynamic, canvas, color]]:
constant[!
@brief Append single dynamic to specified canvas (by default to the first with index '0').
@param[in] t (list): Time points that corresponds to dynamic values and considered on a X axis.
@param[in] dynamic (list): Value points of dynamic that are considered on an Y axis.
@param[in] canvas (uint): Canvas where dynamic should be displayed.
@param[in] color (string): Color that is used for drawing dynamic on the canvas.
]
variable[description] assign[=] call[name[dynamic_descr], parameter[name[canvas], name[t], name[dynamic], constant[False], name[color]]]
call[name[self].__dynamic_storage.append, parameter[name[description]]]
call[name[self].__update_canvas_xlim, parameter[name[description].time, name[description].separate]] | keyword[def] identifier[append_dynamic] ( identifier[self] , identifier[t] , identifier[dynamic] , identifier[canvas] = literal[int] , identifier[color] = literal[string] ):
literal[string]
identifier[description] = identifier[dynamic_descr] ( identifier[canvas] , identifier[t] , identifier[dynamic] , keyword[False] , identifier[color] );
identifier[self] . identifier[__dynamic_storage] . identifier[append] ( identifier[description] );
identifier[self] . identifier[__update_canvas_xlim] ( identifier[description] . identifier[time] , identifier[description] . identifier[separate] ); | def append_dynamic(self, t, dynamic, canvas=0, color='blue'):
"""!
@brief Append single dynamic to specified canvas (by default to the first with index '0').
@param[in] t (list): Time points that corresponds to dynamic values and considered on a X axis.
@param[in] dynamic (list): Value points of dynamic that are considered on an Y axis.
@param[in] canvas (uint): Canvas where dynamic should be displayed.
@param[in] color (string): Color that is used for drawing dynamic on the canvas.
"""
description = dynamic_descr(canvas, t, dynamic, False, color)
self.__dynamic_storage.append(description)
self.__update_canvas_xlim(description.time, description.separate) |
def pretty_print(node):
"""漂亮地打印一个节点
Args:
node (TYPE): Description
"""
for pre, _, node in RenderTree(node):
print('{}{}'.format(pre, node.name)) | def function[pretty_print, parameter[node]]:
constant[漂亮地打印一个节点
Args:
node (TYPE): Description
]
for taget[tuple[[<ast.Name object at 0x7da204622080>, <ast.Name object at 0x7da204620b20>, <ast.Name object at 0x7da204622b30>]]] in starred[call[name[RenderTree], parameter[name[node]]]] begin[:]
call[name[print], parameter[call[constant[{}{}].format, parameter[name[pre], name[node].name]]]] | keyword[def] identifier[pretty_print] ( identifier[node] ):
literal[string]
keyword[for] identifier[pre] , identifier[_] , identifier[node] keyword[in] identifier[RenderTree] ( identifier[node] ):
identifier[print] ( literal[string] . identifier[format] ( identifier[pre] , identifier[node] . identifier[name] )) | def pretty_print(node):
"""漂亮地打印一个节点
Args:
node (TYPE): Description
"""
for (pre, _, node) in RenderTree(node):
print('{}{}'.format(pre, node.name)) # depends on [control=['for'], data=[]] |
def set_state(self, entity_id, new_state, **kwargs):
"Updates or creates the current state of an entity."
return remote.set_state(self.api, new_state, **kwargs) | def function[set_state, parameter[self, entity_id, new_state]]:
constant[Updates or creates the current state of an entity.]
return[call[name[remote].set_state, parameter[name[self].api, name[new_state]]]] | keyword[def] identifier[set_state] ( identifier[self] , identifier[entity_id] , identifier[new_state] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[remote] . identifier[set_state] ( identifier[self] . identifier[api] , identifier[new_state] ,** identifier[kwargs] ) | def set_state(self, entity_id, new_state, **kwargs):
"""Updates or creates the current state of an entity."""
return remote.set_state(self.api, new_state, **kwargs) |
def gateway_by_type(self, type=None, on_network=None): # @ReservedAssignment
"""
Return gateways for the specified node. You can also
specify type to find only gateways of a specific type.
Valid types are: bgp_peering, netlink, ospfv2_area.
:param RoutingNode self: the routing node to check
:param str type: bgp_peering, netlink, ospfv2_area
:param str on_network: if network is specified, should be CIDR and
specifies a filter to only return gateways on that network when
an interface has multiple
:return: tuple of RoutingNode(interface,network,gateway)
:rtype: list
"""
gateways = route_level(self, 'gateway')
if not type:
for gw in gateways:
yield gw
else:
for node in gateways:
#TODO: Change to type == node.related_element_type when
# only supporting SMC >= 6.4
if type == node.routing_node_element.typeof:
# If the parent is level interface, this is a tunnel interface
# where the gateway is bound to interface versus network
parent = node._parent
if parent.level == 'interface':
interface = parent
network = None
else:
network = parent
interface = network._parent
if on_network is not None:
if network and network.ip == on_network:
yield (interface, network, node)
else:
yield (interface, network, node) | def function[gateway_by_type, parameter[self, type, on_network]]:
constant[
Return gateways for the specified node. You can also
specify type to find only gateways of a specific type.
Valid types are: bgp_peering, netlink, ospfv2_area.
:param RoutingNode self: the routing node to check
:param str type: bgp_peering, netlink, ospfv2_area
:param str on_network: if network is specified, should be CIDR and
specifies a filter to only return gateways on that network when
an interface has multiple
:return: tuple of RoutingNode(interface,network,gateway)
:rtype: list
]
variable[gateways] assign[=] call[name[route_level], parameter[name[self], constant[gateway]]]
if <ast.UnaryOp object at 0x7da1b1be44f0> begin[:]
for taget[name[gw]] in starred[name[gateways]] begin[:]
<ast.Yield object at 0x7da1b1be6d10> | keyword[def] identifier[gateway_by_type] ( identifier[self] , identifier[type] = keyword[None] , identifier[on_network] = keyword[None] ):
literal[string]
identifier[gateways] = identifier[route_level] ( identifier[self] , literal[string] )
keyword[if] keyword[not] identifier[type] :
keyword[for] identifier[gw] keyword[in] identifier[gateways] :
keyword[yield] identifier[gw]
keyword[else] :
keyword[for] identifier[node] keyword[in] identifier[gateways] :
keyword[if] identifier[type] == identifier[node] . identifier[routing_node_element] . identifier[typeof] :
identifier[parent] = identifier[node] . identifier[_parent]
keyword[if] identifier[parent] . identifier[level] == literal[string] :
identifier[interface] = identifier[parent]
identifier[network] = keyword[None]
keyword[else] :
identifier[network] = identifier[parent]
identifier[interface] = identifier[network] . identifier[_parent]
keyword[if] identifier[on_network] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[network] keyword[and] identifier[network] . identifier[ip] == identifier[on_network] :
keyword[yield] ( identifier[interface] , identifier[network] , identifier[node] )
keyword[else] :
keyword[yield] ( identifier[interface] , identifier[network] , identifier[node] ) | def gateway_by_type(self, type=None, on_network=None): # @ReservedAssignment
'\n Return gateways for the specified node. You can also\n specify type to find only gateways of a specific type.\n Valid types are: bgp_peering, netlink, ospfv2_area.\n \n :param RoutingNode self: the routing node to check\n :param str type: bgp_peering, netlink, ospfv2_area\n :param str on_network: if network is specified, should be CIDR and\n specifies a filter to only return gateways on that network when\n an interface has multiple\n :return: tuple of RoutingNode(interface,network,gateway)\n :rtype: list\n '
gateways = route_level(self, 'gateway')
if not type:
for gw in gateways:
yield gw # depends on [control=['for'], data=['gw']] # depends on [control=['if'], data=[]]
else:
for node in gateways:
#TODO: Change to type == node.related_element_type when
# only supporting SMC >= 6.4
if type == node.routing_node_element.typeof:
# If the parent is level interface, this is a tunnel interface
# where the gateway is bound to interface versus network
parent = node._parent
if parent.level == 'interface':
interface = parent
network = None # depends on [control=['if'], data=[]]
else:
network = parent
interface = network._parent
if on_network is not None:
if network and network.ip == on_network:
yield (interface, network, node) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['on_network']]
else:
yield (interface, network, node) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']] |
def attributes(self):
"""
A dictionary mapping names of attributes to BiomartAttribute instances.
This causes overwriting errors if there are diffferent pages which use
the same attribute names, but is kept for backward compatibility.
"""
if not self._attribute_pages:
self.fetch_attributes()
result = {}
for page in self._attribute_pages.values():
result.update(page.attributes)
return result | def function[attributes, parameter[self]]:
constant[
A dictionary mapping names of attributes to BiomartAttribute instances.
This causes overwriting errors if there are diffferent pages which use
the same attribute names, but is kept for backward compatibility.
]
if <ast.UnaryOp object at 0x7da20cabd6f0> begin[:]
call[name[self].fetch_attributes, parameter[]]
variable[result] assign[=] dictionary[[], []]
for taget[name[page]] in starred[call[name[self]._attribute_pages.values, parameter[]]] begin[:]
call[name[result].update, parameter[name[page].attributes]]
return[name[result]] | keyword[def] identifier[attributes] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_attribute_pages] :
identifier[self] . identifier[fetch_attributes] ()
identifier[result] ={}
keyword[for] identifier[page] keyword[in] identifier[self] . identifier[_attribute_pages] . identifier[values] ():
identifier[result] . identifier[update] ( identifier[page] . identifier[attributes] )
keyword[return] identifier[result] | def attributes(self):
"""
A dictionary mapping names of attributes to BiomartAttribute instances.
This causes overwriting errors if there are diffferent pages which use
the same attribute names, but is kept for backward compatibility.
"""
if not self._attribute_pages:
self.fetch_attributes() # depends on [control=['if'], data=[]]
result = {}
for page in self._attribute_pages.values():
result.update(page.attributes) # depends on [control=['for'], data=['page']]
return result |
def present(name, deployment_id, metric_name, alert_config, api_key=None, profile='telemetry'):
'''
Ensure the telemetry alert exists.
name
An optional description of the alarm (not currently supported by telemetry API)
deployment_id
Specifies the ID of the root deployment resource
(replica set cluster or sharded cluster) to which this alert definition is attached
metric_name
Specifies the unique ID of the metric to whose values these thresholds will be applied
alert_config: Is a list of dictionaries where each dict contains the following fields:
filter
By default the alert will apply to the deployment and all its constituent resources.
If the alert only applies to a subset of those resources, a filter may be specified to narrow this scope.
min
the smallest "ok" value the metric may take on; if missing or null, no minimum is enforced.
max
the largest "ok" value the metric may take on; if missing or null, no maximum is enforced.
notify_all
Used to indicate if you want to alert both onCallEngineer and apiNotifications
api_key
Telemetry api key for the user
profile
A dict of telemetry config information. If present, will be used instead of
api_key.
'''
ret = {'name': metric_name, 'result': True, 'comment': '', 'changes': {}}
saved_alert_config = __salt__['telemetry.get_alert_config'](
deployment_id, metric_name, api_key, profile)
post_body = {
"deployment": deployment_id,
"filter": alert_config.get('filter'),
"notificationChannel": __salt__['telemetry.get_notification_channel_id'](alert_config.get('escalate_to')).split(),
"condition": {
"metric": metric_name,
"max": alert_config.get('max'),
"min": alert_config.get('min')
}
}
# Diff the alert config with the passed-in attributes
difference = []
if saved_alert_config:
#del saved_alert_config["_id"]
for k, v in post_body.items():
if k not in saved_alert_config:
difference.append("{0}={1} (new)".format(k, v))
continue
v2 = saved_alert_config[k]
if v == v2:
continue
if isinstance(v, string_types) and six.text_type(v) == six.text_type(v2):
continue
if isinstance(v, float) and v == float(v2):
continue
if isinstance(v, int) and v == int(v2):
continue
difference.append("{0}='{1}' was: '{2}'".format(k, v, v2))
else:
difference.append("new alert config")
create_or_update_args = (
deployment_id,
metric_name,
alert_config,
api_key,
profile,
)
if saved_alert_config: # alert config is present. update, or do nothing
# check to see if attributes matches is_present. If so, do nothing.
if not difference:
ret['comment'] = "alert config {0} present and matching".format(metric_name)
return ret
if __opts__['test']:
msg = 'alert config {0} is to be updated.'.format(metric_name)
ret['comment'] = msg
ret['result'] = "\n".join(difference)
return ret
result, msg = __salt__['telemetry.update_alarm'](*create_or_update_args)
if result:
ret['changes']['diff'] = difference
ret['comment'] = "Alert updated."
else:
ret['result'] = False
ret['comment'] = 'Failed to update {0} alert config: {1}'.format(metric_name, msg)
else: # alert config is absent. create it.
if __opts__['test']:
msg = 'alert config {0} is to be created.'.format(metric_name)
ret['comment'] = msg
ret['result'] = None
return ret
result, msg = __salt__['telemetry.create_alarm'](*create_or_update_args)
if result:
ret['changes']['new'] = msg
else:
ret['result'] = False
ret['comment'] = 'Failed to create {0} alert config: {1}'.format(metric_name, msg)
return ret | def function[present, parameter[name, deployment_id, metric_name, alert_config, api_key, profile]]:
constant[
Ensure the telemetry alert exists.
name
An optional description of the alarm (not currently supported by telemetry API)
deployment_id
Specifies the ID of the root deployment resource
(replica set cluster or sharded cluster) to which this alert definition is attached
metric_name
Specifies the unique ID of the metric to whose values these thresholds will be applied
alert_config: Is a list of dictionaries where each dict contains the following fields:
filter
By default the alert will apply to the deployment and all its constituent resources.
If the alert only applies to a subset of those resources, a filter may be specified to narrow this scope.
min
the smallest "ok" value the metric may take on; if missing or null, no minimum is enforced.
max
the largest "ok" value the metric may take on; if missing or null, no maximum is enforced.
notify_all
Used to indicate if you want to alert both onCallEngineer and apiNotifications
api_key
Telemetry api key for the user
profile
A dict of telemetry config information. If present, will be used instead of
api_key.
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b26ac970>, <ast.Constant object at 0x7da1b26adea0>, <ast.Constant object at 0x7da1b26aca60>, <ast.Constant object at 0x7da1b26af6a0>], [<ast.Name object at 0x7da1b26adc90>, <ast.Constant object at 0x7da1b26ae770>, <ast.Constant object at 0x7da1b26ac100>, <ast.Dict object at 0x7da1b26aece0>]]
variable[saved_alert_config] assign[=] call[call[name[__salt__]][constant[telemetry.get_alert_config]], parameter[name[deployment_id], name[metric_name], name[api_key], name[profile]]]
variable[post_body] assign[=] dictionary[[<ast.Constant object at 0x7da1b26ae530>, <ast.Constant object at 0x7da1b26af5b0>, <ast.Constant object at 0x7da1b26af940>, <ast.Constant object at 0x7da1b26aed70>], [<ast.Name object at 0x7da1b26ac0a0>, <ast.Call object at 0x7da1b26aead0>, <ast.Call object at 0x7da1b26ac160>, <ast.Dict object at 0x7da1b26ad300>]]
variable[difference] assign[=] list[[]]
if name[saved_alert_config] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b26ac580>, <ast.Name object at 0x7da1b26ae4a0>]]] in starred[call[name[post_body].items, parameter[]]] begin[:]
if compare[name[k] <ast.NotIn object at 0x7da2590d7190> name[saved_alert_config]] begin[:]
call[name[difference].append, parameter[call[constant[{0}={1} (new)].format, parameter[name[k], name[v]]]]]
continue
variable[v2] assign[=] call[name[saved_alert_config]][name[k]]
if compare[name[v] equal[==] name[v2]] begin[:]
continue
if <ast.BoolOp object at 0x7da1b26afb20> begin[:]
continue
if <ast.BoolOp object at 0x7da1b26ad7e0> begin[:]
continue
if <ast.BoolOp object at 0x7da1b26ad480> begin[:]
continue
call[name[difference].append, parameter[call[constant[{0}='{1}' was: '{2}'].format, parameter[name[k], name[v], name[v2]]]]]
variable[create_or_update_args] assign[=] tuple[[<ast.Name object at 0x7da1b26af670>, <ast.Name object at 0x7da1b26acac0>, <ast.Name object at 0x7da1b26ade40>, <ast.Name object at 0x7da1b26adc00>, <ast.Name object at 0x7da1b26afd00>]]
if name[saved_alert_config] begin[:]
if <ast.UnaryOp object at 0x7da1b26aed40> begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[alert config {0} present and matching].format, parameter[name[metric_name]]]
return[name[ret]]
if call[name[__opts__]][constant[test]] begin[:]
variable[msg] assign[=] call[constant[alert config {0} is to be updated.].format, parameter[name[metric_name]]]
call[name[ret]][constant[comment]] assign[=] name[msg]
call[name[ret]][constant[result]] assign[=] call[constant[
].join, parameter[name[difference]]]
return[name[ret]]
<ast.Tuple object at 0x7da1b26af0a0> assign[=] call[call[name[__salt__]][constant[telemetry.update_alarm]], parameter[<ast.Starred object at 0x7da1b26ac5b0>]]
if name[result] begin[:]
call[call[name[ret]][constant[changes]]][constant[diff]] assign[=] name[difference]
call[name[ret]][constant[comment]] assign[=] constant[Alert updated.]
return[name[ret]] | keyword[def] identifier[present] ( identifier[name] , identifier[deployment_id] , identifier[metric_name] , identifier[alert_config] , identifier[api_key] = keyword[None] , identifier[profile] = literal[string] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[metric_name] , literal[string] : keyword[True] , literal[string] : literal[string] , literal[string] :{}}
identifier[saved_alert_config] = identifier[__salt__] [ literal[string] ](
identifier[deployment_id] , identifier[metric_name] , identifier[api_key] , identifier[profile] )
identifier[post_body] ={
literal[string] : identifier[deployment_id] ,
literal[string] : identifier[alert_config] . identifier[get] ( literal[string] ),
literal[string] : identifier[__salt__] [ literal[string] ]( identifier[alert_config] . identifier[get] ( literal[string] )). identifier[split] (),
literal[string] :{
literal[string] : identifier[metric_name] ,
literal[string] : identifier[alert_config] . identifier[get] ( literal[string] ),
literal[string] : identifier[alert_config] . identifier[get] ( literal[string] )
}
}
identifier[difference] =[]
keyword[if] identifier[saved_alert_config] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[post_body] . identifier[items] ():
keyword[if] identifier[k] keyword[not] keyword[in] identifier[saved_alert_config] :
identifier[difference] . identifier[append] ( literal[string] . identifier[format] ( identifier[k] , identifier[v] ))
keyword[continue]
identifier[v2] = identifier[saved_alert_config] [ identifier[k] ]
keyword[if] identifier[v] == identifier[v2] :
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[v] , identifier[string_types] ) keyword[and] identifier[six] . identifier[text_type] ( identifier[v] )== identifier[six] . identifier[text_type] ( identifier[v2] ):
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[v] , identifier[float] ) keyword[and] identifier[v] == identifier[float] ( identifier[v2] ):
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[v] , identifier[int] ) keyword[and] identifier[v] == identifier[int] ( identifier[v2] ):
keyword[continue]
identifier[difference] . identifier[append] ( literal[string] . identifier[format] ( identifier[k] , identifier[v] , identifier[v2] ))
keyword[else] :
identifier[difference] . identifier[append] ( literal[string] )
identifier[create_or_update_args] =(
identifier[deployment_id] ,
identifier[metric_name] ,
identifier[alert_config] ,
identifier[api_key] ,
identifier[profile] ,
)
keyword[if] identifier[saved_alert_config] :
keyword[if] keyword[not] identifier[difference] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[metric_name] )
keyword[return] identifier[ret]
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[msg] = literal[string] . identifier[format] ( identifier[metric_name] )
identifier[ret] [ literal[string] ]= identifier[msg]
identifier[ret] [ literal[string] ]= literal[string] . identifier[join] ( identifier[difference] )
keyword[return] identifier[ret]
identifier[result] , identifier[msg] = identifier[__salt__] [ literal[string] ](* identifier[create_or_update_args] )
keyword[if] identifier[result] :
identifier[ret] [ literal[string] ][ literal[string] ]= identifier[difference]
identifier[ret] [ literal[string] ]= literal[string]
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[metric_name] , identifier[msg] )
keyword[else] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[msg] = literal[string] . identifier[format] ( identifier[metric_name] )
identifier[ret] [ literal[string] ]= identifier[msg]
identifier[ret] [ literal[string] ]= keyword[None]
keyword[return] identifier[ret]
identifier[result] , identifier[msg] = identifier[__salt__] [ literal[string] ](* identifier[create_or_update_args] )
keyword[if] identifier[result] :
identifier[ret] [ literal[string] ][ literal[string] ]= identifier[msg]
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[metric_name] , identifier[msg] )
keyword[return] identifier[ret] | def present(name, deployment_id, metric_name, alert_config, api_key=None, profile='telemetry'):
"""
Ensure the telemetry alert exists.
name
An optional description of the alarm (not currently supported by telemetry API)
deployment_id
Specifies the ID of the root deployment resource
(replica set cluster or sharded cluster) to which this alert definition is attached
metric_name
Specifies the unique ID of the metric to whose values these thresholds will be applied
alert_config: Is a list of dictionaries where each dict contains the following fields:
filter
By default the alert will apply to the deployment and all its constituent resources.
If the alert only applies to a subset of those resources, a filter may be specified to narrow this scope.
min
the smallest "ok" value the metric may take on; if missing or null, no minimum is enforced.
max
the largest "ok" value the metric may take on; if missing or null, no maximum is enforced.
notify_all
Used to indicate if you want to alert both onCallEngineer and apiNotifications
api_key
Telemetry api key for the user
profile
A dict of telemetry config information. If present, will be used instead of
api_key.
"""
ret = {'name': metric_name, 'result': True, 'comment': '', 'changes': {}}
saved_alert_config = __salt__['telemetry.get_alert_config'](deployment_id, metric_name, api_key, profile)
post_body = {'deployment': deployment_id, 'filter': alert_config.get('filter'), 'notificationChannel': __salt__['telemetry.get_notification_channel_id'](alert_config.get('escalate_to')).split(), 'condition': {'metric': metric_name, 'max': alert_config.get('max'), 'min': alert_config.get('min')}}
# Diff the alert config with the passed-in attributes
difference = []
if saved_alert_config:
#del saved_alert_config["_id"]
for (k, v) in post_body.items():
if k not in saved_alert_config:
difference.append('{0}={1} (new)'.format(k, v))
continue # depends on [control=['if'], data=['k']]
v2 = saved_alert_config[k]
if v == v2:
continue # depends on [control=['if'], data=[]]
if isinstance(v, string_types) and six.text_type(v) == six.text_type(v2):
continue # depends on [control=['if'], data=[]]
if isinstance(v, float) and v == float(v2):
continue # depends on [control=['if'], data=[]]
if isinstance(v, int) and v == int(v2):
continue # depends on [control=['if'], data=[]]
difference.append("{0}='{1}' was: '{2}'".format(k, v, v2)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
difference.append('new alert config')
create_or_update_args = (deployment_id, metric_name, alert_config, api_key, profile)
if saved_alert_config: # alert config is present. update, or do nothing
# check to see if attributes matches is_present. If so, do nothing.
if not difference:
ret['comment'] = 'alert config {0} present and matching'.format(metric_name)
return ret # depends on [control=['if'], data=[]]
if __opts__['test']:
msg = 'alert config {0} is to be updated.'.format(metric_name)
ret['comment'] = msg
ret['result'] = '\n'.join(difference)
return ret # depends on [control=['if'], data=[]]
(result, msg) = __salt__['telemetry.update_alarm'](*create_or_update_args)
if result:
ret['changes']['diff'] = difference
ret['comment'] = 'Alert updated.' # depends on [control=['if'], data=[]]
else:
ret['result'] = False
ret['comment'] = 'Failed to update {0} alert config: {1}'.format(metric_name, msg) # depends on [control=['if'], data=[]]
else: # alert config is absent. create it.
if __opts__['test']:
msg = 'alert config {0} is to be created.'.format(metric_name)
ret['comment'] = msg
ret['result'] = None
return ret # depends on [control=['if'], data=[]]
(result, msg) = __salt__['telemetry.create_alarm'](*create_or_update_args)
if result:
ret['changes']['new'] = msg # depends on [control=['if'], data=[]]
else:
ret['result'] = False
ret['comment'] = 'Failed to create {0} alert config: {1}'.format(metric_name, msg)
return ret |
def pos(self):
"""
Lazy-loads the part of speech tag for this word
:getter: Returns the plain string value of the POS tag for the word
:type: str
"""
if self._pos is None:
poses = self._element.xpath('POS/text()')
if len(poses) > 0:
self._pos = poses[0]
return self._pos | def function[pos, parameter[self]]:
constant[
Lazy-loads the part of speech tag for this word
:getter: Returns the plain string value of the POS tag for the word
:type: str
]
if compare[name[self]._pos is constant[None]] begin[:]
variable[poses] assign[=] call[name[self]._element.xpath, parameter[constant[POS/text()]]]
if compare[call[name[len], parameter[name[poses]]] greater[>] constant[0]] begin[:]
name[self]._pos assign[=] call[name[poses]][constant[0]]
return[name[self]._pos] | keyword[def] identifier[pos] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_pos] keyword[is] keyword[None] :
identifier[poses] = identifier[self] . identifier[_element] . identifier[xpath] ( literal[string] )
keyword[if] identifier[len] ( identifier[poses] )> literal[int] :
identifier[self] . identifier[_pos] = identifier[poses] [ literal[int] ]
keyword[return] identifier[self] . identifier[_pos] | def pos(self):
"""
Lazy-loads the part of speech tag for this word
:getter: Returns the plain string value of the POS tag for the word
:type: str
"""
if self._pos is None:
poses = self._element.xpath('POS/text()')
if len(poses) > 0:
self._pos = poses[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return self._pos |
async def handle_client_request(self, req, res):
"""
Entry point for the request + response middleware chain.
This is called by growler.HTTPResponder (the default responder)
after the headers have been processed in the begin_application
method.
This iterates over all middleware in the middleware list which
matches the client's method and path.
It executes the middleware and continues iterating until the
res.has_ended property is true.
If the middleware raises a GrowlerStopIteration exception, this
method immediately returns None, breaking the loop and leaving
res without sending any information back to the client. Be *sure*
that you have another coroutine scheduled that will take over
handling client data.
If a middleware function raises any other exception, the
exception is forwarded to the middleware generator, which changes
behavior to generating any error handlers it had encountered.
This method then calls the handle_server_error method which
*should* handle the error and notify the user.
If after the chain is exhausted, either with an exception raised
or not, res.has_ended does not evaluate to true, the response
is sent a simple server error message in text.
Args:
req (growler.HTTPRequest): The incoming request, containing
all information about the client.
res (growler.HTTPResponse): The outgoing response, containing
methods for sending headers and data back to the client.
"""
# create a middleware generator
mw_generator = self.middleware(req.method, req.path)
# loop through middleware
for mw in mw_generator:
# try calling the function
try:
ret_val = mw(req, res)
if inspect.isawaitable(ret_val):
await ret_val
# special exception - immediately stop the loop
# - do not check if res has sent
except GrowlerStopIteration:
return None
# on an unhandled exception - notify the generator of the error
except Exception as error:
mw_generator.throw(error)
await self.handle_server_error(req, res, mw_generator, error)
return
if res.has_ended:
break
if not res.has_ended:
self.handle_response_not_sent(req, res) | <ast.AsyncFunctionDef object at 0x7da18bcc8070> | keyword[async] keyword[def] identifier[handle_client_request] ( identifier[self] , identifier[req] , identifier[res] ):
literal[string]
identifier[mw_generator] = identifier[self] . identifier[middleware] ( identifier[req] . identifier[method] , identifier[req] . identifier[path] )
keyword[for] identifier[mw] keyword[in] identifier[mw_generator] :
keyword[try] :
identifier[ret_val] = identifier[mw] ( identifier[req] , identifier[res] )
keyword[if] identifier[inspect] . identifier[isawaitable] ( identifier[ret_val] ):
keyword[await] identifier[ret_val]
keyword[except] identifier[GrowlerStopIteration] :
keyword[return] keyword[None]
keyword[except] identifier[Exception] keyword[as] identifier[error] :
identifier[mw_generator] . identifier[throw] ( identifier[error] )
keyword[await] identifier[self] . identifier[handle_server_error] ( identifier[req] , identifier[res] , identifier[mw_generator] , identifier[error] )
keyword[return]
keyword[if] identifier[res] . identifier[has_ended] :
keyword[break]
keyword[if] keyword[not] identifier[res] . identifier[has_ended] :
identifier[self] . identifier[handle_response_not_sent] ( identifier[req] , identifier[res] ) | async def handle_client_request(self, req, res):
"""
Entry point for the request + response middleware chain.
This is called by growler.HTTPResponder (the default responder)
after the headers have been processed in the begin_application
method.
This iterates over all middleware in the middleware list which
matches the client's method and path.
It executes the middleware and continues iterating until the
res.has_ended property is true.
If the middleware raises a GrowlerStopIteration exception, this
method immediately returns None, breaking the loop and leaving
res without sending any information back to the client. Be *sure*
that you have another coroutine scheduled that will take over
handling client data.
If a middleware function raises any other exception, the
exception is forwarded to the middleware generator, which changes
behavior to generating any error handlers it had encountered.
This method then calls the handle_server_error method which
*should* handle the error and notify the user.
If after the chain is exhausted, either with an exception raised
or not, res.has_ended does not evaluate to true, the response
is sent a simple server error message in text.
Args:
req (growler.HTTPRequest): The incoming request, containing
all information about the client.
res (growler.HTTPResponse): The outgoing response, containing
methods for sending headers and data back to the client.
"""
# create a middleware generator
mw_generator = self.middleware(req.method, req.path)
# loop through middleware
for mw in mw_generator:
# try calling the function
try:
ret_val = mw(req, res)
if inspect.isawaitable(ret_val):
await ret_val # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
# special exception - immediately stop the loop
# - do not check if res has sent
except GrowlerStopIteration:
return None # depends on [control=['except'], data=[]]
# on an unhandled exception - notify the generator of the error
except Exception as error:
mw_generator.throw(error)
await self.handle_server_error(req, res, mw_generator, error)
return # depends on [control=['except'], data=['error']]
if res.has_ended:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['mw']]
if not res.has_ended:
self.handle_response_not_sent(req, res) # depends on [control=['if'], data=[]] |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'key') and self.key is not None:
_dict['key'] = self.key
if hasattr(self,
'matching_results') and self.matching_results is not None:
_dict['matching_results'] = self.matching_results
if hasattr(self, 'event_rate') and self.event_rate is not None:
_dict['event_rate'] = self.event_rate
return _dict | def function[_to_dict, parameter[self]]:
constant[Return a json dictionary representing this model.]
variable[_dict] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da2044c2920> begin[:]
call[name[_dict]][constant[key]] assign[=] name[self].key
if <ast.BoolOp object at 0x7da2044c0340> begin[:]
call[name[_dict]][constant[matching_results]] assign[=] name[self].matching_results
if <ast.BoolOp object at 0x7da18bccb4c0> begin[:]
call[name[_dict]][constant[event_rate]] assign[=] name[self].event_rate
return[name[_dict]] | keyword[def] identifier[_to_dict] ( identifier[self] ):
literal[string]
identifier[_dict] ={}
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[key] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[key]
keyword[if] identifier[hasattr] ( identifier[self] ,
literal[string] ) keyword[and] identifier[self] . identifier[matching_results] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[matching_results]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[event_rate] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[event_rate]
keyword[return] identifier[_dict] | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'key') and self.key is not None:
_dict['key'] = self.key # depends on [control=['if'], data=[]]
if hasattr(self, 'matching_results') and self.matching_results is not None:
_dict['matching_results'] = self.matching_results # depends on [control=['if'], data=[]]
if hasattr(self, 'event_rate') and self.event_rate is not None:
_dict['event_rate'] = self.event_rate # depends on [control=['if'], data=[]]
return _dict |
def device_unmounted(self, device):
"""Show unmount notification for specified device object."""
if not self._mounter.is_handleable(device):
return
self._show_notification(
'device_unmounted',
_('Device unmounted'),
_('{0.ui_label} unmounted', device),
device.icon_name) | def function[device_unmounted, parameter[self, device]]:
constant[Show unmount notification for specified device object.]
if <ast.UnaryOp object at 0x7da20c6e5300> begin[:]
return[None]
call[name[self]._show_notification, parameter[constant[device_unmounted], call[name[_], parameter[constant[Device unmounted]]], call[name[_], parameter[constant[{0.ui_label} unmounted], name[device]]], name[device].icon_name]] | keyword[def] identifier[device_unmounted] ( identifier[self] , identifier[device] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_mounter] . identifier[is_handleable] ( identifier[device] ):
keyword[return]
identifier[self] . identifier[_show_notification] (
literal[string] ,
identifier[_] ( literal[string] ),
identifier[_] ( literal[string] , identifier[device] ),
identifier[device] . identifier[icon_name] ) | def device_unmounted(self, device):
"""Show unmount notification for specified device object."""
if not self._mounter.is_handleable(device):
return # depends on [control=['if'], data=[]]
self._show_notification('device_unmounted', _('Device unmounted'), _('{0.ui_label} unmounted', device), device.icon_name) |
def update(self, play):
"""
Update the accumulator with the current play
:returns: new tally
:rtype: dict, ``{ 'period': per, 'time': clock, 'team': cumul, 'play': play }``
"""
new_tally = { }
#if any(isinstance(play.event, te) for te in self.trigger_event_types):
if self._count_play(play):
# the team who made the play / triggered the event
team = self._get_team(play)
try:
self.total[team] += 1
except:
self.total[team] = 1
self.teams.append(team)
for i in range(len(self.tally)):
self.tally[i][team] = 0
try:
new_tally = { k:v for k,v in self.tally[len(self.tally)-1].items() }
new_tally['period'] = play.period
new_tally['time'] = play.time
new_tally[team] += 1
new_tally['play'] = play
except:
new_tally = {
'period': play.period,
'time': play.time,
team: 1,
'play': play
}
self.tally.append(new_tally)
return new_tally | def function[update, parameter[self, play]]:
constant[
Update the accumulator with the current play
:returns: new tally
:rtype: dict, ``{ 'period': per, 'time': clock, 'team': cumul, 'play': play }``
]
variable[new_tally] assign[=] dictionary[[], []]
if call[name[self]._count_play, parameter[name[play]]] begin[:]
variable[team] assign[=] call[name[self]._get_team, parameter[name[play]]]
<ast.Try object at 0x7da1b10c2350>
<ast.Try object at 0x7da1b10c15a0>
call[name[self].tally.append, parameter[name[new_tally]]]
return[name[new_tally]] | keyword[def] identifier[update] ( identifier[self] , identifier[play] ):
literal[string]
identifier[new_tally] ={}
keyword[if] identifier[self] . identifier[_count_play] ( identifier[play] ):
identifier[team] = identifier[self] . identifier[_get_team] ( identifier[play] )
keyword[try] :
identifier[self] . identifier[total] [ identifier[team] ]+= literal[int]
keyword[except] :
identifier[self] . identifier[total] [ identifier[team] ]= literal[int]
identifier[self] . identifier[teams] . identifier[append] ( identifier[team] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[tally] )):
identifier[self] . identifier[tally] [ identifier[i] ][ identifier[team] ]= literal[int]
keyword[try] :
identifier[new_tally] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[tally] [ identifier[len] ( identifier[self] . identifier[tally] )- literal[int] ]. identifier[items] ()}
identifier[new_tally] [ literal[string] ]= identifier[play] . identifier[period]
identifier[new_tally] [ literal[string] ]= identifier[play] . identifier[time]
identifier[new_tally] [ identifier[team] ]+= literal[int]
identifier[new_tally] [ literal[string] ]= identifier[play]
keyword[except] :
identifier[new_tally] ={
literal[string] : identifier[play] . identifier[period] ,
literal[string] : identifier[play] . identifier[time] ,
identifier[team] : literal[int] ,
literal[string] : identifier[play]
}
identifier[self] . identifier[tally] . identifier[append] ( identifier[new_tally] )
keyword[return] identifier[new_tally] | def update(self, play):
"""
Update the accumulator with the current play
:returns: new tally
:rtype: dict, ``{ 'period': per, 'time': clock, 'team': cumul, 'play': play }``
"""
new_tally = {}
#if any(isinstance(play.event, te) for te in self.trigger_event_types):
if self._count_play(play):
# the team who made the play / triggered the event
team = self._get_team(play)
try:
self.total[team] += 1 # depends on [control=['try'], data=[]]
except:
self.total[team] = 1
self.teams.append(team)
for i in range(len(self.tally)):
self.tally[i][team] = 0 # depends on [control=['for'], data=['i']] # depends on [control=['except'], data=[]]
try:
new_tally = {k: v for (k, v) in self.tally[len(self.tally) - 1].items()}
new_tally['period'] = play.period
new_tally['time'] = play.time
new_tally[team] += 1
new_tally['play'] = play # depends on [control=['try'], data=[]]
except:
new_tally = {'period': play.period, 'time': play.time, team: 1, 'play': play} # depends on [control=['except'], data=[]]
self.tally.append(new_tally) # depends on [control=['if'], data=[]]
return new_tally |
def reject_source(ident, comment):
'''Reject a source for automatic harvesting'''
source = get_source(ident)
source.validation.on = datetime.now()
source.validation.comment = comment
source.validation.state = VALIDATION_REFUSED
if current_user.is_authenticated:
source.validation.by = current_user._get_current_object()
source.save()
return source | def function[reject_source, parameter[ident, comment]]:
constant[Reject a source for automatic harvesting]
variable[source] assign[=] call[name[get_source], parameter[name[ident]]]
name[source].validation.on assign[=] call[name[datetime].now, parameter[]]
name[source].validation.comment assign[=] name[comment]
name[source].validation.state assign[=] name[VALIDATION_REFUSED]
if name[current_user].is_authenticated begin[:]
name[source].validation.by assign[=] call[name[current_user]._get_current_object, parameter[]]
call[name[source].save, parameter[]]
return[name[source]] | keyword[def] identifier[reject_source] ( identifier[ident] , identifier[comment] ):
literal[string]
identifier[source] = identifier[get_source] ( identifier[ident] )
identifier[source] . identifier[validation] . identifier[on] = identifier[datetime] . identifier[now] ()
identifier[source] . identifier[validation] . identifier[comment] = identifier[comment]
identifier[source] . identifier[validation] . identifier[state] = identifier[VALIDATION_REFUSED]
keyword[if] identifier[current_user] . identifier[is_authenticated] :
identifier[source] . identifier[validation] . identifier[by] = identifier[current_user] . identifier[_get_current_object] ()
identifier[source] . identifier[save] ()
keyword[return] identifier[source] | def reject_source(ident, comment):
"""Reject a source for automatic harvesting"""
source = get_source(ident)
source.validation.on = datetime.now()
source.validation.comment = comment
source.validation.state = VALIDATION_REFUSED
if current_user.is_authenticated:
source.validation.by = current_user._get_current_object() # depends on [control=['if'], data=[]]
source.save()
return source |
def expandService(service_element):
"""Take a service element and expand it into an iterator of:
([type_uri], uri, service_element)
"""
uris = sortedURIs(service_element)
if not uris:
uris = [None]
expanded = []
for uri in uris:
type_uris = getTypeURIs(service_element)
expanded.append((type_uris, uri, service_element))
return expanded | def function[expandService, parameter[service_element]]:
constant[Take a service element and expand it into an iterator of:
([type_uri], uri, service_element)
]
variable[uris] assign[=] call[name[sortedURIs], parameter[name[service_element]]]
if <ast.UnaryOp object at 0x7da18dc07af0> begin[:]
variable[uris] assign[=] list[[<ast.Constant object at 0x7da18dc05360>]]
variable[expanded] assign[=] list[[]]
for taget[name[uri]] in starred[name[uris]] begin[:]
variable[type_uris] assign[=] call[name[getTypeURIs], parameter[name[service_element]]]
call[name[expanded].append, parameter[tuple[[<ast.Name object at 0x7da2054a4430>, <ast.Name object at 0x7da2054a5c30>, <ast.Name object at 0x7da2054a7a00>]]]]
return[name[expanded]] | keyword[def] identifier[expandService] ( identifier[service_element] ):
literal[string]
identifier[uris] = identifier[sortedURIs] ( identifier[service_element] )
keyword[if] keyword[not] identifier[uris] :
identifier[uris] =[ keyword[None] ]
identifier[expanded] =[]
keyword[for] identifier[uri] keyword[in] identifier[uris] :
identifier[type_uris] = identifier[getTypeURIs] ( identifier[service_element] )
identifier[expanded] . identifier[append] (( identifier[type_uris] , identifier[uri] , identifier[service_element] ))
keyword[return] identifier[expanded] | def expandService(service_element):
"""Take a service element and expand it into an iterator of:
([type_uri], uri, service_element)
"""
uris = sortedURIs(service_element)
if not uris:
uris = [None] # depends on [control=['if'], data=[]]
expanded = []
for uri in uris:
type_uris = getTypeURIs(service_element)
expanded.append((type_uris, uri, service_element)) # depends on [control=['for'], data=['uri']]
return expanded |
def euler_angles(self):
""":obj:`tuple` of float: The three euler angles for the rotation.
"""
q_wxyz = self.quaternion
q_xyzw = np.roll(q_wxyz, -1)
return transformations.euler_from_quaternion(q_xyzw) | def function[euler_angles, parameter[self]]:
constant[:obj:`tuple` of float: The three euler angles for the rotation.
]
variable[q_wxyz] assign[=] name[self].quaternion
variable[q_xyzw] assign[=] call[name[np].roll, parameter[name[q_wxyz], <ast.UnaryOp object at 0x7da1b12b4be0>]]
return[call[name[transformations].euler_from_quaternion, parameter[name[q_xyzw]]]] | keyword[def] identifier[euler_angles] ( identifier[self] ):
literal[string]
identifier[q_wxyz] = identifier[self] . identifier[quaternion]
identifier[q_xyzw] = identifier[np] . identifier[roll] ( identifier[q_wxyz] ,- literal[int] )
keyword[return] identifier[transformations] . identifier[euler_from_quaternion] ( identifier[q_xyzw] ) | def euler_angles(self):
""":obj:`tuple` of float: The three euler angles for the rotation.
"""
q_wxyz = self.quaternion
q_xyzw = np.roll(q_wxyz, -1)
return transformations.euler_from_quaternion(q_xyzw) |
def plot_survival(self,
on,
how="os",
survival_units="Days",
strata=None,
ax=None,
ci_show=False,
with_condition_color="#B38600",
no_condition_color="#A941AC",
with_condition_label=None,
no_condition_label=None,
color_map=None,
label_map=None,
color_palette="Set2",
threshold=None, **kwargs):
"""Plot a Kaplan Meier survival curve by splitting the cohort into two groups
Parameters
----------
on : str or function or list or dict
See `cohort.load.as_dataframe`
how : {"os", "pfs"}, optional
Whether to plot OS (overall survival) or PFS (progression free survival)
survival_units : str
Unit of time for the survival measure, i.e. Days or Months
strata : str
(optional) column name of stratifying variable
ci_show : bool
Display the confidence interval around the survival curve
threshold : int, "median", "median-per-strata" or None (optional)
Threshold of `col` on which to split the cohort
"""
assert how in ["os", "pfs"], "Invalid choice of survival plot type %s" % how
cols, df = self.as_dataframe(on, return_cols=True, **kwargs)
plot_col = self.plot_col_from_cols(cols=cols, only_allow_one=True)
df = filter_not_null(df, plot_col)
results = plot_kmf(
df=df,
condition_col=plot_col,
xlabel=survival_units,
ylabel="Overall Survival (%)" if how == "os" else "Progression-Free Survival (%)",
censor_col="deceased" if how == "os" else "progressed_or_deceased",
survival_col=how,
strata_col=strata,
threshold=threshold,
ax=ax,
ci_show=ci_show,
with_condition_color=with_condition_color,
no_condition_color=no_condition_color,
with_condition_label=with_condition_label,
no_condition_label=no_condition_label,
color_palette=color_palette,
label_map=label_map,
color_map=color_map,
)
return results | def function[plot_survival, parameter[self, on, how, survival_units, strata, ax, ci_show, with_condition_color, no_condition_color, with_condition_label, no_condition_label, color_map, label_map, color_palette, threshold]]:
constant[Plot a Kaplan Meier survival curve by splitting the cohort into two groups
Parameters
----------
on : str or function or list or dict
See `cohort.load.as_dataframe`
how : {"os", "pfs"}, optional
Whether to plot OS (overall survival) or PFS (progression free survival)
survival_units : str
Unit of time for the survival measure, i.e. Days or Months
strata : str
(optional) column name of stratifying variable
ci_show : bool
Display the confidence interval around the survival curve
threshold : int, "median", "median-per-strata" or None (optional)
Threshold of `col` on which to split the cohort
]
assert[compare[name[how] in list[[<ast.Constant object at 0x7da18eb562f0>, <ast.Constant object at 0x7da18eb55990>]]]]
<ast.Tuple object at 0x7da18eb56da0> assign[=] call[name[self].as_dataframe, parameter[name[on]]]
variable[plot_col] assign[=] call[name[self].plot_col_from_cols, parameter[]]
variable[df] assign[=] call[name[filter_not_null], parameter[name[df], name[plot_col]]]
variable[results] assign[=] call[name[plot_kmf], parameter[]]
return[name[results]] | keyword[def] identifier[plot_survival] ( identifier[self] ,
identifier[on] ,
identifier[how] = literal[string] ,
identifier[survival_units] = literal[string] ,
identifier[strata] = keyword[None] ,
identifier[ax] = keyword[None] ,
identifier[ci_show] = keyword[False] ,
identifier[with_condition_color] = literal[string] ,
identifier[no_condition_color] = literal[string] ,
identifier[with_condition_label] = keyword[None] ,
identifier[no_condition_label] = keyword[None] ,
identifier[color_map] = keyword[None] ,
identifier[label_map] = keyword[None] ,
identifier[color_palette] = literal[string] ,
identifier[threshold] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[assert] identifier[how] keyword[in] [ literal[string] , literal[string] ], literal[string] % identifier[how]
identifier[cols] , identifier[df] = identifier[self] . identifier[as_dataframe] ( identifier[on] , identifier[return_cols] = keyword[True] ,** identifier[kwargs] )
identifier[plot_col] = identifier[self] . identifier[plot_col_from_cols] ( identifier[cols] = identifier[cols] , identifier[only_allow_one] = keyword[True] )
identifier[df] = identifier[filter_not_null] ( identifier[df] , identifier[plot_col] )
identifier[results] = identifier[plot_kmf] (
identifier[df] = identifier[df] ,
identifier[condition_col] = identifier[plot_col] ,
identifier[xlabel] = identifier[survival_units] ,
identifier[ylabel] = literal[string] keyword[if] identifier[how] == literal[string] keyword[else] literal[string] ,
identifier[censor_col] = literal[string] keyword[if] identifier[how] == literal[string] keyword[else] literal[string] ,
identifier[survival_col] = identifier[how] ,
identifier[strata_col] = identifier[strata] ,
identifier[threshold] = identifier[threshold] ,
identifier[ax] = identifier[ax] ,
identifier[ci_show] = identifier[ci_show] ,
identifier[with_condition_color] = identifier[with_condition_color] ,
identifier[no_condition_color] = identifier[no_condition_color] ,
identifier[with_condition_label] = identifier[with_condition_label] ,
identifier[no_condition_label] = identifier[no_condition_label] ,
identifier[color_palette] = identifier[color_palette] ,
identifier[label_map] = identifier[label_map] ,
identifier[color_map] = identifier[color_map] ,
)
keyword[return] identifier[results] | def plot_survival(self, on, how='os', survival_units='Days', strata=None, ax=None, ci_show=False, with_condition_color='#B38600', no_condition_color='#A941AC', with_condition_label=None, no_condition_label=None, color_map=None, label_map=None, color_palette='Set2', threshold=None, **kwargs):
"""Plot a Kaplan Meier survival curve by splitting the cohort into two groups
Parameters
----------
on : str or function or list or dict
See `cohort.load.as_dataframe`
how : {"os", "pfs"}, optional
Whether to plot OS (overall survival) or PFS (progression free survival)
survival_units : str
Unit of time for the survival measure, i.e. Days or Months
strata : str
(optional) column name of stratifying variable
ci_show : bool
Display the confidence interval around the survival curve
threshold : int, "median", "median-per-strata" or None (optional)
Threshold of `col` on which to split the cohort
"""
assert how in ['os', 'pfs'], 'Invalid choice of survival plot type %s' % how
(cols, df) = self.as_dataframe(on, return_cols=True, **kwargs)
plot_col = self.plot_col_from_cols(cols=cols, only_allow_one=True)
df = filter_not_null(df, plot_col)
results = plot_kmf(df=df, condition_col=plot_col, xlabel=survival_units, ylabel='Overall Survival (%)' if how == 'os' else 'Progression-Free Survival (%)', censor_col='deceased' if how == 'os' else 'progressed_or_deceased', survival_col=how, strata_col=strata, threshold=threshold, ax=ax, ci_show=ci_show, with_condition_color=with_condition_color, no_condition_color=no_condition_color, with_condition_label=with_condition_label, no_condition_label=no_condition_label, color_palette=color_palette, label_map=label_map, color_map=color_map)
return results |
def change_cell(self, x, y, ch, fg, bg):
"""Change cell in position (x;y).
"""
self.console.draw_char(x, y, ch, fg, bg) | def function[change_cell, parameter[self, x, y, ch, fg, bg]]:
constant[Change cell in position (x;y).
]
call[name[self].console.draw_char, parameter[name[x], name[y], name[ch], name[fg], name[bg]]] | keyword[def] identifier[change_cell] ( identifier[self] , identifier[x] , identifier[y] , identifier[ch] , identifier[fg] , identifier[bg] ):
literal[string]
identifier[self] . identifier[console] . identifier[draw_char] ( identifier[x] , identifier[y] , identifier[ch] , identifier[fg] , identifier[bg] ) | def change_cell(self, x, y, ch, fg, bg):
"""Change cell in position (x;y).
"""
self.console.draw_char(x, y, ch, fg, bg) |
def _emit_internal(self, sid, event, data, namespace=None, id=None):
"""Send a message to a client."""
if six.PY2 and not self.binary:
binary = False # pragma: nocover
else:
binary = None
# tuples are expanded to multiple arguments, everything else is sent
# as a single argument
if isinstance(data, tuple):
data = list(data)
else:
data = [data]
self._send_packet(sid, packet.Packet(packet.EVENT, namespace=namespace,
data=[event] + data, id=id,
binary=binary)) | def function[_emit_internal, parameter[self, sid, event, data, namespace, id]]:
constant[Send a message to a client.]
if <ast.BoolOp object at 0x7da1b21ba290> begin[:]
variable[binary] assign[=] constant[False]
if call[name[isinstance], parameter[name[data], name[tuple]]] begin[:]
variable[data] assign[=] call[name[list], parameter[name[data]]]
call[name[self]._send_packet, parameter[name[sid], call[name[packet].Packet, parameter[name[packet].EVENT]]]] | keyword[def] identifier[_emit_internal] ( identifier[self] , identifier[sid] , identifier[event] , identifier[data] , identifier[namespace] = keyword[None] , identifier[id] = keyword[None] ):
literal[string]
keyword[if] identifier[six] . identifier[PY2] keyword[and] keyword[not] identifier[self] . identifier[binary] :
identifier[binary] = keyword[False]
keyword[else] :
identifier[binary] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[tuple] ):
identifier[data] = identifier[list] ( identifier[data] )
keyword[else] :
identifier[data] =[ identifier[data] ]
identifier[self] . identifier[_send_packet] ( identifier[sid] , identifier[packet] . identifier[Packet] ( identifier[packet] . identifier[EVENT] , identifier[namespace] = identifier[namespace] ,
identifier[data] =[ identifier[event] ]+ identifier[data] , identifier[id] = identifier[id] ,
identifier[binary] = identifier[binary] )) | def _emit_internal(self, sid, event, data, namespace=None, id=None):
"""Send a message to a client."""
if six.PY2 and (not self.binary):
binary = False # pragma: nocover # depends on [control=['if'], data=[]]
else:
binary = None
# tuples are expanded to multiple arguments, everything else is sent
# as a single argument
if isinstance(data, tuple):
data = list(data) # depends on [control=['if'], data=[]]
else:
data = [data]
self._send_packet(sid, packet.Packet(packet.EVENT, namespace=namespace, data=[event] + data, id=id, binary=binary)) |
def routeByMonthAbbr(self, request, year, monthAbbr):
"""Route a request with a month abbreviation to the monthly view."""
month = (DatePictures['Mon'].index(monthAbbr.lower()) // 4) + 1
return self.serveMonth(request, year, month) | def function[routeByMonthAbbr, parameter[self, request, year, monthAbbr]]:
constant[Route a request with a month abbreviation to the monthly view.]
variable[month] assign[=] binary_operation[binary_operation[call[call[name[DatePictures]][constant[Mon]].index, parameter[call[name[monthAbbr].lower, parameter[]]]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[4]] + constant[1]]
return[call[name[self].serveMonth, parameter[name[request], name[year], name[month]]]] | keyword[def] identifier[routeByMonthAbbr] ( identifier[self] , identifier[request] , identifier[year] , identifier[monthAbbr] ):
literal[string]
identifier[month] =( identifier[DatePictures] [ literal[string] ]. identifier[index] ( identifier[monthAbbr] . identifier[lower] ())// literal[int] )+ literal[int]
keyword[return] identifier[self] . identifier[serveMonth] ( identifier[request] , identifier[year] , identifier[month] ) | def routeByMonthAbbr(self, request, year, monthAbbr):
"""Route a request with a month abbreviation to the monthly view."""
month = DatePictures['Mon'].index(monthAbbr.lower()) // 4 + 1
return self.serveMonth(request, year, month) |
def _extents(self):
"""
A (cx, cy) 2-tuple representing the effective rendering area for text
within this text frame when margins are taken into account.
"""
return (
self._parent.width - self.margin_left - self.margin_right,
self._parent.height - self.margin_top - self.margin_bottom
) | def function[_extents, parameter[self]]:
constant[
A (cx, cy) 2-tuple representing the effective rendering area for text
within this text frame when margins are taken into account.
]
return[tuple[[<ast.BinOp object at 0x7da20c6ab8b0>, <ast.BinOp object at 0x7da20c6a9b40>]]] | keyword[def] identifier[_extents] ( identifier[self] ):
literal[string]
keyword[return] (
identifier[self] . identifier[_parent] . identifier[width] - identifier[self] . identifier[margin_left] - identifier[self] . identifier[margin_right] ,
identifier[self] . identifier[_parent] . identifier[height] - identifier[self] . identifier[margin_top] - identifier[self] . identifier[margin_bottom]
) | def _extents(self):
"""
A (cx, cy) 2-tuple representing the effective rendering area for text
within this text frame when margins are taken into account.
"""
return (self._parent.width - self.margin_left - self.margin_right, self._parent.height - self.margin_top - self.margin_bottom) |
def _batch_gather_with_broadcast(params, indices, axis):
"""Like batch_gather, but broadcasts to the left of axis."""
# batch_gather assumes...
# params.shape = [A1,...,AN, B1,...,BM]
# indices.shape = [A1,...,AN, C]
# which gives output of shape
# [A1,...,AN, C, B1,...,BM]
# Here we broadcast dims of each to the left of `axis` in params, and left of
# the rightmost dim in indices, e.g. we can
# have
# params.shape = [A1,...,AN, B1,...,BM]
# indices.shape = [a1,...,aN, C],
# where ai broadcasts with Ai.
# leading_bcast_shape is the broadcast of [A1,...,AN] and [a1,...,aN].
leading_bcast_shape = tf.broadcast_dynamic_shape(
tf.shape(input=params)[:axis],
tf.shape(input=indices)[:-1])
params += tf.zeros(
tf.concat((leading_bcast_shape, tf.shape(input=params)[axis:]), axis=0),
dtype=params.dtype)
indices += tf.zeros(
tf.concat((leading_bcast_shape, tf.shape(input=indices)[-1:]), axis=0),
dtype=indices.dtype)
return tf.compat.v1.batch_gather(params, indices) | def function[_batch_gather_with_broadcast, parameter[params, indices, axis]]:
constant[Like batch_gather, but broadcasts to the left of axis.]
variable[leading_bcast_shape] assign[=] call[name[tf].broadcast_dynamic_shape, parameter[call[call[name[tf].shape, parameter[]]][<ast.Slice object at 0x7da1b03e36a0>], call[call[name[tf].shape, parameter[]]][<ast.Slice object at 0x7da1b03e3970>]]]
<ast.AugAssign object at 0x7da1b03e32e0>
<ast.AugAssign object at 0x7da1b03e3130>
return[call[name[tf].compat.v1.batch_gather, parameter[name[params], name[indices]]]] | keyword[def] identifier[_batch_gather_with_broadcast] ( identifier[params] , identifier[indices] , identifier[axis] ):
literal[string]
identifier[leading_bcast_shape] = identifier[tf] . identifier[broadcast_dynamic_shape] (
identifier[tf] . identifier[shape] ( identifier[input] = identifier[params] )[: identifier[axis] ],
identifier[tf] . identifier[shape] ( identifier[input] = identifier[indices] )[:- literal[int] ])
identifier[params] += identifier[tf] . identifier[zeros] (
identifier[tf] . identifier[concat] (( identifier[leading_bcast_shape] , identifier[tf] . identifier[shape] ( identifier[input] = identifier[params] )[ identifier[axis] :]), identifier[axis] = literal[int] ),
identifier[dtype] = identifier[params] . identifier[dtype] )
identifier[indices] += identifier[tf] . identifier[zeros] (
identifier[tf] . identifier[concat] (( identifier[leading_bcast_shape] , identifier[tf] . identifier[shape] ( identifier[input] = identifier[indices] )[- literal[int] :]), identifier[axis] = literal[int] ),
identifier[dtype] = identifier[indices] . identifier[dtype] )
keyword[return] identifier[tf] . identifier[compat] . identifier[v1] . identifier[batch_gather] ( identifier[params] , identifier[indices] ) | def _batch_gather_with_broadcast(params, indices, axis):
"""Like batch_gather, but broadcasts to the left of axis."""
# batch_gather assumes...
# params.shape = [A1,...,AN, B1,...,BM]
# indices.shape = [A1,...,AN, C]
# which gives output of shape
# [A1,...,AN, C, B1,...,BM]
# Here we broadcast dims of each to the left of `axis` in params, and left of
# the rightmost dim in indices, e.g. we can
# have
# params.shape = [A1,...,AN, B1,...,BM]
# indices.shape = [a1,...,aN, C],
# where ai broadcasts with Ai.
# leading_bcast_shape is the broadcast of [A1,...,AN] and [a1,...,aN].
leading_bcast_shape = tf.broadcast_dynamic_shape(tf.shape(input=params)[:axis], tf.shape(input=indices)[:-1])
params += tf.zeros(tf.concat((leading_bcast_shape, tf.shape(input=params)[axis:]), axis=0), dtype=params.dtype)
indices += tf.zeros(tf.concat((leading_bcast_shape, tf.shape(input=indices)[-1:]), axis=0), dtype=indices.dtype)
return tf.compat.v1.batch_gather(params, indices) |
def get_library_name(database='Human'):
"""return enrichr active enrichr library name.
:param str database: Select one from { 'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' }
"""
# make a get request to get the gmt names and meta data from Enrichr
# old code
# response = requests.get('http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=meta')
# gmt_data = response.json()
# # generate list of lib names
# libs = []
# # get library names
# for inst_gmt in gmt_data['libraries']:
# # only include active gmts
# if inst_gmt['isActive'] == True:
# libs.append(inst_gmt['libraryName'])
if database not in ['Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm']:
sys.stderr.write("""No supported database. Please input one of these:
'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' """)
return
if database in ['Human', 'Mouse']: database=''
lib_url='http://amp.pharm.mssm.edu/%sEnrichr/datasetStatistics'%database
libs_json = json.loads(requests.get(lib_url).text)
libs = [lib['libraryName'] for lib in libs_json['statistics']]
return sorted(libs) | def function[get_library_name, parameter[database]]:
constant[return enrichr active enrichr library name.
:param str database: Select one from { 'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' }
]
if compare[name[database] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da20c6aa320>, <ast.Constant object at 0x7da20c6aa0e0>, <ast.Constant object at 0x7da20c6a95d0>, <ast.Constant object at 0x7da20c6aaec0>, <ast.Constant object at 0x7da20c6aa530>, <ast.Constant object at 0x7da20c6a9030>]]] begin[:]
call[name[sys].stderr.write, parameter[constant[No supported database. Please input one of these:
'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' ]]]
return[None]
if compare[name[database] in list[[<ast.Constant object at 0x7da20c6aa650>, <ast.Constant object at 0x7da20c6a97e0>]]] begin[:]
variable[database] assign[=] constant[]
variable[lib_url] assign[=] binary_operation[constant[http://amp.pharm.mssm.edu/%sEnrichr/datasetStatistics] <ast.Mod object at 0x7da2590d6920> name[database]]
variable[libs_json] assign[=] call[name[json].loads, parameter[call[name[requests].get, parameter[name[lib_url]]].text]]
variable[libs] assign[=] <ast.ListComp object at 0x7da20c6aa200>
return[call[name[sorted], parameter[name[libs]]]] | keyword[def] identifier[get_library_name] ( identifier[database] = literal[string] ):
literal[string]
keyword[if] identifier[database] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] )
keyword[return]
keyword[if] identifier[database] keyword[in] [ literal[string] , literal[string] ]: identifier[database] = literal[string]
identifier[lib_url] = literal[string] % identifier[database]
identifier[libs_json] = identifier[json] . identifier[loads] ( identifier[requests] . identifier[get] ( identifier[lib_url] ). identifier[text] )
identifier[libs] =[ identifier[lib] [ literal[string] ] keyword[for] identifier[lib] keyword[in] identifier[libs_json] [ literal[string] ]]
keyword[return] identifier[sorted] ( identifier[libs] ) | def get_library_name(database='Human'):
"""return enrichr active enrichr library name.
:param str database: Select one from { 'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' }
"""
# make a get request to get the gmt names and meta data from Enrichr
# old code
# response = requests.get('http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=meta')
# gmt_data = response.json()
# # generate list of lib names
# libs = []
# # get library names
# for inst_gmt in gmt_data['libraries']:
# # only include active gmts
# if inst_gmt['isActive'] == True:
# libs.append(inst_gmt['libraryName'])
if database not in ['Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm']:
sys.stderr.write("No supported database. Please input one of these:\n 'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' ")
return # depends on [control=['if'], data=[]]
if database in ['Human', 'Mouse']:
database = '' # depends on [control=['if'], data=['database']]
lib_url = 'http://amp.pharm.mssm.edu/%sEnrichr/datasetStatistics' % database
libs_json = json.loads(requests.get(lib_url).text)
libs = [lib['libraryName'] for lib in libs_json['statistics']]
return sorted(libs) |
def parse(self):
    """
    Parse the vmstat input files into per-column CSV data.

    Each input line is expected as: <day> <time> <field-name>: <value> [<unit>].
    Lines outside the configured timestamp range, or whose column is not in
    self.sub_metrics (when sub_metrics is configured), are skipped.

    :return: status of the metric parse (False if any input file is invalid)
    """
    # Validate every input file up front; bail out on the first invalid one.
    file_status = True
    for input_file in self.infile_list:
      file_status = file_status and naarad.utils.is_valid_file(input_file)
      if not file_status:
        return False
    status = True
    data = {}  # stores the data of each column, keyed by output csv path
    for input_file in self.infile_list:
      logger.info('Processing : %s', input_file)
      # Timestamp format is detected lazily from the first parseable line of
      # each file and then reused for the remaining lines.
      timestamp_format = None
      with open(input_file) as fh:
        for line in fh:
          words = line.split()  # [0] is day; [1] is seconds; [2] is field name:; [3] is value [4] is unit
          if len(words) < 3:
            continue
          ts = words[0] + " " + words[1]
          if not timestamp_format or timestamp_format == 'unknown':
            timestamp_format = naarad.utils.detect_timestamp_format(ts)
          if timestamp_format == 'unknown':
            continue
          ts = naarad.utils.get_standardized_timestamp(ts, timestamp_format)
          if self.ts_out_of_range(ts):
            continue
          col = words[2].strip(':')
          # only process sub_metrics specified in config.
          if self.sub_metrics and col not in self.sub_metrics:
            continue
          # add unit to metric description; most of the metrics have 'KB'; a few others do not have unit, they are in number of pages
          if len(words) > 4 and words[4]:
            unit = words[4]
          else:
            unit = 'pages'
          self.sub_metric_unit[col] = unit
          # stores the values in data[] before finally writing out
          if col in self.column_csv_map:
            out_csv = self.column_csv_map[col]
          else:
            out_csv = self.get_csv(col)  # column_csv_map[] is assigned in get_csv()
            data[out_csv] = []
          data[out_csv].append(ts + "," + words[3])
    # post processing, putting data in csv files;
    # rows are sorted so each csv is in timestamp order.
    for csv in data.keys():
      self.csv_files.append(csv)
      with open(csv, 'w') as fh:
        fh.write('\n'.join(sorted(data[csv])))
    return status
constant[
Parse the vmstat file
:return: status of the metric parse
]
variable[file_status] assign[=] constant[True]
for taget[name[input_file]] in starred[name[self].infile_list] begin[:]
variable[file_status] assign[=] <ast.BoolOp object at 0x7da1b00daa40>
if <ast.UnaryOp object at 0x7da1b00db610> begin[:]
return[constant[False]]
variable[status] assign[=] constant[True]
variable[data] assign[=] dictionary[[], []]
for taget[name[input_file]] in starred[name[self].infile_list] begin[:]
call[name[logger].info, parameter[constant[Processing : %s], name[input_file]]]
variable[timestamp_format] assign[=] constant[None]
with call[name[open], parameter[name[input_file]]] begin[:]
for taget[name[line]] in starred[name[fh]] begin[:]
variable[words] assign[=] call[name[line].split, parameter[]]
if compare[call[name[len], parameter[name[words]]] less[<] constant[3]] begin[:]
continue
variable[ts] assign[=] binary_operation[binary_operation[call[name[words]][constant[0]] + constant[ ]] + call[name[words]][constant[1]]]
if <ast.BoolOp object at 0x7da1b00dac80> begin[:]
variable[timestamp_format] assign[=] call[name[naarad].utils.detect_timestamp_format, parameter[name[ts]]]
if compare[name[timestamp_format] equal[==] constant[unknown]] begin[:]
continue
variable[ts] assign[=] call[name[naarad].utils.get_standardized_timestamp, parameter[name[ts], name[timestamp_format]]]
if call[name[self].ts_out_of_range, parameter[name[ts]]] begin[:]
continue
variable[col] assign[=] call[call[name[words]][constant[2]].strip, parameter[constant[:]]]
if <ast.BoolOp object at 0x7da1aff75390> begin[:]
continue
if <ast.BoolOp object at 0x7da1aff755a0> begin[:]
variable[unit] assign[=] call[name[words]][constant[4]]
call[name[self].sub_metric_unit][name[col]] assign[=] name[unit]
if compare[name[col] in name[self].column_csv_map] begin[:]
variable[out_csv] assign[=] call[name[self].column_csv_map][name[col]]
call[call[name[data]][name[out_csv]].append, parameter[binary_operation[binary_operation[name[ts] + constant[,]] + call[name[words]][constant[3]]]]]
for taget[name[csv]] in starred[call[name[data].keys, parameter[]]] begin[:]
call[name[self].csv_files.append, parameter[name[csv]]]
with call[name[open], parameter[name[csv], constant[w]]] begin[:]
call[name[fh].write, parameter[call[constant[
].join, parameter[call[name[sorted], parameter[call[name[data]][name[csv]]]]]]]]
return[name[status]] | keyword[def] identifier[parse] ( identifier[self] ):
literal[string]
identifier[file_status] = keyword[True]
keyword[for] identifier[input_file] keyword[in] identifier[self] . identifier[infile_list] :
identifier[file_status] = identifier[file_status] keyword[and] identifier[naarad] . identifier[utils] . identifier[is_valid_file] ( identifier[input_file] )
keyword[if] keyword[not] identifier[file_status] :
keyword[return] keyword[False]
identifier[status] = keyword[True]
identifier[data] ={}
keyword[for] identifier[input_file] keyword[in] identifier[self] . identifier[infile_list] :
identifier[logger] . identifier[info] ( literal[string] , identifier[input_file] )
identifier[timestamp_format] = keyword[None]
keyword[with] identifier[open] ( identifier[input_file] ) keyword[as] identifier[fh] :
keyword[for] identifier[line] keyword[in] identifier[fh] :
identifier[words] = identifier[line] . identifier[split] ()
keyword[if] identifier[len] ( identifier[words] )< literal[int] :
keyword[continue]
identifier[ts] = identifier[words] [ literal[int] ]+ literal[string] + identifier[words] [ literal[int] ]
keyword[if] keyword[not] identifier[timestamp_format] keyword[or] identifier[timestamp_format] == literal[string] :
identifier[timestamp_format] = identifier[naarad] . identifier[utils] . identifier[detect_timestamp_format] ( identifier[ts] )
keyword[if] identifier[timestamp_format] == literal[string] :
keyword[continue]
identifier[ts] = identifier[naarad] . identifier[utils] . identifier[get_standardized_timestamp] ( identifier[ts] , identifier[timestamp_format] )
keyword[if] identifier[self] . identifier[ts_out_of_range] ( identifier[ts] ):
keyword[continue]
identifier[col] = identifier[words] [ literal[int] ]. identifier[strip] ( literal[string] )
keyword[if] identifier[self] . identifier[sub_metrics] keyword[and] identifier[col] keyword[not] keyword[in] identifier[self] . identifier[sub_metrics] :
keyword[continue]
keyword[if] identifier[len] ( identifier[words] )> literal[int] keyword[and] identifier[words] [ literal[int] ]:
identifier[unit] = identifier[words] [ literal[int] ]
keyword[else] :
identifier[unit] = literal[string]
identifier[self] . identifier[sub_metric_unit] [ identifier[col] ]= identifier[unit]
keyword[if] identifier[col] keyword[in] identifier[self] . identifier[column_csv_map] :
identifier[out_csv] = identifier[self] . identifier[column_csv_map] [ identifier[col] ]
keyword[else] :
identifier[out_csv] = identifier[self] . identifier[get_csv] ( identifier[col] )
identifier[data] [ identifier[out_csv] ]=[]
identifier[data] [ identifier[out_csv] ]. identifier[append] ( identifier[ts] + literal[string] + identifier[words] [ literal[int] ])
keyword[for] identifier[csv] keyword[in] identifier[data] . identifier[keys] ():
identifier[self] . identifier[csv_files] . identifier[append] ( identifier[csv] )
keyword[with] identifier[open] ( identifier[csv] , literal[string] ) keyword[as] identifier[fh] :
identifier[fh] . identifier[write] ( literal[string] . identifier[join] ( identifier[sorted] ( identifier[data] [ identifier[csv] ])))
keyword[return] identifier[status] | def parse(self):
"""
Parse the vmstat file
:return: status of the metric parse
"""
file_status = True
for input_file in self.infile_list:
file_status = file_status and naarad.utils.is_valid_file(input_file)
if not file_status:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['input_file']]
status = True
data = {} # stores the data of each column
for input_file in self.infile_list:
logger.info('Processing : %s', input_file)
timestamp_format = None
with open(input_file) as fh:
for line in fh:
words = line.split() # [0] is day; [1] is seconds; [2] is field name:; [3] is value [4] is unit
if len(words) < 3:
continue # depends on [control=['if'], data=[]]
ts = words[0] + ' ' + words[1]
if not timestamp_format or timestamp_format == 'unknown':
timestamp_format = naarad.utils.detect_timestamp_format(ts) # depends on [control=['if'], data=[]]
if timestamp_format == 'unknown':
continue # depends on [control=['if'], data=[]]
ts = naarad.utils.get_standardized_timestamp(ts, timestamp_format)
if self.ts_out_of_range(ts):
continue # depends on [control=['if'], data=[]]
col = words[2].strip(':')
# only process sub_metrics specified in config.
if self.sub_metrics and col not in self.sub_metrics:
continue # depends on [control=['if'], data=[]]
# add unit to metric description; most of the metrics have 'KB'; a few others do not have unit, they are in number of pages
if len(words) > 4 and words[4]:
unit = words[4] # depends on [control=['if'], data=[]]
else:
unit = 'pages'
self.sub_metric_unit[col] = unit
# stores the values in data[] before finally writing out
if col in self.column_csv_map:
out_csv = self.column_csv_map[col] # depends on [control=['if'], data=['col']]
else:
out_csv = self.get_csv(col) # column_csv_map[] is assigned in get_csv()
data[out_csv] = []
data[out_csv].append(ts + ',' + words[3]) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['fh']] # depends on [control=['for'], data=['input_file']]
# post processing, putting data in csv files;
for csv in data.keys():
self.csv_files.append(csv)
with open(csv, 'w') as fh:
fh.write('\n'.join(sorted(data[csv]))) # depends on [control=['with'], data=['fh']] # depends on [control=['for'], data=['csv']]
return status |
def remove_role_from_user(self, user, role):
    """Removes a role from a user.
    :param user: The user to manipulate
    :param role: The role to remove from the user
    :return: True if the role was present and removed, False otherwise
    """
    user, role = self._prepare_role_modify_args(user, role)
    # Guard clause: nothing to do when the user does not hold the role.
    if role not in user.roles:
        return False
    user.roles.remove(role)
    self.put(user)
    return True
constant[Removes a role from a user.
:param user: The user to manipulate
:param role: The role to remove from the user
]
variable[rv] assign[=] constant[False]
<ast.Tuple object at 0x7da18fe91870> assign[=] call[name[self]._prepare_role_modify_args, parameter[name[user], name[role]]]
if compare[name[role] in name[user].roles] begin[:]
variable[rv] assign[=] constant[True]
call[name[user].roles.remove, parameter[name[role]]]
call[name[self].put, parameter[name[user]]]
return[name[rv]] | keyword[def] identifier[remove_role_from_user] ( identifier[self] , identifier[user] , identifier[role] ):
literal[string]
identifier[rv] = keyword[False]
identifier[user] , identifier[role] = identifier[self] . identifier[_prepare_role_modify_args] ( identifier[user] , identifier[role] )
keyword[if] identifier[role] keyword[in] identifier[user] . identifier[roles] :
identifier[rv] = keyword[True]
identifier[user] . identifier[roles] . identifier[remove] ( identifier[role] )
identifier[self] . identifier[put] ( identifier[user] )
keyword[return] identifier[rv] | def remove_role_from_user(self, user, role):
"""Removes a role from a user.
:param user: The user to manipulate
:param role: The role to remove from the user
"""
rv = False
(user, role) = self._prepare_role_modify_args(user, role)
if role in user.roles:
rv = True
user.roles.remove(role)
self.put(user) # depends on [control=['if'], data=['role']]
return rv |
def set_fft_params(func):
    """Decorate a method to automatically convert quantities to samples
    """
    @wraps(func)
    def wrapped_func(series, method_func, *args, **kwargs):
        """Wrap function to normalize FFT params before execution
        """
        # A tuple argument carries the data series as its first element.
        data = series[0] if isinstance(series, tuple) else series
        # normalise FFT parameters for all libraries
        normalize_fft_params(data, kwargs=kwargs, func=method_func)
        return func(series, method_func, *args, **kwargs)
    return wrapped_func
constant[Decorate a method to automatically convert quantities to samples
]
def function[wrapped_func, parameter[series, method_func]]:
constant[Wrap function to normalize FFT params before execution
]
if call[name[isinstance], parameter[name[series], name[tuple]]] begin[:]
variable[data] assign[=] call[name[series]][constant[0]]
call[name[normalize_fft_params], parameter[name[data]]]
return[call[name[func], parameter[name[series], name[method_func], <ast.Starred object at 0x7da20e955270>]]]
return[name[wrapped_func]] | keyword[def] identifier[set_fft_params] ( identifier[func] ):
literal[string]
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapped_func] ( identifier[series] , identifier[method_func] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[series] , identifier[tuple] ):
identifier[data] = identifier[series] [ literal[int] ]
keyword[else] :
identifier[data] = identifier[series]
identifier[normalize_fft_params] ( identifier[data] , identifier[kwargs] = identifier[kwargs] , identifier[func] = identifier[method_func] )
keyword[return] identifier[func] ( identifier[series] , identifier[method_func] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapped_func] | def set_fft_params(func):
"""Decorate a method to automatically convert quantities to samples
"""
@wraps(func)
def wrapped_func(series, method_func, *args, **kwargs):
"""Wrap function to normalize FFT params before execution
"""
if isinstance(series, tuple):
data = series[0] # depends on [control=['if'], data=[]]
else:
data = series
# normalise FFT parmeters for all libraries
normalize_fft_params(data, kwargs=kwargs, func=method_func)
return func(series, method_func, *args, **kwargs)
return wrapped_func |
def nlargest(n, iterable, key=None):
    """Find the n largest elements in a dataset.
    Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
    """
    # n == 1: delegate to max(), distinguishing an empty input via a sentinel.
    if n == 1:
        source = iter(iterable)
        missing = object()
        if key is None:
            best = max(source, default=missing)
        else:
            best = max(source, default=missing, key=key)
        return [] if best is missing else [best]
    # When the whole input fits (n >= len), a plain sorted() is faster.
    try:
        total = len(iterable)
    except (TypeError, AttributeError):
        pass
    else:
        if n >= total:
            return sorted(iterable, key=key, reverse=True)[:n]
    if key is None:
        # Decorate each element with a strictly decreasing tiebreaker so the
        # heap never has to compare two equal elements beyond the index.
        source = iter(iterable)
        heap = [(value, tiebreak)
                for tiebreak, value in zip(range(0, -n, -1), source)]
        if not heap:
            return heap
        heapify(heap)
        smallest = heap[0][0]
        tiebreak = -n
        replace = heapreplace
        for value in source:
            if smallest < value:
                replace(heap, (value, tiebreak))
                smallest = heap[0][0]
                tiebreak -= 1
        heap.sort(reverse=True)
        return [pair[0] for pair in heap]
    # General case with a key function: decorate as (key, order, element).
    source = iter(iterable)
    heap = [(key(value), tiebreak, value)
            for tiebreak, value in zip(range(0, -n, -1), source)]
    if not heap:
        return heap
    heapify(heap)
    smallest = heap[0][0]
    tiebreak = -n
    replace = heapreplace
    for value in source:
        score = key(value)
        if smallest < score:
            replace(heap, (score, tiebreak, value))
            smallest = heap[0][0]
            tiebreak -= 1
    heap.sort(reverse=True)
    return [entry[2] for entry in heap]
constant[Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
]
if compare[name[n] equal[==] constant[1]] begin[:]
variable[it] assign[=] call[name[iter], parameter[name[iterable]]]
variable[sentinel] assign[=] call[name[object], parameter[]]
if compare[name[key] is constant[None]] begin[:]
variable[result] assign[=] call[name[max], parameter[name[it]]]
return[<ast.IfExp object at 0x7da1b20a8190>]
<ast.Try object at 0x7da1b20aac80>
if compare[name[key] is constant[None]] begin[:]
variable[it] assign[=] call[name[iter], parameter[name[iterable]]]
variable[result] assign[=] <ast.ListComp object at 0x7da1b20a94b0>
if <ast.UnaryOp object at 0x7da1b20a8640> begin[:]
return[name[result]]
call[name[heapify], parameter[name[result]]]
variable[top] assign[=] call[call[name[result]][constant[0]]][constant[0]]
variable[order] assign[=] <ast.UnaryOp object at 0x7da1b20a88e0>
variable[_heapreplace] assign[=] name[heapreplace]
for taget[name[elem]] in starred[name[it]] begin[:]
if compare[name[top] less[<] name[elem]] begin[:]
call[name[_heapreplace], parameter[name[result], tuple[[<ast.Name object at 0x7da1b20a80a0>, <ast.Name object at 0x7da1b20a8670>]]]]
variable[top] assign[=] call[call[name[result]][constant[0]]][constant[0]]
<ast.AugAssign object at 0x7da1b20a8070>
call[name[result].sort, parameter[]]
return[<ast.ListComp object at 0x7da1b20a8460>]
variable[it] assign[=] call[name[iter], parameter[name[iterable]]]
variable[result] assign[=] <ast.ListComp object at 0x7da20e954310>
if <ast.UnaryOp object at 0x7da20e957850> begin[:]
return[name[result]]
call[name[heapify], parameter[name[result]]]
variable[top] assign[=] call[call[name[result]][constant[0]]][constant[0]]
variable[order] assign[=] <ast.UnaryOp object at 0x7da20e956440>
variable[_heapreplace] assign[=] name[heapreplace]
for taget[name[elem]] in starred[name[it]] begin[:]
variable[k] assign[=] call[name[key], parameter[name[elem]]]
if compare[name[top] less[<] name[k]] begin[:]
call[name[_heapreplace], parameter[name[result], tuple[[<ast.Name object at 0x7da20e9568c0>, <ast.Name object at 0x7da20e9572b0>, <ast.Name object at 0x7da20e954ee0>]]]]
variable[top] assign[=] call[call[name[result]][constant[0]]][constant[0]]
<ast.AugAssign object at 0x7da20e9549a0>
call[name[result].sort, parameter[]]
return[<ast.ListComp object at 0x7da20e957250>] | keyword[def] identifier[nlargest] ( identifier[n] , identifier[iterable] , identifier[key] = keyword[None] ):
literal[string]
keyword[if] identifier[n] == literal[int] :
identifier[it] = identifier[iter] ( identifier[iterable] )
identifier[sentinel] = identifier[object] ()
keyword[if] identifier[key] keyword[is] keyword[None] :
identifier[result] = identifier[max] ( identifier[it] , identifier[default] = identifier[sentinel] )
keyword[else] :
identifier[result] = identifier[max] ( identifier[it] , identifier[default] = identifier[sentinel] , identifier[key] = identifier[key] )
keyword[return] [] keyword[if] identifier[result] keyword[is] identifier[sentinel] keyword[else] [ identifier[result] ]
keyword[try] :
identifier[size] = identifier[len] ( identifier[iterable] )
keyword[except] ( identifier[TypeError] , identifier[AttributeError] ):
keyword[pass]
keyword[else] :
keyword[if] identifier[n] >= identifier[size] :
keyword[return] identifier[sorted] ( identifier[iterable] , identifier[key] = identifier[key] , identifier[reverse] = keyword[True] )[: identifier[n] ]
keyword[if] identifier[key] keyword[is] keyword[None] :
identifier[it] = identifier[iter] ( identifier[iterable] )
identifier[result] =[( identifier[elem] , identifier[i] ) keyword[for] identifier[i] , identifier[elem] keyword[in] identifier[zip] ( identifier[range] ( literal[int] ,- identifier[n] ,- literal[int] ), identifier[it] )]
keyword[if] keyword[not] identifier[result] :
keyword[return] identifier[result]
identifier[heapify] ( identifier[result] )
identifier[top] = identifier[result] [ literal[int] ][ literal[int] ]
identifier[order] =- identifier[n]
identifier[_heapreplace] = identifier[heapreplace]
keyword[for] identifier[elem] keyword[in] identifier[it] :
keyword[if] identifier[top] < identifier[elem] :
identifier[_heapreplace] ( identifier[result] ,( identifier[elem] , identifier[order] ))
identifier[top] = identifier[result] [ literal[int] ][ literal[int] ]
identifier[order] -= literal[int]
identifier[result] . identifier[sort] ( identifier[reverse] = keyword[True] )
keyword[return] [ identifier[r] [ literal[int] ] keyword[for] identifier[r] keyword[in] identifier[result] ]
identifier[it] = identifier[iter] ( identifier[iterable] )
identifier[result] =[( identifier[key] ( identifier[elem] ), identifier[i] , identifier[elem] ) keyword[for] identifier[i] , identifier[elem] keyword[in] identifier[zip] ( identifier[range] ( literal[int] ,- identifier[n] ,- literal[int] ), identifier[it] )]
keyword[if] keyword[not] identifier[result] :
keyword[return] identifier[result]
identifier[heapify] ( identifier[result] )
identifier[top] = identifier[result] [ literal[int] ][ literal[int] ]
identifier[order] =- identifier[n]
identifier[_heapreplace] = identifier[heapreplace]
keyword[for] identifier[elem] keyword[in] identifier[it] :
identifier[k] = identifier[key] ( identifier[elem] )
keyword[if] identifier[top] < identifier[k] :
identifier[_heapreplace] ( identifier[result] ,( identifier[k] , identifier[order] , identifier[elem] ))
identifier[top] = identifier[result] [ literal[int] ][ literal[int] ]
identifier[order] -= literal[int]
identifier[result] . identifier[sort] ( identifier[reverse] = keyword[True] )
keyword[return] [ identifier[r] [ literal[int] ] keyword[for] identifier[r] keyword[in] identifier[result] ] | def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max()
if n == 1:
it = iter(iterable)
sentinel = object()
if key is None:
result = max(it, default=sentinel) # depends on [control=['if'], data=[]]
else:
result = max(it, default=sentinel, key=key)
return [] if result is sentinel else [result] # depends on [control=['if'], data=[]]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable) # depends on [control=['try'], data=[]]
except (TypeError, AttributeError):
pass # depends on [control=['except'], data=[]]
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n] # depends on [control=['if'], data=['n']]
# When key is none, use simpler decoration
if key is None:
it = iter(iterable)
result = [(elem, i) for (i, elem) in zip(range(0, -n, -1), it)]
if not result:
return result # depends on [control=['if'], data=[]]
heapify(result)
top = result[0][0]
order = -n
_heapreplace = heapreplace
for elem in it:
if top < elem:
_heapreplace(result, (elem, order))
top = result[0][0]
order -= 1 # depends on [control=['if'], data=['top', 'elem']] # depends on [control=['for'], data=['elem']]
result.sort(reverse=True)
return [r[0] for r in result] # depends on [control=['if'], data=[]]
# General case, slowest method
it = iter(iterable)
result = [(key(elem), i, elem) for (i, elem) in zip(range(0, -n, -1), it)]
if not result:
return result # depends on [control=['if'], data=[]]
heapify(result)
top = result[0][0]
order = -n
_heapreplace = heapreplace
for elem in it:
k = key(elem)
if top < k:
_heapreplace(result, (k, order, elem))
top = result[0][0]
order -= 1 # depends on [control=['if'], data=['top', 'k']] # depends on [control=['for'], data=['elem']]
result.sort(reverse=True)
return [r[2] for r in result] |
def _parse_response(resp):
    """ Get xmlrpc response from scgi response

        @param resp: raw SCGI response (header block + payload).
        @return: tuple of (payload, headers dict).
        @raise SCGIException: if the response has no header delimiter, or the
            payload length disagrees with the Content-Length header.
    """
    # Assume they care for standards and send us CRLF (not just LF)
    try:
        headers, payload = resp.split("\r\n\r\n", 1)
    except (TypeError, ValueError) as exc:
        raise SCGIException("No header delimiter in SCGI response of length %d (%s)" % (len(resp), exc,))
    headers = _parse_headers(headers)
    clen = headers.get("Content-Length")
    if clen is not None:
        # Check length, just in case the transport is bogus. Raise explicitly
        # instead of using `assert`, which is stripped under `python -O` and
        # would surface as an AssertionError rather than a protocol error.
        if len(payload) != int(clen):
            raise SCGIException("SCGI payload length %d does not match Content-Length %s" % (len(payload), clen))
    return payload, headers
constant[ Get xmlrpc response from scgi response
]
<ast.Try object at 0x7da18bcc9720>
variable[headers] assign[=] call[name[_parse_headers], parameter[name[headers]]]
variable[clen] assign[=] call[name[headers].get, parameter[constant[Content-Length]]]
if compare[name[clen] is_not constant[None]] begin[:]
assert[compare[call[name[len], parameter[name[payload]]] equal[==] call[name[int], parameter[name[clen]]]]]
return[tuple[[<ast.Name object at 0x7da18bccb010>, <ast.Name object at 0x7da18bcca8f0>]]] | keyword[def] identifier[_parse_response] ( identifier[resp] ):
literal[string]
keyword[try] :
identifier[headers] , identifier[payload] = identifier[resp] . identifier[split] ( literal[string] , literal[int] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ) keyword[as] identifier[exc] :
keyword[raise] identifier[SCGIException] ( literal[string] %( identifier[len] ( identifier[resp] ), identifier[exc] ,))
identifier[headers] = identifier[_parse_headers] ( identifier[headers] )
identifier[clen] = identifier[headers] . identifier[get] ( literal[string] )
keyword[if] identifier[clen] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[len] ( identifier[payload] )== identifier[int] ( identifier[clen] )
keyword[return] identifier[payload] , identifier[headers] | def _parse_response(resp):
""" Get xmlrpc response from scgi response
"""
# Assume they care for standards and send us CRLF (not just LF)
try:
(headers, payload) = resp.split('\r\n\r\n', 1) # depends on [control=['try'], data=[]]
except (TypeError, ValueError) as exc:
raise SCGIException('No header delimiter in SCGI response of length %d (%s)' % (len(resp), exc)) # depends on [control=['except'], data=['exc']]
headers = _parse_headers(headers)
clen = headers.get('Content-Length')
if clen is not None:
# Check length, just in case the transport is bogus
assert len(payload) == int(clen) # depends on [control=['if'], data=['clen']]
return (payload, headers) |
def sessions(status, access_key, id_only, all):
    '''
    List and manage compute sessions.
    '''
    # (column header, GraphQL field name) pairs; the field names are spliced
    # into the query template below.
    fields = [
        ('Session ID', 'sess_id'),
    ]
    with Session() as session:
        if is_admin(session):
            # Admins may list other users' sessions, so show the owner too.
            fields.append(('Owner', 'access_key'))
    if not id_only:
        fields.extend([
            ('Image', 'image'),
            ('Tag', 'tag'),
            ('Created At', 'created_at',),
            ('Terminated At', 'terminated_at'),
            ('Status', 'status'),
            ('Occupied Resource', 'occupied_slots'),
            ('Used Memory (MiB)', 'mem_cur_bytes'),
            ('Max Used Memory (MiB)', 'mem_max_bytes'),
            ('CPU Using (%)', 'cpu_using'),
        ])
        if is_legacy_server():
            # NOTE(review): index 2 is 'Tag' for non-admins but 'Image' when
            # the 'Owner' column was inserted above -- confirm this is the
            # intended column to drop for legacy servers.
            del fields[2]
    def execute_paginated_query(limit, offset):
        """Run one page of the compute_session_list GraphQL query.

        Resolves the enclosing function's ``session`` variable at call time,
        i.e. whichever ``with Session()`` block is active when this runs.
        Exits the process with status 1 on query failure.
        """
        q = '''
        query($limit:Int!, $offset:Int!, $ak:String, $status:String) {
          compute_session_list(
              limit:$limit, offset:$offset, access_key:$ak, status:$status) {
            items { $fields }
            total_count
          }
        }'''
        q = textwrap.dedent(q).strip()
        # Inject the selected field names into the query body.
        q = q.replace('$fields', ' '.join(item[1] for item in fields))
        v = {
            'limit': limit,
            'offset': offset,
            'status': status if status != 'ALL' else None,  # 'ALL' disables the status filter
            'ak': access_key,
        }
        try:
            resp = session.Admin.query(q, v)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        return resp['compute_session_list']
    def round_mem(items):
        """Convert raw byte counters in-place to MiB, rounded to one decimal."""
        for item in items:
            if 'mem_cur_bytes' in item:
                item['mem_cur_bytes'] = round(item['mem_cur_bytes'] / 2 ** 20, 1)
            if 'mem_max_bytes' in item:
                item['mem_max_bytes'] = round(item['mem_max_bytes'] / 2 ** 20, 1)
        return items
    def _generate_paginated_results(interval):
        """Yield formatted result pages, ``interval`` rows at a time."""
        offset = 0
        is_first = True
        total_count = -1
        while True:
            # After the first page, the server-reported total bounds the limit.
            limit = (interval if is_first else
                     min(interval, total_count - offset))
            try:
                result = execute_paginated_query(limit, offset)
            except Exception as e:
                print_error(e)
                sys.exit(1)
            offset += interval
            total_count = result['total_count']
            items = result['items']
            items = round_mem(items)
            if id_only:
                yield '\n'.join([item['sess_id'] for item in items]) + '\n'
            else:
                table = tabulate([item.values() for item in items],
                                 headers=(item[0] for item in fields))
                if not is_first:
                    # Strip the repeated header (first two rows) from later
                    # pages so the pager shows one continuous table.
                    table_rows = table.split('\n')
                    table = '\n'.join(table_rows[2:])
                yield table + '\n'
            if is_first:
                is_first = False
            if not offset < total_count:
                break
    with Session() as session:
        paginating_interval = 10
        if all:
            # Stream every page through an interactive pager.
            click.echo_via_pager(_generate_paginated_results(paginating_interval))
        else:
            # Show only the first page.
            result = execute_paginated_query(paginating_interval, offset=0)
            total_count = result['total_count']
            if total_count == 0:
                print('There are no compute sessions currently {0}.'
                      .format(status.lower()))
                return
            items = result['items']
            items = round_mem(items)
            if id_only:
                for item in items:
                    print(item['sess_id'])
            else:
                print(tabulate([item.values() for item in items],
                               headers=(item[0] for item in fields)))
            if total_count > paginating_interval:
                print("More sessions can be displayed by using --all option.")
constant[
List and manage compute sessions.
]
variable[fields] assign[=] list[[<ast.Tuple object at 0x7da207f9aec0>]]
with call[name[Session], parameter[]] begin[:]
if call[name[is_admin], parameter[name[session]]] begin[:]
call[name[fields].append, parameter[tuple[[<ast.Constant object at 0x7da207f9b8e0>, <ast.Constant object at 0x7da207f98cd0>]]]]
if <ast.UnaryOp object at 0x7da207f98100> begin[:]
call[name[fields].extend, parameter[list[[<ast.Tuple object at 0x7da207f9b1f0>, <ast.Tuple object at 0x7da207f9a050>, <ast.Tuple object at 0x7da207f9bd90>, <ast.Tuple object at 0x7da207f9a800>, <ast.Tuple object at 0x7da207f9a8f0>, <ast.Tuple object at 0x7da207f986d0>, <ast.Tuple object at 0x7da207f99a50>, <ast.Tuple object at 0x7da207f9a2c0>, <ast.Tuple object at 0x7da1b26ad660>]]]]
if call[name[is_legacy_server], parameter[]] begin[:]
<ast.Delete object at 0x7da1b26ae4a0>
def function[execute_paginated_query, parameter[limit, offset]]:
variable[q] assign[=] constant[
query($limit:Int!, $offset:Int!, $ak:String, $status:String) {
compute_session_list(
limit:$limit, offset:$offset, access_key:$ak, status:$status) {
items { $fields }
total_count
}
}]
variable[q] assign[=] call[call[name[textwrap].dedent, parameter[name[q]]].strip, parameter[]]
variable[q] assign[=] call[name[q].replace, parameter[constant[$fields], call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da207f98520>]]]]
variable[v] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c63e0>, <ast.Constant object at 0x7da20c6c6110>, <ast.Constant object at 0x7da20c6c4e20>, <ast.Constant object at 0x7da20c6c48e0>], [<ast.Name object at 0x7da20c6c4430>, <ast.Name object at 0x7da20c6c40d0>, <ast.IfExp object at 0x7da20c6c7d00>, <ast.Name object at 0x7da20c6c7640>]]
<ast.Try object at 0x7da20c6c77f0>
return[call[name[resp]][constant[compute_session_list]]]
def function[round_mem, parameter[items]]:
for taget[name[item]] in starred[name[items]] begin[:]
if compare[constant[mem_cur_bytes] in name[item]] begin[:]
call[name[item]][constant[mem_cur_bytes]] assign[=] call[name[round], parameter[binary_operation[call[name[item]][constant[mem_cur_bytes]] / binary_operation[constant[2] ** constant[20]]], constant[1]]]
if compare[constant[mem_max_bytes] in name[item]] begin[:]
call[name[item]][constant[mem_max_bytes]] assign[=] call[name[round], parameter[binary_operation[call[name[item]][constant[mem_max_bytes]] / binary_operation[constant[2] ** constant[20]]], constant[1]]]
return[name[items]]
def function[_generate_paginated_results, parameter[interval]]:
variable[offset] assign[=] constant[0]
variable[is_first] assign[=] constant[True]
variable[total_count] assign[=] <ast.UnaryOp object at 0x7da20c6c5b40>
while constant[True] begin[:]
variable[limit] assign[=] <ast.IfExp object at 0x7da20c6c7370>
<ast.Try object at 0x7da20c6c7d30>
<ast.AugAssign object at 0x7da20c6c4250>
variable[total_count] assign[=] call[name[result]][constant[total_count]]
variable[items] assign[=] call[name[result]][constant[items]]
variable[items] assign[=] call[name[round_mem], parameter[name[items]]]
if name[id_only] begin[:]
<ast.Yield object at 0x7da20c6c6bf0>
if name[is_first] begin[:]
variable[is_first] assign[=] constant[False]
if <ast.UnaryOp object at 0x7da20c6c6320> begin[:]
break
with call[name[Session], parameter[]] begin[:]
variable[paginating_interval] assign[=] constant[10]
if name[all] begin[:]
call[name[click].echo_via_pager, parameter[call[name[_generate_paginated_results], parameter[name[paginating_interval]]]]] | keyword[def] identifier[sessions] ( identifier[status] , identifier[access_key] , identifier[id_only] , identifier[all] ):
literal[string]
identifier[fields] =[
( literal[string] , literal[string] ),
]
keyword[with] identifier[Session] () keyword[as] identifier[session] :
keyword[if] identifier[is_admin] ( identifier[session] ):
identifier[fields] . identifier[append] (( literal[string] , literal[string] ))
keyword[if] keyword[not] identifier[id_only] :
identifier[fields] . identifier[extend] ([
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ,),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
])
keyword[if] identifier[is_legacy_server] ():
keyword[del] identifier[fields] [ literal[int] ]
keyword[def] identifier[execute_paginated_query] ( identifier[limit] , identifier[offset] ):
identifier[q] = literal[string]
identifier[q] = identifier[textwrap] . identifier[dedent] ( identifier[q] ). identifier[strip] ()
identifier[q] = identifier[q] . identifier[replace] ( literal[string] , literal[string] . identifier[join] ( identifier[item] [ literal[int] ] keyword[for] identifier[item] keyword[in] identifier[fields] ))
identifier[v] ={
literal[string] : identifier[limit] ,
literal[string] : identifier[offset] ,
literal[string] : identifier[status] keyword[if] identifier[status] != literal[string] keyword[else] keyword[None] ,
literal[string] : identifier[access_key] ,
}
keyword[try] :
identifier[resp] = identifier[session] . identifier[Admin] . identifier[query] ( identifier[q] , identifier[v] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[print_error] ( identifier[e] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[return] identifier[resp] [ literal[string] ]
keyword[def] identifier[round_mem] ( identifier[items] ):
keyword[for] identifier[item] keyword[in] identifier[items] :
keyword[if] literal[string] keyword[in] identifier[item] :
identifier[item] [ literal[string] ]= identifier[round] ( identifier[item] [ literal[string] ]/ literal[int] ** literal[int] , literal[int] )
keyword[if] literal[string] keyword[in] identifier[item] :
identifier[item] [ literal[string] ]= identifier[round] ( identifier[item] [ literal[string] ]/ literal[int] ** literal[int] , literal[int] )
keyword[return] identifier[items]
keyword[def] identifier[_generate_paginated_results] ( identifier[interval] ):
identifier[offset] = literal[int]
identifier[is_first] = keyword[True]
identifier[total_count] =- literal[int]
keyword[while] keyword[True] :
identifier[limit] =( identifier[interval] keyword[if] identifier[is_first] keyword[else]
identifier[min] ( identifier[interval] , identifier[total_count] - identifier[offset] ))
keyword[try] :
identifier[result] = identifier[execute_paginated_query] ( identifier[limit] , identifier[offset] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[print_error] ( identifier[e] )
identifier[sys] . identifier[exit] ( literal[int] )
identifier[offset] += identifier[interval]
identifier[total_count] = identifier[result] [ literal[string] ]
identifier[items] = identifier[result] [ literal[string] ]
identifier[items] = identifier[round_mem] ( identifier[items] )
keyword[if] identifier[id_only] :
keyword[yield] literal[string] . identifier[join] ([ identifier[item] [ literal[string] ] keyword[for] identifier[item] keyword[in] identifier[items] ])+ literal[string]
keyword[else] :
identifier[table] = identifier[tabulate] ([ identifier[item] . identifier[values] () keyword[for] identifier[item] keyword[in] identifier[items] ],
identifier[headers] =( identifier[item] [ literal[int] ] keyword[for] identifier[item] keyword[in] identifier[fields] ))
keyword[if] keyword[not] identifier[is_first] :
identifier[table_rows] = identifier[table] . identifier[split] ( literal[string] )
identifier[table] = literal[string] . identifier[join] ( identifier[table_rows] [ literal[int] :])
keyword[yield] identifier[table] + literal[string]
keyword[if] identifier[is_first] :
identifier[is_first] = keyword[False]
keyword[if] keyword[not] identifier[offset] < identifier[total_count] :
keyword[break]
keyword[with] identifier[Session] () keyword[as] identifier[session] :
identifier[paginating_interval] = literal[int]
keyword[if] identifier[all] :
identifier[click] . identifier[echo_via_pager] ( identifier[_generate_paginated_results] ( identifier[paginating_interval] ))
keyword[else] :
identifier[result] = identifier[execute_paginated_query] ( identifier[paginating_interval] , identifier[offset] = literal[int] )
identifier[total_count] = identifier[result] [ literal[string] ]
keyword[if] identifier[total_count] == literal[int] :
identifier[print] ( literal[string]
. identifier[format] ( identifier[status] . identifier[lower] ()))
keyword[return]
identifier[items] = identifier[result] [ literal[string] ]
identifier[items] = identifier[round_mem] ( identifier[items] )
keyword[if] identifier[id_only] :
keyword[for] identifier[item] keyword[in] identifier[items] :
identifier[print] ( identifier[item] [ literal[string] ])
keyword[else] :
identifier[print] ( identifier[tabulate] ([ identifier[item] . identifier[values] () keyword[for] identifier[item] keyword[in] identifier[items] ],
identifier[headers] =( identifier[item] [ literal[int] ] keyword[for] identifier[item] keyword[in] identifier[fields] )))
keyword[if] identifier[total_count] > identifier[paginating_interval] :
identifier[print] ( literal[string] ) | def sessions(status, access_key, id_only, all):
"""
List and manage compute sessions.
"""
fields = [('Session ID', 'sess_id')]
with Session() as session:
if is_admin(session):
fields.append(('Owner', 'access_key')) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['session']]
if not id_only:
fields.extend([('Image', 'image'), ('Tag', 'tag'), ('Created At', 'created_at'), ('Terminated At', 'terminated_at'), ('Status', 'status'), ('Occupied Resource', 'occupied_slots'), ('Used Memory (MiB)', 'mem_cur_bytes'), ('Max Used Memory (MiB)', 'mem_max_bytes'), ('CPU Using (%)', 'cpu_using')])
if is_legacy_server():
del fields[2] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def execute_paginated_query(limit, offset):
q = '\n query($limit:Int!, $offset:Int!, $ak:String, $status:String) {\n compute_session_list(\n limit:$limit, offset:$offset, access_key:$ak, status:$status) {\n items { $fields }\n total_count\n }\n }'
q = textwrap.dedent(q).strip()
q = q.replace('$fields', ' '.join((item[1] for item in fields)))
v = {'limit': limit, 'offset': offset, 'status': status if status != 'ALL' else None, 'ak': access_key}
try:
resp = session.Admin.query(q, v) # depends on [control=['try'], data=[]]
except Exception as e:
print_error(e)
sys.exit(1) # depends on [control=['except'], data=['e']]
return resp['compute_session_list']
def round_mem(items):
for item in items:
if 'mem_cur_bytes' in item:
item['mem_cur_bytes'] = round(item['mem_cur_bytes'] / 2 ** 20, 1) # depends on [control=['if'], data=['item']]
if 'mem_max_bytes' in item:
item['mem_max_bytes'] = round(item['mem_max_bytes'] / 2 ** 20, 1) # depends on [control=['if'], data=['item']] # depends on [control=['for'], data=['item']]
return items
def _generate_paginated_results(interval):
offset = 0
is_first = True
total_count = -1
while True:
limit = interval if is_first else min(interval, total_count - offset)
try:
result = execute_paginated_query(limit, offset) # depends on [control=['try'], data=[]]
except Exception as e:
print_error(e)
sys.exit(1) # depends on [control=['except'], data=['e']]
offset += interval
total_count = result['total_count']
items = result['items']
items = round_mem(items)
if id_only:
yield ('\n'.join([item['sess_id'] for item in items]) + '\n') # depends on [control=['if'], data=[]]
else:
table = tabulate([item.values() for item in items], headers=(item[0] for item in fields))
if not is_first:
table_rows = table.split('\n')
table = '\n'.join(table_rows[2:]) # depends on [control=['if'], data=[]]
yield (table + '\n')
if is_first:
is_first = False # depends on [control=['if'], data=[]]
if not offset < total_count:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
with Session() as session:
paginating_interval = 10
if all:
click.echo_via_pager(_generate_paginated_results(paginating_interval)) # depends on [control=['if'], data=[]]
else:
result = execute_paginated_query(paginating_interval, offset=0)
total_count = result['total_count']
if total_count == 0:
print('There are no compute sessions currently {0}.'.format(status.lower()))
return # depends on [control=['if'], data=[]]
items = result['items']
items = round_mem(items)
if id_only:
for item in items:
print(item['sess_id']) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]]
else:
print(tabulate([item.values() for item in items], headers=(item[0] for item in fields)))
if total_count > paginating_interval:
print('More sessions can be displayed by using --all option.') # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]] |
def cosmological_quantity_from_redshift(z, quantity, strip_unit=True,
                                        **kwargs):
    r"""Evaluate a cosmological quantity (e.g., age) at a given redshift.

    Parameters
    ----------
    z : float
        The redshift at which to evaluate the quantity.
    quantity : str
        Name of the quantity to evaluate; may be any callable attribute of
        :py:class:`astropy.cosmology.FlatLambdaCDM` (e.g. ``'age'``).
    strip_unit : bool, optional
        If True (the default), return just the numerical value without
        astropy units attached.
    \**kwargs :
        Forwarded to :py:func:`get_cosmology` to select the cosmology; when
        nothing is given, :py:attr:`DEFAULT_COSMOLOGY` is used.

    Returns
    -------
    float or astropy.units.quantity :
        The quantity evaluated at ``z``: a bare value when ``strip_unit`` is
        True, otherwise a unit-ful quantity.
    """
    cosmo = get_cosmology(**kwargs)
    result = getattr(cosmo, quantity)(z)
    return result.value if strip_unit else result
return val | def function[cosmological_quantity_from_redshift, parameter[z, quantity, strip_unit]]:
constant[Returns the value of a cosmological quantity (e.g., age) at a redshift.
Parameters
----------
z : float
The redshift.
quantity : str
The name of the quantity to get. The name may be any attribute of
:py:class:`astropy.cosmology.FlatLambdaCDM`.
strip_unit : bool, optional
Just return the value of the quantity, sans units. Default is True.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float or astropy.units.quantity :
The value of the quantity at the requested value. If ``strip_unit`` is
``True``, will return the value. Otherwise, will return the value with
units.
]
variable[cosmology] assign[=] call[name[get_cosmology], parameter[]]
variable[val] assign[=] call[call[name[getattr], parameter[name[cosmology], name[quantity]]], parameter[name[z]]]
if name[strip_unit] begin[:]
variable[val] assign[=] name[val].value
return[name[val]] | keyword[def] identifier[cosmological_quantity_from_redshift] ( identifier[z] , identifier[quantity] , identifier[strip_unit] = keyword[True] ,
** identifier[kwargs] ):
literal[string]
identifier[cosmology] = identifier[get_cosmology] (** identifier[kwargs] )
identifier[val] = identifier[getattr] ( identifier[cosmology] , identifier[quantity] )( identifier[z] )
keyword[if] identifier[strip_unit] :
identifier[val] = identifier[val] . identifier[value]
keyword[return] identifier[val] | def cosmological_quantity_from_redshift(z, quantity, strip_unit=True, **kwargs):
"""Returns the value of a cosmological quantity (e.g., age) at a redshift.
Parameters
----------
z : float
The redshift.
quantity : str
The name of the quantity to get. The name may be any attribute of
:py:class:`astropy.cosmology.FlatLambdaCDM`.
strip_unit : bool, optional
Just return the value of the quantity, sans units. Default is True.
\\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float or astropy.units.quantity :
The value of the quantity at the requested value. If ``strip_unit`` is
``True``, will return the value. Otherwise, will return the value with
units.
"""
cosmology = get_cosmology(**kwargs)
val = getattr(cosmology, quantity)(z)
if strip_unit:
val = val.value # depends on [control=['if'], data=[]]
return val |
def message_user(self, username, domain, subject, message):
    """Send a message to a user via the ejabberd RPC interface.

    On legacy servers (API version <= 14.07) this falls back to
    ``send_message_chat`` and silently drops the subject, because headline
    messages are not stored by mod_offline; newer servers get
    ``send_message`` with the subject and a normal message type.
    """
    params = {
        'body': message,
        'from': domain,
        'to': '%s@%s' % (username, domain),
    }
    if self.api_version <= (14, 7):
        # TODO: it's unclear when send_message was introduced
        command = 'send_message_chat'
    else:
        command = 'send_message'
        params['subject'] = subject
        params['type'] = 'normal'
    result = self.rpc(command, **params)
    if result['res'] != 0:
        raise BackendError(result.get('text', 'Unknown Error'))
constant[Currently use send_message_chat and discard subject, because headline messages are not
stored by mod_offline.]
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b10eb0>, <ast.Constant object at 0x7da1b0b12e60>, <ast.Constant object at 0x7da1b0b10310>], [<ast.Name object at 0x7da1b0b12080>, <ast.Name object at 0x7da1b0b10ac0>, <ast.BinOp object at 0x7da1b0b12dd0>]]
if compare[name[self].api_version less_or_equal[<=] tuple[[<ast.Constant object at 0x7da1b0b12350>, <ast.Constant object at 0x7da1b0b11d20>]]] begin[:]
variable[command] assign[=] constant[send_message_chat]
variable[result] assign[=] call[name[self].rpc, parameter[name[command]]]
if compare[call[name[result]][constant[res]] equal[==] constant[0]] begin[:]
return[None] | keyword[def] identifier[message_user] ( identifier[self] , identifier[username] , identifier[domain] , identifier[subject] , identifier[message] ):
literal[string]
identifier[kwargs] ={
literal[string] : identifier[message] ,
literal[string] : identifier[domain] ,
literal[string] : literal[string] %( identifier[username] , identifier[domain] ),
}
keyword[if] identifier[self] . identifier[api_version] <=( literal[int] , literal[int] ):
identifier[command] = literal[string]
keyword[else] :
identifier[command] = literal[string]
identifier[kwargs] [ literal[string] ]= identifier[subject]
identifier[kwargs] [ literal[string] ]= literal[string]
identifier[result] = identifier[self] . identifier[rpc] ( identifier[command] ,** identifier[kwargs] )
keyword[if] identifier[result] [ literal[string] ]== literal[int] :
keyword[return]
keyword[else] :
keyword[raise] identifier[BackendError] ( identifier[result] . identifier[get] ( literal[string] , literal[string] )) | def message_user(self, username, domain, subject, message):
"""Currently use send_message_chat and discard subject, because headline messages are not
stored by mod_offline."""
kwargs = {'body': message, 'from': domain, 'to': '%s@%s' % (username, domain)}
if self.api_version <= (14, 7):
# TODO: it's unclear when send_message was introduced
command = 'send_message_chat' # depends on [control=['if'], data=[]]
else:
command = 'send_message'
kwargs['subject'] = subject
kwargs['type'] = 'normal'
result = self.rpc(command, **kwargs)
if result['res'] == 0:
return # depends on [control=['if'], data=[]]
else:
raise BackendError(result.get('text', 'Unknown Error')) |
def create_view(self, query_criteria=None, uid='_all_users'):

    '''
        a method to add a view to a design document of a uid

    :param query_criteria: dictionary with valid jsonmodel query criteria
    :param uid: [optional] string with uid of design document to update
    :return: integer with status of operation

    an example of how to construct the query_criteria argument:

    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'discrete_values': [ 'pond', 'lake', 'stream', 'brook' ]
        }
    }

    NOTE:   only fields specified in the document schema at class initialization
            can be used as fields in query_criteria. otherwise, an error will be thrown.
            uid is automatically added to all document schemas at initialization

    NOTE:   the full list of all criteria are found in the reference page for the
            jsonmodel module as well as the query-rules.json file included in the
            module.
            http://collectiveacuity.github.io/jsonModel/reference/#query-criteria
    '''

# https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/query/put__db___design__ddoc_
# https://developer.couchbase.com/documentation/server/3.x/admin/Views/views-writing.html

    title = '%s.create_view' % self.__class__.__name__

# validate inputs
    input_fields = {
        'uid': uid
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)

# validate query criteria against the document model (requires a schema)
    if query_criteria:
        if not self.model:
            raise ValueError('%s(query_criteria={...}) requires a document_schema.' % title)
        self.model.query(query_criteria)
    else:
        query_criteria = {}
    if uid != '_all_users' and self.public:
    # BUGFIX: the format operands were previously inside the string literal,
    # so the message was never interpolated with the title and uid
        raise ValueError('%s(uid="%s") user ids are not applicable for a public bucket.' % (title, uid))

# catch missing args
    if not query_criteria and not uid:
        raise IndexError('%s requires either a uid or query_criteria argument.' % title)

# retrieve the design document for the uid (or start from an empty one)
    url = self.bucket_url + '/_design/%s' % uid
    design_details = {
        'views': {}
    }
    response = requests.get(url)
    if response.status_code in (200, 201):
        design_details = response.json()
        design_details['views'] = self._clean_views(design_details['views'])

# create a view of all docs for the uid
    if not query_criteria:
        if uid == '_all_users':
        # nothing to add for the shared design doc; report the GET status
            return response.status_code
        else:
            function_string = 'function(doc, meta) { if (doc.uid == "%s") { emit(null, null); } }' % uid
            design_details['views']['_all_docs'] = { 'map': function_string }

# construct a view for a query criteria
    else:

    # determine hashed key for criteria (sort_keys makes the hash independent
    # of the order in which the criteria fields were declared)
        import hashlib
        import json
        from collections import OrderedDict
        ordered_criteria = OrderedDict(**query_criteria)
        hashed_criteria = hashlib.md5(json.dumps(query_criteria, sort_keys=True).encode('utf-8')).hexdigest()

    # determine function string for criteria
        uid_insert = 'emit();'
        if uid != '_all_users':
            uid_insert = 'if (doc.uid == "%s") { emit(); }' % uid
        function_string = 'function(doc, meta) { %s }' % uid_insert
    # BUGFIX: the previous comma counter was never incremented, so multiple
    # criteria fields produced invalid javascript with no separators
        emit_fields = ','.join('doc%s' % key for key in ordered_criteria.keys())
        emit_insert = 'emit(null, [%s]);' % emit_fields
        function_string = function_string.replace('emit();', emit_insert)

    # construct updated design details
        design_details['views'][hashed_criteria] = { 'map': function_string }

# send update of design document
    response = requests.put(url, json=design_details)

    return response.status_code
constant[
a method to add a view to a design document of a uid
:param query_criteria: dictionary with valid jsonmodel query criteria
:param uid: [optional] string with uid of design document to update
:return: integer with status of operation
an example of how to construct the query_criteria argument:
query_criteria = {
'.path.to.number': {
'min_value': 4.5
},
'.path.to.string': {
'discrete_values': [ 'pond', 'lake', 'stream', 'brook' ]
}
}
NOTE: only fields specified in the document schema at class initialization
can be used as fields in query_criteria. otherwise, an error will be thrown.
uid is automatically added to all document schemas at initialization
NOTE: the full list of all criteria are found in the reference page for the
jsonmodel module as well as the query-rules.json file included in the
module.
http://collectiveacuity.github.io/jsonModel/reference/#query-criteria
]
variable[title] assign[=] binary_operation[constant[%s.create_view] <ast.Mod object at 0x7da2590d6920> name[self].__class__.__name__]
variable[input_fields] assign[=] dictionary[[<ast.Constant object at 0x7da20e9633a0>], [<ast.Name object at 0x7da20e963d30>]]
for taget[tuple[[<ast.Name object at 0x7da20e962020>, <ast.Name object at 0x7da20e9639a0>]]] in starred[call[name[input_fields].items, parameter[]]] begin[:]
if name[value] begin[:]
variable[object_title] assign[=] binary_operation[constant[%s(%s=%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e962920>, <ast.Name object at 0x7da20e962aa0>, <ast.Call object at 0x7da20e9629b0>]]]
call[name[self].fields.validate, parameter[name[value], binary_operation[constant[.%s] <ast.Mod object at 0x7da2590d6920> name[key]], name[object_title]]]
if name[query_criteria] begin[:]
if <ast.UnaryOp object at 0x7da20e962500> begin[:]
<ast.Raise object at 0x7da20e963580>
call[name[self].model.query, parameter[name[query_criteria]]]
if <ast.BoolOp object at 0x7da1b1335d80> begin[:]
<ast.Raise object at 0x7da1b1335cc0>
if <ast.BoolOp object at 0x7da1b1335a80> begin[:]
<ast.Raise object at 0x7da1b1334160>
return[name[response].status_code] | keyword[def] identifier[create_view] ( identifier[self] , identifier[query_criteria] = keyword[None] , identifier[uid] = literal[string] ):
literal[string]
identifier[title] = literal[string] % identifier[self] . identifier[__class__] . identifier[__name__]
identifier[input_fields] ={
literal[string] : identifier[uid]
}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[input_fields] . identifier[items] ():
keyword[if] identifier[value] :
identifier[object_title] = literal[string] %( identifier[title] , identifier[key] , identifier[str] ( identifier[value] ))
identifier[self] . identifier[fields] . identifier[validate] ( identifier[value] , literal[string] % identifier[key] , identifier[object_title] )
keyword[if] identifier[query_criteria] :
keyword[if] keyword[not] identifier[self] . identifier[model] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[title] )
identifier[self] . identifier[model] . identifier[query] ( identifier[query_criteria] )
keyword[else] :
identifier[query_criteria] ={}
keyword[if] identifier[uid] != literal[string] keyword[and] identifier[self] . identifier[public] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[query_criteria] keyword[and] keyword[not] identifier[uid] :
keyword[raise] identifier[IndexError] ( literal[string] % identifier[title] )
keyword[else] :
identifier[url] = identifier[self] . identifier[bucket_url] + literal[string] % identifier[uid]
identifier[design_details] ={
literal[string] :{}
}
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] )
keyword[if] identifier[response] . identifier[status_code] keyword[in] ( literal[int] , literal[int] ):
identifier[design_details] = identifier[response] . identifier[json] ()
identifier[design_details] [ literal[string] ]= identifier[self] . identifier[_clean_views] ( identifier[design_details] [ literal[string] ])
keyword[if] keyword[not] identifier[query_criteria] :
keyword[if] identifier[uid] == literal[string] :
keyword[return] identifier[response] . identifier[status_code]
keyword[else] :
identifier[function_string] = literal[string] % identifier[uid]
identifier[design_details] [ literal[string] ][ literal[string] ]={ literal[string] : identifier[function_string] }
keyword[else] :
keyword[import] identifier[hashlib]
keyword[import] identifier[json]
keyword[from] identifier[collections] keyword[import] identifier[OrderedDict]
identifier[ordered_criteria] = identifier[OrderedDict] (** identifier[query_criteria] )
identifier[hashed_criteria] = identifier[hashlib] . identifier[md5] ( identifier[json] . identifier[dumps] ( identifier[query_criteria] , identifier[sort_keys] = keyword[True] ). identifier[encode] ( literal[string] )). identifier[hexdigest] ()
identifier[uid_insert] = literal[string]
keyword[if] identifier[uid] != literal[string] :
identifier[uid_insert] = literal[string] % identifier[uid]
identifier[function_string] = literal[string] % identifier[uid_insert]
identifier[emit_insert] = literal[string]
identifier[count] = literal[int]
keyword[for] identifier[key] keyword[in] identifier[ordered_criteria] . identifier[keys] ():
keyword[if] identifier[count] :
identifier[emit_insert] += literal[string]
identifier[emit_insert] += literal[string] % identifier[key]
identifier[emit_insert] += literal[string]
identifier[function_string] = identifier[function_string] . identifier[replace] ( literal[string] , identifier[emit_insert] )
identifier[design_details] [ literal[string] ][ identifier[hashed_criteria] ]={ literal[string] : identifier[function_string] }
identifier[response] = identifier[requests] . identifier[put] ( identifier[url] , identifier[json] = identifier[design_details] )
keyword[return] identifier[response] . identifier[status_code] | def create_view(self, query_criteria=None, uid='_all_users'):
"""
a method to add a view to a design document of a uid
:param query_criteria: dictionary with valid jsonmodel query criteria
:param uid: [optional] string with uid of design document to update
:return: integer with status of operation
an example of how to construct the query_criteria argument:
query_criteria = {
'.path.to.number': {
'min_value': 4.5
},
'.path.to.string': {
'discrete_values': [ 'pond', 'lake', 'stream', 'brook' ]
}
}
NOTE: only fields specified in the document schema at class initialization
can be used as fields in query_criteria. otherwise, an error will be thrown.
uid is automatically added to all document schemas at initialization
NOTE: the full list of all criteria are found in the reference page for the
jsonmodel module as well as the query-rules.json file included in the
module.
http://collectiveacuity.github.io/jsonModel/reference/#query-criteria
"""
# https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/query/put__db___design__ddoc_
# https://developer.couchbase.com/documentation/server/3.x/admin/Views/views-writing.html
title = '%s.create_view' % self.__class__.__name__
# validate inputs
input_fields = {'uid': uid}
for (key, value) in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# validate inputs
if query_criteria:
if not self.model:
raise ValueError('%s(query_criteria={...} requires a document_schema.' % title) # depends on [control=['if'], data=[]]
self.model.query(query_criteria) # depends on [control=['if'], data=[]]
else:
query_criteria = {}
if uid != '_all_users' and self.public:
raise ValueError('%s(uid="%s") user ids are not applicable for a public bucket. % title') # depends on [control=['if'], data=[]]
# catch missing args
if not query_criteria and (not uid):
raise IndexError('%s requires either a uid or query_criteria argument.' % title) # depends on [control=['if'], data=[]]
else:
# create a view of all user documents
# retrieve the design document for the uid
url = self.bucket_url + '/_design/%s' % uid
design_details = {'views': {}}
response = requests.get(url)
if response.status_code in (200, 201):
design_details = response.json()
design_details['views'] = self._clean_views(design_details['views']) # depends on [control=['if'], data=[]]
# create a view of all docs for the uid
if not query_criteria:
if uid == '_all_users':
return response.status_code # depends on [control=['if'], data=[]]
else:
function_string = 'function(doc, meta) { if (doc.uid == "%s") { emit(null, null); } }' % uid
design_details['views']['_all_docs'] = {'map': function_string} # depends on [control=['if'], data=[]]
else:
# construct a view for a query criteria
# determine hashed key for criteria
import hashlib
import json
from collections import OrderedDict
ordered_criteria = OrderedDict(**query_criteria)
hashed_criteria = hashlib.md5(json.dumps(query_criteria, sort_keys=True).encode('utf-8')).hexdigest()
# determine function string for criteria
uid_insert = 'emit();'
if uid != '_all_users':
uid_insert = 'if (doc.uid == "%s") { emit(); }' % uid # depends on [control=['if'], data=['uid']]
function_string = 'function(doc, meta) { %s }' % uid_insert
emit_insert = 'emit(null, ['
count = 0
for key in ordered_criteria.keys():
if count:
emit_insert += ',' # depends on [control=['if'], data=[]]
emit_insert += 'doc%s' % key # depends on [control=['for'], data=['key']]
emit_insert += ']);'
function_string = function_string.replace('emit();', emit_insert)
# construct updated design details
design_details['views'][hashed_criteria] = {'map': function_string}
# send update of design document
response = requests.put(url, json=design_details)
return response.status_code |
def tx_hash( cls, tx ):
    """
    Compute the display hash of a bitcoind transaction structure.

    Serializes the transaction, double-SHA256 hashes the raw bytes, and
    returns the digest byte-reversed as a hex string (the order bitcoind
    uses when displaying transaction hashes).

    NOTE(review): relies on str.decode('hex')/encode('hex'), i.e. Python 2.
    """
    serialized_hex = bits.btc_bitcoind_tx_serialize( tx )
    raw_digest = hashing.bin_double_sha256(serialized_hex.decode('hex'))
    # reverse byte order for the conventional big-endian hex display
    return raw_digest[::-1].encode('hex')
constant[
Calculate the hash of a transction structure given by bitcoind
]
variable[tx_hex] assign[=] call[name[bits].btc_bitcoind_tx_serialize, parameter[name[tx]]]
variable[tx_hash] assign[=] call[call[call[name[hashing].bin_double_sha256, parameter[call[name[tx_hex].decode, parameter[constant[hex]]]]]][<ast.Slice object at 0x7da1b2840fa0>].encode, parameter[constant[hex]]]
return[name[tx_hash]] | keyword[def] identifier[tx_hash] ( identifier[cls] , identifier[tx] ):
literal[string]
identifier[tx_hex] = identifier[bits] . identifier[btc_bitcoind_tx_serialize] ( identifier[tx] )
identifier[tx_hash] = identifier[hashing] . identifier[bin_double_sha256] ( identifier[tx_hex] . identifier[decode] ( literal[string] ))[::- literal[int] ]. identifier[encode] ( literal[string] )
keyword[return] identifier[tx_hash] | def tx_hash(cls, tx):
"""
Calculate the hash of a transction structure given by bitcoind
"""
tx_hex = bits.btc_bitcoind_tx_serialize(tx)
tx_hash = hashing.bin_double_sha256(tx_hex.decode('hex'))[::-1].encode('hex')
return tx_hash |
def _from_dict(cls, _dict):
"""Initialize a ListCollectionFieldsResponse object from a json dictionary."""
args = {}
if 'fields' in _dict:
args['fields'] = [
Field._from_dict(x) for x in (_dict.get('fields'))
]
return cls(**args) | def function[_from_dict, parameter[cls, _dict]]:
constant[Initialize a ListCollectionFieldsResponse object from a json dictionary.]
variable[args] assign[=] dictionary[[], []]
if compare[constant[fields] in name[_dict]] begin[:]
call[name[args]][constant[fields]] assign[=] <ast.ListComp object at 0x7da18bcca140>
return[call[name[cls], parameter[]]] | keyword[def] identifier[_from_dict] ( identifier[cls] , identifier[_dict] ):
literal[string]
identifier[args] ={}
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]=[
identifier[Field] . identifier[_from_dict] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[_dict] . identifier[get] ( literal[string] ))
]
keyword[return] identifier[cls] (** identifier[args] ) | def _from_dict(cls, _dict):
"""Initialize a ListCollectionFieldsResponse object from a json dictionary."""
args = {}
if 'fields' in _dict:
args['fields'] = [Field._from_dict(x) for x in _dict.get('fields')] # depends on [control=['if'], data=['_dict']]
return cls(**args) |
def check_py_version():
"""Check if a propper Python version is used."""
try:
if sys.version_info >= (2, 7):
return
except:
pass
print(" ")
print(" ERROR - memtop needs python version at least 2.7")
print(("Chances are that you can install newer version from your "
"repositories, or even that you have some newer version "
"installed yet."))
print("(one way to find out which versions are installed is to try "
"following: 'which python2.7' , 'which python3' and so...)")
print(" ")
sys.exit(-1) | def function[check_py_version, parameter[]]:
constant[Check if a propper Python version is used.]
<ast.Try object at 0x7da1b10a4100>
call[name[print], parameter[constant[ ]]]
call[name[print], parameter[constant[ ERROR - memtop needs python version at least 2.7]]]
call[name[print], parameter[constant[Chances are that you can install newer version from your repositories, or even that you have some newer version installed yet.]]]
call[name[print], parameter[constant[(one way to find out which versions are installed is to try following: 'which python2.7' , 'which python3' and so...)]]]
call[name[print], parameter[constant[ ]]]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b10a4c70>]] | keyword[def] identifier[check_py_version] ():
literal[string]
keyword[try] :
keyword[if] identifier[sys] . identifier[version_info] >=( literal[int] , literal[int] ):
keyword[return]
keyword[except] :
keyword[pass]
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] (( literal[string]
literal[string]
literal[string] ))
identifier[print] ( literal[string]
literal[string] )
identifier[print] ( literal[string] )
identifier[sys] . identifier[exit] (- literal[int] ) | def check_py_version():
"""Check if a propper Python version is used."""
try:
if sys.version_info >= (2, 7):
return # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
print(' ')
print(' ERROR - memtop needs python version at least 2.7')
print('Chances are that you can install newer version from your repositories, or even that you have some newer version installed yet.')
print("(one way to find out which versions are installed is to try following: 'which python2.7' , 'which python3' and so...)")
print(' ')
sys.exit(-1) |
def save_task_info(self, res, mem_gb=0):
"""
:param self: an object with attributes .hdf5, .argnames, .sent
:parent res: a :class:`Result` object
:param mem_gb: memory consumption at the saving time (optional)
"""
mon = res.mon
name = mon.operation[6:] # strip 'total '
if self.hdf5:
mon.hdf5 = self.hdf5 # needed for the flush below
t = (mon.task_no, mon.weight, mon.duration, len(res.pik), mem_gb)
data = numpy.array([t], task_info_dt)
hdf5.extend3(self.hdf5.filename, 'task_info/' + name, data,
argnames=self.argnames, sent=self.sent)
mon.flush() | def function[save_task_info, parameter[self, res, mem_gb]]:
constant[
:param self: an object with attributes .hdf5, .argnames, .sent
:parent res: a :class:`Result` object
:param mem_gb: memory consumption at the saving time (optional)
]
variable[mon] assign[=] name[res].mon
variable[name] assign[=] call[name[mon].operation][<ast.Slice object at 0x7da20c7c9510>]
if name[self].hdf5 begin[:]
name[mon].hdf5 assign[=] name[self].hdf5
variable[t] assign[=] tuple[[<ast.Attribute object at 0x7da20c7c9060>, <ast.Attribute object at 0x7da20c7c9b40>, <ast.Attribute object at 0x7da20c7cbd30>, <ast.Call object at 0x7da20c7c8730>, <ast.Name object at 0x7da20c7c9330>]]
variable[data] assign[=] call[name[numpy].array, parameter[list[[<ast.Name object at 0x7da20c7c8e80>]], name[task_info_dt]]]
call[name[hdf5].extend3, parameter[name[self].hdf5.filename, binary_operation[constant[task_info/] + name[name]], name[data]]]
call[name[mon].flush, parameter[]] | keyword[def] identifier[save_task_info] ( identifier[self] , identifier[res] , identifier[mem_gb] = literal[int] ):
literal[string]
identifier[mon] = identifier[res] . identifier[mon]
identifier[name] = identifier[mon] . identifier[operation] [ literal[int] :]
keyword[if] identifier[self] . identifier[hdf5] :
identifier[mon] . identifier[hdf5] = identifier[self] . identifier[hdf5]
identifier[t] =( identifier[mon] . identifier[task_no] , identifier[mon] . identifier[weight] , identifier[mon] . identifier[duration] , identifier[len] ( identifier[res] . identifier[pik] ), identifier[mem_gb] )
identifier[data] = identifier[numpy] . identifier[array] ([ identifier[t] ], identifier[task_info_dt] )
identifier[hdf5] . identifier[extend3] ( identifier[self] . identifier[hdf5] . identifier[filename] , literal[string] + identifier[name] , identifier[data] ,
identifier[argnames] = identifier[self] . identifier[argnames] , identifier[sent] = identifier[self] . identifier[sent] )
identifier[mon] . identifier[flush] () | def save_task_info(self, res, mem_gb=0):
"""
:param self: an object with attributes .hdf5, .argnames, .sent
:parent res: a :class:`Result` object
:param mem_gb: memory consumption at the saving time (optional)
"""
mon = res.mon
name = mon.operation[6:] # strip 'total '
if self.hdf5:
mon.hdf5 = self.hdf5 # needed for the flush below
t = (mon.task_no, mon.weight, mon.duration, len(res.pik), mem_gb)
data = numpy.array([t], task_info_dt)
hdf5.extend3(self.hdf5.filename, 'task_info/' + name, data, argnames=self.argnames, sent=self.sent) # depends on [control=['if'], data=[]]
mon.flush() |
def is_redundant_multiplicon(self, value):
""" Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
"""
if not hasattr(self, '_redundant_multiplicon_cache'):
sql = '''SELECT id FROM multiplicons WHERE is_redundant="-1"'''
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = [int(r[0]) for r in cur.fetchall()]
self._redundant_multiplicon_cache = set(result)
if value in self._redundant_multiplicon_cache:
return True
else:
return False | def function[is_redundant_multiplicon, parameter[self, value]]:
constant[ Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
]
if <ast.UnaryOp object at 0x7da1b0ae1000> begin[:]
variable[sql] assign[=] constant[SELECT id FROM multiplicons WHERE is_redundant="-1"]
variable[cur] assign[=] call[name[self]._dbconn.cursor, parameter[]]
call[name[cur].execute, parameter[name[sql], dictionary[[<ast.Constant object at 0x7da1b0ae0d30>], [<ast.Call object at 0x7da1b09c76a0>]]]]
variable[result] assign[=] <ast.ListComp object at 0x7da1b09c5180>
name[self]._redundant_multiplicon_cache assign[=] call[name[set], parameter[name[result]]]
if compare[name[value] in name[self]._redundant_multiplicon_cache] begin[:]
return[constant[True]] | keyword[def] identifier[is_redundant_multiplicon] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[sql] = literal[string]
identifier[cur] = identifier[self] . identifier[_dbconn] . identifier[cursor] ()
identifier[cur] . identifier[execute] ( identifier[sql] ,{ literal[string] : identifier[str] ( identifier[value] )})
identifier[result] =[ identifier[int] ( identifier[r] [ literal[int] ]) keyword[for] identifier[r] keyword[in] identifier[cur] . identifier[fetchall] ()]
identifier[self] . identifier[_redundant_multiplicon_cache] = identifier[set] ( identifier[result] )
keyword[if] identifier[value] keyword[in] identifier[self] . identifier[_redundant_multiplicon_cache] :
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def is_redundant_multiplicon(self, value):
""" Returns True if the passed multiplicon ID is redundant, False
otherwise.
- value, (int) multiplicon ID
"""
if not hasattr(self, '_redundant_multiplicon_cache'):
sql = 'SELECT id FROM multiplicons WHERE is_redundant="-1"'
cur = self._dbconn.cursor()
cur.execute(sql, {'id': str(value)})
result = [int(r[0]) for r in cur.fetchall()]
self._redundant_multiplicon_cache = set(result) # depends on [control=['if'], data=[]]
if value in self._redundant_multiplicon_cache:
return True # depends on [control=['if'], data=[]]
else:
return False |
def _ConvertManagedPropertyType(self, propType):
"""
Convert vmodl.reflect.DynamicTypeManager.PropertyTypeInfo to pyVmomi
managed property definition
"""
if propType:
name = propType.name
version = propType.version
aType = propType.type
flags = self._ConvertAnnotations(propType.annotation)
privId = propType.privId
prop = (name, aType, version, flags, privId)
else:
prop = None
return prop | def function[_ConvertManagedPropertyType, parameter[self, propType]]:
constant[
Convert vmodl.reflect.DynamicTypeManager.PropertyTypeInfo to pyVmomi
managed property definition
]
if name[propType] begin[:]
variable[name] assign[=] name[propType].name
variable[version] assign[=] name[propType].version
variable[aType] assign[=] name[propType].type
variable[flags] assign[=] call[name[self]._ConvertAnnotations, parameter[name[propType].annotation]]
variable[privId] assign[=] name[propType].privId
variable[prop] assign[=] tuple[[<ast.Name object at 0x7da18ede5810>, <ast.Name object at 0x7da18ede7df0>, <ast.Name object at 0x7da18ede6c50>, <ast.Name object at 0x7da18ede7040>, <ast.Name object at 0x7da18ede5480>]]
return[name[prop]] | keyword[def] identifier[_ConvertManagedPropertyType] ( identifier[self] , identifier[propType] ):
literal[string]
keyword[if] identifier[propType] :
identifier[name] = identifier[propType] . identifier[name]
identifier[version] = identifier[propType] . identifier[version]
identifier[aType] = identifier[propType] . identifier[type]
identifier[flags] = identifier[self] . identifier[_ConvertAnnotations] ( identifier[propType] . identifier[annotation] )
identifier[privId] = identifier[propType] . identifier[privId]
identifier[prop] =( identifier[name] , identifier[aType] , identifier[version] , identifier[flags] , identifier[privId] )
keyword[else] :
identifier[prop] = keyword[None]
keyword[return] identifier[prop] | def _ConvertManagedPropertyType(self, propType):
"""
Convert vmodl.reflect.DynamicTypeManager.PropertyTypeInfo to pyVmomi
managed property definition
"""
if propType:
name = propType.name
version = propType.version
aType = propType.type
flags = self._ConvertAnnotations(propType.annotation)
privId = propType.privId
prop = (name, aType, version, flags, privId) # depends on [control=['if'], data=[]]
else:
prop = None
return prop |
def reverse_timezone(self, query, timeout=DEFAULT_SENTINEL):
"""
Find the timezone for a point in `query`.
GeoNames always returns a timezone: if the point being queried
doesn't have an assigned Olson timezone id, a ``pytz.FixedOffset``
timezone is used to produce the :class:`geopy.timezone.Timezone`.
.. versionadded:: 1.18.0
:param query: The coordinates for which you want a timezone.
:type query: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: :class:`geopy.timezone.Timezone`
"""
ensure_pytz_is_installed()
try:
lat, lng = self._coerce_point_to_string(query).split(',')
except ValueError:
raise ValueError("Must be a coordinate pair or Point")
params = {
"lat": lat,
"lng": lng,
"username": self.username,
}
url = "?".join((self.api_timezone, urlencode(params)))
logger.debug("%s.reverse_timezone: %s", self.__class__.__name__, url)
return self._parse_json_timezone(
self._call_geocoder(url, timeout=timeout)
) | def function[reverse_timezone, parameter[self, query, timeout]]:
constant[
Find the timezone for a point in `query`.
GeoNames always returns a timezone: if the point being queried
doesn't have an assigned Olson timezone id, a ``pytz.FixedOffset``
timezone is used to produce the :class:`geopy.timezone.Timezone`.
.. versionadded:: 1.18.0
:param query: The coordinates for which you want a timezone.
:type query: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: :class:`geopy.timezone.Timezone`
]
call[name[ensure_pytz_is_installed], parameter[]]
<ast.Try object at 0x7da20c6e54e0>
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e7e20>, <ast.Constant object at 0x7da20c6e7e50>, <ast.Constant object at 0x7da20c6e4430>], [<ast.Name object at 0x7da20c6e7460>, <ast.Name object at 0x7da20c6e6410>, <ast.Attribute object at 0x7da20c6e6f50>]]
variable[url] assign[=] call[constant[?].join, parameter[tuple[[<ast.Attribute object at 0x7da20c6e5840>, <ast.Call object at 0x7da20c6e62f0>]]]]
call[name[logger].debug, parameter[constant[%s.reverse_timezone: %s], name[self].__class__.__name__, name[url]]]
return[call[name[self]._parse_json_timezone, parameter[call[name[self]._call_geocoder, parameter[name[url]]]]]] | keyword[def] identifier[reverse_timezone] ( identifier[self] , identifier[query] , identifier[timeout] = identifier[DEFAULT_SENTINEL] ):
literal[string]
identifier[ensure_pytz_is_installed] ()
keyword[try] :
identifier[lat] , identifier[lng] = identifier[self] . identifier[_coerce_point_to_string] ( identifier[query] ). identifier[split] ( literal[string] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[params] ={
literal[string] : identifier[lat] ,
literal[string] : identifier[lng] ,
literal[string] : identifier[self] . identifier[username] ,
}
identifier[url] = literal[string] . identifier[join] (( identifier[self] . identifier[api_timezone] , identifier[urlencode] ( identifier[params] )))
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[__class__] . identifier[__name__] , identifier[url] )
keyword[return] identifier[self] . identifier[_parse_json_timezone] (
identifier[self] . identifier[_call_geocoder] ( identifier[url] , identifier[timeout] = identifier[timeout] )
) | def reverse_timezone(self, query, timeout=DEFAULT_SENTINEL):
"""
Find the timezone for a point in `query`.
GeoNames always returns a timezone: if the point being queried
doesn't have an assigned Olson timezone id, a ``pytz.FixedOffset``
timezone is used to produce the :class:`geopy.timezone.Timezone`.
.. versionadded:: 1.18.0
:param query: The coordinates for which you want a timezone.
:type query: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: :class:`geopy.timezone.Timezone`
"""
ensure_pytz_is_installed()
try:
(lat, lng) = self._coerce_point_to_string(query).split(',') # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('Must be a coordinate pair or Point') # depends on [control=['except'], data=[]]
params = {'lat': lat, 'lng': lng, 'username': self.username}
url = '?'.join((self.api_timezone, urlencode(params)))
logger.debug('%s.reverse_timezone: %s', self.__class__.__name__, url)
return self._parse_json_timezone(self._call_geocoder(url, timeout=timeout)) |
def add_transcription(self, gene: Gene, rna: Union[Rna, MicroRna]) -> str:
"""Add a transcription relation from a gene to an RNA or miRNA node.
:param gene: A gene node
:param rna: An RNA or microRNA node
"""
return self.add_unqualified_edge(gene, rna, TRANSCRIBED_TO) | def function[add_transcription, parameter[self, gene, rna]]:
constant[Add a transcription relation from a gene to an RNA or miRNA node.
:param gene: A gene node
:param rna: An RNA or microRNA node
]
return[call[name[self].add_unqualified_edge, parameter[name[gene], name[rna], name[TRANSCRIBED_TO]]]] | keyword[def] identifier[add_transcription] ( identifier[self] , identifier[gene] : identifier[Gene] , identifier[rna] : identifier[Union] [ identifier[Rna] , identifier[MicroRna] ])-> identifier[str] :
literal[string]
keyword[return] identifier[self] . identifier[add_unqualified_edge] ( identifier[gene] , identifier[rna] , identifier[TRANSCRIBED_TO] ) | def add_transcription(self, gene: Gene, rna: Union[Rna, MicroRna]) -> str:
"""Add a transcription relation from a gene to an RNA or miRNA node.
:param gene: A gene node
:param rna: An RNA or microRNA node
"""
return self.add_unqualified_edge(gene, rna, TRANSCRIBED_TO) |
def fetch_and_parse(method, uri, params_prefix=None, **params):
"""Fetch the given uri and return python dictionary with parsed data-types."""
response = fetch(method, uri, params_prefix, **params)
return _parse(json.loads(response.text)) | def function[fetch_and_parse, parameter[method, uri, params_prefix]]:
constant[Fetch the given uri and return python dictionary with parsed data-types.]
variable[response] assign[=] call[name[fetch], parameter[name[method], name[uri], name[params_prefix]]]
return[call[name[_parse], parameter[call[name[json].loads, parameter[name[response].text]]]]] | keyword[def] identifier[fetch_and_parse] ( identifier[method] , identifier[uri] , identifier[params_prefix] = keyword[None] ,** identifier[params] ):
literal[string]
identifier[response] = identifier[fetch] ( identifier[method] , identifier[uri] , identifier[params_prefix] ,** identifier[params] )
keyword[return] identifier[_parse] ( identifier[json] . identifier[loads] ( identifier[response] . identifier[text] )) | def fetch_and_parse(method, uri, params_prefix=None, **params):
"""Fetch the given uri and return python dictionary with parsed data-types."""
response = fetch(method, uri, params_prefix, **params)
return _parse(json.loads(response.text)) |
def memoize(func):
""" Memoization decorator for a function taking one or more arguments. """
class Memodict(dict):
""" just a dict"""
def __getitem__(self, *key):
return dict.__getitem__(self, key)
def __missing__(self, key):
""" this makes it faster """
ret = self[key] = func(*key)
return ret
return Memodict().__getitem__ | def function[memoize, parameter[func]]:
constant[ Memoization decorator for a function taking one or more arguments. ]
class class[Memodict, parameter[]] begin[:]
constant[ just a dict]
def function[__getitem__, parameter[self]]:
return[call[name[dict].__getitem__, parameter[name[self], name[key]]]]
def function[__missing__, parameter[self, key]]:
constant[ this makes it faster ]
variable[ret] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da18ede7190>]]
return[name[ret]]
return[call[name[Memodict], parameter[]].__getitem__] | keyword[def] identifier[memoize] ( identifier[func] ):
literal[string]
keyword[class] identifier[Memodict] ( identifier[dict] ):
literal[string]
keyword[def] identifier[__getitem__] ( identifier[self] ,* identifier[key] ):
keyword[return] identifier[dict] . identifier[__getitem__] ( identifier[self] , identifier[key] )
keyword[def] identifier[__missing__] ( identifier[self] , identifier[key] ):
literal[string]
identifier[ret] = identifier[self] [ identifier[key] ]= identifier[func] (* identifier[key] )
keyword[return] identifier[ret]
keyword[return] identifier[Memodict] (). identifier[__getitem__] | def memoize(func):
""" Memoization decorator for a function taking one or more arguments. """
class Memodict(dict):
""" just a dict"""
def __getitem__(self, *key):
return dict.__getitem__(self, key)
def __missing__(self, key):
""" this makes it faster """
ret = self[key] = func(*key)
return ret
return Memodict().__getitem__ |
def _set_pseudotime(self):
"""Return pseudotime with respect to root point.
"""
self.pseudotime = self.distances_dpt[self.iroot].copy()
self.pseudotime /= np.max(self.pseudotime[self.pseudotime < np.inf]) | def function[_set_pseudotime, parameter[self]]:
constant[Return pseudotime with respect to root point.
]
name[self].pseudotime assign[=] call[call[name[self].distances_dpt][name[self].iroot].copy, parameter[]]
<ast.AugAssign object at 0x7da18f58faf0> | keyword[def] identifier[_set_pseudotime] ( identifier[self] ):
literal[string]
identifier[self] . identifier[pseudotime] = identifier[self] . identifier[distances_dpt] [ identifier[self] . identifier[iroot] ]. identifier[copy] ()
identifier[self] . identifier[pseudotime] /= identifier[np] . identifier[max] ( identifier[self] . identifier[pseudotime] [ identifier[self] . identifier[pseudotime] < identifier[np] . identifier[inf] ]) | def _set_pseudotime(self):
"""Return pseudotime with respect to root point.
"""
self.pseudotime = self.distances_dpt[self.iroot].copy()
self.pseudotime /= np.max(self.pseudotime[self.pseudotime < np.inf]) |
def resetCanvasDimensions(self, windowHeight, windowWidth):
'sets total available canvas dimensions to (windowHeight, windowWidth) (in char cells)'
self.plotwidth = windowWidth*2
self.plotheight = (windowHeight-1)*4 # exclude status line
# pixels[y][x] = { attr: list(rows), ... }
self.pixels = [[defaultdict(list) for x in range(self.plotwidth)] for y in range(self.plotheight)] | def function[resetCanvasDimensions, parameter[self, windowHeight, windowWidth]]:
constant[sets total available canvas dimensions to (windowHeight, windowWidth) (in char cells)]
name[self].plotwidth assign[=] binary_operation[name[windowWidth] * constant[2]]
name[self].plotheight assign[=] binary_operation[binary_operation[name[windowHeight] - constant[1]] * constant[4]]
name[self].pixels assign[=] <ast.ListComp object at 0x7da20e9b2b00> | keyword[def] identifier[resetCanvasDimensions] ( identifier[self] , identifier[windowHeight] , identifier[windowWidth] ):
literal[string]
identifier[self] . identifier[plotwidth] = identifier[windowWidth] * literal[int]
identifier[self] . identifier[plotheight] =( identifier[windowHeight] - literal[int] )* literal[int]
identifier[self] . identifier[pixels] =[[ identifier[defaultdict] ( identifier[list] ) keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[self] . identifier[plotwidth] )] keyword[for] identifier[y] keyword[in] identifier[range] ( identifier[self] . identifier[plotheight] )] | def resetCanvasDimensions(self, windowHeight, windowWidth):
"""sets total available canvas dimensions to (windowHeight, windowWidth) (in char cells)"""
self.plotwidth = windowWidth * 2
self.plotheight = (windowHeight - 1) * 4 # exclude status line
# pixels[y][x] = { attr: list(rows), ... }
self.pixels = [[defaultdict(list) for x in range(self.plotwidth)] for y in range(self.plotheight)] |
def verify_words(self):
"""Verify the fields source, imagery_used and comment of the changeset
for some suspect words.
"""
if self.comment:
if find_words(self.comment, self.suspect_words, self.excluded_words):
self.label_suspicious('suspect_word')
if self.source:
for word in self.illegal_sources:
if word in self.source.lower():
self.label_suspicious('suspect_word')
break
if self.imagery_used:
for word in self.illegal_sources:
if word in self.imagery_used.lower():
self.label_suspicious('suspect_word')
break
self.suspicion_reasons = list(set(self.suspicion_reasons)) | def function[verify_words, parameter[self]]:
constant[Verify the fields source, imagery_used and comment of the changeset
for some suspect words.
]
if name[self].comment begin[:]
if call[name[find_words], parameter[name[self].comment, name[self].suspect_words, name[self].excluded_words]] begin[:]
call[name[self].label_suspicious, parameter[constant[suspect_word]]]
if name[self].source begin[:]
for taget[name[word]] in starred[name[self].illegal_sources] begin[:]
if compare[name[word] in call[name[self].source.lower, parameter[]]] begin[:]
call[name[self].label_suspicious, parameter[constant[suspect_word]]]
break
if name[self].imagery_used begin[:]
for taget[name[word]] in starred[name[self].illegal_sources] begin[:]
if compare[name[word] in call[name[self].imagery_used.lower, parameter[]]] begin[:]
call[name[self].label_suspicious, parameter[constant[suspect_word]]]
break
name[self].suspicion_reasons assign[=] call[name[list], parameter[call[name[set], parameter[name[self].suspicion_reasons]]]] | keyword[def] identifier[verify_words] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[comment] :
keyword[if] identifier[find_words] ( identifier[self] . identifier[comment] , identifier[self] . identifier[suspect_words] , identifier[self] . identifier[excluded_words] ):
identifier[self] . identifier[label_suspicious] ( literal[string] )
keyword[if] identifier[self] . identifier[source] :
keyword[for] identifier[word] keyword[in] identifier[self] . identifier[illegal_sources] :
keyword[if] identifier[word] keyword[in] identifier[self] . identifier[source] . identifier[lower] ():
identifier[self] . identifier[label_suspicious] ( literal[string] )
keyword[break]
keyword[if] identifier[self] . identifier[imagery_used] :
keyword[for] identifier[word] keyword[in] identifier[self] . identifier[illegal_sources] :
keyword[if] identifier[word] keyword[in] identifier[self] . identifier[imagery_used] . identifier[lower] ():
identifier[self] . identifier[label_suspicious] ( literal[string] )
keyword[break]
identifier[self] . identifier[suspicion_reasons] = identifier[list] ( identifier[set] ( identifier[self] . identifier[suspicion_reasons] )) | def verify_words(self):
"""Verify the fields source, imagery_used and comment of the changeset
for some suspect words.
"""
if self.comment:
if find_words(self.comment, self.suspect_words, self.excluded_words):
self.label_suspicious('suspect_word') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.source:
for word in self.illegal_sources:
if word in self.source.lower():
self.label_suspicious('suspect_word')
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['word']] # depends on [control=['if'], data=[]]
if self.imagery_used:
for word in self.illegal_sources:
if word in self.imagery_used.lower():
self.label_suspicious('suspect_word')
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['word']] # depends on [control=['if'], data=[]]
self.suspicion_reasons = list(set(self.suspicion_reasons)) |
End of preview. Expand
in Data Studio
- Downloads last month
- 36