code (stringlengths 75–104k) | code_sememe (stringlengths 47–309k) | token_type (stringlengths 215–214k) | code_dependency (stringlengths 75–155k)
---|---|---|---
def get_filter_value(self, filter_name):
"""
API Version 2015-10-01 defines the following filters for DescribeDhcpOptions:
* dhcp-options-id
* key
* value
* tag:key=value
* tag-key
* tag-value
Taken from: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html
"""
if filter_name == 'dhcp-options-id':
return self.id
elif filter_name == 'key':
return list(self._options.keys())
elif filter_name == 'value':
values = [item for item in list(self._options.values()) if item]
return itertools.chain(*values)
else:
return super(DHCPOptionsSet, self).get_filter_value(
filter_name, 'DescribeDhcpOptions') | def function[get_filter_value, parameter[self, filter_name]]:
constant[
API Version 2015-10-01 defines the following filters for DescribeDhcpOptions:
* dhcp-options-id
* key
* value
* tag:key=value
* tag-key
* tag-value
Taken from: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html
]
if compare[name[filter_name] equal[==] constant[dhcp-options-id]] begin[:]
return[name[self].id] | keyword[def] identifier[get_filter_value] ( identifier[self] , identifier[filter_name] ):
literal[string]
keyword[if] identifier[filter_name] == literal[string] :
keyword[return] identifier[self] . identifier[id]
keyword[elif] identifier[filter_name] == literal[string] :
keyword[return] identifier[list] ( identifier[self] . identifier[_options] . identifier[keys] ())
keyword[elif] identifier[filter_name] == literal[string] :
identifier[values] =[ identifier[item] keyword[for] identifier[item] keyword[in] identifier[list] ( identifier[self] . identifier[_options] . identifier[values] ()) keyword[if] identifier[item] ]
keyword[return] identifier[itertools] . identifier[chain] (* identifier[values] )
keyword[else] :
keyword[return] identifier[super] ( identifier[DHCPOptionsSet] , identifier[self] ). identifier[get_filter_value] (
identifier[filter_name] , literal[string] ) | def get_filter_value(self, filter_name):
"""
API Version 2015-10-01 defines the following filters for DescribeDhcpOptions:
* dhcp-options-id
* key
* value
* tag:key=value
* tag-key
* tag-value
Taken from: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html
"""
if filter_name == 'dhcp-options-id':
return self.id # depends on [control=['if'], data=[]]
elif filter_name == 'key':
return list(self._options.keys()) # depends on [control=['if'], data=[]]
elif filter_name == 'value':
values = [item for item in list(self._options.values()) if item]
return itertools.chain(*values) # depends on [control=['if'], data=[]]
else:
return super(DHCPOptionsSet, self).get_filter_value(filter_name, 'DescribeDhcpOptions') |
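
The `'value'` branch above filters out empty option values and flattens the rest into a single iterable with `itertools.chain`. A minimal runnable sketch of that behaviour, using a hypothetical `_options` mapping:

```python
import itertools

# Hypothetical DHCP options mapping; None/empty values are filtered out first.
_options = {"domain-name": ["example.com"],
            "ntp-servers": None,
            "domain-name-servers": ["10.0.0.2", "10.0.0.3"]}
values = [item for item in _options.values() if item]
print(list(itertools.chain(*values)))
# ['example.com', '10.0.0.2', '10.0.0.3']
```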
def _graphite_url(self, query, raw_data=False, graphite_url=None):
"""Build Graphite URL."""
query = escape.url_escape(query)
graphite_url = graphite_url or self.reactor.options.get('public_graphite_url')
url = "{base}/render/?target={query}&from=-{from_time}&until=-{until}".format(
base=graphite_url, query=query,
from_time=self.from_time.as_graphite(),
until=self.until.as_graphite(),
)
if raw_data:
url = "{}&format=raw".format(url)
return url | def function[_graphite_url, parameter[self, query, raw_data, graphite_url]]:
constant[Build Graphite URL.]
variable[query] assign[=] call[name[escape].url_escape, parameter[name[query]]]
variable[graphite_url] assign[=] <ast.BoolOp object at 0x7da1b0e9c340>
variable[url] assign[=] call[constant[{base}/render/?target={query}&from=-{from_time}&until=-{until}].format, parameter[]]
if name[raw_data] begin[:]
variable[url] assign[=] call[constant[{}&format=raw].format, parameter[name[url]]]
return[name[url]] | keyword[def] identifier[_graphite_url] ( identifier[self] , identifier[query] , identifier[raw_data] = keyword[False] , identifier[graphite_url] = keyword[None] ):
literal[string]
identifier[query] = identifier[escape] . identifier[url_escape] ( identifier[query] )
identifier[graphite_url] = identifier[graphite_url] keyword[or] identifier[self] . identifier[reactor] . identifier[options] . identifier[get] ( literal[string] )
identifier[url] = literal[string] . identifier[format] (
identifier[base] = identifier[graphite_url] , identifier[query] = identifier[query] ,
identifier[from_time] = identifier[self] . identifier[from_time] . identifier[as_graphite] (),
identifier[until] = identifier[self] . identifier[until] . identifier[as_graphite] (),
)
keyword[if] identifier[raw_data] :
identifier[url] = literal[string] . identifier[format] ( identifier[url] )
keyword[return] identifier[url] | def _graphite_url(self, query, raw_data=False, graphite_url=None):
"""Build Graphite URL."""
query = escape.url_escape(query)
graphite_url = graphite_url or self.reactor.options.get('public_graphite_url')
url = '{base}/render/?target={query}&from=-{from_time}&until=-{until}'.format(base=graphite_url, query=query, from_time=self.from_time.as_graphite(), until=self.until.as_graphite())
if raw_data:
url = '{}&format=raw'.format(url) # depends on [control=['if'], data=[]]
return url |
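
A rough standard-library rendering of the URL construction above; the snippet's `escape.url_escape` is assumed to come from `tornado.escape`, and the base URL, query, and time-window strings below are hypothetical stand-ins for `as_graphite()` output:

```python
from urllib.parse import quote

query = quote("summarize(app.*.errors, '1min')", safe="")
url = "{base}/render/?target={query}&from=-{from_time}&until=-{until}".format(
    base="https://graphite.example.com", query=query,
    from_time="10minute", until="0minute")
print(url + "&format=raw")  # raw_data=True appends the format flag
```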
def extend_webfont_settings(webfont_settings):
"""
    Validate webfont settings and optionally fill in a missing ``csspart_path``
option.
Args:
webfont_settings (dict): Webfont settings (an item value from
``settings.ICOMOON_WEBFONTS``).
Returns:
dict: Webfont settings
"""
if not webfont_settings.get('fontdir_path', False):
raise IcomoonSettingsError(("Webfont settings miss the required key "
"item 'fontdir_path'"))
if not webfont_settings.get('csspart_path', False):
webfont_settings['csspart_path'] = None
return webfont_settings | def function[extend_webfont_settings, parameter[webfont_settings]]:
constant[
    Validate webfont settings and optionally fill in a missing ``csspart_path``
option.
Args:
webfont_settings (dict): Webfont settings (an item value from
``settings.ICOMOON_WEBFONTS``).
Returns:
dict: Webfont settings
]
if <ast.UnaryOp object at 0x7da207f03190> begin[:]
<ast.Raise object at 0x7da1b26ac2b0>
if <ast.UnaryOp object at 0x7da1b26ae560> begin[:]
call[name[webfont_settings]][constant[csspart_path]] assign[=] constant[None]
return[name[webfont_settings]] | keyword[def] identifier[extend_webfont_settings] ( identifier[webfont_settings] ):
literal[string]
keyword[if] keyword[not] identifier[webfont_settings] . identifier[get] ( literal[string] , keyword[False] ):
keyword[raise] identifier[IcomoonSettingsError] (( literal[string]
literal[string] ))
keyword[if] keyword[not] identifier[webfont_settings] . identifier[get] ( literal[string] , keyword[False] ):
identifier[webfont_settings] [ literal[string] ]= keyword[None]
keyword[return] identifier[webfont_settings] | def extend_webfont_settings(webfont_settings):
"""
    Validate webfont settings and optionally fill in a missing ``csspart_path``
option.
Args:
webfont_settings (dict): Webfont settings (an item value from
``settings.ICOMOON_WEBFONTS``).
Returns:
dict: Webfont settings
"""
if not webfont_settings.get('fontdir_path', False):
raise IcomoonSettingsError("Webfont settings miss the required key item 'fontdir_path'") # depends on [control=['if'], data=[]]
if not webfont_settings.get('csspart_path', False):
webfont_settings['csspart_path'] = None # depends on [control=['if'], data=[]]
return webfont_settings |
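
Assuming the validator is importable from wherever it lives in the project, a minimal usage sketch (the path is a hypothetical example):

```python
# from icomoon.utils import extend_webfont_settings  # import path assumed

webfont = {"fontdir_path": "project/webfonts/icomoon"}  # hypothetical path
webfont = extend_webfont_settings(webfont)
assert webfont["csspart_path"] is None  # the optional key was filled in
```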
def update(self, campaign_id, search_channels, nonsearch_channels, outside_discount, nick=None):
'''xxxxx.xxxxx.campaign.platform.update
===================================
        Update the placement platform settings of a promotion campaign'''
request = TOPRequest('xxxxx.xxxxx.campaign.platform.update')
request['campaign_id'] = campaign_id
request['search_channels'] = search_channels
request['nonsearch_channels'] = nonsearch_channels
request['outside_discount'] = outside_discount
if nick!=None: request['nick'] = nick
self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':CampaignPlatform})
return self.result | def function[update, parameter[self, campaign_id, search_channels, nonsearch_channels, outside_discount, nick]]:
constant[xxxxx.xxxxx.campaign.platform.update
===================================
    Update the placement platform settings of a promotion campaign]
variable[request] assign[=] call[name[TOPRequest], parameter[constant[xxxxx.xxxxx.campaign.platform.update]]]
call[name[request]][constant[campaign_id]] assign[=] name[campaign_id]
call[name[request]][constant[search_channels]] assign[=] name[search_channels]
call[name[request]][constant[nonsearch_channels]] assign[=] name[nonsearch_channels]
call[name[request]][constant[outside_discount]] assign[=] name[outside_discount]
if compare[name[nick] not_equal[!=] constant[None]] begin[:]
call[name[request]][constant[nick]] assign[=] name[nick]
call[name[self].create, parameter[call[name[self].execute, parameter[name[request]]]]]
return[name[self].result] | keyword[def] identifier[update] ( identifier[self] , identifier[campaign_id] , identifier[search_channels] , identifier[nonsearch_channels] , identifier[outside_discount] , identifier[nick] = keyword[None] ):
literal[string]
identifier[request] = identifier[TOPRequest] ( literal[string] )
identifier[request] [ literal[string] ]= identifier[campaign_id]
identifier[request] [ literal[string] ]= identifier[search_channels]
identifier[request] [ literal[string] ]= identifier[nonsearch_channels]
identifier[request] [ literal[string] ]= identifier[outside_discount]
keyword[if] identifier[nick] != keyword[None] : identifier[request] [ literal[string] ]= identifier[nick]
identifier[self] . identifier[create] ( identifier[self] . identifier[execute] ( identifier[request] ), identifier[fields] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ], identifier[models] ={ literal[string] : identifier[CampaignPlatform] })
keyword[return] identifier[self] . identifier[result] | def update(self, campaign_id, search_channels, nonsearch_channels, outside_discount, nick=None):
"""xxxxx.xxxxx.campaign.platform.update
===================================
        Update the placement platform settings of a promotion campaign"""
request = TOPRequest('xxxxx.xxxxx.campaign.platform.update')
request['campaign_id'] = campaign_id
request['search_channels'] = search_channels
request['nonsearch_channels'] = nonsearch_channels
request['outside_discount'] = outside_discount
if nick != None:
request['nick'] = nick # depends on [control=['if'], data=['nick']]
self.create(self.execute(request), fields=['success', 'result', 'success', 'result_code', 'result_message'], models={'result': CampaignPlatform})
return self.result |
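
A hypothetical call against a client exposing this method; the channel codes and discount value are illustrative stand-ins, not documented API values:

```python
# Hypothetical TOP client call; every argument value here is illustrative.
result = client.update(
    campaign_id=123456,        # hypothetical campaign id
    search_channels=[1, 2],    # illustrative search channel codes
    nonsearch_channels=[16],   # illustrative non-search channel code
    outside_discount=70,       # e.g. bid at 70% on non-search placements
    nick="seller_nick",        # optional seller nick
)
```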
def to_hsl(self):
''' Return a corresponding HSL color for this RGB color.
Returns:
        :class:`~bokeh.colors.hsl.HSL`
'''
from .hsl import HSL # prevent circular import
h, l, s = colorsys.rgb_to_hls(float(self.r)/255, float(self.g)/255, float(self.b)/255)
return HSL(round(h*360), s, l, self.a) | def function[to_hsl, parameter[self]]:
constant[ Return a corresponding HSL color for this RGB color.
Returns:
    :class:`~bokeh.colors.hsl.HSL`
]
from relative_module[hsl] import module[HSL]
<ast.Tuple object at 0x7da1b1f1b6d0> assign[=] call[name[colorsys].rgb_to_hls, parameter[binary_operation[call[name[float], parameter[name[self].r]] / constant[255]], binary_operation[call[name[float], parameter[name[self].g]] / constant[255]], binary_operation[call[name[float], parameter[name[self].b]] / constant[255]]]]
return[call[name[HSL], parameter[call[name[round], parameter[binary_operation[name[h] * constant[360]]]], name[s], name[l], name[self].a]]] | keyword[def] identifier[to_hsl] ( identifier[self] ):
literal[string]
keyword[from] . identifier[hsl] keyword[import] identifier[HSL]
identifier[h] , identifier[l] , identifier[s] = identifier[colorsys] . identifier[rgb_to_hls] ( identifier[float] ( identifier[self] . identifier[r] )/ literal[int] , identifier[float] ( identifier[self] . identifier[g] )/ literal[int] , identifier[float] ( identifier[self] . identifier[b] )/ literal[int] )
keyword[return] identifier[HSL] ( identifier[round] ( identifier[h] * literal[int] ), identifier[s] , identifier[l] , identifier[self] . identifier[a] ) | def to_hsl(self):
""" Return a corresponding HSL color for this RGB color.
Returns:
        :class:`~bokeh.colors.hsl.HSL`
"""
from .hsl import HSL # prevent circular import
(h, l, s) = colorsys.rgb_to_hls(float(self.r) / 255, float(self.g) / 255, float(self.b) / 255)
return HSL(round(h * 360), s, l, self.a) |
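
A quick numeric check of the conversion performed above, for pure red. `colorsys.rgb_to_hls` returns hue, lightness, saturation in that order, which is why the method unpacks `h, l, s`:

```python
import colorsys

h, l, s = colorsys.rgb_to_hls(255 / 255, 0 / 255, 0 / 255)
print(round(h * 360), s, l)  # 0 1.0 0.5 -> HSL(hue=0, sat=1.0, light=0.5)
```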
def paint(self, painter, option, widget):
"""
Overloads the paint method from QGraphicsPathItem to \
        handle custom drawing of the path using this item's \
pens and polygons.
:param painter <QPainter>
:param option <QGraphicsItemStyleOption>
:param widget <QWidget>
"""
# following the arguments required by Qt
# pylint: disable-msg=W0613
painter.setOpacity(self.opacity())
# show the connection selected
if not self.isEnabled():
pen = QPen(self.disabledPen())
elif self.isSelected():
pen = QPen(self.highlightPen())
else:
pen = QPen(self.pen())
if self._textItem:
self._textItem.setOpacity(self.opacity())
self._textItem.setDefaultTextColor(pen.color().darker(110))
# rebuild first if necessary
if self.isDirty():
self.setPath(self.rebuild())
# store the initial hint
hint = painter.renderHints()
painter.setRenderHint(painter.Antialiasing)
pen.setWidthF(1.25)
painter.setPen(pen)
painter.drawPath(self.path())
# redraw the polys to force-fill them
for poly in self._polygons:
if not poly.isClosed():
continue
painter.setBrush(pen.color())
painter.drawPolygon(poly)
# restore the render hints
painter.setRenderHints(hint) | def function[paint, parameter[self, painter, option, widget]]:
constant[
    Overloads the paint method from QGraphicsPathItem to handle custom drawing of the path using this item's pens and polygons.
:param painter <QPainter>
:param option <QGraphicsItemStyleOption>
:param widget <QWidget>
]
call[name[painter].setOpacity, parameter[call[name[self].opacity, parameter[]]]]
if <ast.UnaryOp object at 0x7da1b2427af0> begin[:]
variable[pen] assign[=] call[name[QPen], parameter[call[name[self].disabledPen, parameter[]]]]
if name[self]._textItem begin[:]
call[name[self]._textItem.setOpacity, parameter[call[name[self].opacity, parameter[]]]]
call[name[self]._textItem.setDefaultTextColor, parameter[call[call[name[pen].color, parameter[]].darker, parameter[constant[110]]]]]
if call[name[self].isDirty, parameter[]] begin[:]
call[name[self].setPath, parameter[call[name[self].rebuild, parameter[]]]]
variable[hint] assign[=] call[name[painter].renderHints, parameter[]]
call[name[painter].setRenderHint, parameter[name[painter].Antialiasing]]
call[name[pen].setWidthF, parameter[constant[1.25]]]
call[name[painter].setPen, parameter[name[pen]]]
call[name[painter].drawPath, parameter[call[name[self].path, parameter[]]]]
for taget[name[poly]] in starred[name[self]._polygons] begin[:]
if <ast.UnaryOp object at 0x7da1b24e1d80> begin[:]
continue
call[name[painter].setBrush, parameter[call[name[pen].color, parameter[]]]]
call[name[painter].drawPolygon, parameter[name[poly]]]
call[name[painter].setRenderHints, parameter[name[hint]]] | keyword[def] identifier[paint] ( identifier[self] , identifier[painter] , identifier[option] , identifier[widget] ):
literal[string]
identifier[painter] . identifier[setOpacity] ( identifier[self] . identifier[opacity] ())
keyword[if] keyword[not] identifier[self] . identifier[isEnabled] ():
identifier[pen] = identifier[QPen] ( identifier[self] . identifier[disabledPen] ())
keyword[elif] identifier[self] . identifier[isSelected] ():
identifier[pen] = identifier[QPen] ( identifier[self] . identifier[highlightPen] ())
keyword[else] :
identifier[pen] = identifier[QPen] ( identifier[self] . identifier[pen] ())
keyword[if] identifier[self] . identifier[_textItem] :
identifier[self] . identifier[_textItem] . identifier[setOpacity] ( identifier[self] . identifier[opacity] ())
identifier[self] . identifier[_textItem] . identifier[setDefaultTextColor] ( identifier[pen] . identifier[color] (). identifier[darker] ( literal[int] ))
keyword[if] identifier[self] . identifier[isDirty] ():
identifier[self] . identifier[setPath] ( identifier[self] . identifier[rebuild] ())
identifier[hint] = identifier[painter] . identifier[renderHints] ()
identifier[painter] . identifier[setRenderHint] ( identifier[painter] . identifier[Antialiasing] )
identifier[pen] . identifier[setWidthF] ( literal[int] )
identifier[painter] . identifier[setPen] ( identifier[pen] )
identifier[painter] . identifier[drawPath] ( identifier[self] . identifier[path] ())
keyword[for] identifier[poly] keyword[in] identifier[self] . identifier[_polygons] :
keyword[if] keyword[not] identifier[poly] . identifier[isClosed] ():
keyword[continue]
identifier[painter] . identifier[setBrush] ( identifier[pen] . identifier[color] ())
identifier[painter] . identifier[drawPolygon] ( identifier[poly] )
identifier[painter] . identifier[setRenderHints] ( identifier[hint] ) | def paint(self, painter, option, widget):
"""
    Overloads the paint method from QGraphicsPathItem to handle custom drawing of the path using this item's pens and polygons.
:param painter <QPainter>
:param option <QGraphicsItemStyleOption>
:param widget <QWidget>
"""
# following the arguments required by Qt
# pylint: disable-msg=W0613
painter.setOpacity(self.opacity())
# show the connection selected
if not self.isEnabled():
pen = QPen(self.disabledPen()) # depends on [control=['if'], data=[]]
elif self.isSelected():
pen = QPen(self.highlightPen()) # depends on [control=['if'], data=[]]
else:
pen = QPen(self.pen())
if self._textItem:
self._textItem.setOpacity(self.opacity())
self._textItem.setDefaultTextColor(pen.color().darker(110)) # depends on [control=['if'], data=[]]
# rebuild first if necessary
if self.isDirty():
self.setPath(self.rebuild()) # depends on [control=['if'], data=[]]
# store the initial hint
hint = painter.renderHints()
painter.setRenderHint(painter.Antialiasing)
pen.setWidthF(1.25)
painter.setPen(pen)
painter.drawPath(self.path())
# redraw the polys to force-fill them
for poly in self._polygons:
if not poly.isClosed():
continue # depends on [control=['if'], data=[]]
painter.setBrush(pen.color())
painter.drawPolygon(poly) # depends on [control=['for'], data=['poly']]
# restore the render hints
painter.setRenderHints(hint) |
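
The render-hint save/restore idiom used in the method above can be sketched on its own. This assumes PyQt5; the original code's Qt binding is not shown:

```python
from PyQt5.QtGui import QImage, QPainter

image = QImage(64, 64, QImage.Format_ARGB32)
painter = QPainter(image)
hint = painter.renderHints()                  # store the initial hints
painter.setRenderHint(QPainter.Antialiasing)  # enable AA for this pass
# ... draw paths and polygons here ...
painter.setRenderHints(hint)                  # restore the original hints
painter.end()
```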
def get_entries(self, user_scope, key=None):
"""GetEntries.
[Preview API] Get all setting entries for the given user/all-users scope
:param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
:param str key: Optional key under which to filter all the entries
:rtype: {object}
"""
route_values = {}
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
if key is not None:
route_values['key'] = self._serialize.url('key', key, 'str')
response = self._send(http_method='GET',
location_id='cd006711-163d-4cd4-a597-b05bad2556ff',
version='5.0-preview.1',
route_values=route_values)
return self._deserialize('{object}', self._unwrap_collection(response)) | def function[get_entries, parameter[self, user_scope, key]]:
constant[GetEntries.
[Preview API] Get all setting entries for the given user/all-users scope
:param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
:param str key: Optional key under which to filter all the entries
:rtype: {object}
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[user_scope] is_not constant[None]] begin[:]
call[name[route_values]][constant[userScope]] assign[=] call[name[self]._serialize.url, parameter[constant[user_scope], name[user_scope], constant[str]]]
if compare[name[key] is_not constant[None]] begin[:]
call[name[route_values]][constant[key]] assign[=] call[name[self]._serialize.url, parameter[constant[key], name[key], constant[str]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[{object}], call[name[self]._unwrap_collection, parameter[name[response]]]]]] | keyword[def] identifier[get_entries] ( identifier[self] , identifier[user_scope] , identifier[key] = keyword[None] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[user_scope] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[user_scope] , literal[string] )
keyword[if] identifier[key] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[key] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[self] . identifier[_unwrap_collection] ( identifier[response] )) | def get_entries(self, user_scope, key=None):
"""GetEntries.
[Preview API] Get all setting entries for the given user/all-users scope
:param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
:param str key: Optional key under which to filter all the entries
:rtype: {object}
"""
route_values = {}
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str') # depends on [control=['if'], data=['user_scope']]
if key is not None:
route_values['key'] = self._serialize.url('key', key, 'str') # depends on [control=['if'], data=['key']]
response = self._send(http_method='GET', location_id='cd006711-163d-4cd4-a597-b05bad2556ff', version='5.0-preview.1', route_values=route_values)
return self._deserialize('{object}', self._unwrap_collection(response)) |
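
A hedged usage sketch, assuming an already-initialized settings client from this SDK; the key below is a hypothetical example:

```python
# Fetch all setting entries under a hypothetical key for the current user.
entries = settings_client.get_entries(user_scope="me", key="WebPlatform")
for name, value in entries.items():
    print(name, value)
```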
def _create_config(cls):
"""
Creates an ApphookConfig instance
``AutoCMSAppMixin.auto_setup['config_fields']`` is used to fill in the data
of the instance.
:return: ApphookConfig instance
"""
return cls.app_config.objects.create(
namespace=cls.auto_setup['namespace'], **cls.auto_setup['config_fields']
) | def function[_create_config, parameter[cls]]:
constant[
Creates an ApphookConfig instance
``AutoCMSAppMixin.auto_setup['config_fields']`` is used to fill in the data
of the instance.
:return: ApphookConfig instance
]
return[call[name[cls].app_config.objects.create, parameter[]]] | keyword[def] identifier[_create_config] ( identifier[cls] ):
literal[string]
keyword[return] identifier[cls] . identifier[app_config] . identifier[objects] . identifier[create] (
identifier[namespace] = identifier[cls] . identifier[auto_setup] [ literal[string] ],** identifier[cls] . identifier[auto_setup] [ literal[string] ]
) | def _create_config(cls):
"""
Creates an ApphookConfig instance
``AutoCMSAppMixin.auto_setup['config_fields']`` is used to fill in the data
of the instance.
:return: ApphookConfig instance
"""
return cls.app_config.objects.create(namespace=cls.auto_setup['namespace'], **cls.auto_setup['config_fields']) |
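
The class attributes this helper reads might look like the following; the mapping is a hypothetical illustration of the ``auto_setup`` contract, derived from how the method uses it:

```python
# Hypothetical auto_setup mapping consumed by _create_config.
auto_setup = {
    "namespace": "news",                   # becomes the config's namespace
    "config_fields": {"paginate_by": 10},  # forwarded to objects.create()
}
# With these values, _create_config effectively performs:
# cls.app_config.objects.create(namespace="news", paginate_by=10)
```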
def t_measures(dirname, time_func, measure_func):
"""Calculate a measure over time for a single output directory,
and its uncertainty.
Parameters
----------
dirname: str
Path to a model output directory.
time_func: function
Function which takes a :class:`Model` instance as a single argument,
and returns its time.
measure_func: function
Function which takes a :class:`Model` instance as a single argument,
and returns the measure of interest, and its uncertainty.
Returns
-------
ts: np.ndarray
Times.
measures: np.ndarray
Measures.
measure_errs: np.ndarray
Measure uncertainties.
"""
ts, measures, measure_errs = [], [], []
for fname in get_filenames(dirname):
m = filename_to_model(fname)
ts.append(time_func(m))
meas, meas_err = measure_func(m)
measures.append(meas)
measure_errs.append(meas_err)
return np.array(ts), np.array(measures), np.array(measure_errs) | def function[t_measures, parameter[dirname, time_func, measure_func]]:
constant[Calculate a measure over time for a single output directory,
and its uncertainty.
Parameters
----------
dirname: str
Path to a model output directory.
time_func: function
Function which takes a :class:`Model` instance as a single argument,
and returns its time.
measure_func: function
Function which takes a :class:`Model` instance as a single argument,
and returns the measure of interest, and its uncertainty.
Returns
-------
ts: np.ndarray
Times.
measures: np.ndarray
Measures.
measure_errs: np.ndarray
Measure uncertainties.
]
<ast.Tuple object at 0x7da1b143e770> assign[=] tuple[[<ast.List object at 0x7da1b143f130>, <ast.List object at 0x7da1b143f820>, <ast.List object at 0x7da1b143f520>]]
for taget[name[fname]] in starred[call[name[get_filenames], parameter[name[dirname]]]] begin[:]
variable[m] assign[=] call[name[filename_to_model], parameter[name[fname]]]
call[name[ts].append, parameter[call[name[time_func], parameter[name[m]]]]]
<ast.Tuple object at 0x7da1b149e6e0> assign[=] call[name[measure_func], parameter[name[m]]]
call[name[measures].append, parameter[name[meas]]]
call[name[measure_errs].append, parameter[name[meas_err]]]
return[tuple[[<ast.Call object at 0x7da1b149d810>, <ast.Call object at 0x7da1b149e590>, <ast.Call object at 0x7da1b149d450>]]] | keyword[def] identifier[t_measures] ( identifier[dirname] , identifier[time_func] , identifier[measure_func] ):
literal[string]
identifier[ts] , identifier[measures] , identifier[measure_errs] =[],[],[]
keyword[for] identifier[fname] keyword[in] identifier[get_filenames] ( identifier[dirname] ):
identifier[m] = identifier[filename_to_model] ( identifier[fname] )
identifier[ts] . identifier[append] ( identifier[time_func] ( identifier[m] ))
identifier[meas] , identifier[meas_err] = identifier[measure_func] ( identifier[m] )
identifier[measures] . identifier[append] ( identifier[meas] )
identifier[measure_errs] . identifier[append] ( identifier[meas_err] )
keyword[return] identifier[np] . identifier[array] ( identifier[ts] ), identifier[np] . identifier[array] ( identifier[measures] ), identifier[np] . identifier[array] ( identifier[measure_errs] ) | def t_measures(dirname, time_func, measure_func):
"""Calculate a measure over time for a single output directory,
and its uncertainty.
Parameters
----------
dirname: str
Path to a model output directory.
time_func: function
Function which takes a :class:`Model` instance as a single argument,
and returns its time.
measure_func: function
Function which takes a :class:`Model` instance as a single argument,
and returns the measure of interest, and its uncertainty.
Returns
-------
ts: np.ndarray
Times.
measures: np.ndarray
Measures.
measure_errs: np.ndarray
Measure uncertainties.
"""
(ts, measures, measure_errs) = ([], [], [])
for fname in get_filenames(dirname):
m = filename_to_model(fname)
ts.append(time_func(m))
(meas, meas_err) = measure_func(m)
measures.append(meas)
measure_errs.append(meas_err) # depends on [control=['for'], data=['fname']]
return (np.array(ts), np.array(measures), np.array(measure_errs)) |
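
The accumulation pattern above can be illustrated with stand-in models and accessors; everything below is hypothetical scaffolding, not part of the library:

```python
import numpy as np

class FakeModel:                       # stand-in for a deserialized Model
    def __init__(self, t, meas, err):
        self.t, self.meas, self.err = t, meas, err

def time_func(m):
    return m.t

def measure_func(m):
    return m.meas, m.err               # measure and its uncertainty

models = [FakeModel(0.0, 1.0, 0.1), FakeModel(1.0, 1.5, 0.2)]
ts = np.array([time_func(m) for m in models])
measures, errs = (np.array(x) for x in zip(*[measure_func(m) for m in models]))
print(ts, measures, errs)
```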
def scan(self, t, dt=None, aggfunc=None):
"""
Returns the spectrum from a specific time or range of times.
"""
return self.data.scan(t, dt, aggfunc) | def function[scan, parameter[self, t, dt, aggfunc]]:
constant[
Returns the spectrum from a specific time or range of times.
]
return[call[name[self].data.scan, parameter[name[t], name[dt], name[aggfunc]]]] | keyword[def] identifier[scan] ( identifier[self] , identifier[t] , identifier[dt] = keyword[None] , identifier[aggfunc] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[data] . identifier[scan] ( identifier[t] , identifier[dt] , identifier[aggfunc] ) | def scan(self, t, dt=None, aggfunc=None):
"""
Returns the spectrum from a specific time or range of times.
"""
return self.data.scan(t, dt, aggfunc) |
def grav_pot(self, x, y, rho0, Ra, Rs, center_x=0, center_y=0):
"""
gravitational potential (modulo 4 pi G and rho0 in appropriate units)
:param x:
:param y:
:param rho0:
:param Ra:
:param Rs:
:param center_x:
:param center_y:
:return:
"""
Ra, Rs = self._sort_ra_rs(Ra, Rs)
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
pot = 4 * np.pi * rho0 * Ra ** 2 * Rs ** 2 / (Rs ** 2 - Ra ** 2) * (Rs / r * np.arctan(r / Rs) - Ra / r * np.arctan(r / Ra)
+ 1. / 2 * np.log((Rs ** 2 + r ** 2) / (Ra ** 2 + r ** 2)))
return pot | def function[grav_pot, parameter[self, x, y, rho0, Ra, Rs, center_x, center_y]]:
constant[
gravitational potential (modulo 4 pi G and rho0 in appropriate units)
:param x:
:param y:
:param rho0:
:param Ra:
:param Rs:
:param center_x:
:param center_y:
:return:
]
<ast.Tuple object at 0x7da18dc99690> assign[=] call[name[self]._sort_ra_rs, parameter[name[Ra], name[Rs]]]
variable[x_] assign[=] binary_operation[name[x] - name[center_x]]
variable[y_] assign[=] binary_operation[name[y] - name[center_y]]
variable[r] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[name[x_] ** constant[2]] + binary_operation[name[y_] ** constant[2]]]]]
variable[pot] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[4] * name[np].pi] * name[rho0]] * binary_operation[name[Ra] ** constant[2]]] * binary_operation[name[Rs] ** constant[2]]] / binary_operation[binary_operation[name[Rs] ** constant[2]] - binary_operation[name[Ra] ** constant[2]]]] * binary_operation[binary_operation[binary_operation[binary_operation[name[Rs] / name[r]] * call[name[np].arctan, parameter[binary_operation[name[r] / name[Rs]]]]] - binary_operation[binary_operation[name[Ra] / name[r]] * call[name[np].arctan, parameter[binary_operation[name[r] / name[Ra]]]]]] + binary_operation[binary_operation[constant[1.0] / constant[2]] * call[name[np].log, parameter[binary_operation[binary_operation[binary_operation[name[Rs] ** constant[2]] + binary_operation[name[r] ** constant[2]]] / binary_operation[binary_operation[name[Ra] ** constant[2]] + binary_operation[name[r] ** constant[2]]]]]]]]]
return[name[pot]] | keyword[def] identifier[grav_pot] ( identifier[self] , identifier[x] , identifier[y] , identifier[rho0] , identifier[Ra] , identifier[Rs] , identifier[center_x] = literal[int] , identifier[center_y] = literal[int] ):
literal[string]
identifier[Ra] , identifier[Rs] = identifier[self] . identifier[_sort_ra_rs] ( identifier[Ra] , identifier[Rs] )
identifier[x_] = identifier[x] - identifier[center_x]
identifier[y_] = identifier[y] - identifier[center_y]
identifier[r] = identifier[np] . identifier[sqrt] ( identifier[x_] ** literal[int] + identifier[y_] ** literal[int] )
identifier[pot] = literal[int] * identifier[np] . identifier[pi] * identifier[rho0] * identifier[Ra] ** literal[int] * identifier[Rs] ** literal[int] /( identifier[Rs] ** literal[int] - identifier[Ra] ** literal[int] )*( identifier[Rs] / identifier[r] * identifier[np] . identifier[arctan] ( identifier[r] / identifier[Rs] )- identifier[Ra] / identifier[r] * identifier[np] . identifier[arctan] ( identifier[r] / identifier[Ra] )
+ literal[int] / literal[int] * identifier[np] . identifier[log] (( identifier[Rs] ** literal[int] + identifier[r] ** literal[int] )/( identifier[Ra] ** literal[int] + identifier[r] ** literal[int] )))
keyword[return] identifier[pot] | def grav_pot(self, x, y, rho0, Ra, Rs, center_x=0, center_y=0):
"""
gravitational potential (modulo 4 pi G and rho0 in appropriate units)
:param x:
:param y:
:param rho0:
:param Ra:
:param Rs:
:param center_x:
:param center_y:
:return:
"""
(Ra, Rs) = self._sort_ra_rs(Ra, Rs)
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_ ** 2 + y_ ** 2)
pot = 4 * np.pi * rho0 * Ra ** 2 * Rs ** 2 / (Rs ** 2 - Ra ** 2) * (Rs / r * np.arctan(r / Rs) - Ra / r * np.arctan(r / Ra) + 1.0 / 2 * np.log((Rs ** 2 + r ** 2) / (Ra ** 2 + r ** 2)))
return pot |
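
In conventional notation, and with `Ra`, `Rs` ordered by `_sort_ra_rs` (presumably so the denominator stays positive), the expression computed above reads:

```latex
\Phi(r) = \frac{4\pi \rho_0 R_a^2 R_s^2}{R_s^2 - R_a^2}
\left[ \frac{R_s}{r}\arctan\frac{r}{R_s}
     - \frac{R_a}{r}\arctan\frac{r}{R_a}
     + \frac{1}{2}\ln\frac{R_s^2 + r^2}{R_a^2 + r^2} \right],
\qquad r = \sqrt{(x - x_c)^2 + (y - y_c)^2}
```

modulo $4\pi G$ and the units of $\rho_0$, as the docstring notes.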
def verify_checksum(self):
"""
Verify the checksum in the header for this HDU.
"""
res = self._FITS.verify_checksum(self._ext+1)
if res['dataok'] != 1:
raise ValueError("data checksum failed")
if res['hduok'] != 1:
raise ValueError("hdu checksum failed") | def function[verify_checksum, parameter[self]]:
constant[
Verify the checksum in the header for this HDU.
]
variable[res] assign[=] call[name[self]._FITS.verify_checksum, parameter[binary_operation[name[self]._ext + constant[1]]]]
if compare[call[name[res]][constant[dataok]] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da18bc73c40>
if compare[call[name[res]][constant[hduok]] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da18bc718a0> | keyword[def] identifier[verify_checksum] ( identifier[self] ):
literal[string]
identifier[res] = identifier[self] . identifier[_FITS] . identifier[verify_checksum] ( identifier[self] . identifier[_ext] + literal[int] )
keyword[if] identifier[res] [ literal[string] ]!= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[res] [ literal[string] ]!= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def verify_checksum(self):
"""
Verify the checksum in the header for this HDU.
"""
res = self._FITS.verify_checksum(self._ext + 1)
if res['dataok'] != 1:
raise ValueError('data checksum failed') # depends on [control=['if'], data=[]]
if res['hduok'] != 1:
raise ValueError('hdu checksum failed') # depends on [control=['if'], data=[]] |
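
A hedged usage sketch of checksum verification, assuming the fitsio-style API this snippet belongs to; the file name is hypothetical:

```python
import fitsio

with fitsio.FITS("observation.fits") as fits:  # hypothetical file
    try:
        fits[0].verify_checksum()              # raises ValueError on mismatch
    except ValueError as err:
        print("checksum failed:", err)
```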
def search(
self,
token: dict = None,
query: str = "",
bbox: list = None,
poly: str = None,
georel: str = None,
order_by: str = "_created",
order_dir: str = "desc",
page_size: int = 100,
offset: int = 0,
share: str = None,
specific_md: list = [],
include: list = [],
whole_share: bool = True,
check: bool = True,
augment: bool = False,
tags_as_dicts: bool = False,
prot: str = "https",
) -> dict:
"""Search within the resources shared to the application.
It's the main method to use.
:param str token: API auth token - DEPRECATED: token is now automatically included
:param str query: search terms and semantic filters. Equivalent of
**q** parameter in Isogeo API. It could be a simple
string like *oil* or a tag like *keyword:isogeo:formations*
or *keyword:inspire-theme:landcover*. The *AND* operator
is applied when various tags are passed.
:param list bbox: Bounding box to limit the search.
            Must be a list of 4 coordinates in WGS84 (EPSG 4326).
Could be associated with *georel*.
:param str poly: Geographic criteria for the search, in WKT format.
Could be associated with *georel*.
:param str georel: geometric operator to apply to the bbox or poly
parameters.
Available values (see: *isogeo.GEORELATIONS*):
* 'contains',
* 'disjoint',
* 'equals',
* 'intersects' - [APPLIED BY API if NOT SPECIFIED]
* 'overlaps',
* 'within'.
:param str order_by: sorting results.
Available values:
* '_created': metadata creation date [DEFAULT if relevance is null]
* '_modified': metadata last update
* 'title': metadata title
* 'created': data creation date (possibly None)
* 'modified': data last update date
* 'relevance': relevance score calculated by API [DEFAULT].
:param str order_dir: sorting direction.
Available values:
* 'desc': descending
* 'asc': ascending
:param int page_size: limits the number of results.
Useful to paginate results display. Default value: 100.
:param int offset: offset to start page size
from a specific results index
:param str share: share UUID to filter on
:param list specific_md: list of metadata UUIDs to filter on
:param list include: subresources that should be returned.
Must be a list of strings. Available values: *isogeo.SUBRESOURCES*
:param bool whole_share: option to return all results or only the
page size. *True* by DEFAULT.
        :param bool check: option to check query parameters and avoid errors.
*True* by DEFAULT.
:param bool augment: option to improve API response by adding
some tags on the fly (like shares_id)
:param bool tags_as_dicts: option to store tags as key/values by filter.
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# specific resources specific parsing
specific_md = checker._check_filter_specific_md(specific_md)
# sub resources specific parsing
include = checker._check_filter_includes(include)
# handling request parameters
payload = {
"_id": specific_md,
"_include": include,
"_lang": self.lang,
"_limit": page_size,
"_offset": offset,
"box": bbox,
"geo": poly,
"rel": georel,
"ob": order_by,
"od": order_dir,
"q": query,
"s": share,
}
if check:
checker.check_request_parameters(payload)
else:
pass
# search request
search_url = "{}://v1.{}.isogeo.com/resources/search".format(prot, self.api_url)
try:
search_req = self.get(
search_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
except Exception as e:
logging.error(e)
raise Exception
# fast response check
checker.check_api_response(search_req)
# serializing result into dict and storing resources in variables
search_rez = search_req.json()
resources_count = search_rez.get("total") # total of metadatas shared
# handling Isogeo API pagination
# see: http://help.isogeo.com/api/fr/methods/pagination.html
if resources_count > page_size and whole_share:
# if API returned more than one page of results, let's get the rest!
metadatas = [] # a recipient list
payload["_limit"] = 100 # now it'll get pages of 100 resources
# let's parse pages
for idx in range(0, int(ceil(resources_count / 100)) + 1):
payload["_offset"] = idx * 100
search_req = self.get(
search_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# storing results by addition
metadatas.extend(search_req.json().get("results"))
search_rez["results"] = metadatas
else:
pass
# add shares to tags and query
if augment:
self.add_tags_shares(search_rez.get("tags"))
if share:
search_rez.get("query")["_shares"] = [share]
else:
search_rez.get("query")["_shares"] = []
else:
pass
# store tags in dicts
if tags_as_dicts:
new_tags = utils.tags_to_dict(
tags=search_rez.get("tags"), prev_query=search_rez.get("query")
)
# clear
search_rez.get("tags").clear()
search_rez.get("query").clear()
# update
search_rez.get("tags").update(new_tags[0])
search_rez.get("query").update(new_tags[1])
else:
pass
# end of method
return search_rez | def function[search, parameter[self, token, query, bbox, poly, georel, order_by, order_dir, page_size, offset, share, specific_md, include, whole_share, check, augment, tags_as_dicts, prot]]:
constant[Search within the resources shared to the application.
It's the main method to use.
:param str token: API auth token - DEPRECATED: token is now automatically included
:param str query: search terms and semantic filters. Equivalent of
**q** parameter in Isogeo API. It could be a simple
string like *oil* or a tag like *keyword:isogeo:formations*
or *keyword:inspire-theme:landcover*. The *AND* operator
is applied when various tags are passed.
:param list bbox: Bounding box to limit the search.
            Must be a list of 4 coordinates in WGS84 (EPSG 4326).
Could be associated with *georel*.
:param str poly: Geographic criteria for the search, in WKT format.
Could be associated with *georel*.
:param str georel: geometric operator to apply to the bbox or poly
parameters.
Available values (see: *isogeo.GEORELATIONS*):
* 'contains',
* 'disjoint',
* 'equals',
* 'intersects' - [APPLIED BY API if NOT SPECIFIED]
* 'overlaps',
* 'within'.
:param str order_by: sorting results.
Available values:
* '_created': metadata creation date [DEFAULT if relevance is null]
* '_modified': metadata last update
* 'title': metadata title
* 'created': data creation date (possibly None)
* 'modified': data last update date
* 'relevance': relevance score calculated by API [DEFAULT].
:param str order_dir: sorting direction.
Available values:
* 'desc': descending
* 'asc': ascending
:param int page_size: limits the number of results.
Useful to paginate results display. Default value: 100.
:param int offset: offset to start page size
from a specific results index
:param str share: share UUID to filter on
:param list specific_md: list of metadata UUIDs to filter on
:param list include: subresources that should be returned.
Must be a list of strings. Available values: *isogeo.SUBRESOURCES*
:param bool whole_share: option to return all results or only the
page size. *True* by DEFAULT.
        :param bool check: option to check query parameters and avoid errors.
*True* by DEFAULT.
:param bool augment: option to improve API response by adding
some tags on the fly (like shares_id)
:param bool tags_as_dicts: option to store tags as key/values by filter.
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
]
variable[specific_md] assign[=] call[name[checker]._check_filter_specific_md, parameter[name[specific_md]]]
variable[include] assign[=] call[name[checker]._check_filter_includes, parameter[name[include]]]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b10e7b50>, <ast.Constant object at 0x7da1b10e7520>, <ast.Constant object at 0x7da1b10e5de0>, <ast.Constant object at 0x7da1b10e5d80>, <ast.Constant object at 0x7da1b10e6e30>, <ast.Constant object at 0x7da1b10ee260>, <ast.Constant object at 0x7da1b10ecc10>, <ast.Constant object at 0x7da1b10ef340>, <ast.Constant object at 0x7da1b10edae0>, <ast.Constant object at 0x7da1b10ef640>, <ast.Constant object at 0x7da1b10efb80>, <ast.Constant object at 0x7da1b10ef220>], [<ast.Name object at 0x7da1b10ef940>, <ast.Name object at 0x7da1b10ee3e0>, <ast.Attribute object at 0x7da1b10eee60>, <ast.Name object at 0x7da1b10ee2c0>, <ast.Name object at 0x7da1b10ef580>, <ast.Name object at 0x7da1b10ec7c0>, <ast.Name object at 0x7da1b10edb70>, <ast.Name object at 0x7da1b10efd60>, <ast.Name object at 0x7da1b10ecd90>, <ast.Name object at 0x7da1b10a7d60>, <ast.Name object at 0x7da1b10a68c0>, <ast.Name object at 0x7da1b10a68f0>]]
if name[check] begin[:]
call[name[checker].check_request_parameters, parameter[name[payload]]]
variable[search_url] assign[=] call[constant[{}://v1.{}.isogeo.com/resources/search].format, parameter[name[prot], name[self].api_url]]
<ast.Try object at 0x7da1b10a56f0>
call[name[checker].check_api_response, parameter[name[search_req]]]
variable[search_rez] assign[=] call[name[search_req].json, parameter[]]
variable[resources_count] assign[=] call[name[search_rez].get, parameter[constant[total]]]
if <ast.BoolOp object at 0x7da1b10a67a0> begin[:]
variable[metadatas] assign[=] list[[]]
call[name[payload]][constant[_limit]] assign[=] constant[100]
for taget[name[idx]] in starred[call[name[range], parameter[constant[0], binary_operation[call[name[int], parameter[call[name[ceil], parameter[binary_operation[name[resources_count] / constant[100]]]]]] + constant[1]]]]] begin[:]
call[name[payload]][constant[_offset]] assign[=] binary_operation[name[idx] * constant[100]]
variable[search_req] assign[=] call[name[self].get, parameter[name[search_url]]]
call[name[metadatas].extend, parameter[call[call[name[search_req].json, parameter[]].get, parameter[constant[results]]]]]
call[name[search_rez]][constant[results]] assign[=] name[metadatas]
if name[augment] begin[:]
call[name[self].add_tags_shares, parameter[call[name[search_rez].get, parameter[constant[tags]]]]]
if name[share] begin[:]
call[call[name[search_rez].get, parameter[constant[query]]]][constant[_shares]] assign[=] list[[<ast.Name object at 0x7da1b10a78b0>]]
if name[tags_as_dicts] begin[:]
variable[new_tags] assign[=] call[name[utils].tags_to_dict, parameter[]]
call[call[name[search_rez].get, parameter[constant[tags]]].clear, parameter[]]
call[call[name[search_rez].get, parameter[constant[query]]].clear, parameter[]]
call[call[name[search_rez].get, parameter[constant[tags]]].update, parameter[call[name[new_tags]][constant[0]]]]
call[call[name[search_rez].get, parameter[constant[query]]].update, parameter[call[name[new_tags]][constant[1]]]]
return[name[search_rez]] | keyword[def] identifier[search] (
identifier[self] ,
identifier[token] : identifier[dict] = keyword[None] ,
identifier[query] : identifier[str] = literal[string] ,
identifier[bbox] : identifier[list] = keyword[None] ,
identifier[poly] : identifier[str] = keyword[None] ,
identifier[georel] : identifier[str] = keyword[None] ,
identifier[order_by] : identifier[str] = literal[string] ,
identifier[order_dir] : identifier[str] = literal[string] ,
identifier[page_size] : identifier[int] = literal[int] ,
identifier[offset] : identifier[int] = literal[int] ,
identifier[share] : identifier[str] = keyword[None] ,
identifier[specific_md] : identifier[list] =[],
identifier[include] : identifier[list] =[],
identifier[whole_share] : identifier[bool] = keyword[True] ,
identifier[check] : identifier[bool] = keyword[True] ,
identifier[augment] : identifier[bool] = keyword[False] ,
identifier[tags_as_dicts] : identifier[bool] = keyword[False] ,
identifier[prot] : identifier[str] = literal[string] ,
)-> identifier[dict] :
literal[string]
identifier[specific_md] = identifier[checker] . identifier[_check_filter_specific_md] ( identifier[specific_md] )
identifier[include] = identifier[checker] . identifier[_check_filter_includes] ( identifier[include] )
identifier[payload] ={
literal[string] : identifier[specific_md] ,
literal[string] : identifier[include] ,
literal[string] : identifier[self] . identifier[lang] ,
literal[string] : identifier[page_size] ,
literal[string] : identifier[offset] ,
literal[string] : identifier[bbox] ,
literal[string] : identifier[poly] ,
literal[string] : identifier[georel] ,
literal[string] : identifier[order_by] ,
literal[string] : identifier[order_dir] ,
literal[string] : identifier[query] ,
literal[string] : identifier[share] ,
}
keyword[if] identifier[check] :
identifier[checker] . identifier[check_request_parameters] ( identifier[payload] )
keyword[else] :
keyword[pass]
identifier[search_url] = literal[string] . identifier[format] ( identifier[prot] , identifier[self] . identifier[api_url] )
keyword[try] :
identifier[search_req] = identifier[self] . identifier[get] (
identifier[search_url] ,
identifier[headers] = identifier[self] . identifier[header] ,
identifier[params] = identifier[payload] ,
identifier[proxies] = identifier[self] . identifier[proxies] ,
identifier[verify] = identifier[self] . identifier[ssl] ,
)
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logging] . identifier[error] ( identifier[e] )
keyword[raise] identifier[Exception]
identifier[checker] . identifier[check_api_response] ( identifier[search_req] )
identifier[search_rez] = identifier[search_req] . identifier[json] ()
identifier[resources_count] = identifier[search_rez] . identifier[get] ( literal[string] )
keyword[if] identifier[resources_count] > identifier[page_size] keyword[and] identifier[whole_share] :
identifier[metadatas] =[]
identifier[payload] [ literal[string] ]= literal[int]
keyword[for] identifier[idx] keyword[in] identifier[range] ( literal[int] , identifier[int] ( identifier[ceil] ( identifier[resources_count] / literal[int] ))+ literal[int] ):
identifier[payload] [ literal[string] ]= identifier[idx] * literal[int]
identifier[search_req] = identifier[self] . identifier[get] (
identifier[search_url] ,
identifier[headers] = identifier[self] . identifier[header] ,
identifier[params] = identifier[payload] ,
identifier[proxies] = identifier[self] . identifier[proxies] ,
identifier[verify] = identifier[self] . identifier[ssl] ,
)
identifier[metadatas] . identifier[extend] ( identifier[search_req] . identifier[json] (). identifier[get] ( literal[string] ))
identifier[search_rez] [ literal[string] ]= identifier[metadatas]
keyword[else] :
keyword[pass]
keyword[if] identifier[augment] :
identifier[self] . identifier[add_tags_shares] ( identifier[search_rez] . identifier[get] ( literal[string] ))
keyword[if] identifier[share] :
identifier[search_rez] . identifier[get] ( literal[string] )[ literal[string] ]=[ identifier[share] ]
keyword[else] :
identifier[search_rez] . identifier[get] ( literal[string] )[ literal[string] ]=[]
keyword[else] :
keyword[pass]
keyword[if] identifier[tags_as_dicts] :
identifier[new_tags] = identifier[utils] . identifier[tags_to_dict] (
identifier[tags] = identifier[search_rez] . identifier[get] ( literal[string] ), identifier[prev_query] = identifier[search_rez] . identifier[get] ( literal[string] )
)
identifier[search_rez] . identifier[get] ( literal[string] ). identifier[clear] ()
identifier[search_rez] . identifier[get] ( literal[string] ). identifier[clear] ()
identifier[search_rez] . identifier[get] ( literal[string] ). identifier[update] ( identifier[new_tags] [ literal[int] ])
identifier[search_rez] . identifier[get] ( literal[string] ). identifier[update] ( identifier[new_tags] [ literal[int] ])
keyword[else] :
keyword[pass]
keyword[return] identifier[search_rez] | def search(self, token: dict=None, query: str='', bbox: list=None, poly: str=None, georel: str=None, order_by: str='_created', order_dir: str='desc', page_size: int=100, offset: int=0, share: str=None, specific_md: list=[], include: list=[], whole_share: bool=True, check: bool=True, augment: bool=False, tags_as_dicts: bool=False, prot: str='https') -> dict:
"""Search within the resources shared to the application.
It's the main method to use.
:param str token: API auth token - DEPRECATED: token is now automatically included
:param str query: search terms and semantic filters. Equivalent of
**q** parameter in Isogeo API. It could be a simple
string like *oil* or a tag like *keyword:isogeo:formations*
or *keyword:inspire-theme:landcover*. The *AND* operator
is applied when various tags are passed.
:param list bbox: Bounding box to limit the search.
            Must be a list of 4 coordinates in WGS84 (EPSG 4326).
Could be associated with *georel*.
:param str poly: Geographic criteria for the search, in WKT format.
Could be associated with *georel*.
:param str georel: geometric operator to apply to the bbox or poly
parameters.
Available values (see: *isogeo.GEORELATIONS*):
* 'contains',
* 'disjoint',
* 'equals',
* 'intersects' - [APPLIED BY API if NOT SPECIFIED]
* 'overlaps',
* 'within'.
:param str order_by: sorting results.
Available values:
* '_created': metadata creation date [DEFAULT if relevance is null]
* '_modified': metadata last update
* 'title': metadata title
* 'created': data creation date (possibly None)
* 'modified': data last update date
* 'relevance': relevance score calculated by API [DEFAULT].
:param str order_dir: sorting direction.
Available values:
* 'desc': descending
* 'asc': ascending
:param int page_size: limits the number of results.
Useful to paginate results display. Default value: 100.
:param int offset: offset to start page size
from a specific results index
:param str share: share UUID to filter on
:param list specific_md: list of metadata UUIDs to filter on
:param list include: subresources that should be returned.
Must be a list of strings. Available values: *isogeo.SUBRESOURCES*
:param bool whole_share: option to return all results or only the
page size. *True* by DEFAULT.
        :param bool check: option to check query parameters and avoid errors.
*True* by DEFAULT.
:param bool augment: option to improve API response by adding
some tags on the fly (like shares_id)
:param bool tags_as_dicts: option to store tags as key/values by filter.
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# specific resources specific parsing
specific_md = checker._check_filter_specific_md(specific_md)
# sub resources specific parsing
include = checker._check_filter_includes(include)
# handling request parameters
payload = {'_id': specific_md, '_include': include, '_lang': self.lang, '_limit': page_size, '_offset': offset, 'box': bbox, 'geo': poly, 'rel': georel, 'ob': order_by, 'od': order_dir, 'q': query, 's': share}
if check:
checker.check_request_parameters(payload) # depends on [control=['if'], data=[]]
else:
pass
# search request
search_url = '{}://v1.{}.isogeo.com/resources/search'.format(prot, self.api_url)
try:
search_req = self.get(search_url, headers=self.header, params=payload, proxies=self.proxies, verify=self.ssl) # depends on [control=['try'], data=[]]
except Exception as e:
logging.error(e)
raise Exception # depends on [control=['except'], data=['e']]
# fast response check
checker.check_api_response(search_req)
# serializing result into dict and storing resources in variables
search_rez = search_req.json()
resources_count = search_rez.get('total') # total of metadatas shared
# handling Isogeo API pagination
# see: http://help.isogeo.com/api/fr/methods/pagination.html
if resources_count > page_size and whole_share:
# if API returned more than one page of results, let's get the rest!
metadatas = [] # a recipient list
payload['_limit'] = 100 # now it'll get pages of 100 resources
# let's parse pages
for idx in range(0, int(ceil(resources_count / 100)) + 1):
payload['_offset'] = idx * 100
search_req = self.get(search_url, headers=self.header, params=payload, proxies=self.proxies, verify=self.ssl)
# storing results by addition
metadatas.extend(search_req.json().get('results')) # depends on [control=['for'], data=['idx']]
search_rez['results'] = metadatas # depends on [control=['if'], data=[]]
else:
pass
# add shares to tags and query
if augment:
self.add_tags_shares(search_rez.get('tags'))
if share:
search_rez.get('query')['_shares'] = [share] # depends on [control=['if'], data=[]]
else:
search_rez.get('query')['_shares'] = [] # depends on [control=['if'], data=[]]
else:
pass
# store tags in dicts
if tags_as_dicts:
new_tags = utils.tags_to_dict(tags=search_rez.get('tags'), prev_query=search_rez.get('query'))
# clear
search_rez.get('tags').clear()
search_rez.get('query').clear()
# update
search_rez.get('tags').update(new_tags[0])
search_rez.get('query').update(new_tags[1]) # depends on [control=['if'], data=[]]
else:
pass
# end of method
return search_rez |
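
The pagination loop above walks offsets in steps of 100. A minimal sketch of the offsets requested for a hypothetical total of 230 results; note that under Python 3's true division the `+ 1` asks for one page past the last result (the expression was exact under Python 2 integer division):

```python
from math import ceil

resources_count = 230  # hypothetical total reported by the API
offsets = [idx * 100 for idx in range(0, int(ceil(resources_count / 100)) + 1)]
print(offsets)  # [0, 100, 200, 300] -- offset 300 is already past the end
```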
def load_params(
self, f=None, f_params=None, f_optimizer=None, f_history=None,
checkpoint=None):
"""Loads the the module's parameters, history, and optimizer,
not the whole object.
To save and load the whole object, use pickle.
        ``f_params`` and ``f_optimizer`` use PyTorch's
:func:`~torch.save`.
Parameters
----------
f_params : file-like object, str, None (default=None)
Path of module parameters. Pass ``None`` to not load.
f_optimizer : file-like object, str, None (default=None)
Path of optimizer. Pass ``None`` to not load.
f_history : file-like object, str, None (default=None)
Path to history. Pass ``None`` to not load.
checkpoint : :class:`.Checkpoint`, None (default=None)
Checkpoint to load params from. If a checkpoint and a ``f_*``
path is passed in, the ``f_*`` will be loaded. Pass
``None`` to not load.
f : deprecated
Examples
--------
>>> before = NeuralNetClassifier(mymodule)
>>> before.save_params(f_params='model.pkl',
>>> f_optimizer='optimizer.pkl',
>>> f_history='history.json')
>>> after = NeuralNetClassifier(mymodule).initialize()
>>> after.load_params(f_params='model.pkl',
>>> f_optimizer='optimizer.pkl',
>>> f_history='history.json')
"""
def _get_state_dict(f):
map_location = get_map_location(self.device)
self.device = self._check_device(self.device, map_location)
return torch.load(f, map_location=map_location)
# TODO: Remove warning in a future release
if f is not None:
warnings.warn(
"f is deprecated in save_params and will be removed in the "
"next release, please use f_params instead",
DeprecationWarning)
f_params = f
if f_history is not None:
self.history = History.from_file(f_history)
if checkpoint is not None:
if f_history is None and checkpoint.f_history is not None:
self.history = History.from_file(checkpoint.f_history_)
formatted_files = checkpoint.get_formatted_files(self)
f_params = f_params or formatted_files['f_params']
f_optimizer = f_optimizer or formatted_files['f_optimizer']
if f_params is not None:
if not hasattr(self, 'module_'):
raise NotInitializedError(
"Cannot load parameters of an un-initialized model. "
"Please initialize first by calling .initialize() "
"or by fitting the model with .fit(...).")
state_dict = _get_state_dict(f_params)
self.module_.load_state_dict(state_dict)
if f_optimizer is not None:
if not hasattr(self, 'optimizer_'):
raise NotInitializedError(
"Cannot load state of an un-initialized optimizer. "
"Please initialize first by calling .initialize() "
"or by fitting the model with .fit(...).")
state_dict = _get_state_dict(f_optimizer)
self.optimizer_.load_state_dict(state_dict) | def function[load_params, parameter[self, f, f_params, f_optimizer, f_history, checkpoint]]:
    constant[Loads the module's parameters, history, and optimizer,
not the whole object.
To save and load the whole object, use pickle.
    ``f_params`` and ``f_optimizer`` use PyTorch's
:func:`~torch.save`.
Parameters
----------
f_params : file-like object, str, None (default=None)
Path of module parameters. Pass ``None`` to not load.
f_optimizer : file-like object, str, None (default=None)
Path of optimizer. Pass ``None`` to not load.
f_history : file-like object, str, None (default=None)
Path to history. Pass ``None`` to not load.
checkpoint : :class:`.Checkpoint`, None (default=None)
Checkpoint to load params from. If a checkpoint and a ``f_*``
path is passed in, the ``f_*`` will be loaded. Pass
``None`` to not load.
f : deprecated
Examples
--------
>>> before = NeuralNetClassifier(mymodule)
>>> before.save_params(f_params='model.pkl',
>>> f_optimizer='optimizer.pkl',
>>> f_history='history.json')
>>> after = NeuralNetClassifier(mymodule).initialize()
>>> after.load_params(f_params='model.pkl',
>>> f_optimizer='optimizer.pkl',
>>> f_history='history.json')
]
def function[_get_state_dict, parameter[f]]:
variable[map_location] assign[=] call[name[get_map_location], parameter[name[self].device]]
name[self].device assign[=] call[name[self]._check_device, parameter[name[self].device, name[map_location]]]
return[call[name[torch].load, parameter[name[f]]]]
if compare[name[f] is_not constant[None]] begin[:]
call[name[warnings].warn, parameter[constant[f is deprecated in save_params and will be removed in the next release, please use f_params instead], name[DeprecationWarning]]]
variable[f_params] assign[=] name[f]
if compare[name[f_history] is_not constant[None]] begin[:]
name[self].history assign[=] call[name[History].from_file, parameter[name[f_history]]]
if compare[name[checkpoint] is_not constant[None]] begin[:]
if <ast.BoolOp object at 0x7da18eb55b10> begin[:]
name[self].history assign[=] call[name[History].from_file, parameter[name[checkpoint].f_history_]]
variable[formatted_files] assign[=] call[name[checkpoint].get_formatted_files, parameter[name[self]]]
variable[f_params] assign[=] <ast.BoolOp object at 0x7da18eb55ab0>
variable[f_optimizer] assign[=] <ast.BoolOp object at 0x7da18eb57430>
if compare[name[f_params] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da18eb56ef0> begin[:]
<ast.Raise object at 0x7da18eb55000>
variable[state_dict] assign[=] call[name[_get_state_dict], parameter[name[f_params]]]
call[name[self].module_.load_state_dict, parameter[name[state_dict]]]
if compare[name[f_optimizer] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da18eb56a70> begin[:]
<ast.Raise object at 0x7da18eb54b50>
variable[state_dict] assign[=] call[name[_get_state_dict], parameter[name[f_optimizer]]]
call[name[self].optimizer_.load_state_dict, parameter[name[state_dict]]] | keyword[def] identifier[load_params] (
identifier[self] , identifier[f] = keyword[None] , identifier[f_params] = keyword[None] , identifier[f_optimizer] = keyword[None] , identifier[f_history] = keyword[None] ,
identifier[checkpoint] = keyword[None] ):
literal[string]
keyword[def] identifier[_get_state_dict] ( identifier[f] ):
identifier[map_location] = identifier[get_map_location] ( identifier[self] . identifier[device] )
identifier[self] . identifier[device] = identifier[self] . identifier[_check_device] ( identifier[self] . identifier[device] , identifier[map_location] )
keyword[return] identifier[torch] . identifier[load] ( identifier[f] , identifier[map_location] = identifier[map_location] )
keyword[if] identifier[f] keyword[is] keyword[not] keyword[None] :
identifier[warnings] . identifier[warn] (
literal[string]
literal[string] ,
identifier[DeprecationWarning] )
identifier[f_params] = identifier[f]
keyword[if] identifier[f_history] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[history] = identifier[History] . identifier[from_file] ( identifier[f_history] )
keyword[if] identifier[checkpoint] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[f_history] keyword[is] keyword[None] keyword[and] identifier[checkpoint] . identifier[f_history] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[history] = identifier[History] . identifier[from_file] ( identifier[checkpoint] . identifier[f_history_] )
identifier[formatted_files] = identifier[checkpoint] . identifier[get_formatted_files] ( identifier[self] )
identifier[f_params] = identifier[f_params] keyword[or] identifier[formatted_files] [ literal[string] ]
identifier[f_optimizer] = identifier[f_optimizer] keyword[or] identifier[formatted_files] [ literal[string] ]
keyword[if] identifier[f_params] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[raise] identifier[NotInitializedError] (
literal[string]
literal[string]
literal[string] )
identifier[state_dict] = identifier[_get_state_dict] ( identifier[f_params] )
identifier[self] . identifier[module_] . identifier[load_state_dict] ( identifier[state_dict] )
keyword[if] identifier[f_optimizer] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[raise] identifier[NotInitializedError] (
literal[string]
literal[string]
literal[string] )
identifier[state_dict] = identifier[_get_state_dict] ( identifier[f_optimizer] )
identifier[self] . identifier[optimizer_] . identifier[load_state_dict] ( identifier[state_dict] ) | def load_params(self, f=None, f_params=None, f_optimizer=None, f_history=None, checkpoint=None):
"""Loads the the module's parameters, history, and optimizer,
not the whole object.
To save and load the whole object, use pickle.
``f_params`` and ``f_optimizer`` uses PyTorchs'
:func:`~torch.save`.
Parameters
----------
f_params : file-like object, str, None (default=None)
Path of module parameters. Pass ``None`` to not load.
f_optimizer : file-like object, str, None (default=None)
Path of optimizer. Pass ``None`` to not load.
f_history : file-like object, str, None (default=None)
Path to history. Pass ``None`` to not load.
checkpoint : :class:`.Checkpoint`, None (default=None)
Checkpoint to load params from. If a checkpoint and a ``f_*``
path is passed in, the ``f_*`` will be loaded. Pass
``None`` to not load.
f : deprecated
Examples
--------
>>> before = NeuralNetClassifier(mymodule)
>>> before.save_params(f_params='model.pkl',
>>> f_optimizer='optimizer.pkl',
>>> f_history='history.json')
>>> after = NeuralNetClassifier(mymodule).initialize()
>>> after.load_params(f_params='model.pkl',
>>> f_optimizer='optimizer.pkl',
>>> f_history='history.json')
"""
def _get_state_dict(f):
map_location = get_map_location(self.device)
self.device = self._check_device(self.device, map_location)
return torch.load(f, map_location=map_location)
# TODO: Remove warning in a future release
if f is not None:
warnings.warn('f is deprecated in save_params and will be removed in the next release, please use f_params instead', DeprecationWarning)
f_params = f # depends on [control=['if'], data=['f']]
if f_history is not None:
self.history = History.from_file(f_history) # depends on [control=['if'], data=['f_history']]
if checkpoint is not None:
if f_history is None and checkpoint.f_history is not None:
self.history = History.from_file(checkpoint.f_history_) # depends on [control=['if'], data=[]]
formatted_files = checkpoint.get_formatted_files(self)
f_params = f_params or formatted_files['f_params']
f_optimizer = f_optimizer or formatted_files['f_optimizer'] # depends on [control=['if'], data=['checkpoint']]
if f_params is not None:
if not hasattr(self, 'module_'):
raise NotInitializedError('Cannot load parameters of an un-initialized model. Please initialize first by calling .initialize() or by fitting the model with .fit(...).') # depends on [control=['if'], data=[]]
state_dict = _get_state_dict(f_params)
self.module_.load_state_dict(state_dict) # depends on [control=['if'], data=['f_params']]
if f_optimizer is not None:
if not hasattr(self, 'optimizer_'):
raise NotInitializedError('Cannot load state of an un-initialized optimizer. Please initialize first by calling .initialize() or by fitting the model with .fit(...).') # depends on [control=['if'], data=[]]
state_dict = _get_state_dict(f_optimizer)
self.optimizer_.load_state_dict(state_dict) # depends on [control=['if'], data=['f_optimizer']] |
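
A short usage sketch for the checkpoint branch described above. It assumes skorch's Checkpoint callback; MyModule, X, and y are placeholders for a torch.nn.Module class and fit data, not names from the source.

from skorch import NeuralNetClassifier
from skorch.callbacks import Checkpoint

cp = Checkpoint(dirname='exp1')                      # saves params/optimizer/history on improvement
net = NeuralNetClassifier(MyModule, callbacks=[cp])  # MyModule is a placeholder module class
net.fit(X, y)

# restore everything the checkpoint captured into a fresh, initialized net
restored = NeuralNetClassifier(MyModule).initialize()
restored.load_params(checkpoint=cp)
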
def DCM_update(IMU, ATT, MAG, GPS):
'''implement full DCM system'''
global dcm_state
if dcm_state is None:
dcm_state = DCM_State(ATT.Roll, ATT.Pitch, ATT.Yaw)
mag = Vector3(MAG.MagX, MAG.MagY, MAG.MagZ)
gyro = Vector3(IMU.GyrX, IMU.GyrY, IMU.GyrZ)
accel = Vector3(IMU.AccX, IMU.AccY, IMU.AccZ)
    accel2 = Vector3(IMU.AccX, IMU.AccY, IMU.AccZ)  # duplicate of accel; currently unused
dcm_state.update(gyro, accel, mag, GPS)
return dcm_state | def function[DCM_update, parameter[IMU, ATT, MAG, GPS]]:
constant[implement full DCM system]
<ast.Global object at 0x7da20c991180>
if compare[name[dcm_state] is constant[None]] begin[:]
variable[dcm_state] assign[=] call[name[DCM_State], parameter[name[ATT].Roll, name[ATT].Pitch, name[ATT].Yaw]]
variable[mag] assign[=] call[name[Vector3], parameter[name[MAG].MagX, name[MAG].MagY, name[MAG].MagZ]]
variable[gyro] assign[=] call[name[Vector3], parameter[name[IMU].GyrX, name[IMU].GyrY, name[IMU].GyrZ]]
variable[accel] assign[=] call[name[Vector3], parameter[name[IMU].AccX, name[IMU].AccY, name[IMU].AccZ]]
variable[accel2] assign[=] call[name[Vector3], parameter[name[IMU].AccX, name[IMU].AccY, name[IMU].AccZ]]
call[name[dcm_state].update, parameter[name[gyro], name[accel], name[mag], name[GPS]]]
return[name[dcm_state]] | keyword[def] identifier[DCM_update] ( identifier[IMU] , identifier[ATT] , identifier[MAG] , identifier[GPS] ):
literal[string]
keyword[global] identifier[dcm_state]
keyword[if] identifier[dcm_state] keyword[is] keyword[None] :
identifier[dcm_state] = identifier[DCM_State] ( identifier[ATT] . identifier[Roll] , identifier[ATT] . identifier[Pitch] , identifier[ATT] . identifier[Yaw] )
identifier[mag] = identifier[Vector3] ( identifier[MAG] . identifier[MagX] , identifier[MAG] . identifier[MagY] , identifier[MAG] . identifier[MagZ] )
identifier[gyro] = identifier[Vector3] ( identifier[IMU] . identifier[GyrX] , identifier[IMU] . identifier[GyrY] , identifier[IMU] . identifier[GyrZ] )
identifier[accel] = identifier[Vector3] ( identifier[IMU] . identifier[AccX] , identifier[IMU] . identifier[AccY] , identifier[IMU] . identifier[AccZ] )
identifier[accel2] = identifier[Vector3] ( identifier[IMU] . identifier[AccX] , identifier[IMU] . identifier[AccY] , identifier[IMU] . identifier[AccZ] )
identifier[dcm_state] . identifier[update] ( identifier[gyro] , identifier[accel] , identifier[mag] , identifier[GPS] )
keyword[return] identifier[dcm_state] | def DCM_update(IMU, ATT, MAG, GPS):
"""implement full DCM system"""
global dcm_state
if dcm_state is None:
dcm_state = DCM_State(ATT.Roll, ATT.Pitch, ATT.Yaw) # depends on [control=['if'], data=['dcm_state']]
mag = Vector3(MAG.MagX, MAG.MagY, MAG.MagZ)
gyro = Vector3(IMU.GyrX, IMU.GyrY, IMU.GyrZ)
accel = Vector3(IMU.AccX, IMU.AccY, IMU.AccZ)
    accel2 = Vector3(IMU.AccX, IMU.AccY, IMU.AccZ)  # duplicate of accel; currently unused
dcm_state.update(gyro, accel, mag, GPS)
return dcm_state |
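
To see how this is driven, here is a hedged sketch that replays a MAVLink log through DCM_update. The log name and the message-gathering loop are illustrative; only mavlink_connection, recv_match, and get_type are standard pymavlink calls.

from pymavlink import mavutil

mlog = mavutil.mavlink_connection('flight.bin')  # hypothetical log file
latest = {}
while True:
    m = mlog.recv_match(type=['IMU', 'ATT', 'MAG', 'GPS'])
    if m is None:
        break
    latest[m.get_type()] = m
    # update the filter once all four message types have been seen
    if len(latest) == 4:
        state = DCM_update(latest['IMU'], latest['ATT'], latest['MAG'], latest['GPS'])
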
def enterbox(msg="Enter something."
, title=" "
, default=""
, strip=True
, image=None
, root=None
):
"""
Show a box in which a user can enter some text.
You may optionally specify some default text, which will appear in the
enterbox when it is displayed.
    Returns the text that the user entered, or None if the user cancels the operation.
By default, enterbox strips its result (i.e. removes leading and trailing
whitespace). (If you want it not to strip, use keyword argument: strip=False.)
This makes it easier to test the results of the call::
reply = enterbox(....)
if reply:
...
else:
...
"""
result = __fillablebox(msg, title, default=default, mask=None,image=image,root=root)
if result and strip:
result = result.strip()
return result | def function[enterbox, parameter[msg, title, default, strip, image, root]]:
constant[
Show a box in which a user can enter some text.
You may optionally specify some default text, which will appear in the
enterbox when it is displayed.
    Returns the text that the user entered, or None if the user cancels the operation.
By default, enterbox strips its result (i.e. removes leading and trailing
whitespace). (If you want it not to strip, use keyword argument: strip=False.)
This makes it easier to test the results of the call::
reply = enterbox(....)
if reply:
...
else:
...
]
variable[result] assign[=] call[name[__fillablebox], parameter[name[msg], name[title]]]
if <ast.BoolOp object at 0x7da1b07aeb00> begin[:]
variable[result] assign[=] call[name[result].strip, parameter[]]
return[name[result]] | keyword[def] identifier[enterbox] ( identifier[msg] = literal[string]
, identifier[title] = literal[string]
, identifier[default] = literal[string]
, identifier[strip] = keyword[True]
, identifier[image] = keyword[None]
, identifier[root] = keyword[None]
):
literal[string]
identifier[result] = identifier[__fillablebox] ( identifier[msg] , identifier[title] , identifier[default] = identifier[default] , identifier[mask] = keyword[None] , identifier[image] = identifier[image] , identifier[root] = identifier[root] )
keyword[if] identifier[result] keyword[and] identifier[strip] :
identifier[result] = identifier[result] . identifier[strip] ()
keyword[return] identifier[result] | def enterbox(msg='Enter something.', title=' ', default='', strip=True, image=None, root=None):
"""
Show a box in which a user can enter some text.
You may optionally specify some default text, which will appear in the
enterbox when it is displayed.
Returns the text that the user entered, or None if he cancels the operation.
By default, enterbox strips its result (i.e. removes leading and trailing
whitespace). (If you want it not to strip, use keyword argument: strip=False.)
This makes it easier to test the results of the call::
reply = enterbox(....)
if reply:
...
else:
...
"""
result = __fillablebox(msg, title, default=default, mask=None, image=image, root=root)
if result and strip:
result = result.strip() # depends on [control=['if'], data=[]]
return result |
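
A minimal call following the pattern shown in the docstring; the prompt text, title, and default are arbitrary.

name = enterbox("What is your name?", title="Login", default="guest")
if name:
    print("Hello, %s" % name)
else:
    print("Cancelled.")
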
def initdict(self, fname):
"""create a blank dictionary"""
if isinstance(fname, Idd):
self.dt, self.dtls = fname.dt, fname.dtls
return self.dt, self.dtls
astr = mylib2.readfile(fname)
nocom = removecomment(astr, '!')
idfst = nocom
alist = idfst.split(';')
lss = []
for element in alist:
lst = element.split(',')
lss.append(lst)
for i in range(0, len(lss)):
for j in range(0, len(lss[i])):
lss[i][j] = lss[i][j].strip()
dt = {}
dtls = []
for element in lss:
if element[0] == '':
continue
dt[element[0].upper()] = []
dtls.append(element[0].upper())
self.dt, self.dtls = dt, dtls
return dt, dtls | def function[initdict, parameter[self, fname]]:
    constant[Create a blank dictionary keyed by IDD object names.]
if call[name[isinstance], parameter[name[fname], name[Idd]]] begin[:]
<ast.Tuple object at 0x7da1b11ed4e0> assign[=] tuple[[<ast.Attribute object at 0x7da1b11edfc0>, <ast.Attribute object at 0x7da1b11edea0>]]
return[tuple[[<ast.Attribute object at 0x7da1b11eca00>, <ast.Attribute object at 0x7da1b11efeb0>]]]
variable[astr] assign[=] call[name[mylib2].readfile, parameter[name[fname]]]
variable[nocom] assign[=] call[name[removecomment], parameter[name[astr], constant[!]]]
variable[idfst] assign[=] name[nocom]
variable[alist] assign[=] call[name[idfst].split, parameter[constant[;]]]
variable[lss] assign[=] list[[]]
for taget[name[element]] in starred[name[alist]] begin[:]
variable[lst] assign[=] call[name[element].split, parameter[constant[,]]]
call[name[lss].append, parameter[name[lst]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[lss]]]]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[call[name[lss]][name[i]]]]]]] begin[:]
call[call[name[lss]][name[i]]][name[j]] assign[=] call[call[call[name[lss]][name[i]]][name[j]].strip, parameter[]]
variable[dt] assign[=] dictionary[[], []]
variable[dtls] assign[=] list[[]]
for taget[name[element]] in starred[name[lss]] begin[:]
if compare[call[name[element]][constant[0]] equal[==] constant[]] begin[:]
continue
call[name[dt]][call[call[name[element]][constant[0]].upper, parameter[]]] assign[=] list[[]]
call[name[dtls].append, parameter[call[call[name[element]][constant[0]].upper, parameter[]]]]
<ast.Tuple object at 0x7da18bcc8820> assign[=] tuple[[<ast.Name object at 0x7da18bcc8970>, <ast.Name object at 0x7da20c76f310>]]
return[tuple[[<ast.Name object at 0x7da20cabc880>, <ast.Name object at 0x7da20cabdd80>]]] | keyword[def] identifier[initdict] ( identifier[self] , identifier[fname] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[fname] , identifier[Idd] ):
identifier[self] . identifier[dt] , identifier[self] . identifier[dtls] = identifier[fname] . identifier[dt] , identifier[fname] . identifier[dtls]
keyword[return] identifier[self] . identifier[dt] , identifier[self] . identifier[dtls]
identifier[astr] = identifier[mylib2] . identifier[readfile] ( identifier[fname] )
identifier[nocom] = identifier[removecomment] ( identifier[astr] , literal[string] )
identifier[idfst] = identifier[nocom]
identifier[alist] = identifier[idfst] . identifier[split] ( literal[string] )
identifier[lss] =[]
keyword[for] identifier[element] keyword[in] identifier[alist] :
identifier[lst] = identifier[element] . identifier[split] ( literal[string] )
identifier[lss] . identifier[append] ( identifier[lst] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[lss] )):
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[lss] [ identifier[i] ])):
identifier[lss] [ identifier[i] ][ identifier[j] ]= identifier[lss] [ identifier[i] ][ identifier[j] ]. identifier[strip] ()
identifier[dt] ={}
identifier[dtls] =[]
keyword[for] identifier[element] keyword[in] identifier[lss] :
keyword[if] identifier[element] [ literal[int] ]== literal[string] :
keyword[continue]
identifier[dt] [ identifier[element] [ literal[int] ]. identifier[upper] ()]=[]
identifier[dtls] . identifier[append] ( identifier[element] [ literal[int] ]. identifier[upper] ())
identifier[self] . identifier[dt] , identifier[self] . identifier[dtls] = identifier[dt] , identifier[dtls]
keyword[return] identifier[dt] , identifier[dtls] | def initdict(self, fname):
"""create a blank dictionary"""
if isinstance(fname, Idd):
(self.dt, self.dtls) = (fname.dt, fname.dtls)
return (self.dt, self.dtls) # depends on [control=['if'], data=[]]
astr = mylib2.readfile(fname)
nocom = removecomment(astr, '!')
idfst = nocom
alist = idfst.split(';')
lss = []
for element in alist:
lst = element.split(',')
lss.append(lst) # depends on [control=['for'], data=['element']]
for i in range(0, len(lss)):
for j in range(0, len(lss[i])):
lss[i][j] = lss[i][j].strip() # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
dt = {}
dtls = []
for element in lss:
if element[0] == '':
continue # depends on [control=['if'], data=[]]
dt[element[0].upper()] = []
dtls.append(element[0].upper()) # depends on [control=['for'], data=['element']]
(self.dt, self.dtls) = (dt, dtls)
return (dt, dtls) |
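
A worked example of the parsing above, assuming this method lives on eppy's Eplusdata-style class (the instance name `data` is hypothetical) and that removecomment strips '!' comments; the two-object IDD content is invented for the demo.

idd_text = (
    "Version,\n"
    "  A1;    ! version identifier\n"
    "\n"
    "Building,\n"
    "  A1,    ! name\n"
    "  N1;    ! north axis\n"
)
with open('minimal.idd', 'w') as f:
    f.write(idd_text)

dt, dtls = data.initdict('minimal.idd')
# dt   -> {'VERSION': [], 'BUILDING': []}
# dtls -> ['VERSION', 'BUILDING']
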
def _first_as_df(self):
"""Gets the first row as a Panda's DataFrame. Useful for functions like
dtypes & ftypes"""
columns = self._schema_rdd.columns
df = pd.DataFrame.from_records(
[self._schema_rdd.first()],
columns=self._schema_rdd.columns)
df = _update_index_on_df(df, self._index_names)
return df | def function[_first_as_df, parameter[self]]:
    constant[Gets the first row as a pandas DataFrame. Useful for functions like
dtypes & ftypes]
variable[columns] assign[=] name[self]._schema_rdd.columns
variable[df] assign[=] call[name[pd].DataFrame.from_records, parameter[list[[<ast.Call object at 0x7da1b04ca770>]]]]
variable[df] assign[=] call[name[_update_index_on_df], parameter[name[df], name[self]._index_names]]
return[name[df]] | keyword[def] identifier[_first_as_df] ( identifier[self] ):
literal[string]
identifier[columns] = identifier[self] . identifier[_schema_rdd] . identifier[columns]
identifier[df] = identifier[pd] . identifier[DataFrame] . identifier[from_records] (
[ identifier[self] . identifier[_schema_rdd] . identifier[first] ()],
identifier[columns] = identifier[self] . identifier[_schema_rdd] . identifier[columns] )
identifier[df] = identifier[_update_index_on_df] ( identifier[df] , identifier[self] . identifier[_index_names] )
keyword[return] identifier[df] | def _first_as_df(self):
"""Gets the first row as a Panda's DataFrame. Useful for functions like
dtypes & ftypes"""
columns = self._schema_rdd.columns
df = pd.DataFrame.from_records([self._schema_rdd.first()], columns=self._schema_rdd.columns)
df = _update_index_on_df(df, self._index_names)
return df |
def read(filename, data_wrapper=DataWrapper):
'''Read an SWC file and return a tuple of data, format.'''
data = np.loadtxt(filename)
if len(np.shape(data)) == 1:
data = np.reshape(data, (1, -1))
data = data[:, [X, Y, Z, R, TYPE, ID, P]]
return data_wrapper(data, 'SWC', None) | def function[read, parameter[filename, data_wrapper]]:
constant[Read an SWC file and return a tuple of data, format.]
variable[data] assign[=] call[name[np].loadtxt, parameter[name[filename]]]
if compare[call[name[len], parameter[call[name[np].shape, parameter[name[data]]]]] equal[==] constant[1]] begin[:]
variable[data] assign[=] call[name[np].reshape, parameter[name[data], tuple[[<ast.Constant object at 0x7da20c76fc10>, <ast.UnaryOp object at 0x7da20c76e890>]]]]
variable[data] assign[=] call[name[data]][tuple[[<ast.Slice object at 0x7da20c76e830>, <ast.List object at 0x7da20c76d8a0>]]]
return[call[name[data_wrapper], parameter[name[data], constant[SWC], constant[None]]]] | keyword[def] identifier[read] ( identifier[filename] , identifier[data_wrapper] = identifier[DataWrapper] ):
literal[string]
identifier[data] = identifier[np] . identifier[loadtxt] ( identifier[filename] )
keyword[if] identifier[len] ( identifier[np] . identifier[shape] ( identifier[data] ))== literal[int] :
identifier[data] = identifier[np] . identifier[reshape] ( identifier[data] ,( literal[int] ,- literal[int] ))
identifier[data] = identifier[data] [:,[ identifier[X] , identifier[Y] , identifier[Z] , identifier[R] , identifier[TYPE] , identifier[ID] , identifier[P] ]]
keyword[return] identifier[data_wrapper] ( identifier[data] , literal[string] , keyword[None] ) | def read(filename, data_wrapper=DataWrapper):
"""Read an SWC file and return a tuple of data, format."""
data = np.loadtxt(filename)
if len(np.shape(data)) == 1:
data = np.reshape(data, (1, -1)) # depends on [control=['if'], data=[]]
data = data[:, [X, Y, Z, R, TYPE, ID, P]]
return data_wrapper(data, 'SWC', None) |
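
A small sketch of the reordering, assuming the module-level column constants follow the standard SWC layout (ID=0, TYPE=1, X=2, Y=3, Z=4, R=5, P=6); the file name is a placeholder.

# two SWC rows: ID TYPE X Y Z RADIUS PARENT
with open('soma.swc', 'w') as f:
    f.write("1 1 0.0 0.0 0.0 1.0 -1\n"
            "2 3 0.0 5.0 0.0 0.5 1\n")

wrapper = read('soma.swc')
# each row is reordered to [X, Y, Z, R, TYPE, ID, P], e.g.
# [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, -1.0] for the first point
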
def checkTikaServer(scheme="http", serverHost=ServerHost, port=Port, tikaServerJar=TikaServerJar, classpath=None, config_path=None):
'''
Check that tika-server is running. If not, download JAR file and start it up.
:param scheme: e.g. http or https
:param serverHost:
:param port:
:param tikaServerJar:
:param classpath:
:return:
'''
if classpath is None:
classpath = TikaServerClasspath
if port is None:
port = '443' if scheme == 'https' else '80'
urlp = urlparse(tikaServerJar)
serverEndpoint = '%s://%s:%s' % (scheme, serverHost, port)
jarPath = os.path.join(TikaJarPath, 'tika-server.jar')
if 'localhost' in serverEndpoint or '127.0.0.1' in serverEndpoint:
alreadyRunning = checkPortIsOpen(serverHost, port)
if not alreadyRunning:
if not os.path.isfile(jarPath) and urlp.scheme != '':
getRemoteJar(tikaServerJar, jarPath)
if not checkJarSig(tikaServerJar, jarPath):
os.remove(jarPath)
tikaServerJar = getRemoteJar(tikaServerJar, jarPath)
status = startServer(jarPath, TikaJava, serverHost, port, classpath, config_path)
if not status:
log.error("Failed to receive startup confirmation from startServer.")
raise RuntimeError("Unable to start Tika server.")
return serverEndpoint | def function[checkTikaServer, parameter[scheme, serverHost, port, tikaServerJar, classpath, config_path]]:
constant[
    Check that tika-server is running. If not, download the JAR file and start it up.
    :param scheme: URL scheme, e.g. http or https
    :param serverHost: host name the Tika server runs on
    :param port: port the Tika server listens on
    :param tikaServerJar: path or URL of the tika-server JAR
    :param classpath: extra classpath entries for the JVM
    :param config_path: optional path to a Tika configuration file
    :return: the server endpoint, e.g. 'http://localhost:9998'
]
if compare[name[classpath] is constant[None]] begin[:]
variable[classpath] assign[=] name[TikaServerClasspath]
if compare[name[port] is constant[None]] begin[:]
variable[port] assign[=] <ast.IfExp object at 0x7da1b170edd0>
variable[urlp] assign[=] call[name[urlparse], parameter[name[tikaServerJar]]]
variable[serverEndpoint] assign[=] binary_operation[constant[%s://%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b18ae1a0>, <ast.Name object at 0x7da1b18ad1e0>, <ast.Name object at 0x7da1b18acfa0>]]]
variable[jarPath] assign[=] call[name[os].path.join, parameter[name[TikaJarPath], constant[tika-server.jar]]]
if <ast.BoolOp object at 0x7da1b170e2f0> begin[:]
variable[alreadyRunning] assign[=] call[name[checkPortIsOpen], parameter[name[serverHost], name[port]]]
if <ast.UnaryOp object at 0x7da1b170d360> begin[:]
if <ast.BoolOp object at 0x7da1b170e800> begin[:]
call[name[getRemoteJar], parameter[name[tikaServerJar], name[jarPath]]]
if <ast.UnaryOp object at 0x7da1b170d6c0> begin[:]
call[name[os].remove, parameter[name[jarPath]]]
variable[tikaServerJar] assign[=] call[name[getRemoteJar], parameter[name[tikaServerJar], name[jarPath]]]
variable[status] assign[=] call[name[startServer], parameter[name[jarPath], name[TikaJava], name[serverHost], name[port], name[classpath], name[config_path]]]
if <ast.UnaryOp object at 0x7da1b170c760> begin[:]
call[name[log].error, parameter[constant[Failed to receive startup confirmation from startServer.]]]
<ast.Raise object at 0x7da1b170f760>
return[name[serverEndpoint]] | keyword[def] identifier[checkTikaServer] ( identifier[scheme] = literal[string] , identifier[serverHost] = identifier[ServerHost] , identifier[port] = identifier[Port] , identifier[tikaServerJar] = identifier[TikaServerJar] , identifier[classpath] = keyword[None] , identifier[config_path] = keyword[None] ):
literal[string]
keyword[if] identifier[classpath] keyword[is] keyword[None] :
identifier[classpath] = identifier[TikaServerClasspath]
keyword[if] identifier[port] keyword[is] keyword[None] :
identifier[port] = literal[string] keyword[if] identifier[scheme] == literal[string] keyword[else] literal[string]
identifier[urlp] = identifier[urlparse] ( identifier[tikaServerJar] )
identifier[serverEndpoint] = literal[string] %( identifier[scheme] , identifier[serverHost] , identifier[port] )
identifier[jarPath] = identifier[os] . identifier[path] . identifier[join] ( identifier[TikaJarPath] , literal[string] )
keyword[if] literal[string] keyword[in] identifier[serverEndpoint] keyword[or] literal[string] keyword[in] identifier[serverEndpoint] :
identifier[alreadyRunning] = identifier[checkPortIsOpen] ( identifier[serverHost] , identifier[port] )
keyword[if] keyword[not] identifier[alreadyRunning] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[jarPath] ) keyword[and] identifier[urlp] . identifier[scheme] != literal[string] :
identifier[getRemoteJar] ( identifier[tikaServerJar] , identifier[jarPath] )
keyword[if] keyword[not] identifier[checkJarSig] ( identifier[tikaServerJar] , identifier[jarPath] ):
identifier[os] . identifier[remove] ( identifier[jarPath] )
identifier[tikaServerJar] = identifier[getRemoteJar] ( identifier[tikaServerJar] , identifier[jarPath] )
identifier[status] = identifier[startServer] ( identifier[jarPath] , identifier[TikaJava] , identifier[serverHost] , identifier[port] , identifier[classpath] , identifier[config_path] )
keyword[if] keyword[not] identifier[status] :
identifier[log] . identifier[error] ( literal[string] )
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] identifier[serverEndpoint] | def checkTikaServer(scheme='http', serverHost=ServerHost, port=Port, tikaServerJar=TikaServerJar, classpath=None, config_path=None):
"""
    Check that tika-server is running. If not, download the JAR file and start it up.
    :param scheme: URL scheme, e.g. http or https
    :param serverHost: host name the Tika server runs on
    :param port: port the Tika server listens on
    :param tikaServerJar: path or URL of the tika-server JAR
    :param classpath: extra classpath entries for the JVM
    :param config_path: optional path to a Tika configuration file
    :return: the server endpoint, e.g. 'http://localhost:9998'
"""
if classpath is None:
classpath = TikaServerClasspath # depends on [control=['if'], data=['classpath']]
if port is None:
port = '443' if scheme == 'https' else '80' # depends on [control=['if'], data=['port']]
urlp = urlparse(tikaServerJar)
serverEndpoint = '%s://%s:%s' % (scheme, serverHost, port)
jarPath = os.path.join(TikaJarPath, 'tika-server.jar')
if 'localhost' in serverEndpoint or '127.0.0.1' in serverEndpoint:
alreadyRunning = checkPortIsOpen(serverHost, port)
if not alreadyRunning:
if not os.path.isfile(jarPath) and urlp.scheme != '':
getRemoteJar(tikaServerJar, jarPath) # depends on [control=['if'], data=[]]
if not checkJarSig(tikaServerJar, jarPath):
os.remove(jarPath)
tikaServerJar = getRemoteJar(tikaServerJar, jarPath) # depends on [control=['if'], data=[]]
status = startServer(jarPath, TikaJava, serverHost, port, classpath, config_path)
if not status:
log.error('Failed to receive startup confirmation from startServer.')
raise RuntimeError('Unable to start Tika server.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return serverEndpoint |
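
Once the endpoint is up, you can hit the server's REST API directly. This sketch uses the standard Tika server /tika resource; the PDF name is a placeholder.

import requests

endpoint = checkTikaServer()  # e.g. 'http://localhost:9998'
with open('report.pdf', 'rb') as f:
    r = requests.put(endpoint + '/tika', data=f,
                     headers={'Accept': 'text/plain'})
print(r.text)  # extracted plain text
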
def CrearCertificacionCabecera(self, pto_emision=1, nro_orden=None,
tipo_certificado=None, nro_planta=None,
nro_ing_bruto_depositario=None, titular_grano=None,
cuit_depositante=None, nro_ing_bruto_depositante=None,
cuit_corredor=None, cod_grano=None, campania=None,
datos_adicionales=None,
**kwargs):
"Inicializa los datos de una certificación de granos (cabecera)"
self.certificacion = {}
self.certificacion['cabecera'] = dict(
ptoEmision=pto_emision,
nroOrden=nro_orden,
tipoCertificado=tipo_certificado,
nroPlanta=nro_planta or None, # opcional
nroIngBrutoDepositario=nro_ing_bruto_depositario,
titularGrano=titular_grano,
cuitDepositante=cuit_depositante or None, # opcional
nroIngBrutoDepositante=nro_ing_bruto_depositante or None, # opcional
cuitCorredor=cuit_corredor or None, # opcional
codGrano=cod_grano,
campania=campania,
datosAdicionales=datos_adicionales, # opcional
)
# limpio las estructuras internas no utilizables en este caso
self.liquidacion = None
return True | def function[CrearCertificacionCabecera, parameter[self, pto_emision, nro_orden, tipo_certificado, nro_planta, nro_ing_bruto_depositario, titular_grano, cuit_depositante, nro_ing_bruto_depositante, cuit_corredor, cod_grano, campania, datos_adicionales]]:
    constant[Initialize the data of a grain certification (header)]
name[self].certificacion assign[=] dictionary[[], []]
call[name[self].certificacion][constant[cabecera]] assign[=] call[name[dict], parameter[]]
name[self].liquidacion assign[=] constant[None]
return[constant[True]] | keyword[def] identifier[CrearCertificacionCabecera] ( identifier[self] , identifier[pto_emision] = literal[int] , identifier[nro_orden] = keyword[None] ,
identifier[tipo_certificado] = keyword[None] , identifier[nro_planta] = keyword[None] ,
identifier[nro_ing_bruto_depositario] = keyword[None] , identifier[titular_grano] = keyword[None] ,
identifier[cuit_depositante] = keyword[None] , identifier[nro_ing_bruto_depositante] = keyword[None] ,
identifier[cuit_corredor] = keyword[None] , identifier[cod_grano] = keyword[None] , identifier[campania] = keyword[None] ,
identifier[datos_adicionales] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[certificacion] ={}
identifier[self] . identifier[certificacion] [ literal[string] ]= identifier[dict] (
identifier[ptoEmision] = identifier[pto_emision] ,
identifier[nroOrden] = identifier[nro_orden] ,
identifier[tipoCertificado] = identifier[tipo_certificado] ,
identifier[nroPlanta] = identifier[nro_planta] keyword[or] keyword[None] ,
identifier[nroIngBrutoDepositario] = identifier[nro_ing_bruto_depositario] ,
identifier[titularGrano] = identifier[titular_grano] ,
identifier[cuitDepositante] = identifier[cuit_depositante] keyword[or] keyword[None] ,
identifier[nroIngBrutoDepositante] = identifier[nro_ing_bruto_depositante] keyword[or] keyword[None] ,
identifier[cuitCorredor] = identifier[cuit_corredor] keyword[or] keyword[None] ,
identifier[codGrano] = identifier[cod_grano] ,
identifier[campania] = identifier[campania] ,
identifier[datosAdicionales] = identifier[datos_adicionales] ,
)
identifier[self] . identifier[liquidacion] = keyword[None]
keyword[return] keyword[True] | def CrearCertificacionCabecera(self, pto_emision=1, nro_orden=None, tipo_certificado=None, nro_planta=None, nro_ing_bruto_depositario=None, titular_grano=None, cuit_depositante=None, nro_ing_bruto_depositante=None, cuit_corredor=None, cod_grano=None, campania=None, datos_adicionales=None, **kwargs):
"""Inicializa los datos de una certificación de granos (cabecera)"""
self.certificacion = {} # opcional
# opcional
# opcional
# opcional
# opcional
self.certificacion['cabecera'] = dict(ptoEmision=pto_emision, nroOrden=nro_orden, tipoCertificado=tipo_certificado, nroPlanta=nro_planta or None, nroIngBrutoDepositario=nro_ing_bruto_depositario, titularGrano=titular_grano, cuitDepositante=cuit_depositante or None, nroIngBrutoDepositante=nro_ing_bruto_depositante or None, cuitCorredor=cuit_corredor or None, codGrano=cod_grano, campania=campania, datosAdicionales=datos_adicionales)
# limpio las estructuras internas no utilizables en este caso
self.liquidacion = None
return True |
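
A hedged usage sketch: the field values below are invented and only the keyword names come from the signature above; the surrounding pyafipws WSLPG class is assumed.

wslpg = WSLPG()
ok = wslpg.CrearCertificacionCabecera(
    pto_emision=1,
    nro_orden=1,
    tipo_certificado="P",             # hypothetical certificate type code
    nro_planta=3091,                  # hypothetical plant number
    nro_ing_bruto_depositario="123",  # hypothetical gross-income number
    titular_grano="T",
    cod_grano=2,
    campania=1314,
)
assert ok  # the method always returns True after storing the header
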
def path(self):
""" Absolute path to the directory on the camera's filesystem. """
if self.parent is None:
return "/"
else:
return os.path.join(self.parent.path, self.name) | def function[path, parameter[self]]:
constant[ Absolute path to the directory on the camera's filesystem. ]
if compare[name[self].parent is constant[None]] begin[:]
return[constant[/]] | keyword[def] identifier[path] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[parent] keyword[is] keyword[None] :
keyword[return] literal[string]
keyword[else] :
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[parent] . identifier[path] , identifier[self] . identifier[name] ) | def path(self):
""" Absolute path to the directory on the camera's filesystem. """
if self.parent is None:
return '/' # depends on [control=['if'], data=[]]
else:
return os.path.join(self.parent.path, self.name) |
def psnr(test, ref, mask=None):
"""Peak Signal-to-Noise Ratio (PSNR)
Calculate the PSNR between a test image and a reference image.
Parameters
----------
ref : np.ndarray
the reference image
test : np.ndarray
the tested image
mask : np.ndarray, optional
the mask for the ROI
Notes
-----
    Compute the metric only on the magnitude.
Returns
-------
psnr: float, the psnr
"""
test, ref, mask = _preprocess_input(test, ref, mask)
if mask is not None:
test = mask * test
ref = mask * ref
num = np.max(np.abs(test))
deno = mse(test, ref)
return 10.0 * np.log10(num / deno) | def function[psnr, parameter[test, ref, mask]]:
constant[Peak Signal-to-Noise Ratio (PSNR)
Calculate the PSNR between a test image and a reference image.
Parameters
----------
ref : np.ndarray
the reference image
test : np.ndarray
the tested image
mask : np.ndarray, optional
the mask for the ROI
Notes
-----
    Compute the metric only on the magnitude.
Returns
-------
psnr: float, the psnr
]
<ast.Tuple object at 0x7da1b0e66e90> assign[=] call[name[_preprocess_input], parameter[name[test], name[ref], name[mask]]]
if compare[name[mask] is_not constant[None]] begin[:]
variable[test] assign[=] binary_operation[name[mask] * name[test]]
variable[ref] assign[=] binary_operation[name[mask] * name[ref]]
variable[num] assign[=] call[name[np].max, parameter[call[name[np].abs, parameter[name[test]]]]]
variable[deno] assign[=] call[name[mse], parameter[name[test], name[ref]]]
return[binary_operation[constant[10.0] * call[name[np].log10, parameter[binary_operation[name[num] / name[deno]]]]]] | keyword[def] identifier[psnr] ( identifier[test] , identifier[ref] , identifier[mask] = keyword[None] ):
literal[string]
identifier[test] , identifier[ref] , identifier[mask] = identifier[_preprocess_input] ( identifier[test] , identifier[ref] , identifier[mask] )
keyword[if] identifier[mask] keyword[is] keyword[not] keyword[None] :
identifier[test] = identifier[mask] * identifier[test]
identifier[ref] = identifier[mask] * identifier[ref]
identifier[num] = identifier[np] . identifier[max] ( identifier[np] . identifier[abs] ( identifier[test] ))
identifier[deno] = identifier[mse] ( identifier[test] , identifier[ref] )
keyword[return] literal[int] * identifier[np] . identifier[log10] ( identifier[num] / identifier[deno] ) | def psnr(test, ref, mask=None):
"""Peak Signal-to-Noise Ratio (PSNR)
Calculate the PSNR between a test image and a reference image.
Parameters
----------
ref : np.ndarray
the reference image
test : np.ndarray
the tested image
mask : np.ndarray, optional
the mask for the ROI
Notes
-----
Compute the metric only on magnetude.
Returns
-------
psnr: float, the psnr
"""
(test, ref, mask) = _preprocess_input(test, ref, mask)
if mask is not None:
test = mask * test
ref = mask * ref # depends on [control=['if'], data=['mask']]
num = np.max(np.abs(test))
deno = mse(test, ref)
return 10.0 * np.log10(num / deno) |
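
A numeric check of the formula, assuming the module's mse helper is the usual mean squared error; note that the numerator here is the peak magnitude of the test image, not its square.

import numpy as np

ref = np.ones((8, 8))
test = ref + 0.1          # constant error, so mse(test, ref) == 0.01
# num  = max(|test|) = 1.1
# psnr = 10 * log10(1.1 / 0.01), roughly 20.41 dB
print(psnr(test, ref))
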
def create_component(self, name, description=None):
"""
Create a sub component in the business component.
:param name: The new component's name.
:param description: The new component's description.
:returns: The created component.
"""
new_comp = Component(name, self.gl, description=description)
new_comp.set_parent_path(self.path)
self.components.append(new_comp)
return new_comp | def function[create_component, parameter[self, name, description]]:
constant[
Create a sub component in the business component.
:param name: The new component's name.
:param description: The new component's description.
:returns: The created component.
]
variable[new_comp] assign[=] call[name[Component], parameter[name[name], name[self].gl]]
call[name[new_comp].set_parent_path, parameter[name[self].path]]
call[name[self].components.append, parameter[name[new_comp]]]
return[name[new_comp]] | keyword[def] identifier[create_component] ( identifier[self] , identifier[name] , identifier[description] = keyword[None] ):
literal[string]
identifier[new_comp] = identifier[Component] ( identifier[name] , identifier[self] . identifier[gl] , identifier[description] = identifier[description] )
identifier[new_comp] . identifier[set_parent_path] ( identifier[self] . identifier[path] )
identifier[self] . identifier[components] . identifier[append] ( identifier[new_comp] )
keyword[return] identifier[new_comp] | def create_component(self, name, description=None):
"""
Create a sub component in the business component.
:param name: The new component's name.
:param description: The new component's description.
:returns: The created component.
"""
new_comp = Component(name, self.gl, description=description)
new_comp.set_parent_path(self.path)
self.components.append(new_comp)
return new_comp |
def docx_process_simple_text(text: str, width: int) -> str:
"""
Word-wraps text.
Args:
text: text to process
width: width to word-wrap to (or 0 to skip word wrapping)
Returns:
wrapped text
"""
if width:
return '\n'.join(textwrap.wrap(text, width=width))
else:
return text | def function[docx_process_simple_text, parameter[text, width]]:
constant[
Word-wraps text.
Args:
text: text to process
width: width to word-wrap to (or 0 to skip word wrapping)
Returns:
wrapped text
]
if name[width] begin[:]
return[call[constant[
].join, parameter[call[name[textwrap].wrap, parameter[name[text]]]]]] | keyword[def] identifier[docx_process_simple_text] ( identifier[text] : identifier[str] , identifier[width] : identifier[int] )-> identifier[str] :
literal[string]
keyword[if] identifier[width] :
keyword[return] literal[string] . identifier[join] ( identifier[textwrap] . identifier[wrap] ( identifier[text] , identifier[width] = identifier[width] ))
keyword[else] :
keyword[return] identifier[text] | def docx_process_simple_text(text: str, width: int) -> str:
"""
Word-wraps text.
Args:
text: text to process
width: width to word-wrap to (or 0 to skip word wrapping)
Returns:
wrapped text
"""
if width:
return '\n'.join(textwrap.wrap(text, width=width)) # depends on [control=['if'], data=[]]
else:
return text |
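
The wrapping is plain textwrap behaviour; a quick check with arbitrary input:

print(docx_process_simple_text("the quick brown fox jumps over the lazy dog", 10))
# the quick
# brown fox
# jumps over
# the lazy
# dog

print(docx_process_simple_text("unchanged", 0))  # width 0 skips wrapping
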
def get_stores(self, names=None, workspaces=None):
'''
        Returns a list of stores in the catalog. If workspaces is specified, will only return stores in those workspaces.
        If names is specified, will only return stores that match.
        names can either be a comma-delimited string or an array.
Will return an empty list if no stores are found.
'''
if isinstance(workspaces, Workspace):
workspaces = [workspaces]
elif isinstance(workspaces, list) and [w for w in workspaces if isinstance(w, Workspace)]:
# nothing
pass
else:
workspaces = self.get_workspaces(names=workspaces)
stores = []
for ws in workspaces:
ds_list = self.get_xml(ws.datastore_url)
cs_list = self.get_xml(ws.coveragestore_url)
wms_list = self.get_xml(ws.wmsstore_url)
stores.extend([datastore_from_index(self, ws, n) for n in ds_list.findall("dataStore")])
stores.extend([coveragestore_from_index(self, ws, n) for n in cs_list.findall("coverageStore")])
stores.extend([wmsstore_from_index(self, ws, n) for n in wms_list.findall("wmsStore")])
if names is None:
names = []
elif isinstance(names, basestring):
names = [s.strip() for s in names.split(',') if s.strip()]
if stores and names:
return ([store for store in stores if store.name in names])
return stores | def function[get_stores, parameter[self, names, workspaces]]:
constant[
    Returns a list of stores in the catalog. If workspaces is specified, will only return stores in those workspaces.
    If names is specified, will only return stores that match.
    names can either be a comma-delimited string or an array.
Will return an empty list if no stores are found.
]
if call[name[isinstance], parameter[name[workspaces], name[Workspace]]] begin[:]
variable[workspaces] assign[=] list[[<ast.Name object at 0x7da1b014fc40>]]
variable[stores] assign[=] list[[]]
for taget[name[ws]] in starred[name[workspaces]] begin[:]
variable[ds_list] assign[=] call[name[self].get_xml, parameter[name[ws].datastore_url]]
variable[cs_list] assign[=] call[name[self].get_xml, parameter[name[ws].coveragestore_url]]
variable[wms_list] assign[=] call[name[self].get_xml, parameter[name[ws].wmsstore_url]]
call[name[stores].extend, parameter[<ast.ListComp object at 0x7da1b014fcd0>]]
call[name[stores].extend, parameter[<ast.ListComp object at 0x7da1b014d990>]]
call[name[stores].extend, parameter[<ast.ListComp object at 0x7da1b014c8b0>]]
if compare[name[names] is constant[None]] begin[:]
variable[names] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b006cc70> begin[:]
return[<ast.ListComp object at 0x7da1b006ee90>]
return[name[stores]] | keyword[def] identifier[get_stores] ( identifier[self] , identifier[names] = keyword[None] , identifier[workspaces] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[workspaces] , identifier[Workspace] ):
identifier[workspaces] =[ identifier[workspaces] ]
keyword[elif] identifier[isinstance] ( identifier[workspaces] , identifier[list] ) keyword[and] [ identifier[w] keyword[for] identifier[w] keyword[in] identifier[workspaces] keyword[if] identifier[isinstance] ( identifier[w] , identifier[Workspace] )]:
keyword[pass]
keyword[else] :
identifier[workspaces] = identifier[self] . identifier[get_workspaces] ( identifier[names] = identifier[workspaces] )
identifier[stores] =[]
keyword[for] identifier[ws] keyword[in] identifier[workspaces] :
identifier[ds_list] = identifier[self] . identifier[get_xml] ( identifier[ws] . identifier[datastore_url] )
identifier[cs_list] = identifier[self] . identifier[get_xml] ( identifier[ws] . identifier[coveragestore_url] )
identifier[wms_list] = identifier[self] . identifier[get_xml] ( identifier[ws] . identifier[wmsstore_url] )
identifier[stores] . identifier[extend] ([ identifier[datastore_from_index] ( identifier[self] , identifier[ws] , identifier[n] ) keyword[for] identifier[n] keyword[in] identifier[ds_list] . identifier[findall] ( literal[string] )])
identifier[stores] . identifier[extend] ([ identifier[coveragestore_from_index] ( identifier[self] , identifier[ws] , identifier[n] ) keyword[for] identifier[n] keyword[in] identifier[cs_list] . identifier[findall] ( literal[string] )])
identifier[stores] . identifier[extend] ([ identifier[wmsstore_from_index] ( identifier[self] , identifier[ws] , identifier[n] ) keyword[for] identifier[n] keyword[in] identifier[wms_list] . identifier[findall] ( literal[string] )])
keyword[if] identifier[names] keyword[is] keyword[None] :
identifier[names] =[]
keyword[elif] identifier[isinstance] ( identifier[names] , identifier[basestring] ):
identifier[names] =[ identifier[s] . identifier[strip] () keyword[for] identifier[s] keyword[in] identifier[names] . identifier[split] ( literal[string] ) keyword[if] identifier[s] . identifier[strip] ()]
keyword[if] identifier[stores] keyword[and] identifier[names] :
keyword[return] ([ identifier[store] keyword[for] identifier[store] keyword[in] identifier[stores] keyword[if] identifier[store] . identifier[name] keyword[in] identifier[names] ])
keyword[return] identifier[stores] | def get_stores(self, names=None, workspaces=None):
"""
    Returns a list of stores in the catalog. If workspaces is specified, will only return stores in those workspaces.
    If names is specified, will only return stores that match.
    names can either be a comma-delimited string or an array.
Will return an empty list if no stores are found.
"""
if isinstance(workspaces, Workspace):
workspaces = [workspaces] # depends on [control=['if'], data=[]]
elif isinstance(workspaces, list) and [w for w in workspaces if isinstance(w, Workspace)]:
# nothing
pass # depends on [control=['if'], data=[]]
else:
workspaces = self.get_workspaces(names=workspaces)
stores = []
for ws in workspaces:
ds_list = self.get_xml(ws.datastore_url)
cs_list = self.get_xml(ws.coveragestore_url)
wms_list = self.get_xml(ws.wmsstore_url)
stores.extend([datastore_from_index(self, ws, n) for n in ds_list.findall('dataStore')])
stores.extend([coveragestore_from_index(self, ws, n) for n in cs_list.findall('coverageStore')])
stores.extend([wmsstore_from_index(self, ws, n) for n in wms_list.findall('wmsStore')]) # depends on [control=['for'], data=['ws']]
if names is None:
names = [] # depends on [control=['if'], data=['names']]
elif isinstance(names, basestring):
names = [s.strip() for s in names.split(',') if s.strip()] # depends on [control=['if'], data=[]]
if stores and names:
return [store for store in stores if store.name in names] # depends on [control=['if'], data=[]]
return stores |
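
Typical calls against a gsconfig Catalog; the GeoServer URL, credentials, and store/workspace names below are placeholders.

from geoserver.catalog import Catalog

cat = Catalog("http://localhost:8080/geoserver/rest", "admin", "geoserver")

topp_stores = cat.get_stores(workspaces="topp")    # all stores in one workspace
named = cat.get_stores(names="sf_roads, landsat")  # only matching names, any workspace
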
def append(self, other: 'List') -> 'List':
"""Append other list to this list."""
if self.null():
return other
return (self.tail().append(other)).cons(self.head()) | def function[append, parameter[self, other]]:
constant[Append other list to this list.]
if call[name[self].null, parameter[]] begin[:]
return[name[other]]
return[call[call[call[name[self].tail, parameter[]].append, parameter[name[other]]].cons, parameter[call[name[self].head, parameter[]]]]] | keyword[def] identifier[append] ( identifier[self] , identifier[other] : literal[string] )-> literal[string] :
literal[string]
keyword[if] identifier[self] . identifier[null] ():
keyword[return] identifier[other]
keyword[return] ( identifier[self] . identifier[tail] (). identifier[append] ( identifier[other] )). identifier[cons] ( identifier[self] . identifier[head] ()) | def append(self, other: 'List') -> 'List':
"""Append other list to this list."""
if self.null():
return other # depends on [control=['if'], data=[]]
return self.tail().append(other).cons(self.head()) |
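
To make the recursion concrete, here is a minimal cons-list backing for the head/tail/null/cons interface the method assumes; the real class this method comes from may be implemented differently.

class ConsList:
    def __init__(self, items=()):
        self._items = tuple(items)

    def null(self):
        return not self._items

    def head(self):
        return self._items[0]

    def tail(self):
        return ConsList(self._items[1:])

    def cons(self, x):
        return ConsList((x,) + self._items)

    def append(self, other):
        if self.null():
            return other
        # prepend this list's head to the tail appended to other
        return (self.tail().append(other)).cons(self.head())

print(ConsList([1, 2]).append(ConsList([3]))._items)  # (1, 2, 3)
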
def find_subdirs(startdir='.', recursion_depth=None):
"""Find all subdirectory of a directory.
Inputs:
startdir: directory to start with. Defaults to the current folder.
recursion_depth: number of levels to traverse. None is infinite.
Output: a list of absolute names of subfolders.
Examples:
>>> find_subdirs('dir',0) # returns just ['dir']
>>> find_subdirs('dir',1) # returns all direct (first-level) subdirs
# of 'dir'.
"""
startdir = os.path.expanduser(startdir)
direct_subdirs = [os.path.join(startdir, x) for x in os.listdir(
startdir) if os.path.isdir(os.path.join(startdir, x))]
if recursion_depth is None:
next_recursion_depth = None
else:
next_recursion_depth = recursion_depth - 1
if (recursion_depth is not None) and (recursion_depth <= 1):
return [startdir] + direct_subdirs
else:
subdirs = []
for d in direct_subdirs:
subdirs.extend(find_subdirs(d, next_recursion_depth))
return [startdir] + subdirs | def function[find_subdirs, parameter[startdir, recursion_depth]]:
    constant[Find all subdirectories of a directory.
Inputs:
startdir: directory to start with. Defaults to the current folder.
recursion_depth: number of levels to traverse. None is infinite.
Output: a list of absolute names of subfolders.
Examples:
>>> find_subdirs('dir',0) # returns just ['dir']
>>> find_subdirs('dir',1) # returns all direct (first-level) subdirs
# of 'dir'.
]
variable[startdir] assign[=] call[name[os].path.expanduser, parameter[name[startdir]]]
variable[direct_subdirs] assign[=] <ast.ListComp object at 0x7da1b1050580>
if compare[name[recursion_depth] is constant[None]] begin[:]
variable[next_recursion_depth] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b1050130> begin[:]
return[binary_operation[list[[<ast.Name object at 0x7da1b10512d0>]] + name[direct_subdirs]]] | keyword[def] identifier[find_subdirs] ( identifier[startdir] = literal[string] , identifier[recursion_depth] = keyword[None] ):
literal[string]
identifier[startdir] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[startdir] )
identifier[direct_subdirs] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[startdir] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[os] . identifier[listdir] (
identifier[startdir] ) keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[startdir] , identifier[x] ))]
keyword[if] identifier[recursion_depth] keyword[is] keyword[None] :
identifier[next_recursion_depth] = keyword[None]
keyword[else] :
identifier[next_recursion_depth] = identifier[recursion_depth] - literal[int]
keyword[if] ( identifier[recursion_depth] keyword[is] keyword[not] keyword[None] ) keyword[and] ( identifier[recursion_depth] <= literal[int] ):
keyword[return] [ identifier[startdir] ]+ identifier[direct_subdirs]
keyword[else] :
identifier[subdirs] =[]
keyword[for] identifier[d] keyword[in] identifier[direct_subdirs] :
identifier[subdirs] . identifier[extend] ( identifier[find_subdirs] ( identifier[d] , identifier[next_recursion_depth] ))
keyword[return] [ identifier[startdir] ]+ identifier[subdirs] | def find_subdirs(startdir='.', recursion_depth=None):
"""Find all subdirectory of a directory.
Inputs:
startdir: directory to start with. Defaults to the current folder.
recursion_depth: number of levels to traverse. None is infinite.
Output: a list of absolute names of subfolders.
Examples:
>>> find_subdirs('dir',0) # returns just ['dir']
>>> find_subdirs('dir',1) # returns all direct (first-level) subdirs
# of 'dir'.
"""
startdir = os.path.expanduser(startdir)
direct_subdirs = [os.path.join(startdir, x) for x in os.listdir(startdir) if os.path.isdir(os.path.join(startdir, x))]
if recursion_depth is None:
next_recursion_depth = None # depends on [control=['if'], data=[]]
else:
next_recursion_depth = recursion_depth - 1
if recursion_depth is not None and recursion_depth <= 1:
return [startdir] + direct_subdirs # depends on [control=['if'], data=[]]
else:
subdirs = []
for d in direct_subdirs:
subdirs.extend(find_subdirs(d, next_recursion_depth)) # depends on [control=['for'], data=['d']]
return [startdir] + subdirs |
def __add_parameter(self, param, path_parameters, params):
"""Adds all parameters in a field to a method parameters descriptor.
Simple fields will only have one parameter, but a message field 'x' that
corresponds to a message class with fields 'y' and 'z' will result in
parameters 'x.y' and 'x.z', for example. The mapping from field to
parameters is mostly handled by __field_to_subfields.
Args:
param: Parameter to be added to the descriptor.
path_parameters: A list of parameters matched from a path for this field.
For example for the hypothetical 'x' from above if the path was
'/a/{x.z}/b/{other}' then this list would contain only the element
'x.z' since 'other' does not match to this field.
params: List of parameters. Each parameter in the field.
"""
# If this is a simple field, just build the descriptor and append it.
# Otherwise, build a schema and assign it to this descriptor
if not isinstance(param, messages.MessageField):
if param.name in path_parameters:
descriptor = self.__path_parameter_descriptor(param)
else:
descriptor = self.__query_parameter_descriptor(param)
params.append(descriptor)
else:
# If a subfield of a MessageField is found in the path, build a descriptor
# for the path parameter.
for subfield_list in self.__field_to_subfields(param):
qualified_name = '.'.join(subfield.name for subfield in subfield_list)
if qualified_name in path_parameters:
descriptor = self.__path_parameter_descriptor(subfield_list[-1])
descriptor['required'] = True
params.append(descriptor) | def function[__add_parameter, parameter[self, param, path_parameters, params]]:
constant[Adds all parameters in a field to a method parameters descriptor.
Simple fields will only have one parameter, but a message field 'x' that
corresponds to a message class with fields 'y' and 'z' will result in
parameters 'x.y' and 'x.z', for example. The mapping from field to
parameters is mostly handled by __field_to_subfields.
Args:
param: Parameter to be added to the descriptor.
path_parameters: A list of parameters matched from a path for this field.
For example for the hypothetical 'x' from above if the path was
'/a/{x.z}/b/{other}' then this list would contain only the element
'x.z' since 'other' does not match to this field.
params: List of parameters. Each parameter in the field.
]
if <ast.UnaryOp object at 0x7da1b0d550c0> begin[:]
if compare[name[param].name in name[path_parameters]] begin[:]
variable[descriptor] assign[=] call[name[self].__path_parameter_descriptor, parameter[name[param]]]
call[name[params].append, parameter[name[descriptor]]] | keyword[def] identifier[__add_parameter] ( identifier[self] , identifier[param] , identifier[path_parameters] , identifier[params] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[param] , identifier[messages] . identifier[MessageField] ):
keyword[if] identifier[param] . identifier[name] keyword[in] identifier[path_parameters] :
identifier[descriptor] = identifier[self] . identifier[__path_parameter_descriptor] ( identifier[param] )
keyword[else] :
identifier[descriptor] = identifier[self] . identifier[__query_parameter_descriptor] ( identifier[param] )
identifier[params] . identifier[append] ( identifier[descriptor] )
keyword[else] :
keyword[for] identifier[subfield_list] keyword[in] identifier[self] . identifier[__field_to_subfields] ( identifier[param] ):
identifier[qualified_name] = literal[string] . identifier[join] ( identifier[subfield] . identifier[name] keyword[for] identifier[subfield] keyword[in] identifier[subfield_list] )
keyword[if] identifier[qualified_name] keyword[in] identifier[path_parameters] :
identifier[descriptor] = identifier[self] . identifier[__path_parameter_descriptor] ( identifier[subfield_list] [- literal[int] ])
identifier[descriptor] [ literal[string] ]= keyword[True]
identifier[params] . identifier[append] ( identifier[descriptor] ) | def __add_parameter(self, param, path_parameters, params):
"""Adds all parameters in a field to a method parameters descriptor.
Simple fields will only have one parameter, but a message field 'x' that
corresponds to a message class with fields 'y' and 'z' will result in
parameters 'x.y' and 'x.z', for example. The mapping from field to
parameters is mostly handled by __field_to_subfields.
Args:
param: Parameter to be added to the descriptor.
path_parameters: A list of parameters matched from a path for this field.
For example for the hypothetical 'x' from above if the path was
'/a/{x.z}/b/{other}' then this list would contain only the element
'x.z' since 'other' does not match to this field.
params: List of parameters. Each parameter in the field.
"""
# If this is a simple field, just build the descriptor and append it.
# Otherwise, build a schema and assign it to this descriptor
if not isinstance(param, messages.MessageField):
if param.name in path_parameters:
descriptor = self.__path_parameter_descriptor(param) # depends on [control=['if'], data=[]]
else:
descriptor = self.__query_parameter_descriptor(param)
params.append(descriptor) # depends on [control=['if'], data=[]]
else:
# If a subfield of a MessageField is found in the path, build a descriptor
# for the path parameter.
for subfield_list in self.__field_to_subfields(param):
qualified_name = '.'.join((subfield.name for subfield in subfield_list))
if qualified_name in path_parameters:
descriptor = self.__path_parameter_descriptor(subfield_list[-1])
descriptor['required'] = True
params.append(descriptor) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['subfield_list']] |
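
A toy illustration of the qualified-name construction used in the MessageField branch above; the `F` stand-in and the subfield lists are hypothetical, since the real lists come from `__field_to_subfields`.

class F:
    """Minimal stand-in for a protorpc field that only carries a name."""
    def __init__(self, name):
        self.name = name

# A message field 'x' with simple subfields 'y' and 'z' flattens to two lists.
subfield_lists = [[F('x'), F('y')], [F('x'), F('z')]]
for subfield_list in subfield_lists:
    print('.'.join(f.name for f in subfield_list))  # prints x.y, then x.z
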
def metadata(self):
"""Access process configuarion values as attributes. """
from ambry.metadata.schema import Top # cross-module import
top = Top()
top.build_from_db(self.dataset)
return top | def function[metadata, parameter[self]]:
    constant[Access process configuration values as attributes. ]
from relative_module[ambry.metadata.schema] import module[Top]
variable[top] assign[=] call[name[Top], parameter[]]
call[name[top].build_from_db, parameter[name[self].dataset]]
return[name[top]] | keyword[def] identifier[metadata] ( identifier[self] ):
literal[string]
keyword[from] identifier[ambry] . identifier[metadata] . identifier[schema] keyword[import] identifier[Top]
identifier[top] = identifier[Top] ()
identifier[top] . identifier[build_from_db] ( identifier[self] . identifier[dataset] )
keyword[return] identifier[top] | def metadata(self):
"""Access process configuarion values as attributes. """
from ambry.metadata.schema import Top # cross-module import
top = Top()
top.build_from_db(self.dataset)
return top |
def yield_once(iterator):
"""
Decorator to make an iterator returned by a method yield each result only
once.
>>> @yield_once
... def generate_list(foo):
... return foo
>>> list(generate_list([1, 2, 1]))
[1, 2]
:param iterator: Any method that returns an iterator
    :return: A method returning an iterator
    that yields every result at most once.
"""
@wraps(iterator)
def yield_once_generator(*args, **kwargs):
yielded = set()
for item in iterator(*args, **kwargs):
if item not in yielded:
yielded.add(item)
yield item
return yield_once_generator | def function[yield_once, parameter[iterator]]:
constant[
Decorator to make an iterator returned by a method yield each result only
once.
>>> @yield_once
... def generate_list(foo):
... return foo
>>> list(generate_list([1, 2, 1]))
[1, 2]
:param iterator: Any method that returns an iterator
    :return: A method returning an iterator
    that yields every result at most once.
]
def function[yield_once_generator, parameter[]]:
variable[yielded] assign[=] call[name[set], parameter[]]
for taget[name[item]] in starred[call[name[iterator], parameter[<ast.Starred object at 0x7da1b28ec760>]]] begin[:]
if compare[name[item] <ast.NotIn object at 0x7da2590d7190> name[yielded]] begin[:]
call[name[yielded].add, parameter[name[item]]]
<ast.Yield object at 0x7da1b287bf70>
return[name[yield_once_generator]] | keyword[def] identifier[yield_once] ( identifier[iterator] ):
literal[string]
@ identifier[wraps] ( identifier[iterator] )
keyword[def] identifier[yield_once_generator] (* identifier[args] ,** identifier[kwargs] ):
identifier[yielded] = identifier[set] ()
keyword[for] identifier[item] keyword[in] identifier[iterator] (* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[item] keyword[not] keyword[in] identifier[yielded] :
identifier[yielded] . identifier[add] ( identifier[item] )
keyword[yield] identifier[item]
keyword[return] identifier[yield_once_generator] | def yield_once(iterator):
"""
Decorator to make an iterator returned by a method yield each result only
once.
>>> @yield_once
... def generate_list(foo):
... return foo
>>> list(generate_list([1, 2, 1]))
[1, 2]
:param iterator: Any method that returns an iterator
    :return: A method returning an iterator
    that yields every result at most once.
"""
@wraps(iterator)
def yield_once_generator(*args, **kwargs):
yielded = set()
for item in iterator(*args, **kwargs):
if item not in yielded:
yielded.add(item)
yield item # depends on [control=['if'], data=['item', 'yielded']] # depends on [control=['for'], data=['item']]
return yield_once_generator |
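
A quick usage sketch of the decorator above applied to a generator that can emit duplicates; `find_tags` and its input are invented for illustration, and the tracking set means yielded items must be hashable.

@yield_once
def find_tags(documents):
    # May yield the same tag several times across documents.
    for doc in documents:
        for tag in doc.split():
            yield tag

# Each tag comes out once, in first-seen order.
print(list(find_tags(['red blue', 'blue green'])))  # ['red', 'blue', 'green']
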
def com_google_fonts_check_contour_count(ttFont):
"""Check if each glyph has the recommended amount of contours.
  This check is useful to ensure glyphs aren't incorrectly constructed.
  The desired_glyph_data module contains the 'recommended' contour count
for encoded glyphs. The contour counts are derived from fonts which were
chosen for their quality and unique design decisions for particular glyphs.
In the future, additional glyph data can be included. A good addition would
be the 'recommended' anchor counts for each glyph.
"""
from fontbakery.glyphdata import desired_glyph_data as glyph_data
from fontbakery.utils import (get_font_glyph_data,
pretty_print_list)
# rearrange data structure:
desired_glyph_data = {}
for glyph in glyph_data:
desired_glyph_data[glyph['unicode']] = glyph
bad_glyphs = []
desired_glyph_contours = {f: desired_glyph_data[f]['contours']
for f in desired_glyph_data}
font_glyph_data = get_font_glyph_data(ttFont)
if font_glyph_data is None:
yield FAIL, "This font lacks cmap data."
else:
font_glyph_contours = {f['unicode']: list(f['contours'])[0]
for f in font_glyph_data}
shared_glyphs = set(desired_glyph_contours) & set(font_glyph_contours)
for glyph in shared_glyphs:
if font_glyph_contours[glyph] not in desired_glyph_contours[glyph]:
bad_glyphs.append([glyph,
font_glyph_contours[glyph],
desired_glyph_contours[glyph]])
if len(bad_glyphs) > 0:
cmap = ttFont['cmap'].getcmap(PlatformID.WINDOWS,
WindowsEncodingID.UNICODE_BMP).cmap
bad_glyphs_name = [("Glyph name: {}\t"
"Contours detected: {}\t"
"Expected: {}").format(cmap[name],
count,
pretty_print_list(expected,
shorten=None,
glue="or"))
for name, count, expected in bad_glyphs]
yield WARN, (("This check inspects the glyph outlines and detects the"
" total number of contours in each of them. The expected"
" values are infered from the typical ammounts of"
" contours observed in a large collection of reference"
" font families. The divergences listed below may simply"
" indicate a significantly different design on some of"
" your glyphs. On the other hand, some of these may flag"
" actual bugs in the font such as glyphs mapped to an"
" incorrect codepoint. Please consider reviewing"
" the design and codepoint assignment of these to make"
" sure they are correct.\n"
"\n"
"The following glyphs do not have the recommended"
" number of contours:\n"
"\n{}").format('\n'.join(bad_glyphs_name)))
else:
yield PASS, "All glyphs have the recommended amount of contours" | def function[com_google_fonts_check_contour_count, parameter[ttFont]]:
constant[Check if each glyph has the recommended amount of contours.
  This check is useful to ensure glyphs aren't incorrectly constructed.
  The desired_glyph_data module contains the 'recommended' contour count
for encoded glyphs. The contour counts are derived from fonts which were
chosen for their quality and unique design decisions for particular glyphs.
In the future, additional glyph data can be included. A good addition would
be the 'recommended' anchor counts for each glyph.
]
from relative_module[fontbakery.glyphdata] import module[desired_glyph_data]
from relative_module[fontbakery.utils] import module[get_font_glyph_data], module[pretty_print_list]
variable[desired_glyph_data] assign[=] dictionary[[], []]
for taget[name[glyph]] in starred[name[glyph_data]] begin[:]
call[name[desired_glyph_data]][call[name[glyph]][constant[unicode]]] assign[=] name[glyph]
variable[bad_glyphs] assign[=] list[[]]
variable[desired_glyph_contours] assign[=] <ast.DictComp object at 0x7da1b12945b0>
variable[font_glyph_data] assign[=] call[name[get_font_glyph_data], parameter[name[ttFont]]]
if compare[name[font_glyph_data] is constant[None]] begin[:]
<ast.Yield object at 0x7da1b1294070> | keyword[def] identifier[com_google_fonts_check_contour_count] ( identifier[ttFont] ):
literal[string]
keyword[from] identifier[fontbakery] . identifier[glyphdata] keyword[import] identifier[desired_glyph_data] keyword[as] identifier[glyph_data]
keyword[from] identifier[fontbakery] . identifier[utils] keyword[import] ( identifier[get_font_glyph_data] ,
identifier[pretty_print_list] )
identifier[desired_glyph_data] ={}
keyword[for] identifier[glyph] keyword[in] identifier[glyph_data] :
identifier[desired_glyph_data] [ identifier[glyph] [ literal[string] ]]= identifier[glyph]
identifier[bad_glyphs] =[]
identifier[desired_glyph_contours] ={ identifier[f] : identifier[desired_glyph_data] [ identifier[f] ][ literal[string] ]
keyword[for] identifier[f] keyword[in] identifier[desired_glyph_data] }
identifier[font_glyph_data] = identifier[get_font_glyph_data] ( identifier[ttFont] )
keyword[if] identifier[font_glyph_data] keyword[is] keyword[None] :
keyword[yield] identifier[FAIL] , literal[string]
keyword[else] :
identifier[font_glyph_contours] ={ identifier[f] [ literal[string] ]: identifier[list] ( identifier[f] [ literal[string] ])[ literal[int] ]
keyword[for] identifier[f] keyword[in] identifier[font_glyph_data] }
identifier[shared_glyphs] = identifier[set] ( identifier[desired_glyph_contours] )& identifier[set] ( identifier[font_glyph_contours] )
keyword[for] identifier[glyph] keyword[in] identifier[shared_glyphs] :
keyword[if] identifier[font_glyph_contours] [ identifier[glyph] ] keyword[not] keyword[in] identifier[desired_glyph_contours] [ identifier[glyph] ]:
identifier[bad_glyphs] . identifier[append] ([ identifier[glyph] ,
identifier[font_glyph_contours] [ identifier[glyph] ],
identifier[desired_glyph_contours] [ identifier[glyph] ]])
keyword[if] identifier[len] ( identifier[bad_glyphs] )> literal[int] :
identifier[cmap] = identifier[ttFont] [ literal[string] ]. identifier[getcmap] ( identifier[PlatformID] . identifier[WINDOWS] ,
identifier[WindowsEncodingID] . identifier[UNICODE_BMP] ). identifier[cmap]
identifier[bad_glyphs_name] =[( literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[cmap] [ identifier[name] ],
identifier[count] ,
identifier[pretty_print_list] ( identifier[expected] ,
identifier[shorten] = keyword[None] ,
identifier[glue] = literal[string] ))
keyword[for] identifier[name] , identifier[count] , identifier[expected] keyword[in] identifier[bad_glyphs] ]
keyword[yield] identifier[WARN] ,(( literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] ( literal[string] . identifier[join] ( identifier[bad_glyphs_name] )))
keyword[else] :
keyword[yield] identifier[PASS] , literal[string] | def com_google_fonts_check_contour_count(ttFont):
"""Check if each glyph has the recommended amount of contours.
  This check is useful to ensure glyphs aren't incorrectly constructed.
  The desired_glyph_data module contains the 'recommended' contour count
for encoded glyphs. The contour counts are derived from fonts which were
chosen for their quality and unique design decisions for particular glyphs.
In the future, additional glyph data can be included. A good addition would
be the 'recommended' anchor counts for each glyph.
"""
from fontbakery.glyphdata import desired_glyph_data as glyph_data
from fontbakery.utils import get_font_glyph_data, pretty_print_list
# rearrange data structure:
desired_glyph_data = {}
for glyph in glyph_data:
desired_glyph_data[glyph['unicode']] = glyph # depends on [control=['for'], data=['glyph']]
bad_glyphs = []
desired_glyph_contours = {f: desired_glyph_data[f]['contours'] for f in desired_glyph_data}
font_glyph_data = get_font_glyph_data(ttFont)
if font_glyph_data is None:
yield (FAIL, 'This font lacks cmap data.') # depends on [control=['if'], data=[]]
else:
font_glyph_contours = {f['unicode']: list(f['contours'])[0] for f in font_glyph_data}
shared_glyphs = set(desired_glyph_contours) & set(font_glyph_contours)
for glyph in shared_glyphs:
if font_glyph_contours[glyph] not in desired_glyph_contours[glyph]:
bad_glyphs.append([glyph, font_glyph_contours[glyph], desired_glyph_contours[glyph]]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['glyph']]
if len(bad_glyphs) > 0:
cmap = ttFont['cmap'].getcmap(PlatformID.WINDOWS, WindowsEncodingID.UNICODE_BMP).cmap
bad_glyphs_name = ['Glyph name: {}\tContours detected: {}\tExpected: {}'.format(cmap[name], count, pretty_print_list(expected, shorten=None, glue='or')) for (name, count, expected) in bad_glyphs]
      yield (WARN, 'This check inspects the glyph outlines and detects the total number of contours in each of them. The expected values are inferred from the typical amounts of contours observed in a large collection of reference font families. The divergences listed below may simply indicate a significantly different design on some of your glyphs. On the other hand, some of these may flag actual bugs in the font such as glyphs mapped to an incorrect codepoint. Please consider reviewing the design and codepoint assignment of these to make sure they are correct.\n\nThe following glyphs do not have the recommended number of contours:\n\n{}'.format('\n'.join(bad_glyphs_name))) # depends on [control=['if'], data=[]]
else:
yield (PASS, 'All glyphs have the recommended amount of contours') |
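
The core comparison the check performs, reduced to a standalone toy with invented numbers: expected contour counts per codepoint against what a font actually reports.

# 'A' (U+0041) is allowed 1 or 2 contours, 'B' (U+0042) must have exactly 2.
desired = {0x0041: {1, 2}, 0x0042: {2}}
actual = {0x0041: 2, 0x0042: 3}

shared = set(desired) & set(actual)
bad = [(cp, actual[cp], desired[cp]) for cp in shared
       if actual[cp] not in desired[cp]]
print(bad)  # [(66, 3, {2})] -- only 'B' diverges from the expected count
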
def _compute_delta_beta(self, df, events, start, stop, weights):
""" approximate change in betas as a result of excluding ith row"""
score_residuals = self._compute_residuals(df, events, start, stop, weights) * weights[:, None]
naive_var = inv(self._hessian_)
delta_betas = -score_residuals.dot(naive_var) / self._norm_std.values
return delta_betas | def function[_compute_delta_beta, parameter[self, df, events, start, stop, weights]]:
constant[ approximate change in betas as a result of excluding ith row]
variable[score_residuals] assign[=] binary_operation[call[name[self]._compute_residuals, parameter[name[df], name[events], name[start], name[stop], name[weights]]] * call[name[weights]][tuple[[<ast.Slice object at 0x7da18f09ca00>, <ast.Constant object at 0x7da18f09e470>]]]]
variable[naive_var] assign[=] call[name[inv], parameter[name[self]._hessian_]]
variable[delta_betas] assign[=] binary_operation[<ast.UnaryOp object at 0x7da18f09c550> / name[self]._norm_std.values]
return[name[delta_betas]] | keyword[def] identifier[_compute_delta_beta] ( identifier[self] , identifier[df] , identifier[events] , identifier[start] , identifier[stop] , identifier[weights] ):
literal[string]
identifier[score_residuals] = identifier[self] . identifier[_compute_residuals] ( identifier[df] , identifier[events] , identifier[start] , identifier[stop] , identifier[weights] )* identifier[weights] [:, keyword[None] ]
identifier[naive_var] = identifier[inv] ( identifier[self] . identifier[_hessian_] )
identifier[delta_betas] =- identifier[score_residuals] . identifier[dot] ( identifier[naive_var] )/ identifier[self] . identifier[_norm_std] . identifier[values]
keyword[return] identifier[delta_betas] | def _compute_delta_beta(self, df, events, start, stop, weights):
""" approximate change in betas as a result of excluding ith row"""
score_residuals = self._compute_residuals(df, events, start, stop, weights) * weights[:, None]
naive_var = inv(self._hessian_)
delta_betas = -score_residuals.dot(naive_var) / self._norm_std.values
return delta_betas |
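
A numpy sketch of the same arithmetic with invented inputs; in the real estimator the score residuals, Hessian, and normalization all come from the fitted model.

import numpy as np
from numpy.linalg import inv

# Hypothetical fitted quantities: 3 subjects, 2 covariates.
score_residuals = np.array([[0.2, -0.1], [-0.3, 0.4], [0.1, 0.05]])
weights = np.array([1.0, 2.0, 1.0])
hessian = np.array([[-4.0, 0.5], [0.5, -3.0]])
norm_std = np.array([1.5, 0.8])

# Weight the residuals, multiply by the naive covariance, undo normalization.
naive_var = inv(hessian)
delta_betas = -(score_residuals * weights[:, None]).dot(naive_var) / norm_std
print(delta_betas.shape)  # (3, 2): approximate change in each beta per row dropped
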
def delete_issue(self, issue_id, params=None):
"""Deletes an individual issue.
If the issue has sub-tasks you must set the deleteSubtasks=true parameter to delete the issue. You cannot delete
an issue without deleting its sub-tasks.
Args:
issue_id:
params:
Returns:
"""
return self._delete(self.API_URL + 'issue/{}'.format(issue_id), params=params) | def function[delete_issue, parameter[self, issue_id, params]]:
constant[Deletes an individual issue.
If the issue has sub-tasks you must set the deleteSubtasks=true parameter to delete the issue. You cannot delete
an issue without deleting its sub-tasks.
Args:
issue_id:
params:
Returns:
]
return[call[name[self]._delete, parameter[binary_operation[name[self].API_URL + call[constant[issue/{}].format, parameter[name[issue_id]]]]]]] | keyword[def] identifier[delete_issue] ( identifier[self] , identifier[issue_id] , identifier[params] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_delete] ( identifier[self] . identifier[API_URL] + literal[string] . identifier[format] ( identifier[issue_id] ), identifier[params] = identifier[params] ) | def delete_issue(self, issue_id, params=None):
"""Deletes an individual issue.
If the issue has sub-tasks you must set the deleteSubtasks=true parameter to delete the issue. You cannot delete
an issue without deleting its sub-tasks.
Args:
issue_id:
params:
Returns:
"""
return self._delete(self.API_URL + 'issue/{}'.format(issue_id), params=params) |
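
A hedged usage sketch; the client construction is hypothetical, and the `deleteSubtasks` flag mirrors the docstring's note about issues that have sub-tasks.

client = Jira(base_url='https://jira.example.com', token='...')  # hypothetical setup
# Without the flag, deleting an issue that has sub-tasks fails.
client.delete_issue('PROJ-123', params={'deleteSubtasks': 'true'})
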
def _get_width(self, width, max_width):
"""
Helper function to figure out the actual column width from the various options.
:param width: The size of column requested
:param max_width: The maximum width allowed for this widget.
:return: the integer width of the column in characters
"""
if isinstance(width, float):
return int(max_width * width)
if width == 0:
width = (max_width - sum(self._spacing) -
sum([self._get_width(x, max_width) for x in self._columns if x != 0]))
return width | def function[_get_width, parameter[self, width, max_width]]:
constant[
Helper function to figure out the actual column width from the various options.
:param width: The size of column requested
:param max_width: The maximum width allowed for this widget.
:return: the integer width of the column in characters
]
if call[name[isinstance], parameter[name[width], name[float]]] begin[:]
return[call[name[int], parameter[binary_operation[name[max_width] * name[width]]]]]
if compare[name[width] equal[==] constant[0]] begin[:]
variable[width] assign[=] binary_operation[binary_operation[name[max_width] - call[name[sum], parameter[name[self]._spacing]]] - call[name[sum], parameter[<ast.ListComp object at 0x7da1b1d34130>]]]
return[name[width]] | keyword[def] identifier[_get_width] ( identifier[self] , identifier[width] , identifier[max_width] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[width] , identifier[float] ):
keyword[return] identifier[int] ( identifier[max_width] * identifier[width] )
keyword[if] identifier[width] == literal[int] :
identifier[width] =( identifier[max_width] - identifier[sum] ( identifier[self] . identifier[_spacing] )-
identifier[sum] ([ identifier[self] . identifier[_get_width] ( identifier[x] , identifier[max_width] ) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[_columns] keyword[if] identifier[x] != literal[int] ]))
keyword[return] identifier[width] | def _get_width(self, width, max_width):
"""
Helper function to figure out the actual column width from the various options.
:param width: The size of column requested
:param max_width: The maximum width allowed for this widget.
:return: the integer width of the column in characters
"""
if isinstance(width, float):
return int(max_width * width) # depends on [control=['if'], data=[]]
if width == 0:
width = max_width - sum(self._spacing) - sum([self._get_width(x, max_width) for x in self._columns if x != 0]) # depends on [control=['if'], data=['width']]
return width |
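
Worked arithmetic for the three width forms the helper accepts, under an assumed layout of `_columns = [10, 0.25, 0]` and `_spacing = [1, 1]` on an 80-character widget.

max_width = 80
fixed = 10                        # int widths are used as-is
fraction = int(max_width * 0.25)  # float widths are a share: 20 characters
# A single 0 width absorbs whatever is left after spacing and other columns.
remainder = max_width - (1 + 1) - (fixed + fraction)
print(fixed, fraction, remainder)  # 10 20 48
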
def delete(self, username, type):
"""Delete an LDAP user."""
self.client.delete(self.__distinguished_name(type, username=username)) | def function[delete, parameter[self, username, type]]:
constant[Delete an LDAP user.]
call[name[self].client.delete, parameter[call[name[self].__distinguished_name, parameter[name[type]]]]] | keyword[def] identifier[delete] ( identifier[self] , identifier[username] , identifier[type] ):
literal[string]
identifier[self] . identifier[client] . identifier[delete] ( identifier[self] . identifier[__distinguished_name] ( identifier[type] , identifier[username] = identifier[username] )) | def delete(self, username, type):
"""Delete an LDAP user."""
self.client.delete(self.__distinguished_name(type, username=username)) |
def run_healthchecks(self):
"""
Runs all registered healthchecks and returns a list of
HealthcheckResponse.
"""
if not self._registry_loaded:
self.load_healthchecks()
def get_healthcheck_name(hc):
if hasattr(hc, 'name'):
return hc.name
return hc.__name__
responses = []
for healthcheck in self._registry:
try:
if inspect.isclass(healthcheck):
healthcheck = healthcheck()
response = healthcheck()
if isinstance(response, bool):
response = HealthcheckResponse(
name=get_healthcheck_name(healthcheck),
status=response,
)
except Exception as e:
response = HealthcheckResponse(
name=get_healthcheck_name(healthcheck),
status=False,
exception=str(e),
exception_class=e.__class__.__name__,
)
responses.append(response)
return responses | def function[run_healthchecks, parameter[self]]:
constant[
Runs all registered healthchecks and returns a list of
HealthcheckResponse.
]
if <ast.UnaryOp object at 0x7da1b28dde70> begin[:]
call[name[self].load_healthchecks, parameter[]]
def function[get_healthcheck_name, parameter[hc]]:
if call[name[hasattr], parameter[name[hc], constant[name]]] begin[:]
return[name[hc].name]
return[name[hc].__name__]
variable[responses] assign[=] list[[]]
for taget[name[healthcheck]] in starred[name[self]._registry] begin[:]
<ast.Try object at 0x7da1b28dc0d0>
call[name[responses].append, parameter[name[response]]]
return[name[responses]] | keyword[def] identifier[run_healthchecks] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_registry_loaded] :
identifier[self] . identifier[load_healthchecks] ()
keyword[def] identifier[get_healthcheck_name] ( identifier[hc] ):
keyword[if] identifier[hasattr] ( identifier[hc] , literal[string] ):
keyword[return] identifier[hc] . identifier[name]
keyword[return] identifier[hc] . identifier[__name__]
identifier[responses] =[]
keyword[for] identifier[healthcheck] keyword[in] identifier[self] . identifier[_registry] :
keyword[try] :
keyword[if] identifier[inspect] . identifier[isclass] ( identifier[healthcheck] ):
identifier[healthcheck] = identifier[healthcheck] ()
identifier[response] = identifier[healthcheck] ()
keyword[if] identifier[isinstance] ( identifier[response] , identifier[bool] ):
identifier[response] = identifier[HealthcheckResponse] (
identifier[name] = identifier[get_healthcheck_name] ( identifier[healthcheck] ),
identifier[status] = identifier[response] ,
)
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[response] = identifier[HealthcheckResponse] (
identifier[name] = identifier[get_healthcheck_name] ( identifier[healthcheck] ),
identifier[status] = keyword[False] ,
identifier[exception] = identifier[str] ( identifier[e] ),
identifier[exception_class] = identifier[e] . identifier[__class__] . identifier[__name__] ,
)
identifier[responses] . identifier[append] ( identifier[response] )
keyword[return] identifier[responses] | def run_healthchecks(self):
"""
Runs all registered healthchecks and returns a list of
HealthcheckResponse.
"""
if not self._registry_loaded:
self.load_healthchecks() # depends on [control=['if'], data=[]]
def get_healthcheck_name(hc):
if hasattr(hc, 'name'):
return hc.name # depends on [control=['if'], data=[]]
return hc.__name__
responses = []
for healthcheck in self._registry:
try:
if inspect.isclass(healthcheck):
healthcheck = healthcheck() # depends on [control=['if'], data=[]]
response = healthcheck()
if isinstance(response, bool):
response = HealthcheckResponse(name=get_healthcheck_name(healthcheck), status=response) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
response = HealthcheckResponse(name=get_healthcheck_name(healthcheck), status=False, exception=str(e), exception_class=e.__class__.__name__) # depends on [control=['except'], data=['e']]
responses.append(response) # depends on [control=['for'], data=['healthcheck']]
return responses |
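
A sketch of the two callable shapes the runner above accepts, assuming `runner` hosts the method and poking its private registry directly (a real registration API is not shown in this snippet).

def database_ping():
    # A bare bool return is wrapped into a HealthcheckResponse by the runner.
    return True

class CacheCheck:
    name = 'cache'
    def __call__(self):
        raise RuntimeError('cache unreachable')  # becomes a failed response

runner._registry = [database_ping, CacheCheck]
runner._registry_loaded = True
for response in runner.run_healthchecks():
    print(response.name, response.status)
# database_ping True, then cache False with the exception details captured
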
def message(self, pubnub, message):
"""
        Called when a new message is received on one of the subscribed
        to channels.
        Process the message and call the channel's callback function(s).
"""
try:
json_data = json.dumps(message.message.get('data'))
except AttributeError:
json_data = message.message
for func in SUBSCRIPTIONS[message.channel]:
# This means pubnub couldn't get the current state of the channel
# The pull_url is the location to pull the current state from.
# Returning None here to have the calling program handle this.
if 'pull_url' in json_data:
func(None)
else:
func(json.loads(json_data)) | def function[message, parameter[self, pubnub, message]]:
constant[
    Called when a new message is received on one of the subscribed
    to channels.
    Process the message and call the channel's callback function(s).
]
<ast.Try object at 0x7da20c76cc40>
for taget[name[func]] in starred[call[name[SUBSCRIPTIONS]][name[message].channel]] begin[:]
if compare[constant[pull_url] in name[json_data]] begin[:]
call[name[func], parameter[constant[None]]] | keyword[def] identifier[message] ( identifier[self] , identifier[pubnub] , identifier[message] ):
literal[string]
keyword[try] :
identifier[json_data] = identifier[json] . identifier[dumps] ( identifier[message] . identifier[message] . identifier[get] ( literal[string] ))
keyword[except] identifier[AttributeError] :
identifier[json_data] = identifier[message] . identifier[message]
keyword[for] identifier[func] keyword[in] identifier[SUBSCRIPTIONS] [ identifier[message] . identifier[channel] ]:
keyword[if] literal[string] keyword[in] identifier[json_data] :
identifier[func] ( keyword[None] )
keyword[else] :
identifier[func] ( identifier[json] . identifier[loads] ( identifier[json_data] )) | def message(self, pubnub, message):
"""
Called when a new message is recevied on one of the subscribed
to channels.
Proccess the message and call the channels callback function(s).
"""
try:
json_data = json.dumps(message.message.get('data')) # depends on [control=['try'], data=[]]
except AttributeError:
json_data = message.message # depends on [control=['except'], data=[]]
for func in SUBSCRIPTIONS[message.channel]:
# This means pubnub couldn't get the current state of the channel
# The pull_url is the location to pull the current state from.
# Returning None here to have the calling program handle this.
if 'pull_url' in json_data:
func(None) # depends on [control=['if'], data=[]]
else:
func(json.loads(json_data)) # depends on [control=['for'], data=['func']] |
def get_dev_folder(path=sys.prefix):
"""
:param str path: Path to examine
:return str|None: Path to development build folder, such as .venv, .tox etc, if any
"""
if not path or len(path) <= 4:
return None
dirpath, basename = os.path.split(path)
if basename in DEV_FOLDERS:
return path
return get_dev_folder(dirpath) | def function[get_dev_folder, parameter[path]]:
constant[
:param str path: Path to examine
:return str|None: Path to development build folder, such as .venv, .tox etc, if any
]
if <ast.BoolOp object at 0x7da1b2390430> begin[:]
return[constant[None]]
<ast.Tuple object at 0x7da1b2335bd0> assign[=] call[name[os].path.split, parameter[name[path]]]
if compare[name[basename] in name[DEV_FOLDERS]] begin[:]
return[name[path]]
return[call[name[get_dev_folder], parameter[name[dirpath]]]] | keyword[def] identifier[get_dev_folder] ( identifier[path] = identifier[sys] . identifier[prefix] ):
literal[string]
keyword[if] keyword[not] identifier[path] keyword[or] identifier[len] ( identifier[path] )<= literal[int] :
keyword[return] keyword[None]
identifier[dirpath] , identifier[basename] = identifier[os] . identifier[path] . identifier[split] ( identifier[path] )
keyword[if] identifier[basename] keyword[in] identifier[DEV_FOLDERS] :
keyword[return] identifier[path]
keyword[return] identifier[get_dev_folder] ( identifier[dirpath] ) | def get_dev_folder(path=sys.prefix):
"""
:param str path: Path to examine
:return str|None: Path to development build folder, such as .venv, .tox etc, if any
"""
if not path or len(path) <= 4:
return None # depends on [control=['if'], data=[]]
(dirpath, basename) = os.path.split(path)
if basename in DEV_FOLDERS:
return path # depends on [control=['if'], data=[]]
return get_dev_folder(dirpath) |
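
Usage sketch, assuming the module-level `DEV_FOLDERS` constant holds names such as '.venv' and '.tox' (the exact set is not shown in this snippet).

DEV_FOLDERS = ('.venv', '.tox', 'venv')  # stand-in for the real constant

print(get_dev_folder('/home/user/project/.venv/bin'))
# -> '/home/user/project/.venv': the walk stops at the first known folder name
print(get_dev_folder('/usr'))
# -> None: paths of length <= 4 bail out before any recursion
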
def detect_encoding(fp, default=None):
"""Detect the cahracter encoding of a file.
:param fp: Open Python file pointer.
:param default: Fallback encoding to use.
:returns: The detected encoding.
.. note:: The file pointer is returned at its original read position.
"""
init_pos = fp.tell()
try:
sample = fp.read(
current_app.config.get('PREVIEWER_CHARDET_BYTES', 1024))
# Result contains 'confidence' and 'encoding'
result = cchardet.detect(sample)
threshold = current_app.config.get('PREVIEWER_CHARDET_CONFIDENCE', 0.9)
if result.get('confidence', 0) > threshold:
return result.get('encoding', default)
else:
return default
except Exception:
current_app.logger.warning('Encoding detection failed.', exc_info=True)
return default
finally:
fp.seek(init_pos) | def function[detect_encoding, parameter[fp, default]]:
    constant[Detect the character encoding of a file.
:param fp: Open Python file pointer.
:param default: Fallback encoding to use.
:returns: The detected encoding.
.. note:: The file pointer is returned at its original read position.
]
variable[init_pos] assign[=] call[name[fp].tell, parameter[]]
<ast.Try object at 0x7da1b1ad0dc0> | keyword[def] identifier[detect_encoding] ( identifier[fp] , identifier[default] = keyword[None] ):
literal[string]
identifier[init_pos] = identifier[fp] . identifier[tell] ()
keyword[try] :
identifier[sample] = identifier[fp] . identifier[read] (
identifier[current_app] . identifier[config] . identifier[get] ( literal[string] , literal[int] ))
identifier[result] = identifier[cchardet] . identifier[detect] ( identifier[sample] )
identifier[threshold] = identifier[current_app] . identifier[config] . identifier[get] ( literal[string] , literal[int] )
keyword[if] identifier[result] . identifier[get] ( literal[string] , literal[int] )> identifier[threshold] :
keyword[return] identifier[result] . identifier[get] ( literal[string] , identifier[default] )
keyword[else] :
keyword[return] identifier[default]
keyword[except] identifier[Exception] :
identifier[current_app] . identifier[logger] . identifier[warning] ( literal[string] , identifier[exc_info] = keyword[True] )
keyword[return] identifier[default]
keyword[finally] :
identifier[fp] . identifier[seek] ( identifier[init_pos] ) | def detect_encoding(fp, default=None):
"""Detect the cahracter encoding of a file.
:param fp: Open Python file pointer.
:param default: Fallback encoding to use.
:returns: The detected encoding.
.. note:: The file pointer is returned at its original read position.
"""
init_pos = fp.tell()
try:
sample = fp.read(current_app.config.get('PREVIEWER_CHARDET_BYTES', 1024))
# Result contains 'confidence' and 'encoding'
result = cchardet.detect(sample)
threshold = current_app.config.get('PREVIEWER_CHARDET_CONFIDENCE', 0.9)
if result.get('confidence', 0) > threshold:
return result.get('encoding', default) # depends on [control=['if'], data=[]]
else:
return default # depends on [control=['try'], data=[]]
except Exception:
current_app.logger.warning('Encoding detection failed.', exc_info=True)
return default # depends on [control=['except'], data=[]]
finally:
fp.seek(init_pos) |
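
Outside Flask, the heart of the helper above is a confidence-gated cchardet call; a standalone sketch with the sample size and threshold matching the config defaults read in the code.

import io
import cchardet

def sniff_encoding(fp, default='utf-8', nbytes=1024, threshold=0.9):
    pos = fp.tell()
    try:
        result = cchardet.detect(fp.read(nbytes))
        if result.get('confidence', 0) > threshold:
            return result.get('encoding', default)
        return default
    finally:
        fp.seek(pos)  # leave the file where we found it

with io.open('data.txt', 'rb') as fp:  # hypothetical file
    print(sniff_encoding(fp))
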
def removeAllEntitlements(self, appId):
"""
This operation removes all entitlements from the portal for ArcGIS
Pro or additional products such as Navigator for ArcGIS and revokes
all entitlements assigned to users for the specified product. The
portal is no longer a licensing portal for that product.
License assignments are retained on disk. Therefore, if you decide
to configure this portal as a licensing portal for the product
again in the future, all licensing assignments will be available in
the website.
Parameters:
appId - The identifier for the application for which the
entitlements are being removed.
"""
params = {
"f" : "json",
"appId" : appId
}
url = self._url + "/licenses/removeAllEntitlements"
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | def function[removeAllEntitlements, parameter[self, appId]]:
constant[
This operation removes all entitlements from the portal for ArcGIS
Pro or additional products such as Navigator for ArcGIS and revokes
all entitlements assigned to users for the specified product. The
portal is no longer a licensing portal for that product.
License assignments are retained on disk. Therefore, if you decide
to configure this portal as a licensing portal for the product
again in the future, all licensing assignments will be available in
the website.
Parameters:
appId - The identifier for the application for which the
entitlements are being removed.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da2054a5630>, <ast.Constant object at 0x7da2054a58d0>], [<ast.Constant object at 0x7da2054a5a20>, <ast.Name object at 0x7da2054a5900>]]
variable[url] assign[=] binary_operation[name[self]._url + constant[/licenses/removeAllEntitlements]]
return[call[name[self]._post, parameter[]]] | keyword[def] identifier[removeAllEntitlements] ( identifier[self] , identifier[appId] ):
literal[string]
identifier[params] ={
literal[string] : literal[string] ,
literal[string] : identifier[appId]
}
identifier[url] = identifier[self] . identifier[_url] + literal[string]
keyword[return] identifier[self] . identifier[_post] ( identifier[url] = identifier[url] ,
identifier[param_dict] = identifier[params] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] ) | def removeAllEntitlements(self, appId):
"""
This operation removes all entitlements from the portal for ArcGIS
Pro or additional products such as Navigator for ArcGIS and revokes
all entitlements assigned to users for the specified product. The
portal is no longer a licensing portal for that product.
License assignments are retained on disk. Therefore, if you decide
to configure this portal as a licensing portal for the product
again in the future, all licensing assignments will be available in
the website.
Parameters:
appId - The identifier for the application for which the
entitlements are being removed.
"""
params = {'f': 'json', 'appId': appId}
url = self._url + '/licenses/removeAllEntitlements'
return self._post(url=url, param_dict=params, proxy_url=self._proxy_url, proxy_port=self._proxy_port) |
def match_multiline(self, text, delimiter, in_state, style):
"""Do highlighting of multi-line strings. ``delimiter`` should be a
``QRegExp`` for triple-single-quotes or triple-double-quotes, and
``in_state`` should be a unique integer to represent the corresponding
state changes when inside those strings. Returns True if we're still
inside a multi-line string when this function is finished.
"""
# If inside triple-single quotes, start at 0
if self.previousBlockState() == in_state:
start = 0
add = 0
# Otherwise, look for the delimiter on this line
else:
start = delimiter.indexIn(text)
# Move past this match
add = delimiter.matchedLength()
# As long as there's a delimiter match on this line...
while start >= 0:
# Look for the ending delimiter
end = delimiter.indexIn(text, start + add)
# Ending delimiter on this line?
if end >= add:
length = end - start + add + delimiter.matchedLength()
self.setCurrentBlockState(0)
# No; multi-line string
else:
self.setCurrentBlockState(in_state)
length = len(text) - start + add
# Apply formatting
self.setFormat(start, length, style)
# Look for the next match
start = delimiter.indexIn(text, start + length)
# Return True if still inside a multi-line string, False otherwise
if self.currentBlockState() == in_state:
return True
else:
return False | def function[match_multiline, parameter[self, text, delimiter, in_state, style]]:
constant[Do highlighting of multi-line strings. ``delimiter`` should be a
``QRegExp`` for triple-single-quotes or triple-double-quotes, and
``in_state`` should be a unique integer to represent the corresponding
state changes when inside those strings. Returns True if we're still
inside a multi-line string when this function is finished.
]
if compare[call[name[self].previousBlockState, parameter[]] equal[==] name[in_state]] begin[:]
variable[start] assign[=] constant[0]
variable[add] assign[=] constant[0]
while compare[name[start] greater_or_equal[>=] constant[0]] begin[:]
variable[end] assign[=] call[name[delimiter].indexIn, parameter[name[text], binary_operation[name[start] + name[add]]]]
if compare[name[end] greater_or_equal[>=] name[add]] begin[:]
variable[length] assign[=] binary_operation[binary_operation[binary_operation[name[end] - name[start]] + name[add]] + call[name[delimiter].matchedLength, parameter[]]]
call[name[self].setCurrentBlockState, parameter[constant[0]]]
call[name[self].setFormat, parameter[name[start], name[length], name[style]]]
variable[start] assign[=] call[name[delimiter].indexIn, parameter[name[text], binary_operation[name[start] + name[length]]]]
if compare[call[name[self].currentBlockState, parameter[]] equal[==] name[in_state]] begin[:]
return[constant[True]] | keyword[def] identifier[match_multiline] ( identifier[self] , identifier[text] , identifier[delimiter] , identifier[in_state] , identifier[style] ):
literal[string]
keyword[if] identifier[self] . identifier[previousBlockState] ()== identifier[in_state] :
identifier[start] = literal[int]
identifier[add] = literal[int]
keyword[else] :
identifier[start] = identifier[delimiter] . identifier[indexIn] ( identifier[text] )
identifier[add] = identifier[delimiter] . identifier[matchedLength] ()
keyword[while] identifier[start] >= literal[int] :
identifier[end] = identifier[delimiter] . identifier[indexIn] ( identifier[text] , identifier[start] + identifier[add] )
keyword[if] identifier[end] >= identifier[add] :
identifier[length] = identifier[end] - identifier[start] + identifier[add] + identifier[delimiter] . identifier[matchedLength] ()
identifier[self] . identifier[setCurrentBlockState] ( literal[int] )
keyword[else] :
identifier[self] . identifier[setCurrentBlockState] ( identifier[in_state] )
identifier[length] = identifier[len] ( identifier[text] )- identifier[start] + identifier[add]
identifier[self] . identifier[setFormat] ( identifier[start] , identifier[length] , identifier[style] )
identifier[start] = identifier[delimiter] . identifier[indexIn] ( identifier[text] , identifier[start] + identifier[length] )
keyword[if] identifier[self] . identifier[currentBlockState] ()== identifier[in_state] :
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def match_multiline(self, text, delimiter, in_state, style):
"""Do highlighting of multi-line strings. ``delimiter`` should be a
``QRegExp`` for triple-single-quotes or triple-double-quotes, and
``in_state`` should be a unique integer to represent the corresponding
state changes when inside those strings. Returns True if we're still
inside a multi-line string when this function is finished.
""" # If inside triple-single quotes, start at 0
if self.previousBlockState() == in_state:
start = 0
add = 0 # depends on [control=['if'], data=[]]
else: # Otherwise, look for the delimiter on this line
start = delimiter.indexIn(text) # Move past this match
add = delimiter.matchedLength() # As long as there's a delimiter match on this line...
while start >= 0: # Look for the ending delimiter
end = delimiter.indexIn(text, start + add) # Ending delimiter on this line?
if end >= add:
length = end - start + add + delimiter.matchedLength()
self.setCurrentBlockState(0) # depends on [control=['if'], data=['end', 'add']]
else: # No; multi-line string
self.setCurrentBlockState(in_state)
length = len(text) - start + add # Apply formatting
self.setFormat(start, length, style) # Look for the next match
start = delimiter.indexIn(text, start + length) # depends on [control=['while'], data=['start']] # Return True if still inside a multi-line string, False otherwise
if self.currentBlockState() == in_state:
return True # depends on [control=['if'], data=[]]
else:
return False |
def search(self, query=None, args=None):
'''query a Singularity registry for a list of images.
If query is None, collections are listed.
EXAMPLE QUERIES:
[empty] list all collections in registry
vsoch do a general search for the expression "vsoch"
vsoch/ list all containers in collection vsoch
/dinosaur list containers across collections called "dinosaur"
vsoch/dinosaur list details of container vsoch/dinosaur
tag "latest" is used by default, and then the most recent
vsoch/dinosaur:tag list details for specific container
'''
if query is not None:
# List all containers in collection query/
if query.endswith('/'): # collection search
return self._collection_search(query)
# List containers across collections called /query
elif query.startswith('/'):
return self._container_search(query, across_collections=True)
# List details of a specific collection container
elif "/" in query or ":" in query:
return self._container_search(query)
# Search collections across all fields
return self._collection_search(query=query)
# Search collections across all fields
return self._search_all() | def function[search, parameter[self, query, args]]:
constant[query a Singularity registry for a list of images.
If query is None, collections are listed.
EXAMPLE QUERIES:
[empty] list all collections in registry
vsoch do a general search for the expression "vsoch"
vsoch/ list all containers in collection vsoch
/dinosaur list containers across collections called "dinosaur"
vsoch/dinosaur list details of container vsoch/dinosaur
tag "latest" is used by default, and then the most recent
vsoch/dinosaur:tag list details for specific container
]
if compare[name[query] is_not constant[None]] begin[:]
if call[name[query].endswith, parameter[constant[/]]] begin[:]
return[call[name[self]._collection_search, parameter[name[query]]]]
return[call[name[self]._collection_search, parameter[]]]
return[call[name[self]._search_all, parameter[]]] | keyword[def] identifier[search] ( identifier[self] , identifier[query] = keyword[None] , identifier[args] = keyword[None] ):
literal[string]
keyword[if] identifier[query] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[query] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[self] . identifier[_collection_search] ( identifier[query] )
keyword[elif] identifier[query] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[self] . identifier[_container_search] ( identifier[query] , identifier[across_collections] = keyword[True] )
keyword[elif] literal[string] keyword[in] identifier[query] keyword[or] literal[string] keyword[in] identifier[query] :
keyword[return] identifier[self] . identifier[_container_search] ( identifier[query] )
keyword[return] identifier[self] . identifier[_collection_search] ( identifier[query] = identifier[query] )
keyword[return] identifier[self] . identifier[_search_all] () | def search(self, query=None, args=None):
"""query a Singularity registry for a list of images.
If query is None, collections are listed.
EXAMPLE QUERIES:
[empty] list all collections in registry
vsoch do a general search for the expression "vsoch"
vsoch/ list all containers in collection vsoch
/dinosaur list containers across collections called "dinosaur"
vsoch/dinosaur list details of container vsoch/dinosaur
tag "latest" is used by default, and then the most recent
vsoch/dinosaur:tag list details for specific container
"""
if query is not None:
# List all containers in collection query/
if query.endswith('/'): # collection search
return self._collection_search(query) # depends on [control=['if'], data=[]]
# List containers across collections called /query
elif query.startswith('/'):
return self._container_search(query, across_collections=True) # depends on [control=['if'], data=[]]
# List details of a specific collection container
elif '/' in query or ':' in query:
return self._container_search(query) # depends on [control=['if'], data=[]]
# Search collections across all fields
return self._collection_search(query=query) # depends on [control=['if'], data=['query']]
# Search collections across all fields
return self._search_all() |
def seek_previous_line(self):
"""
Seek previous line relative to the current file position.
:return: Position of the line or -1 if previous line was not found.
"""
where = self.file.tell()
offset = 0
while True:
if offset == where:
break
read_size = self.read_size if self.read_size <= where else where
self.file.seek(where - offset - read_size, SEEK_SET)
data_len, data = self.read(read_size)
# Consider the following example: Foo\r | \nBar where " | " denotes current position,
# '\nBar' is the read part and 'Foo\r' is the remaining part.
# We should completely consume terminator "\r\n" by reading one extra byte.
if b'\r\n' in self.LINE_TERMINATORS and data[0] == b'\n'[0]:
terminator_where = self.file.tell()
if terminator_where > data_len + 1:
self.file.seek(where - offset - data_len - 1, SEEK_SET)
terminator_len, terminator_data = self.read(1)
if terminator_data[0] == b'\r'[0]:
data_len += 1
data = b'\r' + data
self.file.seek(terminator_where)
data_where = data_len
while data_where > 0:
terminator = self.suffix_line_terminator(data[:data_where])
if terminator and offset == 0 and data_where == data_len:
# The last character is a line terminator that finishes current line. Ignore it.
data_where -= len(terminator)
elif terminator:
self.file.seek(where - offset - (data_len - data_where))
return self.file.tell()
else:
data_where -= 1
offset += data_len
if where == 0:
# Nothing more to read.
return -1
else:
# Very first line.
self.file.seek(0)
return 0 | def function[seek_previous_line, parameter[self]]:
constant[
Seek previous line relative to the current file position.
:return: Position of the line or -1 if previous line was not found.
]
variable[where] assign[=] call[name[self].file.tell, parameter[]]
variable[offset] assign[=] constant[0]
while constant[True] begin[:]
if compare[name[offset] equal[==] name[where]] begin[:]
break
variable[read_size] assign[=] <ast.IfExp object at 0x7da207f029b0>
call[name[self].file.seek, parameter[binary_operation[binary_operation[name[where] - name[offset]] - name[read_size]], name[SEEK_SET]]]
<ast.Tuple object at 0x7da207f03ee0> assign[=] call[name[self].read, parameter[name[read_size]]]
if <ast.BoolOp object at 0x7da207f01450> begin[:]
variable[terminator_where] assign[=] call[name[self].file.tell, parameter[]]
if compare[name[terminator_where] greater[>] binary_operation[name[data_len] + constant[1]]] begin[:]
call[name[self].file.seek, parameter[binary_operation[binary_operation[binary_operation[name[where] - name[offset]] - name[data_len]] - constant[1]], name[SEEK_SET]]]
<ast.Tuple object at 0x7da207f01720> assign[=] call[name[self].read, parameter[constant[1]]]
if compare[call[name[terminator_data]][constant[0]] equal[==] call[constant[b'\r']][constant[0]]] begin[:]
<ast.AugAssign object at 0x7da207f01bd0>
variable[data] assign[=] binary_operation[constant[b'\r'] + name[data]]
call[name[self].file.seek, parameter[name[terminator_where]]]
variable[data_where] assign[=] name[data_len]
while compare[name[data_where] greater[>] constant[0]] begin[:]
variable[terminator] assign[=] call[name[self].suffix_line_terminator, parameter[call[name[data]][<ast.Slice object at 0x7da207f00640>]]]
if <ast.BoolOp object at 0x7da207f037f0> begin[:]
<ast.AugAssign object at 0x7da207f02ef0>
<ast.AugAssign object at 0x7da207f02560>
if compare[name[where] equal[==] constant[0]] begin[:]
return[<ast.UnaryOp object at 0x7da207f032b0>] | keyword[def] identifier[seek_previous_line] ( identifier[self] ):
literal[string]
identifier[where] = identifier[self] . identifier[file] . identifier[tell] ()
identifier[offset] = literal[int]
keyword[while] keyword[True] :
keyword[if] identifier[offset] == identifier[where] :
keyword[break]
identifier[read_size] = identifier[self] . identifier[read_size] keyword[if] identifier[self] . identifier[read_size] <= identifier[where] keyword[else] identifier[where]
identifier[self] . identifier[file] . identifier[seek] ( identifier[where] - identifier[offset] - identifier[read_size] , identifier[SEEK_SET] )
identifier[data_len] , identifier[data] = identifier[self] . identifier[read] ( identifier[read_size] )
keyword[if] literal[string] keyword[in] identifier[self] . identifier[LINE_TERMINATORS] keyword[and] identifier[data] [ literal[int] ]== literal[string] [ literal[int] ]:
identifier[terminator_where] = identifier[self] . identifier[file] . identifier[tell] ()
keyword[if] identifier[terminator_where] > identifier[data_len] + literal[int] :
identifier[self] . identifier[file] . identifier[seek] ( identifier[where] - identifier[offset] - identifier[data_len] - literal[int] , identifier[SEEK_SET] )
identifier[terminator_len] , identifier[terminator_data] = identifier[self] . identifier[read] ( literal[int] )
keyword[if] identifier[terminator_data] [ literal[int] ]== literal[string] [ literal[int] ]:
identifier[data_len] += literal[int]
identifier[data] = literal[string] + identifier[data]
identifier[self] . identifier[file] . identifier[seek] ( identifier[terminator_where] )
identifier[data_where] = identifier[data_len]
keyword[while] identifier[data_where] > literal[int] :
identifier[terminator] = identifier[self] . identifier[suffix_line_terminator] ( identifier[data] [: identifier[data_where] ])
keyword[if] identifier[terminator] keyword[and] identifier[offset] == literal[int] keyword[and] identifier[data_where] == identifier[data_len] :
identifier[data_where] -= identifier[len] ( identifier[terminator] )
keyword[elif] identifier[terminator] :
identifier[self] . identifier[file] . identifier[seek] ( identifier[where] - identifier[offset] -( identifier[data_len] - identifier[data_where] ))
keyword[return] identifier[self] . identifier[file] . identifier[tell] ()
keyword[else] :
identifier[data_where] -= literal[int]
identifier[offset] += identifier[data_len]
keyword[if] identifier[where] == literal[int] :
keyword[return] - literal[int]
keyword[else] :
identifier[self] . identifier[file] . identifier[seek] ( literal[int] )
keyword[return] literal[int] | def seek_previous_line(self):
"""
Seek previous line relative to the current file position.
:return: Position of the line or -1 if previous line was not found.
"""
where = self.file.tell()
offset = 0
while True:
if offset == where:
break # depends on [control=['if'], data=[]]
read_size = self.read_size if self.read_size <= where else where
self.file.seek(where - offset - read_size, SEEK_SET)
(data_len, data) = self.read(read_size)
# Consider the following example: Foo\r | \nBar where " | " denotes current position,
# '\nBar' is the read part and 'Foo\r' is the remaining part.
# We should completely consume terminator "\r\n" by reading one extra byte.
if b'\r\n' in self.LINE_TERMINATORS and data[0] == b'\n'[0]:
terminator_where = self.file.tell()
if terminator_where > data_len + 1:
self.file.seek(where - offset - data_len - 1, SEEK_SET)
(terminator_len, terminator_data) = self.read(1)
if terminator_data[0] == b'\r'[0]:
data_len += 1
data = b'\r' + data # depends on [control=['if'], data=[]]
self.file.seek(terminator_where) # depends on [control=['if'], data=['terminator_where']] # depends on [control=['if'], data=[]]
data_where = data_len
while data_where > 0:
terminator = self.suffix_line_terminator(data[:data_where])
if terminator and offset == 0 and (data_where == data_len):
# The last character is a line terminator that finishes current line. Ignore it.
data_where -= len(terminator) # depends on [control=['if'], data=[]]
elif terminator:
self.file.seek(where - offset - (data_len - data_where))
return self.file.tell() # depends on [control=['if'], data=[]]
else:
data_where -= 1 # depends on [control=['while'], data=['data_where']]
offset += data_len # depends on [control=['while'], data=[]]
if where == 0:
# Nothing more to read.
return -1 # depends on [control=['if'], data=[]]
else:
# Very first line.
self.file.seek(0)
return 0 |
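
A backwards-read loop built on the method above; `Tailer` is a stand-in name for whatever class hosts it, and the file must be opened in binary mode for the byte-level terminator checks to apply.

tailer = Tailer(open('app.log', 'rb'))  # hypothetical host class
tailer.file.seek(0, 2)                  # start from end of file
while tailer.seek_previous_line() != -1:
    start = tailer.file.tell()          # the method leaves us at the line start
    print(tailer.file.readline().rstrip())
    tailer.file.seek(start)             # step back from this line on the next pass
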
def verify(self, data):
r"""Does the given `data` hash to the digest in this `Multihash`?
>>> import hashlib
>>> data = b'foo'
>>> hash = hashlib.sha1(data)
>>> mh = Multihash.from_hash(hash)
>>> mh.verify(data)
True
>>> mh.verify(b'foobar')
False
Application-specific hash functions are also supported (see
`FuncReg`).
"""
digest = _do_digest(data, self.func)
return digest[:len(self.digest)] == self.digest | def function[verify, parameter[self, data]]:
constant[Does the given `data` hash to the digest in this `Multihash`?
>>> import hashlib
>>> data = b'foo'
>>> hash = hashlib.sha1(data)
>>> mh = Multihash.from_hash(hash)
>>> mh.verify(data)
True
>>> mh.verify(b'foobar')
False
Application-specific hash functions are also supported (see
`FuncReg`).
]
variable[digest] assign[=] call[name[_do_digest], parameter[name[data], name[self].func]]
return[compare[call[name[digest]][<ast.Slice object at 0x7da1b2587a00>] equal[==] name[self].digest]] | keyword[def] identifier[verify] ( identifier[self] , identifier[data] ):
literal[string]
identifier[digest] = identifier[_do_digest] ( identifier[data] , identifier[self] . identifier[func] )
keyword[return] identifier[digest] [: identifier[len] ( identifier[self] . identifier[digest] )]== identifier[self] . identifier[digest] | def verify(self, data):
"""Does the given `data` hash to the digest in this `Multihash`?
>>> import hashlib
>>> data = b'foo'
>>> hash = hashlib.sha1(data)
>>> mh = Multihash.from_hash(hash)
>>> mh.verify(data)
True
>>> mh.verify(b'foobar')
False
Application-specific hash functions are also supported (see
`FuncReg`).
"""
digest = _do_digest(data, self.func)
return digest[:len(self.digest)] == self.digest |
def set_cache_policy(self, func):
"""Set the context cache policy function.
Args:
func: A function that accepts a Key instance as argument and returns
a bool indicating if it should be cached. May be None.
"""
if func is None:
func = self.default_cache_policy
elif isinstance(func, bool):
func = lambda unused_key, flag=func: flag
self._cache_policy = func | def function[set_cache_policy, parameter[self, func]]:
constant[Set the context cache policy function.
Args:
func: A function that accepts a Key instance as argument and returns
a bool indicating if it should be cached. May be None.
]
if compare[name[func] is constant[None]] begin[:]
variable[func] assign[=] name[self].default_cache_policy
name[self]._cache_policy assign[=] name[func] | keyword[def] identifier[set_cache_policy] ( identifier[self] , identifier[func] ):
literal[string]
keyword[if] identifier[func] keyword[is] keyword[None] :
identifier[func] = identifier[self] . identifier[default_cache_policy]
keyword[elif] identifier[isinstance] ( identifier[func] , identifier[bool] ):
identifier[func] = keyword[lambda] identifier[unused_key] , identifier[flag] = identifier[func] : identifier[flag]
identifier[self] . identifier[_cache_policy] = identifier[func] | def set_cache_policy(self, func):
"""Set the context cache policy function.
Args:
func: A function that accepts a Key instance as argument and returns
a bool indicating if it should be cached. May be None.
"""
if func is None:
func = self.default_cache_policy # depends on [control=['if'], data=['func']]
elif isinstance(func, bool):
func = lambda unused_key, flag=func: flag # depends on [control=['if'], data=[]]
self._cache_policy = func |
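
The three accepted policy shapes, sketched against a hypothetical `ctx` that exposes the method above; `key.kind()` follows the usual NDB key API.

ctx.set_cache_policy(None)                   # fall back to the default policy
ctx.set_cache_policy(False)                  # a bool is wrapped: never cache
ctx.set_cache_policy(lambda key: key.kind() == 'User')  # cache only User keys
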
def y_grid(self, grid=None):
"""The vertical lines that run accross the chart from the y-ticks.
If a boolean value is given, these gridlines will be turned on or off.
Otherwise, the method will return their current state.
:param bool grid: Turns the gridlines on or off.
:rtype: ``bool``"""
if grid is None:
return self._y_grid
else:
if not isinstance(grid, bool):
raise TypeError("grid must be boolean, not '%s'" % grid)
self._y_grid = grid | def function[y_grid, parameter[self, grid]]:
    constant[The vertical lines that run across the chart from the y-ticks.
If a boolean value is given, these gridlines will be turned on or off.
Otherwise, the method will return their current state.
:param bool grid: Turns the gridlines on or off.
:rtype: ``bool``]
if compare[name[grid] is constant[None]] begin[:]
return[name[self]._y_grid] | keyword[def] identifier[y_grid] ( identifier[self] , identifier[grid] = keyword[None] ):
literal[string]
keyword[if] identifier[grid] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[_y_grid]
keyword[else] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[grid] , identifier[bool] ):
keyword[raise] identifier[TypeError] ( literal[string] % identifier[grid] )
identifier[self] . identifier[_y_grid] = identifier[grid] | def y_grid(self, grid=None):
"""The vertical lines that run accross the chart from the y-ticks.
If a boolean value is given, these gridlines will be turned on or off.
Otherwise, the method will return their current state.
:param bool grid: Turns the gridlines on or off.
:rtype: ``bool``"""
if grid is None:
return self._y_grid # depends on [control=['if'], data=[]]
else:
if not isinstance(grid, bool):
raise TypeError("grid must be boolean, not '%s'" % grid) # depends on [control=['if'], data=[]]
self._y_grid = grid |
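
A runnable sketch of this combined getter/setter idiom on a hypothetical Chart class (call with no argument to read the flag, with a bool to write it):

class Chart:
    def __init__(self):
        self._y_grid = False

    def y_grid(self, grid=None):
        # No argument reads the current state; a bool sets it.
        if grid is None:
            return self._y_grid
        if not isinstance(grid, bool):
            raise TypeError("grid must be boolean, not '%s'" % grid)
        self._y_grid = grid

c = Chart()
c.y_grid(True)
print(c.y_grid())  # True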
def edit(self, id):
""" Edit a pool.
"""
c.pool = Pool.get(int(id))
c.prefix_list = Prefix.list({ 'pool_id': c.pool.id })
c.prefix = ''
# save changes to NIPAP
if request.method == 'POST':
c.pool.name = request.params['name']
c.pool.description = request.params['description']
c.pool.default_type = request.params['default_type']
if request.params['ipv4_default_prefix_length'].strip() == '':
c.pool.ipv4_default_prefix_length = None
else:
c.pool.ipv4_default_prefix_length = request.params['ipv4_default_prefix_length']
if request.params['ipv6_default_prefix_length'].strip() == '':
c.pool.ipv6_default_prefix_length = None
else:
c.pool.ipv6_default_prefix_length = request.params['ipv6_default_prefix_length']
c.pool.save()
redirect(url(controller = 'pool', action = 'list'))
c.search_opt_parent = 'all'
c.search_opt_child = 'none'
return render("/pool_edit.html") | def function[edit, parameter[self, id]]:
constant[ Edit a pool.
]
name[c].pool assign[=] call[name[Pool].get, parameter[call[name[int], parameter[name[id]]]]]
name[c].prefix_list assign[=] call[name[Prefix].list, parameter[dictionary[[<ast.Constant object at 0x7da1b2346bc0>], [<ast.Attribute object at 0x7da1b2344940>]]]]
name[c].prefix assign[=] constant[]
if compare[name[request].method equal[==] constant[POST]] begin[:]
name[c].pool.name assign[=] call[name[request].params][constant[name]]
name[c].pool.description assign[=] call[name[request].params][constant[description]]
name[c].pool.default_type assign[=] call[name[request].params][constant[default_type]]
if compare[call[call[name[request].params][constant[ipv4_default_prefix_length]].strip, parameter[]] equal[==] constant[]] begin[:]
name[c].pool.ipv4_default_prefix_length assign[=] constant[None]
if compare[call[call[name[request].params][constant[ipv6_default_prefix_length]].strip, parameter[]] equal[==] constant[]] begin[:]
name[c].pool.ipv6_default_prefix_length assign[=] constant[None]
call[name[c].pool.save, parameter[]]
call[name[redirect], parameter[call[name[url], parameter[]]]]
name[c].search_opt_parent assign[=] constant[all]
name[c].search_opt_child assign[=] constant[none]
return[call[name[render], parameter[constant[/pool_edit.html]]]] | keyword[def] identifier[edit] ( identifier[self] , identifier[id] ):
literal[string]
identifier[c] . identifier[pool] = identifier[Pool] . identifier[get] ( identifier[int] ( identifier[id] ))
identifier[c] . identifier[prefix_list] = identifier[Prefix] . identifier[list] ({ literal[string] : identifier[c] . identifier[pool] . identifier[id] })
identifier[c] . identifier[prefix] = literal[string]
keyword[if] identifier[request] . identifier[method] == literal[string] :
identifier[c] . identifier[pool] . identifier[name] = identifier[request] . identifier[params] [ literal[string] ]
identifier[c] . identifier[pool] . identifier[description] = identifier[request] . identifier[params] [ literal[string] ]
identifier[c] . identifier[pool] . identifier[default_type] = identifier[request] . identifier[params] [ literal[string] ]
keyword[if] identifier[request] . identifier[params] [ literal[string] ]. identifier[strip] ()== literal[string] :
identifier[c] . identifier[pool] . identifier[ipv4_default_prefix_length] = keyword[None]
keyword[else] :
identifier[c] . identifier[pool] . identifier[ipv4_default_prefix_length] = identifier[request] . identifier[params] [ literal[string] ]
keyword[if] identifier[request] . identifier[params] [ literal[string] ]. identifier[strip] ()== literal[string] :
identifier[c] . identifier[pool] . identifier[ipv6_default_prefix_length] = keyword[None]
keyword[else] :
identifier[c] . identifier[pool] . identifier[ipv6_default_prefix_length] = identifier[request] . identifier[params] [ literal[string] ]
identifier[c] . identifier[pool] . identifier[save] ()
identifier[redirect] ( identifier[url] ( identifier[controller] = literal[string] , identifier[action] = literal[string] ))
identifier[c] . identifier[search_opt_parent] = literal[string]
identifier[c] . identifier[search_opt_child] = literal[string]
keyword[return] identifier[render] ( literal[string] ) | def edit(self, id):
""" Edit a pool.
"""
c.pool = Pool.get(int(id))
c.prefix_list = Prefix.list({'pool_id': c.pool.id})
c.prefix = ''
# save changes to NIPAP
if request.method == 'POST':
c.pool.name = request.params['name']
c.pool.description = request.params['description']
c.pool.default_type = request.params['default_type']
if request.params['ipv4_default_prefix_length'].strip() == '':
c.pool.ipv4_default_prefix_length = None # depends on [control=['if'], data=[]]
else:
c.pool.ipv4_default_prefix_length = request.params['ipv4_default_prefix_length']
if request.params['ipv6_default_prefix_length'].strip() == '':
c.pool.ipv6_default_prefix_length = None # depends on [control=['if'], data=[]]
else:
c.pool.ipv6_default_prefix_length = request.params['ipv6_default_prefix_length']
c.pool.save()
redirect(url(controller='pool', action='list')) # depends on [control=['if'], data=[]]
c.search_opt_parent = 'all'
c.search_opt_child = 'none'
return render('/pool_edit.html') |
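
The blank-field normalization applied to both prefix-length parameters above can be factored into a single helper; an illustrative sketch with a plain dict standing in for request.params (blank_to_none is a hypothetical name, not part of the original code):

def blank_to_none(params, key):
    # Treat an empty or whitespace-only form field as "not set".
    value = params.get(key, '')
    return None if value.strip() == '' else value

params = {'ipv4_default_prefix_length': '   ', 'ipv6_default_prefix_length': '64'}
print(blank_to_none(params, 'ipv4_default_prefix_length'))  # None
print(blank_to_none(params, 'ipv6_default_prefix_length'))  # 64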
def disconnect_async(self, conn_id, callback):
"""Asynchronously disconnect from a device that has previously been connected
Args:
conn_id (int): a unique identifier for this connection on the DeviceManager
that owns this adapter.
callback (callable): A function called as callback(conn_id, adapter_id, success, failure_reason)
when the disconnection finishes. Disconnection can only either succeed or timeout.
"""
try:
context = self.conns.get_context(conn_id)
except ArgumentError:
callback(conn_id, self.id, False, "Could not find connection information")
return
self.conns.begin_disconnection(conn_id, callback, self.get_config('default_timeout'))
topics = context['topics']
disconn_message = {'key': context['key'], 'client': self.name, 'type': 'command', 'operation': 'disconnect'}
self.client.publish(topics.action, disconn_message) | def function[disconnect_async, parameter[self, conn_id, callback]]:
constant[Asynchronously disconnect from a device that has previously been connected
Args:
conn_id (int): a unique identifier for this connection on the DeviceManager
that owns this adapter.
callback (callable): A function called as callback(conn_id, adapter_id, success, failure_reason)
when the disconnection finishes. Disconnection can only either succeed or timeout.
]
<ast.Try object at 0x7da20c6c6950>
call[name[self].conns.begin_disconnection, parameter[name[conn_id], name[callback], call[name[self].get_config, parameter[constant[default_timeout]]]]]
variable[topics] assign[=] call[name[context]][constant[topics]]
variable[disconn_message] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c7280>, <ast.Constant object at 0x7da20c6c6b90>, <ast.Constant object at 0x7da20c6c63e0>, <ast.Constant object at 0x7da20c6c7a90>], [<ast.Subscript object at 0x7da20c6c7e80>, <ast.Attribute object at 0x7da20c6c4fd0>, <ast.Constant object at 0x7da20c6c5e70>, <ast.Constant object at 0x7da20c6c50c0>]]
call[name[self].client.publish, parameter[name[topics].action, name[disconn_message]]] | keyword[def] identifier[disconnect_async] ( identifier[self] , identifier[conn_id] , identifier[callback] ):
literal[string]
keyword[try] :
identifier[context] = identifier[self] . identifier[conns] . identifier[get_context] ( identifier[conn_id] )
keyword[except] identifier[ArgumentError] :
identifier[callback] ( identifier[conn_id] , identifier[self] . identifier[id] , keyword[False] , literal[string] )
keyword[return]
identifier[self] . identifier[conns] . identifier[begin_disconnection] ( identifier[conn_id] , identifier[callback] , identifier[self] . identifier[get_config] ( literal[string] ))
identifier[topics] = identifier[context] [ literal[string] ]
identifier[disconn_message] ={ literal[string] : identifier[context] [ literal[string] ], literal[string] : identifier[self] . identifier[name] , literal[string] : literal[string] , literal[string] : literal[string] }
identifier[self] . identifier[client] . identifier[publish] ( identifier[topics] . identifier[action] , identifier[disconn_message] ) | def disconnect_async(self, conn_id, callback):
"""Asynchronously disconnect from a device that has previously been connected
Args:
conn_id (int): a unique identifier for this connection on the DeviceManager
that owns this adapter.
callback (callable): A function called as callback(conn_id, adapter_id, success, failure_reason)
when the disconnection finishes. Disconnection can only either succeed or timeout.
"""
try:
context = self.conns.get_context(conn_id) # depends on [control=['try'], data=[]]
except ArgumentError:
callback(conn_id, self.id, False, 'Could not find connection information')
return # depends on [control=['except'], data=[]]
self.conns.begin_disconnection(conn_id, callback, self.get_config('default_timeout'))
topics = context['topics']
disconn_message = {'key': context['key'], 'client': self.name, 'type': 'command', 'operation': 'disconnect'}
self.client.publish(topics.action, disconn_message) |
def style_format(text, style):
""" Wraps texts in terminal control sequences
Style can be passed as either a collection or space delimited string.
    Valid styles can be found in the codes module. Invalid or unsupported styles
will just be ignored.
"""
if not style:
return text
if isinstance(style, str):
style = style.split(" ")
prefix = ""
for s in style:
prefix += codes.colours.get(s, "")
prefix += codes.highlights.get(s, "")
prefix += codes.modes.get(s, "")
return prefix + text + codes.modes["reset"] | def function[style_format, parameter[text, style]]:
    constant[ Wraps text in terminal control sequences
Style can be passed as either a collection or space delimited string.
    Valid styles can be found in the codes module. Invalid or unsupported styles
will just be ignored.
]
if <ast.UnaryOp object at 0x7da1b1434490> begin[:]
return[name[text]]
if call[name[isinstance], parameter[name[style], name[str]]] begin[:]
variable[style] assign[=] call[name[style].split, parameter[constant[ ]]]
variable[prefix] assign[=] constant[]
for taget[name[s]] in starred[name[style]] begin[:]
<ast.AugAssign object at 0x7da1b1434730>
<ast.AugAssign object at 0x7da1b1437340>
<ast.AugAssign object at 0x7da1b1437520>
return[binary_operation[binary_operation[name[prefix] + name[text]] + call[name[codes].modes][constant[reset]]]] | keyword[def] identifier[style_format] ( identifier[text] , identifier[style] ):
literal[string]
keyword[if] keyword[not] identifier[style] :
keyword[return] identifier[text]
keyword[if] identifier[isinstance] ( identifier[style] , identifier[str] ):
identifier[style] = identifier[style] . identifier[split] ( literal[string] )
identifier[prefix] = literal[string]
keyword[for] identifier[s] keyword[in] identifier[style] :
identifier[prefix] += identifier[codes] . identifier[colours] . identifier[get] ( identifier[s] , literal[string] )
identifier[prefix] += identifier[codes] . identifier[highlights] . identifier[get] ( identifier[s] , literal[string] )
identifier[prefix] += identifier[codes] . identifier[modes] . identifier[get] ( identifier[s] , literal[string] )
keyword[return] identifier[prefix] + identifier[text] + identifier[codes] . identifier[modes] [ literal[string] ] | def style_format(text, style):
""" Wraps texts in terminal control sequences
Style can be passed as either a collection or space delimited string.
    Valid styles can be found in the codes module. Invalid or unsupported styles
will just be ignored.
"""
if not style:
return text # depends on [control=['if'], data=[]]
if isinstance(style, str):
style = style.split(' ') # depends on [control=['if'], data=[]]
prefix = ''
for s in style:
prefix += codes.colours.get(s, '')
prefix += codes.highlights.get(s, '')
prefix += codes.modes.get(s, '') # depends on [control=['for'], data=['s']]
return prefix + text + codes.modes['reset'] |
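
A self-contained version of the same logic, with two tiny tables standing in for the codes module (the table contents here are ordinary ANSI escape codes, not the real module's data):

COLOURS = {'red': '\033[31m', 'green': '\033[32m'}
MODES = {'bold': '\033[1m', 'reset': '\033[0m'}

def style_format(text, style):
    if not style:
        return text
    if isinstance(style, str):
        style = style.split(' ')
    # Unknown styles contribute nothing, mirroring the .get(s, '') lookups above.
    prefix = ''.join(COLOURS.get(s, '') + MODES.get(s, '') for s in style)
    return prefix + text + MODES['reset']

print(style_format('error', 'red bold'))  # bold red text on an ANSI terminal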
def getTaxCertURL(self, CorpNum, UserID):
""" 공인인증서 등록 URL
args
CorpNum : 회원 사업자번호
UserID : 회원 팝빌아이디
return
30초 보안 토큰을 포함한 url
raise
PopbillException
"""
result = self._httpget('/?TG=CERT', CorpNum, UserID)
return result.url | def function[getTaxCertURL, parameter[self, CorpNum, UserID]]:
    constant[ Public certificate (공인인증서) registration URL
    args
        CorpNum : member business registration number
        UserID : member Popbill ID
    return
        URL containing a 30-second security token
raise
PopbillException
]
variable[result] assign[=] call[name[self]._httpget, parameter[constant[/?TG=CERT], name[CorpNum], name[UserID]]]
return[name[result].url] | keyword[def] identifier[getTaxCertURL] ( identifier[self] , identifier[CorpNum] , identifier[UserID] ):
literal[string]
identifier[result] = identifier[self] . identifier[_httpget] ( literal[string] , identifier[CorpNum] , identifier[UserID] )
keyword[return] identifier[result] . identifier[url] | def getTaxCertURL(self, CorpNum, UserID):
""" 공인인증서 등록 URL
args
CorpNum : 회원 사업자번호
UserID : 회원 팝빌아이디
return
30초 보안 토큰을 포함한 url
raise
PopbillException
"""
result = self._httpget('/?TG=CERT', CorpNum, UserID)
return result.url |
def has_requisite_content_models(self):
''':type: bool
True when the current object has the expected content models
for whatever subclass of :class:`DigitalObject` it was
initialized as.'''
for cmodel in getattr(self, 'CONTENT_MODELS', ()):
if not self.has_model(cmodel):
return False
return True | def function[has_requisite_content_models, parameter[self]]:
constant[:type: bool
True when the current object has the expected content models
for whatever subclass of :class:`DigitalObject` it was
initialized as.]
for taget[name[cmodel]] in starred[call[name[getattr], parameter[name[self], constant[CONTENT_MODELS], tuple[[]]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b26145b0> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[has_requisite_content_models] ( identifier[self] ):
literal[string]
keyword[for] identifier[cmodel] keyword[in] identifier[getattr] ( identifier[self] , literal[string] ,()):
keyword[if] keyword[not] identifier[self] . identifier[has_model] ( identifier[cmodel] ):
keyword[return] keyword[False]
keyword[return] keyword[True] | def has_requisite_content_models(self):
""":type: bool
True when the current object has the expected content models
for whatever subclass of :class:`DigitalObject` it was
initialized as."""
for cmodel in getattr(self, 'CONTENT_MODELS', ()):
if not self.has_model(cmodel):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cmodel']]
return True |
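
The accumulate-and-short-circuit loop above is equivalent to all() over a generator; a sketch against a hypothetical minimal object:

class Obj:
    CONTENT_MODELS = ('model:a', 'model:b')

    def __init__(self, models):
        self._models = set(models)

    def has_model(self, m):
        return m in self._models

    def has_requisite_content_models(self):
        # Same semantics as the explicit loop, written with all().
        return all(self.has_model(m) for m in getattr(self, 'CONTENT_MODELS', ()))

print(Obj(['model:a', 'model:b']).has_requisite_content_models())  # True
print(Obj(['model:a']).has_requisite_content_models())             # False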
def linOriginRegression(points):
"""
    fits a line through the origin; returns (slope, sum(x), sum(y)) with slope = sum(y)/sum(x), or slope 1 when sum(x) == 0
"""
j = sum([ i[0] for i in points ])
k = sum([ i[1] for i in points ])
if j != 0:
return k/j, j, k
return 1, j, k | def function[linOriginRegression, parameter[points]]:
constant[
    fits a line through the origin; returns (slope, sum(x), sum(y)) with slope = sum(y)/sum(x), or slope 1 when sum(x) == 0
]
variable[j] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da20e955b40>]]
variable[k] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da20e957250>]]
if compare[name[j] not_equal[!=] constant[0]] begin[:]
return[tuple[[<ast.BinOp object at 0x7da20e9567a0>, <ast.Name object at 0x7da20e957eb0>, <ast.Name object at 0x7da20e954310>]]]
return[tuple[[<ast.Constant object at 0x7da20e956410>, <ast.Name object at 0x7da20e956bc0>, <ast.Name object at 0x7da20e957190>]]] | keyword[def] identifier[linOriginRegression] ( identifier[points] ):
literal[string]
identifier[j] = identifier[sum] ([ identifier[i] [ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[points] ])
identifier[k] = identifier[sum] ([ identifier[i] [ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[points] ])
keyword[if] identifier[j] != literal[int] :
keyword[return] identifier[k] / identifier[j] , identifier[j] , identifier[k]
keyword[return] literal[int] , identifier[j] , identifier[k] | def linOriginRegression(points):
"""
    fits a line through the origin; returns (slope, sum(x), sum(y)) with slope = sum(y)/sum(x), or slope 1 when sum(x) == 0
"""
j = sum([i[0] for i in points])
k = sum([i[1] for i in points])
if j != 0:
return (k / j, j, k) # depends on [control=['if'], data=['j']]
return (1, j, k) |
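
A worked example. Note that the slope here is the ratio of sums sum(y)/sum(x), which is not the least-squares through-origin slope sum(x*y)/sum(x*x); the two only coincide when the points lie exactly on a line through the origin, as below. When sum(x) is zero the function above falls back to a slope of 1.

points = [(1, 2), (2, 4), (3, 6)]

j = sum(x for x, _ in points)   # sum of x: 6
k = sum(y for _, y in points)   # sum of y: 12
slope = k / j                   # 2.0 -- ratio of sums

# Least-squares slope through the origin, for comparison:
ls = sum(x * y for x, y in points) / sum(x * x for x, _ in points)  # also 2.0 here
print(slope, ls)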
def find_template_companion(template, extension='', check=True):
"""
    Yields template companion files, nearest directory first
"""
if check and not os.path.isfile(template):
yield ''
return # May be '<stdin>' (click)
template = os.path.abspath(template)
template_dirname = os.path.dirname(template)
template_basename = os.path.basename(template).split('.')
current_path = template_dirname
stop_path = os.path.commonprefix((os.getcwd(), current_path))
stop_path = os.path.dirname(stop_path)
token = template_basename[0] + '.'
while True:
for file in sorted(os.listdir(current_path)):
if not file.startswith(token):
continue
if not file.endswith(extension):
continue
file_parts = file.split('.')
for i in range(1, len(template_basename)):
if template_basename[:-i] != file_parts[:-1]:
continue
if current_path == template_dirname:
if file_parts == template_basename:
continue # Do not accept template itself
yield os.path.join(current_path, file)
if current_path == stop_path:
break
# cd ..
current_path = os.path.split(current_path)[0] | def function[find_template_companion, parameter[template, extension, check]]:
constant[
    Yields template companion files, nearest directory first
]
if <ast.BoolOp object at 0x7da1b0be23b0> begin[:]
<ast.Yield object at 0x7da1b0be1fc0>
return[None]
variable[template] assign[=] call[name[os].path.abspath, parameter[name[template]]]
variable[template_dirname] assign[=] call[name[os].path.dirname, parameter[name[template]]]
variable[template_basename] assign[=] call[call[name[os].path.basename, parameter[name[template]]].split, parameter[constant[.]]]
variable[current_path] assign[=] name[template_dirname]
variable[stop_path] assign[=] call[name[os].path.commonprefix, parameter[tuple[[<ast.Call object at 0x7da1b0be01f0>, <ast.Name object at 0x7da1b0be13f0>]]]]
variable[stop_path] assign[=] call[name[os].path.dirname, parameter[name[stop_path]]]
variable[token] assign[=] binary_operation[call[name[template_basename]][constant[0]] + constant[.]]
while constant[True] begin[:]
for taget[name[file]] in starred[call[name[sorted], parameter[call[name[os].listdir, parameter[name[current_path]]]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b0bc96f0> begin[:]
continue
if <ast.UnaryOp object at 0x7da1b0bcbb50> begin[:]
continue
variable[file_parts] assign[=] call[name[file].split, parameter[constant[.]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], call[name[len], parameter[name[template_basename]]]]]] begin[:]
if compare[call[name[template_basename]][<ast.Slice object at 0x7da1b0bcb430>] not_equal[!=] call[name[file_parts]][<ast.Slice object at 0x7da1b0bf1600>]] begin[:]
continue
if compare[name[current_path] equal[==] name[template_dirname]] begin[:]
if compare[name[file_parts] equal[==] name[template_basename]] begin[:]
continue
<ast.Yield object at 0x7da1b0bf1330>
if compare[name[current_path] equal[==] name[stop_path]] begin[:]
break
variable[current_path] assign[=] call[call[name[os].path.split, parameter[name[current_path]]]][constant[0]] | keyword[def] identifier[find_template_companion] ( identifier[template] , identifier[extension] = literal[string] , identifier[check] = keyword[True] ):
literal[string]
keyword[if] identifier[check] keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[template] ):
keyword[yield] literal[string]
keyword[return]
identifier[template] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[template] )
identifier[template_dirname] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[template] )
identifier[template_basename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[template] ). identifier[split] ( literal[string] )
identifier[current_path] = identifier[template_dirname]
identifier[stop_path] = identifier[os] . identifier[path] . identifier[commonprefix] (( identifier[os] . identifier[getcwd] (), identifier[current_path] ))
identifier[stop_path] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[stop_path] )
identifier[token] = identifier[template_basename] [ literal[int] ]+ literal[string]
keyword[while] keyword[True] :
keyword[for] identifier[file] keyword[in] identifier[sorted] ( identifier[os] . identifier[listdir] ( identifier[current_path] )):
keyword[if] keyword[not] identifier[file] . identifier[startswith] ( identifier[token] ):
keyword[continue]
keyword[if] keyword[not] identifier[file] . identifier[endswith] ( identifier[extension] ):
keyword[continue]
identifier[file_parts] = identifier[file] . identifier[split] ( literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[template_basename] )):
keyword[if] identifier[template_basename] [:- identifier[i] ]!= identifier[file_parts] [:- literal[int] ]:
keyword[continue]
keyword[if] identifier[current_path] == identifier[template_dirname] :
keyword[if] identifier[file_parts] == identifier[template_basename] :
keyword[continue]
keyword[yield] identifier[os] . identifier[path] . identifier[join] ( identifier[current_path] , identifier[file] )
keyword[if] identifier[current_path] == identifier[stop_path] :
keyword[break]
identifier[current_path] = identifier[os] . identifier[path] . identifier[split] ( identifier[current_path] )[ literal[int] ] | def find_template_companion(template, extension='', check=True):
"""
    Yields template companion files, nearest directory first
"""
if check and (not os.path.isfile(template)):
yield ''
return # May be '<stdin>' (click) # depends on [control=['if'], data=[]]
template = os.path.abspath(template)
template_dirname = os.path.dirname(template)
template_basename = os.path.basename(template).split('.')
current_path = template_dirname
stop_path = os.path.commonprefix((os.getcwd(), current_path))
stop_path = os.path.dirname(stop_path)
token = template_basename[0] + '.'
while True:
for file in sorted(os.listdir(current_path)):
if not file.startswith(token):
continue # depends on [control=['if'], data=[]]
if not file.endswith(extension):
continue # depends on [control=['if'], data=[]]
file_parts = file.split('.')
for i in range(1, len(template_basename)):
if template_basename[:-i] != file_parts[:-1]:
continue # depends on [control=['if'], data=[]]
if current_path == template_dirname:
if file_parts == template_basename:
continue # Do not accept template itself # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
yield os.path.join(current_path, file) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['file']]
if current_path == stop_path:
break # depends on [control=['if'], data=[]]
# cd ..
current_path = os.path.split(current_path)[0] # depends on [control=['while'], data=[]] |
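
A sketch of the upward search on a throwaway directory tree; it assumes the find_template_companion function above is in scope, and the chdir matters because stop_path is derived from the common prefix with the working directory:

import os, tempfile

old_cwd = os.getcwd()
with tempfile.TemporaryDirectory() as root:
    sub = os.path.join(root, 'pages')
    os.makedirs(sub)
    open(os.path.join(sub, 'index.html'), 'w').close()  # the template
    open(os.path.join(root, 'index.yml'), 'w').close()  # companion, one level up
    os.chdir(sub)
    try:
        print(list(find_template_companion(os.path.join(sub, 'index.html'), '.yml')))
        # -> ['<root>/index.yml']
    finally:
        os.chdir(old_cwd)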
def start(self):
"""Start the timer."""
if not self._is_running:
self._t_start = time()
self._is_running = True
self._t_last = time() | def function[start, parameter[self]]:
constant[Start the timer.]
if <ast.UnaryOp object at 0x7da1b0532a40> begin[:]
name[self]._t_start assign[=] call[name[time], parameter[]]
name[self]._is_running assign[=] constant[True]
name[self]._t_last assign[=] call[name[time], parameter[]] | keyword[def] identifier[start] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_is_running] :
identifier[self] . identifier[_t_start] = identifier[time] ()
identifier[self] . identifier[_is_running] = keyword[True]
identifier[self] . identifier[_t_last] = identifier[time] () | def start(self):
"""Start the timer."""
if not self._is_running:
self._t_start = time()
self._is_running = True # depends on [control=['if'], data=[]]
self._t_last = time() |
def generate_password_hash(password, salt, N=1 << 14, r=8, p=1, buflen=64):
"""
    Generate password hash given the password string and salt.
Args:
- ``password``: Password string.
- ``salt`` : Random base64 encoded string.
Optional args:
- ``N`` : the CPU cost, must be a power of 2 greater than 1, defaults to 1 << 14.
- ``r`` : the memory cost, defaults to 8.
- ``p`` : the parallelization parameter, defaults to 1.
The parameters r, p, and buflen must satisfy r * p < 2^30 and
buflen <= (2^32 - 1) * 32.
The recommended parameters for interactive logins as of 2009 are N=16384,
r=8, p=1. Remember to use a good random salt.
Returns:
- base64 encoded scrypt hash.
"""
if PYTHON2:
password = password.encode('utf-8')
salt = salt.encode('utf-8')
pw_hash = scrypt_hash(password, salt, N, r, p, buflen)
return enbase64(pw_hash) | def function[generate_password_hash, parameter[password, salt, N, r, p, buflen]]:
constant[
    Generate password hash given the password string and salt.
Args:
- ``password``: Password string.
- ``salt`` : Random base64 encoded string.
Optional args:
- ``N`` : the CPU cost, must be a power of 2 greater than 1, defaults to 1 << 14.
- ``r`` : the memory cost, defaults to 8.
- ``p`` : the parallelization parameter, defaults to 1.
The parameters r, p, and buflen must satisfy r * p < 2^30 and
buflen <= (2^32 - 1) * 32.
The recommended parameters for interactive logins as of 2009 are N=16384,
r=8, p=1. Remember to use a good random salt.
Returns:
- base64 encoded scrypt hash.
]
if name[PYTHON2] begin[:]
variable[password] assign[=] call[name[password].encode, parameter[constant[utf-8]]]
variable[salt] assign[=] call[name[salt].encode, parameter[constant[utf-8]]]
variable[pw_hash] assign[=] call[name[scrypt_hash], parameter[name[password], name[salt], name[N], name[r], name[p], name[buflen]]]
return[call[name[enbase64], parameter[name[pw_hash]]]] | keyword[def] identifier[generate_password_hash] ( identifier[password] , identifier[salt] , identifier[N] = literal[int] << literal[int] , identifier[r] = literal[int] , identifier[p] = literal[int] , identifier[buflen] = literal[int] ):
literal[string]
keyword[if] identifier[PYTHON2] :
identifier[password] = identifier[password] . identifier[encode] ( literal[string] )
identifier[salt] = identifier[salt] . identifier[encode] ( literal[string] )
identifier[pw_hash] = identifier[scrypt_hash] ( identifier[password] , identifier[salt] , identifier[N] , identifier[r] , identifier[p] , identifier[buflen] )
keyword[return] identifier[enbase64] ( identifier[pw_hash] ) | def generate_password_hash(password, salt, N=1 << 14, r=8, p=1, buflen=64):
"""
    Generate password hash given the password string and salt.
Args:
- ``password``: Password string.
- ``salt`` : Random base64 encoded string.
Optional args:
- ``N`` : the CPU cost, must be a power of 2 greater than 1, defaults to 1 << 14.
- ``r`` : the memory cost, defaults to 8.
- ``p`` : the parallelization parameter, defaults to 1.
The parameters r, p, and buflen must satisfy r * p < 2^30 and
buflen <= (2^32 - 1) * 32.
The recommended parameters for interactive logins as of 2009 are N=16384,
r=8, p=1. Remember to use a good random salt.
Returns:
- base64 encoded scrypt hash.
"""
if PYTHON2:
password = password.encode('utf-8')
salt = salt.encode('utf-8') # depends on [control=['if'], data=[]]
pw_hash = scrypt_hash(password, salt, N, r, p, buflen)
return enbase64(pw_hash) |
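
Python 3.6+ ships scrypt in the standard library, so an equivalent can be sketched without the external scrypt package or the enbase64 helper; this is an assumption-laden rewrite, not the module's actual code (dklen plays the role of buflen, and maxmem is raised because the default N=2**14, r=8 needs roughly 16 MiB):

import base64, hashlib, os

def generate_password_hash(password, salt, N=1 << 14, r=8, p=1, buflen=64):
    # hashlib.scrypt mirrors the scrypt package's parameters.
    pw_hash = hashlib.scrypt(password.encode('utf-8'), salt=salt.encode('utf-8'),
                             n=N, r=r, p=p, maxmem=2**26, dklen=buflen)
    return base64.b64encode(pw_hash)

salt = base64.b64encode(os.urandom(16)).decode('ascii')
print(generate_password_hash('hunter2', salt))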
def match(self, origin=None, rel=None, target=None, attrs=None, include_ids=False):
'''
Iterator over relationship IDs that match a pattern of components
origin - (optional) origin of the relationship (similar to an RDF subject). If omitted any origin will be matched.
rel - (optional) type IRI of the relationship (similar to an RDF predicate). If omitted any relationship will be matched.
target - (optional) target of the relationship (similar to an RDF object), a boolean, floating point or unicode object. If omitted any target will be matched.
attrs - (optional) attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}. If any attribute is specified, an exact match is made (i.e. the attribute name and value must match).
include_ids - If true include statement IDs with yield values
'''
    # Can't use items() or we risk a client-side RuntimeError: dictionary changed size during iteration
for index, curr_rel in enumerate(self._relationships):
matches = True
if origin and origin != curr_rel[ORIGIN]:
matches = False
continue
if rel and rel != curr_rel[RELATIONSHIP]:
matches = False
continue
if target and target != curr_rel[TARGET]:
matches = False
continue
if attrs:
for k, v in attrs.items():
if k not in curr_rel[ATTRIBUTES] or curr_rel[ATTRIBUTES].get(k) != v:
matches = False
if matches:
if include_ids:
yield index, (curr_rel[0], curr_rel[1], curr_rel[2], curr_rel[3].copy())
else:
yield (curr_rel[0], curr_rel[1], curr_rel[2], curr_rel[3].copy())
return | def function[match, parameter[self, origin, rel, target, attrs, include_ids]]:
constant[
Iterator over relationship IDs that match a pattern of components
origin - (optional) origin of the relationship (similar to an RDF subject). If omitted any origin will be matched.
rel - (optional) type IRI of the relationship (similar to an RDF predicate). If omitted any relationship will be matched.
target - (optional) target of the relationship (similar to an RDF object), a boolean, floating point or unicode object. If omitted any target will be matched.
attrs - (optional) attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}. If any attribute is specified, an exact match is made (i.e. the attribute name and value must match).
include_ids - If true include statement IDs with yield values
]
for taget[tuple[[<ast.Name object at 0x7da20c796890>, <ast.Name object at 0x7da20c7954e0>]]] in starred[call[name[enumerate], parameter[name[self]._relationships]]] begin[:]
variable[matches] assign[=] constant[True]
if <ast.BoolOp object at 0x7da20c794af0> begin[:]
variable[matches] assign[=] constant[False]
continue
if <ast.BoolOp object at 0x7da20c796710> begin[:]
variable[matches] assign[=] constant[False]
continue
if <ast.BoolOp object at 0x7da20c794880> begin[:]
variable[matches] assign[=] constant[False]
continue
if name[attrs] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c794580>, <ast.Name object at 0x7da20c796a10>]]] in starred[call[name[attrs].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da20e955480> begin[:]
variable[matches] assign[=] constant[False]
if name[matches] begin[:]
if name[include_ids] begin[:]
<ast.Yield object at 0x7da20e956b00>
return[None] | keyword[def] identifier[match] ( identifier[self] , identifier[origin] = keyword[None] , identifier[rel] = keyword[None] , identifier[target] = keyword[None] , identifier[attrs] = keyword[None] , identifier[include_ids] = keyword[False] ):
literal[string]
keyword[for] identifier[index] , identifier[curr_rel] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_relationships] ):
identifier[matches] = keyword[True]
keyword[if] identifier[origin] keyword[and] identifier[origin] != identifier[curr_rel] [ identifier[ORIGIN] ]:
identifier[matches] = keyword[False]
keyword[continue]
keyword[if] identifier[rel] keyword[and] identifier[rel] != identifier[curr_rel] [ identifier[RELATIONSHIP] ]:
identifier[matches] = keyword[False]
keyword[continue]
keyword[if] identifier[target] keyword[and] identifier[target] != identifier[curr_rel] [ identifier[TARGET] ]:
identifier[matches] = keyword[False]
keyword[continue]
keyword[if] identifier[attrs] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[attrs] . identifier[items] ():
keyword[if] identifier[k] keyword[not] keyword[in] identifier[curr_rel] [ identifier[ATTRIBUTES] ] keyword[or] identifier[curr_rel] [ identifier[ATTRIBUTES] ]. identifier[get] ( identifier[k] )!= identifier[v] :
identifier[matches] = keyword[False]
keyword[if] identifier[matches] :
keyword[if] identifier[include_ids] :
keyword[yield] identifier[index] ,( identifier[curr_rel] [ literal[int] ], identifier[curr_rel] [ literal[int] ], identifier[curr_rel] [ literal[int] ], identifier[curr_rel] [ literal[int] ]. identifier[copy] ())
keyword[else] :
keyword[yield] ( identifier[curr_rel] [ literal[int] ], identifier[curr_rel] [ literal[int] ], identifier[curr_rel] [ literal[int] ], identifier[curr_rel] [ literal[int] ]. identifier[copy] ())
keyword[return] | def match(self, origin=None, rel=None, target=None, attrs=None, include_ids=False):
"""
Iterator over relationship IDs that match a pattern of components
origin - (optional) origin of the relationship (similar to an RDF subject). If omitted any origin will be matched.
rel - (optional) type IRI of the relationship (similar to an RDF predicate). If omitted any relationship will be matched.
target - (optional) target of the relationship (similar to an RDF object), a boolean, floating point or unicode object. If omitted any target will be matched.
attrs - (optional) attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}. If any attribute is specified, an exact match is made (i.e. the attribute name and value must match).
include_ids - If true include statement IDs with yield values
"""
    # Can't use items() or we risk a client-side RuntimeError: dictionary changed size during iteration
for (index, curr_rel) in enumerate(self._relationships):
matches = True
if origin and origin != curr_rel[ORIGIN]:
matches = False
continue # depends on [control=['if'], data=[]]
if rel and rel != curr_rel[RELATIONSHIP]:
matches = False
continue # depends on [control=['if'], data=[]]
if target and target != curr_rel[TARGET]:
matches = False
continue # depends on [control=['if'], data=[]]
if attrs:
for (k, v) in attrs.items():
if k not in curr_rel[ATTRIBUTES] or curr_rel[ATTRIBUTES].get(k) != v:
matches = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if matches:
if include_ids:
yield (index, (curr_rel[0], curr_rel[1], curr_rel[2], curr_rel[3].copy())) # depends on [control=['if'], data=[]]
else:
yield (curr_rel[0], curr_rel[1], curr_rel[2], curr_rel[3].copy()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return |
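
A compact in-memory run of the same filtering; the column constants and store shape are inferred from the tuple indices above, and the attrs check is folded into a single any() so a failed attribute comparison skips the row immediately:

ORIGIN, RELATIONSHIP, TARGET, ATTRIBUTES = 0, 1, 2, 3

rels = [
    ('doc1', 'type', 'article', {'lang': 'en'}),
    ('doc1', 'author', 'alice', {}),
    ('doc2', 'type', 'article', {'lang': 'fr'}),
]

def match(origin=None, rel=None, target=None, attrs=None):
    for r in rels:
        if origin and origin != r[ORIGIN]:
            continue
        if rel and rel != r[RELATIONSHIP]:
            continue
        if target and target != r[TARGET]:
            continue
        if attrs and any(r[ATTRIBUTES].get(k) != v for k, v in attrs.items()):
            continue
        yield r

print(list(match(rel='type', attrs={'lang': 'en'})))
# -> [('doc1', 'type', 'article', {'lang': 'en'})]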
def set_data(self, *args):
"""we cant to call set_data to manually update"""
db = self.begining.get_data() or formats.DATE_DEFAULT
df = self.end.get_data() or formats.DATE_DEFAULT
jours = max((df - db).days + 1, 0)
self.setText(str(jours) + (jours >= 2 and " jours" or " jour")) | def function[set_data, parameter[self]]:
    constant[we want to call set_data to manually update]
variable[db] assign[=] <ast.BoolOp object at 0x7da1b11e3670>
variable[df] assign[=] <ast.BoolOp object at 0x7da1b11e32b0>
variable[jours] assign[=] call[name[max], parameter[binary_operation[binary_operation[name[df] - name[db]].days + constant[1]], constant[0]]]
call[name[self].setText, parameter[binary_operation[call[name[str], parameter[name[jours]]] + <ast.BoolOp object at 0x7da1b11e1330>]]] | keyword[def] identifier[set_data] ( identifier[self] ,* identifier[args] ):
literal[string]
identifier[db] = identifier[self] . identifier[begining] . identifier[get_data] () keyword[or] identifier[formats] . identifier[DATE_DEFAULT]
identifier[df] = identifier[self] . identifier[end] . identifier[get_data] () keyword[or] identifier[formats] . identifier[DATE_DEFAULT]
identifier[jours] = identifier[max] (( identifier[df] - identifier[db] ). identifier[days] + literal[int] , literal[int] )
identifier[self] . identifier[setText] ( identifier[str] ( identifier[jours] )+( identifier[jours] >= literal[int] keyword[and] literal[string] keyword[or] literal[string] )) | def set_data(self, *args):
"""we cant to call set_data to manually update"""
db = self.begining.get_data() or formats.DATE_DEFAULT
df = self.end.get_data() or formats.DATE_DEFAULT
jours = max((df - db).days + 1, 0)
self.setText(str(jours) + (jours >= 2 and ' jours' or ' jour')) |
def change_keys(obj, convert):
"""
Recursively goes through the dictionary obj and replaces keys with the convert function.
"""
if isinstance(obj, (str, int, float)):
return obj
if isinstance(obj, dict):
new = obj.__class__()
for k, v in obj.items():
new[convert(k)] = change_keys(v, convert)
elif isinstance(obj, (list, set, tuple)):
new = obj.__class__(change_keys(v, convert) for v in obj)
else:
return obj
return new | def function[change_keys, parameter[obj, convert]]:
constant[
Recursively goes through the dictionary obj and replaces keys with the convert function.
]
if call[name[isinstance], parameter[name[obj], tuple[[<ast.Name object at 0x7da20e9557e0>, <ast.Name object at 0x7da20e957e20>, <ast.Name object at 0x7da20e955030>]]]] begin[:]
return[name[obj]]
if call[name[isinstance], parameter[name[obj], name[dict]]] begin[:]
variable[new] assign[=] call[name[obj].__class__, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20e954310>, <ast.Name object at 0x7da20e956440>]]] in starred[call[name[obj].items, parameter[]]] begin[:]
call[name[new]][call[name[convert], parameter[name[k]]]] assign[=] call[name[change_keys], parameter[name[v], name[convert]]]
return[name[new]] | keyword[def] identifier[change_keys] ( identifier[obj] , identifier[convert] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] ,( identifier[str] , identifier[int] , identifier[float] )):
keyword[return] identifier[obj]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[dict] ):
identifier[new] = identifier[obj] . identifier[__class__] ()
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[obj] . identifier[items] ():
identifier[new] [ identifier[convert] ( identifier[k] )]= identifier[change_keys] ( identifier[v] , identifier[convert] )
keyword[elif] identifier[isinstance] ( identifier[obj] ,( identifier[list] , identifier[set] , identifier[tuple] )):
identifier[new] = identifier[obj] . identifier[__class__] ( identifier[change_keys] ( identifier[v] , identifier[convert] ) keyword[for] identifier[v] keyword[in] identifier[obj] )
keyword[else] :
keyword[return] identifier[obj]
keyword[return] identifier[new] | def change_keys(obj, convert):
"""
Recursively goes through the dictionary obj and replaces keys with the convert function.
"""
if isinstance(obj, (str, int, float)):
return obj # depends on [control=['if'], data=[]]
if isinstance(obj, dict):
new = obj.__class__()
for (k, v) in obj.items():
new[convert(k)] = change_keys(v, convert) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(obj, (list, set, tuple)):
new = obj.__class__((change_keys(v, convert) for v in obj)) # depends on [control=['if'], data=[]]
else:
return obj
return new |
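
A usage sketch with an illustrative camelCase-to-snake_case converter (the snake helper is an assumption for illustration, not part of the original API); it assumes the change_keys function above is in scope:

import re

def snake(name):
    # Insert an underscore before each interior capital, then lowercase.
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()

data = {'userName': 'ada', 'loginCount': 3,
        'lastSession': {'ipAddress': '10.0.0.1'}}
print(change_keys(data, snake))
# -> {'user_name': 'ada', 'login_count': 3,
#     'last_session': {'ip_address': '10.0.0.1'}}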
def _get_logits_name(self):
"""
Looks for the name of the layer producing the logits.
:return: name of layer producing the logits
"""
softmax_name = self._get_softmax_name()
softmax_layer = self.model.get_layer(softmax_name)
if not isinstance(softmax_layer, Activation):
# In this case, the activation is part of another layer
return softmax_name
if not hasattr(softmax_layer, '_inbound_nodes'):
raise RuntimeError("Please update keras to version >= 2.1.3")
node = softmax_layer._inbound_nodes[0]
logits_name = node.inbound_layers[0].name
return logits_name | def function[_get_logits_name, parameter[self]]:
constant[
Looks for the name of the layer producing the logits.
:return: name of layer producing the logits
]
variable[softmax_name] assign[=] call[name[self]._get_softmax_name, parameter[]]
variable[softmax_layer] assign[=] call[name[self].model.get_layer, parameter[name[softmax_name]]]
if <ast.UnaryOp object at 0x7da1b1fd6ce0> begin[:]
return[name[softmax_name]]
if <ast.UnaryOp object at 0x7da1b1fcbd90> begin[:]
<ast.Raise object at 0x7da1b1fca740>
variable[node] assign[=] call[name[softmax_layer]._inbound_nodes][constant[0]]
variable[logits_name] assign[=] call[name[node].inbound_layers][constant[0]].name
return[name[logits_name]] | keyword[def] identifier[_get_logits_name] ( identifier[self] ):
literal[string]
identifier[softmax_name] = identifier[self] . identifier[_get_softmax_name] ()
identifier[softmax_layer] = identifier[self] . identifier[model] . identifier[get_layer] ( identifier[softmax_name] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[softmax_layer] , identifier[Activation] ):
keyword[return] identifier[softmax_name]
keyword[if] keyword[not] identifier[hasattr] ( identifier[softmax_layer] , literal[string] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[node] = identifier[softmax_layer] . identifier[_inbound_nodes] [ literal[int] ]
identifier[logits_name] = identifier[node] . identifier[inbound_layers] [ literal[int] ]. identifier[name]
keyword[return] identifier[logits_name] | def _get_logits_name(self):
"""
Looks for the name of the layer producing the logits.
:return: name of layer producing the logits
"""
softmax_name = self._get_softmax_name()
softmax_layer = self.model.get_layer(softmax_name)
if not isinstance(softmax_layer, Activation):
# In this case, the activation is part of another layer
return softmax_name # depends on [control=['if'], data=[]]
if not hasattr(softmax_layer, '_inbound_nodes'):
raise RuntimeError('Please update keras to version >= 2.1.3') # depends on [control=['if'], data=[]]
node = softmax_layer._inbound_nodes[0]
logits_name = node.inbound_layers[0].name
return logits_name |
def cli(url, user_agent):
"""
Archives the provided URL using archive.is.
"""
kwargs = {}
if user_agent:
kwargs['user_agent'] = user_agent
archive_url = capture(url, **kwargs)
click.echo(archive_url) | def function[cli, parameter[url, user_agent]]:
constant[
Archives the provided URL using archive.is.
]
variable[kwargs] assign[=] dictionary[[], []]
if name[user_agent] begin[:]
call[name[kwargs]][constant[user_agent]] assign[=] name[user_agent]
variable[archive_url] assign[=] call[name[capture], parameter[name[url]]]
call[name[click].echo, parameter[name[archive_url]]] | keyword[def] identifier[cli] ( identifier[url] , identifier[user_agent] ):
literal[string]
identifier[kwargs] ={}
keyword[if] identifier[user_agent] :
identifier[kwargs] [ literal[string] ]= identifier[user_agent]
identifier[archive_url] = identifier[capture] ( identifier[url] ,** identifier[kwargs] )
identifier[click] . identifier[echo] ( identifier[archive_url] ) | def cli(url, user_agent):
"""
Archives the provided URL using archive.is.
"""
kwargs = {}
if user_agent:
kwargs['user_agent'] = user_agent # depends on [control=['if'], data=[]]
archive_url = capture(url, **kwargs)
click.echo(archive_url) |
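
A runnable command in the same shape, with a stub standing in for the archive.is capture call (the decorators and option wiring are ordinary click usage, but the stub's return value is made up):

import click

def capture(url, user_agent=None):
    # Stub: the real function would submit the URL to archive.is.
    return 'https://archive.is/abc123'

@click.command()
@click.argument('url')
@click.option('--user-agent', help='Custom User-Agent header to send.')
def cli(url, user_agent):
    """Archives the provided URL using archive.is."""
    kwargs = {}
    if user_agent:
        kwargs['user_agent'] = user_agent
    click.echo(capture(url, **kwargs))

if __name__ == '__main__':
    cli()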
def append(self, symbol, metadata, start_time=None):
"""
Update metadata entry for `symbol`
Parameters
----------
symbol : `str`
symbol name for the item
metadata : `dict`
to be persisted
start_time : `datetime.datetime`
when metadata becomes effective
Default: datetime.datetime.utcnow()
"""
if start_time is None:
start_time = dt.utcnow()
old_metadata = self.find_one({'symbol': symbol}, sort=[('start_time', pymongo.DESCENDING)])
if old_metadata is not None:
if old_metadata['start_time'] >= start_time:
raise ValueError('start_time={} is earlier than the last metadata @{}'.format(start_time,
old_metadata['start_time']))
if old_metadata['metadata'] == metadata:
return old_metadata
elif metadata is None:
return
self.find_one_and_update({'symbol': symbol}, {'$set': {'end_time': start_time}},
sort=[('start_time', pymongo.DESCENDING)])
document = {'_id': bson.ObjectId(), 'symbol': symbol, 'metadata': metadata, 'start_time': start_time}
mongo_retry(self.insert_one)(document)
logger.debug('Finished writing metadata for %s', symbol)
return document | def function[append, parameter[self, symbol, metadata, start_time]]:
constant[
Update metadata entry for `symbol`
Parameters
----------
symbol : `str`
symbol name for the item
metadata : `dict`
to be persisted
start_time : `datetime.datetime`
when metadata becomes effective
Default: datetime.datetime.utcnow()
]
if compare[name[start_time] is constant[None]] begin[:]
variable[start_time] assign[=] call[name[dt].utcnow, parameter[]]
variable[old_metadata] assign[=] call[name[self].find_one, parameter[dictionary[[<ast.Constant object at 0x7da18f721630>], [<ast.Name object at 0x7da18f723be0>]]]]
if compare[name[old_metadata] is_not constant[None]] begin[:]
if compare[call[name[old_metadata]][constant[start_time]] greater_or_equal[>=] name[start_time]] begin[:]
<ast.Raise object at 0x7da18f723310>
if compare[call[name[old_metadata]][constant[metadata]] equal[==] name[metadata]] begin[:]
return[name[old_metadata]]
call[name[self].find_one_and_update, parameter[dictionary[[<ast.Constant object at 0x7da20c76ce20>], [<ast.Name object at 0x7da20c76f520>]], dictionary[[<ast.Constant object at 0x7da20c76fd00>], [<ast.Dict object at 0x7da20c76ecb0>]]]]
variable[document] assign[=] dictionary[[<ast.Constant object at 0x7da20c76c070>, <ast.Constant object at 0x7da20c76e950>, <ast.Constant object at 0x7da20c76ebc0>, <ast.Constant object at 0x7da20c76f820>], [<ast.Call object at 0x7da20c76fe50>, <ast.Name object at 0x7da20c76c7f0>, <ast.Name object at 0x7da20c76ca00>, <ast.Name object at 0x7da20c76e200>]]
call[call[name[mongo_retry], parameter[name[self].insert_one]], parameter[name[document]]]
call[name[logger].debug, parameter[constant[Finished writing metadata for %s], name[symbol]]]
return[name[document]] | keyword[def] identifier[append] ( identifier[self] , identifier[symbol] , identifier[metadata] , identifier[start_time] = keyword[None] ):
literal[string]
keyword[if] identifier[start_time] keyword[is] keyword[None] :
identifier[start_time] = identifier[dt] . identifier[utcnow] ()
identifier[old_metadata] = identifier[self] . identifier[find_one] ({ literal[string] : identifier[symbol] }, identifier[sort] =[( literal[string] , identifier[pymongo] . identifier[DESCENDING] )])
keyword[if] identifier[old_metadata] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[old_metadata] [ literal[string] ]>= identifier[start_time] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[start_time] ,
identifier[old_metadata] [ literal[string] ]))
keyword[if] identifier[old_metadata] [ literal[string] ]== identifier[metadata] :
keyword[return] identifier[old_metadata]
keyword[elif] identifier[metadata] keyword[is] keyword[None] :
keyword[return]
identifier[self] . identifier[find_one_and_update] ({ literal[string] : identifier[symbol] },{ literal[string] :{ literal[string] : identifier[start_time] }},
identifier[sort] =[( literal[string] , identifier[pymongo] . identifier[DESCENDING] )])
identifier[document] ={ literal[string] : identifier[bson] . identifier[ObjectId] (), literal[string] : identifier[symbol] , literal[string] : identifier[metadata] , literal[string] : identifier[start_time] }
identifier[mongo_retry] ( identifier[self] . identifier[insert_one] )( identifier[document] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[symbol] )
keyword[return] identifier[document] | def append(self, symbol, metadata, start_time=None):
"""
Update metadata entry for `symbol`
Parameters
----------
symbol : `str`
symbol name for the item
metadata : `dict`
to be persisted
start_time : `datetime.datetime`
when metadata becomes effective
Default: datetime.datetime.utcnow()
"""
if start_time is None:
start_time = dt.utcnow() # depends on [control=['if'], data=['start_time']]
old_metadata = self.find_one({'symbol': symbol}, sort=[('start_time', pymongo.DESCENDING)])
if old_metadata is not None:
if old_metadata['start_time'] >= start_time:
raise ValueError('start_time={} is earlier than the last metadata @{}'.format(start_time, old_metadata['start_time'])) # depends on [control=['if'], data=['start_time']]
if old_metadata['metadata'] == metadata:
return old_metadata # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['old_metadata']]
elif metadata is None:
return # depends on [control=['if'], data=[]]
self.find_one_and_update({'symbol': symbol}, {'$set': {'end_time': start_time}}, sort=[('start_time', pymongo.DESCENDING)])
document = {'_id': bson.ObjectId(), 'symbol': symbol, 'metadata': metadata, 'start_time': start_time}
mongo_retry(self.insert_one)(document)
logger.debug('Finished writing metadata for %s', symbol)
return document |
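
The versioning rules above (monotonically increasing start_time, close the previous interval, skip writes that change nothing) can be exercised against a plain list standing in for the MongoDB collection; a hedged re-implementation, not the library's code:

from datetime import datetime as dt

docs = []  # newest document last

def append(symbol, metadata, start_time=None):
    start_time = start_time or dt.utcnow()
    old = docs[-1] if docs else None
    if old is not None:
        if old['start_time'] >= start_time:
            raise ValueError('start_time is earlier than the last metadata')
        if old['metadata'] == metadata:
            return old                    # no-op write
    elif metadata is None:
        return
    if old is not None:
        old['end_time'] = start_time      # close the previous interval
    doc = {'symbol': symbol, 'metadata': metadata, 'start_time': start_time}
    docs.append(doc)
    return doc

append('AAPL', {'sector': 'tech'}, dt(2020, 1, 1))
append('AAPL', {'sector': 'tech'}, dt(2020, 2, 1))       # skipped: same metadata
append('AAPL', {'sector': 'hardware'}, dt(2020, 3, 1))
print(len(docs), docs[0]['end_time'])  # 2 2020-03-01 00:00:00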
def get_agents(self, pool_id, agent_name=None, include_capabilities=None, include_assigned_request=None, include_last_completed_request=None, property_filters=None, demands=None):
"""GetAgents.
[Preview API] Get a list of agents.
:param int pool_id: The agent pool containing the agents
:param str agent_name: Filter on agent name
:param bool include_capabilities: Whether to include the agents' capabilities in the response
:param bool include_assigned_request: Whether to include details about the agents' current work
:param bool include_last_completed_request: Whether to include details about the agents' most recent completed work
:param [str] property_filters: Filter which custom properties will be returned
:param [str] demands: Filter by demands the agents can satisfy
:rtype: [TaskAgent]
"""
route_values = {}
if pool_id is not None:
route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int')
query_parameters = {}
if agent_name is not None:
query_parameters['agentName'] = self._serialize.query('agent_name', agent_name, 'str')
if include_capabilities is not None:
query_parameters['includeCapabilities'] = self._serialize.query('include_capabilities', include_capabilities, 'bool')
if include_assigned_request is not None:
query_parameters['includeAssignedRequest'] = self._serialize.query('include_assigned_request', include_assigned_request, 'bool')
if include_last_completed_request is not None:
query_parameters['includeLastCompletedRequest'] = self._serialize.query('include_last_completed_request', include_last_completed_request, 'bool')
if property_filters is not None:
property_filters = ",".join(property_filters)
query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
if demands is not None:
demands = ",".join(demands)
query_parameters['demands'] = self._serialize.query('demands', demands, 'str')
response = self._send(http_method='GET',
location_id='e298ef32-5878-4cab-993c-043836571f42',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[TaskAgent]', self._unwrap_collection(response)) | def function[get_agents, parameter[self, pool_id, agent_name, include_capabilities, include_assigned_request, include_last_completed_request, property_filters, demands]]:
constant[GetAgents.
[Preview API] Get a list of agents.
:param int pool_id: The agent pool containing the agents
:param str agent_name: Filter on agent name
:param bool include_capabilities: Whether to include the agents' capabilities in the response
:param bool include_assigned_request: Whether to include details about the agents' current work
:param bool include_last_completed_request: Whether to include details about the agents' most recent completed work
:param [str] property_filters: Filter which custom properties will be returned
:param [str] demands: Filter by demands the agents can satisfy
:rtype: [TaskAgent]
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[pool_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[poolId]] assign[=] call[name[self]._serialize.url, parameter[constant[pool_id], name[pool_id], constant[int]]]
variable[query_parameters] assign[=] dictionary[[], []]
if compare[name[agent_name] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[agentName]] assign[=] call[name[self]._serialize.query, parameter[constant[agent_name], name[agent_name], constant[str]]]
if compare[name[include_capabilities] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[includeCapabilities]] assign[=] call[name[self]._serialize.query, parameter[constant[include_capabilities], name[include_capabilities], constant[bool]]]
if compare[name[include_assigned_request] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[includeAssignedRequest]] assign[=] call[name[self]._serialize.query, parameter[constant[include_assigned_request], name[include_assigned_request], constant[bool]]]
if compare[name[include_last_completed_request] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[includeLastCompletedRequest]] assign[=] call[name[self]._serialize.query, parameter[constant[include_last_completed_request], name[include_last_completed_request], constant[bool]]]
if compare[name[property_filters] is_not constant[None]] begin[:]
variable[property_filters] assign[=] call[constant[,].join, parameter[name[property_filters]]]
call[name[query_parameters]][constant[propertyFilters]] assign[=] call[name[self]._serialize.query, parameter[constant[property_filters], name[property_filters], constant[str]]]
if compare[name[demands] is_not constant[None]] begin[:]
variable[demands] assign[=] call[constant[,].join, parameter[name[demands]]]
call[name[query_parameters]][constant[demands]] assign[=] call[name[self]._serialize.query, parameter[constant[demands], name[demands], constant[str]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[[TaskAgent]], call[name[self]._unwrap_collection, parameter[name[response]]]]]] | keyword[def] identifier[get_agents] ( identifier[self] , identifier[pool_id] , identifier[agent_name] = keyword[None] , identifier[include_capabilities] = keyword[None] , identifier[include_assigned_request] = keyword[None] , identifier[include_last_completed_request] = keyword[None] , identifier[property_filters] = keyword[None] , identifier[demands] = keyword[None] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[pool_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[pool_id] , literal[string] )
identifier[query_parameters] ={}
keyword[if] identifier[agent_name] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[agent_name] , literal[string] )
keyword[if] identifier[include_capabilities] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[include_capabilities] , literal[string] )
keyword[if] identifier[include_assigned_request] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[include_assigned_request] , literal[string] )
keyword[if] identifier[include_last_completed_request] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[include_last_completed_request] , literal[string] )
keyword[if] identifier[property_filters] keyword[is] keyword[not] keyword[None] :
identifier[property_filters] = literal[string] . identifier[join] ( identifier[property_filters] )
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[property_filters] , literal[string] )
keyword[if] identifier[demands] keyword[is] keyword[not] keyword[None] :
identifier[demands] = literal[string] . identifier[join] ( identifier[demands] )
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[demands] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] ,
identifier[query_parameters] = identifier[query_parameters] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[self] . identifier[_unwrap_collection] ( identifier[response] )) | def get_agents(self, pool_id, agent_name=None, include_capabilities=None, include_assigned_request=None, include_last_completed_request=None, property_filters=None, demands=None):
"""GetAgents.
[Preview API] Get a list of agents.
:param int pool_id: The agent pool containing the agents
:param str agent_name: Filter on agent name
:param bool include_capabilities: Whether to include the agents' capabilities in the response
:param bool include_assigned_request: Whether to include details about the agents' current work
:param bool include_last_completed_request: Whether to include details about the agents' most recent completed work
:param [str] property_filters: Filter which custom properties will be returned
:param [str] demands: Filter by demands the agents can satisfy
:rtype: [TaskAgent]
"""
route_values = {}
if pool_id is not None:
route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int') # depends on [control=['if'], data=['pool_id']]
query_parameters = {}
if agent_name is not None:
query_parameters['agentName'] = self._serialize.query('agent_name', agent_name, 'str') # depends on [control=['if'], data=['agent_name']]
if include_capabilities is not None:
query_parameters['includeCapabilities'] = self._serialize.query('include_capabilities', include_capabilities, 'bool') # depends on [control=['if'], data=['include_capabilities']]
if include_assigned_request is not None:
query_parameters['includeAssignedRequest'] = self._serialize.query('include_assigned_request', include_assigned_request, 'bool') # depends on [control=['if'], data=['include_assigned_request']]
if include_last_completed_request is not None:
query_parameters['includeLastCompletedRequest'] = self._serialize.query('include_last_completed_request', include_last_completed_request, 'bool') # depends on [control=['if'], data=['include_last_completed_request']]
if property_filters is not None:
property_filters = ','.join(property_filters)
query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str') # depends on [control=['if'], data=['property_filters']]
if demands is not None:
demands = ','.join(demands)
query_parameters['demands'] = self._serialize.query('demands', demands, 'str') # depends on [control=['if'], data=['demands']]
response = self._send(http_method='GET', location_id='e298ef32-5878-4cab-993c-043836571f42', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters)
return self._deserialize('[TaskAgent]', self._unwrap_collection(response)) |
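# Usage sketch for the row above; assumes `client` is an instance of the
# class exposing get_agents (e.g. the azure-devops task agent client).
# The pool id and demand string are illustrative.
linux_agents = client.get_agents(pool_id=4,
                                 include_capabilities=True,
                                 demands=['Agent.OS -equals Linux'])
for agent in linux_agents:
    print(agent.name)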
def left(self, speed=1):
"""
Make the robot turn left by running the right motor forward and left
motor backward.
:param float speed:
Speed at which to drive the motors, as a value between 0 (stopped)
and 1 (full speed). The default is 1.
"""
self.right_motor.forward(speed)
self.left_motor.backward(speed) | def function[left, parameter[self, speed]]:
constant[
Make the robot turn left by running the right motor forward and left
motor backward.
:param float speed:
Speed at which to drive the motors, as a value between 0 (stopped)
and 1 (full speed). The default is 1.
]
call[name[self].right_motor.forward, parameter[name[speed]]]
call[name[self].left_motor.backward, parameter[name[speed]]] | keyword[def] identifier[left] ( identifier[self] , identifier[speed] = literal[int] ):
literal[string]
identifier[self] . identifier[right_motor] . identifier[forward] ( identifier[speed] )
identifier[self] . identifier[left_motor] . identifier[backward] ( identifier[speed] ) | def left(self, speed=1):
"""
Make the robot turn left by running the right motor forward and left
motor backward.
:param float speed:
Speed at which to drive the motors, as a value between 0 (stopped)
and 1 (full speed). The default is 1.
"""
self.right_motor.forward(speed)
self.left_motor.backward(speed) |
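# Usage sketch: left() above matches gpiozero's Robot API, so with that
# library installed it can be exercised like this (pin pairs illustrative).
from gpiozero import Robot

robot = Robot(left=(4, 14), right=(17, 18))
robot.left(speed=0.5)  # spin left at half speed until another command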
def install(name, link, path, priority):
'''
    Install a new alternative for the defined <name>
name
is the master name for this link group
(e.g. pager)
link
is the symlink pointing to /etc/alternatives/<name>.
(e.g. /usr/bin/pager)
path
is the location of the new alternative target.
NB: This file / directory must already exist.
(e.g. /usr/bin/less)
priority
is an integer; options with higher numbers have higher priority in
automatic mode.
'''
ret = {'name': name,
'link': link,
'path': path,
'priority': priority,
'result': True,
'changes': {},
'comment': ''}
if __salt__['alternatives.check_exists'](name, path):
ret['comment'] = 'Alternative {0} for {1} is already registered'.format(path, name)
else:
if __opts__['test']:
ret['comment'] = (
'Alternative will be set for {0} to {1} with priority {2}'
).format(name, path, priority)
ret['result'] = None
return ret
out = __salt__['alternatives.install'](name, link, path, priority)
if __salt__['alternatives.check_exists'](name, path):
if __salt__['alternatives.check_installed'](name, path):
ret['comment'] = (
'Alternative for {0} set to path {1} with priority {2}'
).format(name, path, priority)
else:
ret['comment'] = (
'Alternative {0} for {1} registered with priority {2} and not set to default'
).format(path, name, priority)
ret['changes'] = {'name': name,
'link': link,
'path': path,
'priority': priority}
else:
ret['result'] = False
ret['comment'] = (
'Alternative for {0} not installed: {1}'
).format(name, out)
return ret | def function[install, parameter[name, link, path, priority]]:
constant[
    Install a new alternative for the defined <name>
name
is the master name for this link group
(e.g. pager)
link
is the symlink pointing to /etc/alternatives/<name>.
(e.g. /usr/bin/pager)
path
is the location of the new alternative target.
NB: This file / directory must already exist.
(e.g. /usr/bin/less)
priority
is an integer; options with higher numbers have higher priority in
automatic mode.
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b21bbd60>, <ast.Constant object at 0x7da1b21bbd30>, <ast.Constant object at 0x7da1b21bbd00>, <ast.Constant object at 0x7da1b21bbcd0>, <ast.Constant object at 0x7da1b21bbca0>, <ast.Constant object at 0x7da1b21bbc70>, <ast.Constant object at 0x7da1b21bbc40>], [<ast.Name object at 0x7da1b21bbc10>, <ast.Name object at 0x7da1b21bbbe0>, <ast.Name object at 0x7da1b21bbbb0>, <ast.Name object at 0x7da1b21bbb80>, <ast.Constant object at 0x7da1b21bbb50>, <ast.Dict object at 0x7da1b21bbb20>, <ast.Constant object at 0x7da1b21bbaf0>]]
if call[call[name[__salt__]][constant[alternatives.check_exists]], parameter[name[name], name[path]]] begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[Alternative {0} for {1} is already registered].format, parameter[name[path], name[name]]]
return[name[ret]] | keyword[def] identifier[install] ( identifier[name] , identifier[link] , identifier[path] , identifier[priority] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] : identifier[link] ,
literal[string] : identifier[path] ,
literal[string] : identifier[priority] ,
literal[string] : keyword[True] ,
literal[string] :{},
literal[string] : literal[string] }
keyword[if] identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[path] ):
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[path] , identifier[name] )
keyword[else] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]=(
literal[string]
). identifier[format] ( identifier[name] , identifier[path] , identifier[priority] )
identifier[ret] [ literal[string] ]= keyword[None]
keyword[return] identifier[ret]
identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[link] , identifier[path] , identifier[priority] )
keyword[if] identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[path] ):
keyword[if] identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[path] ):
identifier[ret] [ literal[string] ]=(
literal[string]
). identifier[format] ( identifier[name] , identifier[path] , identifier[priority] )
keyword[else] :
identifier[ret] [ literal[string] ]=(
literal[string]
). identifier[format] ( identifier[path] , identifier[name] , identifier[priority] )
identifier[ret] [ literal[string] ]={ literal[string] : identifier[name] ,
literal[string] : identifier[link] ,
literal[string] : identifier[path] ,
literal[string] : identifier[priority] }
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]=(
literal[string]
). identifier[format] ( identifier[name] , identifier[out] )
keyword[return] identifier[ret] | def install(name, link, path, priority):
"""
    Install a new alternative for the defined <name>
name
is the master name for this link group
(e.g. pager)
link
is the symlink pointing to /etc/alternatives/<name>.
(e.g. /usr/bin/pager)
path
is the location of the new alternative target.
NB: This file / directory must already exist.
(e.g. /usr/bin/less)
priority
is an integer; options with higher numbers have higher priority in
automatic mode.
"""
ret = {'name': name, 'link': link, 'path': path, 'priority': priority, 'result': True, 'changes': {}, 'comment': ''}
if __salt__['alternatives.check_exists'](name, path):
ret['comment'] = 'Alternative {0} for {1} is already registered'.format(path, name) # depends on [control=['if'], data=[]]
else:
if __opts__['test']:
ret['comment'] = 'Alternative will be set for {0} to {1} with priority {2}'.format(name, path, priority)
ret['result'] = None
return ret # depends on [control=['if'], data=[]]
out = __salt__['alternatives.install'](name, link, path, priority)
if __salt__['alternatives.check_exists'](name, path):
if __salt__['alternatives.check_installed'](name, path):
ret['comment'] = 'Alternative for {0} set to path {1} with priority {2}'.format(name, path, priority) # depends on [control=['if'], data=[]]
else:
ret['comment'] = 'Alternative {0} for {1} registered with priority {2} and not set to default'.format(path, name, priority)
ret['changes'] = {'name': name, 'link': link, 'path': path, 'priority': priority} # depends on [control=['if'], data=[]]
else:
ret['result'] = False
ret['comment'] = 'Alternative for {0} not installed: {1}'.format(name, out)
return ret |
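# Equivalent SLS usage for the state above (illustrative values), kept as
# a comment since this dump is Python-only:
#
#   pager:
#     alternatives.install:
#       - link: /usr/bin/pager
#       - path: /usr/bin/less
#       - priority: 50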
def _set_env_from_extras(self, extras):
"""
Sets the environment variable `GOOGLE_APPLICATION_CREDENTIALS` with either:
- The path to the keyfile from the specified connection id
- A generated file's path if the user specified JSON in the connection id. The
          file is created with delete=False, so the caller is responsible
          for removing it once it is no longer needed.
The environment variable is used inside the gcloud command to determine correct
service account to use.
"""
key_path = self._get_field(extras, 'key_path', False)
keyfile_json_str = self._get_field(extras, 'keyfile_dict', False)
if not key_path and not keyfile_json_str:
self.log.info('Using gcloud with application default credentials.')
elif key_path:
os.environ[G_APP_CRED] = key_path
else:
# Write service account JSON to secure file for gcloud to reference
service_key = tempfile.NamedTemporaryFile(delete=False)
service_key.write(keyfile_json_str)
os.environ[G_APP_CRED] = service_key.name
            # Return the file object so the caller keeps a handle to close
            # it and remove the file from the file system when done.
return service_key | def function[_set_env_from_extras, parameter[self, extras]]:
constant[
Sets the environment variable `GOOGLE_APPLICATION_CREDENTIALS` with either:
- The path to the keyfile from the specified connection id
- A generated file's path if the user specified JSON in the connection id. The
          file is created with delete=False, so the caller is responsible
          for removing it once it is no longer needed.
The environment variable is used inside the gcloud command to determine correct
service account to use.
]
variable[key_path] assign[=] call[name[self]._get_field, parameter[name[extras], constant[key_path], constant[False]]]
variable[keyfile_json_str] assign[=] call[name[self]._get_field, parameter[name[extras], constant[keyfile_dict], constant[False]]]
if <ast.BoolOp object at 0x7da20c6c7c40> begin[:]
call[name[self].log.info, parameter[constant[Using gcloud with application default credentials.]]] | keyword[def] identifier[_set_env_from_extras] ( identifier[self] , identifier[extras] ):
literal[string]
identifier[key_path] = identifier[self] . identifier[_get_field] ( identifier[extras] , literal[string] , keyword[False] )
identifier[keyfile_json_str] = identifier[self] . identifier[_get_field] ( identifier[extras] , literal[string] , keyword[False] )
keyword[if] keyword[not] identifier[key_path] keyword[and] keyword[not] identifier[keyfile_json_str] :
identifier[self] . identifier[log] . identifier[info] ( literal[string] )
keyword[elif] identifier[key_path] :
identifier[os] . identifier[environ] [ identifier[G_APP_CRED] ]= identifier[key_path]
keyword[else] :
identifier[service_key] = identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[delete] = keyword[False] )
identifier[service_key] . identifier[write] ( identifier[keyfile_json_str] )
identifier[os] . identifier[environ] [ identifier[G_APP_CRED] ]= identifier[service_key] . identifier[name]
keyword[return] identifier[service_key] | def _set_env_from_extras(self, extras):
"""
Sets the environment variable `GOOGLE_APPLICATION_CREDENTIALS` with either:
- The path to the keyfile from the specified connection id
- A generated file's path if the user specified JSON in the connection id. The
          file is created with delete=False, so the caller is responsible
          for removing it once it is no longer needed.
The environment variable is used inside the gcloud command to determine correct
service account to use.
"""
key_path = self._get_field(extras, 'key_path', False)
keyfile_json_str = self._get_field(extras, 'keyfile_dict', False)
if not key_path and (not keyfile_json_str):
self.log.info('Using gcloud with application default credentials.') # depends on [control=['if'], data=[]]
elif key_path:
os.environ[G_APP_CRED] = key_path # depends on [control=['if'], data=[]]
else:
# Write service account JSON to secure file for gcloud to reference
service_key = tempfile.NamedTemporaryFile(delete=False)
service_key.write(keyfile_json_str)
os.environ[G_APP_CRED] = service_key.name
            # Return the file object so the caller keeps a handle to close
            # it and remove the file from the file system when done.
return service_key |
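# Minimal standalone sketch of the keyfile-dict branch above; G_APP_CRED
# and the JSON payload are assumptions mirroring the method body.
import os
import tempfile

G_APP_CRED = 'GOOGLE_APPLICATION_CREDENTIALS'
service_key = tempfile.NamedTemporaryFile(delete=False)
service_key.write(b'{"type": "service_account"}')  # illustrative key JSON
service_key.close()
os.environ[G_APP_CRED] = service_key.name  # gcloud reads this variable
os.unlink(service_key.name)  # the caller removes the file when done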
def _to_DOM(self):
"""
Dumps object data to a fully traversable DOM representation of the
object.
        :returns: a ``xml.etree.ElementTree.Element`` object
"""
root_node = ET.Element("no2index")
reference_time_node = ET.SubElement(root_node, "reference_time")
reference_time_node.text = str(self._reference_time)
reception_time_node = ET.SubElement(root_node, "reception_time")
reception_time_node.text = str(self._reception_time)
interval_node = ET.SubElement(root_node, "interval")
interval_node.text = str(self._interval)
no2_samples_node = ET.SubElement(root_node, "no2_samples")
for smpl in self._no2_samples:
s = smpl.copy()
# turn values to 12 decimal digits-formatted strings
s['label'] = s['label']
s['value'] = '{:.12e}'.format(s['value'])
s['precision'] = '{:.12e}'.format(s['precision'])
xmlutils.create_DOM_node_from_dict(s, "no2_sample",
no2_samples_node)
root_node.append(self._location._to_DOM())
return root_node | def function[_to_DOM, parameter[self]]:
constant[
Dumps object data to a fully traversable DOM representation of the
object.
        :returns: a ``xml.etree.ElementTree.Element`` object
]
variable[root_node] assign[=] call[name[ET].Element, parameter[constant[no2index]]]
variable[reference_time_node] assign[=] call[name[ET].SubElement, parameter[name[root_node], constant[reference_time]]]
name[reference_time_node].text assign[=] call[name[str], parameter[name[self]._reference_time]]
variable[reception_time_node] assign[=] call[name[ET].SubElement, parameter[name[root_node], constant[reception_time]]]
name[reception_time_node].text assign[=] call[name[str], parameter[name[self]._reception_time]]
variable[interval_node] assign[=] call[name[ET].SubElement, parameter[name[root_node], constant[interval]]]
name[interval_node].text assign[=] call[name[str], parameter[name[self]._interval]]
variable[no2_samples_node] assign[=] call[name[ET].SubElement, parameter[name[root_node], constant[no2_samples]]]
for taget[name[smpl]] in starred[name[self]._no2_samples] begin[:]
variable[s] assign[=] call[name[smpl].copy, parameter[]]
call[name[s]][constant[label]] assign[=] call[name[s]][constant[label]]
call[name[s]][constant[value]] assign[=] call[constant[{:.12e}].format, parameter[call[name[s]][constant[value]]]]
call[name[s]][constant[precision]] assign[=] call[constant[{:.12e}].format, parameter[call[name[s]][constant[precision]]]]
call[name[xmlutils].create_DOM_node_from_dict, parameter[name[s], constant[no2_sample], name[no2_samples_node]]]
call[name[root_node].append, parameter[call[name[self]._location._to_DOM, parameter[]]]]
return[name[root_node]] | keyword[def] identifier[_to_DOM] ( identifier[self] ):
literal[string]
identifier[root_node] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[reference_time_node] = identifier[ET] . identifier[SubElement] ( identifier[root_node] , literal[string] )
identifier[reference_time_node] . identifier[text] = identifier[str] ( identifier[self] . identifier[_reference_time] )
identifier[reception_time_node] = identifier[ET] . identifier[SubElement] ( identifier[root_node] , literal[string] )
identifier[reception_time_node] . identifier[text] = identifier[str] ( identifier[self] . identifier[_reception_time] )
identifier[interval_node] = identifier[ET] . identifier[SubElement] ( identifier[root_node] , literal[string] )
identifier[interval_node] . identifier[text] = identifier[str] ( identifier[self] . identifier[_interval] )
identifier[no2_samples_node] = identifier[ET] . identifier[SubElement] ( identifier[root_node] , literal[string] )
keyword[for] identifier[smpl] keyword[in] identifier[self] . identifier[_no2_samples] :
identifier[s] = identifier[smpl] . identifier[copy] ()
identifier[s] [ literal[string] ]= identifier[s] [ literal[string] ]
identifier[s] [ literal[string] ]= literal[string] . identifier[format] ( identifier[s] [ literal[string] ])
identifier[s] [ literal[string] ]= literal[string] . identifier[format] ( identifier[s] [ literal[string] ])
identifier[xmlutils] . identifier[create_DOM_node_from_dict] ( identifier[s] , literal[string] ,
identifier[no2_samples_node] )
identifier[root_node] . identifier[append] ( identifier[self] . identifier[_location] . identifier[_to_DOM] ())
keyword[return] identifier[root_node] | def _to_DOM(self):
"""
Dumps object data to a fully traversable DOM representation of the
object.
        :returns: a ``xml.etree.ElementTree.Element`` object
"""
root_node = ET.Element('no2index')
reference_time_node = ET.SubElement(root_node, 'reference_time')
reference_time_node.text = str(self._reference_time)
reception_time_node = ET.SubElement(root_node, 'reception_time')
reception_time_node.text = str(self._reception_time)
interval_node = ET.SubElement(root_node, 'interval')
interval_node.text = str(self._interval)
no2_samples_node = ET.SubElement(root_node, 'no2_samples')
for smpl in self._no2_samples:
s = smpl.copy()
# turn values to 12 decimal digits-formatted strings
s['label'] = s['label']
s['value'] = '{:.12e}'.format(s['value'])
s['precision'] = '{:.12e}'.format(s['precision'])
xmlutils.create_DOM_node_from_dict(s, 'no2_sample', no2_samples_node) # depends on [control=['for'], data=['smpl']]
root_node.append(self._location._to_DOM())
return root_node |
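# Consumption sketch: serialising the DOM produced by _to_DOM(); assumes
# `index` is an object exposing the method above.
import xml.etree.ElementTree as ET

root = index._to_DOM()
print(ET.tostring(root, encoding='unicode'))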
def append(self, other, ignore_meta_conflict=False, inplace=False,
**kwargs):
"""Append any castable object to this IamDataFrame.
Columns in `other.meta` that are not in `self.meta` are always merged,
duplicate region-variable-unit-year rows raise a ValueError.
Parameters
----------
other: pyam.IamDataFrame, ixmp.TimeSeries, ixmp.Scenario,
pd.DataFrame or data file
An IamDataFrame, TimeSeries or Scenario (requires `ixmp`),
pandas.DataFrame or data file with IAMC-format data columns
ignore_meta_conflict : bool, default False
If False and `other` is an IamDataFrame, raise an error if
any meta columns present in `self` and `other` are not identical.
inplace : bool, default False
If True, do operation inplace and return None
kwargs are passed through to `IamDataFrame(other, **kwargs)`
"""
if not isinstance(other, IamDataFrame):
other = IamDataFrame(other, **kwargs)
ignore_meta_conflict = True
if self.time_col is not other.time_col:
raise ValueError('incompatible time format (years vs. datetime)!')
ret = copy.deepcopy(self) if not inplace else self
diff = other.meta.index.difference(ret.meta.index)
intersect = other.meta.index.intersection(ret.meta.index)
# merge other.meta columns not in self.meta for existing scenarios
if not intersect.empty:
# if not ignored, check that overlapping meta dataframes are equal
if not ignore_meta_conflict:
cols = [i for i in other.meta.columns if i in ret.meta.columns]
if not ret.meta.loc[intersect, cols].equals(
other.meta.loc[intersect, cols]):
conflict_idx = (
pd.concat([ret.meta.loc[intersect, cols],
other.meta.loc[intersect, cols]]
).drop_duplicates()
.index.drop_duplicates()
)
msg = 'conflict in `meta` for scenarios {}'.format(
[i for i in pd.DataFrame(index=conflict_idx).index])
raise ValueError(msg)
cols = [i for i in other.meta.columns if i not in ret.meta.columns]
_meta = other.meta.loc[intersect, cols]
ret.meta = ret.meta.merge(_meta, how='outer',
left_index=True, right_index=True)
# join other.meta for new scenarios
if not diff.empty:
            # sorting not supported by `pd.DataFrame.append()` prior to pandas 0.23
sort_kwarg = {} if int(pd.__version__.split('.')[1]) < 23 \
else dict(sort=False)
ret.meta = ret.meta.append(other.meta.loc[diff, :], **sort_kwarg)
# append other.data (verify integrity for no duplicates)
_data = ret.data.set_index(ret._LONG_IDX).append(
other.data.set_index(other._LONG_IDX), verify_integrity=True)
# merge extra columns in `data` and set `LONG_IDX`
ret.extra_cols += [i for i in other.extra_cols
if i not in ret.extra_cols]
ret._LONG_IDX = IAMC_IDX + [ret.time_col] + ret.extra_cols
ret.data = sort_data(_data.reset_index(), ret._LONG_IDX)
if not inplace:
return ret | def function[append, parameter[self, other, ignore_meta_conflict, inplace]]:
constant[Append any castable object to this IamDataFrame.
Columns in `other.meta` that are not in `self.meta` are always merged,
duplicate region-variable-unit-year rows raise a ValueError.
Parameters
----------
other: pyam.IamDataFrame, ixmp.TimeSeries, ixmp.Scenario,
pd.DataFrame or data file
An IamDataFrame, TimeSeries or Scenario (requires `ixmp`),
pandas.DataFrame or data file with IAMC-format data columns
ignore_meta_conflict : bool, default False
If False and `other` is an IamDataFrame, raise an error if
any meta columns present in `self` and `other` are not identical.
inplace : bool, default False
If True, do operation inplace and return None
kwargs are passed through to `IamDataFrame(other, **kwargs)`
]
if <ast.UnaryOp object at 0x7da18bcc90f0> begin[:]
variable[other] assign[=] call[name[IamDataFrame], parameter[name[other]]]
variable[ignore_meta_conflict] assign[=] constant[True]
if compare[name[self].time_col is_not name[other].time_col] begin[:]
<ast.Raise object at 0x7da18bcc86a0>
variable[ret] assign[=] <ast.IfExp object at 0x7da18bccb4f0>
variable[diff] assign[=] call[name[other].meta.index.difference, parameter[name[ret].meta.index]]
variable[intersect] assign[=] call[name[other].meta.index.intersection, parameter[name[ret].meta.index]]
if <ast.UnaryOp object at 0x7da18bcc9810> begin[:]
if <ast.UnaryOp object at 0x7da18bcca080> begin[:]
variable[cols] assign[=] <ast.ListComp object at 0x7da18bccacb0>
if <ast.UnaryOp object at 0x7da18bcc83d0> begin[:]
variable[conflict_idx] assign[=] call[call[call[name[pd].concat, parameter[list[[<ast.Subscript object at 0x7da18bcc9900>, <ast.Subscript object at 0x7da1b0f2b820>]]]].drop_duplicates, parameter[]].index.drop_duplicates, parameter[]]
variable[msg] assign[=] call[constant[conflict in `meta` for scenarios {}].format, parameter[<ast.ListComp object at 0x7da1b0f29ae0>]]
<ast.Raise object at 0x7da1b0f29d50>
variable[cols] assign[=] <ast.ListComp object at 0x7da1b0f29e70>
variable[_meta] assign[=] call[name[other].meta.loc][tuple[[<ast.Name object at 0x7da1b0f2b2b0>, <ast.Name object at 0x7da1b0f2b2e0>]]]
name[ret].meta assign[=] call[name[ret].meta.merge, parameter[name[_meta]]]
if <ast.UnaryOp object at 0x7da1b0f29bd0> begin[:]
variable[sort_kwarg] assign[=] <ast.IfExp object at 0x7da1b0f2a380>
name[ret].meta assign[=] call[name[ret].meta.append, parameter[call[name[other].meta.loc][tuple[[<ast.Name object at 0x7da1b0f2a290>, <ast.Slice object at 0x7da1b0f2bd90>]]]]]
variable[_data] assign[=] call[call[name[ret].data.set_index, parameter[name[ret]._LONG_IDX]].append, parameter[call[name[other].data.set_index, parameter[name[other]._LONG_IDX]]]]
<ast.AugAssign object at 0x7da1b0f2bc10>
name[ret]._LONG_IDX assign[=] binary_operation[binary_operation[name[IAMC_IDX] + list[[<ast.Attribute object at 0x7da1b0f2abf0>]]] + name[ret].extra_cols]
name[ret].data assign[=] call[name[sort_data], parameter[call[name[_data].reset_index, parameter[]], name[ret]._LONG_IDX]]
if <ast.UnaryOp object at 0x7da1b0f38cd0> begin[:]
return[name[ret]] | keyword[def] identifier[append] ( identifier[self] , identifier[other] , identifier[ignore_meta_conflict] = keyword[False] , identifier[inplace] = keyword[False] ,
** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[other] , identifier[IamDataFrame] ):
identifier[other] = identifier[IamDataFrame] ( identifier[other] ,** identifier[kwargs] )
identifier[ignore_meta_conflict] = keyword[True]
keyword[if] identifier[self] . identifier[time_col] keyword[is] keyword[not] identifier[other] . identifier[time_col] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[ret] = identifier[copy] . identifier[deepcopy] ( identifier[self] ) keyword[if] keyword[not] identifier[inplace] keyword[else] identifier[self]
identifier[diff] = identifier[other] . identifier[meta] . identifier[index] . identifier[difference] ( identifier[ret] . identifier[meta] . identifier[index] )
identifier[intersect] = identifier[other] . identifier[meta] . identifier[index] . identifier[intersection] ( identifier[ret] . identifier[meta] . identifier[index] )
keyword[if] keyword[not] identifier[intersect] . identifier[empty] :
keyword[if] keyword[not] identifier[ignore_meta_conflict] :
identifier[cols] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[other] . identifier[meta] . identifier[columns] keyword[if] identifier[i] keyword[in] identifier[ret] . identifier[meta] . identifier[columns] ]
keyword[if] keyword[not] identifier[ret] . identifier[meta] . identifier[loc] [ identifier[intersect] , identifier[cols] ]. identifier[equals] (
identifier[other] . identifier[meta] . identifier[loc] [ identifier[intersect] , identifier[cols] ]):
identifier[conflict_idx] =(
identifier[pd] . identifier[concat] ([ identifier[ret] . identifier[meta] . identifier[loc] [ identifier[intersect] , identifier[cols] ],
identifier[other] . identifier[meta] . identifier[loc] [ identifier[intersect] , identifier[cols] ]]
). identifier[drop_duplicates] ()
. identifier[index] . identifier[drop_duplicates] ()
)
identifier[msg] = literal[string] . identifier[format] (
[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[pd] . identifier[DataFrame] ( identifier[index] = identifier[conflict_idx] ). identifier[index] ])
keyword[raise] identifier[ValueError] ( identifier[msg] )
identifier[cols] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[other] . identifier[meta] . identifier[columns] keyword[if] identifier[i] keyword[not] keyword[in] identifier[ret] . identifier[meta] . identifier[columns] ]
identifier[_meta] = identifier[other] . identifier[meta] . identifier[loc] [ identifier[intersect] , identifier[cols] ]
identifier[ret] . identifier[meta] = identifier[ret] . identifier[meta] . identifier[merge] ( identifier[_meta] , identifier[how] = literal[string] ,
identifier[left_index] = keyword[True] , identifier[right_index] = keyword[True] )
keyword[if] keyword[not] identifier[diff] . identifier[empty] :
identifier[sort_kwarg] ={} keyword[if] identifier[int] ( identifier[pd] . identifier[__version__] . identifier[split] ( literal[string] )[ literal[int] ])< literal[int] keyword[else] identifier[dict] ( identifier[sort] = keyword[False] )
identifier[ret] . identifier[meta] = identifier[ret] . identifier[meta] . identifier[append] ( identifier[other] . identifier[meta] . identifier[loc] [ identifier[diff] ,:],** identifier[sort_kwarg] )
identifier[_data] = identifier[ret] . identifier[data] . identifier[set_index] ( identifier[ret] . identifier[_LONG_IDX] ). identifier[append] (
identifier[other] . identifier[data] . identifier[set_index] ( identifier[other] . identifier[_LONG_IDX] ), identifier[verify_integrity] = keyword[True] )
identifier[ret] . identifier[extra_cols] +=[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[other] . identifier[extra_cols]
keyword[if] identifier[i] keyword[not] keyword[in] identifier[ret] . identifier[extra_cols] ]
identifier[ret] . identifier[_LONG_IDX] = identifier[IAMC_IDX] +[ identifier[ret] . identifier[time_col] ]+ identifier[ret] . identifier[extra_cols]
identifier[ret] . identifier[data] = identifier[sort_data] ( identifier[_data] . identifier[reset_index] (), identifier[ret] . identifier[_LONG_IDX] )
keyword[if] keyword[not] identifier[inplace] :
keyword[return] identifier[ret] | def append(self, other, ignore_meta_conflict=False, inplace=False, **kwargs):
"""Append any castable object to this IamDataFrame.
Columns in `other.meta` that are not in `self.meta` are always merged,
duplicate region-variable-unit-year rows raise a ValueError.
Parameters
----------
other: pyam.IamDataFrame, ixmp.TimeSeries, ixmp.Scenario,
pd.DataFrame or data file
An IamDataFrame, TimeSeries or Scenario (requires `ixmp`),
pandas.DataFrame or data file with IAMC-format data columns
ignore_meta_conflict : bool, default False
If False and `other` is an IamDataFrame, raise an error if
any meta columns present in `self` and `other` are not identical.
inplace : bool, default False
If True, do operation inplace and return None
kwargs are passed through to `IamDataFrame(other, **kwargs)`
"""
if not isinstance(other, IamDataFrame):
other = IamDataFrame(other, **kwargs)
ignore_meta_conflict = True # depends on [control=['if'], data=[]]
if self.time_col is not other.time_col:
raise ValueError('incompatible time format (years vs. datetime)!') # depends on [control=['if'], data=[]]
ret = copy.deepcopy(self) if not inplace else self
diff = other.meta.index.difference(ret.meta.index)
intersect = other.meta.index.intersection(ret.meta.index)
# merge other.meta columns not in self.meta for existing scenarios
if not intersect.empty:
# if not ignored, check that overlapping meta dataframes are equal
if not ignore_meta_conflict:
cols = [i for i in other.meta.columns if i in ret.meta.columns]
if not ret.meta.loc[intersect, cols].equals(other.meta.loc[intersect, cols]):
conflict_idx = pd.concat([ret.meta.loc[intersect, cols], other.meta.loc[intersect, cols]]).drop_duplicates().index.drop_duplicates()
msg = 'conflict in `meta` for scenarios {}'.format([i for i in pd.DataFrame(index=conflict_idx).index])
raise ValueError(msg) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
cols = [i for i in other.meta.columns if i not in ret.meta.columns]
_meta = other.meta.loc[intersect, cols]
ret.meta = ret.meta.merge(_meta, how='outer', left_index=True, right_index=True) # depends on [control=['if'], data=[]]
# join other.meta for new scenarios
if not diff.empty:
            # sorting not supported by `pd.DataFrame.append()` prior to pandas 0.23
sort_kwarg = {} if int(pd.__version__.split('.')[1]) < 23 else dict(sort=False)
ret.meta = ret.meta.append(other.meta.loc[diff, :], **sort_kwarg) # depends on [control=['if'], data=[]]
# append other.data (verify integrity for no duplicates)
_data = ret.data.set_index(ret._LONG_IDX).append(other.data.set_index(other._LONG_IDX), verify_integrity=True)
# merge extra columns in `data` and set `LONG_IDX`
ret.extra_cols += [i for i in other.extra_cols if i not in ret.extra_cols]
ret._LONG_IDX = IAMC_IDX + [ret.time_col] + ret.extra_cols
ret.data = sort_data(_data.reset_index(), ret._LONG_IDX)
if not inplace:
return ret # depends on [control=['if'], data=[]] |
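# Usage sketch, assuming df and other_df are IamDataFrame instances built
# from IAMC-format data with compatible time columns.
combined = df.append(other_df)     # returns a new IamDataFrame
df.append(other_df, inplace=True)  # or merge into df directly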
def update_ledger(self, ledger_id, description=None):
"""Update ledger info
Arguments:
ledger_id:
Ledger id assigned by mCASH
description:
                Description of the Ledger and its usage
"""
arguments = {'description': description}
return self.do_req('PUT',
self.merchant_api_base_url + '/ledger/' +
ledger_id + '/', arguments) | def function[update_ledger, parameter[self, ledger_id, description]]:
constant[Update ledger info
Arguments:
ledger_id:
Ledger id assigned by mCASH
description:
                Description of the Ledger and its usage
]
variable[arguments] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c7100>], [<ast.Name object at 0x7da20c6c5d20>]]
return[call[name[self].do_req, parameter[constant[PUT], binary_operation[binary_operation[binary_operation[name[self].merchant_api_base_url + constant[/ledger/]] + name[ledger_id]] + constant[/]], name[arguments]]]] | keyword[def] identifier[update_ledger] ( identifier[self] , identifier[ledger_id] , identifier[description] = keyword[None] ):
literal[string]
identifier[arguments] ={ literal[string] : identifier[description] }
keyword[return] identifier[self] . identifier[do_req] ( literal[string] ,
identifier[self] . identifier[merchant_api_base_url] + literal[string] +
identifier[ledger_id] + literal[string] , identifier[arguments] ) | def update_ledger(self, ledger_id, description=None):
"""Update ledger info
Arguments:
ledger_id:
Ledger id assigned by mCASH
description:
                Description of the Ledger and its usage
"""
arguments = {'description': description}
return self.do_req('PUT', self.merchant_api_base_url + '/ledger/' + ledger_id + '/', arguments) |
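# Usage sketch; assumes `client` is the mCASH API client this method
# belongs to, and the ledger id is illustrative.
client.update_ledger('ledger123', description='Settlement ledger for shop A')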
def on_queue_declareok(self, method_frame):
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
"""
_logger.info('Binding %s to %s with %s',
self.EXCHANGE, self._queue, self._routing_key)
self._channel.queue_bind(self.on_bindok, self._queue,
self.EXCHANGE, self._routing_key) | def function[on_queue_declareok, parameter[self, method_frame]]:
constant[Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
]
call[name[_logger].info, parameter[constant[Binding %s to %s with %s], name[self].EXCHANGE, name[self]._queue, name[self]._routing_key]]
call[name[self]._channel.queue_bind, parameter[name[self].on_bindok, name[self]._queue, name[self].EXCHANGE, name[self]._routing_key]] | keyword[def] identifier[on_queue_declareok] ( identifier[self] , identifier[method_frame] ):
literal[string]
identifier[_logger] . identifier[info] ( literal[string] ,
identifier[self] . identifier[EXCHANGE] , identifier[self] . identifier[_queue] , identifier[self] . identifier[_routing_key] )
identifier[self] . identifier[_channel] . identifier[queue_bind] ( identifier[self] . identifier[on_bindok] , identifier[self] . identifier[_queue] ,
identifier[self] . identifier[EXCHANGE] , identifier[self] . identifier[_routing_key] ) | def on_queue_declareok(self, method_frame):
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
"""
_logger.info('Binding %s to %s with %s', self.EXCHANGE, self._queue, self._routing_key)
self._channel.queue_bind(self.on_bindok, self._queue, self.EXCHANGE, self._routing_key) |
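# Context sketch: with pika 0.x callback-first signatures (matching the
# queue_bind call above), a setup_queue step would wire this handler in:
def setup_queue(self, queue_name):
    self._queue = queue_name
    self._channel.queue_declare(self.on_queue_declareok, queue_name)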
async def wait_read(self, message=None, *, timeout=None):
"""
        Waits for the sent message to be read. Note that receiving
a response doesn't imply the message was read, and this action
will also trigger even without a response.
"""
start_time = time.time()
future = self._client.loop.create_future()
target_id = self._get_message_id(message)
if self._last_read is None:
self._last_read = target_id - 1
if self._last_read >= target_id:
return
self._pending_reads[target_id] = future
return await self._get_result(future, start_time, timeout) | <ast.AsyncFunctionDef object at 0x7da18f00c880> | keyword[async] keyword[def] identifier[wait_read] ( identifier[self] , identifier[message] = keyword[None] ,*, identifier[timeout] = keyword[None] ):
literal[string]
identifier[start_time] = identifier[time] . identifier[time] ()
identifier[future] = identifier[self] . identifier[_client] . identifier[loop] . identifier[create_future] ()
identifier[target_id] = identifier[self] . identifier[_get_message_id] ( identifier[message] )
keyword[if] identifier[self] . identifier[_last_read] keyword[is] keyword[None] :
identifier[self] . identifier[_last_read] = identifier[target_id] - literal[int]
keyword[if] identifier[self] . identifier[_last_read] >= identifier[target_id] :
keyword[return]
identifier[self] . identifier[_pending_reads] [ identifier[target_id] ]= identifier[future]
keyword[return] keyword[await] identifier[self] . identifier[_get_result] ( identifier[future] , identifier[start_time] , identifier[timeout] ) | async def wait_read(self, message=None, *, timeout=None):
"""
        Waits for the sent message to be read. Note that receiving
a response doesn't imply the message was read, and this action
will also trigger even without a response.
"""
start_time = time.time()
future = self._client.loop.create_future()
target_id = self._get_message_id(message)
if self._last_read is None:
self._last_read = target_id - 1 # depends on [control=['if'], data=[]]
if self._last_read >= target_id:
return # depends on [control=['if'], data=[]]
self._pending_reads[target_id] = future
return await self._get_result(future, start_time, timeout) |
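# Usage sketch inside an async Telethon-style conversation; assumes `conv`
# exposes the wait_read coroutine above.
async def notify_when_read(conv, message):
    await conv.wait_read(message, timeout=60)
    print('message was read')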
def get_groupname(taskfileinfo):
"""Return a suitable name for a groupname for the given taskfileinfo.
:param taskfileinfo: the taskfile info for the file that needs a group when importing/referencing
:type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
    :returns: the element name suffixed with ``_grp``
    :rtype: str
:raises: None
"""
element = taskfileinfo.task.element
name = element.name
return name + "_grp" | def function[get_groupname, parameter[taskfileinfo]]:
constant[Return a suitable name for a groupname for the given taskfileinfo.
:param taskfileinfo: the taskfile info for the file that needs a group when importing/referencing
:type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
    :returns: the element name suffixed with ``_grp``
    :rtype: str
:raises: None
]
variable[element] assign[=] name[taskfileinfo].task.element
variable[name] assign[=] name[element].name
return[binary_operation[name[name] + constant[_grp]]] | keyword[def] identifier[get_groupname] ( identifier[taskfileinfo] ):
literal[string]
identifier[element] = identifier[taskfileinfo] . identifier[task] . identifier[element]
identifier[name] = identifier[element] . identifier[name]
keyword[return] identifier[name] + literal[string] | def get_groupname(taskfileinfo):
"""Return a suitable name for a groupname for the given taskfileinfo.
:param taskfileinfo: the taskfile info for the file that needs a group when importing/referencing
:type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
    :returns: the element name suffixed with ``_grp``
    :rtype: str
:raises: None
"""
element = taskfileinfo.task.element
name = element.name
return name + '_grp' |
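# Behaviour sketch; `tfi` stands in for a TaskFileInfo whose task element
# is named "hero".
assert get_groupname(tfi) == 'hero_grp'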
def delete(self, bridge):
"""
Delete a bridge by name
:param bridge: bridge name
:return:
"""
args = {
'name': bridge,
}
self._bridge_chk.check(args)
return self._client.json('bridge.delete', args) | def function[delete, parameter[self, bridge]]:
constant[
Delete a bridge by name
:param bridge: bridge name
:return:
]
variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da1b04cbb20>], [<ast.Name object at 0x7da1b04ca6b0>]]
call[name[self]._bridge_chk.check, parameter[name[args]]]
return[call[name[self]._client.json, parameter[constant[bridge.delete], name[args]]]] | keyword[def] identifier[delete] ( identifier[self] , identifier[bridge] ):
literal[string]
identifier[args] ={
literal[string] : identifier[bridge] ,
}
identifier[self] . identifier[_bridge_chk] . identifier[check] ( identifier[args] )
keyword[return] identifier[self] . identifier[_client] . identifier[json] ( literal[string] , identifier[args] ) | def delete(self, bridge):
"""
Delete a bridge by name
:param bridge: bridge name
:return:
"""
args = {'name': bridge}
self._bridge_chk.check(args)
return self._client.json('bridge.delete', args) |
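# Usage sketch, assuming `client.bridge` is the manager exposing the
# method above on a Zero-OS node client; the bridge name is illustrative.
client.bridge.delete('br0')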
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the Create request payload and decode it into
its constituent parts.
Args:
input_buffer (stream): A data buffer containing encoded object
data, supporting a read method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidKmipEncoding: Raised if the object type or template
attribute is missing from the encoded payload.
"""
super(CreateRequestPayload, self).read(
input_buffer,
kmip_version=kmip_version
)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.OBJECT_TYPE, local_buffer):
self._object_type = primitives.Enumeration(
enums.ObjectType,
tag=enums.Tags.OBJECT_TYPE
)
self._object_type.read(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidKmipEncoding(
"The Create request payload encoding is missing the object "
"type."
)
if kmip_version < enums.KMIPVersion.KMIP_2_0:
if self.is_tag_next(enums.Tags.TEMPLATE_ATTRIBUTE, local_buffer):
self._template_attribute = objects.TemplateAttribute()
self._template_attribute.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The Create request payload encoding is missing the "
"template attribute."
)
else:
# NOTE (ph) For now, leave attributes natively in TemplateAttribute
# form and just convert to the KMIP 2.0 Attributes form as needed
# for encoding/decoding purposes. Changing the payload to require
# the new Attributes structure will trigger a bunch of second-order
# effects across the client and server codebases that is beyond
# the scope of updating the Create payloads to support KMIP 2.0.
if self.is_tag_next(enums.Tags.ATTRIBUTES, local_buffer):
attributes = objects.Attributes()
attributes.read(local_buffer, kmip_version=kmip_version)
value = objects.convert_attributes_to_template_attribute(
attributes
)
self._template_attribute = value
else:
raise exceptions.InvalidKmipEncoding(
"The Create request payload encoding is missing the "
"attributes structure."
)
self.is_oversized(local_buffer) | def function[read, parameter[self, input_buffer, kmip_version]]:
constant[
Read the data encoding the Create request payload and decode it into
its constituent parts.
Args:
input_buffer (stream): A data buffer containing encoded object
data, supporting a read method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidKmipEncoding: Raised if the object type or template
attribute is missing from the encoded payload.
]
call[call[name[super], parameter[name[CreateRequestPayload], name[self]]].read, parameter[name[input_buffer]]]
variable[local_buffer] assign[=] call[name[utils].BytearrayStream, parameter[call[name[input_buffer].read, parameter[name[self].length]]]]
if call[name[self].is_tag_next, parameter[name[enums].Tags.OBJECT_TYPE, name[local_buffer]]] begin[:]
name[self]._object_type assign[=] call[name[primitives].Enumeration, parameter[name[enums].ObjectType]]
call[name[self]._object_type.read, parameter[name[local_buffer]]]
if compare[name[kmip_version] less[<] name[enums].KMIPVersion.KMIP_2_0] begin[:]
if call[name[self].is_tag_next, parameter[name[enums].Tags.TEMPLATE_ATTRIBUTE, name[local_buffer]]] begin[:]
name[self]._template_attribute assign[=] call[name[objects].TemplateAttribute, parameter[]]
call[name[self]._template_attribute.read, parameter[name[local_buffer]]]
call[name[self].is_oversized, parameter[name[local_buffer]]] | keyword[def] identifier[read] ( identifier[self] , identifier[input_buffer] , identifier[kmip_version] = identifier[enums] . identifier[KMIPVersion] . identifier[KMIP_1_0] ):
literal[string]
identifier[super] ( identifier[CreateRequestPayload] , identifier[self] ). identifier[read] (
identifier[input_buffer] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[local_buffer] = identifier[utils] . identifier[BytearrayStream] ( identifier[input_buffer] . identifier[read] ( identifier[self] . identifier[length] ))
keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[OBJECT_TYPE] , identifier[local_buffer] ):
identifier[self] . identifier[_object_type] = identifier[primitives] . identifier[Enumeration] (
identifier[enums] . identifier[ObjectType] ,
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[OBJECT_TYPE]
)
identifier[self] . identifier[_object_type] . identifier[read] ( identifier[local_buffer] , identifier[kmip_version] = identifier[kmip_version] )
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[InvalidKmipEncoding] (
literal[string]
literal[string]
)
keyword[if] identifier[kmip_version] < identifier[enums] . identifier[KMIPVersion] . identifier[KMIP_2_0] :
keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[TEMPLATE_ATTRIBUTE] , identifier[local_buffer] ):
identifier[self] . identifier[_template_attribute] = identifier[objects] . identifier[TemplateAttribute] ()
identifier[self] . identifier[_template_attribute] . identifier[read] (
identifier[local_buffer] ,
identifier[kmip_version] = identifier[kmip_version]
)
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[InvalidKmipEncoding] (
literal[string]
literal[string]
)
keyword[else] :
keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[ATTRIBUTES] , identifier[local_buffer] ):
identifier[attributes] = identifier[objects] . identifier[Attributes] ()
identifier[attributes] . identifier[read] ( identifier[local_buffer] , identifier[kmip_version] = identifier[kmip_version] )
identifier[value] = identifier[objects] . identifier[convert_attributes_to_template_attribute] (
identifier[attributes]
)
identifier[self] . identifier[_template_attribute] = identifier[value]
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[InvalidKmipEncoding] (
literal[string]
literal[string]
)
identifier[self] . identifier[is_oversized] ( identifier[local_buffer] ) | def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the Create request payload and decode it into
its constituent parts.
Args:
input_buffer (stream): A data buffer containing encoded object
data, supporting a read method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidKmipEncoding: Raised if the object type or template
attribute is missing from the encoded payload.
"""
super(CreateRequestPayload, self).read(input_buffer, kmip_version=kmip_version)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.OBJECT_TYPE, local_buffer):
self._object_type = primitives.Enumeration(enums.ObjectType, tag=enums.Tags.OBJECT_TYPE)
self._object_type.read(local_buffer, kmip_version=kmip_version) # depends on [control=['if'], data=[]]
else:
raise exceptions.InvalidKmipEncoding('The Create request payload encoding is missing the object type.')
if kmip_version < enums.KMIPVersion.KMIP_2_0:
if self.is_tag_next(enums.Tags.TEMPLATE_ATTRIBUTE, local_buffer):
self._template_attribute = objects.TemplateAttribute()
self._template_attribute.read(local_buffer, kmip_version=kmip_version) # depends on [control=['if'], data=[]]
else:
raise exceptions.InvalidKmipEncoding('The Create request payload encoding is missing the template attribute.') # depends on [control=['if'], data=['kmip_version']]
# NOTE (ph) For now, leave attributes natively in TemplateAttribute
# form and just convert to the KMIP 2.0 Attributes form as needed
# for encoding/decoding purposes. Changing the payload to require
# the new Attributes structure will trigger a bunch of second-order
# effects across the client and server codebases that is beyond
# the scope of updating the Create payloads to support KMIP 2.0.
elif self.is_tag_next(enums.Tags.ATTRIBUTES, local_buffer):
attributes = objects.Attributes()
attributes.read(local_buffer, kmip_version=kmip_version)
value = objects.convert_attributes_to_template_attribute(attributes)
self._template_attribute = value # depends on [control=['if'], data=[]]
else:
raise exceptions.InvalidKmipEncoding('The Create request payload encoding is missing the attributes structure.')
self.is_oversized(local_buffer) |
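# Decode sketch: feeding an encoded Create request into read(); the
# `encoding` bytes are assumed to hold a valid KMIP 2.0 payload, which
# exercises the Attributes branch above.
from kmip.core import enums, utils

payload = CreateRequestPayload()
stream = utils.BytearrayStream(encoding)
payload.read(stream, kmip_version=enums.KMIPVersion.KMIP_2_0)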
def as_set(obj):
"""
Convert obj into a set, returns None if obj is None.
>>> assert as_set(None) is None and as_set(1) == set([1]) and as_set(range(1,3)) == set([1, 2])
"""
if obj is None or isinstance(obj, collections.Set):
return obj
if not isinstance(obj, collections.Iterable):
return set((obj,))
else:
return set(obj) | def function[as_set, parameter[obj]]:
constant[
Convert obj into a set, returns None if obj is None.
>>> assert as_set(None) is None and as_set(1) == set([1]) and as_set(range(1,3)) == set([1, 2])
]
if <ast.BoolOp object at 0x7da1b13d20b0> begin[:]
return[name[obj]]
if <ast.UnaryOp object at 0x7da1b13d3f40> begin[:]
return[call[name[set], parameter[tuple[[<ast.Name object at 0x7da1b13d0a90>]]]]] | keyword[def] identifier[as_set] ( identifier[obj] ):
literal[string]
keyword[if] identifier[obj] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[obj] , identifier[collections] . identifier[Set] ):
keyword[return] identifier[obj]
keyword[if] keyword[not] identifier[isinstance] ( identifier[obj] , identifier[collections] . identifier[Iterable] ):
keyword[return] identifier[set] (( identifier[obj] ,))
keyword[else] :
keyword[return] identifier[set] ( identifier[obj] ) | def as_set(obj):
"""
Convert obj into a set, returns None if obj is None.
>>> assert as_set(None) is None and as_set(1) == set([1]) and as_set(range(1,3)) == set([1, 2])
"""
if obj is None or isinstance(obj, collections.Set):
return obj # depends on [control=['if'], data=[]]
if not isinstance(obj, collections.Iterable):
return set((obj,)) # depends on [control=['if'], data=[]]
else:
return set(obj) |
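# Quick checks mirroring the doctest; note that strings are iterable, so
# they split into characters, and on Python 3.10+ these ABCs live in
# collections.abc rather than collections.
assert as_set(None) is None
assert as_set(3) == {3}
assert as_set('ab') == {'a', 'b'}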
def generate_inverse_mapping(order):
"""Genereate a lambda entry -> PN order map.
This function will generate the opposite of generate mapping. So where
generate_mapping gives dict[key] = item this will give
dict[item] = key. Valid PN orders are:
{}
Parameters
----------
order : string
A string containing a PN order. Valid values are given above.
Returns
--------
mapping : dictionary
An inverse mapping between the active Lambda terms and index in the
metric
"""
mapping = generate_mapping(order)
inv_mapping = {}
for key,value in mapping.items():
inv_mapping[value] = key
return inv_mapping | def function[generate_inverse_mapping, parameter[order]]:
    constant[Generate a lambda entry -> PN order map.
    This function will generate the opposite of generate_mapping. So where
generate_mapping gives dict[key] = item this will give
dict[item] = key. Valid PN orders are:
{}
Parameters
----------
order : string
A string containing a PN order. Valid values are given above.
Returns
--------
mapping : dictionary
An inverse mapping between the active Lambda terms and index in the
metric
]
variable[mapping] assign[=] call[name[generate_mapping], parameter[name[order]]]
variable[inv_mapping] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18dc992d0>, <ast.Name object at 0x7da18dc9a980>]]] in starred[call[name[mapping].items, parameter[]]] begin[:]
call[name[inv_mapping]][name[value]] assign[=] name[key]
return[name[inv_mapping]] | keyword[def] identifier[generate_inverse_mapping] ( identifier[order] ):
literal[string]
identifier[mapping] = identifier[generate_mapping] ( identifier[order] )
identifier[inv_mapping] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[mapping] . identifier[items] ():
identifier[inv_mapping] [ identifier[value] ]= identifier[key]
keyword[return] identifier[inv_mapping] | def generate_inverse_mapping(order):
"""Genereate a lambda entry -> PN order map.
This function will generate the opposite of generate mapping. So where
generate_mapping gives dict[key] = item this will give
dict[item] = key. Valid PN orders are:
{}
Parameters
----------
order : string
A string containing a PN order. Valid values are given above.
Returns
--------
mapping : dictionary
An inverse mapping between the active Lambda terms and index in the
metric
"""
mapping = generate_mapping(order)
inv_mapping = {}
for (key, value) in mapping.items():
inv_mapping[value] = key # depends on [control=['for'], data=[]]
return inv_mapping |
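# Behaviour sketch with a stubbed generate_mapping (the real PN-order map
# lives elsewhere in the module); names and order string are illustrative.
def generate_mapping(order):
    return {'Lambda0': 0, 'Lambda2': 1}

assert generate_inverse_mapping('zeroPN') == {0: 'Lambda0', 1: 'Lambda2'}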
def get_sesames(email, password, device_ids=None, nicknames=None, timeout=5):
"""Return list of available Sesame objects."""
sesames = []
account = CandyHouseAccount(email, password, timeout=timeout)
for sesame in account.sesames:
if device_ids is not None and sesame['device_id'] not in device_ids:
continue
if nicknames is not None and sesame['nickname'] not in nicknames:
continue
sesames.append(Sesame(account, sesame))
return sesames | def function[get_sesames, parameter[email, password, device_ids, nicknames, timeout]]:
constant[Return list of available Sesame objects.]
variable[sesames] assign[=] list[[]]
variable[account] assign[=] call[name[CandyHouseAccount], parameter[name[email], name[password]]]
for taget[name[sesame]] in starred[name[account].sesames] begin[:]
if <ast.BoolOp object at 0x7da1b0b36c20> begin[:]
continue
if <ast.BoolOp object at 0x7da1b0b37160> begin[:]
continue
call[name[sesames].append, parameter[call[name[Sesame], parameter[name[account], name[sesame]]]]]
return[name[sesames]] | keyword[def] identifier[get_sesames] ( identifier[email] , identifier[password] , identifier[device_ids] = keyword[None] , identifier[nicknames] = keyword[None] , identifier[timeout] = literal[int] ):
literal[string]
identifier[sesames] =[]
identifier[account] = identifier[CandyHouseAccount] ( identifier[email] , identifier[password] , identifier[timeout] = identifier[timeout] )
keyword[for] identifier[sesame] keyword[in] identifier[account] . identifier[sesames] :
keyword[if] identifier[device_ids] keyword[is] keyword[not] keyword[None] keyword[and] identifier[sesame] [ literal[string] ] keyword[not] keyword[in] identifier[device_ids] :
keyword[continue]
keyword[if] identifier[nicknames] keyword[is] keyword[not] keyword[None] keyword[and] identifier[sesame] [ literal[string] ] keyword[not] keyword[in] identifier[nicknames] :
keyword[continue]
identifier[sesames] . identifier[append] ( identifier[Sesame] ( identifier[account] , identifier[sesame] ))
keyword[return] identifier[sesames] | def get_sesames(email, password, device_ids=None, nicknames=None, timeout=5):
"""Return list of available Sesame objects."""
sesames = []
account = CandyHouseAccount(email, password, timeout=timeout)
for sesame in account.sesames:
if device_ids is not None and sesame['device_id'] not in device_ids:
continue # depends on [control=['if'], data=[]]
if nicknames is not None and sesame['nickname'] not in nicknames:
continue # depends on [control=['if'], data=[]]
sesames.append(Sesame(account, sesame)) # depends on [control=['for'], data=['sesame']]
return sesames |
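# Usage sketch against the CANDY HOUSE cloud; credentials and nickname are
# illustrative, and Sesame objects are assumed to expose .nickname.
for sesame in get_sesames('user@example.com', 'password',
                          nicknames=['Front door']):
    print(sesame.nickname)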
def main(*args):
"""Launch the main routine."""
parser = argparse.ArgumentParser()
parser.add_argument("action",
help="create, check, run, make-nb, or run-nb")
parser.add_argument("--directory", "-dir", default=os.getcwd(),
help="path to directory with a .sciunit file")
parser.add_argument("--stop", "-s", default=True,
help="stop and raise errors, halting the program")
parser.add_argument("--tests", "-t", default=False,
help="runs tests instead of suites")
if args:
args = parser.parse_args(args)
else:
args = parser.parse_args()
file_path = os.path.join(args.directory, '.sciunit')
config = None
if args.action == 'create':
create(file_path)
elif args.action == 'check':
config = parse(file_path, show=True)
print("\nNo configuration errors reported.")
elif args.action == 'run':
config = parse(file_path)
run(config, path=args.directory,
stop_on_error=args.stop, just_tests=args.tests)
elif args.action == 'make-nb':
config = parse(file_path)
make_nb(config, path=args.directory,
stop_on_error=args.stop, just_tests=args.tests)
elif args.action == 'run-nb':
config = parse(file_path)
run_nb(config, path=args.directory)
else:
raise NameError('No such action %s' % args.action)
if config:
cleanup(config, path=args.directory) | def function[main, parameter[]]:
constant[Launch the main routine.]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[action]]]
call[name[parser].add_argument, parameter[constant[--directory], constant[-dir]]]
call[name[parser].add_argument, parameter[constant[--stop], constant[-s]]]
call[name[parser].add_argument, parameter[constant[--tests], constant[-t]]]
if name[args] begin[:]
variable[args] assign[=] call[name[parser].parse_args, parameter[name[args]]]
variable[file_path] assign[=] call[name[os].path.join, parameter[name[args].directory, constant[.sciunit]]]
variable[config] assign[=] constant[None]
if compare[name[args].action equal[==] constant[create]] begin[:]
call[name[create], parameter[name[file_path]]]
if name[config] begin[:]
call[name[cleanup], parameter[name[config]]] | keyword[def] identifier[main] (* identifier[args] ):
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ()
identifier[parser] . identifier[add_argument] ( literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[default] = identifier[os] . identifier[getcwd] (),
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[default] = keyword[True] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[default] = keyword[False] ,
identifier[help] = literal[string] )
keyword[if] identifier[args] :
identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[args] )
keyword[else] :
identifier[args] = identifier[parser] . identifier[parse_args] ()
identifier[file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[args] . identifier[directory] , literal[string] )
identifier[config] = keyword[None]
keyword[if] identifier[args] . identifier[action] == literal[string] :
identifier[create] ( identifier[file_path] )
keyword[elif] identifier[args] . identifier[action] == literal[string] :
identifier[config] = identifier[parse] ( identifier[file_path] , identifier[show] = keyword[True] )
identifier[print] ( literal[string] )
keyword[elif] identifier[args] . identifier[action] == literal[string] :
identifier[config] = identifier[parse] ( identifier[file_path] )
identifier[run] ( identifier[config] , identifier[path] = identifier[args] . identifier[directory] ,
identifier[stop_on_error] = identifier[args] . identifier[stop] , identifier[just_tests] = identifier[args] . identifier[tests] )
keyword[elif] identifier[args] . identifier[action] == literal[string] :
identifier[config] = identifier[parse] ( identifier[file_path] )
identifier[make_nb] ( identifier[config] , identifier[path] = identifier[args] . identifier[directory] ,
identifier[stop_on_error] = identifier[args] . identifier[stop] , identifier[just_tests] = identifier[args] . identifier[tests] )
keyword[elif] identifier[args] . identifier[action] == literal[string] :
identifier[config] = identifier[parse] ( identifier[file_path] )
identifier[run_nb] ( identifier[config] , identifier[path] = identifier[args] . identifier[directory] )
keyword[else] :
keyword[raise] identifier[NameError] ( literal[string] % identifier[args] . identifier[action] )
keyword[if] identifier[config] :
identifier[cleanup] ( identifier[config] , identifier[path] = identifier[args] . identifier[directory] ) | def main(*args):
"""Launch the main routine."""
parser = argparse.ArgumentParser()
parser.add_argument('action', help='create, check, run, make-nb, or run-nb')
parser.add_argument('--directory', '-dir', default=os.getcwd(), help='path to directory with a .sciunit file')
parser.add_argument('--stop', '-s', default=True, help='stop and raise errors, halting the program')
parser.add_argument('--tests', '-t', default=False, help='runs tests instead of suites')
if args:
args = parser.parse_args(args) # depends on [control=['if'], data=[]]
else:
args = parser.parse_args()
file_path = os.path.join(args.directory, '.sciunit')
config = None
if args.action == 'create':
create(file_path) # depends on [control=['if'], data=[]]
elif args.action == 'check':
config = parse(file_path, show=True)
print('\nNo configuration errors reported.') # depends on [control=['if'], data=[]]
elif args.action == 'run':
config = parse(file_path)
run(config, path=args.directory, stop_on_error=args.stop, just_tests=args.tests) # depends on [control=['if'], data=[]]
elif args.action == 'make-nb':
config = parse(file_path)
make_nb(config, path=args.directory, stop_on_error=args.stop, just_tests=args.tests) # depends on [control=['if'], data=[]]
elif args.action == 'run-nb':
config = parse(file_path)
run_nb(config, path=args.directory) # depends on [control=['if'], data=[]]
else:
raise NameError('No such action %s' % args.action)
if config:
cleanup(config, path=args.directory) # depends on [control=['if'], data=[]] |
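Because main() falls back to parse_args() only when called with no positional arguments, the dispatcher above can be driven programmatically as well as from the command line; a sketch with a hypothetical project path:

# CLI equivalent: <prog> check --directory /tmp/proj
main('check', '--directory', '/tmp/proj')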
def apply_encoding_options(self, min_token_count=1, limit_top_tokens=None):
"""Applies the given settings for subsequent calls to `encode_texts` and `decode_texts`. This allows you to
play with different settings without having to re-run tokenization on the entire corpus.
Args:
min_token_count: The minimum token count (frequency) in order to include during encoding. All tokens
below this frequency will be encoded to `0` which corresponds to unknown token. (Default value = 1)
limit_top_tokens: The maximum number of tokens to keep, based on their frequency. Only the most common `limit_top_tokens`
tokens will be kept. Set to None to keep everything. (Default value: None)
"""
if not self.has_vocab:
raise ValueError("You need to build the vocabulary using `build_vocab` "
"before using `apply_encoding_options`")
if min_token_count < 1:
raise ValueError("`min_token_count` should be at least 1")
# Remove tokens with freq < min_token_count
token_counts = list(self._token_counts.items())
token_counts = [x for x in token_counts if x[1] >= min_token_count]
# Clip to max_tokens.
if limit_top_tokens is not None:
token_counts.sort(key=lambda x: x[1], reverse=True)
filtered_tokens = list(zip(*token_counts))[0]
filtered_tokens = filtered_tokens[:limit_top_tokens]
else:
filtered_tokens = list(zip(*token_counts))[0]  # list() needed on Python 3, where zip returns an iterator
# Generate indices based on filtered tokens.
self.create_token_indices(filtered_tokens) | def function[apply_encoding_options, parameter[self, min_token_count, limit_top_tokens]]:
constant[Applies the given settings for subsequent calls to `encode_texts` and `decode_texts`. This allows you to
play with different settings without having to re-run tokenization on the entire corpus.
Args:
min_token_count: The minimum token count (frequency) in order to include during encoding. All tokens
below this frequency will be encoded to `0` which corresponds to unknown token. (Default value = 1)
limit_top_tokens: The maximum number of tokens to keep, based on their frequency. Only the most common `limit_top_tokens`
tokens will be kept. Set to None to keep everything. (Default value: None)
]
if <ast.UnaryOp object at 0x7da1b0f1c6a0> begin[:]
<ast.Raise object at 0x7da1b0f1c3a0>
if compare[name[min_token_count] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da1b0f1cbb0>
variable[token_counts] assign[=] call[name[list], parameter[call[name[self]._token_counts.items, parameter[]]]]
variable[token_counts] assign[=] <ast.ListComp object at 0x7da1b0f1e8c0>
if compare[name[limit_top_tokens] is_not constant[None]] begin[:]
call[name[token_counts].sort, parameter[]]
variable[filtered_tokens] assign[=] call[call[name[list], parameter[call[name[zip], parameter[<ast.Starred object at 0x7da1b0f1c970>]]]]][constant[0]]
variable[filtered_tokens] assign[=] call[name[filtered_tokens]][<ast.Slice object at 0x7da1b10a7f40>]
call[name[self].create_token_indices, parameter[name[filtered_tokens]]] | keyword[def] identifier[apply_encoding_options] ( identifier[self] , identifier[min_token_count] = literal[int] , identifier[limit_top_tokens] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[has_vocab] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[if] identifier[min_token_count] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[token_counts] = identifier[list] ( identifier[self] . identifier[_token_counts] . identifier[items] ())
identifier[token_counts] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[token_counts] keyword[if] identifier[x] [ literal[int] ]>= identifier[min_token_count] ]
keyword[if] identifier[limit_top_tokens] keyword[is] keyword[not] keyword[None] :
identifier[token_counts] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ], identifier[reverse] = keyword[True] )
identifier[filtered_tokens] = identifier[list] ( identifier[zip] (* identifier[token_counts] ))[ literal[int] ]
identifier[filtered_tokens] = identifier[filtered_tokens] [: identifier[limit_top_tokens] ]
keyword[else] :
identifier[filtered_tokens] = identifier[list] ( identifier[zip] (* identifier[token_counts] ))[ literal[int] ]
identifier[self] . identifier[create_token_indices] ( identifier[filtered_tokens] ) | def apply_encoding_options(self, min_token_count=1, limit_top_tokens=None):
"""Applies the given settings for subsequent calls to `encode_texts` and `decode_texts`. This allows you to
play with different settings without having to re-run tokenization on the entire corpus.
Args:
min_token_count: The minimum token count (frequency) in order to include during encoding. All tokens
below this frequency will be encoded to `0` which corresponds to unknown token. (Default value = 1)
limit_top_tokens: The maximum number of tokens to keep, based on their frequency. Only the most common `limit_top_tokens`
tokens will be kept. Set to None to keep everything. (Default value: None)
"""
if not self.has_vocab:
raise ValueError('You need to build the vocabulary using `build_vocab` before using `apply_encoding_options`') # depends on [control=['if'], data=[]]
if min_token_count < 1:
raise ValueError('`min_token_count` should be at least 1') # depends on [control=['if'], data=[]]
# Remove tokens with freq < min_token_count
token_counts = list(self._token_counts.items())
token_counts = [x for x in token_counts if x[1] >= min_token_count]
# Clip to max_tokens.
if limit_top_tokens is not None:
token_counts.sort(key=lambda x: x[1], reverse=True)
filtered_tokens = list(zip(*token_counts))[0]
filtered_tokens = filtered_tokens[:limit_top_tokens] # depends on [control=['if'], data=['limit_top_tokens']]
else:
filtered_tokens = list(zip(*token_counts))[0]
# Generate indices based on filtered tokens.
self.create_token_indices(filtered_tokens) |
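The filtering in apply_encoding_options reduces to two steps over a frequency table: drop tokens under the minimum count, then keep the top-k by count. A standalone sketch with collections.Counter; note the explicit list() around zip(), which the Python 3 fix above also needs:

from collections import Counter

token_counts = Counter({'the': 10, 'cat': 4, 'sat': 4, 'rug': 1})
min_token_count, limit_top_tokens = 2, 2

items = [x for x in token_counts.items() if x[1] >= min_token_count]
items.sort(key=lambda x: x[1], reverse=True)
filtered_tokens = list(zip(*items))[0][:limit_top_tokens]
print(filtered_tokens)  # ('the', 'cat'); ties keep insertion order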
async def migrate_redis1_to_redis2(storage1: RedisStorage, storage2: RedisStorage2):
"""
Helper for migrating from RedisStorage to RedisStorage2
:param storage1: instance of RedisStorage
:param storage2: instance of RedisStorage2
:return:
"""
if not isinstance(storage1, RedisStorage): # better than assertion
raise TypeError(f"{type(storage1)} is not a RedisStorage instance.")
if not isinstance(storage2, RedisStorage2):
raise TypeError(f"{type(storage2)} is not a RedisStorage2 instance.")
log = logging.getLogger('aiogram.RedisStorage')
for chat, user in await storage1.get_states_list():
state = await storage1.get_state(chat=chat, user=user)
await storage2.set_state(chat=chat, user=user, state=state)
data = await storage1.get_data(chat=chat, user=user)
await storage2.set_data(chat=chat, user=user, data=data)
bucket = await storage1.get_bucket(chat=chat, user=user)
await storage2.set_bucket(chat=chat, user=user, bucket=bucket)
log.info(f"Migrated user {user} in chat {chat}") | <ast.AsyncFunctionDef object at 0x7da1b18448e0> | keyword[async] keyword[def] identifier[migrate_redis1_to_redis2] ( identifier[storage1] : identifier[RedisStorage] , identifier[storage2] : identifier[RedisStorage2] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[storage1] , identifier[RedisStorage] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[storage2] , identifier[RedisStorage2] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[log] = identifier[logging] . identifier[getLogger] ( literal[string] )
keyword[for] identifier[chat] , identifier[user] keyword[in] keyword[await] identifier[storage1] . identifier[get_states_list] ():
identifier[state] = keyword[await] identifier[storage1] . identifier[get_state] ( identifier[chat] = identifier[chat] , identifier[user] = identifier[user] )
keyword[await] identifier[storage2] . identifier[set_state] ( identifier[chat] = identifier[chat] , identifier[user] = identifier[user] , identifier[state] = identifier[state] )
identifier[data] = keyword[await] identifier[storage1] . identifier[get_data] ( identifier[chat] = identifier[chat] , identifier[user] = identifier[user] )
keyword[await] identifier[storage2] . identifier[set_data] ( identifier[chat] = identifier[chat] , identifier[user] = identifier[user] , identifier[data] = identifier[data] )
identifier[bucket] = keyword[await] identifier[storage1] . identifier[get_bucket] ( identifier[chat] = identifier[chat] , identifier[user] = identifier[user] )
keyword[await] identifier[storage2] . identifier[set_bucket] ( identifier[chat] = identifier[chat] , identifier[user] = identifier[user] , identifier[bucket] = identifier[bucket] )
identifier[log] . identifier[info] ( literal[string] ) | async def migrate_redis1_to_redis2(storage1: RedisStorage, storage2: RedisStorage2):
"""
Helper for migrating from RedisStorage to RedisStorage2
:param storage1: instance of RedisStorage
:param storage2: instance of RedisStorage2
:return:
"""
if not isinstance(storage1, RedisStorage): # better than assertion
raise TypeError(f'{type(storage1)} is not a RedisStorage instance.') # depends on [control=['if'], data=[]]
if not isinstance(storage2, RedisStorage2):
raise TypeError(f'{type(storage2)} is not a RedisStorage2 instance.') # depends on [control=['if'], data=[]]
log = logging.getLogger('aiogram.RedisStorage')
for (chat, user) in await storage1.get_states_list():
state = await storage1.get_state(chat=chat, user=user)
await storage2.set_state(chat=chat, user=user, state=state)
data = await storage1.get_data(chat=chat, user=user)
await storage2.set_data(chat=chat, user=user, data=data)
bucket = await storage1.get_bucket(chat=chat, user=user)
await storage2.set_bucket(chat=chat, user=user, bucket=bucket)
log.info(f'Migrated user {user} in chat {chat}') # depends on [control=['for'], data=[]] |
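A hedged driver for the coroutine above, assuming aiogram's two Redis storage classes and their usual host/port/db constructor arguments (the connection details are placeholders):

import asyncio
from aiogram.contrib.fsm_storage.redis import RedisStorage, RedisStorage2

async def run_migration():
    old = RedisStorage('localhost', 6379, db=1)   # assumed constructor args
    new = RedisStorage2('localhost', 6379, db=2)
    await migrate_redis1_to_redis2(old, new)

asyncio.run(run_migration())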
def _expire_data(self):
"""
Remove all expired entries.
"""
expire_time_stamp = time.time() - self.expire_time
self.timed_data = {d: t for d, t in self.timed_data.items()
if t > expire_time_stamp} | def function[_expire_data, parameter[self]]:
constant[
Remove all expired entries.
]
variable[expire_time_stamp] assign[=] binary_operation[call[name[time].time, parameter[]] - name[self].expire_time]
name[self].timed_data assign[=] <ast.DictComp object at 0x7da207f9b640> | keyword[def] identifier[_expire_data] ( identifier[self] ):
literal[string]
identifier[expire_time_stamp] = identifier[time] . identifier[time] ()- identifier[self] . identifier[expire_time]
identifier[self] . identifier[timed_data] ={ identifier[d] : identifier[t] keyword[for] identifier[d] , identifier[t] keyword[in] identifier[self] . identifier[timed_data] . identifier[items] ()
keyword[if] identifier[t] > identifier[expire_time_stamp] } | def _expire_data(self):
"""
Remove all expired entries.
"""
expire_time_stamp = time.time() - self.expire_time
self.timed_data = {d: t for (d, t) in self.timed_data.items() if t > expire_time_stamp} |
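The pruning idiom above (rebuild the dict, keeping only entries newer than the cutoff) works standalone; a sketch with a 60-second window:

import time

expire_time = 60.0
timed_data = {'a': time.time() - 120, 'b': time.time()}  # one stale, one fresh
cutoff = time.time() - expire_time
timed_data = {d: t for d, t in timed_data.items() if t > cutoff}
assert list(timed_data) == ['b']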
def sample_poly(self, poly, penalty_strength=1.0,
keep_penalty_variables=False,
discard_unsatisfied=False, **parameters):
"""Sample from the given binary polynomial.
Takes the given binary polynomial, introduces penalties, reduces the
higher-order problem into a quadratic problem and sends it to its child
sampler.
Args:
poly (:obj:`.BinaryPolynomial`):
A binary polynomial.
penalty_strength (float, optional): Strength of the reduction constraint.
Insufficient strength can result in the binary quadratic model
not having the same minimization as the polynomial.
keep_penalty_variables (bool, optional): default is False. If False
will remove the variables used for penalty from the samples
discard_unsatisfied (bool, optional): default is False. If True
will discard samples that do not satisfy the penalty conditions.
**parameters: Parameters for the sampling method, specified by
the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
bqm = make_quadratic(poly, penalty_strength, vartype=poly.vartype)
response = self.child.sample(bqm, **parameters)
return polymorph_response(response, poly, bqm,
penalty_strength=penalty_strength,
keep_penalty_variables=keep_penalty_variables,
discard_unsatisfied=discard_unsatisfied) | def function[sample_poly, parameter[self, poly, penalty_strength, keep_penalty_variables, discard_unsatisfied]]:
constant[Sample from the given binary polynomial.
Takes the given binary polynomial, introduces penalties, reduces the
higher-order problem into a quadratic problem and sends it to its child
sampler.
Args:
poly (:obj:`.BinaryPolynomial`):
A binary polynomial.
penalty_strength (float, optional): Strength of the reduction constraint.
Insufficient strength can result in the binary quadratic model
not having the same minimization as the polynomial.
keep_penalty_variables (bool, optional): default is False. If False
will remove the variables used for penalty from the samples
discard_unsatisfied (bool, optional): default is False. If True
will discard samples that do not satisfy the penalty conditions.
**parameters: Parameters for the sampling method, specified by
the child sampler.
Returns:
:obj:`dimod.SampleSet`
]
variable[bqm] assign[=] call[name[make_quadratic], parameter[name[poly], name[penalty_strength]]]
variable[response] assign[=] call[name[self].child.sample, parameter[name[bqm]]]
return[call[name[polymorph_response], parameter[name[response], name[poly], name[bqm]]]] | keyword[def] identifier[sample_poly] ( identifier[self] , identifier[poly] , identifier[penalty_strength] = literal[int] ,
identifier[keep_penalty_variables] = keyword[False] ,
identifier[discard_unsatisfied] = keyword[False] ,** identifier[parameters] ):
literal[string]
identifier[bqm] = identifier[make_quadratic] ( identifier[poly] , identifier[penalty_strength] , identifier[vartype] = identifier[poly] . identifier[vartype] )
identifier[response] = identifier[self] . identifier[child] . identifier[sample] ( identifier[bqm] ,** identifier[parameters] )
keyword[return] identifier[polymorph_response] ( identifier[response] , identifier[poly] , identifier[bqm] ,
identifier[penalty_strength] = identifier[penalty_strength] ,
identifier[keep_penalty_variables] = identifier[keep_penalty_variables] ,
identifier[discard_unsatisfied] = identifier[discard_unsatisfied] ) | def sample_poly(self, poly, penalty_strength=1.0, keep_penalty_variables=False, discard_unsatisfied=False, **parameters):
"""Sample from the given binary polynomial.
Takes the given binary polynomial, introduces penalties, reduces the
higher-order problem into a quadratic problem and sends it to its child
sampler.
Args:
poly (:obj:`.BinaryPolynomial`):
A binary polynomial.
penalty_strength (float, optional): Strength of the reduction constraint.
Insufficient strength can result in the binary quadratic model
not having the same minimization as the polynomial.
keep_penalty_variables (bool, optional): default is False. If False
will remove the variables used for penalty from the samples
discard_unsatisfied (bool, optional): default is False. If True
will discard samples that do not satisfy the penalty conditions.
**parameters: Parameters for the sampling method, specified by
the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
bqm = make_quadratic(poly, penalty_strength, vartype=poly.vartype)
response = self.child.sample(bqm, **parameters)
return polymorph_response(response, poly, bqm, penalty_strength=penalty_strength, keep_penalty_variables=keep_penalty_variables, discard_unsatisfied=discard_unsatisfied) |
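The reduction make_quadratic performs can be shown by hand: a cubic term x*y*z becomes w*z plus a penalty driving w == x*y at low energy. A library-free sketch over binary variables, using the standard x*y - 2*x*w - 2*y*w + 3*w penalty (one common choice, not necessarily dimod's exact construction):

def reduce_cubic(coeff, penalty_strength):
    """Rewrite coeff * x*y*z as quadratic terms over x, y, z and auxiliary w."""
    return {
        ('w', 'z'): coeff,                 # x*y replaced by w in the interaction
        ('x', 'y'): penalty_strength,      # penalty is zero iff w == x*y
        ('x', 'w'): -2 * penalty_strength,
        ('y', 'w'): -2 * penalty_strength,
        ('w',): 3 * penalty_strength,
    }

print(reduce_cubic(coeff=1.0, penalty_strength=5.0))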
def parse_10qk(self, response):
'''Parse 10-Q or 10-K XML report.'''
loader = ReportItemLoader(response=response)
item = loader.load_item()
if 'doc_type' in item:
doc_type = item['doc_type']
if doc_type in ('10-Q', '10-K'):
return item
return None | def function[parse_10qk, parameter[self, response]]:
constant[Parse 10-Q or 10-K XML report.]
variable[loader] assign[=] call[name[ReportItemLoader], parameter[]]
variable[item] assign[=] call[name[loader].load_item, parameter[]]
if compare[constant[doc_type] in name[item]] begin[:]
variable[doc_type] assign[=] call[name[item]][constant[doc_type]]
if compare[name[doc_type] in tuple[[<ast.Constant object at 0x7da20c992830>, <ast.Constant object at 0x7da20c993820>]]] begin[:]
return[name[item]]
return[constant[None]] | keyword[def] identifier[parse_10qk] ( identifier[self] , identifier[response] ):
literal[string]
identifier[loader] = identifier[ReportItemLoader] ( identifier[response] = identifier[response] )
identifier[item] = identifier[loader] . identifier[load_item] ()
keyword[if] literal[string] keyword[in] identifier[item] :
identifier[doc_type] = identifier[item] [ literal[string] ]
keyword[if] identifier[doc_type] keyword[in] ( literal[string] , literal[string] ):
keyword[return] identifier[item]
keyword[return] keyword[None] | def parse_10qk(self, response):
"""Parse 10-Q or 10-K XML report."""
loader = ReportItemLoader(response=response)
item = loader.load_item()
if 'doc_type' in item:
doc_type = item['doc_type']
if doc_type in ('10-Q', '10-K'):
return item # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['item']]
return None |
def ctrl_c_signal_handler(_, frame):
"""CTRL-c signal handler - enters a pause point if it can.
"""
global ctrl_c_calls
ctrl_c_calls += 1
if ctrl_c_calls > 10:
shutit_global.shutit_global_object.handle_exit(exit_code=1)
shutit_frame = get_shutit_frame(frame)
if in_ctrlc:
msg = 'CTRL-C hit twice, quitting'
if shutit_frame:
shutit_global.shutit_global_object.shutit_print('\n')
shutit = shutit_frame.f_locals['shutit']
shutit.log(msg,level=logging.CRITICAL)
else:
shutit_global.shutit_global_object.shutit_print(msg)
shutit_global.shutit_global_object.handle_exit(exit_code=1)
if shutit_frame:
shutit = shutit_frame.f_locals['shutit']
if shutit.build['ctrlc_passthrough']:
shutit.self.get_current_shutit_pexpect_session().pexpect_child.sendline('\x03')
return
shutit_global.shutit_global_object.shutit_print(colorise(31,"\r" + r"You may need to wait for a command to complete before a pause point is available. Alternatively, CTRL-\ to quit."))
shutit.build['ctrlc_stop'] = True
t = threading.Thread(target=ctrlc_background)
t.daemon = True
t.start()
# Reset the ctrl-c calls
ctrl_c_calls = 0
return
shutit_global.shutit_global_object.shutit_print(colorise(31,'\n' + '*' * 80))
shutit_global.shutit_global_object.shutit_print(colorise(31,"CTRL-c caught, CTRL-c twice to quit."))
shutit_global.shutit_global_object.shutit_print(colorise(31,'*' * 80))
t = threading.Thread(target=ctrlc_background)
t.daemon = True
t.start()
# Reset the ctrl-c calls
ctrl_c_calls = 0 | def function[ctrl_c_signal_handler, parameter[_, frame]]:
constant[CTRL-c signal handler - enters a pause point if it can.
]
<ast.Global object at 0x7da1b12cb6d0>
<ast.AugAssign object at 0x7da1b12cb940>
if compare[name[ctrl_c_calls] greater[>] constant[10]] begin[:]
call[name[shutit_global].shutit_global_object.handle_exit, parameter[]]
variable[shutit_frame] assign[=] call[name[get_shutit_frame], parameter[name[frame]]]
if name[in_ctrlc] begin[:]
variable[msg] assign[=] constant[CTRL-C hit twice, quitting]
if name[shutit_frame] begin[:]
call[name[shutit_global].shutit_global_object.shutit_print, parameter[constant[
]]]
variable[shutit] assign[=] call[name[shutit_frame].f_locals][constant[shutit]]
call[name[shutit].log, parameter[name[msg]]]
call[name[shutit_global].shutit_global_object.handle_exit, parameter[]]
if name[shutit_frame] begin[:]
variable[shutit] assign[=] call[name[shutit_frame].f_locals][constant[shutit]]
if call[name[shutit].build][constant[ctrlc_passthrough]] begin[:]
call[call[name[shutit].self.get_current_shutit_pexpect_session, parameter[]].pexpect_child.sendline, parameter[constant[]]]
return[None]
call[name[shutit_global].shutit_global_object.shutit_print, parameter[call[name[colorise], parameter[constant[31], binary_operation[constant[
] + constant[You may need to wait for a command to complete before a pause point is available. Alternatively, CTRL-\ to quit.]]]]]]
call[name[shutit].build][constant[ctrlc_stop]] assign[=] constant[True]
variable[t] assign[=] call[name[threading].Thread, parameter[]]
name[t].daemon assign[=] constant[True]
call[name[t].start, parameter[]]
variable[ctrl_c_calls] assign[=] constant[0]
return[None]
call[name[shutit_global].shutit_global_object.shutit_print, parameter[call[name[colorise], parameter[constant[31], binary_operation[constant[
] + binary_operation[constant[*] * constant[80]]]]]]]
call[name[shutit_global].shutit_global_object.shutit_print, parameter[call[name[colorise], parameter[constant[31], constant[CTRL-c caught, CTRL-c twice to quit.]]]]]
call[name[shutit_global].shutit_global_object.shutit_print, parameter[call[name[colorise], parameter[constant[31], binary_operation[constant[*] * constant[80]]]]]]
variable[t] assign[=] call[name[threading].Thread, parameter[]]
name[t].daemon assign[=] constant[True]
call[name[t].start, parameter[]]
variable[ctrl_c_calls] assign[=] constant[0] | keyword[def] identifier[ctrl_c_signal_handler] ( identifier[_] , identifier[frame] ):
literal[string]
keyword[global] identifier[ctrl_c_calls]
identifier[ctrl_c_calls] += literal[int]
keyword[if] identifier[ctrl_c_calls] > literal[int] :
identifier[shutit_global] . identifier[shutit_global_object] . identifier[handle_exit] ( identifier[exit_code] = literal[int] )
identifier[shutit_frame] = identifier[get_shutit_frame] ( identifier[frame] )
keyword[if] identifier[in_ctrlc] :
identifier[msg] = literal[string]
keyword[if] identifier[shutit_frame] :
identifier[shutit_global] . identifier[shutit_global_object] . identifier[shutit_print] ( literal[string] )
identifier[shutit] = identifier[shutit_frame] . identifier[f_locals] [ literal[string] ]
identifier[shutit] . identifier[log] ( identifier[msg] , identifier[level] = identifier[logging] . identifier[CRITICAL] )
keyword[else] :
identifier[shutit_global] . identifier[shutit_global_object] . identifier[shutit_print] ( identifier[msg] )
identifier[shutit_global] . identifier[shutit_global_object] . identifier[handle_exit] ( identifier[exit_code] = literal[int] )
keyword[if] identifier[shutit_frame] :
identifier[shutit] = identifier[shutit_frame] . identifier[f_locals] [ literal[string] ]
keyword[if] identifier[shutit] . identifier[build] [ literal[string] ]:
identifier[shutit] . identifier[self] . identifier[get_current_shutit_pexpect_session] (). identifier[pexpect_child] . identifier[sendline] ( literal[string] )
keyword[return]
identifier[shutit_global] . identifier[shutit_global_object] . identifier[shutit_print] ( identifier[colorise] ( literal[int] , literal[string] + literal[string] ))
identifier[shutit] . identifier[build] [ literal[string] ]= keyword[True]
identifier[t] = identifier[threading] . identifier[Thread] ( identifier[target] = identifier[ctrlc_background] )
identifier[t] . identifier[daemon] = keyword[True]
identifier[t] . identifier[start] ()
identifier[ctrl_c_calls] = literal[int]
keyword[return]
identifier[shutit_global] . identifier[shutit_global_object] . identifier[shutit_print] ( identifier[colorise] ( literal[int] , literal[string] + literal[string] * literal[int] ))
identifier[shutit_global] . identifier[shutit_global_object] . identifier[shutit_print] ( identifier[colorise] ( literal[int] , literal[string] ))
identifier[shutit_global] . identifier[shutit_global_object] . identifier[shutit_print] ( identifier[colorise] ( literal[int] , literal[string] * literal[int] ))
identifier[t] = identifier[threading] . identifier[Thread] ( identifier[target] = identifier[ctrlc_background] )
identifier[t] . identifier[daemon] = keyword[True]
identifier[t] . identifier[start] ()
identifier[ctrl_c_calls] = literal[int] | def ctrl_c_signal_handler(_, frame):
"""CTRL-c signal handler - enters a pause point if it can.
"""
global ctrl_c_calls
ctrl_c_calls += 1
if ctrl_c_calls > 10:
shutit_global.shutit_global_object.handle_exit(exit_code=1) # depends on [control=['if'], data=[]]
shutit_frame = get_shutit_frame(frame)
if in_ctrlc:
msg = 'CTRL-C hit twice, quitting'
if shutit_frame:
shutit_global.shutit_global_object.shutit_print('\n')
shutit = shutit_frame.f_locals['shutit']
shutit.log(msg, level=logging.CRITICAL) # depends on [control=['if'], data=[]]
else:
shutit_global.shutit_global_object.shutit_print(msg)
shutit_global.shutit_global_object.handle_exit(exit_code=1) # depends on [control=['if'], data=[]]
if shutit_frame:
shutit = shutit_frame.f_locals['shutit']
if shutit.build['ctrlc_passthrough']:
shutit.self.get_current_shutit_pexpect_session().pexpect_child.sendline('\x03')
return # depends on [control=['if'], data=[]]
shutit_global.shutit_global_object.shutit_print(colorise(31, '\r' + 'You may need to wait for a command to complete before a pause point is available. Alternatively, CTRL-\\ to quit.'))
shutit.build['ctrlc_stop'] = True
t = threading.Thread(target=ctrlc_background)
t.daemon = True
t.start() # Reset the ctrl-c calls
ctrl_c_calls = 0
return # depends on [control=['if'], data=[]]
shutit_global.shutit_global_object.shutit_print(colorise(31, '\n' + '*' * 80))
shutit_global.shutit_global_object.shutit_print(colorise(31, 'CTRL-c caught, CTRL-c twice to quit.'))
shutit_global.shutit_global_object.shutit_print(colorise(31, '*' * 80))
t = threading.Thread(target=ctrlc_background)
t.daemon = True
t.start() # Reset the ctrl-c calls
ctrl_c_calls = 0 |
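The registration half of the handler above is ordinary signal module usage; a minimal sketch of a two-stage SIGINT handler in the same spirit, where the first press warns and the second falls through to the default KeyboardInterrupt:

import signal

def sigint_handler(signum, frame):
    print('CTRL-C caught, CTRL-C again to quit.')
    signal.signal(signal.SIGINT, signal.default_int_handler)  # next press exits

signal.signal(signal.SIGINT, sigint_handler)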
def get_private_agents():
"""Provides a list of hostnames / IPs that are private agents in the cluster"""
agent_list = []
agents = __get_all_agents()
for agent in agents:
if(len(agent["reserved_resources"]) == 0):
agent_list.append(agent["hostname"])
else:
private = True
for reservation in agent["reserved_resources"]:
if("slave_public" in reservation):
private = False
if(private):
agent_list.append(agent["hostname"])
return agent_list | def function[get_private_agents, parameter[]]:
constant[Provides a list of hostnames / IPs that are private agents in the cluster]
variable[agent_list] assign[=] list[[]]
variable[agents] assign[=] call[name[__get_all_agents], parameter[]]
for taget[name[agent]] in starred[name[agents]] begin[:]
if compare[call[name[len], parameter[call[name[agent]][constant[reserved_resources]]]] equal[==] constant[0]] begin[:]
call[name[agent_list].append, parameter[call[name[agent]][constant[hostname]]]]
return[name[agent_list]] | keyword[def] identifier[get_private_agents] ():
literal[string]
identifier[agent_list] =[]
identifier[agents] = identifier[__get_all_agents] ()
keyword[for] identifier[agent] keyword[in] identifier[agents] :
keyword[if] ( identifier[len] ( identifier[agent] [ literal[string] ])== literal[int] ):
identifier[agent_list] . identifier[append] ( identifier[agent] [ literal[string] ])
keyword[else] :
identifier[private] = keyword[True]
keyword[for] identifier[reservation] keyword[in] identifier[agent] [ literal[string] ]:
keyword[if] ( literal[string] keyword[in] identifier[reservation] ):
identifier[private] = keyword[False]
keyword[if] ( identifier[private] ):
identifier[agent_list] . identifier[append] ( identifier[agent] [ literal[string] ])
keyword[return] identifier[agent_list] | def get_private_agents():
"""Provides a list of hostnames / IPs that are private agents in the cluster"""
agent_list = []
agents = __get_all_agents()
for agent in agents:
if len(agent['reserved_resources']) == 0:
agent_list.append(agent['hostname']) # depends on [control=['if'], data=[]]
else:
private = True
for reservation in agent['reserved_resources']:
if 'slave_public' in reservation:
private = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['reservation']]
if private:
agent_list.append(agent['hostname']) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['agent']]
return agent_list |
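The public/private test above hinges on whether any reservation names the slave_public role; the same predicate in isolation, over hypothetical agent dicts:

agents = [  # hypothetical payload from the Mesos /slaves endpoint
    {'hostname': '10.0.0.1', 'reserved_resources': {}},
    {'hostname': '10.0.0.2', 'reserved_resources': {'slave_public': {}}},
]
private = [a['hostname'] for a in agents
           if not any('slave_public' in r for r in a['reserved_resources'])]
print(private)  # ['10.0.0.1']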
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the MACSignatureKeyInformation struct to a
stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
"""
local_stream = BytearrayStream()
if self._unique_identifier:
self._unique_identifier.write(
local_stream,
kmip_version=kmip_version
)
else:
raise ValueError(
"Invalid struct missing the unique identifier attribute."
)
if self._cryptographic_parameters:
self._cryptographic_parameters.write(
local_stream,
kmip_version=kmip_version
)
self.length = local_stream.length()
super(MACSignatureKeyInformation, self).write(
output_stream,
kmip_version=kmip_version
)
output_stream.write(local_stream.buffer) | def function[write, parameter[self, output_stream, kmip_version]]:
constant[
Write the data encoding the MACSignatureKeyInformation struct to a
stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
]
variable[local_stream] assign[=] call[name[BytearrayStream], parameter[]]
if name[self]._unique_identifier begin[:]
call[name[self]._unique_identifier.write, parameter[name[local_stream]]]
if name[self]._cryptographic_parameters begin[:]
call[name[self]._cryptographic_parameters.write, parameter[name[local_stream]]]
name[self].length assign[=] call[name[local_stream].length, parameter[]]
call[call[name[super], parameter[name[MACSignatureKeyInformation], name[self]]].write, parameter[name[output_stream]]]
call[name[output_stream].write, parameter[name[local_stream].buffer]] | keyword[def] identifier[write] ( identifier[self] , identifier[output_stream] , identifier[kmip_version] = identifier[enums] . identifier[KMIPVersion] . identifier[KMIP_1_0] ):
literal[string]
identifier[local_stream] = identifier[BytearrayStream] ()
keyword[if] identifier[self] . identifier[_unique_identifier] :
identifier[self] . identifier[_unique_identifier] . identifier[write] (
identifier[local_stream] ,
identifier[kmip_version] = identifier[kmip_version]
)
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string]
)
keyword[if] identifier[self] . identifier[_cryptographic_parameters] :
identifier[self] . identifier[_cryptographic_parameters] . identifier[write] (
identifier[local_stream] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[self] . identifier[length] = identifier[local_stream] . identifier[length] ()
identifier[super] ( identifier[MACSignatureKeyInformation] , identifier[self] ). identifier[write] (
identifier[output_stream] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[output_stream] . identifier[write] ( identifier[local_stream] . identifier[buffer] ) | def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the MACSignatureKeyInformation struct to a
stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
"""
local_stream = BytearrayStream()
if self._unique_identifier:
self._unique_identifier.write(local_stream, kmip_version=kmip_version) # depends on [control=['if'], data=[]]
else:
raise ValueError('Invalid struct missing the unique identifier attribute.')
if self._cryptographic_parameters:
self._cryptographic_parameters.write(local_stream, kmip_version=kmip_version) # depends on [control=['if'], data=[]]
self.length = local_stream.length()
super(MACSignatureKeyInformation, self).write(output_stream, kmip_version=kmip_version)
output_stream.write(local_stream.buffer) |
def update(self, w, e):
"""
This function makes an update according to the provided target
and the last used input vector.
**Args:**
* `d` : target (float or 1-dimensional array).
Size depends on number of MLP outputs.
**Returns:**
* `w` : weights of the layers (2-dimensional layer).
Every row represents one node.
* `e` : error used for update (float or 1-dimensional array).
Size corresponds to the size of input `d`.
"""
if len(w.shape) == 1:
e = self.activation(self.y, f=self.f, der=True) * e * w
dw = self.mu * np.outer(e, self.x)
else:
e = self.activation(self.y, f=self.f, der=True) * (1 - self.y) * np.dot(e, w)
dw = self.mu * np.outer(e, self.x)
w = self.w[:,1:]
self.w += dw
return w, e | def function[update, parameter[self, w, e]]:
constant[
This function makes an update according to the provided target
and the last used input vector.
**Args:**
* `d` : target (float or 1-dimensional array).
Size depends on number of MLP outputs.
**Returns:**
* `w` : weights of the layers (2-dimensional layer).
Every row represents one node.
* `e` : error used for update (float or 1-dimensional array).
Size corresponds to the size of input `d`.
]
if compare[call[name[len], parameter[name[w].shape]] equal[==] constant[1]] begin[:]
variable[e] assign[=] binary_operation[binary_operation[call[name[self].activation, parameter[name[self].y]] * name[e]] * name[w]]
variable[dw] assign[=] binary_operation[name[self].mu * call[name[np].outer, parameter[name[e], name[self].x]]]
variable[w] assign[=] call[name[self].w][tuple[[<ast.Slice object at 0x7da1b0ff93f0>, <ast.Slice object at 0x7da1b0ff96f0>]]]
<ast.AugAssign object at 0x7da1b0ff84f0>
return[tuple[[<ast.Name object at 0x7da1b0ef84f0>, <ast.Name object at 0x7da1b0ef9cc0>]]] | keyword[def] identifier[update] ( identifier[self] , identifier[w] , identifier[e] ):
literal[string]
keyword[if] identifier[len] ( identifier[w] . identifier[shape] )== literal[int] :
identifier[e] = identifier[self] . identifier[activation] ( identifier[self] . identifier[y] , identifier[f] = identifier[self] . identifier[f] , identifier[der] = keyword[True] )* identifier[e] * identifier[w]
identifier[dw] = identifier[self] . identifier[mu] * identifier[np] . identifier[outer] ( identifier[e] , identifier[self] . identifier[x] )
keyword[else] :
identifier[e] = identifier[self] . identifier[activation] ( identifier[self] . identifier[y] , identifier[f] = identifier[self] . identifier[f] , identifier[der] = keyword[True] )*( literal[int] - identifier[self] . identifier[y] )* identifier[np] . identifier[dot] ( identifier[e] , identifier[w] )
identifier[dw] = identifier[self] . identifier[mu] * identifier[np] . identifier[outer] ( identifier[e] , identifier[self] . identifier[x] )
identifier[w] = identifier[self] . identifier[w] [:, literal[int] :]
identifier[self] . identifier[w] += identifier[dw]
keyword[return] identifier[w] , identifier[e] | def update(self, w, e):
"""
This function makes an update according to the provided target
and the last used input vector.
**Args:**
* `d` : target (float or 1-dimensional array).
Size depends on number of MLP outputs.
**Returns:**
* `w` : weights of the layers (2-dimensional layer).
Every row represents one node.
* `e` : error used for update (float or 1-dimensional array).
Size corresponds to the size of input `d`.
"""
if len(w.shape) == 1:
e = self.activation(self.y, f=self.f, der=True) * e * w
dw = self.mu * np.outer(e, self.x) # depends on [control=['if'], data=[]]
else:
e = self.activation(self.y, f=self.f, der=True) * (1 - self.y) * np.dot(e, w)
dw = self.mu * np.outer(e, self.x)
w = self.w[:, 1:]
self.w += dw
return (w, e) |
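Numerically, the hidden-layer branch of update() is the chain rule: scale the upstream error by the local activation derivative, then take the outer product with the stored input to get the weight update. A standalone sketch with a tanh derivative standing in for self.activation(..., der=True):

import numpy as np

mu = 0.1                          # learning rate
x = np.array([1.0, 0.5, -0.2])    # last input vector (bias included)
y = np.array([0.3, -0.1])         # node outputs
e = np.array([0.05, -0.02])       # upstream error, one entry per node

local_grad = 1.0 - np.tanh(y) ** 2   # stand-in activation derivative
dw = mu * np.outer(local_grad * e, x)
print(dw.shape)  # (2, 3): one row of updates per node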
def write_dot(build_context, conf: Config, out_f):
"""Write build graph in dot format to `out_f` file-like object."""
not_buildenv_targets = get_not_buildenv_targets(build_context)
prebuilt_targets = get_prebuilt_targets(build_context)
out_f.write('strict digraph {\n')
for node in build_context.target_graph.nodes:
if conf.show_buildenv_deps or node in not_buildenv_targets:
cached = node in prebuilt_targets
fillcolor = 'fillcolor="grey",style=filled' if cached else ''
color = TARGETS_COLORS.get(
build_context.targets[node].builder_name, 'black')
out_f.write(' "{}" [color="{}",{}];\n'.format(node, color,
fillcolor))
out_f.writelines(' "{}" -> "{}";\n'.format(u, v)
for u, v in build_context.target_graph.edges
if conf.show_buildenv_deps or
(u in not_buildenv_targets and v in not_buildenv_targets))
out_f.write('}\n\n') | def function[write_dot, parameter[build_context, conf, out_f]]:
constant[Write build graph in dot format to `out_f` file-like object.]
variable[not_buildenv_targets] assign[=] call[name[get_not_buildenv_targets], parameter[name[build_context]]]
variable[prebuilt_targets] assign[=] call[name[get_prebuilt_targets], parameter[name[build_context]]]
call[name[out_f].write, parameter[constant[strict digraph {
]]]
for taget[name[node]] in starred[name[build_context].target_graph.nodes] begin[:]
if <ast.BoolOp object at 0x7da1b1bedd20> begin[:]
variable[cached] assign[=] compare[name[node] in name[prebuilt_targets]]
variable[fillcolor] assign[=] <ast.IfExp object at 0x7da1b1bee410>
variable[color] assign[=] call[name[TARGETS_COLORS].get, parameter[call[name[build_context].targets][name[node]].builder_name, constant[black]]]
call[name[out_f].write, parameter[call[constant[ "{}" [color="{}",{}];
].format, parameter[name[node], name[color], name[fillcolor]]]]]
call[name[out_f].writelines, parameter[<ast.GeneratorExp object at 0x7da1b1bee3b0>]]
call[name[out_f].write, parameter[constant[}
]]] | keyword[def] identifier[write_dot] ( identifier[build_context] , identifier[conf] : identifier[Config] , identifier[out_f] ):
literal[string]
identifier[not_buildenv_targets] = identifier[get_not_buildenv_targets] ( identifier[build_context] )
identifier[prebuilt_targets] = identifier[get_prebuilt_targets] ( identifier[build_context] )
identifier[out_f] . identifier[write] ( literal[string] )
keyword[for] identifier[node] keyword[in] identifier[build_context] . identifier[target_graph] . identifier[nodes] :
keyword[if] identifier[conf] . identifier[show_buildenv_deps] keyword[or] identifier[node] keyword[in] identifier[not_buildenv_targets] :
identifier[cached] = identifier[node] keyword[in] identifier[prebuilt_targets]
identifier[fillcolor] = literal[string] keyword[if] identifier[cached] keyword[else] literal[string]
identifier[color] = identifier[TARGETS_COLORS] . identifier[get] (
identifier[build_context] . identifier[targets] [ identifier[node] ]. identifier[builder_name] , literal[string] )
identifier[out_f] . identifier[write] ( literal[string] . identifier[format] ( identifier[node] , identifier[color] ,
identifier[fillcolor] ))
identifier[out_f] . identifier[writelines] ( literal[string] . identifier[format] ( identifier[u] , identifier[v] )
keyword[for] identifier[u] , identifier[v] keyword[in] identifier[build_context] . identifier[target_graph] . identifier[edges]
keyword[if] identifier[conf] . identifier[show_buildenv_deps] keyword[or]
( identifier[u] keyword[in] identifier[not_buildenv_targets] keyword[and] identifier[v] keyword[in] identifier[not_buildenv_targets] ))
identifier[out_f] . identifier[write] ( literal[string] ) | def write_dot(build_context, conf: Config, out_f):
"""Write build graph in dot format to `out_f` file-like object."""
not_buildenv_targets = get_not_buildenv_targets(build_context)
prebuilt_targets = get_prebuilt_targets(build_context)
out_f.write('strict digraph {\n')
for node in build_context.target_graph.nodes:
if conf.show_buildenv_deps or node in not_buildenv_targets:
cached = node in prebuilt_targets
fillcolor = 'fillcolor="grey",style=filled' if cached else ''
color = TARGETS_COLORS.get(build_context.targets[node].builder_name, 'black')
out_f.write(' "{}" [color="{}",{}];\n'.format(node, color, fillcolor)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
out_f.writelines((' "{}" -> "{}";\n'.format(u, v) for (u, v) in build_context.target_graph.edges if conf.show_buildenv_deps or (u in not_buildenv_targets and v in not_buildenv_targets)))
out_f.write('}\n\n') |
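The emission pattern in write_dot (one node statement per target, one edge statement per dependency) needs nothing beyond string formatting; a sketch against an in-memory graph and io.StringIO:

import io

nodes = {'lib': 'blue', 'app': 'black'}  # target -> color
edges = [('app', 'lib')]

out = io.StringIO()
out.write('strict digraph {\n')
for node, color in nodes.items():
    out.write(' "{}" [color="{}"];\n'.format(node, color))
out.writelines(' "{}" -> "{}";\n'.format(u, v) for u, v in edges)
out.write('}\n')
print(out.getvalue())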
def add_data_file(data_files, target, source):
"""Add an entry to data_files"""
for t, f in data_files:
if t == target:
break
else:
data_files.append((target, []))
f = data_files[-1][1]
if source not in f:
f.append(source) | def function[add_data_file, parameter[data_files, target, source]]:
constant[Add an entry to data_files]
for taget[tuple[[<ast.Name object at 0x7da1b1151420>, <ast.Name object at 0x7da1b11514b0>]]] in starred[name[data_files]] begin[:]
if compare[name[t] equal[==] name[target]] begin[:]
break
if compare[name[source] <ast.NotIn object at 0x7da2590d7190> name[f]] begin[:]
call[name[f].append, parameter[name[source]]] | keyword[def] identifier[add_data_file] ( identifier[data_files] , identifier[target] , identifier[source] ):
literal[string]
keyword[for] identifier[t] , identifier[f] keyword[in] identifier[data_files] :
keyword[if] identifier[t] == identifier[target] :
keyword[break]
keyword[else] :
identifier[data_files] . identifier[append] (( identifier[target] ,[]))
identifier[f] = identifier[data_files] [- literal[int] ][ literal[int] ]
keyword[if] identifier[source] keyword[not] keyword[in] identifier[f] :
identifier[f] . identifier[append] ( identifier[source] ) | def add_data_file(data_files, target, source):
"""Add an entry to data_files"""
for (t, f) in data_files:
if t == target:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
else:
data_files.append((target, []))
f = data_files[-1][1]
if source not in f:
f.append(source) # depends on [control=['if'], data=['source', 'f']] |
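Usage sketch: data_files follows the setuptools convention of (target_dir, [sources]) pairs, and the for/else in add_data_file appends a fresh pair only when the target is new:

data_files = []
add_data_file(data_files, 'share/icons', 'icons/app.png')
add_data_file(data_files, 'share/icons', 'icons/app.svg')
add_data_file(data_files, 'share/icons', 'icons/app.png')  # duplicate, ignored
print(data_files)  # [('share/icons', ['icons/app.png', 'icons/app.svg'])]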
def get_tree(self, *page_numbers):
"""
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
"""
cache_key = "_".join(map(str, _flatten(page_numbers)))
tree = self._parse_tree_cacher.get(cache_key)
if tree is None:
# set up root
root = parser.makeelement("pdfxml")
if self.doc.info:
for k, v in list(self.doc.info[0].items()):
k = obj_to_string(k)
v = obj_to_string(resolve1(v))
try:
root.set(k, v)
except ValueError as e:
# Sometimes keys have a character in them, like ':',
# that isn't allowed in XML attribute names.
# If that happens we just replace non-word characters
# with '_'.
if "Invalid attribute name" in e.args[0]:
k = re.sub(r'\W', '_', k)
root.set(k, v)
# Parse pages and append to root.
# If nothing was passed in for page_numbers, we do this for all
# pages, but if None was explicitly passed in, we skip it.
if not(len(page_numbers) == 1 and page_numbers[0] is None):
if page_numbers:
pages = [[n, self.get_layout(self.get_page(n))] for n in
_flatten(page_numbers)]
else:
pages = enumerate(self.get_layouts())
for n, page in pages:
page = self._xmlize(page)
page.set('page_index', obj_to_string(n))
page.set('page_label', self.doc.get_page_number(n))
root.append(page)
self._clean_text(root)
# wrap root in ElementTree
tree = etree.ElementTree(root)
self._parse_tree_cacher.set(cache_key, tree)
return tree | def function[get_tree, parameter[self]]:
constant[
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
]
variable[cache_key] assign[=] call[constant[_].join, parameter[call[name[map], parameter[name[str], call[name[_flatten], parameter[name[page_numbers]]]]]]]
variable[tree] assign[=] call[name[self]._parse_tree_cacher.get, parameter[name[cache_key]]]
if compare[name[tree] is constant[None]] begin[:]
variable[root] assign[=] call[name[parser].makeelement, parameter[constant[pdfxml]]]
if name[self].doc.info begin[:]
for taget[tuple[[<ast.Name object at 0x7da20e9b1780>, <ast.Name object at 0x7da20e9b3a90>]]] in starred[call[name[list], parameter[call[call[name[self].doc.info][constant[0]].items, parameter[]]]]] begin[:]
variable[k] assign[=] call[name[obj_to_string], parameter[name[k]]]
variable[v] assign[=] call[name[obj_to_string], parameter[call[name[resolve1], parameter[name[v]]]]]
<ast.Try object at 0x7da20e9b3880>
if <ast.UnaryOp object at 0x7da20e9b3e20> begin[:]
if name[page_numbers] begin[:]
variable[pages] assign[=] <ast.ListComp object at 0x7da2044c21d0>
for taget[tuple[[<ast.Name object at 0x7da2044c09a0>, <ast.Name object at 0x7da2044c3df0>]]] in starred[name[pages]] begin[:]
variable[page] assign[=] call[name[self]._xmlize, parameter[name[page]]]
call[name[page].set, parameter[constant[page_index], call[name[obj_to_string], parameter[name[n]]]]]
call[name[page].set, parameter[constant[page_label], call[name[self].doc.get_page_number, parameter[name[n]]]]]
call[name[root].append, parameter[name[page]]]
call[name[self]._clean_text, parameter[name[root]]]
variable[tree] assign[=] call[name[etree].ElementTree, parameter[name[root]]]
call[name[self]._parse_tree_cacher.set, parameter[name[cache_key], name[tree]]]
return[name[tree]] | keyword[def] identifier[get_tree] ( identifier[self] ,* identifier[page_numbers] ):
literal[string]
identifier[cache_key] = literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[_flatten] ( identifier[page_numbers] )))
identifier[tree] = identifier[self] . identifier[_parse_tree_cacher] . identifier[get] ( identifier[cache_key] )
keyword[if] identifier[tree] keyword[is] keyword[None] :
identifier[root] = identifier[parser] . identifier[makeelement] ( literal[string] )
keyword[if] identifier[self] . identifier[doc] . identifier[info] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[list] ( identifier[self] . identifier[doc] . identifier[info] [ literal[int] ]. identifier[items] ()):
identifier[k] = identifier[obj_to_string] ( identifier[k] )
identifier[v] = identifier[obj_to_string] ( identifier[resolve1] ( identifier[v] ))
keyword[try] :
identifier[root] . identifier[set] ( identifier[k] , identifier[v] )
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
keyword[if] literal[string] keyword[in] identifier[e] . identifier[args] [ literal[int] ]:
identifier[k] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[k] )
identifier[root] . identifier[set] ( identifier[k] , identifier[v] )
keyword[if] keyword[not] ( identifier[len] ( identifier[page_numbers] )== literal[int] keyword[and] identifier[page_numbers] [ literal[int] ] keyword[is] keyword[None] ):
keyword[if] identifier[page_numbers] :
identifier[pages] =[[ identifier[n] , identifier[self] . identifier[get_layout] ( identifier[self] . identifier[get_page] ( identifier[n] ))] keyword[for] identifier[n] keyword[in]
identifier[_flatten] ( identifier[page_numbers] )]
keyword[else] :
identifier[pages] = identifier[enumerate] ( identifier[self] . identifier[get_layouts] ())
keyword[for] identifier[n] , identifier[page] keyword[in] identifier[pages] :
identifier[page] = identifier[self] . identifier[_xmlize] ( identifier[page] )
identifier[page] . identifier[set] ( literal[string] , identifier[obj_to_string] ( identifier[n] ))
identifier[page] . identifier[set] ( literal[string] , identifier[self] . identifier[doc] . identifier[get_page_number] ( identifier[n] ))
identifier[root] . identifier[append] ( identifier[page] )
identifier[self] . identifier[_clean_text] ( identifier[root] )
identifier[tree] = identifier[etree] . identifier[ElementTree] ( identifier[root] )
identifier[self] . identifier[_parse_tree_cacher] . identifier[set] ( identifier[cache_key] , identifier[tree] )
keyword[return] identifier[tree] | def get_tree(self, *page_numbers):
"""
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
"""
cache_key = '_'.join(map(str, _flatten(page_numbers)))
tree = self._parse_tree_cacher.get(cache_key)
if tree is None: # set up root
root = parser.makeelement('pdfxml')
if self.doc.info:
for (k, v) in list(self.doc.info[0].items()):
k = obj_to_string(k)
v = obj_to_string(resolve1(v))
try:
root.set(k, v) # depends on [control=['try'], data=[]]
except ValueError as e: # Sometimes keys have a character in them, like ':',
# that isn't allowed in XML attribute names.
# If that happens we just replace non-word characters
# with '_'.
if 'Invalid attribute name' in e.args[0]:
k = re.sub('\\W', '_', k)
root.set(k, v) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # Parse pages and append to root.
# If nothing was passed in for page_numbers, we do this for all
# pages, but if None was explicitly passed in, we skip it.
if not (len(page_numbers) == 1 and page_numbers[0] is None):
if page_numbers:
pages = [[n, self.get_layout(self.get_page(n))] for n in _flatten(page_numbers)] # depends on [control=['if'], data=[]]
else:
pages = enumerate(self.get_layouts())
for (n, page) in pages:
page = self._xmlize(page)
page.set('page_index', obj_to_string(n))
page.set('page_label', self.doc.get_page_number(n))
root.append(page) # depends on [control=['for'], data=[]]
self._clean_text(root) # depends on [control=['if'], data=[]] # wrap root in ElementTree
tree = etree.ElementTree(root)
self._parse_tree_cacher.set(cache_key, tree) # depends on [control=['if'], data=['tree']]
return tree |
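The attribute-sanitizing fallback in get_tree is worth isolating: lxml rejects attribute names containing characters such as ':', so non-word characters are swapped for '_' before retrying. A minimal sketch with lxml.etree and a hypothetical PDF info key:

import re
from lxml import etree

root = etree.Element('pdfxml')
k, v = 'Creation:Date', '2020-01-01'  # hypothetical PDF /Info key
try:
    root.set(k, v)
except ValueError:
    root.set(re.sub(r'\W', '_', k), v)  # becomes 'Creation_Date'
print(etree.tostring(root))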
def _start_transport(self, chain_state: ChainState):
""" Initialize the transport and related facilities.
Note:
The transport must not be started before the node has caught up
with the blockchain through `AlarmTask.first_run()`. This
synchronization includes the on-chain channel state and is
necessary to reject new messages for closed channels.
"""
assert self.alarm.is_primed(), f'AlarmTask not primed. node:{self!r}'
assert self.ready_to_process_events, f'Event processing disabled. node:{self!r}'
self.transport.start(
raiden_service=self,
message_handler=self.message_handler,
prev_auth_data=chain_state.last_transport_authdata,
)
for neighbour in views.all_neighbour_nodes(chain_state):
if neighbour != ConnectionManager.BOOTSTRAP_ADDR:
self.start_health_check_for(neighbour) | def function[_start_transport, parameter[self, chain_state]]:
constant[ Initialize the transport and related facilities.
Note:
The transport must not be started before the node has caught up
with the blockchain through `AlarmTask.first_run()`. This
synchronization includes the on-chain channel state and is
necessary to reject new messages for closed channels.
]
assert[call[name[self].alarm.is_primed, parameter[]]]
assert[name[self].ready_to_process_events]
call[name[self].transport.start, parameter[]]
for taget[name[neighbour]] in starred[call[name[views].all_neighbour_nodes, parameter[name[chain_state]]]] begin[:]
if compare[name[neighbour] not_equal[!=] name[ConnectionManager].BOOTSTRAP_ADDR] begin[:]
call[name[self].start_health_check_for, parameter[name[neighbour]]] | keyword[def] identifier[_start_transport] ( identifier[self] , identifier[chain_state] : identifier[ChainState] ):
literal[string]
keyword[assert] identifier[self] . identifier[alarm] . identifier[is_primed] (), literal[string]
keyword[assert] identifier[self] . identifier[ready_to_process_events] , literal[string]
identifier[self] . identifier[transport] . identifier[start] (
identifier[raiden_service] = identifier[self] ,
identifier[message_handler] = identifier[self] . identifier[message_handler] ,
identifier[prev_auth_data] = identifier[chain_state] . identifier[last_transport_authdata] ,
)
keyword[for] identifier[neighbour] keyword[in] identifier[views] . identifier[all_neighbour_nodes] ( identifier[chain_state] ):
keyword[if] identifier[neighbour] != identifier[ConnectionManager] . identifier[BOOTSTRAP_ADDR] :
identifier[self] . identifier[start_health_check_for] ( identifier[neighbour] ) | def _start_transport(self, chain_state: ChainState):
""" Initialize the transport and related facilities.
Note:
The transport must not be started before the node has caught up
with the blockchain through `AlarmTask.first_run()`. This
synchronization includes the on-chain channel state and is
necessary to reject new messages for closed channels.
"""
assert self.alarm.is_primed(), f'AlarmTask not primed. node:{self!r}'
assert self.ready_to_process_events, f'Event processing disabled. node:{self!r}'
self.transport.start(raiden_service=self, message_handler=self.message_handler, prev_auth_data=chain_state.last_transport_authdata)
for neighbour in views.all_neighbour_nodes(chain_state):
if neighbour != ConnectionManager.BOOTSTRAP_ADDR:
self.start_health_check_for(neighbour) # depends on [control=['if'], data=['neighbour']] # depends on [control=['for'], data=['neighbour']] |
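A minimal sketch of the ordering contract `_start_transport` asserts, with a hypothetical `FakeAlarm` standing in for Raiden's AlarmTask; the real service also wires in a message handler and transport auth data.

# Hypothetical stand-in illustrating the contract: the alarm task must be
# primed (first_run completed, i.e. caught up with the blockchain) before
# the transport may start.
class FakeAlarm:
    def __init__(self):
        self._primed = False

    def first_run(self):
        self._primed = True  # blockchain catch-up happens here

    def is_primed(self):
        return self._primed

alarm = FakeAlarm()
alarm.first_run()           # must run first
assert alarm.is_primed()    # only now is starting the transport safe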
def load_projects(self):
"""
Preload the list of projects from disk
"""
server_config = Config.instance().get_section_config("Server")
projects_path = os.path.expanduser(server_config.get("projects_path", "~/GNS3/projects"))
os.makedirs(projects_path, exist_ok=True)
try:
for project_path in os.listdir(projects_path):
project_dir = os.path.join(projects_path, project_path)
if os.path.isdir(project_dir):
for file in os.listdir(project_dir):
if file.endswith(".gns3"):
try:
yield from self.load_project(os.path.join(project_dir, file), load=False)
except (aiohttp.web_exceptions.HTTPConflict, NotImplementedError):
pass # Skip not compatible projects
except OSError as e:
log.error(str(e)) | def function[load_projects, parameter[self]]:
constant[
Preload the list of projects from disk
]
variable[server_config] assign[=] call[call[name[Config].instance, parameter[]].get_section_config, parameter[constant[Server]]]
variable[projects_path] assign[=] call[name[os].path.expanduser, parameter[call[name[server_config].get, parameter[constant[projects_path], constant[~/GNS3/projects]]]]]
call[name[os].makedirs, parameter[name[projects_path]]]
<ast.Try object at 0x7da18eb57cd0> | keyword[def] identifier[load_projects] ( identifier[self] ):
literal[string]
identifier[server_config] = identifier[Config] . identifier[instance] (). identifier[get_section_config] ( literal[string] )
identifier[projects_path] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[server_config] . identifier[get] ( literal[string] , literal[string] ))
identifier[os] . identifier[makedirs] ( identifier[projects_path] , identifier[exist_ok] = keyword[True] )
keyword[try] :
keyword[for] identifier[project_path] keyword[in] identifier[os] . identifier[listdir] ( identifier[projects_path] ):
identifier[project_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[projects_path] , identifier[project_path] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[project_dir] ):
keyword[for] identifier[file] keyword[in] identifier[os] . identifier[listdir] ( identifier[project_dir] ):
keyword[if] identifier[file] . identifier[endswith] ( literal[string] ):
keyword[try] :
keyword[yield] keyword[from] identifier[self] . identifier[load_project] ( identifier[os] . identifier[path] . identifier[join] ( identifier[project_dir] , identifier[file] ), identifier[load] = keyword[False] )
keyword[except] ( identifier[aiohttp] . identifier[web_exceptions] . identifier[HTTPConflict] , identifier[NotImplementedError] ):
keyword[pass]
keyword[except] identifier[OSError] keyword[as] identifier[e] :
identifier[log] . identifier[error] ( identifier[str] ( identifier[e] )) | def load_projects(self):
"""
Preload the list of projects from disk
"""
server_config = Config.instance().get_section_config('Server')
projects_path = os.path.expanduser(server_config.get('projects_path', '~/GNS3/projects'))
os.makedirs(projects_path, exist_ok=True)
try:
for project_path in os.listdir(projects_path):
project_dir = os.path.join(projects_path, project_path)
if os.path.isdir(project_dir):
for file in os.listdir(project_dir):
if file.endswith('.gns3'):
try:
yield from self.load_project(os.path.join(project_dir, file), load=False) # depends on [control=['try'], data=[]]
except (aiohttp.web_exceptions.HTTPConflict, NotImplementedError):
pass # Skip not compatible projects # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['file']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['project_path']] # depends on [control=['try'], data=[]]
except OSError as e:
log.error(str(e)) # depends on [control=['except'], data=['e']] |
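A self-contained sketch of the same directory scan using only the standard library; the layout mirrors the `~/GNS3/projects` default above.

import os

def iter_gns3_files(projects_path="~/GNS3/projects"):
    # Yield every .gns3 file one directory level below projects_path,
    # mirroring the walk in load_projects above.
    projects_path = os.path.expanduser(projects_path)
    for entry in os.listdir(projects_path):
        project_dir = os.path.join(projects_path, entry)
        if os.path.isdir(project_dir):
            for name in os.listdir(project_dir):
                if name.endswith(".gns3"):
                    yield os.path.join(project_dir, name)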
def load_new_labware(container_name):
""" Load a labware in the new schema into a placeable.
:raises KeyError: If the labware name is not found
"""
defn = new_labware.load_definition_by_name(container_name)
labware_id = defn['otId']
saved_offset = _look_up_offsets(labware_id)
container = Container()
log.info(f"Container name {container_name}")
container.properties['type'] = container_name
container.properties['otId'] = labware_id
format = defn['parameters']['format']
container._coordinates = Vector(defn['cornerOffsetFromSlot'])
for well_name in itertools.chain(*defn['ordering']):
well_obj, well_pos = _load_new_well(
defn['wells'][well_name], saved_offset, format)
container.add(well_obj, well_name, well_pos)
return container | def function[load_new_labware, parameter[container_name]]:
constant[ Load a labware in the new schema into a placeable.
:raises KeyError: If the labware name is not found
]
variable[defn] assign[=] call[name[new_labware].load_definition_by_name, parameter[name[container_name]]]
variable[labware_id] assign[=] call[name[defn]][constant[otId]]
variable[saved_offset] assign[=] call[name[_look_up_offsets], parameter[name[labware_id]]]
variable[container] assign[=] call[name[Container], parameter[]]
call[name[log].info, parameter[<ast.JoinedStr object at 0x7da2044c0e50>]]
call[name[container].properties][constant[type]] assign[=] name[container_name]
call[name[container].properties][constant[otId]] assign[=] name[labware_id]
variable[format] assign[=] call[call[name[defn]][constant[parameters]]][constant[format]]
name[container]._coordinates assign[=] call[name[Vector], parameter[call[name[defn]][constant[cornerOffsetFromSlot]]]]
for taget[name[well_name]] in starred[call[name[itertools].chain, parameter[<ast.Starred object at 0x7da2044c11b0>]]] begin[:]
<ast.Tuple object at 0x7da2044c1cf0> assign[=] call[name[_load_new_well], parameter[call[call[name[defn]][constant[wells]]][name[well_name]], name[saved_offset], name[format]]]
call[name[container].add, parameter[name[well_obj], name[well_name], name[well_pos]]]
return[name[container]] | keyword[def] identifier[load_new_labware] ( identifier[container_name] ):
literal[string]
identifier[defn] = identifier[new_labware] . identifier[load_definition_by_name] ( identifier[container_name] )
identifier[labware_id] = identifier[defn] [ literal[string] ]
identifier[saved_offset] = identifier[_look_up_offsets] ( identifier[labware_id] )
identifier[container] = identifier[Container] ()
identifier[log] . identifier[info] ( literal[string] )
identifier[container] . identifier[properties] [ literal[string] ]= identifier[container_name]
identifier[container] . identifier[properties] [ literal[string] ]= identifier[labware_id]
identifier[format] = identifier[defn] [ literal[string] ][ literal[string] ]
identifier[container] . identifier[_coordinates] = identifier[Vector] ( identifier[defn] [ literal[string] ])
keyword[for] identifier[well_name] keyword[in] identifier[itertools] . identifier[chain] (* identifier[defn] [ literal[string] ]):
identifier[well_obj] , identifier[well_pos] = identifier[_load_new_well] (
identifier[defn] [ literal[string] ][ identifier[well_name] ], identifier[saved_offset] , identifier[format] )
identifier[container] . identifier[add] ( identifier[well_obj] , identifier[well_name] , identifier[well_pos] )
keyword[return] identifier[container] | def load_new_labware(container_name):
""" Load a labware in the new schema into a placeable.
:raises KeyError: If the labware name is not found
"""
defn = new_labware.load_definition_by_name(container_name)
labware_id = defn['otId']
saved_offset = _look_up_offsets(labware_id)
container = Container()
log.info(f'Container name {container_name}')
container.properties['type'] = container_name
container.properties['otId'] = labware_id
format = defn['parameters']['format']
container._coordinates = Vector(defn['cornerOffsetFromSlot'])
for well_name in itertools.chain(*defn['ordering']):
(well_obj, well_pos) = _load_new_well(defn['wells'][well_name], saved_offset, format)
container.add(well_obj, well_name, well_pos) # depends on [control=['for'], data=['well_name']]
return container |
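A hedged usage sketch, assuming the legacy Opentrons API that defines `load_new_labware` is importable; the labware name is hypothetical, and a KeyError is raised when no definition matches.

try:
    plate = load_new_labware('96-flat')   # hypothetical labware name
    print(plate.properties['otId'])       # set by load_new_labware above
except KeyError:
    print('definition not bundled with this install')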
def make_public(self, container, ttl=None):
"""
Enables CDN access for the specified container, and optionally sets the
TTL for the container.
"""
return self._set_cdn_access(container, public=True, ttl=ttl) | def function[make_public, parameter[self, container, ttl]]:
constant[
Enables CDN access for the specified container, and optionally sets the
TTL for the container.
]
return[call[name[self]._set_cdn_access, parameter[name[container]]]] | keyword[def] identifier[make_public] ( identifier[self] , identifier[container] , identifier[ttl] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_set_cdn_access] ( identifier[container] , identifier[public] = keyword[True] , identifier[ttl] = identifier[ttl] ) | def make_public(self, container, ttl=None):
"""
Enables CDN access for the specified container, and optionally sets the
TTL for the container.
"""
return self._set_cdn_access(container, public=True, ttl=ttl) |
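A usage sketch assuming a pyrax-style CloudFiles client bound to `cf`; the container name and 900-second TTL are illustrative only.

# `cf` is a hypothetical CloudFiles client exposing make_public().
cf.make_public("site-assets", ttl=900)   # enable CDN with an illustrative TTL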
def _weight_opacity(weight, weight_range):
# type: (float, float) -> str
""" Return opacity value for given weight as a string.
"""
min_opacity = 0.8
if np.isclose(weight, 0) and np.isclose(weight_range, 0):
rel_weight = 0.0
else:
rel_weight = abs(weight) / weight_range
return '{:.2f}'.format(min_opacity + (1 - min_opacity) * rel_weight) | def function[_weight_opacity, parameter[weight, weight_range]]:
constant[ Return opacity value for given weight as a string.
]
variable[min_opacity] assign[=] constant[0.8]
if <ast.BoolOp object at 0x7da1b1c79210> begin[:]
variable[rel_weight] assign[=] constant[0.0]
return[call[constant[{:.2f}].format, parameter[binary_operation[name[min_opacity] + binary_operation[binary_operation[constant[1] - name[min_opacity]] * name[rel_weight]]]]]] | keyword[def] identifier[_weight_opacity] ( identifier[weight] , identifier[weight_range] ):
literal[string]
identifier[min_opacity] = literal[int]
keyword[if] identifier[np] . identifier[isclose] ( identifier[weight] , literal[int] ) keyword[and] identifier[np] . identifier[isclose] ( identifier[weight_range] , literal[int] ):
identifier[rel_weight] = literal[int]
keyword[else] :
identifier[rel_weight] = identifier[abs] ( identifier[weight] )/ identifier[weight_range]
keyword[return] literal[string] . identifier[format] ( identifier[min_opacity] +( literal[int] - identifier[min_opacity] )* identifier[rel_weight] ) | def _weight_opacity(weight, weight_range):
# type: (float, float) -> str
' Return opacity value for given weight as a string.\n '
min_opacity = 0.8
if np.isclose(weight, 0) and np.isclose(weight_range, 0):
rel_weight = 0.0 # depends on [control=['if'], data=[]]
else:
rel_weight = abs(weight) / weight_range
return '{:.2f}'.format(min_opacity + (1 - min_opacity) * rel_weight) |
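A self-contained check of the mapping above: |weight| / weight_range is rescaled linearly into [0.8, 1.0], with the both-zero case pinned to the minimum opacity.

import numpy as np

def weight_opacity(weight, weight_range):
    # Same logic as _weight_opacity above, reproduced for a standalone check.
    min_opacity = 0.8
    if np.isclose(weight, 0) and np.isclose(weight_range, 0):
        rel_weight = 0.0
    else:
        rel_weight = abs(weight) / weight_range
    return '{:.2f}'.format(min_opacity + (1 - min_opacity) * rel_weight)

assert weight_opacity(0.0, 0.0) == '0.80'  # degenerate case
assert weight_opacity(2.0, 4.0) == '0.90'  # halfway up the range
assert weight_opacity(4.0, 4.0) == '1.00'  # strongest weight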
def __parameter_default(self, field):
"""Returns default value of field if it has one.
Args:
field: A simple field.
Returns:
The default value of the field, if any exists, with the exception of an
enum field, which will have its value cast to a string.
"""
if field.default:
if isinstance(field, messages.EnumField):
return field.default.name
else:
return field.default | def function[__parameter_default, parameter[self, field]]:
constant[Returns default value of field if it has one.
Args:
field: A simple field.
Returns:
The default value of the field, if any exists, with the exception of an
enum field, which will have its value cast to a string.
]
if name[field].default begin[:]
if call[name[isinstance], parameter[name[field], name[messages].EnumField]] begin[:]
return[name[field].default.name] | keyword[def] identifier[__parameter_default] ( identifier[self] , identifier[field] ):
literal[string]
keyword[if] identifier[field] . identifier[default] :
keyword[if] identifier[isinstance] ( identifier[field] , identifier[messages] . identifier[EnumField] ):
keyword[return] identifier[field] . identifier[default] . identifier[name]
keyword[else] :
keyword[return] identifier[field] . identifier[default] | def __parameter_default(self, field):
"""Returns default value of field if it has one.
Args:
field: A simple field.
Returns:
The default value of the field, if any exists, with the exception of an
enum field, which will have its value cast to a string.
"""
if field.default:
if isinstance(field, messages.EnumField):
return field.default.name # depends on [control=['if'], data=[]]
else:
return field.default # depends on [control=['if'], data=[]] |
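A minimal sketch of the same rule using the standard library's enum in place of protorpc's messages.EnumField, which is what the original isinstance check targets.

import enum

class Color(enum.Enum):
    RED = 1

def parameter_default(default):
    # Truthiness check mirrors the original: falsy defaults yield None.
    if default:
        if isinstance(default, enum.Enum):
            return default.name  # enum defaults are cast to their name
        return default

assert parameter_default(Color.RED) == 'RED'
assert parameter_default(42) == 42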
def noinject(module_name=None, module_prefix='[???]', DEBUG=False, module=None, N=0, via=None):
"""
Use in modules that do not have inject in them
Does not inject anything into the module. Just lets utool know that a module
is being imported so the import order can be debuged
"""
if PRINT_INJECT_ORDER:
from utool._internal import meta_util_dbg
callername = meta_util_dbg.get_caller_name(N=N + 1, strict=False)
lineno = meta_util_dbg.get_caller_lineno(N=N + 1, strict=False)
suff = ' via %s' % (via,) if via else ''
fmtdict = dict(N=N, lineno=lineno, callername=callername,
modname=module_name, suff=suff)
msg = '[util_inject] N={N} {modname} is imported by {callername} at lineno={lineno}{suff}'.format(**fmtdict)
if DEBUG_SLOW_IMPORT:
global PREV_MODNAME
seconds = tt.toc()
import_times[(PREV_MODNAME, module_name)] = seconds
PREV_MODNAME = module_name
builtins.print(msg)
if DEBUG_SLOW_IMPORT:
tt.tic()
# builtins.print(elapsed)
if EXIT_ON_INJECT_MODNAME == module_name:
builtins.print('...exiting')
assert False, 'exit in inject requested' | def function[noinject, parameter[module_name, module_prefix, DEBUG, module, N, via]]:
constant[
Use in modules that do not have inject in them
Does not inject anything into the module. Just lets utool know that a module
is being imported so the import order can be debugged
]
if name[PRINT_INJECT_ORDER] begin[:]
from relative_module[utool._internal] import module[meta_util_dbg]
variable[callername] assign[=] call[name[meta_util_dbg].get_caller_name, parameter[]]
variable[lineno] assign[=] call[name[meta_util_dbg].get_caller_lineno, parameter[]]
variable[suff] assign[=] <ast.IfExp object at 0x7da1b24ebaf0>
variable[fmtdict] assign[=] call[name[dict], parameter[]]
variable[msg] assign[=] call[constant[[util_inject] N={N} {modname} is imported by {callername} at lineno={lineno}{suff}].format, parameter[]]
if name[DEBUG_SLOW_IMPORT] begin[:]
<ast.Global object at 0x7da1b24ebee0>
variable[seconds] assign[=] call[name[tt].toc, parameter[]]
call[name[import_times]][tuple[[<ast.Name object at 0x7da1b24e82b0>, <ast.Name object at 0x7da1b24e8a90>]]] assign[=] name[seconds]
variable[PREV_MODNAME] assign[=] name[module_name]
call[name[builtins].print, parameter[name[msg]]]
if name[DEBUG_SLOW_IMPORT] begin[:]
call[name[tt].tic, parameter[]]
if compare[name[EXIT_ON_INJECT_MODNAME] equal[==] name[module_name]] begin[:]
call[name[builtins].print, parameter[constant[...exiting]]]
assert[constant[False]] | keyword[def] identifier[noinject] ( identifier[module_name] = keyword[None] , identifier[module_prefix] = literal[string] , identifier[DEBUG] = keyword[False] , identifier[module] = keyword[None] , identifier[N] = literal[int] , identifier[via] = keyword[None] ):
literal[string]
keyword[if] identifier[PRINT_INJECT_ORDER] :
keyword[from] identifier[utool] . identifier[_internal] keyword[import] identifier[meta_util_dbg]
identifier[callername] = identifier[meta_util_dbg] . identifier[get_caller_name] ( identifier[N] = identifier[N] + literal[int] , identifier[strict] = keyword[False] )
identifier[lineno] = identifier[meta_util_dbg] . identifier[get_caller_lineno] ( identifier[N] = identifier[N] + literal[int] , identifier[strict] = keyword[False] )
identifier[suff] = literal[string] %( identifier[via] ,) keyword[if] identifier[via] keyword[else] literal[string]
identifier[fmtdict] = identifier[dict] ( identifier[N] = identifier[N] , identifier[lineno] = identifier[lineno] , identifier[callername] = identifier[callername] ,
identifier[modname] = identifier[module_name] , identifier[suff] = identifier[suff] )
identifier[msg] = literal[string] . identifier[format] (** identifier[fmtdict] )
keyword[if] identifier[DEBUG_SLOW_IMPORT] :
keyword[global] identifier[PREV_MODNAME]
identifier[seconds] = identifier[tt] . identifier[toc] ()
identifier[import_times] [( identifier[PREV_MODNAME] , identifier[module_name] )]= identifier[seconds]
identifier[PREV_MODNAME] = identifier[module_name]
identifier[builtins] . identifier[print] ( identifier[msg] )
keyword[if] identifier[DEBUG_SLOW_IMPORT] :
identifier[tt] . identifier[tic] ()
keyword[if] identifier[EXIT_ON_INJECT_MODNAME] == identifier[module_name] :
identifier[builtins] . identifier[print] ( literal[string] )
keyword[assert] keyword[False] , literal[string] | def noinject(module_name=None, module_prefix='[???]', DEBUG=False, module=None, N=0, via=None):
"""
Use in modules that do not have inject in them
Does not inject anything into the module. Just lets utool know that a module
is being imported so the import order can be debugged
"""
if PRINT_INJECT_ORDER:
from utool._internal import meta_util_dbg
callername = meta_util_dbg.get_caller_name(N=N + 1, strict=False)
lineno = meta_util_dbg.get_caller_lineno(N=N + 1, strict=False)
suff = ' via %s' % (via,) if via else ''
fmtdict = dict(N=N, lineno=lineno, callername=callername, modname=module_name, suff=suff)
msg = '[util_inject] N={N} {modname} is imported by {callername} at lineno={lineno}{suff}'.format(**fmtdict)
if DEBUG_SLOW_IMPORT:
global PREV_MODNAME
seconds = tt.toc()
import_times[PREV_MODNAME, module_name] = seconds
PREV_MODNAME = module_name # depends on [control=['if'], data=[]]
builtins.print(msg)
if DEBUG_SLOW_IMPORT:
tt.tic() # depends on [control=['if'], data=[]]
# builtins.print(elapsed)
if EXIT_ON_INJECT_MODNAME == module_name:
builtins.print('...exiting')
assert False, 'exit in inject requested' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
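A hedged usage sketch: called at module import time so utool can log import order; the import path is an assumption (noinject lives in utool's util_inject module), and '[mymod]' is an illustrative prefix.

# At the top of a module that opts out of utool's inject machinery.
from utool import util_inject
util_inject.noinject(__name__, '[mymod]')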
def generate_samples(order, domain=1, rule="R", antithetic=None):
"""
Sample generator.
Args:
order (int):
Sample order. Determines the number of samples to create.
domain (Dist, int, numpy.ndarray):
Defines the space where the samples are generated. If integer is
provided, the space ``[0, 1]^domain`` will be used. If array-like
object is provided, a hypercube it defines will be used. If
distribution, the domain it spans will be used.
rule (str):
rule for generating samples. The various rules are listed in
:mod:`chaospy.distributions.sampler.generator`.
antithetic (tuple):
Sequence of boolean values. Represents the axes to mirror using
antithetic variables.
"""
logger = logging.getLogger(__name__)
logger.debug("generating random samples using rule %s", rule)
rule = rule.upper()
if isinstance(domain, int):
dim = domain
trans = lambda x_data: x_data
elif isinstance(domain, (tuple, list, numpy.ndarray)):
domain = numpy.asfarray(domain)
if len(domain.shape) < 2:
dim = 1
else:
dim = len(domain[0])
trans = lambda x_data: ((domain[1]-domain[0])*x_data.T + domain[0]).T
else:
dist = domain
dim = len(dist)
trans = dist.inv
if antithetic is not None:
from .antithetic import create_antithetic_variates
antithetic = numpy.array(antithetic, dtype=bool).flatten()
if antithetic.size == 1 and dim > 1:
antithetic = numpy.repeat(antithetic, dim)
size = numpy.sum(1*numpy.array(antithetic))
order_saved = order
order = int(numpy.log(order - dim))
order = order if order > 1 else 1
while order**dim < order_saved:
order += 1
trans_ = trans
trans = lambda x_data: trans_(
create_antithetic_variates(x_data, antithetic)[:, :order_saved])
assert rule in SAMPLERS, "rule not recognised"
sampler = SAMPLERS[rule]
x_data = trans(sampler(order=order, dim=dim))
logger.debug("order: %d, dim: %d -> shape: %s", order, dim, x_data.shape)
return x_data | def function[generate_samples, parameter[order, domain, rule, antithetic]]:
constant[
Sample generator.
Args:
order (int):
Sample order. Determines the number of samples to create.
domain (Dist, int, numpy.ndarray):
Defines the space where the samples are generated. If integer is
provided, the space ``[0, 1]^domain`` will be used. If array-like
object is provided, a hypercube it defines will be used. If
distribution, the domain it spans will be used.
rule (str):
rule for generating samples. The various rules are listed in
:mod:`chaospy.distributions.sampler.generator`.
antithetic (tuple):
Sequence of boolean values. Represents the axes to mirror using
antithetic variables.
]
variable[logger] assign[=] call[name[logging].getLogger, parameter[name[__name__]]]
call[name[logger].debug, parameter[constant[generating random samples using rule %s], name[rule]]]
variable[rule] assign[=] call[name[rule].upper, parameter[]]
if call[name[isinstance], parameter[name[domain], name[int]]] begin[:]
variable[dim] assign[=] name[domain]
variable[trans] assign[=] <ast.Lambda object at 0x7da2043450c0>
if compare[name[antithetic] is_not constant[None]] begin[:]
from relative_module[antithetic] import module[create_antithetic_variates]
variable[antithetic] assign[=] call[call[name[numpy].array, parameter[name[antithetic]]].flatten, parameter[]]
if <ast.BoolOp object at 0x7da204347010> begin[:]
variable[antithetic] assign[=] call[name[numpy].repeat, parameter[name[antithetic], name[dim]]]
variable[size] assign[=] call[name[numpy].sum, parameter[binary_operation[constant[1] * call[name[numpy].array, parameter[name[antithetic]]]]]]
variable[order_saved] assign[=] name[order]
variable[order] assign[=] call[name[int], parameter[call[name[numpy].log, parameter[binary_operation[name[order] - name[dim]]]]]]
variable[order] assign[=] <ast.IfExp object at 0x7da18f812f20>
while compare[binary_operation[name[order] ** name[dim]] less[<] name[order_saved]] begin[:]
<ast.AugAssign object at 0x7da18f811f30>
variable[trans_] assign[=] name[trans]
variable[trans] assign[=] <ast.Lambda object at 0x7da18f8137c0>
assert[compare[name[rule] in name[SAMPLERS]]]
variable[sampler] assign[=] call[name[SAMPLERS]][name[rule]]
variable[x_data] assign[=] call[name[trans], parameter[call[name[sampler], parameter[]]]]
call[name[logger].debug, parameter[constant[order: %d, dim: %d -> shape: %s], name[order], name[dim], name[x_data].shape]]
return[name[x_data]] | keyword[def] identifier[generate_samples] ( identifier[order] , identifier[domain] = literal[int] , identifier[rule] = literal[string] , identifier[antithetic] = keyword[None] ):
literal[string]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[rule] )
identifier[rule] = identifier[rule] . identifier[upper] ()
keyword[if] identifier[isinstance] ( identifier[domain] , identifier[int] ):
identifier[dim] = identifier[domain]
identifier[trans] = keyword[lambda] identifier[x_data] : identifier[x_data]
keyword[elif] identifier[isinstance] ( identifier[domain] ,( identifier[tuple] , identifier[list] , identifier[numpy] . identifier[ndarray] )):
identifier[domain] = identifier[numpy] . identifier[asfarray] ( identifier[domain] )
keyword[if] identifier[len] ( identifier[domain] . identifier[shape] )< literal[int] :
identifier[dim] = literal[int]
keyword[else] :
identifier[dim] = identifier[len] ( identifier[domain] [ literal[int] ])
identifier[trans] = keyword[lambda] identifier[x_data] :(( identifier[domain] [ literal[int] ]- identifier[domain] [ literal[int] ])* identifier[x_data] . identifier[T] + identifier[domain] [ literal[int] ]). identifier[T]
keyword[else] :
identifier[dist] = identifier[domain]
identifier[dim] = identifier[len] ( identifier[dist] )
identifier[trans] = identifier[dist] . identifier[inv]
keyword[if] identifier[antithetic] keyword[is] keyword[not] keyword[None] :
keyword[from] . identifier[antithetic] keyword[import] identifier[create_antithetic_variates]
identifier[antithetic] = identifier[numpy] . identifier[array] ( identifier[antithetic] , identifier[dtype] = identifier[bool] ). identifier[flatten] ()
keyword[if] identifier[antithetic] . identifier[size] == literal[int] keyword[and] identifier[dim] > literal[int] :
identifier[antithetic] = identifier[numpy] . identifier[repeat] ( identifier[antithetic] , identifier[dim] )
identifier[size] = identifier[numpy] . identifier[sum] ( literal[int] * identifier[numpy] . identifier[array] ( identifier[antithetic] ))
identifier[order_saved] = identifier[order]
identifier[order] = identifier[int] ( identifier[numpy] . identifier[log] ( identifier[order] - identifier[dim] ))
identifier[order] = identifier[order] keyword[if] identifier[order] > literal[int] keyword[else] literal[int]
keyword[while] identifier[order] ** identifier[dim] < identifier[order_saved] :
identifier[order] += literal[int]
identifier[trans_] = identifier[trans]
identifier[trans] = keyword[lambda] identifier[x_data] : identifier[trans_] (
identifier[create_antithetic_variates] ( identifier[x_data] , identifier[antithetic] )[:,: identifier[order_saved] ])
keyword[assert] identifier[rule] keyword[in] identifier[SAMPLERS] , literal[string]
identifier[sampler] = identifier[SAMPLERS] [ identifier[rule] ]
identifier[x_data] = identifier[trans] ( identifier[sampler] ( identifier[order] = identifier[order] , identifier[dim] = identifier[dim] ))
identifier[logger] . identifier[debug] ( literal[string] , identifier[order] , identifier[dim] , identifier[x_data] . identifier[shape] )
keyword[return] identifier[x_data] | def generate_samples(order, domain=1, rule='R', antithetic=None):
"""
Sample generator.
Args:
order (int):
Sample order. Determines the number of samples to create.
domain (Dist, int, numpy.ndarray):
Defines the space where the samples are generated. If integer is
provided, the space ``[0, 1]^domain`` will be used. If array-like
object is provided, a hypercube it defines will be used. If
distribution, the domain it spans will be used.
rule (str):
rule for generating samples. The various rules are listed in
:mod:`chaospy.distributions.sampler.generator`.
antithetic (tuple):
Sequence of boolean values. Represents the axes to mirror using
antithetic variables.
"""
logger = logging.getLogger(__name__)
logger.debug('generating random samples using rule %s', rule)
rule = rule.upper()
if isinstance(domain, int):
dim = domain
trans = lambda x_data: x_data # depends on [control=['if'], data=[]]
elif isinstance(domain, (tuple, list, numpy.ndarray)):
domain = numpy.asfarray(domain)
if len(domain.shape) < 2:
dim = 1 # depends on [control=['if'], data=[]]
else:
dim = len(domain[0])
trans = lambda x_data: ((domain[1] - domain[0]) * x_data.T + domain[0]).T # depends on [control=['if'], data=[]]
else:
dist = domain
dim = len(dist)
trans = dist.inv
if antithetic is not None:
from .antithetic import create_antithetic_variates
antithetic = numpy.array(antithetic, dtype=bool).flatten()
if antithetic.size == 1 and dim > 1:
antithetic = numpy.repeat(antithetic, dim) # depends on [control=['if'], data=[]]
size = numpy.sum(1 * numpy.array(antithetic))
order_saved = order
order = int(numpy.log(order - dim))
order = order if order > 1 else 1
while order ** dim < order_saved:
order += 1 # depends on [control=['while'], data=[]]
trans_ = trans
trans = lambda x_data: trans_(create_antithetic_variates(x_data, antithetic)[:, :order_saved]) # depends on [control=['if'], data=['antithetic']]
assert rule in SAMPLERS, 'rule not recognised'
sampler = SAMPLERS[rule]
x_data = trans(sampler(order=order, dim=dim))
logger.debug('order: %d, dim: %d -> shape: %s', order, dim, x_data.shape)
return x_data |
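A self-contained check of the hypercube branch above: with an array-like domain, raw samples in [0, 1] are rescaled column-wise via ((hi - lo) * x.T + lo).T, shown here with plain numpy.

import numpy as np

domain = np.array([[0.0, -1.0],    # per-dimension lower bounds
                   [2.0,  1.0]])   # per-dimension upper bounds
x_data = np.array([[0.0, 0.5, 1.0],   # raw samples in [0, 1], dim 0
                   [0.0, 0.5, 1.0]])  # raw samples in [0, 1], dim 1

scaled = ((domain[1] - domain[0]) * x_data.T + domain[0]).T
assert np.allclose(scaled, [[0.0, 1.0, 2.0],
                            [-1.0, 0.0, 1.0]])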
def print_devices():
"""
Simple test function which prints out all devices found by evdev
"""
def device_verbose_info(device: InputDevice) -> {}:
"""
Gather and format as much info as possible about the supplied InputDevice. Used mostly for debugging at this point.
:param device:
An InputDevice to examine
:return:
A dict containing as much information as possible about the input device.
"""
def axis_name(axis_code):
try:
return ecodes.ABS[axis_code]
except KeyError:
return 'EXTENDED_CODE_{}'.format(axis_code)
def rel_axis_name(axis_code):
try:
return ecodes.REL[axis_code]
except KeyError:
return 'EXTENDED_CODE_{}'.format(axis_code)
axes = None
if has_abs_axes(device):
axes = {
axis_name(axis_code): {'code': axis_code, 'min': axis_info.min, 'max': axis_info.max,
'fuzz': axis_info.fuzz,
'flat': axis_info.flat, 'res': axis_info.resolution} for
axis_code, axis_info in device.capabilities().get(3)}
rel_axes = None
if has_rel_axes(device):
print(device.capabilities().get(2))
rel_axes = {
rel_axis_name(axis_code): {'code': axis_code} for
axis_code in device.capabilities().get(2)}
buttons = None
if has_buttons(device):
buttons = {code: names for (names, code) in
dict(util.resolve_ecodes_dict({1: device.capabilities().get(1)})).get(('EV_KEY', 1))}
return {'fn': device.fn, 'path': device.path, 'name': device.name, 'phys': device.phys, 'uniq': device.uniq,
'vendor': device.info.vendor, 'product': device.info.product, 'version': device.info.version,
'bus': device.info.bustype, 'axes': axes, 'rel_axes': rel_axes, 'buttons': buttons,
'unique_name': unique_name(device)}
def has_abs_axes(device):
return device.capabilities().get(3) is not None
def has_rel_axes(device):
return device.capabilities().get(2) is not None
def has_buttons(device):
return device.capabilities().get(1) is not None
_check_import()
for d in [InputDevice(fn) for fn in list_devices()]:
if has_abs_axes(d) or has_rel_axes(d):
pp = pprint.PrettyPrinter(indent=2, width=100)
pp.pprint(device_verbose_info(d)) | def function[print_devices, parameter[]]:
constant[
Simple test function which prints out all devices found by evdev
]
def function[device_verbose_info, parameter[device]]:
constant[
Gather and format as much info as possible about the supplied InputDevice. Used mostly for debugging at this point.
:param device:
An InputDevice to examine
:return:
A dict containing as much information as possible about the input device.
]
def function[axis_name, parameter[axis_code]]:
<ast.Try object at 0x7da20c6c7df0>
def function[rel_axis_name, parameter[axis_code]]:
<ast.Try object at 0x7da20c6c54b0>
variable[axes] assign[=] constant[None]
if call[name[has_abs_axes], parameter[name[device]]] begin[:]
variable[axes] assign[=] <ast.DictComp object at 0x7da20c6c5030>
variable[rel_axes] assign[=] constant[None]
if call[name[has_rel_axes], parameter[name[device]]] begin[:]
call[name[print], parameter[call[call[name[device].capabilities, parameter[]].get, parameter[constant[2]]]]]
variable[rel_axes] assign[=] <ast.DictComp object at 0x7da20c6c6650>
variable[buttons] assign[=] constant[None]
if call[name[has_buttons], parameter[name[device]]] begin[:]
variable[buttons] assign[=] <ast.DictComp object at 0x7da20c6c4220>
return[dictionary[[<ast.Constant object at 0x7da2047eb4f0>, <ast.Constant object at 0x7da2047e8a60>, <ast.Constant object at 0x7da2047eb550>, <ast.Constant object at 0x7da2047ea7a0>, <ast.Constant object at 0x7da2047eac20>, <ast.Constant object at 0x7da2047eafb0>, <ast.Constant object at 0x7da2047ebac0>, <ast.Constant object at 0x7da2047eb3a0>, <ast.Constant object at 0x7da2047ea440>, <ast.Constant object at 0x7da2044c1030>, <ast.Constant object at 0x7da2044c2140>, <ast.Constant object at 0x7da2044c1090>, <ast.Constant object at 0x7da2044c3bb0>], [<ast.Attribute object at 0x7da2044c33d0>, <ast.Attribute object at 0x7da2044c03a0>, <ast.Attribute object at 0x7da2044c2200>, <ast.Attribute object at 0x7da2044c1930>, <ast.Attribute object at 0x7da2044c0280>, <ast.Attribute object at 0x7da20c6c4ee0>, <ast.Attribute object at 0x7da20c6c7520>, <ast.Attribute object at 0x7da20c6c7610>, <ast.Attribute object at 0x7da20c6c4be0>, <ast.Name object at 0x7da20c6c47c0>, <ast.Name object at 0x7da20c6c5cf0>, <ast.Name object at 0x7da20c6c5c60>, <ast.Call object at 0x7da20c6c64a0>]]]
def function[has_abs_axes, parameter[device]]:
return[compare[call[call[name[device].capabilities, parameter[]].get, parameter[constant[3]]] is_not constant[None]]]
def function[has_rel_axes, parameter[device]]:
return[compare[call[call[name[device].capabilities, parameter[]].get, parameter[constant[2]]] is_not constant[None]]]
def function[has_buttons, parameter[device]]:
return[compare[call[call[name[device].capabilities, parameter[]].get, parameter[constant[1]]] is_not constant[None]]]
call[name[_check_import], parameter[]]
for taget[name[d]] in starred[<ast.ListComp object at 0x7da20c6c5630>] begin[:]
if <ast.BoolOp object at 0x7da20c6c5810> begin[:]
variable[pp] assign[=] call[name[pprint].PrettyPrinter, parameter[]]
call[name[pp].pprint, parameter[call[name[device_verbose_info], parameter[name[d]]]]] | keyword[def] identifier[print_devices] ():
literal[string]
keyword[def] identifier[device_verbose_info] ( identifier[device] : identifier[InputDevice] )->{}:
literal[string]
keyword[def] identifier[axis_name] ( identifier[axis_code] ):
keyword[try] :
keyword[return] identifier[ecodes] . identifier[ABS] [ identifier[axis_code] ]
keyword[except] identifier[KeyError] :
keyword[return] literal[string] . identifier[format] ( identifier[axis_code] )
keyword[def] identifier[rel_axis_name] ( identifier[axis_code] ):
keyword[try] :
keyword[return] identifier[ecodes] . identifier[REL] [ identifier[axis_code] ]
keyword[except] identifier[KeyError] :
keyword[return] literal[string] . identifier[format] ( identifier[axis_code] )
identifier[axes] = keyword[None]
keyword[if] identifier[has_abs_axes] ( identifier[device] ):
identifier[axes] ={
identifier[axis_name] ( identifier[axis_code] ):{ literal[string] : identifier[axis_code] , literal[string] : identifier[axis_info] . identifier[min] , literal[string] : identifier[axis_info] . identifier[max] ,
literal[string] : identifier[axis_info] . identifier[fuzz] ,
literal[string] : identifier[axis_info] . identifier[flat] , literal[string] : identifier[axis_info] . identifier[resolution] } keyword[for]
identifier[axis_code] , identifier[axis_info] keyword[in] identifier[device] . identifier[capabilities] (). identifier[get] ( literal[int] )}
identifier[rel_axes] = keyword[None]
keyword[if] identifier[has_rel_axes] ( identifier[device] ):
identifier[print] ( identifier[device] . identifier[capabilities] (). identifier[get] ( literal[int] ))
identifier[rel_axes] ={
identifier[rel_axis_name] ( identifier[axis_code] ):{ literal[string] : identifier[axis_code] } keyword[for]
identifier[axis_code] keyword[in] identifier[device] . identifier[capabilities] (). identifier[get] ( literal[int] )}
identifier[buttons] = keyword[None]
keyword[if] identifier[has_buttons] ( identifier[device] ):
identifier[buttons] ={ identifier[code] : identifier[names] keyword[for] ( identifier[names] , identifier[code] ) keyword[in]
identifier[dict] ( identifier[util] . identifier[resolve_ecodes_dict] ({ literal[int] : identifier[device] . identifier[capabilities] (). identifier[get] ( literal[int] )})). identifier[get] (( literal[string] , literal[int] ))}
keyword[return] { literal[string] : identifier[device] . identifier[fn] , literal[string] : identifier[device] . identifier[path] , literal[string] : identifier[device] . identifier[name] , literal[string] : identifier[device] . identifier[phys] , literal[string] : identifier[device] . identifier[uniq] ,
literal[string] : identifier[device] . identifier[info] . identifier[vendor] , literal[string] : identifier[device] . identifier[info] . identifier[product] , literal[string] : identifier[device] . identifier[info] . identifier[version] ,
literal[string] : identifier[device] . identifier[info] . identifier[bustype] , literal[string] : identifier[axes] , literal[string] : identifier[rel_axes] , literal[string] : identifier[buttons] ,
literal[string] : identifier[unique_name] ( identifier[device] )}
keyword[def] identifier[has_abs_axes] ( identifier[device] ):
keyword[return] identifier[device] . identifier[capabilities] (). identifier[get] ( literal[int] ) keyword[is] keyword[not] keyword[None]
keyword[def] identifier[has_rel_axes] ( identifier[device] ):
keyword[return] identifier[device] . identifier[capabilities] (). identifier[get] ( literal[int] ) keyword[is] keyword[not] keyword[None]
keyword[def] identifier[has_buttons] ( identifier[device] ):
keyword[return] identifier[device] . identifier[capabilities] (). identifier[get] ( literal[int] ) keyword[is] keyword[not] keyword[None]
identifier[_check_import] ()
keyword[for] identifier[d] keyword[in] [ identifier[InputDevice] ( identifier[fn] ) keyword[for] identifier[fn] keyword[in] identifier[list_devices] ()]:
keyword[if] identifier[has_abs_axes] ( identifier[d] ) keyword[or] identifier[has_rel_axes] ( identifier[d] ):
identifier[pp] = identifier[pprint] . identifier[PrettyPrinter] ( identifier[indent] = literal[int] , identifier[width] = literal[int] )
identifier[pp] . identifier[pprint] ( identifier[device_verbose_info] ( identifier[d] )) | def print_devices():
"""
Simple test function which prints out all devices found by evdev
"""
def device_verbose_info(device: InputDevice) -> {}:
"""
Gather and format as much info as possible about the supplied InputDevice. Used mostly for debugging at this point.
:param device:
An InputDevice to examine
:return:
A dict containing as much information as possible about the input device.
"""
def axis_name(axis_code):
try:
return ecodes.ABS[axis_code] # depends on [control=['try'], data=[]]
except KeyError:
return 'EXTENDED_CODE_{}'.format(axis_code) # depends on [control=['except'], data=[]]
def rel_axis_name(axis_code):
try:
return ecodes.REL[axis_code] # depends on [control=['try'], data=[]]
except KeyError:
return 'EXTENDED_CODE_{}'.format(axis_code) # depends on [control=['except'], data=[]]
axes = None
if has_abs_axes(device):
axes = {axis_name(axis_code): {'code': axis_code, 'min': axis_info.min, 'max': axis_info.max, 'fuzz': axis_info.fuzz, 'flat': axis_info.flat, 'res': axis_info.resolution} for (axis_code, axis_info) in device.capabilities().get(3)} # depends on [control=['if'], data=[]]
rel_axes = None
if has_rel_axes(device):
print(device.capabilities().get(2))
rel_axes = {rel_axis_name(axis_code): {'code': axis_code} for axis_code in device.capabilities().get(2)} # depends on [control=['if'], data=[]]
buttons = None
if has_buttons(device):
buttons = {code: names for (names, code) in dict(util.resolve_ecodes_dict({1: device.capabilities().get(1)})).get(('EV_KEY', 1))} # depends on [control=['if'], data=[]]
return {'fn': device.fn, 'path': device.path, 'name': device.name, 'phys': device.phys, 'uniq': device.uniq, 'vendor': device.info.vendor, 'product': device.info.product, 'version': device.info.version, 'bus': device.info.bustype, 'axes': axes, 'rel_axes': rel_axes, 'buttons': buttons, 'unique_name': unique_name(device)}
def has_abs_axes(device):
return device.capabilities().get(3) is not None
def has_rel_axes(device):
return device.capabilities().get(2) is not None
def has_buttons(device):
return device.capabilities().get(1) is not None
_check_import()
for d in [InputDevice(fn) for fn in list_devices()]:
if has_abs_axes(d) or has_rel_axes(d):
pp = pprint.PrettyPrinter(indent=2, width=100)
pp.pprint(device_verbose_info(d)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['d']] |
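A hedged usage sketch assuming python-evdev is installed and /dev/input is readable (typically requires membership in the input group on Linux).

from evdev import InputDevice, list_devices

# Enumerate devices much as print_devices does, without the verbose dump.
for path in list_devices():
    dev = InputDevice(path)
    print(dev.path, dev.name)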
def dir_type(arg):
"""An argparse type representing a valid directory."""
if not os.path.isdir(arg):
raise argparse.ArgumentTypeError("{0} is not a valid directory".format(repr(arg)))
return arg | def function[dir_type, parameter[arg]]:
constant[An argparse type representing a valid directory.]
if <ast.UnaryOp object at 0x7da2054a6da0> begin[:]
<ast.Raise object at 0x7da2054a5b10>
return[name[arg]] | keyword[def] identifier[dir_type] ( identifier[arg] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[arg] ):
keyword[raise] identifier[argparse] . identifier[ArgumentTypeError] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[arg] )))
keyword[return] identifier[arg] | def dir_type(arg):
"""An argparse type representing a valid directory."""
if not os.path.isdir(arg):
raise argparse.ArgumentTypeError('{0} is not a valid directory'.format(repr(arg))) # depends on [control=['if'], data=[]]
return arg |
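A runnable usage sketch wiring dir_type into argparse; the --workdir flag is illustrative, and dir_type is the validator defined above.

import argparse
import os

parser = argparse.ArgumentParser()
# --workdir is an illustrative flag; dir_type rejects non-directories.
parser.add_argument('--workdir', type=dir_type, default=os.getcwd())
args = parser.parse_args(['--workdir', os.getcwd()])
print(args.workdir)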