code | code_sememe | token_type | code_dependency
---|---|---|---|
def tgv_phantom(space, edge_smoothing=0.2):
"""Piecewise affine phantom.
This phantom is taken from [Bre+2010] and includes both linearly varying
regions and sharp discontinuities. It is designed to work well with
Total Generalized Variation (TGV) type regularization.
Parameters
----------
space : `DiscreteLp`, 2 dimensional
Discretized space in which the phantom is supposed to be created.
Needs to be two-dimensional.
edge_smoothing : nonnegative float, optional
Smoothing of the edges of the phantom, given as smoothing width in
units of minimum pixel size.
Returns
-------
phantom : ``space``-element
The generated phantom in ``space``. Values have range [0, 1].
Notes
-----
The original phantom is given by a specific image. In this implementation,
we extracted the underlying parameters and the phantom thus works with
spaces of any shape. Due to this, small variations may occur when compared
to the original phantom.
References
----------
[Bre+2010] K. Bredies, K. Kunisch, and T. Pock.
*Total Generalized Variation*. SIAM Journal on Imaging Sciences,
3(3):492-526, Jan. 2010
"""
if space.ndim != 2:
raise ValueError('`space.ndim` must be 2, got {}'
''.format(space.ndim))
y, x = space.meshgrid
# Use a smooth sigmoid to get some anti-aliasing across edges.
scale = edge_smoothing / np.min(space.shape)
def sigmoid(val):
if edge_smoothing != 0:
val = val / scale
return 1 / (1 + np.exp(-val))
else:
return (val > 0).astype(val.dtype)
# Normalize to [0, 1]
x = (x - np.min(x)) / (np.max(x) - np.min(x))
y = (y - np.min(y)) / (np.max(y) - np.min(y))
# Background
values = -(x + y) / 2
# Square-ish region
indicator = np.ones(space.shape)
indicator *= sigmoid(-(0.015199034981905914 * x - y + 0.13896260554885403))
indicator *= sigmoid((0.3333333333333323 * y - x + 0.598958333333334))
indicator *= sigmoid((-2.4193548387096726 * y - x + 2.684979838709672))
values += indicator * 2 * (x + y - 1)
# Ellipse part
x_c = x - 0.71606842360499456
y_c = y - 0.18357884949910641
width = 0.55677657235995637
height = 0.37279391542283741
phi = 0.62911754900697558
x_c_rot = (np.cos(phi) * x_c - np.sin(phi) * y_c) / width
y_c_rot = (np.sin(phi) * x_c + np.cos(phi) * y_c) / height
indicator = sigmoid(np.sqrt(x_c_rot ** 2 + y_c_rot ** 2) - 1)
values = indicator * values + 1.5 * (1 - indicator) * (-x - 2 * y + 0.6)
# Normalize values
values = (values - np.min(values)) / (np.max(values) - np.min(values))
return space.element(values) | def function[tgv_phantom, parameter[space, edge_smoothing]]:
constant[Piecewise affine phantom.
This phantom is taken from [Bre+2010] and includes both linearly varying
regions and sharp discontinuities. It is designed to work well with
Total Generalized Variation (TGV) type regularization.
Parameters
----------
space : `DiscreteLp`, 2 dimensional
Discretized space in which the phantom is supposed to be created.
Needs to be two-dimensional.
edge_smoothing : nonnegative float, optional
Smoothing of the edges of the phantom, given as smoothing width in
units of minimum pixel size.
Returns
-------
phantom : ``space``-element
The generated phantom in ``space``. Values have range [0, 1].
Notes
-----
The original phantom is given by a specific image. In this implementation,
we extracted the underlying parameters and the phantom thus works with
spaces of any shape. Due to this, small variations may occur when compared
to the original phantom.
References
----------
[Bre+2010] K. Bredies, K. Kunisch, and T. Pock.
*Total Generalized Variation*. SIAM Journal on Imaging Sciences,
3(3):492-526, Jan. 2010
]
if compare[name[space].ndim not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da1b1ea2950>
<ast.Tuple object at 0x7da1b1ea2080> assign[=] name[space].meshgrid
variable[scale] assign[=] binary_operation[name[edge_smoothing] / call[name[np].min, parameter[name[space].shape]]]
def function[sigmoid, parameter[val]]:
if compare[name[edge_smoothing] not_equal[!=] constant[0]] begin[:]
variable[val] assign[=] binary_operation[name[val] / name[scale]]
return[binary_operation[constant[1] / binary_operation[constant[1] + call[name[np].exp, parameter[<ast.UnaryOp object at 0x7da1b1ea2470>]]]]]
variable[x] assign[=] binary_operation[binary_operation[name[x] - call[name[np].min, parameter[name[x]]]] / binary_operation[call[name[np].max, parameter[name[x]]] - call[name[np].min, parameter[name[x]]]]]
variable[y] assign[=] binary_operation[binary_operation[name[y] - call[name[np].min, parameter[name[y]]]] / binary_operation[call[name[np].max, parameter[name[y]]] - call[name[np].min, parameter[name[y]]]]]
variable[values] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b1ea16f0> / constant[2]]
variable[indicator] assign[=] call[name[np].ones, parameter[name[space].shape]]
<ast.AugAssign object at 0x7da1b1ea23b0>
<ast.AugAssign object at 0x7da1b1ea3280>
<ast.AugAssign object at 0x7da1b1ea38b0>
<ast.AugAssign object at 0x7da1b1ea06d0>
variable[x_c] assign[=] binary_operation[name[x] - constant[0.7160684236049946]]
variable[y_c] assign[=] binary_operation[name[y] - constant[0.1835788494991064]]
variable[width] assign[=] constant[0.5567765723599564]
variable[height] assign[=] constant[0.3727939154228374]
variable[phi] assign[=] constant[0.6291175490069756]
variable[x_c_rot] assign[=] binary_operation[binary_operation[binary_operation[call[name[np].cos, parameter[name[phi]]] * name[x_c]] - binary_operation[call[name[np].sin, parameter[name[phi]]] * name[y_c]]] / name[width]]
variable[y_c_rot] assign[=] binary_operation[binary_operation[binary_operation[call[name[np].sin, parameter[name[phi]]] * name[x_c]] + binary_operation[call[name[np].cos, parameter[name[phi]]] * name[y_c]]] / name[height]]
variable[indicator] assign[=] call[name[sigmoid], parameter[binary_operation[call[name[np].sqrt, parameter[binary_operation[binary_operation[name[x_c_rot] ** constant[2]] + binary_operation[name[y_c_rot] ** constant[2]]]]] - constant[1]]]]
variable[values] assign[=] binary_operation[binary_operation[name[indicator] * name[values]] + binary_operation[binary_operation[constant[1.5] * binary_operation[constant[1] - name[indicator]]] * binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b1ea36a0> - binary_operation[constant[2] * name[y]]] + constant[0.6]]]]
variable[values] assign[=] binary_operation[binary_operation[name[values] - call[name[np].min, parameter[name[values]]]] / binary_operation[call[name[np].max, parameter[name[values]]] - call[name[np].min, parameter[name[values]]]]]
return[call[name[space].element, parameter[name[values]]]] | keyword[def] identifier[tgv_phantom] ( identifier[space] , identifier[edge_smoothing] = literal[int] ):
literal[string]
keyword[if] identifier[space] . identifier[ndim] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[space] . identifier[ndim] ))
identifier[y] , identifier[x] = identifier[space] . identifier[meshgrid]
identifier[scale] = identifier[edge_smoothing] / identifier[np] . identifier[min] ( identifier[space] . identifier[shape] )
keyword[def] identifier[sigmoid] ( identifier[val] ):
keyword[if] identifier[edge_smoothing] != literal[int] :
identifier[val] = identifier[val] / identifier[scale]
keyword[return] literal[int] /( literal[int] + identifier[np] . identifier[exp] (- identifier[val] ))
keyword[else] :
keyword[return] ( identifier[val] > literal[int] ). identifier[astype] ( identifier[val] . identifier[dtype] )
identifier[x] =( identifier[x] - identifier[np] . identifier[min] ( identifier[x] ))/( identifier[np] . identifier[max] ( identifier[x] )- identifier[np] . identifier[min] ( identifier[x] ))
identifier[y] =( identifier[y] - identifier[np] . identifier[min] ( identifier[y] ))/( identifier[np] . identifier[max] ( identifier[y] )- identifier[np] . identifier[min] ( identifier[y] ))
identifier[values] =-( identifier[x] + identifier[y] )/ literal[int]
identifier[indicator] = identifier[np] . identifier[ones] ( identifier[space] . identifier[shape] )
identifier[indicator] *= identifier[sigmoid] (-( literal[int] * identifier[x] - identifier[y] + literal[int] ))
identifier[indicator] *= identifier[sigmoid] (( literal[int] * identifier[y] - identifier[x] + literal[int] ))
identifier[indicator] *= identifier[sigmoid] ((- literal[int] * identifier[y] - identifier[x] + literal[int] ))
identifier[values] += identifier[indicator] * literal[int] *( identifier[x] + identifier[y] - literal[int] )
identifier[x_c] = identifier[x] - literal[int]
identifier[y_c] = identifier[y] - literal[int]
identifier[width] = literal[int]
identifier[height] = literal[int]
identifier[phi] = literal[int]
identifier[x_c_rot] =( identifier[np] . identifier[cos] ( identifier[phi] )* identifier[x_c] - identifier[np] . identifier[sin] ( identifier[phi] )* identifier[y_c] )/ identifier[width]
identifier[y_c_rot] =( identifier[np] . identifier[sin] ( identifier[phi] )* identifier[x_c] + identifier[np] . identifier[cos] ( identifier[phi] )* identifier[y_c] )/ identifier[height]
identifier[indicator] = identifier[sigmoid] ( identifier[np] . identifier[sqrt] ( identifier[x_c_rot] ** literal[int] + identifier[y_c_rot] ** literal[int] )- literal[int] )
identifier[values] = identifier[indicator] * identifier[values] + literal[int] *( literal[int] - identifier[indicator] )*(- identifier[x] - literal[int] * identifier[y] + literal[int] )
identifier[values] =( identifier[values] - identifier[np] . identifier[min] ( identifier[values] ))/( identifier[np] . identifier[max] ( identifier[values] )- identifier[np] . identifier[min] ( identifier[values] ))
keyword[return] identifier[space] . identifier[element] ( identifier[values] ) | def tgv_phantom(space, edge_smoothing=0.2):
"""Piecewise affine phantom.
This phantom is taken from [Bre+2010] and includes both linearly varying
regions and sharp discontinuities. It is designed to work well with
Total Generalized Variation (TGV) type regularization.
Parameters
----------
space : `DiscreteLp`, 2 dimensional
Discretized space in which the phantom is supposed to be created.
Needs to be two-dimensional.
edge_smoothing : nonnegative float, optional
Smoothing of the edges of the phantom, given as smoothing width in
units of minimum pixel size.
Returns
-------
phantom : ``space``-element
The generated phantom in ``space``. Values have range [0, 1].
Notes
-----
The original phantom is given by a specific image. In this implementation,
we extracted the underlying parameters and the phantom thus works with
spaces of any shape. Due to this, small variations may occur when compared
to the original phantom.
References
----------
[Bre+2010] K. Bredies, K. Kunisch, and T. Pock.
*Total Generalized Variation*. SIAM Journal on Imaging Sciences,
3(3):492-526, Jan. 2010
"""
if space.ndim != 2:
raise ValueError('`space.ndim` must be 2, got {}'.format(space.ndim)) # depends on [control=['if'], data=[]]
(y, x) = space.meshgrid
# Use a smooth sigmoid to get some anti-aliasing across edges.
scale = edge_smoothing / np.min(space.shape)
def sigmoid(val):
if edge_smoothing != 0:
val = val / scale
return 1 / (1 + np.exp(-val)) # depends on [control=['if'], data=[]]
else:
return (val > 0).astype(val.dtype)
# Normalize to [0, 1]
x = (x - np.min(x)) / (np.max(x) - np.min(x))
y = (y - np.min(y)) / (np.max(y) - np.min(y))
# Background
values = -(x + y) / 2
# Square-ish region
indicator = np.ones(space.shape)
indicator *= sigmoid(-(0.015199034981905914 * x - y + 0.13896260554885403))
indicator *= sigmoid(0.3333333333333323 * y - x + 0.598958333333334)
indicator *= sigmoid(-2.4193548387096726 * y - x + 2.684979838709672)
values += indicator * 2 * (x + y - 1)
# Ellipse part
x_c = x - 0.7160684236049946
y_c = y - 0.1835788494991064
width = 0.5567765723599564
height = 0.3727939154228374
phi = 0.6291175490069756
x_c_rot = (np.cos(phi) * x_c - np.sin(phi) * y_c) / width
y_c_rot = (np.sin(phi) * x_c + np.cos(phi) * y_c) / height
indicator = sigmoid(np.sqrt(x_c_rot ** 2 + y_c_rot ** 2) - 1)
values = indicator * values + 1.5 * (1 - indicator) * (-x - 2 * y + 0.6)
# Normalize values
values = (values - np.min(values)) / (np.max(values) - np.min(values))
return space.element(values) |
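
The row above hinges on one reusable trick: a hard 0/1 region indicator is replaced by a logistic ramp whose width comes from `edge_smoothing`, which anti-aliases the region boundaries. A minimal stand-alone sketch of that idea in plain NumPy (independent of any `DiscreteLp` space; the function name and width convention here are illustrative):

```python
import numpy as np

def smoothed_step(val, width):
    """Smooth indicator of ``val > 0``, like the ``sigmoid`` helper above.

    Dividing by a small ``width`` before the logistic turns a hard edge
    into a ramp roughly ``width`` units wide; ``width == 0`` falls back
    to a hard threshold.
    """
    if width == 0:
        return (val > 0).astype(float)
    return 1.0 / (1.0 + np.exp(-val / width))

x = np.linspace(-1.0, 1.0, 9)
print(np.round(smoothed_step(x, 0.25), 3))  # smooth ramp, 0.5 exactly at x = 0
print(smoothed_step(x, 0.0))                # hard step, for comparison
```
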
def jobInsert(self, client, cmdLine, clientInfo='', clientKey='', params='',
alreadyRunning=False, minimumWorkers=0, maximumWorkers=0,
jobType='', priority=DEFAULT_JOB_PRIORITY):
""" Add an entry to the jobs table for a new job request. This is called by
clients that wish to startup a new job, like a Hypersearch, stream job, or
specific model evaluation from the engine.
This puts a new entry into the jobs table. The CJM is always periodically
sweeping the jobs table and when it finds a new job, will proceed to start it
up on Hadoop.
Parameters:
----------------------------------------------------------------
client: Name of the client submitting the job
cmdLine: Command line to use to launch each worker process; must be
a non-empty string
clientInfo: JSON encoded dict of client specific information.
clientKey: Foreign key.
params: JSON encoded dict of the parameters for the job. This
can be fetched out of the database by the worker processes
based on the jobID.
alreadyRunning: Used for unit test purposes only. This inserts the job
in the running state. It is used when running a worker
in standalone mode without hadoop - it gives it a job
record to work with.
minimumWorkers: minimum number of workers desired at a time.
maximumWorkers: maximum number of workers desired at a time.
jobType: The type of job that this is. This should be one of the
JOB_TYPE_XXXX enums. This is needed to allow a standard
way of recognizing a job's function and capabilities.
priority: Job scheduling priority; 0 is the default priority (
ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are
higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY),
and negative values are lower priority (down to
ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will
be scheduled to run at the expense of the lower-priority
jobs, and higher-priority job tasks will preempt those
with lower priority if there is inadequate supply of
scheduling slots. Excess lower priority job tasks will
starve as long as slot demand exceeds supply. Most jobs
should be scheduled with DEFAULT_JOB_PRIORITY. System jobs
that must run at all cost, such as Multi-Model-Master,
should be scheduled with MAX_JOB_PRIORITY.
retval: jobID - unique ID assigned to this job
"""
jobHash = self._normalizeHash(uuid.uuid1().bytes)
@g_retrySQL
def insertWithRetries():
with ConnectionFactory.get() as conn:
return self._insertOrGetUniqueJobNoRetries(
conn, client=client, cmdLine=cmdLine, jobHash=jobHash,
clientInfo=clientInfo, clientKey=clientKey, params=params,
minimumWorkers=minimumWorkers, maximumWorkers=maximumWorkers,
jobType=jobType, priority=priority, alreadyRunning=alreadyRunning)
try:
jobID = insertWithRetries()
except:
self._logger.exception(
'jobInsert FAILED: jobType=%r; client=%r; clientInfo=%r; clientKey=%r; '
'jobHash=%r; cmdLine=%r',
jobType, client, _abbreviate(clientInfo, 48), clientKey, jobHash,
cmdLine)
raise
else:
self._logger.info(
'jobInsert: returning jobID=%s. jobType=%r; client=%r; clientInfo=%r; '
'clientKey=%r; jobHash=%r; cmdLine=%r',
jobID, jobType, client, _abbreviate(clientInfo, 48), clientKey,
jobHash, cmdLine)
return jobID | def function[jobInsert, parameter[self, client, cmdLine, clientInfo, clientKey, params, alreadyRunning, minimumWorkers, maximumWorkers, jobType, priority]]:
constant[ Add an entry to the jobs table for a new job request. This is called by
clients that wish to startup a new job, like a Hypersearch, stream job, or
specific model evaluation from the engine.
This puts a new entry into the jobs table. The CJM is always periodically
sweeping the jobs table and when it finds a new job, will proceed to start it
up on Hadoop.
Parameters:
----------------------------------------------------------------
client: Name of the client submitting the job
cmdLine: Command line to use to launch each worker process; must be
a non-empty string
clientInfo: JSON encoded dict of client specific information.
clientKey: Foreign key.
params: JSON encoded dict of the parameters for the job. This
can be fetched out of the database by the worker processes
based on the jobID.
alreadyRunning: Used for unit test purposes only. This inserts the job
in the running state. It is used when running a worker
in standalone mode without hadoop - it gives it a job
record to work with.
minimumWorkers: minimum number of workers desired at a time.
maximumWorkers: maximum number of workers desired at a time.
jobType: The type of job that this is. This should be one of the
JOB_TYPE_XXXX enums. This is needed to allow a standard
way of recognizing a job's function and capabilities.
priority: Job scheduling priority; 0 is the default priority (
ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are
higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY),
and negative values are lower priority (down to
ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will
be scheduled to run at the expense of the lower-priority
jobs, and higher-priority job tasks will preempt those
with lower priority if there is inadequate supply of
scheduling slots. Excess lower priority job tasks will
starve as long as slot demand exceeds supply. Most jobs
should be scheduled with DEFAULT_JOB_PRIORITY. System jobs
that must run at all cost, such as Multi-Model-Master,
should be scheduled with MAX_JOB_PRIORITY.
retval: jobID - unique ID assigned to this job
]
variable[jobHash] assign[=] call[name[self]._normalizeHash, parameter[call[name[uuid].uuid1, parameter[]].bytes]]
def function[insertWithRetries, parameter[]]:
with call[name[ConnectionFactory].get, parameter[]] begin[:]
return[call[name[self]._insertOrGetUniqueJobNoRetries, parameter[name[conn]]]]
<ast.Try object at 0x7da18f09c640>
return[name[jobID]] | keyword[def] identifier[jobInsert] ( identifier[self] , identifier[client] , identifier[cmdLine] , identifier[clientInfo] = literal[string] , identifier[clientKey] = literal[string] , identifier[params] = literal[string] ,
identifier[alreadyRunning] = keyword[False] , identifier[minimumWorkers] = literal[int] , identifier[maximumWorkers] = literal[int] ,
identifier[jobType] = literal[string] , identifier[priority] = identifier[DEFAULT_JOB_PRIORITY] ):
literal[string]
identifier[jobHash] = identifier[self] . identifier[_normalizeHash] ( identifier[uuid] . identifier[uuid1] (). identifier[bytes] )
@ identifier[g_retrySQL]
keyword[def] identifier[insertWithRetries] ():
keyword[with] identifier[ConnectionFactory] . identifier[get] () keyword[as] identifier[conn] :
keyword[return] identifier[self] . identifier[_insertOrGetUniqueJobNoRetries] (
identifier[conn] , identifier[client] = identifier[client] , identifier[cmdLine] = identifier[cmdLine] , identifier[jobHash] = identifier[jobHash] ,
identifier[clientInfo] = identifier[clientInfo] , identifier[clientKey] = identifier[clientKey] , identifier[params] = identifier[params] ,
identifier[minimumWorkers] = identifier[minimumWorkers] , identifier[maximumWorkers] = identifier[maximumWorkers] ,
identifier[jobType] = identifier[jobType] , identifier[priority] = identifier[priority] , identifier[alreadyRunning] = identifier[alreadyRunning] )
keyword[try] :
identifier[jobID] = identifier[insertWithRetries] ()
keyword[except] :
identifier[self] . identifier[_logger] . identifier[exception] (
literal[string]
literal[string] ,
identifier[jobType] , identifier[client] , identifier[_abbreviate] ( identifier[clientInfo] , literal[int] ), identifier[clientKey] , identifier[jobHash] ,
identifier[cmdLine] )
keyword[raise]
keyword[else] :
identifier[self] . identifier[_logger] . identifier[info] (
literal[string]
literal[string] ,
identifier[jobID] , identifier[jobType] , identifier[client] , identifier[_abbreviate] ( identifier[clientInfo] , literal[int] ), identifier[clientKey] ,
identifier[jobHash] , identifier[cmdLine] )
keyword[return] identifier[jobID] | def jobInsert(self, client, cmdLine, clientInfo='', clientKey='', params='', alreadyRunning=False, minimumWorkers=0, maximumWorkers=0, jobType='', priority=DEFAULT_JOB_PRIORITY):
""" Add an entry to the jobs table for a new job request. This is called by
clients that wish to startup a new job, like a Hypersearch, stream job, or
specific model evaluation from the engine.
This puts a new entry into the jobs table. The CJM is always periodically
sweeping the jobs table and when it finds a new job, will proceed to start it
up on Hadoop.
Parameters:
----------------------------------------------------------------
client: Name of the client submitting the job
cmdLine: Command line to use to launch each worker process; must be
a non-empty string
clientInfo: JSON encoded dict of client specific information.
clientKey: Foreign key.
params: JSON encoded dict of the parameters for the job. This
can be fetched out of the database by the worker processes
based on the jobID.
alreadyRunning: Used for unit test purposes only. This inserts the job
in the running state. It is used when running a worker
in standalone mode without hadoop - it gives it a job
record to work with.
minimumWorkers: minimum number of workers desired at a time.
maximumWorkers: maximum number of workers desired at a time.
jobType: The type of job that this is. This should be one of the
JOB_TYPE_XXXX enums. This is needed to allow a standard
way of recognizing a job's function and capabilities.
priority: Job scheduling priority; 0 is the default priority (
ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are
higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY),
and negative values are lower priority (down to
ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will
be scheduled to run at the expense of the lower-priority
jobs, and higher-priority job tasks will preempt those
with lower priority if there is inadequate supply of
scheduling slots. Excess lower priority job tasks will
starve as long as slot demand exceeds supply. Most jobs
should be scheduled with DEFAULT_JOB_PRIORITY. System jobs
that must run at all cost, such as Multi-Model-Master,
should be scheduled with MAX_JOB_PRIORITY.
retval: jobID - unique ID assigned to this job
"""
jobHash = self._normalizeHash(uuid.uuid1().bytes)
@g_retrySQL
def insertWithRetries():
with ConnectionFactory.get() as conn:
return self._insertOrGetUniqueJobNoRetries(conn, client=client, cmdLine=cmdLine, jobHash=jobHash, clientInfo=clientInfo, clientKey=clientKey, params=params, minimumWorkers=minimumWorkers, maximumWorkers=maximumWorkers, jobType=jobType, priority=priority, alreadyRunning=alreadyRunning) # depends on [control=['with'], data=['conn']]
try:
jobID = insertWithRetries() # depends on [control=['try'], data=[]]
except:
self._logger.exception('jobInsert FAILED: jobType=%r; client=%r; clientInfo=%r; clientKey=%r; jobHash=%r; cmdLine=%r', jobType, client, _abbreviate(clientInfo, 48), clientKey, jobHash, cmdLine)
raise # depends on [control=['except'], data=[]]
else:
self._logger.info('jobInsert: returning jobID=%s. jobType=%r; client=%r; clientInfo=%r; clientKey=%r; jobHash=%r; cmdLine=%r', jobID, jobType, client, _abbreviate(clientInfo, 48), clientKey, jobHash, cmdLine)
return jobID |
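
`jobInsert` above delegates the actual INSERT to a nested helper decorated with `g_retrySQL`, whose definition lies outside this row. A hedged sketch of what such a retry decorator typically looks like (the name, retry count, and backoff below are assumptions, not the real implementation):

```python
import functools
import time

def retry_on_transient_error(retries=3, delay=0.5, exceptions=(Exception,)):
    """Hypothetical stand-in for g_retrySQL: retry a flaky callable."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(retries):
                try:
                    return func(*args, **kwargs)
                except exceptions:
                    if attempt == retries - 1:
                        raise          # out of attempts; propagate
                    time.sleep(delay)  # simple fixed backoff
        return wrapper
    return decorator

@retry_on_transient_error(retries=3, delay=0.1)
def insert_row():
    return 42  # pretend this touches the database

print(insert_row())
```
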
def config(self, show_row_hdrs=True, show_col_hdrs=True,
show_col_hdr_in_cell=False, auto_resize=True):
"""
Override the in-class params:
@param show_row_hdrs : show row headers
@param show_col_hdrs : show column headers
@param show_col_hdr_in_cell : embed column header in each cell
@param auto_resize : auto resize according to the size of terminal
"""
self.show_row_hdrs = show_row_hdrs
self.show_col_hdrs = show_col_hdrs
self.show_col_hdr_in_cell = show_col_hdr_in_cell
self.auto_resize = auto_resize | def function[config, parameter[self, show_row_hdrs, show_col_hdrs, show_col_hdr_in_cell, auto_resize]]:
constant[
Override the in-class params:
@param show_row_hdrs : show row headers
@param show_col_hdrs : show column headers
@param show_col_hdr_in_cell : embed column header in each cell
@param auto_resize : auto resize according to the size of terminal
]
name[self].show_row_hdrs assign[=] name[show_row_hdrs]
name[self].show_col_hdrs assign[=] name[show_col_hdrs]
name[self].show_col_hdr_in_cell assign[=] name[show_col_hdr_in_cell]
name[self].auto_resize assign[=] name[auto_resize] | keyword[def] identifier[config] ( identifier[self] , identifier[show_row_hdrs] = keyword[True] , identifier[show_col_hdrs] = keyword[True] ,
identifier[show_col_hdr_in_cell] = keyword[False] , identifier[auto_resize] = keyword[True] ):
literal[string]
identifier[self] . identifier[show_row_hdrs] = identifier[show_row_hdrs]
identifier[self] . identifier[show_col_hdrs] = identifier[show_col_hdrs]
identifier[self] . identifier[show_col_hdr_in_cell] = identifier[show_col_hdr_in_cell]
identifier[self] . identifier[auto_resize] = identifier[auto_resize] | def config(self, show_row_hdrs=True, show_col_hdrs=True, show_col_hdr_in_cell=False, auto_resize=True):
"""
Override the in-class params:
@param show_row_hdrs : show row headers
@param show_col_hdrs : show column headers
@param show_col_hdr_in_cell : embed column header in each cell
@param auto_resize : auto resize according to the size of terminal
"""
self.show_row_hdrs = show_row_hdrs
self.show_col_hdrs = show_col_hdrs
self.show_col_hdr_in_cell = show_col_hdr_in_cell
self.auto_resize = auto_resize |
def send_text(self, user_id, content, account=None):
"""
Send a text message.
For details, see
http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html
:param user_id: user ID; this is the source of the `Message` you received
:param content: body of the message
:param account: optional, customer service account
:return: the returned JSON data packet
Usage example::
from wechatpy import WeChatClient
client = WeChatClient('appid', 'secret')
res = client.message.send_text('openid', 'text')
"""
data = {
'touser': user_id,
'msgtype': 'text',
'text': {'content': content}
}
return self._send_custom_message(data, account=account) | def function[send_text, parameter[self, user_id, content, account]]:
constant[
Send a text message.
For details, see
http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html
:param user_id: user ID; this is the source of the `Message` you received
:param content: body of the message
:param account: optional, customer service account
:return: the returned JSON data packet
Usage example::
from wechatpy import WeChatClient
client = WeChatClient('appid', 'secret')
res = client.message.send_text('openid', 'text')
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b21ecb80>, <ast.Constant object at 0x7da1b21efeb0>, <ast.Constant object at 0x7da1b21ed4b0>], [<ast.Name object at 0x7da1b21eda20>, <ast.Constant object at 0x7da1b21ecf40>, <ast.Dict object at 0x7da1b21efe20>]]
return[call[name[self]._send_custom_message, parameter[name[data]]]] | keyword[def] identifier[send_text] ( identifier[self] , identifier[user_id] , identifier[content] , identifier[account] = keyword[None] ):
literal[string]
identifier[data] ={
literal[string] : identifier[user_id] ,
literal[string] : literal[string] ,
literal[string] :{ literal[string] : identifier[content] }
}
keyword[return] identifier[self] . identifier[_send_custom_message] ( identifier[data] , identifier[account] = identifier[account] ) | def send_text(self, user_id, content, account=None):
"""
Send a text message.
For details, see
http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html
:param user_id: user ID; this is the source of the `Message` you received
:param content: body of the message
:param account: optional, customer service account
:return: the returned JSON data packet
Usage example::
from wechatpy import WeChatClient
client = WeChatClient('appid', 'secret')
res = client.message.send_text('openid', 'text')
"""
data = {'touser': user_id, 'msgtype': 'text', 'text': {'content': content}}
return self._send_custom_message(data, account=account) |
def _get_battery_status(self, battery):
"""
Get the battery status
"""
if battery["charge"] == -1:
return (UNKNOWN_SYMBOL, UNKNOWN, "#FFFFFF")
if battery["isCharging"]:
status = self.status_chr
color = self.py3.COLOR_GOOD
else:
status = self.status_bat
color = self.py3.COLOR_DEGRADED
if not battery["isCharging"] and battery["charge"] <= self.low_threshold:
color = self.py3.COLOR_BAD
if battery["charge"] > 99:
status = self.status_full
return (battery["charge"], status, color) | def function[_get_battery_status, parameter[self, battery]]:
constant[
Get the battery status
]
if compare[call[name[battery]][constant[charge]] equal[==] <ast.UnaryOp object at 0x7da1b2089750>] begin[:]
return[tuple[[<ast.Name object at 0x7da1b208ae00>, <ast.Name object at 0x7da1b208bb80>, <ast.Constant object at 0x7da1b208b550>]]]
if call[name[battery]][constant[isCharging]] begin[:]
variable[status] assign[=] name[self].status_chr
variable[color] assign[=] name[self].py3.COLOR_GOOD
if <ast.BoolOp object at 0x7da1b208af20> begin[:]
variable[color] assign[=] name[self].py3.COLOR_BAD
if compare[call[name[battery]][constant[charge]] greater[>] constant[99]] begin[:]
variable[status] assign[=] name[self].status_full
return[tuple[[<ast.Subscript object at 0x7da1b20880a0>, <ast.Name object at 0x7da1b208a980>, <ast.Name object at 0x7da1b208a6b0>]]] | keyword[def] identifier[_get_battery_status] ( identifier[self] , identifier[battery] ):
literal[string]
keyword[if] identifier[battery] [ literal[string] ]==- literal[int] :
keyword[return] ( identifier[UNKNOWN_SYMBOL] , identifier[UNKNOWN] , literal[string] )
keyword[if] identifier[battery] [ literal[string] ]:
identifier[status] = identifier[self] . identifier[status_chr]
identifier[color] = identifier[self] . identifier[py3] . identifier[COLOR_GOOD]
keyword[else] :
identifier[status] = identifier[self] . identifier[status_bat]
identifier[color] = identifier[self] . identifier[py3] . identifier[COLOR_DEGRADED]
keyword[if] keyword[not] identifier[battery] [ literal[string] ] keyword[and] identifier[battery] [ literal[string] ]<= identifier[self] . identifier[low_threshold] :
identifier[color] = identifier[self] . identifier[py3] . identifier[COLOR_BAD]
keyword[if] identifier[battery] [ literal[string] ]> literal[int] :
identifier[status] = identifier[self] . identifier[status_full]
keyword[return] ( identifier[battery] [ literal[string] ], identifier[status] , identifier[color] ) | def _get_battery_status(self, battery):
"""
Get the battery status
"""
if battery['charge'] == -1:
return (UNKNOWN_SYMBOL, UNKNOWN, '#FFFFFF') # depends on [control=['if'], data=[]]
if battery['isCharging']:
status = self.status_chr
color = self.py3.COLOR_GOOD # depends on [control=['if'], data=[]]
else:
status = self.status_bat
color = self.py3.COLOR_DEGRADED
if not battery['isCharging'] and battery['charge'] <= self.low_threshold:
color = self.py3.COLOR_BAD # depends on [control=['if'], data=[]]
if battery['charge'] > 99:
status = self.status_full # depends on [control=['if'], data=[]]
return (battery['charge'], status, color) |
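
The branching in `_get_battery_status` is easy to exercise in isolation. A small sketch decoupled from the py3status module; the labels and the 20% threshold stand in for the instance attributes `status_chr`, `status_bat`, `status_full`, and `low_threshold`:

```python
def battery_status(charge, is_charging, low_threshold=20):
    # Mirrors the branch order above: unknown, charging/discharging,
    # low-charge override of the color, full-charge override of the label.
    if charge == -1:
        return ("?", "UNKNOWN", "#FFFFFF")
    label = "CHR" if is_charging else "BAT"
    color = "good" if is_charging else "degraded"
    if not is_charging and charge <= low_threshold:
        color = "bad"
    if charge > 99:
        label = "FULL"
    return (charge, label, color)

assert battery_status(100, True) == (100, "FULL", "good")
assert battery_status(15, False) == (15, "BAT", "bad")
assert battery_status(-1, False)[1] == "UNKNOWN"
```
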
def _create_dataset(self, *data):
"""Converts input data to the appropriate Dataset"""
# Make sure data is a tuple of dense tensors
data = [self._to_torch(x, dtype=torch.FloatTensor) for x in data]
return TensorDataset(*data) | def function[_create_dataset, parameter[self]]:
constant[Converts input data to the appropriate Dataset]
variable[data] assign[=] <ast.ListComp object at 0x7da1b1b64280>
return[call[name[TensorDataset], parameter[<ast.Starred object at 0x7da1b1cbb040>]]] | keyword[def] identifier[_create_dataset] ( identifier[self] ,* identifier[data] ):
literal[string]
identifier[data] =[ identifier[self] . identifier[_to_torch] ( identifier[x] , identifier[dtype] = identifier[torch] . identifier[FloatTensor] ) keyword[for] identifier[x] keyword[in] identifier[data] ]
keyword[return] identifier[TensorDataset] (* identifier[data] ) | def _create_dataset(self, *data):
"""Converts input data to the appropriate Dataset"""
# Make sure data is a tuple of dense tensors
data = [self._to_torch(x, dtype=torch.FloatTensor) for x in data]
return TensorDataset(*data) |
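
For context, `TensorDataset` zips equally sized tensors row-wise, which is all `_create_dataset` relies on. A short usage sketch of the same wrapping plus batching (requires PyTorch; shapes are arbitrary):

```python
import torch
from torch.utils.data import TensorDataset, DataLoader

# Wrap aligned feature/label tensors as floats, as _create_dataset does.
X = torch.rand(8, 3).float()
y = torch.rand(8, 1).float()
dataset = TensorDataset(X, y)

x0, y0 = dataset[0]            # indexes row-wise across all wrapped tensors
print(x0.shape, y0.shape)      # torch.Size([3]) torch.Size([1])

for xb, yb in DataLoader(dataset, batch_size=4):
    print(xb.shape, yb.shape)  # torch.Size([4, 3]) torch.Size([4, 1])
```
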
def histogram(sa, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT, title=LABEL_DEFAULT):
"""
Plots a histogram of the sarray provided as input, and returns the
resulting Plot object.
The function supports numeric SArrays with dtypes int or float.
Parameters
----------
sa : SArray
The data to get a histogram for. Must be numeric (int/float).
xlabel : str (optional)
The text label for the X axis. Defaults to "Values".
ylabel : str (optional)
The text label for the Y axis. Defaults to "Count".
title : str (optional)
The title of the plot. Defaults to LABEL_DEFAULT. If the value is
LABEL_DEFAULT, the title will be "<xlabel> vs. <ylabel>". If the value
is None, the title will be omitted. Otherwise, the string passed in as the
title will be used as the plot title.
Returns
-------
out : Plot
A :class:`Plot` object that is the histogram.
Examples
--------
Make a histogram of an SArray.
>>> x = turicreate.SArray([1,2,3,4,5,1,1,1,1,2,2,3,2,3,1,1,1,4])
>>> hist = turicreate.visualization.histogram(x)
"""
if (not isinstance(sa, tc.data_structures.sarray.SArray) or
sa.dtype not in [int, float]):
raise ValueError("turicreate.visualization.histogram supports " +
"SArrays of dtypes: int, float")
title = _get_title(title)
plt_ref = tc.extensions.plot_histogram(sa,
xlabel, ylabel, title)
return Plot(plt_ref) | def function[histogram, parameter[sa, xlabel, ylabel, title]]:
constant[
Plots a histogram of the sarray provided as input, and returns the
resulting Plot object.
The function supports numeric SArrays with dtypes int or float.
Parameters
----------
sa : SArray
The data to get a histogram for. Must be numeric (int/float).
xlabel : str (optional)
The text label for the X axis. Defaults to "Values".
ylabel : str (optional)
The text label for the Y axis. Defaults to "Count".
title : str (optional)
The title of the plot. Defaults to LABEL_DEFAULT. If the value is
LABEL_DEFAULT, the title will be "<xlabel> vs. <ylabel>". If the value
is None, the title will be omitted. Otherwise, the string passed in as the
title will be used as the plot title.
Returns
-------
out : Plot
A :class:`Plot` object that is the histogram.
Examples
--------
Make a histogram of an SArray.
>>> x = turicreate.SArray([1,2,3,4,5,1,1,1,1,2,2,3,2,3,1,1,1,4])
>>> hist = turicreate.visualization.histogram(x)
]
if <ast.BoolOp object at 0x7da1b1f8f2b0> begin[:]
<ast.Raise object at 0x7da1b1f8f730>
variable[title] assign[=] call[name[_get_title], parameter[name[title]]]
variable[plt_ref] assign[=] call[name[tc].extensions.plot_histogram, parameter[name[sa], name[xlabel], name[ylabel], name[title]]]
return[call[name[Plot], parameter[name[plt_ref]]]] | keyword[def] identifier[histogram] ( identifier[sa] , identifier[xlabel] = identifier[LABEL_DEFAULT] , identifier[ylabel] = identifier[LABEL_DEFAULT] , identifier[title] = identifier[LABEL_DEFAULT] ):
literal[string]
keyword[if] ( keyword[not] identifier[isinstance] ( identifier[sa] , identifier[tc] . identifier[data_structures] . identifier[sarray] . identifier[SArray] ) keyword[or]
identifier[sa] . identifier[dtype] keyword[not] keyword[in] [ identifier[int] , identifier[float] ]):
keyword[raise] identifier[ValueError] ( literal[string] +
literal[string] )
identifier[title] = identifier[_get_title] ( identifier[title] )
identifier[plt_ref] = identifier[tc] . identifier[extensions] . identifier[plot_histogram] ( identifier[sa] ,
identifier[xlabel] , identifier[ylabel] , identifier[title] )
keyword[return] identifier[Plot] ( identifier[plt_ref] ) | def histogram(sa, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT, title=LABEL_DEFAULT):
"""
Plots a histogram of the sarray provided as input, and returns the
resulting Plot object.
The function supports numeric SArrays with dtypes int or float.
Parameters
----------
sa : SArray
The data to get a histogram for. Must be numeric (int/float).
xlabel : str (optional)
The text label for the X axis. Defaults to "Values".
ylabel : str (optional)
The text label for the Y axis. Defaults to "Count".
title : str (optional)
The title of the plot. Defaults to LABEL_DEFAULT. If the value is
LABEL_DEFAULT, the title will be "<xlabel> vs. <ylabel>". If the value
is None, the title will be omitted. Otherwise, the string passed in as the
title will be used as the plot title.
Returns
-------
out : Plot
A :class:`Plot` object that is the histogram.
Examples
--------
Make a histogram of an SArray.
>>> x = turicreate.SArray([1,2,3,4,5,1,1,1,1,2,2,3,2,3,1,1,1,4])
>>> hist = turicreate.visualization.histogram(x)
"""
if not isinstance(sa, tc.data_structures.sarray.SArray) or sa.dtype not in [int, float]:
raise ValueError('turicreate.visualization.histogram supports ' + 'SArrays of dtypes: int, float') # depends on [control=['if'], data=[]]
title = _get_title(title)
plt_ref = tc.extensions.plot_histogram(sa, xlabel, ylabel, title)
return Plot(plt_ref) |
def send(self):
""" Post fields and files to an HTTP server as multipart/form-data.
Return the server's response.
"""
scheme, location, path, query, _ = urlparse.urlsplit(self.url)
assert scheme in ("http", "https"), "Unsupported scheme %r" % scheme
content_type, body = self._encode_multipart_formdata()
handle = getattr(httplib, scheme.upper() + "Connection")(location)
if self.mock_http:
# Don't actually send anything, print to stdout instead
handle.sock = parts.Bunch(
sendall=lambda x: sys.stdout.write(fmt.to_utf8(
''.join((c if 32 <= ord(c) < 127 or ord(c) in (8, 10) else u'\u27ea%02X\u27eb' % ord(c)) for c in x)
)),
makefile=lambda dummy, _: StringIO.StringIO("\r\n".join((
"HTTP/1.0 204 NO CONTENT",
"Content-Length: 0",
"",
))),
close=lambda: None,
)
handle.putrequest('POST', urlparse.urlunsplit(('', '', path, query, '')))
handle.putheader('Content-Type', content_type)
handle.putheader('Content-Length', str(len(body)))
for key, val in self.headers.items():
handle.putheader(key, val)
handle.endheaders()
handle.send(body)
#print handle.__dict__
return handle.getresponse() | def function[send, parameter[self]]:
constant[ Post fields and files to an HTTP server as multipart/form-data.
Return the server's response.
]
<ast.Tuple object at 0x7da18f58e530> assign[=] call[name[urlparse].urlsplit, parameter[name[self].url]]
assert[compare[name[scheme] in tuple[[<ast.Constant object at 0x7da18f58e6e0>, <ast.Constant object at 0x7da18f58dab0>]]]]
<ast.Tuple object at 0x7da18f58f280> assign[=] call[name[self]._encode_multipart_formdata, parameter[]]
variable[handle] assign[=] call[call[name[getattr], parameter[name[httplib], binary_operation[call[name[scheme].upper, parameter[]] + constant[Connection]]]], parameter[name[location]]]
if name[self].mock_http begin[:]
name[handle].sock assign[=] call[name[parts].Bunch, parameter[]]
call[name[handle].putrequest, parameter[constant[POST], call[name[urlparse].urlunsplit, parameter[tuple[[<ast.Constant object at 0x7da20c6e78b0>, <ast.Constant object at 0x7da20c6e5ab0>, <ast.Name object at 0x7da20c6e6560>, <ast.Name object at 0x7da20c6e4c70>, <ast.Constant object at 0x7da20c6e5300>]]]]]]
call[name[handle].putheader, parameter[constant[Content-Type], name[content_type]]]
call[name[handle].putheader, parameter[constant[Content-Length], call[name[str], parameter[call[name[len], parameter[name[body]]]]]]]
for taget[tuple[[<ast.Name object at 0x7da20c6e6a10>, <ast.Name object at 0x7da20c6e6da0>]]] in starred[call[name[self].headers.items, parameter[]]] begin[:]
call[name[handle].putheader, parameter[name[key], name[val]]]
call[name[handle].endheaders, parameter[]]
call[name[handle].send, parameter[name[body]]]
return[call[name[handle].getresponse, parameter[]]] | keyword[def] identifier[send] ( identifier[self] ):
literal[string]
identifier[scheme] , identifier[location] , identifier[path] , identifier[query] , identifier[_] = identifier[urlparse] . identifier[urlsplit] ( identifier[self] . identifier[url] )
keyword[assert] identifier[scheme] keyword[in] ( literal[string] , literal[string] ), literal[string] % identifier[scheme]
identifier[content_type] , identifier[body] = identifier[self] . identifier[_encode_multipart_formdata] ()
identifier[handle] = identifier[getattr] ( identifier[httplib] , identifier[scheme] . identifier[upper] ()+ literal[string] )( identifier[location] )
keyword[if] identifier[self] . identifier[mock_http] :
identifier[handle] . identifier[sock] = identifier[parts] . identifier[Bunch] (
identifier[sendall] = keyword[lambda] identifier[x] : identifier[sys] . identifier[stdout] . identifier[write] ( identifier[fmt] . identifier[to_utf8] (
literal[string] . identifier[join] (( identifier[c] keyword[if] literal[int] <= identifier[ord] ( identifier[c] )< literal[int] keyword[or] identifier[ord] ( identifier[c] ) keyword[in] ( literal[int] , literal[int] ) keyword[else] literal[string] % identifier[ord] ( identifier[c] )) keyword[for] identifier[c] keyword[in] identifier[x] )
)),
identifier[makefile] = keyword[lambda] identifier[dummy] , identifier[_] : identifier[StringIO] . identifier[StringIO] ( literal[string] . identifier[join] ((
literal[string] ,
literal[string] ,
literal[string] ,
))),
identifier[close] = keyword[lambda] : keyword[None] ,
)
identifier[handle] . identifier[putrequest] ( literal[string] , identifier[urlparse] . identifier[urlunsplit] (( literal[string] , literal[string] , identifier[path] , identifier[query] , literal[string] )))
identifier[handle] . identifier[putheader] ( literal[string] , identifier[content_type] )
identifier[handle] . identifier[putheader] ( literal[string] , identifier[str] ( identifier[len] ( identifier[body] )))
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[self] . identifier[headers] . identifier[items] ():
identifier[handle] . identifier[putheader] ( identifier[key] , identifier[val] )
identifier[handle] . identifier[endheaders] ()
identifier[handle] . identifier[send] ( identifier[body] )
keyword[return] identifier[handle] . identifier[getresponse] () | def send(self):
""" Post fields and files to an HTTP server as multipart/form-data.
Return the server's response.
"""
(scheme, location, path, query, _) = urlparse.urlsplit(self.url)
assert scheme in ('http', 'https'), 'Unsupported scheme %r' % scheme
(content_type, body) = self._encode_multipart_formdata()
handle = getattr(httplib, scheme.upper() + 'Connection')(location)
if self.mock_http:
# Don't actually send anything, print to stdout instead
handle.sock = parts.Bunch(sendall=lambda x: sys.stdout.write(fmt.to_utf8(''.join((c if 32 <= ord(c) < 127 or ord(c) in (8, 10) else u'⟪%02X⟫' % ord(c) for c in x)))), makefile=lambda dummy, _: StringIO.StringIO('\r\n'.join(('HTTP/1.0 204 NO CONTENT', 'Content-Length: 0', ''))), close=lambda : None) # depends on [control=['if'], data=[]]
handle.putrequest('POST', urlparse.urlunsplit(('', '', path, query, '')))
handle.putheader('Content-Type', content_type)
handle.putheader('Content-Length', str(len(body)))
for (key, val) in self.headers.items():
handle.putheader(key, val) # depends on [control=['for'], data=[]]
handle.endheaders()
handle.send(body)
#print handle.__dict__
return handle.getresponse() |
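
`send` above hand-rolls a multipart/form-data POST over Python 2's `httplib`. For comparison, a sketch of the equivalent request with the third-party `requests` library, which builds the boundary, body, and Content-Length itself (the URL and field names are placeholders):

```python
import requests  # third-party; not used by the module above

fields = {"field": "value"}
files = {"upload": ("name.txt", b"payload bytes", "text/plain")}

# data= and files= together are encoded as multipart/form-data.
response = requests.post("http://example.com/upload",
                         data=fields, files=files,
                         headers={"User-Agent": "uploader/0.1"})
print(response.status_code)
```
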
def print_doc1(*args, **kwargs):
'''Print the first paragraph of the docstring of the decorated function.
The paragraph will be printed as a oneliner.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, ``prefix`` or ``tail``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
Examples:
# >>> @print_doc1
# ... def foo():
# ... """First line of docstring.
# ...
# ... another line.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst line of docstring\033[0m
# >>> @print_doc1
# ... def foo():
# ... """First paragraph of docstring which contains more than one
# ... line.
# ...
# ... Another paragraph.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst paragraph of docstring which contains more than one line\033[0m
'''
# output settings from kwargs or take defaults
color = kwargs.get('color', blue)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '\n')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
try:
prgf = first_paragraph(func.__doc__)
print(color(prefix + prgf + tail, bold))
except AttributeError as exc:
name = func.__name__
print(red(flo('{name}() has no docstring')))
raise(exc)
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator | def function[print_doc1, parameter[]]:
constant[Print the first paragraph of the docstring of the decorated function.
The paragraph will be printed as a oneliner.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
Examples:
# >>> @print_doc1
# ... def foo():
# ... """First line of docstring.
# ...
# ... another line.
# ... """
# ... pass
# ...
# >>> foo()
# [34mFirst line of docstring[0m
# >>> @print_doc1
# ... def foo():
# ... """First paragraph of docstring which contains more than one
# ... line.
# ...
# ... Another paragraph.
# ... """
# ... pass
# ...
# >>> foo()
# [34mFirst paragraph of docstring which contains more than one line[0m
]
variable[color] assign[=] call[name[kwargs].get, parameter[constant[color], name[blue]]]
variable[bold] assign[=] call[name[kwargs].get, parameter[constant[bold], constant[False]]]
variable[prefix] assign[=] call[name[kwargs].get, parameter[constant[prefix], constant[]]]
variable[tail] assign[=] call[name[kwargs].get, parameter[constant[tail], constant[
]]]
def function[real_decorator, parameter[func]]:
constant[real decorator function]
def function[wrapper, parameter[]]:
constant[the wrapper function]
<ast.Try object at 0x7da204623ee0>
return[call[name[func], parameter[<ast.Starred object at 0x7da1b10d56c0>]]]
return[name[wrapper]]
variable[invoked] assign[=] call[name[bool], parameter[<ast.BoolOp object at 0x7da20e961f60>]]
if <ast.UnaryOp object at 0x7da20e961390> begin[:]
return[call[name[real_decorator], parameter[]]]
return[name[real_decorator]] | keyword[def] identifier[print_doc1] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[color] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[blue] )
identifier[bold] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] )
identifier[prefix] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[tail] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
keyword[def] identifier[real_decorator] ( identifier[func] ):
literal[string]
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
identifier[prgf] = identifier[first_paragraph] ( identifier[func] . identifier[__doc__] )
identifier[print] ( identifier[color] ( identifier[prefix] + identifier[prgf] + identifier[tail] , identifier[bold] ))
keyword[except] identifier[AttributeError] keyword[as] identifier[exc] :
identifier[name] = identifier[func] . identifier[__name__]
identifier[print] ( identifier[red] ( identifier[flo] ( literal[string] )))
keyword[raise] ( identifier[exc] )
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper]
identifier[invoked] = identifier[bool] ( keyword[not] identifier[args] keyword[or] identifier[kwargs] )
keyword[if] keyword[not] identifier[invoked] :
keyword[return] identifier[real_decorator] ( identifier[func] = identifier[args] [ literal[int] ])
keyword[return] identifier[real_decorator] | def print_doc1(*args, **kwargs):
'''Print the first paragraph of the docstring of the decorated function.
The paragraph will be printed as a oneliner.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, ``prefix`` of ``tail``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
Examples:
# >>> @print_doc1
# ... def foo():
# ... """First line of docstring.
# ...
# ... another line.
# ... """
# ... pass
# ...
# >>> foo()
# \x1b[34mFirst line of docstring\x1b[0m
# >>> @print_doc1
# ... def foo():
# ... """First paragraph of docstring which contains more than one
# ... line.
# ...
# ... Another paragraph.
# ... """
# ... pass
# ...
# >>> foo()
# \x1b[34mFirst paragraph of docstring which contains more than one line\x1b[0m
'''
# output settings from kwargs or take defaults
color = kwargs.get('color', blue)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '\n')
def real_decorator(func):
"""real decorator function"""
@wraps(func)
def wrapper(*args, **kwargs):
"""the wrapper function"""
try:
prgf = first_paragraph(func.__doc__)
print(color(prefix + prgf + tail, bold)) # depends on [control=['try'], data=[]]
except AttributeError as exc:
name = func.__name__
print(red(flo('{name}() has no docstring')))
raise exc # depends on [control=['except'], data=['exc']]
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0]) # depends on [control=['if'], data=[]]
return real_decorator |
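
The `invoked` check at the bottom of `print_doc1` is what lets one decorator serve both the bare `@print_doc1` form and the parameterized `@print_doc1(...)` form. A self-contained sketch of that dual-mode pattern, reduced to printing the first docstring line (simplified; not the module's actual helpers):

```python
import functools

def announce(*args, **kwargs):
    """Dual-mode decorator: usable bare or with keyword arguments."""
    prefix = kwargs.get('prefix', '')

    def real_decorator(func):
        @functools.wraps(func)
        def wrapper(*a, **kw):
            doc = (func.__doc__ or '').strip()
            print(prefix + (doc.splitlines()[0] if doc else func.__name__))
            return func(*a, **kw)
        return wrapper

    if args and not kwargs:           # bare use: @announce
        return real_decorator(args[0])
    return real_decorator             # parameterized use: @announce(prefix=...)

@announce
def foo():
    """First line of foo."""

@announce(prefix='>> ')
def bar():
    """First line of bar."""

foo()  # First line of foo.
bar()  # >> First line of bar.
```
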
def _add_months(self, date, months):
"""
Add ``months`` months to ``date``.
Unfortunately we can't use timedeltas to add months because timedelta counts in days
and there's no foolproof way to add N months in days without counting the number of
days per month.
"""
year = date.year + (date.month + months - 1) // 12
month = (date.month + months - 1) % 12 + 1
return datetime.date(year=year, month=month, day=1) | def function[_add_months, parameter[self, date, months]]:
constant[
Add ``months`` months to ``date``.
Unfortunately we can't use timedeltas to add months because timedelta counts in days
and there's no foolproof way to add N months in days without counting the number of
days per month.
]
variable[year] assign[=] binary_operation[name[date].year + binary_operation[binary_operation[binary_operation[name[date].month + name[months]] - constant[1]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[12]]]
variable[month] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[date].month + name[months]] - constant[1]] <ast.Mod object at 0x7da2590d6920> constant[12]] + constant[1]]
return[call[name[datetime].date, parameter[]]] | keyword[def] identifier[_add_months] ( identifier[self] , identifier[date] , identifier[months] ):
literal[string]
identifier[year] = identifier[date] . identifier[year] +( identifier[date] . identifier[month] + identifier[months] - literal[int] )// literal[int]
identifier[month] =( identifier[date] . identifier[month] + identifier[months] - literal[int] )% literal[int] + literal[int]
keyword[return] identifier[datetime] . identifier[date] ( identifier[year] = identifier[year] , identifier[month] = identifier[month] , identifier[day] = literal[int] ) | def _add_months(self, date, months):
"""
Add ``months`` months to ``date``.
Unfortunately we can't use timedeltas to add months because timedelta counts in days
and there's no foolproof way to add N months in days without counting the number of
days per month.
"""
year = date.year + (date.month + months - 1) // 12
month = (date.month + months - 1) % 12 + 1
return datetime.date(year=year, month=month, day=1) |
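
The carry arithmetic in `_add_months` is worth checking against a positive and a negative offset; Python's floor division and modulo make the negative case work out. A quick worked sketch with the same formula:

```python
import datetime

def add_months(date, months):
    # Same arithmetic as _add_months: fold whole years out of the month sum.
    year = date.year + (date.month + months - 1) // 12
    month = (date.month + months - 1) % 12 + 1
    return datetime.date(year=year, month=month, day=1)

print(add_months(datetime.date(2015, 11, 17), 3))  # 2016-02-01
print(add_months(datetime.date(2015, 1, 1), -1))   # 2014-12-01
```
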
def p_moduleComplianceClause(self, p):
"""moduleComplianceClause : LOWERCASE_IDENTIFIER MODULE_COMPLIANCE STATUS Status DESCRIPTION Text ReferPart ComplianceModulePart COLON_COLON_EQUAL '{' objectIdentifier '}'"""
p[0] = ('moduleComplianceClause',
p[1], # id
# p[2], # MODULE_COMPLIANCE
p[4], # status
(p[5], p[6]), # description
p[7], # reference
p[8], # ComplianceModules
p[11]) | def function[p_moduleComplianceClause, parameter[self, p]]:
constant[moduleComplianceClause : LOWERCASE_IDENTIFIER MODULE_COMPLIANCE STATUS Status DESCRIPTION Text ReferPart ComplianceModulePart COLON_COLON_EQUAL '{' objectIdentifier '}']
call[name[p]][constant[0]] assign[=] tuple[[<ast.Constant object at 0x7da1b01087c0>, <ast.Subscript object at 0x7da1b01082b0>, <ast.Subscript object at 0x7da1b01087f0>, <ast.Tuple object at 0x7da1b0108910>, <ast.Subscript object at 0x7da1b0108340>, <ast.Subscript object at 0x7da1b01097e0>, <ast.Subscript object at 0x7da1b010bcd0>]] | keyword[def] identifier[p_moduleComplianceClause] ( identifier[self] , identifier[p] ):
literal[string]
identifier[p] [ literal[int] ]=( literal[string] ,
identifier[p] [ literal[int] ],
identifier[p] [ literal[int] ],
( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ]),
identifier[p] [ literal[int] ],
identifier[p] [ literal[int] ],
identifier[p] [ literal[int] ]) | def p_moduleComplianceClause(self, p):
"""moduleComplianceClause : LOWERCASE_IDENTIFIER MODULE_COMPLIANCE STATUS Status DESCRIPTION Text ReferPart ComplianceModulePart COLON_COLON_EQUAL '{' objectIdentifier '}'""" # id
# p[2], # MODULE_COMPLIANCE
# status
# description
# reference
# ComplianceModules
p[0] = ('moduleComplianceClause', p[1], p[4], (p[5], p[6]), p[7], p[8], p[11]) |
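
In PLY, each `p_*` function's docstring is the grammar production and `p[n]` indexes the matched symbols, with `p[0]` receiving the result, exactly the convention used above. A tiny self-contained example (requires the third-party `ply` package; the toy grammar is illustrative only):

```python
import ply.lex as lex
import ply.yacc as yacc

tokens = ('NUMBER', 'PLUS')
t_PLUS = r'\+'
t_ignore = ' '

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)

def p_expr_plus(p):
    """expr : expr PLUS NUMBER"""
    p[0] = p[1] + p[3]  # p[2] is the '+' token itself

def p_expr_number(p):
    """expr : NUMBER"""
    p[0] = p[1]

def p_error(p):
    pass

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse('1 + 2 + 3'))  # 6
```
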
def detect(self, sampfrom=0, sampto='end', learn=True, verbose=True):
"""
Detect qrs locations between two samples.
Parameters
----------
sampfrom : int, optional
The starting sample number to run the detection on.
sampto : int, optional
The final sample number to run the detection on. Set as
'end' to run on the entire signal.
learn : bool, optional
Whether to apply learning on the signal before running the
main detection. If learning fails or is not conducted, the
default configuration parameters will be used to initialize
these variables. See the `XQRS._learn_init_params` docstring
for details.
verbose : bool, optional
Whether to display the stages and outcomes of the detection
process.
"""
if sampfrom < 0:
raise ValueError("'sampfrom' cannot be negative")
self.sampfrom = sampfrom
if sampto == 'end':
sampto = self.sig_len
elif sampto > self.sig_len:
raise ValueError("'sampto' cannot exceed the signal length")
self.sampto = sampto
self.verbose = verbose
# Don't attempt to run on a flat signal
if np.max(self.sig) == np.min(self.sig):
self.qrs_inds = np.empty(0)
if self.verbose:
print('Flat signal. Detection skipped.')
return
# Get/set signal configuration fields from Conf object
self._set_conf()
# Bandpass filter the signal
self._bandpass()
# Compute moving wave integration of filtered signal
self._mwi()
# Initialize the running parameters
if learn:
self._learn_init_params()
else:
self._set_default_init_params()
# Run the detection
self._run_detection() | def function[detect, parameter[self, sampfrom, sampto, learn, verbose]]:
constant[
Detect qrs locations between two samples.
Parameters
----------
sampfrom : int, optional
The starting sample number to run the detection on.
sampto : int, optional
The final sample number to run the detection on. Set as
'end' to run on the entire signal.
learn : bool, optional
Whether to apply learning on the signal before running the
main detection. If learning fails or is not conducted, the
default configuration parameters will be used to initialize
these variables. See the `XQRS._learn_init_params` docstring
for details.
verbose : bool, optional
Whether to display the stages and outcomes of the detection
process.
]
if compare[name[sampfrom] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da1b18a3b20>
name[self].sampfrom assign[=] name[sampfrom]
if compare[name[sampto] equal[==] constant[end]] begin[:]
variable[sampto] assign[=] name[self].sig_len
name[self].sampto assign[=] name[sampto]
name[self].verbose assign[=] name[verbose]
if compare[call[name[np].max, parameter[name[self].sig]] equal[==] call[name[np].min, parameter[name[self].sig]]] begin[:]
name[self].qrs_inds assign[=] call[name[np].empty, parameter[constant[0]]]
if name[self].verbose begin[:]
call[name[print], parameter[constant[Flat signal. Detection skipped.]]]
return[None]
call[name[self]._set_conf, parameter[]]
call[name[self]._bandpass, parameter[]]
call[name[self]._mwi, parameter[]]
if name[learn] begin[:]
call[name[self]._learn_init_params, parameter[]]
call[name[self]._run_detection, parameter[]] | keyword[def] identifier[detect] ( identifier[self] , identifier[sampfrom] = literal[int] , identifier[sampto] = literal[string] , identifier[learn] = keyword[True] , identifier[verbose] = keyword[True] ):
literal[string]
keyword[if] identifier[sampfrom] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[sampfrom] = identifier[sampfrom]
keyword[if] identifier[sampto] == literal[string] :
identifier[sampto] = identifier[self] . identifier[sig_len]
keyword[elif] identifier[sampto] > identifier[self] . identifier[sig_len] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[sampto] = identifier[sampto]
identifier[self] . identifier[verbose] = identifier[verbose]
keyword[if] identifier[np] . identifier[max] ( identifier[self] . identifier[sig] )== identifier[np] . identifier[min] ( identifier[self] . identifier[sig] ):
identifier[self] . identifier[qrs_inds] = identifier[np] . identifier[empty] ( literal[int] )
keyword[if] identifier[self] . identifier[verbose] :
identifier[print] ( literal[string] )
keyword[return]
identifier[self] . identifier[_set_conf] ()
identifier[self] . identifier[_bandpass] ()
identifier[self] . identifier[_mwi] ()
keyword[if] identifier[learn] :
identifier[self] . identifier[_learn_init_params] ()
keyword[else] :
identifier[self] . identifier[_set_default_init_params] ()
identifier[self] . identifier[_run_detection] () | def detect(self, sampfrom=0, sampto='end', learn=True, verbose=True):
"""
Detect qrs locations between two samples.
Parameters
----------
sampfrom : int, optional
The starting sample number to run the detection on.
sampto : int, optional
The final sample number to run the detection on. Set as
'end' to run on the entire signal.
learn : bool, optional
Whether to apply learning on the signal before running the
main detection. If learning fails or is not conducted, the
default configuration parameters will be used to initialize
these variables. See the `XQRS._learn_init_params` docstring
for details.
verbose : bool, optional
Whether to display the stages and outcomes of the detection
process.
"""
if sampfrom < 0:
raise ValueError("'sampfrom' cannot be negative") # depends on [control=['if'], data=[]]
self.sampfrom = sampfrom
if sampto == 'end':
sampto = self.sig_len # depends on [control=['if'], data=['sampto']]
elif sampto > self.sig_len:
raise ValueError("'sampto' cannot exceed the signal length") # depends on [control=['if'], data=[]]
self.sampto = sampto
self.verbose = verbose
# Don't attempt to run on a flat signal
if np.max(self.sig) == np.min(self.sig):
self.qrs_inds = np.empty(0)
if self.verbose:
print('Flat signal. Detection skipped.') # depends on [control=['if'], data=[]]
return # depends on [control=['if'], data=[]]
# Get/set signal configuration fields from Conf object
self._set_conf()
# Bandpass filter the signal
self._bandpass()
# Compute moving wave integration of filtered signal
self._mwi()
# Initialize the running parameters
if learn:
self._learn_init_params() # depends on [control=['if'], data=[]]
else:
self._set_default_init_params()
# Run the detection
self._run_detection() |
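
A minimal usage sketch for the detect method in this row, assuming it belongs to a QRS-detector class such as wfdb.processing.XQRS (whose signature matches); the sampling rate and the synthetic signal below are illustrative stand-ins, not real ECG data.

import numpy as np
from wfdb.processing import XQRS  # assumption: this row comes from the wfdb package

fs = 250                                 # hypothetical sampling frequency in Hz
t = np.arange(10 * fs)
sig = 0.1 * np.sin(2 * np.pi * t / fs)   # baseline wander
sig[::fs] += 1.0                         # one sharp spike per second stands in for QRS

xqrs = XQRS(sig=sig, fs=fs)
xqrs.detect(sampfrom=0, sampto='end', learn=True, verbose=False)
print(xqrs.qrs_inds)                     # sample indices of detected beats
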
def rrandom():
"""Get the next random number in the range [0.0, 1.0].
Returns a float."""
import urllib.request
import urllib.error
import urllib.parse
if checkquota() < 1:
raise Exception("Your www.random.org quota has already run out.")
request = urllib.request.Request(
'http://www.random.org/integers/?num=1&min=0&max=1000000000&col=1&base=10&format=plain&rnd=new')
request.add_header('User-Agent', 'randomwrapy/0.1 very alpha')
opener = urllib.request.build_opener()
    numlist = opener.open(request).read().decode('ascii')  # response body is bytes; decode before parsing
num = numlist.split()[0]
return float(num) / 1000000000 | def function[rrandom, parameter[]]:
constant[Get the next random number in the range [0.0, 1.0].
Returns a float.]
import module[urllib.request]
import module[urllib.error]
import module[urllib.parse]
if compare[call[name[checkquota], parameter[]] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da1b2461cc0>
variable[request] assign[=] call[name[urllib].request.Request, parameter[constant[http://www.random.org/integers/?num=1&min=0&max=1000000000&col=1&base=10&format=plain&rnd=new]]]
call[name[request].add_header, parameter[constant[User-Agent], constant[randomwrapy/0.1 very alpha]]]
variable[opener] assign[=] call[name[urllib].request.build_opener, parameter[]]
variable[numlist] assign[=] call[call[name[opener].open, parameter[name[request]]].read, parameter[]]
variable[num] assign[=] call[call[name[numlist].split, parameter[]]][constant[0]]
return[binary_operation[call[name[float], parameter[name[num]]] / constant[1000000000]]] | keyword[def] identifier[rrandom] ():
literal[string]
keyword[import] identifier[urllib] . identifier[request]
keyword[import] identifier[urllib] . identifier[error]
keyword[import] identifier[urllib] . identifier[parse]
keyword[if] identifier[checkquota] ()< literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[request] = identifier[urllib] . identifier[request] . identifier[Request] (
literal[string] )
identifier[request] . identifier[add_header] ( literal[string] , literal[string] )
identifier[opener] = identifier[urllib] . identifier[request] . identifier[build_opener] ()
identifier[numlist] = identifier[opener] . identifier[open] ( identifier[request] ). identifier[read] ()
identifier[num] = identifier[numlist] . identifier[split] ()[ literal[int] ]
keyword[return] identifier[float] ( identifier[num] )/ literal[int] | def rrandom():
"""Get the next random number in the range [0.0, 1.0].
Returns a float."""
import urllib.request
import urllib.error
import urllib.parse
if checkquota() < 1:
raise Exception('Your www.random.org quota has already run out.') # depends on [control=['if'], data=[]]
request = urllib.request.Request('http://www.random.org/integers/?num=1&min=0&max=1000000000&col=1&base=10&format=plain&rnd=new')
request.add_header('User-Agent', 'randomwrapy/0.1 very alpha')
opener = urllib.request.build_opener()
    numlist = opener.open(request).read().decode('ascii')  # response body is bytes; decode before parsing
num = numlist.split()[0]
return float(num) / 1000000000 |
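
A hedged caller sketch: rrandom and checkquota are the functions from this row; falling back to the local PRNG on failure is purely an assumption about how a consumer might cope with quota exhaustion or network errors.

import random

def rrandom_or_local():
    try:
        return rrandom()          # true randomness from www.random.org
    except Exception:
        return random.random()    # graceful local fallback (assumption)

print(rrandom_or_local())
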
def explain_prediction_lightning(estimator, doc, vec=None, top=None,
target_names=None, targets=None,
feature_names=None, vectorized=False,
coef_scale=None):
""" Return an explanation of a lightning estimator predictions """
return explain_weights_lightning_not_supported(estimator, doc) | def function[explain_prediction_lightning, parameter[estimator, doc, vec, top, target_names, targets, feature_names, vectorized, coef_scale]]:
constant[ Return an explanation of a lightning estimator predictions ]
return[call[name[explain_weights_lightning_not_supported], parameter[name[estimator], name[doc]]]] | keyword[def] identifier[explain_prediction_lightning] ( identifier[estimator] , identifier[doc] , identifier[vec] = keyword[None] , identifier[top] = keyword[None] ,
identifier[target_names] = keyword[None] , identifier[targets] = keyword[None] ,
identifier[feature_names] = keyword[None] , identifier[vectorized] = keyword[False] ,
identifier[coef_scale] = keyword[None] ):
literal[string]
keyword[return] identifier[explain_weights_lightning_not_supported] ( identifier[estimator] , identifier[doc] ) | def explain_prediction_lightning(estimator, doc, vec=None, top=None, target_names=None, targets=None, feature_names=None, vectorized=False, coef_scale=None):
""" Return an explanation of a lightning estimator predictions """
return explain_weights_lightning_not_supported(estimator, doc) |
def AND(queryArr,
exclude = None):
"""
create a combined query with multiple items on which to perform an AND operation
    @param queryArr: a list of items on which to perform an AND operation. Items can be either CombinedQuery or BaseQuery instances.
    @param exclude: an instance of BaseQuery, CombinedQuery or None. Used to filter out results matching the other criteria specified in this query
"""
    assert isinstance(queryArr, list), "provided argument is not a list"
    assert len(queryArr) > 0, "queryArr is an empty list"
q = CombinedQuery()
q.setQueryParam("$and", [])
for item in queryArr:
assert isinstance(item, (CombinedQuery, BaseQuery)), "item in the list was not a CombinedQuery or BaseQuery instance"
q.getQuery()["$and"].append(item.getQuery())
if exclude != None:
assert isinstance(exclude, (CombinedQuery, BaseQuery)), "exclude parameter was not a CombinedQuery or BaseQuery instance"
q.setQueryParam("$not", exclude.getQuery())
return q | def function[AND, parameter[queryArr, exclude]]:
constant[
create a combined query with multiple items on which to perform an AND operation
    @param queryArr: a list of items on which to perform an AND operation. Items can be either CombinedQuery or BaseQuery instances.
    @param exclude: an instance of BaseQuery, CombinedQuery or None. Used to filter out results matching the other criteria specified in this query
]
assert[call[name[isinstance], parameter[name[queryArr], name[list]]]]
assert[compare[call[name[len], parameter[name[queryArr]]] greater[>] constant[0]]]
variable[q] assign[=] call[name[CombinedQuery], parameter[]]
call[name[q].setQueryParam, parameter[constant[$and], list[[]]]]
for taget[name[item]] in starred[name[queryArr]] begin[:]
assert[call[name[isinstance], parameter[name[item], tuple[[<ast.Name object at 0x7da18fe93f10>, <ast.Name object at 0x7da18fe930a0>]]]]]
call[call[call[name[q].getQuery, parameter[]]][constant[$and]].append, parameter[call[name[item].getQuery, parameter[]]]]
if compare[name[exclude] not_equal[!=] constant[None]] begin[:]
assert[call[name[isinstance], parameter[name[exclude], tuple[[<ast.Name object at 0x7da18fe90430>, <ast.Name object at 0x7da18fe91360>]]]]]
call[name[q].setQueryParam, parameter[constant[$not], call[name[exclude].getQuery, parameter[]]]]
return[name[q]] | keyword[def] identifier[AND] ( identifier[queryArr] ,
identifier[exclude] = keyword[None] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[queryArr] , identifier[list] ), literal[string]
keyword[assert] identifier[len] ( identifier[queryArr] )> literal[int] , literal[string]
identifier[q] = identifier[CombinedQuery] ()
identifier[q] . identifier[setQueryParam] ( literal[string] ,[])
keyword[for] identifier[item] keyword[in] identifier[queryArr] :
keyword[assert] identifier[isinstance] ( identifier[item] ,( identifier[CombinedQuery] , identifier[BaseQuery] )), literal[string]
identifier[q] . identifier[getQuery] ()[ literal[string] ]. identifier[append] ( identifier[item] . identifier[getQuery] ())
keyword[if] identifier[exclude] != keyword[None] :
keyword[assert] identifier[isinstance] ( identifier[exclude] ,( identifier[CombinedQuery] , identifier[BaseQuery] )), literal[string]
identifier[q] . identifier[setQueryParam] ( literal[string] , identifier[exclude] . identifier[getQuery] ())
keyword[return] identifier[q] | def AND(queryArr, exclude=None):
"""
create a combined query with multiple items on which to perform an AND operation
    @param queryArr: a list of items on which to perform an AND operation. Items can be either CombinedQuery or BaseQuery instances.
    @param exclude: an instance of BaseQuery, CombinedQuery or None. Used to filter out results matching the other criteria specified in this query
"""
    assert isinstance(queryArr, list), 'provided argument is not a list'
    assert len(queryArr) > 0, 'queryArr is an empty list'
q = CombinedQuery()
q.setQueryParam('$and', [])
for item in queryArr:
assert isinstance(item, (CombinedQuery, BaseQuery)), 'item in the list was not a CombinedQuery or BaseQuery instance'
q.getQuery()['$and'].append(item.getQuery()) # depends on [control=['for'], data=['item']]
if exclude != None:
assert isinstance(exclude, (CombinedQuery, BaseQuery)), 'exclude parameter was not a CombinedQuery or BaseQuery instance'
q.setQueryParam('$not', exclude.getQuery()) # depends on [control=['if'], data=['exclude']]
return q |
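
A sketch of the {'$and': [...], '$not': ...} structure AND builds, using stub query classes; the real BaseQuery/CombinedQuery live in the library this row was extracted from, so everything below except AND itself is a stand-in, and AND must be defined in the same module as these stubs for its isinstance checks to pass.

class BaseQuery:
    def __init__(self, **params):
        self._q = dict(params)
    def getQuery(self):
        return self._q

class CombinedQuery(BaseQuery):
    def setQueryParam(self, key, value):
        self._q[key] = value

q = AND([BaseQuery(keyword="ai"), BaseQuery(lang="eng")],
        exclude=BaseQuery(sourceUri="example.com"))
print(q.getQuery())
# {'$and': [{'keyword': 'ai'}, {'lang': 'eng'}], '$not': {'sourceUri': 'example.com'}}
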
def validate_data_dir(data_dir):
"""
Validates all files in a data_dir
"""
all_meta, all_table, all_element, all_component = fileio.get_all_filelist(data_dir)
for f in all_meta:
full_path = os.path.join(data_dir, f)
validate_file('metadata', full_path)
for f in all_table:
full_path = os.path.join(data_dir, f)
validate_file('table', full_path)
for f in all_element:
full_path = os.path.join(data_dir, f)
validate_file('element', full_path)
for f in all_component:
full_path = os.path.join(data_dir, f)
validate_file('component', full_path) | def function[validate_data_dir, parameter[data_dir]]:
constant[
Validates all files in a data_dir
]
<ast.Tuple object at 0x7da2041d8550> assign[=] call[name[fileio].get_all_filelist, parameter[name[data_dir]]]
for taget[name[f]] in starred[name[all_meta]] begin[:]
variable[full_path] assign[=] call[name[os].path.join, parameter[name[data_dir], name[f]]]
call[name[validate_file], parameter[constant[metadata], name[full_path]]]
for taget[name[f]] in starred[name[all_table]] begin[:]
variable[full_path] assign[=] call[name[os].path.join, parameter[name[data_dir], name[f]]]
call[name[validate_file], parameter[constant[table], name[full_path]]]
for taget[name[f]] in starred[name[all_element]] begin[:]
variable[full_path] assign[=] call[name[os].path.join, parameter[name[data_dir], name[f]]]
call[name[validate_file], parameter[constant[element], name[full_path]]]
for taget[name[f]] in starred[name[all_component]] begin[:]
variable[full_path] assign[=] call[name[os].path.join, parameter[name[data_dir], name[f]]]
call[name[validate_file], parameter[constant[component], name[full_path]]] | keyword[def] identifier[validate_data_dir] ( identifier[data_dir] ):
literal[string]
identifier[all_meta] , identifier[all_table] , identifier[all_element] , identifier[all_component] = identifier[fileio] . identifier[get_all_filelist] ( identifier[data_dir] )
keyword[for] identifier[f] keyword[in] identifier[all_meta] :
identifier[full_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_dir] , identifier[f] )
identifier[validate_file] ( literal[string] , identifier[full_path] )
keyword[for] identifier[f] keyword[in] identifier[all_table] :
identifier[full_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_dir] , identifier[f] )
identifier[validate_file] ( literal[string] , identifier[full_path] )
keyword[for] identifier[f] keyword[in] identifier[all_element] :
identifier[full_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_dir] , identifier[f] )
identifier[validate_file] ( literal[string] , identifier[full_path] )
keyword[for] identifier[f] keyword[in] identifier[all_component] :
identifier[full_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_dir] , identifier[f] )
identifier[validate_file] ( literal[string] , identifier[full_path] ) | def validate_data_dir(data_dir):
"""
Validates all files in a data_dir
"""
(all_meta, all_table, all_element, all_component) = fileio.get_all_filelist(data_dir)
for f in all_meta:
full_path = os.path.join(data_dir, f)
validate_file('metadata', full_path) # depends on [control=['for'], data=['f']]
for f in all_table:
full_path = os.path.join(data_dir, f)
validate_file('table', full_path) # depends on [control=['for'], data=['f']]
for f in all_element:
full_path = os.path.join(data_dir, f)
validate_file('element', full_path) # depends on [control=['for'], data=['f']]
for f in all_component:
full_path = os.path.join(data_dir, f)
validate_file('component', full_path) # depends on [control=['for'], data=['f']] |
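
A usage sketch; the directory path is hypothetical, and fileio/validate_file are assumed to come from the same package as the row above.

import os

data_dir = os.path.join(os.getcwd(), 'data')  # hypothetical layout
validate_data_dir(data_dir)                   # raises on the first invalid file
print('all metadata, table, element and component files validated')
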
def list(self, **params):
"""
Retrieve all deals
Returns all deals available to the user according to the parameters provided
:calls: ``get /deals``
:param dict params: (optional) Search options.
        :return: List of dictionaries that support attribute-style access, which represent a collection of Deals.
:rtype: list
"""
_, _, deals = self.http_client.get("/deals", params=params)
for deal in deals:
deal['value'] = Coercion.to_decimal(deal['value'])
return deals | def function[list, parameter[self]]:
constant[
Retrieve all deals
Returns all deals available to the user according to the parameters provided
:calls: ``get /deals``
:param dict params: (optional) Search options.
    :return: List of dictionaries that support attribute-style access, which represent a collection of Deals.
:rtype: list
]
<ast.Tuple object at 0x7da18f811780> assign[=] call[name[self].http_client.get, parameter[constant[/deals]]]
for taget[name[deal]] in starred[name[deals]] begin[:]
call[name[deal]][constant[value]] assign[=] call[name[Coercion].to_decimal, parameter[call[name[deal]][constant[value]]]]
return[name[deals]] | keyword[def] identifier[list] ( identifier[self] ,** identifier[params] ):
literal[string]
identifier[_] , identifier[_] , identifier[deals] = identifier[self] . identifier[http_client] . identifier[get] ( literal[string] , identifier[params] = identifier[params] )
keyword[for] identifier[deal] keyword[in] identifier[deals] :
identifier[deal] [ literal[string] ]= identifier[Coercion] . identifier[to_decimal] ( identifier[deal] [ literal[string] ])
keyword[return] identifier[deals] | def list(self, **params):
"""
Retrieve all deals
Returns all deals available to the user according to the parameters provided
:calls: ``get /deals``
:param dict params: (optional) Search options.
    :return: List of dictionaries that support attribute-style access, which represent a collection of Deals.
:rtype: list
"""
(_, _, deals) = self.http_client.get('/deals', params=params)
for deal in deals:
deal['value'] = Coercion.to_decimal(deal['value']) # depends on [control=['for'], data=['deal']]
return deals |
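
A hedged usage sketch: the surrounding service object and its http_client are assumed to come from a CRM client library, and 'client' plus the param names below are illustrative only.

deals = client.deals.list(page=1, per_page=25)  # 'client' is a hypothetical configured instance
for deal in deals:
    # 'value' has been coerced to Decimal by the method above; 'id' is assumed present
    print(deal['id'], deal['value'])
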
def parser(key = "default"):
"""Returns the parser for the given key, (e.g. 'ssh')"""
#Make sure we have a parser for that key. If we don't, then set
#one up if we know what parameters to use; otherwise return the
#default parser.
if key not in _parsers:
if key == "ssh":
_parsers["ssh"] = CodeParser(True, False)
else:
key = "default"
return _parsers[key] | def function[parser, parameter[key]]:
constant[Returns the parser for the given key, (e.g. 'ssh')]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[_parsers]] begin[:]
if compare[name[key] equal[==] constant[ssh]] begin[:]
call[name[_parsers]][constant[ssh]] assign[=] call[name[CodeParser], parameter[constant[True], constant[False]]]
return[call[name[_parsers]][name[key]]] | keyword[def] identifier[parser] ( identifier[key] = literal[string] ):
literal[string]
keyword[if] identifier[key] keyword[not] keyword[in] identifier[_parsers] :
keyword[if] identifier[key] == literal[string] :
identifier[_parsers] [ literal[string] ]= identifier[CodeParser] ( keyword[True] , keyword[False] )
keyword[else] :
identifier[key] = literal[string]
keyword[return] identifier[_parsers] [ identifier[key] ] | def parser(key='default'):
"""Returns the parser for the given key, (e.g. 'ssh')"""
#Make sure we have a parser for that key. If we don't, then set
#one up if we know what parameters to use; otherwise return the
#default parser.
if key not in _parsers:
if key == 'ssh':
_parsers['ssh'] = CodeParser(True, False) # depends on [control=['if'], data=[]]
else:
key = 'default' # depends on [control=['if'], data=['key', '_parsers']]
return _parsers[key] |
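
The registry above lazily builds an 'ssh' parser and silently falls back to 'default' for unknown keys, so the following holds (assuming _parsers is pre-seeded with a 'default' entry):

ssh_parser = parser('ssh')             # constructed on first use: CodeParser(True, False)
assert parser('ssh') is ssh_parser     # cached on subsequent calls
assert parser('bogus') is parser('default')  # unknown keys fall back to the default
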
def __check_file_status(self, index):
"""Check if file has been changed in any way outside Spyder:
1. removed, moved or renamed outside Spyder
2. modified outside Spyder"""
if self.__file_status_flag:
# Avoid infinite loop: when the QMessageBox.question pops, it
# gets focus and then give it back to the CodeEditor instance,
# triggering a refresh cycle which calls this method
return
self.__file_status_flag = True
finfo = self.data[index]
name = osp.basename(finfo.filename)
if finfo.newly_created:
# File was just created (not yet saved): do nothing
# (do not return because of the clean-up at the end of the method)
pass
elif not osp.isfile(finfo.filename):
# File doesn't exist (removed, moved or offline):
self.msgbox = QMessageBox(
QMessageBox.Warning,
self.title,
_("<b>%s</b> is unavailable "
"(this file may have been removed, moved "
"or renamed outside Spyder)."
"<br>Do you want to close it?") % name,
QMessageBox.Yes | QMessageBox.No,
self)
answer = self.msgbox.exec_()
if answer == QMessageBox.Yes:
self.close_file(index)
else:
finfo.newly_created = True
finfo.editor.document().setModified(True)
self.modification_changed(index=index)
else:
# Else, testing if it has been modified elsewhere:
lastm = QFileInfo(finfo.filename).lastModified()
if to_text_string(lastm.toString()) \
!= to_text_string(finfo.lastmodified.toString()):
if finfo.editor.document().isModified():
self.msgbox = QMessageBox(
QMessageBox.Question,
self.title,
_("<b>%s</b> has been modified outside Spyder."
"<br>Do you want to reload it and lose all "
"your changes?") % name,
QMessageBox.Yes | QMessageBox.No,
self)
answer = self.msgbox.exec_()
if answer == QMessageBox.Yes:
self.reload(index)
else:
finfo.lastmodified = lastm
else:
self.reload(index)
# Finally, resetting temporary flag:
self.__file_status_flag = False | def function[__check_file_status, parameter[self, index]]:
constant[Check if file has been changed in any way outside Spyder:
1. removed, moved or renamed outside Spyder
2. modified outside Spyder]
if name[self].__file_status_flag begin[:]
return[None]
name[self].__file_status_flag assign[=] constant[True]
variable[finfo] assign[=] call[name[self].data][name[index]]
variable[name] assign[=] call[name[osp].basename, parameter[name[finfo].filename]]
if name[finfo].newly_created begin[:]
pass
name[self].__file_status_flag assign[=] constant[False] | keyword[def] identifier[__check_file_status] ( identifier[self] , identifier[index] ):
literal[string]
keyword[if] identifier[self] . identifier[__file_status_flag] :
keyword[return]
identifier[self] . identifier[__file_status_flag] = keyword[True]
identifier[finfo] = identifier[self] . identifier[data] [ identifier[index] ]
identifier[name] = identifier[osp] . identifier[basename] ( identifier[finfo] . identifier[filename] )
keyword[if] identifier[finfo] . identifier[newly_created] :
keyword[pass]
keyword[elif] keyword[not] identifier[osp] . identifier[isfile] ( identifier[finfo] . identifier[filename] ):
identifier[self] . identifier[msgbox] = identifier[QMessageBox] (
identifier[QMessageBox] . identifier[Warning] ,
identifier[self] . identifier[title] ,
identifier[_] ( literal[string]
literal[string]
literal[string]
literal[string] )% identifier[name] ,
identifier[QMessageBox] . identifier[Yes] | identifier[QMessageBox] . identifier[No] ,
identifier[self] )
identifier[answer] = identifier[self] . identifier[msgbox] . identifier[exec_] ()
keyword[if] identifier[answer] == identifier[QMessageBox] . identifier[Yes] :
identifier[self] . identifier[close_file] ( identifier[index] )
keyword[else] :
identifier[finfo] . identifier[newly_created] = keyword[True]
identifier[finfo] . identifier[editor] . identifier[document] (). identifier[setModified] ( keyword[True] )
identifier[self] . identifier[modification_changed] ( identifier[index] = identifier[index] )
keyword[else] :
identifier[lastm] = identifier[QFileInfo] ( identifier[finfo] . identifier[filename] ). identifier[lastModified] ()
keyword[if] identifier[to_text_string] ( identifier[lastm] . identifier[toString] ())!= identifier[to_text_string] ( identifier[finfo] . identifier[lastmodified] . identifier[toString] ()):
keyword[if] identifier[finfo] . identifier[editor] . identifier[document] (). identifier[isModified] ():
identifier[self] . identifier[msgbox] = identifier[QMessageBox] (
identifier[QMessageBox] . identifier[Question] ,
identifier[self] . identifier[title] ,
identifier[_] ( literal[string]
literal[string]
literal[string] )% identifier[name] ,
identifier[QMessageBox] . identifier[Yes] | identifier[QMessageBox] . identifier[No] ,
identifier[self] )
identifier[answer] = identifier[self] . identifier[msgbox] . identifier[exec_] ()
keyword[if] identifier[answer] == identifier[QMessageBox] . identifier[Yes] :
identifier[self] . identifier[reload] ( identifier[index] )
keyword[else] :
identifier[finfo] . identifier[lastmodified] = identifier[lastm]
keyword[else] :
identifier[self] . identifier[reload] ( identifier[index] )
identifier[self] . identifier[__file_status_flag] = keyword[False] | def __check_file_status(self, index):
"""Check if file has been changed in any way outside Spyder:
1. removed, moved or renamed outside Spyder
2. modified outside Spyder"""
if self.__file_status_flag: # Avoid infinite loop: when the QMessageBox.question pops, it
# gets focus and then give it back to the CodeEditor instance,
# triggering a refresh cycle which calls this method
return # depends on [control=['if'], data=[]]
self.__file_status_flag = True
finfo = self.data[index]
name = osp.basename(finfo.filename)
if finfo.newly_created: # File was just created (not yet saved): do nothing
# (do not return because of the clean-up at the end of the method)
pass # depends on [control=['if'], data=[]]
elif not osp.isfile(finfo.filename): # File doesn't exist (removed, moved or offline):
self.msgbox = QMessageBox(QMessageBox.Warning, self.title, _('<b>%s</b> is unavailable (this file may have been removed, moved or renamed outside Spyder).<br>Do you want to close it?') % name, QMessageBox.Yes | QMessageBox.No, self)
answer = self.msgbox.exec_()
if answer == QMessageBox.Yes:
self.close_file(index) # depends on [control=['if'], data=[]]
else:
finfo.newly_created = True
finfo.editor.document().setModified(True)
self.modification_changed(index=index) # depends on [control=['if'], data=[]]
else: # Else, testing if it has been modified elsewhere:
lastm = QFileInfo(finfo.filename).lastModified()
if to_text_string(lastm.toString()) != to_text_string(finfo.lastmodified.toString()):
if finfo.editor.document().isModified():
self.msgbox = QMessageBox(QMessageBox.Question, self.title, _('<b>%s</b> has been modified outside Spyder.<br>Do you want to reload it and lose all your changes?') % name, QMessageBox.Yes | QMessageBox.No, self)
answer = self.msgbox.exec_()
if answer == QMessageBox.Yes:
self.reload(index) # depends on [control=['if'], data=[]]
else:
finfo.lastmodified = lastm # depends on [control=['if'], data=[]]
else:
self.reload(index) # depends on [control=['if'], data=[]] # Finally, resetting temporary flag:
self.__file_status_flag = False |
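
The re-entrancy guard in that method is worth isolating: a dialog opened during the check hands focus back to the editor, which would re-trigger the check. A distilled, framework-free sketch of the pattern (not Spyder's actual class; the try/finally is an added safety measure):

import os

class ExternalChangeChecker:
    def __init__(self):
        self._checking = False

    def check(self, path, last_mtime):
        if self._checking:            # re-entered via a focus bounce: bail out
            return last_mtime
        self._checking = True
        try:
            mtime = os.path.getmtime(path)
            if mtime != last_mtime:
                print(path, 'was modified outside the editor')
            return mtime
        finally:
            self._checking = False
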
def close(self, code=None):
'''return a `close` :class:`Frame`.
'''
code = code or 1000
body = pack('!H', code)
body += self._close_codes.get(code, '').encode('utf-8')
return self.encode(body, opcode=0x8) | def function[close, parameter[self, code]]:
constant[return a `close` :class:`Frame`.
]
variable[code] assign[=] <ast.BoolOp object at 0x7da18f58e860>
variable[body] assign[=] call[name[pack], parameter[constant[!H], name[code]]]
<ast.AugAssign object at 0x7da18f58f7f0>
return[call[name[self].encode, parameter[name[body]]]] | keyword[def] identifier[close] ( identifier[self] , identifier[code] = keyword[None] ):
literal[string]
identifier[code] = identifier[code] keyword[or] literal[int]
identifier[body] = identifier[pack] ( literal[string] , identifier[code] )
identifier[body] += identifier[self] . identifier[_close_codes] . identifier[get] ( identifier[code] , literal[string] ). identifier[encode] ( literal[string] )
keyword[return] identifier[self] . identifier[encode] ( identifier[body] , identifier[opcode] = literal[int] ) | def close(self, code=None):
"""return a `close` :class:`Frame`.
"""
code = code or 1000
body = pack('!H', code)
body += self._close_codes.get(code, '').encode('utf-8')
return self.encode(body, opcode=8) |
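
What pack('!H', code) produces: per RFC 6455, the close payload starts with a 2-byte big-endian status code, optionally followed by a UTF-8 reason. The reason text below is illustrative; in the row above it comes from self._close_codes.

from struct import pack

body = pack('!H', 1000) + 'normal closure'.encode('utf-8')
print(body)  # b'\x03\xe8normal closure' (0x03E8 == 1000)
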
def actualize (self, scanner = None):
""" Generates all the actual targets and sets up build actions for
this target.
If 'scanner' is specified, creates an additional target
with the same location as actual target, which will depend on the
actual target and be associated with 'scanner'. That additional
target is returned. See the docs (#dependency_scanning) for rationale.
Target must correspond to a file if 'scanner' is specified.
If scanner is not specified, then actual target is returned.
"""
if __debug__:
from .scanner import Scanner
assert scanner is None or isinstance(scanner, Scanner)
actual_name = self.actualize_no_scanner ()
if self.always_:
bjam.call("ALWAYS", actual_name)
if not scanner:
return actual_name
else:
# Add the scanner instance to the grist for name.
g = '-'.join ([ungrist(get_grist(actual_name)), str(id(scanner))])
name = replace_grist (actual_name, '<' + g + '>')
if name not in self.made_:
self.made_ [name] = True
self.project_.manager ().engine ().add_dependency (name, actual_name)
self.actualize_location (name)
self.project_.manager ().scanners ().install (scanner, name, str (self))
return name | def function[actualize, parameter[self, scanner]]:
constant[ Generates all the actual targets and sets up build actions for
this target.
If 'scanner' is specified, creates an additional target
with the same location as actual target, which will depend on the
actual target and be associated with 'scanner'. That additional
target is returned. See the docs (#dependency_scanning) for rationale.
Target must correspond to a file if 'scanner' is specified.
If scanner is not specified, then actual target is returned.
]
if name[__debug__] begin[:]
from relative_module[scanner] import module[Scanner]
assert[<ast.BoolOp object at 0x7da1b1f8f8b0>]
variable[actual_name] assign[=] call[name[self].actualize_no_scanner, parameter[]]
if name[self].always_ begin[:]
call[name[bjam].call, parameter[constant[ALWAYS], name[actual_name]]]
if <ast.UnaryOp object at 0x7da1b1f8da50> begin[:]
return[name[actual_name]] | keyword[def] identifier[actualize] ( identifier[self] , identifier[scanner] = keyword[None] ):
literal[string]
keyword[if] identifier[__debug__] :
keyword[from] . identifier[scanner] keyword[import] identifier[Scanner]
keyword[assert] identifier[scanner] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[scanner] , identifier[Scanner] )
identifier[actual_name] = identifier[self] . identifier[actualize_no_scanner] ()
keyword[if] identifier[self] . identifier[always_] :
identifier[bjam] . identifier[call] ( literal[string] , identifier[actual_name] )
keyword[if] keyword[not] identifier[scanner] :
keyword[return] identifier[actual_name]
keyword[else] :
identifier[g] = literal[string] . identifier[join] ([ identifier[ungrist] ( identifier[get_grist] ( identifier[actual_name] )), identifier[str] ( identifier[id] ( identifier[scanner] ))])
identifier[name] = identifier[replace_grist] ( identifier[actual_name] , literal[string] + identifier[g] + literal[string] )
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[made_] :
identifier[self] . identifier[made_] [ identifier[name] ]= keyword[True]
identifier[self] . identifier[project_] . identifier[manager] (). identifier[engine] (). identifier[add_dependency] ( identifier[name] , identifier[actual_name] )
identifier[self] . identifier[actualize_location] ( identifier[name] )
identifier[self] . identifier[project_] . identifier[manager] (). identifier[scanners] (). identifier[install] ( identifier[scanner] , identifier[name] , identifier[str] ( identifier[self] ))
keyword[return] identifier[name] | def actualize(self, scanner=None):
""" Generates all the actual targets and sets up build actions for
this target.
If 'scanner' is specified, creates an additional target
with the same location as actual target, which will depend on the
actual target and be associated with 'scanner'. That additional
target is returned. See the docs (#dependency_scanning) for rationale.
Target must correspond to a file if 'scanner' is specified.
If scanner is not specified, then actual target is returned.
"""
if __debug__:
from .scanner import Scanner
assert scanner is None or isinstance(scanner, Scanner) # depends on [control=['if'], data=[]]
actual_name = self.actualize_no_scanner()
if self.always_:
bjam.call('ALWAYS', actual_name) # depends on [control=['if'], data=[]]
if not scanner:
return actual_name # depends on [control=['if'], data=[]]
else:
# Add the scanner instance to the grist for name.
g = '-'.join([ungrist(get_grist(actual_name)), str(id(scanner))])
name = replace_grist(actual_name, '<' + g + '>')
if name not in self.made_:
self.made_[name] = True
self.project_.manager().engine().add_dependency(name, actual_name)
self.actualize_location(name)
self.project_.manager().scanners().install(scanner, name, str(self)) # depends on [control=['if'], data=['name']]
return name |
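
The grist arithmetic above ('<grist>rest-of-name' manipulation) with stand-in helpers; get_grist, ungrist and replace_grist here are simplified re-implementations for illustration, not the b2 originals.

def get_grist(name):                 # '<g>rest' -> '<g>'
    return name[:name.index('>') + 1] if name.startswith('<') else ''

def ungrist(grist):                  # '<g>' -> 'g'
    return grist[1:-1]

def replace_grist(name, new_grist):  # swap the leading '<...>' for a new one
    return new_grist + name[len(get_grist(name)):]

actual_name = '<p1>bin/gcc/debug/main.o'
g = '-'.join([ungrist(get_grist(actual_name)), str(id(object()))])
print(replace_grist(actual_name, '<' + g + '>'))  # e.g. '<p1-14038...>bin/gcc/debug/main.o'
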
def parse_number_from_substring(substring) -> Optional[float]:
'''
Returns the number in the expected string "N:12.3", where "N" is the
key, and "12.3" is a floating point value
For the temp-deck or thermocycler's temperature response, one expected
input is something like "T:none", where "none" should return a None value
'''
try:
value = substring.split(':')[1]
if value.strip().lower() == 'none':
return None
return round(float(value), GCODE_ROUNDING_PRECISION)
except (ValueError, IndexError, TypeError, AttributeError):
log.exception('Unexpected argument to parse_number_from_substring:')
raise ParseError(
'Unexpected argument to parse_number_from_substring: {}'.format(
substring)) | def function[parse_number_from_substring, parameter[substring]]:
constant[
Returns the number in the expected string "N:12.3", where "N" is the
key, and "12.3" is a floating point value
For the temp-deck or thermocycler's temperature response, one expected
input is something like "T:none", where "none" should return a None value
]
<ast.Try object at 0x7da204962d40> | keyword[def] identifier[parse_number_from_substring] ( identifier[substring] )-> identifier[Optional] [ identifier[float] ]:
literal[string]
keyword[try] :
identifier[value] = identifier[substring] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[if] identifier[value] . identifier[strip] (). identifier[lower] ()== literal[string] :
keyword[return] keyword[None]
keyword[return] identifier[round] ( identifier[float] ( identifier[value] ), identifier[GCODE_ROUNDING_PRECISION] )
keyword[except] ( identifier[ValueError] , identifier[IndexError] , identifier[TypeError] , identifier[AttributeError] ):
identifier[log] . identifier[exception] ( literal[string] )
keyword[raise] identifier[ParseError] (
literal[string] . identifier[format] (
identifier[substring] )) | def parse_number_from_substring(substring) -> Optional[float]:
"""
Returns the number in the expected string "N:12.3", where "N" is the
key, and "12.3" is a floating point value
For the temp-deck or thermocycler's temperature response, one expected
input is something like "T:none", where "none" should return a None value
"""
try:
value = substring.split(':')[1]
if value.strip().lower() == 'none':
return None # depends on [control=['if'], data=[]]
return round(float(value), GCODE_ROUNDING_PRECISION) # depends on [control=['try'], data=[]]
except (ValueError, IndexError, TypeError, AttributeError):
log.exception('Unexpected argument to parse_number_from_substring:')
raise ParseError('Unexpected argument to parse_number_from_substring: {}'.format(substring)) # depends on [control=['except'], data=[]] |
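
Behaviour sketch for the parser above (assumes log, ParseError and GCODE_ROUNDING_PRECISION, here taken as 2, are defined in the same module):

print(parse_number_from_substring('T:86.521'))  # -> 86.52 (rounded)
print(parse_number_from_substring('T:none'))    # -> None
try:
    parse_number_from_substring('garbage')      # no ':' -> IndexError -> ParseError
except ParseError:
    print('ParseError raised, as documented')
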
def slaves(self, name):
"""Returns a list of slaves for ``name``."""
fut = self.execute(b'SLAVES', name, encoding='utf-8')
return wait_convert(fut, parse_sentinel_slaves_and_sentinels) | def function[slaves, parameter[self, name]]:
constant[Returns a list of slaves for ``name``.]
variable[fut] assign[=] call[name[self].execute, parameter[constant[b'SLAVES'], name[name]]]
return[call[name[wait_convert], parameter[name[fut], name[parse_sentinel_slaves_and_sentinels]]]] | keyword[def] identifier[slaves] ( identifier[self] , identifier[name] ):
literal[string]
identifier[fut] = identifier[self] . identifier[execute] ( literal[string] , identifier[name] , identifier[encoding] = literal[string] )
keyword[return] identifier[wait_convert] ( identifier[fut] , identifier[parse_sentinel_slaves_and_sentinels] ) | def slaves(self, name):
"""Returns a list of slaves for ``name``."""
fut = self.execute(b'SLAVES', name, encoding='utf-8')
return wait_convert(fut, parse_sentinel_slaves_and_sentinels) |
def connect_async(self, connection_id, connection_string, callback, retries=4, context=None):
"""Connect to a device by its connection_string
This function asynchronously connects to a device by its BLE address + address type passed in the
connection_string parameter and calls callback when finished. Callback is called on either success
or failure with the signature:
callback(connection_id: int, result: bool, value: None)
The optional retries argument specifies how many times we should retry the connection
if the connection fails due to an early disconnect. Early disconnects are expected ble failure
modes in busy environments where the slave device misses the connection packet and the master
therefore fails immediately. Retrying a few times should succeed in this case.
Args:
connection_string (string): A BLE address information in AA:BB:CC:DD:EE:FF,<address_type> format
connection_id (int): A unique integer set by the caller for referring to this connection once created
callback (callable): A callback function called when the connection has succeeded or failed
retries (int): The number of attempts to connect to this device that can end in early disconnect
before we give up and report that we could not connect. A retry count of 0 will mean that
we fail as soon as we receive the first early disconnect.
			context (dict): If we are retrying to connect, the previous attempt's context is passed in so the retry is not treated as a new connection.
"""
if context is None:
# It is the first attempt to connect: begin a new connection
context = {
'connection_id': connection_id,
'retries': retries,
'retry_connect': False,
'connection_string': connection_string,
'connect_time': time.time(),
'callback': callback
}
self.connections.begin_connection(
connection_id,
connection_string,
callback,
context,
self.get_config('default_timeout')
)
# Don't scan while we attempt to connect to this device
if self.scanning:
self.stop_scan()
address, address_type = connection_string.split(',')
# First, cancel any pending connection to prevent errors when starting a new one
self.bable.cancel_connection(sync=False)
# Send a connect request
self.bable.connect(
address=address,
address_type=address_type,
connection_interval=[7.5, 7.5],
on_connected=[self._on_connection_finished, context],
on_disconnected=[self._on_unexpected_disconnection, context]
) | def function[connect_async, parameter[self, connection_id, connection_string, callback, retries, context]]:
constant[Connect to a device by its connection_string
This function asynchronously connects to a device by its BLE address + address type passed in the
connection_string parameter and calls callback when finished. Callback is called on either success
or failure with the signature:
callback(connection_id: int, result: bool, value: None)
The optional retries argument specifies how many times we should retry the connection
if the connection fails due to an early disconnect. Early disconnects are expected ble failure
modes in busy environments where the slave device misses the connection packet and the master
therefore fails immediately. Retrying a few times should succeed in this case.
Args:
connection_string (string): A BLE address information in AA:BB:CC:DD:EE:FF,<address_type> format
connection_id (int): A unique integer set by the caller for referring to this connection once created
callback (callable): A callback function called when the connection has succeeded or failed
retries (int): The number of attempts to connect to this device that can end in early disconnect
before we give up and report that we could not connect. A retry count of 0 will mean that
we fail as soon as we receive the first early disconnect.
        context (dict): If we are retrying to connect, the previous attempt's context is passed in so the retry is not treated as a new connection.
]
if compare[name[context] is constant[None]] begin[:]
variable[context] assign[=] dictionary[[<ast.Constant object at 0x7da20ec062c0>, <ast.Constant object at 0x7da20ec05c90>, <ast.Constant object at 0x7da20ec06230>, <ast.Constant object at 0x7da20ec049a0>, <ast.Constant object at 0x7da204621690>, <ast.Constant object at 0x7da204621b40>], [<ast.Name object at 0x7da204621420>, <ast.Name object at 0x7da204621cf0>, <ast.Constant object at 0x7da204621fc0>, <ast.Name object at 0x7da204621810>, <ast.Call object at 0x7da204620160>, <ast.Name object at 0x7da204622ec0>]]
call[name[self].connections.begin_connection, parameter[name[connection_id], name[connection_string], name[callback], name[context], call[name[self].get_config, parameter[constant[default_timeout]]]]]
if name[self].scanning begin[:]
call[name[self].stop_scan, parameter[]]
<ast.Tuple object at 0x7da20c993100> assign[=] call[name[connection_string].split, parameter[constant[,]]]
call[name[self].bable.cancel_connection, parameter[]]
call[name[self].bable.connect, parameter[]] | keyword[def] identifier[connect_async] ( identifier[self] , identifier[connection_id] , identifier[connection_string] , identifier[callback] , identifier[retries] = literal[int] , identifier[context] = keyword[None] ):
literal[string]
keyword[if] identifier[context] keyword[is] keyword[None] :
identifier[context] ={
literal[string] : identifier[connection_id] ,
literal[string] : identifier[retries] ,
literal[string] : keyword[False] ,
literal[string] : identifier[connection_string] ,
literal[string] : identifier[time] . identifier[time] (),
literal[string] : identifier[callback]
}
identifier[self] . identifier[connections] . identifier[begin_connection] (
identifier[connection_id] ,
identifier[connection_string] ,
identifier[callback] ,
identifier[context] ,
identifier[self] . identifier[get_config] ( literal[string] )
)
keyword[if] identifier[self] . identifier[scanning] :
identifier[self] . identifier[stop_scan] ()
identifier[address] , identifier[address_type] = identifier[connection_string] . identifier[split] ( literal[string] )
identifier[self] . identifier[bable] . identifier[cancel_connection] ( identifier[sync] = keyword[False] )
identifier[self] . identifier[bable] . identifier[connect] (
identifier[address] = identifier[address] ,
identifier[address_type] = identifier[address_type] ,
identifier[connection_interval] =[ literal[int] , literal[int] ],
identifier[on_connected] =[ identifier[self] . identifier[_on_connection_finished] , identifier[context] ],
identifier[on_disconnected] =[ identifier[self] . identifier[_on_unexpected_disconnection] , identifier[context] ]
) | def connect_async(self, connection_id, connection_string, callback, retries=4, context=None):
"""Connect to a device by its connection_string
This function asynchronously connects to a device by its BLE address + address type passed in the
connection_string parameter and calls callback when finished. Callback is called on either success
or failure with the signature:
callback(connection_id: int, result: bool, value: None)
The optional retries argument specifies how many times we should retry the connection
if the connection fails due to an early disconnect. Early disconnects are expected ble failure
modes in busy environments where the slave device misses the connection packet and the master
therefore fails immediately. Retrying a few times should succeed in this case.
Args:
connection_string (string): A BLE address information in AA:BB:CC:DD:EE:FF,<address_type> format
connection_id (int): A unique integer set by the caller for referring to this connection once created
callback (callable): A callback function called when the connection has succeeded or failed
retries (int): The number of attempts to connect to this device that can end in early disconnect
before we give up and report that we could not connect. A retry count of 0 will mean that
we fail as soon as we receive the first early disconnect.
        context (dict): If we are retrying to connect, the previous attempt's context is passed in so the retry is not treated as a new connection.
"""
if context is None:
# It is the first attempt to connect: begin a new connection
context = {'connection_id': connection_id, 'retries': retries, 'retry_connect': False, 'connection_string': connection_string, 'connect_time': time.time(), 'callback': callback}
self.connections.begin_connection(connection_id, connection_string, callback, context, self.get_config('default_timeout')) # depends on [control=['if'], data=['context']]
# Don't scan while we attempt to connect to this device
if self.scanning:
self.stop_scan() # depends on [control=['if'], data=[]]
(address, address_type) = connection_string.split(',')
# First, cancel any pending connection to prevent errors when starting a new one
self.bable.cancel_connection(sync=False)
# Send a connect request
self.bable.connect(address=address, address_type=address_type, connection_interval=[7.5, 7.5], on_connected=[self._on_connection_finished, context], on_disconnected=[self._on_unexpected_disconnection, context]) |
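
A hedged caller sketch: 'adapter' stands in for whatever object owns connect_async, and the address is made up; the callback signature is the one given in the docstring above.

def on_connected(connection_id, result, value):
    status = 'up' if result else 'failed'
    print('connection %d is %s' % (connection_id, status))

adapter.connect_async(
    connection_id=1,
    connection_string='00:11:22:33:44:55,random',  # BLE address + address type
    callback=on_connected,
    retries=4,
)
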
def TENSES(self):
""" Yields a list of tenses for this language, excluding negations.
Each tense is a (tense, person, number, mood, aspect)-tuple.
"""
a = set(TENSES[id] for id in self._format)
a = a.union(set(TENSES[id] for id in self._default.keys()))
a = a.union(set(TENSES[id] for id in self._default.values()))
a = sorted(x[:-2] for x in a if x[-2] is False) # Exclude negation.
return a | def function[TENSES, parameter[self]]:
    constant[ Returns a list of tenses for this language, excluding negations.
Each tense is a (tense, person, number, mood, aspect)-tuple.
]
variable[a] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da20c6e5000>]]
variable[a] assign[=] call[name[a].union, parameter[call[name[set], parameter[<ast.GeneratorExp object at 0x7da20c6e7550>]]]]
variable[a] assign[=] call[name[a].union, parameter[call[name[set], parameter[<ast.GeneratorExp object at 0x7da20c6e67a0>]]]]
variable[a] assign[=] call[name[sorted], parameter[<ast.GeneratorExp object at 0x7da1b26afdf0>]]
return[name[a]] | keyword[def] identifier[TENSES] ( identifier[self] ):
literal[string]
identifier[a] = identifier[set] ( identifier[TENSES] [ identifier[id] ] keyword[for] identifier[id] keyword[in] identifier[self] . identifier[_format] )
identifier[a] = identifier[a] . identifier[union] ( identifier[set] ( identifier[TENSES] [ identifier[id] ] keyword[for] identifier[id] keyword[in] identifier[self] . identifier[_default] . identifier[keys] ()))
identifier[a] = identifier[a] . identifier[union] ( identifier[set] ( identifier[TENSES] [ identifier[id] ] keyword[for] identifier[id] keyword[in] identifier[self] . identifier[_default] . identifier[values] ()))
identifier[a] = identifier[sorted] ( identifier[x] [:- literal[int] ] keyword[for] identifier[x] keyword[in] identifier[a] keyword[if] identifier[x] [- literal[int] ] keyword[is] keyword[False] )
keyword[return] identifier[a] | def TENSES(self):
""" Yields a list of tenses for this language, excluding negations.
Each tense is a (tense, person, number, mood, aspect)-tuple.
"""
a = set((TENSES[id] for id in self._format))
a = a.union(set((TENSES[id] for id in self._default.keys())))
a = a.union(set((TENSES[id] for id in self._default.values())))
a = sorted((x[:-2] for x in a if x[-2] is False)) # Exclude negation.
return a |
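
The dedupe-strip-sort idiom in that property, shown on plain tuples; the 7-field layout (last two fields: negation flag plus one trailing field) is inferred from the slicing above, not taken from the real TENSES table.

entries = [
    ('present', 1, 'sg', 'indicative', 'imperfective', False, '-'),
    ('past',    3, 'pl', 'indicative', 'imperfective', True,  '-'),   # negated: dropped
    ('present', 1, 'sg', 'indicative', 'imperfective', False, '-'),   # duplicate: merged
]
tenses = sorted(x[:-2] for x in set(entries) if x[-2] is False)
print(tenses)  # [('present', 1, 'sg', 'indicative', 'imperfective')]
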
def fix(csvfile):
    '''Apply a fix (i.e. remove plain names)'''
header('Apply fixes from {}', csvfile.name)
bads = []
reader = csv.reader(csvfile)
    next(reader)  # Skip header
for id, _, sources, dests in reader:
advice = Advice.objects.get(id=id)
sources = [s.strip() for s in sources.split(',') if s.strip()]
dests = [d.strip() for d in dests.split(',') if d.strip()]
if not len(sources) == len(dests):
bads.append(id)
continue
for source, dest in zip(sources, dests):
echo('{0}: Replace {1} with {2}', white(id), white(source), white(dest))
advice.subject = advice.subject.replace(source, dest)
advice.content = advice.content.replace(source, dest)
advice.save()
index(advice)
for id in bads:
echo('{0}: Replacements length not matching', white(id))
success('Done') | def function[fix, parameter[csvfile]]:
    constant[Apply a fix (i.e. remove plain names)]
call[name[header], parameter[constant[Apply fixes from {}], name[csvfile].name]]
variable[bads] assign[=] list[[]]
variable[reader] assign[=] call[name[csv].reader, parameter[name[csvfile]]]
call[name[reader].next, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0b388b0>, <ast.Name object at 0x7da1b0b39ea0>, <ast.Name object at 0x7da1b0b3b640>, <ast.Name object at 0x7da1b0b3a590>]]] in starred[name[reader]] begin[:]
variable[advice] assign[=] call[name[Advice].objects.get, parameter[]]
variable[sources] assign[=] <ast.ListComp object at 0x7da1b0b391b0>
variable[dests] assign[=] <ast.ListComp object at 0x7da1b0b3aef0>
if <ast.UnaryOp object at 0x7da1b0b3b010> begin[:]
call[name[bads].append, parameter[name[id]]]
continue
for taget[tuple[[<ast.Name object at 0x7da1b0b3b4c0>, <ast.Name object at 0x7da1b0b38100>]]] in starred[call[name[zip], parameter[name[sources], name[dests]]]] begin[:]
call[name[echo], parameter[constant[{0}: Replace {1} with {2}], call[name[white], parameter[name[id]]], call[name[white], parameter[name[source]]], call[name[white], parameter[name[dest]]]]]
name[advice].subject assign[=] call[name[advice].subject.replace, parameter[name[source], name[dest]]]
name[advice].content assign[=] call[name[advice].content.replace, parameter[name[source], name[dest]]]
call[name[advice].save, parameter[]]
call[name[index], parameter[name[advice]]]
for taget[name[id]] in starred[name[bads]] begin[:]
call[name[echo], parameter[constant[{0}: Replacements length not matching], call[name[white], parameter[name[id]]]]]
call[name[success], parameter[constant[Done]]] | keyword[def] identifier[fix] ( identifier[csvfile] ):
literal[string]
identifier[header] ( literal[string] , identifier[csvfile] . identifier[name] )
identifier[bads] =[]
identifier[reader] = identifier[csv] . identifier[reader] ( identifier[csvfile] )
identifier[reader] . identifier[next] ()
keyword[for] identifier[id] , identifier[_] , identifier[sources] , identifier[dests] keyword[in] identifier[reader] :
identifier[advice] = identifier[Advice] . identifier[objects] . identifier[get] ( identifier[id] = identifier[id] )
identifier[sources] =[ identifier[s] . identifier[strip] () keyword[for] identifier[s] keyword[in] identifier[sources] . identifier[split] ( literal[string] ) keyword[if] identifier[s] . identifier[strip] ()]
identifier[dests] =[ identifier[d] . identifier[strip] () keyword[for] identifier[d] keyword[in] identifier[dests] . identifier[split] ( literal[string] ) keyword[if] identifier[d] . identifier[strip] ()]
keyword[if] keyword[not] identifier[len] ( identifier[sources] )== identifier[len] ( identifier[dests] ):
identifier[bads] . identifier[append] ( identifier[id] )
keyword[continue]
keyword[for] identifier[source] , identifier[dest] keyword[in] identifier[zip] ( identifier[sources] , identifier[dests] ):
identifier[echo] ( literal[string] , identifier[white] ( identifier[id] ), identifier[white] ( identifier[source] ), identifier[white] ( identifier[dest] ))
identifier[advice] . identifier[subject] = identifier[advice] . identifier[subject] . identifier[replace] ( identifier[source] , identifier[dest] )
identifier[advice] . identifier[content] = identifier[advice] . identifier[content] . identifier[replace] ( identifier[source] , identifier[dest] )
identifier[advice] . identifier[save] ()
identifier[index] ( identifier[advice] )
keyword[for] identifier[id] keyword[in] identifier[bads] :
identifier[echo] ( literal[string] , identifier[white] ( identifier[id] ))
identifier[success] ( literal[string] ) | def fix(csvfile):
"""Apply a fix (ie. remove plain names)"""
header('Apply fixes from {}', csvfile.name)
bads = []
reader = csv.reader(csvfile)
    next(reader)  # Skip header
for (id, _, sources, dests) in reader:
advice = Advice.objects.get(id=id)
sources = [s.strip() for s in sources.split(',') if s.strip()]
dests = [d.strip() for d in dests.split(',') if d.strip()]
if not len(sources) == len(dests):
bads.append(id)
continue # depends on [control=['if'], data=[]]
for (source, dest) in zip(sources, dests):
echo('{0}: Replace {1} with {2}', white(id), white(source), white(dest))
advice.subject = advice.subject.replace(source, dest)
advice.content = advice.content.replace(source, dest) # depends on [control=['for'], data=[]]
advice.save()
index(advice) # depends on [control=['for'], data=[]]
for id in bads:
echo('{0}: Replacements length not matching', white(id)) # depends on [control=['for'], data=['id']]
success('Done') |
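
Shape of the CSV the command expects: a header row (skipped), then id, an ignored column, comma-separated source strings, and an equally long comma-separated list of replacements. A self-contained sketch that only builds such a file; fix itself needs the Advice model and a database, so the call is left commented out, and the names below are hypothetical.

import csv
import io

buf = io.StringIO()
writer = csv.writer(buf)
writer.writerow(['id', 'subject', 'sources', 'dests'])         # header, skipped by fix()
writer.writerow(['42', 'ignored', 'Jean Dupont, Marie Curie',  # hypothetical names
                 '<NAME1>, <NAME2>'])
buf.seek(0)
# fix(buf)  # would replace each source with its replacement in advice 42
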
def configure_plugin(app): # noqa: C901
"""
This is a factory function that configures all the routes for
flask given a particular library.
"""
@app.route(
"/v1/api/client_has_addon/<hashed_client_id>/<addon_id>/", methods=["GET"]
)
def client_has_addon(hashed_client_id, addon_id):
# Use the module global PROXY_MANAGER
global PROXY_MANAGER
recommendation_manager = check_proxy_manager(PROXY_MANAGER)
pf = recommendation_manager._ctx["profile_fetcher"]
client_meta = pf.get(hashed_client_id)
if client_meta is None:
# no valid client metadata was found for the given
# clientId
result = {"results": False, 'error': 'No client found'}
response = app.response_class(
response=json.dumps(result), status=200, mimetype="application/json"
)
return response
result = {"results": addon_id in client_meta.get("installed_addons", [])}
response = app.response_class(
response=json.dumps(result), status=200, mimetype="application/json"
)
return response
@app.route("/v1/api/recommendations/<hashed_client_id>/", methods=["GET", "POST"])
def recommendations(hashed_client_id):
"""Return a list of recommendations provided a telemetry client_id."""
# Use the module global PROXY_MANAGER
global PROXY_MANAGER
extra_data = {}
extra_data["options"] = {}
extra_data["options"]["promoted"] = []
try:
if request.method == "POST":
json_data = request.data
# At least Python3.5 returns request.data as bytes
# type instead of a string type.
# Both Python2.7 and Python3.7 return a string type
if type(json_data) == bytes:
json_data = json_data.decode("utf8")
if json_data != "":
post_data = json.loads(json_data)
raw_promoted_guids = post_data.get("options", {}).get(
"promoted", []
)
promoted_guids = clean_promoted_guids(raw_promoted_guids)
extra_data["options"]["promoted"] = promoted_guids
except Exception as e:
jdata = {}
jdata["results"] = []
jdata["error"] = "Invalid JSON in POST: {}".format(e)
            return app.response_class(
                response=json.dumps(jdata), status=400, mimetype="application/json"
            )
# Coerce the uuid.UUID type into a string
client_id = str(hashed_client_id)
locale = request.args.get("locale", None)
if locale is not None:
extra_data["locale"] = locale
platform = request.args.get("platform", None)
if platform is not None:
extra_data["platform"] = platform
recommendation_manager = check_proxy_manager(PROXY_MANAGER)
recommendations = recommendation_manager.recommend(
client_id=client_id, limit=TAAR_MAX_RESULTS, extra_data=extra_data
)
promoted_guids = extra_data.get("options", {}).get("promoted", [])
recommendations = merge_promoted_guids(promoted_guids, recommendations)
# Strip out weights from TAAR results to maintain compatibility
# with TAAR 1.0
jdata = {"results": [x[0] for x in recommendations]}
response = app.response_class(
response=json.dumps(jdata), status=200, mimetype="application/json"
)
return response
def check_proxy_manager(PROXY_MANAGER):
if PROXY_MANAGER.getResource() is None:
ctx = default_context()
profile_fetcher = ProfileFetcher(ctx)
ctx["profile_fetcher"] = profile_fetcher
# Lock the context down after we've got basic bits installed
root_ctx = ctx.child()
r_factory = recommenders.RecommenderFactory(root_ctx)
root_ctx["recommender_factory"] = r_factory
instance = recommenders.RecommendationManager(root_ctx.child())
PROXY_MANAGER.setResource(instance)
return PROXY_MANAGER.getResource()
class MyPlugin:
def set(self, config_options):
"""
This setter is primarily so that we can instrument the
cached RecommendationManager implementation under test.
All plugins should implement this set method to enable
overwriting configuration options with a TAAR library.
"""
global PROXY_MANAGER
if "PROXY_RESOURCE" in config_options:
PROXY_MANAGER._resource = config_options["PROXY_RESOURCE"]
return MyPlugin() | def function[configure_plugin, parameter[app]]:
constant[
This is a factory function that configures all the routes for
flask given a particular library.
]
def function[client_has_addon, parameter[hashed_client_id, addon_id]]:
<ast.Global object at 0x7da1b0ea0340>
variable[recommendation_manager] assign[=] call[name[check_proxy_manager], parameter[name[PROXY_MANAGER]]]
variable[pf] assign[=] call[name[recommendation_manager]._ctx][constant[profile_fetcher]]
variable[client_meta] assign[=] call[name[pf].get, parameter[name[hashed_client_id]]]
if compare[name[client_meta] is constant[None]] begin[:]
variable[result] assign[=] dictionary[[<ast.Constant object at 0x7da1b0ea3d00>, <ast.Constant object at 0x7da1b0ea3d90>], [<ast.Constant object at 0x7da1b0ea3a30>, <ast.Constant object at 0x7da1b0ea3c10>]]
variable[response] assign[=] call[name[app].response_class, parameter[]]
return[name[response]]
variable[result] assign[=] dictionary[[<ast.Constant object at 0x7da1b0ea3b80>], [<ast.Compare object at 0x7da1b0ea1a50>]]
variable[response] assign[=] call[name[app].response_class, parameter[]]
return[name[response]]
def function[recommendations, parameter[hashed_client_id]]:
constant[Return a list of recommendations provided a telemetry client_id.]
<ast.Global object at 0x7da1b0ea1f00>
variable[extra_data] assign[=] dictionary[[], []]
call[name[extra_data]][constant[options]] assign[=] dictionary[[], []]
call[call[name[extra_data]][constant[options]]][constant[promoted]] assign[=] list[[]]
<ast.Try object at 0x7da1b0effb80>
variable[client_id] assign[=] call[name[str], parameter[name[hashed_client_id]]]
variable[locale] assign[=] call[name[request].args.get, parameter[constant[locale], constant[None]]]
if compare[name[locale] is_not constant[None]] begin[:]
call[name[extra_data]][constant[locale]] assign[=] name[locale]
variable[platform] assign[=] call[name[request].args.get, parameter[constant[platform], constant[None]]]
if compare[name[platform] is_not constant[None]] begin[:]
call[name[extra_data]][constant[platform]] assign[=] name[platform]
variable[recommendation_manager] assign[=] call[name[check_proxy_manager], parameter[name[PROXY_MANAGER]]]
variable[recommendations] assign[=] call[name[recommendation_manager].recommend, parameter[]]
variable[promoted_guids] assign[=] call[call[name[extra_data].get, parameter[constant[options], dictionary[[], []]]].get, parameter[constant[promoted], list[[]]]]
variable[recommendations] assign[=] call[name[merge_promoted_guids], parameter[name[promoted_guids], name[recommendations]]]
variable[jdata] assign[=] dictionary[[<ast.Constant object at 0x7da1b0c0d4b0>], [<ast.ListComp object at 0x7da1b0c0e380>]]
variable[response] assign[=] call[name[app].response_class, parameter[]]
return[name[response]]
def function[check_proxy_manager, parameter[PROXY_MANAGER]]:
if compare[call[name[PROXY_MANAGER].getResource, parameter[]] is constant[None]] begin[:]
variable[ctx] assign[=] call[name[default_context], parameter[]]
variable[profile_fetcher] assign[=] call[name[ProfileFetcher], parameter[name[ctx]]]
call[name[ctx]][constant[profile_fetcher]] assign[=] name[profile_fetcher]
variable[root_ctx] assign[=] call[name[ctx].child, parameter[]]
variable[r_factory] assign[=] call[name[recommenders].RecommenderFactory, parameter[name[root_ctx]]]
call[name[root_ctx]][constant[recommender_factory]] assign[=] name[r_factory]
variable[instance] assign[=] call[name[recommenders].RecommendationManager, parameter[call[name[root_ctx].child, parameter[]]]]
call[name[PROXY_MANAGER].setResource, parameter[name[instance]]]
return[call[name[PROXY_MANAGER].getResource, parameter[]]]
class class[MyPlugin, parameter[]] begin[:]
def function[set, parameter[self, config_options]]:
constant[
This setter is primarily so that we can instrument the
cached RecommendationManager implementation under test.
All plugins should implement this set method to enable
overwriting configuration options with a TAAR library.
]
<ast.Global object at 0x7da1b0c0e230>
if compare[constant[PROXY_RESOURCE] in name[config_options]] begin[:]
name[PROXY_MANAGER]._resource assign[=] call[name[config_options]][constant[PROXY_RESOURCE]]
return[call[name[MyPlugin], parameter[]]] | keyword[def] identifier[configure_plugin] ( identifier[app] ):
literal[string]
@ identifier[app] . identifier[route] (
literal[string] , identifier[methods] =[ literal[string] ]
)
keyword[def] identifier[client_has_addon] ( identifier[hashed_client_id] , identifier[addon_id] ):
keyword[global] identifier[PROXY_MANAGER]
identifier[recommendation_manager] = identifier[check_proxy_manager] ( identifier[PROXY_MANAGER] )
identifier[pf] = identifier[recommendation_manager] . identifier[_ctx] [ literal[string] ]
identifier[client_meta] = identifier[pf] . identifier[get] ( identifier[hashed_client_id] )
keyword[if] identifier[client_meta] keyword[is] keyword[None] :
identifier[result] ={ literal[string] : keyword[False] , literal[string] : literal[string] }
identifier[response] = identifier[app] . identifier[response_class] (
identifier[response] = identifier[json] . identifier[dumps] ( identifier[result] ), identifier[status] = literal[int] , identifier[mimetype] = literal[string]
)
keyword[return] identifier[response]
identifier[result] ={ literal[string] : identifier[addon_id] keyword[in] identifier[client_meta] . identifier[get] ( literal[string] ,[])}
identifier[response] = identifier[app] . identifier[response_class] (
identifier[response] = identifier[json] . identifier[dumps] ( identifier[result] ), identifier[status] = literal[int] , identifier[mimetype] = literal[string]
)
keyword[return] identifier[response]
@ identifier[app] . identifier[route] ( literal[string] , identifier[methods] =[ literal[string] , literal[string] ])
keyword[def] identifier[recommendations] ( identifier[hashed_client_id] ):
literal[string]
keyword[global] identifier[PROXY_MANAGER]
identifier[extra_data] ={}
identifier[extra_data] [ literal[string] ]={}
identifier[extra_data] [ literal[string] ][ literal[string] ]=[]
keyword[try] :
keyword[if] identifier[request] . identifier[method] == literal[string] :
identifier[json_data] = identifier[request] . identifier[data]
keyword[if] identifier[type] ( identifier[json_data] )== identifier[bytes] :
identifier[json_data] = identifier[json_data] . identifier[decode] ( literal[string] )
keyword[if] identifier[json_data] != literal[string] :
identifier[post_data] = identifier[json] . identifier[loads] ( identifier[json_data] )
identifier[raw_promoted_guids] = identifier[post_data] . identifier[get] ( literal[string] ,{}). identifier[get] (
literal[string] ,[]
)
identifier[promoted_guids] = identifier[clean_promoted_guids] ( identifier[raw_promoted_guids] )
identifier[extra_data] [ literal[string] ][ literal[string] ]= identifier[promoted_guids]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[jdata] ={}
identifier[jdata] [ literal[string] ]=[]
identifier[jdata] [ literal[string] ]= literal[string] . identifier[format] ( identifier[e] )
keyword[return] identifier[app] . identifier[response_class] (
            identifier[response] = identifier[json] . identifier[dumps] ( identifier[jdata] ), identifier[status] = literal[int] , identifier[mimetype] = literal[string]
)
identifier[client_id] = identifier[str] ( identifier[hashed_client_id] )
identifier[locale] = identifier[request] . identifier[args] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[locale] keyword[is] keyword[not] keyword[None] :
identifier[extra_data] [ literal[string] ]= identifier[locale]
identifier[platform] = identifier[request] . identifier[args] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[platform] keyword[is] keyword[not] keyword[None] :
identifier[extra_data] [ literal[string] ]= identifier[platform]
identifier[recommendation_manager] = identifier[check_proxy_manager] ( identifier[PROXY_MANAGER] )
identifier[recommendations] = identifier[recommendation_manager] . identifier[recommend] (
identifier[client_id] = identifier[client_id] , identifier[limit] = identifier[TAAR_MAX_RESULTS] , identifier[extra_data] = identifier[extra_data]
)
identifier[promoted_guids] = identifier[extra_data] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] ,[])
identifier[recommendations] = identifier[merge_promoted_guids] ( identifier[promoted_guids] , identifier[recommendations] )
identifier[jdata] ={ literal[string] :[ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[recommendations] ]}
identifier[response] = identifier[app] . identifier[response_class] (
identifier[response] = identifier[json] . identifier[dumps] ( identifier[jdata] ), identifier[status] = literal[int] , identifier[mimetype] = literal[string]
)
keyword[return] identifier[response]
keyword[def] identifier[check_proxy_manager] ( identifier[PROXY_MANAGER] ):
keyword[if] identifier[PROXY_MANAGER] . identifier[getResource] () keyword[is] keyword[None] :
identifier[ctx] = identifier[default_context] ()
identifier[profile_fetcher] = identifier[ProfileFetcher] ( identifier[ctx] )
identifier[ctx] [ literal[string] ]= identifier[profile_fetcher]
identifier[root_ctx] = identifier[ctx] . identifier[child] ()
identifier[r_factory] = identifier[recommenders] . identifier[RecommenderFactory] ( identifier[root_ctx] )
identifier[root_ctx] [ literal[string] ]= identifier[r_factory]
identifier[instance] = identifier[recommenders] . identifier[RecommendationManager] ( identifier[root_ctx] . identifier[child] ())
identifier[PROXY_MANAGER] . identifier[setResource] ( identifier[instance] )
keyword[return] identifier[PROXY_MANAGER] . identifier[getResource] ()
keyword[class] identifier[MyPlugin] :
keyword[def] identifier[set] ( identifier[self] , identifier[config_options] ):
literal[string]
keyword[global] identifier[PROXY_MANAGER]
keyword[if] literal[string] keyword[in] identifier[config_options] :
identifier[PROXY_MANAGER] . identifier[_resource] = identifier[config_options] [ literal[string] ]
keyword[return] identifier[MyPlugin] () | def configure_plugin(app): # noqa: C901
'\n This is a factory function that configures all the routes for\n flask given a particular library.\n '
@app.route('/v1/api/client_has_addon/<hashed_client_id>/<addon_id>/', methods=['GET'])
def client_has_addon(hashed_client_id, addon_id):
# Use the module global PROXY_MANAGER
global PROXY_MANAGER
recommendation_manager = check_proxy_manager(PROXY_MANAGER)
pf = recommendation_manager._ctx['profile_fetcher']
client_meta = pf.get(hashed_client_id)
if client_meta is None:
# no valid client metadata was found for the given
# clientId
result = {'results': False, 'error': 'No client found'}
response = app.response_class(response=json.dumps(result), status=200, mimetype='application/json')
return response # depends on [control=['if'], data=[]]
result = {'results': addon_id in client_meta.get('installed_addons', [])}
response = app.response_class(response=json.dumps(result), status=200, mimetype='application/json')
return response
@app.route('/v1/api/recommendations/<hashed_client_id>/', methods=['GET', 'POST'])
def recommendations(hashed_client_id):
"""Return a list of recommendations provided a telemetry client_id."""
# Use the module global PROXY_MANAGER
global PROXY_MANAGER
extra_data = {}
extra_data['options'] = {}
extra_data['options']['promoted'] = []
try:
if request.method == 'POST':
json_data = request.data
            # Depending on the Python version, request.data may come
            # back as bytes rather than str (observed at least on
            # Python 3.5), so decode it before parsing the JSON.
if type(json_data) == bytes:
json_data = json_data.decode('utf8') # depends on [control=['if'], data=[]]
if json_data != '':
post_data = json.loads(json_data)
raw_promoted_guids = post_data.get('options', {}).get('promoted', [])
promoted_guids = clean_promoted_guids(raw_promoted_guids)
extra_data['options']['promoted'] = promoted_guids # depends on [control=['if'], data=['json_data']] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
jdata = {}
jdata['results'] = []
jdata['error'] = 'Invalid JSON in POST: {}'.format(e)
            return app.response_class(response=json.dumps(jdata), status=400, mimetype='application/json') # depends on [control=['except'], data=['e']]
# Coerce the uuid.UUID type into a string
client_id = str(hashed_client_id)
locale = request.args.get('locale', None)
if locale is not None:
extra_data['locale'] = locale # depends on [control=['if'], data=['locale']]
platform = request.args.get('platform', None)
if platform is not None:
extra_data['platform'] = platform # depends on [control=['if'], data=['platform']]
recommendation_manager = check_proxy_manager(PROXY_MANAGER)
recommendations = recommendation_manager.recommend(client_id=client_id, limit=TAAR_MAX_RESULTS, extra_data=extra_data)
promoted_guids = extra_data.get('options', {}).get('promoted', [])
recommendations = merge_promoted_guids(promoted_guids, recommendations)
# Strip out weights from TAAR results to maintain compatibility
# with TAAR 1.0
jdata = {'results': [x[0] for x in recommendations]}
response = app.response_class(response=json.dumps(jdata), status=200, mimetype='application/json')
return response
def check_proxy_manager(PROXY_MANAGER):
if PROXY_MANAGER.getResource() is None:
ctx = default_context()
profile_fetcher = ProfileFetcher(ctx)
ctx['profile_fetcher'] = profile_fetcher
# Lock the context down after we've got basic bits installed
root_ctx = ctx.child()
r_factory = recommenders.RecommenderFactory(root_ctx)
root_ctx['recommender_factory'] = r_factory
instance = recommenders.RecommendationManager(root_ctx.child())
PROXY_MANAGER.setResource(instance) # depends on [control=['if'], data=[]]
return PROXY_MANAGER.getResource()
class MyPlugin:
def set(self, config_options):
"""
This setter is primarily so that we can instrument the
cached RecommendationManager implementation under test.
All plugins should implement this set method to enable
overwriting configuration options with a TAAR library.
"""
global PROXY_MANAGER
if 'PROXY_RESOURCE' in config_options:
PROXY_MANAGER._resource = config_options['PROXY_RESOURCE'] # depends on [control=['if'], data=['config_options']]
return MyPlugin() |
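A sketch of exercising the wiring above with Flask's test client, assuming it runs inside the taar service environment (default_context, recommenders, and PROXY_MANAGER all come from that package); the route path and the options.promoted body shape are read off the handlers themselves:

import json
from flask import Flask

app = Flask(__name__)
configure_plugin(app)  # registers the /v1/api/... routes shown above

client = app.test_client()
body = json.dumps({"options": {"promoted": [["some-guid@addon", 1]]}})
resp = client.post("/v1/api/recommendations/some-hashed-client-id/", data=body)
# On success the payload is {"results": [...]}, with promoted GUIDs merged in.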
def api_get(uri, key=None):
"""
Simple API endpoint get, return only the keys we care about
"""
response = get_json(uri)
if response:
if type(response) == list:
r = response[0]
elif type(response) == dict:
r = response
if type(r) == dict:
# Special nested value we care about
if key == USER_LOGIN:
return user_login(r)
if key in r:
return r[key] | def function[api_get, parameter[uri, key]]:
constant[
Simple API endpoint get, return only the keys we care about
]
variable[response] assign[=] call[name[get_json], parameter[name[uri]]]
if name[response] begin[:]
if compare[call[name[type], parameter[name[response]]] equal[==] name[list]] begin[:]
variable[r] assign[=] call[name[response]][constant[0]]
if compare[call[name[type], parameter[name[r]]] equal[==] name[dict]] begin[:]
if compare[name[key] equal[==] name[USER_LOGIN]] begin[:]
return[call[name[user_login], parameter[name[r]]]]
if compare[name[key] in name[r]] begin[:]
return[call[name[r]][name[key]]] | keyword[def] identifier[api_get] ( identifier[uri] , identifier[key] = keyword[None] ):
literal[string]
identifier[response] = identifier[get_json] ( identifier[uri] )
keyword[if] identifier[response] :
keyword[if] identifier[type] ( identifier[response] )== identifier[list] :
identifier[r] = identifier[response] [ literal[int] ]
keyword[elif] identifier[type] ( identifier[response] )== identifier[dict] :
identifier[r] = identifier[response]
keyword[if] identifier[type] ( identifier[r] )== identifier[dict] :
keyword[if] identifier[key] == identifier[USER_LOGIN] :
keyword[return] identifier[user_login] ( identifier[r] )
keyword[if] identifier[key] keyword[in] identifier[r] :
keyword[return] identifier[r] [ identifier[key] ] | def api_get(uri, key=None):
"""
Simple API endpoint get, return only the keys we care about
"""
response = get_json(uri)
if response:
if type(response) == list:
r = response[0] # depends on [control=['if'], data=[]]
elif type(response) == dict:
r = response # depends on [control=['if'], data=[]]
if type(r) == dict:
# Special nested value we care about
if key == USER_LOGIN:
return user_login(r) # depends on [control=['if'], data=[]]
if key in r:
return r[key] # depends on [control=['if'], data=['key', 'r']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
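A rough offline exercise of the key extraction; get_json is stubbed out here, and USER_LOGIN / user_login are assumed to be defined next to api_get in the real module:

def get_json(uri):  # illustrative stand-in for the real HTTP fetch
    return [{"id": 42, "login": "demo-user"}]

# With the stub in place, api_get unwraps the first list element and
# returns only the requested key:
print(api_get("https://api.example.test/users", key="login"))  # demo-user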
def crypto_box_beforenm(pk, sk):
"""
Computes and returns the shared key for the public key ``pk`` and the
secret key ``sk``. This can be used to speed up operations where the same
set of keys is going to be used multiple times.
:param pk: bytes
:param sk: bytes
:rtype: bytes
"""
if len(pk) != crypto_box_PUBLICKEYBYTES:
raise exc.ValueError("Invalid public key")
if len(sk) != crypto_box_SECRETKEYBYTES:
raise exc.ValueError("Invalid secret key")
k = ffi.new("unsigned char[]", crypto_box_BEFORENMBYTES)
rc = lib.crypto_box_beforenm(k, pk, sk)
ensure(rc == 0,
'Unexpected library error',
raising=exc.RuntimeError)
return ffi.buffer(k, crypto_box_BEFORENMBYTES)[:] | def function[crypto_box_beforenm, parameter[pk, sk]]:
constant[
Computes and returns the shared key for the public key ``pk`` and the
secret key ``sk``. This can be used to speed up operations where the same
set of keys is going to be used multiple times.
:param pk: bytes
:param sk: bytes
:rtype: bytes
]
if compare[call[name[len], parameter[name[pk]]] not_equal[!=] name[crypto_box_PUBLICKEYBYTES]] begin[:]
<ast.Raise object at 0x7da1b26aefb0>
if compare[call[name[len], parameter[name[sk]]] not_equal[!=] name[crypto_box_SECRETKEYBYTES]] begin[:]
<ast.Raise object at 0x7da1b26ad7b0>
variable[k] assign[=] call[name[ffi].new, parameter[constant[unsigned char[]], name[crypto_box_BEFORENMBYTES]]]
variable[rc] assign[=] call[name[lib].crypto_box_beforenm, parameter[name[k], name[pk], name[sk]]]
call[name[ensure], parameter[compare[name[rc] equal[==] constant[0]], constant[Unexpected library error]]]
return[call[call[name[ffi].buffer, parameter[name[k], name[crypto_box_BEFORENMBYTES]]]][<ast.Slice object at 0x7da20c6a8a30>]] | keyword[def] identifier[crypto_box_beforenm] ( identifier[pk] , identifier[sk] ):
literal[string]
keyword[if] identifier[len] ( identifier[pk] )!= identifier[crypto_box_PUBLICKEYBYTES] :
keyword[raise] identifier[exc] . identifier[ValueError] ( literal[string] )
keyword[if] identifier[len] ( identifier[sk] )!= identifier[crypto_box_SECRETKEYBYTES] :
keyword[raise] identifier[exc] . identifier[ValueError] ( literal[string] )
identifier[k] = identifier[ffi] . identifier[new] ( literal[string] , identifier[crypto_box_BEFORENMBYTES] )
identifier[rc] = identifier[lib] . identifier[crypto_box_beforenm] ( identifier[k] , identifier[pk] , identifier[sk] )
identifier[ensure] ( identifier[rc] == literal[int] ,
literal[string] ,
identifier[raising] = identifier[exc] . identifier[RuntimeError] )
keyword[return] identifier[ffi] . identifier[buffer] ( identifier[k] , identifier[crypto_box_BEFORENMBYTES] )[:] | def crypto_box_beforenm(pk, sk):
"""
Computes and returns the shared key for the public key ``pk`` and the
secret key ``sk``. This can be used to speed up operations where the same
set of keys is going to be used multiple times.
:param pk: bytes
:param sk: bytes
:rtype: bytes
"""
if len(pk) != crypto_box_PUBLICKEYBYTES:
raise exc.ValueError('Invalid public key') # depends on [control=['if'], data=[]]
if len(sk) != crypto_box_SECRETKEYBYTES:
raise exc.ValueError('Invalid secret key') # depends on [control=['if'], data=[]]
k = ffi.new('unsigned char[]', crypto_box_BEFORENMBYTES)
rc = lib.crypto_box_beforenm(k, pk, sk)
ensure(rc == 0, 'Unexpected library error', raising=exc.RuntimeError)
return ffi.buffer(k, crypto_box_BEFORENMBYTES)[:] |
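For context, PyNaCl exposes the same primitive through nacl.bindings; a minimal round-trip sketch (the key generation via nacl.public is an assumption about the caller's setup, not part of the function above):

from nacl.public import PrivateKey
from nacl.bindings import crypto_box_beforenm

sk_a = PrivateKey.generate()
sk_b = PrivateKey.generate()

# Each side combines its own secret key with the peer's public key and
# arrives at the same precomputed shared key.
k_ab = crypto_box_beforenm(bytes(sk_b.public_key), bytes(sk_a))
k_ba = crypto_box_beforenm(bytes(sk_a.public_key), bytes(sk_b))
assert k_ab == k_ba  # reuse this key across many box/open calls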
def compile_geometry(lat, lon, elev):
"""
Take in lists of lat and lon coordinates, and determine what geometry to create
:param list lat: Latitude values
:param list lon: Longitude values
:param float elev: Elevation value
:return dict:
"""
logger_excel.info("enter compile_geometry")
lat = _remove_geo_placeholders(lat)
lon = _remove_geo_placeholders(lon)
# 4 coordinate values
if len(lat) == 2 and len(lon) == 2:
logger_excel.info("found 4 coordinates")
geo_dict = geometry_linestring(lat, lon, elev)
# # 4 coordinate values
# if (lat[0] != lat[1]) and (lon[0] != lon[1]):
# geo_dict = geometry_polygon(lat, lon)
# # 3 unique coordinates
# else:
# geo_dict = geometry_multipoint(lat, lon)
#
# 2 coordinate values
elif len(lat) == 1 and len(lon) == 1:
logger_excel.info("found 2 coordinates")
geo_dict = geometry_point(lat, lon, elev)
# coordinate range. one value given but not the other.
elif (None in lon and None not in lat) or (len(lat) > 0 and len(lon) == 0):
geo_dict = geometry_range(lat, elev, "lat")
elif (None in lat and None not in lon) or (len(lon) > 0 and len(lat) == 0):
geo_dict = geometry_range(lat, elev, "lon")
# Too many points, or no points
else:
geo_dict = {}
logger_excel.warn("compile_geometry: invalid coordinates: lat: {}, lon: {}".format(lat, lon))
logger_excel.info("exit compile_geometry")
return geo_dict | def function[compile_geometry, parameter[lat, lon, elev]]:
constant[
Take in lists of lat and lon coordinates, and determine what geometry to create
:param list lat: Latitude values
:param list lon: Longitude values
:param float elev: Elevation value
:return dict:
]
call[name[logger_excel].info, parameter[constant[enter compile_geometry]]]
variable[lat] assign[=] call[name[_remove_geo_placeholders], parameter[name[lat]]]
variable[lon] assign[=] call[name[_remove_geo_placeholders], parameter[name[lon]]]
if <ast.BoolOp object at 0x7da204960fa0> begin[:]
call[name[logger_excel].info, parameter[constant[found 4 coordinates]]]
variable[geo_dict] assign[=] call[name[geometry_linestring], parameter[name[lat], name[lon], name[elev]]]
call[name[logger_excel].info, parameter[constant[exit compile_geometry]]]
return[name[geo_dict]] | keyword[def] identifier[compile_geometry] ( identifier[lat] , identifier[lon] , identifier[elev] ):
literal[string]
identifier[logger_excel] . identifier[info] ( literal[string] )
identifier[lat] = identifier[_remove_geo_placeholders] ( identifier[lat] )
identifier[lon] = identifier[_remove_geo_placeholders] ( identifier[lon] )
keyword[if] identifier[len] ( identifier[lat] )== literal[int] keyword[and] identifier[len] ( identifier[lon] )== literal[int] :
identifier[logger_excel] . identifier[info] ( literal[string] )
identifier[geo_dict] = identifier[geometry_linestring] ( identifier[lat] , identifier[lon] , identifier[elev] )
keyword[elif] identifier[len] ( identifier[lat] )== literal[int] keyword[and] identifier[len] ( identifier[lon] )== literal[int] :
identifier[logger_excel] . identifier[info] ( literal[string] )
identifier[geo_dict] = identifier[geometry_point] ( identifier[lat] , identifier[lon] , identifier[elev] )
keyword[elif] ( keyword[None] keyword[in] identifier[lon] keyword[and] keyword[None] keyword[not] keyword[in] identifier[lat] ) keyword[or] ( identifier[len] ( identifier[lat] )> literal[int] keyword[and] identifier[len] ( identifier[lon] )== literal[int] ):
identifier[geo_dict] = identifier[geometry_range] ( identifier[lat] , identifier[elev] , literal[string] )
keyword[elif] ( keyword[None] keyword[in] identifier[lat] keyword[and] keyword[None] keyword[not] keyword[in] identifier[lon] ) keyword[or] ( identifier[len] ( identifier[lon] )> literal[int] keyword[and] identifier[len] ( identifier[lat] )== literal[int] ):
        identifier[geo_dict] = identifier[geometry_range] ( identifier[lon] , identifier[elev] , literal[string] )
keyword[else] :
identifier[geo_dict] ={}
identifier[logger_excel] . identifier[warn] ( literal[string] . identifier[format] ( identifier[lat] , identifier[lon] ))
identifier[logger_excel] . identifier[info] ( literal[string] )
keyword[return] identifier[geo_dict] | def compile_geometry(lat, lon, elev):
"""
Take in lists of lat and lon coordinates, and determine what geometry to create
:param list lat: Latitude values
:param list lon: Longitude values
:param float elev: Elevation value
:return dict:
"""
logger_excel.info('enter compile_geometry')
lat = _remove_geo_placeholders(lat)
lon = _remove_geo_placeholders(lon)
# 4 coordinate values
if len(lat) == 2 and len(lon) == 2:
logger_excel.info('found 4 coordinates')
geo_dict = geometry_linestring(lat, lon, elev) # depends on [control=['if'], data=[]]
# # 4 coordinate values
# if (lat[0] != lat[1]) and (lon[0] != lon[1]):
# geo_dict = geometry_polygon(lat, lon)
# # 3 unique coordinates
# else:
# geo_dict = geometry_multipoint(lat, lon)
#
# 2 coordinate values
elif len(lat) == 1 and len(lon) == 1:
logger_excel.info('found 2 coordinates')
geo_dict = geometry_point(lat, lon, elev) # depends on [control=['if'], data=[]]
# coordinate range. one value given but not the other.
elif None in lon and None not in lat or (len(lat) > 0 and len(lon) == 0):
geo_dict = geometry_range(lat, elev, 'lat') # depends on [control=['if'], data=[]]
elif None in lat and None not in lon or (len(lon) > 0 and len(lat) == 0):
        geo_dict = geometry_range(lon, elev, 'lon') # depends on [control=['if'], data=[]]
else:
# Too many points, or no points
geo_dict = {}
logger_excel.warn('compile_geometry: invalid coordinates: lat: {}, lon: {}'.format(lat, lon))
logger_excel.info('exit compile_geometry')
return geo_dict |
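A stripped-down illustration of the branch selection above; the geometry_* helpers are replaced by labels, so this only shows which case a given coordinate count falls into:

def pick_branch(lat, lon):
    if len(lat) == 2 and len(lon) == 2:
        return "linestring"   # 4 coordinate values
    if len(lat) == 1 and len(lon) == 1:
        return "point"        # 2 coordinate values
    if (None in lon and None not in lat) or (len(lat) > 0 and len(lon) == 0):
        return "lat-range"
    if (None in lat and None not in lon) or (len(lon) > 0 and len(lat) == 0):
        return "lon-range"
    return "invalid"

print(pick_branch([10.0], [20.0]))              # point
print(pick_branch([10.0, 11.0], [20.0, 21.0]))  # linestring
print(pick_branch([10.0, 11.0], []))            # lat-range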
def save_sequence_rule(self, sequence_rule_form, *args, **kwargs):
"""Pass through to provider SequenceRuleAdminSession.update_sequence_rule"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.update_resource
if sequence_rule_form.is_for_update():
return self.update_sequence_rule(sequence_rule_form, *args, **kwargs)
else:
return self.create_sequence_rule(sequence_rule_form, *args, **kwargs) | def function[save_sequence_rule, parameter[self, sequence_rule_form]]:
constant[Pass through to provider SequenceRuleAdminSession.update_sequence_rule]
if call[name[sequence_rule_form].is_for_update, parameter[]] begin[:]
return[call[name[self].update_sequence_rule, parameter[name[sequence_rule_form], <ast.Starred object at 0x7da18c4cc490>]]] | keyword[def] identifier[save_sequence_rule] ( identifier[self] , identifier[sequence_rule_form] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[sequence_rule_form] . identifier[is_for_update] ():
keyword[return] identifier[self] . identifier[update_sequence_rule] ( identifier[sequence_rule_form] ,* identifier[args] ,** identifier[kwargs] )
keyword[else] :
keyword[return] identifier[self] . identifier[create_sequence_rule] ( identifier[sequence_rule_form] ,* identifier[args] ,** identifier[kwargs] ) | def save_sequence_rule(self, sequence_rule_form, *args, **kwargs):
"""Pass through to provider SequenceRuleAdminSession.update_sequence_rule"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.update_resource
if sequence_rule_form.is_for_update():
return self.update_sequence_rule(sequence_rule_form, *args, **kwargs) # depends on [control=['if'], data=[]]
else:
return self.create_sequence_rule(sequence_rule_form, *args, **kwargs) |
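The routing hinges entirely on the form's is_for_update() flag; a stub form makes that visible (StubForm is invented for illustration, the real forms come from the osid admin session):

class StubForm:
    def __init__(self, for_update):
        self._for_update = for_update

    def is_for_update(self):
        return self._for_update

# save_sequence_rule(StubForm(True))   -> delegates to update_sequence_rule
# save_sequence_rule(StubForm(False))  -> delegates to create_sequence_rule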
def local_expert_attention(x,
k,
loss_coef,
attention_num_experts,
train=True,
batch_coordinate=None,
**kwargs):
"""Attention using a mixture of experts.
Positions sent to the same expert can attend to each other.
The mixture of experts is "local" in that it is replicated on each
datashard.
  local_moe flattens all batches; to avoid problems with padding (e.g. all
  padding going to the same expert, self-attention attending to non-null
  padding tokens, ...), the padding should be removed beforehand.
Args:
x: a Tensor with shape [batch, length, depth] or [1, batch*length, depth]
k: The number of experts to dispatch each example to
loss_coef: a scalar. A multiplier for the expert loss
attention_num_experts: The number of experts to use
train: a boolean for the current mode
batch_coordinate (tf.Tensor): int32 tensor of shape [1, batch*length, 1]
containing the batch ids. If None, deduced from first dim of x.
**kwargs: Arguments to forward to self_attention_expert
Returns:
y: a Tensor with shape [batch, length, depth]
loss: a Scalar
"""
if batch_coordinate is None:
batch_coordinate = tf.expand_dims(
coordinate_tensor(common_layers.shape_list(x)[:-1], axis=0), axis=-1)
with tf.variable_scope("local_expert_attention"):
additional_dispatch_params = {"batch_coordinate": batch_coordinate}
return expert_utils.local_moe(
x,
train,
functools.partial(self_attention_expert, **kwargs),
attention_num_experts,
k=k,
loss_coef=loss_coef,
pass_x=True,
pass_gates=False,
additional_dispatch_params=additional_dispatch_params,
) | def function[local_expert_attention, parameter[x, k, loss_coef, attention_num_experts, train, batch_coordinate]]:
constant[Attention using a mixture of experts.
Positions sent to the same expert can attend to each other.
The mixture of experts is "local" in that it is replicated on each
datashard.
  local_moe flattens all batches; to avoid problems with padding (e.g. all
  padding going to the same expert, self-attention attending to non-null
  padding tokens, ...), the padding should be removed beforehand.
Args:
x: a Tensor with shape [batch, length, depth] or [1, batch*length, depth]
k: The number of experts to dispatch each example to
loss_coef: a scalar. A multiplier for the expert loss
attention_num_experts: The number of experts to use
train: a boolean for the current mode
batch_coordinate (tf.Tensor): int32 tensor of shape [1, batch*length, 1]
containing the batch ids. If None, deduced from first dim of x.
**kwargs: Arguments to forward to self_attention_expert
Returns:
y: a Tensor with shape [batch, length, depth]
loss: a Scalar
]
if compare[name[batch_coordinate] is constant[None]] begin[:]
variable[batch_coordinate] assign[=] call[name[tf].expand_dims, parameter[call[name[coordinate_tensor], parameter[call[call[name[common_layers].shape_list, parameter[name[x]]]][<ast.Slice object at 0x7da18c4cde10>]]]]]
with call[name[tf].variable_scope, parameter[constant[local_expert_attention]]] begin[:]
variable[additional_dispatch_params] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cfb50>], [<ast.Name object at 0x7da18c4cd750>]]
return[call[name[expert_utils].local_moe, parameter[name[x], name[train], call[name[functools].partial, parameter[name[self_attention_expert]]], name[attention_num_experts]]]] | keyword[def] identifier[local_expert_attention] ( identifier[x] ,
identifier[k] ,
identifier[loss_coef] ,
identifier[attention_num_experts] ,
identifier[train] = keyword[True] ,
identifier[batch_coordinate] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
keyword[if] identifier[batch_coordinate] keyword[is] keyword[None] :
identifier[batch_coordinate] = identifier[tf] . identifier[expand_dims] (
identifier[coordinate_tensor] ( identifier[common_layers] . identifier[shape_list] ( identifier[x] )[:- literal[int] ], identifier[axis] = literal[int] ), identifier[axis] =- literal[int] )
keyword[with] identifier[tf] . identifier[variable_scope] ( literal[string] ):
identifier[additional_dispatch_params] ={ literal[string] : identifier[batch_coordinate] }
keyword[return] identifier[expert_utils] . identifier[local_moe] (
identifier[x] ,
identifier[train] ,
identifier[functools] . identifier[partial] ( identifier[self_attention_expert] ,** identifier[kwargs] ),
identifier[attention_num_experts] ,
identifier[k] = identifier[k] ,
identifier[loss_coef] = identifier[loss_coef] ,
identifier[pass_x] = keyword[True] ,
identifier[pass_gates] = keyword[False] ,
identifier[additional_dispatch_params] = identifier[additional_dispatch_params] ,
) | def local_expert_attention(x, k, loss_coef, attention_num_experts, train=True, batch_coordinate=None, **kwargs):
"""Attention using a mixture of experts.
Positions sent to the same expert can attend to each other.
The mixture of experts is "local" in that it is replicated on each
datashard.
  local_moe flattens all batches; to avoid problems with padding (e.g. all
  padding going to the same expert, self-attention attending to non-null
  padding tokens, ...), the padding should be removed beforehand.
Args:
x: a Tensor with shape [batch, length, depth] or [1, batch*length, depth]
k: The number of experts to dispatch each example to
loss_coef: a scalar. A multiplier for the expert loss
attention_num_experts: The number of experts to use
train: a boolean for the current mode
batch_coordinate (tf.Tensor): int32 tensor of shape [1, batch*length, 1]
containing the batch ids. If None, deduced from first dim of x.
**kwargs: Arguments to forward to self_attention_expert
Returns:
y: a Tensor with shape [batch, length, depth]
loss: a Scalar
"""
if batch_coordinate is None:
batch_coordinate = tf.expand_dims(coordinate_tensor(common_layers.shape_list(x)[:-1], axis=0), axis=-1) # depends on [control=['if'], data=['batch_coordinate']]
with tf.variable_scope('local_expert_attention'):
additional_dispatch_params = {'batch_coordinate': batch_coordinate}
return expert_utils.local_moe(x, train, functools.partial(self_attention_expert, **kwargs), attention_num_experts, k=k, loss_coef=loss_coef, pass_x=True, pass_gates=False, additional_dispatch_params=additional_dispatch_params) # depends on [control=['with'], data=[]] |
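Two details worth noting: the expert function gets its kwargs bound via functools.partial before local_moe ever calls it, and batch_coordinate tags every flattened position with its batch id so the expert can keep examples from attending across batch boundaries. A toy construction of that tag for the flattened [1, batch*length, depth] layout (coordinate_tensor itself is project-internal):

import numpy as np

batch, length = 2, 3
batch_coordinate = np.repeat(np.arange(batch), length).reshape(1, -1, 1)
print(batch_coordinate[0, :, 0])  # [0 0 0 1 1 1]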
def hash(file):
"""
Hashes file using SHA-256.
:param file: name of file to be hashed
:type file: str
:rtype: str
:raises check50.Failure: if ``file`` does not exist
"""
exists(file)
log(_("hashing {}...").format(file))
# https://stackoverflow.com/a/22058673
with open(file, "rb") as f:
sha256 = hashlib.sha256()
for block in iter(lambda: f.read(65536), b""):
sha256.update(block)
return sha256.hexdigest() | def function[hash, parameter[file]]:
constant[
Hashes file using SHA-256.
:param file: name of file to be hashed
:type file: str
:rtype: str
:raises check50.Failure: if ``file`` does not exist
]
call[name[exists], parameter[name[file]]]
call[name[log], parameter[call[call[name[_], parameter[constant[hashing {}...]]].format, parameter[name[file]]]]]
with call[name[open], parameter[name[file], constant[rb]]] begin[:]
variable[sha256] assign[=] call[name[hashlib].sha256, parameter[]]
for taget[name[block]] in starred[call[name[iter], parameter[<ast.Lambda object at 0x7da1b170c460>, constant[b'']]]] begin[:]
call[name[sha256].update, parameter[name[block]]]
return[call[name[sha256].hexdigest, parameter[]]] | keyword[def] identifier[hash] ( identifier[file] ):
literal[string]
identifier[exists] ( identifier[file] )
identifier[log] ( identifier[_] ( literal[string] ). identifier[format] ( identifier[file] ))
keyword[with] identifier[open] ( identifier[file] , literal[string] ) keyword[as] identifier[f] :
identifier[sha256] = identifier[hashlib] . identifier[sha256] ()
keyword[for] identifier[block] keyword[in] identifier[iter] ( keyword[lambda] : identifier[f] . identifier[read] ( literal[int] ), literal[string] ):
identifier[sha256] . identifier[update] ( identifier[block] )
keyword[return] identifier[sha256] . identifier[hexdigest] () | def hash(file):
"""
Hashes file using SHA-256.
:param file: name of file to be hashed
:type file: str
:rtype: str
:raises check50.Failure: if ``file`` does not exist
"""
exists(file)
log(_('hashing {}...').format(file))
# https://stackoverflow.com/a/22058673
with open(file, 'rb') as f:
sha256 = hashlib.sha256()
for block in iter(lambda : f.read(65536), b''):
sha256.update(block) # depends on [control=['for'], data=['block']]
return sha256.hexdigest() # depends on [control=['with'], data=['f']] |
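The chunked read leans on the two-argument form of iter, which keeps calling f.read(65536) until the sentinel b"" comes back at EOF; a standalone check of the idiom against a one-shot digest:

import hashlib
import io

payload = b"a" * 200_000
buf = io.BytesIO(payload)
sha256 = hashlib.sha256()
for block in iter(lambda: buf.read(65536), b""):  # stops on the empty read
    sha256.update(block)
assert sha256.hexdigest() == hashlib.sha256(payload).hexdigest()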
def clear(self):
"""
Cleans up the handler. The handler can't be used after this method has
been called
"""
self._logger.debug("Component handlers are cleared")
# Clean up everything to avoid stale references, ...
self._field = None
self._name = None
self._logger = None | def function[clear, parameter[self]]:
constant[
Cleans up the handler. The handler can't be used after this method has
been called
]
call[name[self]._logger.debug, parameter[constant[Component handlers are cleared]]]
name[self]._field assign[=] constant[None]
name[self]._name assign[=] constant[None]
name[self]._logger assign[=] constant[None] | keyword[def] identifier[clear] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_field] = keyword[None]
identifier[self] . identifier[_name] = keyword[None]
identifier[self] . identifier[_logger] = keyword[None] | def clear(self):
"""
Cleans up the handler. The handler can't be used after this method has
been called
"""
self._logger.debug('Component handlers are cleared')
# Clean up everything to avoid stale references, ...
self._field = None
self._name = None
self._logger = None |
def handle(self, request, buffer_size):
"""
Handle a message
:param request: the request socket.
:param buffer_size: the buffer size.
:return: True if success, False otherwise
"""
if self.component_type == StreamComponent.SOURCE:
msg = self.handler_function()
return self.__send(request, msg)
logger = self.logger
data = self.__receive(request, buffer_size)
if data is None:
return False
else:
logger.debug(data.split(self.TERMINATOR))
for message in data.split(self.TERMINATOR)[:-1]:
logger.debug(message)
result = self.handler_function(message)
if self.component_type == StreamComponent.PROCESSOR:
if not self.__send(request, result):
return False
return True | def function[handle, parameter[self, request, buffer_size]]:
constant[
Handle a message
:param request: the request socket.
:param buffer_size: the buffer size.
:return: True if success, False otherwise
]
if compare[name[self].component_type equal[==] name[StreamComponent].SOURCE] begin[:]
variable[msg] assign[=] call[name[self].handler_function, parameter[]]
return[call[name[self].__send, parameter[name[request], name[msg]]]]
variable[logger] assign[=] name[self].logger
variable[data] assign[=] call[name[self].__receive, parameter[name[request], name[buffer_size]]]
if compare[name[data] is constant[None]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[handle] ( identifier[self] , identifier[request] , identifier[buffer_size] ):
literal[string]
keyword[if] identifier[self] . identifier[component_type] == identifier[StreamComponent] . identifier[SOURCE] :
identifier[msg] = identifier[self] . identifier[handler_function] ()
keyword[return] identifier[self] . identifier[__send] ( identifier[request] , identifier[msg] )
identifier[logger] = identifier[self] . identifier[logger]
identifier[data] = identifier[self] . identifier[__receive] ( identifier[request] , identifier[buffer_size] )
keyword[if] identifier[data] keyword[is] keyword[None] :
keyword[return] keyword[False]
keyword[else] :
identifier[logger] . identifier[debug] ( identifier[data] . identifier[split] ( identifier[self] . identifier[TERMINATOR] ))
keyword[for] identifier[message] keyword[in] identifier[data] . identifier[split] ( identifier[self] . identifier[TERMINATOR] )[:- literal[int] ]:
identifier[logger] . identifier[debug] ( identifier[message] )
identifier[result] = identifier[self] . identifier[handler_function] ( identifier[message] )
keyword[if] identifier[self] . identifier[component_type] == identifier[StreamComponent] . identifier[PROCESSOR] :
keyword[if] keyword[not] identifier[self] . identifier[__send] ( identifier[request] , identifier[result] ):
keyword[return] keyword[False]
keyword[return] keyword[True] | def handle(self, request, buffer_size):
"""
Handle a message
:param request: the request socket.
:param buffer_size: the buffer size.
:return: True if success, False otherwise
"""
if self.component_type == StreamComponent.SOURCE:
msg = self.handler_function()
return self.__send(request, msg) # depends on [control=['if'], data=[]]
logger = self.logger
data = self.__receive(request, buffer_size)
if data is None:
return False # depends on [control=['if'], data=[]]
else:
logger.debug(data.split(self.TERMINATOR))
for message in data.split(self.TERMINATOR)[:-1]:
logger.debug(message)
result = self.handler_function(message)
if self.component_type == StreamComponent.PROCESSOR:
if not self.__send(request, result):
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['message']]
return True |
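The processor/sink path frames messages by splitting on TERMINATOR and dropping the final element, which silently discards any trailing partial frame; that only works because the peer always ends complete records with the terminator. The framing in isolation (a newline terminator is assumed here for illustration):

TERMINATOR = "\n"
data = "alpha\nbeta\ngamm"              # "gamm" is an incomplete trailing frame
complete = data.split(TERMINATOR)[:-1]
print(complete)                         # ['alpha', 'beta']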
def corethreads(self):
"""
Create a .cds file consisting of fasta records of CDS features for each strain
"""
printtime('Creating CDS files and finding core genes', self.start)
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.coregroups, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.runmetadata.samples:
# Define the name of the file to store the CDS nucleotide sequences
sample.prokka.cds = os.path.join(sample.prokka.outputdir, '{}.cds'.format(sample.name))
self.corequeue.put(sample)
self.corequeue.join()
# Write the core .fasta files for each gene
self.corewriter() | def function[corethreads, parameter[self]]:
constant[
Create a .cds file consisting of fasta records of CDS features for each strain
]
call[name[printtime], parameter[constant[Creating CDS files and finding core genes], name[self].start]]
for taget[name[i]] in starred[call[name[range], parameter[name[self].cpus]]] begin[:]
variable[threads] assign[=] call[name[Thread], parameter[]]
call[name[threads].setDaemon, parameter[constant[True]]]
call[name[threads].start, parameter[]]
for taget[name[sample]] in starred[name[self].runmetadata.samples] begin[:]
name[sample].prokka.cds assign[=] call[name[os].path.join, parameter[name[sample].prokka.outputdir, call[constant[{}.cds].format, parameter[name[sample].name]]]]
call[name[self].corequeue.put, parameter[name[sample]]]
call[name[self].corequeue.join, parameter[]]
call[name[self].corewriter, parameter[]] | keyword[def] identifier[corethreads] ( identifier[self] ):
literal[string]
identifier[printtime] ( literal[string] , identifier[self] . identifier[start] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[cpus] ):
identifier[threads] = identifier[Thread] ( identifier[target] = identifier[self] . identifier[coregroups] , identifier[args] =())
identifier[threads] . identifier[setDaemon] ( keyword[True] )
identifier[threads] . identifier[start] ()
keyword[for] identifier[sample] keyword[in] identifier[self] . identifier[runmetadata] . identifier[samples] :
identifier[sample] . identifier[prokka] . identifier[cds] = identifier[os] . identifier[path] . identifier[join] ( identifier[sample] . identifier[prokka] . identifier[outputdir] , literal[string] . identifier[format] ( identifier[sample] . identifier[name] ))
identifier[self] . identifier[corequeue] . identifier[put] ( identifier[sample] )
identifier[self] . identifier[corequeue] . identifier[join] ()
identifier[self] . identifier[corewriter] () | def corethreads(self):
"""
Create a .cds file consisting of fasta records of CDS features for each strain
"""
printtime('Creating CDS files and finding core genes', self.start)
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.coregroups, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start() # depends on [control=['for'], data=[]]
for sample in self.runmetadata.samples:
# Define the name of the file to store the CDS nucleotide sequences
sample.prokka.cds = os.path.join(sample.prokka.outputdir, '{}.cds'.format(sample.name))
self.corequeue.put(sample) # depends on [control=['for'], data=['sample']]
self.corequeue.join()
# Write the core .fasta files for each gene
self.corewriter() |
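The same daemon-worker pattern in miniature, with the FASTA-specific work elided; queue.join() is what makes the method block until every queued sample has been matched by a task_done() call in a worker:

import queue
import threading

q = queue.Queue()

def worker():
    while True:
        sample = q.get()
        # ... per-sample CDS extraction would happen here ...
        q.task_done()

for _ in range(4):
    t = threading.Thread(target=worker)
    t.setDaemon(True)  # as in the original; t.daemon = True is the modern spelling
    t.start()

for sample in ["strainA", "strainB", "strainC"]:
    q.put(sample)
q.join()  # returns once all three samples are processed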
def enrich(self, column1, column2):
""" This method calculates the difference in seconds between
the 2 columns (column2 - column1)
        The final result may yield negative values depending on the values
from column1 and column2.
:param column1: first column. Values in column1 must be datetime type
:param column2: second column. Values in column2 must be datetime type
:type column1: string
:type column2: string
:return: original dataframe with a new column with the difference
between column2 - column1
:rtype: pandas.DataFrame
"""
if column1 not in self.data.columns or \
column2 not in self.data.columns:
return self.data
self.data["timedifference"] = (self.data[column2] - self.data[column1]) / np.timedelta64(1, 's')
return self.data | def function[enrich, parameter[self, column1, column2]]:
constant[ This method calculates the difference in seconds between
the 2 columns (column2 - column1)
        The final result may yield negative values depending on the values
from column1 and column2.
:param column1: first column. Values in column1 must be datetime type
:param column2: second column. Values in column2 must be datetime type
:type column1: string
:type column2: string
:return: original dataframe with a new column with the difference
between column2 - column1
:rtype: pandas.DataFrame
]
if <ast.BoolOp object at 0x7da1b2370310> begin[:]
return[name[self].data]
call[name[self].data][constant[timedifference]] assign[=] binary_operation[binary_operation[call[name[self].data][name[column2]] - call[name[self].data][name[column1]]] / call[name[np].timedelta64, parameter[constant[1], constant[s]]]]
return[name[self].data] | keyword[def] identifier[enrich] ( identifier[self] , identifier[column1] , identifier[column2] ):
literal[string]
keyword[if] identifier[column1] keyword[not] keyword[in] identifier[self] . identifier[data] . identifier[columns] keyword[or] identifier[column2] keyword[not] keyword[in] identifier[self] . identifier[data] . identifier[columns] :
keyword[return] identifier[self] . identifier[data]
identifier[self] . identifier[data] [ literal[string] ]=( identifier[self] . identifier[data] [ identifier[column2] ]- identifier[self] . identifier[data] [ identifier[column1] ])/ identifier[np] . identifier[timedelta64] ( literal[int] , literal[string] )
keyword[return] identifier[self] . identifier[data] | def enrich(self, column1, column2):
""" This method calculates the difference in seconds between
the 2 columns (column2 - column1)
        The final result may yield negative values depending on the values
from column1 and column2.
:param column1: first column. Values in column1 must be datetime type
:param column2: second column. Values in column2 must be datetime type
:type column1: string
:type column2: string
:return: original dataframe with a new column with the difference
between column2 - column1
:rtype: pandas.DataFrame
"""
if column1 not in self.data.columns or column2 not in self.data.columns:
return self.data # depends on [control=['if'], data=[]]
self.data['timedifference'] = (self.data[column2] - self.data[column1]) / np.timedelta64(1, 's')
return self.data |
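A quick check of the arithmetic on a toy frame; dividing a timedelta64 column by np.timedelta64(1, 's') converts it to float seconds, and the second row shows the documented negative case:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "opened": pd.to_datetime(["2019-01-01 10:00:00", "2019-01-02 09:30:00"]),
    "closed": pd.to_datetime(["2019-01-01 10:05:30", "2019-01-02 09:00:00"]),
})
df["timedifference"] = (df["closed"] - df["opened"]) / np.timedelta64(1, "s")
print(df["timedifference"].tolist())  # [330.0, -1800.0]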
def mean_with_attention(x, name, num_heads=4):
"""Mean and attention to reduce spatial dimensions."""
with tf.variable_scope(name):
shape = shape_list(x)
m = tf.reduce_mean(x, [1, 2])
a = layers().Dense(num_heads, name="mean_attn")(x)
s = tf.reshape(a, [shape[0], -1, num_heads])
s = tf.nn.softmax(s, axis=1)
s = tf.reshape(s, shape[:-1] + [1, num_heads])
am = tf.reduce_mean(tf.expand_dims(x, axis=-1) * s, [1, 2])
l = tf.concat([am, tf.expand_dims(m, axis=-1)], axis=-1)
return layers().Dense(2 * shape[-1], name="mean_attn_final")(
tf.reshape(l, [shape[0], (num_heads+1) * shape[-1]])) | def function[mean_with_attention, parameter[x, name, num_heads]]:
constant[Mean and attention to reduce spatial dimensions.]
with call[name[tf].variable_scope, parameter[name[name]]] begin[:]
variable[shape] assign[=] call[name[shape_list], parameter[name[x]]]
variable[m] assign[=] call[name[tf].reduce_mean, parameter[name[x], list[[<ast.Constant object at 0x7da1b1ff1840>, <ast.Constant object at 0x7da1b1ff11b0>]]]]
variable[a] assign[=] call[call[call[name[layers], parameter[]].Dense, parameter[name[num_heads]]], parameter[name[x]]]
variable[s] assign[=] call[name[tf].reshape, parameter[name[a], list[[<ast.Subscript object at 0x7da18ede4e80>, <ast.UnaryOp object at 0x7da18ede4df0>, <ast.Name object at 0x7da18ede6ef0>]]]]
variable[s] assign[=] call[name[tf].nn.softmax, parameter[name[s]]]
variable[s] assign[=] call[name[tf].reshape, parameter[name[s], binary_operation[call[name[shape]][<ast.Slice object at 0x7da18ede58d0>] + list[[<ast.Constant object at 0x7da18ede4ca0>, <ast.Name object at 0x7da18ede4c10>]]]]]
variable[am] assign[=] call[name[tf].reduce_mean, parameter[binary_operation[call[name[tf].expand_dims, parameter[name[x]]] * name[s]], list[[<ast.Constant object at 0x7da18ede7220>, <ast.Constant object at 0x7da18ede5840>]]]]
variable[l] assign[=] call[name[tf].concat, parameter[list[[<ast.Name object at 0x7da18ede5fc0>, <ast.Call object at 0x7da18ede4ee0>]]]]
return[call[call[call[name[layers], parameter[]].Dense, parameter[binary_operation[constant[2] * call[name[shape]][<ast.UnaryOp object at 0x7da18ede7f10>]]]], parameter[call[name[tf].reshape, parameter[name[l], list[[<ast.Subscript object at 0x7da18ede6980>, <ast.BinOp object at 0x7da18ede7130>]]]]]]] | keyword[def] identifier[mean_with_attention] ( identifier[x] , identifier[name] , identifier[num_heads] = literal[int] ):
literal[string]
keyword[with] identifier[tf] . identifier[variable_scope] ( identifier[name] ):
identifier[shape] = identifier[shape_list] ( identifier[x] )
identifier[m] = identifier[tf] . identifier[reduce_mean] ( identifier[x] ,[ literal[int] , literal[int] ])
identifier[a] = identifier[layers] (). identifier[Dense] ( identifier[num_heads] , identifier[name] = literal[string] )( identifier[x] )
identifier[s] = identifier[tf] . identifier[reshape] ( identifier[a] ,[ identifier[shape] [ literal[int] ],- literal[int] , identifier[num_heads] ])
identifier[s] = identifier[tf] . identifier[nn] . identifier[softmax] ( identifier[s] , identifier[axis] = literal[int] )
identifier[s] = identifier[tf] . identifier[reshape] ( identifier[s] , identifier[shape] [:- literal[int] ]+[ literal[int] , identifier[num_heads] ])
identifier[am] = identifier[tf] . identifier[reduce_mean] ( identifier[tf] . identifier[expand_dims] ( identifier[x] , identifier[axis] =- literal[int] )* identifier[s] ,[ literal[int] , literal[int] ])
identifier[l] = identifier[tf] . identifier[concat] ([ identifier[am] , identifier[tf] . identifier[expand_dims] ( identifier[m] , identifier[axis] =- literal[int] )], identifier[axis] =- literal[int] )
keyword[return] identifier[layers] (). identifier[Dense] ( literal[int] * identifier[shape] [- literal[int] ], identifier[name] = literal[string] )(
identifier[tf] . identifier[reshape] ( identifier[l] ,[ identifier[shape] [ literal[int] ],( identifier[num_heads] + literal[int] )* identifier[shape] [- literal[int] ]])) | def mean_with_attention(x, name, num_heads=4):
"""Mean and attention to reduce spatial dimensions."""
with tf.variable_scope(name):
shape = shape_list(x)
m = tf.reduce_mean(x, [1, 2])
a = layers().Dense(num_heads, name='mean_attn')(x)
s = tf.reshape(a, [shape[0], -1, num_heads])
s = tf.nn.softmax(s, axis=1)
s = tf.reshape(s, shape[:-1] + [1, num_heads])
am = tf.reduce_mean(tf.expand_dims(x, axis=-1) * s, [1, 2])
l = tf.concat([am, tf.expand_dims(m, axis=-1)], axis=-1)
return layers().Dense(2 * shape[-1], name='mean_attn_final')(tf.reshape(l, [shape[0], (num_heads + 1) * shape[-1]])) # depends on [control=['with'], data=[]] |
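A NumPy sketch of the pooling idea: softmax attention weights over the h*w spatial positions per head, alongside the plain mean; the random matrix stands in for the Dense projection, so only the shapes and the reduction mirror the TF code:

import numpy as np

rng = np.random.default_rng(0)
batch, h, w, depth, heads = 2, 4, 4, 8, 4
x = rng.normal(size=(batch, h, w, depth))
flat = x.reshape(batch, -1, depth)                   # [batch, h*w, depth]

logits = flat @ rng.normal(size=(depth, heads))      # stand-in for the Dense layer
s = np.exp(logits - logits.max(axis=1, keepdims=True))
s = s / s.sum(axis=1, keepdims=True)                 # softmax over positions
am = np.einsum("bpd,bph->bdh", flat, s) / (h * w)    # attention-weighted reduction
m = x.mean(axis=(1, 2))                              # plain spatial mean
pooled = np.concatenate([am.reshape(batch, -1), m], axis=-1)
print(pooled.shape)  # (2, 40), i.e. (num_heads + 1) * depth per example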
def on(self, event: str) -> Callable:
""" Decorator for subscribing a function to a specific event.
:param event: Name of the event to subscribe to.
:type event: str
:return: The outer function.
:rtype: Callable
"""
def outer(func):
self.add_event(func, event)
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
return outer | def function[on, parameter[self, event]]:
constant[ Decorator for subscribing a function to a specific event.
:param event: Name of the event to subscribe to.
:type event: str
:return: The outer function.
:rtype: Callable
]
def function[outer, parameter[func]]:
call[name[self].add_event, parameter[name[func], name[event]]]
def function[wrapper, parameter[]]:
return[call[name[func], parameter[<ast.Starred object at 0x7da2044c2350>]]]
return[name[wrapper]]
return[name[outer]] | keyword[def] identifier[on] ( identifier[self] , identifier[event] : identifier[str] )-> identifier[Callable] :
literal[string]
keyword[def] identifier[outer] ( identifier[func] ):
identifier[self] . identifier[add_event] ( identifier[func] , identifier[event] )
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper]
keyword[return] identifier[outer] | def on(self, event: str) -> Callable:
""" Decorator for subscribing a function to a specific event.
:param event: Name of the event to subscribe to.
:type event: str
:return: The outer function.
:rtype: Callable
"""
def outer(func):
self.add_event(func, event)
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
return outer |
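A self-contained host for the decorator (Emitter, its _handlers dict, and emit are illustrative names, not part of the original class, which only needs an add_event method):

from collections import defaultdict
from functools import wraps

class Emitter:
    def __init__(self):
        self._handlers = defaultdict(list)

    def add_event(self, func, event):
        self._handlers[event].append(func)

    def on(self, event):
        def outer(func):
            self.add_event(func, event)

            @wraps(func)
            def wrapper(*args, **kwargs):
                return func(*args, **kwargs)
            return wrapper
        return outer

    def emit(self, event, *args, **kwargs):
        for handler in self._handlers[event]:
            handler(*args, **kwargs)

bus = Emitter()

@bus.on("greet")
def say_hello(name):
    print("hello,", name)

bus.emit("greet", "world")  # prints: hello, world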
def make_patch_request(url, data, params, headers, connection):
"""
Helper function that makes an HTTP PATCH request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`data`: JSON serializable dict that will be stored in the remote storage.
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
    The return value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
    connection = connection_pool.get_available_connection()
    response = make_patch_request('http://firebase.localhost/users/1',
'{"Ozgur Vatansever"}', {'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {'Ozgur Vatansever'} or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.patch(url, data=data, params=params, headers=headers,
timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status() | def function[make_patch_request, parameter[url, data, params, headers, connection]]:
constant[
Helper function that makes an HTTP PATCH request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`data`: JSON serializable dict that will be stored in the remote storage.
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
    The return value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
    connection = connection_pool.get_available_connection()
    response = make_patch_request('http://firebase.localhost/users/1',
'{"Ozgur Vatansever"}', {'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {'Ozgur Vatansever'} or {'error': 'Permission denied.'}
]
variable[timeout] assign[=] call[name[getattr], parameter[name[connection], constant[timeout]]]
variable[response] assign[=] call[name[connection].patch, parameter[name[url]]]
if <ast.BoolOp object at 0x7da18fe90430> begin[:]
return[<ast.IfExp object at 0x7da18fe93760>] | keyword[def] identifier[make_patch_request] ( identifier[url] , identifier[data] , identifier[params] , identifier[headers] , identifier[connection] ):
literal[string]
identifier[timeout] = identifier[getattr] ( identifier[connection] , literal[string] )
identifier[response] = identifier[connection] . identifier[patch] ( identifier[url] , identifier[data] = identifier[data] , identifier[params] = identifier[params] , identifier[headers] = identifier[headers] ,
identifier[timeout] = identifier[timeout] )
keyword[if] identifier[response] . identifier[ok] keyword[or] identifier[response] . identifier[status_code] == literal[int] :
keyword[return] identifier[response] . identifier[json] () keyword[if] identifier[response] . identifier[content] keyword[else] keyword[None]
keyword[else] :
identifier[response] . identifier[raise_for_status] () | def make_patch_request(url, data, params, headers, connection):
"""
Helper function that makes an HTTP PATCH request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`data`: JSON serializable dict that will be stored in the remote storage.
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
    The return value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
    connection = connection_pool.get_available_connection()
    response = make_patch_request('http://firebase.localhost/users/1',
'{"Ozgur Vatansever"}', {'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {'Ozgur Vatansever'} or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.patch(url, data=data, params=params, headers=headers, timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None # depends on [control=['if'], data=[]]
else:
response.raise_for_status() |
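# A minimal usage sketch for make_patch_request above, assuming a plain
# requests.Session stands in for the pooled firebase connection (the helper
# only needs a .patch() method and a .timeout attribute). The endpoint and
# payload are hypothetical, and a reachable server is needed for it to succeed.
import requests

session = requests.Session()
session.timeout = 60  # read back via getattr(connection, 'timeout')
result = make_patch_request(
    'http://firebase.localhost/users/1.json',      # hypothetical endpoint
    data='{"name": "Ozgur Vatansever"}',
    params={'print': 'silent'},
    headers={'Content-Type': 'application/json'},
    connection=session,
)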
def _link_variables_on_expr(self, variable_manager, block, stmt_idx, stmt, expr):
"""
Link atoms (AIL expressions) in the given expression to corresponding variables identified previously.
:param variable_manager: Variable manager of the function.
:param ailment.Block block: AIL block.
:param int stmt_idx: ID of the statement.
:param stmt: The AIL statement that `expr` belongs to.
:param expr: The AIL expression to work on.
:return: None
"""
if type(expr) is ailment.Expr.Register:
# find a register variable
reg_vars = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
# TODO: make sure it is the correct register we are looking for
if len(reg_vars) == 1:
reg_var, offset = next(iter(reg_vars))
expr.variable = reg_var
expr.offset = offset
elif type(expr) is ailment.Expr.Load:
# import ipdb; ipdb.set_trace()
variables = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
if len(variables) == 0:
self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.addr)
else:
if len(variables) > 1:
l.error("More than one variable are available for atom %s. Consider fixing it using phi nodes.",
expr
)
var, offset = next(iter(variables))
expr.variable = var
expr.offset = offset
elif type(expr) is ailment.Expr.BinaryOp:
variables = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
if len(variables) == 1:
var, offset = next(iter(variables))
expr.referenced_variable = var
expr.offset = offset
else:
self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.operands[0])
self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.operands[1])
elif type(expr) is ailment.Expr.UnaryOp:
variables = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
if len(variables) == 1:
var, offset = next(iter(variables))
expr.referenced_variable = var
expr.offset = offset
else:
self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.operands)
elif type(expr) is ailment.Expr.Convert:
self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.operand)
elif isinstance(expr, ailment.Expr.BasePointerOffset):
variables = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
if len(variables) == 1:
var, offset = next(iter(variables))
expr.referenced_variable = var
expr.offset = offset | def function[_link_variables_on_expr, parameter[self, variable_manager, block, stmt_idx, stmt, expr]]:
constant[
Link atoms (AIL expressions) in the given expression to corresponding variables identified previously.
:param variable_manager: Variable manager of the function.
:param ailment.Block block: AIL block.
:param int stmt_idx: ID of the statement.
:param stmt: The AIL statement that `expr` belongs to.
:param expr: The AIL expression to work on.
:return: None
]
if compare[call[name[type], parameter[name[expr]]] is name[ailment].Expr.Register] begin[:]
variable[reg_vars] assign[=] call[name[variable_manager].find_variables_by_atom, parameter[name[block].addr, name[stmt_idx], name[expr]]]
if compare[call[name[len], parameter[name[reg_vars]]] equal[==] constant[1]] begin[:]
<ast.Tuple object at 0x7da204346980> assign[=] call[name[next], parameter[call[name[iter], parameter[name[reg_vars]]]]]
name[expr].variable assign[=] name[reg_var]
name[expr].offset assign[=] name[offset] | keyword[def] identifier[_link_variables_on_expr] ( identifier[self] , identifier[variable_manager] , identifier[block] , identifier[stmt_idx] , identifier[stmt] , identifier[expr] ):
literal[string]
keyword[if] identifier[type] ( identifier[expr] ) keyword[is] identifier[ailment] . identifier[Expr] . identifier[Register] :
identifier[reg_vars] = identifier[variable_manager] . identifier[find_variables_by_atom] ( identifier[block] . identifier[addr] , identifier[stmt_idx] , identifier[expr] )
keyword[if] identifier[len] ( identifier[reg_vars] )== literal[int] :
identifier[reg_var] , identifier[offset] = identifier[next] ( identifier[iter] ( identifier[reg_vars] ))
identifier[expr] . identifier[variable] = identifier[reg_var]
identifier[expr] . identifier[offset] = identifier[offset]
keyword[elif] identifier[type] ( identifier[expr] ) keyword[is] identifier[ailment] . identifier[Expr] . identifier[Load] :
identifier[variables] = identifier[variable_manager] . identifier[find_variables_by_atom] ( identifier[block] . identifier[addr] , identifier[stmt_idx] , identifier[expr] )
keyword[if] identifier[len] ( identifier[variables] )== literal[int] :
identifier[self] . identifier[_link_variables_on_expr] ( identifier[variable_manager] , identifier[block] , identifier[stmt_idx] , identifier[stmt] , identifier[expr] . identifier[addr] )
keyword[else] :
keyword[if] identifier[len] ( identifier[variables] )> literal[int] :
identifier[l] . identifier[error] ( literal[string] ,
identifier[expr]
)
identifier[var] , identifier[offset] = identifier[next] ( identifier[iter] ( identifier[variables] ))
identifier[expr] . identifier[variable] = identifier[var]
identifier[expr] . identifier[offset] = identifier[offset]
keyword[elif] identifier[type] ( identifier[expr] ) keyword[is] identifier[ailment] . identifier[Expr] . identifier[BinaryOp] :
identifier[variables] = identifier[variable_manager] . identifier[find_variables_by_atom] ( identifier[block] . identifier[addr] , identifier[stmt_idx] , identifier[expr] )
keyword[if] identifier[len] ( identifier[variables] )== literal[int] :
identifier[var] , identifier[offset] = identifier[next] ( identifier[iter] ( identifier[variables] ))
identifier[expr] . identifier[referenced_variable] = identifier[var]
identifier[expr] . identifier[offset] = identifier[offset]
keyword[else] :
identifier[self] . identifier[_link_variables_on_expr] ( identifier[variable_manager] , identifier[block] , identifier[stmt_idx] , identifier[stmt] , identifier[expr] . identifier[operands] [ literal[int] ])
identifier[self] . identifier[_link_variables_on_expr] ( identifier[variable_manager] , identifier[block] , identifier[stmt_idx] , identifier[stmt] , identifier[expr] . identifier[operands] [ literal[int] ])
keyword[elif] identifier[type] ( identifier[expr] ) keyword[is] identifier[ailment] . identifier[Expr] . identifier[UnaryOp] :
identifier[variables] = identifier[variable_manager] . identifier[find_variables_by_atom] ( identifier[block] . identifier[addr] , identifier[stmt_idx] , identifier[expr] )
keyword[if] identifier[len] ( identifier[variables] )== literal[int] :
identifier[var] , identifier[offset] = identifier[next] ( identifier[iter] ( identifier[variables] ))
identifier[expr] . identifier[referenced_variable] = identifier[var]
identifier[expr] . identifier[offset] = identifier[offset]
keyword[else] :
identifier[self] . identifier[_link_variables_on_expr] ( identifier[variable_manager] , identifier[block] , identifier[stmt_idx] , identifier[stmt] , identifier[expr] . identifier[operands] )
keyword[elif] identifier[type] ( identifier[expr] ) keyword[is] identifier[ailment] . identifier[Expr] . identifier[Convert] :
identifier[self] . identifier[_link_variables_on_expr] ( identifier[variable_manager] , identifier[block] , identifier[stmt_idx] , identifier[stmt] , identifier[expr] . identifier[operand] )
keyword[elif] identifier[isinstance] ( identifier[expr] , identifier[ailment] . identifier[Expr] . identifier[BasePointerOffset] ):
identifier[variables] = identifier[variable_manager] . identifier[find_variables_by_atom] ( identifier[block] . identifier[addr] , identifier[stmt_idx] , identifier[expr] )
keyword[if] identifier[len] ( identifier[variables] )== literal[int] :
identifier[var] , identifier[offset] = identifier[next] ( identifier[iter] ( identifier[variables] ))
identifier[expr] . identifier[referenced_variable] = identifier[var]
identifier[expr] . identifier[offset] = identifier[offset] | def _link_variables_on_expr(self, variable_manager, block, stmt_idx, stmt, expr):
"""
Link atoms (AIL expressions) in the given expression to corresponding variables identified previously.
:param variable_manager: Variable manager of the function.
:param ailment.Block block: AIL block.
:param int stmt_idx: ID of the statement.
:param stmt: The AIL statement that `expr` belongs to.
:param expr: The AIL expression to work on.
:return: None
"""
if type(expr) is ailment.Expr.Register:
# find a register variable
reg_vars = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
# TODO: make sure it is the correct register we are looking for
if len(reg_vars) == 1:
(reg_var, offset) = next(iter(reg_vars))
expr.variable = reg_var
expr.offset = offset # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif type(expr) is ailment.Expr.Load:
# import ipdb; ipdb.set_trace()
variables = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
if len(variables) == 0:
self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.addr) # depends on [control=['if'], data=[]]
else:
if len(variables) > 1:
l.error('More than one variable is available for atom %s. Consider fixing it using phi nodes.', expr) # depends on [control=['if'], data=[]]
(var, offset) = next(iter(variables))
expr.variable = var
expr.offset = offset # depends on [control=['if'], data=[]]
elif type(expr) is ailment.Expr.BinaryOp:
variables = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
if len(variables) == 1:
(var, offset) = next(iter(variables))
expr.referenced_variable = var
expr.offset = offset # depends on [control=['if'], data=[]]
else:
self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.operands[0])
self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.operands[1]) # depends on [control=['if'], data=[]]
elif type(expr) is ailment.Expr.UnaryOp:
variables = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
if len(variables) == 1:
(var, offset) = next(iter(variables))
expr.referenced_variable = var
expr.offset = offset # depends on [control=['if'], data=[]]
else:
self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.operands) # depends on [control=['if'], data=[]]
elif type(expr) is ailment.Expr.Convert:
self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.operand) # depends on [control=['if'], data=[]]
elif isinstance(expr, ailment.Expr.BasePointerOffset):
variables = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
if len(variables) == 1:
(var, offset) = next(iter(variables))
expr.referenced_variable = var
expr.offset = offset # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
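# The recursive linking pattern above, sketched on a toy expression tree with
# hypothetical node classes (not the real ailment API): leaves get a variable
# attached, interior nodes just recurse into their operands.
from dataclasses import dataclass, field

@dataclass
class Reg:            # stands in for ailment.Expr.Register
    name: str
    variable: str = None

@dataclass
class BinOp:          # stands in for ailment.Expr.BinaryOp
    operands: list = field(default_factory=list)

def link(expr, variables_by_atom):
    if isinstance(expr, Reg):
        # Leaf: attach the recovered variable, if one was identified.
        expr.variable = variables_by_atom.get(expr.name)
    elif isinstance(expr, BinOp):
        # Interior node: descend into every operand.
        for op in expr.operands:
            link(op, variables_by_atom)

tree = BinOp([Reg('rax'), BinOp([Reg('rbx'), Reg('rcx')])])
link(tree, {'rax': 'var_10', 'rcx': 'var_18'})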
def gradient(self, ts):
"""
Find the gradient of the log likelihood with respect to the given time series.
Based on http://www.unc.edu/~jbhill/Bollerslev_GARCH_1986.pdf
Returns a 3-element array containing the gradient for the alpha, beta, and omega parameters.
"""
gradient = self._jmodel.gradient(_py2java(self._ctx, Vectors.dense(ts)))
return _java2py(self._ctx, gradient) | def function[gradient, parameter[self, ts]]:
constant[
Find the gradient of the log likelihood with respect to the given time series.
Based on http://www.unc.edu/~jbhill/Bollerslev_GARCH_1986.pdf
Returns a 3-element array containing the gradient for the alpha, beta, and omega parameters.
]
variable[gradient] assign[=] call[name[self]._jmodel.gradient, parameter[call[name[_py2java], parameter[name[self]._ctx, call[name[Vectors].dense, parameter[name[ts]]]]]]]
return[call[name[_java2py], parameter[name[self]._ctx, name[gradient]]]] | keyword[def] identifier[gradient] ( identifier[self] , identifier[ts] ):
literal[string]
identifier[gradient] = identifier[self] . identifier[_jmodel] . identifier[gradient] ( identifier[_py2java] ( identifier[self] . identifier[_ctx] , identifier[Vectors] . identifier[dense] ( identifier[ts] )))
keyword[return] identifier[_java2py] ( identifier[self] . identifier[_ctx] , identifier[gradient] ) | def gradient(self, ts):
"""
Find the gradient of the log likelihood with respect to the given time series.
Based on http://www.unc.edu/~jbhill/Bollerslev_GARCH_1986.pdf
Returns a 3-element array containing the gradient for the alpha, beta, and omega parameters.
"""
gradient = self._jmodel.gradient(_py2java(self._ctx, Vectors.dense(ts)))
return _java2py(self._ctx, gradient) |
def code_constants(self):
"""
All of the constants that are used by this function's code.
"""
# TODO: remove link register values
return [const.value for block in self.blocks for const in block.vex.constants] | def function[code_constants, parameter[self]]:
constant[
All of the constants that are used by this functions's code.
]
return[<ast.ListComp object at 0x7da18dc05a50>] | keyword[def] identifier[code_constants] ( identifier[self] ):
literal[string]
keyword[return] [ identifier[const] . identifier[value] keyword[for] identifier[block] keyword[in] identifier[self] . identifier[blocks] keyword[for] identifier[const] keyword[in] identifier[block] . identifier[vex] . identifier[constants] ] | def code_constants(self):
"""
All of the constants that are used by this function's code.
"""
# TODO: remove link register values
return [const.value for block in self.blocks for const in block.vex.constants] |
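# Loose analogy for code_constants, using ordinary Python code objects instead
# of angr's VEX IR blocks: co_consts plays the role of block.vex.constants.
def sample(x):
    return x * 42 + 7

constants = [c for c in sample.__code__.co_consts if isinstance(c, int)]
print(constants)  # [42, 7]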
def unshell_list(s):
"""Turn a command-line argument into a list."""
if not s:
return None
if sys.platform == 'win32':
# When running coverage as coverage.exe, some of the behavior
# of the shell is emulated: wildcards are expanded into a list of
# filenames. So you have to single-quote patterns on the command
# line, but (not) helpfully, the single quotes are included in the
# argument, so we have to strip them off here.
s = s.strip("'")
return s.split(',') | def function[unshell_list, parameter[s]]:
constant[Turn a command-line argument into a list.]
if <ast.UnaryOp object at 0x7da20e962830> begin[:]
return[constant[None]]
if compare[name[sys].platform equal[==] constant[win32]] begin[:]
variable[s] assign[=] call[name[s].strip, parameter[constant[']]]
return[call[name[s].split, parameter[constant[,]]]] | keyword[def] identifier[unshell_list] ( identifier[s] ):
literal[string]
keyword[if] keyword[not] identifier[s] :
keyword[return] keyword[None]
keyword[if] identifier[sys] . identifier[platform] == literal[string] :
identifier[s] = identifier[s] . identifier[strip] ( literal[string] )
keyword[return] identifier[s] . identifier[split] ( literal[string] ) | def unshell_list(s):
"""Turn a command-line argument into a list."""
if not s:
return None # depends on [control=['if'], data=[]]
if sys.platform == 'win32':
# When running coverage as coverage.exe, some of the behavior
# of the shell is emulated: wildcards are expanded into a list of
# filenames. So you have to single-quote patterns on the command
# line, but (not) helpfully, the single quotes are included in the
# argument, so we have to strip them off here.
s = s.strip("'") # depends on [control=['if'], data=[]]
return s.split(',') |
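# Quick checks for unshell_list, assuming the function above is in scope; the
# quote-stripping branch only fires when sys.platform == 'win32'.
print(unshell_list(''))             # None: empty string is falsy
print(unshell_list('a.py,b.py'))    # ['a.py', 'b.py']
# On Windows, "'*.py'" is first stripped to "*.py" before splitting.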
def load_new_checkpoint_when_available(
self, sess, current_checkpoint, sleep_seconds=10):
"""Waits for a new checkpoint to be available and then loads it.
Args:
sess: The current session.
current_checkpoint: The current checkpoint or None to just load the next
one.
sleep_seconds: How long to sleep between checks.
Returns:
The next checkpoint to use.
"""
# Load the checkpoint.
while True:
next_checkpoint = self.load_from_checkpoint(sess)
if not next_checkpoint or next_checkpoint == current_checkpoint:
print('Model not yet available, sleeping for %d seconds: '
'path %s; found: %s' %
(sleep_seconds,
os.path.dirname(self._save_path), current_checkpoint))
sys.stdout.flush()
time.sleep(sleep_seconds)
else:
return next_checkpoint | def function[load_new_checkpoint_when_available, parameter[self, sess, current_checkpoint, sleep_seconds]]:
constant[Waits for a new checkpoint to be available and then loads it.
Args:
sess: The current session.
current_checkpoint: The current checkpoint or None to just load the next
one.
sleep_seconds: How long to sleep between checks.
Returns:
The next checkpoint to use.
]
while constant[True] begin[:]
variable[next_checkpoint] assign[=] call[name[self].load_from_checkpoint, parameter[name[sess]]]
if <ast.BoolOp object at 0x7da1b2344b80> begin[:]
call[name[print], parameter[binary_operation[constant[Model not yet available, sleeping for %d seconds: path %s; found: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2345810>, <ast.Call object at 0x7da1b2346f20>, <ast.Name object at 0x7da1b2344340>]]]]]
call[name[sys].stdout.flush, parameter[]]
call[name[time].sleep, parameter[name[sleep_seconds]]] | keyword[def] identifier[load_new_checkpoint_when_available] (
identifier[self] , identifier[sess] , identifier[current_checkpoint] , identifier[sleep_seconds] = literal[int] ):
literal[string]
keyword[while] keyword[True] :
identifier[next_checkpoint] = identifier[self] . identifier[load_from_checkpoint] ( identifier[sess] )
keyword[if] keyword[not] identifier[next_checkpoint] keyword[or] identifier[next_checkpoint] == identifier[current_checkpoint] :
identifier[print] ( literal[string]
literal[string] %
( identifier[sleep_seconds] ,
identifier[os] . identifier[path] . identifier[dirname] ( identifier[self] . identifier[_save_path] ), identifier[current_checkpoint] ))
identifier[sys] . identifier[stdout] . identifier[flush] ()
identifier[time] . identifier[sleep] ( identifier[sleep_seconds] )
keyword[else] :
keyword[return] identifier[next_checkpoint] | def load_new_checkpoint_when_available(self, sess, current_checkpoint, sleep_seconds=10):
"""Waits for a new checkpoint to be available and then loads it.
Args:
sess: The current session.
current_checkpoint: The current checkpoint or None to just load the next
one.
sleep_seconds: How long to sleep between checks.
Returns:
The next checkpoint to use.
"""
# Load the checkpoint.
while True:
next_checkpoint = self.load_from_checkpoint(sess)
if not next_checkpoint or next_checkpoint == current_checkpoint:
print('Model not yet available, sleeping for %d seconds: path %s; found: %s' % (sleep_seconds, os.path.dirname(self._save_path), current_checkpoint))
sys.stdout.flush()
time.sleep(sleep_seconds) # depends on [control=['if'], data=[]]
else:
return next_checkpoint # depends on [control=['while'], data=[]] |
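# The wait-and-retry shape of that loop, reduced to a framework-free sketch
# that polls a file's mtime on disk; the names are illustrative, not the
# original TensorFlow checkpoint API.
import os
import time

def wait_for_newer(path, last_mtime, sleep_seconds=10):
    while True:
        mtime = os.path.getmtime(path) if os.path.exists(path) else None
        if mtime is None or mtime == last_mtime:
            time.sleep(sleep_seconds)  # nothing new yet; keep polling
        else:
            return mtime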
def _create_session(team, auth):
"""
Creates a session object to be used for `push`, `install`, etc.
"""
session = requests.Session()
session.hooks.update(dict(
response=partial(_handle_response, team)
))
session.headers.update({
"Content-Type": "application/json",
"Accept": "application/json",
"User-Agent": "quilt-cli/%s (%s %s) %s/%s" % (
VERSION, platform.system(), platform.release(),
platform.python_implementation(), platform.python_version()
)
})
if auth is not None:
session.headers["Authorization"] = "Bearer %s" % auth['access_token']
return session | def function[_create_session, parameter[team, auth]]:
constant[
Creates a session object to be used for `push`, `install`, etc.
]
variable[session] assign[=] call[name[requests].Session, parameter[]]
call[name[session].hooks.update, parameter[call[name[dict], parameter[]]]]
call[name[session].headers.update, parameter[dictionary[[<ast.Constant object at 0x7da1b1240460>, <ast.Constant object at 0x7da1b12405b0>, <ast.Constant object at 0x7da1b1240760>], [<ast.Constant object at 0x7da1b12407f0>, <ast.Constant object at 0x7da1b12403d0>, <ast.BinOp object at 0x7da1b1240310>]]]]
if compare[name[auth] is_not constant[None]] begin[:]
call[name[session].headers][constant[Authorization]] assign[=] binary_operation[constant[Bearer %s] <ast.Mod object at 0x7da2590d6920> call[name[auth]][constant[access_token]]]
return[name[session]] | keyword[def] identifier[_create_session] ( identifier[team] , identifier[auth] ):
literal[string]
identifier[session] = identifier[requests] . identifier[Session] ()
identifier[session] . identifier[hooks] . identifier[update] ( identifier[dict] (
identifier[response] = identifier[partial] ( identifier[_handle_response] , identifier[team] )
))
identifier[session] . identifier[headers] . identifier[update] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] %(
identifier[VERSION] , identifier[platform] . identifier[system] (), identifier[platform] . identifier[release] (),
identifier[platform] . identifier[python_implementation] (), identifier[platform] . identifier[python_version] ()
)
})
keyword[if] identifier[auth] keyword[is] keyword[not] keyword[None] :
identifier[session] . identifier[headers] [ literal[string] ]= literal[string] % identifier[auth] [ literal[string] ]
keyword[return] identifier[session] | def _create_session(team, auth):
"""
Creates a session object to be used for `push`, `install`, etc.
"""
session = requests.Session()
session.hooks.update(dict(response=partial(_handle_response, team)))
session.headers.update({'Content-Type': 'application/json', 'Accept': 'application/json', 'User-Agent': 'quilt-cli/%s (%s %s) %s/%s' % (VERSION, platform.system(), platform.release(), platform.python_implementation(), platform.python_version())})
if auth is not None:
session.headers['Authorization'] = 'Bearer %s' % auth['access_token'] # depends on [control=['if'], data=['auth']]
return session |
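# The session setup can be exercised without any auth machinery; this sketch
# rebuilds the same User-Agent string with a made-up VERSION and shows the
# headers every request would carry.
import platform
import requests

session = requests.Session()
session.headers.update({
    'Content-Type': 'application/json',
    'Accept': 'application/json',
    'User-Agent': 'quilt-cli/%s (%s %s) %s/%s' % (
        '0.0.0',  # hypothetical VERSION
        platform.system(), platform.release(),
        platform.python_implementation(), platform.python_version(),
    ),
})
print(dict(session.headers))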
def _digi_bounds(fmt):
"""
Return min and max digital values for each format type.
Accepts lists.
Parameters
----------
fmt : str, or list
The wfdb dat format, or a list of them.
"""
if isinstance(fmt, list):
return [_digi_bounds(f) for f in fmt]
if fmt == '80':
return (-128, 127)
elif fmt == '212':
return (-2048, 2047)
elif fmt == '16':
return (-32768, 32767)
elif fmt == '24':
return (-8388608, 8388607)
elif fmt == '32':
return (-2147483648, 2147483647) | def function[_digi_bounds, parameter[fmt]]:
constant[
Return min and max digital values for each format type.
Accepts lists.
Parameters
----------
fmt : str, or list
The wfdb dat format, or a list of them.
]
if call[name[isinstance], parameter[name[fmt], name[list]]] begin[:]
return[<ast.ListComp object at 0x7da1b196de10>]
if compare[name[fmt] equal[==] constant[80]] begin[:]
return[tuple[[<ast.UnaryOp object at 0x7da1b196dba0>, <ast.Constant object at 0x7da1b196dd50>]]] | keyword[def] identifier[_digi_bounds] ( identifier[fmt] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[fmt] , identifier[list] ):
keyword[return] [ identifier[_digi_bounds] ( identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[fmt] ]
keyword[if] identifier[fmt] == literal[string] :
keyword[return] (- literal[int] , literal[int] )
keyword[elif] identifier[fmt] == literal[string] :
keyword[return] (- literal[int] , literal[int] )
keyword[elif] identifier[fmt] == literal[string] :
keyword[return] (- literal[int] , literal[int] )
keyword[elif] identifier[fmt] == literal[string] :
keyword[return] (- literal[int] , literal[int] )
keyword[elif] identifier[fmt] == literal[string] :
keyword[return] (- literal[int] , literal[int] ) | def _digi_bounds(fmt):
"""
Return min and max digital values for each format type.
Accepts lists.
Parameters
----------
fmt : str, or list
The wfdb dat format, or a list of them.
"""
if isinstance(fmt, list):
return [_digi_bounds(f) for f in fmt] # depends on [control=['if'], data=[]]
if fmt == '80':
return (-128, 127) # depends on [control=['if'], data=[]]
elif fmt == '212':
return (-2048, 2047) # depends on [control=['if'], data=[]]
elif fmt == '16':
return (-32768, 32767) # depends on [control=['if'], data=[]]
elif fmt == '24':
return (-8388608, 8388607) # depends on [control=['if'], data=[]]
elif fmt == '32':
return (-2147483648, 2147483647) # depends on [control=['if'], data=[]] |
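# Assuming _digi_bounds is importable, both the scalar and list forms behave
# as the docstring promises.
print(_digi_bounds('212'))           # (-2048, 2047)
print(_digi_bounds(['80', '16']))    # [(-128, 127), (-32768, 32767)]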
def com_google_fonts_check_aat(ttFont):
"""Are there unwanted Apple tables?"""
UNWANTED_TABLES = {
'EBSC', 'Zaph', 'acnt', 'ankr', 'bdat', 'bhed', 'bloc',
'bmap', 'bsln', 'fdsc', 'feat', 'fond', 'gcid', 'just',
'kerx', 'lcar', 'ltag', 'mort', 'morx', 'opbd', 'prop',
'trak', 'xref'
}
unwanted_tables_found = []
for table in ttFont.keys():
if table in UNWANTED_TABLES:
unwanted_tables_found.append(table)
if len(unwanted_tables_found) > 0:
yield FAIL, ("Unwanted AAT tables were found"
" in the font and should be removed, either by"
" fonttools/ttx or by editing them using the tool"
" they built with:"
" {}").format(", ".join(unwanted_tables_found))
else:
yield PASS, "There are no unwanted AAT tables." | def function[com_google_fonts_check_aat, parameter[ttFont]]:
constant[Are there unwanted Apple tables?]
variable[UNWANTED_TABLES] assign[=] <ast.Set object at 0x7da1b12531c0>
variable[unwanted_tables_found] assign[=] list[[]]
for taget[name[table]] in starred[call[name[ttFont].keys, parameter[]]] begin[:]
if compare[name[table] in name[UNWANTED_TABLES]] begin[:]
call[name[unwanted_tables_found].append, parameter[name[table]]]
if compare[call[name[len], parameter[name[unwanted_tables_found]]] greater[>] constant[0]] begin[:]
<ast.Yield object at 0x7da1b12521d0> | keyword[def] identifier[com_google_fonts_check_aat] ( identifier[ttFont] ):
literal[string]
identifier[UNWANTED_TABLES] ={
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string]
}
identifier[unwanted_tables_found] =[]
keyword[for] identifier[table] keyword[in] identifier[ttFont] . identifier[keys] ():
keyword[if] identifier[table] keyword[in] identifier[UNWANTED_TABLES] :
identifier[unwanted_tables_found] . identifier[append] ( identifier[table] )
keyword[if] identifier[len] ( identifier[unwanted_tables_found] )> literal[int] :
keyword[yield] identifier[FAIL] ,( literal[string]
literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] ( literal[string] . identifier[join] ( identifier[unwanted_tables_found] ))
keyword[else] :
keyword[yield] identifier[PASS] , literal[string] | def com_google_fonts_check_aat(ttFont):
"""Are there unwanted Apple tables?"""
UNWANTED_TABLES = {'EBSC', 'Zaph', 'acnt', 'ankr', 'bdat', 'bhed', 'bloc', 'bmap', 'bsln', 'fdsc', 'feat', 'fond', 'gcid', 'just', 'kerx', 'lcar', 'ltag', 'mort', 'morx', 'opbd', 'prop', 'trak', 'xref'}
unwanted_tables_found = []
for table in ttFont.keys():
if table in UNWANTED_TABLES:
unwanted_tables_found.append(table) # depends on [control=['if'], data=['table']] # depends on [control=['for'], data=['table']]
if len(unwanted_tables_found) > 0:
yield (FAIL, 'Unwanted AAT tables were found in the font and should be removed, either by fonttools/ttx or by editing them using the tool they built with: {}'.format(', '.join(unwanted_tables_found))) # depends on [control=['if'], data=[]]
else:
yield (PASS, 'There are no unwanted AAT tables.') |
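# Driver sketch for the check above, assuming fonttools is installed, a font
# path of your own, and that PASS/FAIL are defined in the surrounding module
# (the check is a generator of (status, message) pairs).
from fontTools.ttLib import TTFont

font = TTFont('MyFont.ttf')  # hypothetical path
for status, message in com_google_fonts_check_aat(font):
    print(status, message)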
def _cast_to_type(self, value):
""" Raise error if the value is not a dict """
if not isinstance(value, dict):
self.fail('invalid', value=value)
return value | def function[_cast_to_type, parameter[self, value]]:
constant[ Raise error if the value is not a dict ]
if <ast.UnaryOp object at 0x7da1b1bb9ae0> begin[:]
call[name[self].fail, parameter[constant[invalid]]]
return[name[value]] | keyword[def] identifier[_cast_to_type] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[dict] ):
identifier[self] . identifier[fail] ( literal[string] , identifier[value] = identifier[value] )
keyword[return] identifier[value] | def _cast_to_type(self, value):
""" Raise error if the value is not a dict """
if not isinstance(value, dict):
self.fail('invalid', value=value) # depends on [control=['if'], data=[]]
return value |
def locate(iterable, pred=bool, window_size=None):
"""Yield the index of each item in *iterable* for which *pred* returns
``True``.
*pred* defaults to :func:`bool`, which will select truthy items:
>>> list(locate([0, 1, 1, 0, 1, 0, 0]))
[1, 2, 4]
Set *pred* to a custom function to, e.g., find the indexes for a particular
item.
>>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
[1, 3]
If *window_size* is given, then the *pred* function will be called with
that many items. This enables searching for sub-sequences:
>>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
>>> pred = lambda *args: args == (1, 2, 3)
>>> list(locate(iterable, pred=pred, window_size=3))
[1, 5, 9]
Use with :func:`seekable` to find indexes and then retrieve the associated
items:
>>> from itertools import count
>>> from more_itertools import seekable
>>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
>>> it = seekable(source)
>>> pred = lambda x: x > 100
>>> indexes = locate(it, pred=pred)
>>> i = next(indexes)
>>> it.seek(i)
>>> next(it)
106
"""
if window_size is None:
return compress(count(), map(pred, iterable))
if window_size < 1:
raise ValueError('window size must be at least 1')
it = windowed(iterable, window_size, fillvalue=_marker)
return compress(count(), starmap(pred, it)) | def function[locate, parameter[iterable, pred, window_size]]:
constant[Yield the index of each item in *iterable* for which *pred* returns
``True``.
*pred* defaults to :func:`bool`, which will select truthy items:
>>> list(locate([0, 1, 1, 0, 1, 0, 0]))
[1, 2, 4]
Set *pred* to a custom function to, e.g., find the indexes for a particular
item.
>>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
[1, 3]
If *window_size* is given, then the *pred* function will be called with
that many items. This enables searching for sub-sequences:
>>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
>>> pred = lambda *args: args == (1, 2, 3)
>>> list(locate(iterable, pred=pred, window_size=3))
[1, 5, 9]
Use with :func:`seekable` to find indexes and then retrieve the associated
items:
>>> from itertools import count
>>> from more_itertools import seekable
>>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
>>> it = seekable(source)
>>> pred = lambda x: x > 100
>>> indexes = locate(it, pred=pred)
>>> i = next(indexes)
>>> it.seek(i)
>>> next(it)
106
]
if compare[name[window_size] is constant[None]] begin[:]
return[call[name[compress], parameter[call[name[count], parameter[]], call[name[map], parameter[name[pred], name[iterable]]]]]]
if compare[name[window_size] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da1b1d0c910>
variable[it] assign[=] call[name[windowed], parameter[name[iterable], name[window_size]]]
return[call[name[compress], parameter[call[name[count], parameter[]], call[name[starmap], parameter[name[pred], name[it]]]]]] | keyword[def] identifier[locate] ( identifier[iterable] , identifier[pred] = identifier[bool] , identifier[window_size] = keyword[None] ):
literal[string]
keyword[if] identifier[window_size] keyword[is] keyword[None] :
keyword[return] identifier[compress] ( identifier[count] (), identifier[map] ( identifier[pred] , identifier[iterable] ))
keyword[if] identifier[window_size] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[it] = identifier[windowed] ( identifier[iterable] , identifier[window_size] , identifier[fillvalue] = identifier[_marker] )
keyword[return] identifier[compress] ( identifier[count] (), identifier[starmap] ( identifier[pred] , identifier[it] )) | def locate(iterable, pred=bool, window_size=None):
"""Yield the index of each item in *iterable* for which *pred* returns
``True``.
*pred* defaults to :func:`bool`, which will select truthy items:
>>> list(locate([0, 1, 1, 0, 1, 0, 0]))
[1, 2, 4]
Set *pred* to a custom function to, e.g., find the indexes for a particular
item.
>>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
[1, 3]
If *window_size* is given, then the *pred* function will be called with
that many items. This enables searching for sub-sequences:
>>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
>>> pred = lambda *args: args == (1, 2, 3)
>>> list(locate(iterable, pred=pred, window_size=3))
[1, 5, 9]
Use with :func:`seekable` to find indexes and then retrieve the associated
items:
>>> from itertools import count
>>> from more_itertools import seekable
>>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
>>> it = seekable(source)
>>> pred = lambda x: x > 100
>>> indexes = locate(it, pred=pred)
>>> i = next(indexes)
>>> it.seek(i)
>>> next(it)
106
"""
if window_size is None:
return compress(count(), map(pred, iterable)) # depends on [control=['if'], data=[]]
if window_size < 1:
raise ValueError('window size must be at least 1') # depends on [control=['if'], data=[]]
it = windowed(iterable, window_size, fillvalue=_marker)
return compress(count(), starmap(pred, it)) |
def solve(self, value, attribute, args=None, kwargs=None):
"""Try to get the given attribute/function result for the given value.
If ``args`` or ``kwargs`` are passed, the attribute got from ``value`` must be callable.
They will be passed anyway (using ``[]`` and ``{}`` as default values) if the attribute
is a function.
Arguments
---------
value : object
Instance of the class held in ``self.source``
attribute : str
Name of the attribute to retrieve. Must be available through ``self.attributes``
args : list, default ``None``
If defined, list of non-named arguments that will be passed to the attribute if it's
callable.
kwargs : dict, default ``None``
If defined, list of named arguments that will be passed to the attribute if it's
callable.
Returns
-------
Value of the current attribute/function result for the ``value`` instance.
Raises
------
dataql.solvers.exceptions.NotSolvable
When the value is not an instance of the source class
dataql.solvers.exceptions.AttributeNotFound
When the attribute is not allowed
Example
-------
>>> from datetime import date
>>> d = date(2015, 6, 1)
>>> s = Source(date, ['day', 'strftime', ('str', str)])
>>> s.solve(d, 'day')
1
>>> s.solve(d, 'strftime', ['%F'])
'2015-06-01'
>>> s.solve(d, 'str')
'2015-06-01'
>>> s.solve('a string', ['day']) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql.solvers.exceptions.NotSolvable: The `datetime.date` source can only...
>>> s.solve(d, 'month') # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql.solvers.exceptions.AttributeNotFound: `month` is not an allowed...for `datetime.date`
>>> s.solve(d, 'day', ['foo'])
Traceback (most recent call last):
TypeError: 'int' object is not callable
"""
if not isinstance(value, self.source) and (
not self.allow_class or value is not self.source):
raise NotSolvable(self, value)
attr = None
if attribute in self.attributes:
attr = self.attributes[attribute]
else:
for parent_source in self.parent_sources:
if attribute in parent_source.attributes:
attr = parent_source.attributes[attribute]
break
if attr is None:
raise AttributeNotFound(attribute, self)
try:
return attr.solve(value, args, kwargs)
except AttributeNotFound:
# Re-raise ``AttributeNotFound`` with ``self`` as the source.
raise AttributeNotFound(attr, self) | def function[solve, parameter[self, value, attribute, args, kwargs]]:
constant[Try to get the given attribute/function result for the given value.
If ``args`` or ``kwargs`` are passed, the attribute got from ``value`` must be callable.
They will be passed anyway (using ``[]`` and ``{}`` as default values) if the attribute
is a function.
Arguments
---------
value : object
Instance of the class held in ``self.source``
attribute : str
Name of the attribute to retrieve. Must be available through ``self.attributes``
args : list, default ``None``
If defined, list of non-named arguments that will be passed to the attribute if it's
callable.
kwargs : dict, default ``None``
If defined, list of named arguments that will be passed to the attribute if it's
callable.
Returns
-------
Value of the current attribute/function result for the ``value`` instance.
Raises
------
dataql.solvers.exceptions.NotSolvable
When the value is not an instance of the source class
dataql.solvers.exceptions.AttributeNotFound
When the attribute is not allowed
Example
-------
>>> from datetime import date
>>> d = date(2015, 6, 1)
>>> s = Source(date, ['day', 'strftime', ('str', str)])
>>> s.solve(d, 'day')
1
>>> s.solve(d, 'strftime', ['%F'])
'2015-06-01'
>>> s.solve(d, 'str')
'2015-06-01'
>>> s.solve('a string', ['day']) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql.solvers.exceptions.NotSolvable: The `datetime.date` source can only...
>>> s.solve(d, 'month') # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql.solvers.exceptions.AttributeNotFound: `month` is not an allowed...for `datetime.date`
>>> s.solve(d, 'day', ['foo'])
Traceback (most recent call last):
TypeError: 'int' object is not callable
]
if <ast.BoolOp object at 0x7da204962ef0> begin[:]
<ast.Raise object at 0x7da204960ee0>
variable[attr] assign[=] constant[None]
if compare[name[attribute] in name[self].attributes] begin[:]
variable[attr] assign[=] call[name[self].attributes][name[attribute]]
<ast.Try object at 0x7da2047e96c0> | keyword[def] identifier[solve] ( identifier[self] , identifier[value] , identifier[attribute] , identifier[args] = keyword[None] , identifier[kwargs] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[self] . identifier[source] ) keyword[and] (
keyword[not] identifier[self] . identifier[allow_class] keyword[or] identifier[value] keyword[is] keyword[not] identifier[self] . identifier[source] ):
keyword[raise] identifier[NotSolvable] ( identifier[self] , identifier[value] )
identifier[attr] = keyword[None]
keyword[if] identifier[attribute] keyword[in] identifier[self] . identifier[attributes] :
identifier[attr] = identifier[self] . identifier[attributes] [ identifier[attribute] ]
keyword[else] :
keyword[for] identifier[parent_source] keyword[in] identifier[self] . identifier[parent_sources] :
keyword[if] identifier[attribute] keyword[in] identifier[parent_source] . identifier[attributes] :
identifier[attr] = identifier[parent_source] . identifier[attributes] [ identifier[attribute] ]
keyword[break]
keyword[if] identifier[attr] keyword[is] keyword[None] :
keyword[raise] identifier[AttributeNotFound] ( identifier[attribute] , identifier[self] )
keyword[try] :
keyword[return] identifier[attr] . identifier[solve] ( identifier[value] , identifier[args] , identifier[kwargs] )
keyword[except] identifier[AttributeNotFound] :
keyword[raise] identifier[AttributeNotFound] ( identifier[attr] , identifier[self] ) | def solve(self, value, attribute, args=None, kwargs=None):
"""Try to get the given attribute/function result for the given value.
If ``args`` or ``kwargs`` are passed, the attribute got from ``value`` must be callable.
They will be passed anyway (using ``[]`` and ``{}`` as default values) if the attribute
is a function.
Arguments
---------
value : object
Instance of the class held in ``self.source``
attribute : str
Name of the attribute to retrieve. Must be available through ``self.attributes``
args : list, default ``None``
If defined, list of non-named arguments that will be passed to the attribute if it's
callable.
kwargs : dict, default ``None``
If defined, list of named arguments that will be passed to the attribute if it's
callable.
Returns
-------
Value of the current attribute/function result for the ``value`` instance.
Raises
------
dataql.solvers.exceptions.NotSolvable
When the value is not an instance of the source class
dataql.solvers.exceptions.AttributeNotFound
When the attribute is not allowed
Example
-------
>>> from datetime import date
>>> d = date(2015, 6, 1)
>>> s = Source(date, ['day', 'strftime', ('str', str)])
>>> s.solve(d, 'day')
1
>>> s.solve(d, 'strftime', ['%F'])
'2015-06-01'
>>> s.solve(d, 'str')
'2015-06-01'
>>> s.solve('a string', ['day']) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql.solvers.exceptions.NotSolvable: The `datetime.date` source can only...
>>> s.solve(d, 'month') # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql.solvers.exceptions.AttributeNotFound: `month` is not an allowed...for `datetime.date`
>>> s.solve(d, 'day', ['foo'])
Traceback (most recent call last):
TypeError: 'int' object is not callable
"""
if not isinstance(value, self.source) and (not self.allow_class or value is not self.source):
raise NotSolvable(self, value) # depends on [control=['if'], data=[]]
attr = None
if attribute in self.attributes:
attr = self.attributes[attribute] # depends on [control=['if'], data=['attribute']]
else:
for parent_source in self.parent_sources:
if attribute in parent_source.attributes:
attr = parent_source.attributes[attribute]
break # depends on [control=['if'], data=['attribute']] # depends on [control=['for'], data=['parent_source']]
if attr is None:
raise AttributeNotFound(attribute, self) # depends on [control=['if'], data=[]]
try:
return attr.solve(value, args, kwargs) # depends on [control=['try'], data=[]]
except AttributeNotFound:
# Re-raise ``AttributeNotFound`` with ``self`` as the source.
raise AttributeNotFound(attr, self) # depends on [control=['except'], data=[]] |
def ascii_printable(self, keysym):
"""
If the keysym corresponds to a non-printable ascii character this will
return False. If it is printable, then True will be returned.
ascii 11 (vertical tab) and ascii 12 are printable, chr(11) and chr(12)
will return '\x0b' and '\x0c' respectively.
"""
if 0 <= keysym < 9:
return False
elif 13 < keysym < 32:
return False
elif keysym > 126:
return False
else:
return True | def function[ascii_printable, parameter[self, keysym]]:
constant[
If the keysym corresponds to a non-printable ascii character this will
return False. If it is printable, then True will be returned.
ascii 11 (vertical tab) and ascii 12 are printable, chr(11) and chr(12)
will return '\x0b' and '\x0c' respectively.
]
if compare[constant[0] less_or_equal[<=] name[keysym]] begin[:]
return[constant[False]] | keyword[def] identifier[ascii_printable] ( identifier[self] , identifier[keysym] ):
literal[string]
keyword[if] literal[int] <= identifier[keysym] < literal[int] :
keyword[return] keyword[False]
keyword[elif] literal[int] < identifier[keysym] < literal[int] :
keyword[return] keyword[False]
keyword[elif] identifier[keysym] > literal[int] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[True] | def ascii_printable(self, keysym):
"""
If the keysym corresponds to a non-printable ascii character this will
return False. If it is printable, then True will be returned.
ascii 11 (vertical tab) and ascii 12 are printable, chr(11) and chr(12)
will return '\x0b' and '\x0c' respectively.
"""
if 0 <= keysym < 9:
return False # depends on [control=['if'], data=[]]
elif 13 < keysym < 32:
return False # depends on [control=['if'], data=[]]
elif keysym > 126:
return False # depends on [control=['if'], data=[]]
else:
return True |
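# The three comparisons above encode a keysym whitelist; this standalone check
# recomputes the same boundaries and diffs them against str.isprintable over
# the ASCII range. They deliberately disagree on 9-13 (the tab/newline family);
# the docstring calls out 11 and 12 explicitly.
def keysym_printable(keysym):
    if 0 <= keysym < 9:
        return False
    if 13 < keysym < 32:
        return False
    if keysym > 126:
        return False
    return True

diffs = [k for k in range(128) if keysym_printable(k) != chr(k).isprintable()]
print(diffs)  # [9, 10, 11, 12, 13]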
def get_external_files(self):
"""
An external file manages file paths.
"""
external_files = []
for table in self._tables.values():
for r in table:
external_files.extend([ef for ef in r.get_external_files()])
return external_files | def function[get_external_files, parameter[self]]:
constant[
An external file manages file paths.
]
variable[external_files] assign[=] list[[]]
for taget[name[table]] in starred[call[name[self]._tables.values, parameter[]]] begin[:]
for taget[name[r]] in starred[name[table]] begin[:]
call[name[external_files].extend, parameter[<ast.ListComp object at 0x7da20c7950c0>]]
return[name[external_files]] | keyword[def] identifier[get_external_files] ( identifier[self] ):
literal[string]
identifier[external_files] =[]
keyword[for] identifier[table] keyword[in] identifier[self] . identifier[_tables] . identifier[values] ():
keyword[for] identifier[r] keyword[in] identifier[table] :
identifier[external_files] . identifier[extend] ([ identifier[ef] keyword[for] identifier[ef] keyword[in] identifier[r] . identifier[get_external_files] ()])
keyword[return] identifier[external_files] | def get_external_files(self):
"""
An external file manages file paths.
"""
external_files = []
for table in self._tables.values():
for r in table:
external_files.extend([ef for ef in r.get_external_files()]) # depends on [control=['for'], data=['r']] # depends on [control=['for'], data=['table']]
return external_files |
async def store_their_did(wallet_handle: int,
identity_json: str) -> None:
"""
Saves their DID for a pairwise connection in a secured Wallet,
so that it can be used to verify transaction.
:param wallet_handle: wallet handle (created by open_wallet).
:param identity_json: Identity information as json. Example:
{
"did": string, (required)
"verkey": string (optional, if only pk is provided),
"crypto_type": string, (optional; if not set then ed25519 curve is used;
currently only 'ed25519' value is supported for this field)
}
:return: None
"""
logger = logging.getLogger(__name__)
logger.debug("store_their_did: >>> wallet_handle: %r, identity_json: %r",
wallet_handle,
identity_json)
if not hasattr(store_their_did, "cb"):
logger.debug("store_their_did: Creating callback")
store_their_did.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
c_wallet_handle = c_int32(wallet_handle)
c_identity_json = c_char_p(identity_json.encode('utf-8'))
res = await do_call('indy_store_their_did',
c_wallet_handle,
c_identity_json,
store_their_did.cb)
logger.debug("store_their_did: <<< res: %r", res)
return res | <ast.AsyncFunctionDef object at 0x7da20e957460> | keyword[async] keyword[def] identifier[store_their_did] ( identifier[wallet_handle] : identifier[int] ,
identifier[identity_json] : identifier[str] )-> keyword[None] :
literal[string]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[wallet_handle] ,
identifier[identity_json] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[store_their_did] , literal[string] ):
identifier[logger] . identifier[debug] ( literal[string] )
identifier[store_their_did] . identifier[cb] = identifier[create_cb] ( identifier[CFUNCTYPE] ( keyword[None] , identifier[c_int32] , identifier[c_int32] ))
identifier[c_wallet_handle] = identifier[c_int32] ( identifier[wallet_handle] )
identifier[c_identity_json] = identifier[c_char_p] ( identifier[identity_json] . identifier[encode] ( literal[string] ))
identifier[res] = keyword[await] identifier[do_call] ( literal[string] ,
identifier[c_wallet_handle] ,
identifier[c_identity_json] ,
identifier[store_their_did] . identifier[cb] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[res] )
keyword[return] identifier[res] | async def store_their_did(wallet_handle: int, identity_json: str) -> None:
"""
Saves their DID for a pairwise connection in a secured Wallet,
so that it can be used to verify transaction.
:param wallet_handle: wallet handler (created by open_wallet).
:param identity_json: Identity information as json. Example:
{
"did": string, (required)
"verkey": string (optional, if only pk is provided),
"crypto_type": string, (optional; if not set then ed25519 curve is used;
currently only 'ed25519' value is supported for this field)
}
:return: None
"""
logger = logging.getLogger(__name__)
logger.debug('store_their_did: >>> wallet_handle: %r, identity_json: %r', wallet_handle, identity_json)
if not hasattr(store_their_did, 'cb'):
logger.debug('store_their_did: Creating callback')
store_their_did.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32)) # depends on [control=['if'], data=[]]
c_wallet_handle = c_int32(wallet_handle)
c_identity_json = c_char_p(identity_json.encode('utf-8'))
res = await do_call('indy_store_their_did', c_wallet_handle, c_identity_json, store_their_did.cb)
logger.debug('store_their_did: <<< res: %r', res)
return res |
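# Hedged usage sketch for store_their_did, assuming a wallet has already been
# opened elsewhere; the DID and verkey values below are placeholders.
import asyncio
import json

async def demo(wallet_handle):
    identity = {
        'did': 'VsKV7grR1BUE29mG2Fm2kX',                           # placeholder
        'verkey': 'GJ1SzoWzavQYfNL9XkaJdrQejfztN4XqdsiV4ct3LXKL',  # placeholder
    }
    await store_their_did(wallet_handle, json.dumps(identity))

# asyncio.run(demo(wallet_handle)) once open_wallet has produced a handle.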
def df(self, keys=None, basis=None, uwi=False):
"""
Return current curve data as a ``pandas.DataFrame`` object.
Everything has to have the same basis, because the depth
is going to become the index of the DataFrame. If you don't
provide one, ``welly`` will make one using ``survey_basis()``.
Args:
keys (list): List of strings: the keys of the data items to
survey, if not all of them.
basis (array): A basis, if you want to enforce one, otherwise
you'll get the result of ``survey_basis()``.
uwi (bool): Whether to add a 'UWI' column.
Returns:
pandas.DataFrame.
"""
try:
import pandas as pd
except ImportError:
m = "You must install pandas to use dataframes."
raise WellError(m)
from pandas.api.types import is_object_dtype
if keys is None:
keys = [k for k, v in self.data.items() if isinstance(v, Curve)]
else:
keys = utils.flatten_list(keys)
if basis is None:
basis = self.survey_basis(keys=keys)
if basis is None:
m = "No basis was provided and welly could not retrieve common basis."
raise WellError(m)
data = {k: v.to_basis(basis) for k, v in self.data.items() if isinstance(v, Curve)}
df = pd.DataFrame(data, columns=list(self.data.keys()))
df['Depth'] = basis
df = df.set_index('Depth')
if uwi:
df['UWI'] = [self.uwi for _ in basis]
df = df.reset_index()
df = df.set_index(['UWI', 'Depth'])
for column in df.columns:
if is_object_dtype(df[column].dtype):
try:
df[column] = df[column].astype(np.float64)
except ValueError:
pass
return df | def function[df, parameter[self, keys, basis, uwi]]:
constant[
Return current curve data as a ``pandas.DataFrame`` object.
Everything has to have the same basis, because the depth
is going to become the index of the DataFrame. If you don't
provide one, ``welly`` will make one using ``survey_basis()``.
Args:
keys (list): List of strings: the keys of the data items to
survey, if not all of them.
basis (array): A basis, if you want to enforce one, otherwise
you'll get the result of ``survey_basis()``.
uwi (bool): Whether to add a 'UWI' column.
Returns:
pandas.DataFrame.
]
<ast.Try object at 0x7da1b23eca00>
from relative_module[pandas.api.types] import module[is_object_dtype]
if compare[name[keys] is constant[None]] begin[:]
variable[keys] assign[=] <ast.ListComp object at 0x7da1b23edcc0>
if compare[name[basis] is constant[None]] begin[:]
variable[basis] assign[=] call[name[self].survey_basis, parameter[]]
if compare[name[basis] is constant[None]] begin[:]
variable[m] assign[=] constant[No basis was provided and welly could not retrieve common basis.]
<ast.Raise object at 0x7da1b2281f30>
variable[data] assign[=] <ast.DictComp object at 0x7da1b2282830>
variable[df] assign[=] call[name[pd].DataFrame, parameter[name[data]]]
call[name[df]][constant[Depth]] assign[=] name[basis]
variable[df] assign[=] call[name[df].set_index, parameter[constant[Depth]]]
if name[uwi] begin[:]
call[name[df]][constant[UWI]] assign[=] <ast.ListComp object at 0x7da1b2281ba0>
variable[df] assign[=] call[name[df].reset_index, parameter[]]
variable[df] assign[=] call[name[df].set_index, parameter[list[[<ast.Constant object at 0x7da1b2281e70>, <ast.Constant object at 0x7da1b2281de0>]]]]
for taget[name[column]] in starred[name[df].columns] begin[:]
if call[name[is_object_dtype], parameter[call[name[df]][name[column]].dtype]] begin[:]
<ast.Try object at 0x7da1b2282a10>
return[name[df]] | keyword[def] identifier[df] ( identifier[self] , identifier[keys] = keyword[None] , identifier[basis] = keyword[None] , identifier[uwi] = keyword[False] ):
literal[string]
keyword[try] :
keyword[import] identifier[pandas] keyword[as] identifier[pd]
keyword[except] :
identifier[m] = literal[string]
keyword[raise] identifier[WellError] ( identifier[m] )
keyword[from] identifier[pandas] . identifier[api] . identifier[types] keyword[import] identifier[is_object_dtype]
keyword[if] identifier[keys] keyword[is] keyword[None] :
identifier[keys] =[ identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[data] . identifier[items] () keyword[if] identifier[isinstance] ( identifier[v] , identifier[Curve] )]
keyword[else] :
identifier[keys] = identifier[utils] . identifier[flatten_list] ( identifier[keys] )
keyword[if] identifier[basis] keyword[is] keyword[None] :
identifier[basis] = identifier[self] . identifier[survey_basis] ( identifier[keys] = identifier[keys] )
keyword[if] identifier[basis] keyword[is] keyword[None] :
identifier[m] = literal[string]
keyword[raise] identifier[WellError] ( identifier[m] )
identifier[data] ={ identifier[k] : identifier[v] . identifier[to_basis] ( identifier[basis] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[data] . identifier[items] () keyword[if] identifier[isinstance] ( identifier[v] , identifier[Curve] )}
identifier[df] = identifier[pd] . identifier[DataFrame] ( identifier[data] , identifier[columns] = identifier[list] ( identifier[self] . identifier[data] . identifier[keys] ()))
identifier[df] [ literal[string] ]= identifier[basis]
identifier[df] = identifier[df] . identifier[set_index] ( literal[string] )
keyword[if] identifier[uwi] :
identifier[df] [ literal[string] ]=[ identifier[self] . identifier[uwi] keyword[for] identifier[_] keyword[in] identifier[basis] ]
identifier[df] = identifier[df] . identifier[reset_index] ()
identifier[df] = identifier[df] . identifier[set_index] ([ literal[string] , literal[string] ])
keyword[for] identifier[column] keyword[in] identifier[df] . identifier[columns] :
keyword[if] identifier[is_object_dtype] ( identifier[df] [ identifier[column] ]. identifier[dtype] ):
keyword[try] :
identifier[df] [ identifier[column] ]= identifier[df] [ identifier[column] ]. identifier[astype] ( identifier[np] . identifier[float64] )
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[return] identifier[df] | def df(self, keys=None, basis=None, uwi=False):
"""
Return current curve data as a ``pandas.DataFrame`` object.
Everything has to have the same basis, because the depth
is going to become the index of the DataFrame. If you don't
provide one, ``welly`` will make one using ``survey_basis()``.
Args:
keys (list): List of strings: the keys of the data items to
survey, if not all of them.
basis (array): A basis, if you want to enforce one, otherwise
you'll get the result of ``survey_basis()``.
uwi (bool): Whether to add a 'UWI' column.
Returns:
pandas.DataFrame.
"""
try:
import pandas as pd # depends on [control=['try'], data=[]]
except ImportError:
m = 'You must install pandas to use dataframes.'
raise WellError(m) # depends on [control=['except'], data=[]]
from pandas.api.types import is_object_dtype
if keys is None:
keys = [k for (k, v) in self.data.items() if isinstance(v, Curve)] # depends on [control=['if'], data=['keys']]
else:
keys = utils.flatten_list(keys)
if basis is None:
basis = self.survey_basis(keys=keys) # depends on [control=['if'], data=['basis']]
if basis is None:
m = 'No basis was provided and welly could not retrieve common basis.'
raise WellError(m) # depends on [control=['if'], data=[]]
data = {k: v.to_basis(basis) for (k, v) in self.data.items() if isinstance(v, Curve)}
df = pd.DataFrame(data, columns=list(self.data.keys()))
df['Depth'] = basis
df = df.set_index('Depth')
if uwi:
df['UWI'] = [self.uwi for _ in basis]
df = df.reset_index()
df = df.set_index(['UWI', 'Depth']) # depends on [control=['if'], data=[]]
for column in df.columns:
if is_object_dtype(df[column].dtype):
try:
df[column] = df[column].astype(np.float64) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['column']]
return df |
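# The index handling at the end of df() reduces to a standard pandas pattern;
# this self-contained sketch reproduces the Depth index and the optional
# (UWI, Depth) MultiIndex with made-up values.
import numpy as np
import pandas as pd

basis = np.arange(1000.0, 1003.0)                 # fake depth basis
frame = pd.DataFrame({'GR': [55.0, 60.2, 58.1]})  # fake curve data
frame['Depth'] = basis
frame = frame.set_index('Depth')
frame['UWI'] = ['100/01-01-001-01W1/0'] * len(basis)  # mirrors uwi=True
frame = frame.reset_index().set_index(['UWI', 'Depth'])
print(frame)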
def get_response_data(self, response, parse_json=True):
"""
Get response data or throw an appropiate exception
:param response: requests response object
:param parse_json: if True, response will be parsed as JSON
:return: response data, either as json or as a regular response.content object
"""
if response.status_code in (requests.codes.ok, requests.codes.created):
if parse_json:
return response.json()
return response.content
elif response.status_code == requests.codes.bad_request:
response_json = response.json()
raise BadRequestException(response_json.get("error", False) or response_json.get("errors",
_("Bad Request: {text}").format(text=response.text)))
elif response.status_code == requests.codes.not_found:
raise NotFoundException(_("Resource not found: {url}").format(url=response.url))
elif response.status_code == requests.codes.internal_server_error:
raise ServerErrorException(_("Internal server error"))
elif response.status_code in (requests.codes.unauthorized, requests.codes.forbidden):
raise AuthErrorException(_("Access denied"))
elif response.status_code == requests.codes.too_many_requests:
raise RateLimitException(_(response.text))
else:
raise ServerErrorException(_("Unknown error occurred")) | def function[get_response_data, parameter[self, response, parse_json]]:
constant[
    Get response data or throw an appropriate exception
:param response: requests response object
:param parse_json: if True, response will be parsed as JSON
:return: response data, either as json or as a regular response.content object
]
if compare[name[response].status_code in tuple[[<ast.Attribute object at 0x7da20c76fb50>, <ast.Attribute object at 0x7da20c76ff40>]]] begin[:]
if name[parse_json] begin[:]
return[call[name[response].json, parameter[]]]
return[name[response].content] | keyword[def] identifier[get_response_data] ( identifier[self] , identifier[response] , identifier[parse_json] = keyword[True] ):
literal[string]
keyword[if] identifier[response] . identifier[status_code] keyword[in] ( identifier[requests] . identifier[codes] . identifier[ok] , identifier[requests] . identifier[codes] . identifier[created] ):
keyword[if] identifier[parse_json] :
keyword[return] identifier[response] . identifier[json] ()
keyword[return] identifier[response] . identifier[content]
keyword[elif] identifier[response] . identifier[status_code] == identifier[requests] . identifier[codes] . identifier[bad_request] :
identifier[response_json] = identifier[response] . identifier[json] ()
keyword[raise] identifier[BadRequestException] ( identifier[response_json] . identifier[get] ( literal[string] , keyword[False] ) keyword[or] identifier[response_json] . identifier[get] ( literal[string] ,
identifier[_] ( literal[string] ). identifier[format] ( identifier[text] = identifier[response] . identifier[text] )))
keyword[elif] identifier[response] . identifier[status_code] == identifier[requests] . identifier[codes] . identifier[not_found] :
keyword[raise] identifier[NotFoundException] ( identifier[_] ( literal[string] ). identifier[format] ( identifier[url] = identifier[response] . identifier[url] ))
keyword[elif] identifier[response] . identifier[status_code] == identifier[requests] . identifier[codes] . identifier[internal_server_error] :
keyword[raise] identifier[ServerErrorException] ( identifier[_] ( literal[string] ))
keyword[elif] identifier[response] . identifier[status_code] keyword[in] ( identifier[requests] . identifier[codes] . identifier[unauthorized] , identifier[requests] . identifier[codes] . identifier[forbidden] ):
keyword[raise] identifier[AuthErrorException] ( identifier[_] ( literal[string] ))
keyword[elif] identifier[response] . identifier[status_code] == identifier[requests] . identifier[codes] . identifier[too_many_requests] :
keyword[raise] identifier[RateLimitException] ( identifier[_] ( identifier[response] . identifier[text] ))
keyword[else] :
keyword[raise] identifier[ServerErrorException] ( identifier[_] ( literal[string] )) | def get_response_data(self, response, parse_json=True):
"""
    Get response data or throw an appropriate exception
:param response: requests response object
:param parse_json: if True, response will be parsed as JSON
:return: response data, either as json or as a regular response.content object
"""
if response.status_code in (requests.codes.ok, requests.codes.created):
if parse_json:
return response.json() # depends on [control=['if'], data=[]]
return response.content # depends on [control=['if'], data=[]]
elif response.status_code == requests.codes.bad_request:
response_json = response.json()
raise BadRequestException(response_json.get('error', False) or response_json.get('errors', _('Bad Request: {text}').format(text=response.text))) # depends on [control=['if'], data=[]]
elif response.status_code == requests.codes.not_found:
raise NotFoundException(_('Resource not found: {url}').format(url=response.url)) # depends on [control=['if'], data=[]]
elif response.status_code == requests.codes.internal_server_error:
raise ServerErrorException(_('Internal server error')) # depends on [control=['if'], data=[]]
elif response.status_code in (requests.codes.unauthorized, requests.codes.forbidden):
raise AuthErrorException(_('Access denied')) # depends on [control=['if'], data=[]]
elif response.status_code == requests.codes.too_many_requests:
raise RateLimitException(_(response.text)) # depends on [control=['if'], data=[]]
else:
raise ServerErrorException(_('Unknown error occurred')) |
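# Sketch of the status-code dispatch above, exercised with requests directly;
# the httpbin URL is illustrative. requests.codes supplies the named status
# constants the method compares against.
import requests

r = requests.get('https://httpbin.org/json')
if r.status_code in (requests.codes.ok, requests.codes.created):
    data = r.json()  # the branch get_response_data takes when parse_json=True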
def resize_image_folder(bucket, key_prefix, pil_size):
""" This function resizes all the images in a folder """
con = boto.connect_s3()
b = con.get_bucket(bucket)
for key in b.list(key_prefix):
key = b.get_key(key.name)
if 'image' not in key.content_type:
continue
size = key.get_metadata('size')
if size == str(pil_size):
continue
with tempfile.TemporaryFile() as big, tempfile.TemporaryFile() as small:
# download file and resize
key.get_contents_to_file(big)
big.flush()
big.seek(0)
img = Image.open(big)
img.thumbnail(pil_size, Image.ANTIALIAS)
img.save(small, img.format)
small.flush()
small.seek(0)
key.set_metadata('size', str(pil_size))
key.set_contents_from_file(small, headers={'Content-Type': key.content_type}) | def function[resize_image_folder, parameter[bucket, key_prefix, pil_size]]:
    constant[ Resize all images under the given S3 key prefix in place. ]
variable[con] assign[=] call[name[boto].connect_s3, parameter[]]
variable[b] assign[=] call[name[con].get_bucket, parameter[name[bucket]]]
for taget[name[key]] in starred[call[name[b].list, parameter[name[key_prefix]]]] begin[:]
variable[key] assign[=] call[name[b].get_key, parameter[name[key].name]]
if compare[constant[image] <ast.NotIn object at 0x7da2590d7190> name[key].content_type] begin[:]
continue
variable[size] assign[=] call[name[key].get_metadata, parameter[constant[size]]]
if compare[name[size] equal[==] call[name[str], parameter[name[pil_size]]]] begin[:]
continue
with call[name[tempfile].TemporaryFile, parameter[]] begin[:]
call[name[key].get_contents_to_file, parameter[name[big]]]
call[name[big].flush, parameter[]]
call[name[big].seek, parameter[constant[0]]]
variable[img] assign[=] call[name[Image].open, parameter[name[big]]]
call[name[img].thumbnail, parameter[name[pil_size], name[Image].ANTIALIAS]]
call[name[img].save, parameter[name[small], name[img].format]]
call[name[small].flush, parameter[]]
call[name[small].seek, parameter[constant[0]]]
call[name[key].set_metadata, parameter[constant[size], call[name[str], parameter[name[pil_size]]]]]
call[name[key].set_contents_from_file, parameter[name[small]]] | keyword[def] identifier[resize_image_folder] ( identifier[bucket] , identifier[key_prefix] , identifier[pil_size] ):
literal[string]
identifier[con] = identifier[boto] . identifier[connect_s3] ()
identifier[b] = identifier[con] . identifier[get_bucket] ( identifier[bucket] )
keyword[for] identifier[key] keyword[in] identifier[b] . identifier[list] ( identifier[key_prefix] ):
identifier[key] = identifier[b] . identifier[get_key] ( identifier[key] . identifier[name] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[key] . identifier[content_type] :
keyword[continue]
identifier[size] = identifier[key] . identifier[get_metadata] ( literal[string] )
keyword[if] identifier[size] == identifier[str] ( identifier[pil_size] ):
keyword[continue]
keyword[with] identifier[tempfile] . identifier[TemporaryFile] () keyword[as] identifier[big] , identifier[tempfile] . identifier[TemporaryFile] () keyword[as] identifier[small] :
identifier[key] . identifier[get_contents_to_file] ( identifier[big] )
identifier[big] . identifier[flush] ()
identifier[big] . identifier[seek] ( literal[int] )
identifier[img] = identifier[Image] . identifier[open] ( identifier[big] )
identifier[img] . identifier[thumbnail] ( identifier[pil_size] , identifier[Image] . identifier[ANTIALIAS] )
identifier[img] . identifier[save] ( identifier[small] , identifier[img] . identifier[format] )
identifier[small] . identifier[flush] ()
identifier[small] . identifier[seek] ( literal[int] )
identifier[key] . identifier[set_metadata] ( literal[string] , identifier[str] ( identifier[pil_size] ))
identifier[key] . identifier[set_contents_from_file] ( identifier[small] , identifier[headers] ={ literal[string] : identifier[key] . identifier[content_type] }) | def resize_image_folder(bucket, key_prefix, pil_size):
""" This function resizes all the images in a folder """
con = boto.connect_s3()
b = con.get_bucket(bucket)
for key in b.list(key_prefix):
key = b.get_key(key.name)
if 'image' not in key.content_type:
continue # depends on [control=['if'], data=[]]
size = key.get_metadata('size')
if size == str(pil_size):
continue # depends on [control=['if'], data=[]]
with tempfile.TemporaryFile() as big, tempfile.TemporaryFile() as small:
# download file and resize
key.get_contents_to_file(big)
big.flush()
big.seek(0)
img = Image.open(big)
img.thumbnail(pil_size, Image.ANTIALIAS)
img.save(small, img.format)
small.flush()
small.seek(0)
key.set_metadata('size', str(pil_size))
key.set_contents_from_file(small, headers={'Content-Type': key.content_type}) # depends on [control=['with'], data=['big']] # depends on [control=['for'], data=['key']] |
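# Hypothetical call for resize_image_folder above: shrink every image under
# 'photos/' in bucket 'my-bucket' to fit inside 256x256. Image.thumbnail keeps
# aspect ratio, and the 'size' metadata check makes re-runs skip keys that
# were already resized.
resize_image_folder('my-bucket', 'photos/', (256, 256))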
def _get_virtual_variable(variables, key, level_vars=None, dim_sizes=None):
"""Get a virtual variable (e.g., 'time.year' or a MultiIndex level)
from a dict of xarray.Variable objects (if possible)
"""
if level_vars is None:
level_vars = {}
if dim_sizes is None:
dim_sizes = {}
if key in dim_sizes:
data = pd.Index(range(dim_sizes[key]), name=key)
variable = IndexVariable((key,), data)
return key, key, variable
if not isinstance(key, str):
raise KeyError(key)
split_key = key.split('.', 1)
if len(split_key) == 2:
ref_name, var_name = split_key
elif len(split_key) == 1:
ref_name, var_name = key, None
else:
raise KeyError(key)
if ref_name in level_vars:
dim_var = variables[level_vars[ref_name]]
ref_var = dim_var.to_index_variable().get_level_variable(ref_name)
else:
ref_var = variables[ref_name]
if var_name is None:
virtual_var = ref_var
var_name = key
else:
if _contains_datetime_like_objects(ref_var):
ref_var = xr.DataArray(ref_var)
data = getattr(ref_var.dt, var_name).data
else:
data = getattr(ref_var, var_name).data
virtual_var = Variable(ref_var.dims, data)
return ref_name, var_name, virtual_var | def function[_get_virtual_variable, parameter[variables, key, level_vars, dim_sizes]]:
constant[Get a virtual variable (e.g., 'time.year' or a MultiIndex level)
from a dict of xarray.Variable objects (if possible)
]
if compare[name[level_vars] is constant[None]] begin[:]
variable[level_vars] assign[=] dictionary[[], []]
if compare[name[dim_sizes] is constant[None]] begin[:]
variable[dim_sizes] assign[=] dictionary[[], []]
if compare[name[key] in name[dim_sizes]] begin[:]
variable[data] assign[=] call[name[pd].Index, parameter[call[name[range], parameter[call[name[dim_sizes]][name[key]]]]]]
variable[variable] assign[=] call[name[IndexVariable], parameter[tuple[[<ast.Name object at 0x7da18c4cd120>]], name[data]]]
return[tuple[[<ast.Name object at 0x7da18c4ce590>, <ast.Name object at 0x7da18c4ced10>, <ast.Name object at 0x7da18c4ccb80>]]]
if <ast.UnaryOp object at 0x7da18c4ced40> begin[:]
<ast.Raise object at 0x7da18c4cfbb0>
variable[split_key] assign[=] call[name[key].split, parameter[constant[.], constant[1]]]
if compare[call[name[len], parameter[name[split_key]]] equal[==] constant[2]] begin[:]
<ast.Tuple object at 0x7da18c4cd060> assign[=] name[split_key]
if compare[name[ref_name] in name[level_vars]] begin[:]
variable[dim_var] assign[=] call[name[variables]][call[name[level_vars]][name[ref_name]]]
variable[ref_var] assign[=] call[call[name[dim_var].to_index_variable, parameter[]].get_level_variable, parameter[name[ref_name]]]
if compare[name[var_name] is constant[None]] begin[:]
variable[virtual_var] assign[=] name[ref_var]
variable[var_name] assign[=] name[key]
return[tuple[[<ast.Name object at 0x7da18c4cff10>, <ast.Name object at 0x7da18c4ceb90>, <ast.Name object at 0x7da18c4ce380>]]] | keyword[def] identifier[_get_virtual_variable] ( identifier[variables] , identifier[key] , identifier[level_vars] = keyword[None] , identifier[dim_sizes] = keyword[None] ):
literal[string]
keyword[if] identifier[level_vars] keyword[is] keyword[None] :
identifier[level_vars] ={}
keyword[if] identifier[dim_sizes] keyword[is] keyword[None] :
identifier[dim_sizes] ={}
keyword[if] identifier[key] keyword[in] identifier[dim_sizes] :
identifier[data] = identifier[pd] . identifier[Index] ( identifier[range] ( identifier[dim_sizes] [ identifier[key] ]), identifier[name] = identifier[key] )
identifier[variable] = identifier[IndexVariable] (( identifier[key] ,), identifier[data] )
keyword[return] identifier[key] , identifier[key] , identifier[variable]
keyword[if] keyword[not] identifier[isinstance] ( identifier[key] , identifier[str] ):
keyword[raise] identifier[KeyError] ( identifier[key] )
identifier[split_key] = identifier[key] . identifier[split] ( literal[string] , literal[int] )
keyword[if] identifier[len] ( identifier[split_key] )== literal[int] :
identifier[ref_name] , identifier[var_name] = identifier[split_key]
keyword[elif] identifier[len] ( identifier[split_key] )== literal[int] :
identifier[ref_name] , identifier[var_name] = identifier[key] , keyword[None]
keyword[else] :
keyword[raise] identifier[KeyError] ( identifier[key] )
keyword[if] identifier[ref_name] keyword[in] identifier[level_vars] :
identifier[dim_var] = identifier[variables] [ identifier[level_vars] [ identifier[ref_name] ]]
identifier[ref_var] = identifier[dim_var] . identifier[to_index_variable] (). identifier[get_level_variable] ( identifier[ref_name] )
keyword[else] :
identifier[ref_var] = identifier[variables] [ identifier[ref_name] ]
keyword[if] identifier[var_name] keyword[is] keyword[None] :
identifier[virtual_var] = identifier[ref_var]
identifier[var_name] = identifier[key]
keyword[else] :
keyword[if] identifier[_contains_datetime_like_objects] ( identifier[ref_var] ):
identifier[ref_var] = identifier[xr] . identifier[DataArray] ( identifier[ref_var] )
identifier[data] = identifier[getattr] ( identifier[ref_var] . identifier[dt] , identifier[var_name] ). identifier[data]
keyword[else] :
identifier[data] = identifier[getattr] ( identifier[ref_var] , identifier[var_name] ). identifier[data]
identifier[virtual_var] = identifier[Variable] ( identifier[ref_var] . identifier[dims] , identifier[data] )
keyword[return] identifier[ref_name] , identifier[var_name] , identifier[virtual_var] | def _get_virtual_variable(variables, key, level_vars=None, dim_sizes=None):
"""Get a virtual variable (e.g., 'time.year' or a MultiIndex level)
from a dict of xarray.Variable objects (if possible)
"""
if level_vars is None:
level_vars = {} # depends on [control=['if'], data=['level_vars']]
if dim_sizes is None:
dim_sizes = {} # depends on [control=['if'], data=['dim_sizes']]
if key in dim_sizes:
data = pd.Index(range(dim_sizes[key]), name=key)
variable = IndexVariable((key,), data)
return (key, key, variable) # depends on [control=['if'], data=['key', 'dim_sizes']]
if not isinstance(key, str):
raise KeyError(key) # depends on [control=['if'], data=[]]
split_key = key.split('.', 1)
if len(split_key) == 2:
(ref_name, var_name) = split_key # depends on [control=['if'], data=[]]
elif len(split_key) == 1:
(ref_name, var_name) = (key, None) # depends on [control=['if'], data=[]]
else:
raise KeyError(key)
if ref_name in level_vars:
dim_var = variables[level_vars[ref_name]]
ref_var = dim_var.to_index_variable().get_level_variable(ref_name) # depends on [control=['if'], data=['ref_name', 'level_vars']]
else:
ref_var = variables[ref_name]
if var_name is None:
virtual_var = ref_var
var_name = key # depends on [control=['if'], data=['var_name']]
else:
if _contains_datetime_like_objects(ref_var):
ref_var = xr.DataArray(ref_var)
data = getattr(ref_var.dt, var_name).data # depends on [control=['if'], data=[]]
else:
data = getattr(ref_var, var_name).data
virtual_var = Variable(ref_var.dims, data)
return (ref_name, var_name, virtual_var) |
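# Public-API analogue of the 'ref.attr' lookup implemented above: 'time.year'
# resolves to the year component of the datetime coordinate.
import numpy as np
import pandas as pd
import xarray as xr

ds = xr.Dataset({'v': ('time', np.arange(4))},
                coords={'time': pd.date_range('2000-01-01', periods=4)})
years = ds['time.year']  # served internally by _get_virtual_variable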
def set_grade_system(self, grade_system_id):
"""Sets the grading system.
arg: grade_system_id (osid.id.Id): the grade system
raise: InvalidArgument - ``grade_system_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_avatar_template
if self.get_grade_system_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(grade_system_id):
raise errors.InvalidArgument()
self._my_map['gradeSystemId'] = str(grade_system_id) | def function[set_grade_system, parameter[self, grade_system_id]]:
constant[Sets the grading system.
arg: grade_system_id (osid.id.Id): the grade system
raise: InvalidArgument - ``grade_system_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
]
if call[call[name[self].get_grade_system_metadata, parameter[]].is_read_only, parameter[]] begin[:]
<ast.Raise object at 0x7da204620d90>
if <ast.UnaryOp object at 0x7da2046216f0> begin[:]
<ast.Raise object at 0x7da204622110>
call[name[self]._my_map][constant[gradeSystemId]] assign[=] call[name[str], parameter[name[grade_system_id]]] | keyword[def] identifier[set_grade_system] ( identifier[self] , identifier[grade_system_id] ):
literal[string]
keyword[if] identifier[self] . identifier[get_grade_system_metadata] (). identifier[is_read_only] ():
keyword[raise] identifier[errors] . identifier[NoAccess] ()
keyword[if] keyword[not] identifier[self] . identifier[_is_valid_id] ( identifier[grade_system_id] ):
keyword[raise] identifier[errors] . identifier[InvalidArgument] ()
identifier[self] . identifier[_my_map] [ literal[string] ]= identifier[str] ( identifier[grade_system_id] ) | def set_grade_system(self, grade_system_id):
"""Sets the grading system.
arg: grade_system_id (osid.id.Id): the grade system
raise: InvalidArgument - ``grade_system_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_avatar_template
if self.get_grade_system_metadata().is_read_only():
raise errors.NoAccess() # depends on [control=['if'], data=[]]
if not self._is_valid_id(grade_system_id):
raise errors.InvalidArgument() # depends on [control=['if'], data=[]]
self._my_map['gradeSystemId'] = str(grade_system_id) |
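# Typical osid form round-trip for the setter above; `form` and
# `grade_system_id` are placeholders for objects obtained from a gradebook
# session, mirroring the metadata check the method itself performs.
if not form.get_grade_system_metadata().is_read_only():
    form.set_grade_system(grade_system_id)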
def main():
"""Run Chronophore based on the command line arguments."""
args = get_args()
# Make Chronophore's directories and files in $HOME
DATA_DIR = pathlib.Path(appdirs.user_data_dir(__title__))
LOG_FILE = pathlib.Path(appdirs.user_log_dir(__title__), 'debug.log')
os.makedirs(str(DATA_DIR), exist_ok=True)
os.makedirs(str(LOG_FILE.parent), exist_ok=True)
if args.version:
print('{} {}'.format(__title__, __version__))
raise SystemExit
if args.debug:
CONSOLE_LOG_LEVEL = logging.DEBUG
elif args.verbose:
CONSOLE_LOG_LEVEL = logging.INFO
else:
CONSOLE_LOG_LEVEL = logging.WARNING
logger = set_up_logging(LOG_FILE, CONSOLE_LOG_LEVEL)
logger.debug('-'*80)
logger.info('{} {}'.format(__title__, __version__))
logger.debug('Log File: {}'.format(LOG_FILE))
logger.debug('Data Directory: {}'.format(DATA_DIR))
if args.testdb:
DATABASE_FILE = DATA_DIR.joinpath('test.sqlite')
logger.info('Using test database.')
else:
DATABASE_FILE = DATA_DIR.joinpath('chronophore.sqlite')
logger.debug('Database File: {}'.format(DATABASE_FILE))
engine = create_engine('sqlite:///{}'.format(str(DATABASE_FILE)))
Base.metadata.create_all(engine)
Session.configure(bind=engine)
if args.log_sql:
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
if args.testdb:
add_test_users(session=Session())
controller.flag_forgotten_entries(session=Session())
if args.tk:
from chronophore.tkview import TkChronophoreUI
TkChronophoreUI()
else:
try:
from PyQt5.QtWidgets import QApplication
except ImportError:
print(
'Error: PyQt5, which chronophore uses for its'
+ ' graphical interface, is not installed.'
+ "\nInstall it with 'pip install PyQt5'"
+ " or use the old Tk ui with 'chronophore --tk'."
)
raise SystemExit
else:
from chronophore.qtview import QtChronophoreUI
app = QApplication(sys.argv)
chrono_ui = QtChronophoreUI()
chrono_ui.show()
sys.exit(app.exec_())
logger.debug('{} stopping'.format(__title__)) | def function[main, parameter[]]:
constant[Run Chronophore based on the command line arguments.]
variable[args] assign[=] call[name[get_args], parameter[]]
variable[DATA_DIR] assign[=] call[name[pathlib].Path, parameter[call[name[appdirs].user_data_dir, parameter[name[__title__]]]]]
variable[LOG_FILE] assign[=] call[name[pathlib].Path, parameter[call[name[appdirs].user_log_dir, parameter[name[__title__]]], constant[debug.log]]]
call[name[os].makedirs, parameter[call[name[str], parameter[name[DATA_DIR]]]]]
call[name[os].makedirs, parameter[call[name[str], parameter[name[LOG_FILE].parent]]]]
if name[args].version begin[:]
call[name[print], parameter[call[constant[{} {}].format, parameter[name[__title__], name[__version__]]]]]
<ast.Raise object at 0x7da1b2327a00>
if name[args].debug begin[:]
variable[CONSOLE_LOG_LEVEL] assign[=] name[logging].DEBUG
variable[logger] assign[=] call[name[set_up_logging], parameter[name[LOG_FILE], name[CONSOLE_LOG_LEVEL]]]
call[name[logger].debug, parameter[binary_operation[constant[-] * constant[80]]]]
call[name[logger].info, parameter[call[constant[{} {}].format, parameter[name[__title__], name[__version__]]]]]
call[name[logger].debug, parameter[call[constant[Log File: {}].format, parameter[name[LOG_FILE]]]]]
call[name[logger].debug, parameter[call[constant[Data Directory: {}].format, parameter[name[DATA_DIR]]]]]
if name[args].testdb begin[:]
variable[DATABASE_FILE] assign[=] call[name[DATA_DIR].joinpath, parameter[constant[test.sqlite]]]
call[name[logger].info, parameter[constant[Using test database.]]]
call[name[logger].debug, parameter[call[constant[Database File: {}].format, parameter[name[DATABASE_FILE]]]]]
variable[engine] assign[=] call[name[create_engine], parameter[call[constant[sqlite:///{}].format, parameter[call[name[str], parameter[name[DATABASE_FILE]]]]]]]
call[name[Base].metadata.create_all, parameter[name[engine]]]
call[name[Session].configure, parameter[]]
if name[args].log_sql begin[:]
call[call[name[logging].getLogger, parameter[constant[sqlalchemy.engine]]].setLevel, parameter[name[logging].INFO]]
if name[args].testdb begin[:]
call[name[add_test_users], parameter[]]
call[name[controller].flag_forgotten_entries, parameter[]]
if name[args].tk begin[:]
from relative_module[chronophore.tkview] import module[TkChronophoreUI]
call[name[TkChronophoreUI], parameter[]]
call[name[logger].debug, parameter[call[constant[{} stopping].format, parameter[name[__title__]]]]] | keyword[def] identifier[main] ():
literal[string]
identifier[args] = identifier[get_args] ()
identifier[DATA_DIR] = identifier[pathlib] . identifier[Path] ( identifier[appdirs] . identifier[user_data_dir] ( identifier[__title__] ))
identifier[LOG_FILE] = identifier[pathlib] . identifier[Path] ( identifier[appdirs] . identifier[user_log_dir] ( identifier[__title__] ), literal[string] )
identifier[os] . identifier[makedirs] ( identifier[str] ( identifier[DATA_DIR] ), identifier[exist_ok] = keyword[True] )
identifier[os] . identifier[makedirs] ( identifier[str] ( identifier[LOG_FILE] . identifier[parent] ), identifier[exist_ok] = keyword[True] )
keyword[if] identifier[args] . identifier[version] :
identifier[print] ( literal[string] . identifier[format] ( identifier[__title__] , identifier[__version__] ))
keyword[raise] identifier[SystemExit]
keyword[if] identifier[args] . identifier[debug] :
identifier[CONSOLE_LOG_LEVEL] = identifier[logging] . identifier[DEBUG]
keyword[elif] identifier[args] . identifier[verbose] :
identifier[CONSOLE_LOG_LEVEL] = identifier[logging] . identifier[INFO]
keyword[else] :
identifier[CONSOLE_LOG_LEVEL] = identifier[logging] . identifier[WARNING]
identifier[logger] = identifier[set_up_logging] ( identifier[LOG_FILE] , identifier[CONSOLE_LOG_LEVEL] )
identifier[logger] . identifier[debug] ( literal[string] * literal[int] )
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[__title__] , identifier[__version__] ))
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[LOG_FILE] ))
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[DATA_DIR] ))
keyword[if] identifier[args] . identifier[testdb] :
identifier[DATABASE_FILE] = identifier[DATA_DIR] . identifier[joinpath] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] )
keyword[else] :
identifier[DATABASE_FILE] = identifier[DATA_DIR] . identifier[joinpath] ( literal[string] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[DATABASE_FILE] ))
identifier[engine] = identifier[create_engine] ( literal[string] . identifier[format] ( identifier[str] ( identifier[DATABASE_FILE] )))
identifier[Base] . identifier[metadata] . identifier[create_all] ( identifier[engine] )
identifier[Session] . identifier[configure] ( identifier[bind] = identifier[engine] )
keyword[if] identifier[args] . identifier[log_sql] :
identifier[logging] . identifier[getLogger] ( literal[string] ). identifier[setLevel] ( identifier[logging] . identifier[INFO] )
keyword[if] identifier[args] . identifier[testdb] :
identifier[add_test_users] ( identifier[session] = identifier[Session] ())
identifier[controller] . identifier[flag_forgotten_entries] ( identifier[session] = identifier[Session] ())
keyword[if] identifier[args] . identifier[tk] :
keyword[from] identifier[chronophore] . identifier[tkview] keyword[import] identifier[TkChronophoreUI]
identifier[TkChronophoreUI] ()
keyword[else] :
keyword[try] :
keyword[from] identifier[PyQt5] . identifier[QtWidgets] keyword[import] identifier[QApplication]
keyword[except] identifier[ImportError] :
identifier[print] (
literal[string]
+ literal[string]
+ literal[string]
+ literal[string]
)
keyword[raise] identifier[SystemExit]
keyword[else] :
keyword[from] identifier[chronophore] . identifier[qtview] keyword[import] identifier[QtChronophoreUI]
identifier[app] = identifier[QApplication] ( identifier[sys] . identifier[argv] )
identifier[chrono_ui] = identifier[QtChronophoreUI] ()
identifier[chrono_ui] . identifier[show] ()
identifier[sys] . identifier[exit] ( identifier[app] . identifier[exec_] ())
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[__title__] )) | def main():
"""Run Chronophore based on the command line arguments."""
args = get_args()
# Make Chronophore's directories and files in $HOME
DATA_DIR = pathlib.Path(appdirs.user_data_dir(__title__))
LOG_FILE = pathlib.Path(appdirs.user_log_dir(__title__), 'debug.log')
os.makedirs(str(DATA_DIR), exist_ok=True)
os.makedirs(str(LOG_FILE.parent), exist_ok=True)
if args.version:
print('{} {}'.format(__title__, __version__))
raise SystemExit # depends on [control=['if'], data=[]]
if args.debug:
CONSOLE_LOG_LEVEL = logging.DEBUG # depends on [control=['if'], data=[]]
elif args.verbose:
CONSOLE_LOG_LEVEL = logging.INFO # depends on [control=['if'], data=[]]
else:
CONSOLE_LOG_LEVEL = logging.WARNING
logger = set_up_logging(LOG_FILE, CONSOLE_LOG_LEVEL)
logger.debug('-' * 80)
logger.info('{} {}'.format(__title__, __version__))
logger.debug('Log File: {}'.format(LOG_FILE))
logger.debug('Data Directory: {}'.format(DATA_DIR))
if args.testdb:
DATABASE_FILE = DATA_DIR.joinpath('test.sqlite')
logger.info('Using test database.') # depends on [control=['if'], data=[]]
else:
DATABASE_FILE = DATA_DIR.joinpath('chronophore.sqlite')
logger.debug('Database File: {}'.format(DATABASE_FILE))
engine = create_engine('sqlite:///{}'.format(str(DATABASE_FILE)))
Base.metadata.create_all(engine)
Session.configure(bind=engine)
if args.log_sql:
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO) # depends on [control=['if'], data=[]]
if args.testdb:
add_test_users(session=Session()) # depends on [control=['if'], data=[]]
controller.flag_forgotten_entries(session=Session())
if args.tk:
from chronophore.tkview import TkChronophoreUI
TkChronophoreUI() # depends on [control=['if'], data=[]]
else:
try:
from PyQt5.QtWidgets import QApplication # depends on [control=['try'], data=[]]
except ImportError:
print('Error: PyQt5, which chronophore uses for its' + ' graphical interface, is not installed.' + "\nInstall it with 'pip install PyQt5'" + " or use the old Tk ui with 'chronophore --tk'.")
raise SystemExit # depends on [control=['except'], data=[]]
else:
from chronophore.qtview import QtChronophoreUI
app = QApplication(sys.argv)
chrono_ui = QtChronophoreUI()
chrono_ui.show()
sys.exit(app.exec_())
logger.debug('{} stopping'.format(__title__)) |
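# Condensed sketch of the per-user storage setup main() performs, with the
# application name swapped for a placeholder:
import os
import pathlib

import appdirs
from sqlalchemy import create_engine

data_dir = pathlib.Path(appdirs.user_data_dir('myapp'))
os.makedirs(str(data_dir), exist_ok=True)
engine = create_engine('sqlite:///{}'.format(data_dir / 'myapp.sqlite'))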
def get_tier(self, name_num):
"""Gives a tier, when multiple tiers exist with that name only the
first is returned.
:param name_num: Name or number of the tier to return.
:type name_num: int or str
:returns: The tier.
:raises IndexError: If the tier doesn't exist.
"""
return self.tiers[name_num - 1] if isinstance(name_num, int) else\
[i for i in self.tiers if i.name == name_num][0] | def function[get_tier, parameter[self, name_num]]:
    constant[Gives a tier; when multiple tiers exist with that name, only the
    first is returned.
:param name_num: Name or number of the tier to return.
:type name_num: int or str
:returns: The tier.
:raises IndexError: If the tier doesn't exist.
]
return[<ast.IfExp object at 0x7da1b02c34c0>] | keyword[def] identifier[get_tier] ( identifier[self] , identifier[name_num] ):
literal[string]
keyword[return] identifier[self] . identifier[tiers] [ identifier[name_num] - literal[int] ] keyword[if] identifier[isinstance] ( identifier[name_num] , identifier[int] ) keyword[else] [ identifier[i] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[tiers] keyword[if] identifier[i] . identifier[name] == identifier[name_num] ][ literal[int] ] | def get_tier(self, name_num):
"""Gives a tier, when multiple tiers exist with that name only the
first is returned.
:param name_num: Name or number of the tier to return.
:type name_num: int or str
:returns: The tier.
:raises IndexError: If the tier doesn't exist.
"""
return self.tiers[name_num - 1] if isinstance(name_num, int) else [i for i in self.tiers if i.name == name_num][0] |
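# get_tier above accepts a 1-based index or a tier name; on a hypothetical
# annotation object `eaf`, both calls below can address the same tier.
first = eaf.get_tier(1)
by_name = eaf.get_tier('default')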
def getPk(self):
'''
        getPk - Resolve an absent pk from the obj (e.g. if the obj has just been saved), and return the pk.
'''
if not self.pk and self.obj:
if self.obj._id:
self.pk = self.obj._id
return self.pk | def function[getPk, parameter[self]]:
constant[
    getPk - Resolve an absent pk from the obj (e.g. if the obj has just been saved), and return the pk.
]
if <ast.BoolOp object at 0x7da1b0022c50> begin[:]
if name[self].obj._id begin[:]
name[self].pk assign[=] name[self].obj._id
return[name[self].pk] | keyword[def] identifier[getPk] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[pk] keyword[and] identifier[self] . identifier[obj] :
keyword[if] identifier[self] . identifier[obj] . identifier[_id] :
identifier[self] . identifier[pk] = identifier[self] . identifier[obj] . identifier[_id]
keyword[return] identifier[self] . identifier[pk] | def getPk(self):
"""
    getPk - Resolve an absent pk from the obj (e.g. if the obj has just been saved), and return the pk.
"""
if not self.pk and self.obj:
if self.obj._id:
self.pk = self.obj._id # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return self.pk |
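# Save-then-link pattern getPk above supports; LazyLink and obj are made-up
# stand-ins for the owning class and a model object whose _id is set on save.
link = LazyLink(pk=None, obj=obj)  # obj._id is still unset here
obj.save()                         # saving populates obj._id
pk = link.getPk()                  # backfills link.pk from obj._id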
def process(self, session: AppSession):
'''Populate the visits from the CDX into the URL table.'''
if not session.args.warc_dedup:
return
iterable = wpull.warc.format.read_cdx(
session.args.warc_dedup,
encoding=session.args.local_encoding or 'utf-8'
)
missing_url_msg = _('The URL ("a") is missing from the CDX file.')
missing_id_msg = _('The record ID ("u") is missing from the CDX file.')
missing_checksum_msg = \
_('The SHA1 checksum ("k") is missing from the CDX file.')
counter = 0
def visits():
nonlocal counter
checked_fields = False
for record in iterable:
if not checked_fields:
if 'a' not in record:
raise ValueError(missing_url_msg)
if 'u' not in record:
raise ValueError(missing_id_msg)
if 'k' not in record:
raise ValueError(missing_checksum_msg)
checked_fields = True
yield record['a'], record['u'], record['k']
counter += 1
url_table = session.factory['URLTable']
url_table.add_visits(visits())
_logger.info(__(
gettext.ngettext(
'Loaded {num} record from CDX file.',
'Loaded {num} records from CDX file.',
counter
),
num=counter
)) | def function[process, parameter[self, session]]:
constant[Populate the visits from the CDX into the URL table.]
if <ast.UnaryOp object at 0x7da2043459c0> begin[:]
return[None]
variable[iterable] assign[=] call[name[wpull].warc.format.read_cdx, parameter[name[session].args.warc_dedup]]
variable[missing_url_msg] assign[=] call[name[_], parameter[constant[The URL ("a") is missing from the CDX file.]]]
variable[missing_id_msg] assign[=] call[name[_], parameter[constant[The record ID ("u") is missing from the CDX file.]]]
variable[missing_checksum_msg] assign[=] call[name[_], parameter[constant[The SHA1 checksum ("k") is missing from the CDX file.]]]
variable[counter] assign[=] constant[0]
def function[visits, parameter[]]:
<ast.Nonlocal object at 0x7da2054a6800>
variable[checked_fields] assign[=] constant[False]
for taget[name[record]] in starred[name[iterable]] begin[:]
if <ast.UnaryOp object at 0x7da2054a4e50> begin[:]
if compare[constant[a] <ast.NotIn object at 0x7da2590d7190> name[record]] begin[:]
<ast.Raise object at 0x7da2054a7a30>
if compare[constant[u] <ast.NotIn object at 0x7da2590d7190> name[record]] begin[:]
<ast.Raise object at 0x7da2054a4250>
if compare[constant[k] <ast.NotIn object at 0x7da2590d7190> name[record]] begin[:]
<ast.Raise object at 0x7da2054a7f70>
variable[checked_fields] assign[=] constant[True]
<ast.Yield object at 0x7da2054a4af0>
<ast.AugAssign object at 0x7da2054a7970>
variable[url_table] assign[=] call[name[session].factory][constant[URLTable]]
call[name[url_table].add_visits, parameter[call[name[visits], parameter[]]]]
call[name[_logger].info, parameter[call[name[__], parameter[call[name[gettext].ngettext, parameter[constant[Loaded {num} record from CDX file.], constant[Loaded {num} records from CDX file.], name[counter]]]]]]] | keyword[def] identifier[process] ( identifier[self] , identifier[session] : identifier[AppSession] ):
literal[string]
keyword[if] keyword[not] identifier[session] . identifier[args] . identifier[warc_dedup] :
keyword[return]
identifier[iterable] = identifier[wpull] . identifier[warc] . identifier[format] . identifier[read_cdx] (
identifier[session] . identifier[args] . identifier[warc_dedup] ,
identifier[encoding] = identifier[session] . identifier[args] . identifier[local_encoding] keyword[or] literal[string]
)
identifier[missing_url_msg] = identifier[_] ( literal[string] )
identifier[missing_id_msg] = identifier[_] ( literal[string] )
identifier[missing_checksum_msg] = identifier[_] ( literal[string] )
identifier[counter] = literal[int]
keyword[def] identifier[visits] ():
keyword[nonlocal] identifier[counter]
identifier[checked_fields] = keyword[False]
keyword[for] identifier[record] keyword[in] identifier[iterable] :
keyword[if] keyword[not] identifier[checked_fields] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[record] :
keyword[raise] identifier[ValueError] ( identifier[missing_url_msg] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[record] :
keyword[raise] identifier[ValueError] ( identifier[missing_id_msg] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[record] :
keyword[raise] identifier[ValueError] ( identifier[missing_checksum_msg] )
identifier[checked_fields] = keyword[True]
keyword[yield] identifier[record] [ literal[string] ], identifier[record] [ literal[string] ], identifier[record] [ literal[string] ]
identifier[counter] += literal[int]
identifier[url_table] = identifier[session] . identifier[factory] [ literal[string] ]
identifier[url_table] . identifier[add_visits] ( identifier[visits] ())
identifier[_logger] . identifier[info] ( identifier[__] (
identifier[gettext] . identifier[ngettext] (
literal[string] ,
literal[string] ,
identifier[counter]
),
identifier[num] = identifier[counter]
)) | def process(self, session: AppSession):
"""Populate the visits from the CDX into the URL table."""
if not session.args.warc_dedup:
return # depends on [control=['if'], data=[]]
iterable = wpull.warc.format.read_cdx(session.args.warc_dedup, encoding=session.args.local_encoding or 'utf-8')
missing_url_msg = _('The URL ("a") is missing from the CDX file.')
missing_id_msg = _('The record ID ("u") is missing from the CDX file.')
missing_checksum_msg = _('The SHA1 checksum ("k") is missing from the CDX file.')
counter = 0
def visits():
nonlocal counter
checked_fields = False
for record in iterable:
if not checked_fields:
if 'a' not in record:
raise ValueError(missing_url_msg) # depends on [control=['if'], data=[]]
if 'u' not in record:
raise ValueError(missing_id_msg) # depends on [control=['if'], data=[]]
if 'k' not in record:
raise ValueError(missing_checksum_msg) # depends on [control=['if'], data=[]]
checked_fields = True # depends on [control=['if'], data=[]]
yield (record['a'], record['u'], record['k'])
counter += 1 # depends on [control=['for'], data=['record']]
url_table = session.factory['URLTable']
url_table.add_visits(visits())
_logger.info(__(gettext.ngettext('Loaded {num} record from CDX file.', 'Loaded {num} records from CDX file.', counter), num=counter)) |
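# The three CDX fields consumed above, as a plain dict (values illustrative):
# 'a' is the URL, 'u' the WARC record ID, 'k' the SHA1 payload digest.
record = {'a': 'http://example.com/', 'u': '<urn:uuid:0000>', 'k': 'ABC123'}
visit = (record['a'], record['u'], record['k'])  # what visits() yields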
def get_threadline_theming(self, thread, colourmode):
"""
        look up how to display a Threadline widget in search mode
for a given thread.
:param thread: Thread to theme Threadline for
:type thread: alot.db.thread.Thread
:param colourmode: colourmode to use, one of 1,16,256.
:type colourmode: int
This will return a dict mapping
:normal: to `urwid.AttrSpec`,
:focus: to `urwid.AttrSpec`,
        :parts: to a list of strings identifying subwidgets
to be displayed in this order.
Moreover, for every part listed this will map 'part' to a dict mapping
:normal: to `urwid.AttrSpec`,
:focus: to `urwid.AttrSpec`,
:width: to a tuple indicating the width of the subpart.
This is either `('fit', min, max)` to force the widget
to be at least `min` and at most `max` characters wide,
or `('weight', n)` which makes it share remaining space
with other 'weight' parts.
:alignment: where to place the content if shorter than the widget.
This is either 'right', 'left' or 'center'.
"""
def pickcolour(triple):
return triple[self._colours.index(colourmode)]
def matches(sec, thread):
if sec.get('tagged_with') is not None:
if not set(sec['tagged_with']).issubset(thread.get_tags()):
return False
if sec.get('query') is not None:
if not thread.matches(sec['query']):
return False
return True
default = self._config['search']['threadline']
match = default
candidates = self._config['search'].sections
for candidatename in candidates:
candidate = self._config['search'][candidatename]
if (candidatename.startswith('threadline') and
(not candidatename == 'threadline') and
matches(candidate, thread)):
match = candidate
break
# fill in values
res = {}
res['normal'] = pickcolour(match.get('normal') or default['normal'])
res['focus'] = pickcolour(match.get('focus') or default['focus'])
res['parts'] = match.get('parts') or default['parts']
for part in res['parts']:
defaultsec = default.get(part)
partsec = match.get(part) or {}
def fill(key, fallback=None):
pvalue = partsec.get(key) or defaultsec.get(key)
return pvalue or fallback
res[part] = {}
res[part]['width'] = fill('width', ('fit', 0, 0))
res[part]['alignment'] = fill('alignment', 'right')
res[part]['normal'] = pickcolour(fill('normal'))
res[part]['focus'] = pickcolour(fill('focus'))
return res | def function[get_threadline_theming, parameter[self, thread, colourmode]]:
constant[
    look up how to display a Threadline widget in search mode
for a given thread.
:param thread: Thread to theme Threadline for
:type thread: alot.db.thread.Thread
:param colourmode: colourmode to use, one of 1,16,256.
:type colourmode: int
This will return a dict mapping
:normal: to `urwid.AttrSpec`,
:focus: to `urwid.AttrSpec`,
    :parts: to a list of strings identifying subwidgets
to be displayed in this order.
Moreover, for every part listed this will map 'part' to a dict mapping
:normal: to `urwid.AttrSpec`,
:focus: to `urwid.AttrSpec`,
:width: to a tuple indicating the width of the subpart.
This is either `('fit', min, max)` to force the widget
to be at least `min` and at most `max` characters wide,
or `('weight', n)` which makes it share remaining space
with other 'weight' parts.
:alignment: where to place the content if shorter than the widget.
This is either 'right', 'left' or 'center'.
]
def function[pickcolour, parameter[triple]]:
return[call[name[triple]][call[name[self]._colours.index, parameter[name[colourmode]]]]]
def function[matches, parameter[sec, thread]]:
if compare[call[name[sec].get, parameter[constant[tagged_with]]] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b08474f0> begin[:]
return[constant[False]]
if compare[call[name[sec].get, parameter[constant[query]]] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b0847eb0> begin[:]
return[constant[False]]
return[constant[True]]
variable[default] assign[=] call[call[name[self]._config][constant[search]]][constant[threadline]]
variable[match] assign[=] name[default]
variable[candidates] assign[=] call[name[self]._config][constant[search]].sections
for taget[name[candidatename]] in starred[name[candidates]] begin[:]
variable[candidate] assign[=] call[call[name[self]._config][constant[search]]][name[candidatename]]
if <ast.BoolOp object at 0x7da1b0845330> begin[:]
variable[match] assign[=] name[candidate]
break
variable[res] assign[=] dictionary[[], []]
call[name[res]][constant[normal]] assign[=] call[name[pickcolour], parameter[<ast.BoolOp object at 0x7da1b0845780>]]
call[name[res]][constant[focus]] assign[=] call[name[pickcolour], parameter[<ast.BoolOp object at 0x7da1b0845540>]]
call[name[res]][constant[parts]] assign[=] <ast.BoolOp object at 0x7da1b0845c00>
for taget[name[part]] in starred[call[name[res]][constant[parts]]] begin[:]
variable[defaultsec] assign[=] call[name[default].get, parameter[name[part]]]
variable[partsec] assign[=] <ast.BoolOp object at 0x7da1b08479d0>
def function[fill, parameter[key, fallback]]:
variable[pvalue] assign[=] <ast.BoolOp object at 0x7da1b08440d0>
return[<ast.BoolOp object at 0x7da1b0846140>]
call[name[res]][name[part]] assign[=] dictionary[[], []]
call[call[name[res]][name[part]]][constant[width]] assign[=] call[name[fill], parameter[constant[width], tuple[[<ast.Constant object at 0x7da1b07bacb0>, <ast.Constant object at 0x7da1b07ba950>, <ast.Constant object at 0x7da1b07bb250>]]]]
call[call[name[res]][name[part]]][constant[alignment]] assign[=] call[name[fill], parameter[constant[alignment], constant[right]]]
call[call[name[res]][name[part]]][constant[normal]] assign[=] call[name[pickcolour], parameter[call[name[fill], parameter[constant[normal]]]]]
call[call[name[res]][name[part]]][constant[focus]] assign[=] call[name[pickcolour], parameter[call[name[fill], parameter[constant[focus]]]]]
return[name[res]] | keyword[def] identifier[get_threadline_theming] ( identifier[self] , identifier[thread] , identifier[colourmode] ):
literal[string]
keyword[def] identifier[pickcolour] ( identifier[triple] ):
keyword[return] identifier[triple] [ identifier[self] . identifier[_colours] . identifier[index] ( identifier[colourmode] )]
keyword[def] identifier[matches] ( identifier[sec] , identifier[thread] ):
keyword[if] identifier[sec] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[set] ( identifier[sec] [ literal[string] ]). identifier[issubset] ( identifier[thread] . identifier[get_tags] ()):
keyword[return] keyword[False]
keyword[if] identifier[sec] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[thread] . identifier[matches] ( identifier[sec] [ literal[string] ]):
keyword[return] keyword[False]
keyword[return] keyword[True]
identifier[default] = identifier[self] . identifier[_config] [ literal[string] ][ literal[string] ]
identifier[match] = identifier[default]
identifier[candidates] = identifier[self] . identifier[_config] [ literal[string] ]. identifier[sections]
keyword[for] identifier[candidatename] keyword[in] identifier[candidates] :
identifier[candidate] = identifier[self] . identifier[_config] [ literal[string] ][ identifier[candidatename] ]
keyword[if] ( identifier[candidatename] . identifier[startswith] ( literal[string] ) keyword[and]
( keyword[not] identifier[candidatename] == literal[string] ) keyword[and]
identifier[matches] ( identifier[candidate] , identifier[thread] )):
identifier[match] = identifier[candidate]
keyword[break]
identifier[res] ={}
identifier[res] [ literal[string] ]= identifier[pickcolour] ( identifier[match] . identifier[get] ( literal[string] ) keyword[or] identifier[default] [ literal[string] ])
identifier[res] [ literal[string] ]= identifier[pickcolour] ( identifier[match] . identifier[get] ( literal[string] ) keyword[or] identifier[default] [ literal[string] ])
identifier[res] [ literal[string] ]= identifier[match] . identifier[get] ( literal[string] ) keyword[or] identifier[default] [ literal[string] ]
keyword[for] identifier[part] keyword[in] identifier[res] [ literal[string] ]:
identifier[defaultsec] = identifier[default] . identifier[get] ( identifier[part] )
identifier[partsec] = identifier[match] . identifier[get] ( identifier[part] ) keyword[or] {}
keyword[def] identifier[fill] ( identifier[key] , identifier[fallback] = keyword[None] ):
identifier[pvalue] = identifier[partsec] . identifier[get] ( identifier[key] ) keyword[or] identifier[defaultsec] . identifier[get] ( identifier[key] )
keyword[return] identifier[pvalue] keyword[or] identifier[fallback]
identifier[res] [ identifier[part] ]={}
identifier[res] [ identifier[part] ][ literal[string] ]= identifier[fill] ( literal[string] ,( literal[string] , literal[int] , literal[int] ))
identifier[res] [ identifier[part] ][ literal[string] ]= identifier[fill] ( literal[string] , literal[string] )
identifier[res] [ identifier[part] ][ literal[string] ]= identifier[pickcolour] ( identifier[fill] ( literal[string] ))
identifier[res] [ identifier[part] ][ literal[string] ]= identifier[pickcolour] ( identifier[fill] ( literal[string] ))
keyword[return] identifier[res] | def get_threadline_theming(self, thread, colourmode):
"""
look up how to display a Threadline wiidget in search mode
for a given thread.
:param thread: Thread to theme Threadline for
:type thread: alot.db.thread.Thread
:param colourmode: colourmode to use, one of 1,16,256.
:type colourmode: int
This will return a dict mapping
:normal: to `urwid.AttrSpec`,
:focus: to `urwid.AttrSpec`,
:parts: to a list of strings indentifying subwidgets
to be displayed in this order.
Moreover, for every part listed this will map 'part' to a dict mapping
:normal: to `urwid.AttrSpec`,
:focus: to `urwid.AttrSpec`,
:width: to a tuple indicating the width of the subpart.
This is either `('fit', min, max)` to force the widget
to be at least `min` and at most `max` characters wide,
or `('weight', n)` which makes it share remaining space
with other 'weight' parts.
:alignment: where to place the content if shorter than the widget.
This is either 'right', 'left' or 'center'.
"""
def pickcolour(triple):
return triple[self._colours.index(colourmode)]
def matches(sec, thread):
if sec.get('tagged_with') is not None:
if not set(sec['tagged_with']).issubset(thread.get_tags()):
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if sec.get('query') is not None:
if not thread.matches(sec['query']):
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return True
default = self._config['search']['threadline']
match = default
candidates = self._config['search'].sections
for candidatename in candidates:
candidate = self._config['search'][candidatename]
if candidatename.startswith('threadline') and (not candidatename == 'threadline') and matches(candidate, thread):
match = candidate
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['candidatename']]
# fill in values
res = {}
res['normal'] = pickcolour(match.get('normal') or default['normal'])
res['focus'] = pickcolour(match.get('focus') or default['focus'])
res['parts'] = match.get('parts') or default['parts']
for part in res['parts']:
defaultsec = default.get(part)
partsec = match.get(part) or {}
def fill(key, fallback=None):
pvalue = partsec.get(key) or defaultsec.get(key)
return pvalue or fallback
res[part] = {}
res[part]['width'] = fill('width', ('fit', 0, 0))
res[part]['alignment'] = fill('alignment', 'right')
res[part]['normal'] = pickcolour(fill('normal'))
res[part]['focus'] = pickcolour(fill('focus')) # depends on [control=['for'], data=['part']]
return res |
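# Reading back the dict returned above; `theme` and `thread` are placeholders
# for a Theme instance and an alot thread.
theming = theme.get_threadline_theming(thread, 256)
for part in theming['parts']:
    width_spec = theming[part]['width']  # ('fit', min, max) or ('weight', n)
    align = theming[part]['alignment']   # 'left', 'center' or 'right'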
def depth(self, *args):
"""
Get/set the depth
"""
if len(args):
self._depth = args[0]
else:
return self._depth | def function[depth, parameter[self]]:
constant[
Get/set the depth
]
if call[name[len], parameter[name[args]]] begin[:]
name[self]._depth assign[=] call[name[args]][constant[0]] | keyword[def] identifier[depth] ( identifier[self] ,* identifier[args] ):
literal[string]
keyword[if] identifier[len] ( identifier[args] ):
identifier[self] . identifier[_depth] = identifier[args] [ literal[int] ]
keyword[else] :
keyword[return] identifier[self] . identifier[_depth] | def depth(self, *args):
"""
Get/set the depth
"""
if len(args):
self._depth = args[0] # depends on [control=['if'], data=[]]
else:
return self._depth |
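# The getter/setter overload above, on a hypothetical instance `node`:
node.depth(3)               # one argument: set
assert node.depth() == 3    # no arguments: get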
def convert_datetime_array(array):
    ''' Convert NumPy datetime arrays to arrays of milliseconds since epoch.
Args:
array : (obj)
A NumPy array of datetime to convert
If the value passed in is not a NumPy array, it will be returned as-is.
Returns:
array
'''
if not isinstance(array, np.ndarray):
return array
try:
dt2001 = np.datetime64('2001')
legacy_datetime64 = (dt2001.astype('int64') ==
dt2001.astype('datetime64[ms]').astype('int64'))
except AttributeError as e:
if e.args == ("'module' object has no attribute 'datetime64'",):
# for compatibility with PyPy that doesn't have datetime64
if 'PyPy' in sys.version:
legacy_datetime64 = False
pass
else:
raise e
else:
raise e
# not quite correct, truncates to ms..
if array.dtype.kind == 'M':
if legacy_datetime64:
if array.dtype == np.dtype('datetime64[ns]'):
array = array.astype('int64') / 10**6.0
else:
array = array.astype('datetime64[us]').astype('int64') / 1000.
elif array.dtype.kind == 'm':
array = array.astype('timedelta64[us]').astype('int64') / 1000.
return array | def function[convert_datetime_array, parameter[array]]:
    constant[ Convert NumPy datetime arrays to arrays of milliseconds since epoch.
Args:
array : (obj)
A NumPy array of datetime to convert
If the value passed in is not a NumPy array, it will be returned as-is.
Returns:
array
]
if <ast.UnaryOp object at 0x7da1b1f61ed0> begin[:]
return[name[array]]
<ast.Try object at 0x7da1b1f630a0>
if compare[name[array].dtype.kind equal[==] constant[M]] begin[:]
if name[legacy_datetime64] begin[:]
if compare[name[array].dtype equal[==] call[name[np].dtype, parameter[constant[datetime64[ns]]]]] begin[:]
variable[array] assign[=] binary_operation[call[name[array].astype, parameter[constant[int64]]] / binary_operation[constant[10] ** constant[6.0]]]
return[name[array]] | keyword[def] identifier[convert_datetime_array] ( identifier[array] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[array] , identifier[np] . identifier[ndarray] ):
keyword[return] identifier[array]
keyword[try] :
identifier[dt2001] = identifier[np] . identifier[datetime64] ( literal[string] )
identifier[legacy_datetime64] =( identifier[dt2001] . identifier[astype] ( literal[string] )==
identifier[dt2001] . identifier[astype] ( literal[string] ). identifier[astype] ( literal[string] ))
keyword[except] identifier[AttributeError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[args] ==( literal[string] ,):
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[version] :
identifier[legacy_datetime64] = keyword[False]
keyword[pass]
keyword[else] :
keyword[raise] identifier[e]
keyword[else] :
keyword[raise] identifier[e]
keyword[if] identifier[array] . identifier[dtype] . identifier[kind] == literal[string] :
keyword[if] identifier[legacy_datetime64] :
keyword[if] identifier[array] . identifier[dtype] == identifier[np] . identifier[dtype] ( literal[string] ):
identifier[array] = identifier[array] . identifier[astype] ( literal[string] )/ literal[int] ** literal[int]
keyword[else] :
identifier[array] = identifier[array] . identifier[astype] ( literal[string] ). identifier[astype] ( literal[string] )/ literal[int]
keyword[elif] identifier[array] . identifier[dtype] . identifier[kind] == literal[string] :
identifier[array] = identifier[array] . identifier[astype] ( literal[string] ). identifier[astype] ( literal[string] )/ literal[int]
keyword[return] identifier[array] | def convert_datetime_array(array):
""" Convert NumPy datetime arrays to arrays to milliseconds since epoch.
Args:
array : (obj)
A NumPy array of datetime to convert
If the value passed in is not a NumPy array, it will be returned as-is.
Returns:
array
"""
if not isinstance(array, np.ndarray):
return array # depends on [control=['if'], data=[]]
try:
dt2001 = np.datetime64('2001')
legacy_datetime64 = dt2001.astype('int64') == dt2001.astype('datetime64[ms]').astype('int64') # depends on [control=['try'], data=[]]
except AttributeError as e:
if e.args == ("'module' object has no attribute 'datetime64'",):
# for compatibility with PyPy that doesn't have datetime64
if 'PyPy' in sys.version:
legacy_datetime64 = False
pass # depends on [control=['if'], data=[]]
else:
raise e # depends on [control=['if'], data=[]]
else:
raise e # depends on [control=['except'], data=['e']]
# not quite correct, truncates to ms..
if array.dtype.kind == 'M':
if legacy_datetime64:
if array.dtype == np.dtype('datetime64[ns]'):
array = array.astype('int64') / 10 ** 6.0 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
array = array.astype('datetime64[us]').astype('int64') / 1000.0 # depends on [control=['if'], data=[]]
elif array.dtype.kind == 'm':
array = array.astype('timedelta64[us]').astype('int64') / 1000.0 # depends on [control=['if'], data=[]]
return array |
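# Round-trip sketch for convert_datetime_array above: datetime64[ns] input
# comes back as float milliseconds since the epoch (946684800500.0 below).
import numpy as np

arr = np.array(['2000-01-01T00:00:00.500'], dtype='datetime64[ns]')
ms = convert_datetime_array(arr)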
def _order_by_is_valid_or_none(self, params):
"""
Validates that a given order_by has proper syntax.
:param params: Query params.
:return: Returns True if either no order_by is present, or if the order_by is well-formed.
"""
if not "order_by" in params or not params["order_by"]:
return True
def _order_by_dict_is_not_well_formed(d):
if not isinstance(d, dict):
# Bad type.
return True
if "property_name" in d and d["property_name"]:
if "direction" in d and not direction.is_valid_direction(d["direction"]):
# Bad direction provided.
return True
for k in d:
if k != "property_name" and k != "direction":
# Unexpected key.
return True
# Everything looks good!
return False
# Missing required key.
return True
# order_by is converted to a list before this point if it wasn't one before.
order_by_list = json.loads(params["order_by"])
for order_by in order_by_list:
if _order_by_dict_is_not_well_formed(order_by):
return False
if not "group_by" in params or not params["group_by"]:
# We must have group_by to have order_by make sense.
return False
return True | def function[_order_by_is_valid_or_none, parameter[self, params]]:
constant[
Validates that a given order_by has proper syntax.
:param params: Query params.
:return: Returns True if either no order_by is present, or if the order_by is well-formed.
]
if <ast.BoolOp object at 0x7da18bcc8220> begin[:]
return[constant[True]]
def function[_order_by_dict_is_not_well_formed, parameter[d]]:
if <ast.UnaryOp object at 0x7da18bccb550> begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da18bcc93f0> begin[:]
if <ast.BoolOp object at 0x7da18bcc9d80> begin[:]
return[constant[True]]
for taget[name[k]] in starred[name[d]] begin[:]
if <ast.BoolOp object at 0x7da18bccad70> begin[:]
return[constant[True]]
return[constant[False]]
return[constant[True]]
variable[order_by_list] assign[=] call[name[json].loads, parameter[call[name[params]][constant[order_by]]]]
for taget[name[order_by]] in starred[name[order_by_list]] begin[:]
if call[name[_order_by_dict_is_not_well_formed], parameter[name[order_by]]] begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da1b04a5a20> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[_order_by_is_valid_or_none] ( identifier[self] , identifier[params] ):
literal[string]
keyword[if] keyword[not] literal[string] keyword[in] identifier[params] keyword[or] keyword[not] identifier[params] [ literal[string] ]:
keyword[return] keyword[True]
keyword[def] identifier[_order_by_dict_is_not_well_formed] ( identifier[d] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[d] , identifier[dict] ):
keyword[return] keyword[True]
keyword[if] literal[string] keyword[in] identifier[d] keyword[and] identifier[d] [ literal[string] ]:
keyword[if] literal[string] keyword[in] identifier[d] keyword[and] keyword[not] identifier[direction] . identifier[is_valid_direction] ( identifier[d] [ literal[string] ]):
keyword[return] keyword[True]
keyword[for] identifier[k] keyword[in] identifier[d] :
keyword[if] identifier[k] != literal[string] keyword[and] identifier[k] != literal[string] :
keyword[return] keyword[True]
keyword[return] keyword[False]
keyword[return] keyword[True]
identifier[order_by_list] = identifier[json] . identifier[loads] ( identifier[params] [ literal[string] ])
keyword[for] identifier[order_by] keyword[in] identifier[order_by_list] :
keyword[if] identifier[_order_by_dict_is_not_well_formed] ( identifier[order_by] ):
keyword[return] keyword[False]
keyword[if] keyword[not] literal[string] keyword[in] identifier[params] keyword[or] keyword[not] identifier[params] [ literal[string] ]:
keyword[return] keyword[False]
keyword[return] keyword[True] | def _order_by_is_valid_or_none(self, params):
"""
Validates that a given order_by has proper syntax.
:param params: Query params.
:return: Returns True if either no order_by is present, or if the order_by is well-formed.
"""
if not 'order_by' in params or not params['order_by']:
return True # depends on [control=['if'], data=[]]
def _order_by_dict_is_not_well_formed(d):
if not isinstance(d, dict):
# Bad type.
return True # depends on [control=['if'], data=[]]
if 'property_name' in d and d['property_name']:
if 'direction' in d and (not direction.is_valid_direction(d['direction'])):
# Bad direction provided.
return True # depends on [control=['if'], data=[]]
for k in d:
if k != 'property_name' and k != 'direction':
# Unexpected key.
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']]
# Everything looks good!
return False # depends on [control=['if'], data=[]]
# Missing required key.
return True
# order_by is converted to a list before this point if it wasn't one before.
order_by_list = json.loads(params['order_by'])
for order_by in order_by_list:
if _order_by_dict_is_not_well_formed(order_by):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['order_by']]
if not 'group_by' in params or not params['group_by']:
# We must have group_by to have order_by make sense.
return False # depends on [control=['if'], data=[]]
return True |
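A hedged sketch of the inputs this validator accepts; the dict shapes below just follow the checks above, and order_by is assumed to arrive as a JSON-encoded list.

import json

params_ok = {
    "group_by": "channel",
    "order_by": json.dumps([{"property_name": "result.count", "direction": "DESC"}]),
}
params_bad = {
    # order_by without group_by fails the final check
    "order_by": json.dumps([{"property_name": "result.count"}]),
}

Each order_by dict may carry only "property_name" plus an optional "direction"; any extra key, an invalid direction, or a missing group_by rejects the query.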
def expected_bar_values_2d(dates,
assets,
asset_info,
colname,
holes=None):
"""
Return a 2D array containing cls.expected_value(asset_id, date,
colname) for each date/asset pair in the inputs.
Missing locs are filled with 0 for volume and NaN for price columns:
- Values before/after an asset's lifetime.
- Values for asset_ids not contained in asset_info.
- Locs defined in `holes`.
"""
if colname == 'volume':
dtype = uint32
missing = 0
else:
dtype = float64
missing = float('nan')
data = full((len(dates), len(assets)), missing, dtype=dtype)
for j, asset in enumerate(assets):
# Use missing values when asset_id is not contained in asset_info.
if asset not in asset_info.index:
continue
start = asset_start(asset_info, asset)
end = asset_end(asset_info, asset)
for i, date in enumerate(dates):
# No value expected for dates outside the asset's start/end
# date.
if not (start <= date <= end):
continue
if holes is not None:
expected = expected_bar_value_with_holes(
asset,
date,
colname,
holes,
missing,
)
else:
expected = expected_bar_value(asset, date, colname)
data[i, j] = expected
return data | def function[expected_bar_values_2d, parameter[dates, assets, asset_info, colname, holes]]:
constant[
Return a 2D array containing cls.expected_value(asset_id, date,
colname) for each date/asset pair in the inputs.
Missing locs are filled with 0 for volume and NaN for price columns:
- Values before/after an asset's lifetime.
- Values for asset_ids not contained in asset_info.
- Locs defined in `holes`.
]
if compare[name[colname] equal[==] constant[volume]] begin[:]
variable[dtype] assign[=] name[uint32]
variable[missing] assign[=] constant[0]
variable[data] assign[=] call[name[full], parameter[tuple[[<ast.Call object at 0x7da1b2010cd0>, <ast.Call object at 0x7da1b2010eb0>]], name[missing]]]
for taget[tuple[[<ast.Name object at 0x7da1b2012da0>, <ast.Name object at 0x7da1b2012dd0>]]] in starred[call[name[enumerate], parameter[name[assets]]]] begin[:]
if compare[name[asset] <ast.NotIn object at 0x7da2590d7190> name[asset_info].index] begin[:]
continue
variable[start] assign[=] call[name[asset_start], parameter[name[asset_info], name[asset]]]
variable[end] assign[=] call[name[asset_end], parameter[name[asset_info], name[asset]]]
for taget[tuple[[<ast.Name object at 0x7da1b2011f60>, <ast.Name object at 0x7da1b2010f70>]]] in starred[call[name[enumerate], parameter[name[dates]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b20124d0> begin[:]
continue
if compare[name[holes] is_not constant[None]] begin[:]
variable[expected] assign[=] call[name[expected_bar_value_with_holes], parameter[name[asset], name[date], name[colname], name[holes], name[missing]]]
call[name[data]][tuple[[<ast.Name object at 0x7da1b2010850>, <ast.Name object at 0x7da1b2024070>]]] assign[=] name[expected]
return[name[data]] | keyword[def] identifier[expected_bar_values_2d] ( identifier[dates] ,
identifier[assets] ,
identifier[asset_info] ,
identifier[colname] ,
identifier[holes] = keyword[None] ):
literal[string]
keyword[if] identifier[colname] == literal[string] :
identifier[dtype] = identifier[uint32]
identifier[missing] = literal[int]
keyword[else] :
identifier[dtype] = identifier[float64]
identifier[missing] = identifier[float] ( literal[string] )
identifier[data] = identifier[full] (( identifier[len] ( identifier[dates] ), identifier[len] ( identifier[assets] )), identifier[missing] , identifier[dtype] = identifier[dtype] )
keyword[for] identifier[j] , identifier[asset] keyword[in] identifier[enumerate] ( identifier[assets] ):
keyword[if] identifier[asset] keyword[not] keyword[in] identifier[asset_info] . identifier[index] :
keyword[continue]
identifier[start] = identifier[asset_start] ( identifier[asset_info] , identifier[asset] )
identifier[end] = identifier[asset_end] ( identifier[asset_info] , identifier[asset] )
keyword[for] identifier[i] , identifier[date] keyword[in] identifier[enumerate] ( identifier[dates] ):
keyword[if] keyword[not] ( identifier[start] <= identifier[date] <= identifier[end] ):
keyword[continue]
keyword[if] identifier[holes] keyword[is] keyword[not] keyword[None] :
identifier[expected] = identifier[expected_bar_value_with_holes] (
identifier[asset] ,
identifier[date] ,
identifier[colname] ,
identifier[holes] ,
identifier[missing] ,
)
keyword[else] :
identifier[expected] = identifier[expected_bar_value] ( identifier[asset] , identifier[date] , identifier[colname] )
identifier[data] [ identifier[i] , identifier[j] ]= identifier[expected]
keyword[return] identifier[data] | def expected_bar_values_2d(dates, assets, asset_info, colname, holes=None):
"""
Return a 2D array containing cls.expected_value(asset_id, date,
colname) for each date/asset pair in the inputs.
Missing locs are filled with 0 for volume and NaN for price columns:
- Values before/after an asset's lifetime.
- Values for asset_ids not contained in asset_info.
- Locs defined in `holes`.
"""
if colname == 'volume':
dtype = uint32
missing = 0 # depends on [control=['if'], data=[]]
else:
dtype = float64
missing = float('nan')
data = full((len(dates), len(assets)), missing, dtype=dtype)
for (j, asset) in enumerate(assets):
# Use missing values when asset_id is not contained in asset_info.
if asset not in asset_info.index:
continue # depends on [control=['if'], data=[]]
start = asset_start(asset_info, asset)
end = asset_end(asset_info, asset)
for (i, date) in enumerate(dates):
# No value expected for dates outside the asset's start/end
# date.
if not start <= date <= end:
continue # depends on [control=['if'], data=[]]
if holes is not None:
expected = expected_bar_value_with_holes(asset, date, colname, holes, missing) # depends on [control=['if'], data=['holes']]
else:
expected = expected_bar_value(asset, date, colname)
data[i, j] = expected # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return data |
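A self-contained sketch of the fill pattern above, meant to be pasted below the function in one module. The asset_info frame, asset_start/asset_end helpers, and expected_bar_value are stand-ins defined here only for illustration; the real ones live elsewhere in the test suite.

import pandas as pd
from numpy import full, float64, uint32, isnan  # names the function reads from module scope

asset_info = pd.DataFrame(
    {'start_date': [pd.Timestamp('2014-01-02')],
     'end_date': [pd.Timestamp('2014-01-03')]},
    index=[1],
)

def asset_start(info, asset):
    return info.loc[asset, 'start_date']

def asset_end(info, asset):
    return info.loc[asset, 'end_date']

def expected_bar_value(asset, date, colname):
    # toy deterministic value for illustration only
    return float(asset) * 100.0

dates = pd.date_range('2014-01-01', '2014-01-04')
data = expected_bar_values_2d(dates, [1, 2], asset_info, 'close')
assert isnan(data[0, 0])      # before asset 1's start date
assert data[1, 0] == 100.0    # inside asset 1's lifetime
assert isnan(data[1, 1])      # asset 2 is unknown to asset_info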
def sign(self, byts):
'''
Compute the ECC signature for the given bytestream.
Args:
byts (bytes): The bytes to sign.
Returns:
bytes: The ECC signature bytes.
'''
chosen_hash = c_hashes.SHA256()
hasher = c_hashes.Hash(chosen_hash, default_backend())
hasher.update(byts)
digest = hasher.finalize()
return self.priv.sign(digest,
c_ec.ECDSA(c_utils.Prehashed(chosen_hash))
) | def function[sign, parameter[self, byts]]:
constant[
Compute the ECC signature for the given bytestream.
Args:
byts (bytes): The bytes to sign.
Returns:
bytes: The ECC signature bytes.
]
variable[chosen_hash] assign[=] call[name[c_hashes].SHA256, parameter[]]
variable[hasher] assign[=] call[name[c_hashes].Hash, parameter[name[chosen_hash], call[name[default_backend], parameter[]]]]
call[name[hasher].update, parameter[name[byts]]]
variable[digest] assign[=] call[name[hasher].finalize, parameter[]]
return[call[name[self].priv.sign, parameter[name[digest], call[name[c_ec].ECDSA, parameter[call[name[c_utils].Prehashed, parameter[name[chosen_hash]]]]]]]] | keyword[def] identifier[sign] ( identifier[self] , identifier[byts] ):
literal[string]
identifier[chosen_hash] = identifier[c_hashes] . identifier[SHA256] ()
identifier[hasher] = identifier[c_hashes] . identifier[Hash] ( identifier[chosen_hash] , identifier[default_backend] ())
identifier[hasher] . identifier[update] ( identifier[byts] )
identifier[digest] = identifier[hasher] . identifier[finalize] ()
keyword[return] identifier[self] . identifier[priv] . identifier[sign] ( identifier[digest] ,
identifier[c_ec] . identifier[ECDSA] ( identifier[c_utils] . identifier[Prehashed] ( identifier[chosen_hash] ))
) | def sign(self, byts):
"""
Compute the ECC signature for the given bytestream.
Args:
byts (bytes): The bytes to sign.
Returns:
bytes: The ECC signature bytes.
"""
chosen_hash = c_hashes.SHA256()
hasher = c_hashes.Hash(chosen_hash, default_backend())
hasher.update(byts)
digest = hasher.finalize()
return self.priv.sign(digest, c_ec.ECDSA(c_utils.Prehashed(chosen_hash))) |
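A standalone sketch of the same prehashed-ECDSA flow using the cryptography package directly; the c_hashes/c_ec/c_utils names above are assumed to be conventional aliases for these modules, and recent cryptography releases simply ignore the explicit backend argument.

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec, utils

priv = ec.generate_private_key(ec.SECP256R1(), default_backend())

chosen_hash = hashes.SHA256()
hasher = hashes.Hash(chosen_hash, default_backend())
hasher.update(b'some bytes to sign')
digest = hasher.finalize()

# sign the digest directly; Prehashed tells the key no further hashing is needed
signature = priv.sign(digest, ec.ECDSA(utils.Prehashed(chosen_hash)))
priv.public_key().verify(signature, digest, ec.ECDSA(utils.Prehashed(chosen_hash)))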
def get(self, name):
"""
Get the resource URI for a specified resource name.
If an entry for the specified resource name does not exist in the
Name-URI cache, the cache is refreshed from the HMC with all resources
of the manager holding this cache.
If an entry for the specified resource name still does not exist after
that, ``NotFound`` is raised.
"""
self.auto_invalidate()
try:
return self._uris[name]
except KeyError:
self.refresh()
try:
return self._uris[name]
except KeyError:
raise NotFound({self._manager._name_prop: name}, self._manager) | def function[get, parameter[self, name]]:
constant[
Get the resource URI for a specified resource name.
If an entry for the specified resource name does not exist in the
Name-URI cache, the cache is refreshed from the HMC with all resources
of the manager holding this cache.
If an entry for the specified resource name still does not exist after
that, ``NotFound`` is raised.
]
call[name[self].auto_invalidate, parameter[]]
<ast.Try object at 0x7da18f810610> | keyword[def] identifier[get] ( identifier[self] , identifier[name] ):
literal[string]
identifier[self] . identifier[auto_invalidate] ()
keyword[try] :
keyword[return] identifier[self] . identifier[_uris] [ identifier[name] ]
keyword[except] identifier[KeyError] :
identifier[self] . identifier[refresh] ()
keyword[try] :
keyword[return] identifier[self] . identifier[_uris] [ identifier[name] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[NotFound] ({ identifier[self] . identifier[_manager] . identifier[_name_prop] : identifier[name] }, identifier[self] . identifier[_manager] ) | def get(self, name):
"""
Get the resource URI for a specified resource name.
If an entry for the specified resource name does not exist in the
Name-URI cache, the cache is refreshed from the HMC with all resources
of the manager holding this cache.
If an entry for the specified resource name still does not exist after
that, ``NotFound`` is raised.
"""
self.auto_invalidate()
try:
return self._uris[name] # depends on [control=['try'], data=[]]
except KeyError:
self.refresh()
try:
return self._uris[name] # depends on [control=['try'], data=[]]
except KeyError:
raise NotFound({self._manager._name_prop: name}, self._manager) # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] |
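A simplified, self-contained version of the lookup-refresh-lookup pattern above, with the HMC round trip replaced by a plain fetch callable for illustration; the real class also auto-invalidates and raises its own NotFound type.

class NameUriCache:
    def __init__(self, fetch_all):
        self._fetch_all = fetch_all   # callable returning {name: uri}
        self._uris = {}

    def refresh(self):
        self._uris = dict(self._fetch_all())

    def get(self, name):
        try:
            return self._uris[name]
        except KeyError:
            self.refresh()
            return self._uris[name]   # raises KeyError if still unknown

cache = NameUriCache(lambda: {'cpc1': '/api/cpcs/1'})
assert cache.get('cpc1') == '/api/cpcs/1'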
def clear_list_value(self, value):
"""
Clean the argument value to eliminate None or Falsy values if needed.
"""
# Don't go any further: this value is empty.
if not value:
return self.empty_value
# Clean empty items if wanted
if self.clean_empty:
value = [v for v in value if v]
return value or self.empty_value | def function[clear_list_value, parameter[self, value]]:
constant[
Clean the argument value to eliminate None or Falsy values if needed.
]
if <ast.UnaryOp object at 0x7da18c4ce4d0> begin[:]
return[name[self].empty_value]
if name[self].clean_empty begin[:]
variable[value] assign[=] <ast.ListComp object at 0x7da18c4ce6e0>
return[<ast.BoolOp object at 0x7da18c4cc8e0>] | keyword[def] identifier[clear_list_value] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[value] :
keyword[return] identifier[self] . identifier[empty_value]
keyword[if] identifier[self] . identifier[clean_empty] :
identifier[value] =[ identifier[v] keyword[for] identifier[v] keyword[in] identifier[value] keyword[if] identifier[v] ]
keyword[return] identifier[value] keyword[or] identifier[self] . identifier[empty_value] | def clear_list_value(self, value):
"""
Clean the argument value to eliminate None or Falsy values if needed.
"""
# Don't go any further: this value is empty.
if not value:
return self.empty_value # depends on [control=['if'], data=[]]
# Clean empty items if wanted
if self.clean_empty:
value = [v for v in value if v] # depends on [control=['if'], data=[]]
return value or self.empty_value |
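A small sketch of how the two knobs interact; Field is a hypothetical host class carrying the empty_value and clean_empty attributes the method reads, with the module-level function bound in as a method.

class Field:
    clear_list_value = clear_list_value

    def __init__(self, empty_value=None, clean_empty=True):
        self.empty_value = empty_value
        self.clean_empty = clean_empty

f = Field(empty_value=[], clean_empty=True)
assert f.clear_list_value(['a', '', None, 'b']) == ['a', 'b']
assert f.clear_list_value([None, '']) == []   # everything falsy -> empty_value
assert f.clear_list_value(None) == []         # empty input -> empty_value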
def process_musicbed(vargs):
"""
Main MusicBed path.
"""
# let's validate the given MusicBed url
validated = False
if vargs['artist_url'].startswith( 'https://www.musicbed.com/' ):
splitted = vargs['artist_url'][len('https://www.musicbed.com/'):].split( '/' )
if len( splitted ) == 3:
if ( splitted[0] == 'artists' or splitted[0] == 'albums' or splitted[0] == 'songs' ) and splitted[2].isdigit():
validated = True
if not validated:
puts( colored.red( 'process_musicbed: you provided an incorrect MusicBed url. Aborting.' ) )
puts( colored.white( 'Please make sure that url is either artist-url, album-url or song-url.' ) )
puts( colored.white( 'Example of correct artist-url: https://www.musicbed.com/artists/lights-motion/5188' ) )
puts( colored.white( 'Example of correct album-url: https://www.musicbed.com/albums/be-still/2828' ) )
puts( colored.white( 'Example of correct song-url: https://www.musicbed.com/songs/be-still/24540' ) )
return
filenames = scrape_musicbed_url(vargs['artist_url'], vargs['login'], vargs['password'], num_tracks=vargs['num_tracks'], folders=vargs['folders'], custom_path=vargs['path'])
if vargs['open']:
open_files(filenames) | def function[process_musicbed, parameter[vargs]]:
constant[
Main MusicBed path.
]
variable[validated] assign[=] constant[False]
if call[call[name[vargs]][constant[artist_url]].startswith, parameter[constant[https://www.musicbed.com/]]] begin[:]
variable[splitted] assign[=] call[call[call[name[vargs]][constant[artist_url]]][<ast.Slice object at 0x7da1b1eeab60>].split, parameter[constant[/]]]
if compare[call[name[len], parameter[name[splitted]]] equal[==] constant[3]] begin[:]
if <ast.BoolOp object at 0x7da1b1ee8f70> begin[:]
variable[validated] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da1b1e02740> begin[:]
call[name[puts], parameter[call[name[colored].red, parameter[constant[process_musicbed: you provided an incorrect MusicBed url. Aborting.]]]]]
call[name[puts], parameter[call[name[colored].white, parameter[constant[Please make sure that url is either artist-url, album-url or song-url.]]]]]
call[name[puts], parameter[call[name[colored].white, parameter[constant[Example of correct artist-url: https://www.musicbed.com/artists/lights-motion/5188]]]]]
call[name[puts], parameter[call[name[colored].white, parameter[constant[Example of correct album-url: https://www.musicbed.com/albums/be-still/2828]]]]]
call[name[puts], parameter[call[name[colored].white, parameter[constant[Example of correct song-url: https://www.musicbed.com/songs/be-still/24540]]]]]
return[None]
variable[filenames] assign[=] call[name[scrape_musicbed_url], parameter[call[name[vargs]][constant[artist_url]], call[name[vargs]][constant[login]], call[name[vargs]][constant[password]]]]
if call[name[vargs]][constant[open]] begin[:]
call[name[open_files], parameter[name[filenames]]] | keyword[def] identifier[process_musicbed] ( identifier[vargs] ):
literal[string]
identifier[validated] = keyword[False]
keyword[if] identifier[vargs] [ literal[string] ]. identifier[startswith] ( literal[string] ):
identifier[splitted] = identifier[vargs] [ literal[string] ][ identifier[len] ( literal[string] ):]. identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[splitted] )== literal[int] :
keyword[if] ( identifier[splitted] [ literal[int] ]== literal[string] keyword[or] identifier[splitted] [ literal[int] ]== literal[string] keyword[or] identifier[splitted] [ literal[int] ]== literal[string] ) keyword[and] identifier[splitted] [ literal[int] ]. identifier[isdigit] ():
identifier[validated] = keyword[True]
keyword[if] keyword[not] identifier[validated] :
identifier[puts] ( identifier[colored] . identifier[red] ( literal[string] ))
identifier[puts] ( identifier[colored] . identifier[white] ( literal[string] ))
identifier[puts] ( identifier[colored] . identifier[white] ( literal[string] ))
identifier[puts] ( identifier[colored] . identifier[white] ( literal[string] ))
identifier[puts] ( identifier[colored] . identifier[white] ( literal[string] ))
keyword[return]
identifier[filenames] = identifier[scrape_musicbed_url] ( identifier[vargs] [ literal[string] ], identifier[vargs] [ literal[string] ], identifier[vargs] [ literal[string] ], identifier[num_tracks] = identifier[vargs] [ literal[string] ], identifier[folders] = identifier[vargs] [ literal[string] ], identifier[custom_path] = identifier[vargs] [ literal[string] ])
keyword[if] identifier[vargs] [ literal[string] ]:
identifier[open_files] ( identifier[filenames] ) | def process_musicbed(vargs):
"""
Main MusicBed path.
"""
# let's validate the given MusicBed url
validated = False
if vargs['artist_url'].startswith('https://www.musicbed.com/'):
splitted = vargs['artist_url'][len('https://www.musicbed.com/'):].split('/')
if len(splitted) == 3:
if (splitted[0] == 'artists' or splitted[0] == 'albums' or splitted[0] == 'songs') and splitted[2].isdigit():
validated = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not validated:
puts(colored.red('process_musicbed: you provided an incorrect MusicBed url. Aborting.'))
puts(colored.white('Please make sure that url is either artist-url, album-url or song-url.'))
puts(colored.white('Example of correct artist-url: https://www.musicbed.com/artists/lights-motion/5188'))
puts(colored.white('Example of correct album-url: https://www.musicbed.com/albums/be-still/2828'))
puts(colored.white('Example of correct song-url: https://www.musicbed.com/songs/be-still/24540'))
return # depends on [control=['if'], data=[]]
filenames = scrape_musicbed_url(vargs['artist_url'], vargs['login'], vargs['password'], num_tracks=vargs['num_tracks'], folders=vargs['folders'], custom_path=vargs['path'])
if vargs['open']:
open_files(filenames) # depends on [control=['if'], data=[]] |
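The validation above only inspects the URL's path segments; here is a minimal, self-contained illustration of the accepted shapes, independent of any scraping or login.

def looks_like_musicbed_url(url):
    prefix = 'https://www.musicbed.com/'
    if not url.startswith(prefix):
        return False
    parts = url[len(prefix):].split('/')
    return (len(parts) == 3
            and parts[0] in ('artists', 'albums', 'songs')
            and parts[2].isdigit())

assert looks_like_musicbed_url('https://www.musicbed.com/albums/be-still/2828')
assert not looks_like_musicbed_url('https://www.musicbed.com/albums/be-still')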
def export_maxloss_ruptures(ekey, dstore):
"""
:param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object
"""
oq = dstore['oqparam']
mesh = get_mesh(dstore['sitecol'])
rlzs_by_gsim = dstore['csm_info'].get_rlzs_by_gsim_grp()
num_ses = oq.ses_per_logic_tree_path
fnames = []
for loss_type in oq.loss_dt().names:
ebr = getters.get_maxloss_rupture(dstore, loss_type)
root = hazard_writers.rupture_to_element(
ebr.export(mesh, rlzs_by_gsim[ebr.grp_id], num_ses))
dest = dstore.export_path('rupture-%s.xml' % loss_type)
with open(dest, 'wb') as fh:
nrml.write(list(root), fh)
fnames.append(dest)
return fnames | def function[export_maxloss_ruptures, parameter[ekey, dstore]]:
constant[
:param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object
]
variable[oq] assign[=] call[name[dstore]][constant[oqparam]]
variable[mesh] assign[=] call[name[get_mesh], parameter[call[name[dstore]][constant[sitecol]]]]
variable[rlzs_by_gsim] assign[=] call[call[name[dstore]][constant[csm_info]].get_rlzs_by_gsim_grp, parameter[]]
variable[num_ses] assign[=] name[oq].ses_per_logic_tree_path
variable[fnames] assign[=] list[[]]
for taget[name[loss_type]] in starred[call[name[oq].loss_dt, parameter[]].names] begin[:]
variable[ebr] assign[=] call[name[getters].get_maxloss_rupture, parameter[name[dstore], name[loss_type]]]
variable[root] assign[=] call[name[hazard_writers].rupture_to_element, parameter[call[name[ebr].export, parameter[name[mesh], call[name[rlzs_by_gsim]][name[ebr].grp_id], name[num_ses]]]]]
variable[dest] assign[=] call[name[dstore].export_path, parameter[binary_operation[constant[rupture-%s.xml] <ast.Mod object at 0x7da2590d6920> name[loss_type]]]]
with call[name[open], parameter[name[dest], constant[wb]]] begin[:]
call[name[nrml].write, parameter[call[name[list], parameter[name[root]]], name[fh]]]
call[name[fnames].append, parameter[name[dest]]]
return[name[fnames]] | keyword[def] identifier[export_maxloss_ruptures] ( identifier[ekey] , identifier[dstore] ):
literal[string]
identifier[oq] = identifier[dstore] [ literal[string] ]
identifier[mesh] = identifier[get_mesh] ( identifier[dstore] [ literal[string] ])
identifier[rlzs_by_gsim] = identifier[dstore] [ literal[string] ]. identifier[get_rlzs_by_gsim_grp] ()
identifier[num_ses] = identifier[oq] . identifier[ses_per_logic_tree_path]
identifier[fnames] =[]
keyword[for] identifier[loss_type] keyword[in] identifier[oq] . identifier[loss_dt] (). identifier[names] :
identifier[ebr] = identifier[getters] . identifier[get_maxloss_rupture] ( identifier[dstore] , identifier[loss_type] )
identifier[root] = identifier[hazard_writers] . identifier[rupture_to_element] (
identifier[ebr] . identifier[export] ( identifier[mesh] , identifier[rlzs_by_gsim] [ identifier[ebr] . identifier[grp_id] ], identifier[num_ses] ))
identifier[dest] = identifier[dstore] . identifier[export_path] ( literal[string] % identifier[loss_type] )
keyword[with] identifier[open] ( identifier[dest] , literal[string] ) keyword[as] identifier[fh] :
identifier[nrml] . identifier[write] ( identifier[list] ( identifier[root] ), identifier[fh] )
identifier[fnames] . identifier[append] ( identifier[dest] )
keyword[return] identifier[fnames] | def export_maxloss_ruptures(ekey, dstore):
"""
:param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object
"""
oq = dstore['oqparam']
mesh = get_mesh(dstore['sitecol'])
rlzs_by_gsim = dstore['csm_info'].get_rlzs_by_gsim_grp()
num_ses = oq.ses_per_logic_tree_path
fnames = []
for loss_type in oq.loss_dt().names:
ebr = getters.get_maxloss_rupture(dstore, loss_type)
root = hazard_writers.rupture_to_element(ebr.export(mesh, rlzs_by_gsim[ebr.grp_id], num_ses))
dest = dstore.export_path('rupture-%s.xml' % loss_type)
with open(dest, 'wb') as fh:
nrml.write(list(root), fh) # depends on [control=['with'], data=['fh']]
fnames.append(dest) # depends on [control=['for'], data=['loss_type']]
return fnames |
def split_dmap_overlay(obj, depth=0):
"""
Splits a DynamicMap into the original component layers it was
constructed from by traversing the graph to search for dynamically
overlaid components (i.e. constructed by using * on a DynamicMap).
Useful for assigning subplots of an OverlayPlot the streams that
are responsible for driving their updates. Allows the OverlayPlot
to determine if a stream update should redraw a particular
subplot.
"""
layers = []
if isinstance(obj, DynamicMap):
if issubclass(obj.type, NdOverlay) and not depth:
for v in obj.last.values():
layers.append(obj)
elif issubclass(obj.type, Overlay):
if obj.callback.inputs and is_dynamic_overlay(obj):
for inp in obj.callback.inputs:
layers += split_dmap_overlay(inp, depth+1)
else:
for v in obj.last.values():
layers.append(obj)
else:
layers.append(obj)
return layers
if isinstance(obj, Overlay):
for k, v in obj.items():
layers.append(v)
else:
layers.append(obj)
return layers | def function[split_dmap_overlay, parameter[obj, depth]]:
constant[
Splits a DynamicMap into the original component layers it was
constructed from by traversing the graph to search for dynamically
overlaid components (i.e. constructed by using * on a DynamicMap).
Useful for assigning subplots of an OverlayPlot the streams that
are responsible for driving their updates. Allows the OverlayPlot
to determine if a stream update should redraw a particular
subplot.
]
variable[layers] assign[=] list[[]]
if call[name[isinstance], parameter[name[obj], name[DynamicMap]]] begin[:]
if <ast.BoolOp object at 0x7da20c992020> begin[:]
for taget[name[v]] in starred[call[name[obj].last.values, parameter[]]] begin[:]
call[name[layers].append, parameter[name[obj]]]
return[name[layers]]
if call[name[isinstance], parameter[name[obj], name[Overlay]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c992200>, <ast.Name object at 0x7da20c990670>]]] in starred[call[name[obj].items, parameter[]]] begin[:]
call[name[layers].append, parameter[name[v]]]
return[name[layers]] | keyword[def] identifier[split_dmap_overlay] ( identifier[obj] , identifier[depth] = literal[int] ):
literal[string]
identifier[layers] =[]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[DynamicMap] ):
keyword[if] identifier[issubclass] ( identifier[obj] . identifier[type] , identifier[NdOverlay] ) keyword[and] keyword[not] identifier[depth] :
keyword[for] identifier[v] keyword[in] identifier[obj] . identifier[last] . identifier[values] ():
identifier[layers] . identifier[append] ( identifier[obj] )
keyword[elif] identifier[issubclass] ( identifier[obj] . identifier[type] , identifier[Overlay] ):
keyword[if] identifier[obj] . identifier[callback] . identifier[inputs] keyword[and] identifier[is_dynamic_overlay] ( identifier[obj] ):
keyword[for] identifier[inp] keyword[in] identifier[obj] . identifier[callback] . identifier[inputs] :
identifier[layers] += identifier[split_dmap_overlay] ( identifier[inp] , identifier[depth] + literal[int] )
keyword[else] :
keyword[for] identifier[v] keyword[in] identifier[obj] . identifier[last] . identifier[values] ():
identifier[layers] . identifier[append] ( identifier[obj] )
keyword[else] :
identifier[layers] . identifier[append] ( identifier[obj] )
keyword[return] identifier[layers]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[Overlay] ):
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[obj] . identifier[items] ():
identifier[layers] . identifier[append] ( identifier[v] )
keyword[else] :
identifier[layers] . identifier[append] ( identifier[obj] )
keyword[return] identifier[layers] | def split_dmap_overlay(obj, depth=0):
"""
Splits a DynamicMap into the original component layers it was
constructed from by traversing the graph to search for dynamically
overlaid components (i.e. constructed by using * on a DynamicMap).
Useful for assigning subplots of an OverlayPlot the streams that
are responsible for driving their updates. Allows the OverlayPlot
to determine if a stream update should redraw a particular
subplot.
"""
layers = []
if isinstance(obj, DynamicMap):
if issubclass(obj.type, NdOverlay) and (not depth):
for v in obj.last.values():
layers.append(obj) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif issubclass(obj.type, Overlay):
if obj.callback.inputs and is_dynamic_overlay(obj):
for inp in obj.callback.inputs:
layers += split_dmap_overlay(inp, depth + 1) # depends on [control=['for'], data=['inp']] # depends on [control=['if'], data=[]]
else:
for v in obj.last.values():
layers.append(obj) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
layers.append(obj)
return layers # depends on [control=['if'], data=[]]
if isinstance(obj, Overlay):
for (k, v) in obj.items():
layers.append(v) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
layers.append(obj)
return layers |
def get_sport(sport):
"""
Get live scores for all matches in a particular sport
:param sport: the sport being played
:type sport: string
:return: List containing Match objects
:rtype: list
"""
sport = sport.lower()
data = _request_xml(sport)
matches = []
for match in data:
if sport == constants.SOCCER:
desc = match.find('description').text
match_info = _parse_match_info(desc, soccer=True)
else:
desc = match.find('title').text
match_info = _parse_match_info(desc)
match_info['match_time'] = match.find('description').text
match_info['match_date'] = match.find('pubDate').text
match_info['match_link'] = match.find('guid').text
matches.append(Match(sport, match_info))
return matches | def function[get_sport, parameter[sport]]:
constant[
Get live scores for all matches in a particular sport
:param sport: the sport being played
:type sport: string
:return: List containing Match objects
:rtype: list
]
variable[sport] assign[=] call[name[sport].lower, parameter[]]
variable[data] assign[=] call[name[_request_xml], parameter[name[sport]]]
variable[matches] assign[=] list[[]]
for taget[name[match]] in starred[name[data]] begin[:]
if compare[name[sport] equal[==] name[constants].SOCCER] begin[:]
variable[desc] assign[=] call[name[match].find, parameter[constant[description]]].text
variable[match_info] assign[=] call[name[_parse_match_info], parameter[name[desc]]]
call[name[match_info]][constant[match_date]] assign[=] call[name[match].find, parameter[constant[pubDate]]].text
call[name[match_info]][constant[match_link]] assign[=] call[name[match].find, parameter[constant[guid]]].text
call[name[matches].append, parameter[call[name[Match], parameter[name[sport], name[match_info]]]]]
return[name[matches]] | keyword[def] identifier[get_sport] ( identifier[sport] ):
literal[string]
identifier[sport] = identifier[sport] . identifier[lower] ()
identifier[data] = identifier[_request_xml] ( identifier[sport] )
identifier[matches] =[]
keyword[for] identifier[match] keyword[in] identifier[data] :
keyword[if] identifier[sport] == identifier[constants] . identifier[SOCCER] :
identifier[desc] = identifier[match] . identifier[find] ( literal[string] ). identifier[text]
identifier[match_info] = identifier[_parse_match_info] ( identifier[desc] , identifier[soccer] = keyword[True] )
keyword[else] :
identifier[desc] = identifier[match] . identifier[find] ( literal[string] ). identifier[text]
identifier[match_info] = identifier[_parse_match_info] ( identifier[desc] )
identifier[match_info] [ literal[string] ]= identifier[match] . identifier[find] ( literal[string] ). identifier[text]
identifier[match_info] [ literal[string] ]= identifier[match] . identifier[find] ( literal[string] ). identifier[text]
identifier[match_info] [ literal[string] ]= identifier[match] . identifier[find] ( literal[string] ). identifier[text]
identifier[matches] . identifier[append] ( identifier[Match] ( identifier[sport] , identifier[match_info] ))
keyword[return] identifier[matches] | def get_sport(sport):
"""
Get live scores for all matches in a particular sport
:param sport: the sport being played
:type sport: string
:return: List containing Match objects
:rtype: list
"""
sport = sport.lower()
data = _request_xml(sport)
matches = []
for match in data:
if sport == constants.SOCCER:
desc = match.find('description').text
match_info = _parse_match_info(desc, soccer=True) # depends on [control=['if'], data=[]]
else:
desc = match.find('title').text
match_info = _parse_match_info(desc)
match_info['match_time'] = match.find('description').text
match_info['match_date'] = match.find('pubDate').text
match_info['match_link'] = match.find('guid').text
matches.append(Match(sport, match_info)) # depends on [control=['for'], data=['match']]
return matches |
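A self-contained sketch of the per-item parsing, using xml.etree on an inline RSS fragment instead of the live _request_xml call; the element names mirror the fields read above.

import xml.etree.ElementTree as ET

item = ET.fromstring(
    '<item>'
    '<title>Team A 1-0 Team B</title>'
    '<description>FT</description>'
    '<pubDate>Sat, 01 Jan 2022</pubDate>'
    '<guid>http://example.com/match/1</guid>'
    '</item>'
)
match_info = {
    'match_time': item.find('description').text,
    'match_date': item.find('pubDate').text,
    'match_link': item.find('guid').text,
}
assert match_info['match_link'].endswith('/match/1')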
def __step4(self):
"""
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
"""
step = 0
done = False
row = -1
col = -1
star_col = -1
while not done:
(row, col) = self.__find_a_zero()
if row < 0:
done = True
step = 6
else:
self.marked[row][col] = 2
star_col = self.__find_star_in_row(row)
if star_col >= 0:
col = star_col
self.row_covered[row] = True
self.col_covered[col] = False
else:
done = True
self.Z0_r = row
self.Z0_c = col
step = 5
return step | def function[__step4, parameter[self]]:
constant[
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
]
variable[step] assign[=] constant[0]
variable[done] assign[=] constant[False]
variable[row] assign[=] <ast.UnaryOp object at 0x7da18f00d3f0>
variable[col] assign[=] <ast.UnaryOp object at 0x7da18f00cf70>
variable[star_col] assign[=] <ast.UnaryOp object at 0x7da18f00f550>
while <ast.UnaryOp object at 0x7da18f00cd90> begin[:]
<ast.Tuple object at 0x7da18f00ed10> assign[=] call[name[self].__find_a_zero, parameter[]]
if compare[name[row] less[<] constant[0]] begin[:]
variable[done] assign[=] constant[True]
variable[step] assign[=] constant[6]
return[name[step]] | keyword[def] identifier[__step4] ( identifier[self] ):
literal[string]
identifier[step] = literal[int]
identifier[done] = keyword[False]
identifier[row] =- literal[int]
identifier[col] =- literal[int]
identifier[star_col] =- literal[int]
keyword[while] keyword[not] identifier[done] :
( identifier[row] , identifier[col] )= identifier[self] . identifier[__find_a_zero] ()
keyword[if] identifier[row] < literal[int] :
identifier[done] = keyword[True]
identifier[step] = literal[int]
keyword[else] :
identifier[self] . identifier[marked] [ identifier[row] ][ identifier[col] ]= literal[int]
identifier[star_col] = identifier[self] . identifier[__find_star_in_row] ( identifier[row] )
keyword[if] identifier[star_col] >= literal[int] :
identifier[col] = identifier[star_col]
identifier[self] . identifier[row_covered] [ identifier[row] ]= keyword[True]
identifier[self] . identifier[col_covered] [ identifier[col] ]= keyword[False]
keyword[else] :
identifier[done] = keyword[True]
identifier[self] . identifier[Z0_r] = identifier[row]
identifier[self] . identifier[Z0_c] = identifier[col]
identifier[step] = literal[int]
keyword[return] identifier[step] | def __step4(self):
"""
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
"""
step = 0
done = False
row = -1
col = -1
star_col = -1
while not done:
(row, col) = self.__find_a_zero()
if row < 0:
done = True
step = 6 # depends on [control=['if'], data=[]]
else:
self.marked[row][col] = 2
star_col = self.__find_star_in_row(row)
if star_col >= 0:
col = star_col
self.row_covered[row] = True
self.col_covered[col] = False # depends on [control=['if'], data=['star_col']]
else:
done = True
self.Z0_r = row
self.Z0_c = col
step = 5 # depends on [control=['while'], data=[]]
return step |
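Step 4 is one phase of the Munkres/Hungarian algorithm; for orientation, the whole assignment problem it helps solve can be answered in one call with scipy, shown here as an independent cross-check rather than as part of this class.

import numpy as np
from scipy.optimize import linear_sum_assignment

cost = np.array([[4, 1, 3],
                 [2, 0, 5],
                 [3, 2, 2]])
rows, cols = linear_sum_assignment(cost)
# optimal pairing (0,1), (1,0), (2,2) with total cost 1 + 2 + 2
assert cost[rows, cols].sum() == 5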
def _from_dict(cls, _dict):
"""Initialize a DialogNodeOutput object from a json dictionary."""
args = {}
xtra = _dict.copy()
if 'generic' in _dict:
args['generic'] = [
DialogNodeOutputGeneric._from_dict(x)
for x in (_dict.get('generic'))
]
del xtra['generic']
if 'modifiers' in _dict:
args['modifiers'] = DialogNodeOutputModifiers._from_dict(
_dict.get('modifiers'))
del xtra['modifiers']
args.update(xtra)
return cls(**args) | def function[_from_dict, parameter[cls, _dict]]:
constant[Initialize a DialogNodeOutput object from a json dictionary.]
variable[args] assign[=] dictionary[[], []]
variable[xtra] assign[=] call[name[_dict].copy, parameter[]]
if compare[constant[generic] in name[_dict]] begin[:]
call[name[args]][constant[generic]] assign[=] <ast.ListComp object at 0x7da2044c15d0>
<ast.Delete object at 0x7da2044c3340>
if compare[constant[modifiers] in name[_dict]] begin[:]
call[name[args]][constant[modifiers]] assign[=] call[name[DialogNodeOutputModifiers]._from_dict, parameter[call[name[_dict].get, parameter[constant[modifiers]]]]]
<ast.Delete object at 0x7da18c4ce7d0>
call[name[args].update, parameter[name[xtra]]]
return[call[name[cls], parameter[]]] | keyword[def] identifier[_from_dict] ( identifier[cls] , identifier[_dict] ):
literal[string]
identifier[args] ={}
identifier[xtra] = identifier[_dict] . identifier[copy] ()
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]=[
identifier[DialogNodeOutputGeneric] . identifier[_from_dict] ( identifier[x] )
keyword[for] identifier[x] keyword[in] ( identifier[_dict] . identifier[get] ( literal[string] ))
]
keyword[del] identifier[xtra] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[DialogNodeOutputModifiers] . identifier[_from_dict] (
identifier[_dict] . identifier[get] ( literal[string] ))
keyword[del] identifier[xtra] [ literal[string] ]
identifier[args] . identifier[update] ( identifier[xtra] )
keyword[return] identifier[cls] (** identifier[args] ) | def _from_dict(cls, _dict):
"""Initialize a DialogNodeOutput object from a json dictionary."""
args = {}
xtra = _dict.copy()
if 'generic' in _dict:
args['generic'] = [DialogNodeOutputGeneric._from_dict(x) for x in _dict.get('generic')]
del xtra['generic'] # depends on [control=['if'], data=['_dict']]
if 'modifiers' in _dict:
args['modifiers'] = DialogNodeOutputModifiers._from_dict(_dict.get('modifiers'))
del xtra['modifiers'] # depends on [control=['if'], data=['_dict']]
args.update(xtra)
return cls(**args) |
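A hedged usage sketch, assuming the ibm-watson SDK classes referenced above are importable (e.g. from ibm_watson.assistant_v1); keys the model does not know, like custom_key below, are kept as extra attributes via the xtra copy.

payload = {
    'generic': [{'response_type': 'text', 'text': 'Hello'}],
    'modifiers': {'overwrite': True},
    'custom_key': 'kept as an extra attribute',
}
output = DialogNodeOutput._from_dict(payload)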
def patterson_d(aca, acb, acc, acd):
"""Unbiased estimator for D(A, B; C, D), the normalised four-population
test for admixture between (A or B) and (C or D), also known as the
"ABBA BABA" test.
Parameters
----------
aca : array_like, int, shape (n_variants, 2)
Allele counts for population A.
acb : array_like, int, shape (n_variants, 2)
Allele counts for population B.
acc : array_like, int, shape (n_variants, 2)
Allele counts for population C.
acd : array_like, int, shape (n_variants, 2)
Allele counts for population D.
Returns
-------
num : ndarray, float, shape (n_variants,)
Numerator (un-normalised f4 estimates).
den : ndarray, float, shape (n_variants,)
Denominator.
Notes
-----
See Patterson (2012), main text and Appendix A.
For un-normalized f4 statistics, ignore the `den` return value.
"""
# check inputs
aca = AlleleCountsArray(aca, copy=False)
assert aca.shape[1] == 2, 'only biallelic variants supported'
acb = AlleleCountsArray(acb, copy=False)
assert acb.shape[1] == 2, 'only biallelic variants supported'
acc = AlleleCountsArray(acc, copy=False)
assert acc.shape[1] == 2, 'only biallelic variants supported'
acd = AlleleCountsArray(acd, copy=False)
assert acd.shape[1] == 2, 'only biallelic variants supported'
check_dim0_aligned(aca, acb, acc, acd)
# compute sample frequencies for the alternate allele
a = aca.to_frequencies()[:, 1]
b = acb.to_frequencies()[:, 1]
c = acc.to_frequencies()[:, 1]
d = acd.to_frequencies()[:, 1]
# compute estimator
num = (a - b) * (c - d)
den = (a + b - (2 * a * b)) * (c + d - (2 * c * d))
return num, den | def function[patterson_d, parameter[aca, acb, acc, acd]]:
constant[Unbiased estimator for D(A, B; C, D), the normalised four-population
test for admixture between (A or B) and (C or D), also known as the
"ABBA BABA" test.
Parameters
----------
aca : array_like, int, shape (n_variants, 2)
Allele counts for population A.
acb : array_like, int, shape (n_variants, 2)
Allele counts for population B.
acc : array_like, int, shape (n_variants, 2)
Allele counts for population C.
acd : array_like, int, shape (n_variants, 2)
Allele counts for population D.
Returns
-------
num : ndarray, float, shape (n_variants,)
Numerator (un-normalised f4 estimates).
den : ndarray, float, shape (n_variants,)
Denominator.
Notes
-----
See Patterson (2012), main text and Appendix A.
For un-normalized f4 statistics, ignore the `den` return value.
]
variable[aca] assign[=] call[name[AlleleCountsArray], parameter[name[aca]]]
assert[compare[call[name[aca].shape][constant[1]] equal[==] constant[2]]]
variable[acb] assign[=] call[name[AlleleCountsArray], parameter[name[acb]]]
assert[compare[call[name[acb].shape][constant[1]] equal[==] constant[2]]]
variable[acc] assign[=] call[name[AlleleCountsArray], parameter[name[acc]]]
assert[compare[call[name[acc].shape][constant[1]] equal[==] constant[2]]]
variable[acd] assign[=] call[name[AlleleCountsArray], parameter[name[acd]]]
assert[compare[call[name[acd].shape][constant[1]] equal[==] constant[2]]]
call[name[check_dim0_aligned], parameter[name[aca], name[acb], name[acc], name[acd]]]
variable[a] assign[=] call[call[name[aca].to_frequencies, parameter[]]][tuple[[<ast.Slice object at 0x7da2041db100>, <ast.Constant object at 0x7da2041d9600>]]]
variable[b] assign[=] call[call[name[acb].to_frequencies, parameter[]]][tuple[[<ast.Slice object at 0x7da1b2347bb0>, <ast.Constant object at 0x7da1b2345a50>]]]
variable[c] assign[=] call[call[name[acc].to_frequencies, parameter[]]][tuple[[<ast.Slice object at 0x7da1b2345c90>, <ast.Constant object at 0x7da1b23474f0>]]]
variable[d] assign[=] call[call[name[acd].to_frequencies, parameter[]]][tuple[[<ast.Slice object at 0x7da1b2346860>, <ast.Constant object at 0x7da1b2345bd0>]]]
variable[num] assign[=] binary_operation[binary_operation[name[a] - name[b]] * binary_operation[name[c] - name[d]]]
variable[den] assign[=] binary_operation[binary_operation[binary_operation[name[a] + name[b]] - binary_operation[binary_operation[constant[2] * name[a]] * name[b]]] * binary_operation[binary_operation[name[c] + name[d]] - binary_operation[binary_operation[constant[2] * name[c]] * name[d]]]]
return[tuple[[<ast.Name object at 0x7da18bc718d0>, <ast.Name object at 0x7da18bc71f30>]]] | keyword[def] identifier[patterson_d] ( identifier[aca] , identifier[acb] , identifier[acc] , identifier[acd] ):
literal[string]
identifier[aca] = identifier[AlleleCountsArray] ( identifier[aca] , identifier[copy] = keyword[False] )
keyword[assert] identifier[aca] . identifier[shape] [ literal[int] ]== literal[int] , literal[string]
identifier[acb] = identifier[AlleleCountsArray] ( identifier[acb] , identifier[copy] = keyword[False] )
keyword[assert] identifier[acb] . identifier[shape] [ literal[int] ]== literal[int] , literal[string]
identifier[acc] = identifier[AlleleCountsArray] ( identifier[acc] , identifier[copy] = keyword[False] )
keyword[assert] identifier[acc] . identifier[shape] [ literal[int] ]== literal[int] , literal[string]
identifier[acd] = identifier[AlleleCountsArray] ( identifier[acd] , identifier[copy] = keyword[False] )
keyword[assert] identifier[acd] . identifier[shape] [ literal[int] ]== literal[int] , literal[string]
identifier[check_dim0_aligned] ( identifier[aca] , identifier[acb] , identifier[acc] , identifier[acd] )
identifier[a] = identifier[aca] . identifier[to_frequencies] ()[:, literal[int] ]
identifier[b] = identifier[acb] . identifier[to_frequencies] ()[:, literal[int] ]
identifier[c] = identifier[acc] . identifier[to_frequencies] ()[:, literal[int] ]
identifier[d] = identifier[acd] . identifier[to_frequencies] ()[:, literal[int] ]
identifier[num] =( identifier[a] - identifier[b] )*( identifier[c] - identifier[d] )
identifier[den] =( identifier[a] + identifier[b] -( literal[int] * identifier[a] * identifier[b] ))*( identifier[c] + identifier[d] -( literal[int] * identifier[c] * identifier[d] ))
keyword[return] identifier[num] , identifier[den] | def patterson_d(aca, acb, acc, acd):
"""Unbiased estimator for D(A, B; C, D), the normalised four-population
test for admixture between (A or B) and (C or D), also known as the
"ABBA BABA" test.
Parameters
----------
aca : array_like, int, shape (n_variants, 2)
Allele counts for population A.
acb : array_like, int, shape (n_variants, 2)
Allele counts for population B.
acc : array_like, int, shape (n_variants, 2)
Allele counts for population C.
acd : array_like, int, shape (n_variants, 2)
Allele counts for population D.
Returns
-------
num : ndarray, float, shape (n_variants,)
Numerator (un-normalised f4 estimates).
den : ndarray, float, shape (n_variants,)
Denominator.
Notes
-----
See Patterson (2012), main text and Appendix A.
For un-normalized f4 statistics, ignore the `den` return value.
"""
# check inputs
aca = AlleleCountsArray(aca, copy=False)
assert aca.shape[1] == 2, 'only biallelic variants supported'
acb = AlleleCountsArray(acb, copy=False)
assert acb.shape[1] == 2, 'only biallelic variants supported'
acc = AlleleCountsArray(acc, copy=False)
assert acc.shape[1] == 2, 'only biallelic variants supported'
acd = AlleleCountsArray(acd, copy=False)
assert acd.shape[1] == 2, 'only biallelic variants supported'
check_dim0_aligned(aca, acb, acc, acd)
# compute sample frequencies for the alternate allele
a = aca.to_frequencies()[:, 1]
b = acb.to_frequencies()[:, 1]
c = acc.to_frequencies()[:, 1]
d = acd.to_frequencies()[:, 1]
# compute estimator
num = (a - b) * (c - d)
den = (a + b - 2 * a * b) * (c + d - 2 * c * d)
return (num, den) |
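A usage sketch assuming scikit-allel is installed, where this estimator is exposed as allel.patterson_d; allele counts are (n_variants, 2) arrays, and the genome-wide D is the ratio of summed numerators to summed denominators.

import numpy as np
import allel

aca = np.array([[10, 0], [8, 2], [5, 5]])
acb = np.array([[9, 1], [7, 3], [6, 4]])
acc = np.array([[10, 0], [1, 9], [4, 6]])
acd = np.array([[0, 10], [2, 8], [5, 5]])

num, den = allel.patterson_d(aca, acb, acc, acd)
d_stat = np.sum(num) / np.sum(den)   # genome-wide D estimate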
def _is_interactive(self):
''' Prevent middlewares and orders from working outside live mode '''
return not (
self.realworld and (dt.date.today() > self.datetime.date())) | def function[_is_interactive, parameter[self]]:
constant[ Prevent middlewares and orders from working outside live mode ]
return[<ast.UnaryOp object at 0x7da1b0ef8520>] | keyword[def] identifier[_is_interactive] ( identifier[self] ):
literal[string]
keyword[return] keyword[not] (
identifier[self] . identifier[realworld] keyword[and] ( identifier[dt] . identifier[date] . identifier[today] ()> identifier[self] . identifier[datetime] . identifier[date] ())) | def _is_interactive(self):
""" Prevent middlewares and orders to work outside live mode """
return not (self.realworld and dt.date.today() > self.datetime.date()) |
def check_response(self, resp):
"""raise a descriptive exception on a "bad request" response"""
if resp.status_code == 400:
raise ApiException(json.loads(resp.content).get('message'))
return resp | def function[check_response, parameter[self, resp]]:
constant[raise a descriptive exception on a "bad request" response]
if compare[name[resp].status_code equal[==] constant[400]] begin[:]
<ast.Raise object at 0x7da1b2380370>
return[name[resp]] | keyword[def] identifier[check_response] ( identifier[self] , identifier[resp] ):
literal[string]
keyword[if] identifier[resp] . identifier[status_code] == literal[int] :
keyword[raise] identifier[ApiException] ( identifier[json] . identifier[loads] ( identifier[resp] . identifier[content] ). identifier[get] ( literal[string] ))
keyword[return] identifier[resp] | def check_response(self, resp):
"""raise a descriptive exception on a "bad request" response"""
if resp.status_code == 400:
raise ApiException(json.loads(resp.content).get('message')) # depends on [control=['if'], data=[]]
return resp |
async def reply_video_note(self, video_note: typing.Union[base.InputFile, base.String],
duration: typing.Union[base.Integer, None] = None,
length: typing.Union[base.Integer, None] = None,
disable_notification: typing.Union[base.Boolean, None] = None,
reply_markup=None,
reply=True) -> Message:
"""
As of v.4.0, Telegram clients support rounded square mp4 videos of up to 1 minute long.
Use this method to send video messages.
Source: https://core.telegram.org/bots/api#sendvideonote
:param video_note: Video note to send.
:type video_note: :obj:`typing.Union[base.InputFile, base.String]`
:param duration: Duration of sent video in seconds
:type duration: :obj:`typing.Union[base.Integer, None]`
:param length: Video width and height
:type length: :obj:`typing.Union[base.Integer, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_markup: Additional interface options.
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
:param reply: fill 'reply_to_message_id'
:return: On success, the sent Message is returned.
:rtype: :obj:`types.Message`
"""
return await self.bot.send_video_note(chat_id=self.chat.id,
video_note=video_note,
duration=duration,
length=length,
disable_notification=disable_notification,
reply_to_message_id=self.message_id if reply else None,
reply_markup=reply_markup) | <ast.AsyncFunctionDef object at 0x7da1b17ba920> | keyword[async] keyword[def] identifier[reply_video_note] ( identifier[self] , identifier[video_note] : identifier[typing] . identifier[Union] [ identifier[base] . identifier[InputFile] , identifier[base] . identifier[String] ],
identifier[duration] : identifier[typing] . identifier[Union] [ identifier[base] . identifier[Integer] , keyword[None] ]= keyword[None] ,
identifier[length] : identifier[typing] . identifier[Union] [ identifier[base] . identifier[Integer] , keyword[None] ]= keyword[None] ,
identifier[disable_notification] : identifier[typing] . identifier[Union] [ identifier[base] . identifier[Boolean] , keyword[None] ]= keyword[None] ,
identifier[reply_markup] = keyword[None] ,
identifier[reply] = keyword[True] )-> identifier[Message] :
literal[string]
keyword[return] keyword[await] identifier[self] . identifier[bot] . identifier[send_video_note] ( identifier[chat_id] = identifier[self] . identifier[chat] . identifier[id] ,
identifier[video_note] = identifier[video_note] ,
identifier[duration] = identifier[duration] ,
identifier[length] = identifier[length] ,
identifier[disable_notification] = identifier[disable_notification] ,
identifier[reply_to_message_id] = identifier[self] . identifier[message_id] keyword[if] identifier[reply] keyword[else] keyword[None] ,
identifier[reply_markup] = identifier[reply_markup] ) | async def reply_video_note(self, video_note: typing.Union[base.InputFile, base.String], duration: typing.Union[base.Integer, None]=None, length: typing.Union[base.Integer, None]=None, disable_notification: typing.Union[base.Boolean, None]=None, reply_markup=None, reply=True) -> Message:
"""
As of v.4.0, Telegram clients support rounded square mp4 videos of up to 1 minute long.
Use this method to send video messages.
Source: https://core.telegram.org/bots/api#sendvideonote
:param video_note: Video note to send.
:type video_note: :obj:`typing.Union[base.InputFile, base.String]`
:param duration: Duration of sent video in seconds
:type duration: :obj:`typing.Union[base.Integer, None]`
:param length: Video width and height
:type length: :obj:`typing.Union[base.Integer, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_markup: Additional interface options.
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
:param reply: fill 'reply_to_message_id'
:return: On success, the sent Message is returned.
:rtype: :obj:`types.Message`
"""
return await self.bot.send_video_note(chat_id=self.chat.id, video_note=video_note, duration=duration, length=length, disable_notification=disable_notification, reply_to_message_id=self.message_id if reply else None, reply_markup=reply_markup) |
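A hedged aiogram v2 usage sketch that echoes a received video note back by file_id; BOT_TOKEN is a placeholder, and the handler relies on the Message.reply_video_note method defined above.

from aiogram import Bot, Dispatcher, executor, types

bot = Bot(token='BOT_TOKEN')   # placeholder token
dp = Dispatcher(bot)

@dp.message_handler(content_types=types.ContentType.VIDEO_NOTE)
async def echo_video_note(message: types.Message):
    # re-sending by file_id lets Telegram reuse the existing upload
    await message.reply_video_note(message.video_note.file_id)

if __name__ == '__main__':
    executor.start_polling(dp)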
def should_generate_summaries():
"""Is this an appropriate context to generate summaries.
Returns:
a boolean
"""
name_scope = tf.contrib.framework.get_name_scope()
if name_scope and "while/" in name_scope:
# Summaries don't work well within tf.while_loop()
return False
if tf.get_variable_scope().reuse:
# Avoid generating separate summaries for different data shards
return False
return True | def function[should_generate_summaries, parameter[]]:
constant[Is this an appropriate context to generate summaries.
Returns:
a boolean
]
variable[name_scope] assign[=] call[name[tf].contrib.framework.get_name_scope, parameter[]]
if <ast.BoolOp object at 0x7da1b205bd60> begin[:]
return[constant[False]]
if call[name[tf].get_variable_scope, parameter[]].reuse begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[should_generate_summaries] ():
literal[string]
identifier[name_scope] = identifier[tf] . identifier[contrib] . identifier[framework] . identifier[get_name_scope] ()
keyword[if] identifier[name_scope] keyword[and] literal[string] keyword[in] identifier[name_scope] :
keyword[return] keyword[False]
keyword[if] identifier[tf] . identifier[get_variable_scope] (). identifier[reuse] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def should_generate_summaries():
"""Is this an appropriate context to generate summaries.
Returns:
a boolean
"""
name_scope = tf.contrib.framework.get_name_scope()
if name_scope and 'while/' in name_scope:
# Summaries don't work well within tf.while_loop()
return False # depends on [control=['if'], data=[]]
if tf.get_variable_scope().reuse:
# Avoid generating separate summaries for different data shards
return False # depends on [control=['if'], data=[]]
return True |
def frame_to_yaml_safe(frame, ordered=False):
"""
Convert a pandas DataFrame to a dictionary that will survive
YAML serialization and re-conversion back to a DataFrame.
Parameters
----------
frame : pandas.DataFrame
ordered: bool, optional, default False
If True, an OrderedDict is returned.
Returns
-------
safe : dict or OrderedDict
"""
if ordered:
return OrderedDict(tuple((col, series_to_yaml_safe(series, True))
for col, series in frame.iteritems()))
else:
return {col: series_to_yaml_safe(series)
for col, series in frame.iteritems()} | def function[frame_to_yaml_safe, parameter[frame, ordered]]:
constant[
Convert a pandas DataFrame to a dictionary that will survive
YAML serialization and re-conversion back to a DataFrame.
Parameters
----------
frame : pandas.DataFrame
ordered: bool, optional, default False
If True, an OrderedDict is returned.
Returns
-------
safe : dict or OrderedDict
]
if name[ordered] begin[:]
return[call[name[OrderedDict], parameter[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da2054a7940>]]]]] | keyword[def] identifier[frame_to_yaml_safe] ( identifier[frame] , identifier[ordered] = keyword[False] ):
literal[string]
keyword[if] identifier[ordered] :
keyword[return] identifier[OrderedDict] ( identifier[tuple] (( identifier[col] , identifier[series_to_yaml_safe] ( identifier[series] , keyword[True] ))
keyword[for] identifier[col] , identifier[series] keyword[in] identifier[frame] . identifier[iteritems] ()))
keyword[else] :
keyword[return] { identifier[col] : identifier[series_to_yaml_safe] ( identifier[series] )
keyword[for] identifier[col] , identifier[series] keyword[in] identifier[frame] . identifier[iteritems] ()} | def frame_to_yaml_safe(frame, ordered=False):
"""
Convert a pandas DataFrame to a dictionary that will survive
YAML serialization and re-conversion back to a DataFrame.
Parameters
----------
frame : pandas.DataFrame
ordered: bool, optional, default False
If True, an OrderedDict is returned.
Returns
-------
safe : dict or OrderedDict
"""
if ordered:
return OrderedDict(tuple(((col, series_to_yaml_safe(series, True)) for (col, series) in frame.iteritems()))) # depends on [control=['if'], data=[]]
else:
return {col: series_to_yaml_safe(series) for (col, series) in frame.iteritems()} |
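A hedged round-trip sketch for frame_to_yaml_safe, assuming series_to_yaml_safe (not shown here) maps a Series to a plain {index: value} dict; frame.iteritems() also implies an older pandas where that alias still exists:

import pandas as pd
import yaml

df = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
safe = frame_to_yaml_safe(df)  # e.g. {'a': {0: 1, 1: 2}, 'b': {0: 3.0, 1: 4.0}}
text = yaml.safe_dump(safe)    # plain dicts survive YAML serialization
restored = pd.DataFrame(yaml.safe_load(text))
# For simple frames like this, restored should compare equal to df.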
def _build_index(self):
"""Build an index for indexing the sorted list.
Indexes are represented as binary trees in a dense array notation
similar to a binary heap.
For example, given a _lists representation storing integers:
[0]: 1 2 3
[1]: 4 5
[2]: 6 7 8 9
[3]: 10 11 12 13 14
The first transformation maps the sub-lists by their length. The
first row of the index is the length of the sub-lists.
[0]: 3 2 4 5
Each row after that is the sum of consecutive pairs of the previous row:
[1]: 5 9
[2]: 14
Finally, the index is built by concatenating these lists together:
_index = 14 5 9 3 2 4 5
An offset storing the start of the first row is also stored:
_offset = 3
When built, the index can be used for efficient indexing into the list.
See the comment and notes on self._pos for details.
"""
row0 = list(map(len, self._lists))
if len(row0) == 1:
self._index[:] = row0
self._offset = 0
return
head = iter(row0)
tail = iter(head)
row1 = list(starmap(add, zip(head, tail)))
if len(row0) & 1:
row1.append(row0[-1])
if len(row1) == 1:
self._index[:] = row1 + row0
self._offset = 1
return
size = 2 ** (int(log_e(len(row1) - 1, 2)) + 1)
row1.extend(repeat(0, size - len(row1)))
tree = [row0, row1]
while len(tree[-1]) > 1:
head = iter(tree[-1])
tail = iter(head)
row = list(starmap(add, zip(head, tail)))
tree.append(row)
reduce(iadd, reversed(tree), self._index)
self._offset = size * 2 - 1 | def function[_build_index, parameter[self]]:
constant[Build an index for indexing the sorted list.
Indexes are represented as binary trees in a dense array notation
similar to a binary heap.
For example, given a _lists representation storing integers:
[0]: 1 2 3
[1]: 4 5
[2]: 6 7 8 9
[3]: 10 11 12 13 14
The first transformation maps the sub-lists by their length. The
first row of the index is the length of the sub-lists.
[0]: 3 2 4 5
Each row after that is the sum of consecutive pairs of the previous row:
[1]: 5 9
[2]: 14
Finally, the index is built by concatenating these lists together:
_index = 14 5 9 3 2 4 5
An offset storing the start of the first row is also stored:
_offset = 3
When built, the index can be used for efficient indexing into the list.
See the comment and notes on self._pos for details.
]
variable[row0] assign[=] call[name[list], parameter[call[name[map], parameter[name[len], name[self]._lists]]]]
if compare[call[name[len], parameter[name[row0]]] equal[==] constant[1]] begin[:]
call[name[self]._index][<ast.Slice object at 0x7da2054a4940>] assign[=] name[row0]
name[self]._offset assign[=] constant[0]
return[None]
variable[head] assign[=] call[name[iter], parameter[name[row0]]]
variable[tail] assign[=] call[name[iter], parameter[name[head]]]
variable[row1] assign[=] call[name[list], parameter[call[name[starmap], parameter[name[add], call[name[zip], parameter[name[head], name[tail]]]]]]]
if binary_operation[call[name[len], parameter[name[row0]]] <ast.BitAnd object at 0x7da2590d6b60> constant[1]] begin[:]
call[name[row1].append, parameter[call[name[row0]][<ast.UnaryOp object at 0x7da18f812f80>]]]
if compare[call[name[len], parameter[name[row1]]] equal[==] constant[1]] begin[:]
call[name[self]._index][<ast.Slice object at 0x7da18f812bf0>] assign[=] binary_operation[name[row1] + name[row0]]
name[self]._offset assign[=] constant[1]
return[None]
variable[size] assign[=] binary_operation[constant[2] ** binary_operation[call[name[int], parameter[call[name[log_e], parameter[binary_operation[call[name[len], parameter[name[row1]]] - constant[1]], constant[2]]]]] + constant[1]]]
call[name[row1].extend, parameter[call[name[repeat], parameter[constant[0], binary_operation[name[size] - call[name[len], parameter[name[row1]]]]]]]]
variable[tree] assign[=] list[[<ast.Name object at 0x7da18f8134c0>, <ast.Name object at 0x7da18f8112a0>]]
while compare[call[name[len], parameter[call[name[tree]][<ast.UnaryOp object at 0x7da18f8104f0>]]] greater[>] constant[1]] begin[:]
variable[head] assign[=] call[name[iter], parameter[call[name[tree]][<ast.UnaryOp object at 0x7da18f8117b0>]]]
variable[tail] assign[=] call[name[iter], parameter[name[head]]]
variable[row] assign[=] call[name[list], parameter[call[name[starmap], parameter[name[add], call[name[zip], parameter[name[head], name[tail]]]]]]]
call[name[tree].append, parameter[name[row]]]
call[name[reduce], parameter[name[iadd], call[name[reversed], parameter[name[tree]]], name[self]._index]]
name[self]._offset assign[=] binary_operation[binary_operation[name[size] * constant[2]] - constant[1]] | keyword[def] identifier[_build_index] ( identifier[self] ):
literal[string]
identifier[row0] = identifier[list] ( identifier[map] ( identifier[len] , identifier[self] . identifier[_lists] ))
keyword[if] identifier[len] ( identifier[row0] )== literal[int] :
identifier[self] . identifier[_index] [:]= identifier[row0]
identifier[self] . identifier[_offset] = literal[int]
keyword[return]
identifier[head] = identifier[iter] ( identifier[row0] )
identifier[tail] = identifier[iter] ( identifier[head] )
identifier[row1] = identifier[list] ( identifier[starmap] ( identifier[add] , identifier[zip] ( identifier[head] , identifier[tail] )))
keyword[if] identifier[len] ( identifier[row0] )& literal[int] :
identifier[row1] . identifier[append] ( identifier[row0] [- literal[int] ])
keyword[if] identifier[len] ( identifier[row1] )== literal[int] :
identifier[self] . identifier[_index] [:]= identifier[row1] + identifier[row0]
identifier[self] . identifier[_offset] = literal[int]
keyword[return]
identifier[size] = literal[int] **( identifier[int] ( identifier[log_e] ( identifier[len] ( identifier[row1] )- literal[int] , literal[int] ))+ literal[int] )
identifier[row1] . identifier[extend] ( identifier[repeat] ( literal[int] , identifier[size] - identifier[len] ( identifier[row1] )))
identifier[tree] =[ identifier[row0] , identifier[row1] ]
keyword[while] identifier[len] ( identifier[tree] [- literal[int] ])> literal[int] :
identifier[head] = identifier[iter] ( identifier[tree] [- literal[int] ])
identifier[tail] = identifier[iter] ( identifier[head] )
identifier[row] = identifier[list] ( identifier[starmap] ( identifier[add] , identifier[zip] ( identifier[head] , identifier[tail] )))
identifier[tree] . identifier[append] ( identifier[row] )
identifier[reduce] ( identifier[iadd] , identifier[reversed] ( identifier[tree] ), identifier[self] . identifier[_index] )
identifier[self] . identifier[_offset] = identifier[size] * literal[int] - literal[int] | def _build_index(self):
"""Build an index for indexing the sorted list.
Indexes are represented as binary trees in a dense array notation
similar to a binary heap.
For example, given a _lists representation storing integers:
[0]: 1 2 3
[1]: 4 5
[2]: 6 7 8 9
[3]: 10 11 12 13 14
The first transformation maps the sub-lists by their length. The
first row of the index is the length of the sub-lists.
[0]: 3 2 4 5
Each row after that is the sum of consecutive pairs of the previous row:
[1]: 5 9
[2]: 14
Finally, the index is built by concatenating these lists together:
_index = 14 5 9 3 2 4 5
An offset storing the start of the first row is also stored:
_offset = 3
When built, the index can be used for efficient indexing into the list.
See the comment and notes on self._pos for details.
"""
row0 = list(map(len, self._lists))
if len(row0) == 1:
self._index[:] = row0
self._offset = 0
return # depends on [control=['if'], data=[]]
head = iter(row0)
tail = iter(head)
row1 = list(starmap(add, zip(head, tail)))
if len(row0) & 1:
row1.append(row0[-1]) # depends on [control=['if'], data=[]]
if len(row1) == 1:
self._index[:] = row1 + row0
self._offset = 1
return # depends on [control=['if'], data=[]]
size = 2 ** (int(log_e(len(row1) - 1, 2)) + 1)
row1.extend(repeat(0, size - len(row1)))
tree = [row0, row1]
while len(tree[-1]) > 1:
head = iter(tree[-1])
tail = iter(head)
row = list(starmap(add, zip(head, tail)))
tree.append(row) # depends on [control=['while'], data=[]]
reduce(iadd, reversed(tree), self._index)
self._offset = size * 2 - 1 |
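A standalone sketch reproducing the docstring's worked example, assuming _build_index is available at module level here; the imports mirror what the method uses (log_e as math.log, add/iadd from operator, repeat/starmap from itertools), and the host class is a hypothetical stand-in:

from functools import reduce
from itertools import repeat, starmap
from math import log as log_e
from operator import add, iadd

class _IndexDemo(object):
    _build_index = _build_index  # reuse the method above as-is

    def __init__(self, lists):
        self._lists = lists
        self._index = []
        self._offset = 0

demo = _IndexDemo([[1, 2, 3], [4, 5], [6, 7, 8, 9], [10, 11, 12, 13, 14]])
demo._build_index()
print(demo._index)   # [14, 5, 9, 3, 2, 4, 5]
print(demo._offset)  # 3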
def manager_view(request, managerTitle):
''' View the details of a manager position.
Parameters:
request is an HTTP request
managerTitle is the URL title of the manager.
'''
targetManager = get_object_or_404(Manager, url_title=managerTitle)
if not targetManager.active:
messages.add_message(request, messages.ERROR, MESSAGES['INACTIVE_MANAGER'].format(managerTitle=targetManager.title))
return HttpResponseRedirect(reverse('managers:list_managers'))
else:
return render_to_response('view_manager.html', {
'page_name': "View Manager",
'targetManager': targetManager,
}, context_instance=RequestContext(request)) | def function[manager_view, parameter[request, managerTitle]]:
constant[ View the details of a manager position.
Parameters:
request is an HTTP request
managerTitle is the URL title of the manager.
]
variable[targetManager] assign[=] call[name[get_object_or_404], parameter[name[Manager]]]
if <ast.UnaryOp object at 0x7da1b14e5a80> begin[:]
call[name[messages].add_message, parameter[name[request], name[messages].ERROR, call[call[name[MESSAGES]][constant[INACTIVE_MANAGER]].format, parameter[]]]]
return[call[name[HttpResponseRedirect], parameter[call[name[reverse], parameter[constant[managers:list_managers]]]]]] | keyword[def] identifier[manager_view] ( identifier[request] , identifier[managerTitle] ):
literal[string]
identifier[targetManager] = identifier[get_object_or_404] ( identifier[Manager] , identifier[url_title] = identifier[managerTitle] )
keyword[if] keyword[not] identifier[targetManager] . identifier[active] :
identifier[messages] . identifier[add_message] ( identifier[request] , identifier[messages] . identifier[ERROR] , identifier[MESSAGES] [ literal[string] ]. identifier[format] ( identifier[managerTitle] = identifier[targetManager] . identifier[title] ))
keyword[return] identifier[HttpResponseRedirect] ( identifier[reverse] ( literal[string] ))
keyword[else] :
keyword[return] identifier[render_to_response] ( literal[string] ,{
literal[string] : literal[string] ,
literal[string] : identifier[targetManager] ,
}, identifier[context_instance] = identifier[RequestContext] ( identifier[request] )) | def manager_view(request, managerTitle):
""" View the details of a manager position.
Parameters:
request is an HTTP request
managerTitle is the URL title of the manager.
"""
targetManager = get_object_or_404(Manager, url_title=managerTitle)
if not targetManager.active:
messages.add_message(request, messages.ERROR, MESSAGES['INACTIVE_MANAGER'].format(managerTitle=targetManager.title))
return HttpResponseRedirect(reverse('managers:list_managers')) # depends on [control=['if'], data=[]]
else:
return render_to_response('view_manager.html', {'page_name': 'View Manager', 'targetManager': targetManager}, context_instance=RequestContext(request)) |
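A hedged wiring sketch for the view above, in the old-style Django its render_to_response/RequestContext usage implies; the URL regex and module path are assumptions:

# urls.py (hypothetical), namespaced to match reverse('managers:list_managers'):
from django.conf.urls import url
from managers.views import manager_view

urlpatterns = [
    url(r'^view_manager/(?P<managerTitle>[-\w]+)/$', manager_view,
        name='view_manager'),
]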
def serve(config):
"Serve the app with Gevent"
from gevent.pywsgi import WSGIServer
app = make_app(config=config)
host = app.config.get("HOST", '127.0.0.1')
port = app.config.get("PORT", 5000)
http_server = WSGIServer((host, port), app)
http_server.serve_forever() | def function[serve, parameter[config]]:
constant[Serve the app with Gevent]
from relative_module[gevent.pywsgi] import module[WSGIServer]
variable[app] assign[=] call[name[make_app], parameter[]]
variable[host] assign[=] call[name[app].config.get, parameter[constant[HOST], constant[127.0.0.1]]]
variable[port] assign[=] call[name[app].config.get, parameter[constant[PORT], constant[5000]]]
variable[http_server] assign[=] call[name[WSGIServer], parameter[tuple[[<ast.Name object at 0x7da1b130b580>, <ast.Name object at 0x7da1b130bbe0>]], name[app]]]
call[name[http_server].serve_forever, parameter[]] | keyword[def] identifier[serve] ( identifier[config] ):
literal[string]
keyword[from] identifier[gevent] . identifier[pywsgi] keyword[import] identifier[WSGIServer]
identifier[app] = identifier[make_app] ( identifier[config] = identifier[config] )
identifier[host] = identifier[app] . identifier[config] . identifier[get] ( literal[string] , literal[string] )
identifier[port] = identifier[app] . identifier[config] . identifier[get] ( literal[string] , literal[int] )
identifier[http_server] = identifier[WSGIServer] (( identifier[host] , identifier[port] ), identifier[app] )
identifier[http_server] . identifier[serve_forever] () | def serve(config):
"""Serve the app with Gevent"""
from gevent.pywsgi import WSGIServer
app = make_app(config=config)
host = app.config.get('HOST', '127.0.0.1')
port = app.config.get('PORT', 5000)
http_server = WSGIServer((host, port), app)
http_server.serve_forever() |
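An illustrative call for serve, assuming make_app (not shown) copies the mapping into app.config so the HOST/PORT lookups above find it:

if __name__ == '__main__':
    # Missing keys fall back to 127.0.0.1:5000 inside serve().
    serve({'HOST': '0.0.0.0', 'PORT': 8000})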
def getVoIPchanStats(self, chantype,
codec_list=('ulaw', 'alaw', 'gsm', 'g729')):
"""Query Asterisk Manager Interface for SIP / IAX2 Channel / Codec Stats.
CLI Commands - sip show channels
iax2 show channels
@param chantype: Must be 'sip' or 'iax2'.
@param codec_list: List of codec names to parse.
(Codecs not in the list are summed up to the other
count.)
@return: Dictionary of statistics counters for Active VoIP
Channels.
"""
chan = chantype.lower()
if not self.hasChannelType(chan):
return None
if chan == 'iax2':
cmd = "iax2 show channels"
elif chan == 'sip':
cmd = "sip show channels"
else:
raise AttributeError("Invalid channel type in query for Channel Stats.")
cmdresp = self.executeCommand(cmd)
lines = cmdresp.splitlines()
headers = re.split('\s\s+', lines[0])
try:
idx = headers.index('Format')
except ValueError:
try:
idx = headers.index('Form')
except:
raise Exception("Error in parsing header line of %s channel stats."
% chan)
codec_list = tuple(codec_list) + ('other', 'none')
info_dict = dict([(k,0) for k in codec_list])
for line in lines[1:-1]:
codec = None
cols = re.split('\s\s+', line)
colcodec = cols[idx]
mobj = re.match('0x\w+\s\((\w+)\)$', colcodec)
if mobj:
codec = mobj.group(1).lower()
elif re.match('\w+$', colcodec):
codec = colcodec.lower()
if codec:
if codec in info_dict:
info_dict[codec] += 1
elif codec == 'nothing' or codec[0:4] == 'unkn':
info_dict['none'] += 1
else:
info_dict['other'] += 1
return info_dict | def function[getVoIPchanStats, parameter[self, chantype, codec_list]]:
constant[Query Asterisk Manager Interface for SIP / IAX2 Channel / Codec Stats.
CLI Commands - sip show channels
iax2 show channels
@param chantype: Must be 'sip' or 'iax2'.
@param codec_list: List of codec names to parse.
(Codecs not in the list are summed up to the other
count.)
@return: Dictionary of statistics counters for Active VoIP
Channels.
]
variable[chan] assign[=] call[name[chantype].lower, parameter[]]
if <ast.UnaryOp object at 0x7da1b10dedd0> begin[:]
return[constant[None]]
if compare[name[chan] equal[==] constant[iax2]] begin[:]
variable[cmd] assign[=] constant[iax2 show channels]
variable[cmdresp] assign[=] call[name[self].executeCommand, parameter[name[cmd]]]
variable[lines] assign[=] call[name[cmdresp].splitlines, parameter[]]
variable[headers] assign[=] call[name[re].split, parameter[constant[\s\s+], call[name[lines]][constant[0]]]]
<ast.Try object at 0x7da1b10b1a20>
variable[codec_list] assign[=] binary_operation[call[name[tuple], parameter[name[codec_list]]] + tuple[[<ast.Constant object at 0x7da1b10b1570>, <ast.Constant object at 0x7da1b10b2920>]]]
variable[info_dict] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da1b10b0670>]]
for taget[name[line]] in starred[call[name[lines]][<ast.Slice object at 0x7da1b10b2a40>]] begin[:]
variable[codec] assign[=] constant[None]
variable[cols] assign[=] call[name[re].split, parameter[constant[\s\s+], name[line]]]
variable[colcodec] assign[=] call[name[cols]][name[idx]]
variable[mobj] assign[=] call[name[re].match, parameter[constant[0x\w+\s\((\w+)\)$], name[colcodec]]]
if name[mobj] begin[:]
variable[codec] assign[=] call[call[name[mobj].group, parameter[constant[1]]].lower, parameter[]]
if name[codec] begin[:]
if compare[name[codec] in name[info_dict]] begin[:]
<ast.AugAssign object at 0x7da1b10b0e50>
return[name[info_dict]] | keyword[def] identifier[getVoIPchanStats] ( identifier[self] , identifier[chantype] ,
identifier[codec_list] =( literal[string] , literal[string] , literal[string] , literal[string] )):
literal[string]
identifier[chan] = identifier[chantype] . identifier[lower] ()
keyword[if] keyword[not] identifier[self] . identifier[hasChannelType] ( identifier[chan] ):
keyword[return] keyword[None]
keyword[if] identifier[chan] == literal[string] :
identifier[cmd] = literal[string]
keyword[elif] identifier[chan] == literal[string] :
identifier[cmd] = literal[string]
keyword[else] :
keyword[raise] identifier[AttributeError] ( literal[string] )
identifier[cmdresp] = identifier[self] . identifier[executeCommand] ( identifier[cmd] )
identifier[lines] = identifier[cmdresp] . identifier[splitlines] ()
identifier[headers] = identifier[re] . identifier[split] ( literal[string] , identifier[lines] [ literal[int] ])
keyword[try] :
identifier[idx] = identifier[headers] . identifier[index] ( literal[string] )
keyword[except] identifier[ValueError] :
keyword[try] :
identifier[idx] = identifier[headers] . identifier[index] ( literal[string] )
keyword[except] :
keyword[raise] identifier[Exception] ( literal[string]
% identifier[chan] )
identifier[codec_list] = identifier[tuple] ( identifier[codec_list] )+( literal[string] , literal[string] )
identifier[info_dict] = identifier[dict] ([( identifier[k] , literal[int] ) keyword[for] identifier[k] keyword[in] identifier[codec_list] ])
keyword[for] identifier[line] keyword[in] identifier[lines] [ literal[int] :- literal[int] ]:
identifier[codec] = keyword[None]
identifier[cols] = identifier[re] . identifier[split] ( literal[string] , identifier[line] )
identifier[colcodec] = identifier[cols] [ identifier[idx] ]
identifier[mobj] = identifier[re] . identifier[match] ( literal[string] , identifier[colcodec] )
keyword[if] identifier[mobj] :
identifier[codec] = identifier[mobj] . identifier[group] ( literal[int] ). identifier[lower] ()
keyword[elif] identifier[re] . identifier[match] ( literal[string] , identifier[colcodec] ):
identifier[codec] = identifier[colcodec] . identifier[lower] ()
keyword[if] identifier[codec] :
keyword[if] identifier[codec] keyword[in] identifier[info_dict] :
identifier[info_dict] [ identifier[codec] ]+= literal[int]
keyword[elif] identifier[codec] == literal[string] keyword[or] identifier[codec] [ literal[int] : literal[int] ]== literal[string] :
identifier[info_dict] [ literal[string] ]+= literal[int]
keyword[else] :
identifier[info_dict] [ literal[string] ]+= literal[int]
keyword[return] identifier[info_dict] | def getVoIPchanStats(self, chantype, codec_list=('ulaw', 'alaw', 'gsm', 'g729')):
"""Query Asterisk Manager Interface for SIP / IAX2 Channel / Codec Stats.
CLI Commands - sip show channels
iax2 show channels
@param chantype: Must be 'sip' or 'iax2'.
@param codec_list: List of codec names to parse.
(Codecs not in the list are summed up to the other
count.)
@return: Dictionary of statistics counters for Active VoIP
Channels.
"""
chan = chantype.lower()
if not self.hasChannelType(chan):
return None # depends on [control=['if'], data=[]]
if chan == 'iax2':
cmd = 'iax2 show channels' # depends on [control=['if'], data=[]]
elif chan == 'sip':
cmd = 'sip show channels' # depends on [control=['if'], data=[]]
else:
raise AttributeError('Invalid channel type in query for Channel Stats.')
cmdresp = self.executeCommand(cmd)
lines = cmdresp.splitlines()
headers = re.split('\\s\\s+', lines[0])
try:
idx = headers.index('Format') # depends on [control=['try'], data=[]]
except ValueError:
try:
idx = headers.index('Form') # depends on [control=['try'], data=[]]
except:
raise Exception('Error in parsing header line of %s channel stats.' % chan) # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
codec_list = tuple(codec_list) + ('other', 'none')
info_dict = dict([(k, 0) for k in codec_list])
for line in lines[1:-1]:
codec = None
cols = re.split('\\s\\s+', line)
colcodec = cols[idx]
mobj = re.match('0x\\w+\\s\\((\\w+)\\)$', colcodec)
if mobj:
codec = mobj.group(1).lower() # depends on [control=['if'], data=[]]
elif re.match('\\w+$', colcodec):
codec = colcodec.lower() # depends on [control=['if'], data=[]]
if codec:
if codec in info_dict:
info_dict[codec] += 1 # depends on [control=['if'], data=['codec', 'info_dict']]
elif codec == 'nothing' or codec[0:4] == 'unkn':
info_dict['none'] += 1 # depends on [control=['if'], data=[]]
else:
info_dict['other'] += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return info_dict |
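A hedged illustration of the counter dict getVoIPchanStats returns for the default codec list; 'mgr' stands in for a connected manager object, and the counts are invented for the example:

stats = mgr.getVoIPchanStats('sip')
if stats is not None:  # None means the channel type is not loaded
    # e.g. {'ulaw': 12, 'alaw': 0, 'gsm': 1, 'g729': 3, 'other': 2, 'none': 0}
    for codec, count in sorted(stats.items()):
        print('%-6s %d' % (codec, count))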
def _unbind_topics(self, topics):
"""Unsubscribe to all of the topics we needed for communication with device
Args:
topics (MQTTTopicValidator): The topic validator for this device that
we have connected to.
"""
self.client.unsubscribe(topics.status)
self.client.unsubscribe(topics.tracing)
self.client.unsubscribe(topics.streaming)
self.client.unsubscribe(topics.response) | def function[_unbind_topics, parameter[self, topics]]:
constant[Unsubscribe to all of the topics we needed for communication with device
Args:
topics (MQTTTopicValidator): The topic validator for this device that
we have connected to.
]
call[name[self].client.unsubscribe, parameter[name[topics].status]]
call[name[self].client.unsubscribe, parameter[name[topics].tracing]]
call[name[self].client.unsubscribe, parameter[name[topics].streaming]]
call[name[self].client.unsubscribe, parameter[name[topics].response]] | keyword[def] identifier[_unbind_topics] ( identifier[self] , identifier[topics] ):
literal[string]
identifier[self] . identifier[client] . identifier[unsubscribe] ( identifier[topics] . identifier[status] )
identifier[self] . identifier[client] . identifier[unsubscribe] ( identifier[topics] . identifier[tracing] )
identifier[self] . identifier[client] . identifier[unsubscribe] ( identifier[topics] . identifier[streaming] )
identifier[self] . identifier[client] . identifier[unsubscribe] ( identifier[topics] . identifier[response] ) | def _unbind_topics(self, topics):
"""Unsubscribe to all of the topics we needed for communication with device
Args:
topics (MQTTTopicValidator): The topic validator for this device that
we have connected to.
"""
self.client.unsubscribe(topics.status)
self.client.unsubscribe(topics.tracing)
self.client.unsubscribe(topics.streaming)
self.client.unsubscribe(topics.response) |
def insert(self, index, value):
'''Insert a node in-place. It is highly suggested that you do not
use this method. Use assoc instead'''
newnode = LookupTreeNode(index, value)
level = 0
node = self.root
while True:
ind = _getbits(newnode.index, level)
level += 1
child = node.children[ind]
if child is None or child.index == newnode.index:
if child:
assert child.value == newnode.value
node.children[ind] = newnode
break
elif child.index == _root_index:
# This is a branch
node = child
else:
branch = LookupTreeNode()
nind = _getbits(newnode.index, level)
cind = _getbits(child.index, level)
node.children[ind] = branch
# Life gets tricky when...
if nind == cind:
branch.children[cind] = child
# recurse
node = branch
else:
branch.children[nind] = newnode
branch.children[cind] = child
break | def function[insert, parameter[self, index, value]]:
constant[Insert a node in-place. It is highly suggested that you do not
use this method. Use assoc instead]
variable[newnode] assign[=] call[name[LookupTreeNode], parameter[name[index], name[value]]]
variable[level] assign[=] constant[0]
variable[node] assign[=] name[self].root
while constant[True] begin[:]
variable[ind] assign[=] call[name[_getbits], parameter[name[newnode].index, name[level]]]
<ast.AugAssign object at 0x7da18f09c6a0>
variable[child] assign[=] call[name[node].children][name[ind]]
if <ast.BoolOp object at 0x7da18f09ec20> begin[:]
if name[child] begin[:]
assert[compare[name[child].value equal[==] name[newnode].value]]
call[name[node].children][name[ind]] assign[=] name[newnode]
break | keyword[def] identifier[insert] ( identifier[self] , identifier[index] , identifier[value] ):
literal[string]
identifier[newnode] = identifier[LookupTreeNode] ( identifier[index] , identifier[value] )
identifier[level] = literal[int]
identifier[node] = identifier[self] . identifier[root]
keyword[while] keyword[True] :
identifier[ind] = identifier[_getbits] ( identifier[newnode] . identifier[index] , identifier[level] )
identifier[level] += literal[int]
identifier[child] = identifier[node] . identifier[children] [ identifier[ind] ]
keyword[if] identifier[child] keyword[is] keyword[None] keyword[or] identifier[child] . identifier[index] == identifier[newnode] . identifier[index] :
keyword[if] identifier[child] :
keyword[assert] identifier[child] . identifier[value] == identifier[newnode] . identifier[value]
identifier[node] . identifier[children] [ identifier[ind] ]= identifier[newnode]
keyword[break]
keyword[elif] identifier[child] . identifier[index] == identifier[_root_index] :
identifier[node] = identifier[child]
keyword[else] :
identifier[branch] = identifier[LookupTreeNode] ()
identifier[nind] = identifier[_getbits] ( identifier[newnode] . identifier[index] , identifier[level] )
identifier[cind] = identifier[_getbits] ( identifier[child] . identifier[index] , identifier[level] )
identifier[node] . identifier[children] [ identifier[ind] ]= identifier[branch]
keyword[if] identifier[nind] == identifier[cind] :
identifier[branch] . identifier[children] [ identifier[cind] ]= identifier[child]
identifier[node] = identifier[branch]
keyword[else] :
identifier[branch] . identifier[children] [ identifier[nind] ]= identifier[newnode]
identifier[branch] . identifier[children] [ identifier[cind] ]= identifier[child]
keyword[break] | def insert(self, index, value):
"""Insert a node in-place. It is highly suggested that you do not
use this method. Use assoc instead"""
newnode = LookupTreeNode(index, value)
level = 0
node = self.root
while True:
ind = _getbits(newnode.index, level)
level += 1
child = node.children[ind]
if child is None or child.index == newnode.index:
if child:
assert child.value == newnode.value # depends on [control=['if'], data=[]]
node.children[ind] = newnode
break # depends on [control=['if'], data=[]]
elif child.index == _root_index:
# This is a branch
node = child # depends on [control=['if'], data=[]]
else:
branch = LookupTreeNode()
nind = _getbits(newnode.index, level)
cind = _getbits(child.index, level)
node.children[ind] = branch
# Life gets tricky when...
if nind == cind:
branch.children[cind] = child
# recurse
node = branch # depends on [control=['if'], data=['cind']]
else:
branch.children[nind] = newnode
branch.children[cind] = child
break # depends on [control=['while'], data=[]] |
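The insert method above leans on three names defined elsewhere; a hedged reconstruction consistent with how they are used (branch nodes carry the sentinel _root_index, _getbits slices the key into fixed-width chunks per level; the 5-bit width is an assumption, and keys are assumed non-negative):

_BITS = 5
_root_index = -1  # sentinel index marking internal branch nodes

def _getbits(index, level):
    # Chunk of the key consumed at the given depth of the tree.
    return (index >> (_BITS * level)) & ((1 << _BITS) - 1)

class LookupTreeNode(object):
    __slots__ = ('index', 'value', 'children')

    def __init__(self, index=_root_index, value=None):
        # A node built with no arguments is a branch; leaves carry a
        # real key in `index` and its payload in `value`.
        self.index = index
        self.value = value
        self.children = [None] * (1 << _BITS)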
def before_update(self, context, current, resource):
"""
If the resource has changed we try to generate a budget data
package, but if it hasn't then we don't do anything
"""
# Return if the budget data package fields have not been filled in
if not self.are_budget_data_package_fields_filled_in(resource):
return
if resource.get('upload', '') == '':
# If it isn't an upload we check if it's the same url
if current['url'] == resource['url']:
# Return if it's the same
return
else:
self.data.load(resource['url'])
else:
self.data.load(resource['upload'].file)
self.generate_budget_data_package(resource) | def function[before_update, parameter[self, context, current, resource]]:
constant[
If the resource has changed we try to generate a budget data
package, but if it hasn't then we don't do anything
]
if <ast.UnaryOp object at 0x7da20cabc550> begin[:]
return[None]
if compare[call[name[resource].get, parameter[constant[upload], constant[]]] equal[==] constant[]] begin[:]
if compare[call[name[current]][constant[url]] equal[==] call[name[resource]][constant[url]]] begin[:]
return[None]
call[name[self].generate_budget_data_package, parameter[name[resource]]] | keyword[def] identifier[before_update] ( identifier[self] , identifier[context] , identifier[current] , identifier[resource] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[are_budget_data_package_fields_filled_in] ( identifier[resource] ):
keyword[return]
keyword[if] identifier[resource] . identifier[get] ( literal[string] , literal[string] )== literal[string] :
keyword[if] identifier[current] [ literal[string] ]== identifier[resource] [ literal[string] ]:
keyword[return]
keyword[else] :
identifier[self] . identifier[data] . identifier[load] ( identifier[resource] [ literal[string] ])
keyword[else] :
identifier[self] . identifier[data] . identifier[load] ( identifier[resource] [ literal[string] ]. identifier[file] )
identifier[self] . identifier[generate_budget_data_package] ( identifier[resource] ) | def before_update(self, context, current, resource):
"""
If the resource has changed we try to generate a budget data
package, but if it hasn't then we don't do anything
"""
# Return if the budget data package fields have not been filled in
if not self.are_budget_data_package_fields_filled_in(resource):
return # depends on [control=['if'], data=[]]
if resource.get('upload', '') == '':
# If it isn't an upload we check if it's the same url
if current['url'] == resource['url']:
# Return if it's the same
return # depends on [control=['if'], data=[]]
else:
self.data.load(resource['url']) # depends on [control=['if'], data=[]]
else:
self.data.load(resource['upload'].file)
self.generate_budget_data_package(resource) |
def build(self):
"""
The decoder computational graph consists of five components:
(1) the input node `decoder_input`
(2) the embedding node `decoder_embed`
(3) the recurrent (RNN) part `decoder_rnn`
(4) the output of the decoder RNN `decoder_output`
(5) the classification output layer `decoder_dense`
"""
# Grab hyperparameters from self.config:
hidden_dim = self.config['encoding-layer-width']
recurrent_unit = self.config['recurrent-unit-type']
bidirectional = False #self.config['encoding-layer-bidirectional']
vocab_size = self.data.properties.vocab_size
embedding_dim = math.ceil(math.log(vocab_size, 2)) # self.config['embedding-dim']
input_length = self.data.properties['max-utterance-length'] + 1
# Assemble the network components:
decoder_input = Input(shape=(None,))
decoder_embed = Embedding(vocab_size, embedding_dim, mask_zero=True)(decoder_input) #, input_length=input_length)(decoder_input)
if recurrent_unit == 'lstm':
decoder_rnn = LSTM(hidden_dim, return_sequences=True, return_state=True)
decoder_output, decoder_h, decoder_c = decoder_rnn(decoder_embed,
initial_state=self.encoder.encoder_hidden_state)
elif recurrent_unit == 'gru':
decoder_rnn = GRU(hidden_dim, return_sequences=True, return_state=True)
decoder_output, _ = decoder_rnn(decoder_embed,
initial_state=self.encoder.encoder_hidden_state)
else:
raise Exception('Invalid recurrent unit type: {}'.format(recurrent_unit))
# make the RNN component bidirectional, if desired
if bidirectional:
decoder_rnn = Bidirectional(decoder_rnn, merge_mode='ave')
decoder_dense = Dense(vocab_size, activation='softmax')
decoder_output = decoder_dense(decoder_output)
# save the four Decoder components as class state
self.decoder_input = decoder_input
self.decoder_embed = decoder_embed
self.decoder_rnn = decoder_rnn
self.decoder_dense = decoder_dense
self.decoder_output = decoder_output
return | def function[build, parameter[self]]:
constant[
The decoder computational graph consists of three components:
(1) the input node `decoder_input`
(2) the embedding node `decoder_embed`
(3) the recurrent (RNN) part `decoder_rnn`
(4) the output of the decoder RNN `decoder_output`
(5) the classification output layer `decoder_dense`
]
variable[hidden_dim] assign[=] call[name[self].config][constant[encoding-layer-width]]
variable[recurrent_unit] assign[=] call[name[self].config][constant[recurrent-unit-type]]
variable[bidirectional] assign[=] constant[False]
variable[vocab_size] assign[=] name[self].data.properties.vocab_size
variable[embedding_dim] assign[=] call[name[math].ceil, parameter[call[name[math].log, parameter[name[vocab_size], constant[2]]]]]
variable[input_length] assign[=] binary_operation[call[name[self].data.properties][constant[max-utterance-length]] + constant[1]]
variable[decoder_input] assign[=] call[name[Input], parameter[]]
variable[decoder_embed] assign[=] call[call[name[Embedding], parameter[name[vocab_size], name[embedding_dim]]], parameter[name[decoder_input]]]
if compare[name[recurrent_unit] equal[==] constant[lstm]] begin[:]
variable[decoder_rnn] assign[=] call[name[LSTM], parameter[name[hidden_dim]]]
<ast.Tuple object at 0x7da1b16420e0> assign[=] call[name[decoder_rnn], parameter[name[decoder_embed]]]
if name[bidirectional] begin[:]
variable[decoder_rnn] assign[=] call[name[Bidirectional], parameter[name[decoder_rnn]]]
variable[decoder_dense] assign[=] call[name[Dense], parameter[name[vocab_size]]]
variable[decoder_output] assign[=] call[name[decoder_dense], parameter[name[decoder_output]]]
name[self].decoder_input assign[=] name[decoder_input]
name[self].decoder_embed assign[=] name[decoder_embed]
name[self].decoder_rnn assign[=] name[decoder_rnn]
name[self].decoder_dense assign[=] name[decoder_dense]
name[self].decoder_output assign[=] name[decoder_output]
return[None] | keyword[def] identifier[build] ( identifier[self] ):
literal[string]
identifier[hidden_dim] = identifier[self] . identifier[config] [ literal[string] ]
identifier[recurrent_unit] = identifier[self] . identifier[config] [ literal[string] ]
identifier[bidirectional] = keyword[False]
identifier[vocab_size] = identifier[self] . identifier[data] . identifier[properties] . identifier[vocab_size]
identifier[embedding_dim] = identifier[math] . identifier[ceil] ( identifier[math] . identifier[log] ( identifier[vocab_size] , literal[int] ))
identifier[input_length] = identifier[self] . identifier[data] . identifier[properties] [ literal[string] ]+ literal[int]
identifier[decoder_input] = identifier[Input] ( identifier[shape] =( keyword[None] ,))
identifier[decoder_embed] = identifier[Embedding] ( identifier[vocab_size] , identifier[embedding_dim] , identifier[mask_zero] = keyword[True] )( identifier[decoder_input] )
keyword[if] identifier[recurrent_unit] == literal[string] :
identifier[decoder_rnn] = identifier[LSTM] ( identifier[hidden_dim] , identifier[return_sequences] = keyword[True] , identifier[return_state] = keyword[True] )
identifier[decoder_output] , identifier[decoder_h] , identifier[decoder_c] = identifier[decoder_rnn] ( identifier[decoder_embed] ,
identifier[initial_state] = identifier[self] . identifier[encoder] . identifier[encoder_hidden_state] )
keyword[elif] identifier[recurrent_unit] == literal[string] :
identifier[decoder_rnn] = identifier[GRU] ( identifier[hidden_dim] , identifier[return_sequences] = keyword[True] , identifier[return_state] = keyword[True] )
identifier[decoder_output] , identifier[_] = identifier[decoder_rnn] ( identifier[decoder_embed] ,
identifier[initial_state] = identifier[self] . identifier[encoder] . identifier[encoder_hidden_state] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[recurrent_unit] ))
keyword[if] identifier[bidirectional] :
identifier[decoder_rnn] = identifier[Bidirectional] ( identifier[decoder_rnn] , identifier[merge_mode] = literal[string] )
identifier[decoder_dense] = identifier[Dense] ( identifier[vocab_size] , identifier[activation] = literal[string] )
identifier[decoder_output] = identifier[decoder_dense] ( identifier[decoder_output] )
identifier[self] . identifier[decoder_input] = identifier[decoder_input]
identifier[self] . identifier[decoder_embed] = identifier[decoder_embed]
identifier[self] . identifier[decoder_rnn] = identifier[decoder_rnn]
identifier[self] . identifier[decoder_dense] = identifier[decoder_dense]
identifier[self] . identifier[decoder_output] = identifier[decoder_output]
keyword[return] | def build(self):
"""
The decoder computational graph consists of three components:
(1) the input node `decoder_input`
(2) the embedding node `decoder_embed`
(3) the recurrent (RNN) part `decoder_rnn`
(4) the output of the decoder RNN `decoder_output`
(5) the classification output layer `decoder_dense`
"""
# Grab hyperparameters from self.config:
hidden_dim = self.config['encoding-layer-width']
recurrent_unit = self.config['recurrent-unit-type']
bidirectional = False #self.config['encoding-layer-bidirectional']
vocab_size = self.data.properties.vocab_size
embedding_dim = math.ceil(math.log(vocab_size, 2)) # self.config['embedding-dim']
input_length = self.data.properties['max-utterance-length'] + 1
# Assemble the network components:
decoder_input = Input(shape=(None,))
decoder_embed = Embedding(vocab_size, embedding_dim, mask_zero=True)(decoder_input) #, input_length=input_length)(decoder_input)
if recurrent_unit == 'lstm':
decoder_rnn = LSTM(hidden_dim, return_sequences=True, return_state=True)
(decoder_output, decoder_h, decoder_c) = decoder_rnn(decoder_embed, initial_state=self.encoder.encoder_hidden_state) # depends on [control=['if'], data=[]]
elif recurrent_unit == 'gru':
decoder_rnn = GRU(hidden_dim, return_sequences=True, return_state=True)
(decoder_output, _) = decoder_rnn(decoder_embed, initial_state=self.encoder.encoder_hidden_state) # depends on [control=['if'], data=[]]
else:
raise Exception('Invalid recurrent unit type: {}'.format(recurrent_unit))
# make the RNN component bidirectional, if desired
if bidirectional:
decoder_rnn = Bidirectional(decoder_rnn, merge_mode='ave') # depends on [control=['if'], data=[]]
decoder_dense = Dense(vocab_size, activation='softmax')
decoder_output = decoder_dense(decoder_output)
# save the four Decoder components as class state
self.decoder_input = decoder_input
self.decoder_embed = decoder_embed
self.decoder_rnn = decoder_rnn
self.decoder_dense = decoder_dense
self.decoder_output = decoder_output
return |
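A hedged sketch of tying the saved decoder pieces into a trainable teacher-forcing model; the encoder attribute name encoder_input is an assumption inferred from the encoder_hidden_state reference above, and build_training_model is a hypothetical helper on the same class:

from keras.models import Model

def build_training_model(self):
    # Encoder tokens and shifted decoder tokens go in; the softmax over
    # the vocabulary comes out.
    model = Model([self.encoder.encoder_input, self.decoder_input],
                  self.decoder_output)
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
    return model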
def pos(self, element = None):
''' Tries to determine the part of speech. '''
tags = []
if element:
if element.startswith(('der', 'die', 'das')):
tags.append('NN')
if ' VERB' in element:
tags.append('VB')
if ' ADJ' in element:
tags.append('JJ')
else:
for element in self.elements:
if self.word in unicode(element):
return self.pos(element)
return tags | def function[pos, parameter[self, element]]:
constant[ Tries to decide about the part of speech. ]
variable[tags] assign[=] list[[]]
if name[element] begin[:]
if call[name[element].startswith, parameter[tuple[[<ast.Constant object at 0x7da1b10e44c0>, <ast.Constant object at 0x7da1b10e4df0>, <ast.Constant object at 0x7da1b10e5c60>]]]] begin[:]
call[name[tags].append, parameter[constant[NN]]]
if compare[constant[ VERB] in name[element]] begin[:]
call[name[tags].append, parameter[constant[VB]]]
if compare[constant[ ADJ] in name[element]] begin[:]
call[name[tags].append, parameter[constant[JJ]]]
return[name[tags]] | keyword[def] identifier[pos] ( identifier[self] , identifier[element] = keyword[None] ):
literal[string]
identifier[tags] =[]
keyword[if] identifier[element] :
keyword[if] identifier[element] . identifier[startswith] (( literal[string] , literal[string] , literal[string] )):
identifier[tags] . identifier[append] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[element] :
identifier[tags] . identifier[append] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[element] :
identifier[tags] . identifier[append] ( literal[string] )
keyword[else] :
keyword[for] identifier[element] keyword[in] identifier[self] . identifier[elements] :
keyword[if] identifier[self] . identifier[word] keyword[in] identifier[unicode] ( identifier[element] ):
keyword[return] identifier[self] . identifier[pos] ( identifier[element] )
keyword[return] identifier[tags] | def pos(self, element=None):
""" Tries to decide about the part of speech. """
tags = []
if element:
if element.startswith(('der', 'die', 'das')):
tags.append('NN') # depends on [control=['if'], data=[]]
if ' VERB' in element:
tags.append('VB') # depends on [control=['if'], data=[]]
if ' ADJ' in element:
tags.append('JJ') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
for element in self.elements:
if self.word in unicode(element):
return self.pos(element) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['element']]
return tags |
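Illustrative behaviour of pos, with entry strings in the dictionary-entry format the checks above imply (German articles, and ' VERB'/' ADJ' markers preceded by whitespace); 'obj' stands in for an instance of the owning class, and the example words are invented:

for entry in ('der Hund', 'laufen  VERB', 'schnell  ADJ'):
    print(entry, '->', obj.pos(entry))
# der Hund -> ['NN']        (definite article => noun)
# laufen  VERB -> ['VB']
# schnell  ADJ -> ['JJ']
# obj.pos() with no argument scans self.elements for self.word and
# recurses into the matching element.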
def create_generic_instances(self, instances):
"""
Generalize each (single) Istio instance into two OpenMetricsBaseCheck instances
"""
generic_instances = []
for instance in instances:
istio_mesh_instance = self._create_istio_mesh_instance(instance)
process_mixer_instance = self._create_process_mixer_instance(instance)
generic_instances.extend([istio_mesh_instance, process_mixer_instance])
return generic_instances | def function[create_generic_instances, parameter[self, instances]]:
constant[
Generalize each (single) Istio instance into two OpenMetricsBaseCheck instances
]
variable[generic_instances] assign[=] list[[]]
for taget[name[instance]] in starred[name[instances]] begin[:]
variable[istio_mesh_instance] assign[=] call[name[self]._create_istio_mesh_instance, parameter[name[instance]]]
variable[process_mixer_instance] assign[=] call[name[self]._create_process_mixer_instance, parameter[name[instance]]]
call[name[generic_instances].extend, parameter[list[[<ast.Name object at 0x7da207f03b80>, <ast.Name object at 0x7da207f013c0>]]]]
return[name[generic_instances]] | keyword[def] identifier[create_generic_instances] ( identifier[self] , identifier[instances] ):
literal[string]
identifier[generic_instances] =[]
keyword[for] identifier[instance] keyword[in] identifier[instances] :
identifier[istio_mesh_instance] = identifier[self] . identifier[_create_istio_mesh_instance] ( identifier[instance] )
identifier[process_mixer_instance] = identifier[self] . identifier[_create_process_mixer_instance] ( identifier[instance] )
identifier[generic_instances] . identifier[extend] ([ identifier[istio_mesh_instance] , identifier[process_mixer_instance] ])
keyword[return] identifier[generic_instances] | def create_generic_instances(self, instances):
"""
Generalize each (single) Istio instance into two OpenMetricsBaseCheck instances
"""
generic_instances = []
for instance in instances:
istio_mesh_instance = self._create_istio_mesh_instance(instance)
process_mixer_instance = self._create_process_mixer_instance(instance)
generic_instances.extend([istio_mesh_instance, process_mixer_instance]) # depends on [control=['for'], data=['instance']]
return generic_instances |
def downgrade():
"""Downgrade database."""
# table ObjectVersionTag
op.drop_table('files_objecttags')
# table ObjectVersion: modify primary_key
if op.get_context().dialect.name == 'mysql':
op.execute(
'ALTER TABLE files_object '
'DROP INDEX uq_files_object_bucket_id, '
'DROP PRIMARY KEY, '
'ADD PRIMARY KEY(`bucket_id`, `key`, `version_id`)')
else:
op.drop_constraint(
'pk_files_object', 'files_object', type_='primary')
op.create_primary_key('pk_files_object', 'files_object',
['bucket_id', 'key', 'version_id']) | def function[downgrade, parameter[]]:
constant[Downgrade database.]
call[name[op].drop_table, parameter[constant[files_objecttags]]]
if compare[call[name[op].get_context, parameter[]].dialect.name equal[==] constant[mysql]] begin[:]
call[name[op].execute, parameter[constant[ALTER TABLE files_object DROP INDEX uq_files_object_bucket_id, DROP PRIMARY KEY, ADD PRIMARY KEY(`bucket_id`, `key`, `version_id`)]]] | keyword[def] identifier[downgrade] ():
literal[string]
identifier[op] . identifier[drop_table] ( literal[string] )
keyword[if] identifier[op] . identifier[get_context] (). identifier[dialect] . identifier[name] == literal[string] :
identifier[op] . identifier[execute] (
literal[string]
literal[string]
literal[string]
literal[string] )
keyword[else] :
identifier[op] . identifier[drop_constraint] (
literal[string] , literal[string] , identifier[type_] = literal[string] )
identifier[op] . identifier[create_primary_key] ( literal[string] , literal[string] ,
[ literal[string] , literal[string] , literal[string] ]) | def downgrade():
"""Downgrade database."""
# table ObjectVersionTag
op.drop_table('files_objecttags')
# table ObjectVersion: modify primary_key
if op.get_context().dialect.name == 'mysql':
op.execute('ALTER TABLE files_object DROP INDEX uq_files_object_bucket_id, DROP PRIMARY KEY, ADD PRIMARY KEY(`bucket_id`, `key`, `version_id`)') # depends on [control=['if'], data=[]]
else:
op.drop_constraint('pk_files_object', 'files_object', type_='primary')
op.create_primary_key('pk_files_object', 'files_object', ['bucket_id', 'key', 'version_id']) |
def pixels(self):
# type: () -> Pixels
"""
:return list: RGB tuples.
"""
if not self.__pixels:
rgb_tuples = zip(
self.raw[2::4], self.raw[1::4], self.raw[0::4]
) # type: Iterator[Pixel]
self.__pixels = list(zip(*[iter(rgb_tuples)] * self.width)) # type: ignore
return self.__pixels | def function[pixels, parameter[self]]:
constant[
:return list: RGB tuples.
]
if <ast.UnaryOp object at 0x7da1b07e2800> begin[:]
variable[rgb_tuples] assign[=] call[name[zip], parameter[call[name[self].raw][<ast.Slice object at 0x7da1b07e15d0>], call[name[self].raw][<ast.Slice object at 0x7da1b07e2830>], call[name[self].raw][<ast.Slice object at 0x7da1b07e2f20>]]]
name[self].__pixels assign[=] call[name[list], parameter[call[name[zip], parameter[<ast.Starred object at 0x7da1b07e11e0>]]]]
return[name[self].__pixels] | keyword[def] identifier[pixels] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[__pixels] :
identifier[rgb_tuples] = identifier[zip] (
identifier[self] . identifier[raw] [ literal[int] :: literal[int] ], identifier[self] . identifier[raw] [ literal[int] :: literal[int] ], identifier[self] . identifier[raw] [ literal[int] :: literal[int] ]
)
identifier[self] . identifier[__pixels] = identifier[list] ( identifier[zip] (*[ identifier[iter] ( identifier[rgb_tuples] )]* identifier[self] . identifier[width] ))
keyword[return] identifier[self] . identifier[__pixels] | def pixels(self):
# type: () -> Pixels
'\n :return list: RGB tuples.\n '
if not self.__pixels:
rgb_tuples = zip(self.raw[2::4], self.raw[1::4], self.raw[0::4]) # type: Iterator[Pixel]
self.__pixels = list(zip(*[iter(rgb_tuples)] * self.width)) # type: ignore # depends on [control=['if'], data=[]]
return self.__pixels |
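A hedged worked example of the byte layout the pixels property assumes (BGRA/BGRX bytes, row-major; self.raw and self.width belong to the owning screenshot class):

# Two BGRA pixels forming one row (width == 2):
raw = bytes([10, 20, 30, 255, 40, 50, 60, 255])  # B,G,R,A | B,G,R,A
# raw[2::4] -> reds (30, 60); raw[1::4] -> greens; raw[0::4] -> blues
# zip(...) yields (30, 20, 10) and (60, 50, 40), and regrouping by
# width gives one row: [((30, 20, 10), (60, 50, 40))]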
def response_json(self, status, response, content_type='application/json', encoding='utf-8', headers=None, jsonp=None):
"""
Send a JSON response
"""
encoder = JSONEncoder(
check_circular=self.app.validate_output,
allow_nan=False,
sort_keys=True,
indent=2 if self.app.pretty_output else None,
separators=(',', ': ') if self.app.pretty_output else (',', ':')
)
content = encoder.encode(response)
if jsonp:
content_list = [jsonp.encode(encoding), b'(', content.encode(encoding), b');']
else:
content_list = [content.encode(encoding)]
return self.response(status, content_type, content_list, headers=headers) | def function[response_json, parameter[self, status, response, content_type, encoding, headers, jsonp]]:
constant[
Send a JSON response
]
variable[encoder] assign[=] call[name[JSONEncoder], parameter[]]
variable[content] assign[=] call[name[encoder].encode, parameter[name[response]]]
if name[jsonp] begin[:]
variable[content_list] assign[=] list[[<ast.Call object at 0x7da1b1803b20>, <ast.Constant object at 0x7da1b1803ca0>, <ast.Call object at 0x7da1b18006d0>, <ast.Constant object at 0x7da1b1803880>]]
return[call[name[self].response, parameter[name[status], name[content_type], name[content_list]]]] | keyword[def] identifier[response_json] ( identifier[self] , identifier[status] , identifier[response] , identifier[content_type] = literal[string] , identifier[encoding] = literal[string] , identifier[headers] = keyword[None] , identifier[jsonp] = keyword[None] ):
literal[string]
identifier[encoder] = identifier[JSONEncoder] (
identifier[check_circular] = identifier[self] . identifier[app] . identifier[validate_output] ,
identifier[allow_nan] = keyword[False] ,
identifier[sort_keys] = keyword[True] ,
identifier[indent] = literal[int] keyword[if] identifier[self] . identifier[app] . identifier[pretty_output] keyword[else] keyword[None] ,
identifier[separators] =( literal[string] , literal[string] ) keyword[if] identifier[self] . identifier[app] . identifier[pretty_output] keyword[else] ( literal[string] , literal[string] )
)
identifier[content] = identifier[encoder] . identifier[encode] ( identifier[response] )
keyword[if] identifier[jsonp] :
identifier[content_list] =[ identifier[jsonp] . identifier[encode] ( identifier[encoding] ), literal[string] , identifier[content] . identifier[encode] ( identifier[encoding] ), literal[string] ]
keyword[else] :
identifier[content_list] =[ identifier[content] . identifier[encode] ( identifier[encoding] )]
keyword[return] identifier[self] . identifier[response] ( identifier[status] , identifier[content_type] , identifier[content_list] , identifier[headers] = identifier[headers] ) | def response_json(self, status, response, content_type='application/json', encoding='utf-8', headers=None, jsonp=None):
"""
Send a JSON response
"""
encoder = JSONEncoder(check_circular=self.app.validate_output, allow_nan=False, sort_keys=True, indent=2 if self.app.pretty_output else None, separators=(',', ': ') if self.app.pretty_output else (',', ':'))
content = encoder.encode(response)
if jsonp:
content_list = [jsonp.encode(encoding), b'(', content.encode(encoding), b');'] # depends on [control=['if'], data=[]]
else:
content_list = [content.encode(encoding)]
return self.response(status, content_type, content_list, headers=headers) |
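A hedged illustration of the JSONP branch in response_json: with pretty_output off the encoder uses compact (',', ':') separators, keys are ordered by sort_keys=True, and the callback wraps the payload:

# Given response = {'b': [2, 3], 'a': 1} and jsonp = 'cb', the body
# chunks passed to self.response are:
#   [b'cb', b'(', b'{"a":1,"b":[2,3]}', b');']
# so the client receives:  cb({"a":1,"b":[2,3]});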
def EnumerateFilesystemsFromClient(args):
"""List all local filesystems mounted on this system."""
del args # Unused.
for fs_struct in client_utils_osx.GetFileSystems():
yield rdf_client_fs.Filesystem(
device=fs_struct.f_mntfromname,
mount_point=fs_struct.f_mntonname,
type=fs_struct.f_fstypename)
drive_re = re.compile("r?disk[0-9].*")
for drive in os.listdir("/dev"):
if not drive_re.match(drive):
continue
path = os.path.join("/dev", drive)
try:
img_inf = pytsk3.Img_Info(path)
# This is a volume or a partition - we send back a TSK device.
yield rdf_client_fs.Filesystem(device=path)
vol_inf = pytsk3.Volume_Info(img_inf)
for volume in vol_inf:
if volume.flags == pytsk3.TSK_VS_PART_FLAG_ALLOC:
offset = volume.start * vol_inf.info.block_size
yield rdf_client_fs.Filesystem(
device="{path}:{offset}".format(path=path, offset=offset),
type="partition")
except (IOError, RuntimeError):
continue | def function[EnumerateFilesystemsFromClient, parameter[args]]:
constant[List all local filesystems mounted on this system.]
<ast.Delete object at 0x7da1b1c24df0>
for taget[name[fs_struct]] in starred[call[name[client_utils_osx].GetFileSystems, parameter[]]] begin[:]
<ast.Yield object at 0x7da1b1c25540>
variable[drive_re] assign[=] call[name[re].compile, parameter[constant[r?disk[0-9].*]]]
for taget[name[drive]] in starred[call[name[os].listdir, parameter[constant[/dev]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1c24310> begin[:]
continue
variable[path] assign[=] call[name[os].path.join, parameter[constant[/dev], name[drive]]]
<ast.Try object at 0x7da1b1c1bd60> | keyword[def] identifier[EnumerateFilesystemsFromClient] ( identifier[args] ):
literal[string]
keyword[del] identifier[args]
keyword[for] identifier[fs_struct] keyword[in] identifier[client_utils_osx] . identifier[GetFileSystems] ():
keyword[yield] identifier[rdf_client_fs] . identifier[Filesystem] (
identifier[device] = identifier[fs_struct] . identifier[f_mntfromname] ,
identifier[mount_point] = identifier[fs_struct] . identifier[f_mntonname] ,
identifier[type] = identifier[fs_struct] . identifier[f_fstypename] )
identifier[drive_re] = identifier[re] . identifier[compile] ( literal[string] )
keyword[for] identifier[drive] keyword[in] identifier[os] . identifier[listdir] ( literal[string] ):
keyword[if] keyword[not] identifier[drive_re] . identifier[match] ( identifier[drive] ):
keyword[continue]
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( literal[string] , identifier[drive] )
keyword[try] :
identifier[img_inf] = identifier[pytsk3] . identifier[Img_Info] ( identifier[path] )
keyword[yield] identifier[rdf_client_fs] . identifier[Filesystem] ( identifier[device] = identifier[path] )
identifier[vol_inf] = identifier[pytsk3] . identifier[Volume_Info] ( identifier[img_inf] )
keyword[for] identifier[volume] keyword[in] identifier[vol_inf] :
keyword[if] identifier[volume] . identifier[flags] == identifier[pytsk3] . identifier[TSK_VS_PART_FLAG_ALLOC] :
identifier[offset] = identifier[volume] . identifier[start] * identifier[vol_inf] . identifier[info] . identifier[block_size]
keyword[yield] identifier[rdf_client_fs] . identifier[Filesystem] (
identifier[device] = literal[string] . identifier[format] ( identifier[path] = identifier[path] , identifier[offset] = identifier[offset] ),
identifier[type] = literal[string] )
keyword[except] ( identifier[IOError] , identifier[RuntimeError] ):
keyword[continue] | def EnumerateFilesystemsFromClient(args):
"""List all local filesystems mounted on this system."""
del args # Unused.
for fs_struct in client_utils_osx.GetFileSystems():
yield rdf_client_fs.Filesystem(device=fs_struct.f_mntfromname, mount_point=fs_struct.f_mntonname, type=fs_struct.f_fstypename) # depends on [control=['for'], data=['fs_struct']]
drive_re = re.compile('r?disk[0-9].*')
for drive in os.listdir('/dev'):
if not drive_re.match(drive):
continue # depends on [control=['if'], data=[]]
path = os.path.join('/dev', drive)
try:
img_inf = pytsk3.Img_Info(path)
# This is a volume or a partition - we send back a TSK device.
yield rdf_client_fs.Filesystem(device=path)
vol_inf = pytsk3.Volume_Info(img_inf)
for volume in vol_inf:
if volume.flags == pytsk3.TSK_VS_PART_FLAG_ALLOC:
offset = volume.start * vol_inf.info.block_size
yield rdf_client_fs.Filesystem(device='{path}:{offset}'.format(path=path, offset=offset), type='partition') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['volume']] # depends on [control=['try'], data=[]]
except (IOError, RuntimeError):
continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['drive']] |
def save(self, obj, usePipeline=True, forceID=False, cascadeSave=True, conn=None):
'''
save - Save an object / objects associated with this model.
You probably want to just do object.save() instead of this, but to save multiple objects at once in a single transaction,
you can use:
MyModel.saver.save(myObjs)
@param obj <IndexedRedisModel or list<IndexedRedisModel>> - The object to save, or a list of objects to save
@param usePipeline - Use a pipeline for saving. You should always want this, unless you are calling this function from within an existing pipeline.
@param forceID - if not False, force ID to this. If obj is list, this is also list. Forcing IDs also forces insert. Up to you to ensure ID will not clash.
@param cascadeSave <bool> Default True - If True, any Foreign models linked as attributes that have been altered
or created will be saved with this object. If False, only this object (and the reference to an already-saved foreign model) will be saved.
@param conn - A connection or None
@note - if no ID is specified, the next ID in the sequence is generated and assigned automatically
@return - List of pks
'''
if conn is None:
conn = self._get_connection()
# If we are in a pipeline, we need an external connection to fetch any potential IDs for inserts.
if usePipeline is True:
idConn = conn
else:
idConn = self._get_new_connection()
if issubclass(obj.__class__, (list, tuple)):
objs = obj
else:
objs = [obj]
if usePipeline is True:
pipeline = conn.pipeline()
else:
pipeline = conn
oga = object.__getattribute__
if cascadeSave is True:
# TODO: Confirm that this pipeline logic works even when doPipeline is False
# (i.e. that cascading works through calls to reset)
# foreignPipelines = OrderedDict()
foreignSavers = {}
for thisObj in objs:
if not thisObj.foreignFields:
continue
foreignFields = thisObj.foreignFields
for foreignField in foreignFields:
rawObj = oga(thisObj, str(foreignField))
if rawObj in (None, irNull) or not rawObj.isFetched():
continue
foreignObjects = oga(thisObj, str(foreignField)).getObjs()
for foreignObject in foreignObjects:
doSaveForeign = False
if getattr(foreignObject, '_id', None):
if foreignObject.hasUnsavedChanges(cascadeObjects=True):
doSaveForeign = True
else:
doSaveForeign = True
# OLD:
# Assemble each level of Foreign fields into an ordered pipeline. Based on semi-recursion,
# we will save the deepest level first in a pipeline, then the next up, on until we complete any subs
# NEW:
# Assemble all foreign fields into current pipeline and execute all in one block
if doSaveForeign is True:
if foreignField not in foreignSavers:
# foreignPipelines[foreignField] = self._get_new_connection().pipeline()
foreignSavers[foreignField] = IndexedRedisSave(foreignObject.__class__)
#foreignSavers[foreignField].save(foreignObject, usePipeline=False, cascadeSave=True, conn=foreignPipelines[foreignField])
foreignSavers[foreignField].save(foreignObject, usePipeline=False, cascadeSave=True, conn=pipeline)
# if foreignPipelines:
# for foreignPipeline in foreignPipelines.values():
# foreignPipeline.execute()
objsLen = len(objs)
if forceID is not False:
# Compat with old poor design.. :(
if isinstance(forceID, (list, tuple)):
forceIDs = forceID
else:
forceIDs = [forceID]
isInserts = []
i = 0
while i < objsLen:
if forceIDs[i] is not False:
objs[i]._id = forceIDs[i]
isInserts.append(True)
else:
isInsert = not bool(getattr(objs[i], '_id', None))  # check the current object, not the (possibly list) argument
if isInsert is True:
objs[i]._id = self._getNextID(idConn)
isInserts.append(isInsert)
i += 1
else:
isInserts = []
for obj in objs:
isInsert = not bool(getattr(obj, '_id', None))
if isInsert is True:
obj._id = self._getNextID(idConn)
isInserts.append(isInsert)
ids = [] # Note ids can be derived with all information above..
i = 0
while i < objsLen:
self._doSave(objs[i], isInserts[i], conn, pipeline)
ids.append(objs[i]._id)
i += 1
if usePipeline is True:
pipeline.execute()
return ids | def function[save, parameter[self, obj, usePipeline, forceID, cascadeSave, conn]]:
constant[
save - Save an object / objects associated with this model.
You probably want to just do object.save() instead of this, but to save multiple objects at once in a single transaction,
you can use:
MyModel.saver.save(myObjs)
@param obj <IndexedRedisModel or list<IndexedRedisModel>> - The object to save, or a list of objects to save
@param usePipeline - Use a pipeline for saving. You should always want this, unless you are calling this function from within an existing pipeline.
@param forceID - if not False, force ID to this. If obj is list, this is also list. Forcing IDs also forces insert. Up to you to ensure ID will not clash.
@param cascadeSave <bool> Default True - If True, any Foreign models linked as attributes that have been altered
or created will be saved with this object. If False, only this object (and the reference to an already-saved foreign model) will be saved.
@param conn - A connection or None
@note - if no ID is specified, a new one is generated and the object is saved as an insert
@return - List of pks
]
if compare[name[conn] is constant[None]] begin[:]
variable[conn] assign[=] call[name[self]._get_connection, parameter[]]
if compare[name[usePipeline] is constant[True]] begin[:]
variable[idConn] assign[=] name[conn]
if call[name[issubclass], parameter[name[obj].__class__, tuple[[<ast.Name object at 0x7da1b009b790>, <ast.Name object at 0x7da1b009b760>]]]] begin[:]
variable[objs] assign[=] name[obj]
if compare[name[usePipeline] is constant[True]] begin[:]
variable[pipeline] assign[=] call[name[conn].pipeline, parameter[]]
variable[oga] assign[=] name[object].__getattribute__
if compare[name[cascadeSave] is constant[True]] begin[:]
variable[foreignSavers] assign[=] dictionary[[], []]
for taget[name[thisObj]] in starred[name[objs]] begin[:]
if <ast.UnaryOp object at 0x7da1b009b0d0> begin[:]
continue
variable[foreignFields] assign[=] name[thisObj].foreignFields
for taget[name[foreignField]] in starred[name[foreignFields]] begin[:]
variable[rawObj] assign[=] call[name[oga], parameter[name[thisObj], call[name[str], parameter[name[foreignField]]]]]
if <ast.BoolOp object at 0x7da1b009ace0> begin[:]
continue
variable[foreignObjects] assign[=] call[call[name[oga], parameter[name[thisObj], call[name[str], parameter[name[foreignField]]]]].getObjs, parameter[]]
for taget[name[foreignObject]] in starred[name[foreignObjects]] begin[:]
variable[doSaveForeign] assign[=] constant[False]
if call[name[getattr], parameter[name[foreignObject], constant[_id], constant[None]]] begin[:]
if call[name[foreignObject].hasUnsavedChanges, parameter[]] begin[:]
variable[doSaveForeign] assign[=] constant[True]
if compare[name[doSaveForeign] is constant[True]] begin[:]
if compare[name[foreignField] <ast.NotIn object at 0x7da2590d7190> name[foreignSavers]] begin[:]
call[name[foreignSavers]][name[foreignField]] assign[=] call[name[IndexedRedisSave], parameter[name[foreignObject].__class__]]
call[call[name[foreignSavers]][name[foreignField]].save, parameter[name[foreignObject]]]
variable[objsLen] assign[=] call[name[len], parameter[name[objs]]]
if compare[name[forceID] is_not constant[False]] begin[:]
if call[name[isinstance], parameter[name[forceID], tuple[[<ast.Name object at 0x7da1b0099c00>, <ast.Name object at 0x7da1b0099bd0>]]]] begin[:]
variable[forceIDs] assign[=] name[forceID]
variable[isInserts] assign[=] list[[]]
variable[i] assign[=] constant[0]
while compare[name[i] less[<] name[objsLen]] begin[:]
if compare[call[name[forceIDs]][name[i]] is_not constant[False]] begin[:]
call[name[objs]][name[i]]._id assign[=] call[name[forceIDs]][name[i]]
call[name[isInserts].append, parameter[constant[True]]]
<ast.AugAssign object at 0x7da1b0098f40>
variable[ids] assign[=] list[[]]
variable[i] assign[=] constant[0]
while compare[name[i] less[<] name[objsLen]] begin[:]
call[name[self]._doSave, parameter[call[name[objs]][name[i]], call[name[isInserts]][name[i]], name[conn], name[pipeline]]]
call[name[ids].append, parameter[call[name[objs]][name[i]]._id]]
<ast.AugAssign object at 0x7da1b004f400>
if compare[name[usePipeline] is constant[True]] begin[:]
call[name[pipeline].execute, parameter[]]
return[name[ids]] | keyword[def] identifier[save] ( identifier[self] , identifier[obj] , identifier[usePipeline] = keyword[True] , identifier[forceID] = keyword[False] , identifier[cascadeSave] = keyword[True] , identifier[conn] = keyword[None] ):
literal[string]
keyword[if] identifier[conn] keyword[is] keyword[None] :
identifier[conn] = identifier[self] . identifier[_get_connection] ()
keyword[if] identifier[usePipeline] keyword[is] keyword[True] :
identifier[idConn] = identifier[conn]
keyword[else] :
identifier[idConn] = identifier[self] . identifier[_get_new_connection] ()
keyword[if] identifier[issubclass] ( identifier[obj] . identifier[__class__] ,( identifier[list] , identifier[tuple] )):
identifier[objs] = identifier[obj]
keyword[else] :
identifier[objs] =[ identifier[obj] ]
keyword[if] identifier[usePipeline] keyword[is] keyword[True] :
identifier[pipeline] = identifier[conn] . identifier[pipeline] ()
keyword[else] :
identifier[pipeline] = identifier[conn]
identifier[oga] = identifier[object] . identifier[__getattribute__]
keyword[if] identifier[cascadeSave] keyword[is] keyword[True] :
identifier[foreignSavers] ={}
keyword[for] identifier[thisObj] keyword[in] identifier[objs] :
keyword[if] keyword[not] identifier[thisObj] . identifier[foreignFields] :
keyword[continue]
identifier[foreignFields] = identifier[thisObj] . identifier[foreignFields]
keyword[for] identifier[foreignField] keyword[in] identifier[foreignFields] :
identifier[rawObj] = identifier[oga] ( identifier[thisObj] , identifier[str] ( identifier[foreignField] ))
keyword[if] identifier[rawObj] keyword[in] ( keyword[None] , identifier[irNull] ) keyword[or] keyword[not] identifier[rawObj] . identifier[isFetched] ():
keyword[continue]
identifier[foreignObjects] = identifier[oga] ( identifier[thisObj] , identifier[str] ( identifier[foreignField] )). identifier[getObjs] ()
keyword[for] identifier[foreignObject] keyword[in] identifier[foreignObjects] :
identifier[doSaveForeign] = keyword[False]
keyword[if] identifier[getattr] ( identifier[foreignObject] , literal[string] , keyword[None] ):
keyword[if] identifier[foreignObject] . identifier[hasUnsavedChanges] ( identifier[cascadeObjects] = keyword[True] ):
identifier[doSaveForeign] = keyword[True]
keyword[else] :
identifier[doSaveForeign] = keyword[True]
keyword[if] identifier[doSaveForeign] keyword[is] keyword[True] :
keyword[if] identifier[foreignField] keyword[not] keyword[in] identifier[foreignSavers] :
identifier[foreignSavers] [ identifier[foreignField] ]= identifier[IndexedRedisSave] ( identifier[foreignObject] . identifier[__class__] )
identifier[foreignSavers] [ identifier[foreignField] ]. identifier[save] ( identifier[foreignObject] , identifier[usePipeline] = keyword[False] , identifier[cascadeSave] = keyword[True] , identifier[conn] = identifier[pipeline] )
identifier[objsLen] = identifier[len] ( identifier[objs] )
keyword[if] identifier[forceID] keyword[is] keyword[not] keyword[False] :
keyword[if] identifier[isinstance] ( identifier[forceID] ,( identifier[list] , identifier[tuple] )):
identifier[forceIDs] = identifier[forceID]
keyword[else] :
identifier[forceIDs] =[ identifier[forceID] ]
identifier[isInserts] =[]
identifier[i] = literal[int]
keyword[while] identifier[i] < identifier[objsLen] :
keyword[if] identifier[forceIDs] [ identifier[i] ] keyword[is] keyword[not] keyword[False] :
identifier[objs] [ identifier[i] ]. identifier[_id] = identifier[forceIDs] [ identifier[i] ]
identifier[isInserts] . identifier[append] ( keyword[True] )
keyword[else] :
identifier[isInsert] = keyword[not] identifier[bool] ( identifier[getattr] ( identifier[objs] [ identifier[i] ], literal[string] , keyword[None] ))
keyword[if] identifier[isInsert] keyword[is] keyword[True] :
identifier[objs] [ identifier[i] ]. identifier[_id] = identifier[self] . identifier[_getNextID] ( identifier[idConn] )
identifier[isInserts] . identifier[append] ( identifier[isInsert] )
identifier[i] += literal[int]
keyword[else] :
identifier[isInserts] =[]
keyword[for] identifier[obj] keyword[in] identifier[objs] :
identifier[isInsert] = keyword[not] identifier[bool] ( identifier[getattr] ( identifier[obj] , literal[string] , keyword[None] ))
keyword[if] identifier[isInsert] keyword[is] keyword[True] :
identifier[obj] . identifier[_id] = identifier[self] . identifier[_getNextID] ( identifier[idConn] )
identifier[isInserts] . identifier[append] ( identifier[isInsert] )
identifier[ids] =[]
identifier[i] = literal[int]
keyword[while] identifier[i] < identifier[objsLen] :
identifier[self] . identifier[_doSave] ( identifier[objs] [ identifier[i] ], identifier[isInserts] [ identifier[i] ], identifier[conn] , identifier[pipeline] )
identifier[ids] . identifier[append] ( identifier[objs] [ identifier[i] ]. identifier[_id] )
identifier[i] += literal[int]
keyword[if] identifier[usePipeline] keyword[is] keyword[True] :
identifier[pipeline] . identifier[execute] ()
keyword[return] identifier[ids] | def save(self, obj, usePipeline=True, forceID=False, cascadeSave=True, conn=None):
"""
save - Save an object / objects associated with this model.
You probably want to just do object.save() instead of this, but to save multiple objects at once in a single transaction,
you can use:
MyModel.saver.save(myObjs)
@param obj <IndexedRedisModel or list<IndexedRedisModel>> - The object to save, or a list of objects to save
@param usePipeline - Use a pipeline for saving. You should always want this, unless you are calling this function from within an existing pipeline.
@param forceID - if not False, force ID to this. If obj is list, this is also list. Forcing IDs also forces insert. Up to you to ensure ID will not clash.
@param cascadeSave <bool> Default True - If True, any Foreign models linked as attributes that have been altered
or created will be saved with this object. If False, only this object (and the reference to an already-saved foreign model) will be saved.
@param conn - A connection or None
@note - if no ID is specified, a new one is generated and the object is saved as an insert
@return - List of pks
"""
if conn is None:
conn = self._get_connection() # depends on [control=['if'], data=['conn']] # If we are in a pipeline, we need an external connection to fetch any potential IDs for inserts.
if usePipeline is True:
idConn = conn # depends on [control=['if'], data=[]]
else:
idConn = self._get_new_connection()
if issubclass(obj.__class__, (list, tuple)):
objs = obj # depends on [control=['if'], data=[]]
else:
objs = [obj]
if usePipeline is True:
pipeline = conn.pipeline() # depends on [control=['if'], data=[]]
else:
pipeline = conn
oga = object.__getattribute__
if cascadeSave is True: # TODO: Confirm that this pipeline logic works even when doPipeline is False
# (i.e. that cascading works through calls to reset)
# foreignPipelines = OrderedDict()
foreignSavers = {}
for thisObj in objs:
if not thisObj.foreignFields:
continue # depends on [control=['if'], data=[]]
foreignFields = thisObj.foreignFields
for foreignField in foreignFields:
rawObj = oga(thisObj, str(foreignField))
if rawObj in (None, irNull) or not rawObj.isFetched():
continue # depends on [control=['if'], data=[]]
foreignObjects = oga(thisObj, str(foreignField)).getObjs()
for foreignObject in foreignObjects:
doSaveForeign = False
if getattr(foreignObject, '_id', None):
if foreignObject.hasUnsavedChanges(cascadeObjects=True):
doSaveForeign = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
doSaveForeign = True # OLD:
# Assemble each level of Foreign fields into an ordered pipeline. Based on semi-recursion,
# we will save the deepest level first in a pipeline, then the next up, on until we complete any subs
# NEW:
# Assemble all foreign fields into current pipeline and execute all in one block
if doSaveForeign is True:
if foreignField not in foreignSavers:
# foreignPipelines[foreignField] = self._get_new_connection().pipeline()
foreignSavers[foreignField] = IndexedRedisSave(foreignObject.__class__) # depends on [control=['if'], data=['foreignField', 'foreignSavers']] #foreignSavers[foreignField].save(foreignObject, usePipeline=False, cascadeSave=True, conn=foreignPipelines[foreignField])
foreignSavers[foreignField].save(foreignObject, usePipeline=False, cascadeSave=True, conn=pipeline) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['foreignObject']] # depends on [control=['for'], data=['foreignField']] # depends on [control=['for'], data=['thisObj']] # depends on [control=['if'], data=[]]
# if foreignPipelines:
# for foreignPipeline in foreignPipelines.values():
# foreignPipeline.execute()
objsLen = len(objs)
if forceID is not False: # Compat with old poor design.. :(
if isinstance(forceID, (list, tuple)):
forceIDs = forceID # depends on [control=['if'], data=[]]
else:
forceIDs = [forceID]
isInserts = []
i = 0
while i < objsLen:
if forceIDs[i] is not False:
objs[i]._id = forceIDs[i]
isInserts.append(True) # depends on [control=['if'], data=[]]
else:
isInsert = not bool(getattr(objs[i], '_id', None))
if isInsert is True:
objs[i]._id = self._getNextID(idConn) # depends on [control=['if'], data=[]]
isInserts.append(isInsert)
i += 1 # depends on [control=['while'], data=['i']] # depends on [control=['if'], data=['forceID']]
else:
isInserts = []
for obj in objs:
isInsert = not bool(getattr(obj, '_id', None))
if isInsert is True:
obj._id = self._getNextID(idConn) # depends on [control=['if'], data=[]]
isInserts.append(isInsert) # depends on [control=['for'], data=['obj']]
ids = [] # Note ids can be derived with all information above..
i = 0
while i < objsLen:
self._doSave(objs[i], isInserts[i], conn, pipeline)
ids.append(objs[i]._id)
i += 1 # depends on [control=['while'], data=['i']]
if usePipeline is True:
pipeline.execute() # depends on [control=['if'], data=[]]
return ids |
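As a usage sketch for the saver above: the docstring's MyModel.saver.save(myObjs) pattern applied to a hypothetical model. The Person class, its fields, and a reachable Redis instance are assumptions, not part of the original code.

from IndexedRedis import IndexedRedisModel, IRField

class Person(IndexedRedisModel):
    # Hypothetical model purely for illustration.
    KEY_NAME = 'example:Person'
    FIELDS = [IRField('name'), IRField('email')]
    INDEXED_FIELDS = ['name']

people = [Person(name='ada', email='ada@example.com'),
          Person(name='grace', email='grace@example.com')]

# Saves both objects in a single pipelined transaction and returns their pks.
ids = Person.saver.save(people)
print(ids)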
def replace(self, old_patch, new_patch):
""" Replace old_patch with new_patch
The method only replaces the patch and doesn't change any comments.
"""
self._check_patch(old_patch)
old_patchline = self.patch2line[old_patch]
index = self.patchlines.index(old_patchline)
self.patchlines.pop(index)
new_patchline = PatchLine(new_patch)
new_patchline.set_comment(old_patchline.get_comment())
self.patchlines.insert(index, new_patchline)
del self.patch2line[old_patch]
self.patch2line[new_patch] = new_patchline | def function[replace, parameter[self, old_patch, new_patch]]:
constant[ Replace old_patch with new_patch
The method only replaces the patch and doesn't change any comments.
]
call[name[self]._check_patch, parameter[name[old_patch]]]
variable[old_patchline] assign[=] call[name[self].patch2line][name[old_patch]]
variable[index] assign[=] call[name[self].patchlines.index, parameter[name[old_patchline]]]
call[name[self].patchlines.pop, parameter[name[index]]]
variable[new_patchline] assign[=] call[name[PatchLine], parameter[name[new_patch]]]
call[name[new_patchline].set_comment, parameter[call[name[old_patchline].get_comment, parameter[]]]]
call[name[self].patchlines.insert, parameter[name[index], name[new_patchline]]]
<ast.Delete object at 0x7da1b0388cd0>
call[name[self].patch2line][name[new_patch]] assign[=] name[new_patchline] | keyword[def] identifier[replace] ( identifier[self] , identifier[old_patch] , identifier[new_patch] ):
literal[string]
identifier[self] . identifier[_check_patch] ( identifier[old_patch] )
identifier[old_patchline] = identifier[self] . identifier[patch2line] [ identifier[old_patch] ]
identifier[index] = identifier[self] . identifier[patchlines] . identifier[index] ( identifier[old_patchline] )
identifier[self] . identifier[patchlines] . identifier[pop] ( identifier[index] )
identifier[new_patchline] = identifier[PatchLine] ( identifier[new_patch] )
identifier[new_patchline] . identifier[set_comment] ( identifier[old_patchline] . identifier[get_comment] ())
identifier[self] . identifier[patchlines] . identifier[insert] ( identifier[index] , identifier[new_patchline] )
keyword[del] identifier[self] . identifier[patch2line] [ identifier[old_patch] ]
identifier[self] . identifier[patch2line] [ identifier[new_patch] ]= identifier[new_patchline] | def replace(self, old_patch, new_patch):
""" Replace old_patch with new_patch
The method only replaces the patch and doesn't change any comments.
"""
self._check_patch(old_patch)
old_patchline = self.patch2line[old_patch]
index = self.patchlines.index(old_patchline)
self.patchlines.pop(index)
new_patchline = PatchLine(new_patch)
new_patchline.set_comment(old_patchline.get_comment())
self.patchlines.insert(index, new_patchline)
del self.patch2line[old_patch]
self.patch2line[new_patch] = new_patchline |
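A hypothetical call site for replace(): `series` stands in for whatever object owns patchlines/patch2line (a quilt-style series file), and both the patches() accessor and the Patch class are assumptions this excerpt does not confirm.

old = series.patches()[0]                      # assumed accessor
series.replace(old, Patch('fix-build.patch'))
# The new patch now occupies the old one's position in the series, and the
# comment that accompanied the old patch line is preserved.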
def delete(self, filename, storage_type=None, bucket_name=None):
"""Deletes the specified file, either locally or from S3, depending on the file's storage type."""
if not (storage_type and bucket_name):
self._delete_local(filename)
else:
if storage_type != 's3':
raise ValueError('Storage type "%s" is invalid, the only supported storage type (apart from default local storage) is s3.' % storage_type)
self._delete_s3(filename, bucket_name) | def function[delete, parameter[self, filename, storage_type, bucket_name]]:
constant[Deletes the specified file, either locally or from S3, depending on the file's storage type.]
if <ast.UnaryOp object at 0x7da1b01feb60> begin[:]
call[name[self]._delete_local, parameter[name[filename]]] | keyword[def] identifier[delete] ( identifier[self] , identifier[filename] , identifier[storage_type] = keyword[None] , identifier[bucket_name] = keyword[None] ):
literal[string]
keyword[if] keyword[not] ( identifier[storage_type] keyword[and] identifier[bucket_name] ):
identifier[self] . identifier[_delete_local] ( identifier[filename] )
keyword[else] :
keyword[if] identifier[storage_type] != literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[storage_type] )
identifier[self] . identifier[_delete_s3] ( identifier[filename] , identifier[bucket_name] ) | def delete(self, filename, storage_type=None, bucket_name=None):
"""Deletes the specified file, either locally or from S3, depending on the file's storage type."""
if not (storage_type and bucket_name):
self._delete_local(filename) # depends on [control=['if'], data=[]]
else:
if storage_type != 's3':
raise ValueError('Storage type "%s" is invalid, the only supported storage type (apart from default local storage) is s3.' % storage_type) # depends on [control=['if'], data=['storage_type']]
self._delete_s3(filename, bucket_name) |
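The branching above implies call patterns like the following; `store` is an instance of the (unshown) class defining delete(), and the paths and bucket name are illustrative.

store.delete('uploads/avatar.png')                        # local deletion
store.delete('uploads/avatar.png',
             storage_type='s3', bucket_name='my-bucket')  # S3 deletion
# Any storage_type other than 's3' raises ValueError:
store.delete('uploads/avatar.png', storage_type='gcs', bucket_name='b')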