code (stringlengths 75-104k) | code_sememe (stringlengths 47-309k) | token_type (stringlengths 215-214k) | code_dependency (stringlengths 75-155k)
---|---|---|---
def get_primitive_name(schema):
""" Get a human-friendly name for the given primitive.
:param schema: Schema
:type schema: *
:rtype: unicode
"""
try:
return {
const.COMPILED_TYPE.LITERAL: six.text_type,
const.COMPILED_TYPE.TYPE: get_type_name,
const.COMPILED_TYPE.ENUM: get_type_name,
const.COMPILED_TYPE.CALLABLE: get_callable_name,
const.COMPILED_TYPE.ITERABLE: lambda x: _(u'{type}[{content}]').format(type=get_type_name(list), content=_(u'...') if x else _(u'-')),
const.COMPILED_TYPE.MAPPING: lambda x: _(u'{type}[{content}]').format(type=get_type_name(dict), content=_(u'...') if x else _(u'-')),
}[primitive_type(schema)](schema)
except KeyError:
return six.text_type(repr(schema)) | def function[get_primitive_name, parameter[schema]]:
constant[ Get a human-friendly name for the given primitive.
:param schema: Schema
:type schema: *
:rtype: unicode
]
<ast.Try object at 0x7da1b26c9c00> | keyword[def] identifier[get_primitive_name] ( identifier[schema] ):
literal[string]
keyword[try] :
keyword[return] {
identifier[const] . identifier[COMPILED_TYPE] . identifier[LITERAL] : identifier[six] . identifier[text_type] ,
identifier[const] . identifier[COMPILED_TYPE] . identifier[TYPE] : identifier[get_type_name] ,
identifier[const] . identifier[COMPILED_TYPE] . identifier[ENUM] : identifier[get_type_name] ,
identifier[const] . identifier[COMPILED_TYPE] . identifier[CALLABLE] : identifier[get_callable_name] ,
identifier[const] . identifier[COMPILED_TYPE] . identifier[ITERABLE] : keyword[lambda] identifier[x] : identifier[_] ( literal[string] ). identifier[format] ( identifier[type] = identifier[get_type_name] ( identifier[list] ), identifier[content] = identifier[_] ( literal[string] ) keyword[if] identifier[x] keyword[else] identifier[_] ( literal[string] )),
identifier[const] . identifier[COMPILED_TYPE] . identifier[MAPPING] : keyword[lambda] identifier[x] : identifier[_] ( literal[string] ). identifier[format] ( identifier[type] = identifier[get_type_name] ( identifier[dict] ), identifier[content] = identifier[_] ( literal[string] ) keyword[if] identifier[x] keyword[else] identifier[_] ( literal[string] )),
}[ identifier[primitive_type] ( identifier[schema] )]( identifier[schema] )
keyword[except] identifier[KeyError] :
keyword[return] identifier[six] . identifier[text_type] ( identifier[repr] ( identifier[schema] )) | def get_primitive_name(schema):
""" Get a human-friendly name for the given primitive.
:param schema: Schema
:type schema: *
:rtype: unicode
"""
try:
return {const.COMPILED_TYPE.LITERAL: six.text_type, const.COMPILED_TYPE.TYPE: get_type_name, const.COMPILED_TYPE.ENUM: get_type_name, const.COMPILED_TYPE.CALLABLE: get_callable_name, const.COMPILED_TYPE.ITERABLE: lambda x: _(u'{type}[{content}]').format(type=get_type_name(list), content=_(u'...') if x else _(u'-')), const.COMPILED_TYPE.MAPPING: lambda x: _(u'{type}[{content}]').format(type=get_type_name(dict), content=_(u'...') if x else _(u'-'))}[primitive_type(schema)](schema) # depends on [control=['try'], data=[]]
except KeyError:
return six.text_type(repr(schema)) # depends on [control=['except'], data=[]] |
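The row above encodes a dispatch-table formatter: a dict maps a computed type tag to a rendering function, with `repr` as the KeyError fallback. A minimal, self-contained sketch of the same pattern follows; the toy `classify` helper stands in for the source's `primitive_type` and `const.COMPILED_TYPE`, which are not defined in this row.

```python
# Illustrative stand-in for primitive_type(): buckets a value into a few tags.
# Not the source library's implementation.
def classify(schema):
    if isinstance(schema, type):
        return "type"
    if callable(schema):
        return "callable"
    if isinstance(schema, (list, dict)):
        return "container"
    return "literal"

def describe(schema):
    try:
        # Dict-based dispatch, mirroring the structure of get_primitive_name().
        return {
            "literal": str,
            "type": lambda x: x.__name__,
            "callable": lambda x: getattr(x, "__name__", repr(x)),
            "container": lambda x: "{}[{}]".format(type(x).__name__, "..." if x else "-"),
        }[classify(schema)](schema)
    except KeyError:
        # Same fallback as above: unknown tags degrade to repr().
        return repr(schema)

print(describe(int))     # -> int
print(describe([1, 2]))  # -> list[...]
print(describe([]))      # -> list[-]
```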
def format_frame_info(frame):
"""
Formats the given stack frame to show its position in the code and
part of its context
:param frame: A stack frame
"""
# Same as in traceback.extract_stack
line_no = frame.f_lineno
code = frame.f_code
filename = code.co_filename
method_name = code.co_name
linecache.checkcache(filename)
try:
# Try to get the type of the calling object
instance = frame.f_locals["self"]
method_name = "{0}::{1}".format(type(instance).__name__, method_name)
except KeyError:
# Not called from a bound method
pass
# File & line
output_lines = [
' File "{0}", line {1}, in {2}'.format(filename, line_no, method_name)
]
# Arguments
if frame.f_locals:
# Pypy keeps f_locals as an empty dictionary
arg_info = inspect.getargvalues(frame)
for name in arg_info.args:
try:
output_lines.append(
" - {0:s} = {1}".format(name, repr(frame.f_locals[name]))
)
except TypeError:
# Happens in dict/list-comprehensions in Python 2.x
name = name[0]
output_lines.append(
" - {0:s} = {1}".format(name, repr(frame.f_locals[name]))
)
if arg_info.varargs:
output_lines.append(
" - *{0:s} = {1}".format(
arg_info.varargs, frame.f_locals[arg_info.varargs]
)
)
if arg_info.keywords:
output_lines.append(
" - **{0:s} = {1}".format(
arg_info.keywords, frame.f_locals[arg_info.keywords]
)
)
# Line block
lines = _extract_lines(filename, frame.f_globals, line_no, 3)
if lines:
output_lines.append("")
prefix = " "
output_lines.append(
"{0}{1}".format(prefix, "\n{0}".format(prefix).join(lines))
)
return "\n".join(output_lines) | def function[format_frame_info, parameter[frame]]:
constant[
Formats the given stack frame to show its position in the code and
part of its context
:param frame: A stack frame
]
variable[line_no] assign[=] name[frame].f_lineno
variable[code] assign[=] name[frame].f_code
variable[filename] assign[=] name[code].co_filename
variable[method_name] assign[=] name[code].co_name
call[name[linecache].checkcache, parameter[name[filename]]]
<ast.Try object at 0x7da1b033f130>
variable[output_lines] assign[=] list[[<ast.Call object at 0x7da1b033f760>]]
if name[frame].f_locals begin[:]
variable[arg_info] assign[=] call[name[inspect].getargvalues, parameter[name[frame]]]
for taget[name[name]] in starred[name[arg_info].args] begin[:]
<ast.Try object at 0x7da1b033de70>
if name[arg_info].varargs begin[:]
call[name[output_lines].append, parameter[call[constant[ - *{0:s} = {1}].format, parameter[name[arg_info].varargs, call[name[frame].f_locals][name[arg_info].varargs]]]]]
if name[arg_info].keywords begin[:]
call[name[output_lines].append, parameter[call[constant[ - **{0:s} = {1}].format, parameter[name[arg_info].keywords, call[name[frame].f_locals][name[arg_info].keywords]]]]]
variable[lines] assign[=] call[name[_extract_lines], parameter[name[filename], name[frame].f_globals, name[line_no], constant[3]]]
if name[lines] begin[:]
call[name[output_lines].append, parameter[constant[]]]
variable[prefix] assign[=] constant[ ]
call[name[output_lines].append, parameter[call[constant[{0}{1}].format, parameter[name[prefix], call[call[constant[
{0}].format, parameter[name[prefix]]].join, parameter[name[lines]]]]]]]
return[call[constant[
].join, parameter[name[output_lines]]]] | keyword[def] identifier[format_frame_info] ( identifier[frame] ):
literal[string]
identifier[line_no] = identifier[frame] . identifier[f_lineno]
identifier[code] = identifier[frame] . identifier[f_code]
identifier[filename] = identifier[code] . identifier[co_filename]
identifier[method_name] = identifier[code] . identifier[co_name]
identifier[linecache] . identifier[checkcache] ( identifier[filename] )
keyword[try] :
identifier[instance] = identifier[frame] . identifier[f_locals] [ literal[string] ]
identifier[method_name] = literal[string] . identifier[format] ( identifier[type] ( identifier[instance] ). identifier[__name__] , identifier[method_name] )
keyword[except] identifier[KeyError] :
keyword[pass]
identifier[output_lines] =[
literal[string] . identifier[format] ( identifier[filename] , identifier[line_no] , identifier[method_name] )
]
keyword[if] identifier[frame] . identifier[f_locals] :
identifier[arg_info] = identifier[inspect] . identifier[getargvalues] ( identifier[frame] )
keyword[for] identifier[name] keyword[in] identifier[arg_info] . identifier[args] :
keyword[try] :
identifier[output_lines] . identifier[append] (
literal[string] . identifier[format] ( identifier[name] , identifier[repr] ( identifier[frame] . identifier[f_locals] [ identifier[name] ]))
)
keyword[except] identifier[TypeError] :
identifier[name] = identifier[name] [ literal[int] ]
identifier[output_lines] . identifier[append] (
literal[string] . identifier[format] ( identifier[name] , identifier[repr] ( identifier[frame] . identifier[f_locals] [ identifier[name] ]))
)
keyword[if] identifier[arg_info] . identifier[varargs] :
identifier[output_lines] . identifier[append] (
literal[string] . identifier[format] (
identifier[arg_info] . identifier[varargs] , identifier[frame] . identifier[f_locals] [ identifier[arg_info] . identifier[varargs] ]
)
)
keyword[if] identifier[arg_info] . identifier[keywords] :
identifier[output_lines] . identifier[append] (
literal[string] . identifier[format] (
identifier[arg_info] . identifier[keywords] , identifier[frame] . identifier[f_locals] [ identifier[arg_info] . identifier[keywords] ]
)
)
identifier[lines] = identifier[_extract_lines] ( identifier[filename] , identifier[frame] . identifier[f_globals] , identifier[line_no] , literal[int] )
keyword[if] identifier[lines] :
identifier[output_lines] . identifier[append] ( literal[string] )
identifier[prefix] = literal[string]
identifier[output_lines] . identifier[append] (
literal[string] . identifier[format] ( identifier[prefix] , literal[string] . identifier[format] ( identifier[prefix] ). identifier[join] ( identifier[lines] ))
)
keyword[return] literal[string] . identifier[join] ( identifier[output_lines] ) | def format_frame_info(frame):
"""
Formats the given stack frame to show its position in the code and
part of its context
:param frame: A stack frame
"""
# Same as in traceback.extract_stack
line_no = frame.f_lineno
code = frame.f_code
filename = code.co_filename
method_name = code.co_name
linecache.checkcache(filename)
try:
# Try to get the type of the calling object
instance = frame.f_locals['self']
method_name = '{0}::{1}'.format(type(instance).__name__, method_name) # depends on [control=['try'], data=[]]
except KeyError:
# Not called from a bound method
pass # depends on [control=['except'], data=[]]
# File & line
output_lines = [' File "{0}", line {1}, in {2}'.format(filename, line_no, method_name)]
# Arguments
if frame.f_locals:
# Pypy keeps f_locals as an empty dictionary
arg_info = inspect.getargvalues(frame)
for name in arg_info.args:
try:
output_lines.append(' - {0:s} = {1}'.format(name, repr(frame.f_locals[name]))) # depends on [control=['try'], data=[]]
except TypeError:
# Happens in dict/list-comprehensions in Python 2.x
name = name[0]
output_lines.append(' - {0:s} = {1}'.format(name, repr(frame.f_locals[name]))) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['name']]
if arg_info.varargs:
output_lines.append(' - *{0:s} = {1}'.format(arg_info.varargs, frame.f_locals[arg_info.varargs])) # depends on [control=['if'], data=[]]
if arg_info.keywords:
output_lines.append(' - **{0:s} = {1}'.format(arg_info.keywords, frame.f_locals[arg_info.keywords])) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Line block
lines = _extract_lines(filename, frame.f_globals, line_no, 3)
if lines:
output_lines.append('')
prefix = ' '
output_lines.append('{0}{1}'.format(prefix, '\n{0}'.format(prefix).join(lines))) # depends on [control=['if'], data=[]]
return '\n'.join(output_lines) |
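A hedged usage sketch for the formatter above: live frame objects come from the standard library, and `inspect.getargvalues` is the same call the function uses to enumerate arguments. Stdlib only; the output follows the `- name = value` layout produced above.

```python
import inspect

def caller(x, y=2):
    # CPython-specific but standard: grab the currently executing frame.
    frame = inspect.currentframe()
    arg_info = inspect.getargvalues(frame)  # same API used by format_frame_info
    for name in arg_info.args:
        print(" - {0} = {1!r}".format(name, frame.f_locals[name]))

caller(1)
#  - x = 1
#  - y = 2
```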
def execute(self, sql, *args, **kwargs):
"""Executes an SQL INSERT/UPDATE/DELETE query with the given parameters and returns the number of affected rows.
:param sql: statement to execute
:param args: parameters iterable
:param kwargs: parameters iterable
:return: number of affected rows
:rtype: int
"""
self.ensure_connected()
return Statement(self).execute(sql, *args, **kwargs) | def function[execute, parameter[self, sql]]:
constant[Executes an SQL INSERT/UPDATE/DELETE query with the given parameters and returns the number of affected rows.
:param sql: statement to execute
:param args: parameters iterable
:param kwargs: parameters iterable
:return: number of affected rows
:rtype: int
]
call[name[self].ensure_connected, parameter[]]
return[call[call[name[Statement], parameter[name[self]]].execute, parameter[name[sql], <ast.Starred object at 0x7da1b2347a90>]]] | keyword[def] identifier[execute] ( identifier[self] , identifier[sql] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[ensure_connected] ()
keyword[return] identifier[Statement] ( identifier[self] ). identifier[execute] ( identifier[sql] ,* identifier[args] ,** identifier[kwargs] ) | def execute(self, sql, *args, **kwargs):
"""Executes an SQL INSERT/UPDATE/DELETE query with the given parameters and returns the number of affected rows.
:param sql: statement to execute
:param args: parameters iterable
:param kwargs: parameters iterable
:return: number of affected rows
:rtype: int
"""
self.ensure_connected()
return Statement(self).execute(sql, *args, **kwargs) |
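The docstring above promises the affected-row count as the return value. Below is a self-contained analogue of that contract using sqlite3; the class itself delegates to an undefined `Statement` helper, so this is only an illustration, not the source's driver.

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (id INTEGER, active INTEGER)")
conn.executemany("INSERT INTO t VALUES (?, ?)", [(1, 1), (2, 1)])
cur = conn.execute("UPDATE t SET active = ? WHERE active = ?", (0, 1))
print(cur.rowcount)  # -> 2: number of affected rows, as in execute() above
```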
def isSameTypeWith(self, other, matchTags=True, matchConstraints=True):
"""Examine |ASN.1| type for equality with other ASN.1 type.
ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
(:py:mod:`~pyasn1.type.constraint`) are examined when carrying
out ASN.1 types comparison.
Python class inheritance relationship is NOT considered.
Parameters
----------
other: a pyasn1 type object
Class instance representing ASN.1 type.
Returns
-------
: :class:`bool`
:class:`True` if *other* is |ASN.1| type,
:class:`False` otherwise.
"""
return (self is other or
(not matchTags or self.tagSet == other.tagSet) and
(not matchConstraints or self.subtypeSpec == other.subtypeSpec)) | def function[isSameTypeWith, parameter[self, other, matchTags, matchConstraints]]:
constant[Examine |ASN.1| type for equality with other ASN.1 type.
ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
(:py:mod:`~pyasn1.type.constraint`) are examined when carrying
out ASN.1 types comparison.
Python class inheritance relationship is NOT considered.
Parameters
----------
other: a pyasn1 type object
Class instance representing ASN.1 type.
Returns
-------
: :class:`bool`
:class:`True` if *other* is |ASN.1| type,
:class:`False` otherwise.
]
return[<ast.BoolOp object at 0x7da18f721e40>] | keyword[def] identifier[isSameTypeWith] ( identifier[self] , identifier[other] , identifier[matchTags] = keyword[True] , identifier[matchConstraints] = keyword[True] ):
literal[string]
keyword[return] ( identifier[self] keyword[is] identifier[other] keyword[or]
( keyword[not] identifier[matchTags] keyword[or] identifier[self] . identifier[tagSet] == identifier[other] . identifier[tagSet] ) keyword[and]
( keyword[not] identifier[matchConstraints] keyword[or] identifier[self] . identifier[subtypeSpec] == identifier[other] . identifier[subtypeSpec] )) | def isSameTypeWith(self, other, matchTags=True, matchConstraints=True):
"""Examine |ASN.1| type for equality with other ASN.1 type.
ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
(:py:mod:`~pyasn1.type.constraint`) are examined when carrying
out ASN.1 types comparison.
Python class inheritance relationship is NOT considered.
Parameters
----------
other: a pyasn1 type object
Class instance representing ASN.1 type.
Returns
-------
: :class:`bool`
:class:`True` if *other* is |ASN.1| type,
:class:`False` otherwise.
"""
return self is other or ((not matchTags or self.tagSet == other.tagSet) and (not matchConstraints or self.subtypeSpec == other.subtypeSpec)) |
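The return expression above leans on Python operator precedence: `and` binds tighter than `or`, so `self is other` short-circuits the whole comparison before any tag or constraint checks run. A quick demonstration of the grouping:

```python
a, b, c = True, False, False
print(a or b and c)    # True  -- parsed as a or (b and c)
print((a or b) and c)  # False -- explicit grouping changes the result
```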
def iter(self, match="*", count=1000):
""" Iterates the set of keys in :prop:key_prefix in :prop:_client
@match: #str pattern to match after the :prop:key_prefix
        @count: a hint for the amount of work Redis should do at every
        SCAN call when retrieving elements from the collection
-> yields redis keys within this instance
"""
replace_this = self.key_prefix+":"
for key in self._client.scan_iter(
match="{}:{}".format(self.key_prefix, match), count=count):
yield self._decode(key).replace(replace_this, "", 1) | def function[iter, parameter[self, match, count]]:
constant[ Iterates the set of keys in :prop:key_prefix in :prop:_client
@match: #str pattern to match after the :prop:key_prefix
    @count: a hint for the amount of work Redis should do at every
    SCAN call when retrieving elements from the collection
-> yields redis keys within this instance
]
variable[replace_this] assign[=] binary_operation[name[self].key_prefix + constant[:]]
for taget[name[key]] in starred[call[name[self]._client.scan_iter, parameter[]]] begin[:]
<ast.Yield object at 0x7da1b271ff70> | keyword[def] identifier[iter] ( identifier[self] , identifier[match] = literal[string] , identifier[count] = literal[int] ):
literal[string]
identifier[replace_this] = identifier[self] . identifier[key_prefix] + literal[string]
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[_client] . identifier[scan_iter] (
identifier[match] = literal[string] . identifier[format] ( identifier[self] . identifier[key_prefix] , identifier[match] ), identifier[count] = identifier[count] ):
keyword[yield] identifier[self] . identifier[_decode] ( identifier[key] ). identifier[replace] ( identifier[replace_this] , literal[string] , literal[int] ) | def iter(self, match='*', count=1000):
""" Iterates the set of keys in :prop:key_prefix in :prop:_client
@match: #str pattern to match after the :prop:key_prefix
        @count: a hint for the amount of work Redis should do at every
        SCAN call when retrieving elements from the collection
-> yields redis keys within this instance
"""
replace_this = self.key_prefix + ':'
for key in self._client.scan_iter(match='{}:{}'.format(self.key_prefix, match), count=count):
yield self._decode(key).replace(replace_this, '', 1) # depends on [control=['for'], data=['key']] |
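Note the third argument to `str.replace` above: only the first occurrence of the prefix is stripped, so a key that happens to embed the prefix again later keeps it. A stdlib-only check, no Redis connection needed:

```python
key_prefix = "cache"
raw_key = "cache:user:cache:1"
print(raw_key.replace(key_prefix + ":", "", 1))  # -> user:cache:1
```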
def ones(shape, ctx=None, dtype=None, **kwargs):
"""Returns a new array filled with all ones, with the given shape and type.
Parameters
----------
shape : int or tuple of int or list of int
The shape of the empty array.
ctx : Context, optional
An optional device context.
Defaults to the current default context (``mxnet.context.current_context()``).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
A new array of the specified shape filled with all ones.
Examples
--------
>>> mx.nd.ones(1).asnumpy()
array([ 1.], dtype=float32)
>>> mx.nd.ones((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.ones((1,2), dtype='float16').asnumpy()
array([[ 1., 1.]], dtype=float16)
"""
# pylint: disable= unused-argument
if ctx is None:
ctx = current_context()
dtype = mx_real_t if dtype is None else dtype
# pylint: disable= no-member, protected-access
return _internal._ones(shape=shape, ctx=ctx, dtype=dtype, **kwargs) | def function[ones, parameter[shape, ctx, dtype]]:
constant[Returns a new array filled with all ones, with the given shape and type.
Parameters
----------
shape : int or tuple of int or list of int
The shape of the empty array.
ctx : Context, optional
An optional device context.
Defaults to the current default context (``mxnet.context.current_context()``).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
A new array of the specified shape filled with all ones.
Examples
--------
>>> mx.nd.ones(1).asnumpy()
array([ 1.], dtype=float32)
>>> mx.nd.ones((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.ones((1,2), dtype='float16').asnumpy()
array([[ 1., 1.]], dtype=float16)
]
if compare[name[ctx] is constant[None]] begin[:]
variable[ctx] assign[=] call[name[current_context], parameter[]]
variable[dtype] assign[=] <ast.IfExp object at 0x7da1b2008a00>
return[call[name[_internal]._ones, parameter[]]] | keyword[def] identifier[ones] ( identifier[shape] , identifier[ctx] = keyword[None] , identifier[dtype] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[ctx] keyword[is] keyword[None] :
identifier[ctx] = identifier[current_context] ()
identifier[dtype] = identifier[mx_real_t] keyword[if] identifier[dtype] keyword[is] keyword[None] keyword[else] identifier[dtype]
keyword[return] identifier[_internal] . identifier[_ones] ( identifier[shape] = identifier[shape] , identifier[ctx] = identifier[ctx] , identifier[dtype] = identifier[dtype] ,** identifier[kwargs] ) | def ones(shape, ctx=None, dtype=None, **kwargs):
"""Returns a new array filled with all ones, with the given shape and type.
Parameters
----------
shape : int or tuple of int or list of int
The shape of the empty array.
ctx : Context, optional
An optional device context.
Defaults to the current default context (``mxnet.context.current_context()``).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
A new array of the specified shape filled with all ones.
Examples
--------
>>> mx.nd.ones(1).asnumpy()
array([ 1.], dtype=float32)
>>> mx.nd.ones((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.ones((1,2), dtype='float16').asnumpy()
array([[ 1., 1.]], dtype=float16)
"""
# pylint: disable= unused-argument
if ctx is None:
ctx = current_context() # depends on [control=['if'], data=['ctx']]
dtype = mx_real_t if dtype is None else dtype
# pylint: disable= no-member, protected-access
return _internal._ones(shape=shape, ctx=ctx, dtype=dtype, **kwargs) |
def edit_preferences(self, resource):
"""Edit preferences in /usr/cdrouter-data/etc/config.yml.
:param resource: :class:`system.Preferences <system.Preferences>` object
:return: :class:`system.Preferences <system.Preferences>` object
:rtype: system.Preferences
"""
schema = PreferencesSchema()
json = self.service.encode(schema, resource)
schema = PreferencesSchema()
resp = self.service.patch(self.base+'preferences/', json=json)
return self.service.decode(schema, resp) | def function[edit_preferences, parameter[self, resource]]:
constant[Edit preferences in /usr/cdrouter-data/etc/config.yml.
:param resource: :class:`system.Preferences <system.Preferences>` object
:return: :class:`system.Preferences <system.Preferences>` object
:rtype: system.Preferences
]
variable[schema] assign[=] call[name[PreferencesSchema], parameter[]]
variable[json] assign[=] call[name[self].service.encode, parameter[name[schema], name[resource]]]
variable[schema] assign[=] call[name[PreferencesSchema], parameter[]]
variable[resp] assign[=] call[name[self].service.patch, parameter[binary_operation[name[self].base + constant[preferences/]]]]
return[call[name[self].service.decode, parameter[name[schema], name[resp]]]] | keyword[def] identifier[edit_preferences] ( identifier[self] , identifier[resource] ):
literal[string]
identifier[schema] = identifier[PreferencesSchema] ()
identifier[json] = identifier[self] . identifier[service] . identifier[encode] ( identifier[schema] , identifier[resource] )
identifier[schema] = identifier[PreferencesSchema] ()
identifier[resp] = identifier[self] . identifier[service] . identifier[patch] ( identifier[self] . identifier[base] + literal[string] , identifier[json] = identifier[json] )
keyword[return] identifier[self] . identifier[service] . identifier[decode] ( identifier[schema] , identifier[resp] ) | def edit_preferences(self, resource):
"""Edit preferences in /usr/cdrouter-data/etc/config.yml.
:param resource: :class:`system.Preferences <system.Preferences>` object
:return: :class:`system.Preferences <system.Preferences>` object
:rtype: system.Preferences
"""
schema = PreferencesSchema()
json = self.service.encode(schema, resource)
schema = PreferencesSchema()
resp = self.service.patch(self.base + 'preferences/', json=json)
return self.service.decode(schema, resp) |
def get_repo(self, auth, username, repo_name):
"""
        Returns the repository with name ``repo_name`` owned by
the user with username ``username``.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository
:param str repo_name: name of repository
:return: a representation of the retrieved repository
:rtype: GogsRepo
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
"""
path = "/repos/{u}/{r}".format(u=username, r=repo_name)
response = self.get(path, auth=auth)
return GogsRepo.from_json(response.json()) | def function[get_repo, parameter[self, auth, username, repo_name]]:
constant[
    Returns the repository with name ``repo_name`` owned by
the user with username ``username``.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository
:param str repo_name: name of repository
:return: a representation of the retrieved repository
:rtype: GogsRepo
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
]
variable[path] assign[=] call[constant[/repos/{u}/{r}].format, parameter[]]
variable[response] assign[=] call[name[self].get, parameter[name[path]]]
return[call[name[GogsRepo].from_json, parameter[call[name[response].json, parameter[]]]]] | keyword[def] identifier[get_repo] ( identifier[self] , identifier[auth] , identifier[username] , identifier[repo_name] ):
literal[string]
identifier[path] = literal[string] . identifier[format] ( identifier[u] = identifier[username] , identifier[r] = identifier[repo_name] )
identifier[response] = identifier[self] . identifier[get] ( identifier[path] , identifier[auth] = identifier[auth] )
keyword[return] identifier[GogsRepo] . identifier[from_json] ( identifier[response] . identifier[json] ()) | def get_repo(self, auth, username, repo_name):
"""
        Returns the repository with name ``repo_name`` owned by
the user with username ``username``.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository
:param str repo_name: name of repository
:return: a representation of the retrieved repository
:rtype: GogsRepo
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
"""
path = '/repos/{u}/{r}'.format(u=username, r=repo_name)
response = self.get(path, auth=auth)
return GogsRepo.from_json(response.json()) |
def list_tasks(collector):
    """List the available tasks"""
print("Usage: dashmat <task>")
print("")
print("Available tasks to choose from are:")
print("-----------------------------------")
print("")
keygetter = lambda item: item[1].label
tasks = sorted(available_actions.items(), key=keygetter)
sorted_tasks = sorted(list(tasks), key=lambda item: len(item[0]))
max_length = max(len(name) for name, _ in sorted_tasks)
for key, task in sorted_tasks:
desc = dedent(task.__doc__ or "").strip().split('\n')[0]
print("\t{0}{1} :-: {2}".format(" " * (max_length-len(key)), key, desc))
print("") | def function[list_tasks, parameter[collector]]:
    constant[List the available tasks]
call[name[print], parameter[constant[Usage: dashmat <task>]]]
call[name[print], parameter[constant[]]]
call[name[print], parameter[constant[Available tasks to choose from are:]]]
call[name[print], parameter[constant[-----------------------------------]]]
call[name[print], parameter[constant[]]]
variable[keygetter] assign[=] <ast.Lambda object at 0x7da18fe91e70>
variable[tasks] assign[=] call[name[sorted], parameter[call[name[available_actions].items, parameter[]]]]
variable[sorted_tasks] assign[=] call[name[sorted], parameter[call[name[list], parameter[name[tasks]]]]]
variable[max_length] assign[=] call[name[max], parameter[<ast.GeneratorExp object at 0x7da18f00f4f0>]]
for taget[tuple[[<ast.Name object at 0x7da18f00e6b0>, <ast.Name object at 0x7da18f00fdf0>]]] in starred[name[sorted_tasks]] begin[:]
variable[desc] assign[=] call[call[call[call[name[dedent], parameter[<ast.BoolOp object at 0x7da18fe93dc0>]].strip, parameter[]].split, parameter[constant[
]]]][constant[0]]
call[name[print], parameter[call[constant[ {0}{1} :-: {2}].format, parameter[binary_operation[constant[ ] * binary_operation[name[max_length] - call[name[len], parameter[name[key]]]]], name[key], name[desc]]]]]
call[name[print], parameter[constant[]]] | keyword[def] identifier[list_tasks] ( identifier[collector] ):
literal[string]
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[keygetter] = keyword[lambda] identifier[item] : identifier[item] [ literal[int] ]. identifier[label]
identifier[tasks] = identifier[sorted] ( identifier[available_actions] . identifier[items] (), identifier[key] = identifier[keygetter] )
identifier[sorted_tasks] = identifier[sorted] ( identifier[list] ( identifier[tasks] ), identifier[key] = keyword[lambda] identifier[item] : identifier[len] ( identifier[item] [ literal[int] ]))
identifier[max_length] = identifier[max] ( identifier[len] ( identifier[name] ) keyword[for] identifier[name] , identifier[_] keyword[in] identifier[sorted_tasks] )
keyword[for] identifier[key] , identifier[task] keyword[in] identifier[sorted_tasks] :
identifier[desc] = identifier[dedent] ( identifier[task] . identifier[__doc__] keyword[or] literal[string] ). identifier[strip] (). identifier[split] ( literal[string] )[ literal[int] ]
identifier[print] ( literal[string] . identifier[format] ( literal[string] *( identifier[max_length] - identifier[len] ( identifier[key] )), identifier[key] , identifier[desc] ))
identifier[print] ( literal[string] ) | def list_tasks(collector):
    """List the available tasks"""
print('Usage: dashmat <task>')
print('')
print('Available tasks to choose from are:')
print('-----------------------------------')
print('')
keygetter = lambda item: item[1].label
tasks = sorted(available_actions.items(), key=keygetter)
sorted_tasks = sorted(list(tasks), key=lambda item: len(item[0]))
max_length = max((len(name) for (name, _) in sorted_tasks))
for (key, task) in sorted_tasks:
desc = dedent(task.__doc__ or '').strip().split('\n')[0]
print('\t{0}{1} :-: {2}'.format(' ' * (max_length - len(key)), key, desc)) # depends on [control=['for'], data=[]]
print('') |
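The padding arithmetic above right-aligns task names against the longest one. A self-contained sketch with a toy registry follows; the real `available_actions` mapping is not defined in this row, and the label-based pre-sort is omitted.

```python
tasks = {"serve": "Run the server", "list_tasks": "List available tasks"}
max_length = max(len(name) for name in tasks)
for name, desc in sorted(tasks.items(), key=lambda kv: len(kv[0])):
    # Pad with spaces so every name ends at the same column.
    print("\t{0}{1} :-: {2}".format(" " * (max_length - len(name)), name, desc))
#	     serve :-: Run the server
#	list_tasks :-: List available tasks
```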
def remove(self, path, recursive=True):
"""
Remove a file or directory from S3.
:param path: File or directory to remove
:param recursive: Boolean indicator to remove object and children
:return: Boolean indicator denoting success of the removal of 1 or more files
"""
if not self.exists(path):
logger.debug('Could not delete %s; path does not exist', path)
return False
(bucket, key) = self._path_to_bucket_and_key(path)
s3_bucket = self.s3.Bucket(bucket)
# root
if self._is_root(key):
raise InvalidDeleteException('Cannot delete root of bucket at path %s' % path)
# file
if self._exists(bucket, key):
self.s3.meta.client.delete_object(Bucket=bucket, Key=key)
logger.debug('Deleting %s from bucket %s', key, bucket)
return True
if self.isdir(path) and not recursive:
raise InvalidDeleteException('Path %s is a directory. Must use recursive delete' % path)
delete_key_list = [{'Key': obj.key} for obj in s3_bucket.objects.filter(Prefix=self._add_path_delimiter(key))]
# delete the directory marker file if it exists
if self._exists(bucket, '{}{}'.format(key, S3_DIRECTORY_MARKER_SUFFIX_0)):
delete_key_list.append({'Key': '{}{}'.format(key, S3_DIRECTORY_MARKER_SUFFIX_0)})
if len(delete_key_list) > 0:
n = 1000
for i in range(0, len(delete_key_list), n):
self.s3.meta.client.delete_objects(Bucket=bucket, Delete={'Objects': delete_key_list[i: i + n]})
return True
return False | def function[remove, parameter[self, path, recursive]]:
constant[
Remove a file or directory from S3.
:param path: File or directory to remove
:param recursive: Boolean indicator to remove object and children
:return: Boolean indicator denoting success of the removal of 1 or more files
]
if <ast.UnaryOp object at 0x7da1b1f471c0> begin[:]
call[name[logger].debug, parameter[constant[Could not delete %s; path does not exist], name[path]]]
return[constant[False]]
<ast.Tuple object at 0x7da1b1f463b0> assign[=] call[name[self]._path_to_bucket_and_key, parameter[name[path]]]
variable[s3_bucket] assign[=] call[name[self].s3.Bucket, parameter[name[bucket]]]
if call[name[self]._is_root, parameter[name[key]]] begin[:]
<ast.Raise object at 0x7da1b1fa1f30>
if call[name[self]._exists, parameter[name[bucket], name[key]]] begin[:]
call[name[self].s3.meta.client.delete_object, parameter[]]
call[name[logger].debug, parameter[constant[Deleting %s from bucket %s], name[key], name[bucket]]]
return[constant[True]]
if <ast.BoolOp object at 0x7da1b1fa3b20> begin[:]
<ast.Raise object at 0x7da1b1fa1cc0>
variable[delete_key_list] assign[=] <ast.ListComp object at 0x7da1b1fa0d30>
if call[name[self]._exists, parameter[name[bucket], call[constant[{}{}].format, parameter[name[key], name[S3_DIRECTORY_MARKER_SUFFIX_0]]]]] begin[:]
call[name[delete_key_list].append, parameter[dictionary[[<ast.Constant object at 0x7da1b1fa0370>], [<ast.Call object at 0x7da1b1fa32b0>]]]]
if compare[call[name[len], parameter[name[delete_key_list]]] greater[>] constant[0]] begin[:]
variable[n] assign[=] constant[1000]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[delete_key_list]]], name[n]]]] begin[:]
call[name[self].s3.meta.client.delete_objects, parameter[]]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[remove] ( identifier[self] , identifier[path] , identifier[recursive] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[exists] ( identifier[path] ):
identifier[logger] . identifier[debug] ( literal[string] , identifier[path] )
keyword[return] keyword[False]
( identifier[bucket] , identifier[key] )= identifier[self] . identifier[_path_to_bucket_and_key] ( identifier[path] )
identifier[s3_bucket] = identifier[self] . identifier[s3] . identifier[Bucket] ( identifier[bucket] )
keyword[if] identifier[self] . identifier[_is_root] ( identifier[key] ):
keyword[raise] identifier[InvalidDeleteException] ( literal[string] % identifier[path] )
keyword[if] identifier[self] . identifier[_exists] ( identifier[bucket] , identifier[key] ):
identifier[self] . identifier[s3] . identifier[meta] . identifier[client] . identifier[delete_object] ( identifier[Bucket] = identifier[bucket] , identifier[Key] = identifier[key] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[key] , identifier[bucket] )
keyword[return] keyword[True]
keyword[if] identifier[self] . identifier[isdir] ( identifier[path] ) keyword[and] keyword[not] identifier[recursive] :
keyword[raise] identifier[InvalidDeleteException] ( literal[string] % identifier[path] )
identifier[delete_key_list] =[{ literal[string] : identifier[obj] . identifier[key] } keyword[for] identifier[obj] keyword[in] identifier[s3_bucket] . identifier[objects] . identifier[filter] ( identifier[Prefix] = identifier[self] . identifier[_add_path_delimiter] ( identifier[key] ))]
keyword[if] identifier[self] . identifier[_exists] ( identifier[bucket] , literal[string] . identifier[format] ( identifier[key] , identifier[S3_DIRECTORY_MARKER_SUFFIX_0] )):
identifier[delete_key_list] . identifier[append] ({ literal[string] : literal[string] . identifier[format] ( identifier[key] , identifier[S3_DIRECTORY_MARKER_SUFFIX_0] )})
keyword[if] identifier[len] ( identifier[delete_key_list] )> literal[int] :
identifier[n] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[delete_key_list] ), identifier[n] ):
identifier[self] . identifier[s3] . identifier[meta] . identifier[client] . identifier[delete_objects] ( identifier[Bucket] = identifier[bucket] , identifier[Delete] ={ literal[string] : identifier[delete_key_list] [ identifier[i] : identifier[i] + identifier[n] ]})
keyword[return] keyword[True]
keyword[return] keyword[False] | def remove(self, path, recursive=True):
"""
Remove a file or directory from S3.
:param path: File or directory to remove
:param recursive: Boolean indicator to remove object and children
:return: Boolean indicator denoting success of the removal of 1 or more files
"""
if not self.exists(path):
logger.debug('Could not delete %s; path does not exist', path)
return False # depends on [control=['if'], data=[]]
(bucket, key) = self._path_to_bucket_and_key(path)
s3_bucket = self.s3.Bucket(bucket)
# root
if self._is_root(key):
raise InvalidDeleteException('Cannot delete root of bucket at path %s' % path) # depends on [control=['if'], data=[]]
# file
if self._exists(bucket, key):
self.s3.meta.client.delete_object(Bucket=bucket, Key=key)
logger.debug('Deleting %s from bucket %s', key, bucket)
return True # depends on [control=['if'], data=[]]
if self.isdir(path) and (not recursive):
raise InvalidDeleteException('Path %s is a directory. Must use recursive delete' % path) # depends on [control=['if'], data=[]]
delete_key_list = [{'Key': obj.key} for obj in s3_bucket.objects.filter(Prefix=self._add_path_delimiter(key))]
# delete the directory marker file if it exists
if self._exists(bucket, '{}{}'.format(key, S3_DIRECTORY_MARKER_SUFFIX_0)):
delete_key_list.append({'Key': '{}{}'.format(key, S3_DIRECTORY_MARKER_SUFFIX_0)}) # depends on [control=['if'], data=[]]
if len(delete_key_list) > 0:
n = 1000
for i in range(0, len(delete_key_list), n):
self.s3.meta.client.delete_objects(Bucket=bucket, Delete={'Objects': delete_key_list[i:i + n]}) # depends on [control=['for'], data=['i']]
return True # depends on [control=['if'], data=[]]
return False |
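The `n = 1000` batching above matches the S3 DeleteObjects limit of 1,000 keys per request. The chunking itself is plain slicing and can be verified without boto3:

```python
delete_key_list = [{"Key": "obj-{}".format(i)} for i in range(2500)]
n = 1000
batches = [delete_key_list[i:i + n] for i in range(0, len(delete_key_list), n)]
print([len(b) for b in batches])  # -> [1000, 1000, 500]
```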
def generate_bug_changes(self, startday, endday, alt_startday, alt_endday):
"""Returns a list of dicts containing a bug id, a bug comment (only
for bugs whose total number of daily or weekly occurrences meet
the appropriate threshold) and potentially an updated whiteboard
or priority status."""
bug_stats, bug_ids = self.get_bug_stats(startday, endday)
alt_date_bug_totals = self.get_alt_date_bug_totals(alt_startday, alt_endday, bug_ids)
test_run_count = self.get_test_runs(startday, endday)
# if fetch_bug_details fails, None is returned
bug_info = self.fetch_all_bug_details(bug_ids)
all_bug_changes = []
template = Template(self.open_file('comment.template', False))
if self.weekly_mode:
top_bugs = [bug[0] for bug in sorted(bug_stats.items(), key=lambda x: x[1]['total'],
reverse=True)][:50]
for bug_id, counts in bug_stats.items():
change_priority = None
change_whiteboard = None
priority = 0
rank = top_bugs.index(bug_id)+1 if self.weekly_mode and bug_id in top_bugs else None
if bug_info and bug_id in bug_info:
if self.weekly_mode:
priority = self.assign_priority(counts)
if priority == 2:
change_priority, change_whiteboard = self.check_needswork_owner(bug_info[bug_id])
# change [stockwell needswork] to [stockwell unknown] when failures drop below 20 failures/week
# if this block is true, it implies a priority of 0 (mutually exclusive to previous block)
if (counts['total'] < 20):
change_whiteboard = self.check_needswork(bug_info[bug_id]['whiteboard'])
else:
change_priority, change_whiteboard = self.check_needswork_owner(bug_info[bug_id])
# recommend disabling when more than 150 failures tracked over 21 days and
            # takes precedence over any previous change_whiteboard assignments
if (bug_id in alt_date_bug_totals and not self.check_whiteboard_status(bug_info[bug_id]['whiteboard'])):
priority = 3
change_whiteboard = self.update_whiteboard(bug_info[bug_id]['whiteboard'], '[stockwell disable-recommended]')
comment = template.render(bug_id=bug_id,
total=counts['total'],
test_run_count=test_run_count,
rank=rank,
priority=priority,
failure_rate=round(counts['total']/float(test_run_count), 3),
repositories=counts['per_repository'],
platforms=counts['per_platform'],
startday=startday,
endday=endday.split()[0],
weekly_mode=self.weekly_mode)
bug_changes = {'bug_id': bug_id,
'changes': {
'comment': {'body': comment}
}
}
if change_whiteboard:
bug_changes['changes']['whiteboard'] = change_whiteboard
if change_priority:
bug_changes['changes']['priority'] = change_priority
all_bug_changes.append(bug_changes)
return all_bug_changes | def function[generate_bug_changes, parameter[self, startday, endday, alt_startday, alt_endday]]:
constant[Returns a list of dicts containing a bug id, a bug comment (only
for bugs whose total number of daily or weekly occurrences meet
the appropriate threshold) and potentially an updated whiteboard
or priority status.]
<ast.Tuple object at 0x7da1b086ae60> assign[=] call[name[self].get_bug_stats, parameter[name[startday], name[endday]]]
variable[alt_date_bug_totals] assign[=] call[name[self].get_alt_date_bug_totals, parameter[name[alt_startday], name[alt_endday], name[bug_ids]]]
variable[test_run_count] assign[=] call[name[self].get_test_runs, parameter[name[startday], name[endday]]]
variable[bug_info] assign[=] call[name[self].fetch_all_bug_details, parameter[name[bug_ids]]]
variable[all_bug_changes] assign[=] list[[]]
variable[template] assign[=] call[name[Template], parameter[call[name[self].open_file, parameter[constant[comment.template], constant[False]]]]]
if name[self].weekly_mode begin[:]
variable[top_bugs] assign[=] call[<ast.ListComp object at 0x7da1b086b940>][<ast.Slice object at 0x7da1b086a980>]
for taget[tuple[[<ast.Name object at 0x7da1b086a200>, <ast.Name object at 0x7da1b086b280>]]] in starred[call[name[bug_stats].items, parameter[]]] begin[:]
variable[change_priority] assign[=] constant[None]
variable[change_whiteboard] assign[=] constant[None]
variable[priority] assign[=] constant[0]
variable[rank] assign[=] <ast.IfExp object at 0x7da1b08696c0>
if <ast.BoolOp object at 0x7da1b086a170> begin[:]
if name[self].weekly_mode begin[:]
variable[priority] assign[=] call[name[self].assign_priority, parameter[name[counts]]]
if compare[name[priority] equal[==] constant[2]] begin[:]
<ast.Tuple object at 0x7da1b086a0e0> assign[=] call[name[self].check_needswork_owner, parameter[call[name[bug_info]][name[bug_id]]]]
if compare[call[name[counts]][constant[total]] less[<] constant[20]] begin[:]
variable[change_whiteboard] assign[=] call[name[self].check_needswork, parameter[call[call[name[bug_info]][name[bug_id]]][constant[whiteboard]]]]
if <ast.BoolOp object at 0x7da1b086b730> begin[:]
variable[priority] assign[=] constant[3]
variable[change_whiteboard] assign[=] call[name[self].update_whiteboard, parameter[call[call[name[bug_info]][name[bug_id]]][constant[whiteboard]], constant[[stockwell disable-recommended]]]]
variable[comment] assign[=] call[name[template].render, parameter[]]
variable[bug_changes] assign[=] dictionary[[<ast.Constant object at 0x7da1b060f760>, <ast.Constant object at 0x7da1b060e5c0>], [<ast.Name object at 0x7da1b060c160>, <ast.Dict object at 0x7da1b060e1d0>]]
if name[change_whiteboard] begin[:]
call[call[name[bug_changes]][constant[changes]]][constant[whiteboard]] assign[=] name[change_whiteboard]
if name[change_priority] begin[:]
call[call[name[bug_changes]][constant[changes]]][constant[priority]] assign[=] name[change_priority]
call[name[all_bug_changes].append, parameter[name[bug_changes]]]
return[name[all_bug_changes]] | keyword[def] identifier[generate_bug_changes] ( identifier[self] , identifier[startday] , identifier[endday] , identifier[alt_startday] , identifier[alt_endday] ):
literal[string]
identifier[bug_stats] , identifier[bug_ids] = identifier[self] . identifier[get_bug_stats] ( identifier[startday] , identifier[endday] )
identifier[alt_date_bug_totals] = identifier[self] . identifier[get_alt_date_bug_totals] ( identifier[alt_startday] , identifier[alt_endday] , identifier[bug_ids] )
identifier[test_run_count] = identifier[self] . identifier[get_test_runs] ( identifier[startday] , identifier[endday] )
identifier[bug_info] = identifier[self] . identifier[fetch_all_bug_details] ( identifier[bug_ids] )
identifier[all_bug_changes] =[]
identifier[template] = identifier[Template] ( identifier[self] . identifier[open_file] ( literal[string] , keyword[False] ))
keyword[if] identifier[self] . identifier[weekly_mode] :
identifier[top_bugs] =[ identifier[bug] [ literal[int] ] keyword[for] identifier[bug] keyword[in] identifier[sorted] ( identifier[bug_stats] . identifier[items] (), identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ][ literal[string] ],
identifier[reverse] = keyword[True] )][: literal[int] ]
keyword[for] identifier[bug_id] , identifier[counts] keyword[in] identifier[bug_stats] . identifier[items] ():
identifier[change_priority] = keyword[None]
identifier[change_whiteboard] = keyword[None]
identifier[priority] = literal[int]
identifier[rank] = identifier[top_bugs] . identifier[index] ( identifier[bug_id] )+ literal[int] keyword[if] identifier[self] . identifier[weekly_mode] keyword[and] identifier[bug_id] keyword[in] identifier[top_bugs] keyword[else] keyword[None]
keyword[if] identifier[bug_info] keyword[and] identifier[bug_id] keyword[in] identifier[bug_info] :
keyword[if] identifier[self] . identifier[weekly_mode] :
identifier[priority] = identifier[self] . identifier[assign_priority] ( identifier[counts] )
keyword[if] identifier[priority] == literal[int] :
identifier[change_priority] , identifier[change_whiteboard] = identifier[self] . identifier[check_needswork_owner] ( identifier[bug_info] [ identifier[bug_id] ])
keyword[if] ( identifier[counts] [ literal[string] ]< literal[int] ):
identifier[change_whiteboard] = identifier[self] . identifier[check_needswork] ( identifier[bug_info] [ identifier[bug_id] ][ literal[string] ])
keyword[else] :
identifier[change_priority] , identifier[change_whiteboard] = identifier[self] . identifier[check_needswork_owner] ( identifier[bug_info] [ identifier[bug_id] ])
keyword[if] ( identifier[bug_id] keyword[in] identifier[alt_date_bug_totals] keyword[and] keyword[not] identifier[self] . identifier[check_whiteboard_status] ( identifier[bug_info] [ identifier[bug_id] ][ literal[string] ])):
identifier[priority] = literal[int]
identifier[change_whiteboard] = identifier[self] . identifier[update_whiteboard] ( identifier[bug_info] [ identifier[bug_id] ][ literal[string] ], literal[string] )
identifier[comment] = identifier[template] . identifier[render] ( identifier[bug_id] = identifier[bug_id] ,
identifier[total] = identifier[counts] [ literal[string] ],
identifier[test_run_count] = identifier[test_run_count] ,
identifier[rank] = identifier[rank] ,
identifier[priority] = identifier[priority] ,
identifier[failure_rate] = identifier[round] ( identifier[counts] [ literal[string] ]/ identifier[float] ( identifier[test_run_count] ), literal[int] ),
identifier[repositories] = identifier[counts] [ literal[string] ],
identifier[platforms] = identifier[counts] [ literal[string] ],
identifier[startday] = identifier[startday] ,
identifier[endday] = identifier[endday] . identifier[split] ()[ literal[int] ],
identifier[weekly_mode] = identifier[self] . identifier[weekly_mode] )
identifier[bug_changes] ={ literal[string] : identifier[bug_id] ,
literal[string] :{
literal[string] :{ literal[string] : identifier[comment] }
}
}
keyword[if] identifier[change_whiteboard] :
identifier[bug_changes] [ literal[string] ][ literal[string] ]= identifier[change_whiteboard]
keyword[if] identifier[change_priority] :
identifier[bug_changes] [ literal[string] ][ literal[string] ]= identifier[change_priority]
identifier[all_bug_changes] . identifier[append] ( identifier[bug_changes] )
keyword[return] identifier[all_bug_changes] | def generate_bug_changes(self, startday, endday, alt_startday, alt_endday):
"""Returns a list of dicts containing a bug id, a bug comment (only
for bugs whose total number of daily or weekly occurrences meet
the appropriate threshold) and potentially an updated whiteboard
or priority status."""
(bug_stats, bug_ids) = self.get_bug_stats(startday, endday)
alt_date_bug_totals = self.get_alt_date_bug_totals(alt_startday, alt_endday, bug_ids)
test_run_count = self.get_test_runs(startday, endday)
# if fetch_bug_details fails, None is returned
bug_info = self.fetch_all_bug_details(bug_ids)
all_bug_changes = []
template = Template(self.open_file('comment.template', False))
if self.weekly_mode:
top_bugs = [bug[0] for bug in sorted(bug_stats.items(), key=lambda x: x[1]['total'], reverse=True)][:50] # depends on [control=['if'], data=[]]
for (bug_id, counts) in bug_stats.items():
change_priority = None
change_whiteboard = None
priority = 0
rank = top_bugs.index(bug_id) + 1 if self.weekly_mode and bug_id in top_bugs else None
if bug_info and bug_id in bug_info:
if self.weekly_mode:
priority = self.assign_priority(counts)
if priority == 2:
(change_priority, change_whiteboard) = self.check_needswork_owner(bug_info[bug_id]) # depends on [control=['if'], data=[]]
# change [stockwell needswork] to [stockwell unknown] when failures drop below 20 failures/week
# if this block is true, it implies a priority of 0 (mutually exclusive to previous block)
if counts['total'] < 20:
change_whiteboard = self.check_needswork(bug_info[bug_id]['whiteboard']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
(change_priority, change_whiteboard) = self.check_needswork_owner(bug_info[bug_id])
# recommend disabling when more than 150 failures tracked over 21 days and
                # takes precedence over any previous change_whiteboard assignments
if bug_id in alt_date_bug_totals and (not self.check_whiteboard_status(bug_info[bug_id]['whiteboard'])):
priority = 3
change_whiteboard = self.update_whiteboard(bug_info[bug_id]['whiteboard'], '[stockwell disable-recommended]') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
comment = template.render(bug_id=bug_id, total=counts['total'], test_run_count=test_run_count, rank=rank, priority=priority, failure_rate=round(counts['total'] / float(test_run_count), 3), repositories=counts['per_repository'], platforms=counts['per_platform'], startday=startday, endday=endday.split()[0], weekly_mode=self.weekly_mode)
bug_changes = {'bug_id': bug_id, 'changes': {'comment': {'body': comment}}}
if change_whiteboard:
bug_changes['changes']['whiteboard'] = change_whiteboard # depends on [control=['if'], data=[]]
if change_priority:
bug_changes['changes']['priority'] = change_priority # depends on [control=['if'], data=[]]
all_bug_changes.append(bug_changes) # depends on [control=['for'], data=[]]
return all_bug_changes |
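The `failure_rate` passed to the template above is failures per test run, rounded to three places. With illustrative numbers, not taken from the source:

```python
total, test_run_count = 37, 2150
failure_rate = round(total / float(test_run_count), 3)
print(failure_rate)  # -> 0.017
```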
def get_repos(config, force=False):
    """Return a :py:obj:`list` of repos from the config file.
:param config: the repos config in :py:class:`dict` format.
:param bool force: Force aggregate dirty repos or not.
:type config: dict
:rtype: list
"""
repo_list = []
for directory, repo_data in config.items():
if not os.path.isabs(directory):
directory = os.path.abspath(directory)
repo_dict = {
'cwd': directory,
'defaults': repo_data.get('defaults', dict()),
'force': force,
}
remote_names = set()
if 'remotes' in repo_data:
repo_dict['remotes'] = []
remotes_data = repo_data['remotes'] or {}
for remote_name, url in remotes_data.items():
if not url:
raise ConfigException(
'%s: No url defined for remote %s.' %
(directory, remote_name))
remote_dict = {
'name': remote_name,
'url': url
}
repo_dict['remotes'].append(remote_dict)
remote_names.add(remote_name)
if not remote_names:
raise ConfigException(
'%s: You should at least define one remote.' % directory)
else:
raise ConfigException('%s: remotes is not defined.' % directory)
if 'merges' in repo_data:
merges = []
merge_data = repo_data.get('merges') or []
for merge in merge_data:
try:
# Assume parts is a str
parts = merge.split(' ')
if len(parts) != 2:
raise ConfigException(
'%s: Merge must be formatted as '
'"remote_name ref".' % directory)
merge = {
"remote": parts[0],
"ref": parts[1],
}
except AttributeError:
# Parts is a dict
try:
merge["remote"] = str(merge["remote"])
merge["ref"] = str(merge["ref"])
except KeyError:
raise ConfigException(
'%s: Merge lacks mandatory '
'`remote` or `ref` keys.' % directory)
# Check remote is available
if merge["remote"] not in remote_names:
raise ConfigException(
'%s: Merge remote %s not defined in remotes.' %
(directory, merge["remote"]))
merges.append(merge)
repo_dict['merges'] = merges
if not merges:
raise ConfigException(
'%s: You should at least define one merge.' % directory)
else:
raise ConfigException(
'%s: merges is not defined.' % directory)
# Only fetch required remotes by default
repo_dict["fetch_all"] = repo_data.get("fetch_all", False)
if isinstance(repo_dict["fetch_all"], string_types):
repo_dict["fetch_all"] = frozenset((repo_dict["fetch_all"],))
elif isinstance(repo_dict["fetch_all"], list):
repo_dict["fetch_all"] = frozenset(repo_dict["fetch_all"])
if 'target' not in repo_data:
raise ConfigException('%s: No target defined.' % directory)
        parts = (repo_data.get('target') or "").split(' ')
if len(parts) != 2:
raise ConfigException(
'%s: Target must be formatted as '
'"remote_name branch_name"' % directory)
remote_name, branch = repo_data.get('target').split(' ')
if remote_name not in remote_names:
raise ConfigException(
'%s: Target remote %s not defined in remotes.' %
(directory, remote_name))
repo_dict['target'] = {
'remote': remote_name,
'branch': branch,
}
commands = []
if 'shell_command_after' in repo_data:
cmds = repo_data['shell_command_after']
# if str: turn to list
if cmds:
if isinstance(cmds, string_types):
cmds = [cmds]
commands = cmds
repo_dict['shell_command_after'] = commands
repo_list.append(repo_dict)
return repo_list | def function[get_repos, parameter[config, force]]:
constant[Return a :py:obj:`list` list of repos from config file.
:param config: the repos config in :py:class:`dict` format.
:param bool force: Force aggregate dirty repos or not.
:type config: dict
:rtype: list
]
variable[repo_list] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b03da650>, <ast.Name object at 0x7da1b03db640>]]] in starred[call[name[config].items, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b03dbdf0> begin[:]
variable[directory] assign[=] call[name[os].path.abspath, parameter[name[directory]]]
variable[repo_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b03db160>, <ast.Constant object at 0x7da1b03db190>, <ast.Constant object at 0x7da1b03dac20>], [<ast.Name object at 0x7da1b03db130>, <ast.Call object at 0x7da1b03db250>, <ast.Name object at 0x7da1b03db8b0>]]
variable[remote_names] assign[=] call[name[set], parameter[]]
if compare[constant[remotes] in name[repo_data]] begin[:]
call[name[repo_dict]][constant[remotes]] assign[=] list[[]]
variable[remotes_data] assign[=] <ast.BoolOp object at 0x7da1b03dbbe0>
for taget[tuple[[<ast.Name object at 0x7da1b03daf80>, <ast.Name object at 0x7da1b03dad10>]]] in starred[call[name[remotes_data].items, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b03dad40> begin[:]
<ast.Raise object at 0x7da1b03daef0>
variable[remote_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b03da3e0>, <ast.Constant object at 0x7da1b03db520>], [<ast.Name object at 0x7da1b03dae30>, <ast.Name object at 0x7da1b03da950>]]
call[call[name[repo_dict]][constant[remotes]].append, parameter[name[remote_dict]]]
call[name[remote_names].add, parameter[name[remote_name]]]
if <ast.UnaryOp object at 0x7da1b03dba60> begin[:]
<ast.Raise object at 0x7da1b03db880>
if compare[constant[merges] in name[repo_data]] begin[:]
variable[merges] assign[=] list[[]]
variable[merge_data] assign[=] <ast.BoolOp object at 0x7da1b03db790>
for taget[name[merge]] in starred[name[merge_data]] begin[:]
<ast.Try object at 0x7da1b03da560>
if compare[call[name[merge]][constant[remote]] <ast.NotIn object at 0x7da2590d7190> name[remote_names]] begin[:]
<ast.Raise object at 0x7da1b02bd8a0>
call[name[merges].append, parameter[name[merge]]]
call[name[repo_dict]][constant[merges]] assign[=] name[merges]
if <ast.UnaryOp object at 0x7da1b02bd120> begin[:]
<ast.Raise object at 0x7da1b02bd000>
call[name[repo_dict]][constant[fetch_all]] assign[=] call[name[repo_data].get, parameter[constant[fetch_all], constant[False]]]
if call[name[isinstance], parameter[call[name[repo_dict]][constant[fetch_all]], name[string_types]]] begin[:]
call[name[repo_dict]][constant[fetch_all]] assign[=] call[name[frozenset], parameter[tuple[[<ast.Subscript object at 0x7da1b02bfe50>]]]]
if compare[constant[target] <ast.NotIn object at 0x7da2590d7190> name[repo_data]] begin[:]
<ast.Raise object at 0x7da1b02bc790>
variable[parts] assign[=] call[<ast.BoolOp object at 0x7da1b02bcb20>.split, parameter[constant[ ]]]
if compare[call[name[len], parameter[name[parts]]] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da1b02bc7f0>
<ast.Tuple object at 0x7da1b02bcbe0> assign[=] call[call[name[repo_data].get, parameter[constant[target]]].split, parameter[constant[ ]]]
if compare[name[remote_name] <ast.NotIn object at 0x7da2590d7190> name[remote_names]] begin[:]
<ast.Raise object at 0x7da1b02beef0>
call[name[repo_dict]][constant[target]] assign[=] dictionary[[<ast.Constant object at 0x7da1b02bded0>, <ast.Constant object at 0x7da1b02bdf00>], [<ast.Name object at 0x7da1b02be170>, <ast.Name object at 0x7da1b02bdde0>]]
variable[commands] assign[=] list[[]]
if compare[constant[shell_command_after] in name[repo_data]] begin[:]
variable[cmds] assign[=] call[name[repo_data]][constant[shell_command_after]]
if name[cmds] begin[:]
if call[name[isinstance], parameter[name[cmds], name[string_types]]] begin[:]
variable[cmds] assign[=] list[[<ast.Name object at 0x7da1b02bf670>]]
variable[commands] assign[=] name[cmds]
call[name[repo_dict]][constant[shell_command_after]] assign[=] name[commands]
call[name[repo_list].append, parameter[name[repo_dict]]]
return[name[repo_list]] | keyword[def] identifier[get_repos] ( identifier[config] , identifier[force] = keyword[False] ):
literal[string]
identifier[repo_list] =[]
keyword[for] identifier[directory] , identifier[repo_data] keyword[in] identifier[config] . identifier[items] ():
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isabs] ( identifier[directory] ):
identifier[directory] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[directory] )
identifier[repo_dict] ={
literal[string] : identifier[directory] ,
literal[string] : identifier[repo_data] . identifier[get] ( literal[string] , identifier[dict] ()),
literal[string] : identifier[force] ,
}
identifier[remote_names] = identifier[set] ()
keyword[if] literal[string] keyword[in] identifier[repo_data] :
identifier[repo_dict] [ literal[string] ]=[]
identifier[remotes_data] = identifier[repo_data] [ literal[string] ] keyword[or] {}
keyword[for] identifier[remote_name] , identifier[url] keyword[in] identifier[remotes_data] . identifier[items] ():
keyword[if] keyword[not] identifier[url] :
keyword[raise] identifier[ConfigException] (
literal[string] %
( identifier[directory] , identifier[remote_name] ))
identifier[remote_dict] ={
literal[string] : identifier[remote_name] ,
literal[string] : identifier[url]
}
identifier[repo_dict] [ literal[string] ]. identifier[append] ( identifier[remote_dict] )
identifier[remote_names] . identifier[add] ( identifier[remote_name] )
keyword[if] keyword[not] identifier[remote_names] :
keyword[raise] identifier[ConfigException] (
literal[string] % identifier[directory] )
keyword[else] :
keyword[raise] identifier[ConfigException] ( literal[string] % identifier[directory] )
keyword[if] literal[string] keyword[in] identifier[repo_data] :
identifier[merges] =[]
identifier[merge_data] = identifier[repo_data] . identifier[get] ( literal[string] ) keyword[or] []
keyword[for] identifier[merge] keyword[in] identifier[merge_data] :
keyword[try] :
identifier[parts] = identifier[merge] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[parts] )!= literal[int] :
keyword[raise] identifier[ConfigException] (
literal[string]
literal[string] % identifier[directory] )
identifier[merge] ={
literal[string] : identifier[parts] [ literal[int] ],
literal[string] : identifier[parts] [ literal[int] ],
}
keyword[except] identifier[AttributeError] :
keyword[try] :
identifier[merge] [ literal[string] ]= identifier[str] ( identifier[merge] [ literal[string] ])
identifier[merge] [ literal[string] ]= identifier[str] ( identifier[merge] [ literal[string] ])
keyword[except] identifier[KeyError] :
keyword[raise] identifier[ConfigException] (
literal[string]
literal[string] % identifier[directory] )
keyword[if] identifier[merge] [ literal[string] ] keyword[not] keyword[in] identifier[remote_names] :
keyword[raise] identifier[ConfigException] (
literal[string] %
( identifier[directory] , identifier[merge] [ literal[string] ]))
identifier[merges] . identifier[append] ( identifier[merge] )
identifier[repo_dict] [ literal[string] ]= identifier[merges]
keyword[if] keyword[not] identifier[merges] :
keyword[raise] identifier[ConfigException] (
literal[string] % identifier[directory] )
keyword[else] :
keyword[raise] identifier[ConfigException] (
literal[string] % identifier[directory] )
identifier[repo_dict] [ literal[string] ]= identifier[repo_data] . identifier[get] ( literal[string] , keyword[False] )
keyword[if] identifier[isinstance] ( identifier[repo_dict] [ literal[string] ], identifier[string_types] ):
identifier[repo_dict] [ literal[string] ]= identifier[frozenset] (( identifier[repo_dict] [ literal[string] ],))
keyword[elif] identifier[isinstance] ( identifier[repo_dict] [ literal[string] ], identifier[list] ):
identifier[repo_dict] [ literal[string] ]= identifier[frozenset] ( identifier[repo_dict] [ literal[string] ])
keyword[if] literal[string] keyword[not] keyword[in] identifier[repo_data] :
keyword[raise] identifier[ConfigException] ( literal[string] % identifier[directory] )
identifier[parts] =( identifier[repo_data] . identifier[get] ( literal[string] ) keyword[or] literal[string] ). identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[parts] )!= literal[int] :
keyword[raise] identifier[ConfigException] (
literal[string]
literal[string] % identifier[directory] )
identifier[remote_name] , identifier[branch] = identifier[repo_data] . identifier[get] ( literal[string] ). identifier[split] ( literal[string] )
keyword[if] identifier[remote_name] keyword[not] keyword[in] identifier[remote_names] :
keyword[raise] identifier[ConfigException] (
literal[string] %
( identifier[directory] , identifier[remote_name] ))
identifier[repo_dict] [ literal[string] ]={
literal[string] : identifier[remote_name] ,
literal[string] : identifier[branch] ,
}
identifier[commands] =[]
keyword[if] literal[string] keyword[in] identifier[repo_data] :
identifier[cmds] = identifier[repo_data] [ literal[string] ]
keyword[if] identifier[cmds] :
keyword[if] identifier[isinstance] ( identifier[cmds] , identifier[string_types] ):
identifier[cmds] =[ identifier[cmds] ]
identifier[commands] = identifier[cmds]
identifier[repo_dict] [ literal[string] ]= identifier[commands]
identifier[repo_list] . identifier[append] ( identifier[repo_dict] )
keyword[return] identifier[repo_list] | def get_repos(config, force=False):
"""Return a :py:obj:`list` list of repos from config file.
:param config: the repos config in :py:class:`dict` format.
    :param bool force: whether to force aggregation of dirty repos.
:type config: dict
:rtype: list
"""
repo_list = []
for (directory, repo_data) in config.items():
if not os.path.isabs(directory):
directory = os.path.abspath(directory) # depends on [control=['if'], data=[]]
repo_dict = {'cwd': directory, 'defaults': repo_data.get('defaults', dict()), 'force': force}
remote_names = set()
if 'remotes' in repo_data:
repo_dict['remotes'] = []
remotes_data = repo_data['remotes'] or {}
for (remote_name, url) in remotes_data.items():
if not url:
raise ConfigException('%s: No url defined for remote %s.' % (directory, remote_name)) # depends on [control=['if'], data=[]]
remote_dict = {'name': remote_name, 'url': url}
repo_dict['remotes'].append(remote_dict)
remote_names.add(remote_name) # depends on [control=['for'], data=[]]
if not remote_names:
raise ConfigException('%s: You should at least define one remote.' % directory) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['repo_data']]
else:
raise ConfigException('%s: remotes is not defined.' % directory)
if 'merges' in repo_data:
merges = []
merge_data = repo_data.get('merges') or []
for merge in merge_data:
try:
# Assume parts is a str
parts = merge.split(' ')
if len(parts) != 2:
raise ConfigException('%s: Merge must be formatted as "remote_name ref".' % directory) # depends on [control=['if'], data=[]]
merge = {'remote': parts[0], 'ref': parts[1]} # depends on [control=['try'], data=[]]
except AttributeError:
# Parts is a dict
try:
merge['remote'] = str(merge['remote'])
merge['ref'] = str(merge['ref']) # depends on [control=['try'], data=[]]
except KeyError:
raise ConfigException('%s: Merge lacks mandatory `remote` or `ref` keys.' % directory) # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
# Check remote is available
if merge['remote'] not in remote_names:
raise ConfigException('%s: Merge remote %s not defined in remotes.' % (directory, merge['remote'])) # depends on [control=['if'], data=[]]
merges.append(merge) # depends on [control=['for'], data=['merge']]
repo_dict['merges'] = merges
if not merges:
raise ConfigException('%s: You should at least define one merge.' % directory) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['repo_data']]
else:
raise ConfigException('%s: merges is not defined.' % directory)
# Only fetch required remotes by default
repo_dict['fetch_all'] = repo_data.get('fetch_all', False)
if isinstance(repo_dict['fetch_all'], string_types):
repo_dict['fetch_all'] = frozenset((repo_dict['fetch_all'],)) # depends on [control=['if'], data=[]]
elif isinstance(repo_dict['fetch_all'], list):
repo_dict['fetch_all'] = frozenset(repo_dict['fetch_all']) # depends on [control=['if'], data=[]]
if 'target' not in repo_data:
raise ConfigException('%s: No target defined.' % directory) # depends on [control=['if'], data=[]]
parts = (repo_data.get('target') or '').split(' ')
if len(parts) != 2:
raise ConfigException('%s: Target must be formatted as "remote_name branch_name"' % directory) # depends on [control=['if'], data=[]]
(remote_name, branch) = repo_data.get('target').split(' ')
if remote_name not in remote_names:
raise ConfigException('%s: Target remote %s not defined in remotes.' % (directory, remote_name)) # depends on [control=['if'], data=['remote_name']]
repo_dict['target'] = {'remote': remote_name, 'branch': branch}
commands = []
if 'shell_command_after' in repo_data:
cmds = repo_data['shell_command_after']
# if str: turn to list
if cmds:
if isinstance(cmds, string_types):
cmds = [cmds] # depends on [control=['if'], data=[]]
commands = cmds # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['repo_data']]
repo_dict['shell_command_after'] = commands
repo_list.append(repo_dict) # depends on [control=['for'], data=[]]
return repo_list |
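
A minimal, hypothetical usage sketch for get_repos above; the directory, URLs, and branch names are invented placeholders, and the config shape follows the keys the parser checks (remotes, merges, target, optional shell_command_after):

config = {
    '/home/me/work/myrepo': {
        'remotes': {
            'origin': 'https://example.com/upstream/myrepo.git',
            'fork': 'https://example.com/me/myrepo.git',
        },
        # Each merge is "remote_name ref" and must name a defined remote.
        'merges': ['origin master', 'fork feature-x'],
        # Target is "remote_name branch_name".
        'target': 'fork aggregated',
        'shell_command_after': 'echo done',  # a str is normalized to [str]
    },
}

for repo in get_repos(config, force=True):
    print(repo['cwd'], repo['target'], repo['shell_command_after'])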
def ReleaseProcessedFlow(self, flow_obj, cursor=None):
"""Releases a flow that the worker was processing to the database."""
update_query = """
UPDATE flows
LEFT OUTER JOIN (
SELECT client_id, flow_id, needs_processing
FROM flow_requests
WHERE
client_id = %(client_id)s AND
flow_id = %(flow_id)s AND
request_id = %(next_request_to_process)s AND
needs_processing
) AS needs_processing
ON
flows.client_id = needs_processing.client_id AND
flows.flow_id = needs_processing.flow_id
SET
flows.flow = %(flow)s,
flows.processing_on = NULL,
flows.processing_since = NULL,
flows.processing_deadline = NULL,
flows.next_request_to_process = %(next_request_to_process)s,
flows.flow_state = %(flow_state)s,
flows.user_cpu_time_used_micros = %(user_cpu_time_used_micros)s,
flows.system_cpu_time_used_micros = %(system_cpu_time_used_micros)s,
flows.network_bytes_sent = %(network_bytes_sent)s,
flows.num_replies_sent = %(num_replies_sent)s,
flows.last_update = NOW(6)
WHERE
flows.client_id = %(client_id)s AND
flows.flow_id = %(flow_id)s AND (
needs_processing.needs_processing = FALSE OR
needs_processing.needs_processing IS NULL)
"""
clone = flow_obj.Copy()
clone.processing_on = None
clone.processing_since = None
clone.processing_deadline = None
args = {
"client_id":
db_utils.ClientIDToInt(flow_obj.client_id),
"flow":
clone.SerializeToString(),
"flow_id":
db_utils.FlowIDToInt(flow_obj.flow_id),
"flow_state":
int(clone.flow_state),
"network_bytes_sent":
flow_obj.network_bytes_sent,
"next_request_to_process":
flow_obj.next_request_to_process,
"num_replies_sent":
flow_obj.num_replies_sent,
"system_cpu_time_used_micros":
db_utils.SecondsToMicros(flow_obj.cpu_time_used.system_cpu_time),
"user_cpu_time_used_micros":
db_utils.SecondsToMicros(flow_obj.cpu_time_used.user_cpu_time),
}
rows_updated = cursor.execute(update_query, args)
return rows_updated == 1 | def function[ReleaseProcessedFlow, parameter[self, flow_obj, cursor]]:
constant[Releases a flow that the worker was processing to the database.]
variable[update_query] assign[=] constant[
UPDATE flows
LEFT OUTER JOIN (
SELECT client_id, flow_id, needs_processing
FROM flow_requests
WHERE
client_id = %(client_id)s AND
flow_id = %(flow_id)s AND
request_id = %(next_request_to_process)s AND
needs_processing
) AS needs_processing
ON
flows.client_id = needs_processing.client_id AND
flows.flow_id = needs_processing.flow_id
SET
flows.flow = %(flow)s,
flows.processing_on = NULL,
flows.processing_since = NULL,
flows.processing_deadline = NULL,
flows.next_request_to_process = %(next_request_to_process)s,
flows.flow_state = %(flow_state)s,
flows.user_cpu_time_used_micros = %(user_cpu_time_used_micros)s,
flows.system_cpu_time_used_micros = %(system_cpu_time_used_micros)s,
flows.network_bytes_sent = %(network_bytes_sent)s,
flows.num_replies_sent = %(num_replies_sent)s,
flows.last_update = NOW(6)
WHERE
flows.client_id = %(client_id)s AND
flows.flow_id = %(flow_id)s AND (
needs_processing.needs_processing = FALSE OR
needs_processing.needs_processing IS NULL)
]
variable[clone] assign[=] call[name[flow_obj].Copy, parameter[]]
name[clone].processing_on assign[=] constant[None]
name[clone].processing_since assign[=] constant[None]
name[clone].processing_deadline assign[=] constant[None]
variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da1b1b69630>, <ast.Constant object at 0x7da1b1b6abc0>, <ast.Constant object at 0x7da1b1b69660>, <ast.Constant object at 0x7da1b1b68d30>, <ast.Constant object at 0x7da1b1b6b1f0>, <ast.Constant object at 0x7da1b1b6b490>, <ast.Constant object at 0x7da1b1b68850>, <ast.Constant object at 0x7da1b1b69150>, <ast.Constant object at 0x7da1b1b6bbb0>], [<ast.Call object at 0x7da1b1b6a9b0>, <ast.Call object at 0x7da1b1b69180>, <ast.Call object at 0x7da1b1b69c00>, <ast.Call object at 0x7da1b1b69510>, <ast.Attribute object at 0x7da1b1b69330>, <ast.Attribute object at 0x7da1b1b6bb80>, <ast.Attribute object at 0x7da1b1b6aec0>, <ast.Call object at 0x7da1b1b6add0>, <ast.Call object at 0x7da1b1b694e0>]]
variable[rows_updated] assign[=] call[name[cursor].execute, parameter[name[update_query], name[args]]]
return[compare[name[rows_updated] equal[==] constant[1]]] | keyword[def] identifier[ReleaseProcessedFlow] ( identifier[self] , identifier[flow_obj] , identifier[cursor] = keyword[None] ):
literal[string]
identifier[update_query] = literal[string]
identifier[clone] = identifier[flow_obj] . identifier[Copy] ()
identifier[clone] . identifier[processing_on] = keyword[None]
identifier[clone] . identifier[processing_since] = keyword[None]
identifier[clone] . identifier[processing_deadline] = keyword[None]
identifier[args] ={
literal[string] :
identifier[db_utils] . identifier[ClientIDToInt] ( identifier[flow_obj] . identifier[client_id] ),
literal[string] :
identifier[clone] . identifier[SerializeToString] (),
literal[string] :
identifier[db_utils] . identifier[FlowIDToInt] ( identifier[flow_obj] . identifier[flow_id] ),
literal[string] :
identifier[int] ( identifier[clone] . identifier[flow_state] ),
literal[string] :
identifier[flow_obj] . identifier[network_bytes_sent] ,
literal[string] :
identifier[flow_obj] . identifier[next_request_to_process] ,
literal[string] :
identifier[flow_obj] . identifier[num_replies_sent] ,
literal[string] :
identifier[db_utils] . identifier[SecondsToMicros] ( identifier[flow_obj] . identifier[cpu_time_used] . identifier[system_cpu_time] ),
literal[string] :
identifier[db_utils] . identifier[SecondsToMicros] ( identifier[flow_obj] . identifier[cpu_time_used] . identifier[user_cpu_time] ),
}
identifier[rows_updated] = identifier[cursor] . identifier[execute] ( identifier[update_query] , identifier[args] )
keyword[return] identifier[rows_updated] == literal[int] | def ReleaseProcessedFlow(self, flow_obj, cursor=None):
"""Releases a flow that the worker was processing to the database."""
update_query = '\n UPDATE flows\n LEFT OUTER JOIN (\n SELECT client_id, flow_id, needs_processing\n FROM flow_requests\n WHERE\n client_id = %(client_id)s AND\n flow_id = %(flow_id)s AND\n request_id = %(next_request_to_process)s AND\n needs_processing\n ) AS needs_processing\n ON\n flows.client_id = needs_processing.client_id AND\n flows.flow_id = needs_processing.flow_id\n SET\n flows.flow = %(flow)s,\n flows.processing_on = NULL,\n flows.processing_since = NULL,\n flows.processing_deadline = NULL,\n flows.next_request_to_process = %(next_request_to_process)s,\n flows.flow_state = %(flow_state)s,\n flows.user_cpu_time_used_micros = %(user_cpu_time_used_micros)s,\n flows.system_cpu_time_used_micros = %(system_cpu_time_used_micros)s,\n flows.network_bytes_sent = %(network_bytes_sent)s,\n flows.num_replies_sent = %(num_replies_sent)s,\n flows.last_update = NOW(6)\n WHERE\n flows.client_id = %(client_id)s AND\n flows.flow_id = %(flow_id)s AND (\n needs_processing.needs_processing = FALSE OR\n needs_processing.needs_processing IS NULL)\n '
clone = flow_obj.Copy()
clone.processing_on = None
clone.processing_since = None
clone.processing_deadline = None
args = {'client_id': db_utils.ClientIDToInt(flow_obj.client_id), 'flow': clone.SerializeToString(), 'flow_id': db_utils.FlowIDToInt(flow_obj.flow_id), 'flow_state': int(clone.flow_state), 'network_bytes_sent': flow_obj.network_bytes_sent, 'next_request_to_process': flow_obj.next_request_to_process, 'num_replies_sent': flow_obj.num_replies_sent, 'system_cpu_time_used_micros': db_utils.SecondsToMicros(flow_obj.cpu_time_used.system_cpu_time), 'user_cpu_time_used_micros': db_utils.SecondsToMicros(flow_obj.cpu_time_used.user_cpu_time)}
rows_updated = cursor.execute(update_query, args)
return rows_updated == 1 |
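
The release above is conditional: the row is only updated when the next request is not already pending again, and the caller checks rows_updated == 1 to learn whether the release took effect. A self-contained sqlite3 sketch of that guarded-update idea (illustrative only; this is not GRR's MySQL schema):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.executescript('''
    CREATE TABLE flows (flow_id TEXT PRIMARY KEY, processing_on TEXT,
                        next_request INTEGER);
    CREATE TABLE flow_requests (flow_id TEXT, request_id INTEGER,
                                needs_processing INTEGER);
    INSERT INTO flows VALUES ('F1', 'worker-1', 2);
''')

def release_flow(flow_id, next_request):
    # Release only if request `next_request` is not pending processing again.
    cur = conn.execute('''
        UPDATE flows SET processing_on = NULL, next_request = ?
        WHERE flow_id = ?
          AND NOT EXISTS (SELECT 1 FROM flow_requests
                          WHERE flow_id = ? AND request_id = ?
                            AND needs_processing = 1)
    ''', (next_request, flow_id, flow_id, next_request))
    return cur.rowcount == 1  # mirrors `rows_updated == 1` above

print(release_flow('F1', 2))   # True: nothing pending, release succeeds
conn.execute('INSERT INTO flow_requests VALUES (?, ?, 1)', ('F1', 3))
print(release_flow('F1', 3))   # False: request 3 needs processing again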
def _make_handler(state_token, done_function):
'''
    Makes a handler class to use inside the basic Python HTTP server.
state_token is the expected state token.
done_function is a function that is called, with the code passed to it.
'''
class LocalServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def error_response(self, msg):
            logging.warn(
                'Error response: %(msg)s. %(path)s',
                {'msg': msg, 'path': self.path})
self.send_response(400)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(msg)
def do_GET(self):
parsed = urlparse.urlparse(self.path)
if len(parsed.query) == 0 or parsed.path != '/callback':
self.error_response(
'We encountered a problem with your request.')
return
params = urlparse.parse_qs(parsed.query)
if params['state'] != [state_token]:
self.error_response(
'Attack detected: state tokens did not match!')
return
if len(params['code']) != 1:
self.error_response('Wrong number of "code" query parameters.')
return
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(
"courseraoauth2client: we have captured Coursera's response "
"code. Feel free to close this browser window now and return "
"to your terminal. Thanks!")
done_function(params['code'][0])
return LocalServerHandler | def function[_make_handler, parameter[state_token, done_function]]:
constant[
    Makes a handler class to use inside the basic Python HTTP server.
state_token is the expected state token.
done_function is a function that is called, with the code passed to it.
]
class class[LocalServerHandler, parameter[]] begin[:]
def function[error_response, parameter[self, msg]]:
call[name[logging].warn, parameter[constant[Error response: %(msg)s. %(path)s]]]
call[name[self].send_response, parameter[constant[400]]]
call[name[self].send_header, parameter[constant[Content-type], constant[text/plain]]]
call[name[self].end_headers, parameter[]]
call[name[self].wfile.write, parameter[name[msg]]]
def function[do_GET, parameter[self]]:
variable[parsed] assign[=] call[name[urlparse].urlparse, parameter[name[self].path]]
if <ast.BoolOp object at 0x7da18fe90520> begin[:]
call[name[self].error_response, parameter[constant[We encountered a problem with your request.]]]
return[None]
variable[params] assign[=] call[name[urlparse].parse_qs, parameter[name[parsed].query]]
if compare[call[name[params]][constant[state]] not_equal[!=] list[[<ast.Name object at 0x7da18fe914e0>]]] begin[:]
call[name[self].error_response, parameter[constant[Attack detected: state tokens did not match!]]]
return[None]
if compare[call[name[len], parameter[call[name[params]][constant[code]]]] not_equal[!=] constant[1]] begin[:]
call[name[self].error_response, parameter[constant[Wrong number of "code" query parameters.]]]
return[None]
call[name[self].send_response, parameter[constant[200]]]
call[name[self].send_header, parameter[constant[Content-type], constant[text/plain]]]
call[name[self].end_headers, parameter[]]
call[name[self].wfile.write, parameter[constant[courseraoauth2client: we have captured Coursera's response code. Feel free to close this browser window now and return to your terminal. Thanks!]]]
call[name[done_function], parameter[call[call[name[params]][constant[code]]][constant[0]]]]
return[name[LocalServerHandler]] | keyword[def] identifier[_make_handler] ( identifier[state_token] , identifier[done_function] ):
literal[string]
keyword[class] identifier[LocalServerHandler] ( identifier[BaseHTTPServer] . identifier[BaseHTTPRequestHandler] ):
keyword[def] identifier[error_response] ( identifier[self] , identifier[msg] ):
identifier[logging] . identifier[warn] (
literal[string] ,
identifier[msg] = identifier[msg] ,
identifier[path] = identifier[self] . identifier[path] )
identifier[self] . identifier[send_response] ( literal[int] )
identifier[self] . identifier[send_header] ( literal[string] , literal[string] )
identifier[self] . identifier[end_headers] ()
identifier[self] . identifier[wfile] . identifier[write] ( identifier[msg] )
keyword[def] identifier[do_GET] ( identifier[self] ):
identifier[parsed] = identifier[urlparse] . identifier[urlparse] ( identifier[self] . identifier[path] )
keyword[if] identifier[len] ( identifier[parsed] . identifier[query] )== literal[int] keyword[or] identifier[parsed] . identifier[path] != literal[string] :
identifier[self] . identifier[error_response] (
literal[string] )
keyword[return]
identifier[params] = identifier[urlparse] . identifier[parse_qs] ( identifier[parsed] . identifier[query] )
keyword[if] identifier[params] [ literal[string] ]!=[ identifier[state_token] ]:
identifier[self] . identifier[error_response] (
literal[string] )
keyword[return]
keyword[if] identifier[len] ( identifier[params] [ literal[string] ])!= literal[int] :
identifier[self] . identifier[error_response] ( literal[string] )
keyword[return]
identifier[self] . identifier[send_response] ( literal[int] )
identifier[self] . identifier[send_header] ( literal[string] , literal[string] )
identifier[self] . identifier[end_headers] ()
identifier[self] . identifier[wfile] . identifier[write] (
literal[string]
literal[string]
literal[string] )
identifier[done_function] ( identifier[params] [ literal[string] ][ literal[int] ])
keyword[return] identifier[LocalServerHandler] | def _make_handler(state_token, done_function):
"""
    Makes a handler class to use inside the basic Python HTTP server.
state_token is the expected state token.
done_function is a function that is called, with the code passed to it.
"""
class LocalServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def error_response(self, msg):
            logging.warn('Error response: %(msg)s. %(path)s', {'msg': msg, 'path': self.path})
self.send_response(400)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(msg)
def do_GET(self):
parsed = urlparse.urlparse(self.path)
if len(parsed.query) == 0 or parsed.path != '/callback':
self.error_response('We encountered a problem with your request.')
return # depends on [control=['if'], data=[]]
params = urlparse.parse_qs(parsed.query)
if params['state'] != [state_token]:
self.error_response('Attack detected: state tokens did not match!')
return # depends on [control=['if'], data=[]]
if len(params['code']) != 1:
self.error_response('Wrong number of "code" query parameters.')
return # depends on [control=['if'], data=[]]
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write("courseraoauth2client: we have captured Coursera's response code. Feel free to close this browser window now and return to your terminal. Thanks!")
done_function(params['code'][0])
return LocalServerHandler |
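
A hypothetical one-shot wiring of the handler factory above, in the same Python 2 idiom as the module's BaseHTTPServer/urlparse imports; the port and the callback body are assumptions:

import uuid

def print_code(code):
    # Placeholder done_function: just show the captured OAuth code.
    print('captured code: %s' % code)

state_token = uuid.uuid4().hex
handler_cls = _make_handler(state_token, print_code)
httpd = BaseHTTPServer.HTTPServer(('localhost', 9876), handler_cls)
# Blocks until one request, e.g. the browser being redirected to:
#   http://localhost:9876/callback?state=<state_token>&code=<code>
httpd.handle_request()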
def feed(self, new_bytes):
"""Feed a new set of bytes into the protocol handler
These bytes will be immediately fed into the parsing state machine and
if new packets are found, the ``packet_callback`` will be executed
with the fully-formed message.
:param new_bytes: The new bytes to be fed into the stream protocol
handler.
"""
self._available_bytes += new_bytes
callbacks = []
try:
while True:
packet = six.next(self._packet_generator)
if packet is None:
break
else:
callbacks.append(partial(self.packet_callback, packet))
except Exception:
# When we receive an exception, we assume that the _available_bytes
# has already been updated and we just choked on a field. That
# is, unless the number of _available_bytes has not changed. In
            # that case, we reset the buffer entirely.
# TODO: black hole may not be the best. What should the logging
# behavior be?
self.reset()
# callbacks are partials that are bound to packet already. We do
# this in order to separate out parsing activity (and error handling)
# from the execution of callbacks. Callbacks should not in any way
        # rely on the parser's position in the byte stream.
for callback in callbacks:
callback() | def function[feed, parameter[self, new_bytes]]:
constant[Feed a new set of bytes into the protocol handler
These bytes will be immediately fed into the parsing state machine and
if new packets are found, the ``packet_callback`` will be executed
with the fully-formed message.
:param new_bytes: The new bytes to be fed into the stream protocol
handler.
]
<ast.AugAssign object at 0x7da1b02e6950>
variable[callbacks] assign[=] list[[]]
<ast.Try object at 0x7da1b02e65f0>
for taget[name[callback]] in starred[name[callbacks]] begin[:]
call[name[callback], parameter[]] | keyword[def] identifier[feed] ( identifier[self] , identifier[new_bytes] ):
literal[string]
identifier[self] . identifier[_available_bytes] += identifier[new_bytes]
identifier[callbacks] =[]
keyword[try] :
keyword[while] keyword[True] :
identifier[packet] = identifier[six] . identifier[next] ( identifier[self] . identifier[_packet_generator] )
keyword[if] identifier[packet] keyword[is] keyword[None] :
keyword[break]
keyword[else] :
identifier[callbacks] . identifier[append] ( identifier[partial] ( identifier[self] . identifier[packet_callback] , identifier[packet] ))
keyword[except] identifier[Exception] :
identifier[self] . identifier[reset] ()
keyword[for] identifier[callback] keyword[in] identifier[callbacks] :
identifier[callback] () | def feed(self, new_bytes):
"""Feed a new set of bytes into the protocol handler
These bytes will be immediately fed into the parsing state machine and
if new packets are found, the ``packet_callback`` will be executed
with the fully-formed message.
:param new_bytes: The new bytes to be fed into the stream protocol
handler.
"""
self._available_bytes += new_bytes
callbacks = []
try:
while True:
packet = six.next(self._packet_generator)
if packet is None:
break # depends on [control=['if'], data=[]]
else:
callbacks.append(partial(self.packet_callback, packet)) # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except Exception:
# When we receive an exception, we assume that the _available_bytes
# has already been updated and we just choked on a field. That
# is, unless the number of _available_bytes has not changed. In
        # that case, we reset the buffer entirely.
# TODO: black hole may not be the best. What should the logging
# behavior be?
self.reset() # depends on [control=['except'], data=[]]
# callbacks are partials that are bound to packet already. We do
# this in order to separate out parsing activity (and error handling)
# from the execution of callbacks. Callbacks should not in any way
    # rely on the parser's position in the byte stream.
for callback in callbacks:
callback() # depends on [control=['for'], data=['callback']] |
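
A toy, self-contained illustration (Python 3) of the same feed/parse/callback separation; the newline-delimited packet format is invented for the sketch and the error-handling reset is omitted:

from functools import partial

class LineProtocol(object):
    '''Toy handler: packets are newline-terminated byte strings.'''
    def __init__(self, packet_callback):
        self.packet_callback = packet_callback
        self._available_bytes = b''
        self._packet_generator = self._packets()

    def _packets(self):
        # Yield one complete packet per resume, or None if bytes run short.
        while True:
            if b'\n' in self._available_bytes:
                packet, _, rest = self._available_bytes.partition(b'\n')
                self._available_bytes = rest
                yield packet
            else:
                yield None

    def feed(self, new_bytes):
        self._available_bytes += new_bytes
        callbacks = []
        while True:
            packet = next(self._packet_generator)
            if packet is None:
                break
            callbacks.append(partial(self.packet_callback, packet))
        # Run callbacks only after parsing is finished, as in feed() above.
        for callback in callbacks:
            callback()

proto = LineProtocol(lambda pkt: print('packet:', pkt))
proto.feed(b'hello\nwor')   # prints packet: b'hello'
proto.feed(b'ld\n')         # prints packet: b'world'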
def estimate_tuning(input_file):
'''Load an audio file and estimate tuning (in cents)'''
print('Loading ', input_file)
y, sr = librosa.load(input_file)
print('Separating harmonic component ... ')
y_harm = librosa.effects.harmonic(y)
print('Estimating tuning ... ')
# Just track the pitches associated with high magnitude
tuning = librosa.estimate_tuning(y=y_harm, sr=sr)
print('{:+0.2f} cents'.format(100 * tuning)) | def function[estimate_tuning, parameter[input_file]]:
constant[Load an audio file and estimate tuning (in cents)]
call[name[print], parameter[constant[Loading ], name[input_file]]]
<ast.Tuple object at 0x7da20c991420> assign[=] call[name[librosa].load, parameter[name[input_file]]]
call[name[print], parameter[constant[Separating harmonic component ... ]]]
variable[y_harm] assign[=] call[name[librosa].effects.harmonic, parameter[name[y]]]
call[name[print], parameter[constant[Estimating tuning ... ]]]
variable[tuning] assign[=] call[name[librosa].estimate_tuning, parameter[]]
call[name[print], parameter[call[constant[{:+0.2f} cents].format, parameter[binary_operation[constant[100] * name[tuning]]]]]] | keyword[def] identifier[estimate_tuning] ( identifier[input_file] ):
literal[string]
identifier[print] ( literal[string] , identifier[input_file] )
identifier[y] , identifier[sr] = identifier[librosa] . identifier[load] ( identifier[input_file] )
identifier[print] ( literal[string] )
identifier[y_harm] = identifier[librosa] . identifier[effects] . identifier[harmonic] ( identifier[y] )
identifier[print] ( literal[string] )
identifier[tuning] = identifier[librosa] . identifier[estimate_tuning] ( identifier[y] = identifier[y_harm] , identifier[sr] = identifier[sr] )
identifier[print] ( literal[string] . identifier[format] ( literal[int] * identifier[tuning] )) | def estimate_tuning(input_file):
"""Load an audio file and estimate tuning (in cents)"""
print('Loading ', input_file)
(y, sr) = librosa.load(input_file)
print('Separating harmonic component ... ')
y_harm = librosa.effects.harmonic(y)
print('Estimating tuning ... ')
# Just track the pitches associated with high magnitude
tuning = librosa.estimate_tuning(y=y_harm, sr=sr)
print('{:+0.2f} cents'.format(100 * tuning)) |
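
A usage note for the script above: librosa's estimate_tuning returns the deviation as a fraction of a bin (100 cents at the default 12 bins per octave), which is why the result is scaled by 100. A hypothetical invocation, with a placeholder path:

estimate_tuning('example.wav')  # e.g. prints "+12.34 cents"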
def subtree_leaf_positions(subtree):
"""Return tree positions of all leaves of a subtree."""
relative_leaf_positions = subtree.treepositions('leaves')
subtree_root_pos = subtree.treeposition()
absolute_leaf_positions = []
for rel_leaf_pos in relative_leaf_positions:
        absolute_leaf_positions.append(subtree_root_pos + rel_leaf_pos)
return absolute_leaf_positions | def function[subtree_leaf_positions, parameter[subtree]]:
constant[Return tree positions of all leaves of a subtree.]
variable[relative_leaf_positions] assign[=] call[name[subtree].treepositions, parameter[constant[leaves]]]
variable[subtree_root_pos] assign[=] call[name[subtree].treeposition, parameter[]]
variable[absolute_leaf_positions] assign[=] list[[]]
for taget[name[rel_leaf_pos]] in starred[name[relative_leaf_positions]] begin[:]
call[name[absolute_leaf_positions].append, parameter[binary_operation[name[subtree_root_pos] + name[rel_leaf_pos]]]]
return[name[absolute_leaf_positions]] | keyword[def] identifier[subtree_leaf_positions] ( identifier[subtree] ):
literal[string]
identifier[relative_leaf_positions] = identifier[subtree] . identifier[treepositions] ( literal[string] )
identifier[subtree_root_pos] = identifier[subtree] . identifier[treeposition] ()
identifier[absolute_leaf_positions] =[]
keyword[for] identifier[rel_leaf_pos] keyword[in] identifier[relative_leaf_positions] :
identifier[absolute_leaf_positions] . identifier[append] ( identifier[subtree_root_pos] + identifier[rel_leaf_pos] )
keyword[return] identifier[absolute_leaf_positions] | def subtree_leaf_positions(subtree):
"""Return tree positions of all leaves of a subtree."""
relative_leaf_positions = subtree.treepositions('leaves')
subtree_root_pos = subtree.treeposition()
absolute_leaf_positions = []
for rel_leaf_pos in relative_leaf_positions:
absolute_leaf_positions.append(subtree_root_pos + rel_leaf_pos) # depends on [control=['for'], data=['rel_leaf_pos']]
return absolute_leaf_positions |
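
A small worked example, assuming nltk's ParentedTree (plain Tree lacks the treeposition() method used above):

from nltk.tree import ParentedTree

t = ParentedTree.fromstring('(S (NP (DT the) (NN cat)) (VP (VBD sat)))')
vp = t[1]                           # the VP subtree, at position (1,)
print(subtree_leaf_positions(vp))   # [(1, 0, 0)] -> the leaf 'sat'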
def reset_secret(self):
"""
Resets the client secret for this client.
"""
result = self._client.post("{}/reset_secret".format(OAuthClient.api_endpoint), model=self)
if not 'id' in result:
raise UnexpectedResponseError('Unexpected response when resetting secret!', json=result)
self._populate(result)
return self.secret | def function[reset_secret, parameter[self]]:
constant[
Resets the client secret for this client.
]
variable[result] assign[=] call[name[self]._client.post, parameter[call[constant[{}/reset_secret].format, parameter[name[OAuthClient].api_endpoint]]]]
if <ast.UnaryOp object at 0x7da18dc99f60> begin[:]
<ast.Raise object at 0x7da18dc9bfa0>
call[name[self]._populate, parameter[name[result]]]
return[name[self].secret] | keyword[def] identifier[reset_secret] ( identifier[self] ):
literal[string]
identifier[result] = identifier[self] . identifier[_client] . identifier[post] ( literal[string] . identifier[format] ( identifier[OAuthClient] . identifier[api_endpoint] ), identifier[model] = identifier[self] )
keyword[if] keyword[not] literal[string] keyword[in] identifier[result] :
keyword[raise] identifier[UnexpectedResponseError] ( literal[string] , identifier[json] = identifier[result] )
identifier[self] . identifier[_populate] ( identifier[result] )
keyword[return] identifier[self] . identifier[secret] | def reset_secret(self):
"""
Resets the client secret for this client.
"""
result = self._client.post('{}/reset_secret'.format(OAuthClient.api_endpoint), model=self)
if not 'id' in result:
raise UnexpectedResponseError('Unexpected response when resetting secret!', json=result) # depends on [control=['if'], data=[]]
self._populate(result)
return self.secret |
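
A hypothetical call pattern for reset_secret, assuming a linode_api4-style client; the token and client id are placeholders:

# client = LinodeClient('my-api-token')           # hypothetical token
# oauth_client = client.load(OAuthClient, 1234)   # hypothetical client id
# new_secret = oauth_client.reset_secret()        # POSTs .../reset_secret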
def setup_pixelmap(hashcode):
"""Creates and combines all required layers to
build a pixelmap for creating the virtual
pixels."""
# Color distribution.
# colors = hashgrinder.grindIpForColors(ip)
colors = hashgrinder.grind_hash_for_colors(hashcode)
color_body = colors[0]
color_subfield = colors[1]
color_weapon_a = colors[2]
color_weapon_b = colors[3]
color_shield_deco = colors[4]
color_boots = colors[5]
color_hair = colors[6]
color_top = colors[7]
# Grinds for the aspect.
aspect = hashgrinder.grind_hash_for_aspect(hashcode)
    # Determine weapons of the avatar.
    weapons = hashgrinder.grind_hash_for_weapon(hashcode)
    if DEBUG:
        print("Current aspect: %r" % aspect)
        print("Current weapons: %r" % weapons)
# There is just one body template. The optional pixels need to be mirrored so
# the body layout will be symmetric to avoid uncanny looks.
layer_body = pgnreader.parse_pagan_file(FILE_BODY, hashcode, invert=False, sym=True)
layer_hair = create_hair_layer(aspect, hashcode)
layer_boots = create_boots_layer(aspect, hashcode)
layer_torso = create_torso_layer(aspect, hashcode)
has_shield = (weapons[0] in hashgrinder.SHIELDS)
if has_shield:
layer_weapon_a = create_shield_layer(weapons[0], hashcode)
layer_weapon_b = create_weapon_layer(weapons[1], hashcode)
else:
layer_weapon_a = create_weapon_layer(weapons[0], hashcode)
if (len(weapons) == 2):
layer_weapon_b = create_weapon_layer(weapons[1], hashcode, True)
layer_subfield = create_subfield_layer(aspect, hashcode)
layer_deco = create_shield_deco_layer(weapons, hashcode)
pixelmap = scale_pixels(color_body, layer_body)
pixelmap += scale_pixels(color_top, layer_torso)
pixelmap += scale_pixels(color_hair, layer_hair)
pixelmap += scale_pixels(color_subfield, layer_subfield)
pixelmap += scale_pixels(color_boots, layer_boots)
pixelmap += scale_pixels(color_weapon_a, layer_weapon_a)
if (len(weapons) == 2):
pixelmap += scale_pixels(color_weapon_b, layer_weapon_b)
pixelmap += scale_pixels(color_shield_deco, layer_deco)
return pixelmap | def function[setup_pixelmap, parameter[hashcode]]:
constant[Creates and combines all required layers to
build a pixelmap for creating the virtual
pixels.]
variable[colors] assign[=] call[name[hashgrinder].grind_hash_for_colors, parameter[name[hashcode]]]
variable[color_body] assign[=] call[name[colors]][constant[0]]
variable[color_subfield] assign[=] call[name[colors]][constant[1]]
variable[color_weapon_a] assign[=] call[name[colors]][constant[2]]
variable[color_weapon_b] assign[=] call[name[colors]][constant[3]]
variable[color_shield_deco] assign[=] call[name[colors]][constant[4]]
variable[color_boots] assign[=] call[name[colors]][constant[5]]
variable[color_hair] assign[=] call[name[colors]][constant[6]]
variable[color_top] assign[=] call[name[colors]][constant[7]]
variable[aspect] assign[=] call[name[hashgrinder].grind_hash_for_aspect, parameter[name[hashcode]]]
variable[weapons] assign[=] call[name[hashgrinder].grind_hash_for_weapon, parameter[name[hashcode]]]
if name[DEBUG] begin[:]
call[name[print], parameter[binary_operation[constant[Current aspect: %r] <ast.Mod object at 0x7da2590d6920> name[aspect]]]]
call[name[print], parameter[binary_operation[constant[Current weapons: %r] <ast.Mod object at 0x7da2590d6920> name[weapons]]]]
variable[layer_body] assign[=] call[name[pgnreader].parse_pagan_file, parameter[name[FILE_BODY], name[hashcode]]]
variable[layer_hair] assign[=] call[name[create_hair_layer], parameter[name[aspect], name[hashcode]]]
variable[layer_boots] assign[=] call[name[create_boots_layer], parameter[name[aspect], name[hashcode]]]
variable[layer_torso] assign[=] call[name[create_torso_layer], parameter[name[aspect], name[hashcode]]]
variable[has_shield] assign[=] compare[call[name[weapons]][constant[0]] in name[hashgrinder].SHIELDS]
if name[has_shield] begin[:]
variable[layer_weapon_a] assign[=] call[name[create_shield_layer], parameter[call[name[weapons]][constant[0]], name[hashcode]]]
variable[layer_weapon_b] assign[=] call[name[create_weapon_layer], parameter[call[name[weapons]][constant[1]], name[hashcode]]]
variable[layer_subfield] assign[=] call[name[create_subfield_layer], parameter[name[aspect], name[hashcode]]]
variable[layer_deco] assign[=] call[name[create_shield_deco_layer], parameter[name[weapons], name[hashcode]]]
variable[pixelmap] assign[=] call[name[scale_pixels], parameter[name[color_body], name[layer_body]]]
<ast.AugAssign object at 0x7da20e954d60>
<ast.AugAssign object at 0x7da20e955cc0>
<ast.AugAssign object at 0x7da20e955510>
<ast.AugAssign object at 0x7da20e957d60>
<ast.AugAssign object at 0x7da20e9540a0>
if compare[call[name[len], parameter[name[weapons]]] equal[==] constant[2]] begin[:]
<ast.AugAssign object at 0x7da20e955a80>
<ast.AugAssign object at 0x7da20e955450>
return[name[pixelmap]] | keyword[def] identifier[setup_pixelmap] ( identifier[hashcode] ):
literal[string]
identifier[colors] = identifier[hashgrinder] . identifier[grind_hash_for_colors] ( identifier[hashcode] )
identifier[color_body] = identifier[colors] [ literal[int] ]
identifier[color_subfield] = identifier[colors] [ literal[int] ]
identifier[color_weapon_a] = identifier[colors] [ literal[int] ]
identifier[color_weapon_b] = identifier[colors] [ literal[int] ]
identifier[color_shield_deco] = identifier[colors] [ literal[int] ]
identifier[color_boots] = identifier[colors] [ literal[int] ]
identifier[color_hair] = identifier[colors] [ literal[int] ]
identifier[color_top] = identifier[colors] [ literal[int] ]
identifier[aspect] = identifier[hashgrinder] . identifier[grind_hash_for_aspect] ( identifier[hashcode] )
identifier[weapons] = identifier[hashgrinder] . identifier[grind_hash_for_weapon] ( identifier[hashcode] )
keyword[if] identifier[DEBUG] :
identifier[print] ( literal[string] % identifier[aspect] )
identifier[print] ( literal[string] % identifier[weapons] )
identifier[layer_body] = identifier[pgnreader] . identifier[parse_pagan_file] ( identifier[FILE_BODY] , identifier[hashcode] , identifier[invert] = keyword[False] , identifier[sym] = keyword[True] )
identifier[layer_hair] = identifier[create_hair_layer] ( identifier[aspect] , identifier[hashcode] )
identifier[layer_boots] = identifier[create_boots_layer] ( identifier[aspect] , identifier[hashcode] )
identifier[layer_torso] = identifier[create_torso_layer] ( identifier[aspect] , identifier[hashcode] )
identifier[has_shield] =( identifier[weapons] [ literal[int] ] keyword[in] identifier[hashgrinder] . identifier[SHIELDS] )
keyword[if] identifier[has_shield] :
identifier[layer_weapon_a] = identifier[create_shield_layer] ( identifier[weapons] [ literal[int] ], identifier[hashcode] )
identifier[layer_weapon_b] = identifier[create_weapon_layer] ( identifier[weapons] [ literal[int] ], identifier[hashcode] )
keyword[else] :
identifier[layer_weapon_a] = identifier[create_weapon_layer] ( identifier[weapons] [ literal[int] ], identifier[hashcode] )
keyword[if] ( identifier[len] ( identifier[weapons] )== literal[int] ):
identifier[layer_weapon_b] = identifier[create_weapon_layer] ( identifier[weapons] [ literal[int] ], identifier[hashcode] , keyword[True] )
identifier[layer_subfield] = identifier[create_subfield_layer] ( identifier[aspect] , identifier[hashcode] )
identifier[layer_deco] = identifier[create_shield_deco_layer] ( identifier[weapons] , identifier[hashcode] )
identifier[pixelmap] = identifier[scale_pixels] ( identifier[color_body] , identifier[layer_body] )
identifier[pixelmap] += identifier[scale_pixels] ( identifier[color_top] , identifier[layer_torso] )
identifier[pixelmap] += identifier[scale_pixels] ( identifier[color_hair] , identifier[layer_hair] )
identifier[pixelmap] += identifier[scale_pixels] ( identifier[color_subfield] , identifier[layer_subfield] )
identifier[pixelmap] += identifier[scale_pixels] ( identifier[color_boots] , identifier[layer_boots] )
identifier[pixelmap] += identifier[scale_pixels] ( identifier[color_weapon_a] , identifier[layer_weapon_a] )
keyword[if] ( identifier[len] ( identifier[weapons] )== literal[int] ):
identifier[pixelmap] += identifier[scale_pixels] ( identifier[color_weapon_b] , identifier[layer_weapon_b] )
identifier[pixelmap] += identifier[scale_pixels] ( identifier[color_shield_deco] , identifier[layer_deco] )
keyword[return] identifier[pixelmap] | def setup_pixelmap(hashcode):
"""Creates and combines all required layers to
build a pixelmap for creating the virtual
pixels."""
# Color distribution.
# colors = hashgrinder.grindIpForColors(ip)
colors = hashgrinder.grind_hash_for_colors(hashcode)
color_body = colors[0]
color_subfield = colors[1]
color_weapon_a = colors[2]
color_weapon_b = colors[3]
color_shield_deco = colors[4]
color_boots = colors[5]
color_hair = colors[6]
color_top = colors[7]
# Grinds for the aspect.
aspect = hashgrinder.grind_hash_for_aspect(hashcode)
#Determine weapons of the avatar.
weapons = hashgrinder.grind_hash_for_weapon(hashcode)
if DEBUG:
print('Current aspect: %r' % aspect)
print('Current weapons: %r' % weapons) # depends on [control=['if'], data=[]]
# There is just one body template. The optional pixels need to be mirrored so
# the body layout will be symmetric to avoid uncanny looks.
layer_body = pgnreader.parse_pagan_file(FILE_BODY, hashcode, invert=False, sym=True)
layer_hair = create_hair_layer(aspect, hashcode)
layer_boots = create_boots_layer(aspect, hashcode)
layer_torso = create_torso_layer(aspect, hashcode)
has_shield = weapons[0] in hashgrinder.SHIELDS
if has_shield:
layer_weapon_a = create_shield_layer(weapons[0], hashcode)
layer_weapon_b = create_weapon_layer(weapons[1], hashcode) # depends on [control=['if'], data=[]]
else:
layer_weapon_a = create_weapon_layer(weapons[0], hashcode)
if len(weapons) == 2:
layer_weapon_b = create_weapon_layer(weapons[1], hashcode, True) # depends on [control=['if'], data=[]]
layer_subfield = create_subfield_layer(aspect, hashcode)
layer_deco = create_shield_deco_layer(weapons, hashcode)
pixelmap = scale_pixels(color_body, layer_body)
pixelmap += scale_pixels(color_top, layer_torso)
pixelmap += scale_pixels(color_hair, layer_hair)
pixelmap += scale_pixels(color_subfield, layer_subfield)
pixelmap += scale_pixels(color_boots, layer_boots)
pixelmap += scale_pixels(color_weapon_a, layer_weapon_a)
if len(weapons) == 2:
pixelmap += scale_pixels(color_weapon_b, layer_weapon_b) # depends on [control=['if'], data=[]]
pixelmap += scale_pixels(color_shield_deco, layer_deco)
return pixelmap |
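
The grinder calls above all follow one idea: slice a hex digest deterministically and map the slices onto finite option lists, so the same hash always yields the same avatar. A generic, self-contained sketch of that idea (not pagan's actual grinder):

import hashlib

def grind(seed, options, n):
    # Map successive digest bytes onto the options list, deterministically.
    digest = hashlib.sha256(seed.encode('utf-8')).hexdigest()
    return [options[int(digest[2 * i:2 * i + 2], 16) % len(options)]
            for i in range(n)]

palette = ['red', 'green', 'blue', 'gold', 'violet']
print(grind('alice@example.com', palette, 3))  # same seed -> same colors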
def bbknn(adata, batch_key='batch', save_knn=False, copy=False, **kwargs):
"""\
Batch balanced kNN [Park18]_.
Batch balanced kNN alters the kNN procedure to identify each
cell's top neighbours in each batch separately instead of the
entire cell pool with no accounting for batch. Aligns batches in a
quick and lightweight manner.
    For use in the scanpy workflow as an alternative to :func:`scanpy.pp.neighbors`.
.. note::
This is just a wrapper of :func:`bbknn.bbknn`: more information
and bug reports `here <https://github.com/Teichlab/bbknn>`__.
Params
------
adata : ``AnnData``
Needs the PCA computed and stored in ``adata.obsm["X_pca"]``.
batch_key : ``str``, optional (default: "batch")
``adata.obs`` column name discriminating between your batches.
neighbors_within_batch : ``int``, optional (default: 3)
How many top neighbours to report for each batch; total number of neighbours
will be this number times the number of batches.
n_pcs : ``int``, optional (default: 50)
How many principal components to use in the analysis.
trim : ``int`` or ``None``, optional (default: ``None``)
        If not ``None``, trim the neighbours of each cell to this
many top connectivities. May help with population
independence and improve the tidiness of clustering.
approx : ``bool``, optional (default: ``True``)
If ``True``, use annoy's approximate neighbour finding. This
results in a quicker run time for large datasets while also
potentially increasing the degree of batch correction.
n_trees : ``int``, optional (default: 10)
Only used when ``approx=True``. The number of trees to
construct in the annoy forest. More trees give higher
precision when querying, at the cost of increased run time and
resource intensity.
use_faiss : ``bool``, optional (default: ``True``)
If ``approx=False`` and the metric is "euclidean", use the
faiss package to compute nearest neighbours if installed. This
improves performance at a minor cost to numerical precision as
faiss operates on float32.
metric : ``str`` or ``sklearn.neighbors.DistanceMetric``, optional (default: "angular")
What distance metric to use. If using ``approx=True``, the
options are "angular", "euclidean", "manhattan" and
"hamming". Otherwise, the options are "euclidean", a member of
the ``sklearn.neighbors.KDTree.valid_metrics`` list, or
parameterised ``sklearn.neighbors.DistanceMetric`` `objects
<https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html>`_::
>>> from sklearn import neighbors
>>> neighbors.KDTree.valid_metrics
['p', 'chebyshev', 'cityblock', 'minkowski', 'infinity',
'l2', 'euclidean', 'manhattan', 'l1']
>>> pass_as_metric = neighbors.DistanceMetric.get_metric('minkowski', p=3)
bandwidth : ``float``, optional (default: 1)
``scanpy.neighbors.compute_connectivities_umap`` parameter,
higher values result in a gentler slope of the connectivities
exponentials (i.e. larger connectivity values being returned)
local_connectivity : ``int``, optional (default: 1)
``scanpy.neighbors.compute_connectivities_umap`` parameter,
how many nearest neighbors of each cell are assumed to be
fully connected (and given a connectivity value of 1)
save_knn : ``bool``, optional (default: ``False``)
If ``True``, save the indices of the nearest neighbours for
each cell in ``adata.uns['bbknn']``.
copy : ``bool``, optional (default: ``False``)
If ``True``, return a copy instead of writing to the supplied adata.
Returns
-------
The `adata` with the batch-corrected graph.
"""
params = locals() # Has to be first
kwargs = params.pop('kwargs')
try:
from bbknn import bbknn
except ImportError:
raise ImportError('Please install bbknn: `pip install bbknn`.')
return bbknn(**params, **kwargs) | def function[bbknn, parameter[adata, batch_key, save_knn, copy]]:
constant[ Batch balanced kNN [Park18]_.
Batch balanced kNN alters the kNN procedure to identify each
cell's top neighbours in each batch separately instead of the
entire cell pool with no accounting for batch. Aligns batches in a
quick and lightweight manner.
    For use in the scanpy workflow as an alternative to :func:`scanpy.pp.neighbors`.
.. note::
This is just a wrapper of :func:`bbknn.bbknn`: more information
and bug reports `here <https://github.com/Teichlab/bbknn>`__.
Params
------
adata : ``AnnData``
Needs the PCA computed and stored in ``adata.obsm["X_pca"]``.
batch_key : ``str``, optional (default: "batch")
``adata.obs`` column name discriminating between your batches.
neighbors_within_batch : ``int``, optional (default: 3)
How many top neighbours to report for each batch; total number of neighbours
will be this number times the number of batches.
n_pcs : ``int``, optional (default: 50)
How many principal components to use in the analysis.
trim : ``int`` or ``None``, optional (default: ``None``)
        If not ``None``, trim the neighbours of each cell to this
many top connectivities. May help with population
independence and improve the tidiness of clustering.
approx : ``bool``, optional (default: ``True``)
If ``True``, use annoy's approximate neighbour finding. This
results in a quicker run time for large datasets while also
potentially increasing the degree of batch correction.
n_trees : ``int``, optional (default: 10)
Only used when ``approx=True``. The number of trees to
construct in the annoy forest. More trees give higher
precision when querying, at the cost of increased run time and
resource intensity.
use_faiss : ``bool``, optional (default: ``True``)
If ``approx=False`` and the metric is "euclidean", use the
faiss package to compute nearest neighbours if installed. This
improves performance at a minor cost to numerical precision as
faiss operates on float32.
metric : ``str`` or ``sklearn.neighbors.DistanceMetric``, optional (default: "angular")
What distance metric to use. If using ``approx=True``, the
options are "angular", "euclidean", "manhattan" and
"hamming". Otherwise, the options are "euclidean", a member of
the ``sklearn.neighbors.KDTree.valid_metrics`` list, or
parameterised ``sklearn.neighbors.DistanceMetric`` `objects
<https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html>`_::
>>> from sklearn import neighbors
>>> neighbors.KDTree.valid_metrics
['p', 'chebyshev', 'cityblock', 'minkowski', 'infinity',
'l2', 'euclidean', 'manhattan', 'l1']
>>> pass_as_metric = neighbors.DistanceMetric.get_metric('minkowski', p=3)
bandwidth : ``float``, optional (default: 1)
``scanpy.neighbors.compute_connectivities_umap`` parameter,
higher values result in a gentler slope of the connectivities
exponentials (i.e. larger connectivity values being returned)
local_connectivity : ``int``, optional (default: 1)
``scanpy.neighbors.compute_connectivities_umap`` parameter,
how many nearest neighbors of each cell are assumed to be
fully connected (and given a connectivity value of 1)
save_knn : ``bool``, optional (default: ``False``)
If ``True``, save the indices of the nearest neighbours for
each cell in ``adata.uns['bbknn']``.
copy : ``bool``, optional (default: ``False``)
If ``True``, return a copy instead of writing to the supplied adata.
Returns
-------
The `adata` with the batch-corrected graph.
]
variable[params] assign[=] call[name[locals], parameter[]]
variable[kwargs] assign[=] call[name[params].pop, parameter[constant[kwargs]]]
<ast.Try object at 0x7da20c990be0>
return[call[name[bbknn], parameter[]]] | keyword[def] identifier[bbknn] ( identifier[adata] , identifier[batch_key] = literal[string] , identifier[save_knn] = keyword[False] , identifier[copy] = keyword[False] ,** identifier[kwargs] ):
literal[string]
identifier[params] = identifier[locals] ()
identifier[kwargs] = identifier[params] . identifier[pop] ( literal[string] )
keyword[try] :
keyword[from] identifier[bbknn] keyword[import] identifier[bbknn]
keyword[except] identifier[ImportError] :
keyword[raise] identifier[ImportError] ( literal[string] )
keyword[return] identifier[bbknn] (** identifier[params] ,** identifier[kwargs] ) | def bbknn(adata, batch_key='batch', save_knn=False, copy=False, **kwargs):
""" Batch balanced kNN [Park18]_.
Batch balanced kNN alters the kNN procedure to identify each
cell's top neighbours in each batch separately instead of the
entire cell pool with no accounting for batch. Aligns batches in a
quick and lightweight manner.
    For use in the scanpy workflow as an alternative to :func:`scanpy.pp.neighbors`.
.. note::
This is just a wrapper of :func:`bbknn.bbknn`: more information
and bug reports `here <https://github.com/Teichlab/bbknn>`__.
Params
------
adata : ``AnnData``
Needs the PCA computed and stored in ``adata.obsm["X_pca"]``.
batch_key : ``str``, optional (default: "batch")
``adata.obs`` column name discriminating between your batches.
neighbors_within_batch : ``int``, optional (default: 3)
How many top neighbours to report for each batch; total number of neighbours
will be this number times the number of batches.
n_pcs : ``int``, optional (default: 50)
How many principal components to use in the analysis.
trim : ``int`` or ``None``, optional (default: ``None``)
        If not ``None``, trim the neighbours of each cell to this
many top connectivities. May help with population
independence and improve the tidiness of clustering.
approx : ``bool``, optional (default: ``True``)
If ``True``, use annoy's approximate neighbour finding. This
results in a quicker run time for large datasets while also
potentially increasing the degree of batch correction.
n_trees : ``int``, optional (default: 10)
Only used when ``approx=True``. The number of trees to
construct in the annoy forest. More trees give higher
precision when querying, at the cost of increased run time and
resource intensity.
use_faiss : ``bool``, optional (default: ``True``)
If ``approx=False`` and the metric is "euclidean", use the
faiss package to compute nearest neighbours if installed. This
improves performance at a minor cost to numerical precision as
faiss operates on float32.
metric : ``str`` or ``sklearn.neighbors.DistanceMetric``, optional (default: "angular")
What distance metric to use. If using ``approx=True``, the
options are "angular", "euclidean", "manhattan" and
"hamming". Otherwise, the options are "euclidean", a member of
the ``sklearn.neighbors.KDTree.valid_metrics`` list, or
parameterised ``sklearn.neighbors.DistanceMetric`` `objects
<https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html>`_::
>>> from sklearn import neighbors
>>> neighbors.KDTree.valid_metrics
['p', 'chebyshev', 'cityblock', 'minkowski', 'infinity',
'l2', 'euclidean', 'manhattan', 'l1']
>>> pass_as_metric = neighbors.DistanceMetric.get_metric('minkowski', p=3)
bandwidth : ``float``, optional (default: 1)
``scanpy.neighbors.compute_connectivities_umap`` parameter,
higher values result in a gentler slope of the connectivities
exponentials (i.e. larger connectivity values being returned)
local_connectivity : ``int``, optional (default: 1)
``scanpy.neighbors.compute_connectivities_umap`` parameter,
how many nearest neighbors of each cell are assumed to be
fully connected (and given a connectivity value of 1)
save_knn : ``bool``, optional (default: ``False``)
If ``True``, save the indices of the nearest neighbours for
each cell in ``adata.uns['bbknn']``.
copy : ``bool``, optional (default: ``False``)
If ``True``, return a copy instead of writing to the supplied adata.
Returns
-------
The `adata` with the batch-corrected graph.
"""
params = locals() # Has to be first
kwargs = params.pop('kwargs')
try:
from bbknn import bbknn # depends on [control=['try'], data=[]]
except ImportError:
raise ImportError('Please install bbknn: `pip install bbknn`.') # depends on [control=['except'], data=[]]
return bbknn(**params, **kwargs) |
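
A hypothetical scanpy workflow around the wrapper, following the docstring's note that it replaces sc.pp.neighbors; the AnnData object and its 'batch' column are assumed to exist:

# import scanpy as sc
# sc.tl.pca(adata)                                  # bbknn reads X_pca
# bbknn(adata, batch_key='batch',
#       neighbors_within_batch=3, n_pcs=50, trim=None)
# sc.tl.umap(adata)                                 # uses the corrected graph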
def plt_goea_results(fout_img, goea_results, **kws):
"""Plot a single page."""
go_sources = [rec.GO for rec in goea_results]
    go2obj = {rec.GO: rec.goterm for rec in goea_results}
gosubdag = GoSubDag(go_sources, go2obj, rcntobj=True)
godagplot = GoSubDagPlot(gosubdag, goea_results=goea_results, **kws)
godagplot.plt_dag(fout_img) | def function[plt_goea_results, parameter[fout_img, goea_results]]:
constant[Plot a single page.]
variable[go_sources] assign[=] <ast.ListComp object at 0x7da20e955420>
variable[go2obj] assign[=] <ast.DictComp object at 0x7da1b26af370>
variable[gosubdag] assign[=] call[name[GoSubDag], parameter[name[go_sources], name[go2obj]]]
variable[godagplot] assign[=] call[name[GoSubDagPlot], parameter[name[gosubdag]]]
call[name[godagplot].plt_dag, parameter[name[fout_img]]] | keyword[def] identifier[plt_goea_results] ( identifier[fout_img] , identifier[goea_results] ,** identifier[kws] ):
literal[string]
identifier[go_sources] =[ identifier[rec] . identifier[GO] keyword[for] identifier[rec] keyword[in] identifier[goea_results] ]
identifier[go2obj] ={ identifier[rec] . identifier[GO] : identifier[rec] . identifier[goterm] keyword[for] identifier[rec] keyword[in] identifier[goea_results] }
identifier[gosubdag] = identifier[GoSubDag] ( identifier[go_sources] , identifier[go2obj] , identifier[rcntobj] = keyword[True] )
identifier[godagplot] = identifier[GoSubDagPlot] ( identifier[gosubdag] , identifier[goea_results] = identifier[goea_results] ,** identifier[kws] )
identifier[godagplot] . identifier[plt_dag] ( identifier[fout_img] ) | def plt_goea_results(fout_img, goea_results, **kws):
"""Plot a single page."""
go_sources = [rec.GO for rec in goea_results]
go2obj = {rec.GO: rec.goterm for rec in goea_results}
gosubdag = GoSubDag(go_sources, go2obj, rcntobj=True)
godagplot = GoSubDagPlot(gosubdag, goea_results=goea_results, **kws)
godagplot.plt_dag(fout_img) |
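
A hedged end-to-end sketch for the plotting helper above. GoSubDag and GoSubDagPlot come from goatools; `godag`, `annotations`, `pop_genes` and `study_genes` are placeholders the caller must supply.

from goatools.go_enrichment import GOEnrichmentStudy

goea = GOEnrichmentStudy(pop_genes, annotations, godag, methods=['fdr_bh'])
significant = [r for r in goea.run_study(study_genes) if r.p_fdr_bh < 0.05]
plt_goea_results('goea_enriched.png', significant)
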
def reload_module(self, module_path=None):
"""
Reloads a module into the framework. If module_path is not
specified, then the current_module variable is used. Returns True
on success, False on error.
@type module_path: String
@param module_path: The name of the module to reload
"""
if module_path is None:
if self.current_module is not None:
module_path = self.current_module.name
else:
self.logger.warning('must specify module if no module is currently being used')
return False
if module_path not in self.modules:
self.logger.error('invalid module requested for reload')
raise termineter.errors.FrameworkRuntimeError('invalid module requested for reload')
self.logger.info('reloading module: ' + module_path)
module_instance = self.import_module(module_path, reload_module=True)
if not isinstance(module_instance, termineter.module.TermineterModule):
self.logger.error('module: ' + module_path + ' is not derived from the TermineterModule class')
raise termineter.errors.FrameworkRuntimeError('module: ' + module_path + ' is not derived from the TermineterModule class')
if not hasattr(module_instance, 'run'):
self.logger.error('module: ' + module_path + ' has no run() method')
raise termineter.errors.FrameworkRuntimeError('module: ' + module_path + ' has no run() method')
if not isinstance(module_instance.options, termineter.options.Options) or not isinstance(module_instance.advanced_options, termineter.options.Options):
self.logger.error('module: ' + module_path + ' options and advanced_options must be termineter.options.Options instances')
raise termineter.errors.FrameworkRuntimeError('options and advanced_options must be termineter.options.Options instances')
module_instance.name = module_path.split('/')[-1]
module_instance.path = module_path
self.modules[module_path] = module_instance
if self.current_module is not None:
if self.current_module.path == module_instance.path:
self.current_module = module_instance
return True | def function[reload_module, parameter[self, module_path]]:
constant[
Reloads a module into the framework. If module_path is not
specified, then the current_module variable is used. Returns True
on success, False on error.
@type module_path: String
@param module_path: The name of the module to reload
]
if compare[name[module_path] is constant[None]] begin[:]
if compare[name[self].current_module is_not constant[None]] begin[:]
variable[module_path] assign[=] name[self].current_module.name
if compare[name[module_path] <ast.NotIn object at 0x7da2590d7190> name[self].modules] begin[:]
call[name[self].logger.error, parameter[constant[invalid module requested for reload]]]
<ast.Raise object at 0x7da1b1836020>
call[name[self].logger.info, parameter[binary_operation[constant[reloading module: ] + name[module_path]]]]
variable[module_instance] assign[=] call[name[self].import_module, parameter[name[module_path]]]
if <ast.UnaryOp object at 0x7da1b18363e0> begin[:]
call[name[self].logger.error, parameter[binary_operation[binary_operation[constant[module: ] + name[module_path]] + constant[ is not derived from the TermineterModule class]]]]
<ast.Raise object at 0x7da1b1835030>
if <ast.UnaryOp object at 0x7da1b18df490> begin[:]
call[name[self].logger.error, parameter[binary_operation[binary_operation[constant[module: ] + name[module_path]] + constant[ has no run() method]]]]
<ast.Raise object at 0x7da1b18ddf00>
if <ast.BoolOp object at 0x7da1b18dd7b0> begin[:]
call[name[self].logger.error, parameter[binary_operation[binary_operation[constant[module: ] + name[module_path]] + constant[ options and advanced_options must be termineter.options.Options instances]]]]
<ast.Raise object at 0x7da1b18dda20>
name[module_instance].name assign[=] call[call[name[module_path].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da1b18cb1c0>]
name[module_instance].path assign[=] name[module_path]
call[name[self].modules][name[module_path]] assign[=] name[module_instance]
if compare[name[self].current_module is_not constant[None]] begin[:]
if compare[name[self].current_module.path equal[==] name[module_instance].path] begin[:]
name[self].current_module assign[=] name[module_instance]
return[constant[True]] | keyword[def] identifier[reload_module] ( identifier[self] , identifier[module_path] = keyword[None] ):
literal[string]
keyword[if] identifier[module_path] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[current_module] keyword[is] keyword[not] keyword[None] :
identifier[module_path] = identifier[self] . identifier[current_module] . identifier[name]
keyword[else] :
identifier[self] . identifier[logger] . identifier[warning] ( literal[string] )
keyword[return] keyword[False]
keyword[if] identifier[module_path] keyword[not] keyword[in] identifier[self] . identifier[modules] :
identifier[self] . identifier[logger] . identifier[error] ( literal[string] )
keyword[raise] identifier[termineter] . identifier[errors] . identifier[FrameworkRuntimeError] ( literal[string] )
identifier[self] . identifier[logger] . identifier[info] ( literal[string] + identifier[module_path] )
identifier[module_instance] = identifier[self] . identifier[import_module] ( identifier[module_path] , identifier[reload_module] = keyword[True] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[module_instance] , identifier[termineter] . identifier[module] . identifier[TermineterModule] ):
identifier[self] . identifier[logger] . identifier[error] ( literal[string] + identifier[module_path] + literal[string] )
keyword[raise] identifier[termineter] . identifier[errors] . identifier[FrameworkRuntimeError] ( literal[string] + identifier[module_path] + literal[string] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[module_instance] , literal[string] ):
identifier[self] . identifier[logger] . identifier[error] ( literal[string] + identifier[module_path] + literal[string] )
keyword[raise] identifier[termineter] . identifier[errors] . identifier[FrameworkRuntimeError] ( literal[string] + identifier[module_path] + literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[module_instance] . identifier[options] , identifier[termineter] . identifier[options] . identifier[Options] ) keyword[or] keyword[not] identifier[isinstance] ( identifier[module_instance] . identifier[advanced_options] , identifier[termineter] . identifier[options] . identifier[Options] ):
identifier[self] . identifier[logger] . identifier[error] ( literal[string] + identifier[module_path] + literal[string] )
keyword[raise] identifier[termineter] . identifier[errors] . identifier[FrameworkRuntimeError] ( literal[string] )
identifier[module_instance] . identifier[name] = identifier[module_path] . identifier[split] ( literal[string] )[- literal[int] ]
identifier[module_instance] . identifier[path] = identifier[module_path]
identifier[self] . identifier[modules] [ identifier[module_path] ]= identifier[module_instance]
keyword[if] identifier[self] . identifier[current_module] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[self] . identifier[current_module] . identifier[path] == identifier[module_instance] . identifier[path] :
identifier[self] . identifier[current_module] = identifier[module_instance]
keyword[return] keyword[True] | def reload_module(self, module_path=None):
"""
Reloads a module into the framework. If module_path is not
specified, then the current_module variable is used. Returns True
on success, False on error.
@type module_path: String
@param module_path: The name of the module to reload
"""
if module_path is None:
if self.current_module is not None:
module_path = self.current_module.name # depends on [control=['if'], data=[]]
else:
self.logger.warning('must specify module if no module is currently being used')
return False # depends on [control=['if'], data=['module_path']]
if module_path not in self.modules:
self.logger.error('invalid module requested for reload')
raise termineter.errors.FrameworkRuntimeError('invalid module requested for reload') # depends on [control=['if'], data=[]]
self.logger.info('reloading module: ' + module_path)
module_instance = self.import_module(module_path, reload_module=True)
if not isinstance(module_instance, termineter.module.TermineterModule):
self.logger.error('module: ' + module_path + ' is not derived from the TermineterModule class')
raise termineter.errors.FrameworkRuntimeError('module: ' + module_path + ' is not derived from the TermineterModule class') # depends on [control=['if'], data=[]]
if not hasattr(module_instance, 'run'):
self.logger.error('module: ' + module_path + ' has no run() method')
raise termineter.errors.FrameworkRuntimeError('module: ' + module_path + ' has no run() method') # depends on [control=['if'], data=[]]
if not isinstance(module_instance.options, termineter.options.Options) or not isinstance(module_instance.advanced_options, termineter.options.Options):
self.logger.error('module: ' + module_path + ' options and advanced_options must be termineter.options.Options instances')
raise termineter.errors.FrameworkRuntimeError('options and advanced_options must be termineter.options.Options instances') # depends on [control=['if'], data=[]]
module_instance.name = module_path.split('/')[-1]
module_instance.path = module_path
self.modules[module_path] = module_instance
if self.current_module is not None:
if self.current_module.path == module_instance.path:
self.current_module = module_instance # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return True |
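
Illustrative call pattern only; `framework` stands for a termineter Framework instance and the module path is hypothetical.

if framework.reload_module('parse_tables'):
    print('module reloaded in place')
else:
    print('no module selected and none specified')
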
def emd_eq(self, other):
"""Return whether this concept is equal to another in the context of
an EMD calculation.
"""
return (self.phi == other.phi and
self.mechanism == other.mechanism and
self.eq_repertoires(other)) | def function[emd_eq, parameter[self, other]]:
constant[Return whether this concept is equal to another in the context of
an EMD calculation.
]
return[<ast.BoolOp object at 0x7da18ede4f40>] | keyword[def] identifier[emd_eq] ( identifier[self] , identifier[other] ):
literal[string]
keyword[return] ( identifier[self] . identifier[phi] == identifier[other] . identifier[phi] keyword[and]
identifier[self] . identifier[mechanism] == identifier[other] . identifier[mechanism] keyword[and]
identifier[self] . identifier[eq_repertoires] ( identifier[other] )) | def emd_eq(self, other):
"""Return whether this concept is equal to another in the context of
an EMD calculation.
"""
return self.phi == other.phi and self.mechanism == other.mechanism and self.eq_repertoires(other) |
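
A minimal stand-in showing the equality contract above. The real class is pyphi's Concept; `eq_repertoires` is assumed here to compare stored repertoires directly.

class StubConcept:
    def __init__(self, phi, mechanism, repertoire):
        self.phi, self.mechanism, self.repertoire = phi, mechanism, repertoire

    def eq_repertoires(self, other):
        return self.repertoire == other.repertoire

    emd_eq = emd_eq  # reuse the module-level function above as a method

a = StubConcept(0.25, (0, 1), [0.5, 0.5])
b = StubConcept(0.25, (0, 1), [0.5, 0.5])
assert a.emd_eq(b)
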
def json_loads(cls, s, **kwargs):
"""
A rewrap of json.loads done for one reason - to inject a custom `cls` kwarg
:param s:
:param kwargs:
:return:
:rtype: dict
"""
if 'cls' not in kwargs:
kwargs['cls'] = cls.json_decoder
return json.loads(s, **kwargs) | def function[json_loads, parameter[cls, s]]:
constant[
A rewrap of json.loads done for one reason - to inject a custom `cls` kwarg
:param s:
:param kwargs:
:return:
:rtype: dict
]
if compare[constant[cls] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[cls]] assign[=] name[cls].json_decoder
return[call[name[json].loads, parameter[name[s]]]] | keyword[def] identifier[json_loads] ( identifier[cls] , identifier[s] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= identifier[cls] . identifier[json_decoder]
keyword[return] identifier[json] . identifier[loads] ( identifier[s] ,** identifier[kwargs] ) | def json_loads(cls, s, **kwargs):
"""
A rewrap of json.loads done for one reason - to inject a custom `cls` kwarg
:param s:
:param kwargs:
:return:
:rtype: dict
"""
if 'cls' not in kwargs:
kwargs['cls'] = cls.json_decoder # depends on [control=['if'], data=['kwargs']]
return json.loads(s, **kwargs) |
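
The same injection pattern in isolation, with a hypothetical decoder subclass; `setdefault` is equivalent to the membership check above.

import json

class MyDecoder(json.JSONDecoder):
    pass  # a real subclass would override object_hook and friends

class Client:
    json_decoder = MyDecoder

    @classmethod
    def json_loads(cls, s, **kwargs):
        kwargs.setdefault('cls', cls.json_decoder)
        return json.loads(s, **kwargs)

print(Client.json_loads('{"a": 1}'))  # {'a': 1}, decoded via MyDecoder
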
def _create_filter(self, server_id, source_namespace, query,
query_language, owned, filter_id, name):
"""
Create a :term:`dynamic indication filter` instance in the Interop
namespace of a WBEM server and return that instance.
In order to catch any changes the server applies, the instance is
retrieved again using the instance path returned by instance creation.
Parameters:
server_id (:term:`string`):
The server ID of the WBEM server, returned by
:meth:`~pywbem.WBEMSubscriptionManager.add_server`.
source_namespace (:term:`string`):
Source namespace of the indication filter.
query (:term:`string`):
Filter query in the specified query language.
query_language (:term:`string`):
Query language for the specified filter query.
Examples: 'WQL', 'DMTF:CQL'.
owned (:class:`py:bool`):
Defines whether or not the created instance is *owned* by the
subscription manager.
filter_id (:term:`string`):
Filter ID string to be incorporated into the `Name` property of the
filter instance, as detailed for `subscription_manager_id`, or
`None`.
Mutually exclusive with the `name` parameter.
name (:term:`string`):
Value for the `Name` property of the filter instance, or `None`.
Mutually exclusive with the `filter_id` parameter.
Returns:
:class:`~pywbem.CIMInstance`: The created instance, as retrieved
from the server.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
"""
# Validate server_id
server = self._get_server(server_id)
this_host = getfqdn()
ownership = "owned" if owned else "permanent"
filter_path = CIMInstanceName(FILTER_CLASSNAME,
namespace=server.interop_ns)
filter_inst = CIMInstance(FILTER_CLASSNAME)
filter_inst.path = filter_path
filter_inst['CreationClassName'] = FILTER_CLASSNAME
filter_inst['SystemCreationClassName'] = SYSTEM_CREATION_CLASSNAME
filter_inst['SystemName'] = this_host
if filter_id:
filter_inst['Name'] = _format(
'pywbemfilter:{0}:{1}:{2}:{3}',
ownership, self._subscription_manager_id, filter_id,
uuid.uuid4())
if name:
filter_inst['Name'] = name
filter_inst['SourceNamespace'] = source_namespace
filter_inst['Query'] = query
filter_inst['QueryLanguage'] = query_language
if owned:
for i, inst in enumerate(self._owned_filters[server_id]):
if inst.path == filter_path:
# It already exists, now check its properties
if inst != filter_inst:
server.conn.ModifyInstance(filter_inst)
filter_inst = server.conn.GetInstance(filter_path)
self._owned_filters[server_id][i] = filter_inst
return filter_inst
filter_path = server.conn.CreateInstance(filter_inst)
filter_inst = server.conn.GetInstance(filter_path)
self._owned_filters[server_id].append(filter_inst)
else:
# Responsibility to ensure it does not exist yet is with the user
filter_path = server.conn.CreateInstance(filter_inst)
filter_inst = server.conn.GetInstance(filter_path)
return filter_inst | def function[_create_filter, parameter[self, server_id, source_namespace, query, query_language, owned, filter_id, name]]:
constant[
Create a :term:`dynamic indication filter` instance in the Interop
namespace of a WBEM server and return that instance.
In order to catch any changes the server applies, the instance is
retrieved again using the instance path returned by instance creation.
Parameters:
server_id (:term:`string`):
The server ID of the WBEM server, returned by
:meth:`~pywbem.WBEMSubscriptionManager.add_server`.
source_namespace (:term:`string`):
Source namespace of the indication filter.
query (:term:`string`):
Filter query in the specified query language.
query_language (:term:`string`):
Query language for the specified filter query.
Examples: 'WQL', 'DMTF:CQL'.
owned (:class:`py:bool`):
Defines whether or not the created instance is *owned* by the
subscription manager.
filter_id (:term:`string`):
Filter ID string to be incorporated into the `Name` property of the
filter instance, as detailed for `subscription_manager_id`, or
`None`.
Mutually exclusive with the `name` parameter.
name (:term:`string`):
Value for the `Name` property of the filter instance, or `None`.
Mutually exclusive with the `filter_id` parameter.
Returns:
:class:`~pywbem.CIMInstance`: The created instance, as retrieved
from the server.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
]
variable[server] assign[=] call[name[self]._get_server, parameter[name[server_id]]]
variable[this_host] assign[=] call[name[getfqdn], parameter[]]
variable[ownership] assign[=] <ast.IfExp object at 0x7da18dc050c0>
variable[filter_path] assign[=] call[name[CIMInstanceName], parameter[name[FILTER_CLASSNAME]]]
variable[filter_inst] assign[=] call[name[CIMInstance], parameter[name[FILTER_CLASSNAME]]]
name[filter_inst].path assign[=] name[filter_path]
call[name[filter_inst]][constant[CreationClassName]] assign[=] name[FILTER_CLASSNAME]
call[name[filter_inst]][constant[SystemCreationClassName]] assign[=] name[SYSTEM_CREATION_CLASSNAME]
call[name[filter_inst]][constant[SystemName]] assign[=] name[this_host]
if name[filter_id] begin[:]
call[name[filter_inst]][constant[Name]] assign[=] call[name[_format], parameter[constant[pywbemfilter:{0}:{1}:{2}:{3}], name[ownership], name[self]._subscription_manager_id, name[filter_id], call[name[uuid].uuid4, parameter[]]]]
if name[name] begin[:]
call[name[filter_inst]][constant[Name]] assign[=] name[name]
call[name[filter_inst]][constant[SourceNamespace]] assign[=] name[source_namespace]
call[name[filter_inst]][constant[Query]] assign[=] name[query]
call[name[filter_inst]][constant[QueryLanguage]] assign[=] name[query_language]
if name[owned] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18dc05060>, <ast.Name object at 0x7da18dc05090>]]] in starred[call[name[enumerate], parameter[call[name[self]._owned_filters][name[server_id]]]]] begin[:]
if compare[name[inst].path equal[==] name[filter_path]] begin[:]
if compare[name[inst] not_equal[!=] name[filter_inst]] begin[:]
call[name[server].conn.ModifyInstance, parameter[name[filter_inst]]]
variable[filter_inst] assign[=] call[name[server].conn.GetInstance, parameter[name[filter_path]]]
call[call[name[self]._owned_filters][name[server_id]]][name[i]] assign[=] name[filter_inst]
return[name[filter_inst]]
variable[filter_path] assign[=] call[name[server].conn.CreateInstance, parameter[name[filter_inst]]]
variable[filter_inst] assign[=] call[name[server].conn.GetInstance, parameter[name[filter_path]]]
call[call[name[self]._owned_filters][name[server_id]].append, parameter[name[filter_inst]]]
return[name[filter_inst]] | keyword[def] identifier[_create_filter] ( identifier[self] , identifier[server_id] , identifier[source_namespace] , identifier[query] ,
identifier[query_language] , identifier[owned] , identifier[filter_id] , identifier[name] ):
literal[string]
identifier[server] = identifier[self] . identifier[_get_server] ( identifier[server_id] )
identifier[this_host] = identifier[getfqdn] ()
identifier[ownership] = literal[string] keyword[if] identifier[owned] keyword[else] literal[string]
identifier[filter_path] = identifier[CIMInstanceName] ( identifier[FILTER_CLASSNAME] ,
identifier[namespace] = identifier[server] . identifier[interop_ns] )
identifier[filter_inst] = identifier[CIMInstance] ( identifier[FILTER_CLASSNAME] )
identifier[filter_inst] . identifier[path] = identifier[filter_path]
identifier[filter_inst] [ literal[string] ]= identifier[FILTER_CLASSNAME]
identifier[filter_inst] [ literal[string] ]= identifier[SYSTEM_CREATION_CLASSNAME]
identifier[filter_inst] [ literal[string] ]= identifier[this_host]
keyword[if] identifier[filter_id] :
identifier[filter_inst] [ literal[string] ]= identifier[_format] (
literal[string] ,
identifier[ownership] , identifier[self] . identifier[_subscription_manager_id] , identifier[filter_id] ,
identifier[uuid] . identifier[uuid4] ())
keyword[if] identifier[name] :
identifier[filter_inst] [ literal[string] ]= identifier[name]
identifier[filter_inst] [ literal[string] ]= identifier[source_namespace]
identifier[filter_inst] [ literal[string] ]= identifier[query]
identifier[filter_inst] [ literal[string] ]= identifier[query_language]
keyword[if] identifier[owned] :
keyword[for] identifier[i] , identifier[inst] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_owned_filters] [ identifier[server_id] ]):
keyword[if] identifier[inst] . identifier[path] == identifier[filter_path] :
keyword[if] identifier[inst] != identifier[filter_inst] :
identifier[server] . identifier[conn] . identifier[ModifyInstance] ( identifier[filter_inst] )
identifier[filter_inst] = identifier[server] . identifier[conn] . identifier[GetInstance] ( identifier[filter_path] )
identifier[self] . identifier[_owned_filters] [ identifier[server_id] ][ identifier[i] ]= identifier[filter_inst]
keyword[return] identifier[filter_inst]
identifier[filter_path] = identifier[server] . identifier[conn] . identifier[CreateInstance] ( identifier[filter_inst] )
identifier[filter_inst] = identifier[server] . identifier[conn] . identifier[GetInstance] ( identifier[filter_path] )
identifier[self] . identifier[_owned_filters] [ identifier[server_id] ]. identifier[append] ( identifier[filter_inst] )
keyword[else] :
identifier[filter_path] = identifier[server] . identifier[conn] . identifier[CreateInstance] ( identifier[filter_inst] )
identifier[filter_inst] = identifier[server] . identifier[conn] . identifier[GetInstance] ( identifier[filter_path] )
keyword[return] identifier[filter_inst] | def _create_filter(self, server_id, source_namespace, query, query_language, owned, filter_id, name):
"""
Create a :term:`dynamic indication filter` instance in the Interop
namespace of a WBEM server and return that instance.
In order to catch any changes the server applies, the instance is
retrieved again using the instance path returned by instance creation.
Parameters:
server_id (:term:`string`):
The server ID of the WBEM server, returned by
:meth:`~pywbem.WBEMSubscriptionManager.add_server`.
source_namespace (:term:`string`):
Source namespace of the indication filter.
query (:term:`string`):
Filter query in the specified query language.
query_language (:term:`string`):
Query language for the specified filter query.
Examples: 'WQL', 'DMTF:CQL'.
owned (:class:`py:bool`):
Defines whether or not the created instance is *owned* by the
subscription manager.
filter_id (:term:`string`):
Filter ID string to be incorporated into the `Name` property of the
filter instance, as detailed for `subscription_manager_id`, or
`None`.
Mutually exclusive with the `name` parameter.
name (:term:`string`):
Value for the `Name` property of the filter instance, or `None`.
Mutually exclusive with the `filter_id` parameter.
Returns:
:class:`~pywbem.CIMInstance`: The created instance, as retrieved
from the server.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
"""
# Validate server_id
server = self._get_server(server_id)
this_host = getfqdn()
ownership = 'owned' if owned else 'permanent'
filter_path = CIMInstanceName(FILTER_CLASSNAME, namespace=server.interop_ns)
filter_inst = CIMInstance(FILTER_CLASSNAME)
filter_inst.path = filter_path
filter_inst['CreationClassName'] = FILTER_CLASSNAME
filter_inst['SystemCreationClassName'] = SYSTEM_CREATION_CLASSNAME
filter_inst['SystemName'] = this_host
if filter_id:
filter_inst['Name'] = _format('pywbemfilter:{0}:{1}:{2}:{3}', ownership, self._subscription_manager_id, filter_id, uuid.uuid4()) # depends on [control=['if'], data=[]]
if name:
filter_inst['Name'] = name # depends on [control=['if'], data=[]]
filter_inst['SourceNamespace'] = source_namespace
filter_inst['Query'] = query
filter_inst['QueryLanguage'] = query_language
if owned:
for (i, inst) in enumerate(self._owned_filters[server_id]):
if inst.path == filter_path:
# It already exists, now check its properties
if inst != filter_inst:
server.conn.ModifyInstance(filter_inst)
filter_inst = server.conn.GetInstance(filter_path)
self._owned_filters[server_id][i] = filter_inst # depends on [control=['if'], data=['filter_inst']]
return filter_inst # depends on [control=['if'], data=['filter_path']] # depends on [control=['for'], data=[]]
filter_path = server.conn.CreateInstance(filter_inst)
filter_inst = server.conn.GetInstance(filter_path)
self._owned_filters[server_id].append(filter_inst) # depends on [control=['if'], data=[]]
else:
# Responsibility to ensure it does not exist yet is with the user
filter_path = server.conn.CreateInstance(filter_inst)
filter_inst = server.conn.GetInstance(filter_path)
return filter_inst |
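
The private helper above is normally reached through the public `add_filter` call; a hedged sketch, with host, credentials and filter details as placeholders:

from pywbem import WBEMConnection, WBEMServer, WBEMSubscriptionManager

conn = WBEMConnection('https://wbem.example.com', ('user', 'password'))
server = WBEMServer(conn)
mgr = WBEMSubscriptionManager(subscription_manager_id='demo')
server_id = mgr.add_server(server)
filter_inst = mgr.add_filter(
    server_id,
    source_namespace='root/cimv2',
    query='SELECT * FROM CIM_AlertIndication',
    query_language='WQL',
    owned=True,
    filter_id='alerts')
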
def methods(self) -> 'PrettyDir':
"""Returns all methods of the inspected object.
Note that "methods" can mean "functions" when inspecting a module.
"""
return PrettyDir(
self.obj,
[
pattr
for pattr in self.pattrs
if category_match(pattr.category, AttrCategory.FUNCTION)
],
) | def function[methods, parameter[self]]:
constant[Returns all methods of the inspected object.
Note that "methods" can mean "functions" when inspecting a module.
]
return[call[name[PrettyDir], parameter[name[self].obj, <ast.ListComp object at 0x7da2054a77c0>]]] | keyword[def] identifier[methods] ( identifier[self] )-> literal[string] :
literal[string]
keyword[return] identifier[PrettyDir] (
identifier[self] . identifier[obj] ,
[
identifier[pattr]
keyword[for] identifier[pattr] keyword[in] identifier[self] . identifier[pattrs]
keyword[if] identifier[category_match] ( identifier[pattr] . identifier[category] , identifier[AttrCategory] . identifier[FUNCTION] )
],
) | def methods(self) -> 'PrettyDir':
"""Returns all methods of the inspected object.
Note that "methods" can mean "functions" when inspecting a module.
"""
return PrettyDir(self.obj, [pattr for pattr in self.pattrs if category_match(pattr.category, AttrCategory.FUNCTION)]) |
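
Assuming the surrounding class is pdir2's PrettyDir, the property above is what the public API filters on:

import json
import pdir

print(pdir(list).methods)  # attributes categorized as functions/methods
print(pdir(json).methods)  # for a module, these are its functions
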
def result(self, timeout=None):
"""Gets the result of the task.
Arguments:
timeout: Maximum seconds to wait for a result before raising a
TimeoutError. If set to None, this will wait forever. If the
queue doesn't store results and timeout is None, this call will
never return.
"""
start = time.time()
while True:
task = self.get_task()
if not task or task.status not in (FINISHED, FAILED):
if not timeout:
continue
elif time.time() - start < timeout:
continue
else:
raise TimeoutError()
if task.status == FAILED:
raise task.result
return task.result | def function[result, parameter[self, timeout]]:
constant[Gets the result of the task.
Arguments:
timeout: Maximum seconds to wait for a result before raising a
TimeoutError. If set to None, this will wait forever. If the
queue doesn't store results and timeout is None, this call will
never return.
]
variable[start] assign[=] call[name[time].time, parameter[]]
while constant[True] begin[:]
variable[task] assign[=] call[name[self].get_task, parameter[]]
if <ast.BoolOp object at 0x7da1b000ee30> begin[:]
if <ast.UnaryOp object at 0x7da1b000d600> begin[:]
continue
if compare[name[task].status equal[==] name[FAILED]] begin[:]
<ast.Raise object at 0x7da1b000c6d0>
return[name[task].result] | keyword[def] identifier[result] ( identifier[self] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[start] = identifier[time] . identifier[time] ()
keyword[while] keyword[True] :
identifier[task] = identifier[self] . identifier[get_task] ()
keyword[if] keyword[not] identifier[task] keyword[or] identifier[task] . identifier[status] keyword[not] keyword[in] ( identifier[FINISHED] , identifier[FAILED] ):
keyword[if] keyword[not] identifier[timeout] :
keyword[continue]
keyword[elif] identifier[time] . identifier[time] ()- identifier[start] < identifier[timeout] :
keyword[continue]
keyword[else] :
keyword[raise] identifier[TimeoutError] ()
keyword[if] identifier[task] . identifier[status] == identifier[FAILED] :
keyword[raise] identifier[task] . identifier[result]
keyword[return] identifier[task] . identifier[result] | def result(self, timeout=None):
"""Gets the result of the task.
Arguments:
timeout: Maximum seconds to wait for a result before raising a
TimeoutError. If set to None, this will wait forever. If the
queue doesn't store results and timeout is None, this call will
never return.
"""
start = time.time()
while True:
task = self.get_task()
if not task or task.status not in (FINISHED, FAILED):
if not timeout:
continue # depends on [control=['if'], data=[]]
elif time.time() - start < timeout:
continue # depends on [control=['if'], data=[]]
else:
raise TimeoutError() # depends on [control=['if'], data=[]]
if task.status == FAILED:
raise task.result # depends on [control=['if'], data=[]]
return task.result # depends on [control=['while'], data=[]] |
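
A generic re-implementation of the same polling contract, shown only to make the control flow explicit; a short sleep is added because the original spins as fast as `get_task` allows. FINISHED and FAILED stand for the same module-level status constants the original assumes.

import time

def wait_for(get_task, timeout=None, poll=0.1):
    start = time.time()
    while True:
        task = get_task()
        if task and task.status in (FINISHED, FAILED):
            if task.status == FAILED:
                raise task.result
            return task.result
        if timeout is not None and time.time() - start >= timeout:
            raise TimeoutError()
        time.sleep(poll)  # avoid a hot loop between polls
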
def open_for_group_write(fp, mode, encoding='utf-8'):
"""Open with mode=mode and permissions '-rw-rw-r--' group writable is
the default on some systems/accounts, but it is important that it be present on our deployment machine
"""
d = os.path.split(fp)[0]
if not os.path.exists(d):
os.makedirs(d)
o = codecs.open(fp, mode, encoding=encoding)
o.flush()
os.chmod(fp, stat.S_IRGRP | stat.S_IROTH | stat.S_IRUSR | stat.S_IWGRP | stat.S_IWUSR)
return o | def function[open_for_group_write, parameter[fp, mode, encoding]]:
constant[Open with mode=mode and permissions '-rw-rw-r--'. Group-writable is
the default on some systems/accounts, but it is important that it be present on our deployment machine.
]
variable[d] assign[=] call[call[name[os].path.split, parameter[name[fp]]]][constant[0]]
if <ast.UnaryOp object at 0x7da2044c1690> begin[:]
call[name[os].makedirs, parameter[name[d]]]
variable[o] assign[=] call[name[codecs].open, parameter[name[fp], name[mode]]]
call[name[o].flush, parameter[]]
call[name[os].chmod, parameter[name[fp], binary_operation[binary_operation[binary_operation[binary_operation[name[stat].S_IRGRP <ast.BitOr object at 0x7da2590d6aa0> name[stat].S_IROTH] <ast.BitOr object at 0x7da2590d6aa0> name[stat].S_IRUSR] <ast.BitOr object at 0x7da2590d6aa0> name[stat].S_IWGRP] <ast.BitOr object at 0x7da2590d6aa0> name[stat].S_IWUSR]]]
return[name[o]] | keyword[def] identifier[open_for_group_write] ( identifier[fp] , identifier[mode] , identifier[encoding] = literal[string] ):
literal[string]
identifier[d] = identifier[os] . identifier[path] . identifier[split] ( identifier[fp] )[ literal[int] ]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[d] ):
identifier[os] . identifier[makedirs] ( identifier[d] )
identifier[o] = identifier[codecs] . identifier[open] ( identifier[fp] , identifier[mode] , identifier[encoding] = identifier[encoding] )
identifier[o] . identifier[flush] ()
identifier[os] . identifier[chmod] ( identifier[fp] , identifier[stat] . identifier[S_IRGRP] | identifier[stat] . identifier[S_IROTH] | identifier[stat] . identifier[S_IRUSR] | identifier[stat] . identifier[S_IWGRP] | identifier[stat] . identifier[S_IWUSR] )
keyword[return] identifier[o] | def open_for_group_write(fp, mode, encoding='utf-8'):
"""Open with mode=mode and permissions '-rw-rw-r--' group writable is
the default on some systems/accounts, but it is important that it be present on our deployment machine
"""
d = os.path.split(fp)[0]
if not os.path.exists(d):
os.makedirs(d) # depends on [control=['if'], data=[]]
o = codecs.open(fp, mode, encoding=encoding)
o.flush()
os.chmod(fp, stat.S_IRGRP | stat.S_IROTH | stat.S_IRUSR | stat.S_IWGRP | stat.S_IWUSR)
return o |
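
A quick check that the helper really leaves 0o664 ('-rw-rw-r--') behind, using a throwaway directory:

import os
import stat
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'sub', 'out.txt')
with open_for_group_write(path, 'w') as out:
    out.write(u'hello')
print(oct(stat.S_IMODE(os.stat(path).st_mode)))  # expected: 0o664
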
def _loop_topo(self, topologyType, topologicalEntity=None, topologyTypeToAvoid=None):
'''
this could be a faces generator for a python TopoShape class
that way you can just do:
for face in srf.faces:
processFace(face)
'''
topoTypes = {TopAbs_VERTEX: TopoDS_Vertex,
TopAbs_EDGE: TopoDS_Edge,
TopAbs_FACE: TopoDS_Face,
TopAbs_WIRE: TopoDS_Wire,
TopAbs_SHELL: TopoDS_Shell,
TopAbs_SOLID: TopoDS_Solid,
TopAbs_COMPOUND: TopoDS_Compound,
TopAbs_COMPSOLID: TopoDS_CompSolid}
assert topologyType in topoTypes.keys(), '%s not one of %s' % (topologyType, topoTypes.keys())
self.topExp = TopExp_Explorer()
# use self.myShape if nothing is specified
if topologicalEntity is None and topologyTypeToAvoid is None:
self.topExp.Init(self.myShape, topologyType)
elif topologicalEntity is None and topologyTypeToAvoid is not None:
self.topExp.Init(self.myShape, topologyType, topologyTypeToAvoid)
elif topologyTypeToAvoid is None:
self.topExp.Init(topologicalEntity, topologyType)
elif topologyTypeToAvoid:
self.topExp.Init(topologicalEntity,
topologyType,
topologyTypeToAvoid)
seq = []
hashes = [] # list that stores hashes to avoid redundancy
occ_seq = TopTools_ListOfShape()
while self.topExp.More():
current_item = self.topExp.Current()
current_item_hash = current_item.__hash__()
if not current_item_hash in hashes:
hashes.append(current_item_hash)
occ_seq.Append(current_item)
self.topExp.Next()
# Convert occ_seq to python list
occ_iterator = TopTools_ListIteratorOfListOfShape(occ_seq)
while occ_iterator.More():
topo_to_add = self.topoFactory[topologyType](occ_iterator.Value())
seq.append(topo_to_add)
occ_iterator.Next()
if self.ignore_orientation:
# filter out those entities that share the same TShape
# but do *not* share the same orientation
filter_orientation_seq = []
for i in seq:
_present = False
for j in filter_orientation_seq:
if i.IsSame(j):
_present = True
break
if _present is False:
filter_orientation_seq.append(i)
return filter_orientation_seq
else:
return iter(seq) | def function[_loop_topo, parameter[self, topologyType, topologicalEntity, topologyTypeToAvoid]]:
constant[
this could be a faces generator for a python TopoShape class;
that way you can just do:
for face in srf.faces:
processFace(face)
]
variable[topoTypes] assign[=] dictionary[[<ast.Name object at 0x7da18f810be0>, <ast.Name object at 0x7da18f810a60>, <ast.Name object at 0x7da18f8131c0>, <ast.Name object at 0x7da18f812920>, <ast.Name object at 0x7da18f811390>, <ast.Name object at 0x7da18f811fc0>, <ast.Name object at 0x7da18f813d90>, <ast.Name object at 0x7da18f813e20>], [<ast.Name object at 0x7da18f811930>, <ast.Name object at 0x7da18f811a20>, <ast.Name object at 0x7da18f813df0>, <ast.Name object at 0x7da18f8134c0>, <ast.Name object at 0x7da18f812350>, <ast.Name object at 0x7da18f813b80>, <ast.Name object at 0x7da18f813340>, <ast.Name object at 0x7da18f813460>]]
assert[compare[name[topologyType] in call[name[topoTypes].keys, parameter[]]]]
name[self].topExp assign[=] call[name[TopExp_Explorer], parameter[]]
if <ast.BoolOp object at 0x7da18f812da0> begin[:]
call[name[self].topExp.Init, parameter[name[self].myShape, name[topologyType]]]
variable[seq] assign[=] list[[]]
variable[hashes] assign[=] list[[]]
variable[occ_seq] assign[=] call[name[TopTools_ListOfShape], parameter[]]
while call[name[self].topExp.More, parameter[]] begin[:]
variable[current_item] assign[=] call[name[self].topExp.Current, parameter[]]
variable[current_item_hash] assign[=] call[name[current_item].__hash__, parameter[]]
if <ast.UnaryOp object at 0x7da18f811d80> begin[:]
call[name[hashes].append, parameter[name[current_item_hash]]]
call[name[occ_seq].Append, parameter[name[current_item]]]
call[name[self].topExp.Next, parameter[]]
variable[occ_iterator] assign[=] call[name[TopTools_ListIteratorOfListOfShape], parameter[name[occ_seq]]]
while call[name[occ_iterator].More, parameter[]] begin[:]
variable[topo_to_add] assign[=] call[call[name[self].topoFactory][name[topologyType]], parameter[call[name[occ_iterator].Value, parameter[]]]]
call[name[seq].append, parameter[name[topo_to_add]]]
call[name[occ_iterator].Next, parameter[]]
if name[self].ignore_orientation begin[:]
variable[filter_orientation_seq] assign[=] list[[]]
for taget[name[i]] in starred[name[seq]] begin[:]
variable[_present] assign[=] constant[False]
for taget[name[j]] in starred[name[filter_orientation_seq]] begin[:]
if call[name[i].IsSame, parameter[name[j]]] begin[:]
variable[_present] assign[=] constant[True]
break
if compare[name[_present] is constant[False]] begin[:]
call[name[filter_orientation_seq].append, parameter[name[i]]]
return[name[filter_orientation_seq]] | keyword[def] identifier[_loop_topo] ( identifier[self] , identifier[topologyType] , identifier[topologicalEntity] = keyword[None] , identifier[topologyTypeToAvoid] = keyword[None] ):
literal[string]
identifier[topoTypes] ={ identifier[TopAbs_VERTEX] : identifier[TopoDS_Vertex] ,
identifier[TopAbs_EDGE] : identifier[TopoDS_Edge] ,
identifier[TopAbs_FACE] : identifier[TopoDS_Face] ,
identifier[TopAbs_WIRE] : identifier[TopoDS_Wire] ,
identifier[TopAbs_SHELL] : identifier[TopoDS_Shell] ,
identifier[TopAbs_SOLID] : identifier[TopoDS_Solid] ,
identifier[TopAbs_COMPOUND] : identifier[TopoDS_Compound] ,
identifier[TopAbs_COMPSOLID] : identifier[TopoDS_CompSolid] }
keyword[assert] identifier[topologyType] keyword[in] identifier[topoTypes] . identifier[keys] (), literal[string] %( identifier[topologyType] , identifier[topoTypes] . identifier[keys] ())
identifier[self] . identifier[topExp] = identifier[TopExp_Explorer] ()
keyword[if] identifier[topologicalEntity] keyword[is] keyword[None] keyword[and] identifier[topologyTypeToAvoid] keyword[is] keyword[None] :
identifier[self] . identifier[topExp] . identifier[Init] ( identifier[self] . identifier[myShape] , identifier[topologyType] )
keyword[elif] identifier[topologicalEntity] keyword[is] keyword[None] keyword[and] identifier[topologyTypeToAvoid] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[topExp] . identifier[Init] ( identifier[self] . identifier[myShape] , identifier[topologyType] , identifier[topologyTypeToAvoid] )
keyword[elif] identifier[topologyTypeToAvoid] keyword[is] keyword[None] :
identifier[self] . identifier[topExp] . identifier[Init] ( identifier[topologicalEntity] , identifier[topologyType] )
keyword[elif] identifier[topologyTypeToAvoid] :
identifier[self] . identifier[topExp] . identifier[Init] ( identifier[topologicalEntity] ,
identifier[topologyType] ,
identifier[topologyTypeToAvoid] )
identifier[seq] =[]
identifier[hashes] =[]
identifier[occ_seq] = identifier[TopTools_ListOfShape] ()
keyword[while] identifier[self] . identifier[topExp] . identifier[More] ():
identifier[current_item] = identifier[self] . identifier[topExp] . identifier[Current] ()
identifier[current_item_hash] = identifier[current_item] . identifier[__hash__] ()
keyword[if] keyword[not] identifier[current_item_hash] keyword[in] identifier[hashes] :
identifier[hashes] . identifier[append] ( identifier[current_item_hash] )
identifier[occ_seq] . identifier[Append] ( identifier[current_item] )
identifier[self] . identifier[topExp] . identifier[Next] ()
identifier[occ_iterator] = identifier[TopTools_ListIteratorOfListOfShape] ( identifier[occ_seq] )
keyword[while] identifier[occ_iterator] . identifier[More] ():
identifier[topo_to_add] = identifier[self] . identifier[topoFactory] [ identifier[topologyType] ]( identifier[occ_iterator] . identifier[Value] ())
identifier[seq] . identifier[append] ( identifier[topo_to_add] )
identifier[occ_iterator] . identifier[Next] ()
keyword[if] identifier[self] . identifier[ignore_orientation] :
identifier[filter_orientation_seq] =[]
keyword[for] identifier[i] keyword[in] identifier[seq] :
identifier[_present] = keyword[False]
keyword[for] identifier[j] keyword[in] identifier[filter_orientation_seq] :
keyword[if] identifier[i] . identifier[IsSame] ( identifier[j] ):
identifier[_present] = keyword[True]
keyword[break]
keyword[if] identifier[_present] keyword[is] keyword[False] :
identifier[filter_orientation_seq] . identifier[append] ( identifier[i] )
keyword[return] identifier[filter_orientation_seq]
keyword[else] :
keyword[return] identifier[iter] ( identifier[seq] ) | def _loop_topo(self, topologyType, topologicalEntity=None, topologyTypeToAvoid=None):
"""
this could be a faces generator for a python TopoShape class;
that way you can just do:
for face in srf.faces:
processFace(face)
"""
topoTypes = {TopAbs_VERTEX: TopoDS_Vertex, TopAbs_EDGE: TopoDS_Edge, TopAbs_FACE: TopoDS_Face, TopAbs_WIRE: TopoDS_Wire, TopAbs_SHELL: TopoDS_Shell, TopAbs_SOLID: TopoDS_Solid, TopAbs_COMPOUND: TopoDS_Compound, TopAbs_COMPSOLID: TopoDS_CompSolid}
assert topologyType in topoTypes.keys(), '%s not one of %s' % (topologyType, topoTypes.keys())
self.topExp = TopExp_Explorer() # use self.myShape if nothing is specified
if topologicalEntity is None and topologyTypeToAvoid is None:
self.topExp.Init(self.myShape, topologyType) # depends on [control=['if'], data=[]]
elif topologicalEntity is None and topologyTypeToAvoid is not None:
self.topExp.Init(self.myShape, topologyType, topologyTypeToAvoid) # depends on [control=['if'], data=[]]
elif topologyTypeToAvoid is None:
self.topExp.Init(topologicalEntity, topologyType) # depends on [control=['if'], data=[]]
elif topologyTypeToAvoid:
self.topExp.Init(topologicalEntity, topologyType, topologyTypeToAvoid) # depends on [control=['if'], data=[]]
seq = []
hashes = [] # list that stores hashes to avoid redundancy
occ_seq = TopTools_ListOfShape()
while self.topExp.More():
current_item = self.topExp.Current()
current_item_hash = current_item.__hash__()
if not current_item_hash in hashes:
hashes.append(current_item_hash)
occ_seq.Append(current_item) # depends on [control=['if'], data=[]]
self.topExp.Next() # depends on [control=['while'], data=[]] # Convert occ_seq to python list
occ_iterator = TopTools_ListIteratorOfListOfShape(occ_seq)
while occ_iterator.More():
topo_to_add = self.topoFactory[topologyType](occ_iterator.Value())
seq.append(topo_to_add)
occ_iterator.Next() # depends on [control=['while'], data=[]]
if self.ignore_orientation: # filter out those entities that share the same TShape
# but do *not* share the same orientation
filter_orientation_seq = []
for i in seq:
_present = False
for j in filter_orientation_seq:
if i.IsSame(j):
_present = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']]
if _present is False:
filter_orientation_seq.append(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return filter_orientation_seq # depends on [control=['if'], data=[]]
else:
return iter(seq) |
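
In current pythonocc-core releases this machinery is exposed as OCC.Extend.TopologyUtils.TopologyExplorer; a hedged sketch of the intended iteration style:

from OCC.Core.BRepPrimAPI import BRepPrimAPI_MakeBox
from OCC.Extend.TopologyUtils import TopologyExplorer

box = BRepPrimAPI_MakeBox(10.0, 20.0, 30.0).Shape()
topo = TopologyExplorer(box)  # wraps the same TopExp_Explorer loop
print(topo.number_of_faces())  # 6 for a box
for face in topo.faces():
    print(face)
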
def __collect_interfaces_return(interfaces):
"""Collect new style (44.1+) return values to old-style kv-list"""
acc = []
for (interfaceName, interfaceData) in interfaces.items():
signalValues = interfaceData.get("signals", {})
for (signalName, signalValue) in signalValues.items():
pinName = "{0}.{1}".format(interfaceName, signalName)
acc.append({'id': pinName, 'value': signalValue})
return acc | def function[__collect_interfaces_return, parameter[interfaces]]:
constant[Convert new-style (44.1+) return values to an old-style kv-list]
variable[acc] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b100f910>, <ast.Name object at 0x7da1b100f0d0>]]] in starred[call[name[interfaces].items, parameter[]]] begin[:]
variable[signalValues] assign[=] call[name[interfaceData].get, parameter[constant[signals], dictionary[[], []]]]
for taget[tuple[[<ast.Name object at 0x7da1b100c700>, <ast.Name object at 0x7da1b100c9d0>]]] in starred[call[name[signalValues].items, parameter[]]] begin[:]
variable[pinName] assign[=] call[constant[{0}.{1}].format, parameter[name[interfaceName], name[signalName]]]
call[name[acc].append, parameter[dictionary[[<ast.Constant object at 0x7da1b100dbd0>, <ast.Constant object at 0x7da1b100ca00>], [<ast.Name object at 0x7da1b100f460>, <ast.Name object at 0x7da1b100c550>]]]]
return[name[acc]] | keyword[def] identifier[__collect_interfaces_return] ( identifier[interfaces] ):
literal[string]
identifier[acc] =[]
keyword[for] ( identifier[interfaceName] , identifier[interfaceData] ) keyword[in] identifier[interfaces] . identifier[items] ():
identifier[signalValues] = identifier[interfaceData] . identifier[get] ( literal[string] ,{})
keyword[for] ( identifier[signalName] , identifier[signalValue] ) keyword[in] identifier[signalValues] . identifier[items] ():
identifier[pinName] = literal[string] . identifier[format] ( identifier[interfaceName] , identifier[signalName] )
identifier[acc] . identifier[append] ({ literal[string] : identifier[pinName] , literal[string] : identifier[signalValue] })
keyword[return] identifier[acc] | def __collect_interfaces_return(interfaces):
"""Collect new style (44.1+) return values to old-style kv-list"""
acc = []
for (interfaceName, interfaceData) in interfaces.items():
signalValues = interfaceData.get('signals', {})
for (signalName, signalValue) in signalValues.items():
pinName = '{0}.{1}'.format(interfaceName, signalName)
acc.append({'id': pinName, 'value': signalValue}) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return acc |
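
The shape of the transformation, with made-up interface and signal names:

interfaces = {
    'din': {'signals': {'valid': 1, 'data': 255}},
    'dout': {'signals': {'ready': 0}},
}
# __collect_interfaces_return(interfaces) yields, ordering aside:
# [{'id': 'din.valid', 'value': 1},
#  {'id': 'din.data', 'value': 255},
#  {'id': 'dout.ready', 'value': 0}]
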
def hacking_assert_is_none(logical_line, noqa):
"""Use assertIs(Not)None to check for None in assertions.
Okay: self.assertEqual('foo', 'bar')
Okay: self.assertNotEqual('foo', {}.get('bar', None))
Okay: self.assertIs('foo', 'bar')
Okay: self.assertIsNot('foo', 'bar', None)
Okay: foo(self.assertIsNot('foo', 'bar'))
H203: self.assertEqual(None, 'foo')
H203: self.assertNotEqual('foo', None)
H203: self.assertIs(None, 'foo', 'bar')
H203: self.assertIsNot('foo', None, 'bar')
H203: foo(self.assertIsNot('foo', None, 'bar'))
Okay: self.assertEqual(None, 'foo') # noqa
Okay: self.assertIs(None, 'foo') # noqa
Okay: self.assertIsNone('foo')
"""
if noqa:
return
for func_name in ('assertEqual', 'assertIs', 'assertNotEqual',
'assertIsNot'):
try:
start = logical_line.index('.%s(' % func_name) + 1
except ValueError:
continue
checker = NoneArgChecker(func_name)
checker.visit(ast.parse(logical_line))
if checker.none_found:
yield start, "H203: Use assertIs(Not)None to check for None" | def function[hacking_assert_is_none, parameter[logical_line, noqa]]:
constant[Use assertIs(Not)None to check for None in assertions.
Okay: self.assertEqual('foo', 'bar')
Okay: self.assertNotEqual('foo', {}.get('bar', None))
Okay: self.assertIs('foo', 'bar')
Okay: self.assertIsNot('foo', 'bar', None)
Okay: foo(self.assertIsNot('foo', 'bar'))
H203: self.assertEqual(None, 'foo')
H203: self.assertNotEqual('foo', None)
H203: self.assertIs(None, 'foo', 'bar')
H203: self.assertIsNot('foo', None, 'bar')
H203: foo(self.assertIsNot('foo', None, 'bar'))
Okay: self.assertEqual(None, 'foo') # noqa
Okay: self.assertIs(None, 'foo') # noqa
Okay: self.assertIsNone('foo')
]
if name[noqa] begin[:]
return[None]
for taget[name[func_name]] in starred[tuple[[<ast.Constant object at 0x7da1b04a5d50>, <ast.Constant object at 0x7da1b04a64a0>, <ast.Constant object at 0x7da1b04a7580>, <ast.Constant object at 0x7da1b04a72e0>]]] begin[:]
<ast.Try object at 0x7da1b04a54b0>
variable[checker] assign[=] call[name[NoneArgChecker], parameter[name[func_name]]]
call[name[checker].visit, parameter[call[name[ast].parse, parameter[name[logical_line]]]]]
if name[checker].none_found begin[:]
<ast.Yield object at 0x7da1b04a6770> | keyword[def] identifier[hacking_assert_is_none] ( identifier[logical_line] , identifier[noqa] ):
literal[string]
keyword[if] identifier[noqa] :
keyword[return]
keyword[for] identifier[func_name] keyword[in] ( literal[string] , literal[string] , literal[string] ,
literal[string] ):
keyword[try] :
identifier[start] = identifier[logical_line] . identifier[index] ( literal[string] % identifier[func_name] )+ literal[int]
keyword[except] identifier[ValueError] :
keyword[continue]
identifier[checker] = identifier[NoneArgChecker] ( identifier[func_name] )
identifier[checker] . identifier[visit] ( identifier[ast] . identifier[parse] ( identifier[logical_line] ))
keyword[if] identifier[checker] . identifier[none_found] :
keyword[yield] identifier[start] , literal[string] | def hacking_assert_is_none(logical_line, noqa):
"""Use assertIs(Not)None to check for None in assertions.
Okay: self.assertEqual('foo', 'bar')
Okay: self.assertNotEqual('foo', {}.get('bar', None))
Okay: self.assertIs('foo', 'bar')
Okay: self.assertIsNot('foo', 'bar', None)
Okay: foo(self.assertIsNot('foo', 'bar'))
H203: self.assertEqual(None, 'foo')
H203: self.assertNotEqual('foo', None)
H203: self.assertIs(None, 'foo', 'bar')
H203: self.assertIsNot('foo', None, 'bar')
H203: foo(self.assertIsNot('foo', None, 'bar'))
Okay: self.assertEqual(None, 'foo') # noqa
Okay: self.assertIs(None, 'foo') # noqa
Okay: self.assertIsNone('foo')
"""
if noqa:
return # depends on [control=['if'], data=[]]
for func_name in ('assertEqual', 'assertIs', 'assertNotEqual', 'assertIsNot'):
try:
start = logical_line.index('.%s(' % func_name) + 1 # depends on [control=['try'], data=[]]
except ValueError:
continue # depends on [control=['except'], data=[]]
checker = NoneArgChecker(func_name)
checker.visit(ast.parse(logical_line))
if checker.none_found:
yield (start, 'H203: Use assertIs(Not)None to check for None') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['func_name']] |
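
The check depends on a NoneArgChecker defined elsewhere in the hacking package; below is a minimal sketch of what such a visitor must do, plus a driver (details of the real class may differ):

import ast

class NoneArgChecker(ast.NodeVisitor):
    def __init__(self, func_name):
        self.func_name = func_name
        self.none_found = False

    def visit_Call(self, node):
        name = getattr(node.func, 'attr', None) or getattr(node.func, 'id', None)
        if name == self.func_name:
            self.none_found |= any(
                isinstance(arg, ast.Constant) and arg.value is None
                for arg in node.args)
        self.generic_visit(node)

print(list(hacking_assert_is_none("self.assertEqual(None, 'foo')", noqa=False)))
# [(5, 'H203: Use assertIs(Not)None to check for None')]
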
def make_exporter_resources(nb_name, out_folder, images_folder=None):
"""
Creates resources dict for the exporter
"""
resources = defaultdict(str)
resources['metadata'] = defaultdict(str)
resources['metadata']['name'] = nb_name
resources['metadata']['path'] = out_folder
# This results in images like AB_5_1.png for a notebook called AB.ipynb
resources['unique_key'] = nb_name
resources['output_files_dir'] = images_folder
return resources | def function[make_exporter_resources, parameter[nb_name, out_folder, images_folder]]:
constant[
Creates resources dict for the exporter
]
variable[resources] assign[=] call[name[defaultdict], parameter[name[str]]]
call[name[resources]][constant[metadata]] assign[=] call[name[defaultdict], parameter[name[str]]]
call[call[name[resources]][constant[metadata]]][constant[name]] assign[=] name[nb_name]
call[call[name[resources]][constant[metadata]]][constant[path]] assign[=] name[out_folder]
call[name[resources]][constant[unique_key]] assign[=] name[nb_name]
call[name[resources]][constant[output_files_dir]] assign[=] name[images_folder]
return[name[resources]] | keyword[def] identifier[make_exporter_resources] ( identifier[nb_name] , identifier[out_folder] , identifier[images_folder] = keyword[None] ):
literal[string]
identifier[resources] = identifier[defaultdict] ( identifier[str] )
identifier[resources] [ literal[string] ]= identifier[defaultdict] ( identifier[str] )
identifier[resources] [ literal[string] ][ literal[string] ]= identifier[nb_name]
identifier[resources] [ literal[string] ][ literal[string] ]= identifier[out_folder]
identifier[resources] [ literal[string] ]= identifier[nb_name]
identifier[resources] [ literal[string] ]= identifier[images_folder]
keyword[return] identifier[resources] | def make_exporter_resources(nb_name, out_folder, images_folder=None):
"""
Creates resources dict for the exporter
"""
resources = defaultdict(str)
resources['metadata'] = defaultdict(str)
resources['metadata']['name'] = nb_name
resources['metadata']['path'] = out_folder
# This results in images like AB_5_1.png for a notebook called AB.ipynb
resources['unique_key'] = nb_name
resources['output_files_dir'] = images_folder
return resources |
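
Hedged usage with nbconvert; the notebook name and folders are placeholders, and the Markdown exporter is assumed to have output extraction enabled (its default):

import nbformat
from nbconvert import MarkdownExporter

nb = nbformat.read('AB.ipynb', as_version=4)
resources = make_exporter_resources('AB', 'out', images_folder='images')
body, resources = MarkdownExporter().from_notebook_node(nb, resources=resources)
# extracted figures appear in resources['outputs'] under keys such as
# 'images/AB_5_1.png', driven by unique_key and output_files_dir above
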
def _make_sections(self, **section_hdr_params):
"""Flatten the sections into a single story list."""
sect_story = []
if not self.section_headings and len(self.sections):
self.section_headings = self.sections.keys()
for section_name in self.section_headings:
section_story = self.sections[section_name]
line = '-'*20
section_head_text = '%s %s %s' % (line, section_name, line)
title, title_sp = self._preformat_text(section_head_text,
**section_hdr_params)
sect_story += [title, title_sp] + section_story
return sect_story | def function[_make_sections, parameter[self]]:
constant[Flatten the sections into a single story list.]
variable[sect_story] assign[=] list[[]]
if <ast.BoolOp object at 0x7da18bcca8f0> begin[:]
name[self].section_headings assign[=] call[name[self].sections.keys, parameter[]]
for taget[name[section_name]] in starred[name[self].section_headings] begin[:]
variable[section_story] assign[=] call[name[self].sections][name[section_name]]
variable[line] assign[=] binary_operation[constant[-] * constant[20]]
variable[section_head_text] assign[=] binary_operation[constant[%s %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0d3fac0>, <ast.Name object at 0x7da1b0d3c280>, <ast.Name object at 0x7da1b0d3e770>]]]
<ast.Tuple object at 0x7da1b0d3f040> assign[=] call[name[self]._preformat_text, parameter[name[section_head_text]]]
<ast.AugAssign object at 0x7da1b0d3ef80>
return[name[sect_story]] | keyword[def] identifier[_make_sections] ( identifier[self] ,** identifier[section_hdr_params] ):
literal[string]
identifier[sect_story] =[]
keyword[if] keyword[not] identifier[self] . identifier[section_headings] keyword[and] identifier[len] ( identifier[self] . identifier[sections] ):
identifier[self] . identifier[section_headings] = identifier[self] . identifier[sections] . identifier[keys] ()
keyword[for] identifier[section_name] keyword[in] identifier[self] . identifier[section_headings] :
identifier[section_story] = identifier[self] . identifier[sections] [ identifier[section_name] ]
identifier[line] = literal[string] * literal[int]
identifier[section_head_text] = literal[string] %( identifier[line] , identifier[section_name] , identifier[line] )
identifier[title] , identifier[title_sp] = identifier[self] . identifier[_preformat_text] ( identifier[section_head_text] ,
** identifier[section_hdr_params] )
identifier[sect_story] +=[ identifier[title] , identifier[title_sp] ]+ identifier[section_story]
keyword[return] identifier[sect_story] | def _make_sections(self, **section_hdr_params):
"""Flatten the sections into a single story list."""
sect_story = []
if not self.section_headings and len(self.sections):
self.section_headings = self.sections.keys() # depends on [control=['if'], data=[]]
for section_name in self.section_headings:
section_story = self.sections[section_name]
line = '-' * 20
section_head_text = '%s %s %s' % (line, section_name, line)
(title, title_sp) = self._preformat_text(section_head_text, **section_hdr_params)
sect_story += [title, title_sp] + section_story # depends on [control=['for'], data=['section_name']]
return sect_story |
def colfieldnames(self, columnname, keyword=''):
"""Get the names of the fields in a column keyword value.
The value of a keyword can be a struct (python dict). This method
returns the names of the fields in that struct.
Each field in a struct can be a struct in itself. Names of fields in a
sub-struct can be obtained by giving a keyword name consisting of
multiple parts separated by dots (e.g. 'key1.sub1.sub2').
If an empty keyword name is given (which is the default), all keyword
names of the column are shown and its behaviour is the same as
:func:`colkeywordnames`.
Instead of a keyword name an index can be given which returns the names
of the struct value of the i-th keyword.
"""
if isinstance(keyword, str):
return self._getfieldnames(columnname, keyword, -1)
else:
return self._getfieldnames(columnname, '', keyword) | def function[colfieldnames, parameter[self, columnname, keyword]]:
constant[Get the names of the fields in a column keyword value.
The value of a keyword can be a struct (python dict). This method
returns the names of the fields in that struct.
Each field in a struct can be a struct in itself. Names of fields in a
sub-struct can be obtained by giving a keyword name consisting of
multiple parts separated by dots (e.g. 'key1.sub1.sub2').
If an empty keyword name is given (which is the default), all keyword
names of the column are shown and its behaviour is the same as
:func:`colkeywordnames`.
Instead of a keyword name an index can be given which returns the names
of the struct value of the i-th keyword.
]
if call[name[isinstance], parameter[name[keyword], name[str]]] begin[:]
return[call[name[self]._getfieldnames, parameter[name[columnname], name[keyword], <ast.UnaryOp object at 0x7da20c7943a0>]]] | keyword[def] identifier[colfieldnames] ( identifier[self] , identifier[columnname] , identifier[keyword] = literal[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[keyword] , identifier[str] ):
keyword[return] identifier[self] . identifier[_getfieldnames] ( identifier[columnname] , identifier[keyword] ,- literal[int] )
keyword[else] :
keyword[return] identifier[self] . identifier[_getfieldnames] ( identifier[columnname] , literal[string] , identifier[keyword] ) | def colfieldnames(self, columnname, keyword=''):
"""Get the names of the fields in a column keyword value.
The value of a keyword can be a struct (python dict). This method
returns the names of the fields in that struct.
Each field in a struct can be a struct in itself. Names of fields in a
sub-struct can be obtained by giving a keyword name consisting of
multiple parts separated by dots (e.g. 'key1.sub1.sub2').
If an empty keyword name is given (which is the default), all keyword
names of the column are shown and its behaviour is the same as
:func:`colkeywordnames`.
Instead of a keyword name an index can be given which returns the names
of the struct value of the i-th keyword.
"""
if isinstance(keyword, str):
return self._getfieldnames(columnname, keyword, -1) # depends on [control=['if'], data=[]]
else:
return self._getfieldnames(columnname, '', keyword) |
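A hedged usage sketch with python-casacore; the table name and keyword below are illustrative and depend on what the table actually stores:

# Hypothetical usage; assumes python-casacore and a MeasurementSet whose
# 'UVW' column carries a struct-valued 'MEASINFO' keyword.
from casacore.tables import table

t = table('my.ms')
print(t.colkeywordnames('UVW'))            # all keyword names of the column
print(t.colfieldnames('UVW', 'MEASINFO'))  # fields inside one struct keyword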
def _additional_rows_date2int(self, keys, rows):
"""
Replaces start and end dates of the additional date intervals in the row set with their integer representation
:param list[tuple[str,str]] keys: The other keys with start and end date.
:param list[dict[str,T]] rows: The list of rows.
:rtype: list[dict[str,T]]
"""
for row in rows:
for key_start_date, key_end_date in keys:
if key_start_date not in [self._key_start_date, self._key_end_date]:
row[key_start_date] = self._date2int(row[key_start_date])
if key_end_date not in [self._key_start_date, self._key_end_date]:
row[key_end_date] = self._date2int(row[key_end_date]) | def function[_additional_rows_date2int, parameter[self, keys, rows]]:
constant[
Replaces start and end dates of the additional date intervals in the row set with their integer representation
:param list[tuple[str,str]] keys: The other keys with start and end date.
:param list[dict[str,T]] rows: The list of rows.
:rtype: list[dict[str,T]]
]
for taget[name[row]] in starred[name[rows]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0a36a10>, <ast.Name object at 0x7da1b0a37d30>]]] in starred[name[keys]] begin[:]
if compare[name[key_start_date] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Attribute object at 0x7da1b0a36020>, <ast.Attribute object at 0x7da1b0a34130>]]] begin[:]
call[name[row]][name[key_start_date]] assign[=] call[name[self]._date2int, parameter[call[name[row]][name[key_start_date]]]]
if compare[name[key_end_date] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Attribute object at 0x7da18f00c580>, <ast.Attribute object at 0x7da18f00cbe0>]]] begin[:]
call[name[row]][name[key_end_date]] assign[=] call[name[self]._date2int, parameter[call[name[row]][name[key_end_date]]]] | keyword[def] identifier[_additional_rows_date2int] ( identifier[self] , identifier[keys] , identifier[rows] ):
literal[string]
keyword[for] identifier[row] keyword[in] identifier[rows] :
keyword[for] identifier[key_start_date] , identifier[key_end_date] keyword[in] identifier[keys] :
keyword[if] identifier[key_start_date] keyword[not] keyword[in] [ identifier[self] . identifier[_key_start_date] , identifier[self] . identifier[_key_end_date] ]:
identifier[row] [ identifier[key_start_date] ]= identifier[self] . identifier[_date2int] ( identifier[row] [ identifier[key_start_date] ])
keyword[if] identifier[key_end_date] keyword[not] keyword[in] [ identifier[self] . identifier[_key_start_date] , identifier[self] . identifier[_key_end_date] ]:
identifier[row] [ identifier[key_end_date] ]= identifier[self] . identifier[_date2int] ( identifier[row] [ identifier[key_end_date] ]) | def _additional_rows_date2int(self, keys, rows):
"""
Replaces start and end dates of the additional date intervals in the row set with their integer representation
:param list[tuple[str,str]] keys: The other keys with start and end date.
:param list[dict[str,T]] rows: The list of rows.
:rtype: list[dict[str,T]]
"""
for row in rows:
for (key_start_date, key_end_date) in keys:
if key_start_date not in [self._key_start_date, self._key_end_date]:
row[key_start_date] = self._date2int(row[key_start_date]) # depends on [control=['if'], data=['key_start_date']]
if key_end_date not in [self._key_start_date, self._key_end_date]:
row[key_end_date] = self._date2int(row[key_end_date]) # depends on [control=['if'], data=['key_end_date']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['row']] |
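The same transformation as a standalone sketch, with a hypothetical `date2int` that encodes dates as YYYYMMDD integers (the real `_date2int` is defined elsewhere in the class):

import datetime

def date2int(d):  # stand-in for self._date2int
    return d.year * 10000 + d.month * 100 + d.day

rows = [{'start': datetime.date(2020, 1, 1), 'end': datetime.date(2020, 6, 30)}]
keys = [('start', 'end')]
for row in rows:
    for key_start_date, key_end_date in keys:
        row[key_start_date] = date2int(row[key_start_date])
        row[key_end_date] = date2int(row[key_end_date])
print(rows)  # [{'start': 20200101, 'end': 20200630}]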
def save(self, mark):
"""Save a position in this collection.
:param mark: The position to save
:type mark: Mark
:raises: DBError, NoTrackingCollection
"""
self._check_exists()
obj = mark.as_dict()
try:
# Make a 'filter' to find/update existing record, which uses
# the field name and operation (but not the position).
filt = {k: obj[k] for k in (mark.FLD_FLD, mark.FLD_OP)}
_log.debug("save: upsert-spec={} upsert-obj={}".format(filt, obj))
self._track.update(filt, obj, upsert=True)
except pymongo.errors.PyMongoError as err:
raise DBError("{}".format(err)) | def function[save, parameter[self, mark]]:
constant[Save a position in this collection.
:param mark: The position to save
:type mark: Mark
:raises: DBError, NoTrackingCollection
]
call[name[self]._check_exists, parameter[]]
variable[obj] assign[=] call[name[mark].as_dict, parameter[]]
<ast.Try object at 0x7da18f58d330> | keyword[def] identifier[save] ( identifier[self] , identifier[mark] ):
literal[string]
identifier[self] . identifier[_check_exists] ()
identifier[obj] = identifier[mark] . identifier[as_dict] ()
keyword[try] :
identifier[filt] ={ identifier[k] : identifier[obj] [ identifier[k] ] keyword[for] identifier[k] keyword[in] ( identifier[mark] . identifier[FLD_FLD] , identifier[mark] . identifier[FLD_OP] )}
identifier[_log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[filt] , identifier[obj] ))
identifier[self] . identifier[_track] . identifier[update] ( identifier[filt] , identifier[obj] , identifier[upsert] = keyword[True] )
keyword[except] identifier[pymongo] . identifier[errors] . identifier[PyMongoError] keyword[as] identifier[err] :
keyword[raise] identifier[DBError] ( literal[string] . identifier[format] ( identifier[err] )) | def save(self, mark):
"""Save a position in this collection.
:param mark: The position to save
:type mark: Mark
:raises: DBError, NoTrackingCollection
"""
self._check_exists()
obj = mark.as_dict()
try:
# Make a 'filter' to find/update existing record, which uses
# the field name and operation (but not the position).
filt = {k: obj[k] for k in (mark.FLD_FLD, mark.FLD_OP)}
_log.debug('save: upsert-spec={} upsert-obj={}'.format(filt, obj))
self._track.update(filt, obj, upsert=True) # depends on [control=['try'], data=[]]
except pymongo.errors.PyMongoError as err:
raise DBError('{}'.format(err)) # depends on [control=['except'], data=['err']] |
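A sketch of the same upsert pattern against a live MongoDB; the collection and field names are made up, and modern PyMongo spells the legacy `update()` used above as `update_one()` with a `$set` document:

import pymongo

client = pymongo.MongoClient()                # assumes a local mongod
track = client['testdb']['track']
obj = {'field': 'ts', 'op': 'gt', 'pos': 42}  # hypothetical mark.as_dict()
filt = {k: obj[k] for k in ('field', 'op')}   # match on field+op, not position
track.update_one(filt, {'$set': obj}, upsert=True)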
def tolocal(self):
"""
Convert to local mode.
"""
from thunder.images.readers import fromarray
if self.mode == 'local':
logging.getLogger('thunder').warn('images already in local mode')
pass
return fromarray(self.toarray()) | def function[tolocal, parameter[self]]:
constant[
Convert to local mode.
]
from relative_module[thunder.images.readers] import module[fromarray]
if compare[name[self].mode equal[==] constant[local]] begin[:]
call[call[name[logging].getLogger, parameter[constant[thunder]]].warn, parameter[constant[images already in local mode]]]
pass
return[call[name[fromarray], parameter[call[name[self].toarray, parameter[]]]]] | keyword[def] identifier[tolocal] ( identifier[self] ):
literal[string]
keyword[from] identifier[thunder] . identifier[images] . identifier[readers] keyword[import] identifier[fromarray]
keyword[if] identifier[self] . identifier[mode] == literal[string] :
identifier[logging] . identifier[getLogger] ( literal[string] ). identifier[warn] ( literal[string] )
keyword[pass]
keyword[return] identifier[fromarray] ( identifier[self] . identifier[toarray] ()) | def tolocal(self):
"""
Convert to local mode.
"""
from thunder.images.readers import fromarray
if self.mode == 'local':
logging.getLogger('thunder').warn('images already in local mode')
pass # depends on [control=['if'], data=[]]
return fromarray(self.toarray()) |
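A hedged round trip with thunder; `fromarray` is the constructor the method imports, and the warning fires because the data is already local:

# Hypothetical usage; assumes thunder-python is installed.
import numpy as np
import thunder as td

imgs = td.images.fromarray(np.random.rand(3, 8, 8))  # already local mode
local = imgs.tolocal()    # warns, then rebuilds via toarray()/fromarray()
print(local.mode)         # 'local'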
def require_server(fn):
"""
Checks if the user has called the task with a server name.
Fabric tasks decorated with this decorator must be called like so::
fab <server name> <task name>
If no server name is given, the task will not be executed.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
if env.machine is None:
abort(red('ERROR: You must provide a server name to call this'
' task!'))
return fn(*args, **kwargs)
return wrapper | def function[require_server, parameter[fn]]:
constant[
Checks if the user has called the task with a server name.
Fabric tasks decorated with this decorator must be called like so::
fab <server name> <task name>
If no server name is given, the task will not be executed.
]
def function[wrapper, parameter[]]:
if compare[name[env].machine is constant[None]] begin[:]
call[name[abort], parameter[call[name[red], parameter[constant[ERROR: You must provide a server name to call this task!]]]]]
return[call[name[fn], parameter[<ast.Starred object at 0x7da1b09e8850>]]]
return[name[wrapper]] | keyword[def] identifier[require_server] ( identifier[fn] ):
literal[string]
@ identifier[wraps] ( identifier[fn] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[env] . identifier[machine] keyword[is] keyword[None] :
identifier[abort] ( identifier[red] ( literal[string]
literal[string] ))
keyword[return] identifier[fn] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper] | def require_server(fn):
"""
Checks if the user has called the task with a server name.
Fabric tasks decorated with this decorator must be called like so::
fab <server name> <task name>
If no server name is given, the task will not be executed.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
if env.machine is None:
abort(red('ERROR: You must provide a server name to call this task!')) # depends on [control=['if'], data=[]]
return fn(*args, **kwargs)
return wrapper |
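Hypothetical Fabric-1.x usage; `env.machine` is assumed to be set by a separate server-selection task, so `fab production deploy` runs while a bare `fab deploy` aborts:

from fabric.api import env, task

env.machine = None

@task
def production():
    env.machine = 'production.example.com'

@task
@require_server
def deploy():
    print('deploying to %s' % env.machine)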
def __fetch_issue_messages(self, issue_id):
"""Get messages of an issue"""
for messages_raw in self.client.issue_collection(issue_id, "messages"):
messages = json.loads(messages_raw)
for msg in messages['entries']:
msg['owner_data'] = self.__fetch_user_data('{OWNER}', msg['owner_link'])
yield msg | def function[__fetch_issue_messages, parameter[self, issue_id]]:
constant[Get messages of an issue]
for taget[name[messages_raw]] in starred[call[name[self].client.issue_collection, parameter[name[issue_id], constant[messages]]]] begin[:]
variable[messages] assign[=] call[name[json].loads, parameter[name[messages_raw]]]
for taget[name[msg]] in starred[call[name[messages]][constant[entries]]] begin[:]
call[name[msg]][constant[owner_data]] assign[=] call[name[self].__fetch_user_data, parameter[constant[{OWNER}], call[name[msg]][constant[owner_link]]]]
<ast.Yield object at 0x7da1b02f1060> | keyword[def] identifier[__fetch_issue_messages] ( identifier[self] , identifier[issue_id] ):
literal[string]
keyword[for] identifier[messages_raw] keyword[in] identifier[self] . identifier[client] . identifier[issue_collection] ( identifier[issue_id] , literal[string] ):
identifier[messages] = identifier[json] . identifier[loads] ( identifier[messages_raw] )
keyword[for] identifier[msg] keyword[in] identifier[messages] [ literal[string] ]:
identifier[msg] [ literal[string] ]= identifier[self] . identifier[__fetch_user_data] ( literal[string] , identifier[msg] [ literal[string] ])
keyword[yield] identifier[msg] | def __fetch_issue_messages(self, issue_id):
"""Get messages of an issue"""
for messages_raw in self.client.issue_collection(issue_id, 'messages'):
messages = json.loads(messages_raw)
for msg in messages['entries']:
msg['owner_data'] = self.__fetch_user_data('{OWNER}', msg['owner_link'])
yield msg # depends on [control=['for'], data=['msg']] # depends on [control=['for'], data=['messages_raw']] |
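The enrich-while-streaming pattern in isolation, with the Launchpad client replaced by a canned page of JSON and a stub user lookup:

import json

pages = ['{"entries": [{"id": 1, "owner_link": "/~alice"}]}']

def fetch_user_data(link):      # stand-in for self.__fetch_user_data
    return {'link': link}

def fetch_issue_messages(pages):
    for raw in pages:
        for msg in json.loads(raw)['entries']:
            msg['owner_data'] = fetch_user_data(msg['owner_link'])
            yield msg

print(list(fetch_issue_messages(pages)))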
def on_btn_demag_gui(self, event):
"""
Open Demag GUI
"""
if not self.check_for_meas_file():
return
if not self.check_for_uncombined_files():
return
outstring = "demag_gui.py -WD %s"%self.WD
print("-I- running python script:\n %s"%(outstring))
if self.data_model_num == 2:
demag_gui.start(self.WD, standalone_app=False, parent=self, DM=self.data_model_num)
else:
# disable and hide Pmag GUI mainframe
self.Disable()
self.Hide()
# show busyinfo
wait = wx.BusyInfo('Compiling required data, please wait...')
wx.SafeYield()
# create custom Demag GUI closing event and bind it
DemagGuiExitEvent, EVT_DEMAG_GUI_EXIT = newevent.NewCommandEvent()
self.Bind(EVT_DEMAG_GUI_EXIT, self.on_analysis_gui_exit)
# make and show the Demag GUI frame
demag_gui_frame = demag_gui.Demag_GUI(self.WD, self,
write_to_log_file=False,
data_model=self.data_model_num,
evt_quit=DemagGuiExitEvent)
demag_gui_frame.Centre()
demag_gui_frame.Show()
del wait | def function[on_btn_demag_gui, parameter[self, event]]:
constant[
Open Demag GUI
]
if <ast.UnaryOp object at 0x7da2041da680> begin[:]
return[None]
if <ast.UnaryOp object at 0x7da2041db4c0> begin[:]
return[None]
variable[outstring] assign[=] binary_operation[constant[demag_gui.py -WD %s] <ast.Mod object at 0x7da2590d6920> name[self].WD]
call[name[print], parameter[binary_operation[constant[-I- running python script:
%s] <ast.Mod object at 0x7da2590d6920> name[outstring]]]]
if compare[name[self].data_model_num equal[==] constant[2]] begin[:]
call[name[demag_gui].start, parameter[name[self].WD]] | keyword[def] identifier[on_btn_demag_gui] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[check_for_meas_file] ():
keyword[return]
keyword[if] keyword[not] identifier[self] . identifier[check_for_uncombined_files] ():
keyword[return]
identifier[outstring] = literal[string] % identifier[self] . identifier[WD]
identifier[print] ( literal[string] %( identifier[outstring] ))
keyword[if] identifier[self] . identifier[data_model_num] == literal[int] :
identifier[demag_gui] . identifier[start] ( identifier[self] . identifier[WD] , identifier[standalone_app] = keyword[False] , identifier[parent] = identifier[self] , identifier[DM] = identifier[self] . identifier[data_model_num] )
keyword[else] :
identifier[self] . identifier[Disable] ()
identifier[self] . identifier[Hide] ()
identifier[wait] = identifier[wx] . identifier[BusyInfo] ( literal[string] )
identifier[wx] . identifier[SafeYield] ()
identifier[DemagGuiExitEvent] , identifier[EVT_DEMAG_GUI_EXIT] = identifier[newevent] . identifier[NewCommandEvent] ()
identifier[self] . identifier[Bind] ( identifier[EVT_DEMAG_GUI_EXIT] , identifier[self] . identifier[on_analysis_gui_exit] )
identifier[demag_gui_frame] = identifier[demag_gui] . identifier[Demag_GUI] ( identifier[self] . identifier[WD] , identifier[self] ,
identifier[write_to_log_file] = keyword[False] ,
identifier[data_model] = identifier[self] . identifier[data_model_num] ,
identifier[evt_quit] = identifier[DemagGuiExitEvent] )
identifier[demag_gui_frame] . identifier[Centre] ()
identifier[demag_gui_frame] . identifier[Show] ()
keyword[del] identifier[wait] | def on_btn_demag_gui(self, event):
"""
Open Demag GUI
"""
if not self.check_for_meas_file():
return # depends on [control=['if'], data=[]]
if not self.check_for_uncombined_files():
return # depends on [control=['if'], data=[]]
outstring = 'demag_gui.py -WD %s' % self.WD
print('-I- running python script:\n %s' % outstring)
if self.data_model_num == 2:
demag_gui.start(self.WD, standalone_app=False, parent=self, DM=self.data_model_num) # depends on [control=['if'], data=[]]
else:
# disable and hide Pmag GUI mainframe
self.Disable()
self.Hide()
# show busyinfo
wait = wx.BusyInfo('Compiling required data, please wait...')
wx.SafeYield()
# create custom Demag GUI closing event and bind it
(DemagGuiExitEvent, EVT_DEMAG_GUI_EXIT) = newevent.NewCommandEvent()
self.Bind(EVT_DEMAG_GUI_EXIT, self.on_analysis_gui_exit)
# make and show the Demag GUI frame
demag_gui_frame = demag_gui.Demag_GUI(self.WD, self, write_to_log_file=False, data_model=self.data_model_num, evt_quit=DemagGuiExitEvent)
demag_gui_frame.Centre()
demag_gui_frame.Show()
del wait |
def list(self, obj, filter=False, only_id=False, limit=20):
""" Function list
Get the list of an object
@param obj: object name ('hosts', 'puppetclasses'...)
@param filter: filter for objects
@param only_id: boolean to only return dict with name/id
@return RETURN: the list of the object
"""
self.url = '{}{}/?per_page={}'.format(self.base_url, obj, limit)
self.method = 'GET'
if filter:
self.url += '&search={}'.format(filter)
self.resp = requests.get(url=self.url, auth=self.auth,
headers=self.headers, cert=self.ca_cert)
if only_id:
if self.__process_resp__(obj) is False:
return False
if type(self.res['results']) is list:
return dict((x['name'], x['id']) for x in self.res['results'])
elif type(self.res['results']) is dict:
r = {}
for v in self.res['results'].values():
for vv in v:
r[vv['name']] = vv['id']
return r
else:
return False
else:
return self.__process_resp__(obj) | def function[list, parameter[self, obj, filter, only_id, limit]]:
constant[ Function list
Get the list of an object
@param obj: object name ('hosts', 'puppetclasses'...)
@param filter: filter for objects
@param only_id: boolean to only return dict with name/id
@return RETURN: the list of the object
]
name[self].url assign[=] call[constant[{}{}/?per_page={}].format, parameter[name[self].base_url, name[obj], name[limit]]]
name[self].method assign[=] constant[GET]
if name[filter] begin[:]
<ast.AugAssign object at 0x7da20c6abfa0>
name[self].resp assign[=] call[name[requests].get, parameter[]]
if name[only_id] begin[:]
if compare[call[name[self].__process_resp__, parameter[name[obj]]] is constant[False]] begin[:]
return[constant[False]]
if compare[call[name[type], parameter[call[name[self].res][constant[results]]]] is name[list]] begin[:]
return[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b26adf30>]]] | keyword[def] identifier[list] ( identifier[self] , identifier[obj] , identifier[filter] = keyword[False] , identifier[only_id] = keyword[False] , identifier[limit] = literal[int] ):
literal[string]
identifier[self] . identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[base_url] , identifier[obj] , identifier[limit] )
identifier[self] . identifier[method] = literal[string]
keyword[if] identifier[filter] :
identifier[self] . identifier[url] += literal[string] . identifier[format] ( identifier[filter] )
identifier[self] . identifier[resp] = identifier[requests] . identifier[get] ( identifier[url] = identifier[self] . identifier[url] , identifier[auth] = identifier[self] . identifier[auth] ,
identifier[headers] = identifier[self] . identifier[headers] , identifier[cert] = identifier[self] . identifier[ca_cert] )
keyword[if] identifier[only_id] :
keyword[if] identifier[self] . identifier[__process_resp__] ( identifier[obj] ) keyword[is] keyword[False] :
keyword[return] keyword[False]
keyword[if] identifier[type] ( identifier[self] . identifier[res] [ literal[string] ]) keyword[is] identifier[list] :
keyword[return] identifier[dict] (( identifier[x] [ literal[string] ], identifier[x] [ literal[string] ]) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[res] [ literal[string] ])
keyword[elif] identifier[type] ( identifier[self] . identifier[res] [ literal[string] ]) keyword[is] identifier[dict] :
identifier[r] ={}
keyword[for] identifier[v] keyword[in] identifier[self] . identifier[res] [ literal[string] ]. identifier[values] ():
keyword[for] identifier[vv] keyword[in] identifier[v] :
identifier[r] [ identifier[vv] [ literal[string] ]]= identifier[vv] [ literal[string] ]
keyword[return] identifier[r]
keyword[else] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] identifier[self] . identifier[__process_resp__] ( identifier[obj] ) | def list(self, obj, filter=False, only_id=False, limit=20):
""" Function list
Get the list of an object
@param obj: object name ('hosts', 'puppetclasses'...)
@param filter: filter for objects
@param only_id: boolean to only return dict with name/id
@return RETURN: the list of the object
"""
self.url = '{}{}/?per_page={}'.format(self.base_url, obj, limit)
self.method = 'GET'
if filter:
self.url += '&search={}'.format(filter) # depends on [control=['if'], data=[]]
self.resp = requests.get(url=self.url, auth=self.auth, headers=self.headers, cert=self.ca_cert)
if only_id:
if self.__process_resp__(obj) is False:
return False # depends on [control=['if'], data=[]]
if type(self.res['results']) is list:
return dict(((x['name'], x['id']) for x in self.res['results'])) # depends on [control=['if'], data=[]]
elif type(self.res['results']) is dict:
r = {}
for v in self.res['results'].values():
for vv in v:
r[vv['name']] = vv['id'] # depends on [control=['for'], data=['vv']] # depends on [control=['for'], data=['v']]
return r # depends on [control=['if'], data=[]]
else:
return False # depends on [control=['if'], data=[]]
else:
return self.__process_resp__(obj) |
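The URL construction in isolation (endpoint is hypothetical; note that in practice the search filter should be URL-encoded before being appended):

base_url = 'https://foreman.example.com/api/'
obj, limit, filt = 'hosts', 50, 'os=CentOS'
url = '{}{}/?per_page={}'.format(base_url, obj, limit)
if filt:
    url += '&search={}'.format(filt)
print(url)  # https://foreman.example.com/api/hosts/?per_page=50&search=os=CentOS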
def get_units(**kwargs):
"""
Returns all the units
"""
units_list = db.DBSession.query(Unit).all()
units = []
for unit in units_list:
new_unit = JSONObject(unit)
units.append(new_unit)
return units | def function[get_units, parameter[]]:
constant[
Returns all the units
]
variable[units_list] assign[=] call[call[name[db].DBSession.query, parameter[name[Unit]]].all, parameter[]]
variable[units] assign[=] list[[]]
for taget[name[unit]] in starred[name[units_list]] begin[:]
variable[new_unit] assign[=] call[name[JSONObject], parameter[name[unit]]]
call[name[units].append, parameter[name[new_unit]]]
return[name[units]] | keyword[def] identifier[get_units] (** identifier[kwargs] ):
literal[string]
identifier[units_list] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[Unit] ). identifier[all] ()
identifier[units] =[]
keyword[for] identifier[unit] keyword[in] identifier[units_list] :
identifier[new_unit] = identifier[JSONObject] ( identifier[unit] )
identifier[units] . identifier[append] ( identifier[new_unit] )
keyword[return] identifier[units] | def get_units(**kwargs):
"""
Returns all the units
"""
units_list = db.DBSession.query(Unit).all()
units = []
for unit in units_list:
new_unit = JSONObject(unit)
units.append(new_unit) # depends on [control=['for'], data=['unit']]
return units |
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
    is a non-boolean scalar. Raises a TypeError if
that is the case.
"""
msg = ("Only booleans, lists, and "
"dictionaries are accepted "
"for the 'parse_dates' parameter")
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not isinstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates | def function[_validate_parse_dates_arg, parameter[parse_dates]]:
constant[
Check whether or not the 'parse_dates' parameter
    is a non-boolean scalar. Raises a TypeError if
that is the case.
]
variable[msg] assign[=] constant[Only booleans, lists, and dictionaries are accepted for the 'parse_dates' parameter]
if compare[name[parse_dates] is_not constant[None]] begin[:]
if call[name[is_scalar], parameter[name[parse_dates]]] begin[:]
if <ast.UnaryOp object at 0x7da18c4cf760> begin[:]
<ast.Raise object at 0x7da18c4cf130>
return[name[parse_dates]] | keyword[def] identifier[_validate_parse_dates_arg] ( identifier[parse_dates] ):
literal[string]
identifier[msg] =( literal[string]
literal[string]
literal[string] )
keyword[if] identifier[parse_dates] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[is_scalar] ( identifier[parse_dates] ):
keyword[if] keyword[not] identifier[lib] . identifier[is_bool] ( identifier[parse_dates] ):
keyword[raise] identifier[TypeError] ( identifier[msg] )
keyword[elif] keyword[not] identifier[isinstance] ( identifier[parse_dates] ,( identifier[list] , identifier[dict] )):
keyword[raise] identifier[TypeError] ( identifier[msg] )
keyword[return] identifier[parse_dates] | def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
    is a non-boolean scalar. Raises a TypeError if
that is the case.
"""
msg = "Only booleans, lists, and dictionaries are accepted for the 'parse_dates' parameter"
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif not isinstance(parse_dates, (list, dict)):
raise TypeError(msg) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['parse_dates']]
return parse_dates |
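A self-contained sketch of the same validation, with `isinstance` standing in for pandas' `is_scalar`/`lib.is_bool` helpers:

def validate_parse_dates_arg(parse_dates):  # simplified stand-in
    msg = ("Only booleans, lists, and dictionaries are accepted "
           "for the 'parse_dates' parameter")
    if parse_dates is not None and not isinstance(parse_dates, (bool, list, dict)):
        raise TypeError(msg)
    return parse_dates

print(validate_parse_dates_arg(True))    # True
print(validate_parse_dates_arg(['ts']))  # ['ts']
validate_parse_dates_arg('ts')           # raises TypeError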
def connect_checkable_button(instance, prop, widget):
"""
Connect a boolean callback property with a Qt button widget.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setChecked``
method and the ``toggled`` signal.
"""
add_callback(instance, prop, widget.setChecked)
widget.toggled.connect(partial(setattr, instance, prop))
widget.setChecked(getattr(instance, prop) or False) | def function[connect_checkable_button, parameter[instance, prop, widget]]:
constant[
Connect a boolean callback property with a Qt button widget.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setChecked``
method and the ``toggled`` signal.
]
call[name[add_callback], parameter[name[instance], name[prop], name[widget].setChecked]]
call[name[widget].toggled.connect, parameter[call[name[partial], parameter[name[setattr], name[instance], name[prop]]]]]
call[name[widget].setChecked, parameter[<ast.BoolOp object at 0x7da18ede63e0>]] | keyword[def] identifier[connect_checkable_button] ( identifier[instance] , identifier[prop] , identifier[widget] ):
literal[string]
identifier[add_callback] ( identifier[instance] , identifier[prop] , identifier[widget] . identifier[setChecked] )
identifier[widget] . identifier[toggled] . identifier[connect] ( identifier[partial] ( identifier[setattr] , identifier[instance] , identifier[prop] ))
identifier[widget] . identifier[setChecked] ( identifier[getattr] ( identifier[instance] , identifier[prop] ) keyword[or] keyword[False] ) | def connect_checkable_button(instance, prop, widget):
"""
Connect a boolean callback property with a Qt button widget.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setChecked``
method and the ``toggled`` signal.
"""
add_callback(instance, prop, widget.setChecked)
widget.toggled.connect(partial(setattr, instance, prop))
widget.setChecked(getattr(instance, prop) or False) |
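The two-way binding idea without Qt: a tiny fake widget stands in for the button so the `partial(setattr, ...)` direction can be seen on its own:

from functools import partial

class FakeButton(object):
    def __init__(self):
        self.handlers, self.checked = [], False
    def setChecked(self, value):
        self.checked = value
    def toggle(self, value):             # stands in for the `toggled` signal
        self.setChecked(value)
        for handler in self.handlers:
            handler(value)

class State(object):
    visible = False

state, btn = State(), FakeButton()
btn.handlers.append(partial(setattr, state, 'visible'))  # widget -> object
btn.toggle(True)
print(state.visible)  # True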
def encode(df, encoding='utf8', verbosity=1):
"""If you try to encode each element individually with python, this would take days!"""
if verbosity > 0:
# pbar_i = 0
pbar = progressbar.ProgressBar(maxval=df.shape[1])
pbar.start()
# encode strings as UTF-8 so they'll work in python2 and python3
for colnum, col in enumerate(df.columns):
if isinstance(df[col], pd.Series):
if verbosity:
pbar.update(colnum)
if df[col].dtype in (np.dtype('object'), np.dtype('U'), np.dtype('S')) and any(isinstance(obj, basestring) for obj in df[col]):
strmask = np.array([isinstance(obj, basestring) for obj in df[col]])
series = df[col].copy()
try:
series[strmask] = np.char.encode(series[strmask].values.astype('U'))
except TypeError:
print("Unable to convert {} elements starting at position {} in column {}".format(
sum(strmask), [i for i, b in enumerate(strmask) if b][:1], col))
raise
except (UnicodeDecodeError, UnicodeEncodeError):
try:
series[strmask] = np.array([eval(s, {}, {}) for s in series[strmask]])
# FIXME: do something different for unicode and decode errors
except (SyntaxError, UnicodeDecodeError, UnicodeEncodeError):
newseries = []
for s in series[strmask]:
try:
newseries += [s.encode('utf8')]
except:
print(u'Had trouble encoding {} so used repr to turn it into {}'.format(s, repr(transcode_unicode(s))))
                                # strip all unicode chars and convert to ASCII str
newseries += [transcode_unicode(s)]
# for dtype('U'): UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 207: ordinal not in r
series[strmask] = np.array(newseries).astype('O')
df[col] = series
# df[col] = np.array([x.encode('utf8') if isinstance(x, unicode) else x for x in df[col]])
# WARNING: this takes DAYS for only 100k tweets!
# series = df[col].copy()
# for i, value in series.iteritems():
# if isinstance(value, basestring):
# series[i] = str(value.encode(encoding))
# df[col] = series
if verbosity:
pbar.finish()
return df | def function[encode, parameter[df, encoding, verbosity]]:
constant[If you try to encode each element individually with python, this would take days!]
if compare[name[verbosity] greater[>] constant[0]] begin[:]
variable[pbar] assign[=] call[name[progressbar].ProgressBar, parameter[]]
call[name[pbar].start, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20cabc370>, <ast.Name object at 0x7da20cabee90>]]] in starred[call[name[enumerate], parameter[name[df].columns]]] begin[:]
if call[name[isinstance], parameter[call[name[df]][name[col]], name[pd].Series]] begin[:]
if name[verbosity] begin[:]
call[name[pbar].update, parameter[name[colnum]]]
if <ast.BoolOp object at 0x7da20cabf130> begin[:]
variable[strmask] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da20cabc550>]]
variable[series] assign[=] call[call[name[df]][name[col]].copy, parameter[]]
<ast.Try object at 0x7da20cabe650>
call[name[df]][name[col]] assign[=] name[series]
if name[verbosity] begin[:]
call[name[pbar].finish, parameter[]]
return[name[df]] | keyword[def] identifier[encode] ( identifier[df] , identifier[encoding] = literal[string] , identifier[verbosity] = literal[int] ):
literal[string]
keyword[if] identifier[verbosity] > literal[int] :
identifier[pbar] = identifier[progressbar] . identifier[ProgressBar] ( identifier[maxval] = identifier[df] . identifier[shape] [ literal[int] ])
identifier[pbar] . identifier[start] ()
keyword[for] identifier[colnum] , identifier[col] keyword[in] identifier[enumerate] ( identifier[df] . identifier[columns] ):
keyword[if] identifier[isinstance] ( identifier[df] [ identifier[col] ], identifier[pd] . identifier[Series] ):
keyword[if] identifier[verbosity] :
identifier[pbar] . identifier[update] ( identifier[colnum] )
keyword[if] identifier[df] [ identifier[col] ]. identifier[dtype] keyword[in] ( identifier[np] . identifier[dtype] ( literal[string] ), identifier[np] . identifier[dtype] ( literal[string] ), identifier[np] . identifier[dtype] ( literal[string] )) keyword[and] identifier[any] ( identifier[isinstance] ( identifier[obj] , identifier[basestring] ) keyword[for] identifier[obj] keyword[in] identifier[df] [ identifier[col] ]):
identifier[strmask] = identifier[np] . identifier[array] ([ identifier[isinstance] ( identifier[obj] , identifier[basestring] ) keyword[for] identifier[obj] keyword[in] identifier[df] [ identifier[col] ]])
identifier[series] = identifier[df] [ identifier[col] ]. identifier[copy] ()
keyword[try] :
identifier[series] [ identifier[strmask] ]= identifier[np] . identifier[char] . identifier[encode] ( identifier[series] [ identifier[strmask] ]. identifier[values] . identifier[astype] ( literal[string] ))
keyword[except] identifier[TypeError] :
identifier[print] ( literal[string] . identifier[format] (
identifier[sum] ( identifier[strmask] ),[ identifier[i] keyword[for] identifier[i] , identifier[b] keyword[in] identifier[enumerate] ( identifier[strmask] ) keyword[if] identifier[b] ][: literal[int] ], identifier[col] ))
keyword[raise]
keyword[except] ( identifier[UnicodeDecodeError] , identifier[UnicodeEncodeError] ):
keyword[try] :
identifier[series] [ identifier[strmask] ]= identifier[np] . identifier[array] ([ identifier[eval] ( identifier[s] ,{},{}) keyword[for] identifier[s] keyword[in] identifier[series] [ identifier[strmask] ]])
keyword[except] ( identifier[SyntaxError] , identifier[UnicodeDecodeError] , identifier[UnicodeEncodeError] ):
identifier[newseries] =[]
keyword[for] identifier[s] keyword[in] identifier[series] [ identifier[strmask] ]:
keyword[try] :
identifier[newseries] +=[ identifier[s] . identifier[encode] ( literal[string] )]
keyword[except] :
identifier[print] ( literal[string] . identifier[format] ( identifier[s] , identifier[repr] ( identifier[transcode_unicode] ( identifier[s] ))))
identifier[newseries] +=[ identifier[transcode_unicode] ( identifier[s] )]
identifier[series] [ identifier[strmask] ]= identifier[np] . identifier[array] ( identifier[newseries] ). identifier[astype] ( literal[string] )
identifier[df] [ identifier[col] ]= identifier[series]
keyword[if] identifier[verbosity] :
identifier[pbar] . identifier[finish] ()
keyword[return] identifier[df] | def encode(df, encoding='utf8', verbosity=1):
"""If you try to encode each element individually with python, this would take days!"""
if verbosity > 0:
# pbar_i = 0
pbar = progressbar.ProgressBar(maxval=df.shape[1])
pbar.start() # depends on [control=['if'], data=[]]
# encode strings as UTF-8 so they'll work in python2 and python3
for (colnum, col) in enumerate(df.columns):
if isinstance(df[col], pd.Series):
if verbosity:
pbar.update(colnum) # depends on [control=['if'], data=[]]
if df[col].dtype in (np.dtype('object'), np.dtype('U'), np.dtype('S')) and any((isinstance(obj, basestring) for obj in df[col])):
strmask = np.array([isinstance(obj, basestring) for obj in df[col]])
series = df[col].copy()
try:
series[strmask] = np.char.encode(series[strmask].values.astype('U')) # depends on [control=['try'], data=[]]
except TypeError:
print('Unable to convert {} elements starting at position {} in column {}'.format(sum(strmask), [i for (i, b) in enumerate(strmask) if b][:1], col))
raise # depends on [control=['except'], data=[]]
except (UnicodeDecodeError, UnicodeEncodeError):
try:
series[strmask] = np.array([eval(s, {}, {}) for s in series[strmask]]) # depends on [control=['try'], data=[]]
# FIXME: do something different for unicode and decode errors
except (SyntaxError, UnicodeDecodeError, UnicodeEncodeError):
newseries = []
for s in series[strmask]:
try:
newseries += [s.encode('utf8')] # depends on [control=['try'], data=[]]
except:
print(u'Had trouble encoding {} so used repr to turn it into {}'.format(s, repr(transcode_unicode(s))))
                                # strip all unicode chars and convert to ASCII str
newseries += [transcode_unicode(s)] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['s']]
# for dtype('U'): UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 207: ordinal not in r
series[strmask] = np.array(newseries).astype('O') # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
df[col] = series # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# df[col] = np.array([x.encode('utf8') if isinstance(x, unicode) else x for x in df[col]])
# WARNING: this takes DAYS for only 100k tweets!
# series = df[col].copy()
# for i, value in series.iteritems():
# if isinstance(value, basestring):
# series[i] = str(value.encode(encoding))
# df[col] = series
if verbosity:
pbar.finish() # depends on [control=['if'], data=[]]
return df |
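The vectorised core in isolation: mask the string cells of an object column and encode them in one `np.char.encode` call rather than a Python loop (Python-3 `str` stands in for `basestring`):

import numpy as np
import pandas as pd

col = pd.Series([u'caf\xe9', 42, u'na\xefve'])
strmask = np.array([isinstance(v, str) for v in col])
out = col.copy()
out[strmask] = np.char.encode(out[strmask].values.astype('U'), 'utf8')
print(out.tolist())  # [b'caf\xc3\xa9', 42, b'na\xc3\xafve']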
def render_as_xml(func):
"""
Decorator to render as XML
:param func:
:return:
"""
if inspect.isclass(func):
setattr(func, "_renderer", xml_renderer)
return func
else:
@functools.wraps(func)
def decorated_view(*args, **kwargs):
data = func(*args, **kwargs)
return _build_response(data, dicttoxml)
return decorated_view | def function[render_as_xml, parameter[func]]:
constant[
Decorator to render as XML
:param func:
:return:
]
if call[name[inspect].isclass, parameter[name[func]]] begin[:]
call[name[setattr], parameter[name[func], constant[_renderer], name[xml_renderer]]]
return[name[func]] | keyword[def] identifier[render_as_xml] ( identifier[func] ):
literal[string]
keyword[if] identifier[inspect] . identifier[isclass] ( identifier[func] ):
identifier[setattr] ( identifier[func] , literal[string] , identifier[xml_renderer] )
keyword[return] identifier[func]
keyword[else] :
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[decorated_view] (* identifier[args] ,** identifier[kwargs] ):
identifier[data] = identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[_build_response] ( identifier[data] , identifier[dicttoxml] )
keyword[return] identifier[decorated_view] | def render_as_xml(func):
"""
Decorator to render as XML
:param func:
:return:
"""
if inspect.isclass(func):
setattr(func, '_renderer', xml_renderer)
return func # depends on [control=['if'], data=[]]
else:
@functools.wraps(func)
def decorated_view(*args, **kwargs):
data = func(*args, **kwargs)
return _build_response(data, dicttoxml)
return decorated_view |
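The decorator mechanics in miniature, with `str` standing in for `dicttoxml` and a local `build_response` standing in for the module helper:

import functools

def build_response(data, serializer):    # stand-in for _build_response
    return serializer(data)

def render_with(serializer):
    def decorator(func):
        @functools.wraps(func)
        def decorated_view(*args, **kwargs):
            return build_response(func(*args, **kwargs), serializer)
        return decorated_view
    return decorator

@render_with(str)                        # str stands in for dicttoxml
def get_user():
    return {'name': 'alice', 'id': 1}

print(get_user())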
def node_sub(self, node_self, node_other):
'''node_sub
High-level api: Compute the delta of two config nodes. This method is
recursive. Assume two config nodes are different.
Parameters
----------
node_self : `Element`
A config node in the destination config that is being processed.
node_self cannot be a leaf node.
node_other : `Element`
A config node in the source config that is being processed.
Returns
-------
tuple
There are three elements in the tuple: a list of Restconf DELETE
Requests, a list of Restconf PUT Requests, and a list of Restconf
PATCH Requests.
'''
deletes = []
puts = []
patches = []
# if a leaf-list node, delete the leaf-list totally
# if a list node, by default delete the list instance
# if a list node and delete_whole=True, delete the list totally
def generate_delete(node, instance=True):
composer = RestconfComposer(self.device, node)
url = 'https://{}:{}'.format(self.ip, self.port)
url += composer.get_url(instance=instance)
deletes.append(requests.Request('DELETE', url, headers=header_json))
# if a leaf-list node, replace the leaf-list totally
# if a list node, replace the list totally
    def generate_put(node, instance=True):
        composer = RestconfComposer(self.device, node)
        url = 'https://{}:{}'.format(self.ip, self.port)
        url += composer.get_url(instance=instance)
        data_json = composer.get_json(instance=instance)
        # Build the Request once, queue it, and also return it so call
        # sites of the form `return ([], [generate_put(...)], [])` hand
        # back a real Request instead of a list containing None.
        request = requests.Request('PUT', url, headers=header_json,
                                   data=data_json)
        puts.append(request)
        return request
# if a leaf-list node, update the leaf-list totally
# if a list node, by default update the list instance
# if a list node and update_whole=True, update the list totally
def generate_patch(node, instance=True):
composer = RestconfComposer(self.device, node)
url = 'https://{}:{}'.format(self.ip, self.port)
url += composer.get_url(instance=instance)
data_json = composer.get_json(instance=instance)
patches.append(requests.Request('PATCH', url, headers=header_json,
data=data_json))
# the sequence of list instances under node_self is different from the
# one under node_other
def list_seq_is_different(tag):
s_list = [i for i in node_self.iterchildren(tag=tag)]
o_list = [i for i in node_other.iterchildren(tag=tag)]
if [self.device.get_xpath(n) for n in s_list] == \
[self.device.get_xpath(n) for n in o_list]:
return False
else:
return True
# all list instances under node_self have peers under node_other, and
# the sequence of list instances under node_self that have peers under
    # node_other is the same as the sequence of list instances under node_other
def list_seq_is_inclusive(tag):
s_list = [i for i in node_self.iterchildren(tag=tag)]
o_list = [i for i in node_other.iterchildren(tag=tag)]
s_seq = [self.device.get_xpath(n) for n in s_list]
o_seq = [self.device.get_xpath(n) for n in o_list]
if set(s_seq) <= set(o_seq) and \
[i for i in s_seq if i in o_seq] == o_seq:
return True
else:
return False
in_s_not_in_o, in_o_not_in_s, in_s_and_in_o = \
self._group_kids(node_self, node_other)
for child_s in in_s_not_in_o:
schema_node = self.device.get_schema_node(child_s)
if schema_node.get('type') == 'leaf' or \
schema_node.get('type') == 'container':
generate_patch(child_s)
elif schema_node.get('type') == 'leaf-list' or \
schema_node.get('type') == 'list':
if schema_node.get('ordered-by') == 'user':
return ([], [generate_put(node_self, instance=True)], [])
else:
generate_put(child_s, instance=True)
for child_o in in_o_not_in_s:
schema_node = self.device.get_schema_node(child_o)
if schema_node.get('type') == 'leaf' or \
schema_node.get('type') == 'container':
generate_delete(child_o)
elif schema_node.get('type') == 'leaf-list' or \
schema_node.get('type') == 'list':
if schema_node.get('ordered-by') == 'user':
if list_seq_is_inclusive(child_o.tag):
generate_delete(child_o, instance=True)
else:
return ([], [generate_put(node_self, instance=True)],
[])
else:
generate_delete(child_o, instance=True)
for child_s, child_o in in_s_and_in_o:
schema_node = self.device.get_schema_node(child_s)
if schema_node.get('type') == 'leaf':
if child_s.text != child_o.text:
generate_patch(child_s)
elif schema_node.get('type') == 'leaf-list':
if schema_node.get('ordered-by') == 'user':
if list_seq_is_different(child_s.tag):
return ([], [generate_put(node_self, instance=True)],
[])
elif schema_node.get('type') == 'container':
if BaseCalculator(self.device, child_s, child_o).ne:
x, y, z = self.node_sub(child_s, child_o)
deletes += x
puts += y
patches += z
elif schema_node.get('type') == 'list':
if schema_node.get('ordered-by') == 'user':
if list_seq_is_different(child_s.tag):
return ([], [generate_put(node_self, instance=True)],
[])
else:
if BaseCalculator(self.device, child_s, child_o).ne:
x, y, z = self.node_sub(child_s, child_o)
deletes += x
puts += y
patches += z
else:
if BaseCalculator(self.device, child_s, child_o).ne:
x, y, z = self.node_sub(child_s, child_o)
deletes += x
puts += y
patches += z
return (deletes, puts, patches) | def function[node_sub, parameter[self, node_self, node_other]]:
constant[node_sub
High-level api: Compute the delta of two config nodes. This method is
recursive. Assume two config nodes are different.
Parameters
----------
node_self : `Element`
A config node in the destination config that is being processed.
node_self cannot be a leaf node.
node_other : `Element`
A config node in the source config that is being processed.
Returns
-------
tuple
There are three elements in the tuple: a list of Restconf DELETE
Requests, a list of Restconf PUT Requests, and a list of Restconf
PATCH Requests.
]
variable[deletes] assign[=] list[[]]
variable[puts] assign[=] list[[]]
variable[patches] assign[=] list[[]]
def function[generate_delete, parameter[node, instance]]:
variable[composer] assign[=] call[name[RestconfComposer], parameter[name[self].device, name[node]]]
variable[url] assign[=] call[constant[https://{}:{}].format, parameter[name[self].ip, name[self].port]]
<ast.AugAssign object at 0x7da1b2615840>
call[name[deletes].append, parameter[call[name[requests].Request, parameter[constant[DELETE], name[url]]]]]
def function[generate_put, parameter[node, instance]]:
variable[composer] assign[=] call[name[RestconfComposer], parameter[name[self].device, name[node]]]
variable[url] assign[=] call[constant[https://{}:{}].format, parameter[name[self].ip, name[self].port]]
<ast.AugAssign object at 0x7da1b2616050>
variable[data_json] assign[=] call[name[composer].get_json, parameter[]]
call[name[puts].append, parameter[call[name[requests].Request, parameter[constant[PUT], name[url]]]]]
def function[generate_patch, parameter[node, instance]]:
variable[composer] assign[=] call[name[RestconfComposer], parameter[name[self].device, name[node]]]
variable[url] assign[=] call[constant[https://{}:{}].format, parameter[name[self].ip, name[self].port]]
<ast.AugAssign object at 0x7da1b2616d70>
variable[data_json] assign[=] call[name[composer].get_json, parameter[]]
call[name[patches].append, parameter[call[name[requests].Request, parameter[constant[PATCH], name[url]]]]]
def function[list_seq_is_different, parameter[tag]]:
variable[s_list] assign[=] <ast.ListComp object at 0x7da1b2617970>
variable[o_list] assign[=] <ast.ListComp object at 0x7da1b2616aa0>
if compare[<ast.ListComp object at 0x7da1b2615240> equal[==] <ast.ListComp object at 0x7da1b2617be0>] begin[:]
return[constant[False]]
def function[list_seq_is_inclusive, parameter[tag]]:
variable[s_list] assign[=] <ast.ListComp object at 0x7da1b2554580>
variable[o_list] assign[=] <ast.ListComp object at 0x7da1b2554340>
variable[s_seq] assign[=] <ast.ListComp object at 0x7da1b25542e0>
variable[o_seq] assign[=] <ast.ListComp object at 0x7da1b2554910>
if <ast.BoolOp object at 0x7da1b25551e0> begin[:]
return[constant[True]]
<ast.Tuple object at 0x7da1b2554b50> assign[=] call[name[self]._group_kids, parameter[name[node_self], name[node_other]]]
for taget[name[child_s]] in starred[name[in_s_not_in_o]] begin[:]
variable[schema_node] assign[=] call[name[self].device.get_schema_node, parameter[name[child_s]]]
if <ast.BoolOp object at 0x7da1b2554760> begin[:]
call[name[generate_patch], parameter[name[child_s]]]
for taget[name[child_o]] in starred[name[in_o_not_in_s]] begin[:]
variable[schema_node] assign[=] call[name[self].device.get_schema_node, parameter[name[child_o]]]
if <ast.BoolOp object at 0x7da1b2503eb0> begin[:]
call[name[generate_delete], parameter[name[child_o]]]
for taget[tuple[[<ast.Name object at 0x7da1b2524d30>, <ast.Name object at 0x7da1b2526380>]]] in starred[name[in_s_and_in_o]] begin[:]
variable[schema_node] assign[=] call[name[self].device.get_schema_node, parameter[name[child_s]]]
if compare[call[name[schema_node].get, parameter[constant[type]]] equal[==] constant[leaf]] begin[:]
if compare[name[child_s].text not_equal[!=] name[child_o].text] begin[:]
call[name[generate_patch], parameter[name[child_s]]]
return[tuple[[<ast.Name object at 0x7da1b2570dc0>, <ast.Name object at 0x7da1b2570a30>, <ast.Name object at 0x7da1b25714b0>]]] | keyword[def] identifier[node_sub] ( identifier[self] , identifier[node_self] , identifier[node_other] ):
literal[string]
identifier[deletes] =[]
identifier[puts] =[]
identifier[patches] =[]
keyword[def] identifier[generate_delete] ( identifier[node] , identifier[instance] = keyword[True] ):
identifier[composer] = identifier[RestconfComposer] ( identifier[self] . identifier[device] , identifier[node] )
identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[ip] , identifier[self] . identifier[port] )
identifier[url] += identifier[composer] . identifier[get_url] ( identifier[instance] = identifier[instance] )
identifier[deletes] . identifier[append] ( identifier[requests] . identifier[Request] ( literal[string] , identifier[url] , identifier[headers] = identifier[header_json] ))
keyword[def] identifier[generate_put] ( identifier[node] , identifier[instance] = keyword[True] ):
identifier[composer] = identifier[RestconfComposer] ( identifier[self] . identifier[device] , identifier[node] )
identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[ip] , identifier[self] . identifier[port] )
identifier[url] += identifier[composer] . identifier[get_url] ( identifier[instance] = identifier[instance] )
identifier[data_json] = identifier[composer] . identifier[get_json] ( identifier[instance] = identifier[instance] )
identifier[puts] . identifier[append] ( identifier[requests] . identifier[Request] ( literal[string] , identifier[url] , identifier[headers] = identifier[header_json] ,
identifier[data] = identifier[data_json] ))
keyword[def] identifier[generate_patch] ( identifier[node] , identifier[instance] = keyword[True] ):
identifier[composer] = identifier[RestconfComposer] ( identifier[self] . identifier[device] , identifier[node] )
identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[ip] , identifier[self] . identifier[port] )
identifier[url] += identifier[composer] . identifier[get_url] ( identifier[instance] = identifier[instance] )
identifier[data_json] = identifier[composer] . identifier[get_json] ( identifier[instance] = identifier[instance] )
identifier[patches] . identifier[append] ( identifier[requests] . identifier[Request] ( literal[string] , identifier[url] , identifier[headers] = identifier[header_json] ,
identifier[data] = identifier[data_json] ))
keyword[def] identifier[list_seq_is_different] ( identifier[tag] ):
identifier[s_list] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[node_self] . identifier[iterchildren] ( identifier[tag] = identifier[tag] )]
identifier[o_list] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[node_other] . identifier[iterchildren] ( identifier[tag] = identifier[tag] )]
keyword[if] [ identifier[self] . identifier[device] . identifier[get_xpath] ( identifier[n] ) keyword[for] identifier[n] keyword[in] identifier[s_list] ]==[ identifier[self] . identifier[device] . identifier[get_xpath] ( identifier[n] ) keyword[for] identifier[n] keyword[in] identifier[o_list] ]:
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[True]
keyword[def] identifier[list_seq_is_inclusive] ( identifier[tag] ):
identifier[s_list] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[node_self] . identifier[iterchildren] ( identifier[tag] = identifier[tag] )]
identifier[o_list] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[node_other] . identifier[iterchildren] ( identifier[tag] = identifier[tag] )]
identifier[s_seq] =[ identifier[self] . identifier[device] . identifier[get_xpath] ( identifier[n] ) keyword[for] identifier[n] keyword[in] identifier[s_list] ]
identifier[o_seq] =[ identifier[self] . identifier[device] . identifier[get_xpath] ( identifier[n] ) keyword[for] identifier[n] keyword[in] identifier[o_list] ]
keyword[if] identifier[set] ( identifier[s_seq] )<= identifier[set] ( identifier[o_seq] ) keyword[and] [ identifier[i] keyword[for] identifier[i] keyword[in] identifier[s_seq] keyword[if] identifier[i] keyword[in] identifier[o_seq] ]== identifier[o_seq] :
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False]
identifier[in_s_not_in_o] , identifier[in_o_not_in_s] , identifier[in_s_and_in_o] = identifier[self] . identifier[_group_kids] ( identifier[node_self] , identifier[node_other] )
keyword[for] identifier[child_s] keyword[in] identifier[in_s_not_in_o] :
identifier[schema_node] = identifier[self] . identifier[device] . identifier[get_schema_node] ( identifier[child_s] )
keyword[if] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] keyword[or] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] :
identifier[generate_patch] ( identifier[child_s] )
keyword[elif] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] keyword[or] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] :
keyword[if] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] :
keyword[return] ([],[ identifier[generate_put] ( identifier[node_self] , identifier[instance] = keyword[True] )],[])
keyword[else] :
identifier[generate_put] ( identifier[child_s] , identifier[instance] = keyword[True] )
keyword[for] identifier[child_o] keyword[in] identifier[in_o_not_in_s] :
identifier[schema_node] = identifier[self] . identifier[device] . identifier[get_schema_node] ( identifier[child_o] )
keyword[if] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] keyword[or] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] :
identifier[generate_delete] ( identifier[child_o] )
keyword[elif] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] keyword[or] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] :
keyword[if] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] :
keyword[if] identifier[list_seq_is_inclusive] ( identifier[child_o] . identifier[tag] ):
identifier[generate_delete] ( identifier[child_o] , identifier[instance] = keyword[True] )
keyword[else] :
keyword[return] ([],[ identifier[generate_put] ( identifier[node_self] , identifier[instance] = keyword[True] )],
[])
keyword[else] :
identifier[generate_delete] ( identifier[child_o] , identifier[instance] = keyword[True] )
keyword[for] identifier[child_s] , identifier[child_o] keyword[in] identifier[in_s_and_in_o] :
identifier[schema_node] = identifier[self] . identifier[device] . identifier[get_schema_node] ( identifier[child_s] )
keyword[if] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] :
keyword[if] identifier[child_s] . identifier[text] != identifier[child_o] . identifier[text] :
identifier[generate_patch] ( identifier[child_s] )
keyword[elif] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] :
keyword[if] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] :
keyword[if] identifier[list_seq_is_different] ( identifier[child_s] . identifier[tag] ):
keyword[return] ([],[ identifier[generate_put] ( identifier[node_self] , identifier[instance] = keyword[True] )],
[])
keyword[elif] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] :
keyword[if] identifier[BaseCalculator] ( identifier[self] . identifier[device] , identifier[child_s] , identifier[child_o] ). identifier[ne] :
identifier[x] , identifier[y] , identifier[z] = identifier[self] . identifier[node_sub] ( identifier[child_s] , identifier[child_o] )
identifier[deletes] += identifier[x]
identifier[puts] += identifier[y]
identifier[patches] += identifier[z]
keyword[elif] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] :
keyword[if] identifier[schema_node] . identifier[get] ( literal[string] )== literal[string] :
keyword[if] identifier[list_seq_is_different] ( identifier[child_s] . identifier[tag] ):
keyword[return] ([],[ identifier[generate_put] ( identifier[node_self] , identifier[instance] = keyword[True] )],
[])
keyword[else] :
keyword[if] identifier[BaseCalculator] ( identifier[self] . identifier[device] , identifier[child_s] , identifier[child_o] ). identifier[ne] :
identifier[x] , identifier[y] , identifier[z] = identifier[self] . identifier[node_sub] ( identifier[child_s] , identifier[child_o] )
identifier[deletes] += identifier[x]
identifier[puts] += identifier[y]
identifier[patches] += identifier[z]
keyword[else] :
keyword[if] identifier[BaseCalculator] ( identifier[self] . identifier[device] , identifier[child_s] , identifier[child_o] ). identifier[ne] :
identifier[x] , identifier[y] , identifier[z] = identifier[self] . identifier[node_sub] ( identifier[child_s] , identifier[child_o] )
identifier[deletes] += identifier[x]
identifier[puts] += identifier[y]
identifier[patches] += identifier[z]
keyword[return] ( identifier[deletes] , identifier[puts] , identifier[patches] ) | def node_sub(self, node_self, node_other):
"""node_sub
High-level api: Compute the delta of two config nodes. This method is
recursive. Assume two config nodes are different.
Parameters
----------
node_self : `Element`
A config node in the destination config that is being processed.
node_self cannot be a leaf node.
node_other : `Element`
A config node in the source config that is being processed.
Returns
-------
tuple
There are three elements in the tuple: a list of Restconf DELETE
Requests, a list of Restconf PUT Requests, and a list of Restconf
PATCH Requests.
"""
deletes = []
puts = []
patches = []
# if a leaf-list node, delete the leaf-list totally
# if a list node, by default delete the list instance
# if a list node and delete_whole=True, delete the list totally
def generate_delete(node, instance=True):
composer = RestconfComposer(self.device, node)
url = 'https://{}:{}'.format(self.ip, self.port)
url += composer.get_url(instance=instance)
deletes.append(requests.Request('DELETE', url, headers=header_json))
# if a leaf-list node, replace the leaf-list totally
# if a list node, replace the list totally
    def generate_put(node, instance=True):
        composer = RestconfComposer(self.device, node)
        url = 'https://{}:{}'.format(self.ip, self.port)
        url += composer.get_url(instance=instance)
        data_json = composer.get_json(instance=instance)
        # Queue the Request and also return it so `[generate_put(...)]`
        # yields a real Request rather than [None].
        request = requests.Request('PUT', url, headers=header_json, data=data_json)
        puts.append(request)
        return request
# if a leaf-list node, update the leaf-list totally
# if a list node, by default update the list instance
# if a list node and update_whole=True, update the list totally
def generate_patch(node, instance=True):
composer = RestconfComposer(self.device, node)
url = 'https://{}:{}'.format(self.ip, self.port)
url += composer.get_url(instance=instance)
data_json = composer.get_json(instance=instance)
patches.append(requests.Request('PATCH', url, headers=header_json, data=data_json))
# the sequence of list instances under node_self is different from the
# one under node_other
def list_seq_is_different(tag):
s_list = [i for i in node_self.iterchildren(tag=tag)]
o_list = [i for i in node_other.iterchildren(tag=tag)]
if [self.device.get_xpath(n) for n in s_list] == [self.device.get_xpath(n) for n in o_list]:
return False # depends on [control=['if'], data=[]]
else:
return True
# all list instances under node_self have peers under node_other, and
# the sequence of list instances under node_self that have peers under
# node_other is same as the sequence of list instances under node_other
def list_seq_is_inclusive(tag):
s_list = [i for i in node_self.iterchildren(tag=tag)]
o_list = [i for i in node_other.iterchildren(tag=tag)]
s_seq = [self.device.get_xpath(n) for n in s_list]
o_seq = [self.device.get_xpath(n) for n in o_list]
if set(s_seq) <= set(o_seq) and [i for i in s_seq if i in o_seq] == o_seq:
return True # depends on [control=['if'], data=[]]
else:
return False
(in_s_not_in_o, in_o_not_in_s, in_s_and_in_o) = self._group_kids(node_self, node_other)
for child_s in in_s_not_in_o:
schema_node = self.device.get_schema_node(child_s)
if schema_node.get('type') == 'leaf' or schema_node.get('type') == 'container':
generate_patch(child_s) # depends on [control=['if'], data=[]]
elif schema_node.get('type') == 'leaf-list' or schema_node.get('type') == 'list':
if schema_node.get('ordered-by') == 'user':
return ([], [generate_put(node_self, instance=True)], []) # depends on [control=['if'], data=[]]
else:
generate_put(child_s, instance=True) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child_s']]
for child_o in in_o_not_in_s:
schema_node = self.device.get_schema_node(child_o)
if schema_node.get('type') == 'leaf' or schema_node.get('type') == 'container':
generate_delete(child_o) # depends on [control=['if'], data=[]]
elif schema_node.get('type') == 'leaf-list' or schema_node.get('type') == 'list':
if schema_node.get('ordered-by') == 'user':
if list_seq_is_inclusive(child_o.tag):
generate_delete(child_o, instance=True) # depends on [control=['if'], data=[]]
else:
return ([], [generate_put(node_self, instance=True)], []) # depends on [control=['if'], data=[]]
else:
generate_delete(child_o, instance=True) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child_o']]
for (child_s, child_o) in in_s_and_in_o:
schema_node = self.device.get_schema_node(child_s)
if schema_node.get('type') == 'leaf':
if child_s.text != child_o.text:
generate_patch(child_s) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif schema_node.get('type') == 'leaf-list':
if schema_node.get('ordered-by') == 'user':
if list_seq_is_different(child_s.tag):
return ([], [generate_put(node_self, instance=True)], []) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif schema_node.get('type') == 'container':
if BaseCalculator(self.device, child_s, child_o).ne:
(x, y, z) = self.node_sub(child_s, child_o)
deletes += x
puts += y
patches += z # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif schema_node.get('type') == 'list':
if schema_node.get('ordered-by') == 'user':
if list_seq_is_different(child_s.tag):
return ([], [generate_put(node_self, instance=True)], []) # depends on [control=['if'], data=[]]
elif BaseCalculator(self.device, child_s, child_o).ne:
(x, y, z) = self.node_sub(child_s, child_o)
deletes += x
puts += y
patches += z # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif BaseCalculator(self.device, child_s, child_o).ne:
(x, y, z) = self.node_sub(child_s, child_o)
deletes += x
puts += y
patches += z # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return (deletes, puts, patches) |
def write_jsonl_file(fname, data):
"""Writes a jsonl file.
Args:
data: list of json encoded data
"""
if not isinstance(data, list):
print('warning: malformed json data for file', fname)
return
with open(fname, 'w') as of:
for row in data:
# TODO: other malformed cases?
if row.strip():
of.write('%s\n' % row.strip()) | def function[write_jsonl_file, parameter[fname, data]]:
constant[Writes a jsonl file.
Args:
data: list of json encoded data
]
if <ast.UnaryOp object at 0x7da18bcc8d30> begin[:]
call[name[print], parameter[constant[warning: malformed json data for file], name[fname]]]
return[None]
with call[name[open], parameter[name[fname], constant[w]]] begin[:]
for taget[name[row]] in starred[name[data]] begin[:]
if call[name[row].strip, parameter[]] begin[:]
call[name[of].write, parameter[binary_operation[constant[%s
] <ast.Mod object at 0x7da2590d6920> call[name[row].strip, parameter[]]]]] | keyword[def] identifier[write_jsonl_file] ( identifier[fname] , identifier[data] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[list] ):
identifier[print] ( literal[string] , identifier[fname] )
keyword[return]
keyword[with] identifier[open] ( identifier[fname] , literal[string] ) keyword[as] identifier[of] :
keyword[for] identifier[row] keyword[in] identifier[data] :
keyword[if] identifier[row] . identifier[strip] ():
identifier[of] . identifier[write] ( literal[string] % identifier[row] . identifier[strip] ()) | def write_jsonl_file(fname, data):
"""Writes a jsonl file.
Args:
data: list of json encoded data
"""
if not isinstance(data, list):
print('warning: malformed json data for file', fname)
return # depends on [control=['if'], data=[]]
with open(fname, 'w') as of:
for row in data:
# TODO: other malformed cases?
if row.strip():
of.write('%s\n' % row.strip()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']] # depends on [control=['with'], data=['of']] |
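# Usage sketch for write_jsonl_file above: each element of `rows` is assumed
# to already be a JSON-encoded string, written one per line.
import json
rows = [json.dumps({'id': i, 'ok': True}) for i in range(3)]
write_jsonl_file('/tmp/example.jsonl', rows)  # writes three JSON lines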
def shorten(text):
""" Reduce text length for displaying / logging purposes.
"""
if len(text) >= MAX_DISPLAY_LEN:
        text = text[:MAX_DISPLAY_LEN // 2] + "..." + text[-MAX_DISPLAY_LEN // 2:]
return text | def function[shorten, parameter[text]]:
constant[ Reduce text length for displaying / logging purposes.
]
if compare[call[name[len], parameter[name[text]]] greater_or_equal[>=] name[MAX_DISPLAY_LEN]] begin[:]
variable[text] assign[=] binary_operation[binary_operation[call[name[text]][<ast.Slice object at 0x7da2041db910>] + constant[...]] + call[name[text]][<ast.Slice object at 0x7da2041d98a0>]]
return[name[text]] | keyword[def] identifier[shorten] ( identifier[text] ):
literal[string]
keyword[if] identifier[len] ( identifier[text] )>= identifier[MAX_DISPLAY_LEN] :
identifier[text] = identifier[text] [: identifier[MAX_DISPLAY_LEN] // literal[int] ]+ literal[string] + identifier[text] [- identifier[MAX_DISPLAY_LEN] // literal[int] :]
keyword[return] identifier[text] | def shorten(text):
""" Reduce text length for displaying / logging purposes.
"""
if len(text) >= MAX_DISPLAY_LEN:
text = text[:MAX_DISPLAY_LEN // 2] + '...' + text[-MAX_DISPLAY_LEN // 2:] # depends on [control=['if'], data=['MAX_DISPLAY_LEN']]
return text |
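# Quick illustration of shorten(); MAX_DISPLAY_LEN is the module constant
# used above, set to 20 here purely for the example.
MAX_DISPLAY_LEN = 20
assert shorten('short text') == 'short text'              # unchanged
assert shorten('x' * 40) == 'x' * 10 + '...' + 'x' * 10   # middle elided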
def confusion_matrix(rater_a, rater_b, min_rating=None, max_rating=None):
"""
Generates a confusion matrix between rater_a and rater_b
A confusion matrix shows how often 2 values agree and disagree
See quadratic_weighted_kappa for argument descriptions
"""
assert(len(rater_a) == len(rater_b))
rater_a = [int(a) for a in rater_a]
rater_b = [int(b) for b in rater_b]
    # Default to the observed range before coercing; int(None) would raise.
    if min_rating is None:
        min_rating = min(rater_a)
    if max_rating is None:
        max_rating = max(rater_a)
    min_rating = int(min_rating)
    max_rating = int(max_rating)
num_ratings = int(max_rating - min_rating + 1)
conf_mat = [[0 for i in range(num_ratings)]
for j in range(num_ratings)]
for a, b in zip(rater_a, rater_b):
conf_mat[int(a - min_rating)][int(b - min_rating)] += 1
return conf_mat | def function[confusion_matrix, parameter[rater_a, rater_b, min_rating, max_rating]]:
constant[
Generates a confusion matrix between rater_a and rater_b
A confusion matrix shows how often 2 values agree and disagree
See quadratic_weighted_kappa for argument descriptions
]
assert[compare[call[name[len], parameter[name[rater_a]]] equal[==] call[name[len], parameter[name[rater_b]]]]]
variable[rater_a] assign[=] <ast.ListComp object at 0x7da207f03640>
variable[rater_b] assign[=] <ast.ListComp object at 0x7da207f00af0>
variable[min_rating] assign[=] call[name[int], parameter[name[min_rating]]]
variable[max_rating] assign[=] call[name[int], parameter[name[max_rating]]]
if compare[name[min_rating] is constant[None]] begin[:]
variable[min_rating] assign[=] call[name[min], parameter[name[rater_a]]]
if compare[name[max_rating] is constant[None]] begin[:]
variable[max_rating] assign[=] call[name[max], parameter[name[rater_a]]]
variable[num_ratings] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[max_rating] - name[min_rating]] + constant[1]]]]
variable[conf_mat] assign[=] <ast.ListComp object at 0x7da207f000d0>
for taget[tuple[[<ast.Name object at 0x7da207f00910>, <ast.Name object at 0x7da207f03100>]]] in starred[call[name[zip], parameter[name[rater_a], name[rater_b]]]] begin[:]
<ast.AugAssign object at 0x7da207f02500>
return[name[conf_mat]] | keyword[def] identifier[confusion_matrix] ( identifier[rater_a] , identifier[rater_b] , identifier[min_rating] = keyword[None] , identifier[max_rating] = keyword[None] ):
literal[string]
keyword[assert] ( identifier[len] ( identifier[rater_a] )== identifier[len] ( identifier[rater_b] ))
identifier[rater_a] =[ identifier[int] ( identifier[a] ) keyword[for] identifier[a] keyword[in] identifier[rater_a] ]
identifier[rater_b] =[ identifier[int] ( identifier[b] ) keyword[for] identifier[b] keyword[in] identifier[rater_b] ]
identifier[min_rating] = identifier[int] ( identifier[min_rating] )
identifier[max_rating] = identifier[int] ( identifier[max_rating] )
keyword[if] identifier[min_rating] keyword[is] keyword[None] :
identifier[min_rating] = identifier[min] ( identifier[rater_a] )
keyword[if] identifier[max_rating] keyword[is] keyword[None] :
identifier[max_rating] = identifier[max] ( identifier[rater_a] )
identifier[num_ratings] = identifier[int] ( identifier[max_rating] - identifier[min_rating] + literal[int] )
identifier[conf_mat] =[[ literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num_ratings] )]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[num_ratings] )]
keyword[for] identifier[a] , identifier[b] keyword[in] identifier[zip] ( identifier[rater_a] , identifier[rater_b] ):
identifier[conf_mat] [ identifier[int] ( identifier[a] - identifier[min_rating] )][ identifier[int] ( identifier[b] - identifier[min_rating] )]+= literal[int]
keyword[return] identifier[conf_mat] | def confusion_matrix(rater_a, rater_b, min_rating=None, max_rating=None):
"""
Generates a confusion matrix between rater_a and rater_b
A confusion matrix shows how often 2 values agree and disagree
See quadratic_weighted_kappa for argument descriptions
"""
assert len(rater_a) == len(rater_b)
rater_a = [int(a) for a in rater_a]
rater_b = [int(b) for b in rater_b]
min_rating = int(min_rating)
max_rating = int(max_rating)
if min_rating is None:
min_rating = min(rater_a) # depends on [control=['if'], data=['min_rating']]
if max_rating is None:
max_rating = max(rater_a) # depends on [control=['if'], data=['max_rating']]
num_ratings = int(max_rating - min_rating + 1)
conf_mat = [[0 for i in range(num_ratings)] for j in range(num_ratings)]
for (a, b) in zip(rater_a, rater_b):
conf_mat[int(a - min_rating)][int(b - min_rating)] += 1 # depends on [control=['for'], data=[]]
return conf_mat |
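# Sanity check of confusion_matrix() on two toy rating vectors: rows index
# rater_a's rating and columns rater_b's, both offset by min_rating.
assert confusion_matrix([1, 2, 3, 3], [1, 2, 2, 3], 1, 3) == [[1, 0, 0],
                                                              [0, 1, 0],
                                                              [0, 1, 1]]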
def get_position_searchable(self):
"""Return dict of the position and corrasponding searchable str
"""
ids = gkr.list_item_ids_sync(self.keyring)
position_searchable = {}
for i in ids:
item_attrs = gkr.item_get_attributes_sync(self.keyring, i)
position_searchable[i] = item_attrs['searchable']
return position_searchable | def function[get_position_searchable, parameter[self]]:
constant[Return dict of the position and corrasponding searchable str
]
variable[ids] assign[=] call[name[gkr].list_item_ids_sync, parameter[name[self].keyring]]
variable[position_searchable] assign[=] dictionary[[], []]
for taget[name[i]] in starred[name[ids]] begin[:]
variable[item_attrs] assign[=] call[name[gkr].item_get_attributes_sync, parameter[name[self].keyring, name[i]]]
call[name[position_searchable]][name[i]] assign[=] call[name[item_attrs]][constant[searchable]]
return[name[position_searchable]] | keyword[def] identifier[get_position_searchable] ( identifier[self] ):
literal[string]
identifier[ids] = identifier[gkr] . identifier[list_item_ids_sync] ( identifier[self] . identifier[keyring] )
identifier[position_searchable] ={}
keyword[for] identifier[i] keyword[in] identifier[ids] :
identifier[item_attrs] = identifier[gkr] . identifier[item_get_attributes_sync] ( identifier[self] . identifier[keyring] , identifier[i] )
identifier[position_searchable] [ identifier[i] ]= identifier[item_attrs] [ literal[string] ]
keyword[return] identifier[position_searchable] | def get_position_searchable(self):
"""Return dict of the position and corrasponding searchable str
"""
ids = gkr.list_item_ids_sync(self.keyring)
position_searchable = {}
for i in ids:
item_attrs = gkr.item_get_attributes_sync(self.keyring, i)
position_searchable[i] = item_attrs['searchable'] # depends on [control=['for'], data=['i']]
return position_searchable |
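# Returned shape, sketched: GNOME keyring item ids mapped to each item's
# 'searchable' attribute, e.g. {1: 'mail.google.com', 2: 'github.com'}
# (example values are purely illustrative).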
def _downloadfile(self, url, fname):
''' Download the image '''
print("The file %s need to be download - Wait\n " %
(fname.split('/')[-1]))
urllib.urlretrieve(url, fname, self._report)
print("\n The download of the file %s has succeded \n " %
(fname.split('/')[-1])) | def function[_downloadfile, parameter[self, url, fname]]:
constant[ Download the image ]
call[name[print], parameter[binary_operation[constant[The file %s need to be download - Wait
] <ast.Mod object at 0x7da2590d6920> call[call[name[fname].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da18fe90c70>]]]]
call[name[urllib].urlretrieve, parameter[name[url], name[fname], name[self]._report]]
call[name[print], parameter[binary_operation[constant[
The download of the file %s has succeded
] <ast.Mod object at 0x7da2590d6920> call[call[name[fname].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da18fe91720>]]]] | keyword[def] identifier[_downloadfile] ( identifier[self] , identifier[url] , identifier[fname] ):
literal[string]
identifier[print] ( literal[string] %
( identifier[fname] . identifier[split] ( literal[string] )[- literal[int] ]))
identifier[urllib] . identifier[urlretrieve] ( identifier[url] , identifier[fname] , identifier[self] . identifier[_report] )
identifier[print] ( literal[string] %
( identifier[fname] . identifier[split] ( literal[string] )[- literal[int] ])) | def _downloadfile(self, url, fname):
""" Download the image """
print('The file %s need to be download - Wait\n ' % fname.split('/')[-1])
urllib.urlretrieve(url, fname, self._report)
print('\n The download of the file %s has succeded \n ' % fname.split('/')[-1]) |
def load_configuration(self):
"""Loading configuration."""
filename = os.path.join(os.path.dirname(__file__), 'templates/spline-loc.yml.j2')
with open(filename) as handle:
return Adapter(safe_load(handle)).configuration | def function[load_configuration, parameter[self]]:
constant[Loading configuration.]
variable[filename] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[__file__]]], constant[templates/spline-loc.yml.j2]]]
with call[name[open], parameter[name[filename]]] begin[:]
return[call[name[Adapter], parameter[call[name[safe_load], parameter[name[handle]]]]].configuration] | keyword[def] identifier[load_configuration] ( identifier[self] ):
literal[string]
identifier[filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ), literal[string] )
keyword[with] identifier[open] ( identifier[filename] ) keyword[as] identifier[handle] :
keyword[return] identifier[Adapter] ( identifier[safe_load] ( identifier[handle] )). identifier[configuration] | def load_configuration(self):
"""Loading configuration."""
filename = os.path.join(os.path.dirname(__file__), 'templates/spline-loc.yml.j2')
with open(filename) as handle:
return Adapter(safe_load(handle)).configuration # depends on [control=['with'], data=['handle']] |
def map_channels(mapping, cdim):
"""Create a :class:`CPermuation` based on a dict of channel mappings
For a given mapping in form of a dictionary, generate the channel
permutating circuit that achieves the specified mapping while leaving the
relative order of all non-specified channels intact.
Args:
mapping (dict): Input-output mapping of indices (zero-based)
``{in1:out1, in2:out2,...}``
cdim (int): The circuit dimension (number of channels)
Returns:
CPermutation: Circuit mapping the channels as specified
"""
n = cdim
free_values = list(range(n))
for v in mapping.values():
if v >= n:
raise ValueError('the mapping cannot take on values larger than '
'cdim - 1')
free_values.remove(v)
for k in mapping:
if k >= n:
raise ValueError('the mapping cannot map keys larger than '
'cdim - 1')
permutation = []
for k in range(n):
if k in mapping:
permutation.append(mapping[k])
else:
permutation.append(free_values.pop(0))
return CPermutation.create(tuple(permutation)) | def function[map_channels, parameter[mapping, cdim]]:
constant[Create a :class:`CPermuation` based on a dict of channel mappings
For a given mapping in form of a dictionary, generate the channel
permutating circuit that achieves the specified mapping while leaving the
relative order of all non-specified channels intact.
Args:
mapping (dict): Input-output mapping of indices (zero-based)
``{in1:out1, in2:out2,...}``
cdim (int): The circuit dimension (number of channels)
Returns:
CPermutation: Circuit mapping the channels as specified
]
variable[n] assign[=] name[cdim]
variable[free_values] assign[=] call[name[list], parameter[call[name[range], parameter[name[n]]]]]
for taget[name[v]] in starred[call[name[mapping].values, parameter[]]] begin[:]
if compare[name[v] greater_or_equal[>=] name[n]] begin[:]
<ast.Raise object at 0x7da2047e81c0>
call[name[free_values].remove, parameter[name[v]]]
for taget[name[k]] in starred[name[mapping]] begin[:]
if compare[name[k] greater_or_equal[>=] name[n]] begin[:]
<ast.Raise object at 0x7da2047e98d0>
variable[permutation] assign[=] list[[]]
for taget[name[k]] in starred[call[name[range], parameter[name[n]]]] begin[:]
if compare[name[k] in name[mapping]] begin[:]
call[name[permutation].append, parameter[call[name[mapping]][name[k]]]]
return[call[name[CPermutation].create, parameter[call[name[tuple], parameter[name[permutation]]]]]] | keyword[def] identifier[map_channels] ( identifier[mapping] , identifier[cdim] ):
literal[string]
identifier[n] = identifier[cdim]
identifier[free_values] = identifier[list] ( identifier[range] ( identifier[n] ))
keyword[for] identifier[v] keyword[in] identifier[mapping] . identifier[values] ():
keyword[if] identifier[v] >= identifier[n] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[free_values] . identifier[remove] ( identifier[v] )
keyword[for] identifier[k] keyword[in] identifier[mapping] :
keyword[if] identifier[k] >= identifier[n] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[permutation] =[]
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[n] ):
keyword[if] identifier[k] keyword[in] identifier[mapping] :
identifier[permutation] . identifier[append] ( identifier[mapping] [ identifier[k] ])
keyword[else] :
identifier[permutation] . identifier[append] ( identifier[free_values] . identifier[pop] ( literal[int] ))
keyword[return] identifier[CPermutation] . identifier[create] ( identifier[tuple] ( identifier[permutation] )) | def map_channels(mapping, cdim):
"""Create a :class:`CPermuation` based on a dict of channel mappings
For a given mapping in form of a dictionary, generate the channel
permutating circuit that achieves the specified mapping while leaving the
relative order of all non-specified channels intact.
Args:
mapping (dict): Input-output mapping of indices (zero-based)
``{in1:out1, in2:out2,...}``
cdim (int): The circuit dimension (number of channels)
Returns:
CPermutation: Circuit mapping the channels as specified
"""
n = cdim
free_values = list(range(n))
for v in mapping.values():
if v >= n:
raise ValueError('the mapping cannot take on values larger than cdim - 1') # depends on [control=['if'], data=[]]
free_values.remove(v) # depends on [control=['for'], data=['v']]
for k in mapping:
if k >= n:
raise ValueError('the mapping cannot map keys larger than cdim - 1') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']]
permutation = []
for k in range(n):
if k in mapping:
permutation.append(mapping[k]) # depends on [control=['if'], data=['k', 'mapping']]
else:
permutation.append(free_values.pop(0)) # depends on [control=['for'], data=['k']]
return CPermutation.create(tuple(permutation)) |
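# Worked example, sketched in a comment since CPermutation comes from the
# surrounding circuit-algebra package: on a 3-channel circuit, mapping input
# 0 to output 2 removes 2 from the free outputs, so channels 1 and 2 keep
# their relative order and fill outputs 0 and 1:
#     map_channels({0: 2}, cdim=3)  ->  CPermutation.create((2, 0, 1))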
def RegisterFreeformKey(cls, key, name, mean="com.apple.iTunes"):
"""Register a text key.
If the key you need to register is a simple one-to-one mapping
of MP4 freeform atom (----) and name to EasyMP4Tags key, then
you can use this function::
EasyMP4Tags.RegisterFreeformKey(
"musicbrainz_artistid", "MusicBrainz Artist Id")
"""
atomid = "----:" + mean + ":" + name
def getter(tags, key):
return [s.decode("utf-8", "replace") for s in tags[atomid]]
def setter(tags, key, value):
encoded = []
for v in value:
if not isinstance(v, text_type):
if PY3:
raise TypeError("%r not str" % v)
v = v.decode("utf-8")
encoded.append(v.encode("utf-8"))
tags[atomid] = encoded
def deleter(tags, key):
del(tags[atomid])
cls.RegisterKey(key, getter, setter, deleter) | def function[RegisterFreeformKey, parameter[cls, key, name, mean]]:
constant[Register a text key.
If the key you need to register is a simple one-to-one mapping
of MP4 freeform atom (----) and name to EasyMP4Tags key, then
you can use this function::
EasyMP4Tags.RegisterFreeformKey(
"musicbrainz_artistid", "MusicBrainz Artist Id")
]
variable[atomid] assign[=] binary_operation[binary_operation[binary_operation[constant[----:] + name[mean]] + constant[:]] + name[name]]
def function[getter, parameter[tags, key]]:
return[<ast.ListComp object at 0x7da1b20fb370>]
def function[setter, parameter[tags, key, value]]:
variable[encoded] assign[=] list[[]]
for taget[name[v]] in starred[name[value]] begin[:]
if <ast.UnaryOp object at 0x7da1b20f88e0> begin[:]
if name[PY3] begin[:]
<ast.Raise object at 0x7da1b20f8400>
variable[v] assign[=] call[name[v].decode, parameter[constant[utf-8]]]
call[name[encoded].append, parameter[call[name[v].encode, parameter[constant[utf-8]]]]]
call[name[tags]][name[atomid]] assign[=] name[encoded]
def function[deleter, parameter[tags, key]]:
<ast.Delete object at 0x7da1b20fa6b0>
call[name[cls].RegisterKey, parameter[name[key], name[getter], name[setter], name[deleter]]] | keyword[def] identifier[RegisterFreeformKey] ( identifier[cls] , identifier[key] , identifier[name] , identifier[mean] = literal[string] ):
literal[string]
identifier[atomid] = literal[string] + identifier[mean] + literal[string] + identifier[name]
keyword[def] identifier[getter] ( identifier[tags] , identifier[key] ):
keyword[return] [ identifier[s] . identifier[decode] ( literal[string] , literal[string] ) keyword[for] identifier[s] keyword[in] identifier[tags] [ identifier[atomid] ]]
keyword[def] identifier[setter] ( identifier[tags] , identifier[key] , identifier[value] ):
identifier[encoded] =[]
keyword[for] identifier[v] keyword[in] identifier[value] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[v] , identifier[text_type] ):
keyword[if] identifier[PY3] :
keyword[raise] identifier[TypeError] ( literal[string] % identifier[v] )
identifier[v] = identifier[v] . identifier[decode] ( literal[string] )
identifier[encoded] . identifier[append] ( identifier[v] . identifier[encode] ( literal[string] ))
identifier[tags] [ identifier[atomid] ]= identifier[encoded]
keyword[def] identifier[deleter] ( identifier[tags] , identifier[key] ):
keyword[del] ( identifier[tags] [ identifier[atomid] ])
identifier[cls] . identifier[RegisterKey] ( identifier[key] , identifier[getter] , identifier[setter] , identifier[deleter] ) | def RegisterFreeformKey(cls, key, name, mean='com.apple.iTunes'):
"""Register a text key.
If the key you need to register is a simple one-to-one mapping
of MP4 freeform atom (----) and name to EasyMP4Tags key, then
you can use this function::
EasyMP4Tags.RegisterFreeformKey(
"musicbrainz_artistid", "MusicBrainz Artist Id")
"""
atomid = '----:' + mean + ':' + name
def getter(tags, key):
return [s.decode('utf-8', 'replace') for s in tags[atomid]]
def setter(tags, key, value):
encoded = []
for v in value:
if not isinstance(v, text_type):
if PY3:
raise TypeError('%r not str' % v) # depends on [control=['if'], data=[]]
v = v.decode('utf-8') # depends on [control=['if'], data=[]]
encoded.append(v.encode('utf-8')) # depends on [control=['for'], data=['v']]
tags[atomid] = encoded
def deleter(tags, key):
del tags[atomid]
cls.RegisterKey(key, getter, setter, deleter) |
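# Hypothetical usage with mutagen's EasyMP4, extending the docstring above;
# once registered, the freeform atom behaves like any other easy tag key
# (the id below is a placeholder):
#     EasyMP4Tags.RegisterFreeformKey(
#         'musicbrainz_artistid', 'MusicBrainz Artist Id')
#     tags['musicbrainz_artistid'] = ['8970d868-0723-483b-a75b-51088913d3d4']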
def get_libraries(self, database=''):
"""return active enrichr library name.Offical API """
lib_url='http://amp.pharm.mssm.edu/%sEnrichr/datasetStatistics'%database
libs_json = json.loads(requests.get(lib_url).text)
libs = [lib['libraryName'] for lib in libs_json['statistics']]
return sorted(libs) | def function[get_libraries, parameter[self, database]]:
constant[return active enrichr library name.Offical API ]
variable[lib_url] assign[=] binary_operation[constant[http://amp.pharm.mssm.edu/%sEnrichr/datasetStatistics] <ast.Mod object at 0x7da2590d6920> name[database]]
variable[libs_json] assign[=] call[name[json].loads, parameter[call[name[requests].get, parameter[name[lib_url]]].text]]
variable[libs] assign[=] <ast.ListComp object at 0x7da18f7231c0>
return[call[name[sorted], parameter[name[libs]]]] | keyword[def] identifier[get_libraries] ( identifier[self] , identifier[database] = literal[string] ):
literal[string]
identifier[lib_url] = literal[string] % identifier[database]
identifier[libs_json] = identifier[json] . identifier[loads] ( identifier[requests] . identifier[get] ( identifier[lib_url] ). identifier[text] )
identifier[libs] =[ identifier[lib] [ literal[string] ] keyword[for] identifier[lib] keyword[in] identifier[libs_json] [ literal[string] ]]
keyword[return] identifier[sorted] ( identifier[libs] ) | def get_libraries(self, database=''):
"""return active enrichr library name.Offical API """
lib_url = 'http://amp.pharm.mssm.edu/%sEnrichr/datasetStatistics' % database
libs_json = json.loads(requests.get(lib_url).text)
libs = [lib['libraryName'] for lib in libs_json['statistics']]
return sorted(libs) |
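# Usage sketch (performs a live HTTP request against the public Enrichr
# endpoint; the owning object `gs` is hypothetical):
#     libs = gs.get_libraries()   # alphabetically sorted library names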
def add_col_features(self, col=None, degree=None):
""" Exponentiate columns of dataframe.
Basically this function squares/cubes a column.
e.g. df[col^2] = pow(df[col], degree) where degree=2.
Parameters
----------
        col : list(str)
            Columns to exponentiate.
        degree : list(int)
            Exponentiation degree for each column.
"""
if not col and not degree:
return
else:
if isinstance(col, list) and isinstance(degree, list):
if len(col) != len(degree):
print('col len: ', len(col))
print('degree len: ', len(degree))
raise ValueError('col and degree should have equal length.')
else:
if self.preprocessed_data.empty:
data = self.original_data
else:
data = self.preprocessed_data
for i in range(len(col)):
data.loc[:,col[i]+str(degree[i])] = pow(data.loc[:,col[i]],degree[i]) / pow(10,degree[i]-1)
self.preprocessed_data = data
else:
raise TypeError('col and degree should be lists.') | def function[add_col_features, parameter[self, col, degree]]:
constant[ Exponentiate columns of dataframe.
Basically this function squares/cubes a column.
e.g. df[col^2] = pow(df[col], degree) where degree=2.
Parameters
----------
col : list(str)
Column to exponentiate.
degree : list(str)
Exponentiation degree.
]
if <ast.BoolOp object at 0x7da18f8108e0> begin[:]
return[None] | keyword[def] identifier[add_col_features] ( identifier[self] , identifier[col] = keyword[None] , identifier[degree] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[col] keyword[and] keyword[not] identifier[degree] :
keyword[return]
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[col] , identifier[list] ) keyword[and] identifier[isinstance] ( identifier[degree] , identifier[list] ):
keyword[if] identifier[len] ( identifier[col] )!= identifier[len] ( identifier[degree] ):
identifier[print] ( literal[string] , identifier[len] ( identifier[col] ))
identifier[print] ( literal[string] , identifier[len] ( identifier[degree] ))
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
keyword[if] identifier[self] . identifier[preprocessed_data] . identifier[empty] :
identifier[data] = identifier[self] . identifier[original_data]
keyword[else] :
identifier[data] = identifier[self] . identifier[preprocessed_data]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[col] )):
identifier[data] . identifier[loc] [:, identifier[col] [ identifier[i] ]+ identifier[str] ( identifier[degree] [ identifier[i] ])]= identifier[pow] ( identifier[data] . identifier[loc] [:, identifier[col] [ identifier[i] ]], identifier[degree] [ identifier[i] ])/ identifier[pow] ( literal[int] , identifier[degree] [ identifier[i] ]- literal[int] )
identifier[self] . identifier[preprocessed_data] = identifier[data]
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] ) | def add_col_features(self, col=None, degree=None):
""" Exponentiate columns of dataframe.
Basically this function squares/cubes a column.
e.g. df[col^2] = pow(df[col], degree) where degree=2.
Parameters
----------
col : list(str)
Column to exponentiate.
degree : list(str)
Exponentiation degree.
"""
if not col and (not degree):
return # depends on [control=['if'], data=[]]
elif isinstance(col, list) and isinstance(degree, list):
if len(col) != len(degree):
print('col len: ', len(col))
print('degree len: ', len(degree))
raise ValueError('col and degree should have equal length.') # depends on [control=['if'], data=[]]
else:
if self.preprocessed_data.empty:
data = self.original_data # depends on [control=['if'], data=[]]
else:
data = self.preprocessed_data
for i in range(len(col)):
data.loc[:, col[i] + str(degree[i])] = pow(data.loc[:, col[i]], degree[i]) / pow(10, degree[i] - 1) # depends on [control=['for'], data=['i']]
self.preprocessed_data = data # depends on [control=['if'], data=[]]
else:
raise TypeError('col and degree should be lists.') |
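# Behaviour sketch, assuming `dm` is an instance of the surrounding class
# whose data contains a 'temp' column:
#     dm.add_col_features(col=['temp'], degree=[2])
# adds a 'temp2' column equal to temp**2 / 10**(2-1), i.e. each power is
# scaled down by a factor of 10 per extra degree.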
def info(ctx):
"""
Display status of PIV application.
"""
controller = ctx.obj['controller']
click.echo('PIV version: %d.%d.%d' % controller.version)
# Largest possible number of PIN tries to get back is 15
tries = controller.get_pin_tries()
tries = '15 or more.' if tries == 15 else tries
click.echo('PIN tries remaining: %s' % tries)
if controller.puk_blocked:
click.echo('PUK blocked.')
if controller.has_derived_key:
click.echo('Management key is derived from PIN.')
if controller.has_stored_key:
click.echo('Management key is stored on the YubiKey, protected by PIN.')
try:
chuid = b2a_hex(controller.get_data(OBJ.CHUID)).decode()
except APDUError as e:
if e.sw == SW.NOT_FOUND:
chuid = 'No data available.'
click.echo('CHUID:\t' + chuid)
try:
ccc = b2a_hex(controller.get_data(OBJ.CAPABILITY)).decode()
except APDUError as e:
if e.sw == SW.NOT_FOUND:
ccc = 'No data available.'
click.echo('CCC: \t' + ccc)
for (slot, cert) in controller.list_certificates().items():
click.echo('Slot %02x:' % slot)
try:
# Try to read out full DN, fallback to only CN.
            # Support for DN was added in cryptography 2.5
subject_dn = cert.subject.rfc4514_string()
issuer_dn = cert.issuer.rfc4514_string()
print_dn = True
except AttributeError:
print_dn = False
logger.debug('Failed to read DN, falling back to only CNs')
subject_cn = cert.subject.get_attributes_for_oid(x509.NameOID.COMMON_NAME)
subject_cn = subject_cn[0].value if subject_cn else 'None'
issuer_cn = cert.issuer.get_attributes_for_oid(x509.NameOID.COMMON_NAME)
issuer_cn = issuer_cn[0].value if issuer_cn else 'None'
except ValueError as e:
# Malformed certificates may throw ValueError
logger.debug('Failed parsing certificate', exc_info=e)
click.echo('\tMalformed certificate: {}'.format(e))
continue
fingerprint = b2a_hex(cert.fingerprint(hashes.SHA256())).decode('ascii')
algo = ALGO.from_public_key(cert.public_key())
serial = cert.serial_number
not_before = cert.not_valid_before
not_after = cert.not_valid_after
# Print out everything
click.echo('\tAlgorithm:\t%s' % algo.name)
if print_dn:
click.echo('\tSubject DN:\t%s' % subject_dn)
click.echo('\tIssuer DN:\t%s' % issuer_dn)
else:
click.echo('\tSubject CN:\t%s' % subject_cn)
click.echo('\tIssuer CN:\t%s' % issuer_cn)
click.echo('\tSerial:\t\t%s' % serial)
click.echo('\tFingerprint:\t%s' % fingerprint)
click.echo('\tNot before:\t%s' % not_before)
click.echo('\tNot after:\t%s' % not_after) | def function[info, parameter[ctx]]:
constant[
Display status of PIV application.
]
variable[controller] assign[=] call[name[ctx].obj][constant[controller]]
call[name[click].echo, parameter[binary_operation[constant[PIV version: %d.%d.%d] <ast.Mod object at 0x7da2590d6920> name[controller].version]]]
variable[tries] assign[=] call[name[controller].get_pin_tries, parameter[]]
variable[tries] assign[=] <ast.IfExp object at 0x7da18bcc9f30>
call[name[click].echo, parameter[binary_operation[constant[PIN tries remaining: %s] <ast.Mod object at 0x7da2590d6920> name[tries]]]]
if name[controller].puk_blocked begin[:]
call[name[click].echo, parameter[constant[PUK blocked.]]]
if name[controller].has_derived_key begin[:]
call[name[click].echo, parameter[constant[Management key is derived from PIN.]]]
if name[controller].has_stored_key begin[:]
call[name[click].echo, parameter[constant[Management key is stored on the YubiKey, protected by PIN.]]]
<ast.Try object at 0x7da2044c04f0>
call[name[click].echo, parameter[binary_operation[constant[CHUID: ] + name[chuid]]]]
<ast.Try object at 0x7da2044c3670>
call[name[click].echo, parameter[binary_operation[constant[CCC: ] + name[ccc]]]]
for taget[tuple[[<ast.Name object at 0x7da2044c26e0>, <ast.Name object at 0x7da2044c3a00>]]] in starred[call[call[name[controller].list_certificates, parameter[]].items, parameter[]]] begin[:]
call[name[click].echo, parameter[binary_operation[constant[Slot %02x:] <ast.Mod object at 0x7da2590d6920> name[slot]]]]
<ast.Try object at 0x7da2044c3400>
variable[fingerprint] assign[=] call[call[name[b2a_hex], parameter[call[name[cert].fingerprint, parameter[call[name[hashes].SHA256, parameter[]]]]]].decode, parameter[constant[ascii]]]
variable[algo] assign[=] call[name[ALGO].from_public_key, parameter[call[name[cert].public_key, parameter[]]]]
variable[serial] assign[=] name[cert].serial_number
variable[not_before] assign[=] name[cert].not_valid_before
variable[not_after] assign[=] name[cert].not_valid_after
call[name[click].echo, parameter[binary_operation[constant[ Algorithm: %s] <ast.Mod object at 0x7da2590d6920> name[algo].name]]]
if name[print_dn] begin[:]
call[name[click].echo, parameter[binary_operation[constant[ Subject DN: %s] <ast.Mod object at 0x7da2590d6920> name[subject_dn]]]]
call[name[click].echo, parameter[binary_operation[constant[ Issuer DN: %s] <ast.Mod object at 0x7da2590d6920> name[issuer_dn]]]]
call[name[click].echo, parameter[binary_operation[constant[ Serial: %s] <ast.Mod object at 0x7da2590d6920> name[serial]]]]
call[name[click].echo, parameter[binary_operation[constant[ Fingerprint: %s] <ast.Mod object at 0x7da2590d6920> name[fingerprint]]]]
call[name[click].echo, parameter[binary_operation[constant[ Not before: %s] <ast.Mod object at 0x7da2590d6920> name[not_before]]]]
call[name[click].echo, parameter[binary_operation[constant[ Not after: %s] <ast.Mod object at 0x7da2590d6920> name[not_after]]]] | keyword[def] identifier[info] ( identifier[ctx] ):
literal[string]
identifier[controller] = identifier[ctx] . identifier[obj] [ literal[string] ]
identifier[click] . identifier[echo] ( literal[string] % identifier[controller] . identifier[version] )
identifier[tries] = identifier[controller] . identifier[get_pin_tries] ()
identifier[tries] = literal[string] keyword[if] identifier[tries] == literal[int] keyword[else] identifier[tries]
identifier[click] . identifier[echo] ( literal[string] % identifier[tries] )
keyword[if] identifier[controller] . identifier[puk_blocked] :
identifier[click] . identifier[echo] ( literal[string] )
keyword[if] identifier[controller] . identifier[has_derived_key] :
identifier[click] . identifier[echo] ( literal[string] )
keyword[if] identifier[controller] . identifier[has_stored_key] :
identifier[click] . identifier[echo] ( literal[string] )
keyword[try] :
identifier[chuid] = identifier[b2a_hex] ( identifier[controller] . identifier[get_data] ( identifier[OBJ] . identifier[CHUID] )). identifier[decode] ()
keyword[except] identifier[APDUError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[sw] == identifier[SW] . identifier[NOT_FOUND] :
identifier[chuid] = literal[string]
identifier[click] . identifier[echo] ( literal[string] + identifier[chuid] )
keyword[try] :
identifier[ccc] = identifier[b2a_hex] ( identifier[controller] . identifier[get_data] ( identifier[OBJ] . identifier[CAPABILITY] )). identifier[decode] ()
keyword[except] identifier[APDUError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[sw] == identifier[SW] . identifier[NOT_FOUND] :
identifier[ccc] = literal[string]
identifier[click] . identifier[echo] ( literal[string] + identifier[ccc] )
keyword[for] ( identifier[slot] , identifier[cert] ) keyword[in] identifier[controller] . identifier[list_certificates] (). identifier[items] ():
identifier[click] . identifier[echo] ( literal[string] % identifier[slot] )
keyword[try] :
identifier[subject_dn] = identifier[cert] . identifier[subject] . identifier[rfc4514_string] ()
identifier[issuer_dn] = identifier[cert] . identifier[issuer] . identifier[rfc4514_string] ()
identifier[print_dn] = keyword[True]
keyword[except] identifier[AttributeError] :
identifier[print_dn] = keyword[False]
identifier[logger] . identifier[debug] ( literal[string] )
identifier[subject_cn] = identifier[cert] . identifier[subject] . identifier[get_attributes_for_oid] ( identifier[x509] . identifier[NameOID] . identifier[COMMON_NAME] )
identifier[subject_cn] = identifier[subject_cn] [ literal[int] ]. identifier[value] keyword[if] identifier[subject_cn] keyword[else] literal[string]
identifier[issuer_cn] = identifier[cert] . identifier[issuer] . identifier[get_attributes_for_oid] ( identifier[x509] . identifier[NameOID] . identifier[COMMON_NAME] )
identifier[issuer_cn] = identifier[issuer_cn] [ literal[int] ]. identifier[value] keyword[if] identifier[issuer_cn] keyword[else] literal[string]
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[exc_info] = identifier[e] )
identifier[click] . identifier[echo] ( literal[string] . identifier[format] ( identifier[e] ))
keyword[continue]
identifier[fingerprint] = identifier[b2a_hex] ( identifier[cert] . identifier[fingerprint] ( identifier[hashes] . identifier[SHA256] ())). identifier[decode] ( literal[string] )
identifier[algo] = identifier[ALGO] . identifier[from_public_key] ( identifier[cert] . identifier[public_key] ())
identifier[serial] = identifier[cert] . identifier[serial_number]
identifier[not_before] = identifier[cert] . identifier[not_valid_before]
identifier[not_after] = identifier[cert] . identifier[not_valid_after]
identifier[click] . identifier[echo] ( literal[string] % identifier[algo] . identifier[name] )
keyword[if] identifier[print_dn] :
identifier[click] . identifier[echo] ( literal[string] % identifier[subject_dn] )
identifier[click] . identifier[echo] ( literal[string] % identifier[issuer_dn] )
keyword[else] :
identifier[click] . identifier[echo] ( literal[string] % identifier[subject_cn] )
identifier[click] . identifier[echo] ( literal[string] % identifier[issuer_cn] )
identifier[click] . identifier[echo] ( literal[string] % identifier[serial] )
identifier[click] . identifier[echo] ( literal[string] % identifier[fingerprint] )
identifier[click] . identifier[echo] ( literal[string] % identifier[not_before] )
identifier[click] . identifier[echo] ( literal[string] % identifier[not_after] ) | def info(ctx):
"""
Display status of PIV application.
"""
controller = ctx.obj['controller']
click.echo('PIV version: %d.%d.%d' % controller.version)
# Largest possible number of PIN tries to get back is 15
tries = controller.get_pin_tries()
tries = '15 or more.' if tries == 15 else tries
click.echo('PIN tries remaining: %s' % tries)
if controller.puk_blocked:
click.echo('PUK blocked.') # depends on [control=['if'], data=[]]
if controller.has_derived_key:
click.echo('Management key is derived from PIN.') # depends on [control=['if'], data=[]]
if controller.has_stored_key:
click.echo('Management key is stored on the YubiKey, protected by PIN.') # depends on [control=['if'], data=[]]
try:
chuid = b2a_hex(controller.get_data(OBJ.CHUID)).decode() # depends on [control=['try'], data=[]]
except APDUError as e:
if e.sw == SW.NOT_FOUND:
chuid = 'No data available.' # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']]
click.echo('CHUID:\t' + chuid)
try:
ccc = b2a_hex(controller.get_data(OBJ.CAPABILITY)).decode() # depends on [control=['try'], data=[]]
except APDUError as e:
if e.sw == SW.NOT_FOUND:
ccc = 'No data available.' # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']]
click.echo('CCC: \t' + ccc)
for (slot, cert) in controller.list_certificates().items():
click.echo('Slot %02x:' % slot)
try:
# Try to read out full DN, fallback to only CN.
# Support for DN was added in crytography 2.5
subject_dn = cert.subject.rfc4514_string()
issuer_dn = cert.issuer.rfc4514_string()
print_dn = True # depends on [control=['try'], data=[]]
except AttributeError:
print_dn = False
logger.debug('Failed to read DN, falling back to only CNs')
subject_cn = cert.subject.get_attributes_for_oid(x509.NameOID.COMMON_NAME)
subject_cn = subject_cn[0].value if subject_cn else 'None'
issuer_cn = cert.issuer.get_attributes_for_oid(x509.NameOID.COMMON_NAME)
issuer_cn = issuer_cn[0].value if issuer_cn else 'None' # depends on [control=['except'], data=[]]
except ValueError as e:
# Malformed certificates may throw ValueError
logger.debug('Failed parsing certificate', exc_info=e)
click.echo('\tMalformed certificate: {}'.format(e))
continue # depends on [control=['except'], data=['e']]
fingerprint = b2a_hex(cert.fingerprint(hashes.SHA256())).decode('ascii')
algo = ALGO.from_public_key(cert.public_key())
serial = cert.serial_number
not_before = cert.not_valid_before
not_after = cert.not_valid_after
# Print out everything
click.echo('\tAlgorithm:\t%s' % algo.name)
if print_dn:
click.echo('\tSubject DN:\t%s' % subject_dn)
click.echo('\tIssuer DN:\t%s' % issuer_dn) # depends on [control=['if'], data=[]]
else:
click.echo('\tSubject CN:\t%s' % subject_cn)
click.echo('\tIssuer CN:\t%s' % issuer_cn)
click.echo('\tSerial:\t\t%s' % serial)
click.echo('\tFingerprint:\t%s' % fingerprint)
click.echo('\tNot before:\t%s' % not_before)
click.echo('\tNot after:\t%s' % not_after) # depends on [control=['for'], data=[]] |
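# CLI sketch: this command backs yubikey-manager's `ykman piv info`, which
# prints the PIV version, PIN tries, CHUID/CCC data and per-slot certificate
# details shown above.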
def get_path(self, path=''):
"""
Validate incoming path, if path is empty, build it from resource attributes,
If path is invalid - raise exception
:param path: path to remote file storage
:return: valid path or :raise Exception:
"""
if not path:
host = self.resource_config.backup_location
if ':' not in host:
scheme = self.resource_config.backup_type
if not scheme or scheme.lower() == self.DEFAULT_FILE_SYSTEM.lower():
scheme = self.file_system
scheme = re.sub('(:|/+).*$', '', scheme, re.DOTALL)
host = re.sub('^/+', '', host)
host = '{}://{}'.format(scheme, host)
path = host
url = UrlParser.parse_url(path)
if url[UrlParser.SCHEME].lower() in AUTHORIZATION_REQUIRED_STORAGE:
if UrlParser.USERNAME not in url or not url[UrlParser.USERNAME]:
url[UrlParser.USERNAME] = self.resource_config.backup_user
if UrlParser.PASSWORD not in url or not url[UrlParser.PASSWORD]:
url[UrlParser.PASSWORD] = self._api.DecryptPassword(self.resource_config.backup_password).Value
try:
result = UrlParser.build_url(url)
except Exception as e:
self._logger.error('Failed to build url: {}'.format(e))
raise Exception('ConfigurationOperations', 'Failed to build path url to remote host')
return result | def function[get_path, parameter[self, path]]:
constant[
Validate incoming path, if path is empty, build it from resource attributes,
If path is invalid - raise exception
:param path: path to remote file storage
:return: valid path or :raise Exception:
]
if <ast.UnaryOp object at 0x7da207f9b220> begin[:]
variable[host] assign[=] name[self].resource_config.backup_location
if compare[constant[:] <ast.NotIn object at 0x7da2590d7190> name[host]] begin[:]
variable[scheme] assign[=] name[self].resource_config.backup_type
if <ast.BoolOp object at 0x7da207f9ae90> begin[:]
variable[scheme] assign[=] name[self].file_system
variable[scheme] assign[=] call[name[re].sub, parameter[constant[(:|/+).*$], constant[], name[scheme], name[re].DOTALL]]
variable[host] assign[=] call[name[re].sub, parameter[constant[^/+], constant[], name[host]]]
variable[host] assign[=] call[constant[{}://{}].format, parameter[name[scheme], name[host]]]
variable[path] assign[=] name[host]
variable[url] assign[=] call[name[UrlParser].parse_url, parameter[name[path]]]
if compare[call[call[name[url]][name[UrlParser].SCHEME].lower, parameter[]] in name[AUTHORIZATION_REQUIRED_STORAGE]] begin[:]
if <ast.BoolOp object at 0x7da18f00ff10> begin[:]
call[name[url]][name[UrlParser].USERNAME] assign[=] name[self].resource_config.backup_user
if <ast.BoolOp object at 0x7da18f00ef80> begin[:]
call[name[url]][name[UrlParser].PASSWORD] assign[=] call[name[self]._api.DecryptPassword, parameter[name[self].resource_config.backup_password]].Value
<ast.Try object at 0x7da18ede60b0>
return[name[result]] | keyword[def] identifier[get_path] ( identifier[self] , identifier[path] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[path] :
identifier[host] = identifier[self] . identifier[resource_config] . identifier[backup_location]
keyword[if] literal[string] keyword[not] keyword[in] identifier[host] :
identifier[scheme] = identifier[self] . identifier[resource_config] . identifier[backup_type]
keyword[if] keyword[not] identifier[scheme] keyword[or] identifier[scheme] . identifier[lower] ()== identifier[self] . identifier[DEFAULT_FILE_SYSTEM] . identifier[lower] ():
identifier[scheme] = identifier[self] . identifier[file_system]
identifier[scheme] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[scheme] , identifier[re] . identifier[DOTALL] )
identifier[host] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[host] )
identifier[host] = literal[string] . identifier[format] ( identifier[scheme] , identifier[host] )
identifier[path] = identifier[host]
identifier[url] = identifier[UrlParser] . identifier[parse_url] ( identifier[path] )
keyword[if] identifier[url] [ identifier[UrlParser] . identifier[SCHEME] ]. identifier[lower] () keyword[in] identifier[AUTHORIZATION_REQUIRED_STORAGE] :
keyword[if] identifier[UrlParser] . identifier[USERNAME] keyword[not] keyword[in] identifier[url] keyword[or] keyword[not] identifier[url] [ identifier[UrlParser] . identifier[USERNAME] ]:
identifier[url] [ identifier[UrlParser] . identifier[USERNAME] ]= identifier[self] . identifier[resource_config] . identifier[backup_user]
keyword[if] identifier[UrlParser] . identifier[PASSWORD] keyword[not] keyword[in] identifier[url] keyword[or] keyword[not] identifier[url] [ identifier[UrlParser] . identifier[PASSWORD] ]:
identifier[url] [ identifier[UrlParser] . identifier[PASSWORD] ]= identifier[self] . identifier[_api] . identifier[DecryptPassword] ( identifier[self] . identifier[resource_config] . identifier[backup_password] ). identifier[Value]
keyword[try] :
identifier[result] = identifier[UrlParser] . identifier[build_url] ( identifier[url] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[_logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[e] ))
keyword[raise] identifier[Exception] ( literal[string] , literal[string] )
keyword[return] identifier[result] | def get_path(self, path=''):
"""
Validate incoming path, if path is empty, build it from resource attributes,
If path is invalid - raise exception
:param path: path to remote file storage
:return: valid path or :raise Exception:
"""
if not path:
host = self.resource_config.backup_location
if ':' not in host:
scheme = self.resource_config.backup_type
if not scheme or scheme.lower() == self.DEFAULT_FILE_SYSTEM.lower():
scheme = self.file_system # depends on [control=['if'], data=[]]
scheme = re.sub('(:|/+).*$', '', scheme, re.DOTALL)
host = re.sub('^/+', '', host)
host = '{}://{}'.format(scheme, host) # depends on [control=['if'], data=['host']]
path = host # depends on [control=['if'], data=[]]
url = UrlParser.parse_url(path)
if url[UrlParser.SCHEME].lower() in AUTHORIZATION_REQUIRED_STORAGE:
if UrlParser.USERNAME not in url or not url[UrlParser.USERNAME]:
url[UrlParser.USERNAME] = self.resource_config.backup_user # depends on [control=['if'], data=[]]
if UrlParser.PASSWORD not in url or not url[UrlParser.PASSWORD]:
url[UrlParser.PASSWORD] = self._api.DecryptPassword(self.resource_config.backup_password).Value # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
try:
result = UrlParser.build_url(url) # depends on [control=['try'], data=[]]
except Exception as e:
self._logger.error('Failed to build url: {}'.format(e))
raise Exception('ConfigurationOperations', 'Failed to build path url to remote host') # depends on [control=['except'], data=['e']]
return result |
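# Behaviour sketch with assumed resource attributes: backup_location
# '192.168.1.10/configs' plus backup_type 'ftp' and stored credentials
# would be expanded to a URL of the form
#     'ftp://<backup_user>:<decrypted password>@192.168.1.10/configs'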
def set_figure(self, figure, handle=None):
"""Call this with the Bokeh figure object."""
self.figure = figure
self.bkimage = None
self._push_handle = handle
wd = figure.plot_width
ht = figure.plot_height
self.configure_window(wd, ht)
doc = curdoc()
#self.logger.info(str(dir(doc)))
doc.add_periodic_callback(self.timer_cb, 50)
self.logger.info("figure set") | def function[set_figure, parameter[self, figure, handle]]:
constant[Call this with the Bokeh figure object.]
name[self].figure assign[=] name[figure]
name[self].bkimage assign[=] constant[None]
name[self]._push_handle assign[=] name[handle]
variable[wd] assign[=] name[figure].plot_width
variable[ht] assign[=] name[figure].plot_height
call[name[self].configure_window, parameter[name[wd], name[ht]]]
variable[doc] assign[=] call[name[curdoc], parameter[]]
call[name[doc].add_periodic_callback, parameter[name[self].timer_cb, constant[50]]]
call[name[self].logger.info, parameter[constant[figure set]]] | keyword[def] identifier[set_figure] ( identifier[self] , identifier[figure] , identifier[handle] = keyword[None] ):
literal[string]
identifier[self] . identifier[figure] = identifier[figure]
identifier[self] . identifier[bkimage] = keyword[None]
identifier[self] . identifier[_push_handle] = identifier[handle]
identifier[wd] = identifier[figure] . identifier[plot_width]
identifier[ht] = identifier[figure] . identifier[plot_height]
identifier[self] . identifier[configure_window] ( identifier[wd] , identifier[ht] )
identifier[doc] = identifier[curdoc] ()
identifier[doc] . identifier[add_periodic_callback] ( identifier[self] . identifier[timer_cb] , literal[int] )
identifier[self] . identifier[logger] . identifier[info] ( literal[string] ) | def set_figure(self, figure, handle=None):
"""Call this with the Bokeh figure object."""
self.figure = figure
self.bkimage = None
self._push_handle = handle
wd = figure.plot_width
ht = figure.plot_height
self.configure_window(wd, ht)
doc = curdoc()
#self.logger.info(str(dir(doc)))
doc.add_periodic_callback(self.timer_cb, 50)
self.logger.info('figure set') |
def add_index(self, mode, blob_id, path):
"""
Add new entry to the current index
:param tree:
:return:
"""
self.command_exec(['update-index', '--add', '--cacheinfo', mode, blob_id, path]) | def function[add_index, parameter[self, mode, blob_id, path]]:
constant[
Add new entry to the current index
:param tree:
:return:
]
call[name[self].command_exec, parameter[list[[<ast.Constant object at 0x7da20e960ac0>, <ast.Constant object at 0x7da20e961630>, <ast.Constant object at 0x7da20e962860>, <ast.Name object at 0x7da20e960c10>, <ast.Name object at 0x7da20e960070>, <ast.Name object at 0x7da20e963760>]]]] | keyword[def] identifier[add_index] ( identifier[self] , identifier[mode] , identifier[blob_id] , identifier[path] ):
literal[string]
identifier[self] . identifier[command_exec] ([ literal[string] , literal[string] , literal[string] , identifier[mode] , identifier[blob_id] , identifier[path] ]) | def add_index(self, mode, blob_id, path):
"""
Add new entry to the current index
:param tree:
:return:
"""
self.command_exec(['update-index', '--add', '--cacheinfo', mode, blob_id, path]) |
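# Usage sketch mirroring `git update-index --add --cacheinfo`; `repo` and
# `blob_id` are assumed to exist:
#     repo.add_index('100644', blob_id, 'docs/readme.md')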
def evaluate_call_args(self, calculator):
"""Interpreting this literal as a function call, return a 2-tuple of
``(args, kwargs)``.
"""
args = []
kwargs = OrderedDict() # Sass kwargs preserve order
for var_node, value_node in self.argpairs:
value = value_node.evaluate(calculator, divide=True)
if var_node is None:
# Positional
args.append(value)
else:
# Named
if not isinstance(var_node, Variable):
raise TypeError(
"Expected variable name, got {0!r}".format(var_node))
kwargs[var_node.name] = value
# Slurpy arguments go on the end of the args
if self.slurp:
args.extend(self.slurp.evaluate(calculator, divide=True))
return args, kwargs | def function[evaluate_call_args, parameter[self, calculator]]:
constant[Interpreting this literal as a function call, return a 2-tuple of
``(args, kwargs)``.
]
variable[args] assign[=] list[[]]
variable[kwargs] assign[=] call[name[OrderedDict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da207f00a60>, <ast.Name object at 0x7da207f034f0>]]] in starred[name[self].argpairs] begin[:]
variable[value] assign[=] call[name[value_node].evaluate, parameter[name[calculator]]]
if compare[name[var_node] is constant[None]] begin[:]
call[name[args].append, parameter[name[value]]]
if name[self].slurp begin[:]
call[name[args].extend, parameter[call[name[self].slurp.evaluate, parameter[name[calculator]]]]]
return[tuple[[<ast.Name object at 0x7da207f03100>, <ast.Name object at 0x7da207f02650>]]] | keyword[def] identifier[evaluate_call_args] ( identifier[self] , identifier[calculator] ):
literal[string]
identifier[args] =[]
identifier[kwargs] = identifier[OrderedDict] ()
keyword[for] identifier[var_node] , identifier[value_node] keyword[in] identifier[self] . identifier[argpairs] :
identifier[value] = identifier[value_node] . identifier[evaluate] ( identifier[calculator] , identifier[divide] = keyword[True] )
keyword[if] identifier[var_node] keyword[is] keyword[None] :
identifier[args] . identifier[append] ( identifier[value] )
keyword[else] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[var_node] , identifier[Variable] ):
keyword[raise] identifier[TypeError] (
literal[string] . identifier[format] ( identifier[var_node] ))
identifier[kwargs] [ identifier[var_node] . identifier[name] ]= identifier[value]
keyword[if] identifier[self] . identifier[slurp] :
identifier[args] . identifier[extend] ( identifier[self] . identifier[slurp] . identifier[evaluate] ( identifier[calculator] , identifier[divide] = keyword[True] ))
keyword[return] identifier[args] , identifier[kwargs] | def evaluate_call_args(self, calculator):
"""Interpreting this literal as a function call, return a 2-tuple of
``(args, kwargs)``.
"""
args = []
kwargs = OrderedDict() # Sass kwargs preserve order
for (var_node, value_node) in self.argpairs:
value = value_node.evaluate(calculator, divide=True)
if var_node is None:
# Positional
args.append(value) # depends on [control=['if'], data=[]]
else:
# Named
if not isinstance(var_node, Variable):
raise TypeError('Expected variable name, got {0!r}'.format(var_node)) # depends on [control=['if'], data=[]]
kwargs[var_node.name] = value # depends on [control=['for'], data=[]]
# Slurpy arguments go on the end of the args
if self.slurp:
args.extend(self.slurp.evaluate(calculator, divide=True)) # depends on [control=['if'], data=[]]
return (args, kwargs) |
def get_websensors(self):
"""
        Get the sensors whose tags match this object's tag, as a dictionary of the form ``{name: status}``
"""
return {i.name: i.status for i in self.system.sensors if self.tag & i.tags} | def function[get_websensors, parameter[self]]:
constant[
Get sensors with defined tag as a dictionary of format ``{name: status}``
]
return[<ast.DictComp object at 0x7da1b255ecb0>] | keyword[def] identifier[get_websensors] ( identifier[self] ):
literal[string]
keyword[return] { identifier[i] . identifier[name] : identifier[i] . identifier[status] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[system] . identifier[sensors] keyword[if] identifier[self] . identifier[tag] & identifier[i] . identifier[tags] } | def get_websensors(self):
"""
Get sensors with defined tag as a dictionary of format ``{name: status}``
"""
return {i.name: i.status for i in self.system.sensors if self.tag & i.tags} |
def purge(vm_, dirs=False, removables=None, **kwargs):
'''
    Recursively destroy and delete a persistent virtual machine; pass True for
    dirs to also delete the directories containing the virtual machine disk
    images - USE WITH EXTREME CAUTION!
    Pass removables=False to avoid deleting cdrom and floppy images. To avoid
    disrupting existing users the default is the more dangerous True; this will
    be changed to the safer False default value in Sodium.
:param vm_: domain name
:param dirs: pass True to remove containing directories
:param removables: pass True to remove removable devices
.. versionadded:: 2019.2.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.purge <domain> removables=False
'''
conn = __get_conn(**kwargs)
dom = _get_domain(conn, vm_)
disks = _get_disks(dom)
if removables is None:
salt.utils.versions.warn_until(
'Sodium',
'removables argument default value is True, but will be changed '
'to False by default in {version}. Please set to True to maintain '
'the current behavior in the future.'
)
removables = True
if VIRT_STATE_NAME_MAP.get(dom.info()[0], 'unknown') != 'shutdown' and dom.destroy() != 0:
return False
directories = set()
for disk in disks:
if not removables and disks[disk]['type'] in ['cdrom', 'floppy']:
continue
elif disks[disk].get('zfs', False):
# TODO create solution for 'dataset is busy'
time.sleep(3)
fs_name = disks[disk]['file'][len('/dev/zvol/'):]
log.info('Destroying VM ZFS volume %s', fs_name)
__salt__['zfs.destroy'](
name=fs_name,
force=True)
else:
os.remove(disks[disk]['file'])
directories.add(os.path.dirname(disks[disk]['file']))
if dirs:
for dir_ in directories:
shutil.rmtree(dir_)
if getattr(libvirt, 'VIR_DOMAIN_UNDEFINE_NVRAM', False):
# This one is only in 1.2.8+
try:
dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_NVRAM)
except libvirt.libvirtError:
dom.undefine()
else:
dom.undefine()
conn.close()
return True | def function[purge, parameter[vm_, dirs, removables]]:
constant[
Recursively destroy and delete a persistent virtual machine, pass True for
dir's to also delete the directories containing the virtual machine disk
images - USE WITH EXTREME CAUTION!
Pass removables=False to avoid deleting cdrom and floppy images. To avoid
disruption, the default but dangerous value is True. This will be changed
to the safer False default value in Sodium.
:param vm_: domain name
:param dirs: pass True to remove containing directories
:param removables: pass True to remove removable devices
.. versionadded:: 2019.2.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.purge <domain> removables=False
]
variable[conn] assign[=] call[name[__get_conn], parameter[]]
variable[dom] assign[=] call[name[_get_domain], parameter[name[conn], name[vm_]]]
variable[disks] assign[=] call[name[_get_disks], parameter[name[dom]]]
if compare[name[removables] is constant[None]] begin[:]
call[name[salt].utils.versions.warn_until, parameter[constant[Sodium], constant[removables argument default value is True, but will be changed to False by default in {version}. Please set to True to maintain the current behavior in the future.]]]
variable[removables] assign[=] constant[True]
if <ast.BoolOp object at 0x7da207f9b640> begin[:]
return[constant[False]]
variable[directories] assign[=] call[name[set], parameter[]]
for taget[name[disk]] in starred[name[disks]] begin[:]
if <ast.BoolOp object at 0x7da207f9b8b0> begin[:]
continue
if name[dirs] begin[:]
for taget[name[dir_]] in starred[name[directories]] begin[:]
call[name[shutil].rmtree, parameter[name[dir_]]]
if call[name[getattr], parameter[name[libvirt], constant[VIR_DOMAIN_UNDEFINE_NVRAM], constant[False]]] begin[:]
<ast.Try object at 0x7da207f999c0>
call[name[conn].close, parameter[]]
return[constant[True]] | keyword[def] identifier[purge] ( identifier[vm_] , identifier[dirs] = keyword[False] , identifier[removables] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[conn] = identifier[__get_conn] (** identifier[kwargs] )
identifier[dom] = identifier[_get_domain] ( identifier[conn] , identifier[vm_] )
identifier[disks] = identifier[_get_disks] ( identifier[dom] )
keyword[if] identifier[removables] keyword[is] keyword[None] :
identifier[salt] . identifier[utils] . identifier[versions] . identifier[warn_until] (
literal[string] ,
literal[string]
literal[string]
literal[string]
)
identifier[removables] = keyword[True]
keyword[if] identifier[VIRT_STATE_NAME_MAP] . identifier[get] ( identifier[dom] . identifier[info] ()[ literal[int] ], literal[string] )!= literal[string] keyword[and] identifier[dom] . identifier[destroy] ()!= literal[int] :
keyword[return] keyword[False]
identifier[directories] = identifier[set] ()
keyword[for] identifier[disk] keyword[in] identifier[disks] :
keyword[if] keyword[not] identifier[removables] keyword[and] identifier[disks] [ identifier[disk] ][ literal[string] ] keyword[in] [ literal[string] , literal[string] ]:
keyword[continue]
keyword[elif] identifier[disks] [ identifier[disk] ]. identifier[get] ( literal[string] , keyword[False] ):
identifier[time] . identifier[sleep] ( literal[int] )
identifier[fs_name] = identifier[disks] [ identifier[disk] ][ literal[string] ][ identifier[len] ( literal[string] ):]
identifier[log] . identifier[info] ( literal[string] , identifier[fs_name] )
identifier[__salt__] [ literal[string] ](
identifier[name] = identifier[fs_name] ,
identifier[force] = keyword[True] )
keyword[else] :
identifier[os] . identifier[remove] ( identifier[disks] [ identifier[disk] ][ literal[string] ])
identifier[directories] . identifier[add] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[disks] [ identifier[disk] ][ literal[string] ]))
keyword[if] identifier[dirs] :
keyword[for] identifier[dir_] keyword[in] identifier[directories] :
identifier[shutil] . identifier[rmtree] ( identifier[dir_] )
keyword[if] identifier[getattr] ( identifier[libvirt] , literal[string] , keyword[False] ):
keyword[try] :
identifier[dom] . identifier[undefineFlags] ( identifier[libvirt] . identifier[VIR_DOMAIN_UNDEFINE_NVRAM] )
keyword[except] identifier[libvirt] . identifier[libvirtError] :
identifier[dom] . identifier[undefine] ()
keyword[else] :
identifier[dom] . identifier[undefine] ()
identifier[conn] . identifier[close] ()
keyword[return] keyword[True] | def purge(vm_, dirs=False, removables=None, **kwargs):
"""
Recursively destroy and delete a persistent virtual machine, pass True for
dir's to also delete the directories containing the virtual machine disk
images - USE WITH EXTREME CAUTION!
Pass removables=False to avoid deleting cdrom and floppy images. To avoid
disruption, the default but dangerous value is True. This will be changed
to the safer False default value in Sodium.
:param vm_: domain name
:param dirs: pass True to remove containing directories
:param removables: pass True to remove removable devices
.. versionadded:: 2019.2.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.purge <domain> removables=False
"""
conn = __get_conn(**kwargs)
dom = _get_domain(conn, vm_)
disks = _get_disks(dom)
if removables is None:
salt.utils.versions.warn_until('Sodium', 'removables argument default value is True, but will be changed to False by default in {version}. Please set to True to maintain the current behavior in the future.')
removables = True # depends on [control=['if'], data=['removables']]
if VIRT_STATE_NAME_MAP.get(dom.info()[0], 'unknown') != 'shutdown' and dom.destroy() != 0:
return False # depends on [control=['if'], data=[]]
directories = set()
for disk in disks:
if not removables and disks[disk]['type'] in ['cdrom', 'floppy']:
continue # depends on [control=['if'], data=[]]
elif disks[disk].get('zfs', False):
# TODO create solution for 'dataset is busy'
time.sleep(3)
fs_name = disks[disk]['file'][len('/dev/zvol/'):]
log.info('Destroying VM ZFS volume %s', fs_name)
__salt__['zfs.destroy'](name=fs_name, force=True) # depends on [control=['if'], data=[]]
else:
os.remove(disks[disk]['file'])
directories.add(os.path.dirname(disks[disk]['file'])) # depends on [control=['for'], data=['disk']]
if dirs:
for dir_ in directories:
shutil.rmtree(dir_) # depends on [control=['for'], data=['dir_']] # depends on [control=['if'], data=[]]
if getattr(libvirt, 'VIR_DOMAIN_UNDEFINE_NVRAM', False):
# This one is only in 1.2.8+
try:
dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_NVRAM) # depends on [control=['try'], data=[]]
except libvirt.libvirtError:
dom.undefine() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
dom.undefine()
conn.close()
return True |
def add_line_wrappers(data_passed_from_pelican):
"""A function to read through each page and post as it comes through from Pelican, find all instances of triple-backtick (```...```) code blocks, and add an HTML wrapper to each line of each of those code blocks"""
if data_passed_from_pelican._content: # If the item passed from Pelican has a "content" attribute (i.e., if it's not an image file or something else like that). NOTE: data_passed_from_pelican.content (without an underscore in front of 'content') seems to be read-only, whereas data_passed_from_pelican._content is able to be overwritten. This is somewhat explained in an IRC log from 2013-02-03 from user alexis to user webdesignhero_ at https://botbot.me/freenode/pelican/2013-02-01/?tz=America/Los_Angeles.
full_content_of_page_or_post = data_passed_from_pelican._content
else:
return # Exit the function, essentially passing over the (non-text) file.
all_instances_of_pre_elements = re.findall('<pre>.*?</pre>', full_content_of_page_or_post, re.DOTALL) # Use a regular expression to find every instance of '<pre>' followed by anything up to the first matching '</pre>'. re.DOTALL puts python's regular expression engine ('re') into a mode where a dot ('.') matches absolutely anything, including newline characters.
    if(len(all_instances_of_pre_elements) > 0): # If the article/page HAS any <pre>...</pre> elements, go on. Otherwise, don't (to do so would inadvertently wipe out the output content for that article/page).
updated_full_content_of_page_or_post = full_content_of_page_or_post # This just gives this an initial value before going into the loop below.
# Go through each <pre> element instance that we found above, and parse it:
for pre_element_to_parse in all_instances_of_pre_elements:
# Wrap each line of the <pre>...</pre> section with <span class=code-line>...</span>, following http://bililite.com/blog/2012/08/05/line-numbering-in-pre-elements/. We'll use these to add line numbers using CSS later.
# Note that below, '^' is the beginning of a string, '$' is the end of a string, and '\n' is a newline.
            replacement_text_with_beginning_of_each_line_wrapped_in_span = re.sub(r'(<pre.*?>|\n(?!</pre>))','\\1<span class="code-line">',pre_element_to_parse) # The (?!...) here is a Negative Lookahead (cf. http://www.regular-expressions.info/lookaround.html). This full regular expression says "Give me all code snippets that start with <pre ****> or start with a newline (\n), but NOT if the newline is followed immediately with '</pre>'. Take whatever you find, and replace it with what you found (\1) followed immediately by '<span class="code-line">'.
# http://stackoverflow.com/a/14625628 explains why we need to escape the backslash in the capture group reference (the '\1'). In short, python will recognize it as "\x01" if it's not escaped.
            replacement_text_with_full_line_wrapped_in_span = re.sub(r'((?<!</pre>)$|(?<!</pre>)\n)','</span>\\1',replacement_text_with_beginning_of_each_line_wrapped_in_span) # This regular expression says "Give me all code snippets that are the end of a string or a newline (but not preceded by "</pre>" (this is a 'negative lookbehind,' '(?<!...)'), and replace whatever you found with '</span>' followed by whatever you found (\1).
updated_full_content_of_page_or_post = updated_full_content_of_page_or_post.replace(pre_element_to_parse,replacement_text_with_full_line_wrapped_in_span)
        # Replace the content of the page or post with our now-updated content (having gone through all instances of <pre> elements and updated them all, exiting the loop above).
data_passed_from_pelican._content = updated_full_content_of_page_or_post | def function[add_line_wrappers, parameter[data_passed_from_pelican]]:
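
# Standalone run of the same two substitutions on a toy <pre> block, outside
# of Pelican, to make the per-line wrapping visible:
import re

pre = '<pre>line one\nline two\n</pre>'
step1 = re.sub(r'(<pre.*?>|\n(?!</pre>))', '\\1<span class="code-line">', pre)
step2 = re.sub(r'((?<!</pre>)$|(?<!</pre>)\n)', '</span>\\1', step1)
print(step2)
# <pre><span class="code-line">line one</span>
# <span class="code-line">line two</span>
# </pre>
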
constant[A function to read through each page and post as it comes through from Pelican, find all instances of triple-backtick (```...```) code blocks, and add an HTML wrapper to each line of each of those code blocks]
if name[data_passed_from_pelican]._content begin[:]
variable[full_content_of_page_or_post] assign[=] name[data_passed_from_pelican]._content
variable[all_instances_of_pre_elements] assign[=] call[name[re].findall, parameter[constant[<pre>.*?</pre>], name[full_content_of_page_or_post], name[re].DOTALL]]
if compare[call[name[len], parameter[name[all_instances_of_pre_elements]]] greater[>] constant[0]] begin[:]
variable[updated_full_content_of_page_or_post] assign[=] name[full_content_of_page_or_post]
for taget[name[pre_element_to_parse]] in starred[name[all_instances_of_pre_elements]] begin[:]
variable[replacement_text_with_beginning_of_each_line_wrapped_in_span] assign[=] call[name[re].sub, parameter[constant[(<pre.*?>|\n(?!</pre>))], constant[\1<span class="code-line">], name[pre_element_to_parse]]]
variable[replacement_text_with_full_line_wrapped_in_span] assign[=] call[name[re].sub, parameter[constant[((?<!</pre>)$|(?<!</pre>)\n)], constant[</span>\1], name[replacement_text_with_beginning_of_each_line_wrapped_in_span]]]
variable[updated_full_content_of_page_or_post] assign[=] call[name[updated_full_content_of_page_or_post].replace, parameter[name[pre_element_to_parse], name[replacement_text_with_full_line_wrapped_in_span]]]
name[data_passed_from_pelican]._content assign[=] name[updated_full_content_of_page_or_post] | keyword[def] identifier[add_line_wrappers] ( identifier[data_passed_from_pelican] ):
literal[string]
keyword[if] identifier[data_passed_from_pelican] . identifier[_content] :
identifier[full_content_of_page_or_post] = identifier[data_passed_from_pelican] . identifier[_content]
keyword[else] :
keyword[return]
identifier[all_instances_of_pre_elements] = identifier[re] . identifier[findall] ( literal[string] , identifier[full_content_of_page_or_post] , identifier[re] . identifier[DOTALL] )
keyword[if] ( identifier[len] ( identifier[all_instances_of_pre_elements] )> literal[int] ):
identifier[updated_full_content_of_page_or_post] = identifier[full_content_of_page_or_post]
keyword[for] identifier[pre_element_to_parse] keyword[in] identifier[all_instances_of_pre_elements] :
identifier[replacement_text_with_beginning_of_each_line_wrapped_in_span] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[pre_element_to_parse] )
identifier[replacement_text_with_full_line_wrapped_in_span] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[replacement_text_with_beginning_of_each_line_wrapped_in_span] )
identifier[updated_full_content_of_page_or_post] = identifier[updated_full_content_of_page_or_post] . identifier[replace] ( identifier[pre_element_to_parse] , identifier[replacement_text_with_full_line_wrapped_in_span] )
identifier[data_passed_from_pelican] . identifier[_content] = identifier[updated_full_content_of_page_or_post] | def add_line_wrappers(data_passed_from_pelican):
"""A function to read through each page and post as it comes through from Pelican, find all instances of triple-backtick (```...```) code blocks, and add an HTML wrapper to each line of each of those code blocks"""
if data_passed_from_pelican._content: # If the item passed from Pelican has a "content" attribute (i.e., if it's not an image file or something else like that). NOTE: data_passed_from_pelican.content (without an underscore in front of 'content') seems to be read-only, whereas data_passed_from_pelican._content is able to be overwritten. This is somewhat explained in an IRC log from 2013-02-03 from user alexis to user webdesignhero_ at https://botbot.me/freenode/pelican/2013-02-01/?tz=America/Los_Angeles.
full_content_of_page_or_post = data_passed_from_pelican._content # depends on [control=['if'], data=[]]
else:
return # Exit the function, essentially passing over the (non-text) file.
all_instances_of_pre_elements = re.findall('<pre>.*?</pre>', full_content_of_page_or_post, re.DOTALL) # Use a regular expression to find every instance of '<pre>' followed by anything up to the first matching '</pre>'. re.DOTALL puts python's regular expression engine ('re') into a mode where a dot ('.') matches absolutely anything, including newline characters.
if len(all_instances_of_pre_elements) > 0: # If the article/page HAS any <pre>...</pre> elements, go on. Otherwise, don't (to do so would inadvertantly wipe out the output content for that article/page).
updated_full_content_of_page_or_post = full_content_of_page_or_post # This just gives this an initial value before going into the loop below.
# Go through each <pre> element instance that we found above, and parse it:
for pre_element_to_parse in all_instances_of_pre_elements:
# Wrap each line of the <pre>...</pre> section with <span class=code-line>...</span>, following http://bililite.com/blog/2012/08/05/line-numbering-in-pre-elements/. We'll use these to add line numbers using CSS later.
# Note that below, '^' is the beginning of a string, '$' is the end of a string, and '\n' is a newline.
replacement_text_with_beginning_of_each_line_wrapped_in_span = re.sub('(<pre.*?>|\\n(?!</pre>))', '\\1<span class="code-line">', pre_element_to_parse) # The (?!...) here is a Negative Lookahead (cf. http://www.regular-expressions.info/lookaround.html). This full regular expression says "Give me all code snippets that start with <pre ****> or start with a newline (\n), but NOT if the newline is followed immediately with '</pre>'. Take whatever you find, and replace it with what you found (\1) followed immediately by '<span class="code-lines">'.
# http://stackoverflow.com/a/14625628 explains why we need to escape the backslash in the capture group reference (the '\1'). In short, python will recognize it as "\x01" if it's not escaped.
replacement_text_with_full_line_wrapped_in_span = re.sub('((?<!</pre>)$|(?<!</pre>)\\n)', '</span>\\1', replacement_text_with_beginning_of_each_line_wrapped_in_span) # This regular expression says "Give me all code snippets that are the end of a string or a newline (but not preceeded by "</pre>" (this is a 'negative lookahead,' '(?<)'), and replace whatever you found with '</span'> followed by whatever you found (\1).
updated_full_content_of_page_or_post = updated_full_content_of_page_or_post.replace(pre_element_to_parse, replacement_text_with_full_line_wrapped_in_span) # depends on [control=['for'], data=['pre_element_to_parse']]
# Replace the content of the page or post with our now-updated content (having gone through all instances of <pre> elements and updated them all, exiting the loop above.
data_passed_from_pelican._content = updated_full_content_of_page_or_post # depends on [control=['if'], data=[]] |
async def update_data_status(self, **kwargs):
"""Update (PATCH) Data object.
:param kwargs: The dictionary of
:class:`~resolwe.flow.models.Data` attributes to be changed.
"""
await self._send_manager_command(ExecutorProtocol.UPDATE, extra_fields={
ExecutorProtocol.UPDATE_CHANGESET: kwargs
}) | <ast.AsyncFunctionDef object at 0x7da1b19b7580> | keyword[async] keyword[def] identifier[update_data_status] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[await] identifier[self] . identifier[_send_manager_command] ( identifier[ExecutorProtocol] . identifier[UPDATE] , identifier[extra_fields] ={
identifier[ExecutorProtocol] . identifier[UPDATE_CHANGESET] : identifier[kwargs]
}) | async def update_data_status(self, **kwargs):
"""Update (PATCH) Data object.
:param kwargs: The dictionary of
:class:`~resolwe.flow.models.Data` attributes to be changed.
"""
await self._send_manager_command(ExecutorProtocol.UPDATE, extra_fields={ExecutorProtocol.UPDATE_CHANGESET: kwargs}) |
def phonex(word, language="english"):
"""
    Short for "phone index": maps a word onto a sequence of phone clusters.
    Strips unsupported characters (for English, non-alpha characters).
"""
phone_variants = phoneticize(word)
mappings = cluster_phones(language)
results = []
for phone_variant in phone_variants:
try:
phonex_variant = tuple([mappings[phone] for phone in
phone_variant])
results.append(phonex_variant)
        except KeyError:
            print('Error:', word, phone_variant)
            exit(1)
return results | def function[phonex, parameter[word, language]]:
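
# Illustrative only: a made-up phone-to-cluster mapping showing how one
# pronunciation variant collapses into a cluster tuple (real mappings come
# from cluster_phones(language)).
mappings = {'K': 'stop', 'AE1': 'vowel', 'T': 'stop'}
phone_variant = ('K', 'AE1', 'T')  # "cat"
print(tuple(mappings[phone] for phone in phone_variant))
# ('stop', 'vowel', 'stop')
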
constant[
Short for phone index, maps a word onto a sequence of phone clusters
Strips non supported characters (for english, non alpha characters)
]
variable[phone_variants] assign[=] call[name[phoneticize], parameter[name[word]]]
variable[mappings] assign[=] call[name[cluster_phones], parameter[name[language]]]
variable[results] assign[=] list[[]]
for taget[name[phone_variant]] in starred[name[phone_variants]] begin[:]
<ast.Try object at 0x7da1b2351030>
return[name[results]] | keyword[def] identifier[phonex] ( identifier[word] , identifier[language] = literal[string] ):
literal[string]
identifier[phone_variants] = identifier[phoneticize] ( identifier[word] )
identifier[mappings] = identifier[cluster_phones] ( identifier[language] )
identifier[results] =[]
keyword[for] identifier[phone_variant] keyword[in] identifier[phone_variants] :
keyword[try] :
identifier[phonex_variant] = identifier[tuple] ([ identifier[mappings] [ identifier[phone] ] keyword[for] identifier[phone] keyword[in]
identifier[phone_variant] ])
identifier[results] . identifier[append] ( identifier[phonex_variant] )
keyword[except] :
identifier[print] ( literal[string] , identifier[word] , identifier[phone_variant] )
identifier[exit] ( literal[int] )
keyword[return] identifier[results] | def phonex(word, language='english'):
"""
Short for phone index, maps a word onto a sequence of phone clusters
Strips non supported characters (for english, non alpha characters)
"""
phone_variants = phoneticize(word)
mappings = cluster_phones(language)
results = []
for phone_variant in phone_variants:
try:
phonex_variant = tuple([mappings[phone] for phone in phone_variant])
results.append(phonex_variant) # depends on [control=['try'], data=[]]
except:
print('Error:', word, phone_variant)
exit(1) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['phone_variant']]
return results |
def _address_family(address):
"""
Attempt to determine the family of an address (or hostname)
    :return: socket.AF_INET or socket.AF_INET6, or socket.AF_UNSPEC if the
        address family could not be determined
"""
if address.startswith('[') and address.endswith(']'):
return socket.AF_INET6
for af in (socket.AF_INET, socket.AF_INET6):
try:
socket.inet_pton(af, address)
return af
except (ValueError, AttributeError, socket.error):
continue
return socket.AF_UNSPEC | def function[_address_family, parameter[address]]:
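
# Expected behaviour on a few literals; hostname resolution is never
# attempted, so bare names fall through to AF_UNSPEC:
import socket

names = {socket.AF_INET: 'IPv4', socket.AF_INET6: 'IPv6',
         socket.AF_UNSPEC: 'unknown'}
for addr in ('192.168.0.1', '::1', '[2001:db8::1]', 'example.com'):
    print(addr, '->', names[_address_family(addr)])
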
constant[
Attempt to determine the family of an address (or hostname)
:return: either socket.AF_INET or socket.AF_INET6 or socket.AF_UNSPEC if the address family
could not be determined
]
if <ast.BoolOp object at 0x7da1b1c99630> begin[:]
return[name[socket].AF_INET6]
for taget[name[af]] in starred[tuple[[<ast.Attribute object at 0x7da1b1cb15d0>, <ast.Attribute object at 0x7da1b1cb15a0>]]] begin[:]
<ast.Try object at 0x7da1b1cb1600>
return[name[socket].AF_UNSPEC] | keyword[def] identifier[_address_family] ( identifier[address] ):
literal[string]
keyword[if] identifier[address] . identifier[startswith] ( literal[string] ) keyword[and] identifier[address] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[socket] . identifier[AF_INET6]
keyword[for] identifier[af] keyword[in] ( identifier[socket] . identifier[AF_INET] , identifier[socket] . identifier[AF_INET6] ):
keyword[try] :
identifier[socket] . identifier[inet_pton] ( identifier[af] , identifier[address] )
keyword[return] identifier[af]
keyword[except] ( identifier[ValueError] , identifier[AttributeError] , identifier[socket] . identifier[error] ):
keyword[continue]
keyword[return] identifier[socket] . identifier[AF_UNSPEC] | def _address_family(address):
"""
Attempt to determine the family of an address (or hostname)
:return: either socket.AF_INET or socket.AF_INET6 or socket.AF_UNSPEC if the address family
could not be determined
"""
if address.startswith('[') and address.endswith(']'):
return socket.AF_INET6 # depends on [control=['if'], data=[]]
for af in (socket.AF_INET, socket.AF_INET6):
try:
socket.inet_pton(af, address)
return af # depends on [control=['try'], data=[]]
except (ValueError, AttributeError, socket.error):
continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['af']]
return socket.AF_UNSPEC |
def element_tree_collection_to_records(tree):
"""Take an ElementTree and converts the nodes into BibRecord records.
This function is for a tree root of collection as such:
<collection>
<record>
<!-- MARCXML -->
</record>
<record> ... </record>
</collection>
"""
from .bibrecord import create_record
records = []
collection = tree.getroot()
    for record_element in collection:  # direct iteration; getchildren() was removed in Python 3.9
marcxml = ET.tostring(record_element, encoding="utf-8")
record, status, errors = create_record(marcxml)
if errors:
print(str(status))
records.append(record)
return records | def function[element_tree_collection_to_records, parameter[tree]]:
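
# The splitting step on its own, without the bibrecord dependency: each
# child of <collection> serializes back to one stand-alone record string.
import xml.etree.ElementTree as ET

source = ('<collection>'
          '<record><controlfield tag="001">1</controlfield></record>'
          '<record><controlfield tag="001">2</controlfield></record>'
          '</collection>')
root = ET.fromstring(source)
for record_element in root:
    print(ET.tostring(record_element, encoding='unicode'))
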
constant[Take an ElementTree and converts the nodes into BibRecord records.
This function is for a tree root of collection as such:
<collection>
<record>
<!-- MARCXML -->
</record>
<record> ... </record>
</collection>
]
from relative_module[bibrecord] import module[create_record]
variable[records] assign[=] list[[]]
variable[collection] assign[=] call[name[tree].getroot, parameter[]]
for taget[name[record_element]] in starred[call[name[collection].getchildren, parameter[]]] begin[:]
variable[marcxml] assign[=] call[name[ET].tostring, parameter[name[record_element]]]
<ast.Tuple object at 0x7da18dc07e20> assign[=] call[name[create_record], parameter[name[marcxml]]]
if name[errors] begin[:]
call[name[print], parameter[call[name[str], parameter[name[status]]]]]
call[name[records].append, parameter[name[record]]]
return[name[records]] | keyword[def] identifier[element_tree_collection_to_records] ( identifier[tree] ):
literal[string]
keyword[from] . identifier[bibrecord] keyword[import] identifier[create_record]
identifier[records] =[]
identifier[collection] = identifier[tree] . identifier[getroot] ()
keyword[for] identifier[record_element] keyword[in] identifier[collection] . identifier[getchildren] ():
identifier[marcxml] = identifier[ET] . identifier[tostring] ( identifier[record_element] , identifier[encoding] = literal[string] )
identifier[record] , identifier[status] , identifier[errors] = identifier[create_record] ( identifier[marcxml] )
keyword[if] identifier[errors] :
identifier[print] ( identifier[str] ( identifier[status] ))
identifier[records] . identifier[append] ( identifier[record] )
keyword[return] identifier[records] | def element_tree_collection_to_records(tree):
"""Take an ElementTree and converts the nodes into BibRecord records.
This function is for a tree root of collection as such:
<collection>
<record>
<!-- MARCXML -->
</record>
<record> ... </record>
</collection>
"""
from .bibrecord import create_record
records = []
collection = tree.getroot()
for record_element in collection.getchildren():
marcxml = ET.tostring(record_element, encoding='utf-8')
(record, status, errors) = create_record(marcxml)
if errors:
print(str(status)) # depends on [control=['if'], data=[]]
records.append(record) # depends on [control=['for'], data=['record_element']]
return records |
def migrate_data(ignore: Sequence[str],
new_data_path: str,
old_data_path: str):
""" Copy everything in the app data to the root of the main data part
:param ignore: A list of files that should be ignored in the root of /data
:param new_data_path: Where the new data partition is mounted
:param old_data_path: Where the old date files are
"""
    # the new 'data' path is actually /var and /data is in /var/data
dest_data = os.path.join(new_data_path, 'data')
LOG.info(f"migrate_data: copying {old_data_path} to {dest_data}")
os.makedirs(dest_data, exist_ok=True)
with os.scandir(old_data_path) as scanner:
for entry in scanner:
if entry.name in ignore:
LOG.info(f"migrate_data: ignoring {entry.name}")
continue
src = os.path.join(old_data_path, entry.name)
dest = os.path.join(dest_data, entry.name)
if os.path.exists(dest):
LOG.info(f"migrate_data: removing dest tree {dest}")
shutil.rmtree(dest, ignore_errors=True)
if entry.is_dir():
LOG.info(f"migrate_data: copying tree {src}->{dest}")
shutil.copytree(src, dest, symlinks=True,
ignore=migrate_files_to_ignore)
else:
LOG.info(f"migrate_data: copying file {src}->{dest}")
shutil.copy2(src, dest) | def function[migrate_data, parameter[ignore, new_data_path, old_data_path]]:
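
# Self-contained dry run of the same copy pattern with plain shutil and
# temp directories (logging and the ignore hook are dropped for brevity):
import os, shutil, tempfile

old, new = tempfile.mkdtemp(), tempfile.mkdtemp()
with open(os.path.join(old, 'settings.json'), 'w') as f:
    f.write('{}')
os.mkdir(os.path.join(old, 'logs'))

dest_data = os.path.join(new, 'data')
os.makedirs(dest_data, exist_ok=True)
with os.scandir(old) as scanner:
    for entry in scanner:
        dest = os.path.join(dest_data, entry.name)
        if entry.is_dir():
            shutil.copytree(entry.path, dest, symlinks=True)
        else:
            shutil.copy2(entry.path, dest)
print(sorted(os.listdir(dest_data)))  # ['logs', 'settings.json']
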
constant[ Copy everything in the app data to the root of the main data part
:param ignore: A list of files that should be ignored in the root of /data
:param new_data_path: Where the new data partition is mounted
:param old_data_path: Where the old date files are
]
variable[dest_data] assign[=] call[name[os].path.join, parameter[name[new_data_path], constant[data]]]
call[name[LOG].info, parameter[<ast.JoinedStr object at 0x7da18ede7df0>]]
call[name[os].makedirs, parameter[name[dest_data]]]
with call[name[os].scandir, parameter[name[old_data_path]]] begin[:]
for taget[name[entry]] in starred[name[scanner]] begin[:]
if compare[name[entry].name in name[ignore]] begin[:]
call[name[LOG].info, parameter[<ast.JoinedStr object at 0x7da1b08a1db0>]]
continue
variable[src] assign[=] call[name[os].path.join, parameter[name[old_data_path], name[entry].name]]
variable[dest] assign[=] call[name[os].path.join, parameter[name[dest_data], name[entry].name]]
if call[name[os].path.exists, parameter[name[dest]]] begin[:]
call[name[LOG].info, parameter[<ast.JoinedStr object at 0x7da1b08a36a0>]]
call[name[shutil].rmtree, parameter[name[dest]]]
if call[name[entry].is_dir, parameter[]] begin[:]
call[name[LOG].info, parameter[<ast.JoinedStr object at 0x7da1b08a3dc0>]]
call[name[shutil].copytree, parameter[name[src], name[dest]]] | keyword[def] identifier[migrate_data] ( identifier[ignore] : identifier[Sequence] [ identifier[str] ],
identifier[new_data_path] : identifier[str] ,
identifier[old_data_path] : identifier[str] ):
literal[string]
identifier[dest_data] = identifier[os] . identifier[path] . identifier[join] ( identifier[new_data_path] , literal[string] )
identifier[LOG] . identifier[info] ( literal[string] )
identifier[os] . identifier[makedirs] ( identifier[dest_data] , identifier[exist_ok] = keyword[True] )
keyword[with] identifier[os] . identifier[scandir] ( identifier[old_data_path] ) keyword[as] identifier[scanner] :
keyword[for] identifier[entry] keyword[in] identifier[scanner] :
keyword[if] identifier[entry] . identifier[name] keyword[in] identifier[ignore] :
identifier[LOG] . identifier[info] ( literal[string] )
keyword[continue]
identifier[src] = identifier[os] . identifier[path] . identifier[join] ( identifier[old_data_path] , identifier[entry] . identifier[name] )
identifier[dest] = identifier[os] . identifier[path] . identifier[join] ( identifier[dest_data] , identifier[entry] . identifier[name] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[dest] ):
identifier[LOG] . identifier[info] ( literal[string] )
identifier[shutil] . identifier[rmtree] ( identifier[dest] , identifier[ignore_errors] = keyword[True] )
keyword[if] identifier[entry] . identifier[is_dir] ():
identifier[LOG] . identifier[info] ( literal[string] )
identifier[shutil] . identifier[copytree] ( identifier[src] , identifier[dest] , identifier[symlinks] = keyword[True] ,
identifier[ignore] = identifier[migrate_files_to_ignore] )
keyword[else] :
identifier[LOG] . identifier[info] ( literal[string] )
identifier[shutil] . identifier[copy2] ( identifier[src] , identifier[dest] ) | def migrate_data(ignore: Sequence[str], new_data_path: str, old_data_path: str):
""" Copy everything in the app data to the root of the main data part
:param ignore: A list of files that should be ignored in the root of /data
:param new_data_path: Where the new data partition is mounted
:param old_data_path: Where the old date files are
"""
# the new ’data’ path is actually /var and /data is in /var/data
dest_data = os.path.join(new_data_path, 'data')
LOG.info(f'migrate_data: copying {old_data_path} to {dest_data}')
os.makedirs(dest_data, exist_ok=True)
with os.scandir(old_data_path) as scanner:
for entry in scanner:
if entry.name in ignore:
LOG.info(f'migrate_data: ignoring {entry.name}')
continue # depends on [control=['if'], data=[]]
src = os.path.join(old_data_path, entry.name)
dest = os.path.join(dest_data, entry.name)
if os.path.exists(dest):
LOG.info(f'migrate_data: removing dest tree {dest}')
shutil.rmtree(dest, ignore_errors=True) # depends on [control=['if'], data=[]]
if entry.is_dir():
LOG.info(f'migrate_data: copying tree {src}->{dest}')
shutil.copytree(src, dest, symlinks=True, ignore=migrate_files_to_ignore) # depends on [control=['if'], data=[]]
else:
LOG.info(f'migrate_data: copying file {src}->{dest}')
shutil.copy2(src, dest) # depends on [control=['for'], data=['entry']] # depends on [control=['with'], data=['scanner']] |
def count_leaves(x):
"""
Return the number of non-sequence items in a given recursive sequence.
"""
if hasattr(x, 'keys'):
x = list(x.values())
if hasattr(x, '__getitem__'):
return sum(map(count_leaves, x))
return 1 | def function[count_leaves, parameter[x]]:
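
# Worked example; note that strings also provide __getitem__, so a string
# leaf would recurse forever -- the leaves here are numeric on purpose:
nested = {'a': [1, 2], 'b': {'c': 3, 'd': (4, 5)}}
print(count_leaves(nested))  # 5
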
constant[
Return the number of non-sequence items in a given recursive sequence.
]
if call[name[hasattr], parameter[name[x], constant[keys]]] begin[:]
variable[x] assign[=] call[name[list], parameter[call[name[x].values, parameter[]]]]
if call[name[hasattr], parameter[name[x], constant[__getitem__]]] begin[:]
return[call[name[sum], parameter[call[name[map], parameter[name[count_leaves], name[x]]]]]]
return[constant[1]] | keyword[def] identifier[count_leaves] ( identifier[x] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[x] , literal[string] ):
identifier[x] = identifier[list] ( identifier[x] . identifier[values] ())
keyword[if] identifier[hasattr] ( identifier[x] , literal[string] ):
keyword[return] identifier[sum] ( identifier[map] ( identifier[count_leaves] , identifier[x] ))
keyword[return] literal[int] | def count_leaves(x):
"""
Return the number of non-sequence items in a given recursive sequence.
"""
if hasattr(x, 'keys'):
x = list(x.values()) # depends on [control=['if'], data=[]]
if hasattr(x, '__getitem__'):
return sum(map(count_leaves, x)) # depends on [control=['if'], data=[]]
return 1 |
def _deserialize_int(data, nbytes=32, padding=0):
"""
    Read an `nbytes` bytes long big endian signed integer from `data`
:param data: sliceable buffer; symbolic buffer of Eth ABI encoded data
:param nbytes: number of bytes to read starting from least significant byte
:rtype: int or Expression
"""
assert isinstance(data, (bytearray, Array))
value = ABI._readBE(data, nbytes, padding=True)
value = Operators.SEXTEND(value, nbytes * 8, (nbytes + padding) * 8)
if not issymbolic(value):
# sign bit on
if value & (1 << (nbytes * 8 - 1)):
value = -(((~value) + 1) & ((1 << (nbytes * 8)) - 1))
return value | def function[_deserialize_int, parameter[data, nbytes, padding]]:
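
# The sign fold, worked concretely for nbytes=1: 0xFF reads as 255
# unsigned, the top bit is set, and the two's-complement fold gives -1
# (matching int.from_bytes(b'\xff', 'big', signed=True)):
nbytes = 1
value = 0xFF
if value & (1 << (nbytes * 8 - 1)):
    value = -(((~value) + 1) & ((1 << (nbytes * 8)) - 1))
print(value)  # -1
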
constant[
Read a `nbytes` bytes long big endian signed integer from `data` starting at `offset`
:param data: sliceable buffer; symbolic buffer of Eth ABI encoded data
:param nbytes: number of bytes to read starting from least significant byte
:rtype: int or Expression
]
assert[call[name[isinstance], parameter[name[data], tuple[[<ast.Name object at 0x7da18ede6aa0>, <ast.Name object at 0x7da18ede7160>]]]]]
variable[value] assign[=] call[name[ABI]._readBE, parameter[name[data], name[nbytes]]]
variable[value] assign[=] call[name[Operators].SEXTEND, parameter[name[value], binary_operation[name[nbytes] * constant[8]], binary_operation[binary_operation[name[nbytes] + name[padding]] * constant[8]]]]
if <ast.UnaryOp object at 0x7da18ede52d0> begin[:]
if binary_operation[name[value] <ast.BitAnd object at 0x7da2590d6b60> binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> binary_operation[binary_operation[name[nbytes] * constant[8]] - constant[1]]]] begin[:]
variable[value] assign[=] <ast.UnaryOp object at 0x7da18ede6530>
return[name[value]] | keyword[def] identifier[_deserialize_int] ( identifier[data] , identifier[nbytes] = literal[int] , identifier[padding] = literal[int] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[data] ,( identifier[bytearray] , identifier[Array] ))
identifier[value] = identifier[ABI] . identifier[_readBE] ( identifier[data] , identifier[nbytes] , identifier[padding] = keyword[True] )
identifier[value] = identifier[Operators] . identifier[SEXTEND] ( identifier[value] , identifier[nbytes] * literal[int] ,( identifier[nbytes] + identifier[padding] )* literal[int] )
keyword[if] keyword[not] identifier[issymbolic] ( identifier[value] ):
keyword[if] identifier[value] &( literal[int] <<( identifier[nbytes] * literal[int] - literal[int] )):
identifier[value] =-(((~ identifier[value] )+ literal[int] )&(( literal[int] <<( identifier[nbytes] * literal[int] ))- literal[int] ))
keyword[return] identifier[value] | def _deserialize_int(data, nbytes=32, padding=0):
"""
Read a `nbytes` bytes long big endian signed integer from `data` starting at `offset`
:param data: sliceable buffer; symbolic buffer of Eth ABI encoded data
:param nbytes: number of bytes to read starting from least significant byte
:rtype: int or Expression
"""
assert isinstance(data, (bytearray, Array))
value = ABI._readBE(data, nbytes, padding=True)
value = Operators.SEXTEND(value, nbytes * 8, (nbytes + padding) * 8)
if not issymbolic(value):
# sign bit on
if value & 1 << nbytes * 8 - 1:
value = -(~value + 1 & (1 << nbytes * 8) - 1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return value |
def module_definition_from_mirteFile_dict(man, d):
""" Creates a ModuleDefinition instance from the dictionary <d> from
a mirte-file for the Manager instance <man>. """
m = ModuleDefinition()
if 'inherits' not in d:
d['inherits'] = list()
if 'settings' not in d:
d['settings'] = dict()
if 'implementedBy' in d:
m.implementedBy = d['implementedBy']
m.inherits = set(d['inherits'])
for p in d['inherits']:
if p not in man.modules:
raise ValueError("No such module %s" % p)
m.deps.update(man.modules[p].deps)
m.vsettings.update(man.modules[p].vsettings)
m.inherits.update(man.modules[p].inherits)
m.run = m.run or man.modules[p].run
if 'run' in d:
m.run = d['run']
if len(m.inherits) == 0:
m.inherits = set(['module'])
for k, v in six.iteritems(d['settings']):
if 'type' not in v:
if k not in m.vsettings:
raise ValueError("No such existing vsetting %s" % k)
if 'default' in v:
m.vsettings[k] = copy.copy(m.vsettings[k])
m.vsettings[k].default = v['default']
continue
if v['type'] in man.modules:
m.deps[k] = DepDefinition(v['type'], v.get('allownull', False))
elif v['type'] in man.valueTypes:
m.vsettings[k] = VSettingDefinition(
v['type'],
(man.valueTypes[v['type']](v['default'])
if 'default' in v else None)
)
else:
raise ValueError("No such module or valuetype %s" % v)
return m | def function[module_definition_from_mirteFile_dict, parameter[man, d]]:
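
# Shape of the input dict, sketched from the branches above; the module and
# value-type names ('storage', 'int') are hypothetical and must exist on the
# Manager for the lookup branches to succeed:
d = {
    'inherits': ['module'],
    'run': True,
    'settings': {
        'port': {'type': 'int', 'default': 8080},   # valuetype setting
        'backend': {'type': 'storage'},             # dependency on a module
    },
}
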
constant[ Creates a ModuleDefinition instance from the dictionary <d> from
a mirte-file for the Manager instance <man>. ]
variable[m] assign[=] call[name[ModuleDefinition], parameter[]]
if compare[constant[inherits] <ast.NotIn object at 0x7da2590d7190> name[d]] begin[:]
call[name[d]][constant[inherits]] assign[=] call[name[list], parameter[]]
if compare[constant[settings] <ast.NotIn object at 0x7da2590d7190> name[d]] begin[:]
call[name[d]][constant[settings]] assign[=] call[name[dict], parameter[]]
if compare[constant[implementedBy] in name[d]] begin[:]
name[m].implementedBy assign[=] call[name[d]][constant[implementedBy]]
name[m].inherits assign[=] call[name[set], parameter[call[name[d]][constant[inherits]]]]
for taget[name[p]] in starred[call[name[d]][constant[inherits]]] begin[:]
if compare[name[p] <ast.NotIn object at 0x7da2590d7190> name[man].modules] begin[:]
<ast.Raise object at 0x7da1b1342350>
call[name[m].deps.update, parameter[call[name[man].modules][name[p]].deps]]
call[name[m].vsettings.update, parameter[call[name[man].modules][name[p]].vsettings]]
call[name[m].inherits.update, parameter[call[name[man].modules][name[p]].inherits]]
name[m].run assign[=] <ast.BoolOp object at 0x7da1b15b3910>
if compare[constant[run] in name[d]] begin[:]
name[m].run assign[=] call[name[d]][constant[run]]
if compare[call[name[len], parameter[name[m].inherits]] equal[==] constant[0]] begin[:]
name[m].inherits assign[=] call[name[set], parameter[list[[<ast.Constant object at 0x7da1b1521960>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b15213f0>, <ast.Name object at 0x7da1b1521db0>]]] in starred[call[name[six].iteritems, parameter[call[name[d]][constant[settings]]]]] begin[:]
if compare[constant[type] <ast.NotIn object at 0x7da2590d7190> name[v]] begin[:]
if compare[name[k] <ast.NotIn object at 0x7da2590d7190> name[m].vsettings] begin[:]
<ast.Raise object at 0x7da1b1520a30>
if compare[constant[default] in name[v]] begin[:]
call[name[m].vsettings][name[k]] assign[=] call[name[copy].copy, parameter[call[name[m].vsettings][name[k]]]]
call[name[m].vsettings][name[k]].default assign[=] call[name[v]][constant[default]]
continue
if compare[call[name[v]][constant[type]] in name[man].modules] begin[:]
call[name[m].deps][name[k]] assign[=] call[name[DepDefinition], parameter[call[name[v]][constant[type]], call[name[v].get, parameter[constant[allownull], constant[False]]]]]
return[name[m]] | keyword[def] identifier[module_definition_from_mirteFile_dict] ( identifier[man] , identifier[d] ):
literal[string]
identifier[m] = identifier[ModuleDefinition] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[d] :
identifier[d] [ literal[string] ]= identifier[list] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[d] :
identifier[d] [ literal[string] ]= identifier[dict] ()
keyword[if] literal[string] keyword[in] identifier[d] :
identifier[m] . identifier[implementedBy] = identifier[d] [ literal[string] ]
identifier[m] . identifier[inherits] = identifier[set] ( identifier[d] [ literal[string] ])
keyword[for] identifier[p] keyword[in] identifier[d] [ literal[string] ]:
keyword[if] identifier[p] keyword[not] keyword[in] identifier[man] . identifier[modules] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[p] )
identifier[m] . identifier[deps] . identifier[update] ( identifier[man] . identifier[modules] [ identifier[p] ]. identifier[deps] )
identifier[m] . identifier[vsettings] . identifier[update] ( identifier[man] . identifier[modules] [ identifier[p] ]. identifier[vsettings] )
identifier[m] . identifier[inherits] . identifier[update] ( identifier[man] . identifier[modules] [ identifier[p] ]. identifier[inherits] )
identifier[m] . identifier[run] = identifier[m] . identifier[run] keyword[or] identifier[man] . identifier[modules] [ identifier[p] ]. identifier[run]
keyword[if] literal[string] keyword[in] identifier[d] :
identifier[m] . identifier[run] = identifier[d] [ literal[string] ]
keyword[if] identifier[len] ( identifier[m] . identifier[inherits] )== literal[int] :
identifier[m] . identifier[inherits] = identifier[set] ([ literal[string] ])
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[d] [ literal[string] ]):
keyword[if] literal[string] keyword[not] keyword[in] identifier[v] :
keyword[if] identifier[k] keyword[not] keyword[in] identifier[m] . identifier[vsettings] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[k] )
keyword[if] literal[string] keyword[in] identifier[v] :
identifier[m] . identifier[vsettings] [ identifier[k] ]= identifier[copy] . identifier[copy] ( identifier[m] . identifier[vsettings] [ identifier[k] ])
identifier[m] . identifier[vsettings] [ identifier[k] ]. identifier[default] = identifier[v] [ literal[string] ]
keyword[continue]
keyword[if] identifier[v] [ literal[string] ] keyword[in] identifier[man] . identifier[modules] :
identifier[m] . identifier[deps] [ identifier[k] ]= identifier[DepDefinition] ( identifier[v] [ literal[string] ], identifier[v] . identifier[get] ( literal[string] , keyword[False] ))
keyword[elif] identifier[v] [ literal[string] ] keyword[in] identifier[man] . identifier[valueTypes] :
identifier[m] . identifier[vsettings] [ identifier[k] ]= identifier[VSettingDefinition] (
identifier[v] [ literal[string] ],
( identifier[man] . identifier[valueTypes] [ identifier[v] [ literal[string] ]]( identifier[v] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[v] keyword[else] keyword[None] )
)
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[v] )
keyword[return] identifier[m] | def module_definition_from_mirteFile_dict(man, d):
""" Creates a ModuleDefinition instance from the dictionary <d> from
a mirte-file for the Manager instance <man>. """
m = ModuleDefinition()
if 'inherits' not in d:
d['inherits'] = list() # depends on [control=['if'], data=['d']]
if 'settings' not in d:
d['settings'] = dict() # depends on [control=['if'], data=['d']]
if 'implementedBy' in d:
m.implementedBy = d['implementedBy'] # depends on [control=['if'], data=['d']]
m.inherits = set(d['inherits'])
for p in d['inherits']:
if p not in man.modules:
raise ValueError('No such module %s' % p) # depends on [control=['if'], data=['p']]
m.deps.update(man.modules[p].deps)
m.vsettings.update(man.modules[p].vsettings)
m.inherits.update(man.modules[p].inherits)
m.run = m.run or man.modules[p].run # depends on [control=['for'], data=['p']]
if 'run' in d:
m.run = d['run'] # depends on [control=['if'], data=['d']]
if len(m.inherits) == 0:
m.inherits = set(['module']) # depends on [control=['if'], data=[]]
for (k, v) in six.iteritems(d['settings']):
if 'type' not in v:
if k not in m.vsettings:
raise ValueError('No such existing vsetting %s' % k) # depends on [control=['if'], data=['k']]
if 'default' in v:
m.vsettings[k] = copy.copy(m.vsettings[k])
m.vsettings[k].default = v['default'] # depends on [control=['if'], data=['v']]
continue # depends on [control=['if'], data=['v']]
if v['type'] in man.modules:
m.deps[k] = DepDefinition(v['type'], v.get('allownull', False)) # depends on [control=['if'], data=[]]
elif v['type'] in man.valueTypes:
m.vsettings[k] = VSettingDefinition(v['type'], man.valueTypes[v['type']](v['default']) if 'default' in v else None) # depends on [control=['if'], data=[]]
else:
raise ValueError('No such module or valuetype %s' % v) # depends on [control=['for'], data=[]]
return m |
def _update_fields_with_objects(self):
""" Convert dict fields into objects, where appropriate """
# Update the photo target with photo objects
if self.target is not None:
if self.target_type == "photo":
self.target = Photo(self._client, self.target)
else:
raise NotImplementedError("Actions can only be assigned to "
"Photos") | def function[_update_fields_with_objects, parameter[self]]:
constant[ Convert dict fields into objects, where appropriate ]
if compare[name[self].target is_not constant[None]] begin[:]
if compare[name[self].target_type equal[==] constant[photo]] begin[:]
name[self].target assign[=] call[name[Photo], parameter[name[self]._client, name[self].target]] | keyword[def] identifier[_update_fields_with_objects] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[target] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[self] . identifier[target_type] == literal[string] :
identifier[self] . identifier[target] = identifier[Photo] ( identifier[self] . identifier[_client] , identifier[self] . identifier[target] )
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string]
literal[string] ) | def _update_fields_with_objects(self):
""" Convert dict fields into objects, where appropriate """
# Update the photo target with photo objects
if self.target is not None:
if self.target_type == 'photo':
self.target = Photo(self._client, self.target) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('Actions can only be assigned to Photos') # depends on [control=['if'], data=[]] |
def get_id2children(objs):
"""Get all parent item IDs for each item in dict keys."""
id2children = {}
for obj in objs:
_get_id2children(id2children, obj.item_id, obj)
return id2children | def function[get_id2children, parameter[objs]]:
constant[Get all parent item IDs for each item in dict keys.]
variable[id2children] assign[=] dictionary[[], []]
for taget[name[obj]] in starred[name[objs]] begin[:]
call[name[_get_id2children], parameter[name[id2children], name[obj].item_id, name[obj]]]
return[name[id2children]] | keyword[def] identifier[get_id2children] ( identifier[objs] ):
literal[string]
identifier[id2children] ={}
keyword[for] identifier[obj] keyword[in] identifier[objs] :
identifier[_get_id2children] ( identifier[id2children] , identifier[obj] . identifier[item_id] , identifier[obj] )
keyword[return] identifier[id2children] | def get_id2children(objs):
"""Get all parent item IDs for each item in dict keys."""
id2children = {}
for obj in objs:
_get_id2children(id2children, obj.item_id, obj) # depends on [control=['for'], data=['obj']]
return id2children |
def url(self):
""":class:`str`: The URL to the highscores page on Tibia.com containing the results."""
return self.get_url(self.world, self.category, self.vocation, self.page) | def function[url, parameter[self]]:
constant[:class:`str`: The URL to the highscores page on Tibia.com containing the results.]
return[call[name[self].get_url, parameter[name[self].world, name[self].category, name[self].vocation, name[self].page]]] | keyword[def] identifier[url] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[get_url] ( identifier[self] . identifier[world] , identifier[self] . identifier[category] , identifier[self] . identifier[vocation] , identifier[self] . identifier[page] ) | def url(self):
""":class:`str`: The URL to the highscores page on Tibia.com containing the results."""
return self.get_url(self.world, self.category, self.vocation, self.page) |
def public(self):
"""
Is this document visible to anyone?
"""
if self._public:
return self._public
elif not self.abstract:
return self.read_meta()._public
raise EmptyDocumentException() | def function[public, parameter[self]]:
constant[
Is this document visible to anyone?
]
if name[self]._public begin[:]
return[name[self]._public]
<ast.Raise object at 0x7da204960b50> | keyword[def] identifier[public] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_public] :
keyword[return] identifier[self] . identifier[_public]
keyword[elif] keyword[not] identifier[self] . identifier[abstract] :
keyword[return] identifier[self] . identifier[read_meta] (). identifier[_public]
keyword[raise] identifier[EmptyDocumentException] () | def public(self):
"""
Is this document visible to anyone?
"""
if self._public:
return self._public # depends on [control=['if'], data=[]]
elif not self.abstract:
return self.read_meta()._public # depends on [control=['if'], data=[]]
raise EmptyDocumentException() |
def _get_transformation_history(path):
"""
Checks for a transformations.json* file and returns the history.
"""
trans_json = glob.glob(os.path.join(path, "transformations.json*"))
if trans_json:
try:
with zopen(trans_json[0]) as f:
return json.load(f)["history"]
        except Exception:
return None
return None | def function[_get_transformation_history, parameter[path]]:
constant[
Checks for a transformations.json* file and returns the history.
]
variable[trans_json] assign[=] call[name[glob].glob, parameter[call[name[os].path.join, parameter[name[path], constant[transformations.json*]]]]]
if name[trans_json] begin[:]
<ast.Try object at 0x7da18eb559f0>
return[constant[None]] | keyword[def] identifier[_get_transformation_history] ( identifier[path] ):
literal[string]
identifier[trans_json] = identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] ))
keyword[if] identifier[trans_json] :
keyword[try] :
keyword[with] identifier[zopen] ( identifier[trans_json] [ literal[int] ]) keyword[as] identifier[f] :
keyword[return] identifier[json] . identifier[load] ( identifier[f] )[ literal[string] ]
keyword[except] :
keyword[return] keyword[None]
keyword[return] keyword[None] | def _get_transformation_history(path):
"""
Checks for a transformations.json* file and returns the history.
"""
trans_json = glob.glob(os.path.join(path, 'transformations.json*'))
if trans_json:
try:
with zopen(trans_json[0]) as f:
return json.load(f)['history'] # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except:
return None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return None |
def dimod_object_hook(obj):
"""JSON-decoding for dimod objects.
See Also:
:class:`json.JSONDecoder` for using custom decoders.
"""
if _is_sampleset_v2(obj):
# in the future we could handle subtypes but right now we just have the
# one
return SampleSet.from_serializable(obj)
elif _is_bqm_v2(obj):
# in the future we could handle subtypes but right now we just have the
# one
return BinaryQuadraticModel.from_serializable(obj)
return obj | def function[dimod_object_hook, parameter[obj]]:
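
# Round-trip sketch of the intended use -- hand the hook to json.loads
# (assumes the dimod package is installed and the helper predicates used
# above are in scope, as they are in dimod's own serialization module):
import json
import dimod

ss = dimod.SampleSet.from_samples([{'a': -1, 'b': +1}], 'SPIN', energy=[-1.0])
restored = json.loads(json.dumps(ss.to_serializable()),
                      object_hook=dimod_object_hook)
assert isinstance(restored, dimod.SampleSet)
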
constant[JSON-decoding for dimod objects.
See Also:
:class:`json.JSONDecoder` for using custom decoders.
]
if call[name[_is_sampleset_v2], parameter[name[obj]]] begin[:]
return[call[name[SampleSet].from_serializable, parameter[name[obj]]]]
return[name[obj]] | keyword[def] identifier[dimod_object_hook] ( identifier[obj] ):
literal[string]
keyword[if] identifier[_is_sampleset_v2] ( identifier[obj] ):
keyword[return] identifier[SampleSet] . identifier[from_serializable] ( identifier[obj] )
keyword[elif] identifier[_is_bqm_v2] ( identifier[obj] ):
keyword[return] identifier[BinaryQuadraticModel] . identifier[from_serializable] ( identifier[obj] )
keyword[return] identifier[obj] | def dimod_object_hook(obj):
"""JSON-decoding for dimod objects.
See Also:
:class:`json.JSONDecoder` for using custom decoders.
"""
if _is_sampleset_v2(obj):
# in the future we could handle subtypes but right now we just have the
# one
return SampleSet.from_serializable(obj) # depends on [control=['if'], data=[]]
elif _is_bqm_v2(obj):
# in the future we could handle subtypes but right now we just have the
# one
return BinaryQuadraticModel.from_serializable(obj) # depends on [control=['if'], data=[]]
return obj |
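
Since this is a standard `object_hook`, the natural entry point is `json.loads`; a round-trip sketch, assuming dimod is installed:

import json
import dimod

ss = dimod.SampleSet.from_samples([{"a": -1}], "SPIN", energy=0.0)
s = json.dumps(ss.to_serializable())
obj = json.loads(s, object_hook=dimod_object_hook)
# on a dimod release that emits the v2 schema, obj is a SampleSet again;
# dicts that match neither schema come back unchanged
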
def draw_separators(self):
"""Draw the lines separating the categories on the Canvas"""
total = 1
self._timeline.create_line((0, 1, self.pixel_width, 1))
for index, (category, label) in enumerate(self._category_labels.items()):
height = label.winfo_reqheight()
self._rows[category] = (total, total + height)
total += height
self._timeline.create_line((0, total, self.pixel_width, total))
pixel_height = total
self._timeline.config(height=pixel_height) | def function[draw_separators, parameter[self]]:
constant[Draw the lines separating the categories on the Canvas]
variable[total] assign[=] constant[1]
call[name[self]._timeline.create_line, parameter[tuple[[<ast.Constant object at 0x7da1b2359e10>, <ast.Constant object at 0x7da1b2358bb0>, <ast.Attribute object at 0x7da1b235a140>, <ast.Constant object at 0x7da1b2359b70>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b2359f60>, <ast.Tuple object at 0x7da1b23603a0>]]] in starred[call[name[enumerate], parameter[call[name[self]._category_labels.items, parameter[]]]]] begin[:]
variable[height] assign[=] call[name[label].winfo_reqheight, parameter[]]
call[name[self]._rows][name[category]] assign[=] tuple[[<ast.Name object at 0x7da1b2362ce0>, <ast.BinOp object at 0x7da1b23596c0>]]
<ast.AugAssign object at 0x7da1b235a3e0>
call[name[self]._timeline.create_line, parameter[tuple[[<ast.Constant object at 0x7da1b235a8c0>, <ast.Name object at 0x7da1b2359930>, <ast.Attribute object at 0x7da1b2358eb0>, <ast.Name object at 0x7da1b235a920>]]]]
variable[pixel_height] assign[=] name[total]
call[name[self]._timeline.config, parameter[]] | keyword[def] identifier[draw_separators] ( identifier[self] ):
literal[string]
identifier[total] = literal[int]
identifier[self] . identifier[_timeline] . identifier[create_line] (( literal[int] , literal[int] , identifier[self] . identifier[pixel_width] , literal[int] ))
keyword[for] identifier[index] ,( identifier[category] , identifier[label] ) keyword[in] identifier[enumerate] ( identifier[self] . identifier[_category_labels] . identifier[items] ()):
identifier[height] = identifier[label] . identifier[winfo_reqheight] ()
identifier[self] . identifier[_rows] [ identifier[category] ]=( identifier[total] , identifier[total] + identifier[height] )
identifier[total] += identifier[height]
identifier[self] . identifier[_timeline] . identifier[create_line] (( literal[int] , identifier[total] , identifier[self] . identifier[pixel_width] , identifier[total] ))
identifier[pixel_height] = identifier[total]
identifier[self] . identifier[_timeline] . identifier[config] ( identifier[height] = identifier[pixel_height] ) | def draw_separators(self):
"""Draw the lines separating the categories on the Canvas"""
total = 1
self._timeline.create_line((0, 1, self.pixel_width, 1))
for (index, (category, label)) in enumerate(self._category_labels.items()):
height = label.winfo_reqheight()
self._rows[category] = (total, total + height)
total += height
self._timeline.create_line((0, total, self.pixel_width, total)) # depends on [control=['for'], data=[]]
pixel_height = total
self._timeline.config(height=pixel_height) |
def values_for_enum(gtype):
"""Get all values for a enum (gtype)."""
g_type_class = gobject_lib.g_type_class_ref(gtype)
g_enum_class = ffi.cast('GEnumClass *', g_type_class)
values = []
# -1 since we always have a "last" member.
for i in range(0, g_enum_class.n_values - 1):
value = _to_string(g_enum_class.values[i].value_nick)
values.append(value)
return values | def function[values_for_enum, parameter[gtype]]:
    constant[Get all values for an enum (gtype).]
variable[g_type_class] assign[=] call[name[gobject_lib].g_type_class_ref, parameter[name[gtype]]]
variable[g_enum_class] assign[=] call[name[ffi].cast, parameter[constant[GEnumClass *], name[g_type_class]]]
variable[values] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], binary_operation[name[g_enum_class].n_values - constant[1]]]]] begin[:]
variable[value] assign[=] call[name[_to_string], parameter[call[name[g_enum_class].values][name[i]].value_nick]]
call[name[values].append, parameter[name[value]]]
return[name[values]] | keyword[def] identifier[values_for_enum] ( identifier[gtype] ):
literal[string]
identifier[g_type_class] = identifier[gobject_lib] . identifier[g_type_class_ref] ( identifier[gtype] )
identifier[g_enum_class] = identifier[ffi] . identifier[cast] ( literal[string] , identifier[g_type_class] )
identifier[values] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[g_enum_class] . identifier[n_values] - literal[int] ):
identifier[value] = identifier[_to_string] ( identifier[g_enum_class] . identifier[values] [ identifier[i] ]. identifier[value_nick] )
identifier[values] . identifier[append] ( identifier[value] )
keyword[return] identifier[values] | def values_for_enum(gtype):
"""Get all values for a enum (gtype)."""
g_type_class = gobject_lib.g_type_class_ref(gtype)
g_enum_class = ffi.cast('GEnumClass *', g_type_class)
values = []
# -1 since we always have a "last" member.
for i in range(0, g_enum_class.n_values - 1):
value = _to_string(g_enum_class.values[i].value_nick)
values.append(value) # depends on [control=['for'], data=['i']]
return values |
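
A pyvips-flavoured usage sketch; `type_from_name` is assumed to be a sibling helper that looks a GType up by name:

gtype = type_from_name("VipsInterpretation")   # assumed helper from the same module
print(values_for_enum(gtype))                  # value nicks, e.g. ['error', 'multiband', ...]
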
def _get_basin_response_term(self, C, z2pt5):
"""
Returns the basin response term defined in equation 20
"""
f_sed = np.zeros(len(z2pt5))
idx = z2pt5 < 1.0
f_sed[idx] = (C["c14"] + C["c15"] * float(self.CONSTS["SJ"])) *\
(z2pt5[idx] - 1.0)
idx = z2pt5 > 3.0
f_sed[idx] = C["c16"] * C["k3"] * exp(-0.75) *\
(1.0 - np.exp(-0.25 * (z2pt5[idx] - 3.0)))
return f_sed | def function[_get_basin_response_term, parameter[self, C, z2pt5]]:
constant[
Returns the basin response term defined in equation 20
]
variable[f_sed] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[z2pt5]]]]]
variable[idx] assign[=] compare[name[z2pt5] less[<] constant[1.0]]
call[name[f_sed]][name[idx]] assign[=] binary_operation[binary_operation[call[name[C]][constant[c14]] + binary_operation[call[name[C]][constant[c15]] * call[name[float], parameter[call[name[self].CONSTS][constant[SJ]]]]]] * binary_operation[call[name[z2pt5]][name[idx]] - constant[1.0]]]
variable[idx] assign[=] compare[name[z2pt5] greater[>] constant[3.0]]
call[name[f_sed]][name[idx]] assign[=] binary_operation[binary_operation[binary_operation[call[name[C]][constant[c16]] * call[name[C]][constant[k3]]] * call[name[exp], parameter[<ast.UnaryOp object at 0x7da18bcc9d80>]]] * binary_operation[constant[1.0] - call[name[np].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da18bcca950> * binary_operation[call[name[z2pt5]][name[idx]] - constant[3.0]]]]]]]
return[name[f_sed]] | keyword[def] identifier[_get_basin_response_term] ( identifier[self] , identifier[C] , identifier[z2pt5] ):
literal[string]
identifier[f_sed] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[z2pt5] ))
identifier[idx] = identifier[z2pt5] < literal[int]
identifier[f_sed] [ identifier[idx] ]=( identifier[C] [ literal[string] ]+ identifier[C] [ literal[string] ]* identifier[float] ( identifier[self] . identifier[CONSTS] [ literal[string] ]))*( identifier[z2pt5] [ identifier[idx] ]- literal[int] )
identifier[idx] = identifier[z2pt5] > literal[int]
identifier[f_sed] [ identifier[idx] ]= identifier[C] [ literal[string] ]* identifier[C] [ literal[string] ]* identifier[exp] (- literal[int] )*( literal[int] - identifier[np] . identifier[exp] (- literal[int] *( identifier[z2pt5] [ identifier[idx] ]- literal[int] )))
keyword[return] identifier[f_sed] | def _get_basin_response_term(self, C, z2pt5):
"""
Returns the basin response term defined in equation 20
"""
f_sed = np.zeros(len(z2pt5))
idx = z2pt5 < 1.0
f_sed[idx] = (C['c14'] + C['c15'] * float(self.CONSTS['SJ'])) * (z2pt5[idx] - 1.0)
idx = z2pt5 > 3.0
f_sed[idx] = C['c16'] * C['k3'] * exp(-0.75) * (1.0 - np.exp(-0.25 * (z2pt5[idx] - 3.0)))
return f_sed |
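
A worked sketch of the piecewise basin term with made-up coefficients (illustrative values, not the published ones): depths below 1 km take the shallow-sediment branch, depths above 3 km the deep-basin branch, and the 1-3 km band contributes zero.

import numpy as np

C = {"c14": -0.5, "c15": 0.0, "c16": 0.39, "k3": 1.88}   # dummy coefficients
SJ = 0                                                   # non-Japan site flag

z2pt5 = np.array([0.5, 2.0, 5.0])                        # basin depths in km
f_sed = np.zeros_like(z2pt5)
shallow, deep = z2pt5 < 1.0, z2pt5 > 3.0
f_sed[shallow] = (C["c14"] + C["c15"] * SJ) * (z2pt5[shallow] - 1.0)
f_sed[deep] = C["c16"] * C["k3"] * np.exp(-0.75) * (1.0 - np.exp(-0.25 * (z2pt5[deep] - 3.0)))
print(f_sed)                                             # middle entry stays 0.0
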
def writexlsx(self, path, sheetname="default"):
"""
Writes this table to an .xlsx file at the specified path.
If you'd like to specify a sheetname, you may do so.
If you'd like to write one workbook with different DataTables
for each sheet, import the `excel` function from acrylic. You
can see that code in `utils.py`.
Note that the outgoing file is an .xlsx file, so it'd make sense to
        name it that way.
"""
writer = ExcelRW.UnicodeWriter(path)
writer.set_active_sheet(sheetname)
writer.writerow(self.fields)
writer.writerows(self)
writer.save() | def function[writexlsx, parameter[self, path, sheetname]]:
constant[
Writes this table to an .xlsx file at the specified path.
If you'd like to specify a sheetname, you may do so.
If you'd like to write one workbook with different DataTables
for each sheet, import the `excel` function from acrylic. You
can see that code in `utils.py`.
Note that the outgoing file is an .xlsx file, so it'd make sense to
        name it that way.
]
variable[writer] assign[=] call[name[ExcelRW].UnicodeWriter, parameter[name[path]]]
call[name[writer].set_active_sheet, parameter[name[sheetname]]]
call[name[writer].writerow, parameter[name[self].fields]]
call[name[writer].writerows, parameter[name[self]]]
call[name[writer].save, parameter[]] | keyword[def] identifier[writexlsx] ( identifier[self] , identifier[path] , identifier[sheetname] = literal[string] ):
literal[string]
identifier[writer] = identifier[ExcelRW] . identifier[UnicodeWriter] ( identifier[path] )
identifier[writer] . identifier[set_active_sheet] ( identifier[sheetname] )
identifier[writer] . identifier[writerow] ( identifier[self] . identifier[fields] )
identifier[writer] . identifier[writerows] ( identifier[self] )
identifier[writer] . identifier[save] () | def writexlsx(self, path, sheetname='default'):
"""
Writes this table to an .xlsx file at the specified path.
If you'd like to specify a sheetname, you may do so.
If you'd like to write one workbook with different DataTables
for each sheet, import the `excel` function from acrylic. You
can see that code in `utils.py`.
Note that the outgoing file is an .xlsx file, so it'd make sense to
        name it that way.
"""
writer = ExcelRW.UnicodeWriter(path)
writer.set_active_sheet(sheetname)
writer.writerow(self.fields)
writer.writerows(self)
writer.save() |
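
Assuming an acrylic DataTable `dt` (the variable and sheet names are made up), usage is one call per sheet:

dt.writexlsx("report.xlsx", sheetname="metrics")
# for several tables in one workbook, use the `excel` helper the docstring
# points at instead of repeated writexlsx calls
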
def bind(self, alloy):
'''
Shallow copies this MethodParameter, and binds it to an alloy.
This is required before calling.
'''
param = MethodParameter(self.name, self.method, self.dependencies,
self.units, self.aliases, self._references)
param.alloy = alloy
return param | def function[bind, parameter[self, alloy]]:
constant[
Shallow copies this MethodParameter, and binds it to an alloy.
This is required before calling.
]
variable[param] assign[=] call[name[MethodParameter], parameter[name[self].name, name[self].method, name[self].dependencies, name[self].units, name[self].aliases, name[self]._references]]
name[param].alloy assign[=] name[alloy]
return[name[param]] | keyword[def] identifier[bind] ( identifier[self] , identifier[alloy] ):
literal[string]
identifier[param] = identifier[MethodParameter] ( identifier[self] . identifier[name] , identifier[self] . identifier[method] , identifier[self] . identifier[dependencies] ,
identifier[self] . identifier[units] , identifier[self] . identifier[aliases] , identifier[self] . identifier[_references] )
identifier[param] . identifier[alloy] = identifier[alloy]
keyword[return] identifier[param] | def bind(self, alloy):
"""
Shallow copies this MethodParameter, and binds it to an alloy.
This is required before calling.
"""
param = MethodParameter(self.name, self.method, self.dependencies, self.units, self.aliases, self._references)
param.alloy = alloy
return param |
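
A short sketch of the bind-then-use contract; `density` and `my_alloy` are hypothetical names:

bound = density.bind(my_alloy)    # density: a MethodParameter template
assert bound.alloy is my_alloy    # the shallow copy, not the template, carries the alloy
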
def encrypt_var(source, keys):
"""Attempts to encrypt a variable"""
cmd = flatten([gnupg_bin(), "--armor", "--encrypt", gnupg_verbose(),
recipients_args(keys)])
output = stderr_with_input(cmd, source)
return output | def function[encrypt_var, parameter[source, keys]]:
constant[Attempts to encrypt a variable]
variable[cmd] assign[=] call[name[flatten], parameter[list[[<ast.Call object at 0x7da1b19b08b0>, <ast.Constant object at 0x7da1b19b1150>, <ast.Constant object at 0x7da1b19b0070>, <ast.Call object at 0x7da1b19b35b0>, <ast.Call object at 0x7da1b19b0910>]]]]
variable[output] assign[=] call[name[stderr_with_input], parameter[name[cmd], name[source]]]
return[name[output]] | keyword[def] identifier[encrypt_var] ( identifier[source] , identifier[keys] ):
literal[string]
identifier[cmd] = identifier[flatten] ([ identifier[gnupg_bin] (), literal[string] , literal[string] , identifier[gnupg_verbose] (),
identifier[recipients_args] ( identifier[keys] )])
identifier[output] = identifier[stderr_with_input] ( identifier[cmd] , identifier[source] )
keyword[return] identifier[output] | def encrypt_var(source, keys):
"""Attempts to encrypt a variable"""
cmd = flatten([gnupg_bin(), '--armor', '--encrypt', gnupg_verbose(), recipients_args(keys)])
output = stderr_with_input(cmd, source)
return output |
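
A hedged call sketch; the recipient address is illustrative, and the helpers (`gnupg_bin`, `recipients_args`, `stderr_with_input`, ...) are the module's own:

ciphertext = encrypt_var("s3cret-value", ["alice@example.com"])  # hypothetical recipient
# ciphertext holds ASCII-armored PGP output, safe to store in version control
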
def _get_free_gpu(max_gpu_utilization=40, min_free_memory=0.5, num_gpu=1):
"""Get available GPUs according to utilization thresholds.
Args:
:max_gpu_utilization: percent utilization threshold to consider a GPU "free"
    :min_free_memory: minimum fraction of free memory to consider a GPU "free"
:num_gpu: number of requested GPUs
Returns:
A tuple of (available_gpus, minimum_free_memory), where available_gpus is a comma-delimited string of GPU ids, and minimum_free_memory
is the lowest amount of free memory available on the available_gpus.
"""
def get_gpu_info():
# Get the gpu information
gpu_info = subprocess.check_output(["nvidia-smi", "--format=csv,noheader,nounits", "--query-gpu=index,memory.total,memory.free,memory.used,utilization.gpu"]).decode()
gpu_info = gpu_info.split('\n')
gpu_info_array = []
# Check each gpu
for line in gpu_info:
if len(line) > 0:
gpu_id, total_memory, free_memory, used_memory, gpu_util = line.split(',')
gpu_memory_util = float(used_memory) / float(total_memory)
gpu_info_array.append((float(gpu_util), gpu_memory_util, gpu_id))
return(gpu_info_array)
# Read the gpu information multiple times
num_times_to_average = 5
current_array = []
for ind in range(num_times_to_average):
current_array.append(get_gpu_info())
time.sleep(1)
# Get number of gpus
num_gpus = len(current_array[0])
# Average the gpu information
avg_array = [(0, 0, str(x)) for x in range(num_gpus)]
for ind in range(num_times_to_average):
for gpu_ind in range(num_gpus):
avg_array[gpu_ind] = (avg_array[gpu_ind][0] + current_array[ind][gpu_ind][0], avg_array[gpu_ind][1] + current_array[ind][gpu_ind][1], avg_array[gpu_ind][2])
for gpu_ind in range(num_gpus):
avg_array[gpu_ind] = (float(avg_array[gpu_ind][0]) / num_times_to_average, float(avg_array[gpu_ind][1]) / num_times_to_average, avg_array[gpu_ind][2])
avg_array.sort()
gpus_found = 0
gpus_to_use = ""
free_memory = 1.0
# Return the least utilized GPUs if it's utilized less than max_gpu_utilization and amount of free memory is at least min_free_memory
# Otherwise, run in cpu only mode
for current_gpu in avg_array:
if current_gpu[0] < max_gpu_utilization and (1 - current_gpu[1]) > min_free_memory:
if gpus_found == 0:
gpus_to_use = current_gpu[2]
free_memory = 1 - current_gpu[1]
else:
gpus_to_use = gpus_to_use + "," + current_gpu[2]
free_memory = min(free_memory, 1 - current_gpu[1])
gpus_found = gpus_found + 1
if gpus_found == num_gpu:
break
return gpus_to_use, free_memory | def function[_get_free_gpu, parameter[max_gpu_utilization, min_free_memory, num_gpu]]:
constant[Get available GPUs according to utilization thresholds.
Args:
:max_gpu_utilization: percent utilization threshold to consider a GPU "free"
    :min_free_memory: minimum fraction of free memory to consider a GPU "free"
:num_gpu: number of requested GPUs
Returns:
A tuple of (available_gpus, minimum_free_memory), where available_gpus is a comma-delimited string of GPU ids, and minimum_free_memory
is the lowest amount of free memory available on the available_gpus.
]
def function[get_gpu_info, parameter[]]:
variable[gpu_info] assign[=] call[call[name[subprocess].check_output, parameter[list[[<ast.Constant object at 0x7da2046212d0>, <ast.Constant object at 0x7da204620b80>, <ast.Constant object at 0x7da2046223e0>]]]].decode, parameter[]]
variable[gpu_info] assign[=] call[name[gpu_info].split, parameter[constant[
]]]
variable[gpu_info_array] assign[=] list[[]]
for taget[name[line]] in starred[name[gpu_info]] begin[:]
if compare[call[name[len], parameter[name[line]]] greater[>] constant[0]] begin[:]
<ast.Tuple object at 0x7da204623eb0> assign[=] call[name[line].split, parameter[constant[,]]]
variable[gpu_memory_util] assign[=] binary_operation[call[name[float], parameter[name[used_memory]]] / call[name[float], parameter[name[total_memory]]]]
call[name[gpu_info_array].append, parameter[tuple[[<ast.Call object at 0x7da204620a60>, <ast.Name object at 0x7da204623ee0>, <ast.Name object at 0x7da204621390>]]]]
return[name[gpu_info_array]]
variable[num_times_to_average] assign[=] constant[5]
variable[current_array] assign[=] list[[]]
for taget[name[ind]] in starred[call[name[range], parameter[name[num_times_to_average]]]] begin[:]
call[name[current_array].append, parameter[call[name[get_gpu_info], parameter[]]]]
call[name[time].sleep, parameter[constant[1]]]
variable[num_gpus] assign[=] call[name[len], parameter[call[name[current_array]][constant[0]]]]
variable[avg_array] assign[=] <ast.ListComp object at 0x7da204621c00>
for taget[name[ind]] in starred[call[name[range], parameter[name[num_times_to_average]]]] begin[:]
for taget[name[gpu_ind]] in starred[call[name[range], parameter[name[num_gpus]]]] begin[:]
call[name[avg_array]][name[gpu_ind]] assign[=] tuple[[<ast.BinOp object at 0x7da204621930>, <ast.BinOp object at 0x7da204620400>, <ast.Subscript object at 0x7da2046203d0>]]
for taget[name[gpu_ind]] in starred[call[name[range], parameter[name[num_gpus]]]] begin[:]
call[name[avg_array]][name[gpu_ind]] assign[=] tuple[[<ast.BinOp object at 0x7da2046233a0>, <ast.BinOp object at 0x7da204620ca0>, <ast.Subscript object at 0x7da204620460>]]
call[name[avg_array].sort, parameter[]]
variable[gpus_found] assign[=] constant[0]
variable[gpus_to_use] assign[=] constant[]
variable[free_memory] assign[=] constant[1.0]
for taget[name[current_gpu]] in starred[name[avg_array]] begin[:]
if <ast.BoolOp object at 0x7da204621600> begin[:]
if compare[name[gpus_found] equal[==] constant[0]] begin[:]
variable[gpus_to_use] assign[=] call[name[current_gpu]][constant[2]]
variable[free_memory] assign[=] binary_operation[constant[1] - call[name[current_gpu]][constant[1]]]
variable[gpus_found] assign[=] binary_operation[name[gpus_found] + constant[1]]
if compare[name[gpus_found] equal[==] name[num_gpu]] begin[:]
break
return[tuple[[<ast.Name object at 0x7da20c76e9b0>, <ast.Name object at 0x7da20c76ceb0>]]] | keyword[def] identifier[_get_free_gpu] ( identifier[max_gpu_utilization] = literal[int] , identifier[min_free_memory] = literal[int] , identifier[num_gpu] = literal[int] ):
literal[string]
keyword[def] identifier[get_gpu_info] ():
identifier[gpu_info] = identifier[subprocess] . identifier[check_output] ([ literal[string] , literal[string] , literal[string] ]). identifier[decode] ()
identifier[gpu_info] = identifier[gpu_info] . identifier[split] ( literal[string] )
identifier[gpu_info_array] =[]
keyword[for] identifier[line] keyword[in] identifier[gpu_info] :
keyword[if] identifier[len] ( identifier[line] )> literal[int] :
identifier[gpu_id] , identifier[total_memory] , identifier[free_memory] , identifier[used_memory] , identifier[gpu_util] = identifier[line] . identifier[split] ( literal[string] )
identifier[gpu_memory_util] = identifier[float] ( identifier[used_memory] )/ identifier[float] ( identifier[total_memory] )
identifier[gpu_info_array] . identifier[append] (( identifier[float] ( identifier[gpu_util] ), identifier[gpu_memory_util] , identifier[gpu_id] ))
keyword[return] ( identifier[gpu_info_array] )
identifier[num_times_to_average] = literal[int]
identifier[current_array] =[]
keyword[for] identifier[ind] keyword[in] identifier[range] ( identifier[num_times_to_average] ):
identifier[current_array] . identifier[append] ( identifier[get_gpu_info] ())
identifier[time] . identifier[sleep] ( literal[int] )
identifier[num_gpus] = identifier[len] ( identifier[current_array] [ literal[int] ])
identifier[avg_array] =[( literal[int] , literal[int] , identifier[str] ( identifier[x] )) keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[num_gpus] )]
keyword[for] identifier[ind] keyword[in] identifier[range] ( identifier[num_times_to_average] ):
keyword[for] identifier[gpu_ind] keyword[in] identifier[range] ( identifier[num_gpus] ):
identifier[avg_array] [ identifier[gpu_ind] ]=( identifier[avg_array] [ identifier[gpu_ind] ][ literal[int] ]+ identifier[current_array] [ identifier[ind] ][ identifier[gpu_ind] ][ literal[int] ], identifier[avg_array] [ identifier[gpu_ind] ][ literal[int] ]+ identifier[current_array] [ identifier[ind] ][ identifier[gpu_ind] ][ literal[int] ], identifier[avg_array] [ identifier[gpu_ind] ][ literal[int] ])
keyword[for] identifier[gpu_ind] keyword[in] identifier[range] ( identifier[num_gpus] ):
identifier[avg_array] [ identifier[gpu_ind] ]=( identifier[float] ( identifier[avg_array] [ identifier[gpu_ind] ][ literal[int] ])/ identifier[num_times_to_average] , identifier[float] ( identifier[avg_array] [ identifier[gpu_ind] ][ literal[int] ])/ identifier[num_times_to_average] , identifier[avg_array] [ identifier[gpu_ind] ][ literal[int] ])
identifier[avg_array] . identifier[sort] ()
identifier[gpus_found] = literal[int]
identifier[gpus_to_use] = literal[string]
identifier[free_memory] = literal[int]
keyword[for] identifier[current_gpu] keyword[in] identifier[avg_array] :
keyword[if] identifier[current_gpu] [ literal[int] ]< identifier[max_gpu_utilization] keyword[and] ( literal[int] - identifier[current_gpu] [ literal[int] ])> identifier[min_free_memory] :
keyword[if] identifier[gpus_found] == literal[int] :
identifier[gpus_to_use] = identifier[current_gpu] [ literal[int] ]
identifier[free_memory] = literal[int] - identifier[current_gpu] [ literal[int] ]
keyword[else] :
identifier[gpus_to_use] = identifier[gpus_to_use] + literal[string] + identifier[current_gpu] [ literal[int] ]
identifier[free_memory] = identifier[min] ( identifier[free_memory] , literal[int] - identifier[current_gpu] [ literal[int] ])
identifier[gpus_found] = identifier[gpus_found] + literal[int]
keyword[if] identifier[gpus_found] == identifier[num_gpu] :
keyword[break]
keyword[return] identifier[gpus_to_use] , identifier[free_memory] | def _get_free_gpu(max_gpu_utilization=40, min_free_memory=0.5, num_gpu=1):
"""Get available GPUs according to utilization thresholds.
Args:
:max_gpu_utilization: percent utilization threshold to consider a GPU "free"
    :min_free_memory: minimum fraction of free memory to consider a GPU "free"
:num_gpu: number of requested GPUs
Returns:
A tuple of (available_gpus, minimum_free_memory), where available_gpus is a comma-delimited string of GPU ids, and minimum_free_memory
is the lowest amount of free memory available on the available_gpus.
"""
def get_gpu_info():
# Get the gpu information
gpu_info = subprocess.check_output(['nvidia-smi', '--format=csv,noheader,nounits', '--query-gpu=index,memory.total,memory.free,memory.used,utilization.gpu']).decode()
gpu_info = gpu_info.split('\n')
gpu_info_array = []
# Check each gpu
for line in gpu_info:
if len(line) > 0:
(gpu_id, total_memory, free_memory, used_memory, gpu_util) = line.split(',')
gpu_memory_util = float(used_memory) / float(total_memory)
gpu_info_array.append((float(gpu_util), gpu_memory_util, gpu_id)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return gpu_info_array
# Read the gpu information multiple times
num_times_to_average = 5
current_array = []
for ind in range(num_times_to_average):
current_array.append(get_gpu_info())
time.sleep(1) # depends on [control=['for'], data=[]]
# Get number of gpus
num_gpus = len(current_array[0])
# Average the gpu information
avg_array = [(0, 0, str(x)) for x in range(num_gpus)]
for ind in range(num_times_to_average):
for gpu_ind in range(num_gpus):
avg_array[gpu_ind] = (avg_array[gpu_ind][0] + current_array[ind][gpu_ind][0], avg_array[gpu_ind][1] + current_array[ind][gpu_ind][1], avg_array[gpu_ind][2]) # depends on [control=['for'], data=['gpu_ind']] # depends on [control=['for'], data=['ind']]
for gpu_ind in range(num_gpus):
avg_array[gpu_ind] = (float(avg_array[gpu_ind][0]) / num_times_to_average, float(avg_array[gpu_ind][1]) / num_times_to_average, avg_array[gpu_ind][2]) # depends on [control=['for'], data=['gpu_ind']]
avg_array.sort()
gpus_found = 0
gpus_to_use = ''
free_memory = 1.0
# Return the least utilized GPUs if it's utilized less than max_gpu_utilization and amount of free memory is at least min_free_memory
# Otherwise, run in cpu only mode
for current_gpu in avg_array:
if current_gpu[0] < max_gpu_utilization and 1 - current_gpu[1] > min_free_memory:
if gpus_found == 0:
gpus_to_use = current_gpu[2]
free_memory = 1 - current_gpu[1] # depends on [control=['if'], data=[]]
else:
gpus_to_use = gpus_to_use + ',' + current_gpu[2]
free_memory = min(free_memory, 1 - current_gpu[1])
gpus_found = gpus_found + 1 # depends on [control=['if'], data=[]]
if gpus_found == num_gpu:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['current_gpu']]
return (gpus_to_use, free_memory) |
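
The usual consumer of a helper like this is `CUDA_VISIBLE_DEVICES`; a sketch, assuming `nvidia-smi` is on PATH:

import os

gpus, free = _get_free_gpu(max_gpu_utilization=40, min_free_memory=0.5, num_gpu=2)
if gpus:
    os.environ["CUDA_VISIBLE_DEVICES"] = gpus   # e.g. "1,3"
    print(f"using GPUs {gpus}; min free-memory fraction {free:.2f}")
else:
    print("no GPU met the thresholds; running CPU-only")
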
def CheckCondition(condition, check_object):
"""Check if a condition matches an object.
Args:
condition: A string condition e.g. "os == 'Windows'"
check_object: Object to validate, e.g. an rdf_client.KnowledgeBase()
Returns:
True or False depending on whether the condition matches.
Raises:
ConditionError: If condition is bad.
"""
try:
of = objectfilter.Parser(condition).Parse()
compiled_filter = of.Compile(objectfilter.BaseFilterImplementation)
return compiled_filter.Matches(check_object)
except objectfilter.Error as e:
raise ConditionError(e) | def function[CheckCondition, parameter[condition, check_object]]:
constant[Check if a condition matches an object.
Args:
condition: A string condition e.g. "os == 'Windows'"
check_object: Object to validate, e.g. an rdf_client.KnowledgeBase()
Returns:
True or False depending on whether the condition matches.
Raises:
ConditionError: If condition is bad.
]
<ast.Try object at 0x7da1b1c0c5b0> | keyword[def] identifier[CheckCondition] ( identifier[condition] , identifier[check_object] ):
literal[string]
keyword[try] :
identifier[of] = identifier[objectfilter] . identifier[Parser] ( identifier[condition] ). identifier[Parse] ()
identifier[compiled_filter] = identifier[of] . identifier[Compile] ( identifier[objectfilter] . identifier[BaseFilterImplementation] )
keyword[return] identifier[compiled_filter] . identifier[Matches] ( identifier[check_object] )
keyword[except] identifier[objectfilter] . identifier[Error] keyword[as] identifier[e] :
keyword[raise] identifier[ConditionError] ( identifier[e] ) | def CheckCondition(condition, check_object):
"""Check if a condition matches an object.
Args:
condition: A string condition e.g. "os == 'Windows'"
check_object: Object to validate, e.g. an rdf_client.KnowledgeBase()
Returns:
True or False depending on whether the condition matches.
Raises:
ConditionError: If condition is bad.
"""
try:
of = objectfilter.Parser(condition).Parse()
compiled_filter = of.Compile(objectfilter.BaseFilterImplementation)
return compiled_filter.Matches(check_object) # depends on [control=['try'], data=[]]
except objectfilter.Error as e:
raise ConditionError(e) # depends on [control=['except'], data=['e']] |
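
A usage sketch against a GRR knowledge base; the import path varies across GRR versions, so treat it as an assumption:

from grr.lib.rdfvalues import client as rdf_client  # assumed import path

kb = rdf_client.KnowledgeBase(os="Windows")
print(CheckCondition("os == 'Windows'", kb))        # True
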
def acgtn_only(infile, outfile):
    '''Replace every non-acgtn (case insensitive) character with an N'''
f = utils.open_file_write(outfile)
for seq in sequences.file_reader(infile):
seq.replace_non_acgt()
print(seq, file=f)
utils.close(f) | def function[acgtn_only, parameter[infile, outfile]]:
constant[Replace every non-acgtn (case insensitve) character with an N]
variable[f] assign[=] call[name[utils].open_file_write, parameter[name[outfile]]]
for taget[name[seq]] in starred[call[name[sequences].file_reader, parameter[name[infile]]]] begin[:]
call[name[seq].replace_non_acgt, parameter[]]
call[name[print], parameter[name[seq]]]
call[name[utils].close, parameter[name[f]]] | keyword[def] identifier[acgtn_only] ( identifier[infile] , identifier[outfile] ):
literal[string]
identifier[f] = identifier[utils] . identifier[open_file_write] ( identifier[outfile] )
keyword[for] identifier[seq] keyword[in] identifier[sequences] . identifier[file_reader] ( identifier[infile] ):
identifier[seq] . identifier[replace_non_acgt] ()
identifier[print] ( identifier[seq] , identifier[file] = identifier[f] )
identifier[utils] . identifier[close] ( identifier[f] ) | def acgtn_only(infile, outfile):
"""Replace every non-acgtn (case insensitve) character with an N"""
f = utils.open_file_write(outfile)
for seq in sequences.file_reader(infile):
seq.replace_non_acgt()
print(seq, file=f) # depends on [control=['for'], data=['seq']]
utils.close(f) |
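
To make the per-character transformation concrete, here is a plain-regex sketch of what `replace_non_acgt` presumably does on one sequence string:

import re

def replace_non_acgt(seq):
    # every character outside ACGTN (either case) becomes N
    return re.sub(r"[^acgtnACGTN]", "N", seq)

assert replace_non_acgt("ACRYGT-") == "ACNNGTN"
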
def data_filler_user_agent(self, number_of_rows, pipe):
'''creates keys with user agent data
'''
try:
for i in range(number_of_rows):
pipe.hmset('user_agent:%s' % i, {
'id': rnd_id_generator(self),
'ip': self.faker.ipv4(),
'countrycode': self.faker.country_code(),
'useragent': self.faker.user_agent()
})
pipe.execute()
logger.warning('user_agent Commits are successful after write job!', extra=d)
except Exception as e:
logger.error(e, extra=d) | def function[data_filler_user_agent, parameter[self, number_of_rows, pipe]]:
constant[creates keys with user agent data
]
<ast.Try object at 0x7da1b08d9090> | keyword[def] identifier[data_filler_user_agent] ( identifier[self] , identifier[number_of_rows] , identifier[pipe] ):
literal[string]
keyword[try] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[number_of_rows] ):
identifier[pipe] . identifier[hmset] ( literal[string] % identifier[i] ,{
literal[string] : identifier[rnd_id_generator] ( identifier[self] ),
literal[string] : identifier[self] . identifier[faker] . identifier[ipv4] (),
literal[string] : identifier[self] . identifier[faker] . identifier[country_code] (),
literal[string] : identifier[self] . identifier[faker] . identifier[user_agent] ()
})
identifier[pipe] . identifier[execute] ()
identifier[logger] . identifier[warning] ( literal[string] , identifier[extra] = identifier[d] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( identifier[e] , identifier[extra] = identifier[d] ) | def data_filler_user_agent(self, number_of_rows, pipe):
"""creates keys with user agent data
"""
try:
for i in range(number_of_rows):
pipe.hmset('user_agent:%s' % i, {'id': rnd_id_generator(self), 'ip': self.faker.ipv4(), 'countrycode': self.faker.country_code(), 'useragent': self.faker.user_agent()}) # depends on [control=['for'], data=['i']]
pipe.execute()
logger.warning('user_agent Commits are successful after write job!', extra=d) # depends on [control=['try'], data=[]]
except Exception as e:
logger.error(e, extra=d) # depends on [control=['except'], data=['e']] |
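
A minimal driver sketch; the Redis connection details are assumptions, and `filler` stands for an instance of the surrounding class:

import redis

r = redis.StrictRedis(host="localhost", port=6379)   # assumed local instance
pipe = r.pipeline()
filler.data_filler_user_agent(number_of_rows=100, pipe=pipe)
# keys user_agent:0 .. user_agent:99 now hold id/ip/countrycode/useragent hashes
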
def setSpeciesFromJson(self, speciesJson):
"""
Sets the species, an OntologyTerm, to the specified value, given as
a JSON string.
See the documentation for details of this field.
"""
try:
parsed = protocol.fromJson(speciesJson, protocol.OntologyTerm)
except:
raise exceptions.InvalidJsonException(speciesJson)
self._species = protocol.toJsonDict(parsed) | def function[setSpeciesFromJson, parameter[self, speciesJson]]:
constant[
Sets the species, an OntologyTerm, to the specified value, given as
a JSON string.
See the documentation for details of this field.
]
<ast.Try object at 0x7da1b26acb80>
name[self]._species assign[=] call[name[protocol].toJsonDict, parameter[name[parsed]]] | keyword[def] identifier[setSpeciesFromJson] ( identifier[self] , identifier[speciesJson] ):
literal[string]
keyword[try] :
identifier[parsed] = identifier[protocol] . identifier[fromJson] ( identifier[speciesJson] , identifier[protocol] . identifier[OntologyTerm] )
keyword[except] :
keyword[raise] identifier[exceptions] . identifier[InvalidJsonException] ( identifier[speciesJson] )
identifier[self] . identifier[_species] = identifier[protocol] . identifier[toJsonDict] ( identifier[parsed] ) | def setSpeciesFromJson(self, speciesJson):
"""
Sets the species, an OntologyTerm, to the specified value, given as
a JSON string.
See the documentation for details of this field.
"""
try:
parsed = protocol.fromJson(speciesJson, protocol.OntologyTerm) # depends on [control=['try'], data=[]]
except:
raise exceptions.InvalidJsonException(speciesJson) # depends on [control=['except'], data=[]]
self._species = protocol.toJsonDict(parsed) |
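
A payload sketch; the field names follow one GA4GH OntologyTerm revision and may differ in yours, and `dataset` is a hypothetical object exposing the method:

species_json = '{"term": "Homo sapiens", "id": "NCBITaxon:9606"}'  # illustrative fields
dataset.setSpeciesFromJson(species_json)   # malformed input raises InvalidJsonException
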
def rand_pad(padding:int, size:int, mode:str='reflection'):
"Fixed `mode` `padding` and random crop of `size`"
return [pad(padding=padding,mode=mode),
crop(size=size, **rand_pos)] | def function[rand_pad, parameter[padding, size, mode]]:
constant[Fixed `mode` `padding` and random crop of `size`]
return[list[[<ast.Call object at 0x7da20e9b2560>, <ast.Call object at 0x7da20e9b0ee0>]]] | keyword[def] identifier[rand_pad] ( identifier[padding] : identifier[int] , identifier[size] : identifier[int] , identifier[mode] : identifier[str] = literal[string] ):
literal[string]
keyword[return] [ identifier[pad] ( identifier[padding] = identifier[padding] , identifier[mode] = identifier[mode] ),
identifier[crop] ( identifier[size] = identifier[size] ,** identifier[rand_pos] )] | def rand_pad(padding: int, size: int, mode: str='reflection'):
"""Fixed `mode` `padding` and random crop of `size`"""
return [pad(padding=padding, mode=mode), crop(size=size, **rand_pos)] |
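
Typical fastai-v1-style usage with illustrative numbers:

tfms = rand_pad(padding=4, size=32)   # reflection padding by default
# returns [pad_tfm, crop_tfm]; commonly passed on as extra training transforms,
# e.g. get_transforms(xtra_tfms=rand_pad(4, 32))
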
def process_remove_action(processors, action, argument):
"""Process action removals."""
for processor in processors:
processor(action, argument)
db.session.commit() | def function[process_remove_action, parameter[processors, action, argument]]:
constant[Process action removals.]
for taget[name[processor]] in starred[name[processors]] begin[:]
call[name[processor], parameter[name[action], name[argument]]]
call[name[db].session.commit, parameter[]] | keyword[def] identifier[process_remove_action] ( identifier[processors] , identifier[action] , identifier[argument] ):
literal[string]
keyword[for] identifier[processor] keyword[in] identifier[processors] :
identifier[processor] ( identifier[action] , identifier[argument] )
identifier[db] . identifier[session] . identifier[commit] () | def process_remove_action(processors, action, argument):
"""Process action removals."""
for processor in processors:
processor(action, argument) # depends on [control=['for'], data=['processor']]
db.session.commit() |
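
The dispatch pattern is simply "call every registered processor, then commit once"; a sketch with a toy processor:

def log_removal(action, argument):
    print(f"removing {action!r} for {argument!r}")   # toy processor

process_remove_action([log_removal], "admin-access", "user:42")
# db.session.commit() runs exactly once, after all processors have been applied
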
def treePopupWidget(self):
"""
Returns the popup widget for this record box when it is supposed to
be an ORB tree widget.
:return <XTreeWidget>
"""
edit = self.lineEdit()
if not self._treePopupWidget:
# create the treewidget
tree = XTreeWidget(self)
tree.setWindowFlags(Qt.Popup)
tree.setFocusPolicy(Qt.StrongFocus)
tree.installEventFilter(self)
tree.setAlternatingRowColors(True)
tree.setShowGridColumns(False)
tree.setRootIsDecorated(False)
tree.setVerticalScrollMode(tree.ScrollPerPixel)
# create connections
tree.itemClicked.connect(self.acceptRecord)
if edit:
edit.textEdited.connect(tree.filterItems)
edit.textEdited.connect(self.showPopup)
self._treePopupWidget = tree
return self._treePopupWidget | def function[treePopupWidget, parameter[self]]:
constant[
Returns the popup widget for this record box when it is supposed to
be an ORB tree widget.
:return <XTreeWidget>
]
variable[edit] assign[=] call[name[self].lineEdit, parameter[]]
if <ast.UnaryOp object at 0x7da18f09c5b0> begin[:]
variable[tree] assign[=] call[name[XTreeWidget], parameter[name[self]]]
call[name[tree].setWindowFlags, parameter[name[Qt].Popup]]
call[name[tree].setFocusPolicy, parameter[name[Qt].StrongFocus]]
call[name[tree].installEventFilter, parameter[name[self]]]
call[name[tree].setAlternatingRowColors, parameter[constant[True]]]
call[name[tree].setShowGridColumns, parameter[constant[False]]]
call[name[tree].setRootIsDecorated, parameter[constant[False]]]
call[name[tree].setVerticalScrollMode, parameter[name[tree].ScrollPerPixel]]
call[name[tree].itemClicked.connect, parameter[name[self].acceptRecord]]
if name[edit] begin[:]
call[name[edit].textEdited.connect, parameter[name[tree].filterItems]]
call[name[edit].textEdited.connect, parameter[name[self].showPopup]]
name[self]._treePopupWidget assign[=] name[tree]
return[name[self]._treePopupWidget] | keyword[def] identifier[treePopupWidget] ( identifier[self] ):
literal[string]
identifier[edit] = identifier[self] . identifier[lineEdit] ()
keyword[if] keyword[not] identifier[self] . identifier[_treePopupWidget] :
identifier[tree] = identifier[XTreeWidget] ( identifier[self] )
identifier[tree] . identifier[setWindowFlags] ( identifier[Qt] . identifier[Popup] )
identifier[tree] . identifier[setFocusPolicy] ( identifier[Qt] . identifier[StrongFocus] )
identifier[tree] . identifier[installEventFilter] ( identifier[self] )
identifier[tree] . identifier[setAlternatingRowColors] ( keyword[True] )
identifier[tree] . identifier[setShowGridColumns] ( keyword[False] )
identifier[tree] . identifier[setRootIsDecorated] ( keyword[False] )
identifier[tree] . identifier[setVerticalScrollMode] ( identifier[tree] . identifier[ScrollPerPixel] )
identifier[tree] . identifier[itemClicked] . identifier[connect] ( identifier[self] . identifier[acceptRecord] )
keyword[if] identifier[edit] :
identifier[edit] . identifier[textEdited] . identifier[connect] ( identifier[tree] . identifier[filterItems] )
identifier[edit] . identifier[textEdited] . identifier[connect] ( identifier[self] . identifier[showPopup] )
identifier[self] . identifier[_treePopupWidget] = identifier[tree]
keyword[return] identifier[self] . identifier[_treePopupWidget] | def treePopupWidget(self):
"""
Returns the popup widget for this record box when it is supposed to
be an ORB tree widget.
:return <XTreeWidget>
"""
edit = self.lineEdit()
if not self._treePopupWidget: # create the treewidget
tree = XTreeWidget(self)
tree.setWindowFlags(Qt.Popup)
tree.setFocusPolicy(Qt.StrongFocus)
tree.installEventFilter(self)
tree.setAlternatingRowColors(True)
tree.setShowGridColumns(False)
tree.setRootIsDecorated(False)
tree.setVerticalScrollMode(tree.ScrollPerPixel) # create connections
tree.itemClicked.connect(self.acceptRecord)
if edit:
edit.textEdited.connect(tree.filterItems)
edit.textEdited.connect(self.showPopup) # depends on [control=['if'], data=[]]
self._treePopupWidget = tree # depends on [control=['if'], data=[]]
return self._treePopupWidget |
def setup_db(self, wait_for_ready=True):
''' Create and configure index
If `wait_for_ready` is True, this function will block until
        the status of `self.index_name` is `yellow`
'''
if self.es.indices.exists(self.index_name):
try:
self.update_mappings()
except MappingsException as ex:
log.error(ex)
log.warn('An old or wrong properties mapping has been found for index: "{0}",\
 this could lead to some errors. It is recommended to run "libreant-db upgrade"'.format(self.index_name))
else:
log.debug("Index is missing: '{0}'".format(self.index_name))
self.create_index()
if wait_for_ready:
log.debug('waiting for index "{}" to be ready'.format(self.index_name))
self.es.cluster.health(index=self.index_name, level='index', wait_for_status='yellow')
log.debug('index "{}" is now ready'.format(self.index_name)) | def function[setup_db, parameter[self, wait_for_ready]]:
constant[ Create and configure index
If `wait_for_ready` is True, this function will block until
        the status of `self.index_name` is `yellow`
]
if call[name[self].es.indices.exists, parameter[name[self].index_name]] begin[:]
<ast.Try object at 0x7da1b26cba30>
if name[wait_for_ready] begin[:]
call[name[log].debug, parameter[call[constant[waiting for index "{}" to be ready].format, parameter[name[self].index_name]]]]
call[name[self].es.cluster.health, parameter[]]
call[name[log].debug, parameter[call[constant[index "{}" is now ready].format, parameter[name[self].index_name]]]] | keyword[def] identifier[setup_db] ( identifier[self] , identifier[wait_for_ready] = keyword[True] ):
literal[string]
keyword[if] identifier[self] . identifier[es] . identifier[indices] . identifier[exists] ( identifier[self] . identifier[index_name] ):
keyword[try] :
identifier[self] . identifier[update_mappings] ()
keyword[except] identifier[MappingsException] keyword[as] identifier[ex] :
identifier[log] . identifier[error] ( identifier[ex] )
identifier[log] . identifier[warn] ( literal[string] . identifier[format] ( identifier[self] . identifier[index_name] ))
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[index_name] ))
identifier[self] . identifier[create_index] ()
keyword[if] identifier[wait_for_ready] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[index_name] ))
identifier[self] . identifier[es] . identifier[cluster] . identifier[health] ( identifier[index] = identifier[self] . identifier[index_name] , identifier[level] = literal[string] , identifier[wait_for_status] = literal[string] )
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[index_name] )) | def setup_db(self, wait_for_ready=True):
""" Create and configure index
If `wait_for_ready` is True, this function will block until
        the status of `self.index_name` is `yellow`
"""
if self.es.indices.exists(self.index_name):
try:
self.update_mappings() # depends on [control=['try'], data=[]]
except MappingsException as ex:
log.error(ex)
            log.warn('An old or wrong properties mapping has been found for index: "{0}", this could lead to some errors. It is recommended to run "libreant-db upgrade"'.format(self.index_name)) # depends on [control=['except'], data=['ex']] # depends on [control=['if'], data=[]]
else:
log.debug("Index is missing: '{0}'".format(self.index_name))
self.create_index()
if wait_for_ready:
log.debug('waiting for index "{}" to be ready'.format(self.index_name))
self.es.cluster.health(index=self.index_name, level='index', wait_for_status='yellow')
log.debug('index "{}" is now ready'.format(self.index_name)) # depends on [control=['if'], data=[]] |
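
Blocking on index readiness is the key move here; a stand-alone sketch of the same wait, assuming elasticsearch-py and a reachable cluster:

from elasticsearch import Elasticsearch

es = Elasticsearch(["localhost:9200"])   # assumed local node
es.cluster.health(index="libreant", level="index", wait_for_status="yellow")
# returns once the index reaches yellow status (or raises on timeout)
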
def update_lan(self, datacenter_id, lan_id, name=None,
public=None, ip_failover=None):
"""
Updates a LAN
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param lan_id: The unique ID of the LAN.
:type lan_id: ``str``
:param name: The new name of the LAN.
:type name: ``str``
:param public: Indicates if the LAN is public.
:type public: ``bool``
:param ip_failover: A list of IP fail-over dicts.
:type ip_failover: ``list``
"""
data = {}
if name:
data['name'] = name
if public is not None:
data['public'] = public
if ip_failover:
data['ipFailover'] = ip_failover
response = self._perform_request(
url='/datacenters/%s/lans/%s' % (datacenter_id, lan_id),
method='PATCH',
data=json.dumps(data))
return response | def function[update_lan, parameter[self, datacenter_id, lan_id, name, public, ip_failover]]:
constant[
Updates a LAN
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param lan_id: The unique ID of the LAN.
:type lan_id: ``str``
:param name: The new name of the LAN.
:type name: ``str``
:param public: Indicates if the LAN is public.
:type public: ``bool``
:param ip_failover: A list of IP fail-over dicts.
:type ip_failover: ``list``
]
variable[data] assign[=] dictionary[[], []]
if name[name] begin[:]
call[name[data]][constant[name]] assign[=] name[name]
if compare[name[public] is_not constant[None]] begin[:]
call[name[data]][constant[public]] assign[=] name[public]
if name[ip_failover] begin[:]
call[name[data]][constant[ipFailover]] assign[=] name[ip_failover]
variable[response] assign[=] call[name[self]._perform_request, parameter[]]
return[name[response]] | keyword[def] identifier[update_lan] ( identifier[self] , identifier[datacenter_id] , identifier[lan_id] , identifier[name] = keyword[None] ,
identifier[public] = keyword[None] , identifier[ip_failover] = keyword[None] ):
literal[string]
identifier[data] ={}
keyword[if] identifier[name] :
identifier[data] [ literal[string] ]= identifier[name]
keyword[if] identifier[public] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[public]
keyword[if] identifier[ip_failover] :
identifier[data] [ literal[string] ]= identifier[ip_failover]
identifier[response] = identifier[self] . identifier[_perform_request] (
identifier[url] = literal[string] %( identifier[datacenter_id] , identifier[lan_id] ),
identifier[method] = literal[string] ,
identifier[data] = identifier[json] . identifier[dumps] ( identifier[data] ))
keyword[return] identifier[response] | def update_lan(self, datacenter_id, lan_id, name=None, public=None, ip_failover=None):
"""
Updates a LAN
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param lan_id: The unique ID of the LAN.
:type lan_id: ``str``
:param name: The new name of the LAN.
:type name: ``str``
:param public: Indicates if the LAN is public.
:type public: ``bool``
:param ip_failover: A list of IP fail-over dicts.
:type ip_failover: ``list``
"""
data = {}
if name:
data['name'] = name # depends on [control=['if'], data=[]]
if public is not None:
data['public'] = public # depends on [control=['if'], data=['public']]
if ip_failover:
data['ipFailover'] = ip_failover # depends on [control=['if'], data=[]]
response = self._perform_request(url='/datacenters/%s/lans/%s' % (datacenter_id, lan_id), method='PATCH', data=json.dumps(data))
return response |
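
A call sketch against a ProfitBricks/Ionos-style client; the UUIDs and fail-over payload are placeholders:

resp = client.update_lan(
    datacenter_id="dc-uuid",     # placeholder
    lan_id="3",
    name="backend-lan",
    public=False,
    ip_failover=[{"ip": "198.51.100.10", "nicUuid": "nic-uuid"}],  # placeholder dicts
)
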
def _get_operation_rpc(self):
"""Polls the status of the current operation.
Uses gRPC request to check.
:rtype: :class:`~google.longrunning.operations_pb2.Operation`
:returns: The latest status of the current operation.
"""
request_pb = operations_pb2.GetOperationRequest(name=self.name)
return self.client._operations_stub.GetOperation(request_pb) | def function[_get_operation_rpc, parameter[self]]:
constant[Polls the status of the current operation.
Uses gRPC request to check.
:rtype: :class:`~google.longrunning.operations_pb2.Operation`
:returns: The latest status of the current operation.
]
variable[request_pb] assign[=] call[name[operations_pb2].GetOperationRequest, parameter[]]
return[call[name[self].client._operations_stub.GetOperation, parameter[name[request_pb]]]] | keyword[def] identifier[_get_operation_rpc] ( identifier[self] ):
literal[string]
identifier[request_pb] = identifier[operations_pb2] . identifier[GetOperationRequest] ( identifier[name] = identifier[self] . identifier[name] )
keyword[return] identifier[self] . identifier[client] . identifier[_operations_stub] . identifier[GetOperation] ( identifier[request_pb] ) | def _get_operation_rpc(self):
"""Polls the status of the current operation.
Uses gRPC request to check.
:rtype: :class:`~google.longrunning.operations_pb2.Operation`
:returns: The latest status of the current operation.
"""
request_pb = operations_pb2.GetOperationRequest(name=self.name)
return self.client._operations_stub.GetOperation(request_pb) |
def do_ignore(self, arg):
"""ignore bpnumber [count]
Set the ignore count for the given breakpoint number. If
count is omitted, the ignore count is set to 0. A breakpoint
becomes active when the ignore count is zero. When non-zero,
the count is decremented each time the breakpoint is reached
and the breakpoint is not disabled and any associated
condition evaluates to true.
"""
args = arg.split(' ', 1)
try:
count = int(args[1].strip())
except Exception:
count = 0
try:
bp = self.get_bpbynumber(args[0].strip())
except IndexError:
self.error('Breakpoint number expected')
except ValueError as err:
self.error(err)
else:
bp.ignore = count
if count > 0:
if count > 1:
countstr = '%d crossings' % count
else:
countstr = '1 crossing'
self.message('Will ignore next %s of breakpoint %d.' %
(countstr, bp.number))
else:
self.message('Will stop next time breakpoint %d is reached.'
% bp.number) | def function[do_ignore, parameter[self, arg]]:
constant[ignore bpnumber [count]
Set the ignore count for the given breakpoint number. If
count is omitted, the ignore count is set to 0. A breakpoint
becomes active when the ignore count is zero. When non-zero,
the count is decremented each time the breakpoint is reached
and the breakpoint is not disabled and any associated
condition evaluates to true.
]
variable[args] assign[=] call[name[arg].split, parameter[constant[ ], constant[1]]]
<ast.Try object at 0x7da1b0e73700>
<ast.Try object at 0x7da1b0e72bf0> | keyword[def] identifier[do_ignore] ( identifier[self] , identifier[arg] ):
literal[string]
identifier[args] = identifier[arg] . identifier[split] ( literal[string] , literal[int] )
keyword[try] :
identifier[count] = identifier[int] ( identifier[args] [ literal[int] ]. identifier[strip] ())
keyword[except] identifier[Exception] :
identifier[count] = literal[int]
keyword[try] :
identifier[bp] = identifier[self] . identifier[get_bpbynumber] ( identifier[args] [ literal[int] ]. identifier[strip] ())
keyword[except] identifier[IndexError] :
identifier[self] . identifier[error] ( literal[string] )
keyword[except] identifier[ValueError] keyword[as] identifier[err] :
identifier[self] . identifier[error] ( identifier[err] )
keyword[else] :
identifier[bp] . identifier[ignore] = identifier[count]
keyword[if] identifier[count] > literal[int] :
keyword[if] identifier[count] > literal[int] :
identifier[countstr] = literal[string] % identifier[count]
keyword[else] :
identifier[countstr] = literal[string]
identifier[self] . identifier[message] ( literal[string] %
( identifier[countstr] , identifier[bp] . identifier[number] ))
keyword[else] :
identifier[self] . identifier[message] ( literal[string]
% identifier[bp] . identifier[number] ) | def do_ignore(self, arg):
"""ignore bpnumber [count]
Set the ignore count for the given breakpoint number. If
count is omitted, the ignore count is set to 0. A breakpoint
becomes active when the ignore count is zero. When non-zero,
the count is decremented each time the breakpoint is reached
and the breakpoint is not disabled and any associated
condition evaluates to true.
"""
args = arg.split(' ', 1)
try:
count = int(args[1].strip()) # depends on [control=['try'], data=[]]
except Exception:
count = 0 # depends on [control=['except'], data=[]]
try:
bp = self.get_bpbynumber(args[0].strip()) # depends on [control=['try'], data=[]]
except IndexError:
self.error('Breakpoint number expected') # depends on [control=['except'], data=[]]
except ValueError as err:
self.error(err) # depends on [control=['except'], data=['err']]
else:
bp.ignore = count
if count > 0:
if count > 1:
countstr = '%d crossings' % count # depends on [control=['if'], data=['count']]
else:
countstr = '1 crossing'
self.message('Will ignore next %s of breakpoint %d.' % (countstr, bp.number)) # depends on [control=['if'], data=['count']]
else:
self.message('Will stop next time breakpoint %d is reached.' % bp.number) |
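
In an interactive debugger session the command reads like this (breakpoint number 1 is assumed to exist); shown as a transcript in comments:

# (Pdb) ignore 1 3
# Will ignore next 3 crossings of breakpoint 1.
# (Pdb) ignore 1
# Will stop next time breakpoint 1 is reached.
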
def class_box(self, cn: ClassDefinitionName) -> str:
""" Generate a box for the class. Populate its interior only if (a) it hasn't previously been generated and
(b) it appears in the gen_classes list
@param cn:
@param inherited:
@return:
"""
slot_defs: List[str] = []
if cn not in self.box_generated and (not self.focus_classes or cn in self.focus_classes):
cls = self.schema.classes[cn]
for slotname in self.filtered_cls_slots(cn, all_slots=True):
slot = self.schema.slots[slotname]
if not slot.range or slot.range in builtin_names or slot.range in self.schema.types:
mod = self.prop_modifier(cls, slot)
slot_defs.append(underscore(self.aliased_slot_name(slot)) +
mod + ':' +
underscore(slot.range) + self.cardinality(slot))
self.box_generated.add(cn)
self.referenced.add(cn)
return '[' + camelcase(cn) + ('|' + ';'.join(slot_defs) if slot_defs else '') + ']' | def function[class_box, parameter[self, cn]]:
constant[ Generate a box for the class. Populate its interior only if (a) it hasn't previously been generated and
(b) it appears in the gen_classes list
@param cn:
@param inherited:
@return:
]
<ast.AnnAssign object at 0x7da1b0489180>
if <ast.BoolOp object at 0x7da1b0489390> begin[:]
variable[cls] assign[=] call[name[self].schema.classes][name[cn]]
for taget[name[slotname]] in starred[call[name[self].filtered_cls_slots, parameter[name[cn]]]] begin[:]
variable[slot] assign[=] call[name[self].schema.slots][name[slotname]]
if <ast.BoolOp object at 0x7da1b048a110> begin[:]
variable[mod] assign[=] call[name[self].prop_modifier, parameter[name[cls], name[slot]]]
call[name[slot_defs].append, parameter[binary_operation[binary_operation[binary_operation[binary_operation[call[name[underscore], parameter[call[name[self].aliased_slot_name, parameter[name[slot]]]]] + name[mod]] + constant[:]] + call[name[underscore], parameter[name[slot].range]]] + call[name[self].cardinality, parameter[name[slot]]]]]]
call[name[self].box_generated.add, parameter[name[cn]]]
call[name[self].referenced.add, parameter[name[cn]]]
return[binary_operation[binary_operation[binary_operation[constant[[] + call[name[camelcase], parameter[name[cn]]]] + <ast.IfExp object at 0x7da1b04ea680>] + constant[]]]] | keyword[def] identifier[class_box] ( identifier[self] , identifier[cn] : identifier[ClassDefinitionName] )-> identifier[str] :
literal[string]
identifier[slot_defs] : identifier[List] [ identifier[str] ]=[]
keyword[if] identifier[cn] keyword[not] keyword[in] identifier[self] . identifier[box_generated] keyword[and] ( keyword[not] identifier[self] . identifier[focus_classes] keyword[or] identifier[cn] keyword[in] identifier[self] . identifier[focus_classes] ):
identifier[cls] = identifier[self] . identifier[schema] . identifier[classes] [ identifier[cn] ]
keyword[for] identifier[slotname] keyword[in] identifier[self] . identifier[filtered_cls_slots] ( identifier[cn] , identifier[all_slots] = keyword[True] ):
identifier[slot] = identifier[self] . identifier[schema] . identifier[slots] [ identifier[slotname] ]
keyword[if] keyword[not] identifier[slot] . identifier[range] keyword[or] identifier[slot] . identifier[range] keyword[in] identifier[builtin_names] keyword[or] identifier[slot] . identifier[range] keyword[in] identifier[self] . identifier[schema] . identifier[types] :
identifier[mod] = identifier[self] . identifier[prop_modifier] ( identifier[cls] , identifier[slot] )
identifier[slot_defs] . identifier[append] ( identifier[underscore] ( identifier[self] . identifier[aliased_slot_name] ( identifier[slot] ))+
identifier[mod] + literal[string] +
identifier[underscore] ( identifier[slot] . identifier[range] )+ identifier[self] . identifier[cardinality] ( identifier[slot] ))
identifier[self] . identifier[box_generated] . identifier[add] ( identifier[cn] )
identifier[self] . identifier[referenced] . identifier[add] ( identifier[cn] )
keyword[return] literal[string] + identifier[camelcase] ( identifier[cn] )+( literal[string] + literal[string] . identifier[join] ( identifier[slot_defs] ) keyword[if] identifier[slot_defs] keyword[else] literal[string] )+ literal[string] | def class_box(self, cn: ClassDefinitionName) -> str:
""" Generate a box for the class. Populate its interior only if (a) it hasn't previously been generated and
(b) it appears in the gen_classes list
@param cn:
@param inherited:
@return:
"""
slot_defs: List[str] = []
if cn not in self.box_generated and (not self.focus_classes or cn in self.focus_classes):
cls = self.schema.classes[cn]
for slotname in self.filtered_cls_slots(cn, all_slots=True):
slot = self.schema.slots[slotname]
if not slot.range or slot.range in builtin_names or slot.range in self.schema.types:
mod = self.prop_modifier(cls, slot)
slot_defs.append(underscore(self.aliased_slot_name(slot)) + mod + ':' + underscore(slot.range) + self.cardinality(slot)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['slotname']]
self.box_generated.add(cn) # depends on [control=['if'], data=[]]
self.referenced.add(cn)
return '[' + camelcase(cn) + ('|' + ';'.join(slot_defs) if slot_defs else '') + ']' |
def log_run_info(self, model_name):
"""Collect most of the TF runtime information for the local env.
The schema of the run info follows official/benchmark/datastore/schema.
Args:
model_name: string, the name of the model.
"""
run_info = {
"model_name": model_name,
"machine_config": {},
"run_date": datetime.datetime.now().strftime(_DATE_TIME_FORMAT_PATTERN)}
_collect_tensorflow_info(run_info)
_collect_tensorflow_environment_variables(run_info)
_collect_cpu_info(run_info)
_collect_gpu_info(run_info)
_collect_memory_info(run_info)
with tf.gfile.GFile(os.path.join(
self._logging_dir, BENCHMARK_RUN_LOG_FILE_NAME), "w") as f:
try:
json.dump(run_info, f)
f.write("\n")
except (TypeError, ValueError) as e:
tf.logging.warning("Failed to dump benchmark run info to log file: %s",
e) | def function[log_run_info, parameter[self, model_name]]:
constant[Collect most of the TF runtime information for the local env.
The schema of the run info follows official/benchmark/datastore/schema.
Args:
model_name: string, the name of the model.
]
variable[run_info] assign[=] dictionary[[<ast.Constant object at 0x7da204960310>, <ast.Constant object at 0x7da2049637f0>, <ast.Constant object at 0x7da204962830>], [<ast.Name object at 0x7da204963430>, <ast.Dict object at 0x7da204962fb0>, <ast.Call object at 0x7da204961bd0>]]
call[name[_collect_tensorflow_info], parameter[name[run_info]]]
call[name[_collect_tensorflow_environment_variables], parameter[name[run_info]]]
call[name[_collect_cpu_info], parameter[name[run_info]]]
call[name[_collect_gpu_info], parameter[name[run_info]]]
call[name[_collect_memory_info], parameter[name[run_info]]]
with call[name[tf].gfile.GFile, parameter[call[name[os].path.join, parameter[name[self]._logging_dir, name[BENCHMARK_RUN_LOG_FILE_NAME]]], constant[w]]] begin[:]
<ast.Try object at 0x7da204960ac0> | keyword[def] identifier[log_run_info] ( identifier[self] , identifier[model_name] ):
literal[string]
identifier[run_info] ={
literal[string] : identifier[model_name] ,
literal[string] :{},
literal[string] : identifier[datetime] . identifier[datetime] . identifier[now] (). identifier[strftime] ( identifier[_DATE_TIME_FORMAT_PATTERN] )}
identifier[_collect_tensorflow_info] ( identifier[run_info] )
identifier[_collect_tensorflow_environment_variables] ( identifier[run_info] )
identifier[_collect_cpu_info] ( identifier[run_info] )
identifier[_collect_gpu_info] ( identifier[run_info] )
identifier[_collect_memory_info] ( identifier[run_info] )
keyword[with] identifier[tf] . identifier[gfile] . identifier[GFile] ( identifier[os] . identifier[path] . identifier[join] (
identifier[self] . identifier[_logging_dir] , identifier[BENCHMARK_RUN_LOG_FILE_NAME] ), literal[string] ) keyword[as] identifier[f] :
keyword[try] :
identifier[json] . identifier[dump] ( identifier[run_info] , identifier[f] )
identifier[f] . identifier[write] ( literal[string] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ) keyword[as] identifier[e] :
identifier[tf] . identifier[logging] . identifier[warning] ( literal[string] ,
identifier[e] ) | def log_run_info(self, model_name):
"""Collect most of the TF runtime information for the local env.
The schema of the run info follows official/benchmark/datastore/schema.
Args:
model_name: string, the name of the model.
"""
run_info = {'model_name': model_name, 'machine_config': {}, 'run_date': datetime.datetime.now().strftime(_DATE_TIME_FORMAT_PATTERN)}
_collect_tensorflow_info(run_info)
_collect_tensorflow_environment_variables(run_info)
_collect_cpu_info(run_info)
_collect_gpu_info(run_info)
_collect_memory_info(run_info)
with tf.gfile.GFile(os.path.join(self._logging_dir, BENCHMARK_RUN_LOG_FILE_NAME), 'w') as f:
try:
json.dump(run_info, f)
f.write('\n') # depends on [control=['try'], data=[]]
except (TypeError, ValueError) as e:
tf.logging.warning('Failed to dump benchmark run info to log file: %s', e) # depends on [control=['except'], data=['e']] # depends on [control=['with'], data=['f']] |
def do_GET(self):
"""
Handle the retrieval of the code
"""
parsed_url = urlparse(self.path)
if parsed_url[2] == "/" + SERVER_REDIRECT_PATH: # 2 = Path
parsed_query = parse_qs(parsed_url[4]) # 4 = Query
if "code" not in parsed_query:
self.send_response(200)
self.send_header("Content-Type", "text/plain")
self.end_headers()
self.wfile.write("No code found, try again!".encode("utf-8"))
return
self.server.response_code = parsed_query["code"][0]
self.send_response(200)
self.send_header("Content-Type", "text/plain")
self.end_headers()
self.wfile.write(
"Thank you for using OAuth2Util. The authorization was successful, "
"you can now close this window.".encode("utf-8"))
elif parsed_url[2] == "/" + SERVER_LINK_PATH: # 2 = Path
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.end_headers()
self.wfile.write("<html><body>Hey there!<br/>Click <a href=\"{0}\">here</a> to claim your prize.</body></html>"
.format(self.server.authorize_url).encode("utf-8"))
else:
self.send_response(404)
self.send_header("Content-Type", "text/plain")
self.end_headers()
self.wfile.write("404 not found".encode("utf-8")) | def function[do_GET, parameter[self]]:
constant[
Handle the retrieval of the code
]
variable[parsed_url] assign[=] call[name[urlparse], parameter[name[self].path]]
if compare[call[name[parsed_url]][constant[2]] equal[==] binary_operation[constant[/] + name[SERVER_REDIRECT_PATH]]] begin[:]
variable[parsed_query] assign[=] call[name[parse_qs], parameter[call[name[parsed_url]][constant[4]]]]
if compare[constant[code] <ast.NotIn object at 0x7da2590d7190> name[parsed_query]] begin[:]
call[name[self].send_response, parameter[constant[200]]]
call[name[self].send_header, parameter[constant[Content-Type], constant[text/plain]]]
call[name[self].end_headers, parameter[]]
call[name[self].wfile.write, parameter[call[constant[No code found, try again!].encode, parameter[constant[utf-8]]]]]
return[None]
name[self].server.response_code assign[=] call[call[name[parsed_query]][constant[code]]][constant[0]]
call[name[self].send_response, parameter[constant[200]]]
call[name[self].send_header, parameter[constant[Content-Type], constant[text/plain]]]
call[name[self].end_headers, parameter[]]
call[name[self].wfile.write, parameter[call[constant[Thank you for using OAuth2Util. The authorization was successful, you can now close this window.].encode, parameter[constant[utf-8]]]]] | keyword[def] identifier[do_GET] ( identifier[self] ):
literal[string]
identifier[parsed_url] = identifier[urlparse] ( identifier[self] . identifier[path] )
keyword[if] identifier[parsed_url] [ literal[int] ]== literal[string] + identifier[SERVER_REDIRECT_PATH] :
identifier[parsed_query] = identifier[parse_qs] ( identifier[parsed_url] [ literal[int] ])
keyword[if] literal[string] keyword[not] keyword[in] identifier[parsed_query] :
identifier[self] . identifier[send_response] ( literal[int] )
identifier[self] . identifier[send_header] ( literal[string] , literal[string] )
identifier[self] . identifier[end_headers] ()
identifier[self] . identifier[wfile] . identifier[write] ( literal[string] . identifier[encode] ( literal[string] ))
keyword[return]
identifier[self] . identifier[server] . identifier[response_code] = identifier[parsed_query] [ literal[string] ][ literal[int] ]
identifier[self] . identifier[send_response] ( literal[int] )
identifier[self] . identifier[send_header] ( literal[string] , literal[string] )
identifier[self] . identifier[end_headers] ()
identifier[self] . identifier[wfile] . identifier[write] (
literal[string]
literal[string] . identifier[encode] ( literal[string] ))
keyword[elif] identifier[parsed_url] [ literal[int] ]== literal[string] + identifier[SERVER_LINK_PATH] :
identifier[self] . identifier[send_response] ( literal[int] )
identifier[self] . identifier[send_header] ( literal[string] , literal[string] )
identifier[self] . identifier[end_headers] ()
identifier[self] . identifier[wfile] . identifier[write] ( literal[string]
. identifier[format] ( identifier[self] . identifier[server] . identifier[authorize_url] ). identifier[encode] ( literal[string] ))
keyword[else] :
identifier[self] . identifier[send_response] ( literal[int] )
identifier[self] . identifier[send_header] ( literal[string] , literal[string] )
identifier[self] . identifier[end_headers] ()
identifier[self] . identifier[wfile] . identifier[write] ( literal[string] . identifier[encode] ( literal[string] )) | def do_GET(self):
"""
Handle the retrieval of the code
"""
parsed_url = urlparse(self.path)
if parsed_url[2] == '/' + SERVER_REDIRECT_PATH: # 2 = Path
parsed_query = parse_qs(parsed_url[4]) # 4 = Query
if 'code' not in parsed_query:
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write('No code found, try again!'.encode('utf-8'))
return # depends on [control=['if'], data=[]]
self.server.response_code = parsed_query['code'][0]
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write('Thank you for using OAuth2Util. The authorization was successful, you can now close this window.'.encode('utf-8')) # depends on [control=['if'], data=[]]
elif parsed_url[2] == '/' + SERVER_LINK_PATH: # 2 = Path
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><body>Hey there!<br/>Click <a href="{0}">here</a> to claim your prize.</body></html>'.format(self.server.authorize_url).encode('utf-8')) # depends on [control=['if'], data=[]]
else:
self.send_response(404)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write('404 not found'.encode('utf-8')) |