code (strings 75–104k chars) | code_sememe (strings 47–309k chars) | token_type (strings 215–214k chars) | code_dependency (strings 75–155k chars)
---|---|---|---
def init_app(self, app):
"""
:param app: :class:`sanic.Sanic` instance to rate limit.
"""
self.enabled = app.config.setdefault(C.ENABLED, True)
self._swallow_errors = app.config.setdefault(
C.SWALLOW_ERRORS, self._swallow_errors
)
self._storage_options.update(
app.config.get(C.STORAGE_OPTIONS, {})
)
self._storage = storage_from_string(
self._storage_uri
or app.config.setdefault(C.STORAGE_URL, 'memory://'),
**self._storage_options
)
strategy = (
self._strategy
or app.config.setdefault(C.STRATEGY, 'fixed-window')
)
if strategy not in STRATEGIES:
raise ConfigurationError("Invalid rate limiting strategy %s" % strategy)
self._limiter = STRATEGIES[strategy](self._storage)
conf_limits = app.config.get(C.GLOBAL_LIMITS, None)
if not self._global_limits and conf_limits:
self._global_limits = [
ExtLimit(
limit, self._key_func, None, False, None, None, None
) for limit in parse_many(conf_limits)
]
app.request_middleware.append(self.__check_request_limit) | def function[init_app, parameter[self, app]]:
constant[
:param app: :class:`sanic.Sanic` instance to rate limit.
]
name[self].enabled assign[=] call[name[app].config.setdefault, parameter[name[C].ENABLED, constant[True]]]
name[self]._swallow_errors assign[=] call[name[app].config.setdefault, parameter[name[C].SWALLOW_ERRORS, name[self]._swallow_errors]]
call[name[self]._storage_options.update, parameter[call[name[app].config.get, parameter[name[C].STORAGE_OPTIONS, dictionary[[], []]]]]]
name[self]._storage assign[=] call[name[storage_from_string], parameter[<ast.BoolOp object at 0x7da1b0fac070>]]
variable[strategy] assign[=] <ast.BoolOp object at 0x7da1b0f3e1a0>
if compare[name[strategy] <ast.NotIn object at 0x7da2590d7190> name[STRATEGIES]] begin[:]
<ast.Raise object at 0x7da1b0f3d180>
name[self]._limiter assign[=] call[call[name[STRATEGIES]][name[strategy]], parameter[name[self]._storage]]
variable[conf_limits] assign[=] call[name[app].config.get, parameter[name[C].GLOBAL_LIMITS, constant[None]]]
if <ast.BoolOp object at 0x7da1b0f5b3a0> begin[:]
name[self]._global_limits assign[=] <ast.ListComp object at 0x7da1b0f59930>
call[name[app].request_middleware.append, parameter[name[self].__check_request_limit]] | keyword[def] identifier[init_app] ( identifier[self] , identifier[app] ):
literal[string]
identifier[self] . identifier[enabled] = identifier[app] . identifier[config] . identifier[setdefault] ( identifier[C] . identifier[ENABLED] , keyword[True] )
identifier[self] . identifier[_swallow_errors] = identifier[app] . identifier[config] . identifier[setdefault] (
identifier[C] . identifier[SWALLOW_ERRORS] , identifier[self] . identifier[_swallow_errors]
)
identifier[self] . identifier[_storage_options] . identifier[update] (
identifier[app] . identifier[config] . identifier[get] ( identifier[C] . identifier[STORAGE_OPTIONS] ,{})
)
identifier[self] . identifier[_storage] = identifier[storage_from_string] (
identifier[self] . identifier[_storage_uri]
keyword[or] identifier[app] . identifier[config] . identifier[setdefault] ( identifier[C] . identifier[STORAGE_URL] , literal[string] ),
** identifier[self] . identifier[_storage_options]
)
identifier[strategy] =(
identifier[self] . identifier[_strategy]
keyword[or] identifier[app] . identifier[config] . identifier[setdefault] ( identifier[C] . identifier[STRATEGY] , literal[string] )
)
keyword[if] identifier[strategy] keyword[not] keyword[in] identifier[STRATEGIES] :
keyword[raise] identifier[ConfigurationError] ( literal[string] % identifier[strategy] )
identifier[self] . identifier[_limiter] = identifier[STRATEGIES] [ identifier[strategy] ]( identifier[self] . identifier[_storage] )
identifier[conf_limits] = identifier[app] . identifier[config] . identifier[get] ( identifier[C] . identifier[GLOBAL_LIMITS] , keyword[None] )
keyword[if] keyword[not] identifier[self] . identifier[_global_limits] keyword[and] identifier[conf_limits] :
identifier[self] . identifier[_global_limits] =[
identifier[ExtLimit] (
identifier[limit] , identifier[self] . identifier[_key_func] , keyword[None] , keyword[False] , keyword[None] , keyword[None] , keyword[None]
) keyword[for] identifier[limit] keyword[in] identifier[parse_many] ( identifier[conf_limits] )
]
identifier[app] . identifier[request_middleware] . identifier[append] ( identifier[self] . identifier[__check_request_limit] ) | def init_app(self, app):
"""
:param app: :class:`sanic.Sanic` instance to rate limit.
"""
self.enabled = app.config.setdefault(C.ENABLED, True)
self._swallow_errors = app.config.setdefault(C.SWALLOW_ERRORS, self._swallow_errors)
self._storage_options.update(app.config.get(C.STORAGE_OPTIONS, {}))
self._storage = storage_from_string(self._storage_uri or app.config.setdefault(C.STORAGE_URL, 'memory://'), **self._storage_options)
strategy = self._strategy or app.config.setdefault(C.STRATEGY, 'fixed-window')
if strategy not in STRATEGIES:
raise ConfigurationError('Invalid rate limiting strategy %s' % strategy) # depends on [control=['if'], data=['strategy']]
self._limiter = STRATEGIES[strategy](self._storage)
conf_limits = app.config.get(C.GLOBAL_LIMITS, None)
if not self._global_limits and conf_limits:
self._global_limits = [ExtLimit(limit, self._key_func, None, False, None, None, None) for limit in parse_many(conf_limits)] # depends on [control=['if'], data=[]]
app.request_middleware.append(self.__check_request_limit) |
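The `init_app` above resolves each setting by precedence: an explicit constructor value wins, then the app config, then a hard default written back via `setdefault`. A stdlib-only sketch of that fallback pattern (the `RATELIMIT_*` key names are illustrative, not taken from the source):

```python
def resolve(explicit, config, key, default):
    """Mirror the `or`-fallbacks above: return the explicit value if given,
    else config[key], writing the default into config when the key is absent."""
    return explicit or config.setdefault(key, default)

config = {"RATELIMIT_STRATEGY": "moving-window"}
# Explicit value set: config is not even consulted (setdefault short-circuits).
assert resolve("fixed-window", config, "RATELIMIT_STRATEGY", "fixed-window") == "fixed-window"
# No explicit value: the config entry wins.
assert resolve(None, config, "RATELIMIT_STRATEGY", "fixed-window") == "moving-window"
# Neither present: the default is used and persisted in config.
assert resolve(None, {}, "RATELIMIT_STORAGE_URL", "memory://") == "memory://"
```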
def calcLogSum(Vals, sigma):
'''
Returns the optimal value given the choice specific value functions Vals.
Parameters
----------
Vals : [numpy.array]
A numpy.array that holds choice specific values at common grid points.
sigma : float
A number that controls the variance of the taste shocks
Returns
-------
V : [numpy.array]
A numpy.array that holds the integrated value function.
'''
# Assumes that NaNs have been replaced by -numpy.inf or similar
if sigma == 0.0:
# We could construct a linear index here and use unravel_index.
V = np.amax(Vals, axis=0)
return V
# else we have a taste shock
maxV = np.max(Vals, axis=0)
    # calculate maxV + sigma*log(sum_{i=1}^J exp((V[i]-maxV)/sigma))
sumexp = np.sum(np.exp((Vals-maxV)/sigma), axis=0)
LogSumV = np.log(sumexp)
LogSumV = maxV + sigma*LogSumV
return LogSumV | def function[calcLogSum, parameter[Vals, sigma]]:
constant[
Returns the optimal value given the choice specific value functions Vals.
Parameters
----------
Vals : [numpy.array]
A numpy.array that holds choice specific values at common grid points.
sigma : float
A number that controls the variance of the taste shocks
Returns
-------
V : [numpy.array]
A numpy.array that holds the integrated value function.
]
if compare[name[sigma] equal[==] constant[0.0]] begin[:]
variable[V] assign[=] call[name[np].amax, parameter[name[Vals]]]
return[name[V]]
variable[maxV] assign[=] call[name[np].max, parameter[name[Vals]]]
variable[sumexp] assign[=] call[name[np].sum, parameter[call[name[np].exp, parameter[binary_operation[binary_operation[name[Vals] - name[maxV]] / name[sigma]]]]]]
variable[LogSumV] assign[=] call[name[np].log, parameter[name[sumexp]]]
variable[LogSumV] assign[=] binary_operation[name[maxV] + binary_operation[name[sigma] * name[LogSumV]]]
return[name[LogSumV]] | keyword[def] identifier[calcLogSum] ( identifier[Vals] , identifier[sigma] ):
literal[string]
keyword[if] identifier[sigma] == literal[int] :
identifier[V] = identifier[np] . identifier[amax] ( identifier[Vals] , identifier[axis] = literal[int] )
keyword[return] identifier[V]
identifier[maxV] = identifier[np] . identifier[max] ( identifier[Vals] , identifier[axis] = literal[int] )
identifier[sumexp] = identifier[np] . identifier[sum] ( identifier[np] . identifier[exp] (( identifier[Vals] - identifier[maxV] )/ identifier[sigma] ), identifier[axis] = literal[int] )
identifier[LogSumV] = identifier[np] . identifier[log] ( identifier[sumexp] )
identifier[LogSumV] = identifier[maxV] + identifier[sigma] * identifier[LogSumV]
keyword[return] identifier[LogSumV] | def calcLogSum(Vals, sigma):
"""
Returns the optimal value given the choice specific value functions Vals.
Parameters
----------
Vals : [numpy.array]
A numpy.array that holds choice specific values at common grid points.
sigma : float
A number that controls the variance of the taste shocks
Returns
-------
V : [numpy.array]
A numpy.array that holds the integrated value function.
"""
# Assumes that NaNs have been replaced by -numpy.inf or similar
if sigma == 0.0:
# We could construct a linear index here and use unravel_index.
V = np.amax(Vals, axis=0)
return V # depends on [control=['if'], data=[]]
# else we have a taste shock
maxV = np.max(Vals, axis=0)
    # calculate maxV + sigma*log(sum_{i=1}^J exp((V[i]-maxV)/sigma))
sumexp = np.sum(np.exp((Vals - maxV) / sigma), axis=0)
LogSumV = np.log(sumexp)
LogSumV = maxV + sigma * LogSumV
return LogSumV |
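The `maxV` shift in `calcLogSum` is the standard log-sum-exp stabilization: without it, `exp(V/sigma)` underflows for large negative values. A quick numpy check (not part of the source):

```python
import numpy as np

# Choice-specific values around -1000: exp(V/sigma) underflows to 0 without
# the max-shift, so the naive formula collapses to log(0) = -inf.
Vals = np.array([[-1000.0, -1001.0, -999.5],
                 [-1000.5, -1000.2, -1000.0]])
sigma = 0.1

with np.errstate(divide='ignore'):
    naive = sigma * np.log(np.sum(np.exp(Vals / sigma), axis=0))
maxV = np.max(Vals, axis=0)
stable = maxV + sigma * np.log(np.sum(np.exp((Vals - maxV) / sigma), axis=0))

print(naive)   # [-inf -inf -inf]
print(stable)  # finite, each slightly above the column maximum
```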
def multiplied(*values):
"""
Returns the product of all supplied values. One or more *values* can be
specified. For example, to light a :class:`~gpiozero.PWMLED` as the product
(i.e. multiplication) of several potentiometers connected to an
:class:`~gpiozero.MCP3008`
ADC::
from gpiozero import MCP3008, PWMLED
from gpiozero.tools import multiplied
from signal import pause
pot1 = MCP3008(channel=0)
pot2 = MCP3008(channel=1)
pot3 = MCP3008(channel=2)
led = PWMLED(4)
led.source = multiplied(pot1, pot2, pot3)
pause()
"""
values = [_normalize(v) for v in values]
def _product(it):
p = 1
for n in it:
p *= n
return p
for v in zip(*values):
yield _product(v) | def function[multiplied, parameter[]]:
constant[
Returns the product of all supplied values. One or more *values* can be
specified. For example, to light a :class:`~gpiozero.PWMLED` as the product
(i.e. multiplication) of several potentiometers connected to an
:class:`~gpiozero.MCP3008`
ADC::
from gpiozero import MCP3008, PWMLED
from gpiozero.tools import multiplied
from signal import pause
pot1 = MCP3008(channel=0)
pot2 = MCP3008(channel=1)
pot3 = MCP3008(channel=2)
led = PWMLED(4)
led.source = multiplied(pot1, pot2, pot3)
pause()
]
variable[values] assign[=] <ast.ListComp object at 0x7da18f09c550>
def function[_product, parameter[it]]:
variable[p] assign[=] constant[1]
for taget[name[n]] in starred[name[it]] begin[:]
<ast.AugAssign object at 0x7da18f09e7a0>
return[name[p]]
for taget[name[v]] in starred[call[name[zip], parameter[<ast.Starred object at 0x7da18f09c820>]]] begin[:]
<ast.Yield object at 0x7da18f09c4f0> | keyword[def] identifier[multiplied] (* identifier[values] ):
literal[string]
identifier[values] =[ identifier[_normalize] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[values] ]
keyword[def] identifier[_product] ( identifier[it] ):
identifier[p] = literal[int]
keyword[for] identifier[n] keyword[in] identifier[it] :
identifier[p] *= identifier[n]
keyword[return] identifier[p]
keyword[for] identifier[v] keyword[in] identifier[zip] (* identifier[values] ):
keyword[yield] identifier[_product] ( identifier[v] ) | def multiplied(*values):
"""
Returns the product of all supplied values. One or more *values* can be
specified. For example, to light a :class:`~gpiozero.PWMLED` as the product
(i.e. multiplication) of several potentiometers connected to an
:class:`~gpiozero.MCP3008`
ADC::
from gpiozero import MCP3008, PWMLED
from gpiozero.tools import multiplied
from signal import pause
pot1 = MCP3008(channel=0)
pot2 = MCP3008(channel=1)
pot3 = MCP3008(channel=2)
led = PWMLED(4)
led.source = multiplied(pot1, pot2, pot3)
pause()
"""
values = [_normalize(v) for v in values]
def _product(it):
p = 1
for n in it:
p *= n # depends on [control=['for'], data=['n']]
return p
for v in zip(*values):
yield _product(v) # depends on [control=['for'], data=['v']] |
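`multiplied` zips the value streams and folds each tuple with a running product. A stdlib-only equivalent for plain float iterables (`multiplied_plain` is an illustrative name; gpiozero's `_normalize` additionally unwraps device objects into their `.values` streams):

```python
from functools import reduce
from operator import mul

def multiplied_plain(*values):
    # Same zipped-product idea as above, for ordinary iterables of floats.
    for v in zip(*values):
        yield reduce(mul, v, 1)

pot1 = [0.5, 0.25, 1.0]
pot2 = [0.5, 1.0, 0.2]
print(list(multiplied_plain(pot1, pot2)))  # [0.25, 0.25, 0.2]
```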
def parse_veto_definer(veto_def_filename):
""" Parse a veto definer file from the filename and return a dictionary
indexed by ifo and veto definer category level.
Parameters
----------
veto_def_filename: str
The path to the veto definer file
    Returns
    -------
parsed_definition: dict
Returns a dictionary first indexed by ifo, then category level, and
finally a list of veto definitions.
"""
from glue.ligolw import table, lsctables, utils as ligolw_utils
from glue.ligolw.ligolw import LIGOLWContentHandler as h
lsctables.use_in(h)
indoc = ligolw_utils.load_filename(veto_def_filename, False,
contenthandler=h)
veto_table = table.get_table(indoc, 'veto_definer')
ifo = veto_table.getColumnByName('ifo')
name = veto_table.getColumnByName('name')
version = numpy.array(veto_table.getColumnByName('version'))
category = numpy.array(veto_table.getColumnByName('category'))
start = numpy.array(veto_table.getColumnByName('start_time'))
end = numpy.array(veto_table.getColumnByName('end_time'))
start_pad = numpy.array(veto_table.getColumnByName('start_pad'))
end_pad = numpy.array(veto_table.getColumnByName('end_pad'))
data = {}
for i in range(len(veto_table)):
if ifo[i] not in data:
data[ifo[i]] = {}
# The veto-definer categories are weird! Hardware injections are stored
# in "3" and numbers above that are bumped up by one (although not
# often used any more). So we remap 3 to H and anything above 3 to
# N-1. 2 and 1 correspond to 2 and 1 (YAY!)
if category[i] > 3:
curr_cat = "CAT_{}".format(category[i]-1)
elif category[i] == 3:
curr_cat = "CAT_H"
else:
curr_cat = "CAT_{}".format(category[i])
if curr_cat not in data[ifo[i]]:
data[ifo[i]][curr_cat] = []
veto_info = {'name': name[i],
'version': version[i],
'start': start[i],
'end': end[i],
'start_pad': start_pad[i],
'end_pad': end_pad[i],
}
data[ifo[i]][curr_cat].append(veto_info)
return data | def function[parse_veto_definer, parameter[veto_def_filename]]:
constant[ Parse a veto definer file from the filename and return a dictionary
indexed by ifo and veto definer category level.
Parameters
----------
veto_def_filename: str
The path to the veto definer file
    Returns
    -------
parsed_definition: dict
Returns a dictionary first indexed by ifo, then category level, and
finally a list of veto definitions.
]
from relative_module[glue.ligolw] import module[table], module[lsctables], module[utils]
from relative_module[glue.ligolw.ligolw] import module[LIGOLWContentHandler]
call[name[lsctables].use_in, parameter[name[h]]]
variable[indoc] assign[=] call[name[ligolw_utils].load_filename, parameter[name[veto_def_filename], constant[False]]]
variable[veto_table] assign[=] call[name[table].get_table, parameter[name[indoc], constant[veto_definer]]]
variable[ifo] assign[=] call[name[veto_table].getColumnByName, parameter[constant[ifo]]]
variable[name] assign[=] call[name[veto_table].getColumnByName, parameter[constant[name]]]
variable[version] assign[=] call[name[numpy].array, parameter[call[name[veto_table].getColumnByName, parameter[constant[version]]]]]
variable[category] assign[=] call[name[numpy].array, parameter[call[name[veto_table].getColumnByName, parameter[constant[category]]]]]
variable[start] assign[=] call[name[numpy].array, parameter[call[name[veto_table].getColumnByName, parameter[constant[start_time]]]]]
variable[end] assign[=] call[name[numpy].array, parameter[call[name[veto_table].getColumnByName, parameter[constant[end_time]]]]]
variable[start_pad] assign[=] call[name[numpy].array, parameter[call[name[veto_table].getColumnByName, parameter[constant[start_pad]]]]]
variable[end_pad] assign[=] call[name[numpy].array, parameter[call[name[veto_table].getColumnByName, parameter[constant[end_pad]]]]]
variable[data] assign[=] dictionary[[], []]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[veto_table]]]]]] begin[:]
if compare[call[name[ifo]][name[i]] <ast.NotIn object at 0x7da2590d7190> name[data]] begin[:]
call[name[data]][call[name[ifo]][name[i]]] assign[=] dictionary[[], []]
if compare[call[name[category]][name[i]] greater[>] constant[3]] begin[:]
variable[curr_cat] assign[=] call[constant[CAT_{}].format, parameter[binary_operation[call[name[category]][name[i]] - constant[1]]]]
if compare[name[curr_cat] <ast.NotIn object at 0x7da2590d7190> call[name[data]][call[name[ifo]][name[i]]]] begin[:]
call[call[name[data]][call[name[ifo]][name[i]]]][name[curr_cat]] assign[=] list[[]]
variable[veto_info] assign[=] dictionary[[<ast.Constant object at 0x7da2054a5000>, <ast.Constant object at 0x7da2054a4100>, <ast.Constant object at 0x7da2054a7700>, <ast.Constant object at 0x7da2054a7bb0>, <ast.Constant object at 0x7da2054a7550>, <ast.Constant object at 0x7da2054a5660>], [<ast.Subscript object at 0x7da2054a5c90>, <ast.Subscript object at 0x7da2054a4ac0>, <ast.Subscript object at 0x7da2054a7430>, <ast.Subscript object at 0x7da2054a4760>, <ast.Subscript object at 0x7da2054a5bd0>, <ast.Subscript object at 0x7da2054a5720>]]
call[call[call[name[data]][call[name[ifo]][name[i]]]][name[curr_cat]].append, parameter[name[veto_info]]]
return[name[data]] | keyword[def] identifier[parse_veto_definer] ( identifier[veto_def_filename] ):
literal[string]
keyword[from] identifier[glue] . identifier[ligolw] keyword[import] identifier[table] , identifier[lsctables] , identifier[utils] keyword[as] identifier[ligolw_utils]
keyword[from] identifier[glue] . identifier[ligolw] . identifier[ligolw] keyword[import] identifier[LIGOLWContentHandler] keyword[as] identifier[h]
identifier[lsctables] . identifier[use_in] ( identifier[h] )
identifier[indoc] = identifier[ligolw_utils] . identifier[load_filename] ( identifier[veto_def_filename] , keyword[False] ,
identifier[contenthandler] = identifier[h] )
identifier[veto_table] = identifier[table] . identifier[get_table] ( identifier[indoc] , literal[string] )
identifier[ifo] = identifier[veto_table] . identifier[getColumnByName] ( literal[string] )
identifier[name] = identifier[veto_table] . identifier[getColumnByName] ( literal[string] )
identifier[version] = identifier[numpy] . identifier[array] ( identifier[veto_table] . identifier[getColumnByName] ( literal[string] ))
identifier[category] = identifier[numpy] . identifier[array] ( identifier[veto_table] . identifier[getColumnByName] ( literal[string] ))
identifier[start] = identifier[numpy] . identifier[array] ( identifier[veto_table] . identifier[getColumnByName] ( literal[string] ))
identifier[end] = identifier[numpy] . identifier[array] ( identifier[veto_table] . identifier[getColumnByName] ( literal[string] ))
identifier[start_pad] = identifier[numpy] . identifier[array] ( identifier[veto_table] . identifier[getColumnByName] ( literal[string] ))
identifier[end_pad] = identifier[numpy] . identifier[array] ( identifier[veto_table] . identifier[getColumnByName] ( literal[string] ))
identifier[data] ={}
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[veto_table] )):
keyword[if] identifier[ifo] [ identifier[i] ] keyword[not] keyword[in] identifier[data] :
identifier[data] [ identifier[ifo] [ identifier[i] ]]={}
keyword[if] identifier[category] [ identifier[i] ]> literal[int] :
identifier[curr_cat] = literal[string] . identifier[format] ( identifier[category] [ identifier[i] ]- literal[int] )
keyword[elif] identifier[category] [ identifier[i] ]== literal[int] :
identifier[curr_cat] = literal[string]
keyword[else] :
identifier[curr_cat] = literal[string] . identifier[format] ( identifier[category] [ identifier[i] ])
keyword[if] identifier[curr_cat] keyword[not] keyword[in] identifier[data] [ identifier[ifo] [ identifier[i] ]]:
identifier[data] [ identifier[ifo] [ identifier[i] ]][ identifier[curr_cat] ]=[]
identifier[veto_info] ={ literal[string] : identifier[name] [ identifier[i] ],
literal[string] : identifier[version] [ identifier[i] ],
literal[string] : identifier[start] [ identifier[i] ],
literal[string] : identifier[end] [ identifier[i] ],
literal[string] : identifier[start_pad] [ identifier[i] ],
literal[string] : identifier[end_pad] [ identifier[i] ],
}
identifier[data] [ identifier[ifo] [ identifier[i] ]][ identifier[curr_cat] ]. identifier[append] ( identifier[veto_info] )
keyword[return] identifier[data] | def parse_veto_definer(veto_def_filename):
""" Parse a veto definer file from the filename and return a dictionary
indexed by ifo and veto definer category level.
Parameters
----------
veto_def_filename: str
The path to the veto definer file
    Returns
    -------
parsed_definition: dict
Returns a dictionary first indexed by ifo, then category level, and
finally a list of veto definitions.
"""
from glue.ligolw import table, lsctables, utils as ligolw_utils
from glue.ligolw.ligolw import LIGOLWContentHandler as h
lsctables.use_in(h)
indoc = ligolw_utils.load_filename(veto_def_filename, False, contenthandler=h)
veto_table = table.get_table(indoc, 'veto_definer')
ifo = veto_table.getColumnByName('ifo')
name = veto_table.getColumnByName('name')
version = numpy.array(veto_table.getColumnByName('version'))
category = numpy.array(veto_table.getColumnByName('category'))
start = numpy.array(veto_table.getColumnByName('start_time'))
end = numpy.array(veto_table.getColumnByName('end_time'))
start_pad = numpy.array(veto_table.getColumnByName('start_pad'))
end_pad = numpy.array(veto_table.getColumnByName('end_pad'))
data = {}
for i in range(len(veto_table)):
if ifo[i] not in data:
data[ifo[i]] = {} # depends on [control=['if'], data=['data']]
# The veto-definer categories are weird! Hardware injections are stored
# in "3" and numbers above that are bumped up by one (although not
# often used any more). So we remap 3 to H and anything above 3 to
# N-1. 2 and 1 correspond to 2 and 1 (YAY!)
if category[i] > 3:
curr_cat = 'CAT_{}'.format(category[i] - 1) # depends on [control=['if'], data=[]]
elif category[i] == 3:
curr_cat = 'CAT_H' # depends on [control=['if'], data=[]]
else:
curr_cat = 'CAT_{}'.format(category[i])
if curr_cat not in data[ifo[i]]:
data[ifo[i]][curr_cat] = [] # depends on [control=['if'], data=['curr_cat']]
veto_info = {'name': name[i], 'version': version[i], 'start': start[i], 'end': end[i], 'start_pad': start_pad[i], 'end_pad': end_pad[i]}
data[ifo[i]][curr_cat].append(veto_info) # depends on [control=['for'], data=['i']]
return data |
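The returned structure nests `ifo -> category -> list of veto dicts`. A sketch of walking it, with hypothetical veto names and GPS times standing in for real parsed output:

```python
# Hypothetical parsed output, to show the shape returned above.
data = {
    "H1": {
        "CAT_1": [{"name": "H1_EXAMPLE_VETO", "version": 1,
                   "start": 1126051217, "end": 1137254417,
                   "start_pad": -8, "end_pad": 8}],
        "CAT_H": [],  # hardware-injection category (remapped from 3)
    }
}

for ifo, categories in data.items():
    for cat, vetoes in categories.items():
        for v in vetoes:
            # apply the pads to get the effective veto span
            print(ifo, cat, v["name"], v["start"] + v["start_pad"], v["end"] + v["end_pad"])
```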
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
from pandas import DataFrame
columns = ['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds', 'nanoseconds']
hasnans = self._hasnans
if hasnans:
def f(x):
if isna(x):
return [np.nan] * len(columns)
return x.components
else:
def f(x):
return x.components
result = DataFrame([f(x) for x in self], columns=columns)
if not hasnans:
result = result.astype('int64')
return result | def function[components, parameter[self]]:
constant[
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
]
from relative_module[pandas] import module[DataFrame]
variable[columns] assign[=] list[[<ast.Constant object at 0x7da1b1ec5180>, <ast.Constant object at 0x7da1b1ec4130>, <ast.Constant object at 0x7da1b1ec4430>, <ast.Constant object at 0x7da1b1ec7550>, <ast.Constant object at 0x7da1b1ec68f0>, <ast.Constant object at 0x7da1b1ec5a50>, <ast.Constant object at 0x7da1b1ec7760>]]
variable[hasnans] assign[=] name[self]._hasnans
if name[hasnans] begin[:]
def function[f, parameter[x]]:
if call[name[isna], parameter[name[x]]] begin[:]
return[binary_operation[list[[<ast.Attribute object at 0x7da1b1ec50c0>]] * call[name[len], parameter[name[columns]]]]]
return[name[x].components]
variable[result] assign[=] call[name[DataFrame], parameter[<ast.ListComp object at 0x7da18f723c10>]]
if <ast.UnaryOp object at 0x7da18f721d80> begin[:]
variable[result] assign[=] call[name[result].astype, parameter[constant[int64]]]
return[name[result]] | keyword[def] identifier[components] ( identifier[self] ):
literal[string]
keyword[from] identifier[pandas] keyword[import] identifier[DataFrame]
identifier[columns] =[ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ]
identifier[hasnans] = identifier[self] . identifier[_hasnans]
keyword[if] identifier[hasnans] :
keyword[def] identifier[f] ( identifier[x] ):
keyword[if] identifier[isna] ( identifier[x] ):
keyword[return] [ identifier[np] . identifier[nan] ]* identifier[len] ( identifier[columns] )
keyword[return] identifier[x] . identifier[components]
keyword[else] :
keyword[def] identifier[f] ( identifier[x] ):
keyword[return] identifier[x] . identifier[components]
identifier[result] = identifier[DataFrame] ([ identifier[f] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[self] ], identifier[columns] = identifier[columns] )
keyword[if] keyword[not] identifier[hasnans] :
identifier[result] = identifier[result] . identifier[astype] ( literal[string] )
keyword[return] identifier[result] | def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
from pandas import DataFrame
columns = ['days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds', 'nanoseconds']
hasnans = self._hasnans
if hasnans:
def f(x):
if isna(x):
return [np.nan] * len(columns) # depends on [control=['if'], data=[]]
return x.components # depends on [control=['if'], data=[]]
else:
def f(x):
return x.components
result = DataFrame([f(x) for x in self], columns=columns)
if not hasnans:
result = result.astype('int64') # depends on [control=['if'], data=[]]
return result |
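This matches the pandas `components` accessor on timedelta containers; a short usage example, assuming an ordinary pandas install (the NaT entry exercises the `hasnans` branch above, which keeps the frame as floats):

```python
import pandas as pd

tdi = pd.to_timedelta(["1 days 02:03:04.000005", pd.NaT, "0 days 00:00:01"])
df = tdi.components  # NaT row becomes a row of NaN, so the dtype stays float
print(df[["days", "hours", "minutes", "seconds"]])
```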
def associations(self, association_resource):
"""Retrieve Association for this resource of the type in association_resource.
This method will return all *resources* (group, indicators, task, victims, etc) for this
resource that are associated with the provided association resource_type.
**Example Endpoints URI's**
+--------+----------------------------------------------------------------------+
| Method | API Endpoint URI's |
+========+======================================================================+
| {base} | /v2/{resourceClass}/{resourceType}/{resourceId} |
+--------+----------------------------------------------------------------------+
| GET | {base}/{assoc resourceClass}/{assoc resourceType} |
+--------+----------------------------------------------------------------------+
| POST | {base}/{assoc resourceClass}/{assoc resourceType}/{assoc resourceId} |
+--------+----------------------------------------------------------------------+
| DELETE | {base}/{assoc resourceClass}/{assoc resourceType}/{assoc resourceId} |
+--------+----------------------------------------------------------------------+
+ resourceClass - Groups/Indicators
+ resourceType - Adversary, Incident, etc / Address, EmailAddress, etc
+ resourceId - Group Id / Indicator Value
Args:
association_resource (Resource Instance): A resource object with optional resource_id.
Return:
(instance): A copy of this resource instance cleaned and updated for associations.
"""
resource = self.copy()
resource._request_entity = association_resource.api_entity
resource._request_uri = '{}/{}'.format(
resource._request_uri, association_resource.request_uri
)
return resource | def function[associations, parameter[self, association_resource]]:
constant[Retrieve Association for this resource of the type in association_resource.
This method will return all *resources* (group, indicators, task, victims, etc) for this
resource that are associated with the provided association resource_type.
**Example Endpoints URI's**
+--------+----------------------------------------------------------------------+
| Method | API Endpoint URI's |
+========+======================================================================+
| {base} | /v2/{resourceClass}/{resourceType}/{resourceId} |
+--------+----------------------------------------------------------------------+
| GET | {base}/{assoc resourceClass}/{assoc resourceType} |
+--------+----------------------------------------------------------------------+
| POST | {base}/{assoc resourceClass}/{assoc resourceType}/{assoc resourceId} |
+--------+----------------------------------------------------------------------+
| DELETE | {base}/{assoc resourceClass}/{assoc resourceType}/{assoc resourceId} |
+--------+----------------------------------------------------------------------+
+ resourceClass - Groups/Indicators
+ resourceType - Adversary, Incident, etc / Address, EmailAddress, etc
+ resourceId - Group Id / Indicator Value
Args:
association_resource (Resource Instance): A resource object with optional resource_id.
Return:
(instance): A copy of this resource instance cleaned and updated for associations.
]
variable[resource] assign[=] call[name[self].copy, parameter[]]
name[resource]._request_entity assign[=] name[association_resource].api_entity
name[resource]._request_uri assign[=] call[constant[{}/{}].format, parameter[name[resource]._request_uri, name[association_resource].request_uri]]
return[name[resource]] | keyword[def] identifier[associations] ( identifier[self] , identifier[association_resource] ):
literal[string]
identifier[resource] = identifier[self] . identifier[copy] ()
identifier[resource] . identifier[_request_entity] = identifier[association_resource] . identifier[api_entity]
identifier[resource] . identifier[_request_uri] = literal[string] . identifier[format] (
identifier[resource] . identifier[_request_uri] , identifier[association_resource] . identifier[request_uri]
)
keyword[return] identifier[resource] | def associations(self, association_resource):
"""Retrieve Association for this resource of the type in association_resource.
This method will return all *resources* (group, indicators, task, victims, etc) for this
resource that are associated with the provided association resource_type.
**Example Endpoints URI's**
+--------+----------------------------------------------------------------------+
| Method | API Endpoint URI's |
+========+======================================================================+
| {base} | /v2/{resourceClass}/{resourceType}/{resourceId} |
+--------+----------------------------------------------------------------------+
| GET | {base}/{assoc resourceClass}/{assoc resourceType} |
+--------+----------------------------------------------------------------------+
| POST | {base}/{assoc resourceClass}/{assoc resourceType}/{assoc resourceId} |
+--------+----------------------------------------------------------------------+
| DELETE | {base}/{assoc resourceClass}/{assoc resourceType}/{assoc resourceId} |
+--------+----------------------------------------------------------------------+
+ resourceClass - Groups/Indicators
+ resourceType - Adversary, Incident, etc / Address, EmailAddress, etc
+ resourceId - Group Id / Indicator Value
Args:
association_resource (Resource Instance): A resource object with optional resource_id.
Return:
(instance): A copy of this resource instance cleaned and updated for associations.
"""
resource = self.copy()
resource._request_entity = association_resource.api_entity
resource._request_uri = '{}/{}'.format(resource._request_uri, association_resource.request_uri)
return resource |
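`associations` only rewrites the request entity and concatenates the two request URIs; a plain-string sketch of the URI composition, with hypothetical fragments standing in for the resource objects:

```python
# Hypothetical URI fragments: an incident group asking for its
# associated address indicators.
base_uri = "groups/incidents/12345"      # resource._request_uri
assoc_uri = "indicators/addresses"       # association_resource.request_uri

request_uri = "{}/{}".format(base_uri, assoc_uri)
print(request_uri)  # groups/incidents/12345/indicators/addresses
```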
def _parse_hosts_inventory(self, inventory_path):
"""
Read all the available hosts inventory information into one big list
and parse it.
"""
hosts_contents = []
if os.path.isdir(inventory_path):
self.log.debug("Inventory path {} is a dir. Looking for inventory files in that dir.".format(inventory_path))
for fname in os.listdir(inventory_path):
# Skip .git folder
if fname == '.git':
continue
path = os.path.join(inventory_path, fname)
if os.path.isdir(path):
continue
with codecs.open(path, 'r', encoding='utf8') as f:
hosts_contents += f.readlines()
else:
self.log.debug("Inventory path {} is a file. Reading as inventory.".format(inventory_path))
with codecs.open(inventory_path, 'r', encoding='utf8') as f:
hosts_contents = f.readlines()
# Parse inventory and apply it to the hosts
hosts_parser = parser.HostsParser(hosts_contents)
for hostname, key_values in hosts_parser.hosts.items():
self.update_host(hostname, key_values) | def function[_parse_hosts_inventory, parameter[self, inventory_path]]:
constant[
Read all the available hosts inventory information into one big list
and parse it.
]
variable[hosts_contents] assign[=] list[[]]
if call[name[os].path.isdir, parameter[name[inventory_path]]] begin[:]
call[name[self].log.debug, parameter[call[constant[Inventory path {} is a dir. Looking for inventory files in that dir.].format, parameter[name[inventory_path]]]]]
for taget[name[fname]] in starred[call[name[os].listdir, parameter[name[inventory_path]]]] begin[:]
if compare[name[fname] equal[==] constant[.git]] begin[:]
continue
variable[path] assign[=] call[name[os].path.join, parameter[name[inventory_path], name[fname]]]
if call[name[os].path.isdir, parameter[name[path]]] begin[:]
continue
with call[name[codecs].open, parameter[name[path], constant[r]]] begin[:]
<ast.AugAssign object at 0x7da1b1d22830>
variable[hosts_parser] assign[=] call[name[parser].HostsParser, parameter[name[hosts_contents]]]
for taget[tuple[[<ast.Name object at 0x7da1b1d225c0>, <ast.Name object at 0x7da1b1d20d30>]]] in starred[call[name[hosts_parser].hosts.items, parameter[]]] begin[:]
call[name[self].update_host, parameter[name[hostname], name[key_values]]] | keyword[def] identifier[_parse_hosts_inventory] ( identifier[self] , identifier[inventory_path] ):
literal[string]
identifier[hosts_contents] =[]
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[inventory_path] ):
identifier[self] . identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[inventory_path] ))
keyword[for] identifier[fname] keyword[in] identifier[os] . identifier[listdir] ( identifier[inventory_path] ):
keyword[if] identifier[fname] == literal[string] :
keyword[continue]
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[inventory_path] , identifier[fname] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ):
keyword[continue]
keyword[with] identifier[codecs] . identifier[open] ( identifier[path] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[hosts_contents] += identifier[f] . identifier[readlines] ()
keyword[else] :
identifier[self] . identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[inventory_path] ))
keyword[with] identifier[codecs] . identifier[open] ( identifier[inventory_path] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[hosts_contents] = identifier[f] . identifier[readlines] ()
identifier[hosts_parser] = identifier[parser] . identifier[HostsParser] ( identifier[hosts_contents] )
keyword[for] identifier[hostname] , identifier[key_values] keyword[in] identifier[hosts_parser] . identifier[hosts] . identifier[items] ():
identifier[self] . identifier[update_host] ( identifier[hostname] , identifier[key_values] ) | def _parse_hosts_inventory(self, inventory_path):
"""
Read all the available hosts inventory information into one big list
and parse it.
"""
hosts_contents = []
if os.path.isdir(inventory_path):
self.log.debug('Inventory path {} is a dir. Looking for inventory files in that dir.'.format(inventory_path))
for fname in os.listdir(inventory_path):
# Skip .git folder
if fname == '.git':
continue # depends on [control=['if'], data=[]]
path = os.path.join(inventory_path, fname)
if os.path.isdir(path):
continue # depends on [control=['if'], data=[]]
with codecs.open(path, 'r', encoding='utf8') as f:
hosts_contents += f.readlines() # depends on [control=['with'], data=['f']] # depends on [control=['for'], data=['fname']] # depends on [control=['if'], data=[]]
else:
self.log.debug('Inventory path {} is a file. Reading as inventory.'.format(inventory_path))
with codecs.open(inventory_path, 'r', encoding='utf8') as f:
hosts_contents = f.readlines() # depends on [control=['with'], data=['f']]
# Parse inventory and apply it to the hosts
hosts_parser = parser.HostsParser(hosts_contents)
for (hostname, key_values) in hosts_parser.hosts.items():
self.update_host(hostname, key_values) # depends on [control=['for'], data=[]] |
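The method accepts either a single inventory file or a directory of them; a self-contained sketch that writes a minimal Ansible-style hosts file to a temp dir and reads it back the same way (file content is illustrative):

```python
import codecs
import os
import tempfile

tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, "hosts")
with codecs.open(path, "w", encoding="utf8") as f:
    f.write("[web]\nweb01.example.com ansible_ssh_host=10.0.0.11\n")

hosts_contents = []
if os.path.isdir(tmpdir):                 # mirrors the directory branch above
    for fname in os.listdir(tmpdir):
        sub = os.path.join(tmpdir, fname)
        if fname == ".git" or os.path.isdir(sub):
            continue
        with codecs.open(sub, "r", encoding="utf8") as f:
            hosts_contents += f.readlines()
print(hosts_contents)  # ['[web]\n', 'web01.example.com ansible_ssh_host=10.0.0.11\n']
```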
def open_file(self, path, line=None):
"""
Creates a new GenericCodeEdit, opens the requested file and adds it
to the tab widget.
:param path: Path of the file to open
:return The opened editor if open succeeded.
"""
editor = None
if path:
interpreter, pyserver, args = self._get_backend_parameters()
editor = self.tabWidget.open_document(
path, None, interpreter=interpreter, server_script=pyserver,
args=args)
if editor:
self.setup_editor(editor)
self.recent_files_manager.open_file(path)
self.menu_recents.update_actions()
if line is not None:
TextHelper(self.tabWidget.current_widget()).goto_line(line)
return editor | def function[open_file, parameter[self, path, line]]:
constant[
Creates a new GenericCodeEdit, opens the requested file and adds it
to the tab widget.
:param path: Path of the file to open
    :return: The opened editor if opening succeeded.
]
variable[editor] assign[=] constant[None]
if name[path] begin[:]
<ast.Tuple object at 0x7da1b00cb2b0> assign[=] call[name[self]._get_backend_parameters, parameter[]]
variable[editor] assign[=] call[name[self].tabWidget.open_document, parameter[name[path], constant[None]]]
if name[editor] begin[:]
call[name[self].setup_editor, parameter[name[editor]]]
call[name[self].recent_files_manager.open_file, parameter[name[path]]]
call[name[self].menu_recents.update_actions, parameter[]]
if compare[name[line] is_not constant[None]] begin[:]
call[call[name[TextHelper], parameter[call[name[self].tabWidget.current_widget, parameter[]]]].goto_line, parameter[name[line]]]
return[name[editor]] | keyword[def] identifier[open_file] ( identifier[self] , identifier[path] , identifier[line] = keyword[None] ):
literal[string]
identifier[editor] = keyword[None]
keyword[if] identifier[path] :
identifier[interpreter] , identifier[pyserver] , identifier[args] = identifier[self] . identifier[_get_backend_parameters] ()
identifier[editor] = identifier[self] . identifier[tabWidget] . identifier[open_document] (
identifier[path] , keyword[None] , identifier[interpreter] = identifier[interpreter] , identifier[server_script] = identifier[pyserver] ,
identifier[args] = identifier[args] )
keyword[if] identifier[editor] :
identifier[self] . identifier[setup_editor] ( identifier[editor] )
identifier[self] . identifier[recent_files_manager] . identifier[open_file] ( identifier[path] )
identifier[self] . identifier[menu_recents] . identifier[update_actions] ()
keyword[if] identifier[line] keyword[is] keyword[not] keyword[None] :
identifier[TextHelper] ( identifier[self] . identifier[tabWidget] . identifier[current_widget] ()). identifier[goto_line] ( identifier[line] )
keyword[return] identifier[editor] | def open_file(self, path, line=None):
"""
Creates a new GenericCodeEdit, opens the requested file and adds it
to the tab widget.
:param path: Path of the file to open
    :return: The opened editor if opening succeeded.
"""
editor = None
if path:
(interpreter, pyserver, args) = self._get_backend_parameters()
editor = self.tabWidget.open_document(path, None, interpreter=interpreter, server_script=pyserver, args=args)
if editor:
self.setup_editor(editor) # depends on [control=['if'], data=[]]
self.recent_files_manager.open_file(path)
self.menu_recents.update_actions() # depends on [control=['if'], data=[]]
if line is not None:
TextHelper(self.tabWidget.current_widget()).goto_line(line) # depends on [control=['if'], data=['line']]
return editor |
def _get_focused_item(self):
""" Returns the currently focused item """
focused_model = self._selection.focus
if not focused_model:
return None
return self.canvas.get_view_for_model(focused_model) | def function[_get_focused_item, parameter[self]]:
constant[ Returns the currently focused item ]
variable[focused_model] assign[=] name[self]._selection.focus
if <ast.UnaryOp object at 0x7da1b1a5f7f0> begin[:]
return[constant[None]]
return[call[name[self].canvas.get_view_for_model, parameter[name[focused_model]]]] | keyword[def] identifier[_get_focused_item] ( identifier[self] ):
literal[string]
identifier[focused_model] = identifier[self] . identifier[_selection] . identifier[focus]
keyword[if] keyword[not] identifier[focused_model] :
keyword[return] keyword[None]
keyword[return] identifier[self] . identifier[canvas] . identifier[get_view_for_model] ( identifier[focused_model] ) | def _get_focused_item(self):
""" Returns the currently focused item """
focused_model = self._selection.focus
if not focused_model:
return None # depends on [control=['if'], data=[]]
return self.canvas.get_view_for_model(focused_model) |
def euler(self):
"""TODO DEPRECATE THIS?"""
e_xyz = transformations.euler_from_matrix(self.rotation, 'sxyz')
return np.array([180.0 / np.pi * a for a in e_xyz]) | def function[euler, parameter[self]]:
constant[TODO DEPRECATE THIS?]
variable[e_xyz] assign[=] call[name[transformations].euler_from_matrix, parameter[name[self].rotation, constant[sxyz]]]
return[call[name[np].array, parameter[<ast.ListComp object at 0x7da1b12b5870>]]] | keyword[def] identifier[euler] ( identifier[self] ):
literal[string]
identifier[e_xyz] = identifier[transformations] . identifier[euler_from_matrix] ( identifier[self] . identifier[rotation] , literal[string] )
keyword[return] identifier[np] . identifier[array] ([ literal[int] / identifier[np] . identifier[pi] * identifier[a] keyword[for] identifier[a] keyword[in] identifier[e_xyz] ]) | def euler(self):
"""TODO DEPRECATE THIS?"""
e_xyz = transformations.euler_from_matrix(self.rotation, 'sxyz')
return np.array([180.0 / np.pi * a for a in e_xyz]) |
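`euler` just converts the 'sxyz' Euler angles from radians to degrees; a numpy check on a known rotation, where the matrix-to-angle step is done by hand for a pure z-rotation (standing in for `transformations.euler_from_matrix`):

```python
import numpy as np

# 90-degree rotation about z; for a pure z-rotation the 'sxyz' angles are
# (0, 0, atan2(R[1,0], R[0,0])).
Rz = np.array([[0.0, -1.0, 0.0],
               [1.0,  0.0, 0.0],
               [0.0,  0.0, 1.0]])
e_xyz = (0.0, 0.0, np.arctan2(Rz[1, 0], Rz[0, 0]))
print(np.array([180.0 / np.pi * a for a in e_xyz]))  # [ 0.  0. 90.]
```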
def h2e(string):
"""
Convert hangul to english. ('한영타' -> 'gksdudxk')
    :param string: hangul that actually represents an English string
:return: converted english string
"""
result = []
for c in string:
ccode = ord(c) # Character code
if 0x3131 <= ccode <= 0x3163: # Hangul Compatibility Jamo
result.append(H2E_MAPPING[c])
elif 0xAC00 <= ccode <= 0xD7A3: # Hangul Syllables
ccode -= 0xAC00
# decompose hangul
lead = JA_LEAD[ccode // 588]
medi = MO[(ccode % 588) // 28]
tail = JA_TAIL[(ccode % 588) % 28]
result.append(H2E_MAPPING[lead])
result.append(H2E_MAPPING[medi])
result.append(H2E_MAPPING[tail])
else: # Rest of all characters
result.append(c)
return ''.join(result) | def function[h2e, parameter[string]]:
constant[
Convert hangul to english. ('한영타' -> 'gksdudxk')
    :param string: hangul that actually represents an English string
:return: converted english string
]
variable[result] assign[=] list[[]]
for taget[name[c]] in starred[name[string]] begin[:]
variable[ccode] assign[=] call[name[ord], parameter[name[c]]]
if compare[constant[12593] less_or_equal[<=] name[ccode]] begin[:]
call[name[result].append, parameter[call[name[H2E_MAPPING]][name[c]]]]
return[call[constant[].join, parameter[name[result]]]] | keyword[def] identifier[h2e] ( identifier[string] ):
literal[string]
identifier[result] =[]
keyword[for] identifier[c] keyword[in] identifier[string] :
identifier[ccode] = identifier[ord] ( identifier[c] )
keyword[if] literal[int] <= identifier[ccode] <= literal[int] :
identifier[result] . identifier[append] ( identifier[H2E_MAPPING] [ identifier[c] ])
keyword[elif] literal[int] <= identifier[ccode] <= literal[int] :
identifier[ccode] -= literal[int]
identifier[lead] = identifier[JA_LEAD] [ identifier[ccode] // literal[int] ]
identifier[medi] = identifier[MO] [( identifier[ccode] % literal[int] )// literal[int] ]
identifier[tail] = identifier[JA_TAIL] [( identifier[ccode] % literal[int] )% literal[int] ]
identifier[result] . identifier[append] ( identifier[H2E_MAPPING] [ identifier[lead] ])
identifier[result] . identifier[append] ( identifier[H2E_MAPPING] [ identifier[medi] ])
identifier[result] . identifier[append] ( identifier[H2E_MAPPING] [ identifier[tail] ])
keyword[else] :
identifier[result] . identifier[append] ( identifier[c] )
keyword[return] literal[string] . identifier[join] ( identifier[result] ) | def h2e(string):
"""
Convert hangul to english. ('한영타' -> 'gksdudxk')
    :param string: hangul that actually represents an English string
:return: converted english string
"""
result = []
for c in string:
ccode = ord(c) # Character code
if 12593 <= ccode <= 12643: # Hangul Compatibility Jamo
result.append(H2E_MAPPING[c]) # depends on [control=['if'], data=[]]
elif 44032 <= ccode <= 55203: # Hangul Syllables
ccode -= 44032
# decompose hangul
lead = JA_LEAD[ccode // 588]
medi = MO[ccode % 588 // 28]
tail = JA_TAIL[ccode % 588 % 28]
result.append(H2E_MAPPING[lead])
result.append(H2E_MAPPING[medi])
result.append(H2E_MAPPING[tail]) # depends on [control=['if'], data=['ccode']]
else: # Rest of all characters
result.append(c) # depends on [control=['for'], data=['c']]
return ''.join(result) |
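The decomposition arithmetic can be verified by hand; a worked check for the first syllable of the docstring's example, '한' ('ㅎ'+'ㅏ'+'ㄴ', which maps to 'gks'):

```python
ccode = ord('한') - 0xAC00           # 54620 - 44032 = 10588
lead_idx = ccode // 588              # 18 -> 'ㅎ' -> 'g'
medi_idx = (ccode % 588) // 28       # 0  -> 'ㅏ' -> 'k'
tail_idx = (ccode % 588) % 28        # 4  -> 'ㄴ' -> 's'
print(lead_idx, medi_idx, tail_idx)  # 18 0 4
```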
def run(self, **client_params):
"""
Actually creates the async job on the CARTO server
    :param client_params: To be sent to the CARTO API. See CARTO's
documentation depending on the subclass
you are using
:type client_params: kwargs
:return:
:raise: CartoException
"""
try:
self.send(self.get_collection_endpoint(),
http_method="POST",
**client_params)
except Exception as e:
raise CartoException(e) | def function[run, parameter[self]]:
constant[
Actually creates the async job on the CARTO server
    :param client_params: To be sent to the CARTO API. See CARTO's
documentation depending on the subclass
you are using
:type client_params: kwargs
:return:
:raise: CartoException
]
<ast.Try object at 0x7da1b0f616c0> | keyword[def] identifier[run] ( identifier[self] ,** identifier[client_params] ):
literal[string]
keyword[try] :
identifier[self] . identifier[send] ( identifier[self] . identifier[get_collection_endpoint] (),
identifier[http_method] = literal[string] ,
** identifier[client_params] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[CartoException] ( identifier[e] ) | def run(self, **client_params):
"""
Actually creates the async job on the CARTO server
    :param client_params: To be sent to the CARTO API. See CARTO's
documentation depending on the subclass
you are using
:type client_params: kwargs
:return:
:raise: CartoException
"""
try:
self.send(self.get_collection_endpoint(), http_method='POST', **client_params) # depends on [control=['try'], data=[]]
except Exception as e:
raise CartoException(e) # depends on [control=['except'], data=['e']] |
def filter_genes_cv_deprecated(X, Ecutoff, cvFilter):
"""Filter genes by coefficient of variance and mean.
See `filter_genes_dispersion`.
Reference: Weinreb et al. (2017).
"""
if issparse(X):
raise ValueError('Not defined for sparse input. See `filter_genes_dispersion`.')
mean_filter = np.mean(X, axis=0) > Ecutoff
var_filter = np.std(X, axis=0) / (np.mean(X, axis=0) + .0001) > cvFilter
gene_subset = np.nonzero(np.all([mean_filter, var_filter], axis=0))[0]
return gene_subset | def function[filter_genes_cv_deprecated, parameter[X, Ecutoff, cvFilter]]:
constant[Filter genes by coefficient of variance and mean.
See `filter_genes_dispersion`.
Reference: Weinreb et al. (2017).
]
if call[name[issparse], parameter[name[X]]] begin[:]
<ast.Raise object at 0x7da18f7236d0>
variable[mean_filter] assign[=] compare[call[name[np].mean, parameter[name[X]]] greater[>] name[Ecutoff]]
variable[var_filter] assign[=] compare[binary_operation[call[name[np].std, parameter[name[X]]] / binary_operation[call[name[np].mean, parameter[name[X]]] + constant[0.0001]]] greater[>] name[cvFilter]]
variable[gene_subset] assign[=] call[call[name[np].nonzero, parameter[call[name[np].all, parameter[list[[<ast.Name object at 0x7da18f721e10>, <ast.Name object at 0x7da18f720130>]]]]]]][constant[0]]
return[name[gene_subset]] | keyword[def] identifier[filter_genes_cv_deprecated] ( identifier[X] , identifier[Ecutoff] , identifier[cvFilter] ):
literal[string]
keyword[if] identifier[issparse] ( identifier[X] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[mean_filter] = identifier[np] . identifier[mean] ( identifier[X] , identifier[axis] = literal[int] )> identifier[Ecutoff]
identifier[var_filter] = identifier[np] . identifier[std] ( identifier[X] , identifier[axis] = literal[int] )/( identifier[np] . identifier[mean] ( identifier[X] , identifier[axis] = literal[int] )+ literal[int] )> identifier[cvFilter]
identifier[gene_subset] = identifier[np] . identifier[nonzero] ( identifier[np] . identifier[all] ([ identifier[mean_filter] , identifier[var_filter] ], identifier[axis] = literal[int] ))[ literal[int] ]
keyword[return] identifier[gene_subset] | def filter_genes_cv_deprecated(X, Ecutoff, cvFilter):
"""Filter genes by coefficient of variance and mean.
See `filter_genes_dispersion`.
Reference: Weinreb et al. (2017).
"""
if issparse(X):
raise ValueError('Not defined for sparse input. See `filter_genes_dispersion`.') # depends on [control=['if'], data=[]]
mean_filter = np.mean(X, axis=0) > Ecutoff
var_filter = np.std(X, axis=0) / (np.mean(X, axis=0) + 0.0001) > cvFilter
gene_subset = np.nonzero(np.all([mean_filter, var_filter], axis=0))[0]
return gene_subset |
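A quick numpy check of the two filters on a toy 3-cells-by-3-genes matrix (values chosen so only gene 0 passes both the mean and CV cutoffs):

```python
import numpy as np

# Gene 0: high mean, high CV (passes). Gene 1: high mean, low CV.
# Gene 2: mean below Ecutoff.
X = np.array([[10.0, 5.0, 0.1],
              [ 1.0, 5.1, 0.2],
              [ 4.0, 4.9, 0.0]])
Ecutoff, cvFilter = 1.0, 0.5

mean_filter = np.mean(X, axis=0) > Ecutoff
var_filter = np.std(X, axis=0) / (np.mean(X, axis=0) + .0001) > cvFilter
print(np.nonzero(np.all([mean_filter, var_filter], axis=0))[0])  # [0]
```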
def magic_read(self,infile):
"""
reads a Magic template file, puts data in a list of dictionaries
"""
# print "calling magic_read(self, infile)", infile
hold,magic_data,magic_record,magic_keys=[],[],{},[]
try:
f=open(infile,"r")
except:
return [],'bad_file'
d = f.readline()[:-1].strip('\n')
if d[0]=="s" or d[1]=="s":
delim='space'
elif d[0]=="t" or d[1]=="t":
delim='tab'
else:
print('error reading ', infile)
sys.exit()
if delim=='space':file_type=d.split()[1]
if delim=='tab':file_type=d.split('\t')[1]
if file_type=='delimited':
if delim=='space':file_type=d.split()[2]
if delim=='tab':file_type=d.split('\t')[2]
if delim=='space':line =f.readline()[:-1].split()
if delim=='tab':line =f.readline()[:-1].split('\t')
for key in line:
magic_keys.append(key)
lines=f.readlines()
for line in lines[:-1]:
line.replace('\n','')
if delim=='space':rec=line[:-1].split()
if delim=='tab':rec=line[:-1].split('\t')
hold.append(rec)
line = lines[-1].replace('\n','')
if delim=='space':rec=line[:-1].split()
if delim=='tab':rec=line.split('\t')
hold.append(rec)
for rec in hold:
magic_record={}
if len(magic_keys) != len(rec):
print("Warning: Uneven record lengths detected: ")
#print magic_keys
#print rec
for k in range(len(rec)):
magic_record[magic_keys[k]]=rec[k].strip('\n')
magic_data.append(magic_record)
magictype=file_type.lower().split("_")
Types=['er','magic','pmag','rmag']
if magictype in Types:file_type=file_type.lower()
# print "magic data from magic_read:"
# print str(magic_data)[:500] + "..."
# print "file_type", file_type
return magic_data,file_type | def function[magic_read, parameter[self, infile]]:
constant[
reads a Magic template file, puts data in a list of dictionaries
]
<ast.Tuple object at 0x7da1b01bfd00> assign[=] tuple[[<ast.List object at 0x7da1b01bff10>, <ast.List object at 0x7da1b01bfdc0>, <ast.Dict object at 0x7da1b01bfc70>, <ast.List object at 0x7da1b01bfdf0>]]
<ast.Try object at 0x7da1b01bfd30>
variable[d] assign[=] call[call[call[name[f].readline, parameter[]]][<ast.Slice object at 0x7da204567760>].strip, parameter[constant[
]]]
if <ast.BoolOp object at 0x7da2045664d0> begin[:]
variable[delim] assign[=] constant[space]
if compare[name[delim] equal[==] constant[space]] begin[:]
variable[file_type] assign[=] call[call[name[d].split, parameter[]]][constant[1]]
if compare[name[delim] equal[==] constant[tab]] begin[:]
variable[file_type] assign[=] call[call[name[d].split, parameter[constant[ ]]]][constant[1]]
if compare[name[file_type] equal[==] constant[delimited]] begin[:]
if compare[name[delim] equal[==] constant[space]] begin[:]
variable[file_type] assign[=] call[call[name[d].split, parameter[]]][constant[2]]
if compare[name[delim] equal[==] constant[tab]] begin[:]
variable[file_type] assign[=] call[call[name[d].split, parameter[constant[ ]]]][constant[2]]
if compare[name[delim] equal[==] constant[space]] begin[:]
variable[line] assign[=] call[call[call[name[f].readline, parameter[]]][<ast.Slice object at 0x7da204567c40>].split, parameter[]]
if compare[name[delim] equal[==] constant[tab]] begin[:]
variable[line] assign[=] call[call[call[name[f].readline, parameter[]]][<ast.Slice object at 0x7da204565000>].split, parameter[constant[ ]]]
for taget[name[key]] in starred[name[line]] begin[:]
call[name[magic_keys].append, parameter[name[key]]]
variable[lines] assign[=] call[name[f].readlines, parameter[]]
for taget[name[line]] in starred[call[name[lines]][<ast.Slice object at 0x7da204566fb0>]] begin[:]
call[name[line].replace, parameter[constant[
], constant[]]]
if compare[name[delim] equal[==] constant[space]] begin[:]
variable[rec] assign[=] call[call[name[line]][<ast.Slice object at 0x7da2045672b0>].split, parameter[]]
if compare[name[delim] equal[==] constant[tab]] begin[:]
variable[rec] assign[=] call[call[name[line]][<ast.Slice object at 0x7da204566290>].split, parameter[constant[ ]]]
call[name[hold].append, parameter[name[rec]]]
variable[line] assign[=] call[call[name[lines]][<ast.UnaryOp object at 0x7da204564d90>].replace, parameter[constant[
], constant[]]]
if compare[name[delim] equal[==] constant[space]] begin[:]
variable[rec] assign[=] call[call[name[line]][<ast.Slice object at 0x7da204565330>].split, parameter[]]
if compare[name[delim] equal[==] constant[tab]] begin[:]
variable[rec] assign[=] call[name[line].split, parameter[constant[ ]]]
call[name[hold].append, parameter[name[rec]]]
for taget[name[rec]] in starred[name[hold]] begin[:]
variable[magic_record] assign[=] dictionary[[], []]
if compare[call[name[len], parameter[name[magic_keys]]] not_equal[!=] call[name[len], parameter[name[rec]]]] begin[:]
call[name[print], parameter[constant[Warning: Uneven record lengths detected: ]]]
for taget[name[k]] in starred[call[name[range], parameter[call[name[len], parameter[name[rec]]]]]] begin[:]
call[name[magic_record]][call[name[magic_keys]][name[k]]] assign[=] call[call[name[rec]][name[k]].strip, parameter[constant[
]]]
call[name[magic_data].append, parameter[name[magic_record]]]
variable[magictype] assign[=] call[call[name[file_type].lower, parameter[]].split, parameter[constant[_]]]
variable[Types] assign[=] list[[<ast.Constant object at 0x7da1b05e1480>, <ast.Constant object at 0x7da1b05e14b0>, <ast.Constant object at 0x7da1b05e14e0>, <ast.Constant object at 0x7da1b05e1510>]]
if compare[name[magictype] in name[Types]] begin[:]
variable[file_type] assign[=] call[name[file_type].lower, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b05e18a0>, <ast.Name object at 0x7da1b05e18d0>]]] | keyword[def] identifier[magic_read] ( identifier[self] , identifier[infile] ):
literal[string]
identifier[hold] , identifier[magic_data] , identifier[magic_record] , identifier[magic_keys] =[],[],{},[]
keyword[try] :
identifier[f] = identifier[open] ( identifier[infile] , literal[string] )
keyword[except] :
keyword[return] [], literal[string]
identifier[d] = identifier[f] . identifier[readline] ()[:- literal[int] ]. identifier[strip] ( literal[string] )
keyword[if] identifier[d] [ literal[int] ]== literal[string] keyword[or] identifier[d] [ literal[int] ]== literal[string] :
identifier[delim] = literal[string]
keyword[elif] identifier[d] [ literal[int] ]== literal[string] keyword[or] identifier[d] [ literal[int] ]== literal[string] :
identifier[delim] = literal[string]
keyword[else] :
identifier[print] ( literal[string] , identifier[infile] )
identifier[sys] . identifier[exit] ()
keyword[if] identifier[delim] == literal[string] : identifier[file_type] = identifier[d] . identifier[split] ()[ literal[int] ]
keyword[if] identifier[delim] == literal[string] : identifier[file_type] = identifier[d] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[if] identifier[file_type] == literal[string] :
keyword[if] identifier[delim] == literal[string] : identifier[file_type] = identifier[d] . identifier[split] ()[ literal[int] ]
keyword[if] identifier[delim] == literal[string] : identifier[file_type] = identifier[d] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[if] identifier[delim] == literal[string] : identifier[line] = identifier[f] . identifier[readline] ()[:- literal[int] ]. identifier[split] ()
keyword[if] identifier[delim] == literal[string] : identifier[line] = identifier[f] . identifier[readline] ()[:- literal[int] ]. identifier[split] ( literal[string] )
keyword[for] identifier[key] keyword[in] identifier[line] :
identifier[magic_keys] . identifier[append] ( identifier[key] )
identifier[lines] = identifier[f] . identifier[readlines] ()
keyword[for] identifier[line] keyword[in] identifier[lines] [:- literal[int] ]:
identifier[line] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[delim] == literal[string] : identifier[rec] = identifier[line] [:- literal[int] ]. identifier[split] ()
keyword[if] identifier[delim] == literal[string] : identifier[rec] = identifier[line] [:- literal[int] ]. identifier[split] ( literal[string] )
identifier[hold] . identifier[append] ( identifier[rec] )
identifier[line] = identifier[lines] [- literal[int] ]. identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[delim] == literal[string] : identifier[rec] = identifier[line] [:- literal[int] ]. identifier[split] ()
keyword[if] identifier[delim] == literal[string] : identifier[rec] = identifier[line] . identifier[split] ( literal[string] )
identifier[hold] . identifier[append] ( identifier[rec] )
keyword[for] identifier[rec] keyword[in] identifier[hold] :
identifier[magic_record] ={}
keyword[if] identifier[len] ( identifier[magic_keys] )!= identifier[len] ( identifier[rec] ):
identifier[print] ( literal[string] )
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[len] ( identifier[rec] )):
identifier[magic_record] [ identifier[magic_keys] [ identifier[k] ]]= identifier[rec] [ identifier[k] ]. identifier[strip] ( literal[string] )
identifier[magic_data] . identifier[append] ( identifier[magic_record] )
identifier[magictype] = identifier[file_type] . identifier[lower] (). identifier[split] ( literal[string] )
identifier[Types] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[magictype] keyword[in] identifier[Types] : identifier[file_type] = identifier[file_type] . identifier[lower] ()
keyword[return] identifier[magic_data] , identifier[file_type] | def magic_read(self, infile):
"""
reads a Magic template file, puts data in a list of dictionaries
"""
# print "calling magic_read(self, infile)", infile
(hold, magic_data, magic_record, magic_keys) = ([], [], {}, [])
try:
f = open(infile, 'r') # depends on [control=['try'], data=[]]
except:
return ([], 'bad_file') # depends on [control=['except'], data=[]]
d = f.readline()[:-1].strip('\n')
if d[0] == 's' or d[1] == 's':
delim = 'space' # depends on [control=['if'], data=[]]
elif d[0] == 't' or d[1] == 't':
delim = 'tab' # depends on [control=['if'], data=[]]
else:
print('error reading ', infile)
sys.exit()
if delim == 'space':
file_type = d.split()[1] # depends on [control=['if'], data=[]]
if delim == 'tab':
file_type = d.split('\t')[1] # depends on [control=['if'], data=[]]
if file_type == 'delimited':
if delim == 'space':
file_type = d.split()[2] # depends on [control=['if'], data=[]]
if delim == 'tab':
file_type = d.split('\t')[2] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['file_type']]
if delim == 'space':
line = f.readline()[:-1].split() # depends on [control=['if'], data=[]]
if delim == 'tab':
line = f.readline()[:-1].split('\t') # depends on [control=['if'], data=[]]
for key in line:
magic_keys.append(key) # depends on [control=['for'], data=['key']]
lines = f.readlines()
for line in lines[:-1]:
line.replace('\n', '')
if delim == 'space':
rec = line[:-1].split() # depends on [control=['if'], data=[]]
if delim == 'tab':
rec = line[:-1].split('\t') # depends on [control=['if'], data=[]]
hold.append(rec) # depends on [control=['for'], data=['line']]
line = lines[-1].replace('\n', '')
if delim == 'space':
rec = line[:-1].split() # depends on [control=['if'], data=[]]
if delim == 'tab':
rec = line.split('\t') # depends on [control=['if'], data=[]]
hold.append(rec)
for rec in hold:
magic_record = {}
if len(magic_keys) != len(rec):
print('Warning: Uneven record lengths detected: ') # depends on [control=['if'], data=[]]
#print magic_keys
#print rec
for k in range(len(rec)):
magic_record[magic_keys[k]] = rec[k].strip('\n') # depends on [control=['for'], data=['k']]
magic_data.append(magic_record) # depends on [control=['for'], data=['rec']]
magictype = file_type.lower().split('_')
Types = ['er', 'magic', 'pmag', 'rmag']
if magictype in Types:
file_type = file_type.lower() # depends on [control=['if'], data=[]]
# print "magic data from magic_read:"
# print str(magic_data)[:500] + "..."
# print "file_type", file_type
return (magic_data, file_type) |
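
A minimal round trip through the parser above needs a header line naming the
table, a column row, and tab-separated records. The field names below are
hypothetical, and since self is never referenced in the body the function can
be exercised unbound:

    raw = ("tab\tmagic_measurements\n"
           "er_specimen_name\tmeasurement_temp\n"
           "sp01\t273\n"
           "sp02\t300\n")
    with open("demo.magic", "w") as f:
        f.write(raw)

    data, ftype = magic_read(None, "demo.magic")
    # ftype -> 'magic_measurements'
    # data  -> [{'er_specimen_name': 'sp01', 'measurement_temp': '273'},
    #           {'er_specimen_name': 'sp02', 'measurement_temp': '300'}]
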
def _set_char(self, x, y, char, fg=None, bg=None,
bgblend=_lib.TCOD_BKGND_SET):
"""
Sets a character.
This is called often and is designed to be as fast as possible.
Because of the need for speed this function will do NO TYPE CHECKING
AT ALL, it's up to the drawing functions to use the functions:
_format_char and _format_color before passing to this."""
# values are already formatted, honestly this function is redundant
return _put_char_ex(self.console_c, x, y, char, fg, bg, bgblend) | def function[_set_char, parameter[self, x, y, char, fg, bg, bgblend]]:
constant[
Sets a character.
This is called often and is designed to be as fast as possible.
Because of the need for speed this function will do NO TYPE CHECKING
AT ALL, it's up to the drawing functions to use the functions:
_format_char and _format_color before passing to this.]
return[call[name[_put_char_ex], parameter[name[self].console_c, name[x], name[y], name[char], name[fg], name[bg], name[bgblend]]]] | keyword[def] identifier[_set_char] ( identifier[self] , identifier[x] , identifier[y] , identifier[char] , identifier[fg] = keyword[None] , identifier[bg] = keyword[None] ,
identifier[bgblend] = identifier[_lib] . identifier[TCOD_BKGND_SET] ):
literal[string]
keyword[return] identifier[_put_char_ex] ( identifier[self] . identifier[console_c] , identifier[x] , identifier[y] , identifier[char] , identifier[fg] , identifier[bg] , identifier[bgblend] ) | def _set_char(self, x, y, char, fg=None, bg=None, bgblend=_lib.TCOD_BKGND_SET):
"""
Sets a character.
This is called often and is designed to be as fast as possible.
Because of the need for speed this function will do NO TYPE CHECKING
AT ALL, it's up to the drawing functions to use the functions:
_format_char and _format_color before passing to this."""
# values are already formatted, honestly this function is redundant
return _put_char_ex(self.console_c, x, y, char, fg, bg, bgblend) |
def reverseCommit(self):
"""
        Undo the insertion by removing the previously inserted text.
"""
if self.markerPos is None:
return
# Remove the specified string from the same position in every line
# in between the mark and the cursor (inclusive).
col = min((self.markerPos[1], self.cursorPos[1]))
for line in range(self.markerPos[0], self.cursorPos[0] + 1):
self.qteWidget.setSelection(line, col, line, col + len(self.text))
self.baseClass.removeSelectedText()
self.qteWidget.setCursorPosition(*self.cursorPos) | def function[reverseCommit, parameter[self]]:
constant[
    Undo the insertion by removing the previously inserted text.
]
if compare[name[self].markerPos is constant[None]] begin[:]
return[None]
variable[col] assign[=] call[name[min], parameter[tuple[[<ast.Subscript object at 0x7da204960a30>, <ast.Subscript object at 0x7da204961360>]]]]
for taget[name[line]] in starred[call[name[range], parameter[call[name[self].markerPos][constant[0]], binary_operation[call[name[self].cursorPos][constant[0]] + constant[1]]]]] begin[:]
call[name[self].qteWidget.setSelection, parameter[name[line], name[col], name[line], binary_operation[name[col] + call[name[len], parameter[name[self].text]]]]]
call[name[self].baseClass.removeSelectedText, parameter[]]
call[name[self].qteWidget.setCursorPosition, parameter[<ast.Starred object at 0x7da20c6ab4c0>]] | keyword[def] identifier[reverseCommit] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[markerPos] keyword[is] keyword[None] :
keyword[return]
identifier[col] = identifier[min] (( identifier[self] . identifier[markerPos] [ literal[int] ], identifier[self] . identifier[cursorPos] [ literal[int] ]))
keyword[for] identifier[line] keyword[in] identifier[range] ( identifier[self] . identifier[markerPos] [ literal[int] ], identifier[self] . identifier[cursorPos] [ literal[int] ]+ literal[int] ):
identifier[self] . identifier[qteWidget] . identifier[setSelection] ( identifier[line] , identifier[col] , identifier[line] , identifier[col] + identifier[len] ( identifier[self] . identifier[text] ))
identifier[self] . identifier[baseClass] . identifier[removeSelectedText] ()
identifier[self] . identifier[qteWidget] . identifier[setCursorPosition] (* identifier[self] . identifier[cursorPos] ) | def reverseCommit(self):
"""
        Undo the insertion by removing the previously inserted text.
"""
if self.markerPos is None:
return # depends on [control=['if'], data=[]]
# Remove the specified string from the same position in every line
# in between the mark and the cursor (inclusive).
col = min((self.markerPos[1], self.cursorPos[1]))
for line in range(self.markerPos[0], self.cursorPos[0] + 1):
self.qteWidget.setSelection(line, col, line, col + len(self.text))
self.baseClass.removeSelectedText() # depends on [control=['for'], data=['line']]
self.qteWidget.setCursorPosition(*self.cursorPos) |
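
The commit/reverseCommit pair is an undo-stack convention: commit applies an
edit and records enough state (markerPos, cursorPos, text) for reverseCommit
to invert it exactly. A library-agnostic sketch of that pattern (not the
qtmacs API):

    class InsertText:
        """Toy undoable command: commit() inserts, reverseCommit() undoes."""
        def __init__(self, buf, pos, text):
            self.buf, self.pos, self.text = buf, pos, list(text)

        def commit(self):
            self.buf[self.pos:self.pos] = self.text

        def reverseCommit(self):
            del self.buf[self.pos:self.pos + len(self.text)]

    buf = list('hello world')
    cmd = InsertText(buf, 5, ', cruel')
    cmd.commit();        print(''.join(buf))   # hello, cruel world
    cmd.reverseCommit(); print(''.join(buf))   # hello world
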
def sample(self, initial_pos, num_adapt, num_samples, stepsize=None, return_type='dataframe'):
"""
Returns samples using No U Turn Sampler with dual averaging
Parameters
----------
initial_pos: A 1d array like object
Vector representing values of parameter position, the starting
            state in Markov chain.
        num_adapt: int
            The number of iterations to run the adaptation of stepsize
        num_samples: int
            Number of samples to be generated
        stepsize: float, defaults to None
            The stepsize for proposing new values of position and momentum in simulate_dynamics
            If None, then will be chosen suitably
return_type: string (dataframe | recarray)
Return type for samples, either of 'dataframe' or 'recarray'.
Defaults to 'dataframe'
Returns
-------
sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument
Examples
---------
>>> from pgmpy.sampling import NoUTurnSamplerDA as NUTSda, GradLogPDFGaussian, LeapFrog
>>> from pgmpy.factors.continuous import GaussianDistribution as JGD
>>> import numpy as np
>>> mean = np.array([10, -13])
>>> covariance = np.array([[16, -3], [-3, 13]])
>>> model = JGD(['x', 'y'], mean, covariance)
>>> sampler = NUTSda(model=model, grad_log_pdf=GradLogPDFGaussian, simulate_dynamics=LeapFrog)
>>> samples = sampler.sample(initial_pos=np.array([12, -4]), num_adapt=10, num_samples=10,
... stepsize=0.1, return_type='dataframe')
>>> samples
x y
0 12.000000 -4.000000
1 11.864821 -3.696109
2 10.546986 -4.892169
3 8.526596 -21.555793
4 8.526596 -21.555793
5 11.343194 -6.353789
6 -1.583269 -12.802931
7 12.411957 -11.704859
8 13.253336 -20.169492
9 11.295901 -7.665058
"""
initial_pos = _check_1d_array_object(initial_pos, 'initial_pos')
_check_length_equal(initial_pos, self.model.variables, 'initial_pos', 'model.variables')
if stepsize is None:
stepsize = self._find_reasonable_stepsize(initial_pos)
if num_adapt <= 1:
return NoUTurnSampler(self.model, self.grad_log_pdf,
self.simulate_dynamics).sample(initial_pos, num_samples, stepsize)
mu = np.log(10.0 * stepsize)
stepsize_bar = 1.0
h_bar = 0.0
types = [(var_name, 'float') for var_name in self.model.variables]
samples = np.zeros(num_samples, dtype=types).view(np.recarray)
samples[0] = tuple(initial_pos)
position_m = initial_pos
for i in range(1, num_samples):
position_m, alpha, n_alpha = self._sample(position_m, stepsize)
samples[i] = tuple(position_m)
if i <= num_adapt:
stepsize, stepsize_bar, h_bar = self._adapt_params(stepsize, stepsize_bar, h_bar, mu,
i, alpha, n_alpha)
else:
stepsize = stepsize_bar
return _return_samples(return_type, samples) | def function[sample, parameter[self, initial_pos, num_adapt, num_samples, stepsize, return_type]]:
constant[
Returns samples using No U Turn Sampler with dual averaging
Parameters
----------
initial_pos: A 1d array like object
Vector representing values of parameter position, the starting
            state in Markov chain.
        num_adapt: int
            The number of iterations to run the adaptation of stepsize
        num_samples: int
            Number of samples to be generated
        stepsize: float, defaults to None
            The stepsize for proposing new values of position and momentum in simulate_dynamics
            If None, then will be chosen suitably
return_type: string (dataframe | recarray)
Return type for samples, either of 'dataframe' or 'recarray'.
Defaults to 'dataframe'
Returns
-------
sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument
Examples
---------
>>> from pgmpy.sampling import NoUTurnSamplerDA as NUTSda, GradLogPDFGaussian, LeapFrog
>>> from pgmpy.factors.continuous import GaussianDistribution as JGD
>>> import numpy as np
>>> mean = np.array([10, -13])
>>> covariance = np.array([[16, -3], [-3, 13]])
>>> model = JGD(['x', 'y'], mean, covariance)
>>> sampler = NUTSda(model=model, grad_log_pdf=GradLogPDFGaussian, simulate_dynamics=LeapFrog)
>>> samples = sampler.sample(initial_pos=np.array([12, -4]), num_adapt=10, num_samples=10,
... stepsize=0.1, return_type='dataframe')
>>> samples
x y
0 12.000000 -4.000000
1 11.864821 -3.696109
2 10.546986 -4.892169
3 8.526596 -21.555793
4 8.526596 -21.555793
5 11.343194 -6.353789
6 -1.583269 -12.802931
7 12.411957 -11.704859
8 13.253336 -20.169492
9 11.295901 -7.665058
]
variable[initial_pos] assign[=] call[name[_check_1d_array_object], parameter[name[initial_pos], constant[initial_pos]]]
call[name[_check_length_equal], parameter[name[initial_pos], name[self].model.variables, constant[initial_pos], constant[model.variables]]]
if compare[name[stepsize] is constant[None]] begin[:]
variable[stepsize] assign[=] call[name[self]._find_reasonable_stepsize, parameter[name[initial_pos]]]
if compare[name[num_adapt] less_or_equal[<=] constant[1]] begin[:]
return[call[call[name[NoUTurnSampler], parameter[name[self].model, name[self].grad_log_pdf, name[self].simulate_dynamics]].sample, parameter[name[initial_pos], name[num_samples], name[stepsize]]]]
variable[mu] assign[=] call[name[np].log, parameter[binary_operation[constant[10.0] * name[stepsize]]]]
variable[stepsize_bar] assign[=] constant[1.0]
variable[h_bar] assign[=] constant[0.0]
variable[types] assign[=] <ast.ListComp object at 0x7da18f812d10>
variable[samples] assign[=] call[call[name[np].zeros, parameter[name[num_samples]]].view, parameter[name[np].recarray]]
call[name[samples]][constant[0]] assign[=] call[name[tuple], parameter[name[initial_pos]]]
variable[position_m] assign[=] name[initial_pos]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], name[num_samples]]]] begin[:]
<ast.Tuple object at 0x7da18f813a00> assign[=] call[name[self]._sample, parameter[name[position_m], name[stepsize]]]
call[name[samples]][name[i]] assign[=] call[name[tuple], parameter[name[position_m]]]
if compare[name[i] less_or_equal[<=] name[num_adapt]] begin[:]
<ast.Tuple object at 0x7da204620070> assign[=] call[name[self]._adapt_params, parameter[name[stepsize], name[stepsize_bar], name[h_bar], name[mu], name[i], name[alpha], name[n_alpha]]]
return[call[name[_return_samples], parameter[name[return_type], name[samples]]]] | keyword[def] identifier[sample] ( identifier[self] , identifier[initial_pos] , identifier[num_adapt] , identifier[num_samples] , identifier[stepsize] = keyword[None] , identifier[return_type] = literal[string] ):
literal[string]
identifier[initial_pos] = identifier[_check_1d_array_object] ( identifier[initial_pos] , literal[string] )
identifier[_check_length_equal] ( identifier[initial_pos] , identifier[self] . identifier[model] . identifier[variables] , literal[string] , literal[string] )
keyword[if] identifier[stepsize] keyword[is] keyword[None] :
identifier[stepsize] = identifier[self] . identifier[_find_reasonable_stepsize] ( identifier[initial_pos] )
keyword[if] identifier[num_adapt] <= literal[int] :
keyword[return] identifier[NoUTurnSampler] ( identifier[self] . identifier[model] , identifier[self] . identifier[grad_log_pdf] ,
identifier[self] . identifier[simulate_dynamics] ). identifier[sample] ( identifier[initial_pos] , identifier[num_samples] , identifier[stepsize] )
identifier[mu] = identifier[np] . identifier[log] ( literal[int] * identifier[stepsize] )
identifier[stepsize_bar] = literal[int]
identifier[h_bar] = literal[int]
identifier[types] =[( identifier[var_name] , literal[string] ) keyword[for] identifier[var_name] keyword[in] identifier[self] . identifier[model] . identifier[variables] ]
identifier[samples] = identifier[np] . identifier[zeros] ( identifier[num_samples] , identifier[dtype] = identifier[types] ). identifier[view] ( identifier[np] . identifier[recarray] )
identifier[samples] [ literal[int] ]= identifier[tuple] ( identifier[initial_pos] )
identifier[position_m] = identifier[initial_pos]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[num_samples] ):
identifier[position_m] , identifier[alpha] , identifier[n_alpha] = identifier[self] . identifier[_sample] ( identifier[position_m] , identifier[stepsize] )
identifier[samples] [ identifier[i] ]= identifier[tuple] ( identifier[position_m] )
keyword[if] identifier[i] <= identifier[num_adapt] :
identifier[stepsize] , identifier[stepsize_bar] , identifier[h_bar] = identifier[self] . identifier[_adapt_params] ( identifier[stepsize] , identifier[stepsize_bar] , identifier[h_bar] , identifier[mu] ,
identifier[i] , identifier[alpha] , identifier[n_alpha] )
keyword[else] :
identifier[stepsize] = identifier[stepsize_bar]
keyword[return] identifier[_return_samples] ( identifier[return_type] , identifier[samples] ) | def sample(self, initial_pos, num_adapt, num_samples, stepsize=None, return_type='dataframe'):
"""
Returns samples using No U Turn Sampler with dual averaging
Parameters
----------
initial_pos: A 1d array like object
Vector representing values of parameter position, the starting
            state in Markov chain.
        num_adapt: int
            The number of iterations to run the adaptation of stepsize
        num_samples: int
            Number of samples to be generated
        stepsize: float, defaults to None
            The stepsize for proposing new values of position and momentum in simulate_dynamics
            If None, then will be chosen suitably
return_type: string (dataframe | recarray)
Return type for samples, either of 'dataframe' or 'recarray'.
Defaults to 'dataframe'
Returns
-------
sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument
Examples
---------
>>> from pgmpy.sampling import NoUTurnSamplerDA as NUTSda, GradLogPDFGaussian, LeapFrog
>>> from pgmpy.factors.continuous import GaussianDistribution as JGD
>>> import numpy as np
>>> mean = np.array([10, -13])
>>> covariance = np.array([[16, -3], [-3, 13]])
>>> model = JGD(['x', 'y'], mean, covariance)
>>> sampler = NUTSda(model=model, grad_log_pdf=GradLogPDFGaussian, simulate_dynamics=LeapFrog)
>>> samples = sampler.sample(initial_pos=np.array([12, -4]), num_adapt=10, num_samples=10,
... stepsize=0.1, return_type='dataframe')
>>> samples
x y
0 12.000000 -4.000000
1 11.864821 -3.696109
2 10.546986 -4.892169
3 8.526596 -21.555793
4 8.526596 -21.555793
5 11.343194 -6.353789
6 -1.583269 -12.802931
7 12.411957 -11.704859
8 13.253336 -20.169492
9 11.295901 -7.665058
"""
initial_pos = _check_1d_array_object(initial_pos, 'initial_pos')
_check_length_equal(initial_pos, self.model.variables, 'initial_pos', 'model.variables')
if stepsize is None:
stepsize = self._find_reasonable_stepsize(initial_pos) # depends on [control=['if'], data=['stepsize']]
if num_adapt <= 1:
return NoUTurnSampler(self.model, self.grad_log_pdf, self.simulate_dynamics).sample(initial_pos, num_samples, stepsize) # depends on [control=['if'], data=[]]
mu = np.log(10.0 * stepsize)
stepsize_bar = 1.0
h_bar = 0.0
types = [(var_name, 'float') for var_name in self.model.variables]
samples = np.zeros(num_samples, dtype=types).view(np.recarray)
samples[0] = tuple(initial_pos)
position_m = initial_pos
for i in range(1, num_samples):
(position_m, alpha, n_alpha) = self._sample(position_m, stepsize)
samples[i] = tuple(position_m)
if i <= num_adapt:
(stepsize, stepsize_bar, h_bar) = self._adapt_params(stepsize, stepsize_bar, h_bar, mu, i, alpha, n_alpha) # depends on [control=['if'], data=['i']]
else:
stepsize = stepsize_bar # depends on [control=['for'], data=['i']]
return _return_samples(return_type, samples) |
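
The adaptation hidden in _adapt_params follows the dual-averaging scheme of
Hoffman & Gelman (2014, Algorithm 6), where alpha/n_alpha is the average
acceptance statistic returned by _sample. A sketch of what such an update
plausibly computes -- the constants delta=0.65, gamma=0.05, t0=10, kappa=0.75
are the paper's recommendations, not necessarily pgmpy's internals:

    import numpy as np

    def adapt_params(stepsize, stepsize_bar, h_bar, mu, m, alpha, n_alpha,
                     delta=0.65, gamma=0.05, t0=10.0, kappa=0.75):
        # Running average of how far the acceptance rate sits from the target.
        h_bar = (1 - 1 / (m + t0)) * h_bar + (delta - alpha / n_alpha) / (m + t0)
        # Aggressive proposal for the current step size ...
        log_step = mu - (np.sqrt(m) / gamma) * h_bar
        # ... and its polynomially decaying average, used once adaptation ends.
        eta = m ** (-kappa)
        log_step_bar = eta * log_step + (1 - eta) * np.log(stepsize_bar)
        return np.exp(log_step), np.exp(log_step_bar), h_bar
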
def _round_multiple(x:int, mult:int=None)->int:
"Calc `x` to nearest multiple of `mult`."
return (int(x/mult+0.5)*mult) if mult is not None else x | def function[_round_multiple, parameter[x, mult]]:
constant[Calc `x` to nearest multiple of `mult`.]
return[<ast.IfExp object at 0x7da1b20284c0>] | keyword[def] identifier[_round_multiple] ( identifier[x] : identifier[int] , identifier[mult] : identifier[int] = keyword[None] )-> identifier[int] :
literal[string]
keyword[return] ( identifier[int] ( identifier[x] / identifier[mult] + literal[int] )* identifier[mult] ) keyword[if] identifier[mult] keyword[is] keyword[not] keyword[None] keyword[else] identifier[x] | def _round_multiple(x: int, mult: int=None) -> int:
"""Calc `x` to nearest multiple of `mult`."""
return int(x / mult + 0.5) * mult if mult is not None else x |
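
For instance, snapping a size to a 32-pixel grid:

    assert _round_multiple(230, 32) == 224    # 230/32 + 0.5 -> 7 -> 224
    assert _round_multiple(250, 32) == 256    # 250/32 + 0.5 -> 8 -> 256
    assert _round_multiple(230, None) == 230  # mult=None is a pass-through
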
def _check_reimport(self, node, basename=None, level=None):
"""check if the import is necessary (i.e. not already done)"""
if not self.linter.is_message_enabled("reimported"):
return
frame = node.frame()
root = node.root()
contexts = [(frame, level)]
if root is not frame:
contexts.append((root, None))
for known_context, known_level in contexts:
for name, alias in node.names:
first = _get_first_import(
node, known_context, name, basename, known_level, alias
)
if first is not None:
self.add_message(
"reimported", node=node, args=(name, first.fromlineno)
) | def function[_check_reimport, parameter[self, node, basename, level]]:
constant[check if the import is necessary (i.e. not already done)]
if <ast.UnaryOp object at 0x7da1b03a43d0> begin[:]
return[None]
variable[frame] assign[=] call[name[node].frame, parameter[]]
variable[root] assign[=] call[name[node].root, parameter[]]
variable[contexts] assign[=] list[[<ast.Tuple object at 0x7da1b03a5ae0>]]
if compare[name[root] is_not name[frame]] begin[:]
call[name[contexts].append, parameter[tuple[[<ast.Name object at 0x7da1b03a6170>, <ast.Constant object at 0x7da1b03a4340>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b03a5f00>, <ast.Name object at 0x7da1b03a4490>]]] in starred[name[contexts]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b03a5000>, <ast.Name object at 0x7da1b03a5c90>]]] in starred[name[node].names] begin[:]
variable[first] assign[=] call[name[_get_first_import], parameter[name[node], name[known_context], name[name], name[basename], name[known_level], name[alias]]]
if compare[name[first] is_not constant[None]] begin[:]
call[name[self].add_message, parameter[constant[reimported]]] | keyword[def] identifier[_check_reimport] ( identifier[self] , identifier[node] , identifier[basename] = keyword[None] , identifier[level] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[linter] . identifier[is_message_enabled] ( literal[string] ):
keyword[return]
identifier[frame] = identifier[node] . identifier[frame] ()
identifier[root] = identifier[node] . identifier[root] ()
identifier[contexts] =[( identifier[frame] , identifier[level] )]
keyword[if] identifier[root] keyword[is] keyword[not] identifier[frame] :
identifier[contexts] . identifier[append] (( identifier[root] , keyword[None] ))
keyword[for] identifier[known_context] , identifier[known_level] keyword[in] identifier[contexts] :
keyword[for] identifier[name] , identifier[alias] keyword[in] identifier[node] . identifier[names] :
identifier[first] = identifier[_get_first_import] (
identifier[node] , identifier[known_context] , identifier[name] , identifier[basename] , identifier[known_level] , identifier[alias]
)
keyword[if] identifier[first] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[add_message] (
literal[string] , identifier[node] = identifier[node] , identifier[args] =( identifier[name] , identifier[first] . identifier[fromlineno] )
) | def _check_reimport(self, node, basename=None, level=None):
"""check if the import is necessary (i.e. not already done)"""
if not self.linter.is_message_enabled('reimported'):
return # depends on [control=['if'], data=[]]
frame = node.frame()
root = node.root()
contexts = [(frame, level)]
if root is not frame:
contexts.append((root, None)) # depends on [control=['if'], data=['root']]
for (known_context, known_level) in contexts:
for (name, alias) in node.names:
first = _get_first_import(node, known_context, name, basename, known_level, alias)
if first is not None:
self.add_message('reimported', node=node, args=(name, first.fromlineno)) # depends on [control=['if'], data=['first']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] |
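
What the checker flags, roughly (hypothetical module; pylint reports this as
reimported / W0404 together with the line of the first import):

    import os
    import os              # reimported: 'os' already imported one line above

    from sys import path
    from sys import path   # the from-import form is caught the same way
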
def get_worst_flag_level(self, flags):
'''
Determines the worst flag present in the provided flags. If no
flags are given then a 'minor' value is returned.
'''
worst_flag_level = 0
for flag_level_name in flags:
flag_level = self.FLAG_LEVELS[flag_level_name]
if flag_level > worst_flag_level:
worst_flag_level = flag_level
return self.FLAG_LEVEL_CODES[worst_flag_level] | def function[get_worst_flag_level, parameter[self, flags]]:
constant[
Determines the worst flag present in the provided flags. If no
flags are given then a 'minor' value is returned.
]
variable[worst_flag_level] assign[=] constant[0]
for taget[name[flag_level_name]] in starred[name[flags]] begin[:]
variable[flag_level] assign[=] call[name[self].FLAG_LEVELS][name[flag_level_name]]
if compare[name[flag_level] greater[>] name[worst_flag_level]] begin[:]
variable[worst_flag_level] assign[=] name[flag_level]
return[call[name[self].FLAG_LEVEL_CODES][name[worst_flag_level]]] | keyword[def] identifier[get_worst_flag_level] ( identifier[self] , identifier[flags] ):
literal[string]
identifier[worst_flag_level] = literal[int]
keyword[for] identifier[flag_level_name] keyword[in] identifier[flags] :
identifier[flag_level] = identifier[self] . identifier[FLAG_LEVELS] [ identifier[flag_level_name] ]
keyword[if] identifier[flag_level] > identifier[worst_flag_level] :
identifier[worst_flag_level] = identifier[flag_level]
keyword[return] identifier[self] . identifier[FLAG_LEVEL_CODES] [ identifier[worst_flag_level] ] | def get_worst_flag_level(self, flags):
"""
Determines the worst flag present in the provided flags. If no
flags are given then a 'minor' value is returned.
"""
worst_flag_level = 0
for flag_level_name in flags:
flag_level = self.FLAG_LEVELS[flag_level_name]
if flag_level > worst_flag_level:
worst_flag_level = flag_level # depends on [control=['if'], data=['flag_level', 'worst_flag_level']] # depends on [control=['for'], data=['flag_level_name']]
return self.FLAG_LEVEL_CODES[worst_flag_level] |
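
A stand-alone equivalent with hypothetical level tables, just to make the
contract concrete:

    FLAG_LEVELS = {'minor': 0, 'major': 1, 'critical': 2}   # assumed mapping
    FLAG_LEVEL_CODES = ('minor', 'major', 'critical')

    def worst_flag_level(flags):
        worst = 0
        for name in flags:
            worst = max(worst, FLAG_LEVELS[name])
        return FLAG_LEVEL_CODES[worst]

    assert worst_flag_level([]) == 'minor'                    # no flags
    assert worst_flag_level(['major', 'minor']) == 'major'
    assert worst_flag_level(['minor', 'critical']) == 'critical'
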
def handle_unhandled_exception(exc_type, exc_value, exc_traceback):
"""Handler for unhandled exceptions that will write to the logs"""
if issubclass(exc_type, KeyboardInterrupt):
# call the default excepthook saved at __excepthook__
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger = logging.getLogger(__name__) # type: ignore
logger.critical("Unhandled exception", exc_info=(exc_type, exc_value, exc_traceback)) | def function[handle_unhandled_exception, parameter[exc_type, exc_value, exc_traceback]]:
constant[Handler for unhandled exceptions that will write to the logs]
if call[name[issubclass], parameter[name[exc_type], name[KeyboardInterrupt]]] begin[:]
call[name[sys].__excepthook__, parameter[name[exc_type], name[exc_value], name[exc_traceback]]]
return[None]
variable[logger] assign[=] call[name[logging].getLogger, parameter[name[__name__]]]
call[name[logger].critical, parameter[constant[Unhandled exception]]] | keyword[def] identifier[handle_unhandled_exception] ( identifier[exc_type] , identifier[exc_value] , identifier[exc_traceback] ):
literal[string]
keyword[if] identifier[issubclass] ( identifier[exc_type] , identifier[KeyboardInterrupt] ):
identifier[sys] . identifier[__excepthook__] ( identifier[exc_type] , identifier[exc_value] , identifier[exc_traceback] )
keyword[return]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[logger] . identifier[critical] ( literal[string] , identifier[exc_info] =( identifier[exc_type] , identifier[exc_value] , identifier[exc_traceback] )) | def handle_unhandled_exception(exc_type, exc_value, exc_traceback):
"""Handler for unhandled exceptions that will write to the logs"""
if issubclass(exc_type, KeyboardInterrupt):
# call the default excepthook saved at __excepthook__
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return # depends on [control=['if'], data=[]]
logger = logging.getLogger(__name__) # type: ignore
logger.critical('Unhandled exception', exc_info=(exc_type, exc_value, exc_traceback)) |
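
Installing the hook is a single assignment; KeyboardInterrupt keeps its normal
behaviour thanks to the early return above:

    import logging
    import sys

    logging.basicConfig(filename='app.log', level=logging.INFO)
    sys.excepthook = handle_unhandled_exception

    raise RuntimeError('boom')   # logged to app.log as CRITICAL with traceback
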
def reformat_pattern(pattern, compile=False):
'''
Apply the filters on user pattern to generate a new regular expression
pattern.
A user provided variable, should start with an alphabet, can be
alphanumeric and can have _.
'''
# User pattern: (w:<name>) --> Changes to (?P<name>\w)
rex_pattern = re.sub(r'\(w:<([\w\d_]+)>\)', '(?P<\\1>\w+)', pattern)
# User pattern: (d:<name>) --> change to (?P<name>\d)
rex_pattern = re.sub(r'\(d:<([\w\d_]+)>\)', '(?P<\\1>\d+)', rex_pattern)
# User pattern: (W:<name>) --> change to (?P<name>\w)
rex_pattern = re.sub(r'\(W:<([\w\d_]+)>\)', '(?P<\\1>\W+)', rex_pattern)
    # User pattern: (any:<anyvalue>) --> change to (?P<anyvalue>.*)
rex_pattern = re.sub(r'\(any:<([\w\d_]+)>\)', '(?P<\\1>.*)', rex_pattern)
# User pattern: (ip:<ipaddr>) --> Change to (?P<ipaddr>\d+\.\d+\.\d+\.\d+)
rex_pattern = re.sub(r'\(ip:<([\w\d_]+)>\)',
'(?P<\\1>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
rex_pattern)
# User pattern: (mac:<macaddr>) --> Change to (?P<mac>\w\w:\w\w:\w\w:..)
rex_pattern = re.sub(r'\(mac:<([\w\d_]+)>\)',
'(?P<\\1>\w\w:\w\w:\w\w:\w\w:\w\w:\w\w)', rex_pattern)
# User pattern: (decimal:<name>) --> Change to (?P<name>\d*\.\d+|\d+)
rex_pattern = re.sub(r'\(decimal:<([\w\d_]+)>\)',
'(?P<\\1>\d*\.\d+|\d+)', rex_pattern)
    # User pattern: (measurement:<name>) --> change to \
# (?P<name>\d*\.\d+|\d+)(\w+|\w+/\w+)
rex_pattern = re.sub(r'\(measurement:<([\w\d_]+)>\)',
'(?P<\\1>\d*\.\d+|\d+)(?P<\\1_unit>\w+|\w+/\w+)',
rex_pattern)
######################################
# Timestamp patterns.
# User pattern: (ts[n]:<timestamp>) -->
# Converted to: The below options.
######################################
# User pattern: (ts1:<timestamp>)
# Keystone, nova, libvirt, cinder
# Example: 2015-11-13 06:38:04.571
rex_pattern = re.sub(
r'\(ts1:<([\w\d_]+)>\)',
'(?P<\\1>(\d{4})-(\d{1,2})-(\d{1,2}) (\d+):(\d+):(\d+)\.(\d+))',
rex_pattern)
# User pattern: (ts2:<timestamp>)
# contrail
# Example: 2015-11-13 Fri 13:14:51:907.395 PST
rex_pattern = re.sub(
r'\(ts2:<([\w\d_]+)>\)',
'(?P<\\1>(\d{4})-(\d{1,2})-(\d{1,2}) (\w{3}) (\d+):(\d+):(\d+):(\d+)\.(\d+))',
rex_pattern)
# User pattern: (ts3:<timestamp>)
# apache2
# Example: Thu Aug 20 14:18:34 2015
rex_pattern = re.sub(
r'\(ts3:<([\w\d_]+)>\)',
'(?P<\\1>(\w{3}) (\w{3}) (\d{1,2}) (\d+):(\d+):(\d+) (\d{4}))',
rex_pattern)
# User pattern: (ts4:<timestamp>)
# Example: 02/Nov/2015:09:03:19 -0800
rex_pattern = re.sub(
r'\(ts4:<([\w\d_]+)>\)',
'(?P<\\1>(\d+)\/(\w{3})\/(\d{4}):(\d+):(\d+):(\d+) -(\d+))',
rex_pattern)
# User pattern: (ts5:<timestamp>)
# ceph logs.
# Example: 2015-11-13 06:25:29.436844
rex_pattern = re.sub(
r'\(ts5:<([\w\d_]+)>\)',
'(?P<\\1>(\d{4})-(\d{1,2})-(\d{1,2}) (\d+):(\d+):(\d+)\.(\d+))',
rex_pattern)
# User pattern: (ts6:<timestamp>)
# cassandra
# Example:2015-10-23 12:38:15
rex_pattern = re.sub(
r'\(ts6:<([\w\d_]+)>\)',
'(?P<\\1>(\d{4})-(\d{1,2})-(\d{1,2}) (\d+):(\d+):(\d+))',
rex_pattern)
# User pattern: (ts7:<timestamp>)
# haproxy
# Example: 13/Nov/2015:06:25:05.465
rex_pattern = re.sub(
r'\(ts7:<([\w\d_]+)>\)',
'(?P<\\1>(\d+)\/(\w{3})\/(\d{4}):(\d+):(\d+):(\d+)\.(\d+))',
rex_pattern)
# User pattern: (ts8:<timestamp>)
# mysql
# Example: 12:03:28
rex_pattern = re.sub(
r'\(ts8:<([\w\d_]+)>\)',
'(?P<\\1>(\d+):(\d+):(\d+))',
rex_pattern)
# User pattern: (ts9:<timestamp>)
    # redis
# Example: 08 Nov 06:26:05.084
rex_pattern = re.sub(
r'\(ts9:<([\w\d_]+)>\)',
'(?P<\\1>(\d+) (\w{3}) (\d+):(\d+):(\d+)\.(\d+))',
rex_pattern)
    # User pattern: (ts10:<timestamp>)
# supervisord, zookeeper
# Example: 2015-06-30 10:59:18,133
rex_pattern = re.sub(
r'\(ts10:<([\w\d_]+)>\)',
'(?P<\\1>(\d{4})-(\d{1,2})-(\d{1,2}) (\d+):(\d+):(\d+),(\d+))',
rex_pattern)
# User pattern: (ts11:<timestamp>)
# dmesg
# Example: 11148214.574583
rex_pattern = re.sub(
r'\(ts11:<([\w\d_]+)>\)',
'(?P<\\1>(\d+)\.(\d+))',
rex_pattern)
# Finally if no prefix is specified take default action.
rex_pattern = re.sub(r'\(<([\w\d_]+)>\)', '(?P<\\1>.*)', rex_pattern)
if compile:
return re.compile(rex_pattern)
return rex_pattern | def function[reformat_pattern, parameter[pattern, compile]]:
constant[
Apply the filters on user pattern to generate a new regular expression
pattern.
A user provided variable, should start with an alphabet, can be
alphanumeric and can have _.
]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(w:<([\w\d_]+)>\)], constant[(?P<\1>\w+)], name[pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(d:<([\w\d_]+)>\)], constant[(?P<\1>\d+)], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(W:<([\w\d_]+)>\)], constant[(?P<\1>\W+)], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(any:<([\w\d_]+)>\)], constant[(?P<\1>.*)], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(ip:<([\w\d_]+)>\)], constant[(?P<\1>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(mac:<([\w\d_]+)>\)], constant[(?P<\1>\w\w:\w\w:\w\w:\w\w:\w\w:\w\w)], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(decimal:<([\w\d_]+)>\)], constant[(?P<\1>\d*\.\d+|\d+)], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(measurement:<([\w\d_]+)>\)], constant[(?P<\1>\d*\.\d+|\d+)(?P<\1_unit>\w+|\w+/\w+)], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(ts1:<([\w\d_]+)>\)], constant[(?P<\1>(\d{4})-(\d{1,2})-(\d{1,2}) (\d+):(\d+):(\d+)\.(\d+))], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(ts2:<([\w\d_]+)>\)], constant[(?P<\1>(\d{4})-(\d{1,2})-(\d{1,2}) (\w{3}) (\d+):(\d+):(\d+):(\d+)\.(\d+))], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(ts3:<([\w\d_]+)>\)], constant[(?P<\1>(\w{3}) (\w{3}) (\d{1,2}) (\d+):(\d+):(\d+) (\d{4}))], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(ts4:<([\w\d_]+)>\)], constant[(?P<\1>(\d+)\/(\w{3})\/(\d{4}):(\d+):(\d+):(\d+) -(\d+))], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(ts5:<([\w\d_]+)>\)], constant[(?P<\1>(\d{4})-(\d{1,2})-(\d{1,2}) (\d+):(\d+):(\d+)\.(\d+))], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(ts6:<([\w\d_]+)>\)], constant[(?P<\1>(\d{4})-(\d{1,2})-(\d{1,2}) (\d+):(\d+):(\d+))], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(ts7:<([\w\d_]+)>\)], constant[(?P<\1>(\d+)\/(\w{3})\/(\d{4}):(\d+):(\d+):(\d+)\.(\d+))], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(ts8:<([\w\d_]+)>\)], constant[(?P<\1>(\d+):(\d+):(\d+))], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(ts9:<([\w\d_]+)>\)], constant[(?P<\1>(\d+) (\w{3}) (\d+):(\d+):(\d+)\.(\d+))], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(ts10:<([\w\d_]+)>\)], constant[(?P<\1>(\d{4})-(\d{1,2})-(\d{1,2}) (\d+):(\d+):(\d+),(\d+))], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(ts11:<([\w\d_]+)>\)], constant[(?P<\1>(\d+)\.(\d+))], name[rex_pattern]]]
variable[rex_pattern] assign[=] call[name[re].sub, parameter[constant[\(<([\w\d_]+)>\)], constant[(?P<\1>.*)], name[rex_pattern]]]
if name[compile] begin[:]
return[call[name[re].compile, parameter[name[rex_pattern]]]]
return[name[rex_pattern]] | keyword[def] identifier[reformat_pattern] ( identifier[pattern] , identifier[compile] = keyword[False] ):
literal[string]
identifier[rex_pattern] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] ( literal[string] ,
literal[string] ,
identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] ( literal[string] ,
literal[string] , identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] ( literal[string] ,
literal[string] , identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] ( literal[string] ,
literal[string] ,
identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] (
literal[string] ,
literal[string] ,
identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] (
literal[string] ,
literal[string] ,
identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] (
literal[string] ,
literal[string] ,
identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] (
literal[string] ,
literal[string] ,
identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] (
literal[string] ,
literal[string] ,
identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] (
literal[string] ,
literal[string] ,
identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] (
literal[string] ,
literal[string] ,
identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] (
literal[string] ,
literal[string] ,
identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] (
literal[string] ,
literal[string] ,
identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] (
literal[string] ,
literal[string] ,
identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] (
literal[string] ,
literal[string] ,
identifier[rex_pattern] )
identifier[rex_pattern] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[rex_pattern] )
keyword[if] identifier[compile] :
keyword[return] identifier[re] . identifier[compile] ( identifier[rex_pattern] )
keyword[return] identifier[rex_pattern] | def reformat_pattern(pattern, compile=False):
"""
Apply the filters on user pattern to generate a new regular expression
pattern.
A user provided variable, should start with an alphabet, can be
alphanumeric and can have _.
"""
# User pattern: (w:<name>) --> Changes to (?P<name>\w)
rex_pattern = re.sub('\\(w:<([\\w\\d_]+)>\\)', '(?P<\\1>\\w+)', pattern)
# User pattern: (d:<name>) --> change to (?P<name>\d)
rex_pattern = re.sub('\\(d:<([\\w\\d_]+)>\\)', '(?P<\\1>\\d+)', rex_pattern)
# User pattern: (W:<name>) --> change to (?P<name>\w)
rex_pattern = re.sub('\\(W:<([\\w\\d_]+)>\\)', '(?P<\\1>\\W+)', rex_pattern)
    # User pattern: (any:<anyvalue>) --> change to (?P<anyvalue>.*)
rex_pattern = re.sub('\\(any:<([\\w\\d_]+)>\\)', '(?P<\\1>.*)', rex_pattern)
# User pattern: (ip:<ipaddr>) --> Change to (?P<ipaddr>\d+\.\d+\.\d+\.\d+)
rex_pattern = re.sub('\\(ip:<([\\w\\d_]+)>\\)', '(?P<\\1>\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})', rex_pattern)
# User pattern: (mac:<macaddr>) --> Change to (?P<mac>\w\w:\w\w:\w\w:..)
rex_pattern = re.sub('\\(mac:<([\\w\\d_]+)>\\)', '(?P<\\1>\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w)', rex_pattern)
# User pattern: (decimal:<name>) --> Change to (?P<name>\d*\.\d+|\d+)
rex_pattern = re.sub('\\(decimal:<([\\w\\d_]+)>\\)', '(?P<\\1>\\d*\\.\\d+|\\d+)', rex_pattern)
    # User pattern: (measurement:<name>) --> change to \
# (?P<name>\d*\.\d+|\d+)(\w+|\w+/\w+)
rex_pattern = re.sub('\\(measurement:<([\\w\\d_]+)>\\)', '(?P<\\1>\\d*\\.\\d+|\\d+)(?P<\\1_unit>\\w+|\\w+/\\w+)', rex_pattern)
######################################
# Timestamp patterns.
# User pattern: (ts[n]:<timestamp>) -->
# Converted to: The below options.
######################################
# User pattern: (ts1:<timestamp>)
# Keystone, nova, libvirt, cinder
# Example: 2015-11-13 06:38:04.571
rex_pattern = re.sub('\\(ts1:<([\\w\\d_]+)>\\)', '(?P<\\1>(\\d{4})-(\\d{1,2})-(\\d{1,2}) (\\d+):(\\d+):(\\d+)\\.(\\d+))', rex_pattern)
# User pattern: (ts2:<timestamp>)
# contrail
# Example: 2015-11-13 Fri 13:14:51:907.395 PST
rex_pattern = re.sub('\\(ts2:<([\\w\\d_]+)>\\)', '(?P<\\1>(\\d{4})-(\\d{1,2})-(\\d{1,2}) (\\w{3}) (\\d+):(\\d+):(\\d+):(\\d+)\\.(\\d+))', rex_pattern)
# User pattern: (ts3:<timestamp>)
# apache2
# Example: Thu Aug 20 14:18:34 2015
rex_pattern = re.sub('\\(ts3:<([\\w\\d_]+)>\\)', '(?P<\\1>(\\w{3}) (\\w{3}) (\\d{1,2}) (\\d+):(\\d+):(\\d+) (\\d{4}))', rex_pattern)
# User pattern: (ts4:<timestamp>)
# Example: 02/Nov/2015:09:03:19 -0800
rex_pattern = re.sub('\\(ts4:<([\\w\\d_]+)>\\)', '(?P<\\1>(\\d+)\\/(\\w{3})\\/(\\d{4}):(\\d+):(\\d+):(\\d+) -(\\d+))', rex_pattern)
# User pattern: (ts5:<timestamp>)
# ceph logs.
# Example: 2015-11-13 06:25:29.436844
rex_pattern = re.sub('\\(ts5:<([\\w\\d_]+)>\\)', '(?P<\\1>(\\d{4})-(\\d{1,2})-(\\d{1,2}) (\\d+):(\\d+):(\\d+)\\.(\\d+))', rex_pattern)
# User pattern: (ts6:<timestamp>)
# cassandra
# Example:2015-10-23 12:38:15
rex_pattern = re.sub('\\(ts6:<([\\w\\d_]+)>\\)', '(?P<\\1>(\\d{4})-(\\d{1,2})-(\\d{1,2}) (\\d+):(\\d+):(\\d+))', rex_pattern)
# User pattern: (ts7:<timestamp>)
# haproxy
# Example: 13/Nov/2015:06:25:05.465
rex_pattern = re.sub('\\(ts7:<([\\w\\d_]+)>\\)', '(?P<\\1>(\\d+)\\/(\\w{3})\\/(\\d{4}):(\\d+):(\\d+):(\\d+)\\.(\\d+))', rex_pattern)
# User pattern: (ts8:<timestamp>)
# mysql
# Example: 12:03:28
rex_pattern = re.sub('\\(ts8:<([\\w\\d_]+)>\\)', '(?P<\\1>(\\d+):(\\d+):(\\d+))', rex_pattern)
# User pattern: (ts9:<timestamp>)
    # redis
# Example: 08 Nov 06:26:05.084
rex_pattern = re.sub('\\(ts9:<([\\w\\d_]+)>\\)', '(?P<\\1>(\\d+) (\\w{3}) (\\d+):(\\d+):(\\d+)\\.(\\d+))', rex_pattern)
    # User pattern: (ts10:<timestamp>)
# supervisord, zookeeper
# Example: 2015-06-30 10:59:18,133
rex_pattern = re.sub('\\(ts10:<([\\w\\d_]+)>\\)', '(?P<\\1>(\\d{4})-(\\d{1,2})-(\\d{1,2}) (\\d+):(\\d+):(\\d+),(\\d+))', rex_pattern)
# User pattern: (ts11:<timestamp>)
# dmesg
# Example: 11148214.574583
rex_pattern = re.sub('\\(ts11:<([\\w\\d_]+)>\\)', '(?P<\\1>(\\d+)\\.(\\d+))', rex_pattern)
# Finally if no prefix is specified take default action.
rex_pattern = re.sub('\\(<([\\w\\d_]+)>\\)', '(?P<\\1>.*)', rex_pattern)
if compile:
return re.compile(rex_pattern) # depends on [control=['if'], data=[]]
return rex_pattern |
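
Each marker becomes a named group, so matches come back as a dict. One caveat:
since Python 3.7 a bare \d inside a re.sub replacement template raises
re.error('bad escape'), so on modern interpreters the replacement strings
above need their backslashes doubled. A sketch of one rule with that fix
applied:

    import re

    pat = re.sub(r'\(ip:<([\w\d_]+)>\)',
                 r'(?P<\1>\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})',
                 r'src=(ip:<src>) dst=(ip:<dst>)')
    m = re.match(pat, 'src=10.0.0.1 dst=10.0.0.2')
    print(m.groupdict())   # {'src': '10.0.0.1', 'dst': '10.0.0.2'}
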
def estimate_map(interface, state, label, inp):
"""Find the cluster `i` that is closest to the datapoint `e`."""
out = interface.output(0)
centers = {}
for row in inp:
row = row.strip().split(state["delimiter"])
if len(row) > 1:
x = [(0 if row[i] in state["missing_vals"] else float(row[i])) for i in state["X_indices"]]
cluster = min((state['dist'](c, x), i) for i, c in state['centers'])[1]
vertex = state['create'](x, 1.0)
centers[cluster] = vertex if cluster not in centers else state["update"](centers[cluster], vertex)
for cluster, values in centers.iteritems():
out.add(cluster, values) | def function[estimate_map, parameter[interface, state, label, inp]]:
constant[Find the cluster `i` whose center is closest to the datapoint `x`.]
variable[out] assign[=] call[name[interface].output, parameter[constant[0]]]
variable[centers] assign[=] dictionary[[], []]
for taget[name[row]] in starred[name[inp]] begin[:]
variable[row] assign[=] call[call[name[row].strip, parameter[]].split, parameter[call[name[state]][constant[delimiter]]]]
if compare[call[name[len], parameter[name[row]]] greater[>] constant[1]] begin[:]
variable[x] assign[=] <ast.ListComp object at 0x7da18f58e4a0>
variable[cluster] assign[=] call[call[name[min], parameter[<ast.GeneratorExp object at 0x7da1b24d7a90>]]][constant[1]]
variable[vertex] assign[=] call[call[name[state]][constant[create]], parameter[name[x], constant[1.0]]]
call[name[centers]][name[cluster]] assign[=] <ast.IfExp object at 0x7da1b24d7a30>
for taget[tuple[[<ast.Name object at 0x7da1b24d4bb0>, <ast.Name object at 0x7da1b214bc40>]]] in starred[call[name[centers].iteritems, parameter[]]] begin[:]
call[name[out].add, parameter[name[cluster], name[values]]] | keyword[def] identifier[estimate_map] ( identifier[interface] , identifier[state] , identifier[label] , identifier[inp] ):
literal[string]
identifier[out] = identifier[interface] . identifier[output] ( literal[int] )
identifier[centers] ={}
keyword[for] identifier[row] keyword[in] identifier[inp] :
identifier[row] = identifier[row] . identifier[strip] (). identifier[split] ( identifier[state] [ literal[string] ])
keyword[if] identifier[len] ( identifier[row] )> literal[int] :
identifier[x] =[( literal[int] keyword[if] identifier[row] [ identifier[i] ] keyword[in] identifier[state] [ literal[string] ] keyword[else] identifier[float] ( identifier[row] [ identifier[i] ])) keyword[for] identifier[i] keyword[in] identifier[state] [ literal[string] ]]
identifier[cluster] = identifier[min] (( identifier[state] [ literal[string] ]( identifier[c] , identifier[x] ), identifier[i] ) keyword[for] identifier[i] , identifier[c] keyword[in] identifier[state] [ literal[string] ])[ literal[int] ]
identifier[vertex] = identifier[state] [ literal[string] ]( identifier[x] , literal[int] )
identifier[centers] [ identifier[cluster] ]= identifier[vertex] keyword[if] identifier[cluster] keyword[not] keyword[in] identifier[centers] keyword[else] identifier[state] [ literal[string] ]( identifier[centers] [ identifier[cluster] ], identifier[vertex] )
keyword[for] identifier[cluster] , identifier[values] keyword[in] identifier[centers] . identifier[iteritems] ():
identifier[out] . identifier[add] ( identifier[cluster] , identifier[values] ) | def estimate_map(interface, state, label, inp):
"""Find the cluster `i` that is closest to the datapoint `e`."""
out = interface.output(0)
centers = {}
for row in inp:
row = row.strip().split(state['delimiter'])
if len(row) > 1:
x = [0 if row[i] in state['missing_vals'] else float(row[i]) for i in state['X_indices']]
cluster = min(((state['dist'](c, x), i) for (i, c) in state['centers']))[1]
vertex = state['create'](x, 1.0)
centers[cluster] = vertex if cluster not in centers else state['update'](centers[cluster], vertex) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']]
for (cluster, values) in centers.iteritems():
out.add(cluster, values) # depends on [control=['for'], data=[]] |
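
Stripped of the Disco plumbing (interface, state, the dist/create/update
callables), the map step is the k-means E-step: assign each point to its
nearest center and emit per-cluster partial sums. A stand-alone sketch:

    def assign_points(points, centers, dist):
        """Return {cluster_index: (component_sums, count)} partials."""
        partials = {}
        for x in points:
            _, j = min((dist(c, x), j) for j, c in enumerate(centers))
            sums, n = partials.get(j, ([0.0] * len(x), 0))
            partials[j] = ([s + xi for s, xi in zip(sums, x)], n + 1)
        return partials

    sq = lambda c, x: sum((ci - xi) ** 2 for ci, xi in zip(c, x))
    print(assign_points([[0, 0], [1, 1], [9, 9]], [[0, 0], [10, 10]], sq))
    # {0: ([1.0, 1.0], 2), 1: ([9.0, 9.0], 1)}
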
def rand_ssn():
"""Random SSN. (9 digits)
Example::
>>> rand_ssn()
295-50-0178
"""
return "%s-%s-%s" % (rand_str(3, string.digits),
rand_str(2, string.digits),
rand_str(4, string.digits)) | def function[rand_ssn, parameter[]]:
constant[Random SSN. (9 digits)
Example::
>>> rand_ssn()
295-50-0178
]
return[binary_operation[constant[%s-%s-%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b23730a0>, <ast.Call object at 0x7da1b23b0310>, <ast.Call object at 0x7da1b23b3e80>]]]] | keyword[def] identifier[rand_ssn] ():
literal[string]
keyword[return] literal[string] %( identifier[rand_str] ( literal[int] , identifier[string] . identifier[digits] ),
identifier[rand_str] ( literal[int] , identifier[string] . identifier[digits] ),
identifier[rand_str] ( literal[int] , identifier[string] . identifier[digits] )) | def rand_ssn():
"""Random SSN. (9 digits)
Example::
>>> rand_ssn()
295-50-0178
"""
return '%s-%s-%s' % (rand_str(3, string.digits), rand_str(2, string.digits), rand_str(4, string.digits)) |
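
rand_str is a sibling helper in the same module; a stdlib stand-in with the
same call shape makes the snippet self-contained:

    import random
    import string

    def rand_str(length, allowed):
        # stand-in for the module's own helper (signature assumed from usage)
        return ''.join(random.choice(allowed) for _ in range(length))

    print(rand_ssn())   # e.g. '295-50-0178'
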
def get_padding(x, padding_value=0):
"""Return float tensor representing the padding values in x.
Args:
x: int tensor with any shape
    padding_value: int value that marks padding in x
  Returns:
    float tensor with same shape as x containing values 0 or 1.
0 -> non-padding, 1 -> padding
"""
with tf.name_scope("padding"):
return tf.to_float(tf.equal(x, padding_value)) | def function[get_padding, parameter[x, padding_value]]:
constant[Return float tensor representing the padding values in x.
Args:
x: int tensor with any shape
    padding_value: int value that marks padding in x
  Returns:
    float tensor with same shape as x containing values 0 or 1.
0 -> non-padding, 1 -> padding
]
with call[name[tf].name_scope, parameter[constant[padding]]] begin[:]
return[call[name[tf].to_float, parameter[call[name[tf].equal, parameter[name[x], name[padding_value]]]]]] | keyword[def] identifier[get_padding] ( identifier[x] , identifier[padding_value] = literal[int] ):
literal[string]
keyword[with] identifier[tf] . identifier[name_scope] ( literal[string] ):
keyword[return] identifier[tf] . identifier[to_float] ( identifier[tf] . identifier[equal] ( identifier[x] , identifier[padding_value] )) | def get_padding(x, padding_value=0):
"""Return float tensor representing the padding values in x.
Args:
x: int tensor with any shape
padding_value: int value that
Returns:
flaot tensor with same shape as x containing values 0 or 1.
0 -> non-padding, 1 -> padding
"""
with tf.name_scope('padding'):
return tf.to_float(tf.equal(x, padding_value)) # depends on [control=['with'], data=[]] |
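
tf.to_float dates this to the TensorFlow 1.x API; under tensorflow.compat.v1
the mask can still be materialised (assuming the module-level tf is that API):

    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()

    x = tf.constant([[7, 3, 0, 0],
                     [1, 0, 0, 0]])
    mask = get_padding(x)          # 0.0 -> real token, 1.0 -> padding
    with tf.Session() as sess:
        print(sess.run(mask))
    # [[0. 0. 1. 1.]
    #  [0. 1. 1. 1.]]
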
def from_pypirc(pypi_repository):
""" Load configuration from .pypirc file, cached to only run once """
ret = {}
pypirc_locations = PYPIRC_LOCATIONS
for pypirc_path in pypirc_locations:
pypirc_path = os.path.expanduser(pypirc_path)
if os.path.isfile(pypirc_path):
parser = configparser.SafeConfigParser()
parser.read(pypirc_path)
if 'distutils' not in parser.sections():
continue
if 'index-servers' not in parser.options('distutils'):
continue
if pypi_repository not in parser.get('distutils', 'index-servers'):
continue
if pypi_repository in parser.sections():
for option in parser.options(pypi_repository):
ret[option] = parser.get(pypi_repository, option)
if not ret:
raise ConfigError(
'repository does not appear to be configured in pypirc ({})'.format(pypi_repository) +
', remember that it needs an entry in [distutils] and its own section'
)
return ret | def function[from_pypirc, parameter[pypi_repository]]:
constant[ Load configuration from .pypirc file, cached to only run once ]
variable[ret] assign[=] dictionary[[], []]
variable[pypirc_locations] assign[=] name[PYPIRC_LOCATIONS]
for taget[name[pypirc_path]] in starred[name[pypirc_locations]] begin[:]
variable[pypirc_path] assign[=] call[name[os].path.expanduser, parameter[name[pypirc_path]]]
if call[name[os].path.isfile, parameter[name[pypirc_path]]] begin[:]
variable[parser] assign[=] call[name[configparser].SafeConfigParser, parameter[]]
call[name[parser].read, parameter[name[pypirc_path]]]
if compare[constant[distutils] <ast.NotIn object at 0x7da2590d7190> call[name[parser].sections, parameter[]]] begin[:]
continue
if compare[constant[index-servers] <ast.NotIn object at 0x7da2590d7190> call[name[parser].options, parameter[constant[distutils]]]] begin[:]
continue
if compare[name[pypi_repository] <ast.NotIn object at 0x7da2590d7190> call[name[parser].get, parameter[constant[distutils], constant[index-servers]]]] begin[:]
continue
if compare[name[pypi_repository] in call[name[parser].sections, parameter[]]] begin[:]
for taget[name[option]] in starred[call[name[parser].options, parameter[name[pypi_repository]]]] begin[:]
call[name[ret]][name[option]] assign[=] call[name[parser].get, parameter[name[pypi_repository], name[option]]]
if <ast.UnaryOp object at 0x7da1b0d21c90> begin[:]
<ast.Raise object at 0x7da1b0d227d0>
return[name[ret]] | keyword[def] identifier[from_pypirc] ( identifier[pypi_repository] ):
literal[string]
identifier[ret] ={}
identifier[pypirc_locations] = identifier[PYPIRC_LOCATIONS]
keyword[for] identifier[pypirc_path] keyword[in] identifier[pypirc_locations] :
identifier[pypirc_path] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[pypirc_path] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[pypirc_path] ):
identifier[parser] = identifier[configparser] . identifier[SafeConfigParser] ()
identifier[parser] . identifier[read] ( identifier[pypirc_path] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[parser] . identifier[sections] ():
keyword[continue]
keyword[if] literal[string] keyword[not] keyword[in] identifier[parser] . identifier[options] ( literal[string] ):
keyword[continue]
keyword[if] identifier[pypi_repository] keyword[not] keyword[in] identifier[parser] . identifier[get] ( literal[string] , literal[string] ):
keyword[continue]
keyword[if] identifier[pypi_repository] keyword[in] identifier[parser] . identifier[sections] ():
keyword[for] identifier[option] keyword[in] identifier[parser] . identifier[options] ( identifier[pypi_repository] ):
identifier[ret] [ identifier[option] ]= identifier[parser] . identifier[get] ( identifier[pypi_repository] , identifier[option] )
keyword[if] keyword[not] identifier[ret] :
keyword[raise] identifier[ConfigError] (
literal[string] . identifier[format] ( identifier[pypi_repository] )+
literal[string]
)
keyword[return] identifier[ret] | def from_pypirc(pypi_repository):
""" Load configuration from .pypirc file, cached to only run once """
ret = {}
pypirc_locations = PYPIRC_LOCATIONS
for pypirc_path in pypirc_locations:
pypirc_path = os.path.expanduser(pypirc_path)
if os.path.isfile(pypirc_path):
parser = configparser.SafeConfigParser()
parser.read(pypirc_path)
if 'distutils' not in parser.sections():
continue # depends on [control=['if'], data=[]]
if 'index-servers' not in parser.options('distutils'):
continue # depends on [control=['if'], data=[]]
if pypi_repository not in parser.get('distutils', 'index-servers'):
continue # depends on [control=['if'], data=[]]
if pypi_repository in parser.sections():
for option in parser.options(pypi_repository):
ret[option] = parser.get(pypi_repository, option) # depends on [control=['for'], data=['option']] # depends on [control=['if'], data=['pypi_repository']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pypirc_path']]
if not ret:
raise ConfigError('repository does not appear to be configured in pypirc ({})'.format(pypi_repository) + ', remember that it needs an entry in [distutils] and its own section') # depends on [control=['if'], data=[]]
return ret |
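
PYPIRC_LOCATIONS is a module constant not shown here; assuming it includes
~/.pypirc, a file like the following satisfies all three guards -- a
[distutils] section, an index-servers list naming the repository, and a
section of the same name:

    [distutils]
    index-servers =
        internal

    [internal]
    repository = https://pypi.example.com/simple/
    username = deploy

from_pypirc('internal') then returns {'repository':
'https://pypi.example.com/simple/', 'username': 'deploy'}, while a repository
name matched nowhere raises ConfigError. Note that SafeConfigParser was
removed in Python 3.12; on current interpreters configparser.ConfigParser is
the drop-in replacement.
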
def window(iterable, size=2):
    ''' yields windows of a given size '''
iterable = iter(iterable)
d = deque(islice(iterable, size-1), maxlen=size)
for _ in map(d.append, iterable):
yield tuple(d) | def function[window, parameter[iterable, size]]:
constant[ yields windows of a given size ]
variable[iterable] assign[=] call[name[iter], parameter[name[iterable]]]
variable[d] assign[=] call[name[deque], parameter[call[name[islice], parameter[name[iterable], binary_operation[name[size] - constant[1]]]]]]
for taget[name[_]] in starred[call[name[map], parameter[name[d].append, name[iterable]]]] begin[:]
<ast.Yield object at 0x7da207f033d0> | keyword[def] identifier[window] ( identifier[iterable] , identifier[size] = literal[int] ):
literal[string]
identifier[iterable] = identifier[iter] ( identifier[iterable] )
identifier[d] = identifier[deque] ( identifier[islice] ( identifier[iterable] , identifier[size] - literal[int] ), identifier[maxlen] = identifier[size] )
keyword[for] identifier[_] keyword[in] identifier[map] ( identifier[d] . identifier[append] , identifier[iterable] ):
keyword[yield] identifier[tuple] ( identifier[d] ) | def window(iterable, size=2):
""" yields wondows of a given size """
iterable = iter(iterable)
d = deque(islice(iterable, size - 1), maxlen=size)
for _ in map(d.append, iterable):
yield tuple(d) # depends on [control=['for'], data=[]] |
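
The generator only needs deque and islice in scope; with those imported,
consecutive windows fall out directly:

    from collections import deque
    from itertools import islice

    print(list(window('abcde', 3)))
    # [('a', 'b', 'c'), ('b', 'c', 'd'), ('c', 'd', 'e')]
    print(list(window([1, 2], size=5)))
    # [] -- an input shorter than the window yields nothing
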
def get_items_of_type(self, item_type):
"""
Returns all items of specified type.
>>> book.get_items_of_type(epub.ITEM_IMAGE)
:Args:
- item_type: Type for items we are searching for
:Returns:
            Returns found items as a generator.
"""
return (item for item in self.items if item.get_type() == item_type) | def function[get_items_of_type, parameter[self, item_type]]:
constant[
Returns all items of specified type.
>>> book.get_items_of_type(epub.ITEM_IMAGE)
:Args:
- item_type: Type for items we are searching for
:Returns:
    Returns found items as a generator.
]
return[<ast.GeneratorExp object at 0x7da207f9a9e0>] | keyword[def] identifier[get_items_of_type] ( identifier[self] , identifier[item_type] ):
literal[string]
keyword[return] ( identifier[item] keyword[for] identifier[item] keyword[in] identifier[self] . identifier[items] keyword[if] identifier[item] . identifier[get_type] ()== identifier[item_type] ) | def get_items_of_type(self, item_type):
"""
Returns all items of specified type.
>>> book.get_items_of_type(epub.ITEM_IMAGE)
:Args:
- item_type: Type for items we are searching for
:Returns:
Returns found items as a generator.
"""
return (item for item in self.items if item.get_type() == item_type) |
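A hedged usage sketch; the ebooklib import and the file name are assumptions suggested by the docstring's own epub example:
# Hypothetical: list every image item in an EPUB.
import ebooklib
from ebooklib import epub

book = epub.read_epub('sample.epub')  # placeholder path
for item in book.get_items_of_type(ebooklib.ITEM_IMAGE):
    print(item.get_name())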
def _pull_out_unaffected_blocks_rhs(rest, rhs, out_port, in_port):
"""Similar to :func:`_pull_out_unaffected_blocks_lhs` but on the RHS of a
series product self-feedback.
"""
_, block_index = rhs.index_in_block(in_port)
rest = tuple(rest)
bs = rhs.block_structure
(nbefore, nblock, nafter) = (sum(bs[:block_index]),
bs[block_index],
sum(bs[block_index + 1:]))
before, block, after = rhs.get_blocks((nbefore, nblock, nafter))
if before != cid(nbefore) or after != cid(nafter):
outer_rhs = before + cid(nblock - 1) + after
inner_rhs = cid(nbefore) + block + cid(nafter)
return Feedback.create(SeriesProduct.create(*(rest + (inner_rhs,))),
out_port=out_port, in_port=in_port) << outer_rhs
elif block == cid(nblock):
outer_rhs = before + cid(nblock - 1) + after
return Feedback.create(SeriesProduct.create(*rest),
out_port=out_port, in_port=in_port) << outer_rhs
raise CannotSimplify() | def function[_pull_out_unaffected_blocks_rhs, parameter[rest, rhs, out_port, in_port]]:
constant[Similar to :func:`_pull_out_unaffected_blocks_lhs` but on the RHS of a
series product self-feedback.
]
<ast.Tuple object at 0x7da20c9928c0> assign[=] call[name[rhs].index_in_block, parameter[name[in_port]]]
variable[rest] assign[=] call[name[tuple], parameter[name[rest]]]
variable[bs] assign[=] name[rhs].block_structure
<ast.Tuple object at 0x7da20c990670> assign[=] tuple[[<ast.Call object at 0x7da20c9935b0>, <ast.Subscript object at 0x7da20c993c70>, <ast.Call object at 0x7da20c990e50>]]
<ast.Tuple object at 0x7da20c992b90> assign[=] call[name[rhs].get_blocks, parameter[tuple[[<ast.Name object at 0x7da20c993850>, <ast.Name object at 0x7da20c991a20>, <ast.Name object at 0x7da2041d88e0>]]]]
if <ast.BoolOp object at 0x7da2041da800> begin[:]
variable[outer_rhs] assign[=] binary_operation[binary_operation[name[before] + call[name[cid], parameter[binary_operation[name[nblock] - constant[1]]]]] + name[after]]
variable[inner_rhs] assign[=] binary_operation[binary_operation[call[name[cid], parameter[name[nbefore]]] + name[block]] + call[name[cid], parameter[name[nafter]]]]
return[binary_operation[call[name[Feedback].create, parameter[call[name[SeriesProduct].create, parameter[<ast.Starred object at 0x7da2041d8d00>]]]] <ast.LShift object at 0x7da2590d69e0> name[outer_rhs]]]
<ast.Raise object at 0x7da2041d86d0> | keyword[def] identifier[_pull_out_unaffected_blocks_rhs] ( identifier[rest] , identifier[rhs] , identifier[out_port] , identifier[in_port] ):
literal[string]
identifier[_] , identifier[block_index] = identifier[rhs] . identifier[index_in_block] ( identifier[in_port] )
identifier[rest] = identifier[tuple] ( identifier[rest] )
identifier[bs] = identifier[rhs] . identifier[block_structure]
( identifier[nbefore] , identifier[nblock] , identifier[nafter] )=( identifier[sum] ( identifier[bs] [: identifier[block_index] ]),
identifier[bs] [ identifier[block_index] ],
identifier[sum] ( identifier[bs] [ identifier[block_index] + literal[int] :]))
identifier[before] , identifier[block] , identifier[after] = identifier[rhs] . identifier[get_blocks] (( identifier[nbefore] , identifier[nblock] , identifier[nafter] ))
keyword[if] identifier[before] != identifier[cid] ( identifier[nbefore] ) keyword[or] identifier[after] != identifier[cid] ( identifier[nafter] ):
identifier[outer_rhs] = identifier[before] + identifier[cid] ( identifier[nblock] - literal[int] )+ identifier[after]
identifier[inner_rhs] = identifier[cid] ( identifier[nbefore] )+ identifier[block] + identifier[cid] ( identifier[nafter] )
keyword[return] identifier[Feedback] . identifier[create] ( identifier[SeriesProduct] . identifier[create] (*( identifier[rest] +( identifier[inner_rhs] ,))),
identifier[out_port] = identifier[out_port] , identifier[in_port] = identifier[in_port] )<< identifier[outer_rhs]
keyword[elif] identifier[block] == identifier[cid] ( identifier[nblock] ):
identifier[outer_rhs] = identifier[before] + identifier[cid] ( identifier[nblock] - literal[int] )+ identifier[after]
keyword[return] identifier[Feedback] . identifier[create] ( identifier[SeriesProduct] . identifier[create] (* identifier[rest] ),
identifier[out_port] = identifier[out_port] , identifier[in_port] = identifier[in_port] )<< identifier[outer_rhs]
keyword[raise] identifier[CannotSimplify] () | def _pull_out_unaffected_blocks_rhs(rest, rhs, out_port, in_port):
"""Similar to :func:`_pull_out_unaffected_blocks_lhs` but on the RHS of a
series product self-feedback.
"""
(_, block_index) = rhs.index_in_block(in_port)
rest = tuple(rest)
bs = rhs.block_structure
(nbefore, nblock, nafter) = (sum(bs[:block_index]), bs[block_index], sum(bs[block_index + 1:]))
(before, block, after) = rhs.get_blocks((nbefore, nblock, nafter))
if before != cid(nbefore) or after != cid(nafter):
outer_rhs = before + cid(nblock - 1) + after
inner_rhs = cid(nbefore) + block + cid(nafter)
return Feedback.create(SeriesProduct.create(*rest + (inner_rhs,)), out_port=out_port, in_port=in_port) << outer_rhs # depends on [control=['if'], data=[]]
elif block == cid(nblock):
outer_rhs = before + cid(nblock - 1) + after
return Feedback.create(SeriesProduct.create(*rest), out_port=out_port, in_port=in_port) << outer_rhs # depends on [control=['if'], data=[]]
raise CannotSimplify() |
def query_by_wiql(self, wiql, team_context=None, time_precision=None, top=None):
"""QueryByWiql.
[Preview API] Gets the results of the query given its WIQL.
:param :class:`<Wiql> <azure.devops.v5_1.work_item_tracking.models.Wiql>` wiql: The query containing the WIQL.
:param :class:`<TeamContext> <azure.devops.v5_1.work_item_tracking.models.TeamContext>` team_context: The team context for the operation
:param bool time_precision: Whether or not to use time precision.
:param int top: The max number of results to return.
:rtype: :class:`<WorkItemQueryResult> <azure.devops.v5_1.work_item_tracking.models.WorkItemQueryResult>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
query_parameters = {}
if time_precision is not None:
query_parameters['timePrecision'] = self._serialize.query('time_precision', time_precision, 'bool')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
content = self._serialize.body(wiql, 'Wiql')
response = self._send(http_method='POST',
location_id='1a9c53f7-f243-4447-b110-35ef023636e4',
version='5.1-preview.2',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('WorkItemQueryResult', response) | def function[query_by_wiql, parameter[self, wiql, team_context, time_precision, top]]:
constant[QueryByWiql.
[Preview API] Gets the results of the query given its WIQL.
:param :class:`<Wiql> <azure.devops.v5_1.work_item_tracking.models.Wiql>` wiql: The query containing the WIQL.
:param :class:`<TeamContext> <azure.devops.v5_1.work_item_tracking.models.TeamContext>` team_context: The team context for the operation
:param bool time_precision: Whether or not to use time precision.
:param int top: The max number of results to return.
:rtype: :class:`<WorkItemQueryResult> <azure.devops.v5_1.work_item_tracking.models.WorkItemQueryResult>`
]
variable[project] assign[=] constant[None]
variable[team] assign[=] constant[None]
if compare[name[team_context] is_not constant[None]] begin[:]
if name[team_context].project_id begin[:]
variable[project] assign[=] name[team_context].project_id
if name[team_context].team_id begin[:]
variable[team] assign[=] name[team_context].team_id
variable[route_values] assign[=] dictionary[[], []]
if compare[name[project] is_not constant[None]] begin[:]
call[name[route_values]][constant[project]] assign[=] call[name[self]._serialize.url, parameter[constant[project], name[project], constant[string]]]
if compare[name[team] is_not constant[None]] begin[:]
call[name[route_values]][constant[team]] assign[=] call[name[self]._serialize.url, parameter[constant[team], name[team], constant[string]]]
variable[query_parameters] assign[=] dictionary[[], []]
if compare[name[time_precision] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[timePrecision]] assign[=] call[name[self]._serialize.query, parameter[constant[time_precision], name[time_precision], constant[bool]]]
if compare[name[top] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[$top]] assign[=] call[name[self]._serialize.query, parameter[constant[top], name[top], constant[int]]]
variable[content] assign[=] call[name[self]._serialize.body, parameter[name[wiql], constant[Wiql]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[WorkItemQueryResult], name[response]]]] | keyword[def] identifier[query_by_wiql] ( identifier[self] , identifier[wiql] , identifier[team_context] = keyword[None] , identifier[time_precision] = keyword[None] , identifier[top] = keyword[None] ):
literal[string]
identifier[project] = keyword[None]
identifier[team] = keyword[None]
keyword[if] identifier[team_context] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[team_context] . identifier[project_id] :
identifier[project] = identifier[team_context] . identifier[project_id]
keyword[else] :
identifier[project] = identifier[team_context] . identifier[project]
keyword[if] identifier[team_context] . identifier[team_id] :
identifier[team] = identifier[team_context] . identifier[team_id]
keyword[else] :
identifier[team] = identifier[team_context] . identifier[team]
identifier[route_values] ={}
keyword[if] identifier[project] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[project] , literal[string] )
keyword[if] identifier[team] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[team] , literal[string] )
identifier[query_parameters] ={}
keyword[if] identifier[time_precision] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[time_precision] , literal[string] )
keyword[if] identifier[top] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[top] , literal[string] )
identifier[content] = identifier[self] . identifier[_serialize] . identifier[body] ( identifier[wiql] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] ,
identifier[query_parameters] = identifier[query_parameters] ,
identifier[content] = identifier[content] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] ) | def query_by_wiql(self, wiql, team_context=None, time_precision=None, top=None):
"""QueryByWiql.
[Preview API] Gets the results of the query given its WIQL.
:param :class:`<Wiql> <azure.devops.v5_1.work_item_tracking.models.Wiql>` wiql: The query containing the WIQL.
:param :class:`<TeamContext> <azure.devops.v5_1.work_item_tracking.models.TeamContext>` team_context: The team context for the operation
:param bool time_precision: Whether or not to use time precision.
:param int top: The max number of results to return.
:rtype: :class:`<WorkItemQueryResult> <azure.devops.v5_1.work_item_tracking.models.WorkItemQueryResult>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id # depends on [control=['if'], data=[]]
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id # depends on [control=['if'], data=[]]
else:
team = team_context.team # depends on [control=['if'], data=['team_context']]
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string') # depends on [control=['if'], data=['project']]
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string') # depends on [control=['if'], data=['team']]
query_parameters = {}
if time_precision is not None:
query_parameters['timePrecision'] = self._serialize.query('time_precision', time_precision, 'bool') # depends on [control=['if'], data=['time_precision']]
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int') # depends on [control=['if'], data=['top']]
content = self._serialize.body(wiql, 'Wiql')
response = self._send(http_method='POST', location_id='1a9c53f7-f243-4447-b110-35ef023636e4', version='5.1-preview.2', route_values=route_values, query_parameters=query_parameters, content=content)
return self._deserialize('WorkItemQueryResult', response) |
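A hedged client-side sketch; the organization URL, token, and WIQL text are placeholders, and the client factory call follows the azure-devops package's documented pattern (clients_v5_1 pairs with the v5_1 models):
# Hypothetical end-to-end call against Azure DevOps.
from azure.devops.connection import Connection
from azure.devops.v5_1.work_item_tracking.models import Wiql
from msrest.authentication import BasicAuthentication

connection = Connection(base_url='https://dev.azure.com/my-org',
                        creds=BasicAuthentication('', 'personal-access-token'))
client = connection.clients_v5_1.get_work_item_tracking_client()
result = client.query_by_wiql(
    Wiql(query="SELECT [System.Id] FROM WorkItems WHERE [System.State] = 'Active'"),
    top=10)
print([ref.id for ref in result.work_items])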
def authors(self):
"""A list of namedtuples storing author information,
where each namedtuple corresponds to one author.
The information in each namedtuple is (name surname initials id url).
All entries are strings.
"""
out = []
order = 'name surname initials id url'
auth = namedtuple('Author', order)
for author in self._citeInfoMatrix.get('author'):
author = {k.split(":", 1)[-1]: v for k, v in author.items()}
new = auth(name=author.get('index-name'), id=author.get('authid'),
surname=author.get('surname'),
initials=author.get('initials'),
url=author.get('author-url'))
out.append(new)
return out or None | def function[authors, parameter[self]]:
constant[A list of namedtuples storing author information,
where each namedtuple corresponds to one author.
The information in each namedtuple is (name surname initials id url).
All entries are strings.
]
variable[out] assign[=] list[[]]
variable[order] assign[=] constant[name surname initials id url]
variable[auth] assign[=] call[name[namedtuple], parameter[constant[Author], name[order]]]
for taget[name[author]] in starred[call[name[self]._citeInfoMatrix.get, parameter[constant[author]]]] begin[:]
variable[author] assign[=] <ast.DictComp object at 0x7da18f00dde0>
variable[new] assign[=] call[name[auth], parameter[]]
call[name[out].append, parameter[name[new]]]
return[<ast.BoolOp object at 0x7da18ede4670>] | keyword[def] identifier[authors] ( identifier[self] ):
literal[string]
identifier[out] =[]
identifier[order] = literal[string]
identifier[auth] = identifier[namedtuple] ( literal[string] , identifier[order] )
keyword[for] identifier[author] keyword[in] identifier[self] . identifier[_citeInfoMatrix] . identifier[get] ( literal[string] ):
identifier[author] ={ identifier[k] . identifier[split] ( literal[string] , literal[int] )[- literal[int] ]: identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[author] . identifier[items] ()}
identifier[new] = identifier[auth] ( identifier[name] = identifier[author] . identifier[get] ( literal[string] ), identifier[id] = identifier[author] . identifier[get] ( literal[string] ),
identifier[surname] = identifier[author] . identifier[get] ( literal[string] ),
identifier[initials] = identifier[author] . identifier[get] ( literal[string] ),
identifier[url] = identifier[author] . identifier[get] ( literal[string] ))
identifier[out] . identifier[append] ( identifier[new] )
keyword[return] identifier[out] keyword[or] keyword[None] | def authors(self):
"""A list of namedtuples storing author information,
where each namedtuple corresponds to one author.
The information in each namedtuple is (name surname initials id url).
All entries are strings.
"""
out = []
order = 'name surname initials id url'
auth = namedtuple('Author', order)
for author in self._citeInfoMatrix.get('author'):
author = {k.split(':', 1)[-1]: v for (k, v) in author.items()}
new = auth(name=author.get('index-name'), id=author.get('authid'), surname=author.get('surname'), initials=author.get('initials'), url=author.get('author-url'))
out.append(new) # depends on [control=['for'], data=['author']]
return out or None |
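A small consumption sketch; `record` stands in for whatever object exposes the authors property above (it resembles a Scopus citation-overview wrapper, but that is an assumption):
# Hypothetical: print one line per author namedtuple; the property may be None.
for au in (record.authors or []):
    print('%s %s (%s) -> %s' % (au.initials, au.surname, au.id, au.url))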
def gunzip(gzipfile, template=None, runas=None, options=None):
'''
Uses the gunzip command to unpack gzip files
template : None
Can be set to 'jinja' or another supported template engine to render
the command arguments before execution:
.. code-block:: bash
salt '*' archive.gunzip template=jinja /tmp/{{grains.id}}.txt.gz
runas : None
The user with which to run the gzip command line
options : None
Pass any additional arguments to gzip
.. versionadded:: 2016.3.4
CLI Example:
.. code-block:: bash
# Create /tmp/sourcefile.txt
salt '*' archive.gunzip /tmp/sourcefile.txt.gz
salt '*' archive.gunzip /tmp/sourcefile.txt options='--verbose'
'''
cmd = ['gunzip']
if options:
cmd.append(options)
cmd.append('{0}'.format(gzipfile))
return __salt__['cmd.run'](cmd,
template=template,
runas=runas,
python_shell=False).splitlines() | def function[gunzip, parameter[gzipfile, template, runas, options]]:
constant[
Uses the gunzip command to unpack gzip files
template : None
Can be set to 'jinja' or another supported template engine to render
the command arguments before execution:
.. code-block:: bash
salt '*' archive.gunzip template=jinja /tmp/{{grains.id}}.txt.gz
runas : None
The user with which to run the gzip command line
options : None
Pass any additional arguments to gzip
.. versionadded:: 2016.3.4
CLI Example:
.. code-block:: bash
# Create /tmp/sourcefile.txt
salt '*' archive.gunzip /tmp/sourcefile.txt.gz
salt '*' archive.gunzip /tmp/sourcefile.txt options='--verbose'
]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da18f00fac0>]]
if name[options] begin[:]
call[name[cmd].append, parameter[name[options]]]
call[name[cmd].append, parameter[call[constant[{0}].format, parameter[name[gzipfile]]]]]
return[call[call[call[name[__salt__]][constant[cmd.run]], parameter[name[cmd]]].splitlines, parameter[]]] | keyword[def] identifier[gunzip] ( identifier[gzipfile] , identifier[template] = keyword[None] , identifier[runas] = keyword[None] , identifier[options] = keyword[None] ):
literal[string]
identifier[cmd] =[ literal[string] ]
keyword[if] identifier[options] :
identifier[cmd] . identifier[append] ( identifier[options] )
identifier[cmd] . identifier[append] ( literal[string] . identifier[format] ( identifier[gzipfile] ))
keyword[return] identifier[__salt__] [ literal[string] ]( identifier[cmd] ,
identifier[template] = identifier[template] ,
identifier[runas] = identifier[runas] ,
identifier[python_shell] = keyword[False] ). identifier[splitlines] () | def gunzip(gzipfile, template=None, runas=None, options=None):
"""
Uses the gunzip command to unpack gzip files
template : None
Can be set to 'jinja' or another supported template engine to render
the command arguments before execution:
.. code-block:: bash
salt '*' archive.gunzip template=jinja /tmp/{{grains.id}}.txt.gz
runas : None
The user with which to run the gzip command line
options : None
Pass any additional arguments to gzip
.. versionadded:: 2016.3.4
CLI Example:
.. code-block:: bash
# Create /tmp/sourcefile.txt
salt '*' archive.gunzip /tmp/sourcefile.txt.gz
salt '*' archive.gunzip /tmp/sourcefile.txt options='--verbose'
"""
cmd = ['gunzip']
if options:
cmd.append(options) # depends on [control=['if'], data=[]]
cmd.append('{0}'.format(gzipfile))
return __salt__['cmd.run'](cmd, template=template, runas=runas, python_shell=False).splitlines() |
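For comparison, a hedged stand-alone equivalent of the command the salt module builds, using subprocess instead of cmd.run; the path is a placeholder:
# Hypothetical: the same gunzip invocation outside of salt.
import subprocess

proc = subprocess.run(['gunzip', '--verbose', '/tmp/sourcefile.txt.gz'],
                      capture_output=True, text=True)
print(proc.stderr.strip())  # gzip reports its progress on stderr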
def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy) | def function[get_bool_data, parameter[self, copy]]:
constant[
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
]
call[name[self]._consolidate_inplace, parameter[]]
return[call[name[self].combine, parameter[<ast.ListComp object at 0x7da1b26ae6b0>, name[copy]]]] | keyword[def] identifier[get_bool_data] ( identifier[self] , identifier[copy] = keyword[False] ):
literal[string]
identifier[self] . identifier[_consolidate_inplace] ()
keyword[return] identifier[self] . identifier[combine] ([ identifier[b] keyword[for] identifier[b] keyword[in] identifier[self] . identifier[blocks] keyword[if] identifier[b] . identifier[is_bool] ], identifier[copy] ) | def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy) |
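A hedged illustration; this method lives on pandas' internal BlockManager, so the private attribute below is an assumption tied to older pandas releases:
# Hypothetical peek at the boolean-typed blocks of a frame (internal API).
import pandas as pd

df = pd.DataFrame({'a': [True, False], 'b': [1, 2], 'c': [False, True]})
bool_mgr = df._data.get_bool_data()  # spelled df._mgr on newer pandas
print(list(bool_mgr.items))          # ['a', 'c']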
def spendables_for_address(self, address):
"""
Return a list of Spendable objects for the
given bitcoin address.
"""
URL = "%s/addr/%s/utxo" % (self.base_url, address)
r = json.loads(urlopen(URL).read().decode("utf8"))
spendables = []
for u in r:
coin_value = btc_to_satoshi(str(u.get("amount")))
script = h2b(u.get("scriptPubKey"))
previous_hash = h2b_rev(u.get("txid"))
previous_index = u.get("vout")
spendables.append(Tx.Spendable(coin_value, script, previous_hash, previous_index))
return spendables | def function[spendables_for_address, parameter[self, address]]:
constant[
Return a list of Spendable objects for the
given bitcoin address.
]
variable[URL] assign[=] binary_operation[constant[%s/addr/%s/utxo] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1d76ec0>, <ast.Name object at 0x7da1b1d74a30>]]]
variable[r] assign[=] call[name[json].loads, parameter[call[call[call[name[urlopen], parameter[name[URL]]].read, parameter[]].decode, parameter[constant[utf8]]]]]
variable[spendables] assign[=] list[[]]
for taget[name[u]] in starred[name[r]] begin[:]
variable[coin_value] assign[=] call[name[btc_to_satoshi], parameter[call[name[str], parameter[call[name[u].get, parameter[constant[amount]]]]]]]
variable[script] assign[=] call[name[h2b], parameter[call[name[u].get, parameter[constant[scriptPubKey]]]]]
variable[previous_hash] assign[=] call[name[h2b_rev], parameter[call[name[u].get, parameter[constant[txid]]]]]
variable[previous_index] assign[=] call[name[u].get, parameter[constant[vout]]]
call[name[spendables].append, parameter[call[name[Tx].Spendable, parameter[name[coin_value], name[script], name[previous_hash], name[previous_index]]]]]
return[name[spendables]] | keyword[def] identifier[spendables_for_address] ( identifier[self] , identifier[address] ):
literal[string]
identifier[URL] = literal[string] %( identifier[self] . identifier[base_url] , identifier[address] )
identifier[r] = identifier[json] . identifier[loads] ( identifier[urlopen] ( identifier[URL] ). identifier[read] (). identifier[decode] ( literal[string] ))
identifier[spendables] =[]
keyword[for] identifier[u] keyword[in] identifier[r] :
identifier[coin_value] = identifier[btc_to_satoshi] ( identifier[str] ( identifier[u] . identifier[get] ( literal[string] )))
identifier[script] = identifier[h2b] ( identifier[u] . identifier[get] ( literal[string] ))
identifier[previous_hash] = identifier[h2b_rev] ( identifier[u] . identifier[get] ( literal[string] ))
identifier[previous_index] = identifier[u] . identifier[get] ( literal[string] )
identifier[spendables] . identifier[append] ( identifier[Tx] . identifier[Spendable] ( identifier[coin_value] , identifier[script] , identifier[previous_hash] , identifier[previous_index] ))
keyword[return] identifier[spendables] | def spendables_for_address(self, address):
"""
Return a list of Spendable objects for the
given bitcoin address.
"""
URL = '%s/addr/%s/utxo' % (self.base_url, address)
r = json.loads(urlopen(URL).read().decode('utf8'))
spendables = []
for u in r:
coin_value = btc_to_satoshi(str(u.get('amount')))
script = h2b(u.get('scriptPubKey'))
previous_hash = h2b_rev(u.get('txid'))
previous_index = u.get('vout')
spendables.append(Tx.Spendable(coin_value, script, previous_hash, previous_index)) # depends on [control=['for'], data=['u']]
return spendables |
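A hedged usage sketch; `provider` stands in for whatever object carries the method above together with a base_url pointing at an Insight-compatible API, and the address is a placeholder:
# Hypothetical: list unspent outputs for an address.
for s in provider.spendables_for_address('1BitcoinEaterAddressDontSendf59kuE'):
    print(s)  # each Spendable carries a value, script, and previous outpoint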
def iter_alignments(dataset, cognate_sets, column='Segments', method='library'):
"""
Function computes automatic alignments and attaches them to the cognate sets.
"""
if not isinstance(dataset, lingpy.basic.parser.QLCParser):
wordlist = _cldf2wordlist(dataset)
cognates = {r['Form_ID']: r for r in cognate_sets}
wordlist.add_entries(
'cogid',
'lid',
lambda x: cognates[x]['Cognateset_ID'] if x in cognates else 0)
alm = lingpy.Alignments(
wordlist,
ref='cogid',
row='parameter_id',
col='language_id',
segments=column.lower())
alm.align(method=method)
for k in alm:
if alm[k, 'lid'] in cognates:
cognate = cognates[alm[k, 'lid']]
cognate['Alignment'] = alm[k, 'alignment']
cognate['Alignment_Method'] = method
else:
alm = lingpy.Alignments(dataset, ref='cogid')
alm.align(method=method)
for cognate in cognate_sets:
idx = cognate['ID'] or cognate['Form_ID']
cognate['Alignment'] = alm[int(idx), 'alignment']
cognate['Alignment_Method'] = 'SCA-' + method | def function[iter_alignments, parameter[dataset, cognate_sets, column, method]]:
constant[
Function computes automatic alignments and attaches them to the cognate sets.
]
if <ast.UnaryOp object at 0x7da1b236b940> begin[:]
variable[wordlist] assign[=] call[name[_cldf2wordlist], parameter[name[dataset]]]
variable[cognates] assign[=] <ast.DictComp object at 0x7da1b2369840>
call[name[wordlist].add_entries, parameter[constant[cogid], constant[lid], <ast.Lambda object at 0x7da1b236abc0>]]
variable[alm] assign[=] call[name[lingpy].Alignments, parameter[name[wordlist]]]
call[name[alm].align, parameter[]]
for taget[name[k]] in starred[name[alm]] begin[:]
if compare[call[name[alm]][tuple[[<ast.Name object at 0x7da1b23682b0>, <ast.Constant object at 0x7da1b2368c40>]]] in name[cognates]] begin[:]
variable[cognate] assign[=] call[name[cognates]][call[name[alm]][tuple[[<ast.Name object at 0x7da1b2369ae0>, <ast.Constant object at 0x7da1b2368a00>]]]]
call[name[cognate]][constant[Alignment]] assign[=] call[name[alm]][tuple[[<ast.Name object at 0x7da1b2369030>, <ast.Constant object at 0x7da1b2369f90>]]]
call[name[cognate]][constant[Alignment_Method]] assign[=] name[method] | keyword[def] identifier[iter_alignments] ( identifier[dataset] , identifier[cognate_sets] , identifier[column] = literal[string] , identifier[method] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[dataset] , identifier[lingpy] . identifier[basic] . identifier[parser] . identifier[QLCParser] ):
identifier[wordlist] = identifier[_cldf2wordlist] ( identifier[dataset] )
identifier[cognates] ={ identifier[r] [ literal[string] ]: identifier[r] keyword[for] identifier[r] keyword[in] identifier[cognate_sets] }
identifier[wordlist] . identifier[add_entries] (
literal[string] ,
literal[string] ,
keyword[lambda] identifier[x] : identifier[cognates] [ identifier[x] ][ literal[string] ] keyword[if] identifier[x] keyword[in] identifier[cognates] keyword[else] literal[int] )
identifier[alm] = identifier[lingpy] . identifier[Alignments] (
identifier[wordlist] ,
identifier[ref] = literal[string] ,
identifier[row] = literal[string] ,
identifier[col] = literal[string] ,
identifier[segments] = identifier[column] . identifier[lower] ())
identifier[alm] . identifier[align] ( identifier[method] = identifier[method] )
keyword[for] identifier[k] keyword[in] identifier[alm] :
keyword[if] identifier[alm] [ identifier[k] , literal[string] ] keyword[in] identifier[cognates] :
identifier[cognate] = identifier[cognates] [ identifier[alm] [ identifier[k] , literal[string] ]]
identifier[cognate] [ literal[string] ]= identifier[alm] [ identifier[k] , literal[string] ]
identifier[cognate] [ literal[string] ]= identifier[method]
keyword[else] :
identifier[alm] = identifier[lingpy] . identifier[Alignments] ( identifier[dataset] , identifier[ref] = literal[string] )
identifier[alm] . identifier[align] ( identifier[method] = identifier[method] )
keyword[for] identifier[cognate] keyword[in] identifier[cognate_sets] :
identifier[idx] = identifier[cognate] [ literal[string] ] keyword[or] identifier[cognate] [ literal[string] ]
identifier[cognate] [ literal[string] ]= identifier[alm] [ identifier[int] ( identifier[idx] ), literal[string] ]
identifier[cognate] [ literal[string] ]= literal[string] + identifier[method] | def iter_alignments(dataset, cognate_sets, column='Segments', method='library'):
"""
Function computes automatic alignments and attaches them to the cognate sets.
"""
if not isinstance(dataset, lingpy.basic.parser.QLCParser):
wordlist = _cldf2wordlist(dataset)
cognates = {r['Form_ID']: r for r in cognate_sets}
wordlist.add_entries('cogid', 'lid', lambda x: cognates[x]['Cognateset_ID'] if x in cognates else 0)
alm = lingpy.Alignments(wordlist, ref='cogid', row='parameter_id', col='language_id', segments=column.lower())
alm.align(method=method)
for k in alm:
if alm[k, 'lid'] in cognates:
cognate = cognates[alm[k, 'lid']]
cognate['Alignment'] = alm[k, 'alignment']
cognate['Alignment_Method'] = method # depends on [control=['if'], data=['cognates']] # depends on [control=['for'], data=['k']] # depends on [control=['if'], data=[]]
else:
alm = lingpy.Alignments(dataset, ref='cogid')
alm.align(method=method)
for cognate in cognate_sets:
idx = cognate['ID'] or cognate['Form_ID']
cognate['Alignment'] = alm[int(idx), 'alignment']
cognate['Alignment_Method'] = 'SCA-' + method # depends on [control=['for'], data=['cognate']] |
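A hedged sketch of the second code path above (a lingpy wordlist that already carries a cogid column); the file name and cognate rows are placeholders:
# Hypothetical: align an existing wordlist in place.
import lingpy

wl = lingpy.Wordlist('wordlist.tsv')  # placeholder file with a 'cogid' column
cognates = [{'ID': idx, 'Form_ID': idx} for idx in wl]
iter_alignments(wl, cognates, method='library')
print(cognates[0].get('Alignment'), cognates[0].get('Alignment_Method'))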
def _find_widget_match(self, prop_name):
"""
Used to search ``self.view`` when :meth:`adapt` is not given a widget
name.
*prop_name* is the name of a property in the model.
Returns a string with the best match. Raises ``ValueError`` when
nothing is found and :class:`TooManyCandidatesError` when more than
one candidate matches.
Subclasses can customise this. No super call necessary. The default
implementation converts *prop_name* to lower case and allows prefixes
like ``entry_``.
"""
names = []
for wid_name in self.view:
# if widget names ends with given property name: we skip
# any prefix in widget name
if wid_name.lower().endswith(prop_name.lower()):
names.append(wid_name)
if len(names) == 0:
raise ValueError("No widget candidates match property '%s': %s" % \
(prop_name, names))
if len(names) > 1:
raise TooManyCandidatesError("%d widget candidates match property '%s': %s" % \
(len(names), prop_name, names))
return names[0] | def function[_find_widget_match, parameter[self, prop_name]]:
constant[
Used to search ``self.view`` when :meth:`adapt` is not given a widget
name.
*prop_name* is the name of a property in the model.
Returns a string with the best match. Raises ``ValueError`` when
nothing is found and :class:`TooManyCandidatesError` when more than
one candidate matches.
Subclasses can customise this. No super call necessary. The default
implementation converts *prop_name* to lower case and allows prefixes
like ``entry_``.
]
variable[names] assign[=] list[[]]
for taget[name[wid_name]] in starred[name[self].view] begin[:]
if call[call[name[wid_name].lower, parameter[]].endswith, parameter[call[name[prop_name].lower, parameter[]]]] begin[:]
call[name[names].append, parameter[name[wid_name]]]
if compare[call[name[len], parameter[name[names]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b14071c0>
if compare[call[name[len], parameter[name[names]]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da1b1407e20>
return[call[name[names]][constant[0]]] | keyword[def] identifier[_find_widget_match] ( identifier[self] , identifier[prop_name] ):
literal[string]
identifier[names] =[]
keyword[for] identifier[wid_name] keyword[in] identifier[self] . identifier[view] :
keyword[if] identifier[wid_name] . identifier[lower] (). identifier[endswith] ( identifier[prop_name] . identifier[lower] ()):
identifier[names] . identifier[append] ( identifier[wid_name] )
keyword[if] identifier[len] ( identifier[names] )== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[prop_name] , identifier[names] ))
keyword[if] identifier[len] ( identifier[names] )> literal[int] :
keyword[raise] identifier[TooManyCandidatesError] ( literal[string] %( identifier[len] ( identifier[names] ), identifier[prop_name] , identifier[names] ))
keyword[return] identifier[names] [ literal[int] ] | def _find_widget_match(self, prop_name):
"""
Used to search ``self.view`` when :meth:`adapt` is not given a widget
name.
*prop_name* is the name of a property in the model.
Returns a string with the best match. Raises ``ValueError`` when
nothing is found and :class:`TooManyCandidatesError` when more than
one candidate matches.
Subclasses can customise this. No super call necessary. The default
implementation converts *prop_name* to lower case and allows prefixes
like ``entry_``.
"""
names = []
for wid_name in self.view:
# if widget names ends with given property name: we skip
# any prefix in widget name
if wid_name.lower().endswith(prop_name.lower()):
names.append(wid_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['wid_name']]
if len(names) == 0:
raise ValueError("No widget candidates match property '%s': %s" % (prop_name, names)) # depends on [control=['if'], data=[]]
if len(names) > 1:
raise TooManyCandidatesError("%d widget candidates match property '%s': %s" % (len(names), prop_name, names)) # depends on [control=['if'], data=[]]
return names[0] |
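A standalone sketch of the suffix rule the method applies, using throwaway widget names:
# Any widget whose lowercased name ends with the property name matches,
# which is what tolerates prefixes such as 'entry_'.
view = ['entry_username', 'label_title', 'username_hint']
prop_name = 'username'
matches = [w for w in view if w.lower().endswith(prop_name.lower())]
print(matches)  # two candidates, so the method would raise TooManyCandidatesError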
def configurable(name_or_fn=None, module=None, whitelist=None, blacklist=None):
"""Decorator to make a function or class configurable.
This decorator registers the decorated function/class as configurable, which
allows its parameters to be supplied from the global configuration (i.e., set
through `bind_parameter` or `parse_config`). The decorated function is
associated with a name in the global configuration, which by default is simply
the name of the function or class, but can be specified explicitly to avoid
naming collisions or improve clarity.
If some parameters should not be configurable, they can be specified in
`blacklist`. If only a restricted set of parameters should be configurable,
they can be specified in `whitelist`.
The decorator can be used without any parameters as follows:
@config.configurable
def some_configurable_function(param1, param2='a default value'):
...
In this case, the function is associated with the name
`'some_configurable_function'` in the global configuration, and both `param1`
and `param2` are configurable.
The decorator can be supplied with parameters to specify the configurable name
or supply a whitelist/blacklist:
@config.configurable('explicit_configurable_name', whitelist='param2')
def some_configurable_function(param1, param2='a default value'):
...
In this case, the configurable is associated with the name
`'explicit_configurable_name'` in the global configuration, and only `param2`
is configurable.
Classes can be decorated as well, in which case parameters of their
constructors are made configurable:
@config.configurable
class SomeClass(object):
def __init__(self, param1, param2='a default value'):
...
In this case, the name of the configurable is `'SomeClass'`, and both `param1`
and `param2` are configurable.
Args:
name_or_fn: A name for this configurable, or a function to decorate (in
which case the name will be taken from that function). If not set,
defaults to the name of the function/class that is being made
configurable. If a name is provided, it may also include module components
to be used for disambiguation (these will be appended to any components
explicitly specified by `module`).
module: The module to associate with the configurable, to help handle naming
collisions. By default, the module of the function or class being made
configurable will be used (if no module is specified as part of the name).
whitelist: A whitelisted set of kwargs that should be configurable. All
other kwargs will not be configurable. Only one of `whitelist` or
`blacklist` should be specified.
blacklist: A blacklisted set of kwargs that should not be configurable. All
other kwargs will be configurable. Only one of `whitelist` or `blacklist`
should be specified.
Returns:
When used with no parameters (or with a function/class supplied as the first
parameter), it returns the decorated function or class. When used with
parameters, it returns a function that can be applied to decorate the target
function or class.
"""
decoration_target = None
if callable(name_or_fn):
decoration_target = name_or_fn
name = None
else:
name = name_or_fn
def perform_decoration(fn_or_cls):
return _make_configurable(fn_or_cls, name, module, whitelist, blacklist)
if decoration_target:
return perform_decoration(decoration_target)
return perform_decoration | def function[configurable, parameter[name_or_fn, module, whitelist, blacklist]]:
constant[Decorator to make a function or class configurable.
This decorator registers the decorated function/class as configurable, which
allows its parameters to be supplied from the global configuration (i.e., set
through `bind_parameter` or `parse_config`). The decorated function is
associated with a name in the global configuration, which by default is simply
the name of the function or class, but can be specified explicitly to avoid
naming collisions or improve clarity.
If some parameters should not be configurable, they can be specified in
`blacklist`. If only a restricted set of parameters should be configurable,
they can be specified in `whitelist`.
The decorator can be used without any parameters as follows:
@config.configurable
def some_configurable_function(param1, param2='a default value'):
...
In this case, the function is associated with the name
`'some_configurable_function'` in the global configuration, and both `param1`
and `param2` are configurable.
The decorator can be supplied with parameters to specify the configurable name
or supply a whitelist/blacklist:
@config.configurable('explicit_configurable_name', whitelist='param2')
def some_configurable_function(param1, param2='a default value'):
...
In this case, the configurable is associated with the name
`'explicit_configurable_name'` in the global configuration, and only `param2`
is configurable.
Classes can be decorated as well, in which case parameters of their
constructors are made configurable:
@config.configurable
class SomeClass(object):
def __init__(self, param1, param2='a default value'):
...
In this case, the name of the configurable is `'SomeClass'`, and both `param1`
and `param2` are configurable.
Args:
name_or_fn: A name for this configurable, or a function to decorate (in
which case the name will be taken from that function). If not set,
defaults to the name of the function/class that is being made
configurable. If a name is provided, it may also include module components
to be used for disambiguation (these will be appended to any components
explicitly specified by `module`).
module: The module to associate with the configurable, to help handle naming
collisions. By default, the module of the function or class being made
configurable will be used (if no module is specified as part of the name).
whitelist: A whitelisted set of kwargs that should be configurable. All
other kwargs will not be configurable. Only one of `whitelist` or
`blacklist` should be specified.
blacklist: A blacklisted set of kwargs that should not be configurable. All
other kwargs will be configurable. Only one of `whitelist` or `blacklist`
should be specified.
Returns:
When used with no parameters (or with a function/class supplied as the first
parameter), it returns the decorated function or class. When used with
parameters, it returns a function that can be applied to decorate the target
function or class.
]
variable[decoration_target] assign[=] constant[None]
if call[name[callable], parameter[name[name_or_fn]]] begin[:]
variable[decoration_target] assign[=] name[name_or_fn]
variable[name] assign[=] constant[None]
def function[perform_decoration, parameter[fn_or_cls]]:
return[call[name[_make_configurable], parameter[name[fn_or_cls], name[name], name[module], name[whitelist], name[blacklist]]]]
if name[decoration_target] begin[:]
return[call[name[perform_decoration], parameter[name[decoration_target]]]]
return[name[perform_decoration]] | keyword[def] identifier[configurable] ( identifier[name_or_fn] = keyword[None] , identifier[module] = keyword[None] , identifier[whitelist] = keyword[None] , identifier[blacklist] = keyword[None] ):
literal[string]
identifier[decoration_target] = keyword[None]
keyword[if] identifier[callable] ( identifier[name_or_fn] ):
identifier[decoration_target] = identifier[name_or_fn]
identifier[name] = keyword[None]
keyword[else] :
identifier[name] = identifier[name_or_fn]
keyword[def] identifier[perform_decoration] ( identifier[fn_or_cls] ):
keyword[return] identifier[_make_configurable] ( identifier[fn_or_cls] , identifier[name] , identifier[module] , identifier[whitelist] , identifier[blacklist] )
keyword[if] identifier[decoration_target] :
keyword[return] identifier[perform_decoration] ( identifier[decoration_target] )
keyword[return] identifier[perform_decoration] | def configurable(name_or_fn=None, module=None, whitelist=None, blacklist=None):
"""Decorator to make a function or class configurable.
This decorator registers the decorated function/class as configurable, which
allows its parameters to be supplied from the global configuration (i.e., set
through `bind_parameter` or `parse_config`). The decorated function is
associated with a name in the global configuration, which by default is simply
the name of the function or class, but can be specified explicitly to avoid
naming collisions or improve clarity.
If some parameters should not be configurable, they can be specified in
`blacklist`. If only a restricted set of parameters should be configurable,
they can be specified in `whitelist`.
The decorator can be used without any parameters as follows:
@config.configurable
def some_configurable_function(param1, param2='a default value'):
...
In this case, the function is associated with the name
`'some_configurable_function'` in the global configuration, and both `param1`
and `param2` are configurable.
The decorator can be supplied with parameters to specify the configurable name
or supply a whitelist/blacklist:
@config.configurable('explicit_configurable_name', whitelist='param2')
def some_configurable_function(param1, param2='a default value'):
...
In this case, the configurable is associated with the name
`'explicit_configurable_name'` in the global configuration, and only `param2`
is configurable.
Classes can be decorated as well, in which case parameters of their
constructors are made configurable:
@config.configurable
class SomeClass(object):
def __init__(self, param1, param2='a default value'):
...
In this case, the name of the configurable is `'SomeClass'`, and both `param1`
and `param2` are configurable.
Args:
name_or_fn: A name for this configurable, or a function to decorate (in
which case the name will be taken from that function). If not set,
defaults to the name of the function/class that is being made
configurable. If a name is provided, it may also include module components
to be used for disambiguation (these will be appended to any components
explicitly specified by `module`).
module: The module to associate with the configurable, to help handle naming
collisions. By default, the module of the function or class being made
configurable will be used (if no module is specified as part of the name).
whitelist: A whitelisted set of kwargs that should be configurable. All
other kwargs will not be configurable. Only one of `whitelist` or
`blacklist` should be specified.
blacklist: A blacklisted set of kwargs that should not be configurable. All
other kwargs will be configurable. Only one of `whitelist` or `blacklist`
should be specified.
Returns:
When used with no parameters (or with a function/class supplied as the first
parameter), it returns the decorated function or class. When used with
parameters, it returns a function that can be applied to decorate the target
function or class.
"""
decoration_target = None
if callable(name_or_fn):
decoration_target = name_or_fn
name = None # depends on [control=['if'], data=[]]
else:
name = name_or_fn
def perform_decoration(fn_or_cls):
return _make_configurable(fn_or_cls, name, module, whitelist, blacklist)
if decoration_target:
return perform_decoration(decoration_target) # depends on [control=['if'], data=[]]
return perform_decoration |
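A short binding sketch; the decorator above matches gin-config's public API, so the gin import below is an assumption to that effect:
# Hypothetical: bind a parameter globally, then call without passing it.
import gin

@gin.configurable
def greet(name='world'):
    return 'hello %s' % name

gin.parse_config("greet.name = 'gin'")
print(greet())  # hello gin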
async def eventuallyAll(*coroFuncs: FlexFunc, # (use functools.partials if needed)
totalTimeout: float,
retryWait: float=0.1,
acceptableExceptions=None,
acceptableFails: int=0,
override_timeout_limit=False):
# TODO: Bug when `acceptableFails` > 0 if the first check fails, it will
# exhaust the entire timeout.
"""
:param coroFuncs: iterable of no-arg functions
:param totalTimeout:
:param retryWait:
:param acceptableExceptions:
:param acceptableFails: how many of the passed in coroutines can
ultimately fail and still be ok
:return:
"""
start = time.perf_counter()
def remaining():
return totalTimeout + start - time.perf_counter()
funcNames = []
others = 0
fails = 0
rem = None
for cf in coroFuncs:
if len(funcNames) < 2:
funcNames.append(get_func_name(cf))
else:
others += 1
# noinspection PyBroadException
try:
rem = remaining()
if rem <= 0:
break
await eventually(cf,
retryWait=retryWait,
timeout=rem,
acceptableExceptions=acceptableExceptions,
verbose=True,
override_timeout_limit=override_timeout_limit)
except Exception as ex:
if acceptableExceptions and type(ex) not in acceptableExceptions:
raise
fails += 1
logger.debug("a coro {} with args {} timed out without succeeding; fail count: "
"{}, acceptable: {}".
format(get_func_name(cf), get_func_args(cf), fails, acceptableFails))
if fails > acceptableFails:
raise
if rem is not None and rem <= 0:
fails += 1
if fails > acceptableFails:
err = 'All checks could not complete successfully since total timeout ' \
'expired {} sec ago'.format(-1 * rem if rem < 0 else 0)
raise EventuallyTimeoutException(err)
if others:
funcNames.append("and {} others".format(others))
desc = ", ".join(funcNames)
logger.debug("{} succeeded with {:.2f} seconds to spare".
format(desc, remaining())) | <ast.AsyncFunctionDef object at 0x7da2047ebcd0> | keyword[async] keyword[def] identifier[eventuallyAll] (* identifier[coroFuncs] : identifier[FlexFunc] ,
identifier[totalTimeout] : identifier[float] ,
identifier[retryWait] : identifier[float] = literal[int] ,
identifier[acceptableExceptions] = keyword[None] ,
identifier[acceptableFails] : identifier[int] = literal[int] ,
identifier[override_timeout_limit] = keyword[False] ):
literal[string]
identifier[start] = identifier[time] . identifier[perf_counter] ()
keyword[def] identifier[remaining] ():
keyword[return] identifier[totalTimeout] + identifier[start] - identifier[time] . identifier[perf_counter] ()
identifier[funcNames] =[]
identifier[others] = literal[int]
identifier[fails] = literal[int]
identifier[rem] = keyword[None]
keyword[for] identifier[cf] keyword[in] identifier[coroFuncs] :
keyword[if] identifier[len] ( identifier[funcNames] )< literal[int] :
identifier[funcNames] . identifier[append] ( identifier[get_func_name] ( identifier[cf] ))
keyword[else] :
identifier[others] += literal[int]
keyword[try] :
identifier[rem] = identifier[remaining] ()
keyword[if] identifier[rem] <= literal[int] :
keyword[break]
keyword[await] identifier[eventually] ( identifier[cf] ,
identifier[retryWait] = identifier[retryWait] ,
identifier[timeout] = identifier[rem] ,
identifier[acceptableExceptions] = identifier[acceptableExceptions] ,
identifier[verbose] = keyword[True] ,
identifier[override_timeout_limit] = identifier[override_timeout_limit] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
keyword[if] identifier[acceptableExceptions] keyword[and] identifier[type] ( identifier[ex] ) keyword[not] keyword[in] identifier[acceptableExceptions] :
keyword[raise]
identifier[fails] += literal[int]
identifier[logger] . identifier[debug] ( literal[string]
literal[string] .
identifier[format] ( identifier[get_func_name] ( identifier[cf] ), identifier[get_func_args] ( identifier[cf] ), identifier[fails] , identifier[acceptableFails] ))
keyword[if] identifier[fails] > identifier[acceptableFails] :
keyword[raise]
keyword[if] identifier[rem] keyword[is] keyword[not] keyword[None] keyword[and] identifier[rem] <= literal[int] :
identifier[fails] += literal[int]
keyword[if] identifier[fails] > identifier[acceptableFails] :
identifier[err] = literal[string] literal[string] . identifier[format] (- literal[int] * identifier[rem] keyword[if] identifier[rem] < literal[int] keyword[else] literal[int] )
keyword[raise] identifier[EventuallyTimeoutException] ( identifier[err] )
keyword[if] identifier[others] :
identifier[funcNames] . identifier[append] ( literal[string] . identifier[format] ( identifier[others] ))
identifier[desc] = literal[string] . identifier[join] ( identifier[funcNames] )
identifier[logger] . identifier[debug] ( literal[string] .
identifier[format] ( identifier[desc] , identifier[remaining] ())) | async def eventuallyAll(*coroFuncs: FlexFunc, totalTimeout: float, retryWait: float=0.1, acceptableExceptions=None, acceptableFails: int=0, override_timeout_limit=False): # (use functools.partials if needed)
# TODO: Bug when `acceptableFails` > 0 if the first check fails, it will
# exhaust the entire timeout.
"""
:param coroFuncs: iterable of no-arg functions
:param totalTimeout:
:param retryWait:
:param acceptableExceptions:
:param acceptableFails: how many of the passed in coroutines can
ultimately fail and still be ok
:return:
"""
start = time.perf_counter()
def remaining():
return totalTimeout + start - time.perf_counter()
funcNames = []
others = 0
fails = 0
rem = None
for cf in coroFuncs:
if len(funcNames) < 2:
funcNames.append(get_func_name(cf)) # depends on [control=['if'], data=[]]
else:
others += 1
# noinspection PyBroadException
try:
rem = remaining()
if rem <= 0:
break # depends on [control=['if'], data=[]]
await eventually(cf, retryWait=retryWait, timeout=rem, acceptableExceptions=acceptableExceptions, verbose=True, override_timeout_limit=override_timeout_limit) # depends on [control=['try'], data=[]]
except Exception as ex:
if acceptableExceptions and type(ex) not in acceptableExceptions:
raise # depends on [control=['if'], data=[]]
fails += 1
logger.debug('a coro {} with args {} timed out without succeeding; fail count: {}, acceptable: {}'.format(get_func_name(cf), get_func_args(cf), fails, acceptableFails))
if fails > acceptableFails:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['ex']] # depends on [control=['for'], data=['cf']]
if rem is not None and rem <= 0:
fails += 1
if fails > acceptableFails:
err = 'All checks could not complete successfully since total timeout expired {} sec ago'.format(-1 * rem if rem < 0 else 0)
raise EventuallyTimeoutException(err) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if others:
funcNames.append('and {} others'.format(others)) # depends on [control=['if'], data=[]]
desc = ', '.join(funcNames)
logger.debug('{} succeeded with {:.2f} seconds to spare'.format(desc, remaining())) |
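A hedged driver sketch; the two no-arg checks and the timeout are illustrative, and eventuallyAll is assumed importable from the module above:
# Hypothetical: run two checks until both pass or the shared budget expires.
import asyncio

async def check_a():
    assert state['a'], 'a not ready yet'

async def check_b():
    assert state['b'], 'b not ready yet'

state = {'a': True, 'b': True}
asyncio.get_event_loop().run_until_complete(
    eventuallyAll(check_a, check_b, totalTimeout=5,
                  acceptableExceptions=[AssertionError]))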
def _tokenize(self, text, token_class=None):
"""
Tokenizes a text
:Returns:
A generator of tokens
"""
token_class = token_class or Token
tokens = {}
for i, match in enumerate(self.regex.finditer(text)):
value = match.group(0)
try:
token = tokens[value]
except KeyError:
type = match.lastgroup
token = token_class(value, type=type)
tokens[value] = token
yield token | def function[_tokenize, parameter[self, text, token_class]]:
constant[
Tokenizes a text
:Returns:
A generator of tokens
]
variable[token_class] assign[=] <ast.BoolOp object at 0x7da1b0babb80>
variable[tokens] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b0bab160>, <ast.Name object at 0x7da1b0bab880>]]] in starred[call[name[enumerate], parameter[call[name[self].regex.finditer, parameter[name[text]]]]]] begin[:]
variable[value] assign[=] call[name[match].group, parameter[constant[0]]]
<ast.Try object at 0x7da1b0babeb0>
<ast.Yield object at 0x7da1b0babfa0> | keyword[def] identifier[_tokenize] ( identifier[self] , identifier[text] , identifier[token_class] = keyword[None] ):
literal[string]
identifier[token_class] = identifier[token_class] keyword[or] identifier[Token]
identifier[tokens] ={}
keyword[for] identifier[i] , identifier[match] keyword[in] identifier[enumerate] ( identifier[self] . identifier[regex] . identifier[finditer] ( identifier[text] )):
identifier[value] = identifier[match] . identifier[group] ( literal[int] )
keyword[try] :
identifier[token] = identifier[tokens] [ identifier[value] ]
keyword[except] identifier[KeyError] :
identifier[type] = identifier[match] . identifier[lastgroup]
identifier[token] = identifier[token_class] ( identifier[value] , identifier[type] = identifier[type] )
identifier[tokens] [ identifier[value] ]= identifier[token]
keyword[yield] identifier[token] | def _tokenize(self, text, token_class=None):
"""
Tokenizes a text
:Returns:
A generator of tokens
"""
token_class = token_class or Token
tokens = {}
for (i, match) in enumerate(self.regex.finditer(text)):
value = match.group(0)
try:
token = tokens[value] # depends on [control=['try'], data=[]]
except KeyError:
type = match.lastgroup
token = token_class(value, type=type)
tokens[value] = token # depends on [control=['except'], data=[]]
yield token # depends on [control=['for'], data=[]] |
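A standalone mimic of the same pattern: named regex groups supply the token type, and repeated values reuse a cached Token (the namedtuple and lexer regex here are throwaway stand-ins):
import re
from collections import namedtuple

Token = namedtuple('Token', 'value type')
lexer = re.compile(r'(?P<number>\d+)|(?P<word>[a-z]+)')

def tokenize(text):
    cache = {}
    for match in lexer.finditer(text):
        value = match.group(0)
        if value not in cache:
            cache[value] = Token(value, match.lastgroup)
        yield cache[value]

print([(t.value, t.type) for t in tokenize('abc 123 abc')])
# [('abc', 'word'), ('123', 'number'), ('abc', 'word')]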
def hide_routemap_holder_route_map_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
name = ET.SubElement(route_map, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[hide_routemap_holder_route_map_name, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[hide_routemap_holder] assign[=] call[name[ET].SubElement, parameter[name[config], constant[hide-routemap-holder]]]
variable[route_map] assign[=] call[name[ET].SubElement, parameter[name[hide_routemap_holder], constant[route-map]]]
variable[action_rm_key] assign[=] call[name[ET].SubElement, parameter[name[route_map], constant[action-rm]]]
name[action_rm_key].text assign[=] call[name[kwargs].pop, parameter[constant[action_rm]]]
variable[instance_key] assign[=] call[name[ET].SubElement, parameter[name[route_map], constant[instance]]]
name[instance_key].text assign[=] call[name[kwargs].pop, parameter[constant[instance]]]
variable[name] assign[=] call[name[ET].SubElement, parameter[name[route_map], constant[name]]]
name[name].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[hide_routemap_holder_route_map_name] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[hide_routemap_holder] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[route_map] = identifier[ET] . identifier[SubElement] ( identifier[hide_routemap_holder] , literal[string] )
identifier[action_rm_key] = identifier[ET] . identifier[SubElement] ( identifier[route_map] , literal[string] )
identifier[action_rm_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[instance_key] = identifier[ET] . identifier[SubElement] ( identifier[route_map] , literal[string] )
identifier[instance_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[name] = identifier[ET] . identifier[SubElement] ( identifier[route_map] , literal[string] )
identifier[name] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def hide_routemap_holder_route_map_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
hide_routemap_holder = ET.SubElement(config, 'hide-routemap-holder', xmlns='urn:brocade.com:mgmt:brocade-ip-policy')
route_map = ET.SubElement(hide_routemap_holder, 'route-map')
action_rm_key = ET.SubElement(route_map, 'action-rm')
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, 'instance')
instance_key.text = kwargs.pop('instance')
name = ET.SubElement(route_map, 'name')
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
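# Hypothetical invocation of the generated builder above: with an identity
# callback it returns the assembled ElementTree config. The `device` instance
# and the "permit" action value are assumptions about the Brocade schema.
import xml.etree.ElementTree as ET

cfg = device.hide_routemap_holder_route_map_name(
    action_rm="permit", instance="10", name="demo-map",
    callback=lambda config: config)
print(ET.tostring(cfg).decode())
# (wrapped) <config><hide-routemap-holder
#   xmlns="urn:brocade.com:mgmt:brocade-ip-policy"><route-map>
#   <action-rm>permit</action-rm><instance>10</instance>
#   <name>demo-map</name></route-map></hide-routemap-holder></config>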
async def oauth(request):
"""Oauth example."""
provider = request.match_info.get('provider')
client, _ = await app.ps.oauth.login(provider, request)
user, data = await client.user_info()
response = (
"<a href='/'>back</a><br/><br/>"
"<ul>"
"<li>ID: {u.id}</li>"
"<li>Username: {u.username}</li>"
"<li>First, last name: {u.first_name}, {u.last_name}</li>"
"<li>Email: {u.email}</li>"
"<li>Link: {u.link}</li>"
"<li>Picture: {u.picture}</li>"
"<li>Country, city: {u.country}, {u.city}</li>"
"</ul>"
).format(u=user)
response += "<code>%s</code>" % html.escape(repr(data))
return response | <ast.AsyncFunctionDef object at 0x7da204347160> | keyword[async] keyword[def] identifier[oauth] ( identifier[request] ):
literal[string]
identifier[provider] = identifier[request] . identifier[match_info] . identifier[get] ( literal[string] )
identifier[client] , identifier[_] = keyword[await] identifier[app] . identifier[ps] . identifier[oauth] . identifier[login] ( identifier[provider] , identifier[request] )
identifier[user] , identifier[data] = keyword[await] identifier[client] . identifier[user_info] ()
identifier[response] =(
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
). identifier[format] ( identifier[u] = identifier[user] )
identifier[response] += literal[string] % identifier[html] . identifier[escape] ( identifier[repr] ( identifier[data] ))
keyword[return] identifier[response] | async def oauth(request):
"""Oauth example."""
provider = request.match_info.get('provider')
(client, _) = await app.ps.oauth.login(provider, request)
(user, data) = await client.user_info()
response = "<a href='/'>back</a><br/><br/><ul><li>ID: {u.id}</li><li>Username: {u.username}</li><li>First, last name: {u.first_name}, {u.last_name}</li><li>Email: {u.email}</li><li>Link: {u.link}</li><li>Picture: {u.picture}</li><li>Country, city: {u.country}, {u.city}</li></ul>".format(u=user)
response += '<code>%s</code>' % html.escape(repr(data))
return response |
def _split_regex(regex):
"""
Return an array of the URL split at each regex match like (?P<id>[\d]+)
Call with a regex of '^/foo/(?P<id>[\d]+)/bar/$' and you will receive ['/foo/', '/bar/']
"""
if regex[0] == '^':
regex = regex[1:]
if regex[-1] == '$':
regex = regex[0:-1]
results = []
line = ''
for c in regex:
if c == '(':
results.append(line)
line = ''
elif c == ')':
line = ''
else:
line = line + c
if len(line) > 0:
results.append(line)
return results | def function[_split_regex, parameter[regex]]:
constant[
Return an array of the URL split at each regex match like (?P<id>[\d]+)
Call with a regex of '^/foo/(?P<id>[\d]+)/bar/$' and you will receive ['/foo/', '/bar/']
]
if compare[call[name[regex]][constant[0]] equal[==] constant[^]] begin[:]
variable[regex] assign[=] call[name[regex]][<ast.Slice object at 0x7da1b0aa3bb0>]
if compare[call[name[regex]][<ast.UnaryOp object at 0x7da1b0aa3430>] equal[==] constant[$]] begin[:]
variable[regex] assign[=] call[name[regex]][<ast.Slice object at 0x7da1b0aa0820>]
variable[results] assign[=] list[[]]
variable[line] assign[=] constant[]
for taget[name[c]] in starred[name[regex]] begin[:]
if compare[name[c] equal[==] constant[(]] begin[:]
call[name[results].append, parameter[name[line]]]
variable[line] assign[=] constant[]
if compare[call[name[len], parameter[name[line]]] greater[>] constant[0]] begin[:]
call[name[results].append, parameter[name[line]]]
return[name[results]] | keyword[def] identifier[_split_regex] ( identifier[regex] ):
literal[string]
keyword[if] identifier[regex] [ literal[int] ]== literal[string] :
identifier[regex] = identifier[regex] [ literal[int] :]
keyword[if] identifier[regex] [- literal[int] ]== literal[string] :
identifier[regex] = identifier[regex] [ literal[int] :- literal[int] ]
identifier[results] =[]
identifier[line] = literal[string]
keyword[for] identifier[c] keyword[in] identifier[regex] :
keyword[if] identifier[c] == literal[string] :
identifier[results] . identifier[append] ( identifier[line] )
identifier[line] = literal[string]
keyword[elif] identifier[c] == literal[string] :
identifier[line] = literal[string]
keyword[else] :
identifier[line] = identifier[line] + identifier[c]
keyword[if] identifier[len] ( identifier[line] )> literal[int] :
identifier[results] . identifier[append] ( identifier[line] )
keyword[return] identifier[results] | def _split_regex(regex):
"""
Return an array of the URL split at each regex match like (?P<id>[\\d]+)
Call with a regex of '^/foo/(?P<id>[\\d]+)/bar/$' and you will receive ['/foo/', '/bar/']
"""
if regex[0] == '^':
regex = regex[1:] # depends on [control=['if'], data=[]]
if regex[-1] == '$':
regex = regex[0:-1] # depends on [control=['if'], data=[]]
results = []
line = ''
for c in regex:
if c == '(':
results.append(line)
line = '' # depends on [control=['if'], data=[]]
elif c == ')':
line = '' # depends on [control=['if'], data=[]]
else:
line = line + c # depends on [control=['for'], data=['c']]
if len(line) > 0:
results.append(line) # depends on [control=['if'], data=[]]
return results |
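# Usage straight from the docstring above: capture groups are dropped and the
# literal URL segments between them are returned.
assert _split_regex(r'^/foo/(?P<id>[\d]+)/bar/$') == ['/foo/', '/bar/']
assert _split_regex(r'^/users/(?P<pk>\d+)$') == ['/users/']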
def link_order_filter(self, column, modelview_name):
"""
Arguments are passed like:
_oc_<VIEW_NAME>=<COL_NAME>&_od_<VIEW_NAME>='asc'|'desc'
"""
new_args = request.view_args.copy()
args = request.args.copy()
if ("_oc_" + modelview_name) in args:
args["_oc_" + modelview_name] = column
if args.get("_od_" + modelview_name) == "asc":
args["_od_" + modelview_name] = "desc"
else:
args["_od_" + modelview_name] = "asc"
else:
args["_oc_" + modelview_name] = column
args["_od_" + modelview_name] = "asc"
return url_for(
request.endpoint,
**dict(list(new_args.items()) + list(args.to_dict().items()))
) | def function[link_order_filter, parameter[self, column, modelview_name]]:
constant[
Arguments are passed like:
_oc_<VIEW_NAME>=<COL_NAME>&_od_<VIEW_NAME>='asc'|'desc'
]
variable[new_args] assign[=] call[name[request].view_args.copy, parameter[]]
variable[args] assign[=] call[name[request].args.copy, parameter[]]
if compare[binary_operation[constant[_oc_] + name[modelview_name]] in name[args]] begin[:]
call[name[args]][binary_operation[constant[_oc_] + name[modelview_name]]] assign[=] name[column]
if compare[call[name[args].get, parameter[binary_operation[constant[_od_] + name[modelview_name]]]] equal[==] constant[asc]] begin[:]
call[name[args]][binary_operation[constant[_od_] + name[modelview_name]]] assign[=] constant[desc]
return[call[name[url_for], parameter[name[request].endpoint]]] | keyword[def] identifier[link_order_filter] ( identifier[self] , identifier[column] , identifier[modelview_name] ):
literal[string]
identifier[new_args] = identifier[request] . identifier[view_args] . identifier[copy] ()
identifier[args] = identifier[request] . identifier[args] . identifier[copy] ()
keyword[if] ( literal[string] + identifier[modelview_name] ) keyword[in] identifier[args] :
identifier[args] [ literal[string] + identifier[modelview_name] ]= identifier[column]
keyword[if] identifier[args] . identifier[get] ( literal[string] + identifier[modelview_name] )== literal[string] :
identifier[args] [ literal[string] + identifier[modelview_name] ]= literal[string]
keyword[else] :
identifier[args] [ literal[string] + identifier[modelview_name] ]= literal[string]
keyword[else] :
identifier[args] [ literal[string] + identifier[modelview_name] ]= identifier[column]
identifier[args] [ literal[string] + identifier[modelview_name] ]= literal[string]
keyword[return] identifier[url_for] (
identifier[request] . identifier[endpoint] ,
** identifier[dict] ( identifier[list] ( identifier[new_args] . identifier[items] ())+ identifier[list] ( identifier[args] . identifier[to_dict] (). identifier[items] ()))
) | def link_order_filter(self, column, modelview_name):
"""
Arguments are passed like:
_oc_<VIEW_NAME>=<COL_NAME>&_od_<VIEW_NAME>='asc'|'desc'
"""
new_args = request.view_args.copy()
args = request.args.copy()
if '_oc_' + modelview_name in args:
args['_oc_' + modelview_name] = column
if args.get('_od_' + modelview_name) == 'asc':
args['_od_' + modelview_name] = 'desc' # depends on [control=['if'], data=[]]
else:
args['_od_' + modelview_name] = 'asc' # depends on [control=['if'], data=['args']]
else:
args['_oc_' + modelview_name] = column
args['_od_' + modelview_name] = 'asc'
return url_for(request.endpoint, **dict(list(new_args.items()) + list(args.to_dict().items()))) |
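# The query-arg toggle in isolation: if the view already carries an order
# column, the direction flips asc <-> desc; otherwise ordering starts
# ascending. Standalone sketch only -- the real method also merges
# request.view_args into the generated URL.
def _toggle(args, modelview_name, column):
    col_key, dir_key = '_oc_' + modelview_name, '_od_' + modelview_name
    if col_key in args:
        args[col_key] = column
        args[dir_key] = 'desc' if args.get(dir_key) == 'asc' else 'asc'
    else:
        args[col_key], args[dir_key] = column, 'asc'
    return args

assert _toggle({}, 'UserView', 'name') == {'_oc_UserView': 'name', '_od_UserView': 'asc'}
assert _toggle({'_oc_UserView': 'name', '_od_UserView': 'asc'}, 'UserView', 'name')['_od_UserView'] == 'desc'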
def policy_present(name, rules):
'''
Ensure a Vault policy with the given name and rules is present.
name
The name of the policy
rules
Rules formatted as in-line HCL
.. code-block:: yaml
demo-policy:
vault.policy_present:
- name: foo/bar
- rules: |
path "secret/top-secret/*" {
policy = "deny"
}
path "secret/not-very-secret/*" {
policy = "write"
}
'''
url = "v1/sys/policy/{0}".format(name)
response = __utils__['vault.make_request']('GET', url)
try:
if response.status_code == 200:
return _handle_existing_policy(name, rules, response.json()['rules'])
elif response.status_code == 404:
return _create_new_policy(name, rules)
else:
response.raise_for_status()
except Exception as e:
return {
'name': name,
'changes': {},
'result': False,
'comment': 'Failed to get policy: {0}'.format(e)
} | def function[policy_present, parameter[name, rules]]:
constant[
Ensure a Vault policy with the given name and rules is present.
name
The name of the policy
rules
Rules formatted as in-line HCL
.. code-block:: yaml
demo-policy:
vault.policy_present:
- name: foo/bar
- rules: |
path "secret/top-secret/*" {
policy = "deny"
}
path "secret/not-very-secret/*" {
policy = "write"
}
]
variable[url] assign[=] call[constant[v1/sys/policy/{0}].format, parameter[name[name]]]
variable[response] assign[=] call[call[name[__utils__]][constant[vault.make_request]], parameter[constant[GET], name[url]]]
<ast.Try object at 0x7da207f03b50> | keyword[def] identifier[policy_present] ( identifier[name] , identifier[rules] ):
literal[string]
identifier[url] = literal[string] . identifier[format] ( identifier[name] )
identifier[response] = identifier[__utils__] [ literal[string] ]( literal[string] , identifier[url] )
keyword[try] :
keyword[if] identifier[response] . identifier[status_code] == literal[int] :
keyword[return] identifier[_handle_existing_policy] ( identifier[name] , identifier[rules] , identifier[response] . identifier[json] ()[ literal[string] ])
keyword[elif] identifier[response] . identifier[status_code] == literal[int] :
keyword[return] identifier[_create_new_policy] ( identifier[name] , identifier[rules] )
keyword[else] :
identifier[response] . identifier[raise_for_status] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[return] {
literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[False] ,
literal[string] : literal[string] . identifier[format] ( identifier[e] )
} | def policy_present(name, rules):
"""
Ensure a Vault policy with the given name and rules is present.
name
The name of the policy
rules
Rules formatted as in-line HCL
.. code-block:: yaml
demo-policy:
vault.policy_present:
- name: foo/bar
- rules: |
path "secret/top-secret/*" {
policy = "deny"
}
path "secret/not-very-secret/*" {
policy = "write"
}
"""
url = 'v1/sys/policy/{0}'.format(name)
response = __utils__['vault.make_request']('GET', url)
try:
if response.status_code == 200:
return _handle_existing_policy(name, rules, response.json()['rules']) # depends on [control=['if'], data=[]]
elif response.status_code == 404:
return _create_new_policy(name, rules) # depends on [control=['if'], data=[]]
else:
response.raise_for_status() # depends on [control=['try'], data=[]]
except Exception as e:
return {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to get policy: {0}'.format(e)} # depends on [control=['except'], data=['e']] |
def exec_scratch_virtualenv(args):
"""
goals:
- get any random site-packages off of the pythonpath
- ensure we can import virtualenv
- ensure that we're not using the interpreter that we may need to delete
- idempotency: do nothing if the above goals are already met
"""
scratch = Scratch()
if not exists(scratch.python):
run(('virtualenv', scratch.venv))
if not exists(join(scratch.src, 'virtualenv.py')):
scratch_python = venv_python(scratch.venv)
# TODO: do we allow user-defined override of which version of virtualenv to install?
tmp = scratch.src + '.tmp'
run((scratch_python, '-m', 'pip.__main__', 'install', 'virtualenv', '--target', tmp))
from os import rename
rename(tmp, scratch.src)
import sys
from os.path import realpath
# We want to compare the paths themselves as sometimes sys.path is the same
# as scratch.venv, but with a suffix of bin/..
if realpath(sys.prefix) != realpath(scratch.venv):
# TODO-TEST: sometimes we would get a stale version of venv-update
exec_((scratch.python, dotpy(__file__)) + args) # never returns
# TODO-TEST: the original venv-update's directory was on sys.path (when using symlinking)
sys.path[0] = scratch.src | def function[exec_scratch_virtualenv, parameter[args]]:
constant[
goals:
- get any random site-packages off of the pythonpath
- ensure we can import virtualenv
- ensure that we're not using the interpreter that we may need to delete
- idempotency: do nothing if the above goals are already met
]
variable[scratch] assign[=] call[name[Scratch], parameter[]]
if <ast.UnaryOp object at 0x7da207f01240> begin[:]
call[name[run], parameter[tuple[[<ast.Constant object at 0x7da207f01bd0>, <ast.Attribute object at 0x7da207f03cd0>]]]]
if <ast.UnaryOp object at 0x7da207f00a30> begin[:]
variable[scratch_python] assign[=] call[name[venv_python], parameter[name[scratch].venv]]
variable[tmp] assign[=] binary_operation[name[scratch].src + constant[.tmp]]
call[name[run], parameter[tuple[[<ast.Name object at 0x7da1b1296170>, <ast.Constant object at 0x7da1b1294580>, <ast.Constant object at 0x7da1b1294f40>, <ast.Constant object at 0x7da1b12947c0>, <ast.Constant object at 0x7da1b1295db0>, <ast.Constant object at 0x7da1b1297bb0>, <ast.Name object at 0x7da1b12946a0>]]]]
from relative_module[os] import module[rename]
call[name[rename], parameter[name[tmp], name[scratch].src]]
import module[sys]
from relative_module[os.path] import module[realpath]
if compare[call[name[realpath], parameter[name[sys].prefix]] not_equal[!=] call[name[realpath], parameter[name[scratch].venv]]] begin[:]
call[name[exec_], parameter[binary_operation[tuple[[<ast.Attribute object at 0x7da1b1295240>, <ast.Call object at 0x7da1b12945b0>]] + name[args]]]]
call[name[sys].path][constant[0]] assign[=] name[scratch].src | keyword[def] identifier[exec_scratch_virtualenv] ( identifier[args] ):
literal[string]
identifier[scratch] = identifier[Scratch] ()
keyword[if] keyword[not] identifier[exists] ( identifier[scratch] . identifier[python] ):
identifier[run] (( literal[string] , identifier[scratch] . identifier[venv] ))
keyword[if] keyword[not] identifier[exists] ( identifier[join] ( identifier[scratch] . identifier[src] , literal[string] )):
identifier[scratch_python] = identifier[venv_python] ( identifier[scratch] . identifier[venv] )
identifier[tmp] = identifier[scratch] . identifier[src] + literal[string]
identifier[run] (( identifier[scratch_python] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , identifier[tmp] ))
keyword[from] identifier[os] keyword[import] identifier[rename]
identifier[rename] ( identifier[tmp] , identifier[scratch] . identifier[src] )
keyword[import] identifier[sys]
keyword[from] identifier[os] . identifier[path] keyword[import] identifier[realpath]
keyword[if] identifier[realpath] ( identifier[sys] . identifier[prefix] )!= identifier[realpath] ( identifier[scratch] . identifier[venv] ):
identifier[exec_] (( identifier[scratch] . identifier[python] , identifier[dotpy] ( identifier[__file__] ))+ identifier[args] )
identifier[sys] . identifier[path] [ literal[int] ]= identifier[scratch] . identifier[src] | def exec_scratch_virtualenv(args):
"""
goals:
- get any random site-packages off of the pythonpath
- ensure we can import virtualenv
- ensure that we're not using the interpreter that we may need to delete
- idempotency: do nothing if the above goals are already met
"""
scratch = Scratch()
if not exists(scratch.python):
run(('virtualenv', scratch.venv)) # depends on [control=['if'], data=[]]
if not exists(join(scratch.src, 'virtualenv.py')):
scratch_python = venv_python(scratch.venv)
# TODO: do we allow user-defined override of which version of virtualenv to install?
tmp = scratch.src + '.tmp'
run((scratch_python, '-m', 'pip.__main__', 'install', 'virtualenv', '--target', tmp))
from os import rename
rename(tmp, scratch.src) # depends on [control=['if'], data=[]]
import sys
from os.path import realpath
# We want to compare the paths themselves as sometimes sys.path is the same
# as scratch.venv, but with a suffix of bin/..
if realpath(sys.prefix) != realpath(scratch.venv):
# TODO-TEST: sometimes we would get a stale version of venv-update
exec_((scratch.python, dotpy(__file__)) + args) # never returns # depends on [control=['if'], data=[]]
# TODO-TEST: the original venv-update's directory was on sys.path (when using symlinking)
sys.path[0] = scratch.src |
def list_guests(self, host_id, tags=None, cpus=None, memory=None, hostname=None,
domain=None, local_disk=None, nic_speed=None, public_ip=None,
private_ip=None, **kwargs):
"""Retrieve a list of all virtual servers on the dedicated host.
Example::
# Print out a list of instances with 4 cpu cores in the host id 12345.
for vsi in mgr.list_guests(host_id=12345, cpus=4):
print vsi['fullyQualifiedDomainName'], vsi['primaryIpAddress']
# Using a custom object-mask. Will get ONLY what is specified
object_mask = "mask[hostname,monitoringRobot[robotStatus]]"
for vsi in mgr.list_guests(mask=object_mask,cpus=4):
print vsi
:param integer host_id: the identifier of dedicated host
:param list tags: filter based on list of tags
:param integer cpus: filter based on number of CPUs
:param integer memory: filter based on amount of memory
:param string hostname: filter based on hostname
:param string domain: filter based on domain
:param string local_disk: filter based on local_disk
:param integer nic_speed: filter based on network speed (in Mbps)
:param string public_ip: filter based on public ip address
:param string private_ip: filter based on private ip address
:param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
:returns: Returns a list of dictionaries representing the matching
virtual servers
"""
if 'mask' not in kwargs:
items = [
'id',
'globalIdentifier',
'hostname',
'domain',
'fullyQualifiedDomainName',
'primaryBackendIpAddress',
'primaryIpAddress',
'lastKnownPowerState.name',
'hourlyBillingFlag',
'powerState',
'maxCpu',
'maxMemory',
'datacenter',
'activeTransaction.transactionStatus[friendlyName,name]',
'status',
]
kwargs['mask'] = "mask[%s]" % ','.join(items)
_filter = utils.NestedDict(kwargs.get('filter') or {})
if tags:
_filter['guests']['tagReferences']['tag']['name'] = {
'operation': 'in',
'options': [{'name': 'data', 'value': tags}],
}
if cpus:
_filter['guests']['maxCpu'] = utils.query_filter(cpus)
if memory:
_filter['guests']['maxMemory'] = utils.query_filter(memory)
if hostname:
_filter['guests']['hostname'] = utils.query_filter(hostname)
if domain:
_filter['guests']['domain'] = utils.query_filter(domain)
if local_disk is not None:
_filter['guests']['localDiskFlag'] = (
utils.query_filter(bool(local_disk)))
if nic_speed:
_filter['guests']['networkComponents']['maxSpeed'] = (
utils.query_filter(nic_speed))
if public_ip:
_filter['guests']['primaryIpAddress'] = (
utils.query_filter(public_ip))
if private_ip:
_filter['guests']['primaryBackendIpAddress'] = (
utils.query_filter(private_ip))
kwargs['filter'] = _filter.to_dict()
kwargs['iter'] = True
return self.host.getGuests(id=host_id, **kwargs) | def function[list_guests, parameter[self, host_id, tags, cpus, memory, hostname, domain, local_disk, nic_speed, public_ip, private_ip]]:
constant[Retrieve a list of all virtual servers on the dedicated host.
Example::
# Print out a list of instances with 4 cpu cores in the host id 12345.
for vsi in mgr.list_guests(host_id=12345, cpus=4):
print vsi['fullyQualifiedDomainName'], vsi['primaryIpAddress']
# Using a custom object-mask. Will get ONLY what is specified
object_mask = "mask[hostname,monitoringRobot[robotStatus]]"
for vsi in mgr.list_guests(mask=object_mask,cpus=4):
print vsi
:param integer host_id: the identifier of dedicated host
:param list tags: filter based on list of tags
:param integer cpus: filter based on number of CPUs
:param integer memory: filter based on amount of memory
:param string hostname: filter based on hostname
:param string domain: filter based on domain
:param string local_disk: filter based on local_disk
:param integer nic_speed: filter based on network speed (in Mbps)
:param string public_ip: filter based on public ip address
:param string private_ip: filter based on private ip address
:param dict \*\*kwargs: response-level options (mask, limit, etc.)
:returns: Returns a list of dictionaries representing the matching
virtual servers
]
if compare[constant[mask] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
variable[items] assign[=] list[[<ast.Constant object at 0x7da20c794850>, <ast.Constant object at 0x7da20c795570>, <ast.Constant object at 0x7da20c796050>, <ast.Constant object at 0x7da20c7952d0>, <ast.Constant object at 0x7da20c796b30>, <ast.Constant object at 0x7da20c7952a0>, <ast.Constant object at 0x7da20c794bb0>, <ast.Constant object at 0x7da20c7951b0>, <ast.Constant object at 0x7da20c7943a0>, <ast.Constant object at 0x7da20c794a00>, <ast.Constant object at 0x7da20c795f00>, <ast.Constant object at 0x7da20c796620>, <ast.Constant object at 0x7da20c795d50>, <ast.Constant object at 0x7da20c796410>, <ast.Constant object at 0x7da20c796470>]]
call[name[kwargs]][constant[mask]] assign[=] binary_operation[constant[mask[%s]] <ast.Mod object at 0x7da2590d6920> call[constant[,].join, parameter[name[items]]]]
variable[_filter] assign[=] call[name[utils].NestedDict, parameter[<ast.BoolOp object at 0x7da20c795030>]]
if name[tags] begin[:]
call[call[call[call[name[_filter]][constant[guests]]][constant[tagReferences]]][constant[tag]]][constant[name]] assign[=] dictionary[[<ast.Constant object at 0x7da20c795d20>, <ast.Constant object at 0x7da20c795090>], [<ast.Constant object at 0x7da20c795960>, <ast.List object at 0x7da20c794130>]]
if name[cpus] begin[:]
call[call[name[_filter]][constant[guests]]][constant[maxCpu]] assign[=] call[name[utils].query_filter, parameter[name[cpus]]]
if name[memory] begin[:]
call[call[name[_filter]][constant[guests]]][constant[maxMemory]] assign[=] call[name[utils].query_filter, parameter[name[memory]]]
if name[hostname] begin[:]
call[call[name[_filter]][constant[guests]]][constant[hostname]] assign[=] call[name[utils].query_filter, parameter[name[hostname]]]
if name[domain] begin[:]
call[call[name[_filter]][constant[guests]]][constant[domain]] assign[=] call[name[utils].query_filter, parameter[name[domain]]]
if compare[name[local_disk] is_not constant[None]] begin[:]
call[call[name[_filter]][constant[guests]]][constant[localDiskFlag]] assign[=] call[name[utils].query_filter, parameter[call[name[bool], parameter[name[local_disk]]]]]
if name[nic_speed] begin[:]
call[call[call[name[_filter]][constant[guests]]][constant[networkComponents]]][constant[maxSpeed]] assign[=] call[name[utils].query_filter, parameter[name[nic_speed]]]
if name[public_ip] begin[:]
call[call[name[_filter]][constant[guests]]][constant[primaryIpAddress]] assign[=] call[name[utils].query_filter, parameter[name[public_ip]]]
if name[private_ip] begin[:]
call[call[name[_filter]][constant[guests]]][constant[primaryBackendIpAddress]] assign[=] call[name[utils].query_filter, parameter[name[private_ip]]]
call[name[kwargs]][constant[filter]] assign[=] call[name[_filter].to_dict, parameter[]]
call[name[kwargs]][constant[iter]] assign[=] constant[True]
return[call[name[self].host.getGuests, parameter[]]] | keyword[def] identifier[list_guests] ( identifier[self] , identifier[host_id] , identifier[tags] = keyword[None] , identifier[cpus] = keyword[None] , identifier[memory] = keyword[None] , identifier[hostname] = keyword[None] ,
identifier[domain] = keyword[None] , identifier[local_disk] = keyword[None] , identifier[nic_speed] = keyword[None] , identifier[public_ip] = keyword[None] ,
identifier[private_ip] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[items] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
]
identifier[kwargs] [ literal[string] ]= literal[string] % literal[string] . identifier[join] ( identifier[items] )
identifier[_filter] = identifier[utils] . identifier[NestedDict] ( identifier[kwargs] . identifier[get] ( literal[string] ) keyword[or] {})
keyword[if] identifier[tags] :
identifier[_filter] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]={
literal[string] : literal[string] ,
literal[string] :[{ literal[string] : literal[string] , literal[string] : identifier[tags] }],
}
keyword[if] identifier[cpus] :
identifier[_filter] [ literal[string] ][ literal[string] ]= identifier[utils] . identifier[query_filter] ( identifier[cpus] )
keyword[if] identifier[memory] :
identifier[_filter] [ literal[string] ][ literal[string] ]= identifier[utils] . identifier[query_filter] ( identifier[memory] )
keyword[if] identifier[hostname] :
identifier[_filter] [ literal[string] ][ literal[string] ]= identifier[utils] . identifier[query_filter] ( identifier[hostname] )
keyword[if] identifier[domain] :
identifier[_filter] [ literal[string] ][ literal[string] ]= identifier[utils] . identifier[query_filter] ( identifier[domain] )
keyword[if] identifier[local_disk] keyword[is] keyword[not] keyword[None] :
identifier[_filter] [ literal[string] ][ literal[string] ]=(
identifier[utils] . identifier[query_filter] ( identifier[bool] ( identifier[local_disk] )))
keyword[if] identifier[nic_speed] :
identifier[_filter] [ literal[string] ][ literal[string] ][ literal[string] ]=(
identifier[utils] . identifier[query_filter] ( identifier[nic_speed] ))
keyword[if] identifier[public_ip] :
identifier[_filter] [ literal[string] ][ literal[string] ]=(
identifier[utils] . identifier[query_filter] ( identifier[public_ip] ))
keyword[if] identifier[private_ip] :
identifier[_filter] [ literal[string] ][ literal[string] ]=(
identifier[utils] . identifier[query_filter] ( identifier[private_ip] ))
identifier[kwargs] [ literal[string] ]= identifier[_filter] . identifier[to_dict] ()
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[return] identifier[self] . identifier[host] . identifier[getGuests] ( identifier[id] = identifier[host_id] ,** identifier[kwargs] ) | def list_guests(self, host_id, tags=None, cpus=None, memory=None, hostname=None, domain=None, local_disk=None, nic_speed=None, public_ip=None, private_ip=None, **kwargs):
"""Retrieve a list of all virtual servers on the dedicated host.
Example::
# Print out a list of instances with 4 cpu cores in the host id 12345.
for vsi in mgr.list_guests(host_id=12345, cpus=4):
print vsi['fullyQualifiedDomainName'], vsi['primaryIpAddress']
# Using a custom object-mask. Will get ONLY what is specified
object_mask = "mask[hostname,monitoringRobot[robotStatus]]"
for vsi in mgr.list_guests(mask=object_mask,cpus=4):
print vsi
:param integer host_id: the identifier of dedicated host
:param list tags: filter based on list of tags
:param integer cpus: filter based on number of CPUs
:param integer memory: filter based on amount of memory
:param string hostname: filter based on hostname
:param string domain: filter based on domain
:param string local_disk: filter based on local_disk
:param integer nic_speed: filter based on network speed (in Mbps)
:param string public_ip: filter based on public ip address
:param string private_ip: filter based on private ip address
:param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
:returns: Returns a list of dictionaries representing the matching
virtual servers
"""
if 'mask' not in kwargs:
items = ['id', 'globalIdentifier', 'hostname', 'domain', 'fullyQualifiedDomainName', 'primaryBackendIpAddress', 'primaryIpAddress', 'lastKnownPowerState.name', 'hourlyBillingFlag', 'powerState', 'maxCpu', 'maxMemory', 'datacenter', 'activeTransaction.transactionStatus[friendlyName,name]', 'status']
kwargs['mask'] = 'mask[%s]' % ','.join(items) # depends on [control=['if'], data=['kwargs']]
_filter = utils.NestedDict(kwargs.get('filter') or {})
if tags:
_filter['guests']['tagReferences']['tag']['name'] = {'operation': 'in', 'options': [{'name': 'data', 'value': tags}]} # depends on [control=['if'], data=[]]
if cpus:
_filter['guests']['maxCpu'] = utils.query_filter(cpus) # depends on [control=['if'], data=[]]
if memory:
_filter['guests']['maxMemory'] = utils.query_filter(memory) # depends on [control=['if'], data=[]]
if hostname:
_filter['guests']['hostname'] = utils.query_filter(hostname) # depends on [control=['if'], data=[]]
if domain:
_filter['guests']['domain'] = utils.query_filter(domain) # depends on [control=['if'], data=[]]
if local_disk is not None:
_filter['guests']['localDiskFlag'] = utils.query_filter(bool(local_disk)) # depends on [control=['if'], data=['local_disk']]
if nic_speed:
_filter['guests']['networkComponents']['maxSpeed'] = utils.query_filter(nic_speed) # depends on [control=['if'], data=[]]
if public_ip:
_filter['guests']['primaryIpAddress'] = utils.query_filter(public_ip) # depends on [control=['if'], data=[]]
if private_ip:
_filter['guests']['primaryBackendIpAddress'] = utils.query_filter(private_ip) # depends on [control=['if'], data=[]]
kwargs['filter'] = _filter.to_dict()
kwargs['iter'] = True
return self.host.getGuests(id=host_id, **kwargs) |
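# Roughly the object filter sent to the API for
# list_guests(cpus=4, hostname='web*'); the rendered operation shapes are
# assumptions about SoftLayer's filter grammar.
from SoftLayer import utils

_filter = utils.NestedDict({})
_filter['guests']['maxCpu'] = utils.query_filter(4)
_filter['guests']['hostname'] = utils.query_filter('web*')
print(_filter.to_dict())
# {'guests': {'maxCpu': {'operation': 4},
#             'hostname': {'operation': '^= web'}}}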
def _add_linux_ethernet(self, port_info, bridge_name):
"""
Use raw sockets on Linux.
If interface is a bridge we connect a tap to it
"""
interface = port_info["interface"]
if gns3server.utils.interfaces.is_interface_bridge(interface):
network_interfaces = [interface["name"] for interface in self._interfaces()]
i = 0
while True:
tap = "gns3tap{}-{}".format(i, port_info["port_number"])
if tap not in network_interfaces:
break
i += 1
yield from self._ubridge_send('bridge add_nio_tap "{name}" "{interface}"'.format(name=bridge_name, interface=tap))
yield from self._ubridge_send('brctl addif "{interface}" "{tap}"'.format(tap=tap, interface=interface))
else:
yield from self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=bridge_name, interface=interface)) | def function[_add_linux_ethernet, parameter[self, port_info, bridge_name]]:
constant[
Use raw sockets on Linux.
If interface is a bridge we connect a tap to it
]
variable[interface] assign[=] call[name[port_info]][constant[interface]]
if call[name[gns3server].utils.interfaces.is_interface_bridge, parameter[name[interface]]] begin[:]
variable[network_interfaces] assign[=] <ast.ListComp object at 0x7da20e957c10>
variable[i] assign[=] constant[0]
while constant[True] begin[:]
variable[tap] assign[=] call[constant[gns3tap{}-{}].format, parameter[name[i], call[name[port_info]][constant[port_number]]]]
if compare[name[tap] <ast.NotIn object at 0x7da2590d7190> name[network_interfaces]] begin[:]
break
<ast.AugAssign object at 0x7da204565f90>
<ast.YieldFrom object at 0x7da204565960>
<ast.YieldFrom object at 0x7da204566890> | keyword[def] identifier[_add_linux_ethernet] ( identifier[self] , identifier[port_info] , identifier[bridge_name] ):
literal[string]
identifier[interface] = identifier[port_info] [ literal[string] ]
keyword[if] identifier[gns3server] . identifier[utils] . identifier[interfaces] . identifier[is_interface_bridge] ( identifier[interface] ):
identifier[network_interfaces] =[ identifier[interface] [ literal[string] ] keyword[for] identifier[interface] keyword[in] identifier[self] . identifier[_interfaces] ()]
identifier[i] = literal[int]
keyword[while] keyword[True] :
identifier[tap] = literal[string] . identifier[format] ( identifier[i] , identifier[port_info] [ literal[string] ])
keyword[if] identifier[tap] keyword[not] keyword[in] identifier[network_interfaces] :
keyword[break]
identifier[i] += literal[int]
keyword[yield] keyword[from] identifier[self] . identifier[_ubridge_send] ( literal[string] . identifier[format] ( identifier[name] = identifier[bridge_name] , identifier[interface] = identifier[tap] ))
keyword[yield] keyword[from] identifier[self] . identifier[_ubridge_send] ( literal[string] . identifier[format] ( identifier[tap] = identifier[tap] , identifier[interface] = identifier[interface] ))
keyword[else] :
keyword[yield] keyword[from] identifier[self] . identifier[_ubridge_send] ( literal[string] . identifier[format] ( identifier[name] = identifier[bridge_name] , identifier[interface] = identifier[interface] )) | def _add_linux_ethernet(self, port_info, bridge_name):
"""
Use raw sockets on Linux.
If interface is a bridge we connect a tap to it
"""
interface = port_info['interface']
if gns3server.utils.interfaces.is_interface_bridge(interface):
network_interfaces = [interface['name'] for interface in self._interfaces()]
i = 0
while True:
tap = 'gns3tap{}-{}'.format(i, port_info['port_number'])
if tap not in network_interfaces:
break # depends on [control=['if'], data=[]]
i += 1 # depends on [control=['while'], data=[]]
yield from self._ubridge_send('bridge add_nio_tap "{name}" "{interface}"'.format(name=bridge_name, interface=tap))
yield from self._ubridge_send('brctl addif "{interface}" "{tap}"'.format(tap=tap, interface=interface)) # depends on [control=['if'], data=[]]
else:
yield from self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=bridge_name, interface=interface)) |
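# The tap-name allocation loop in isolation: pick the first gns3tapN-<port>
# name not already present on the host. Hypothetical standalone rendering of
# the while-loop above.
def _free_tap_name(network_interfaces, port_number):
    i = 0
    while True:
        tap = "gns3tap{}-{}".format(i, port_number)
        if tap not in network_interfaces:
            return tap
        i += 1

assert _free_tap_name(["gns3tap0-2", "eth0"], 2) == "gns3tap1-2"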
def list_dataset_uris(cls, base_uri, config_path):
"""Return list containing URIs with base URI."""
storage_account_name = generous_parse_uri(base_uri).netloc
blobservice = get_blob_service(storage_account_name, config_path)
containers = blobservice.list_containers(include_metadata=True)
uri_list = []
for c in containers:
admin_metadata = c.metadata
uri = cls.generate_uri(
admin_metadata['name'],
admin_metadata['uuid'],
base_uri
)
uri_list.append(uri)
return uri_list | def function[list_dataset_uris, parameter[cls, base_uri, config_path]]:
constant[Return list containing URIs with base URI.]
variable[storage_account_name] assign[=] call[name[generous_parse_uri], parameter[name[base_uri]]].netloc
variable[blobservice] assign[=] call[name[get_blob_service], parameter[name[storage_account_name], name[config_path]]]
variable[containers] assign[=] call[name[blobservice].list_containers, parameter[]]
variable[uri_list] assign[=] list[[]]
for taget[name[c]] in starred[name[containers]] begin[:]
variable[admin_metadata] assign[=] name[c].metadata
variable[uri] assign[=] call[name[cls].generate_uri, parameter[call[name[admin_metadata]][constant[name]], call[name[admin_metadata]][constant[uuid]], name[base_uri]]]
call[name[uri_list].append, parameter[name[uri]]]
return[name[uri_list]] | keyword[def] identifier[list_dataset_uris] ( identifier[cls] , identifier[base_uri] , identifier[config_path] ):
literal[string]
identifier[storage_account_name] = identifier[generous_parse_uri] ( identifier[base_uri] ). identifier[netloc]
identifier[blobservice] = identifier[get_blob_service] ( identifier[storage_account_name] , identifier[config_path] )
identifier[containers] = identifier[blobservice] . identifier[list_containers] ( identifier[include_metadata] = keyword[True] )
identifier[uri_list] =[]
keyword[for] identifier[c] keyword[in] identifier[containers] :
identifier[admin_metadata] = identifier[c] . identifier[metadata]
identifier[uri] = identifier[cls] . identifier[generate_uri] (
identifier[admin_metadata] [ literal[string] ],
identifier[admin_metadata] [ literal[string] ],
identifier[base_uri]
)
identifier[uri_list] . identifier[append] ( identifier[uri] )
keyword[return] identifier[uri_list] | def list_dataset_uris(cls, base_uri, config_path):
"""Return list containing URIs with base URI."""
storage_account_name = generous_parse_uri(base_uri).netloc
blobservice = get_blob_service(storage_account_name, config_path)
containers = blobservice.list_containers(include_metadata=True)
uri_list = []
for c in containers:
admin_metadata = c.metadata
uri = cls.generate_uri(admin_metadata['name'], admin_metadata['uuid'], base_uri)
uri_list.append(uri) # depends on [control=['for'], data=['c']]
return uri_list |
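# Hedged usage sketch; the broker class name and the URI scheme printed below
# are assumptions about the surrounding dtool storage-broker module.
for uri in AzureStorageBroker.list_dataset_uris(
        'azure://mystorageaccount', config_path='~/.config/dtool/dtool.json'):
    print(uri)  # e.g. azure://mystorageaccount/<dataset-uuid>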
def match_function(self, args, kwargs, match_args=(), star_arg=None, kwd_args=(), dubstar_arg=None):
"""Matches a pattern-matching function."""
self.match_in_args_kwargs(match_args, args, kwargs, allow_star_args=star_arg is not None)
if star_arg is not None:
self.match(star_arg, args + "[" + str(len(match_args)) + ":]")
self.match_in_kwargs(kwd_args, kwargs)
with self.down_a_level():
if dubstar_arg is None:
self.add_check("not " + kwargs)
else:
self.match(dubstar_arg, kwargs) | def function[match_function, parameter[self, args, kwargs, match_args, star_arg, kwd_args, dubstar_arg]]:
constant[Matches a pattern-matching function.]
call[name[self].match_in_args_kwargs, parameter[name[match_args], name[args], name[kwargs]]]
if compare[name[star_arg] is_not constant[None]] begin[:]
call[name[self].match, parameter[name[star_arg], binary_operation[binary_operation[binary_operation[name[args] + constant[[]] + call[name[str], parameter[call[name[len], parameter[name[match_args]]]]]] + constant[:]]]]]
call[name[self].match_in_kwargs, parameter[name[kwd_args], name[kwargs]]]
with call[name[self].down_a_level, parameter[]] begin[:]
if compare[name[dubstar_arg] is constant[None]] begin[:]
call[name[self].add_check, parameter[binary_operation[constant[not ] + name[kwargs]]]] | keyword[def] identifier[match_function] ( identifier[self] , identifier[args] , identifier[kwargs] , identifier[match_args] =(), identifier[star_arg] = keyword[None] , identifier[kwd_args] =(), identifier[dubstar_arg] = keyword[None] ):
literal[string]
identifier[self] . identifier[match_in_args_kwargs] ( identifier[match_args] , identifier[args] , identifier[kwargs] , identifier[allow_star_args] = identifier[star_arg] keyword[is] keyword[not] keyword[None] )
keyword[if] identifier[star_arg] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[match] ( identifier[star_arg] , identifier[args] + literal[string] + identifier[str] ( identifier[len] ( identifier[match_args] ))+ literal[string] )
identifier[self] . identifier[match_in_kwargs] ( identifier[kwd_args] , identifier[kwargs] )
keyword[with] identifier[self] . identifier[down_a_level] ():
keyword[if] identifier[dubstar_arg] keyword[is] keyword[None] :
identifier[self] . identifier[add_check] ( literal[string] + identifier[kwargs] )
keyword[else] :
identifier[self] . identifier[match] ( identifier[dubstar_arg] , identifier[kwargs] ) | def match_function(self, args, kwargs, match_args=(), star_arg=None, kwd_args=(), dubstar_arg=None):
"""Matches a pattern-matching function."""
self.match_in_args_kwargs(match_args, args, kwargs, allow_star_args=star_arg is not None)
if star_arg is not None:
self.match(star_arg, args + '[' + str(len(match_args)) + ':]') # depends on [control=['if'], data=['star_arg']]
self.match_in_kwargs(kwd_args, kwargs)
with self.down_a_level():
if dubstar_arg is None:
self.add_check('not ' + kwargs) # depends on [control=['if'], data=[]]
else:
self.match(dubstar_arg, kwargs) # depends on [control=['with'], data=[]] |
def register(self):
"""
Register via the configured method
:return:
"""
if self.register_method == "twine":
self.register_by_twine()
if self.register_method == "setup":
self.register_by_setup()
if self.register_method == "upload":
self.upload() | def function[register, parameter[self]]:
constant[
Register via the method configured
:return:
]
if compare[name[self].register_method equal[==] constant[twine]] begin[:]
call[name[self].register_by_twine, parameter[]]
if compare[name[self].register_method equal[==] constant[setup]] begin[:]
call[name[self].register_by_setup, parameter[]]
if compare[name[self].register_method equal[==] constant[upload]] begin[:]
call[name[self].upload, parameter[]] | keyword[def] identifier[register] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[register_method] == literal[string] :
identifier[self] . identifier[register_by_twine] ()
keyword[if] identifier[self] . identifier[register_method] == literal[string] :
identifier[self] . identifier[register_by_setup] ()
keyword[if] identifier[self] . identifier[register_method] == literal[string] :
identifier[self] . identifier[upload] () | def register(self):
"""
Register via the configured method
:return:
"""
if self.register_method == 'twine':
self.register_by_twine() # depends on [control=['if'], data=[]]
if self.register_method == 'setup':
self.register_by_setup() # depends on [control=['if'], data=[]]
if self.register_method == 'upload':
self.upload() # depends on [control=['if'], data=[]] |
def lfprob (dfnum, dfden, F):
"""
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
p = betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
return p | def function[lfprob, parameter[dfnum, dfden, F]]:
constant[
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
]
variable[p] assign[=] call[name[betai], parameter[binary_operation[constant[0.5] * name[dfden]], binary_operation[constant[0.5] * name[dfnum]], binary_operation[name[dfden] / call[name[float], parameter[binary_operation[name[dfden] + binary_operation[name[dfnum] * name[F]]]]]]]]
return[name[p]] | keyword[def] identifier[lfprob] ( identifier[dfnum] , identifier[dfden] , identifier[F] ):
literal[string]
identifier[p] = identifier[betai] ( literal[int] * identifier[dfden] , literal[int] * identifier[dfnum] , identifier[dfden] / identifier[float] ( identifier[dfden] + identifier[dfnum] * identifier[F] ))
keyword[return] identifier[p] | def lfprob(dfnum, dfden, F):
"""
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
p = betai(0.5 * dfden, 0.5 * dfnum, dfden / float(dfden + dfnum * F))
return p |
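# Since betai is the regularized incomplete beta, lfprob is exactly the
# survival function of the F distribution: P(F' > F) = I_x(dfden/2, dfnum/2)
# with x = dfden / (dfden + dfnum * F). Cross-check against scipy (used here
# for verification only; betai/lfprob come from the module above):
from scipy import stats

dfnum, dfden, F = 3, 10, 2.5
assert abs(lfprob(dfnum, dfden, F) - stats.f.sf(F, dfnum, dfden)) < 1e-6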
def LogAccessWrapper(func):
"""Decorator that ensures that HTTP access is logged."""
def Wrapper(request, *args, **kwargs):
"""Wrapping function."""
try:
response = func(request, *args, **kwargs)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response)
except Exception: # pylint: disable=g-broad-except
# This should never happen: wrapped function is supposed to handle
# all possible exceptions and generate a proper Response object.
# Still, handling exceptions here to guarantee that the access is logged
# no matter what.
response = werkzeug_wrappers.Response("", status=500)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response)
raise
return response
return Wrapper | def function[LogAccessWrapper, parameter[func]]:
constant[Decorator that ensures that HTTP access is logged.]
def function[Wrapper, parameter[request]]:
constant[Wrapping function.]
<ast.Try object at 0x7da18bcc91e0>
return[name[response]]
return[name[Wrapper]] | keyword[def] identifier[LogAccessWrapper] ( identifier[func] ):
literal[string]
keyword[def] identifier[Wrapper] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
identifier[response] = identifier[func] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] )
identifier[server_logging] . identifier[LOGGER] . identifier[LogHttpAdminUIAccess] ( identifier[request] , identifier[response] )
keyword[except] identifier[Exception] :
identifier[response] = identifier[werkzeug_wrappers] . identifier[Response] ( literal[string] , identifier[status] = literal[int] )
identifier[server_logging] . identifier[LOGGER] . identifier[LogHttpAdminUIAccess] ( identifier[request] , identifier[response] )
keyword[raise]
keyword[return] identifier[response]
keyword[return] identifier[Wrapper] | def LogAccessWrapper(func):
"""Decorator that ensures that HTTP access is logged."""
def Wrapper(request, *args, **kwargs):
"""Wrapping function."""
try:
response = func(request, *args, **kwargs)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response) # depends on [control=['try'], data=[]]
except Exception: # pylint: disable=g-broad-except
# This should never happen: wrapped function is supposed to handle
# all possible exceptions and generate a proper Response object.
# Still, handling exceptions here to guarantee that the access is logged
# no matter what.
response = werkzeug_wrappers.Response('', status=500)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response)
raise # depends on [control=['except'], data=[]]
return response
return Wrapper |
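# Hypothetical handler wired through the decorator above: every request is
# logged exactly once, whether the handler returns normally or raises (in
# which case a synthesized 500 is logged and the exception re-raised).
@LogAccessWrapper
def RenderIndex(request):
    return werkzeug_wrappers.Response("index", status=200)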
def collapse_user(fp):
"""
Converts a path back to ~/ from expanduser()
"""
home_dir = os.path.expanduser("~")
abs_path = os.path.abspath(fp)
return abs_path.replace(home_dir, "~") | def function[collapse_user, parameter[fp]]:
constant[
Converts a path back to ~/ from expanduser()
]
variable[home_dir] assign[=] call[name[os].path.expanduser, parameter[constant[~]]]
variable[abs_path] assign[=] call[name[os].path.abspath, parameter[name[fp]]]
return[call[name[abs_path].replace, parameter[name[home_dir], constant[~]]]] | keyword[def] identifier[collapse_user] ( identifier[fp] ):
literal[string]
identifier[home_dir] = identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] )
identifier[abs_path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[fp] )
keyword[return] identifier[abs_path] . identifier[replace] ( identifier[home_dir] , literal[string] ) | def collapse_user(fp):
"""
Converts a path back to ~/ from expanduser()
"""
home_dir = os.path.expanduser('~')
abs_path = os.path.abspath(fp)
return abs_path.replace(home_dir, '~') |
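# Round-trip example: expanduser and collapse_user invert each other for
# paths under the home directory.
import os

p = os.path.expanduser("~/notes/todo.txt")
assert collapse_user(p) == "~/notes/todo.txt"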
def create_data(step: 'projects.ProjectStep') -> STEP_DATA:
"""
Creates the data object that stores the step information in the notebook
results JavaScript file.
:param step:
Project step for which to create the data
:return:
Step data tuple containing scaffold data structure for the step output.
The dictionary must then be populated with data from the step to
correctly reflect the current state of the step.
This is essentially a "blank" step dictionary, which is what the step
would look like if it had not yet run
"""
return STEP_DATA(
name=step.definition.name,
status=step.status(),
has_error=False,
body=None,
data=dict(),
includes=[],
cauldron_version=list(environ.version_info),
file_writes=[]
) | def function[create_data, parameter[step]]:
constant[
Creates the data object that stores the step information in the notebook
results JavaScript file.
:param step:
Project step for which to create the data
:return:
Step data tuple containing scaffold data structure for the step output.
The dictionary must then be populated with data from the step to
correctly reflect the current state of the step.
This is essentially a "blank" step dictionary, which is what the step
would look like if it had not yet run
]
return[call[name[STEP_DATA], parameter[]]] | keyword[def] identifier[create_data] ( identifier[step] : literal[string] )-> identifier[STEP_DATA] :
literal[string]
keyword[return] identifier[STEP_DATA] (
identifier[name] = identifier[step] . identifier[definition] . identifier[name] ,
identifier[status] = identifier[step] . identifier[status] (),
identifier[has_error] = keyword[False] ,
identifier[body] = keyword[None] ,
identifier[data] = identifier[dict] (),
identifier[includes] =[],
identifier[cauldron_version] = identifier[list] ( identifier[environ] . identifier[version_info] ),
identifier[file_writes] =[]
) | def create_data(step: 'projects.ProjectStep') -> STEP_DATA:
"""
Creates the data object that stores the step information in the notebook
results JavaScript file.
:param step:
Project step for which to create the data
:return:
Step data tuple containing scaffold data structure for the step output.
The dictionary must then be populated with data from the step to
correctly reflect the current state of the step.
This is essentially a "blank" step dictionary, which is what the step
would look like if it had not yet run
"""
return STEP_DATA(name=step.definition.name, status=step.status(), has_error=False, body=None, data=dict(), includes=[], cauldron_version=list(environ.version_info), file_writes=[]) |
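# STEP_DATA itself is not shown in this record; a namedtuple with exactly the
# fields the factory populates would satisfy it (field order is an assumption).
from collections import namedtuple

STEP_DATA = namedtuple('STEP_DATA', [
    'name', 'status', 'has_error', 'body',
    'data', 'includes', 'cauldron_version', 'file_writes',
])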
def _get_all_files(self, path, *exclude):
'''
Walk implementation. Version in python 2.x and 3.x works differently.
'''
files = list()
dirs = list()
links = list()
if os.access(path, os.R_OK):
for obj in os.listdir(path):
obj = os.path.join(path, obj)
valid = True
for ex_obj in exclude:
if obj.startswith(str(ex_obj)):
valid = False
continue
if not valid or not os.path.exists(obj) or not os.access(obj, os.R_OK):
continue
if salt.utils.path.islink(obj):
links.append(obj)
elif os.path.isdir(obj):
dirs.append(obj)
f_obj, d_obj, l_obj = self._get_all_files(obj, *exclude)
files.extend(f_obj)
dirs.extend(d_obj)
links.extend(l_obj)
elif os.path.isfile(obj):
files.append(obj)
return sorted(files), sorted(dirs), sorted(links) | def function[_get_all_files, parameter[self, path]]:
constant[
Walk implementation. Version in python 2.x and 3.x works differently.
]
variable[files] assign[=] call[name[list], parameter[]]
variable[dirs] assign[=] call[name[list], parameter[]]
variable[links] assign[=] call[name[list], parameter[]]
if call[name[os].access, parameter[name[path], name[os].R_OK]] begin[:]
for taget[name[obj]] in starred[call[name[os].listdir, parameter[name[path]]]] begin[:]
variable[obj] assign[=] call[name[os].path.join, parameter[name[path], name[obj]]]
variable[valid] assign[=] constant[True]
for taget[name[ex_obj]] in starred[name[exclude]] begin[:]
if call[name[obj].startswith, parameter[call[name[str], parameter[name[ex_obj]]]]] begin[:]
variable[valid] assign[=] constant[False]
continue
if <ast.BoolOp object at 0x7da20c76da50> begin[:]
continue
if call[name[salt].utils.path.islink, parameter[name[obj]]] begin[:]
call[name[links].append, parameter[name[obj]]]
return[tuple[[<ast.Call object at 0x7da18f00f940>, <ast.Call object at 0x7da18f00f1f0>, <ast.Call object at 0x7da18f00c340>]]] | keyword[def] identifier[_get_all_files] ( identifier[self] , identifier[path] ,* identifier[exclude] ):
literal[string]
identifier[files] = identifier[list] ()
identifier[dirs] = identifier[list] ()
identifier[links] = identifier[list] ()
keyword[if] identifier[os] . identifier[access] ( identifier[path] , identifier[os] . identifier[R_OK] ):
keyword[for] identifier[obj] keyword[in] identifier[os] . identifier[listdir] ( identifier[path] ):
identifier[obj] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[obj] )
identifier[valid] = keyword[True]
keyword[for] identifier[ex_obj] keyword[in] identifier[exclude] :
keyword[if] identifier[obj] . identifier[startswith] ( identifier[str] ( identifier[ex_obj] )):
identifier[valid] = keyword[False]
keyword[continue]
keyword[if] keyword[not] identifier[valid] keyword[or] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[obj] ) keyword[or] keyword[not] identifier[os] . identifier[access] ( identifier[obj] , identifier[os] . identifier[R_OK] ):
keyword[continue]
keyword[if] identifier[salt] . identifier[utils] . identifier[path] . identifier[islink] ( identifier[obj] ):
identifier[links] . identifier[append] ( identifier[obj] )
keyword[elif] identifier[os] . identifier[path] . identifier[isdir] ( identifier[obj] ):
identifier[dirs] . identifier[append] ( identifier[obj] )
identifier[f_obj] , identifier[d_obj] , identifier[l_obj] = identifier[self] . identifier[_get_all_files] ( identifier[obj] ,* identifier[exclude] )
identifier[files] . identifier[extend] ( identifier[f_obj] )
identifier[dirs] . identifier[extend] ( identifier[d_obj] )
identifier[links] . identifier[extend] ( identifier[l_obj] )
keyword[elif] identifier[os] . identifier[path] . identifier[isfile] ( identifier[obj] ):
identifier[files] . identifier[append] ( identifier[obj] )
keyword[return] identifier[sorted] ( identifier[files] ), identifier[sorted] ( identifier[dirs] ), identifier[sorted] ( identifier[links] ) | def _get_all_files(self, path, *exclude):
"""
Walk implementation. Version in python 2.x and 3.x works differently.
"""
files = list()
dirs = list()
links = list()
if os.access(path, os.R_OK):
for obj in os.listdir(path):
obj = os.path.join(path, obj)
valid = True
for ex_obj in exclude:
if obj.startswith(str(ex_obj)):
valid = False
continue # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ex_obj']]
if not valid or not os.path.exists(obj) or (not os.access(obj, os.R_OK)):
continue # depends on [control=['if'], data=[]]
if salt.utils.path.islink(obj):
links.append(obj) # depends on [control=['if'], data=[]]
elif os.path.isdir(obj):
dirs.append(obj)
(f_obj, d_obj, l_obj) = self._get_all_files(obj, *exclude)
files.extend(f_obj)
dirs.extend(d_obj)
links.extend(l_obj) # depends on [control=['if'], data=[]]
elif os.path.isfile(obj):
files.append(obj) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['obj']] # depends on [control=['if'], data=[]]
return (sorted(files), sorted(dirs), sorted(links)) |
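# Illustrative call (hypothetical `inspector` instance): partition a tree
# into regular files, directories and symlinks, pruning excluded prefixes.
files, dirs, links = inspector._get_all_files('/etc', '/etc/ssl', '/etc/ssh')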
def matrix_undirected_weighted(user, interaction=None):
"""
Returns an undirected, weighted matrix for call, text and call duration
where an edge exists if the relationship is reciprocated.
"""
matrix = _interaction_matrix(user, interaction=interaction)
result = [[0 for _ in range(len(matrix))] for _ in range(len(matrix))]
for a in range(len(matrix)):
for b in range(len(matrix)):
if a != b and matrix[a][b] and matrix[b][a]:
result[a][b] = matrix[a][b] + matrix[b][a]
elif matrix[a][b] is None or matrix[b][a] is None:
result[a][b] = None
else:
result[a][b] = 0
return result | def function[matrix_undirected_weighted, parameter[user, interaction]]:
constant[
Returns an undirected, weighted matrix for call, text and call duration
where an edge exists if the relationship is reciprocated.
]
variable[matrix] assign[=] call[name[_interaction_matrix], parameter[name[user]]]
variable[result] assign[=] <ast.ListComp object at 0x7da204565900>
for taget[name[a]] in starred[call[name[range], parameter[call[name[len], parameter[name[matrix]]]]]] begin[:]
for taget[name[b]] in starred[call[name[range], parameter[call[name[len], parameter[name[matrix]]]]]] begin[:]
if <ast.BoolOp object at 0x7da204567460> begin[:]
call[call[name[result]][name[a]]][name[b]] assign[=] binary_operation[call[call[name[matrix]][name[a]]][name[b]] + call[call[name[matrix]][name[b]]][name[a]]]
return[name[result]] | keyword[def] identifier[matrix_undirected_weighted] ( identifier[user] , identifier[interaction] = keyword[None] ):
literal[string]
identifier[matrix] = identifier[_interaction_matrix] ( identifier[user] , identifier[interaction] = identifier[interaction] )
identifier[result] =[[ literal[int] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[len] ( identifier[matrix] ))] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[len] ( identifier[matrix] ))]
keyword[for] identifier[a] keyword[in] identifier[range] ( identifier[len] ( identifier[matrix] )):
keyword[for] identifier[b] keyword[in] identifier[range] ( identifier[len] ( identifier[matrix] )):
keyword[if] identifier[a] != identifier[b] keyword[and] identifier[matrix] [ identifier[a] ][ identifier[b] ] keyword[and] identifier[matrix] [ identifier[b] ][ identifier[a] ]:
identifier[result] [ identifier[a] ][ identifier[b] ]= identifier[matrix] [ identifier[a] ][ identifier[b] ]+ identifier[matrix] [ identifier[b] ][ identifier[a] ]
keyword[elif] identifier[matrix] [ identifier[a] ][ identifier[b] ] keyword[is] keyword[None] keyword[or] identifier[matrix] [ identifier[b] ][ identifier[a] ] keyword[is] keyword[None] :
identifier[result] [ identifier[a] ][ identifier[b] ]= keyword[None]
keyword[else] :
identifier[result] [ identifier[a] ][ identifier[b] ]= literal[int]
keyword[return] identifier[result] | def matrix_undirected_weighted(user, interaction=None):
"""
Returns an undirected, weighted matrix for call, text and call duration
where an edge exists if the relationship is reciprocated.
"""
matrix = _interaction_matrix(user, interaction=interaction)
result = [[0 for _ in range(len(matrix))] for _ in range(len(matrix))]
for a in range(len(matrix)):
for b in range(len(matrix)):
if a != b and matrix[a][b] and matrix[b][a]:
result[a][b] = matrix[a][b] + matrix[b][a] # depends on [control=['if'], data=[]]
elif matrix[a][b] is None or matrix[b][a] is None:
result[a][b] = None # depends on [control=['if'], data=[]]
else:
result[a][b] = 0 # depends on [control=['for'], data=['b']] # depends on [control=['for'], data=['a']]
return result |
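As a quick worked example of the reciprocity rule above, take a 3-node interaction matrix in which only the 0-1 relationship goes both ways (the call counts are invented):

matrix = [
    [0, 2, 0],  # node 0 contacted node 1 twice, never node 2
    [3, 0, 5],  # node 1 contacted node 0 three times and node 2 five times
    [0, 0, 0],  # node 2 contacted no one
]
result = [[0] * 3 for _ in range(3)]
for a in range(3):
    for b in range(3):
        if a != b and matrix[a][b] and matrix[b][a]:
            result[a][b] = matrix[a][b] + matrix[b][a]
print(result)  # [[0, 5, 0], [5, 0, 0], [0, 0, 0]] -- the one-way 1->2 edge is dropped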
def _list_records(self, rtype=None, name=None, content=None):
"""
list all records
:param str rtype: type of record
:param str name: name of record
:param mixed content: value of record
:return list: list of found records
:raises Exception: on error
"""
opts = {'domain': self._domain}
if rtype is not None:
opts['type'] = rtype.upper()
if name is not None:
opts['name'] = self._full_name(name)
if content is not None:
opts['content'] = content
opts.update(self._auth)
response = self._api.nameserver.info(opts)
self._validate_response(
response=response, message='Failed to get records')
records = []
if 'record' in response['resData']:
for record in response['resData']['record']:
processed_record = {
'type': record['type'],
'name': record['name'],
'ttl': record['ttl'],
'content': record['content'],
'id': record['id']
}
records.append(processed_record)
return records | def function[_list_records, parameter[self, rtype, name, content]]:
constant[
list all records
:param str rtype: type of record
:param str name: name of record
:param mixed content: value of record
:return list: list of found records
:raises Exception: on error
]
variable[opts] assign[=] dictionary[[<ast.Constant object at 0x7da1b22a5330>], [<ast.Attribute object at 0x7da1b22a46d0>]]
if compare[name[rtype] is_not constant[None]] begin[:]
call[name[opts]][constant[type]] assign[=] call[name[rtype].upper, parameter[]]
if compare[name[name] is_not constant[None]] begin[:]
call[name[opts]][constant[name]] assign[=] call[name[self]._full_name, parameter[name[name]]]
if compare[name[content] is_not constant[None]] begin[:]
call[name[opts]][constant[content]] assign[=] name[content]
call[name[opts].update, parameter[name[self]._auth]]
variable[response] assign[=] call[name[self]._api.nameserver.info, parameter[name[opts]]]
call[name[self]._validate_response, parameter[]]
variable[records] assign[=] list[[]]
if compare[constant[record] in call[name[response]][constant[resData]]] begin[:]
for taget[name[record]] in starred[call[call[name[response]][constant[resData]]][constant[record]]] begin[:]
variable[processed_record] assign[=] dictionary[[<ast.Constant object at 0x7da1b238a1d0>, <ast.Constant object at 0x7da1b23895d0>, <ast.Constant object at 0x7da1b2389810>, <ast.Constant object at 0x7da1b2389510>, <ast.Constant object at 0x7da1b23888b0>], [<ast.Subscript object at 0x7da1b238a8f0>, <ast.Subscript object at 0x7da1b23885e0>, <ast.Subscript object at 0x7da1b2388970>, <ast.Subscript object at 0x7da1b22ad5d0>, <ast.Subscript object at 0x7da1b22ac4c0>]]
call[name[records].append, parameter[name[processed_record]]]
return[name[records]] | keyword[def] identifier[_list_records] ( identifier[self] , identifier[rtype] = keyword[None] , identifier[name] = keyword[None] , identifier[content] = keyword[None] ):
literal[string]
identifier[opts] ={ literal[string] : identifier[self] . identifier[_domain] }
keyword[if] identifier[rtype] keyword[is] keyword[not] keyword[None] :
identifier[opts] [ literal[string] ]= identifier[rtype] . identifier[upper] ()
keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] :
identifier[opts] [ literal[string] ]= identifier[self] . identifier[_full_name] ( identifier[name] )
keyword[if] identifier[content] keyword[is] keyword[not] keyword[None] :
identifier[opts] [ literal[string] ]= identifier[content]
identifier[opts] . identifier[update] ( identifier[self] . identifier[_auth] )
identifier[response] = identifier[self] . identifier[_api] . identifier[nameserver] . identifier[info] ( identifier[opts] )
identifier[self] . identifier[_validate_response] (
identifier[response] = identifier[response] , identifier[message] = literal[string] )
identifier[records] =[]
keyword[if] literal[string] keyword[in] identifier[response] [ literal[string] ]:
keyword[for] identifier[record] keyword[in] identifier[response] [ literal[string] ][ literal[string] ]:
identifier[processed_record] ={
literal[string] : identifier[record] [ literal[string] ],
literal[string] : identifier[record] [ literal[string] ],
literal[string] : identifier[record] [ literal[string] ],
literal[string] : identifier[record] [ literal[string] ],
literal[string] : identifier[record] [ literal[string] ]
}
identifier[records] . identifier[append] ( identifier[processed_record] )
keyword[return] identifier[records] | def _list_records(self, rtype=None, name=None, content=None):
"""
list all records
:param str rtype: type of record
:param str name: name of record
:param mixed content: value of record
:return list: list of found records
:raises Exception: on error
"""
opts = {'domain': self._domain}
if rtype is not None:
opts['type'] = rtype.upper() # depends on [control=['if'], data=['rtype']]
if name is not None:
opts['name'] = self._full_name(name) # depends on [control=['if'], data=['name']]
if content is not None:
opts['content'] = content # depends on [control=['if'], data=['content']]
opts.update(self._auth)
response = self._api.nameserver.info(opts)
self._validate_response(response=response, message='Failed to get records')
records = []
if 'record' in response['resData']:
for record in response['resData']['record']:
processed_record = {'type': record['type'], 'name': record['name'], 'ttl': record['ttl'], 'content': record['content'], 'id': record['id']}
records.append(processed_record) # depends on [control=['for'], data=['record']] # depends on [control=['if'], data=[]]
return records |
def increment(self):
'''Increment the counter (overflow rolls back to 0).'''
for i in xrange(len(self._counter) - 1, -1, -1):
self._counter[i] += 1
if self._counter[i] < 256: break
# Carry the one
self._counter[i] = 0
# Overflow
else:
self._counter = [ 0 ] * len(self._counter) | def function[increment, parameter[self]]:
constant[Increment the counter (overflow rolls back to 0).]
for taget[name[i]] in starred[call[name[xrange], parameter[binary_operation[call[name[len], parameter[name[self]._counter]] - constant[1]], <ast.UnaryOp object at 0x7da20c6e6140>, <ast.UnaryOp object at 0x7da2041dbca0>]]] begin[:]
<ast.AugAssign object at 0x7da2041daaa0>
if compare[call[name[self]._counter][name[i]] less[<] constant[256]] begin[:]
break
call[name[self]._counter][name[i]] assign[=] constant[0] | keyword[def] identifier[increment] ( identifier[self] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[len] ( identifier[self] . identifier[_counter] )- literal[int] ,- literal[int] ,- literal[int] ):
identifier[self] . identifier[_counter] [ identifier[i] ]+= literal[int]
keyword[if] identifier[self] . identifier[_counter] [ identifier[i] ]< literal[int] : keyword[break]
identifier[self] . identifier[_counter] [ identifier[i] ]= literal[int]
keyword[else] :
identifier[self] . identifier[_counter] =[ literal[int] ]* identifier[len] ( identifier[self] . identifier[_counter] ) | def increment(self):
"""Increment the counter (overflow rolls back to 0)."""
for i in xrange(len(self._counter) - 1, -1, -1):
self._counter[i] += 1
if self._counter[i] < 256:
break # depends on [control=['if'], data=[]]
# Carry the one
self._counter[i] = 0 # depends on [control=['for'], data=['i']]
else:
# Overflow
self._counter = [0] * len(self._counter) |
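The counter above is a big-endian byte array with carry propagation; a small Python 3 rendition makes the rollover concrete (a sketch, not the original class):

class Counter:
    def __init__(self, nbytes=2):
        self._counter = [0] * nbytes

    def increment(self):
        for i in range(len(self._counter) - 1, -1, -1):
            self._counter[i] += 1
            if self._counter[i] < 256:
                break             # no carry needed, stop here
            self._counter[i] = 0  # carry into the next byte
        else:
            # Every byte was 255: full overflow rolls back to zero
            self._counter = [0] * len(self._counter)

c = Counter()
c._counter = [0, 255]
c.increment()
print(c._counter)  # [1, 0] -- the carry propagated one byte left
c._counter = [255, 255]
c.increment()
print(c._counter)  # [0, 0] -- overflow wrapped around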
def _terms(self):
""" Returns a list with the objects as terms. """
res = []
for sign, terms in self.terms.items():
for ID, lon in terms.items():
res.append(self.T(ID, sign))
return res | def function[_terms, parameter[self]]:
constant[ Returns a list with the objects as terms. ]
variable[res] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b11dc2e0>, <ast.Name object at 0x7da1b11dc520>]]] in starred[call[name[self].terms.items, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b11ddcc0>, <ast.Name object at 0x7da1b11df160>]]] in starred[call[name[terms].items, parameter[]]] begin[:]
call[name[res].append, parameter[call[name[self].T, parameter[name[ID], name[sign]]]]]
return[name[res]] | keyword[def] identifier[_terms] ( identifier[self] ):
literal[string]
identifier[res] =[]
keyword[for] identifier[sign] , identifier[terms] keyword[in] identifier[self] . identifier[terms] . identifier[items] ():
keyword[for] identifier[ID] , identifier[lon] keyword[in] identifier[terms] . identifier[items] ():
identifier[res] . identifier[append] ( identifier[self] . identifier[T] ( identifier[ID] , identifier[sign] ))
keyword[return] identifier[res] | def _terms(self):
""" Returns a list with the objects as terms. """
res = []
for (sign, terms) in self.terms.items():
for (ID, lon) in terms.items():
res.append(self.T(ID, sign)) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return res |
def export(self, exporter=None, force_stroke=False):
"""
Export this SWF using the specified exporter.
When no exporter is passed in the default exporter used
is swf.export.SVGExporter.
Exporters should extend the swf.export.BaseExporter class.
@param exporter : the exporter to use
@param force_stroke : set to true to force strokes on fills,
useful for some edge cases.
"""
exporter = SVGExporter() if exporter is None else exporter
if self._data is None:
raise Exception("This SWF was not loaded! (no data)")
if len(self.tags) == 0:
raise Exception("This SWF doesn't contain any tags!")
return exporter.export(self, force_stroke) | def function[export, parameter[self, exporter, force_stroke]]:
constant[
Export this SWF using the specified exporter.
When no exporter is passed in the default exporter used
is swf.export.SVGExporter.
Exporters should extend the swf.export.BaseExporter class.
@param exporter : the exporter to use
@param force_stroke : set to true to force strokes on fills,
useful for some edge cases.
]
variable[exporter] assign[=] <ast.IfExp object at 0x7da1b0eec280>
if compare[name[self]._data is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0eec430>
if compare[call[name[len], parameter[name[self].tags]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b0eec610>
return[call[name[exporter].export, parameter[name[self], name[force_stroke]]]] | keyword[def] identifier[export] ( identifier[self] , identifier[exporter] = keyword[None] , identifier[force_stroke] = keyword[False] ):
literal[string]
identifier[exporter] = identifier[SVGExporter] () keyword[if] identifier[exporter] keyword[is] keyword[None] keyword[else] identifier[exporter]
keyword[if] identifier[self] . identifier[_data] keyword[is] keyword[None] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[len] ( identifier[self] . identifier[tags] )== literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] identifier[exporter] . identifier[export] ( identifier[self] , identifier[force_stroke] ) | def export(self, exporter=None, force_stroke=False):
"""
Export this SWF using the specified exporter.
When no exporter is passed in the default exporter used
is swf.export.SVGExporter.
Exporters should extend the swf.export.BaseExporter class.
@param exporter : the exporter to use
@param force_stroke : set to true to force strokes on fills,
useful for some edge cases.
"""
exporter = SVGExporter() if exporter is None else exporter
if self._data is None:
raise Exception('This SWF was not loaded! (no data)') # depends on [control=['if'], data=[]]
if len(self.tags) == 0:
raise Exception("This SWF doesn't contain any tags!") # depends on [control=['if'], data=[]]
return exporter.export(self, force_stroke) |
def cell2code(cell, named_ranges):
"""Generate python code for the given cell"""
if cell.formula:
debug = False
# if 'OFFSET' in cell.formula or 'INDEX' in cell.formula:
# debug = True
# if debug:
# print 'FORMULA', cell.formula
ref = parse_cell_address(cell.address()) if not cell.is_named_range else None
sheet = cell.sheet
e = shunting_yard(cell.formula, named_ranges, ref=ref, tokenize_range = False)
ast,root = build_ast(e, debug = debug)
code = root.emit(ast, context=sheet)
# print 'CODE', code, ref
else:
ast = None
if isinstance(cell.value, unicode):
code = u'u"' + cell.value.replace(u'"', u'\\"') + u'"'
elif isinstance(cell.value, str):
raise RuntimeError("Got unexpected non-unicode str")
else:
code = str(cell.value)
return code,ast | def function[cell2code, parameter[cell, named_ranges]]:
constant[Generate python code for the given cell]
if name[cell].formula begin[:]
variable[debug] assign[=] constant[False]
variable[ref] assign[=] <ast.IfExp object at 0x7da1b08602b0>
variable[sheet] assign[=] name[cell].sheet
variable[e] assign[=] call[name[shunting_yard], parameter[name[cell].formula, name[named_ranges]]]
<ast.Tuple object at 0x7da1b0862d40> assign[=] call[name[build_ast], parameter[name[e]]]
variable[code] assign[=] call[name[root].emit, parameter[name[ast]]]
return[tuple[[<ast.Name object at 0x7da204565f90>, <ast.Name object at 0x7da204566fe0>]]] | keyword[def] identifier[cell2code] ( identifier[cell] , identifier[named_ranges] ):
literal[string]
keyword[if] identifier[cell] . identifier[formula] :
identifier[debug] = keyword[False]
identifier[ref] = identifier[parse_cell_address] ( identifier[cell] . identifier[address] ()) keyword[if] keyword[not] identifier[cell] . identifier[is_named_range] keyword[else] keyword[None]
identifier[sheet] = identifier[cell] . identifier[sheet]
identifier[e] = identifier[shunting_yard] ( identifier[cell] . identifier[formula] , identifier[named_ranges] , identifier[ref] = identifier[ref] , identifier[tokenize_range] = keyword[False] )
identifier[ast] , identifier[root] = identifier[build_ast] ( identifier[e] , identifier[debug] = identifier[debug] )
identifier[code] = identifier[root] . identifier[emit] ( identifier[ast] , identifier[context] = identifier[sheet] )
keyword[else] :
identifier[ast] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[cell] . identifier[value] , identifier[unicode] ):
identifier[code] = literal[string] + identifier[cell] . identifier[value] . identifier[replace] ( literal[string] , literal[string] )+ literal[string]
keyword[elif] identifier[isinstance] ( identifier[cell] . identifier[value] , identifier[str] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[else] :
identifier[code] = identifier[str] ( identifier[cell] . identifier[value] )
keyword[return] identifier[code] , identifier[ast] | def cell2code(cell, named_ranges):
"""Generate python code for the given cell"""
if cell.formula:
debug = False
# if 'OFFSET' in cell.formula or 'INDEX' in cell.formula:
# debug = True
# if debug:
# print 'FORMULA', cell.formula
ref = parse_cell_address(cell.address()) if not cell.is_named_range else None
sheet = cell.sheet
e = shunting_yard(cell.formula, named_ranges, ref=ref, tokenize_range=False)
(ast, root) = build_ast(e, debug=debug)
code = root.emit(ast, context=sheet) # depends on [control=['if'], data=[]]
else:
# print 'CODE', code, ref
ast = None
if isinstance(cell.value, unicode):
code = u'u"' + cell.value.replace(u'"', u'\\"') + u'"' # depends on [control=['if'], data=[]]
elif isinstance(cell.value, str):
raise RuntimeError('Got unexpected non-unicode str') # depends on [control=['if'], data=[]]
else:
code = str(cell.value)
return (code, ast) |
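For the non-formula branch above, the quote-escaping rule is easiest to see on a concrete value (Python 3 here, where str plays the role of Python 2's unicode):

value = 'say "hi"'
code = 'u"' + value.replace('"', '\\"') + '"'
print(code)                 # u"say \"hi\"" -- a source-level string literal
assert eval(code) == value  # and it round-trips back to the cell value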
def disassemble(self, code, lasti=-1, file=None):
"""Disassemble a code object."""
return self.disco(code, lasti, file) | def function[disassemble, parameter[self, code, lasti, file]]:
constant[Disassemble a code object.]
return[call[name[self].disco, parameter[name[code], name[lasti], name[file]]]] | keyword[def] identifier[disassemble] ( identifier[self] , identifier[code] , identifier[lasti] =- literal[int] , identifier[file] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[disco] ( identifier[code] , identifier[lasti] , identifier[file] ) | def disassemble(self, code, lasti=-1, file=None):
"""Disassemble a code object."""
return self.disco(code, lasti, file) |
def njsd_all(network, ref, query, file, verbose=True):
"""Compute transcriptome-wide nJSD between reference and query expression profiles.
    Args:
        network (str): File path to a network file.
        ref (str): File path to a reference expression file.
        query (str): File path to a query expression file.
        file (str): File path to which the results are written.
"""
graph, gene_set_total = util.parse_network(network)
ref_gene_expression_dict = util.parse_gene_expression(ref, mean=True)
query_gene_expression_dict = util.parse_gene_expression(query, mean=False)
maximally_ambiguous_gene_experession_dict = util.get_maximally_ambiguous_network(query_gene_expression_dict)
gene_set_present = set(query_gene_expression_dict.keys())
with open(file, 'w') as outFile:
print('nJSD_NT', 'nJSD_TA', 'tITH', sep='\t', file=outFile)
normal_to_tumor_njsd = entropy.njsd(network=graph,
ref_gene_expression_dict=ref_gene_expression_dict,
query_gene_expression_dict=query_gene_expression_dict,
gene_set=gene_set_present)
tumor_to_ambiguous_njsd = entropy.njsd(network=graph,
ref_gene_expression_dict=maximally_ambiguous_gene_experession_dict,
query_gene_expression_dict=query_gene_expression_dict,
gene_set=gene_set_present)
tITH = normal_to_tumor_njsd / (normal_to_tumor_njsd + tumor_to_ambiguous_njsd)
with open(file, 'a') as outFile:
print(normal_to_tumor_njsd, tumor_to_ambiguous_njsd, tITH, sep='\t', file=outFile)
return normal_to_tumor_njsd / (normal_to_tumor_njsd + tumor_to_ambiguous_njsd) | def function[njsd_all, parameter[network, ref, query, file, verbose]]:
constant[Compute transcriptome-wide nJSD between reference and query expression profiles.
    Args:
        network (str): File path to a network file.
        ref (str): File path to a reference expression file.
        query (str): File path to a query expression file.
        file (str): File path to which the results are written.
]
<ast.Tuple object at 0x7da1b2775e40> assign[=] call[name[util].parse_network, parameter[name[network]]]
variable[ref_gene_expression_dict] assign[=] call[name[util].parse_gene_expression, parameter[name[ref]]]
variable[query_gene_expression_dict] assign[=] call[name[util].parse_gene_expression, parameter[name[query]]]
variable[maximally_ambiguous_gene_experession_dict] assign[=] call[name[util].get_maximally_ambiguous_network, parameter[name[query_gene_expression_dict]]]
variable[gene_set_present] assign[=] call[name[set], parameter[call[name[query_gene_expression_dict].keys, parameter[]]]]
with call[name[open], parameter[name[file], constant[w]]] begin[:]
call[name[print], parameter[constant[nJSD_NT], constant[nJSD_TA], constant[tITH]]]
variable[normal_to_tumor_njsd] assign[=] call[name[entropy].njsd, parameter[]]
variable[tumor_to_ambiguous_njsd] assign[=] call[name[entropy].njsd, parameter[]]
variable[tITH] assign[=] binary_operation[name[normal_to_tumor_njsd] / binary_operation[name[normal_to_tumor_njsd] + name[tumor_to_ambiguous_njsd]]]
with call[name[open], parameter[name[file], constant[a]]] begin[:]
call[name[print], parameter[name[normal_to_tumor_njsd], name[tumor_to_ambiguous_njsd], name[tITH]]]
return[binary_operation[name[normal_to_tumor_njsd] / binary_operation[name[normal_to_tumor_njsd] + name[tumor_to_ambiguous_njsd]]]] | keyword[def] identifier[njsd_all] ( identifier[network] , identifier[ref] , identifier[query] , identifier[file] , identifier[verbose] = keyword[True] ):
literal[string]
identifier[graph] , identifier[gene_set_total] = identifier[util] . identifier[parse_network] ( identifier[network] )
identifier[ref_gene_expression_dict] = identifier[util] . identifier[parse_gene_expression] ( identifier[ref] , identifier[mean] = keyword[True] )
identifier[query_gene_expression_dict] = identifier[util] . identifier[parse_gene_expression] ( identifier[query] , identifier[mean] = keyword[False] )
identifier[maximally_ambiguous_gene_experession_dict] = identifier[util] . identifier[get_maximally_ambiguous_network] ( identifier[query_gene_expression_dict] )
identifier[gene_set_present] = identifier[set] ( identifier[query_gene_expression_dict] . identifier[keys] ())
keyword[with] identifier[open] ( identifier[file] , literal[string] ) keyword[as] identifier[outFile] :
identifier[print] ( literal[string] , literal[string] , literal[string] , identifier[sep] = literal[string] , identifier[file] = identifier[outFile] )
identifier[normal_to_tumor_njsd] = identifier[entropy] . identifier[njsd] ( identifier[network] = identifier[graph] ,
identifier[ref_gene_expression_dict] = identifier[ref_gene_expression_dict] ,
identifier[query_gene_expression_dict] = identifier[query_gene_expression_dict] ,
identifier[gene_set] = identifier[gene_set_present] )
identifier[tumor_to_ambiguous_njsd] = identifier[entropy] . identifier[njsd] ( identifier[network] = identifier[graph] ,
identifier[ref_gene_expression_dict] = identifier[maximally_ambiguous_gene_experession_dict] ,
identifier[query_gene_expression_dict] = identifier[query_gene_expression_dict] ,
identifier[gene_set] = identifier[gene_set_present] )
identifier[tITH] = identifier[normal_to_tumor_njsd] /( identifier[normal_to_tumor_njsd] + identifier[tumor_to_ambiguous_njsd] )
keyword[with] identifier[open] ( identifier[file] , literal[string] ) keyword[as] identifier[outFile] :
identifier[print] ( identifier[normal_to_tumor_njsd] , identifier[tumor_to_ambiguous_njsd] , identifier[tITH] , identifier[sep] = literal[string] , identifier[file] = identifier[outFile] )
keyword[return] identifier[normal_to_tumor_njsd] /( identifier[normal_to_tumor_njsd] + identifier[tumor_to_ambiguous_njsd] ) | def njsd_all(network, ref, query, file, verbose=True):
"""Compute transcriptome-wide nJSD between reference and query expression profiles.
    Args:
        network (str): File path to a network file.
        ref (str): File path to a reference expression file.
        query (str): File path to a query expression file.
        file (str): File path to which the results are written.
"""
(graph, gene_set_total) = util.parse_network(network)
ref_gene_expression_dict = util.parse_gene_expression(ref, mean=True)
query_gene_expression_dict = util.parse_gene_expression(query, mean=False)
maximally_ambiguous_gene_experession_dict = util.get_maximally_ambiguous_network(query_gene_expression_dict)
gene_set_present = set(query_gene_expression_dict.keys())
with open(file, 'w') as outFile:
print('nJSD_NT', 'nJSD_TA', 'tITH', sep='\t', file=outFile) # depends on [control=['with'], data=['outFile']]
normal_to_tumor_njsd = entropy.njsd(network=graph, ref_gene_expression_dict=ref_gene_expression_dict, query_gene_expression_dict=query_gene_expression_dict, gene_set=gene_set_present)
tumor_to_ambiguous_njsd = entropy.njsd(network=graph, ref_gene_expression_dict=maximally_ambiguous_gene_experession_dict, query_gene_expression_dict=query_gene_expression_dict, gene_set=gene_set_present)
tITH = normal_to_tumor_njsd / (normal_to_tumor_njsd + tumor_to_ambiguous_njsd)
with open(file, 'a') as outFile:
print(normal_to_tumor_njsd, tumor_to_ambiguous_njsd, tITH, sep='\t', file=outFile) # depends on [control=['with'], data=['outFile']]
return normal_to_tumor_njsd / (normal_to_tumor_njsd + tumor_to_ambiguous_njsd) |
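The tITH score above is just the normal-to-tumor divergence normalised by the total divergence; a toy calculation with assumed nJSD values:

normal_to_tumor_njsd = 0.30     # assumed nJSD(normal, tumor)
tumor_to_ambiguous_njsd = 0.10  # assumed nJSD(tumor, maximally ambiguous profile)
tITH = normal_to_tumor_njsd / (normal_to_tumor_njsd + tumor_to_ambiguous_njsd)
print(tITH)  # 0.75 -- values nearer 1 mean the tumor sits closer to the ambiguous extreme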
def parse(self, fileobj, name_hint='', parser=None):
"""Fill from a file-like object."""
self.current_block = None # Reset current block
parser = parser or Parser()
for line in parser.parse(fileobj, name_hint=name_hint):
self.handle_line(line) | def function[parse, parameter[self, fileobj, name_hint, parser]]:
constant[Fill from a file-like object.]
name[self].current_block assign[=] constant[None]
variable[parser] assign[=] <ast.BoolOp object at 0x7da1b2346b60>
for taget[name[line]] in starred[call[name[parser].parse, parameter[name[fileobj]]]] begin[:]
call[name[self].handle_line, parameter[name[line]]] | keyword[def] identifier[parse] ( identifier[self] , identifier[fileobj] , identifier[name_hint] = literal[string] , identifier[parser] = keyword[None] ):
literal[string]
identifier[self] . identifier[current_block] = keyword[None]
identifier[parser] = identifier[parser] keyword[or] identifier[Parser] ()
keyword[for] identifier[line] keyword[in] identifier[parser] . identifier[parse] ( identifier[fileobj] , identifier[name_hint] = identifier[name_hint] ):
identifier[self] . identifier[handle_line] ( identifier[line] ) | def parse(self, fileobj, name_hint='', parser=None):
"""Fill from a file-like object."""
self.current_block = None # Reset current block
parser = parser or Parser()
for line in parser.parse(fileobj, name_hint=name_hint):
self.handle_line(line) # depends on [control=['for'], data=['line']] |
def rename(self, new_dirname=None, new_basename=None):
"""Rename the dirname, basename or their combinations.
        Modify the file's directory name, base name, or a combination of the two.
"""
if not new_basename:
new_basename = self.new_basename
if not new_dirname:
new_dirname = self.dirname
else:
new_dirname = os.path.abspath(new_dirname)
new_abspath = os.path.join(new_dirname, new_basename)
os.rename(self.abspath, new_abspath)
        # If the rename succeeded, update the stored file information
self.abspath = new_abspath
self.dirname = new_dirname
self.basename = new_basename | def function[rename, parameter[self, new_dirname, new_basename]]:
constant[Rename the dirname, basename or their combinations.
        Modify the file's directory name, base name, or a combination of the two.
]
if <ast.UnaryOp object at 0x7da18f09d690> begin[:]
variable[new_basename] assign[=] name[self].new_basename
if <ast.UnaryOp object at 0x7da18f09c3a0> begin[:]
variable[new_dirname] assign[=] name[self].dirname
variable[new_abspath] assign[=] call[name[os].path.join, parameter[name[new_dirname], name[new_basename]]]
call[name[os].rename, parameter[name[self].abspath, name[new_abspath]]]
name[self].abspath assign[=] name[new_abspath]
name[self].dirname assign[=] name[new_dirname]
name[self].basename assign[=] name[new_basename] | keyword[def] identifier[rename] ( identifier[self] , identifier[new_dirname] = keyword[None] , identifier[new_basename] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[new_basename] :
identifier[new_basename] = identifier[self] . identifier[new_basename]
keyword[if] keyword[not] identifier[new_dirname] :
identifier[new_dirname] = identifier[self] . identifier[dirname]
keyword[else] :
identifier[new_dirname] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[new_dirname] )
identifier[new_abspath] = identifier[os] . identifier[path] . identifier[join] ( identifier[new_dirname] , identifier[new_basename] )
identifier[os] . identifier[rename] ( identifier[self] . identifier[abspath] , identifier[new_abspath] )
identifier[self] . identifier[abspath] = identifier[new_abspath]
identifier[self] . identifier[dirname] = identifier[new_dirname]
identifier[self] . identifier[basename] = identifier[new_basename] | def rename(self, new_dirname=None, new_basename=None):
"""Rename the dirname, basename or their combinations.
        Modify the file's directory name, base name, or a combination of the two.
"""
if not new_basename:
new_basename = self.new_basename # depends on [control=['if'], data=[]]
if not new_dirname:
new_dirname = self.dirname # depends on [control=['if'], data=[]]
else:
new_dirname = os.path.abspath(new_dirname)
new_abspath = os.path.join(new_dirname, new_basename)
os.rename(self.abspath, new_abspath)
        # If the rename succeeded, update the stored file information
self.abspath = new_abspath
self.dirname = new_dirname
self.basename = new_basename |
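The path handling in rename reduces to abspath plus join; a brief illustration of the recombination (the paths are invented):

import os

basename = 'report.txt'
new_dirname = os.path.abspath('newdir')            # a relative dir is resolved against the CWD
new_abspath = os.path.join(new_dirname, basename)  # recombined with the kept basename
print(new_abspath)  # e.g. /current/working/dir/newdir/report.txt
# os.rename(old_abspath, new_abspath) would then perform the actual move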
def reduce_data_frame_evenly_with_gaps (df, valcol, target_len, maxgap, **kwargs):
""""Reduce" a DataFrame by collapsing rows in grouped chunks, grouping based on
gaps in one of the columns.
This function combines :func:`reduce_data_frame` with
:func:`slice_evenly_with_gaps`.
"""
return reduce_data_frame (df,
slice_evenly_with_gaps (df[valcol], target_len, maxgap),
**kwargs) | def function[reduce_data_frame_evenly_with_gaps, parameter[df, valcol, target_len, maxgap]]:
constant["Reduce" a DataFrame by collapsing rows in grouped chunks, grouping based on
gaps in one of the columns.
This function combines :func:`reduce_data_frame` with
:func:`slice_evenly_with_gaps`.
]
return[call[name[reduce_data_frame], parameter[name[df], call[name[slice_evenly_with_gaps], parameter[call[name[df]][name[valcol]], name[target_len], name[maxgap]]]]]] | keyword[def] identifier[reduce_data_frame_evenly_with_gaps] ( identifier[df] , identifier[valcol] , identifier[target_len] , identifier[maxgap] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[reduce_data_frame] ( identifier[df] ,
identifier[slice_evenly_with_gaps] ( identifier[df] [ identifier[valcol] ], identifier[target_len] , identifier[maxgap] ),
** identifier[kwargs] ) | def reduce_data_frame_evenly_with_gaps(df, valcol, target_len, maxgap, **kwargs):
""""Reduce" a DataFrame by collapsing rows in grouped chunks, grouping based on
gaps in one of the columns.
This function combines :func:`reduce_data_frame` with
:func:`slice_evenly_with_gaps`.
"""
return reduce_data_frame(df, slice_evenly_with_gaps(df[valcol], target_len, maxgap), **kwargs) |
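Neither helper is shown in this excerpt, but the gap-splitting idea behind slice_evenly_with_gaps can be sketched on its own: break a monotonic value column wherever consecutive samples jump by more than maxgap (a simplified stand-in, not the library's implementation):

import numpy as np

def split_on_gaps(values, maxgap):
    """Yield (start, stop) index pairs for runs with no jump larger than maxgap."""
    values = np.asarray(values)
    breaks = np.where(np.diff(values) > maxgap)[0] + 1
    edges = [0] + breaks.tolist() + [len(values)]
    return list(zip(edges[:-1], edges[1:]))

print(split_on_gaps([0, 1, 2, 10, 11, 30], maxgap=5))
# [(0, 3), (3, 5), (5, 6)] -- three runs, split at the 2->10 and 11->30 jumps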
def update_one(self, filter, update, upsert=False,
bypass_document_validation=False,
collation=None):
"""Update a single document matching the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_update(update)
with self._socket_for_writes() as sock_info:
result = self._update(sock_info, filter, update, upsert,
check_keys=False,
bypass_doc_val=bypass_document_validation,
collation=collation)
return UpdateResult(result, self.write_concern.acknowledged) | def function[update_one, parameter[self, filter, update, upsert, bypass_document_validation, collation]]:
constant[Update a single document matching the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
]
call[name[common].validate_is_mapping, parameter[constant[filter], name[filter]]]
call[name[common].validate_ok_for_update, parameter[name[update]]]
with call[name[self]._socket_for_writes, parameter[]] begin[:]
variable[result] assign[=] call[name[self]._update, parameter[name[sock_info], name[filter], name[update], name[upsert]]]
return[call[name[UpdateResult], parameter[name[result], name[self].write_concern.acknowledged]]] | keyword[def] identifier[update_one] ( identifier[self] , identifier[filter] , identifier[update] , identifier[upsert] = keyword[False] ,
identifier[bypass_document_validation] = keyword[False] ,
identifier[collation] = keyword[None] ):
literal[string]
identifier[common] . identifier[validate_is_mapping] ( literal[string] , identifier[filter] )
identifier[common] . identifier[validate_ok_for_update] ( identifier[update] )
keyword[with] identifier[self] . identifier[_socket_for_writes] () keyword[as] identifier[sock_info] :
identifier[result] = identifier[self] . identifier[_update] ( identifier[sock_info] , identifier[filter] , identifier[update] , identifier[upsert] ,
identifier[check_keys] = keyword[False] ,
identifier[bypass_doc_val] = identifier[bypass_document_validation] ,
identifier[collation] = identifier[collation] )
keyword[return] identifier[UpdateResult] ( identifier[result] , identifier[self] . identifier[write_concern] . identifier[acknowledged] ) | def update_one(self, filter, update, upsert=False, bypass_document_validation=False, collation=None):
"""Update a single document matching the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping('filter', filter)
common.validate_ok_for_update(update)
with self._socket_for_writes() as sock_info:
result = self._update(sock_info, filter, update, upsert, check_keys=False, bypass_doc_val=bypass_document_validation, collation=collation) # depends on [control=['with'], data=['sock_info']]
return UpdateResult(result, self.write_concern.acknowledged) |
def create_new_example(self, foo='', a='', b=''):
"""Entity object factory."""
return create_new_example(foo=foo, a=a, b=b) | def function[create_new_example, parameter[self, foo, a, b]]:
constant[Entity object factory.]
return[call[name[create_new_example], parameter[]]] | keyword[def] identifier[create_new_example] ( identifier[self] , identifier[foo] = literal[string] , identifier[a] = literal[string] , identifier[b] = literal[string] ):
literal[string]
keyword[return] identifier[create_new_example] ( identifier[foo] = identifier[foo] , identifier[a] = identifier[a] , identifier[b] = identifier[b] ) | def create_new_example(self, foo='', a='', b=''):
"""Entity object factory."""
return create_new_example(foo=foo, a=a, b=b) |
def _error_handler(data, unique_id):
"""Called when data first received
:param data: Received data
:param unique_id: Unique id
:return: True if error present
"""
if data.get('statusCode') == 'FAILURE':
logger.error('[Subscription: %s] %s: %s' % (unique_id, data.get('errorCode'), data.get('errorMessage')))
if data.get('connectionClosed'):
return True
if data.get('status'):
# Clients shouldn't disconnect if status 503 is returned; when the stream
        # recovers, updates will be sent containing the latest data
logger.warning('[Subscription: %s] status: %s' % (unique_id, data['status'])) | def function[_error_handler, parameter[data, unique_id]]:
constant[Called when data is first received.
:param data: Received data
:param unique_id: Unique id
:return: True if error present
]
if compare[call[name[data].get, parameter[constant[statusCode]]] equal[==] constant[FAILURE]] begin[:]
call[name[logger].error, parameter[binary_operation[constant[[Subscription: %s] %s: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b15f3280>, <ast.Call object at 0x7da1b15f22c0>, <ast.Call object at 0x7da1b17fa860>]]]]]
if call[name[data].get, parameter[constant[connectionClosed]]] begin[:]
return[constant[True]]
if call[name[data].get, parameter[constant[status]]] begin[:]
call[name[logger].warning, parameter[binary_operation[constant[[Subscription: %s] status: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1793100>, <ast.Subscript object at 0x7da1b17918a0>]]]]] | keyword[def] identifier[_error_handler] ( identifier[data] , identifier[unique_id] ):
literal[string]
keyword[if] identifier[data] . identifier[get] ( literal[string] )== literal[string] :
identifier[logger] . identifier[error] ( literal[string] %( identifier[unique_id] , identifier[data] . identifier[get] ( literal[string] ), identifier[data] . identifier[get] ( literal[string] )))
keyword[if] identifier[data] . identifier[get] ( literal[string] ):
keyword[return] keyword[True]
keyword[if] identifier[data] . identifier[get] ( literal[string] ):
identifier[logger] . identifier[warning] ( literal[string] %( identifier[unique_id] , identifier[data] [ literal[string] ])) | def _error_handler(data, unique_id):
"""Called when data first received
:param data: Received data
:param unique_id: Unique id
:return: True if error present
"""
if data.get('statusCode') == 'FAILURE':
logger.error('[Subscription: %s] %s: %s' % (unique_id, data.get('errorCode'), data.get('errorMessage')))
if data.get('connectionClosed'):
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if data.get('status'):
# Clients shouldn't disconnect if status 503 is returned; when the stream
        # recovers, updates will be sent containing the latest data
logger.warning('[Subscription: %s] status: %s' % (unique_id, data['status'])) # depends on [control=['if'], data=[]] |
def isdir(path, **kwargs):
"""Check if *path* is a directory"""
import os.path
return os.path.isdir(path, **kwargs) | def function[isdir, parameter[path]]:
constant[Check if *path* is a directory]
import module[os.path]
return[call[name[os].path.isdir, parameter[name[path]]]] | keyword[def] identifier[isdir] ( identifier[path] ,** identifier[kwargs] ):
literal[string]
keyword[import] identifier[os] . identifier[path]
keyword[return] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ,** identifier[kwargs] ) | def isdir(path, **kwargs):
"""Check if *path* is a directory"""
import os.path
return os.path.isdir(path, **kwargs) |
def best_model(self):
"""Rebuilds the top scoring model from an optimisation.
Returns
-------
model: AMPAL
Returns an AMPAL model of the top scoring parameters.
Raises
------
AttributeError
            Raised if the optimiser has not been run.
"""
if not hasattr(self, 'halloffame'):
raise AttributeError(
                'No best model found, have you run the optimiser?')
model = self.build_fn(
(self.specification,
self.sequences,
self.parse_individual(self.halloffame[0])
))
return model | def function[best_model, parameter[self]]:
constant[Rebuilds the top scoring model from an optimisation.
Returns
-------
model: AMPAL
Returns an AMPAL model of the top scoring parameters.
Raises
------
AttributeError
            Raised if the optimiser has not been run.
]
if <ast.UnaryOp object at 0x7da1b26202b0> begin[:]
<ast.Raise object at 0x7da1b26203a0>
variable[model] assign[=] call[name[self].build_fn, parameter[tuple[[<ast.Attribute object at 0x7da1b2621de0>, <ast.Attribute object at 0x7da1b2621e40>, <ast.Call object at 0x7da1b2621cf0>]]]]
return[name[model]] | keyword[def] identifier[best_model] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[raise] identifier[AttributeError] (
literal[string] )
identifier[model] = identifier[self] . identifier[build_fn] (
( identifier[self] . identifier[specification] ,
identifier[self] . identifier[sequences] ,
identifier[self] . identifier[parse_individual] ( identifier[self] . identifier[halloffame] [ literal[int] ])
))
keyword[return] identifier[model] | def best_model(self):
"""Rebuilds the top scoring model from an optimisation.
Returns
-------
model: AMPAL
Returns an AMPAL model of the top scoring parameters.
Raises
------
AttributeError
            Raised if the optimiser has not been run.
"""
if not hasattr(self, 'halloffame'):
            raise AttributeError('No best model found, have you run the optimiser?') # depends on [control=['if'], data=[]]
model = self.build_fn((self.specification, self.sequences, self.parse_individual(self.halloffame[0])))
return model |
def get_vulnerability(
source,
sink,
triggers,
lattice,
cfg,
interactive,
blackbox_mapping
):
"""Get vulnerability between source and sink if it exists.
Uses triggers to find sanitisers.
Note: When a secondary node is in_constraint with the sink
but not the source, the secondary is a save_N_LHS
node made in process_function in expr_visitor.
Args:
source(TriggerNode): TriggerNode of the source.
sink(TriggerNode): TriggerNode of the sink.
triggers(Triggers): Triggers of the CFG.
lattice(Lattice): the lattice we're analysing.
cfg(CFG): .blackbox_assignments used in is_unknown, .nodes used in build_def_use_chain
interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
blackbox_mapping(dict): A map of blackbox functions containing whether or not they propagate taint.
Returns:
A Vulnerability if it exists, else None
"""
nodes_in_constraint = [
secondary
for secondary in reversed(source.secondary_nodes)
if lattice.in_constraint(
secondary,
sink.cfg_node
)
]
nodes_in_constraint.append(source.cfg_node)
if sink.trigger.all_arguments_propagate_taint:
sink_args = get_sink_args(sink.cfg_node)
else:
sink_args = get_sink_args_which_propagate(sink, sink.cfg_node.ast_node)
tainted_node_in_sink_arg = get_tainted_node_in_sink_args(
sink_args,
nodes_in_constraint,
)
if tainted_node_in_sink_arg:
vuln_deets = {
'source': source.cfg_node,
'source_trigger_word': source.trigger_word,
'sink': sink.cfg_node,
'sink_trigger_word': sink.trigger_word
}
sanitiser_nodes = set()
potential_sanitiser = None
if sink.sanitisers:
for sanitiser in sink.sanitisers:
for cfg_node in triggers.sanitiser_dict[sanitiser]:
if isinstance(cfg_node, AssignmentNode):
sanitiser_nodes.add(cfg_node)
elif isinstance(cfg_node, IfNode):
potential_sanitiser = cfg_node
def_use = build_def_use_chain(
cfg.nodes,
lattice
)
for chain in get_vulnerability_chains(
source.cfg_node,
sink.cfg_node,
def_use
):
vulnerability_type, interactive = how_vulnerable(
chain,
blackbox_mapping,
sanitiser_nodes,
potential_sanitiser,
cfg.blackbox_assignments,
interactive,
vuln_deets
)
if vulnerability_type == VulnerabilityType.FALSE:
continue
vuln_deets['reassignment_nodes'] = chain
return vuln_factory(vulnerability_type)(**vuln_deets), interactive
return None, interactive | def function[get_vulnerability, parameter[source, sink, triggers, lattice, cfg, interactive, blackbox_mapping]]:
constant[Get vulnerability between source and sink if it exists.
Uses triggers to find sanitisers.
Note: When a secondary node is in_constraint with the sink
but not the source, the secondary is a save_N_LHS
node made in process_function in expr_visitor.
Args:
source(TriggerNode): TriggerNode of the source.
sink(TriggerNode): TriggerNode of the sink.
triggers(Triggers): Triggers of the CFG.
lattice(Lattice): the lattice we're analysing.
cfg(CFG): .blackbox_assignments used in is_unknown, .nodes used in build_def_use_chain
interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
blackbox_mapping(dict): A map of blackbox functions containing whether or not they propagate taint.
Returns:
A Vulnerability if it exists, else None
]
variable[nodes_in_constraint] assign[=] <ast.ListComp object at 0x7da1b1e65ed0>
call[name[nodes_in_constraint].append, parameter[name[source].cfg_node]]
if name[sink].trigger.all_arguments_propagate_taint begin[:]
variable[sink_args] assign[=] call[name[get_sink_args], parameter[name[sink].cfg_node]]
variable[tainted_node_in_sink_arg] assign[=] call[name[get_tainted_node_in_sink_args], parameter[name[sink_args], name[nodes_in_constraint]]]
if name[tainted_node_in_sink_arg] begin[:]
variable[vuln_deets] assign[=] dictionary[[<ast.Constant object at 0x7da1b1e67250>, <ast.Constant object at 0x7da1b1e64550>, <ast.Constant object at 0x7da1b1e649d0>, <ast.Constant object at 0x7da1b1e64040>], [<ast.Attribute object at 0x7da1b1e66320>, <ast.Attribute object at 0x7da1b1e654b0>, <ast.Attribute object at 0x7da1b1e64fa0>, <ast.Attribute object at 0x7da1b1e67220>]]
variable[sanitiser_nodes] assign[=] call[name[set], parameter[]]
variable[potential_sanitiser] assign[=] constant[None]
if name[sink].sanitisers begin[:]
for taget[name[sanitiser]] in starred[name[sink].sanitisers] begin[:]
for taget[name[cfg_node]] in starred[call[name[triggers].sanitiser_dict][name[sanitiser]]] begin[:]
if call[name[isinstance], parameter[name[cfg_node], name[AssignmentNode]]] begin[:]
call[name[sanitiser_nodes].add, parameter[name[cfg_node]]]
variable[def_use] assign[=] call[name[build_def_use_chain], parameter[name[cfg].nodes, name[lattice]]]
for taget[name[chain]] in starred[call[name[get_vulnerability_chains], parameter[name[source].cfg_node, name[sink].cfg_node, name[def_use]]]] begin[:]
<ast.Tuple object at 0x7da1b1ec2710> assign[=] call[name[how_vulnerable], parameter[name[chain], name[blackbox_mapping], name[sanitiser_nodes], name[potential_sanitiser], name[cfg].blackbox_assignments, name[interactive], name[vuln_deets]]]
if compare[name[vulnerability_type] equal[==] name[VulnerabilityType].FALSE] begin[:]
continue
call[name[vuln_deets]][constant[reassignment_nodes]] assign[=] name[chain]
return[tuple[[<ast.Call object at 0x7da1b1ec2860>, <ast.Name object at 0x7da1b1da2ec0>]]]
return[tuple[[<ast.Constant object at 0x7da1b1da0b20>, <ast.Name object at 0x7da1b1da3a00>]]] | keyword[def] identifier[get_vulnerability] (
identifier[source] ,
identifier[sink] ,
identifier[triggers] ,
identifier[lattice] ,
identifier[cfg] ,
identifier[interactive] ,
identifier[blackbox_mapping]
):
literal[string]
identifier[nodes_in_constraint] =[
identifier[secondary]
keyword[for] identifier[secondary] keyword[in] identifier[reversed] ( identifier[source] . identifier[secondary_nodes] )
keyword[if] identifier[lattice] . identifier[in_constraint] (
identifier[secondary] ,
identifier[sink] . identifier[cfg_node]
)
]
identifier[nodes_in_constraint] . identifier[append] ( identifier[source] . identifier[cfg_node] )
keyword[if] identifier[sink] . identifier[trigger] . identifier[all_arguments_propagate_taint] :
identifier[sink_args] = identifier[get_sink_args] ( identifier[sink] . identifier[cfg_node] )
keyword[else] :
identifier[sink_args] = identifier[get_sink_args_which_propagate] ( identifier[sink] , identifier[sink] . identifier[cfg_node] . identifier[ast_node] )
identifier[tainted_node_in_sink_arg] = identifier[get_tainted_node_in_sink_args] (
identifier[sink_args] ,
identifier[nodes_in_constraint] ,
)
keyword[if] identifier[tainted_node_in_sink_arg] :
identifier[vuln_deets] ={
literal[string] : identifier[source] . identifier[cfg_node] ,
literal[string] : identifier[source] . identifier[trigger_word] ,
literal[string] : identifier[sink] . identifier[cfg_node] ,
literal[string] : identifier[sink] . identifier[trigger_word]
}
identifier[sanitiser_nodes] = identifier[set] ()
identifier[potential_sanitiser] = keyword[None]
keyword[if] identifier[sink] . identifier[sanitisers] :
keyword[for] identifier[sanitiser] keyword[in] identifier[sink] . identifier[sanitisers] :
keyword[for] identifier[cfg_node] keyword[in] identifier[triggers] . identifier[sanitiser_dict] [ identifier[sanitiser] ]:
keyword[if] identifier[isinstance] ( identifier[cfg_node] , identifier[AssignmentNode] ):
identifier[sanitiser_nodes] . identifier[add] ( identifier[cfg_node] )
keyword[elif] identifier[isinstance] ( identifier[cfg_node] , identifier[IfNode] ):
identifier[potential_sanitiser] = identifier[cfg_node]
identifier[def_use] = identifier[build_def_use_chain] (
identifier[cfg] . identifier[nodes] ,
identifier[lattice]
)
keyword[for] identifier[chain] keyword[in] identifier[get_vulnerability_chains] (
identifier[source] . identifier[cfg_node] ,
identifier[sink] . identifier[cfg_node] ,
identifier[def_use]
):
identifier[vulnerability_type] , identifier[interactive] = identifier[how_vulnerable] (
identifier[chain] ,
identifier[blackbox_mapping] ,
identifier[sanitiser_nodes] ,
identifier[potential_sanitiser] ,
identifier[cfg] . identifier[blackbox_assignments] ,
identifier[interactive] ,
identifier[vuln_deets]
)
keyword[if] identifier[vulnerability_type] == identifier[VulnerabilityType] . identifier[FALSE] :
keyword[continue]
identifier[vuln_deets] [ literal[string] ]= identifier[chain]
keyword[return] identifier[vuln_factory] ( identifier[vulnerability_type] )(** identifier[vuln_deets] ), identifier[interactive]
keyword[return] keyword[None] , identifier[interactive] | def get_vulnerability(source, sink, triggers, lattice, cfg, interactive, blackbox_mapping):
"""Get vulnerability between source and sink if it exists.
Uses triggers to find sanitisers.
Note: When a secondary node is in_constraint with the sink
but not the source, the secondary is a save_N_LHS
node made in process_function in expr_visitor.
Args:
source(TriggerNode): TriggerNode of the source.
sink(TriggerNode): TriggerNode of the sink.
triggers(Triggers): Triggers of the CFG.
lattice(Lattice): the lattice we're analysing.
cfg(CFG): .blackbox_assignments used in is_unknown, .nodes used in build_def_use_chain
interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
blackbox_mapping(dict): A map of blackbox functions containing whether or not they propagate taint.
Returns:
A Vulnerability if it exists, else None
"""
nodes_in_constraint = [secondary for secondary in reversed(source.secondary_nodes) if lattice.in_constraint(secondary, sink.cfg_node)]
nodes_in_constraint.append(source.cfg_node)
if sink.trigger.all_arguments_propagate_taint:
sink_args = get_sink_args(sink.cfg_node) # depends on [control=['if'], data=[]]
else:
sink_args = get_sink_args_which_propagate(sink, sink.cfg_node.ast_node)
tainted_node_in_sink_arg = get_tainted_node_in_sink_args(sink_args, nodes_in_constraint)
if tainted_node_in_sink_arg:
vuln_deets = {'source': source.cfg_node, 'source_trigger_word': source.trigger_word, 'sink': sink.cfg_node, 'sink_trigger_word': sink.trigger_word}
sanitiser_nodes = set()
potential_sanitiser = None
if sink.sanitisers:
for sanitiser in sink.sanitisers:
for cfg_node in triggers.sanitiser_dict[sanitiser]:
if isinstance(cfg_node, AssignmentNode):
sanitiser_nodes.add(cfg_node) # depends on [control=['if'], data=[]]
elif isinstance(cfg_node, IfNode):
potential_sanitiser = cfg_node # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cfg_node']] # depends on [control=['for'], data=['sanitiser']] # depends on [control=['if'], data=[]]
def_use = build_def_use_chain(cfg.nodes, lattice)
for chain in get_vulnerability_chains(source.cfg_node, sink.cfg_node, def_use):
(vulnerability_type, interactive) = how_vulnerable(chain, blackbox_mapping, sanitiser_nodes, potential_sanitiser, cfg.blackbox_assignments, interactive, vuln_deets)
if vulnerability_type == VulnerabilityType.FALSE:
continue # depends on [control=['if'], data=[]]
vuln_deets['reassignment_nodes'] = chain
return (vuln_factory(vulnerability_type)(**vuln_deets), interactive) # depends on [control=['for'], data=['chain']] # depends on [control=['if'], data=[]]
return (None, interactive) |
def init_with_context(self, context):
"""
Please refer to
:meth:`~admin_tools.menu.items.MenuItem.init_with_context`
documentation from :class:`~admin_tools.menu.items.MenuItem` class.
"""
from admin_tools.menu.models import Bookmark
for b in Bookmark.objects.filter(user=context['request'].user):
self.children.append(MenuItem(mark_safe(b.title), b.url))
if not len(self.children):
self.enabled = False | def function[init_with_context, parameter[self, context]]:
constant[
Please refer to
:meth:`~admin_tools.menu.items.MenuItem.init_with_context`
documentation from :class:`~admin_tools.menu.items.MenuItem` class.
]
from relative_module[admin_tools.menu.models] import module[Bookmark]
for taget[name[b]] in starred[call[name[Bookmark].objects.filter, parameter[]]] begin[:]
call[name[self].children.append, parameter[call[name[MenuItem], parameter[call[name[mark_safe], parameter[name[b].title]], name[b].url]]]]
if <ast.UnaryOp object at 0x7da2041da830> begin[:]
name[self].enabled assign[=] constant[False] | keyword[def] identifier[init_with_context] ( identifier[self] , identifier[context] ):
literal[string]
keyword[from] identifier[admin_tools] . identifier[menu] . identifier[models] keyword[import] identifier[Bookmark]
keyword[for] identifier[b] keyword[in] identifier[Bookmark] . identifier[objects] . identifier[filter] ( identifier[user] = identifier[context] [ literal[string] ]. identifier[user] ):
identifier[self] . identifier[children] . identifier[append] ( identifier[MenuItem] ( identifier[mark_safe] ( identifier[b] . identifier[title] ), identifier[b] . identifier[url] ))
keyword[if] keyword[not] identifier[len] ( identifier[self] . identifier[children] ):
identifier[self] . identifier[enabled] = keyword[False] | def init_with_context(self, context):
"""
Please refer to
:meth:`~admin_tools.menu.items.MenuItem.init_with_context`
documentation from :class:`~admin_tools.menu.items.MenuItem` class.
"""
from admin_tools.menu.models import Bookmark
for b in Bookmark.objects.filter(user=context['request'].user):
self.children.append(MenuItem(mark_safe(b.title), b.url)) # depends on [control=['for'], data=['b']]
if not len(self.children):
self.enabled = False # depends on [control=['if'], data=[]] |
def get_field(self, offset, length, format):
"""Returns unpacked Python struct array.
Args:
offset (int): offset to byte array within structure
length (int): how many bytes to unpack
format (str): Python struct format string for unpacking
See Also:
https://docs.python.org/2/library/struct.html#format-characters
"""
return struct.unpack(format, self.data[offset:offset + length])[0] | def function[get_field, parameter[self, offset, length, format]]:
constant[Returns unpacked Python struct array.
Args:
offset (int): offset to byte array within structure
length (int): how many bytes to unpack
format (str): Python struct format string for unpacking
See Also:
https://docs.python.org/2/library/struct.html#format-characters
]
return[call[call[name[struct].unpack, parameter[name[format], call[name[self].data][<ast.Slice object at 0x7da1b27e3280>]]]][constant[0]]] | keyword[def] identifier[get_field] ( identifier[self] , identifier[offset] , identifier[length] , identifier[format] ):
literal[string]
keyword[return] identifier[struct] . identifier[unpack] ( identifier[format] , identifier[self] . identifier[data] [ identifier[offset] : identifier[offset] + identifier[length] ])[ literal[int] ] | def get_field(self, offset, length, format):
"""Returns unpacked Python struct array.
Args:
offset (int): offset to byte array within structure
length (int): how many bytes to unpack
format (str): Python struct format string for unpacking
See Also:
https://docs.python.org/2/library/struct.html#format-characters
"""
return struct.unpack(format, self.data[offset:offset + length])[0] |
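A minimal usage sketch for get_field, assuming the function is available at module scope and attached to a small owner class with a `data` byte buffer (the Header class and sample layout below are hypothetical). Note the method returns only the first unpacked value, not the whole tuple that struct.unpack yields.

import struct

class Header:                                   # hypothetical owner exposing a `data` buffer
    get_field = get_field                       # reuse the function defined above as a method
    def __init__(self, data):
        self.data = data

h = Header(struct.pack('<IH', 0xDEADBEEF, 42))  # 4-byte uint32 followed by 2-byte uint16
h.get_field(0, 4, '<I')                         # -> 3735928559 (0xDEADBEEF)
h.get_field(4, 2, '<H')                         # -> 42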
def html_to_ssml(text):
"""
Replaces specific html tags with probable SSML counterparts.
"""
ssml_text = reduce(lambda x, y: x.replace(y, html_to_ssml_maps[y]), html_to_ssml_maps, text)
return ssml_text | def function[html_to_ssml, parameter[text]]:
constant[
Replaces specific html tags with probable SSML counterparts.
]
variable[ssml_text] assign[=] call[name[reduce], parameter[<ast.Lambda object at 0x7da1b11744f0>, name[html_to_ssml_maps], name[text]]]
return[name[ssml_text]] | keyword[def] identifier[html_to_ssml] ( identifier[text] ):
literal[string]
identifier[ssml_text] = identifier[reduce] ( keyword[lambda] identifier[x] , identifier[y] : identifier[x] . identifier[replace] ( identifier[y] , identifier[html_to_ssml_maps] [ identifier[y] ]), identifier[html_to_ssml_maps] , identifier[text] )
keyword[return] identifier[ssml_text] | def html_to_ssml(text):
"""
Replaces specific html tags with probable SSML counterparts.
"""
ssml_text = reduce(lambda x, y: x.replace(y, html_to_ssml_maps[y]), html_to_ssml_maps, text)
return ssml_text |
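html_to_ssml depends on a module-level html_to_ssml_maps dict that is not shown here, and on reduce, which is a builtin on Python 2 but must be imported from functools on Python 3. A sketch under those assumptions, with a hypothetical tag mapping:

from functools import reduce                    # required on Python 3

html_to_ssml_maps = {                           # hypothetical mapping; the real module defines its own
    '<b>': '<emphasis level="strong">',
    '</b>': '</emphasis>',
    '<br/>': '<break time="500ms"/>',
}

html_to_ssml('Hello <b>world</b><br/>')
# -> 'Hello <emphasis level="strong">world</emphasis><break time="500ms"/>'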
def VerifyRow(self, parser_mediator, row):
"""Verifies if a line of the file is in the expected format.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
try:
time_elements_tuple = self._GetTimeElementsTuple(row['time'])
except (TypeError, ValueError):
return False
try:
dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
return False
try:
my_event = int(row['event'], 10)
except (TypeError, ValueError):
return False
if my_event < 1 or my_event > 77:
return False
try:
category = int(row['cat'], 10)
except (TypeError, ValueError):
return False
if category < 1 or category > 4:
return False
return True | def function[VerifyRow, parameter[self, parser_mediator, row]]:
constant[Verifies if a line of the file is in the expected format.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
Returns:
bool: True if this is the correct parser, False otherwise.
]
<ast.Try object at 0x7da20e9b1870>
<ast.Try object at 0x7da20e9b3e50>
<ast.Try object at 0x7da20e9b1d80>
if <ast.BoolOp object at 0x7da20e9b3fa0> begin[:]
return[constant[False]]
<ast.Try object at 0x7da207f99c30>
if <ast.BoolOp object at 0x7da207f98e80> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[VerifyRow] ( identifier[self] , identifier[parser_mediator] , identifier[row] ):
literal[string]
keyword[try] :
identifier[time_elements_tuple] = identifier[self] . identifier[_GetTimeElementsTuple] ( identifier[row] [ literal[string] ])
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[return] keyword[False]
keyword[try] :
identifier[dfdatetime_time_elements] . identifier[TimeElements] (
identifier[time_elements_tuple] = identifier[time_elements_tuple] )
keyword[except] identifier[ValueError] :
keyword[return] keyword[False]
keyword[try] :
identifier[my_event] = identifier[int] ( identifier[row] [ literal[string] ], literal[int] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[return] keyword[False]
keyword[if] identifier[my_event] < literal[int] keyword[or] identifier[my_event] > literal[int] :
keyword[return] keyword[False]
keyword[try] :
identifier[category] = identifier[int] ( identifier[row] [ literal[string] ], literal[int] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[return] keyword[False]
keyword[if] identifier[category] < literal[int] keyword[or] identifier[category] > literal[int] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def VerifyRow(self, parser_mediator, row):
"""Verifies if a line of the file is in the expected format.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
try:
time_elements_tuple = self._GetTimeElementsTuple(row['time']) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
return False # depends on [control=['except'], data=[]]
try:
dfdatetime_time_elements.TimeElements(time_elements_tuple=time_elements_tuple) # depends on [control=['try'], data=[]]
except ValueError:
return False # depends on [control=['except'], data=[]]
try:
my_event = int(row['event'], 10) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
return False # depends on [control=['except'], data=[]]
if my_event < 1 or my_event > 77:
return False # depends on [control=['if'], data=[]]
try:
category = int(row['cat'], 10) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
return False # depends on [control=['except'], data=[]]
if category < 1 or category > 4:
return False # depends on [control=['if'], data=[]]
return True |
def _get_selection(self):
"Returns the index of the selected item (list for multiselect) or None"
if self.multiselect:
return self.wx_obj.GetSelections()
else:
sel = self.wx_obj.GetSelection()
if sel == wx.NOT_FOUND:
return None
else:
return sel | def function[_get_selection, parameter[self]]:
constant[Returns the index of the selected item (list for multiselect) or None]
if name[self].multiselect begin[:]
return[call[name[self].wx_obj.GetSelections, parameter[]]] | keyword[def] identifier[_get_selection] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[multiselect] :
keyword[return] identifier[self] . identifier[wx_obj] . identifier[GetSelections] ()
keyword[else] :
identifier[sel] = identifier[self] . identifier[wx_obj] . identifier[GetSelection] ()
keyword[if] identifier[sel] == identifier[wx] . identifier[NOT_FOUND] :
keyword[return] keyword[None]
keyword[else] :
keyword[return] identifier[sel] | def _get_selection(self):
"""Returns the index of the selected item (list for multiselect) or None"""
if self.multiselect:
return self.wx_obj.GetSelections() # depends on [control=['if'], data=[]]
else:
sel = self.wx_obj.GetSelection()
if sel == wx.NOT_FOUND:
return None # depends on [control=['if'], data=[]]
else:
return sel |
def read_stats(self, *stats):
""" Read port statistics from chassis.
:param stats: list of requested statistics to read, if empty - read all statistics.
"""
self.statistics = OrderedDict()
for port in self.ports:
port_stats = IxeStatTotal(port).get_attributes(FLAG_RDONLY, *stats)
port_stats.update({c + '_rate': v for c, v in
IxeStatRate(port).get_attributes(FLAG_RDONLY, *stats).items()})
self.statistics[str(port)] = port_stats
return self.statistics | def function[read_stats, parameter[self]]:
constant[ Read port statistics from chassis.
:param stats: list of requested statistics to read, if empty - read all statistics.
]
name[self].statistics assign[=] call[name[OrderedDict], parameter[]]
for taget[name[port]] in starred[name[self].ports] begin[:]
variable[port_stats] assign[=] call[call[name[IxeStatTotal], parameter[name[port]]].get_attributes, parameter[name[FLAG_RDONLY], <ast.Starred object at 0x7da2047e8910>]]
call[name[port_stats].update, parameter[<ast.DictComp object at 0x7da2047e8100>]]
call[name[self].statistics][call[name[str], parameter[name[port]]]] assign[=] name[port_stats]
return[name[self].statistics] | keyword[def] identifier[read_stats] ( identifier[self] ,* identifier[stats] ):
literal[string]
identifier[self] . identifier[statistics] = identifier[OrderedDict] ()
keyword[for] identifier[port] keyword[in] identifier[self] . identifier[ports] :
identifier[port_stats] = identifier[IxeStatTotal] ( identifier[port] ). identifier[get_attributes] ( identifier[FLAG_RDONLY] ,* identifier[stats] )
identifier[port_stats] . identifier[update] ({ identifier[c] + literal[string] : identifier[v] keyword[for] identifier[c] , identifier[v] keyword[in]
identifier[IxeStatRate] ( identifier[port] ). identifier[get_attributes] ( identifier[FLAG_RDONLY] ,* identifier[stats] ). identifier[items] ()})
identifier[self] . identifier[statistics] [ identifier[str] ( identifier[port] )]= identifier[port_stats]
keyword[return] identifier[self] . identifier[statistics] | def read_stats(self, *stats):
""" Read port statistics from chassis.
:param stats: list of requested statistics to read, if empty - read all statistics.
"""
self.statistics = OrderedDict()
for port in self.ports:
port_stats = IxeStatTotal(port).get_attributes(FLAG_RDONLY, *stats)
port_stats.update({c + '_rate': v for (c, v) in IxeStatRate(port).get_attributes(FLAG_RDONLY, *stats).items()})
self.statistics[str(port)] = port_stats # depends on [control=['for'], data=['port']]
return self.statistics |
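The shape of the returned mapping follows directly from the loop above: one entry per port keyed by str(port), with the rate counters merged in under a `_rate` suffix. A hedged call sketch (the owner object and the statistic names are illustrative):

stats = ports.read_stats('framesSent', 'framesReceived')
# e.g. {'1/1/1': {'framesSent': 1000, 'framesReceived': 998,
#                 'framesSent_rate': 100, 'framesReceived_rate': 99}, ...}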
def _presize(self, size:int, val_xtra_size:int=32, scale:Tuple[float]=(0.08, 1.0), ratio:Tuple[float]=(0.75, 4./3.),
interpolation:int=2):
"Resize images to `size` using `RandomResizedCrop`, passing along `kwargs` to train transform"
return self.pre_transform(
tvt.RandomResizedCrop(size, scale=scale, ratio=ratio, interpolation=interpolation),
[tvt.Resize(size+val_xtra_size), tvt.CenterCrop(size)]) | def function[_presize, parameter[self, size, val_xtra_size, scale, ratio, interpolation]]:
constant[Resize images to `size` using `RandomResizedCrop`, passing along `kwargs` to train transform]
return[call[name[self].pre_transform, parameter[call[name[tvt].RandomResizedCrop, parameter[name[size]]], list[[<ast.Call object at 0x7da1b2029f30>, <ast.Call object at 0x7da1b2028b20>]]]]] | keyword[def] identifier[_presize] ( identifier[self] , identifier[size] : identifier[int] , identifier[val_xtra_size] : identifier[int] = literal[int] , identifier[scale] : identifier[Tuple] [ identifier[float] ]=( literal[int] , literal[int] ), identifier[ratio] : identifier[Tuple] [ identifier[float] ]=( literal[int] , literal[int] / literal[int] ),
identifier[interpolation] : identifier[int] = literal[int] ):
literal[string]
keyword[return] identifier[self] . identifier[pre_transform] (
identifier[tvt] . identifier[RandomResizedCrop] ( identifier[size] , identifier[scale] = identifier[scale] , identifier[ratio] = identifier[ratio] , identifier[interpolation] = identifier[interpolation] ),
[ identifier[tvt] . identifier[Resize] ( identifier[size] + identifier[val_xtra_size] ), identifier[tvt] . identifier[CenterCrop] ( identifier[size] )]) | def _presize(self, size: int, val_xtra_size: int=32, scale: Tuple[float]=(0.08, 1.0), ratio: Tuple[float]=(0.75, 4.0 / 3.0), interpolation: int=2):
"""Resize images to `size` using `RandomResizedCrop`, passing along `kwargs` to train transform"""
return self.pre_transform(tvt.RandomResizedCrop(size, scale=scale, ratio=ratio, interpolation=interpolation), [tvt.Resize(size + val_xtra_size), tvt.CenterCrop(size)]) |
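The two pipelines built here are plain torchvision transforms; a standalone sketch of the equivalent train and validation transforms, with illustrative sizes (interpolation=2 is the legacy PIL bilinear code):

import torchvision.transforms as tvt

size, val_xtra_size = 224, 32
train_tfm = tvt.RandomResizedCrop(size, scale=(0.08, 1.0),
                                  ratio=(0.75, 4. / 3.), interpolation=2)
valid_tfms = [tvt.Resize(size + val_xtra_size),   # resize the short side to 256
              tvt.CenterCrop(size)]               # then crop the central 224x224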
def require(self, product_type):
"""Schedules the tasks that produce product_type to be executed before the requesting task.
There must be at least one task that produces the required product type, or the
dependencies will not be satisfied.
:API: public
"""
self._dependencies.add(product_type)
self._context.products.require(product_type) | def function[require, parameter[self, product_type]]:
constant[Schedules the tasks that produce product_type to be executed before the requesting task.
There must be at least one task that produces the required product type, or the
dependencies will not be satisfied.
:API: public
]
call[name[self]._dependencies.add, parameter[name[product_type]]]
call[name[self]._context.products.require, parameter[name[product_type]]] | keyword[def] identifier[require] ( identifier[self] , identifier[product_type] ):
literal[string]
identifier[self] . identifier[_dependencies] . identifier[add] ( identifier[product_type] )
identifier[self] . identifier[_context] . identifier[products] . identifier[require] ( identifier[product_type] ) | def require(self, product_type):
"""Schedules the tasks that produce product_type to be executed before the requesting task.
There must be at least one task that produces the required product type, or the
dependencies will not be satisfied.
:API: public
"""
self._dependencies.add(product_type)
self._context.products.require(product_type) |
def arc(document, bounding_rect, start, extent, style):
"arc, pieslice (filled), arc with chord (filled)"
(x1, y1, x2, y2) = bounding_rect
import math
cx = (x1 + x2)/2.0
cy = (y1 + y2)/2.0
rx = (x2 - x1)/2.0
ry = (y2 - y1)/2.0
start = math.radians(float(start))
extent = math.radians(float(extent))
# from SVG spec:
# http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes
x1 = rx * math.cos(start) + cx
y1 = -ry * math.sin(start) + cy # XXX: ry is negated here
x2 = rx * math.cos(start + extent) + cx
y2 = -ry * math.sin(start + extent) + cy # XXX: ry is negated here
if abs(extent) > math.pi:
fa = 1
else:
fa = 0
if extent > 0.0:
fs = 0
else:
fs = 1
path = []
# common: arc
path.append('M%s,%s' % (x1, y1))
path.append('A%s,%s 0 %d %d %s,%s' % (rx, ry, fa, fs, x2, y2))
if style == ARC:
pass
elif style == CHORD:
path.append('z')
else: # default: pieslice
path.append('L%s,%s' % (cx, cy))
path.append('z')
return setattribs(document.createElement('path'), d = ''.join(path)) | def function[arc, parameter[document, bounding_rect, start, extent, style]]:
constant[arc, pieslice (filled), arc with chord (filled)]
<ast.Tuple object at 0x7da1b0f44ac0> assign[=] name[bounding_rect]
import module[math]
variable[cx] assign[=] binary_operation[binary_operation[name[x1] + name[x2]] / constant[2.0]]
variable[cy] assign[=] binary_operation[binary_operation[name[y1] + name[y2]] / constant[2.0]]
variable[rx] assign[=] binary_operation[binary_operation[name[x2] - name[x1]] / constant[2.0]]
variable[ry] assign[=] binary_operation[binary_operation[name[y2] - name[y1]] / constant[2.0]]
variable[start] assign[=] call[name[math].radians, parameter[call[name[float], parameter[name[start]]]]]
variable[extent] assign[=] call[name[math].radians, parameter[call[name[float], parameter[name[extent]]]]]
variable[x1] assign[=] binary_operation[binary_operation[name[rx] * call[name[math].cos, parameter[name[start]]]] + name[cx]]
variable[y1] assign[=] binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b0f449a0> * call[name[math].sin, parameter[name[start]]]] + name[cy]]
variable[x2] assign[=] binary_operation[binary_operation[name[rx] * call[name[math].cos, parameter[binary_operation[name[start] + name[extent]]]]] + name[cx]]
variable[y2] assign[=] binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b0f47850> * call[name[math].sin, parameter[binary_operation[name[start] + name[extent]]]]] + name[cy]]
if compare[call[name[abs], parameter[name[extent]]] greater[>] name[math].pi] begin[:]
variable[fa] assign[=] constant[1]
if compare[name[extent] greater[>] constant[0.0]] begin[:]
variable[fs] assign[=] constant[0]
variable[path] assign[=] list[[]]
call[name[path].append, parameter[binary_operation[constant[M%s,%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0f46500>, <ast.Name object at 0x7da1b0f44370>]]]]]
call[name[path].append, parameter[binary_operation[constant[A%s,%s 0 %d %d %s,%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0f479d0>, <ast.Name object at 0x7da1b0f47610>, <ast.Name object at 0x7da1b0f44a00>, <ast.Name object at 0x7da1b0f44310>, <ast.Name object at 0x7da1b0f46b60>, <ast.Name object at 0x7da1b0f44490>]]]]]
if compare[name[style] equal[==] name[ARC]] begin[:]
pass
return[call[name[setattribs], parameter[call[name[document].createElement, parameter[constant[path]]]]]] | keyword[def] identifier[arc] ( identifier[document] , identifier[bounding_rect] , identifier[start] , identifier[extent] , identifier[style] ):
literal[string]
( identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] )= identifier[bounding_rect]
keyword[import] identifier[math]
identifier[cx] =( identifier[x1] + identifier[x2] )/ literal[int]
identifier[cy] =( identifier[y1] + identifier[y2] )/ literal[int]
identifier[rx] =( identifier[x2] - identifier[x1] )/ literal[int]
identifier[ry] =( identifier[y2] - identifier[y1] )/ literal[int]
identifier[start] = identifier[math] . identifier[radians] ( identifier[float] ( identifier[start] ))
identifier[extent] = identifier[math] . identifier[radians] ( identifier[float] ( identifier[extent] ))
identifier[x1] = identifier[rx] * identifier[math] . identifier[cos] ( identifier[start] )+ identifier[cx]
identifier[y1] =- identifier[ry] * identifier[math] . identifier[sin] ( identifier[start] )+ identifier[cy]
identifier[x2] = identifier[rx] * identifier[math] . identifier[cos] ( identifier[start] + identifier[extent] )+ identifier[cx]
identifier[y2] =- identifier[ry] * identifier[math] . identifier[sin] ( identifier[start] + identifier[extent] )+ identifier[cy]
keyword[if] identifier[abs] ( identifier[extent] )> identifier[math] . identifier[pi] :
identifier[fa] = literal[int]
keyword[else] :
identifier[fa] = literal[int]
keyword[if] identifier[extent] > literal[int] :
identifier[fs] = literal[int]
keyword[else] :
identifier[fs] = literal[int]
identifier[path] =[]
identifier[path] . identifier[append] ( literal[string] %( identifier[x1] , identifier[y1] ))
identifier[path] . identifier[append] ( literal[string] %( identifier[rx] , identifier[ry] , identifier[fa] , identifier[fs] , identifier[x2] , identifier[y2] ))
keyword[if] identifier[style] == identifier[ARC] :
keyword[pass]
keyword[elif] identifier[style] == identifier[CHORD] :
identifier[path] . identifier[append] ( literal[string] )
keyword[else] :
identifier[path] . identifier[append] ( literal[string] %( identifier[cx] , identifier[cy] ))
identifier[path] . identifier[append] ( literal[string] )
keyword[return] identifier[setattribs] ( identifier[document] . identifier[createElement] ( literal[string] ), identifier[d] = literal[string] . identifier[join] ( identifier[path] )) | def arc(document, bounding_rect, start, extent, style):
"""arc, pieslice (filled), arc with chord (filled)"""
(x1, y1, x2, y2) = bounding_rect
import math
cx = (x1 + x2) / 2.0
cy = (y1 + y2) / 2.0
rx = (x2 - x1) / 2.0
ry = (y2 - y1) / 2.0
start = math.radians(float(start))
extent = math.radians(float(extent)) # from SVG spec:
# http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes
x1 = rx * math.cos(start) + cx
y1 = -ry * math.sin(start) + cy # XXX: ry is negated here
x2 = rx * math.cos(start + extent) + cx
y2 = -ry * math.sin(start + extent) + cy # XXX: ry is negated here
if abs(extent) > math.pi:
fa = 1 # depends on [control=['if'], data=[]]
else:
fa = 0
if extent > 0.0:
fs = 0 # depends on [control=['if'], data=[]]
else:
fs = 1
path = [] # common: arc
path.append('M%s,%s' % (x1, y1))
path.append('A%s,%s 0 %d %d %s,%s' % (rx, ry, fa, fs, x2, y2))
if style == ARC:
pass # depends on [control=['if'], data=[]]
elif style == CHORD:
path.append('z') # depends on [control=['if'], data=[]]
else: # default: pieslice
path.append('L%s,%s' % (cx, cy))
path.append('z')
return setattribs(document.createElement('path'), d=''.join(path)) |
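A self-contained sketch of calling arc, with xml.dom.minidom standing in for `document`. The setattribs helper and the ARC/CHORD/PIESLICE constants live elsewhere in the original module, so minimal tkinter-style stand-ins (assumptions) are defined here:

from xml.dom.minidom import Document

ARC, CHORD, PIESLICE = 'arc', 'chord', 'pieslice'   # assumed tkinter-style constants

def setattribs(element, **attrs):                   # minimal stand-in for the module helper
    for name, value in attrs.items():
        element.setAttribute(name, str(value))
    return element

doc = Document()
path = arc(doc, (0, 0, 100, 50), start=0, extent=90, style=PIESLICE)
path.toxml()   # e.g. '<path d="M100.0,25.0A50.0,25.0 0 0 0 50.0,0.0L50.0,25.0z"/>'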
def clear_marking(self):
"""Clear marking from image.
This does not clear loaded coordinates from memory."""
if self.marktag:
try:
self.canvas.delete_object_by_tag(self.marktag, redraw=False)
except Exception:
pass
if self.markhltag:
try:
self.canvas.delete_object_by_tag(self.markhltag, redraw=False)
except Exception:
pass
self.treeview.clear() # Clear table too
self.w.nshown.set_text('0')
self.fitsimage.redraw() | def function[clear_marking, parameter[self]]:
constant[Clear marking from image.
This does not clear loaded coordinates from memory.]
if name[self].marktag begin[:]
<ast.Try object at 0x7da18eb57d30>
if name[self].markhltag begin[:]
<ast.Try object at 0x7da18eb57dc0>
call[name[self].treeview.clear, parameter[]]
call[name[self].w.nshown.set_text, parameter[constant[0]]]
call[name[self].fitsimage.redraw, parameter[]] | keyword[def] identifier[clear_marking] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[marktag] :
keyword[try] :
identifier[self] . identifier[canvas] . identifier[delete_object_by_tag] ( identifier[self] . identifier[marktag] , identifier[redraw] = keyword[False] )
keyword[except] identifier[Exception] :
keyword[pass]
keyword[if] identifier[self] . identifier[markhltag] :
keyword[try] :
identifier[self] . identifier[canvas] . identifier[delete_object_by_tag] ( identifier[self] . identifier[markhltag] , identifier[redraw] = keyword[False] )
keyword[except] identifier[Exception] :
keyword[pass]
identifier[self] . identifier[treeview] . identifier[clear] ()
identifier[self] . identifier[w] . identifier[nshown] . identifier[set_text] ( literal[string] )
identifier[self] . identifier[fitsimage] . identifier[redraw] () | def clear_marking(self):
"""Clear marking from image.
This does not clear loaded coordinates from memory."""
if self.marktag:
try:
self.canvas.delete_object_by_tag(self.marktag, redraw=False) # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if self.markhltag:
try:
self.canvas.delete_object_by_tag(self.markhltag, redraw=False) # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
self.treeview.clear() # Clear table too
self.w.nshown.set_text('0')
self.fitsimage.redraw() |
def _depth_first_search(self, target_id, layer_id_list, node_list):
"""Search for all the layers and nodes down the path.
A recursive function to search all the layers and nodes between the node in the node_list
and the node with target_id."""
assert len(node_list) <= self.n_nodes
u = node_list[-1]
if u == target_id:
return True
for v, layer_id in self.adj_list[u]:
layer_id_list.append(layer_id)
node_list.append(v)
if self._depth_first_search(target_id, layer_id_list, node_list):
return True
layer_id_list.pop()
node_list.pop()
return False | def function[_depth_first_search, parameter[self, target_id, layer_id_list, node_list]]:
constant[Search for all the layers and nodes down the path.
A recursive function to search all the layers and nodes between the node in the node_list
and the node with target_id.]
assert[compare[call[name[len], parameter[name[node_list]]] less_or_equal[<=] name[self].n_nodes]]
variable[u] assign[=] call[name[node_list]][<ast.UnaryOp object at 0x7da1b2041d20>]
if compare[name[u] equal[==] name[target_id]] begin[:]
return[constant[True]]
for taget[tuple[[<ast.Name object at 0x7da1b20432e0>, <ast.Name object at 0x7da1b2043070>]]] in starred[call[name[self].adj_list][name[u]]] begin[:]
call[name[layer_id_list].append, parameter[name[layer_id]]]
call[name[node_list].append, parameter[name[v]]]
if call[name[self]._depth_first_search, parameter[name[target_id], name[layer_id_list], name[node_list]]] begin[:]
return[constant[True]]
call[name[layer_id_list].pop, parameter[]]
call[name[node_list].pop, parameter[]]
return[constant[False]] | keyword[def] identifier[_depth_first_search] ( identifier[self] , identifier[target_id] , identifier[layer_id_list] , identifier[node_list] ):
literal[string]
keyword[assert] identifier[len] ( identifier[node_list] )<= identifier[self] . identifier[n_nodes]
identifier[u] = identifier[node_list] [- literal[int] ]
keyword[if] identifier[u] == identifier[target_id] :
keyword[return] keyword[True]
keyword[for] identifier[v] , identifier[layer_id] keyword[in] identifier[self] . identifier[adj_list] [ identifier[u] ]:
identifier[layer_id_list] . identifier[append] ( identifier[layer_id] )
identifier[node_list] . identifier[append] ( identifier[v] )
keyword[if] identifier[self] . identifier[_depth_first_search] ( identifier[target_id] , identifier[layer_id_list] , identifier[node_list] ):
keyword[return] keyword[True]
identifier[layer_id_list] . identifier[pop] ()
identifier[node_list] . identifier[pop] ()
keyword[return] keyword[False] | def _depth_first_search(self, target_id, layer_id_list, node_list):
"""Search for all the layers and nodes down the path.
A recursive function to search all the layers and nodes between the node in the node_list
and the node with target_id."""
assert len(node_list) <= self.n_nodes
u = node_list[-1]
if u == target_id:
return True # depends on [control=['if'], data=[]]
for (v, layer_id) in self.adj_list[u]:
layer_id_list.append(layer_id)
node_list.append(v)
if self._depth_first_search(target_id, layer_id_list, node_list):
return True # depends on [control=['if'], data=[]]
layer_id_list.pop()
node_list.pop() # depends on [control=['for'], data=[]]
return False |
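A small sketch of the recursive search on a hypothetical four-node DAG; the owner class only needs n_nodes and an adj_list mapping each node to (neighbour, layer_id) pairs, so a minimal stand-in suffices:

class Graph:                                    # minimal stand-in for the owner class
    _depth_first_search = _depth_first_search   # reuse the method defined above
    def __init__(self, n_nodes, adj_list):
        self.n_nodes, self.adj_list = n_nodes, adj_list

g = Graph(4, {0: [(1, 'a'), (2, 'b')], 1: [(3, 'c')], 2: [], 3: []})
layer_ids, nodes = [], [0]                      # search starts from node 0
g._depth_first_search(3, layer_ids, nodes)      # -> True
layer_ids, nodes                                # -> ['a', 'c'], [0, 1, 3]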
def quarterly(date=datetime.date.today()):
"""
Fixed at: 1/1, 4/1, 7/1, 10/1.
"""
return datetime.date(date.year, ((date.month - 1)//3) * 3 + 1, 1) | def function[quarterly, parameter[date]]:
constant[
Fixed at: 1/1, 4/1, 7/1, 10/1.
]
return[call[name[datetime].date, parameter[name[date].year, binary_operation[binary_operation[binary_operation[binary_operation[name[date].month - constant[1]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[3]] * constant[3]] + constant[1]], constant[1]]]] | keyword[def] identifier[quarterly] ( identifier[date] = identifier[datetime] . identifier[date] . identifier[today] ()):
literal[string]
keyword[return] identifier[datetime] . identifier[date] ( identifier[date] . identifier[year] ,(( identifier[date] . identifier[month] - literal[int] )// literal[int] )* literal[int] + literal[int] , literal[int] ) | def quarterly(date=datetime.date.today()):
"""
Fixed at: 1/1, 4/1, 7/1, 10/1.
"""
return datetime.date(date.year, (date.month - 1) // 3 * 3 + 1, 1) |
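Usage is straightforward, but note the default-argument pitfall: datetime.date.today() is evaluated once when the def statement runs, so a long-running process keeps its start-up date; pass the date explicitly instead.

import datetime

quarterly(datetime.date(2023, 8, 17))   # -> datetime.date(2023, 7, 1)
quarterly(datetime.date(2023, 1, 31))   # -> datetime.date(2023, 1, 1)
quarterly()                             # quarter of the date at import time, not of today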
def set_alive(self):
"""Set alive, reachable, and reset attempts.
If we change state, raise a status brok update
        alive, means the daemon is present in the system
reachable, means that the HTTP connection is valid
With this function we confirm that the daemon is reachable and, thus, we assume it is alive!
:return: None
"""
was_alive = self.alive
self.alive = True
self.reachable = True
self.attempt = 0
# We came from dead to alive! We must propagate the good news
if not was_alive:
logger.info("Setting %s satellite as alive :)", self.name)
self.broks.append(self.get_update_status_brok()) | def function[set_alive, parameter[self]]:
constant[Set alive, reachable, and reset attempts.
If we change state, raise a status brok update
alive, means the daemon is prenset in the system
reachable, means that the HTTP connection is valid
With this function we confirm that the daemon is reachable and, thus, we assume it is alive!
:return: None
]
variable[was_alive] assign[=] name[self].alive
name[self].alive assign[=] constant[True]
name[self].reachable assign[=] constant[True]
name[self].attempt assign[=] constant[0]
if <ast.UnaryOp object at 0x7da1b23477c0> begin[:]
call[name[logger].info, parameter[constant[Setting %s satellite as alive :)], name[self].name]]
call[name[self].broks.append, parameter[call[name[self].get_update_status_brok, parameter[]]]] | keyword[def] identifier[set_alive] ( identifier[self] ):
literal[string]
identifier[was_alive] = identifier[self] . identifier[alive]
identifier[self] . identifier[alive] = keyword[True]
identifier[self] . identifier[reachable] = keyword[True]
identifier[self] . identifier[attempt] = literal[int]
keyword[if] keyword[not] identifier[was_alive] :
identifier[logger] . identifier[info] ( literal[string] , identifier[self] . identifier[name] )
identifier[self] . identifier[broks] . identifier[append] ( identifier[self] . identifier[get_update_status_brok] ()) | def set_alive(self):
"""Set alive, reachable, and reset attempts.
If we change state, raise a status brok update
    alive, means the daemon is present in the system
reachable, means that the HTTP connection is valid
With this function we confirm that the daemon is reachable and, thus, we assume it is alive!
:return: None
"""
was_alive = self.alive
self.alive = True
self.reachable = True
self.attempt = 0
# We came from dead to alive! We must propagate the good news
if not was_alive:
logger.info('Setting %s satellite as alive :)', self.name)
self.broks.append(self.get_update_status_brok()) # depends on [control=['if'], data=[]] |
def execute_process_async(func, *args, **kwargs):
"""
Executes `func` in a separate process. Memory and other resources are not
available. This gives true concurrency at the cost of losing access to
    these resources. `args` and `kwargs` are passed through to `func`.
"""
global _GIPC_EXECUTOR
if _GIPC_EXECUTOR is None:
_GIPC_EXECUTOR = GIPCExecutor(
num_procs=settings.node.gipc_pool_size,
num_greenlets=settings.node.greenlet_pool_size)
return _GIPC_EXECUTOR.submit(func, *args, **kwargs) | def function[execute_process_async, parameter[func]]:
constant[
Executes `func` in a separate process. Memory and other resources are not
available. This gives true concurrency at the cost of losing access to
    these resources. `args` and `kwargs` are passed through to `func`.
]
<ast.Global object at 0x7da18f09f8e0>
if compare[name[_GIPC_EXECUTOR] is constant[None]] begin[:]
variable[_GIPC_EXECUTOR] assign[=] call[name[GIPCExecutor], parameter[]]
return[call[name[_GIPC_EXECUTOR].submit, parameter[name[func], <ast.Starred object at 0x7da18f09d2d0>]]] | keyword[def] identifier[execute_process_async] ( identifier[func] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[global] identifier[_GIPC_EXECUTOR]
keyword[if] identifier[_GIPC_EXECUTOR] keyword[is] keyword[None] :
identifier[_GIPC_EXECUTOR] = identifier[GIPCExecutor] (
identifier[num_procs] = identifier[settings] . identifier[node] . identifier[gipc_pool_size] ,
identifier[num_greenlets] = identifier[settings] . identifier[node] . identifier[greenlet_pool_size] )
keyword[return] identifier[_GIPC_EXECUTOR] . identifier[submit] ( identifier[func] ,* identifier[args] ,** identifier[kwargs] ) | def execute_process_async(func, *args, **kwargs):
"""
Executes `func` in a separate process. Memory and other resources are not
available. This gives true concurrency at the cost of losing access to
    these resources. `args` and `kwargs` are passed through to `func`.
"""
global _GIPC_EXECUTOR
if _GIPC_EXECUTOR is None:
_GIPC_EXECUTOR = GIPCExecutor(num_procs=settings.node.gipc_pool_size, num_greenlets=settings.node.greenlet_pool_size) # depends on [control=['if'], data=['_GIPC_EXECUTOR']]
return _GIPC_EXECUTOR.submit(func, *args, **kwargs) |
def focus_next(self):
"""focus next message in depth first order"""
mid = self.get_selected_mid()
newpos = self._tree.next_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos) | def function[focus_next, parameter[self]]:
constant[focus next message in depth first order]
variable[mid] assign[=] call[name[self].get_selected_mid, parameter[]]
variable[newpos] assign[=] call[name[self]._tree.next_position, parameter[name[mid]]]
if compare[name[newpos] is_not constant[None]] begin[:]
variable[newpos] assign[=] call[name[self]._sanitize_position, parameter[tuple[[<ast.Name object at 0x7da1b07d0370>]]]]
call[name[self].body.set_focus, parameter[name[newpos]]] | keyword[def] identifier[focus_next] ( identifier[self] ):
literal[string]
identifier[mid] = identifier[self] . identifier[get_selected_mid] ()
identifier[newpos] = identifier[self] . identifier[_tree] . identifier[next_position] ( identifier[mid] )
keyword[if] identifier[newpos] keyword[is] keyword[not] keyword[None] :
identifier[newpos] = identifier[self] . identifier[_sanitize_position] (( identifier[newpos] ,))
identifier[self] . identifier[body] . identifier[set_focus] ( identifier[newpos] ) | def focus_next(self):
"""focus next message in depth first order"""
mid = self.get_selected_mid()
newpos = self._tree.next_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos) # depends on [control=['if'], data=['newpos']] |
def set_params(self, targets=None):
'''Set the values of the parameters to the given target values.
Parameters
----------
targets : sequence of ndarray, optional
Arrays for setting the parameters of our model. If this is not
provided, the current best parameters for this optimizer will be
used.
'''
if not isinstance(targets, (list, tuple)):
targets = self._best_params
for param, target in zip(self._params, targets):
param.set_value(target) | def function[set_params, parameter[self, targets]]:
constant[Set the values of the parameters to the given target values.
Parameters
----------
targets : sequence of ndarray, optional
Arrays for setting the parameters of our model. If this is not
provided, the current best parameters for this optimizer will be
used.
]
if <ast.UnaryOp object at 0x7da1b0f2b310> begin[:]
variable[targets] assign[=] name[self]._best_params
for taget[tuple[[<ast.Name object at 0x7da1b0f285e0>, <ast.Name object at 0x7da1b0f2bf70>]]] in starred[call[name[zip], parameter[name[self]._params, name[targets]]]] begin[:]
call[name[param].set_value, parameter[name[target]]] | keyword[def] identifier[set_params] ( identifier[self] , identifier[targets] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[targets] ,( identifier[list] , identifier[tuple] )):
identifier[targets] = identifier[self] . identifier[_best_params]
keyword[for] identifier[param] , identifier[target] keyword[in] identifier[zip] ( identifier[self] . identifier[_params] , identifier[targets] ):
identifier[param] . identifier[set_value] ( identifier[target] ) | def set_params(self, targets=None):
"""Set the values of the parameters to the given target values.
Parameters
----------
targets : sequence of ndarray, optional
Arrays for setting the parameters of our model. If this is not
provided, the current best parameters for this optimizer will be
used.
"""
if not isinstance(targets, (list, tuple)):
targets = self._best_params # depends on [control=['if'], data=[]]
for (param, target) in zip(self._params, targets):
param.set_value(target) # depends on [control=['for'], data=[]] |
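A sketch with a minimal stand-in for the parameter objects (the real code targets Theano shared variables, but only set_value is required here); the Optimizer owner class below is hypothetical:

import numpy as np

class Param:                                    # stand-in exposing set_value/get_value
    def __init__(self, value): self._value = np.asarray(value)
    def set_value(self, value): self._value = np.asarray(value)
    def get_value(self): return self._value

class Optimizer:                                # hypothetical owner class
    set_params = set_params                     # reuse the method defined above
    def __init__(self, params, best):
        self._params, self._best_params = params, best

opt = Optimizer([Param([0.0, 0.0])], [np.array([1.0, 2.0])])
opt.set_params()                                # no targets -> fall back to stored best
opt._params[0].get_value()                      # -> array([1., 2.])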
def _add_action(self, notification, action, label, callback, *args):
"""
        Show an action button in mount notifications.
Note, this only works with some libnotify services.
"""
on_action_click = run_bg(lambda *_: callback(*args))
try:
# this is the correct signature for Notify-0.7, the last argument
# being 'user_data':
notification.add_action(action, label, on_action_click, None)
except TypeError:
# this is the signature for some older version, I don't know what
# the last argument is for.
notification.add_action(action, label, on_action_click, None, None)
# gi.Notify does not store hard references to the notification
# objects. When a signal is received and the notification does not
# exist anymore, no handler will be called. Therefore, we need to
# prevent these notifications from being destroyed by storing
# references:
notification.connect('closed', self._notifications.remove)
self._notifications.append(notification) | def function[_add_action, parameter[self, notification, action, label, callback]]:
constant[
    Show an action button in mount notifications.
Note, this only works with some libnotify services.
]
variable[on_action_click] assign[=] call[name[run_bg], parameter[<ast.Lambda object at 0x7da20c7cbbe0>]]
<ast.Try object at 0x7da20e9b0ee0>
call[name[notification].connect, parameter[constant[closed], name[self]._notifications.remove]]
call[name[self]._notifications.append, parameter[name[notification]]] | keyword[def] identifier[_add_action] ( identifier[self] , identifier[notification] , identifier[action] , identifier[label] , identifier[callback] ,* identifier[args] ):
literal[string]
identifier[on_action_click] = identifier[run_bg] ( keyword[lambda] * identifier[_] : identifier[callback] (* identifier[args] ))
keyword[try] :
identifier[notification] . identifier[add_action] ( identifier[action] , identifier[label] , identifier[on_action_click] , keyword[None] )
keyword[except] identifier[TypeError] :
identifier[notification] . identifier[add_action] ( identifier[action] , identifier[label] , identifier[on_action_click] , keyword[None] , keyword[None] )
identifier[notification] . identifier[connect] ( literal[string] , identifier[self] . identifier[_notifications] . identifier[remove] )
identifier[self] . identifier[_notifications] . identifier[append] ( identifier[notification] ) | def _add_action(self, notification, action, label, callback, *args):
"""
    Show an action button in mount notifications.
Note, this only works with some libnotify services.
"""
on_action_click = run_bg(lambda *_: callback(*args))
try:
# this is the correct signature for Notify-0.7, the last argument
# being 'user_data':
notification.add_action(action, label, on_action_click, None) # depends on [control=['try'], data=[]]
except TypeError:
# this is the signature for some older version, I don't know what
# the last argument is for.
notification.add_action(action, label, on_action_click, None, None) # depends on [control=['except'], data=[]]
# gi.Notify does not store hard references to the notification
# objects. When a signal is received and the notification does not
# exist anymore, no handler will be called. Therefore, we need to
# prevent these notifications from being destroyed by storing
# references:
notification.connect('closed', self._notifications.remove)
self._notifications.append(notification) |
def specimens_extract(spec_file='specimens.txt', output_file='specimens.xls', landscape=False,
longtable=False, output_dir_path='.', input_dir_path='', latex=False):
"""
Extracts specimen results from a MagIC 3.0 format specimens.txt file.
    Default output format is an Excel file. With latex=True, output is a
    LaTeX-formatted table that you can typeset with latex on your own computer.
Parameters
___________
spec_file : str, default "specimens.txt"
input file name
output_file : str, default "specimens.xls"
output file name
landscape : boolean, default False
if True output latex landscape table
longtable : boolean
if True output latex longtable
output_dir_path : str, default "."
output file directory
input_dir_path : str, default ""
    path for input file if different from output_dir_path (default is same)
latex : boolean, default False
if True, output file should be latex formatted table with a .tex ending
Return :
[True,False], data table error type : True if successful
Effects :
writes xls or latex formatted tables for use in publications
"""
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, output_dir_path)
try:
fname = pmag.resolve_file_name(spec_file, input_dir_path)
except IOError:
print("bad specimen file name")
return False, "bad specimen file name"
spec_df = pd.read_csv(fname, sep='\t', header=1)
spec_df.dropna('columns', how='all', inplace=True)
if 'int_abs' in spec_df.columns:
spec_df.dropna(subset=['int_abs'], inplace=True)
if len(spec_df) > 0:
table_df = map_magic.convert_specimen_dm3_table(spec_df)
out_file = pmag.resolve_file_name(output_file, output_dir_path)
if latex:
if out_file.endswith('.xls'):
out_file = out_file.rsplit('.')[0] + ".tex"
info_out = open(out_file, 'w+', errors="backslashreplace")
info_out.write('\documentclass{article}\n')
info_out.write('\\usepackage{booktabs}\n')
if landscape:
info_out.write('\\usepackage{lscape}')
if longtable:
info_out.write('\\usepackage{longtable}\n')
info_out.write('\\begin{document}\n')
if landscape:
info_out.write('\\begin{landscape}\n')
info_out.write(table_df.to_latex(index=False, longtable=longtable,
escape=True, multicolumn=False))
if landscape:
info_out.write('\end{landscape}\n')
info_out.write('\end{document}\n')
info_out.close()
else:
table_df.to_excel(out_file, index=False)
else:
print("No specimen data for ouput.")
return True, [out_file] | def function[specimens_extract, parameter[spec_file, output_file, landscape, longtable, output_dir_path, input_dir_path, latex]]:
constant[
Extracts specimen results from a MagIC 3.0 format specimens.txt file.
    Default output format is an Excel file. With latex=True, output is a
    LaTeX-formatted table that you can typeset with latex on your own computer.
Parameters
___________
spec_file : str, default "specimens.txt"
input file name
output_file : str, default "specimens.xls"
output file name
landscape : boolean, default False
if True output latex landscape table
longtable : boolean
if True output latex longtable
output_dir_path : str, default "."
output file directory
input_dir_path : str, default ""
    path for input file if different from output_dir_path (default is same)
latex : boolean, default False
if True, output file should be latex formatted table with a .tex ending
Return :
[True,False], data table error type : True if successful
Effects :
writes xls or latex formatted tables for use in publications
]
<ast.Tuple object at 0x7da2044c1120> assign[=] call[name[pmag].fix_directories, parameter[name[input_dir_path], name[output_dir_path]]]
<ast.Try object at 0x7da2044c03a0>
variable[spec_df] assign[=] call[name[pd].read_csv, parameter[name[fname]]]
call[name[spec_df].dropna, parameter[constant[columns]]]
if compare[constant[int_abs] in name[spec_df].columns] begin[:]
call[name[spec_df].dropna, parameter[]]
if compare[call[name[len], parameter[name[spec_df]]] greater[>] constant[0]] begin[:]
variable[table_df] assign[=] call[name[map_magic].convert_specimen_dm3_table, parameter[name[spec_df]]]
variable[out_file] assign[=] call[name[pmag].resolve_file_name, parameter[name[output_file], name[output_dir_path]]]
if name[latex] begin[:]
if call[name[out_file].endswith, parameter[constant[.xls]]] begin[:]
variable[out_file] assign[=] binary_operation[call[call[name[out_file].rsplit, parameter[constant[.]]]][constant[0]] + constant[.tex]]
variable[info_out] assign[=] call[name[open], parameter[name[out_file], constant[w+]]]
call[name[info_out].write, parameter[constant[\documentclass{article}
]]]
call[name[info_out].write, parameter[constant[\usepackage{booktabs}
]]]
if name[landscape] begin[:]
call[name[info_out].write, parameter[constant[\usepackage{lscape}]]]
if name[longtable] begin[:]
call[name[info_out].write, parameter[constant[\usepackage{longtable}
]]]
call[name[info_out].write, parameter[constant[\begin{document}
]]]
if name[landscape] begin[:]
call[name[info_out].write, parameter[constant[\begin{landscape}
]]]
call[name[info_out].write, parameter[call[name[table_df].to_latex, parameter[]]]]
if name[landscape] begin[:]
call[name[info_out].write, parameter[constant[\end{landscape}
]]]
call[name[info_out].write, parameter[constant[\end{document}
]]]
call[name[info_out].close, parameter[]]
return[tuple[[<ast.Constant object at 0x7da18dc9a320>, <ast.List object at 0x7da18dc9a1a0>]]] | keyword[def] identifier[specimens_extract] ( identifier[spec_file] = literal[string] , identifier[output_file] = literal[string] , identifier[landscape] = keyword[False] ,
identifier[longtable] = keyword[False] , identifier[output_dir_path] = literal[string] , identifier[input_dir_path] = literal[string] , identifier[latex] = keyword[False] ):
literal[string]
identifier[input_dir_path] , identifier[output_dir_path] = identifier[pmag] . identifier[fix_directories] ( identifier[input_dir_path] , identifier[output_dir_path] )
keyword[try] :
identifier[fname] = identifier[pmag] . identifier[resolve_file_name] ( identifier[spec_file] , identifier[input_dir_path] )
keyword[except] identifier[IOError] :
identifier[print] ( literal[string] )
keyword[return] keyword[False] , literal[string]
identifier[spec_df] = identifier[pd] . identifier[read_csv] ( identifier[fname] , identifier[sep] = literal[string] , identifier[header] = literal[int] )
identifier[spec_df] . identifier[dropna] ( literal[string] , identifier[how] = literal[string] , identifier[inplace] = keyword[True] )
keyword[if] literal[string] keyword[in] identifier[spec_df] . identifier[columns] :
identifier[spec_df] . identifier[dropna] ( identifier[subset] =[ literal[string] ], identifier[inplace] = keyword[True] )
keyword[if] identifier[len] ( identifier[spec_df] )> literal[int] :
identifier[table_df] = identifier[map_magic] . identifier[convert_specimen_dm3_table] ( identifier[spec_df] )
identifier[out_file] = identifier[pmag] . identifier[resolve_file_name] ( identifier[output_file] , identifier[output_dir_path] )
keyword[if] identifier[latex] :
keyword[if] identifier[out_file] . identifier[endswith] ( literal[string] ):
identifier[out_file] = identifier[out_file] . identifier[rsplit] ( literal[string] )[ literal[int] ]+ literal[string]
identifier[info_out] = identifier[open] ( identifier[out_file] , literal[string] , identifier[errors] = literal[string] )
identifier[info_out] . identifier[write] ( literal[string] )
identifier[info_out] . identifier[write] ( literal[string] )
keyword[if] identifier[landscape] :
identifier[info_out] . identifier[write] ( literal[string] )
keyword[if] identifier[longtable] :
identifier[info_out] . identifier[write] ( literal[string] )
identifier[info_out] . identifier[write] ( literal[string] )
keyword[if] identifier[landscape] :
identifier[info_out] . identifier[write] ( literal[string] )
identifier[info_out] . identifier[write] ( identifier[table_df] . identifier[to_latex] ( identifier[index] = keyword[False] , identifier[longtable] = identifier[longtable] ,
identifier[escape] = keyword[True] , identifier[multicolumn] = keyword[False] ))
keyword[if] identifier[landscape] :
identifier[info_out] . identifier[write] ( literal[string] )
identifier[info_out] . identifier[write] ( literal[string] )
identifier[info_out] . identifier[close] ()
keyword[else] :
identifier[table_df] . identifier[to_excel] ( identifier[out_file] , identifier[index] = keyword[False] )
keyword[else] :
identifier[print] ( literal[string] )
keyword[return] keyword[True] ,[ identifier[out_file] ] | def specimens_extract(spec_file='specimens.txt', output_file='specimens.xls', landscape=False, longtable=False, output_dir_path='.', input_dir_path='', latex=False):
"""
Extracts specimen results from a MagIC 3.0 format specimens.txt file.
Default output format is an Excel file.
typeset with latex on your own computer.
Parameters
___________
spec_file : str, default "specimens.txt"
input file name
output_file : str, default "specimens.xls"
output file name
landscape : boolean, default False
if True output latex landscape table
longtable : boolean
if True output latex longtable
output_dir_path : str, default "."
output file directory
input_dir_path : str, default ""
    path for input file if different from output_dir_path (default is same)
latex : boolean, default False
if True, output file should be latex formatted table with a .tex ending
Return :
[True,False], data table error type : True if successful
Effects :
writes xls or latex formatted tables for use in publications
"""
(input_dir_path, output_dir_path) = pmag.fix_directories(input_dir_path, output_dir_path)
try:
fname = pmag.resolve_file_name(spec_file, input_dir_path) # depends on [control=['try'], data=[]]
except IOError:
print('bad specimen file name')
return (False, 'bad specimen file name') # depends on [control=['except'], data=[]]
spec_df = pd.read_csv(fname, sep='\t', header=1)
spec_df.dropna('columns', how='all', inplace=True)
if 'int_abs' in spec_df.columns:
spec_df.dropna(subset=['int_abs'], inplace=True) # depends on [control=['if'], data=[]]
if len(spec_df) > 0:
table_df = map_magic.convert_specimen_dm3_table(spec_df)
out_file = pmag.resolve_file_name(output_file, output_dir_path)
if latex:
if out_file.endswith('.xls'):
out_file = out_file.rsplit('.')[0] + '.tex' # depends on [control=['if'], data=[]]
info_out = open(out_file, 'w+', errors='backslashreplace')
info_out.write('\\documentclass{article}\n')
info_out.write('\\usepackage{booktabs}\n')
if landscape:
info_out.write('\\usepackage{lscape}') # depends on [control=['if'], data=[]]
if longtable:
info_out.write('\\usepackage{longtable}\n') # depends on [control=['if'], data=[]]
info_out.write('\\begin{document}\n')
if landscape:
info_out.write('\\begin{landscape}\n') # depends on [control=['if'], data=[]]
info_out.write(table_df.to_latex(index=False, longtable=longtable, escape=True, multicolumn=False))
if landscape:
info_out.write('\\end{landscape}\n') # depends on [control=['if'], data=[]]
info_out.write('\\end{document}\n')
info_out.close() # depends on [control=['if'], data=[]]
else:
table_df.to_excel(out_file, index=False) # depends on [control=['if'], data=[]]
else:
        print('No specimen data for output.')
return (True, [out_file]) |
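A hedged call sketch, assuming a MagIC-format specimens.txt sits in the working directory. One caveat visible above: when no rows survive the int_abs filter, the final return still references out_file, which is never assigned on that path.

ok, files = specimens_extract(spec_file='specimens.txt',
                              output_file='specimens.tex',
                              latex=True, longtable=True)
# ok -> True, files -> ['<output_dir>/specimens.tex'] on success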
def abort():
"""ABORT Section 9.2.8"""
a = TpPd(pd=0x5)
b = MessageType(mesType=0x29) # 00101001
c = RejectCause()
packet = a / b / c
return packet | def function[abort, parameter[]]:
constant[ABORT Section 9.2.8]
variable[a] assign[=] call[name[TpPd], parameter[]]
variable[b] assign[=] call[name[MessageType], parameter[]]
variable[c] assign[=] call[name[RejectCause], parameter[]]
variable[packet] assign[=] binary_operation[binary_operation[name[a] / name[b]] / name[c]]
return[name[packet]] | keyword[def] identifier[abort] ():
literal[string]
identifier[a] = identifier[TpPd] ( identifier[pd] = literal[int] )
identifier[b] = identifier[MessageType] ( identifier[mesType] = literal[int] )
identifier[c] = identifier[RejectCause] ()
identifier[packet] = identifier[a] / identifier[b] / identifier[c]
keyword[return] identifier[packet] | def abort():
"""ABORT Section 9.2.8"""
a = TpPd(pd=5)
b = MessageType(mesType=41) # 00101001
c = RejectCause()
packet = a / b / c
return packet |
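A usage sketch, assuming the GSM Um layer classes (TpPd, MessageType, RejectCause) used above are in scope; Scapy's `/` operator stacks the three layers in order:

pkt = abort()
pkt.summary()       # layer chain: TpPd / MessageType / RejectCause
raw = bytes(pkt)    # serialize; leading octets carry pd=0x5 and mesType=0x29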
def visit_ClassDef(self, node): # pylint: disable=invalid-name
"""Visit top-level classes."""
# Resolve everything as root scope contains everything from the process module.
for base in node.bases:
# Cover `from resolwe.process import ...`.
if isinstance(base, ast.Name) and isinstance(base.ctx, ast.Load):
base = getattr(runtime, base.id, None)
# Cover `from resolwe import process`.
elif isinstance(base, ast.Attribute) and isinstance(base.ctx, ast.Load):
base = getattr(runtime, base.attr, None)
else:
continue
if issubclass(base, runtime.Process):
break
else:
return
descriptor = ProcessDescriptor(source=self.source)
# Available embedded classes.
embedded_class_fields = {
runtime.PROCESS_INPUTS_NAME: descriptor.inputs,
runtime.PROCESS_OUTPUTS_NAME: descriptor.outputs,
}
# Parse metadata in class body.
for item in node.body:
if isinstance(item, ast.Assign):
# Possible metadata.
if (len(item.targets) == 1 and isinstance(item.targets[0], ast.Name)
and isinstance(item.targets[0].ctx, ast.Store)
and item.targets[0].id in PROCESS_METADATA):
# Try to get the metadata value.
value = PROCESS_METADATA[item.targets[0].id].get_value(item.value)
setattr(descriptor.metadata, item.targets[0].id, value)
elif (isinstance(item, ast.Expr) and isinstance(item.value, ast.Str)
and descriptor.metadata.description is None):
# Possible description string.
descriptor.metadata.description = item.value.s
elif isinstance(item, ast.ClassDef) and item.name in embedded_class_fields.keys():
# Possible input/output declaration.
self.visit_field_class(item, descriptor, embedded_class_fields[item.name])
descriptor.validate()
self.processes.append(descriptor) | def function[visit_ClassDef, parameter[self, node]]:
constant[Visit top-level classes.]
for taget[name[base]] in starred[name[node].bases] begin[:]
if <ast.BoolOp object at 0x7da1b1a9f430> begin[:]
variable[base] assign[=] call[name[getattr], parameter[name[runtime], name[base].id, constant[None]]]
if call[name[issubclass], parameter[name[base], name[runtime].Process]] begin[:]
break
variable[descriptor] assign[=] call[name[ProcessDescriptor], parameter[]]
variable[embedded_class_fields] assign[=] dictionary[[<ast.Attribute object at 0x7da1b1a2d630>, <ast.Attribute object at 0x7da1b1a2f010>], [<ast.Attribute object at 0x7da1b1a2e8c0>, <ast.Attribute object at 0x7da1b1a2e1a0>]]
for taget[name[item]] in starred[name[node].body] begin[:]
if call[name[isinstance], parameter[name[item], name[ast].Assign]] begin[:]
if <ast.BoolOp object at 0x7da1b1a2c370> begin[:]
variable[value] assign[=] call[call[name[PROCESS_METADATA]][call[name[item].targets][constant[0]].id].get_value, parameter[name[item].value]]
call[name[setattr], parameter[name[descriptor].metadata, call[name[item].targets][constant[0]].id, name[value]]]
call[name[descriptor].validate, parameter[]]
call[name[self].processes.append, parameter[name[descriptor]]] | keyword[def] identifier[visit_ClassDef] ( identifier[self] , identifier[node] ):
literal[string]
keyword[for] identifier[base] keyword[in] identifier[node] . identifier[bases] :
keyword[if] identifier[isinstance] ( identifier[base] , identifier[ast] . identifier[Name] ) keyword[and] identifier[isinstance] ( identifier[base] . identifier[ctx] , identifier[ast] . identifier[Load] ):
identifier[base] = identifier[getattr] ( identifier[runtime] , identifier[base] . identifier[id] , keyword[None] )
keyword[elif] identifier[isinstance] ( identifier[base] , identifier[ast] . identifier[Attribute] ) keyword[and] identifier[isinstance] ( identifier[base] . identifier[ctx] , identifier[ast] . identifier[Load] ):
identifier[base] = identifier[getattr] ( identifier[runtime] , identifier[base] . identifier[attr] , keyword[None] )
keyword[else] :
keyword[continue]
keyword[if] identifier[issubclass] ( identifier[base] , identifier[runtime] . identifier[Process] ):
keyword[break]
keyword[else] :
keyword[return]
identifier[descriptor] = identifier[ProcessDescriptor] ( identifier[source] = identifier[self] . identifier[source] )
identifier[embedded_class_fields] ={
identifier[runtime] . identifier[PROCESS_INPUTS_NAME] : identifier[descriptor] . identifier[inputs] ,
identifier[runtime] . identifier[PROCESS_OUTPUTS_NAME] : identifier[descriptor] . identifier[outputs] ,
}
keyword[for] identifier[item] keyword[in] identifier[node] . identifier[body] :
keyword[if] identifier[isinstance] ( identifier[item] , identifier[ast] . identifier[Assign] ):
keyword[if] ( identifier[len] ( identifier[item] . identifier[targets] )== literal[int] keyword[and] identifier[isinstance] ( identifier[item] . identifier[targets] [ literal[int] ], identifier[ast] . identifier[Name] )
keyword[and] identifier[isinstance] ( identifier[item] . identifier[targets] [ literal[int] ]. identifier[ctx] , identifier[ast] . identifier[Store] )
keyword[and] identifier[item] . identifier[targets] [ literal[int] ]. identifier[id] keyword[in] identifier[PROCESS_METADATA] ):
identifier[value] = identifier[PROCESS_METADATA] [ identifier[item] . identifier[targets] [ literal[int] ]. identifier[id] ]. identifier[get_value] ( identifier[item] . identifier[value] )
identifier[setattr] ( identifier[descriptor] . identifier[metadata] , identifier[item] . identifier[targets] [ literal[int] ]. identifier[id] , identifier[value] )
keyword[elif] ( identifier[isinstance] ( identifier[item] , identifier[ast] . identifier[Expr] ) keyword[and] identifier[isinstance] ( identifier[item] . identifier[value] , identifier[ast] . identifier[Str] )
keyword[and] identifier[descriptor] . identifier[metadata] . identifier[description] keyword[is] keyword[None] ):
identifier[descriptor] . identifier[metadata] . identifier[description] = identifier[item] . identifier[value] . identifier[s]
keyword[elif] identifier[isinstance] ( identifier[item] , identifier[ast] . identifier[ClassDef] ) keyword[and] identifier[item] . identifier[name] keyword[in] identifier[embedded_class_fields] . identifier[keys] ():
identifier[self] . identifier[visit_field_class] ( identifier[item] , identifier[descriptor] , identifier[embedded_class_fields] [ identifier[item] . identifier[name] ])
identifier[descriptor] . identifier[validate] ()
identifier[self] . identifier[processes] . identifier[append] ( identifier[descriptor] ) | def visit_ClassDef(self, node): # pylint: disable=invalid-name
'Visit top-level classes.'
# Resolve everything as root scope contains everything from the process module.
for base in node.bases:
# Cover `from resolwe.process import ...`.
if isinstance(base, ast.Name) and isinstance(base.ctx, ast.Load):
base = getattr(runtime, base.id, None) # depends on [control=['if'], data=[]]
# Cover `from resolwe import process`.
elif isinstance(base, ast.Attribute) and isinstance(base.ctx, ast.Load):
base = getattr(runtime, base.attr, None) # depends on [control=['if'], data=[]]
else:
continue
if issubclass(base, runtime.Process):
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['base']]
else:
return
descriptor = ProcessDescriptor(source=self.source)
# Available embedded classes.
embedded_class_fields = {runtime.PROCESS_INPUTS_NAME: descriptor.inputs, runtime.PROCESS_OUTPUTS_NAME: descriptor.outputs}
# Parse metadata in class body.
for item in node.body:
if isinstance(item, ast.Assign):
# Possible metadata.
if len(item.targets) == 1 and isinstance(item.targets[0], ast.Name) and isinstance(item.targets[0].ctx, ast.Store) and (item.targets[0].id in PROCESS_METADATA):
# Try to get the metadata value.
value = PROCESS_METADATA[item.targets[0].id].get_value(item.value)
setattr(descriptor.metadata, item.targets[0].id, value) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(item, ast.Expr) and isinstance(item.value, ast.Str) and (descriptor.metadata.description is None):
# Possible description string.
descriptor.metadata.description = item.value.s # depends on [control=['if'], data=[]]
elif isinstance(item, ast.ClassDef) and item.name in embedded_class_fields.keys():
# Possible input/output declaration.
self.visit_field_class(item, descriptor, embedded_class_fields[item.name]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
descriptor.validate()
self.processes.append(descriptor) |
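The visit_ClassDef method above follows the standard ast.NodeVisitor dispatch pattern. As a rough, self-contained sketch of that pattern (independent of the resolwe runtime, whose Process and ProcessDescriptor classes are not part of this row), with illustrative names only:

import ast

class BaseCollector(ast.NodeVisitor):
    # Minimal visitor: record each class together with its base names,
    # mirroring the base-resolution loop in visit_ClassDef above.
    def __init__(self):
        self.classes = {}

    def visit_ClassDef(self, node):
        self.classes[node.name] = [
            base.id if isinstance(base, ast.Name) else getattr(base, 'attr', '?')
            for base in node.bases
        ]

collector = BaseCollector()
collector.visit(ast.parse('class Foo(Process):\n    pass\n'))
print(collector.classes)  # {'Foo': ['Process']}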
def verify(self, tool):
"""
check that the tool exists
"""
if os.path.isfile(tool['file']):
print('Toolbox: program exists = TOK :: ' + tool['file'])
return True
else:
print('Toolbox: program exists = FAIL :: ' + tool['file'])
return False | def function[verify, parameter[self, tool]]:
constant[
check that the tool exists
]
if call[name[os].path.isfile, parameter[call[name[tool]][constant[file]]]] begin[:]
call[name[print], parameter[binary_operation[constant[Toolbox: program exists = TOK :: ] + call[name[tool]][constant[file]]]]]
return[constant[True]] | keyword[def] identifier[verify] ( identifier[self] , identifier[tool] ):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[tool] [ literal[string] ]):
identifier[print] ( literal[string] + identifier[tool] [ literal[string] ])
keyword[return] keyword[True]
keyword[else] :
identifier[print] ( literal[string] + identifier[tool] [ literal[string] ])
keyword[return] keyword[False] | def verify(self, tool):
"""
check that the tool exists
"""
if os.path.isfile(tool['file']):
print('Toolbox: program exists = TOK :: ' + tool['file'])
return True # depends on [control=['if'], data=[]]
else:
print('Toolbox: program exists = FAIL :: ' + tool['file'])
return False |
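A hedged usage sketch for verify(): `toolbox` is assumed to be an instance of whatever class defines the method (that class is not part of this row), and the tool dict shape is taken from the method body:

tool = {'file': '/usr/bin/env'}       # any filesystem path works for the check
if toolbox.verify(tool):              # toolbox: assumed owning instance
    print('safe to invoke', tool['file'])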
def ekappr(handle, segno):
"""
Append a new, empty record at the end of a specified E-kernel segment.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekappr_c.html
:param handle: File handle.
:type handle: int
:param segno: Segment number.
:type segno: int
:return: Number of appended record.
:rtype: int
"""
handle = ctypes.c_int(handle)
segno = ctypes.c_int(segno)
recno = ctypes.c_int()
libspice.ekappr_c(handle, segno, ctypes.byref(recno))
return recno.value | def function[ekappr, parameter[handle, segno]]:
constant[
Append a new, empty record at the end of a specified E-kernel segment.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekappr_c.html
:param handle: File handle.
:type handle: int
:param segno: Segment number.
:type segno: int
:return: Number of appended record.
:rtype: int
]
variable[handle] assign[=] call[name[ctypes].c_int, parameter[name[handle]]]
variable[segno] assign[=] call[name[ctypes].c_int, parameter[name[segno]]]
variable[recno] assign[=] call[name[ctypes].c_int, parameter[]]
call[name[libspice].ekappr_c, parameter[name[handle], name[segno], call[name[ctypes].byref, parameter[name[recno]]]]]
return[name[recno].value] | keyword[def] identifier[ekappr] ( identifier[handle] , identifier[segno] ):
literal[string]
identifier[handle] = identifier[ctypes] . identifier[c_int] ( identifier[handle] )
identifier[segno] = identifier[ctypes] . identifier[c_int] ( identifier[segno] )
identifier[recno] = identifier[ctypes] . identifier[c_int] ()
identifier[libspice] . identifier[ekappr_c] ( identifier[handle] , identifier[segno] , identifier[ctypes] . identifier[byref] ( identifier[recno] ))
keyword[return] identifier[recno] . identifier[value] | def ekappr(handle, segno):
"""
Append a new, empty record at the end of a specified E-kernel segment.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekappr_c.html
:param handle: File handle.
:type handle: int
:param segno: Segment number.
:type segno: int
:return: Number of appended record.
:rtype: int
"""
handle = ctypes.c_int(handle)
segno = ctypes.c_int(segno)
recno = ctypes.c_int()
libspice.ekappr_c(handle, segno, ctypes.byref(recno))
return recno.value |
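A minimal usage sketch for ekappr, assuming an EK file has already been opened and a segment created elsewhere (e.g., via the CSPICE ekopn/ekbseg routines, not shown here); `handle` and the segment index are placeholders:

recno = ekappr(handle, 0)   # append an empty record to segment 0
print('appended record', recno)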
def update_milestone(self, milestone_id, title, deadline, party_id, notify,
move_upcoming_milestones=None,
move_upcoming_milestones_off_weekends=None):
"""
Modifies a single milestone. You can use this to shift the deadline of
a single milestone, and optionally shift the deadlines of subsequent
milestones as well.
"""
path = '/milestones/update/%u' % milestone_id
req = ET.Element('request')
req.append(
self._create_milestone_elem(title, deadline, party_id, notify))
if move_upcoming_milestones is not None:
ET.SubElement(req, 'move-upcoming-milestones').text \
= str(bool(move_upcoming_milestones)).lower()
if move_upcoming_milestones_off_weekends is not None:
ET.SubElement(req, 'move-upcoming-milestones-off-weekends').text \
= str(bool(move_upcoming_milestones_off_weekends)).lower()
return self._request(path, req) | def function[update_milestone, parameter[self, milestone_id, title, deadline, party_id, notify, move_upcoming_milestones, move_upcoming_milestones_off_weekends]]:
constant[
Modifies a single milestone. You can use this to shift the deadline of
a single milestone, and optionally shift the deadlines of subsequent
milestones as well.
]
variable[path] assign[=] binary_operation[constant[/milestones/update/%u] <ast.Mod object at 0x7da2590d6920> name[milestone_id]]
variable[req] assign[=] call[name[ET].Element, parameter[constant[request]]]
call[name[req].append, parameter[call[name[self]._create_milestone_elem, parameter[name[title], name[deadline], name[party_id], name[notify]]]]]
if compare[name[move_upcoming_milestones] is_not constant[None]] begin[:]
call[name[ET].SubElement, parameter[name[req], constant[move-upcoming-milestones]]].text assign[=] call[call[name[str], parameter[call[name[bool], parameter[name[move_upcoming_milestones]]]]].lower, parameter[]]
if compare[name[move_upcoming_milestones_off_weekends] is_not constant[None]] begin[:]
call[name[ET].SubElement, parameter[name[req], constant[move-upcoming-milestones-off-weekends]]].text assign[=] call[call[name[str], parameter[call[name[bool], parameter[name[move_upcoming_milestones_off_weekends]]]]].lower, parameter[]]
return[call[name[self]._request, parameter[name[path], name[req]]]] | keyword[def] identifier[update_milestone] ( identifier[self] , identifier[milestone_id] , identifier[title] , identifier[deadline] , identifier[party_id] , identifier[notify] ,
identifier[move_upcoming_milestones] = keyword[None] ,
identifier[move_upcoming_milestones_off_weekends] = keyword[None] ):
literal[string]
identifier[path] = literal[string] % identifier[milestone_id]
identifier[req] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[req] . identifier[append] (
identifier[self] . identifier[_create_milestone_elem] ( identifier[title] , identifier[deadline] , identifier[party_id] , identifier[notify] ))
keyword[if] identifier[move_upcoming_milestones] keyword[is] keyword[not] keyword[None] :
identifier[ET] . identifier[SubElement] ( identifier[req] , literal[string] ). identifier[text] = identifier[str] ( identifier[bool] ( identifier[move_upcoming_milestones] )). identifier[lower] ()
keyword[if] identifier[move_upcoming_milestones_off_weekends] keyword[is] keyword[not] keyword[None] :
identifier[ET] . identifier[SubElement] ( identifier[req] , literal[string] ). identifier[text] = identifier[str] ( identifier[bool] ( identifier[move_upcoming_milestones_off_weekends] )). identifier[lower] ()
keyword[return] identifier[self] . identifier[_request] ( identifier[path] , identifier[req] ) | def update_milestone(self, milestone_id, title, deadline, party_id, notify, move_upcoming_milestones=None, move_upcoming_milestones_off_weekends=None):
"""
Modifies a single milestone. You can use this to shift the deadline of
a single milestone, and optionally shift the deadlines of subsequent
milestones as well.
"""
path = '/milestones/update/%u' % milestone_id
req = ET.Element('request')
req.append(self._create_milestone_elem(title, deadline, party_id, notify))
if move_upcoming_milestones is not None:
ET.SubElement(req, 'move-upcoming-milestones').text = str(bool(move_upcoming_milestones)).lower() # depends on [control=['if'], data=[]]
if move_upcoming_milestones_off_weekends is not None:
ET.SubElement(req, 'move-upcoming-milestones-off-weekends').text = str(bool(move_upcoming_milestones_off_weekends)).lower() # depends on [control=['if'], data=[]]
return self._request(path, req) |
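A hedged call sketch: `bc` stands for an assumed client instance exposing update_milestone, and the deadline value is illustrative, in whatever format _create_milestone_elem expects:

bc.update_milestone(
    milestone_id=1234,
    title='Beta freeze',
    deadline='20240601',             # format assumed; see _create_milestone_elem
    party_id=42,
    notify=True,
    move_upcoming_milestones=True,   # shift later milestones by the same delta
)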
def run_getgist(filename, user, **kwargs):
"""Passes user inputs to GetGist() and calls get()"""
assume_yes = kwargs.get("yes_to_all")
getgist = GetGist(user=user, filename=filename, assume_yes=assume_yes)
getgist.get() | def function[run_getgist, parameter[filename, user]]:
constant[Passes user inputs to GetGist() and calls get()]
variable[assume_yes] assign[=] call[name[kwargs].get, parameter[constant[yes_to_all]]]
variable[getgist] assign[=] call[name[GetGist], parameter[]]
call[name[getgist].get, parameter[]] | keyword[def] identifier[run_getgist] ( identifier[filename] , identifier[user] ,** identifier[kwargs] ):
literal[string]
identifier[assume_yes] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[getgist] = identifier[GetGist] ( identifier[user] = identifier[user] , identifier[filename] = identifier[filename] , identifier[assume_yes] = identifier[assume_yes] )
identifier[getgist] . identifier[get] () | def run_getgist(filename, user, **kwargs):
"""Passes user inputs to GetGist() and calls get()"""
assume_yes = kwargs.get('yes_to_all')
getgist = GetGist(user=user, filename=filename, assume_yes=assume_yes)
getgist.get() |
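The signature above maps directly onto a call like the following; the filename and username are just example values:

run_getgist('.vimrc', 'octocat', yes_to_all=True)   # fetch without prompting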
def strip_number(self):
"""The number of the strip that has changed state,
with 0 being the first strip.
On tablets with only one strip, this method always returns 0.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property
raises :exc:`AttributeError`.
Returns:
int: The index of the strip that changed state.
Raises:
AttributeError
"""
if self.type != EventType.TABLET_PAD_STRIP:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_strip_number(
self._handle) | def function[strip_number, parameter[self]]:
constant[The number of the strip that has changed state,
with 0 being the first strip.
On tablets with only one strip, this method always returns 0.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property
raises :exc:`AttributeError`.
Returns:
int: The index of the strip that changed state.
Raises:
AttributeError
]
if compare[name[self].type not_equal[!=] name[EventType].TABLET_PAD_STRIP] begin[:]
<ast.Raise object at 0x7da18f00d840>
return[call[name[self]._libinput.libinput_event_tablet_pad_get_strip_number, parameter[name[self]._handle]]] | keyword[def] identifier[strip_number] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[type] != identifier[EventType] . identifier[TABLET_PAD_STRIP] :
keyword[raise] identifier[AttributeError] ( identifier[_wrong_prop] . identifier[format] ( identifier[self] . identifier[type] ))
keyword[return] identifier[self] . identifier[_libinput] . identifier[libinput_event_tablet_pad_get_strip_number] (
identifier[self] . identifier[_handle] ) | def strip_number(self):
"""The number of the strip that has changed state,
with 0 being the first strip.
On tablets with only one strip, this method always returns 0.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property
raises :exc:`AttributeError`.
Returns:
int: The index of the strip that changed state.
Raises:
AttributeError
"""
if self.type != EventType.TABLET_PAD_STRIP:
raise AttributeError(_wrong_prop.format(self.type)) # depends on [control=['if'], data=[]]
return self._libinput.libinput_event_tablet_pad_get_strip_number(self._handle) |
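A hedged sketch of reading the property in an event loop; the `li` context and its get_event() iterator are assumptions about the surrounding libinput binding, and strip_number is accessed as a property, not called:

for event in li.get_event():                      # assumed event iterator
    if event.type == EventType.TABLET_PAD_STRIP:
        print('strip', event.strip_number, 'changed state')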
def from_dict(cls, data):
"""
:type data: dict[str, str]
:rtype: satosa.internal.AuthenticationInformation
:param data: A dict representation of an AuthenticationInformation object
:return: An AuthenticationInformation object
"""
return cls(
auth_class_ref=data.get("auth_class_ref"),
timestamp=data.get("timestamp"),
issuer=data.get("issuer"),
) | def function[from_dict, parameter[cls, data]]:
constant[
:type data: dict[str, str]
:rtype: satosa.internal.AuthenticationInformation
:param data: A dict representation of an AuthenticationInformation object
:return: An AuthenticationInformation object
]
return[call[name[cls], parameter[]]] | keyword[def] identifier[from_dict] ( identifier[cls] , identifier[data] ):
literal[string]
keyword[return] identifier[cls] (
identifier[auth_class_ref] = identifier[data] . identifier[get] ( literal[string] ),
identifier[timestamp] = identifier[data] . identifier[get] ( literal[string] ),
identifier[issuer] = identifier[data] . identifier[get] ( literal[string] ),
) | def from_dict(cls, data):
"""
:type data: dict[str, str]
:rtype: satosa.internal.AuthenticationInformation
:param data: A dict representation of an AuthenticationInformation object
:return: An AuthenticationInformation object
"""
return cls(auth_class_ref=data.get('auth_class_ref'), timestamp=data.get('timestamp'), issuer=data.get('issuer')) |
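A round-trip sketch using only the keys from_dict reads, assuming the constructor stores them as same-named attributes; the values are illustrative:

data = {
    'auth_class_ref': 'urn:oasis:names:tc:SAML:2.0:ac:classes:Password',
    'timestamp': '2017-03-01T12:00:00Z',
    'issuer': 'https://idp.example.com',
}
auth_info = AuthenticationInformation.from_dict(data)
print(auth_info.issuer)   # assumed attribute name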
def remove_root_repository(self, repository_id):
"""Removes a root repository.
arg: repository_id (osid.id.Id): the ``Id`` of a repository
raise: NotFound - ``repository_id`` not a root
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_root_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_root_catalog(catalog_id=repository_id)
return self._hierarchy_session.remove_root(id_=repository_id) | def function[remove_root_repository, parameter[self, repository_id]]:
constant[Removes a root repository.
arg: repository_id (osid.id.Id): the ``Id`` of a repository
raise: NotFound - ``repository_id`` not a root
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
if compare[name[self]._catalog_session is_not constant[None]] begin[:]
return[call[name[self]._catalog_session.remove_root_catalog, parameter[]]]
return[call[name[self]._hierarchy_session.remove_root, parameter[]]] | keyword[def] identifier[remove_root_repository] ( identifier[self] , identifier[repository_id] ):
literal[string]
keyword[if] identifier[self] . identifier[_catalog_session] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_catalog_session] . identifier[remove_root_catalog] ( identifier[catalog_id] = identifier[repository_id] )
keyword[return] identifier[self] . identifier[_hierarchy_session] . identifier[remove_root] ( identifier[id_] = identifier[repository_id] ) | def remove_root_repository(self, repository_id):
"""Removes a root repository.
arg: repository_id (osid.id.Id): the ``Id`` of a repository
raise: NotFound - ``repository_id`` not a root
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_root_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_root_catalog(catalog_id=repository_id) # depends on [control=['if'], data=[]]
return self._hierarchy_session.remove_root(id_=repository_id) |
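Hedged usage: `session` is assumed to be the hierarchy-design session defining this method, and `repository_id` an osid.id.Id obtained from a lookup session elsewhere:

session.remove_root_repository(repository_id)   # detach it from the hierarchy roots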
def rewrap(text, width=None):
"""
Rewrap text for output to the console.
Removes common indentation and rewraps paragraphs according to the console
width.
Line feeds between paragraphs are preserved.
Formatting of paragraphs that start with additional indentation is
preserved.
"""
if width is None:
width = 80
# Remove common indentation.
text = textwrap.dedent(text)
def needs_wrapping(line):
# Line always non-empty.
return not line[0].isspace()
# Split text by lines and group lines that comprise paragraphs.
wrapped_text = ""
for do_wrap, lines in itertools.groupby(text.splitlines(True),
key=needs_wrapping):
paragraph = ''.join(lines)
if do_wrap:
paragraph = textwrap.fill(paragraph, width)
wrapped_text += paragraph
return wrapped_text | def function[rewrap, parameter[text, width]]:
constant[
Rewrap text for output to the console.
Removes common indentation and rewraps paragraphs according to the console
width.
Line feeds between paragraphs are preserved.
Formatting of paragraphs that start with additional indentation is
preserved.
]
if compare[name[width] is constant[None]] begin[:]
variable[width] assign[=] constant[80]
variable[text] assign[=] call[name[textwrap].dedent, parameter[name[text]]]
def function[needs_wrapping, parameter[line]]:
return[<ast.UnaryOp object at 0x7da1b21e0fd0>]
variable[wrapped_text] assign[=] constant[]
for taget[tuple[[<ast.Name object at 0x7da1b21e1bd0>, <ast.Name object at 0x7da1b21e1750>]]] in starred[call[name[itertools].groupby, parameter[call[name[text].splitlines, parameter[constant[True]]]]]] begin[:]
variable[paragraph] assign[=] call[constant[].join, parameter[name[lines]]]
if name[do_wrap] begin[:]
variable[paragraph] assign[=] call[name[textwrap].fill, parameter[name[paragraph], name[width]]]
<ast.AugAssign object at 0x7da1b21e28c0>
return[name[wrapped_text]] | keyword[def] identifier[rewrap] ( identifier[text] , identifier[width] = keyword[None] ):
literal[string]
keyword[if] identifier[width] keyword[is] keyword[None] :
identifier[width] = literal[int]
identifier[text] = identifier[textwrap] . identifier[dedent] ( identifier[text] )
keyword[def] identifier[needs_wrapping] ( identifier[line] ):
keyword[return] keyword[not] identifier[line] [ literal[int] ]. identifier[isspace] ()
identifier[wrapped_text] = literal[string]
keyword[for] identifier[do_wrap] , identifier[lines] keyword[in] identifier[itertools] . identifier[groupby] ( identifier[text] . identifier[splitlines] ( keyword[True] ),
identifier[key] = identifier[needs_wrapping] ):
identifier[paragraph] = literal[string] . identifier[join] ( identifier[lines] )
keyword[if] identifier[do_wrap] :
identifier[paragraph] = identifier[textwrap] . identifier[fill] ( identifier[paragraph] , identifier[width] )
identifier[wrapped_text] += identifier[paragraph]
keyword[return] identifier[wrapped_text] | def rewrap(text, width=None):
"""
Rewrap text for output to the console.
Removes common indentation and rewraps paragraphs according to the console
width.
Line feeds between paragraphs are preserved.
Formatting of paragraphs that start with additional indentation is
preserved.
"""
if width is None:
width = 80 # depends on [control=['if'], data=['width']]
# Remove common indentation.
text = textwrap.dedent(text)
def needs_wrapping(line):
# Line always non-empty.
return not line[0].isspace()
# Split text by lines and group lines that comprise paragraphs.
wrapped_text = ''
for (do_wrap, lines) in itertools.groupby(text.splitlines(True), key=needs_wrapping):
paragraph = ''.join(lines)
if do_wrap:
paragraph = textwrap.fill(paragraph, width) # depends on [control=['if'], data=[]]
wrapped_text += paragraph # depends on [control=['for'], data=[]]
return wrapped_text |
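A runnable example of rewrap: the flowing lines are dedented and refilled, while the blank line and the extra-indented line pass through untouched:

help_text = """
    Long flowing text that should be
    refilled to the console width.

        indented: kept verbatim
"""
print(rewrap(help_text, width=30))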