code | code_sememe | token_type | code_dependency
---|---|---|---|
def autodiff_tree(func, wrt, motion, mode, preserve_result, check_dims,
verbose):
"""Perform AD on all functions in a call tree.
This function walks the call tree and differentiates each function in it. It
also ensures that the global namespaces that each function in the call tree
was in are merged.
The `tangent` and `numpy` packages are added to the namespace here, so that
the gradient templates can assume that they are present.
Args:
See `grad`.
Returns:
final: A single module which contains the primals and adjoints of all the
functions in the call tree.
namespace: A merged dictionary with all the variables in the global
namespaces of each function. The primals and adjoints need access to
these in order to execute.
"""
# Imported here to avoid circular imports
import tangent
namespace = {'tangent': tangent, 'numpy': numpy}
done = set()
final = gast.Module(body=[])
namespace.update(six.get_function_globals(func))
node, required = autodiff_ast(func, wrt, motion, mode, preserve_result,
check_dims, verbose)
final.body.extend(node.body)
to_do = set(required)
if motion == 'split' and mode == 'reverse':
done.add((func, wrt))
to_do -= done
while to_do:
func, wrt = to_do.pop()
namespace.update(six.get_function_globals(func))
node, required = autodiff_ast(
func=func,
wrt=wrt,
motion='split',
mode=mode,
preserve_result=True,
check_dims=False,
verbose=verbose)
final.body.extend(node.body)
done.add((func, wrt))
to_do.update(required)
to_do -= done
return final, namespace | def function[autodiff_tree, parameter[func, wrt, motion, mode, preserve_result, check_dims, verbose]]:
constant[Perform AD on all functions in a call tree.
This function walks the call tree and differentiates each function in it. It
also ensures that the global namespaces that each function in the call tree
was in are merged.
The `tangent` and `numpy` packages are added to the namespace here, so that
the gradient templates can assume that they are present.
Args:
See `grad`.
Returns:
final: A single module which contains the primals and adjoints of all the
functions in the call tree.
namespace: A merged dictionary with all the variables in the global
namespaces of each function. The primals and adjoints need access to
these in order to execute.
]
import module[tangent]
variable[namespace] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cca00>, <ast.Constant object at 0x7da18c4ccf70>], [<ast.Name object at 0x7da18c4ccd60>, <ast.Name object at 0x7da18c4cf5b0>]]
variable[done] assign[=] call[name[set], parameter[]]
variable[final] assign[=] call[name[gast].Module, parameter[]]
call[name[namespace].update, parameter[call[name[six].get_function_globals, parameter[name[func]]]]]
<ast.Tuple object at 0x7da18f00cca0> assign[=] call[name[autodiff_ast], parameter[name[func], name[wrt], name[motion], name[mode], name[preserve_result], name[check_dims], name[verbose]]]
call[name[final].body.extend, parameter[name[node].body]]
variable[to_do] assign[=] call[name[set], parameter[name[required]]]
if <ast.BoolOp object at 0x7da18ede4460> begin[:]
call[name[done].add, parameter[tuple[[<ast.Name object at 0x7da18ede5720>, <ast.Name object at 0x7da18ede4520>]]]]
<ast.AugAssign object at 0x7da18ede43d0>
while name[to_do] begin[:]
<ast.Tuple object at 0x7da18ede5780> assign[=] call[name[to_do].pop, parameter[]]
call[name[namespace].update, parameter[call[name[six].get_function_globals, parameter[name[func]]]]]
<ast.Tuple object at 0x7da18ede6620> assign[=] call[name[autodiff_ast], parameter[]]
call[name[final].body.extend, parameter[name[node].body]]
call[name[done].add, parameter[tuple[[<ast.Name object at 0x7da18c4ccdf0>, <ast.Name object at 0x7da18c4cc0a0>]]]]
call[name[to_do].update, parameter[name[required]]]
<ast.AugAssign object at 0x7da18c4cda20>
return[tuple[[<ast.Name object at 0x7da18c4ce470>, <ast.Name object at 0x7da18c4cfa30>]]] | keyword[def] identifier[autodiff_tree] ( identifier[func] , identifier[wrt] , identifier[motion] , identifier[mode] , identifier[preserve_result] , identifier[check_dims] ,
identifier[verbose] ):
literal[string]
keyword[import] identifier[tangent]
identifier[namespace] ={ literal[string] : identifier[tangent] , literal[string] : identifier[numpy] }
identifier[done] = identifier[set] ()
identifier[final] = identifier[gast] . identifier[Module] ( identifier[body] =[])
identifier[namespace] . identifier[update] ( identifier[six] . identifier[get_function_globals] ( identifier[func] ))
identifier[node] , identifier[required] = identifier[autodiff_ast] ( identifier[func] , identifier[wrt] , identifier[motion] , identifier[mode] , identifier[preserve_result] ,
identifier[check_dims] , identifier[verbose] )
identifier[final] . identifier[body] . identifier[extend] ( identifier[node] . identifier[body] )
identifier[to_do] = identifier[set] ( identifier[required] )
keyword[if] identifier[motion] == literal[string] keyword[and] identifier[mode] == literal[string] :
identifier[done] . identifier[add] (( identifier[func] , identifier[wrt] ))
identifier[to_do] -= identifier[done]
keyword[while] identifier[to_do] :
identifier[func] , identifier[wrt] = identifier[to_do] . identifier[pop] ()
identifier[namespace] . identifier[update] ( identifier[six] . identifier[get_function_globals] ( identifier[func] ))
identifier[node] , identifier[required] = identifier[autodiff_ast] (
identifier[func] = identifier[func] ,
identifier[wrt] = identifier[wrt] ,
identifier[motion] = literal[string] ,
identifier[mode] = identifier[mode] ,
identifier[preserve_result] = keyword[True] ,
identifier[check_dims] = keyword[False] ,
identifier[verbose] = identifier[verbose] )
identifier[final] . identifier[body] . identifier[extend] ( identifier[node] . identifier[body] )
identifier[done] . identifier[add] (( identifier[func] , identifier[wrt] ))
identifier[to_do] . identifier[update] ( identifier[required] )
identifier[to_do] -= identifier[done]
keyword[return] identifier[final] , identifier[namespace] | def autodiff_tree(func, wrt, motion, mode, preserve_result, check_dims, verbose):
"""Perform AD on all functions in a call tree.
This function walks the call tree and differentiates each function in it. It
also ensures that the global namespaces that each function in the call tree
was in are merged.
The `tangent` and `numpy` packages are added to the namespace here, so that
the gradient templates can assume that they are present.
Args:
See `grad`.
Returns:
final: A single module which contains the primals and adjoints of all the
functions in the call tree.
namespace: A merged dictionary with all the variables in the global
namespaces of each function. The primals and adjoints need access to
these in order to execute.
"""
# Imported here to avoid circular imports
import tangent
namespace = {'tangent': tangent, 'numpy': numpy}
done = set()
final = gast.Module(body=[])
namespace.update(six.get_function_globals(func))
(node, required) = autodiff_ast(func, wrt, motion, mode, preserve_result, check_dims, verbose)
final.body.extend(node.body)
to_do = set(required)
if motion == 'split' and mode == 'reverse':
done.add((func, wrt))
to_do -= done # depends on [control=['if'], data=[]]
while to_do:
(func, wrt) = to_do.pop()
namespace.update(six.get_function_globals(func))
(node, required) = autodiff_ast(func=func, wrt=wrt, motion='split', mode=mode, preserve_result=True, check_dims=False, verbose=verbose)
final.body.extend(node.body)
done.add((func, wrt))
to_do.update(required)
to_do -= done # depends on [control=['while'], data=[]]
return (final, namespace) |
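
The traversal above is a classic worklist: pop a `(func, wrt)` pair, differentiate it, add it to `done`, and push whatever the adjoint `required`, skipping anything already finished. A minimal, self-contained sketch of the same pattern, simplified to plain functions; `get_callees` is a hypothetical stand-in for the `required` set returned by `autodiff_ast` and is not part of the tangent API:

```python
# Minimal sketch of the worklist traversal used by autodiff_tree.
# `get_callees` is a hypothetical stand-in for the `required` set that
# autodiff_ast returns; it is not part of the tangent API.
def walk_call_tree(root, get_callees):
    done = set()
    to_do = {root}
    order = []
    while to_do:
        func = to_do.pop()
        order.append(func)            # differentiate `func` at this point
        done.add(func)
        to_do.update(get_callees(func))
        to_do -= done                 # never reprocess a finished function
    return order
```

The `to_do -= done` step is what guarantees termination on recursive or mutually calling functions: each function is processed at most once.
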
def pyprf(strCsvCnfg, lgcTest=False, varRat=None, strPathHrf=None):
"""
Main function for pRF mapping.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file.
lgcTest : Boolean
Whether this is a test (pytest). If yes, the absolute path of the pyprf library
will be prepended to config file paths.
varRat : float, default None
Ratio of the size of the suppressive surround to the size of the center pRF.
strPathHrf : str or None
Path to npy file with custom hrf parameters. If None, default
parameters will be used.
"""
# *************************************************************************
# *** Check time
print('---pRF analysis')
varTme01 = time.time()
# *************************************************************************
# *************************************************************************
# *** Preparations
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# Conditional imports:
if cfg.strVersion == 'gpu':
from pyprf_feature.analysis.find_prf_gpu import find_prf_gpu
if ((cfg.strVersion == 'cython') or (cfg.strVersion == 'numpy')):
from pyprf_feature.analysis.find_prf_cpu import find_prf_cpu
# Convert preprocessing parameters (for temporal smoothing)
# from SI units (i.e. [s]) into units of data array (volumes):
cfg.varSdSmthTmp = np.divide(cfg.varSdSmthTmp, cfg.varTr)
# *************************************************************************
# *************************************************************************
# *** Create or load pRF time course models
# Create model time courses. Also return a logical array for inclusion of
# model parameters, which will be needed later when we create model
# parameters in degrees.
aryPrfTc, lgcMdlInc = model_creation(dicCnfg, varRat=varRat,
strPathHrf=strPathHrf)
# Deduce the number of features from the pRF time course models array
cfg.varNumFtr = aryPrfTc.shape[1]
# *************************************************************************
# *************************************************************************
# *** Preprocessing
# The model time courses will be preprocessed such that they are smoothed
# (temporally) with the same factor as the data and that they will be z-scored:
aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp)
# The functional data will be masked and demeaned:
aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp = prep_func(
cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100)
# set the precision of the header to np.float32 so that the prf results
# will be saved in this precision later
hdrMsk.set_data_dtype(np.float32)
# *************************************************************************
# *************************************************************************
# *** Checks
# Make sure that if gpu fitting is used, the number of cross-validations is
# set to 1, not higher
if cfg.strVersion == 'gpu':
strErrMsg = 'Stopping program. ' + \
'Cross-validation on GPU is currently not supported. ' + \
'Set varNumXval equal to 1 in csv file in order to continue. '
assert cfg.varNumXval == 1, strErrMsg
# For the GPU version, we need to reduce the parallelisation to 1 now,
# because no separate CPU threads are to be created. We may still use CPU
# parallelisation for preprocessing, which is why the parallelisation
# factor is only reduced now, not earlier.
if cfg.strVersion == 'gpu':
cfg.varPar = 1
# Make sure that if cython is used, the number of features is 1 or 2,
# not higher
if cfg.strVersion == 'cython':
strErrMsg = 'Stopping program. ' + \
'Cython is not supported for more than 2 features. ' + \
'Set strVersion equal to \'numpy\'.'
assert cfg.varNumFtr in [1, 2], strErrMsg
# Check whether we need to crossvalidate
if np.greater(cfg.varNumXval, 1):
cfg.lgcXval = True
elif np.equal(cfg.varNumXval, 1):
cfg.lgcXval = False
strErrMsg = 'Stopping program. ' + \
'Set numXval (number of crossvalidation folds) to 1 or higher'
assert np.greater_equal(cfg.varNumXval, 1), strErrMsg
# *************************************************************************
# *** Find pRF models for voxel time courses
print('------Find pRF models for voxel time courses')
# Number of voxels for which pRF finding will be performed:
cfg.varNumVoxInc = aryFunc.shape[0]
print('---------Number of voxels on which pRF finding will be performed: '
+ str(cfg.varNumVoxInc))
print('---------Number of features pRF finding will be performed with: '
+ str(cfg.varNumFtr))
print('---------Preparing parallel pRF model finding')
# Get array with all possible model parameter combinations:
# [x positions, y positions, sigmas]
aryMdlParams = crt_mdl_prms((int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)), cfg.varNum1,
cfg.varExtXmin, cfg.varExtXmax, cfg.varNum2,
cfg.varExtYmin, cfg.varExtYmax,
cfg.varNumPrfSizes, cfg.varPrfStdMin,
cfg.varPrfStdMax, kwUnt='deg',
kwCrd=cfg.strKwCrd)
# Exclude models with prf center outside stimulated area
aryMdlParams = aryMdlParams[lgcMdlInc, :]
# Empty list for results (parameters of best fitting pRF model):
lstPrfRes = [None] * cfg.varPar
# Empty list for processes:
lstPrcs = [None] * cfg.varPar
# Create a queue to put the results in:
queOut = mp.Queue()
# Create list with chunks of functional data for the parallel processes:
lstFunc = np.array_split(aryFunc, cfg.varPar)
# We don't need the original array with the functional data anymore:
del(aryFunc)
# Prepare dictionary to pass as kwargs to find_prf_cpu
dctKw = {'lgcRstr': None,
'lgcPrint': True}
# CPU version (using numpy or cython for pRF finding):
if ((cfg.strVersion == 'numpy') or (cfg.strVersion == 'cython')):
print('---------pRF finding on CPU')
print('---------Creating parallel processes')
# Create processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc] = mp.Process(target=find_prf_cpu,
args=(idxPrc,
lstFunc[idxPrc],
aryPrfTc,
aryMdlParams,
cfg.strVersion,
cfg.lgcXval,
cfg.varNumXval,
queOut),
kwargs=dctKw,
)
# Daemon (kills processes when exiting):
lstPrcs[idxPrc].Daemon = True
# GPU version (using tensorflow for pRF finding):
elif cfg.strVersion == 'gpu':
print('---------pRF finding on GPU')
# Create processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc] = mp.Process(target=find_prf_gpu,
args=(idxPrc,
aryMdlParams,
lstFunc[idxPrc],
aryPrfTc,
queOut),
kwargs=dctKw,
)
# Daemon (kills processes when exiting):
lstPrcs[idxPrc].Daemon = True
# Start processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc].start()
# Delete reference to list with function data (the data continues to exist
# in child process):
del(lstFunc)
# Collect results from queue:
for idxPrc in range(0, cfg.varPar):
lstPrfRes[idxPrc] = queOut.get(True)
# Join processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc].join()
# *************************************************************************
# *************************************************************************
# *** Prepare pRF finding results for export
print('---------Prepare pRF finding results for export')
# Put output into correct order:
lstPrfRes = sorted(lstPrfRes)
# collect results from parallelization
aryBstXpos = joinRes(lstPrfRes, cfg.varPar, 1, inFormat='1D')
aryBstYpos = joinRes(lstPrfRes, cfg.varPar, 2, inFormat='1D')
aryBstSd = joinRes(lstPrfRes, cfg.varPar, 3, inFormat='1D')
aryBstR2 = joinRes(lstPrfRes, cfg.varPar, 4, inFormat='1D')
aryBstBts = joinRes(lstPrfRes, cfg.varPar, 5, inFormat='2D')
if np.greater(cfg.varNumXval, 1):
aryBstR2Single = joinRes(lstPrfRes, cfg.varPar, 6, inFormat='2D')
# Delete unneeded large objects:
del(lstPrfRes)
# *************************************************************************
# *************************************************************************
# Calculate polar angle map:
aryPlrAng = np.arctan2(aryBstYpos, aryBstXpos)
# Calculate eccentricity map (r = sqrt( x^2 + y^2 ) ):
aryEcc = np.sqrt(np.add(np.square(aryBstXpos),
np.square(aryBstYpos)))
# *************************************************************************
# *************************************************************************
# Export each map of best parameters as a 3D nii file
print('---------Exporting results')
# Append 'hrf' to cfg.strPathOut, if fitting was done with custom hrf
if strPathHrf is not None:
cfg.strPathOut = cfg.strPathOut + '_hrf'
# Concatenate all the best voxel maps
aryBstMaps = np.stack([aryBstXpos, aryBstYpos, aryBstSd, aryBstR2,
aryPlrAng, aryEcc], axis=1)
# List with name suffixes of output images:
lstNiiNames = ['_x_pos',
'_y_pos',
'_SD',
'_R2',
'_polar_angle',
'_eccentricity']
# Append ratio to nii file name, if fitting was done with sup surround
if varRat is not None:
lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames]
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export map results as separate 3D nii files
export_nii(aryBstMaps, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='3D')
# *************************************************************************
# *************************************************************************
# Save beta parameter estimates for every feature:
# List with name suffixes of output images:
lstNiiNames = ['_Betas']
# Append ratio to nii file name, if fitting was done with sup surround
if varRat is not None:
lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames]
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export beta parameter as a single 4D nii file
export_nii(aryBstBts, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
# *************************************************************************
# *************************************************************************
# Save R2 maps from crossvalidation (saved for every run) as nii:
if np.greater(cfg.varNumXval, 1):
# truncate extremely negative R2 values
aryBstR2Single[np.where(np.less_equal(aryBstR2Single, -1.0))] = -1.0
# List with name suffixes of output images:
lstNiiNames = ['_R2_single']
# Append ratio to nii file name, if fitting was done with sup surround
if varRat is not None:
lstNiiNames = [strNii + '_' + str(varRat) for strNii in
lstNiiNames]
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export R2 maps as a single 4D nii file
export_nii(aryBstR2Single, lstNiiNames, aryLgcMsk, aryLgcVar,
tplNiiShp, aryAff, hdrMsk, outFormat='4D')
# *************************************************************************
# *************************************************************************
# *** Report time
varTme02 = time.time()
varTme03 = varTme02 - varTme01
print('---Elapsed time: ' + str(varTme03) + ' s')
print('---Done.') | def function[pyprf, parameter[strCsvCnfg, lgcTest, varRat, strPathHrf]]:
constant[
Main function for pRF mapping.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file.
lgcTest : Boolean
Whether this is a test (pytest). If yes, the absolute path of the pyprf library
will be prepended to config file paths.
varRat : float, default None
Ratio of the size of the suppressive surround to the size of the center pRF.
strPathHrf : str or None
Path to npy file with custom hrf parameters. If None, default
parameters will be used.
]
call[name[print], parameter[constant[---pRF analysis]]]
variable[varTme01] assign[=] call[name[time].time, parameter[]]
variable[dicCnfg] assign[=] call[name[load_config], parameter[name[strCsvCnfg]]]
variable[cfg] assign[=] call[name[cls_set_config], parameter[name[dicCnfg]]]
if compare[name[cfg].strVersion equal[==] constant[gpu]] begin[:]
from relative_module[pyprf_feature.analysis.find_prf_gpu] import module[find_prf_gpu]
if <ast.BoolOp object at 0x7da1b0f1f0a0> begin[:]
from relative_module[pyprf_feature.analysis.find_prf_cpu] import module[find_prf_cpu]
name[cfg].varSdSmthTmp assign[=] call[name[np].divide, parameter[name[cfg].varSdSmthTmp, name[cfg].varTr]]
<ast.Tuple object at 0x7da1b0f1ec80> assign[=] call[name[model_creation], parameter[name[dicCnfg]]]
name[cfg].varNumFtr assign[=] call[name[aryPrfTc].shape][constant[1]]
variable[aryPrfTc] assign[=] call[name[prep_models], parameter[name[aryPrfTc]]]
<ast.Tuple object at 0x7da1b0f1e7a0> assign[=] call[name[prep_func], parameter[name[cfg].strPathNiiMask, name[cfg].lstPathNiiFunc]]
call[name[hdrMsk].set_data_dtype, parameter[name[np].float32]]
if compare[name[cfg].strVersion equal[==] constant[gpu]] begin[:]
variable[strErrMsg] assign[=] binary_operation[binary_operation[constant[Stopping program. ] + constant[Cross-validation on GPU is currently not supported. ]] + constant[Set varNumXval equal to 1 in csv file in order to continue. ]]
assert[compare[name[cfg].varNumXval equal[==] constant[1]]]
if compare[name[cfg].strVersion equal[==] constant[gpu]] begin[:]
name[cfg].varPar assign[=] constant[1]
if compare[name[cfg].strVersion equal[==] constant[cython]] begin[:]
variable[strErrMsg] assign[=] binary_operation[binary_operation[constant[Stopping program. ] + constant[Cython is not supported for more than 2 features. ]] + constant[Set strVersion equal to 'numpy'.]]
assert[compare[name[cfg].varNumFtr in list[[<ast.Constant object at 0x7da1b0f1dae0>, <ast.Constant object at 0x7da1b0f1dab0>]]]]
if call[name[np].greater, parameter[name[cfg].varNumXval, constant[1]]] begin[:]
name[cfg].lgcXval assign[=] constant[True]
variable[strErrMsg] assign[=] binary_operation[constant[Stopping program. ] + constant[Set numXval (number of crossvalidation folds) to 1 or higher]]
assert[call[name[np].greater_equal, parameter[name[cfg].varNumXval, constant[1]]]]
call[name[print], parameter[constant[------Find pRF models for voxel time courses]]]
name[cfg].varNumVoxInc assign[=] call[name[aryFunc].shape][constant[0]]
call[name[print], parameter[binary_operation[constant[---------Number of voxels on which pRF finding will be performed: ] + call[name[str], parameter[name[cfg].varNumVoxInc]]]]]
call[name[print], parameter[binary_operation[constant[---------Number of features pRF finding will be performed with: ] + call[name[str], parameter[name[cfg].varNumFtr]]]]]
call[name[print], parameter[constant[---------Preparing parallel pRF model finding]]]
variable[aryMdlParams] assign[=] call[name[crt_mdl_prms], parameter[tuple[[<ast.Call object at 0x7da1b0f1cbe0>, <ast.Call object at 0x7da1b0f1cb20>]], name[cfg].varNum1, name[cfg].varExtXmin, name[cfg].varExtXmax, name[cfg].varNum2, name[cfg].varExtYmin, name[cfg].varExtYmax, name[cfg].varNumPrfSizes, name[cfg].varPrfStdMin, name[cfg].varPrfStdMax]]
variable[aryMdlParams] assign[=] call[name[aryMdlParams]][tuple[[<ast.Name object at 0x7da1b0f1c520>, <ast.Slice object at 0x7da1b0f1c4f0>]]]
variable[lstPrfRes] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b0f1c400>]] * name[cfg].varPar]
variable[lstPrcs] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b0f1c2b0>]] * name[cfg].varPar]
variable[queOut] assign[=] call[name[mp].Queue, parameter[]]
variable[lstFunc] assign[=] call[name[np].array_split, parameter[name[aryFunc], name[cfg].varPar]]
<ast.Delete object at 0x7da1b0f13f70>
variable[dctKw] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f13e50>, <ast.Constant object at 0x7da1b0f13e20>], [<ast.Constant object at 0x7da1b0f13df0>, <ast.Constant object at 0x7da1b0f13dc0>]]
if <ast.BoolOp object at 0x7da1b0f13d60> begin[:]
call[name[print], parameter[constant[---------pRF finding on CPU]]]
call[name[print], parameter[constant[---------Creating parallel processes]]]
for taget[name[idxPrc]] in starred[call[name[range], parameter[constant[0], name[cfg].varPar]]] begin[:]
call[name[lstPrcs]][name[idxPrc]] assign[=] call[name[mp].Process, parameter[]]
call[name[lstPrcs]][name[idxPrc]].Daemon assign[=] constant[True]
for taget[name[idxPrc]] in starred[call[name[range], parameter[constant[0], name[cfg].varPar]]] begin[:]
call[call[name[lstPrcs]][name[idxPrc]].start, parameter[]]
<ast.Delete object at 0x7da1b10f7fd0>
for taget[name[idxPrc]] in starred[call[name[range], parameter[constant[0], name[cfg].varPar]]] begin[:]
call[name[lstPrfRes]][name[idxPrc]] assign[=] call[name[queOut].get, parameter[constant[True]]]
for taget[name[idxPrc]] in starred[call[name[range], parameter[constant[0], name[cfg].varPar]]] begin[:]
call[call[name[lstPrcs]][name[idxPrc]].join, parameter[]]
call[name[print], parameter[constant[---------Prepare pRF finding results for export]]]
variable[lstPrfRes] assign[=] call[name[sorted], parameter[name[lstPrfRes]]]
variable[aryBstXpos] assign[=] call[name[joinRes], parameter[name[lstPrfRes], name[cfg].varPar, constant[1]]]
variable[aryBstYpos] assign[=] call[name[joinRes], parameter[name[lstPrfRes], name[cfg].varPar, constant[2]]]
variable[aryBstSd] assign[=] call[name[joinRes], parameter[name[lstPrfRes], name[cfg].varPar, constant[3]]]
variable[aryBstR2] assign[=] call[name[joinRes], parameter[name[lstPrfRes], name[cfg].varPar, constant[4]]]
variable[aryBstBts] assign[=] call[name[joinRes], parameter[name[lstPrfRes], name[cfg].varPar, constant[5]]]
if call[name[np].greater, parameter[name[cfg].varNumXval, constant[1]]] begin[:]
variable[aryBstR2Single] assign[=] call[name[joinRes], parameter[name[lstPrfRes], name[cfg].varPar, constant[6]]]
<ast.Delete object at 0x7da1b10f58a0>
variable[aryPlrAng] assign[=] call[name[np].arctan2, parameter[name[aryBstYpos], name[aryBstXpos]]]
variable[aryEcc] assign[=] call[name[np].sqrt, parameter[call[name[np].add, parameter[call[name[np].square, parameter[name[aryBstXpos]]], call[name[np].square, parameter[name[aryBstYpos]]]]]]]
call[name[print], parameter[constant[---------Exporting results]]]
if compare[name[strPathHrf] is_not constant[None]] begin[:]
name[cfg].strPathOut assign[=] binary_operation[name[cfg].strPathOut + constant[_hrf]]
variable[aryBstMaps] assign[=] call[name[np].stack, parameter[list[[<ast.Name object at 0x7da1b10474f0>, <ast.Name object at 0x7da1b1045630>, <ast.Name object at 0x7da1b1047280>, <ast.Name object at 0x7da1b1045690>, <ast.Name object at 0x7da1b1045ed0>, <ast.Name object at 0x7da1b10479d0>]]]]
variable[lstNiiNames] assign[=] list[[<ast.Constant object at 0x7da1b1044bb0>, <ast.Constant object at 0x7da1b10474c0>, <ast.Constant object at 0x7da1b1047fd0>, <ast.Constant object at 0x7da1b10463b0>, <ast.Constant object at 0x7da1b1047130>, <ast.Constant object at 0x7da1b10444c0>]]
if compare[name[varRat] is_not constant[None]] begin[:]
variable[lstNiiNames] assign[=] <ast.ListComp object at 0x7da1b1047a60>
variable[lstNiiNames] assign[=] <ast.ListComp object at 0x7da1b1045450>
call[name[export_nii], parameter[name[aryBstMaps], name[lstNiiNames], name[aryLgcMsk], name[aryLgcVar], name[tplNiiShp], name[aryAff], name[hdrMsk]]]
variable[lstNiiNames] assign[=] list[[<ast.Constant object at 0x7da1b0f056f0>]]
if compare[name[varRat] is_not constant[None]] begin[:]
variable[lstNiiNames] assign[=] <ast.ListComp object at 0x7da1b0f05840>
variable[lstNiiNames] assign[=] <ast.ListComp object at 0x7da1b0f05bd0>
call[name[export_nii], parameter[name[aryBstBts], name[lstNiiNames], name[aryLgcMsk], name[aryLgcVar], name[tplNiiShp], name[aryAff], name[hdrMsk]]]
if call[name[np].greater, parameter[name[cfg].varNumXval, constant[1]]] begin[:]
call[name[aryBstR2Single]][call[name[np].where, parameter[call[name[np].less_equal, parameter[name[aryBstR2Single], <ast.UnaryOp object at 0x7da1b0f05990>]]]]] assign[=] <ast.UnaryOp object at 0x7da1b0f05510>
variable[lstNiiNames] assign[=] list[[<ast.Constant object at 0x7da1b0f06110>]]
if compare[name[varRat] is_not constant[None]] begin[:]
variable[lstNiiNames] assign[=] <ast.ListComp object at 0x7da1b0f056c0>
variable[lstNiiNames] assign[=] <ast.ListComp object at 0x7da1b0f07310>
call[name[export_nii], parameter[name[aryBstR2Single], name[lstNiiNames], name[aryLgcMsk], name[aryLgcVar], name[tplNiiShp], name[aryAff], name[hdrMsk]]]
variable[varTme02] assign[=] call[name[time].time, parameter[]]
variable[varTme03] assign[=] binary_operation[name[varTme02] - name[varTme01]]
call[name[print], parameter[binary_operation[binary_operation[constant[---Elapsed time: ] + call[name[str], parameter[name[varTme03]]]] + constant[ s]]]]
call[name[print], parameter[constant[---Done.]]] | keyword[def] identifier[pyprf] ( identifier[strCsvCnfg] , identifier[lgcTest] = keyword[False] , identifier[varRat] = keyword[None] , identifier[strPathHrf] = keyword[None] ):
literal[string]
identifier[print] ( literal[string] )
identifier[varTme01] = identifier[time] . identifier[time] ()
identifier[dicCnfg] = identifier[load_config] ( identifier[strCsvCnfg] , identifier[lgcTest] = identifier[lgcTest] )
identifier[cfg] = identifier[cls_set_config] ( identifier[dicCnfg] )
keyword[if] identifier[cfg] . identifier[strVersion] == literal[string] :
keyword[from] identifier[pyprf_feature] . identifier[analysis] . identifier[find_prf_gpu] keyword[import] identifier[find_prf_gpu]
keyword[if] (( identifier[cfg] . identifier[strVersion] == literal[string] ) keyword[or] ( identifier[cfg] . identifier[strVersion] == literal[string] )):
keyword[from] identifier[pyprf_feature] . identifier[analysis] . identifier[find_prf_cpu] keyword[import] identifier[find_prf_cpu]
identifier[cfg] . identifier[varSdSmthTmp] = identifier[np] . identifier[divide] ( identifier[cfg] . identifier[varSdSmthTmp] , identifier[cfg] . identifier[varTr] )
identifier[aryPrfTc] , identifier[lgcMdlInc] = identifier[model_creation] ( identifier[dicCnfg] , identifier[varRat] = identifier[varRat] ,
identifier[strPathHrf] = identifier[strPathHrf] )
identifier[cfg] . identifier[varNumFtr] = identifier[aryPrfTc] . identifier[shape] [ literal[int] ]
identifier[aryPrfTc] = identifier[prep_models] ( identifier[aryPrfTc] , identifier[varSdSmthTmp] = identifier[cfg] . identifier[varSdSmthTmp] )
identifier[aryLgcMsk] , identifier[aryLgcVar] , identifier[hdrMsk] , identifier[aryAff] , identifier[aryFunc] , identifier[tplNiiShp] = identifier[prep_func] (
identifier[cfg] . identifier[strPathNiiMask] , identifier[cfg] . identifier[lstPathNiiFunc] , identifier[varAvgThr] =- literal[int] )
identifier[hdrMsk] . identifier[set_data_dtype] ( identifier[np] . identifier[float32] )
keyword[if] identifier[cfg] . identifier[strVersion] == literal[string] :
identifier[strErrMsg] = literal[string] + literal[string] + literal[string]
keyword[assert] identifier[cfg] . identifier[varNumXval] == literal[int] , identifier[strErrMsg]
keyword[if] identifier[cfg] . identifier[strVersion] == literal[string] :
identifier[cfg] . identifier[varPar] = literal[int]
keyword[if] identifier[cfg] . identifier[strVersion] == literal[string] :
identifier[strErrMsg] = literal[string] + literal[string] + literal[string]
keyword[assert] identifier[cfg] . identifier[varNumFtr] keyword[in] [ literal[int] , literal[int] ], identifier[strErrMsg]
keyword[if] identifier[np] . identifier[greater] ( identifier[cfg] . identifier[varNumXval] , literal[int] ):
identifier[cfg] . identifier[lgcXval] = keyword[True]
keyword[elif] identifier[np] . identifier[equal] ( identifier[cfg] . identifier[varNumXval] , literal[int] ):
identifier[cfg] . identifier[lgcXval] = keyword[False]
identifier[strErrMsg] = literal[string] + literal[string]
keyword[assert] identifier[np] . identifier[greater_equal] ( identifier[cfg] . identifier[varNumXval] , literal[int] ), identifier[strErrMsg]
identifier[print] ( literal[string] )
identifier[cfg] . identifier[varNumVoxInc] = identifier[aryFunc] . identifier[shape] [ literal[int] ]
identifier[print] ( literal[string]
+ identifier[str] ( identifier[cfg] . identifier[varNumVoxInc] ))
identifier[print] ( literal[string]
+ identifier[str] ( identifier[cfg] . identifier[varNumFtr] ))
identifier[print] ( literal[string] )
identifier[aryMdlParams] = identifier[crt_mdl_prms] (( identifier[int] ( identifier[cfg] . identifier[varVslSpcSzeX] ),
identifier[int] ( identifier[cfg] . identifier[varVslSpcSzeY] )), identifier[cfg] . identifier[varNum1] ,
identifier[cfg] . identifier[varExtXmin] , identifier[cfg] . identifier[varExtXmax] , identifier[cfg] . identifier[varNum2] ,
identifier[cfg] . identifier[varExtYmin] , identifier[cfg] . identifier[varExtYmax] ,
identifier[cfg] . identifier[varNumPrfSizes] , identifier[cfg] . identifier[varPrfStdMin] ,
identifier[cfg] . identifier[varPrfStdMax] , identifier[kwUnt] = literal[string] ,
identifier[kwCrd] = identifier[cfg] . identifier[strKwCrd] )
identifier[aryMdlParams] = identifier[aryMdlParams] [ identifier[lgcMdlInc] ,:]
identifier[lstPrfRes] =[ keyword[None] ]* identifier[cfg] . identifier[varPar]
identifier[lstPrcs] =[ keyword[None] ]* identifier[cfg] . identifier[varPar]
identifier[queOut] = identifier[mp] . identifier[Queue] ()
identifier[lstFunc] = identifier[np] . identifier[array_split] ( identifier[aryFunc] , identifier[cfg] . identifier[varPar] )
keyword[del] ( identifier[aryFunc] )
identifier[dctKw] ={ literal[string] : keyword[None] ,
literal[string] : keyword[True] }
keyword[if] (( identifier[cfg] . identifier[strVersion] == literal[string] ) keyword[or] ( identifier[cfg] . identifier[strVersion] == literal[string] )):
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[for] identifier[idxPrc] keyword[in] identifier[range] ( literal[int] , identifier[cfg] . identifier[varPar] ):
identifier[lstPrcs] [ identifier[idxPrc] ]= identifier[mp] . identifier[Process] ( identifier[target] = identifier[find_prf_cpu] ,
identifier[args] =( identifier[idxPrc] ,
identifier[lstFunc] [ identifier[idxPrc] ],
identifier[aryPrfTc] ,
identifier[aryMdlParams] ,
identifier[cfg] . identifier[strVersion] ,
identifier[cfg] . identifier[lgcXval] ,
identifier[cfg] . identifier[varNumXval] ,
identifier[queOut] ),
identifier[kwargs] = identifier[dctKw] ,
)
identifier[lstPrcs] [ identifier[idxPrc] ]. identifier[Daemon] = keyword[True]
keyword[elif] identifier[cfg] . identifier[strVersion] == literal[string] :
identifier[print] ( literal[string] )
keyword[for] identifier[idxPrc] keyword[in] identifier[range] ( literal[int] , identifier[cfg] . identifier[varPar] ):
identifier[lstPrcs] [ identifier[idxPrc] ]= identifier[mp] . identifier[Process] ( identifier[target] = identifier[find_prf_gpu] ,
identifier[args] =( identifier[idxPrc] ,
identifier[aryMdlParams] ,
identifier[lstFunc] [ identifier[idxPrc] ],
identifier[aryPrfTc] ,
identifier[queOut] ),
identifier[kwargs] = identifier[dctKw] ,
)
identifier[lstPrcs] [ identifier[idxPrc] ]. identifier[Daemon] = keyword[True]
keyword[for] identifier[idxPrc] keyword[in] identifier[range] ( literal[int] , identifier[cfg] . identifier[varPar] ):
identifier[lstPrcs] [ identifier[idxPrc] ]. identifier[start] ()
keyword[del] ( identifier[lstFunc] )
keyword[for] identifier[idxPrc] keyword[in] identifier[range] ( literal[int] , identifier[cfg] . identifier[varPar] ):
identifier[lstPrfRes] [ identifier[idxPrc] ]= identifier[queOut] . identifier[get] ( keyword[True] )
keyword[for] identifier[idxPrc] keyword[in] identifier[range] ( literal[int] , identifier[cfg] . identifier[varPar] ):
identifier[lstPrcs] [ identifier[idxPrc] ]. identifier[join] ()
identifier[print] ( literal[string] )
identifier[lstPrfRes] = identifier[sorted] ( identifier[lstPrfRes] )
identifier[aryBstXpos] = identifier[joinRes] ( identifier[lstPrfRes] , identifier[cfg] . identifier[varPar] , literal[int] , identifier[inFormat] = literal[string] )
identifier[aryBstYpos] = identifier[joinRes] ( identifier[lstPrfRes] , identifier[cfg] . identifier[varPar] , literal[int] , identifier[inFormat] = literal[string] )
identifier[aryBstSd] = identifier[joinRes] ( identifier[lstPrfRes] , identifier[cfg] . identifier[varPar] , literal[int] , identifier[inFormat] = literal[string] )
identifier[aryBstR2] = identifier[joinRes] ( identifier[lstPrfRes] , identifier[cfg] . identifier[varPar] , literal[int] , identifier[inFormat] = literal[string] )
identifier[aryBstBts] = identifier[joinRes] ( identifier[lstPrfRes] , identifier[cfg] . identifier[varPar] , literal[int] , identifier[inFormat] = literal[string] )
keyword[if] identifier[np] . identifier[greater] ( identifier[cfg] . identifier[varNumXval] , literal[int] ):
identifier[aryBstR2Single] = identifier[joinRes] ( identifier[lstPrfRes] , identifier[cfg] . identifier[varPar] , literal[int] , identifier[inFormat] = literal[string] )
keyword[del] ( identifier[lstPrfRes] )
identifier[aryPlrAng] = identifier[np] . identifier[arctan2] ( identifier[aryBstYpos] , identifier[aryBstXpos] )
identifier[aryEcc] = identifier[np] . identifier[sqrt] ( identifier[np] . identifier[add] ( identifier[np] . identifier[square] ( identifier[aryBstXpos] ),
identifier[np] . identifier[square] ( identifier[aryBstYpos] )))
identifier[print] ( literal[string] )
keyword[if] identifier[strPathHrf] keyword[is] keyword[not] keyword[None] :
identifier[cfg] . identifier[strPathOut] = identifier[cfg] . identifier[strPathOut] + literal[string]
identifier[aryBstMaps] = identifier[np] . identifier[stack] ([ identifier[aryBstXpos] , identifier[aryBstYpos] , identifier[aryBstSd] , identifier[aryBstR2] ,
identifier[aryPlrAng] , identifier[aryEcc] ], identifier[axis] = literal[int] )
identifier[lstNiiNames] =[ literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ]
keyword[if] identifier[varRat] keyword[is] keyword[not] keyword[None] :
identifier[lstNiiNames] =[ identifier[strNii] + literal[string] + identifier[str] ( identifier[varRat] ) keyword[for] identifier[strNii] keyword[in] identifier[lstNiiNames] ]
identifier[lstNiiNames] =[ identifier[cfg] . identifier[strPathOut] + identifier[strNii] + literal[string] keyword[for] identifier[strNii] keyword[in]
identifier[lstNiiNames] ]
identifier[export_nii] ( identifier[aryBstMaps] , identifier[lstNiiNames] , identifier[aryLgcMsk] , identifier[aryLgcVar] , identifier[tplNiiShp] ,
identifier[aryAff] , identifier[hdrMsk] , identifier[outFormat] = literal[string] )
identifier[lstNiiNames] =[ literal[string] ]
keyword[if] identifier[varRat] keyword[is] keyword[not] keyword[None] :
identifier[lstNiiNames] =[ identifier[strNii] + literal[string] + identifier[str] ( identifier[varRat] ) keyword[for] identifier[strNii] keyword[in] identifier[lstNiiNames] ]
identifier[lstNiiNames] =[ identifier[cfg] . identifier[strPathOut] + identifier[strNii] + literal[string] keyword[for] identifier[strNii] keyword[in]
identifier[lstNiiNames] ]
identifier[export_nii] ( identifier[aryBstBts] , identifier[lstNiiNames] , identifier[aryLgcMsk] , identifier[aryLgcVar] , identifier[tplNiiShp] ,
identifier[aryAff] , identifier[hdrMsk] , identifier[outFormat] = literal[string] )
keyword[if] identifier[np] . identifier[greater] ( identifier[cfg] . identifier[varNumXval] , literal[int] ):
identifier[aryBstR2Single] [ identifier[np] . identifier[where] ( identifier[np] . identifier[less_equal] ( identifier[aryBstR2Single] ,- literal[int] ))]=- literal[int]
identifier[lstNiiNames] =[ literal[string] ]
keyword[if] identifier[varRat] keyword[is] keyword[not] keyword[None] :
identifier[lstNiiNames] =[ identifier[strNii] + literal[string] + identifier[str] ( identifier[varRat] ) keyword[for] identifier[strNii] keyword[in]
identifier[lstNiiNames] ]
identifier[lstNiiNames] =[ identifier[cfg] . identifier[strPathOut] + identifier[strNii] + literal[string] keyword[for] identifier[strNii] keyword[in]
identifier[lstNiiNames] ]
identifier[export_nii] ( identifier[aryBstR2Single] , identifier[lstNiiNames] , identifier[aryLgcMsk] , identifier[aryLgcVar] ,
identifier[tplNiiShp] , identifier[aryAff] , identifier[hdrMsk] , identifier[outFormat] = literal[string] )
identifier[varTme02] = identifier[time] . identifier[time] ()
identifier[varTme03] = identifier[varTme02] - identifier[varTme01]
identifier[print] ( literal[string] + identifier[str] ( identifier[varTme03] )+ literal[string] )
identifier[print] ( literal[string] ) | def pyprf(strCsvCnfg, lgcTest=False, varRat=None, strPathHrf=None):
"""
Main function for pRF mapping.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file.
lgcTest : Boolean
Whether this is a test (pytest). If yes, the absolute path of the pyprf library
will be prepended to config file paths.
varRat : float, default None
Ratio of the size of the suppressive surround to the size of the center pRF.
strPathHrf : str or None
Path to npy file with custom hrf parameters. If None, default
parameters will be used.
"""
# *************************************************************************
# *** Check time
print('---pRF analysis')
varTme01 = time.time()
# *************************************************************************
# *************************************************************************
# *** Preparations
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# Conditional imports:
if cfg.strVersion == 'gpu':
from pyprf_feature.analysis.find_prf_gpu import find_prf_gpu # depends on [control=['if'], data=[]]
if cfg.strVersion == 'cython' or cfg.strVersion == 'numpy':
from pyprf_feature.analysis.find_prf_cpu import find_prf_cpu # depends on [control=['if'], data=[]]
# Convert preprocessing parameters (for temporal smoothing)
# from SI units (i.e. [s]) into units of data array (volumes):
cfg.varSdSmthTmp = np.divide(cfg.varSdSmthTmp, cfg.varTr)
# *************************************************************************
# *************************************************************************
# *** Create or load pRF time course models
# Create model time courses. Also return a logical array for inclusion of
# model parameters, which will be needed later when we create model
# parameters in degrees.
(aryPrfTc, lgcMdlInc) = model_creation(dicCnfg, varRat=varRat, strPathHrf=strPathHrf)
# Deduce the number of features from the pRF time course models array
cfg.varNumFtr = aryPrfTc.shape[1]
# *************************************************************************
# *************************************************************************
# *** Preprocessing
# The model time courses will be preprocessed such that they are smoothed
# (temporally) with the same factor as the data and that they will be z-scored:
aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp)
# The functional data will be masked and demeaned:
(aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp) = prep_func(cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100)
# set the precision of the header to np.float32 so that the prf results
# will be saved in this precision later
hdrMsk.set_data_dtype(np.float32)
# *************************************************************************
# *************************************************************************
# *** Checks
# Make sure that if gpu fitting is used, the number of cross-validations is
# set to 1, not higher
if cfg.strVersion == 'gpu':
strErrMsg = 'Stopping program. ' + 'Cross-validation on GPU is currently not supported. ' + 'Set varNumXval equal to 1 in csv file in order to continue. '
assert cfg.varNumXval == 1, strErrMsg # depends on [control=['if'], data=[]]
# For the GPU version, we need to reduce the parallelisation to 1 now,
# because no separate CPU threads are to be created. We may still use CPU
# parallelisation for preprocessing, which is why the parallelisation
# factor is only reduced now, not earlier.
if cfg.strVersion == 'gpu':
cfg.varPar = 1 # depends on [control=['if'], data=[]]
# Make sure that if cython is used, the number of features is 1 or 2,
# not higher
if cfg.strVersion == 'cython':
strErrMsg = 'Stopping program. ' + 'Cython is not supported for more than 2 features. ' + "Set strVersion equal to 'numpy'."
assert cfg.varNumFtr in [1, 2], strErrMsg # depends on [control=['if'], data=[]]
# Check whether we need to crossvalidate
if np.greater(cfg.varNumXval, 1):
cfg.lgcXval = True # depends on [control=['if'], data=[]]
elif np.equal(cfg.varNumXval, 1):
cfg.lgcXval = False # depends on [control=['if'], data=[]]
strErrMsg = 'Stopping program. ' + 'Set numXval (number of crossvalidation folds) to 1 or higher'
assert np.greater_equal(cfg.varNumXval, 1), strErrMsg
# *************************************************************************
# *** Find pRF models for voxel time courses
print('------Find pRF models for voxel time courses')
# Number of voxels for which pRF finding will be performed:
cfg.varNumVoxInc = aryFunc.shape[0]
print('---------Number of voxels on which pRF finding will be performed: ' + str(cfg.varNumVoxInc))
print('---------Number of features pRF finding will be performed with: ' + str(cfg.varNumFtr))
print('---------Preparing parallel pRF model finding')
# Get array with all possible model parameter combinations:
# [x positions, y positions, sigmas]
aryMdlParams = crt_mdl_prms((int(cfg.varVslSpcSzeX), int(cfg.varVslSpcSzeY)), cfg.varNum1, cfg.varExtXmin, cfg.varExtXmax, cfg.varNum2, cfg.varExtYmin, cfg.varExtYmax, cfg.varNumPrfSizes, cfg.varPrfStdMin, cfg.varPrfStdMax, kwUnt='deg', kwCrd=cfg.strKwCrd)
# Exclude models with prf center outside stimulated area
aryMdlParams = aryMdlParams[lgcMdlInc, :]
# Empty list for results (parameters of best fitting pRF model):
lstPrfRes = [None] * cfg.varPar
# Empty list for processes:
lstPrcs = [None] * cfg.varPar
# Create a queue to put the results in:
queOut = mp.Queue()
# Create list with chunks of functional data for the parallel processes:
lstFunc = np.array_split(aryFunc, cfg.varPar)
# We don't need the original array with the functional data anymore:
del aryFunc
# Prepare dictionary to pass as kwargs to find_prf_cpu
dctKw = {'lgcRstr': None, 'lgcPrint': True}
# CPU version (using numpy or cython for pRF finding):
if cfg.strVersion == 'numpy' or cfg.strVersion == 'cython':
print('---------pRF finding on CPU')
print('---------Creating parallel processes')
# Create processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc] = mp.Process(target=find_prf_cpu, args=(idxPrc, lstFunc[idxPrc], aryPrfTc, aryMdlParams, cfg.strVersion, cfg.lgcXval, cfg.varNumXval, queOut), kwargs=dctKw)
# Daemon (kills processes when exiting):
lstPrcs[idxPrc].Daemon = True # depends on [control=['for'], data=['idxPrc']] # depends on [control=['if'], data=[]]
# GPU version (using tensorflow for pRF finding):
elif cfg.strVersion == 'gpu':
print('---------pRF finding on GPU')
# Create processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc] = mp.Process(target=find_prf_gpu, args=(idxPrc, aryMdlParams, lstFunc[idxPrc], aryPrfTc, queOut), kwargs=dctKw)
# Daemon (kills processes when exiting):
lstPrcs[idxPrc].Daemon = True # depends on [control=['for'], data=['idxPrc']] # depends on [control=['if'], data=[]]
# Start processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc].start() # depends on [control=['for'], data=['idxPrc']]
# Delete reference to list with function data (the data continues to exist
# in child process):
del lstFunc
# Collect results from queue:
for idxPrc in range(0, cfg.varPar):
lstPrfRes[idxPrc] = queOut.get(True) # depends on [control=['for'], data=['idxPrc']]
# Join processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc].join() # depends on [control=['for'], data=['idxPrc']]
# *************************************************************************
# *************************************************************************
# *** Prepare pRF finding results for export
print('---------Prepare pRF finding results for export')
# Put output into correct order:
lstPrfRes = sorted(lstPrfRes)
# collect results from parallelization
aryBstXpos = joinRes(lstPrfRes, cfg.varPar, 1, inFormat='1D')
aryBstYpos = joinRes(lstPrfRes, cfg.varPar, 2, inFormat='1D')
aryBstSd = joinRes(lstPrfRes, cfg.varPar, 3, inFormat='1D')
aryBstR2 = joinRes(lstPrfRes, cfg.varPar, 4, inFormat='1D')
aryBstBts = joinRes(lstPrfRes, cfg.varPar, 5, inFormat='2D')
if np.greater(cfg.varNumXval, 1):
aryBstR2Single = joinRes(lstPrfRes, cfg.varPar, 6, inFormat='2D') # depends on [control=['if'], data=[]]
# Delete unneeded large objects:
del lstPrfRes
# *************************************************************************
# *************************************************************************
# Calculate polar angle map:
aryPlrAng = np.arctan2(aryBstYpos, aryBstXpos)
# Calculate eccentricity map (r = sqrt( x^2 + y^2 ) ):
aryEcc = np.sqrt(np.add(np.square(aryBstXpos), np.square(aryBstYpos)))
# *************************************************************************
# *************************************************************************
# Export each map of best parameters as a 3D nii file
print('---------Exporting results')
# Append 'hrf' to cfg.strPathOut, if fitting was done with custom hrf
if strPathHrf is not None:
cfg.strPathOut = cfg.strPathOut + '_hrf' # depends on [control=['if'], data=[]]
# Concatenate all the best voxel maps
aryBstMaps = np.stack([aryBstXpos, aryBstYpos, aryBstSd, aryBstR2, aryPlrAng, aryEcc], axis=1)
# List with name suffixes of output images:
lstNiiNames = ['_x_pos', '_y_pos', '_SD', '_R2', '_polar_angle', '_eccentricity']
# Append ratio to nii file name, if fitting was done with sup surround
if varRat is not None:
lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames] # depends on [control=['if'], data=['varRat']]
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in lstNiiNames]
# export map results as separate 3D nii files
export_nii(aryBstMaps, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff, hdrMsk, outFormat='3D')
# *************************************************************************
# *************************************************************************
# Save beta parameter estimates for every feature:
# List with name suffixes of output images:
lstNiiNames = ['_Betas']
# Append ratio to nii file name, if fitting was done with sup surround
if varRat is not None:
lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames] # depends on [control=['if'], data=['varRat']]
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in lstNiiNames]
# export beta parameter as a single 4D nii file
export_nii(aryBstBts, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff, hdrMsk, outFormat='4D')
# *************************************************************************
# *************************************************************************
# Save R2 maps from crossvalidation (saved for every run) as nii:
if np.greater(cfg.varNumXval, 1):
# truncate extremely negative R2 values
aryBstR2Single[np.where(np.less_equal(aryBstR2Single, -1.0))] = -1.0
# List with name suffixes of output images:
lstNiiNames = ['_R2_single']
# Append ratio to nii file name, if fitting was done with sup surround
if varRat is not None:
lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames] # depends on [control=['if'], data=['varRat']]
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in lstNiiNames]
# export R2 maps as a single 4D nii file
export_nii(aryBstR2Single, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff, hdrMsk, outFormat='4D') # depends on [control=['if'], data=[]]
# *************************************************************************
# *************************************************************************
# *** Report time
varTme02 = time.time()
varTme03 = varTme02 - varTme01
print('---Elapsed time: ' + str(varTme03) + ' s')
print('---Done.') |
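
The parallel section of `pyprf` above is a fan-out/fan-in: split the voxel array into `cfg.varPar` chunks, start one `mp.Process` per chunk, drain one result per process from a shared `mp.Queue` before joining (joining first can deadlock when large arrays are still sitting in the queue), then sort by process index to restore chunk order. A stripped-down sketch of that pattern, with a toy `fit_chunk` standing in for `find_prf_cpu`:

```python
import multiprocessing as mp

import numpy as np


def fit_chunk(idx_prc, ary_chunk, que_out):
    # Toy stand-in for find_prf_cpu: tag the result with the process index
    # so the parent can restore chunk order after collection.
    que_out.put((idx_prc, ary_chunk.mean(axis=1)))


if __name__ == '__main__':
    ary_func = np.random.rand(100, 50)           # voxels x volumes
    var_par = 4
    lst_func = np.array_split(ary_func, var_par)
    que_out = mp.Queue()
    lst_prcs = [mp.Process(target=fit_chunk,
                           args=(idx, lst_func[idx], que_out))
                for idx in range(var_par)]
    for prc in lst_prcs:
        prc.start()
    lst_res = [que_out.get(True) for _ in range(var_par)]  # drain before join
    for prc in lst_prcs:
        prc.join()
    lst_res = sorted(lst_res, key=lambda res: res[0])      # by process index
    ary_out = np.concatenate([res[1] for res in lst_res])
```
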
def parse_from_import_statement(self):
"""Parse a 'from x import y' statement.
The purpose is to find __future__ statements.
"""
self.log.debug("parsing from/import statement.")
is_future_import = self._parse_from_import_source()
self._parse_from_import_names(is_future_import) | def function[parse_from_import_statement, parameter[self]]:
constant[Parse a 'from x import y' statement.
The purpose is to find __future__ statements.
]
call[name[self].log.debug, parameter[constant[parsing from/import statement.]]]
variable[is_future_import] assign[=] call[name[self]._parse_from_import_source, parameter[]]
call[name[self]._parse_from_import_names, parameter[name[is_future_import]]] | keyword[def] identifier[parse_from_import_statement] ( identifier[self] ):
literal[string]
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
identifier[is_future_import] = identifier[self] . identifier[_parse_from_import_source] ()
identifier[self] . identifier[_parse_from_import_names] ( identifier[is_future_import] ) | def parse_from_import_statement(self):
"""Parse a 'from x import y' statement.
The purpose is to find __future__ statements.
"""
self.log.debug('parsing from/import statement.')
is_future_import = self._parse_from_import_source()
self._parse_from_import_names(is_future_import) |
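
`parse_from_import_statement` belongs to a hand-rolled parser, but the check it exists for (finding `from __future__ import ...` statements in a module) can also be done with the standard-library `ast` module. A small sketch, independent of the parser class above:

```python
import ast


def future_imports(source):
    """Return feature names imported via 'from __future__ import ...'."""
    names = []
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.ImportFrom) and node.module == '__future__':
            names.extend(alias.name for alias in node.names)
    return names


print(future_imports('from __future__ import annotations\nx = 1\n'))
# -> ['annotations']
```
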
def export_public_key(device_type, args):
"""Generate a new pubkey for a new/existing GPG identity."""
log.warning('NOTE: in order to re-generate the exact same GPG key later, '
'run this command with "--time=%d" commandline flag (to set '
'the timestamp of the GPG key manually).', args.time)
c = client.Client(device=device_type())
identity = client.create_identity(user_id=args.user_id,
curve_name=args.ecdsa_curve)
verifying_key = c.pubkey(identity=identity, ecdh=False)
decryption_key = c.pubkey(identity=identity, ecdh=True)
signer_func = functools.partial(c.sign, identity=identity)
if args.subkey: # add as subkey
log.info('adding %s GPG subkey for "%s" to existing key',
args.ecdsa_curve, args.user_id)
# subkey for signing
signing_key = protocol.PublicKey(
curve_name=args.ecdsa_curve, created=args.time,
verifying_key=verifying_key, ecdh=False)
# subkey for encryption
encryption_key = protocol.PublicKey(
curve_name=formats.get_ecdh_curve_name(args.ecdsa_curve),
created=args.time, verifying_key=decryption_key, ecdh=True)
primary_bytes = keyring.export_public_key(args.user_id)
result = encode.create_subkey(primary_bytes=primary_bytes,
subkey=signing_key,
signer_func=signer_func)
result = encode.create_subkey(primary_bytes=result,
subkey=encryption_key,
signer_func=signer_func)
else: # add as primary
log.info('creating new %s GPG primary key for "%s"',
args.ecdsa_curve, args.user_id)
# primary key for signing
primary = protocol.PublicKey(
curve_name=args.ecdsa_curve, created=args.time,
verifying_key=verifying_key, ecdh=False)
# subkey for encryption
subkey = protocol.PublicKey(
curve_name=formats.get_ecdh_curve_name(args.ecdsa_curve),
created=args.time, verifying_key=decryption_key, ecdh=True)
result = encode.create_primary(user_id=args.user_id,
pubkey=primary,
signer_func=signer_func)
result = encode.create_subkey(primary_bytes=result,
subkey=subkey,
signer_func=signer_func)
return protocol.armor(result, 'PUBLIC KEY BLOCK') | def function[export_public_key, parameter[device_type, args]]:
constant[Generate a new pubkey for a new/existing GPG identity.]
call[name[log].warning, parameter[constant[NOTE: in order to re-generate the exact same GPG key later, run this command with "--time=%d" commandline flag (to set the timestamp of the GPG key manually).], name[args].time]]
variable[c] assign[=] call[name[client].Client, parameter[]]
variable[identity] assign[=] call[name[client].create_identity, parameter[]]
variable[verifying_key] assign[=] call[name[c].pubkey, parameter[]]
variable[decryption_key] assign[=] call[name[c].pubkey, parameter[]]
variable[signer_func] assign[=] call[name[functools].partial, parameter[name[c].sign]]
if name[args].subkey begin[:]
call[name[log].info, parameter[constant[adding %s GPG subkey for "%s" to existing key], name[args].ecdsa_curve, name[args].user_id]]
variable[signing_key] assign[=] call[name[protocol].PublicKey, parameter[]]
variable[encryption_key] assign[=] call[name[protocol].PublicKey, parameter[]]
variable[primary_bytes] assign[=] call[name[keyring].export_public_key, parameter[name[args].user_id]]
variable[result] assign[=] call[name[encode].create_subkey, parameter[]]
variable[result] assign[=] call[name[encode].create_subkey, parameter[]]
return[call[name[protocol].armor, parameter[name[result], constant[PUBLIC KEY BLOCK]]]] | keyword[def] identifier[export_public_key] ( identifier[device_type] , identifier[args] ):
literal[string]
identifier[log] . identifier[warning] ( literal[string]
literal[string]
literal[string] , identifier[args] . identifier[time] )
identifier[c] = identifier[client] . identifier[Client] ( identifier[device] = identifier[device_type] ())
identifier[identity] = identifier[client] . identifier[create_identity] ( identifier[user_id] = identifier[args] . identifier[user_id] ,
identifier[curve_name] = identifier[args] . identifier[ecdsa_curve] )
identifier[verifying_key] = identifier[c] . identifier[pubkey] ( identifier[identity] = identifier[identity] , identifier[ecdh] = keyword[False] )
identifier[decryption_key] = identifier[c] . identifier[pubkey] ( identifier[identity] = identifier[identity] , identifier[ecdh] = keyword[True] )
identifier[signer_func] = identifier[functools] . identifier[partial] ( identifier[c] . identifier[sign] , identifier[identity] = identifier[identity] )
keyword[if] identifier[args] . identifier[subkey] :
identifier[log] . identifier[info] ( literal[string] ,
identifier[args] . identifier[ecdsa_curve] , identifier[args] . identifier[user_id] )
identifier[signing_key] = identifier[protocol] . identifier[PublicKey] (
identifier[curve_name] = identifier[args] . identifier[ecdsa_curve] , identifier[created] = identifier[args] . identifier[time] ,
identifier[verifying_key] = identifier[verifying_key] , identifier[ecdh] = keyword[False] )
identifier[encryption_key] = identifier[protocol] . identifier[PublicKey] (
identifier[curve_name] = identifier[formats] . identifier[get_ecdh_curve_name] ( identifier[args] . identifier[ecdsa_curve] ),
identifier[created] = identifier[args] . identifier[time] , identifier[verifying_key] = identifier[decryption_key] , identifier[ecdh] = keyword[True] )
identifier[primary_bytes] = identifier[keyring] . identifier[export_public_key] ( identifier[args] . identifier[user_id] )
identifier[result] = identifier[encode] . identifier[create_subkey] ( identifier[primary_bytes] = identifier[primary_bytes] ,
identifier[subkey] = identifier[signing_key] ,
identifier[signer_func] = identifier[signer_func] )
identifier[result] = identifier[encode] . identifier[create_subkey] ( identifier[primary_bytes] = identifier[result] ,
identifier[subkey] = identifier[encryption_key] ,
identifier[signer_func] = identifier[signer_func] )
keyword[else] :
identifier[log] . identifier[info] ( literal[string] ,
identifier[args] . identifier[ecdsa_curve] , identifier[args] . identifier[user_id] )
identifier[primary] = identifier[protocol] . identifier[PublicKey] (
identifier[curve_name] = identifier[args] . identifier[ecdsa_curve] , identifier[created] = identifier[args] . identifier[time] ,
identifier[verifying_key] = identifier[verifying_key] , identifier[ecdh] = keyword[False] )
identifier[subkey] = identifier[protocol] . identifier[PublicKey] (
identifier[curve_name] = identifier[formats] . identifier[get_ecdh_curve_name] ( identifier[args] . identifier[ecdsa_curve] ),
identifier[created] = identifier[args] . identifier[time] , identifier[verifying_key] = identifier[decryption_key] , identifier[ecdh] = keyword[True] )
identifier[result] = identifier[encode] . identifier[create_primary] ( identifier[user_id] = identifier[args] . identifier[user_id] ,
identifier[pubkey] = identifier[primary] ,
identifier[signer_func] = identifier[signer_func] )
identifier[result] = identifier[encode] . identifier[create_subkey] ( identifier[primary_bytes] = identifier[result] ,
identifier[subkey] = identifier[subkey] ,
identifier[signer_func] = identifier[signer_func] )
keyword[return] identifier[protocol] . identifier[armor] ( identifier[result] , literal[string] ) | def export_public_key(device_type, args):
"""Generate a new pubkey for a new/existing GPG identity."""
log.warning('NOTE: in order to re-generate the exact same GPG key later, run this command with "--time=%d" commandline flag (to set the timestamp of the GPG key manually).', args.time)
c = client.Client(device=device_type())
identity = client.create_identity(user_id=args.user_id, curve_name=args.ecdsa_curve)
verifying_key = c.pubkey(identity=identity, ecdh=False)
decryption_key = c.pubkey(identity=identity, ecdh=True)
signer_func = functools.partial(c.sign, identity=identity)
if args.subkey: # add as subkey
log.info('adding %s GPG subkey for "%s" to existing key', args.ecdsa_curve, args.user_id)
# subkey for signing
signing_key = protocol.PublicKey(curve_name=args.ecdsa_curve, created=args.time, verifying_key=verifying_key, ecdh=False)
# subkey for encryption
encryption_key = protocol.PublicKey(curve_name=formats.get_ecdh_curve_name(args.ecdsa_curve), created=args.time, verifying_key=decryption_key, ecdh=True)
primary_bytes = keyring.export_public_key(args.user_id)
result = encode.create_subkey(primary_bytes=primary_bytes, subkey=signing_key, signer_func=signer_func)
result = encode.create_subkey(primary_bytes=result, subkey=encryption_key, signer_func=signer_func) # depends on [control=['if'], data=[]]
else: # add as primary
log.info('creating new %s GPG primary key for "%s"', args.ecdsa_curve, args.user_id)
# primary key for signing
primary = protocol.PublicKey(curve_name=args.ecdsa_curve, created=args.time, verifying_key=verifying_key, ecdh=False)
# subkey for encryption
subkey = protocol.PublicKey(curve_name=formats.get_ecdh_curve_name(args.ecdsa_curve), created=args.time, verifying_key=decryption_key, ecdh=True)
result = encode.create_primary(user_id=args.user_id, pubkey=primary, signer_func=signer_func)
result = encode.create_subkey(primary_bytes=result, subkey=subkey, signer_func=signer_func)
return protocol.armor(result, 'PUBLIC KEY BLOCK') |
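Hedged usage sketch for the row above: the args field names (user_id, ecdsa_curve, time, subkey) are read off the function body, while the concrete values and the device class are illustrative assumptions, so this will not run without a real hardware backend.

import argparse

args = argparse.Namespace(
    user_id='Alice <alice@example.com>',  # assumed example identity
    ecdsa_curve='nist256p1',              # assumed example curve name
    time=0,                               # pin the key timestamp for reproducibility
    subkey=False,                         # False -> create a new primary key plus encryption subkey
)
# armored = export_public_key(SomeHardwareDevice, args)  # SomeHardwareDevice is hypothetical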
def ExtractBarcodes(read, match,
extract_umi=False,
extract_cell=False,
discard=False,
retain_umi=False):
'''Extract the cell and umi barcodes using a regex.match object
inputs:
    - read = a Record object
- match = regex.match object
- extract_umi and extract_cell = switches to determine whether these
barcodes should be extracted
- discard = is there a region(s) of the sequence which should be
discarded entirely?
- retain_umi = Should UMI sequence be retained on the read sequence
returns:
- cell_barcode = Cell barcode string
- cell_barcode_quals = Cell barcode quality scores
- umi = UMI barcode string.
- umi_quals = UMI barcode quality scores
- new_seq = Read1 sequence after extraction
- new_quals = Read1 qualities after extraction
Barcodes and qualities default to empty strings where extract_cell
or extract_umi are false.
'''
cell_barcode, umi, cell_barcode_quals, umi_quals, new_seq, new_quals = ("",)*6
if not extract_cell and not extract_umi:
U.error("must set either extract_cell and/or extract_umi to true")
groupdict = match.groupdict()
cell_bases = set()
umi_bases = set()
discard_bases = set()
for k in sorted(list(groupdict)):
span = match.span(k)
if extract_cell and k.startswith("cell_"):
cell_barcode += groupdict[k]
cell_bases.update(range(span[0], span[1]))
elif extract_umi and k.startswith("umi_"):
umi += groupdict[k]
umi_bases.update(range(span[0], span[1]))
elif discard and k.startswith("discard_"):
discard_bases.update(range(span[0], span[1]))
    new_seq, new_quals, umi_quals, cell_barcode_quals = extractSeqAndQuals(
read.seq, read.quals, umi_bases, cell_bases, discard_bases, retain_umi)
return (cell_barcode, cell_barcode_quals,
umi, umi_quals,
new_seq, new_quals) | def function[ExtractBarcodes, parameter[read, match, extract_umi, extract_cell, discard, retain_umi]]:
constant[Extract the cell and umi barcodes using a regex.match object
inputs:
- read 1 and read2 = Record objects
- match = regex.match object
- extract_umi and extract_cell = switches to determine whether these
barcodes should be extracted
- discard = is there a region(s) of the sequence which should be
discarded entirely?
- retain_umi = Should UMI sequence be retained on the read sequence
returns:
- cell_barcode = Cell barcode string
- cell_barcode_quals = Cell barcode quality scores
- umi = UMI barcode string.
- umi_quals = UMI barcode quality scores
- new_seq = Read1 sequence after extraction
- new_quals = Read1 qualities after extraction
Barcodes and qualities default to empty strings where extract_cell
or extract_umi are false.
]
<ast.Tuple object at 0x7da2049635b0> assign[=] binary_operation[tuple[[<ast.Constant object at 0x7da204960880>]] * constant[6]]
if <ast.BoolOp object at 0x7da2049601c0> begin[:]
call[name[U].error, parameter[constant[must set either extract_cell and/or extract_umi to true]]]
variable[groupdict] assign[=] call[name[match].groupdict, parameter[]]
variable[cell_bases] assign[=] call[name[set], parameter[]]
variable[umi_bases] assign[=] call[name[set], parameter[]]
variable[discard_bases] assign[=] call[name[set], parameter[]]
for taget[name[k]] in starred[call[name[sorted], parameter[call[name[list], parameter[name[groupdict]]]]]] begin[:]
variable[span] assign[=] call[name[match].span, parameter[name[k]]]
if <ast.BoolOp object at 0x7da204962ce0> begin[:]
<ast.AugAssign object at 0x7da204961f00>
call[name[cell_bases].update, parameter[call[name[range], parameter[call[name[span]][constant[0]], call[name[span]][constant[1]]]]]]
<ast.Tuple object at 0x7da20c9930a0> assign[=] call[name[extractSeqAndQuals], parameter[name[read].seq, name[read].quals, name[umi_bases], name[cell_bases], name[discard_bases], name[retain_umi]]]
return[tuple[[<ast.Name object at 0x7da204566dd0>, <ast.Name object at 0x7da2045650c0>, <ast.Name object at 0x7da204566980>, <ast.Name object at 0x7da2045672b0>, <ast.Name object at 0x7da204567490>, <ast.Name object at 0x7da2045668f0>]]] | keyword[def] identifier[ExtractBarcodes] ( identifier[read] , identifier[match] ,
identifier[extract_umi] = keyword[False] ,
identifier[extract_cell] = keyword[False] ,
identifier[discard] = keyword[False] ,
identifier[retain_umi] = keyword[False] ):
literal[string]
identifier[cell_barcode] , identifier[umi] , identifier[cell_barcode_quals] , identifier[umi_quals] , identifier[new_seq] , identifier[new_quals] =( literal[string] ,)* literal[int]
keyword[if] keyword[not] identifier[extract_cell] keyword[and] keyword[not] identifier[extract_umi] :
identifier[U] . identifier[error] ( literal[string] )
identifier[groupdict] = identifier[match] . identifier[groupdict] ()
identifier[cell_bases] = identifier[set] ()
identifier[umi_bases] = identifier[set] ()
identifier[discard_bases] = identifier[set] ()
keyword[for] identifier[k] keyword[in] identifier[sorted] ( identifier[list] ( identifier[groupdict] )):
identifier[span] = identifier[match] . identifier[span] ( identifier[k] )
keyword[if] identifier[extract_cell] keyword[and] identifier[k] . identifier[startswith] ( literal[string] ):
identifier[cell_barcode] += identifier[groupdict] [ identifier[k] ]
identifier[cell_bases] . identifier[update] ( identifier[range] ( identifier[span] [ literal[int] ], identifier[span] [ literal[int] ]))
keyword[elif] identifier[extract_umi] keyword[and] identifier[k] . identifier[startswith] ( literal[string] ):
identifier[umi] += identifier[groupdict] [ identifier[k] ]
identifier[umi_bases] . identifier[update] ( identifier[range] ( identifier[span] [ literal[int] ], identifier[span] [ literal[int] ]))
keyword[elif] identifier[discard] keyword[and] identifier[k] . identifier[startswith] ( literal[string] ):
identifier[discard_bases] . identifier[update] ( identifier[range] ( identifier[span] [ literal[int] ], identifier[span] [ literal[int] ]))
identifier[new_seq] , identifier[new_quals] , identifier[umi_quals] , identifier[cell_quals] = identifier[extractSeqAndQuals] (
identifier[read] . identifier[seq] , identifier[read] . identifier[quals] , identifier[umi_bases] , identifier[cell_bases] , identifier[discard_bases] , identifier[retain_umi] )
keyword[return] ( identifier[cell_barcode] , identifier[cell_barcode_quals] ,
identifier[umi] , identifier[umi_quals] ,
identifier[new_seq] , identifier[new_quals] ) | def ExtractBarcodes(read, match, extract_umi=False, extract_cell=False, discard=False, retain_umi=False):
"""Extract the cell and umi barcodes using a regex.match object
inputs:
- read 1 and read2 = Record objects
- match = regex.match object
- extract_umi and extract_cell = switches to determine whether these
barcodes should be extracted
- discard = is there a region(s) of the sequence which should be
discarded entirely?
- retain_umi = Should UMI sequence be retained on the read sequence
returns:
- cell_barcode = Cell barcode string
- cell_barcode_quals = Cell barcode quality scores
- umi = UMI barcode string.
- umi_quals = UMI barcode quality scores
- new_seq = Read1 sequence after extraction
- new_quals = Read1 qualities after extraction
Barcodes and qualities default to empty strings where extract_cell
or extract_umi are false.
"""
(cell_barcode, umi, cell_barcode_quals, umi_quals, new_seq, new_quals) = ('',) * 6
if not extract_cell and (not extract_umi):
U.error('must set either extract_cell and/or extract_umi to true') # depends on [control=['if'], data=[]]
groupdict = match.groupdict()
cell_bases = set()
umi_bases = set()
discard_bases = set()
for k in sorted(list(groupdict)):
span = match.span(k)
if extract_cell and k.startswith('cell_'):
cell_barcode += groupdict[k]
cell_bases.update(range(span[0], span[1])) # depends on [control=['if'], data=[]]
elif extract_umi and k.startswith('umi_'):
umi += groupdict[k]
umi_bases.update(range(span[0], span[1])) # depends on [control=['if'], data=[]]
elif discard and k.startswith('discard_'):
discard_bases.update(range(span[0], span[1])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']]
    (new_seq, new_quals, umi_quals, cell_barcode_quals) = extractSeqAndQuals(read.seq, read.quals, umi_bases, cell_bases, discard_bases, retain_umi)
return (cell_barcode, cell_barcode_quals, umi, umi_quals, new_seq, new_quals) |
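ExtractBarcodes keys its behaviour off the prefixes of the regex group names (cell_, umi_, discard_). A minimal runnable sketch of that naming convention, using stdlib re and an invented pattern:

import re

pattern = re.compile('(?P<cell_1>.{8})(?P<umi_1>.{4})(?P<discard_1>GGG)')
m = pattern.match('ACGTACGTTTTTGGGAAACCC')
print(sorted(m.groupdict()))  # ['cell_1', 'discard_1', 'umi_1']
print(m.span('umi_1'))        # (8, 12) -> the positions fed into umi_bases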
def walkdirs(self, *args, **kwargs):
""" D.walkdirs() -> iterator over subdirs, recursively.
"""
return (
item
for item in self.walk(*args, **kwargs)
if item.isdir()
) | def function[walkdirs, parameter[self]]:
constant[ D.walkdirs() -> iterator over subdirs, recursively.
]
return[<ast.GeneratorExp object at 0x7da20cabc0a0>] | keyword[def] identifier[walkdirs] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] (
identifier[item]
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[walk] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[item] . identifier[isdir] ()
) | def walkdirs(self, *args, **kwargs):
""" D.walkdirs() -> iterator over subdirs, recursively.
"""
return (item for item in self.walk(*args, **kwargs) if item.isdir()) |
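A stdlib approximation of what walkdirs() yields -- a pathlib-based sketch of the behaviour, not the library's own implementation:

import pathlib

def walkdirs_sketch(root='.'):
    # recursive generator over subdirectories only, mirroring walk() + isdir()
    return (p for p in pathlib.Path(root).rglob('*') if p.is_dir())

for d in walkdirs_sketch('.'):
    print(d)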
def canonical_url(app, pagename, templatename, context, doctree):
"""Build the canonical URL for a page. Appends the path for the
page to the base URL specified by the
``html_context["canonical_url"]`` config and stores it in
``html_context["page_canonical_url"]``.
"""
base = context.get("canonical_url")
if not base:
return
target = app.builder.get_target_uri(pagename)
context["page_canonical_url"] = base + target | def function[canonical_url, parameter[app, pagename, templatename, context, doctree]]:
constant[Build the canonical URL for a page. Appends the path for the
page to the base URL specified by the
``html_context["canonical_url"]`` config and stores it in
``html_context["page_canonical_url"]``.
]
variable[base] assign[=] call[name[context].get, parameter[constant[canonical_url]]]
if <ast.UnaryOp object at 0x7da18bc73790> begin[:]
return[None]
variable[target] assign[=] call[name[app].builder.get_target_uri, parameter[name[pagename]]]
call[name[context]][constant[page_canonical_url]] assign[=] binary_operation[name[base] + name[target]] | keyword[def] identifier[canonical_url] ( identifier[app] , identifier[pagename] , identifier[templatename] , identifier[context] , identifier[doctree] ):
literal[string]
identifier[base] = identifier[context] . identifier[get] ( literal[string] )
keyword[if] keyword[not] identifier[base] :
keyword[return]
identifier[target] = identifier[app] . identifier[builder] . identifier[get_target_uri] ( identifier[pagename] )
identifier[context] [ literal[string] ]= identifier[base] + identifier[target] | def canonical_url(app, pagename, templatename, context, doctree):
"""Build the canonical URL for a page. Appends the path for the
page to the base URL specified by the
``html_context["canonical_url"]`` config and stores it in
``html_context["page_canonical_url"]``.
"""
base = context.get('canonical_url')
if not base:
return # depends on [control=['if'], data=[]]
target = app.builder.get_target_uri(pagename)
context['page_canonical_url'] = base + target |
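The signature matches Sphinx's 'html-page-context' event, so a conf.py would wire the handler up roughly as below; the base URL is an assumed example value.

# conf.py (sketch)
html_context = {'canonical_url': 'https://docs.example.com/'}

def setup(app):
    app.connect('html-page-context', canonical_url)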
def move_column(column=1, file=sys.stdout):
""" Move the cursor to the specified column, default 1.
Esc[<column>G
"""
move.column(column).write(file=file) | def function[move_column, parameter[column, file]]:
constant[ Move the cursor to the specified column, default 1.
Esc[<column>G
]
call[call[name[move].column, parameter[name[column]]].write, parameter[]] | keyword[def] identifier[move_column] ( identifier[column] = literal[int] , identifier[file] = identifier[sys] . identifier[stdout] ):
literal[string]
identifier[move] . identifier[column] ( identifier[column] ). identifier[write] ( identifier[file] = identifier[file] ) | def move_column(column=1, file=sys.stdout):
""" Move the cursor to the specified column, default 1.
Esc[<column>G
"""
move.column(column).write(file=file) |
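A self-contained equivalent of the escape sequence the docstring documents (Esc[<column>G); the real call goes through the library's move helper.

import sys

def move_column_raw(column=1, file=sys.stdout):
    file.write('\x1b[%dG' % column)  # CSI n G: cursor horizontal absolute
    file.flush()

move_column_raw(20)  # jumps the cursor to column 20 on an ANSI terminal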
def time2isoz(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
representing Universal Time (UTC, aka GMT). An example of this format is:
1994-11-24 08:49:37Z
"""
if t is None:
dt = datetime.datetime.utcnow()
else:
dt = datetime.datetime.utcfromtimestamp(t)
return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) | def function[time2isoz, parameter[t]]:
constant[Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
representing Universal Time (UTC, aka GMT). An example of this format is:
1994-11-24 08:49:37Z
]
if compare[name[t] is constant[None]] begin[:]
variable[dt] assign[=] call[name[datetime].datetime.utcnow, parameter[]]
return[binary_operation[constant[%04d-%02d-%02d %02d:%02d:%02dZ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18dc9b280>, <ast.Attribute object at 0x7da18dc98700>, <ast.Attribute object at 0x7da18dc9a530>, <ast.Attribute object at 0x7da18dc9ab90>, <ast.Attribute object at 0x7da18dc9ada0>, <ast.Attribute object at 0x7da18dc9b490>]]]] | keyword[def] identifier[time2isoz] ( identifier[t] = keyword[None] ):
literal[string]
keyword[if] identifier[t] keyword[is] keyword[None] :
identifier[dt] = identifier[datetime] . identifier[datetime] . identifier[utcnow] ()
keyword[else] :
identifier[dt] = identifier[datetime] . identifier[datetime] . identifier[utcfromtimestamp] ( identifier[t] )
keyword[return] literal[string] %(
identifier[dt] . identifier[year] , identifier[dt] . identifier[month] , identifier[dt] . identifier[day] , identifier[dt] . identifier[hour] , identifier[dt] . identifier[minute] , identifier[dt] . identifier[second] ) | def time2isoz(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
representing Universal Time (UTC, aka GMT). An example of this format is:
1994-11-24 08:49:37Z
"""
if t is None:
dt = datetime.datetime.utcnow() # depends on [control=['if'], data=[]]
else:
dt = datetime.datetime.utcfromtimestamp(t)
return '%04d-%02d-%02d %02d:%02d:%02dZ' % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) |
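Runnable check reproducing the docstring's own example:

import calendar

t = calendar.timegm((1994, 11, 24, 8, 49, 37, 0, 0, 0))  # 1994-11-24 08:49:37 UTC
print(time2isoz(t))  # 1994-11-24 08:49:37Z
print(time2isoz())   # the current UTC time in the same format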
def CreateTaskStorage(self, task):
"""Creates a task storage.
Args:
task (Task): task.
Returns:
FakeStorageWriter: storage writer.
Raises:
IOError: if the task storage already exists.
OSError: if the task storage already exists.
"""
if task.identifier in self._task_storage_writers:
raise IOError('Storage writer for task: {0:s} already exists.'.format(
task.identifier))
storage_writer = FakeStorageWriter(
self._session, storage_type=definitions.STORAGE_TYPE_TASK, task=task)
self._task_storage_writers[task.identifier] = storage_writer
return storage_writer | def function[CreateTaskStorage, parameter[self, task]]:
constant[Creates a task storage.
Args:
task (Task): task.
Returns:
FakeStorageWriter: storage writer.
Raises:
IOError: if the task storage already exists.
OSError: if the task storage already exists.
]
if compare[name[task].identifier in name[self]._task_storage_writers] begin[:]
<ast.Raise object at 0x7da18c4cdde0>
variable[storage_writer] assign[=] call[name[FakeStorageWriter], parameter[name[self]._session]]
call[name[self]._task_storage_writers][name[task].identifier] assign[=] name[storage_writer]
return[name[storage_writer]] | keyword[def] identifier[CreateTaskStorage] ( identifier[self] , identifier[task] ):
literal[string]
keyword[if] identifier[task] . identifier[identifier] keyword[in] identifier[self] . identifier[_task_storage_writers] :
keyword[raise] identifier[IOError] ( literal[string] . identifier[format] (
identifier[task] . identifier[identifier] ))
identifier[storage_writer] = identifier[FakeStorageWriter] (
identifier[self] . identifier[_session] , identifier[storage_type] = identifier[definitions] . identifier[STORAGE_TYPE_TASK] , identifier[task] = identifier[task] )
identifier[self] . identifier[_task_storage_writers] [ identifier[task] . identifier[identifier] ]= identifier[storage_writer]
keyword[return] identifier[storage_writer] | def CreateTaskStorage(self, task):
"""Creates a task storage.
Args:
task (Task): task.
Returns:
FakeStorageWriter: storage writer.
Raises:
IOError: if the task storage already exists.
OSError: if the task storage already exists.
"""
if task.identifier in self._task_storage_writers:
raise IOError('Storage writer for task: {0:s} already exists.'.format(task.identifier)) # depends on [control=['if'], data=[]]
storage_writer = FakeStorageWriter(self._session, storage_type=definitions.STORAGE_TYPE_TASK, task=task)
self._task_storage_writers[task.identifier] = storage_writer
return storage_writer |
def _set_standard(self, v, load=False):
"""
Setter method for standard, mapped from YANG variable /ipv6_acl/ipv6/access_list/standard (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_standard is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_standard() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",standard.standard, yang_name="standard", rest_name="standard", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Standard IP ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'ip6_std_acl_cp', u'cli-mode-name': u'conf-ip6acl-std'}}), is_container='list', yang_name="standard", rest_name="standard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Standard IP ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'ip6_std_acl_cp', u'cli-mode-name': u'conf-ip6acl-std'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """standard must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",standard.standard, yang_name="standard", rest_name="standard", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Standard IP ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'ip6_std_acl_cp', u'cli-mode-name': u'conf-ip6acl-std'}}), is_container='list', yang_name="standard", rest_name="standard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Standard IP ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'ip6_std_acl_cp', u'cli-mode-name': u'conf-ip6acl-std'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='list', is_config=True)""",
})
self.__standard = t
if hasattr(self, '_set'):
self._set() | def function[_set_standard, parameter[self, v, load]]:
constant[
Setter method for standard, mapped from YANG variable /ipv6_acl/ipv6/access_list/standard (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_standard is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_standard() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da20e9605b0>
name[self].__standard assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_standard] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[YANGListType] ( literal[string] , identifier[standard] . identifier[standard] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[is_container] = literal[string] , identifier[user_ordered] = keyword[False] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[yang_keys] = literal[string] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[string] }}), identifier[is_container] = literal[string] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__standard] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_standard(self, v, load=False):
"""
Setter method for standard, mapped from YANG variable /ipv6_acl/ipv6/access_list/standard (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_standard is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_standard() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=YANGListType('name', standard.standard, yang_name='standard', rest_name='standard', parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Standard IP ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'ip6_std_acl_cp', u'cli-mode-name': u'conf-ip6acl-std'}}), is_container='list', yang_name='standard', rest_name='standard', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Standard IP ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'ip6_std_acl_cp', u'cli-mode-name': u'conf-ip6acl-std'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='list', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'standard must be of a type compatible with list', 'defined-type': 'list', 'generated-type': 'YANGDynClass(base=YANGListType("name",standard.standard, yang_name="standard", rest_name="standard", parent=self, is_container=\'list\', user_ordered=False, path_helper=self._path_helper, yang_keys=\'name\', extensions={u\'tailf-common\': {u\'info\': u\'Standard IP ACL\', u\'cli-no-key-completion\': None, u\'cli-full-no\': None, u\'cli-suppress-list-no\': None, u\'cli-suppress-key-abbreviation\': None, u\'cli-full-command\': None, u\'callpoint\': u\'ip6_std_acl_cp\', u\'cli-mode-name\': u\'conf-ip6acl-std\'}}), is_container=\'list\', yang_name="standard", rest_name="standard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Standard IP ACL\', u\'cli-no-key-completion\': None, u\'cli-full-no\': None, u\'cli-suppress-list-no\': None, u\'cli-suppress-key-abbreviation\': None, u\'cli-full-command\': None, u\'callpoint\': u\'ip6_std_acl_cp\', u\'cli-mode-name\': u\'conf-ip6acl-std\'}}, namespace=\'urn:brocade.com:mgmt:brocade-ipv6-access-list\', defining_module=\'brocade-ipv6-access-list\', yang_type=\'list\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__standard = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def invoice_access(request, access_code):
''' Redirects to an invoice for the attendee that matches the given access
code, if any.
If the attendee has multiple invoices, we use the following tie-break:
- If there's an unpaid invoice, show that, otherwise
- If there's a paid invoice, show the most recent one, otherwise
    - Show the most recent invoice of all
Arguments:
access_code (castable to int): The access code for the user whose
invoice you want to see.
Returns:
redirect:
Redirect to the selected invoice for that user.
Raises:
Http404: If the user has no invoices.
'''
invoices = commerce.Invoice.objects.filter(
user__attendee__access_code=access_code,
).order_by("-issue_time")
if not invoices:
raise Http404()
unpaid = invoices.filter(status=commerce.Invoice.STATUS_UNPAID)
paid = invoices.filter(status=commerce.Invoice.STATUS_PAID)
if unpaid:
invoice = unpaid[0] # (should only be 1 unpaid invoice?)
elif paid:
invoice = paid[0] # Most recent paid invoice
else:
invoice = invoices[0] # Most recent of any invoices
return redirect("invoice", invoice.id, access_code) | def function[invoice_access, parameter[request, access_code]]:
constant[ Redirects to an invoice for the attendee that matches the given access
code, if any.
If the attendee has multiple invoices, we use the following tie-break:
- If there's an unpaid invoice, show that, otherwise
- If there's a paid invoice, show the most recent one, otherwise
    - Show the most recent invoice of all
Arguments:
access_code (castable to int): The access code for the user whose
invoice you want to see.
Returns:
redirect:
Redirect to the selected invoice for that user.
Raises:
Http404: If the user has no invoices.
]
variable[invoices] assign[=] call[call[name[commerce].Invoice.objects.filter, parameter[]].order_by, parameter[constant[-issue_time]]]
if <ast.UnaryOp object at 0x7da207f006d0> begin[:]
<ast.Raise object at 0x7da207f008e0>
variable[unpaid] assign[=] call[name[invoices].filter, parameter[]]
variable[paid] assign[=] call[name[invoices].filter, parameter[]]
if name[unpaid] begin[:]
variable[invoice] assign[=] call[name[unpaid]][constant[0]]
return[call[name[redirect], parameter[constant[invoice], name[invoice].id, name[access_code]]]] | keyword[def] identifier[invoice_access] ( identifier[request] , identifier[access_code] ):
literal[string]
identifier[invoices] = identifier[commerce] . identifier[Invoice] . identifier[objects] . identifier[filter] (
identifier[user__attendee__access_code] = identifier[access_code] ,
). identifier[order_by] ( literal[string] )
keyword[if] keyword[not] identifier[invoices] :
keyword[raise] identifier[Http404] ()
identifier[unpaid] = identifier[invoices] . identifier[filter] ( identifier[status] = identifier[commerce] . identifier[Invoice] . identifier[STATUS_UNPAID] )
identifier[paid] = identifier[invoices] . identifier[filter] ( identifier[status] = identifier[commerce] . identifier[Invoice] . identifier[STATUS_PAID] )
keyword[if] identifier[unpaid] :
identifier[invoice] = identifier[unpaid] [ literal[int] ]
keyword[elif] identifier[paid] :
identifier[invoice] = identifier[paid] [ literal[int] ]
keyword[else] :
identifier[invoice] = identifier[invoices] [ literal[int] ]
keyword[return] identifier[redirect] ( literal[string] , identifier[invoice] . identifier[id] , identifier[access_code] ) | def invoice_access(request, access_code):
""" Redirects to an invoice for the attendee that matches the given access
code, if any.
If the attendee has multiple invoices, we use the following tie-break:
- If there's an unpaid invoice, show that, otherwise
- If there's a paid invoice, show the most recent one, otherwise
    - Show the most recent invoice of all
Arguments:
access_code (castable to int): The access code for the user whose
invoice you want to see.
Returns:
redirect:
Redirect to the selected invoice for that user.
Raises:
Http404: If the user has no invoices.
"""
invoices = commerce.Invoice.objects.filter(user__attendee__access_code=access_code).order_by('-issue_time')
if not invoices:
raise Http404() # depends on [control=['if'], data=[]]
unpaid = invoices.filter(status=commerce.Invoice.STATUS_UNPAID)
paid = invoices.filter(status=commerce.Invoice.STATUS_PAID)
if unpaid:
invoice = unpaid[0] # (should only be 1 unpaid invoice?) # depends on [control=['if'], data=[]]
elif paid:
invoice = paid[0] # Most recent paid invoice # depends on [control=['if'], data=[]]
else:
invoice = invoices[0] # Most recent of any invoices
return redirect('invoice', invoice.id, access_code) |
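Hedged wiring sketch for a Django URLconf; the route path and name are assumptions, only the access_code argument comes from the view above.

# urls.py (sketch)
from django.urls import path

urlpatterns = [
    path('invoice-access/<access_code>/', invoice_access, name='invoice-access'),
]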
def convolve_filter(signal, impulse_response):
"""
    Convolve the two input signals; if impulse_response is None,
    returns the unaltered signal.
"""
if impulse_response is not None:
        adjusted_signal = fftconvolve(signal, impulse_response)
        # trim the convolution tails so the output stays aligned with the input
        adjusted_signal = adjusted_signal[
            len(impulse_response) // 2:len(adjusted_signal) - len(impulse_response) // 2 + 1]
return adjusted_signal
else:
return signal | def function[convolve_filter, parameter[signal, impulse_response]]:
constant[
    Convolve the two input signals; if impulse_response is None,
    returns the unaltered signal.
]
if compare[name[impulse_response] is_not constant[None]] begin[:]
variable[adjusted_signal] assign[=] call[name[fftconvolve], parameter[name[signal], name[impulse_response]]]
variable[adjusted_signal] assign[=] call[name[adjusted_signal]][<ast.Slice object at 0x7da207f997e0>]
return[name[adjusted_signal]] | keyword[def] identifier[convolve_filter] ( identifier[signal] , identifier[impulse_response] ):
literal[string]
keyword[if] identifier[impulse_response] keyword[is] keyword[not] keyword[None] :
identifier[adjusted_signal] = identifier[fftconvolve] ( identifier[signal] , identifier[impulse_response] )
identifier[adjusted_signal] = identifier[adjusted_signal] [
identifier[len] ( identifier[impulse_response] )/ literal[int] : identifier[len] ( identifier[adjusted_signal] )- identifier[len] ( identifier[impulse_response] )/ literal[int] + literal[int] ]
keyword[return] identifier[adjusted_signal]
keyword[else] :
keyword[return] identifier[signal] | def convolve_filter(signal, impulse_response):
"""
    Convolve the two input signals; if impulse_response is None,
    returns the unaltered signal.
"""
if impulse_response is not None:
        adjusted_signal = fftconvolve(signal, impulse_response)
        # trim the convolution tails so the output stays aligned with the input
        adjusted_signal = adjusted_signal[len(impulse_response) // 2:len(adjusted_signal) - len(impulse_response) // 2 + 1]
return adjusted_signal # depends on [control=['if'], data=['impulse_response']]
else:
return signal |
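Quick runnable check: with an even-length kernel the centre slice returns exactly len(signal) samples (an odd-length kernel returns one extra).

import numpy as np
from scipy.signal import fftconvolve

sig = np.sin(np.linspace(0, 10, 100))
ir = np.ones(4) / 4.0          # 4-tap moving average
out = convolve_filter(sig, ir)
assert out.shape == sig.shape  # full conv length 103, trimmed back to 100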
def effective_len(self):
"""
Get the length of the sequence if N's are disregarded.
"""
if self._effective_len is None:
self._effective_len = len([nuc for nuc in self.sequenceData
if nuc != "N" and nuc != "n"])
return self._effective_len | def function[effective_len, parameter[self]]:
constant[
Get the length of the sequence if N's are disregarded.
]
if compare[name[self]._effective_len is constant[None]] begin[:]
name[self]._effective_len assign[=] call[name[len], parameter[<ast.ListComp object at 0x7da1b1461540>]]
return[name[self]._effective_len] | keyword[def] identifier[effective_len] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_effective_len] keyword[is] keyword[None] :
identifier[self] . identifier[_effective_len] = identifier[len] ([ identifier[nuc] keyword[for] identifier[nuc] keyword[in] identifier[self] . identifier[sequenceData]
keyword[if] identifier[nuc] != literal[string] keyword[and] identifier[nuc] != literal[string] ])
keyword[return] identifier[self] . identifier[_effective_len] | def effective_len(self):
"""
Get the length of the sequence if N's are disregarded.
"""
if self._effective_len is None:
self._effective_len = len([nuc for nuc in self.sequenceData if nuc != 'N' and nuc != 'n']) # depends on [control=['if'], data=[]]
return self._effective_len |
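Minimal stand-in exercising the lazy count; the class is hypothetical, only the sequenceData and _effective_len attributes are assumed from the method above.

class Seq:
    effective_len = effective_len  # reuse the function defined above as a method
    def __init__(self, data):
        self.sequenceData = data
        self._effective_len = None

s = Seq('ACGTNNacgtn')
print(s.effective_len())  # 8 -- upper- and lower-case N bases are skipped
print(s._effective_len)   # 8 -- cached after the first call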
def imagetransformer_base_10l_16h_big_uncond_dr01_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big_dr01()
# num_hidden_layers
hparams.num_decoder_layers = 10
hparams.num_heads = 16
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.batch_size = 1
hparams.layer_prepostprocess_dropout = 0.1
return hparams | def function[imagetransformer_base_10l_16h_big_uncond_dr01_imgnet, parameter[]]:
    constant[big 1d model for unconditional image generation.]
variable[hparams] assign[=] call[name[imagetransformer_base_14l_8h_big_dr01], parameter[]]
name[hparams].num_decoder_layers assign[=] constant[10]
name[hparams].num_heads assign[=] constant[16]
name[hparams].hidden_size assign[=] constant[1024]
name[hparams].filter_size assign[=] constant[4096]
name[hparams].batch_size assign[=] constant[1]
name[hparams].layer_prepostprocess_dropout assign[=] constant[0.1]
return[name[hparams]] | keyword[def] identifier[imagetransformer_base_10l_16h_big_uncond_dr01_imgnet] ():
literal[string]
identifier[hparams] = identifier[imagetransformer_base_14l_8h_big_dr01] ()
identifier[hparams] . identifier[num_decoder_layers] = literal[int]
identifier[hparams] . identifier[num_heads] = literal[int]
identifier[hparams] . identifier[hidden_size] = literal[int]
identifier[hparams] . identifier[filter_size] = literal[int]
identifier[hparams] . identifier[batch_size] = literal[int]
identifier[hparams] . identifier[layer_prepostprocess_dropout] = literal[int]
keyword[return] identifier[hparams] | def imagetransformer_base_10l_16h_big_uncond_dr01_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big_dr01()
# num_hidden_layers
hparams.num_decoder_layers = 10
hparams.num_heads = 16
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.batch_size = 1
hparams.layer_prepostprocess_dropout = 0.1
return hparams |
def updateVocalAuto(self, component, files):
"""Updates the auto-parameter with selected *component* to have
*files*. Adds auto-parameter if not already present. The auto-parameter is expected to have only one selected
        component (the one given). If fewer than two files are given, removes
        the auto-parameter from the model.
:param component: Component that the auto-parameter is modifying
:type component: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:param files: list of file names to act as the auto-parameter list
:type files: list<str>
"""
auto_model = self.model().autoParams()
row = auto_model.fileParameter(component)
if len(files) > 1:
clean_component = self.model().data(self.model().indexByComponent(component), AbstractDragView.DragRole)
p = {'parameter' : 'filename',
'names' : files,
'selection' : [clean_component]
}
if row is None:
auto_model.insertItem(auto_model.index(0,0), p)
else:
auto_model.setData(auto_model.index(row,0),p)
elif row is not None:
# remove the autoparameter
auto_model.removeRow(row)
# if row is none and len(files) == 1 then we don't need to do anything
self.countChanged.emit() | def function[updateVocalAuto, parameter[self, component, files]]:
constant[Updates the auto-parameter with selected *component* to have
*files*. Adds auto-parameter if not already present. The auto-parameter is expected to have only one selected
    component (the one given). If fewer than two files are given, removes
    the auto-parameter from the model.
:param component: Component that the auto-parameter is modifying
:type component: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:param files: list of file names to act as the auto-parameter list
:type files: list<str>
]
variable[auto_model] assign[=] call[call[name[self].model, parameter[]].autoParams, parameter[]]
variable[row] assign[=] call[name[auto_model].fileParameter, parameter[name[component]]]
if compare[call[name[len], parameter[name[files]]] greater[>] constant[1]] begin[:]
variable[clean_component] assign[=] call[call[name[self].model, parameter[]].data, parameter[call[call[name[self].model, parameter[]].indexByComponent, parameter[name[component]]], name[AbstractDragView].DragRole]]
variable[p] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d4d780>, <ast.Constant object at 0x7da1b1d4d900>, <ast.Constant object at 0x7da1b1d4d090>], [<ast.Constant object at 0x7da1b1d4dd20>, <ast.Name object at 0x7da1b1d4df30>, <ast.List object at 0x7da1b1d4f1f0>]]
if compare[name[row] is constant[None]] begin[:]
call[name[auto_model].insertItem, parameter[call[name[auto_model].index, parameter[constant[0], constant[0]]], name[p]]]
call[name[self].countChanged.emit, parameter[]] | keyword[def] identifier[updateVocalAuto] ( identifier[self] , identifier[component] , identifier[files] ):
literal[string]
identifier[auto_model] = identifier[self] . identifier[model] (). identifier[autoParams] ()
identifier[row] = identifier[auto_model] . identifier[fileParameter] ( identifier[component] )
keyword[if] identifier[len] ( identifier[files] )> literal[int] :
identifier[clean_component] = identifier[self] . identifier[model] (). identifier[data] ( identifier[self] . identifier[model] (). identifier[indexByComponent] ( identifier[component] ), identifier[AbstractDragView] . identifier[DragRole] )
identifier[p] ={ literal[string] : literal[string] ,
literal[string] : identifier[files] ,
literal[string] :[ identifier[clean_component] ]
}
keyword[if] identifier[row] keyword[is] keyword[None] :
identifier[auto_model] . identifier[insertItem] ( identifier[auto_model] . identifier[index] ( literal[int] , literal[int] ), identifier[p] )
keyword[else] :
identifier[auto_model] . identifier[setData] ( identifier[auto_model] . identifier[index] ( identifier[row] , literal[int] ), identifier[p] )
keyword[elif] identifier[row] keyword[is] keyword[not] keyword[None] :
identifier[auto_model] . identifier[removeRow] ( identifier[row] )
identifier[self] . identifier[countChanged] . identifier[emit] () | def updateVocalAuto(self, component, files):
"""Updates the auto-parameter with selected *component* to have
*files*. Adds auto-parameter if not already present. The auto-parameter is expected to have only one selected
    component (the one given). If fewer than two files are given, removes
    the auto-parameter from the model.
:param component: Component that the auto-parameter is modifying
:type component: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:param files: list of file names to act as the auto-parameter list
:type files: list<str>
"""
auto_model = self.model().autoParams()
row = auto_model.fileParameter(component)
if len(files) > 1:
clean_component = self.model().data(self.model().indexByComponent(component), AbstractDragView.DragRole)
p = {'parameter': 'filename', 'names': files, 'selection': [clean_component]}
if row is None:
auto_model.insertItem(auto_model.index(0, 0), p) # depends on [control=['if'], data=[]]
else:
auto_model.setData(auto_model.index(row, 0), p) # depends on [control=['if'], data=[]]
elif row is not None:
# remove the autoparameter
auto_model.removeRow(row) # depends on [control=['if'], data=['row']]
# if row is none and len(files) == 1 then we don't need to do anything
self.countChanged.emit() |
def get_location(vm_=None):
'''
Return the joyent data center to use, in this order:
- CLI parameter
- VM parameter
- Cloud profile setting
'''
return __opts__.get(
'location',
config.get_cloud_config_value(
'location',
vm_ or get_configured_provider(),
__opts__,
default=DEFAULT_LOCATION,
search_global=False
)
) | def function[get_location, parameter[vm_]]:
constant[
Return the joyent data center to use, in this order:
- CLI parameter
- VM parameter
- Cloud profile setting
]
return[call[name[__opts__].get, parameter[constant[location], call[name[config].get_cloud_config_value, parameter[constant[location], <ast.BoolOp object at 0x7da1b1f767a0>, name[__opts__]]]]]] | keyword[def] identifier[get_location] ( identifier[vm_] = keyword[None] ):
literal[string]
keyword[return] identifier[__opts__] . identifier[get] (
literal[string] ,
identifier[config] . identifier[get_cloud_config_value] (
literal[string] ,
identifier[vm_] keyword[or] identifier[get_configured_provider] (),
identifier[__opts__] ,
identifier[default] = identifier[DEFAULT_LOCATION] ,
identifier[search_global] = keyword[False]
)
) | def get_location(vm_=None):
"""
Return the joyent data center to use, in this order:
- CLI parameter
- VM parameter
- Cloud profile setting
"""
return __opts__.get('location', config.get_cloud_config_value('location', vm_ or get_configured_provider(), __opts__, default=DEFAULT_LOCATION, search_global=False)) |
def get_route53_client(agent, region, cooperator=None):
"""
Get a non-registration Route53 client.
"""
if cooperator is None:
cooperator = task
return region.get_client(
_Route53Client,
agent=agent,
creds=region.creds,
region=REGION_US_EAST_1,
endpoint=AWSServiceEndpoint(_OTHER_ENDPOINT),
cooperator=cooperator,
) | def function[get_route53_client, parameter[agent, region, cooperator]]:
constant[
Get a non-registration Route53 client.
]
if compare[name[cooperator] is constant[None]] begin[:]
variable[cooperator] assign[=] name[task]
return[call[name[region].get_client, parameter[name[_Route53Client]]]] | keyword[def] identifier[get_route53_client] ( identifier[agent] , identifier[region] , identifier[cooperator] = keyword[None] ):
literal[string]
keyword[if] identifier[cooperator] keyword[is] keyword[None] :
identifier[cooperator] = identifier[task]
keyword[return] identifier[region] . identifier[get_client] (
identifier[_Route53Client] ,
identifier[agent] = identifier[agent] ,
identifier[creds] = identifier[region] . identifier[creds] ,
identifier[region] = identifier[REGION_US_EAST_1] ,
identifier[endpoint] = identifier[AWSServiceEndpoint] ( identifier[_OTHER_ENDPOINT] ),
identifier[cooperator] = identifier[cooperator] ,
) | def get_route53_client(agent, region, cooperator=None):
"""
Get a non-registration Route53 client.
"""
if cooperator is None:
cooperator = task # depends on [control=['if'], data=['cooperator']]
return region.get_client(_Route53Client, agent=agent, creds=region.creds, region=REGION_US_EAST_1, endpoint=AWSServiceEndpoint(_OTHER_ENDPOINT), cooperator=cooperator) |
def download(self, songs, template=None):
"""Download Google Music songs.
Parameters:
songs (list or dict): Google Music song dict(s).
template (str): A filepath which can include template patterns.
Returns:
A list of result dictionaries.
::
[
{'result': 'downloaded', 'id': song_id, 'filepath': downloaded[song_id]}, # downloaded
{'result': 'error', 'id': song_id, 'message': error[song_id]} # error
]
"""
if not template:
template = os.getcwd()
songnum = 0
total = len(songs)
results = []
errors = {}
pad = len(str(total))
for result in self._download(songs, template):
            song = songs[songnum]  # grab the dict before incrementing; the error branch uses it
            song_id = song['id']
            songnum += 1
downloaded, error = result
if downloaded:
logger.info(
"({num:>{pad}}/{total}) Successfully downloaded -- {file} ({song_id})".format(
num=songnum, pad=pad, total=total, file=downloaded[song_id], song_id=song_id
)
)
results.append({'result': 'downloaded', 'id': song_id, 'filepath': downloaded[song_id]})
elif error:
                title = song.get('title', "<empty>")
                artist = song.get('artist', "<empty>")
                album = song.get('album', "<empty>")
logger.info(
"({num:>{pad}}/{total}) Error on download -- {title} -- {artist} -- {album} ({song_id})".format(
num=songnum, pad=pad, total=total, title=title, artist=artist, album=album, song_id=song_id
)
)
                results.append({'result': 'error', 'id': song_id, 'message': error[song_id]})
                errors[song_id] = error[song_id]
if errors:
logger.info("\n\nThe following errors occurred:\n")
            for song_id, e in errors.items():
                logger.info("{song_id} | {error}".format(song_id=song_id, error=e))
            logger.info("\nThese songs may need to be downloaded again.\n")
return results | def function[download, parameter[self, songs, template]]:
constant[Download Google Music songs.
Parameters:
songs (list or dict): Google Music song dict(s).
template (str): A filepath which can include template patterns.
Returns:
A list of result dictionaries.
::
[
{'result': 'downloaded', 'id': song_id, 'filepath': downloaded[song_id]}, # downloaded
{'result': 'error', 'id': song_id, 'message': error[song_id]} # error
]
]
if <ast.UnaryOp object at 0x7da18f09f6a0> begin[:]
variable[template] assign[=] call[name[os].getcwd, parameter[]]
variable[songnum] assign[=] constant[0]
variable[total] assign[=] call[name[len], parameter[name[songs]]]
variable[results] assign[=] list[[]]
variable[errors] assign[=] dictionary[[], []]
variable[pad] assign[=] call[name[len], parameter[call[name[str], parameter[name[total]]]]]
for taget[name[result]] in starred[call[name[self]._download, parameter[name[songs], name[template]]]] begin[:]
variable[song_id] assign[=] call[call[name[songs]][name[songnum]]][constant[id]]
<ast.AugAssign object at 0x7da18f09dcf0>
<ast.Tuple object at 0x7da18f09f5b0> assign[=] name[result]
if name[downloaded] begin[:]
call[name[logger].info, parameter[call[constant[({num:>{pad}}/{total}) Successfully downloaded -- {file} ({song_id})].format, parameter[]]]]
call[name[results].append, parameter[dictionary[[<ast.Constant object at 0x7da18f09dd80>, <ast.Constant object at 0x7da18f09f550>, <ast.Constant object at 0x7da18f09c640>], [<ast.Constant object at 0x7da18f09d660>, <ast.Name object at 0x7da18f09d630>, <ast.Subscript object at 0x7da18f09fb50>]]]]
if name[errors] begin[:]
call[name[logger].info, parameter[constant[
The following errors occurred:
]]]
for taget[tuple[[<ast.Name object at 0x7da18f09e230>, <ast.Name object at 0x7da18f09e110>]]] in starred[call[name[errors].items, parameter[]]] begin[:]
call[name[logger].info, parameter[call[constant[{file} | {error}].format, parameter[]]]]
call[name[logger].info, parameter[constant[
These files may need to be synced again.
]]]
return[name[results]] | keyword[def] identifier[download] ( identifier[self] , identifier[songs] , identifier[template] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[template] :
identifier[template] = identifier[os] . identifier[getcwd] ()
identifier[songnum] = literal[int]
identifier[total] = identifier[len] ( identifier[songs] )
identifier[results] =[]
identifier[errors] ={}
identifier[pad] = identifier[len] ( identifier[str] ( identifier[total] ))
keyword[for] identifier[result] keyword[in] identifier[self] . identifier[_download] ( identifier[songs] , identifier[template] ):
identifier[song_id] = identifier[songs] [ identifier[songnum] ][ literal[string] ]
identifier[songnum] += literal[int]
identifier[downloaded] , identifier[error] = identifier[result]
keyword[if] identifier[downloaded] :
identifier[logger] . identifier[info] (
literal[string] . identifier[format] (
identifier[num] = identifier[songnum] , identifier[pad] = identifier[pad] , identifier[total] = identifier[total] , identifier[file] = identifier[downloaded] [ identifier[song_id] ], identifier[song_id] = identifier[song_id]
)
)
identifier[results] . identifier[append] ({ literal[string] : literal[string] , literal[string] : identifier[song_id] , literal[string] : identifier[downloaded] [ identifier[song_id] ]})
keyword[elif] identifier[error] :
identifier[title] = identifier[songs] [ identifier[songnum] ]. identifier[get] ( literal[string] , literal[string] )
identifier[artist] = identifier[songs] [ identifier[songnum] ]. identifier[get] ( literal[string] , literal[string] )
identifier[album] = identifier[songs] [ identifier[songnum] ]. identifier[get] ( literal[string] , literal[string] )
identifier[logger] . identifier[info] (
literal[string] . identifier[format] (
identifier[num] = identifier[songnum] , identifier[pad] = identifier[pad] , identifier[total] = identifier[total] , identifier[title] = identifier[title] , identifier[artist] = identifier[artist] , identifier[album] = identifier[album] , identifier[song_id] = identifier[song_id]
)
)
identifier[results] . identifier[append] ({ literal[string] : literal[string] , literal[string] : identifier[song_id] , literal[string] : identifier[error] [ identifier[song_id] ]})
keyword[if] identifier[errors] :
identifier[logger] . identifier[info] ( literal[string] )
keyword[for] identifier[filepath] , identifier[e] keyword[in] identifier[errors] . identifier[items] ():
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[file] = identifier[filepath] , identifier[error] = identifier[e] ))
identifier[logger] . identifier[info] ( literal[string] )
keyword[return] identifier[results] | def download(self, songs, template=None):
"""Download Google Music songs.
Parameters:
songs (list or dict): Google Music song dict(s).
template (str): A filepath which can include template patterns.
Returns:
A list of result dictionaries.
::
[
{'result': 'downloaded', 'id': song_id, 'filepath': downloaded[song_id]}, # downloaded
{'result': 'error', 'id': song_id, 'message': error[song_id]} # error
]
"""
if not template:
template = os.getcwd() # depends on [control=['if'], data=[]]
songnum = 0
total = len(songs)
results = []
errors = {}
pad = len(str(total))
for result in self._download(songs, template):
        song = songs[songnum]  # grab the dict before incrementing; the error branch uses it
        song_id = song['id']
        songnum += 1
(downloaded, error) = result
if downloaded:
logger.info('({num:>{pad}}/{total}) Successfully downloaded -- {file} ({song_id})'.format(num=songnum, pad=pad, total=total, file=downloaded[song_id], song_id=song_id))
results.append({'result': 'downloaded', 'id': song_id, 'filepath': downloaded[song_id]}) # depends on [control=['if'], data=[]]
elif error:
            title = song.get('title', '<empty>')
            artist = song.get('artist', '<empty>')
            album = song.get('album', '<empty>')
logger.info('({num:>{pad}}/{total}) Error on download -- {title} -- {artist} -- {album} ({song_id})'.format(num=songnum, pad=pad, total=total, title=title, artist=artist, album=album, song_id=song_id))
results.append({'result': 'error', 'id': song_id, 'message': error[song_id]}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['result']]
if errors:
logger.info('\n\nThe following errors occurred:\n')
for (filepath, e) in errors.items():
logger.info('{file} | {error}'.format(file=filepath, error=e)) # depends on [control=['for'], data=[]]
logger.info('\nThese files may need to be synced again.\n') # depends on [control=['if'], data=[]]
return results |
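A minimal usage sketch for the method above; `mm` stands in for an instance of the enclosing client class, and the template patterns ({artist}, {album}, {title}) are illustrative assumptions:
# Hypothetical usage; 'mm' and the template patterns are not confirmed by the source.
songs = mm.get_uploaded_songs()  # hypothetical accessor returning song dicts
results = mm.download(songs, template='~/Music/{artist}/{album}/{title}.mp3')
for r in results:
    if r['result'] == 'downloaded':
        print('OK ', r['filepath'])
    else:
        print('ERR', r['message'])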
def list_packages(conn=None):
'''
    List the installed packages recorded in the package database
'''
close = False
if conn is None:
close = True
conn = init()
ret = []
data = conn.execute('SELECT package FROM packages')
for pkg in data.fetchall():
ret.append(pkg)
if close:
conn.close()
return ret | def function[list_packages, parameter[conn]]:
constant[
    List the installed packages recorded in the package database
]
variable[close] assign[=] constant[False]
if compare[name[conn] is constant[None]] begin[:]
variable[close] assign[=] constant[True]
variable[conn] assign[=] call[name[init], parameter[]]
variable[ret] assign[=] list[[]]
variable[data] assign[=] call[name[conn].execute, parameter[constant[SELECT package FROM packages]]]
for taget[name[pkg]] in starred[call[name[data].fetchall, parameter[]]] begin[:]
call[name[ret].append, parameter[name[pkg]]]
if name[close] begin[:]
call[name[conn].close, parameter[]]
return[name[ret]] | keyword[def] identifier[list_packages] ( identifier[conn] = keyword[None] ):
literal[string]
identifier[close] = keyword[False]
keyword[if] identifier[conn] keyword[is] keyword[None] :
identifier[close] = keyword[True]
identifier[conn] = identifier[init] ()
identifier[ret] =[]
identifier[data] = identifier[conn] . identifier[execute] ( literal[string] )
keyword[for] identifier[pkg] keyword[in] identifier[data] . identifier[fetchall] ():
identifier[ret] . identifier[append] ( identifier[pkg] )
keyword[if] identifier[close] :
identifier[conn] . identifier[close] ()
keyword[return] identifier[ret] | def list_packages(conn=None):
"""
List files for an installed package
"""
close = False
if conn is None:
close = True
conn = init() # depends on [control=['if'], data=['conn']]
ret = []
data = conn.execute('SELECT package FROM packages')
for pkg in data.fetchall():
ret.append(pkg) # depends on [control=['for'], data=['pkg']]
if close:
conn.close() # depends on [control=['if'], data=[]]
return ret |
def is_newer_than(pth1, pth2):
"""
Return true if either file pth1 or file pth2 don't exist, or if
pth1 has been modified more recently than pth2
"""
return not os.path.exists(pth1) or not os.path.exists(pth2) or \
os.stat(pth1).st_mtime > os.stat(pth2).st_mtime | def function[is_newer_than, parameter[pth1, pth2]]:
constant[
Return true if either file pth1 or file pth2 don't exist, or if
pth1 has been modified more recently than pth2
]
return[<ast.BoolOp object at 0x7da1b07fb5e0>] | keyword[def] identifier[is_newer_than] ( identifier[pth1] , identifier[pth2] ):
literal[string]
keyword[return] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[pth1] ) keyword[or] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[pth2] ) keyword[or] identifier[os] . identifier[stat] ( identifier[pth1] ). identifier[st_mtime] > identifier[os] . identifier[stat] ( identifier[pth2] ). identifier[st_mtime] | def is_newer_than(pth1, pth2):
"""
Return true if either file pth1 or file pth2 don't exist, or if
pth1 has been modified more recently than pth2
"""
return not os.path.exists(pth1) or not os.path.exists(pth2) or os.stat(pth1).st_mtime > os.stat(pth2).st_mtime |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'label') and self.label is not None:
_dict['label'] = self.label
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
return _dict | def function[_to_dict, parameter[self]]:
constant[Return a json dictionary representing this model.]
variable[_dict] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da1b23450c0> begin[:]
call[name[_dict]][constant[label]] assign[=] name[self].label
if <ast.BoolOp object at 0x7da18fe91570> begin[:]
call[name[_dict]][constant[provenance_ids]] assign[=] name[self].provenance_ids
return[name[_dict]] | keyword[def] identifier[_to_dict] ( identifier[self] ):
literal[string]
identifier[_dict] ={}
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[label] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[label]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[provenance_ids] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[provenance_ids]
keyword[return] identifier[_dict] | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'label') and self.label is not None:
_dict['label'] = self.label # depends on [control=['if'], data=[]]
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids # depends on [control=['if'], data=[]]
return _dict |
def add_swagger(app, json_route, html_route):
"""
a convenience method for both adding a swagger.json route,
as well as adding a page showing the html documentation
"""
app.router.add_route('GET', json_route, create_swagger_json_handler(app))
add_swagger_api_route(app, html_route, json_route) | def function[add_swagger, parameter[app, json_route, html_route]]:
constant[
a convenience method for both adding a swagger.json route,
as well as adding a page showing the html documentation
]
call[name[app].router.add_route, parameter[constant[GET], name[json_route], call[name[create_swagger_json_handler], parameter[name[app]]]]]
call[name[add_swagger_api_route], parameter[name[app], name[html_route], name[json_route]]] | keyword[def] identifier[add_swagger] ( identifier[app] , identifier[json_route] , identifier[html_route] ):
literal[string]
identifier[app] . identifier[router] . identifier[add_route] ( literal[string] , identifier[json_route] , identifier[create_swagger_json_handler] ( identifier[app] ))
identifier[add_swagger_api_route] ( identifier[app] , identifier[html_route] , identifier[json_route] ) | def add_swagger(app, json_route, html_route):
"""
a convenience method for both adding a swagger.json route,
as well as adding a page showing the html documentation
"""
app.router.add_route('GET', json_route, create_swagger_json_handler(app))
add_swagger_api_route(app, html_route, json_route) |
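A usage sketch, assuming an aiohttp application and that add_swagger is importable from this module (the route paths are illustrative):
from aiohttp import web

app = web.Application()
# Serve the generated spec at /swagger.json and the HTML docs at /docs.
add_swagger(app, json_route='/swagger.json', html_route='/docs')
web.run_app(app)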
def convert_timestamp(timestamp):
"""
Converts bokehJS timestamp to datetime64.
"""
datetime = dt.datetime.utcfromtimestamp(timestamp/1000.)
return np.datetime64(datetime.replace(tzinfo=None)) | def function[convert_timestamp, parameter[timestamp]]:
constant[
Converts bokehJS timestamp to datetime64.
]
variable[datetime] assign[=] call[name[dt].datetime.utcfromtimestamp, parameter[binary_operation[name[timestamp] / constant[1000.0]]]]
return[call[name[np].datetime64, parameter[call[name[datetime].replace, parameter[]]]]] | keyword[def] identifier[convert_timestamp] ( identifier[timestamp] ):
literal[string]
identifier[datetime] = identifier[dt] . identifier[datetime] . identifier[utcfromtimestamp] ( identifier[timestamp] / literal[int] )
keyword[return] identifier[np] . identifier[datetime64] ( identifier[datetime] . identifier[replace] ( identifier[tzinfo] = keyword[None] )) | def convert_timestamp(timestamp):
"""
Converts bokehJS timestamp to datetime64.
"""
datetime = dt.datetime.utcfromtimestamp(timestamp / 1000.0)
return np.datetime64(datetime.replace(tzinfo=None)) |
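For example, the BokehJS timestamp 1546300800000 (milliseconds since the Unix epoch) converts to midnight on 2019-01-01; dt and np are the datetime and numpy modules, as in the function above:
import datetime as dt
import numpy as np

print(convert_timestamp(1546300800000))
# numpy.datetime64('2019-01-01T00:00:00.000000')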
def check_required(self):
""" Check all required settings have been provided
"""
die = False
for key, value in self.spec.items():
if not getattr(self, key.upper()) and value['required']:
print(f"{key} is a required setting. "
"Set via command-line params, env or file. "
"For examples, try '--generate' or '--help'.")
die = True
if die:
sys.exit(1) | def function[check_required, parameter[self]]:
constant[ Check all required settings have been provided
]
variable[die] assign[=] constant[False]
for taget[tuple[[<ast.Name object at 0x7da207f01a80>, <ast.Name object at 0x7da207f002e0>]]] in starred[call[name[self].spec.items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da207f018d0> begin[:]
call[name[print], parameter[<ast.JoinedStr object at 0x7da207f01fc0>]]
variable[die] assign[=] constant[True]
if name[die] begin[:]
call[name[sys].exit, parameter[constant[1]]] | keyword[def] identifier[check_required] ( identifier[self] ):
literal[string]
identifier[die] = keyword[False]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[spec] . identifier[items] ():
keyword[if] keyword[not] identifier[getattr] ( identifier[self] , identifier[key] . identifier[upper] ()) keyword[and] identifier[value] [ literal[string] ]:
identifier[print] ( literal[string]
literal[string]
literal[string] )
identifier[die] = keyword[True]
keyword[if] identifier[die] :
identifier[sys] . identifier[exit] ( literal[int] ) | def check_required(self):
""" Check all required settings have been provided
"""
die = False
for (key, value) in self.spec.items():
if not getattr(self, key.upper()) and value['required']:
print(f"{key} is a required setting. Set via command-line params, env or file. For examples, try '--generate' or '--help'.")
die = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if die:
sys.exit(1) # depends on [control=['if'], data=[]] |
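A sketch of the spec shape this method expects: lower-case keys map to dicts with a 'required' flag, and the current value is exposed as an upper-case attribute. The Settings class below is hypothetical:
import sys

class Settings:
    spec = {
        'api_key': {'required': True},   # unset below, so reported and fatal
        'debug':   {'required': False},  # optional, may stay unset
    }
    API_KEY = ''
    DEBUG = False
    check_required = check_required  # reuse the function defined above

Settings().check_required()  # prints the api_key message, then sys.exit(1)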
def option_group_exists(name, tags=None, region=None, key=None, keyid=None,
profile=None):
'''
Check to see if an RDS option group exists.
CLI example::
salt myminion boto_rds.option_group_exists myoptiongr region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
rds = conn.describe_option_groups(OptionGroupName=name)
return {'exists': bool(rds)}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} | def function[option_group_exists, parameter[name, tags, region, key, keyid, profile]]:
constant[
Check to see if an RDS option group exists.
CLI example::
salt myminion boto_rds.option_group_exists myoptiongr region=us-east-1
]
variable[conn] assign[=] call[name[_get_conn], parameter[]]
<ast.Try object at 0x7da204620040> | keyword[def] identifier[option_group_exists] ( identifier[name] , identifier[tags] = keyword[None] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] ,
identifier[profile] = keyword[None] ):
literal[string]
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[try] :
identifier[rds] = identifier[conn] . identifier[describe_option_groups] ( identifier[OptionGroupName] = identifier[name] )
keyword[return] { literal[string] : identifier[bool] ( identifier[rds] )}
keyword[except] identifier[ClientError] keyword[as] identifier[e] :
keyword[return] { literal[string] : identifier[__utils__] [ literal[string] ]( identifier[e] )} | def option_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None):
"""
Check to see if an RDS option group exists.
CLI example::
salt myminion boto_rds.option_group_exists myoptiongr region=us-east-1
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
rds = conn.describe_option_groups(OptionGroupName=name)
return {'exists': bool(rds)} # depends on [control=['try'], data=[]]
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} # depends on [control=['except'], data=['e']] |
def session_preparation(self):
"""Prepare the session after the connection has been established."""
# 0 will defer to the global delay factor
delay_factor = self.select_delay_factor(delay_factor=0)
self._test_channel_read()
self.set_base_prompt()
cmd = "{}set cli mode -page OFF{}".format(self.RETURN, self.RETURN)
self.disable_paging(command=cmd)
time.sleep(1 * delay_factor)
self.set_base_prompt()
time.sleep(0.3 * delay_factor)
self.clear_buffer() | def function[session_preparation, parameter[self]]:
constant[Prepare the session after the connection has been established.]
variable[delay_factor] assign[=] call[name[self].select_delay_factor, parameter[]]
call[name[self]._test_channel_read, parameter[]]
call[name[self].set_base_prompt, parameter[]]
variable[cmd] assign[=] call[constant[{}set cli mode -page OFF{}].format, parameter[name[self].RETURN, name[self].RETURN]]
call[name[self].disable_paging, parameter[]]
call[name[time].sleep, parameter[binary_operation[constant[1] * name[delay_factor]]]]
call[name[self].set_base_prompt, parameter[]]
call[name[time].sleep, parameter[binary_operation[constant[0.3] * name[delay_factor]]]]
call[name[self].clear_buffer, parameter[]] | keyword[def] identifier[session_preparation] ( identifier[self] ):
literal[string]
identifier[delay_factor] = identifier[self] . identifier[select_delay_factor] ( identifier[delay_factor] = literal[int] )
identifier[self] . identifier[_test_channel_read] ()
identifier[self] . identifier[set_base_prompt] ()
identifier[cmd] = literal[string] . identifier[format] ( identifier[self] . identifier[RETURN] , identifier[self] . identifier[RETURN] )
identifier[self] . identifier[disable_paging] ( identifier[command] = identifier[cmd] )
identifier[time] . identifier[sleep] ( literal[int] * identifier[delay_factor] )
identifier[self] . identifier[set_base_prompt] ()
identifier[time] . identifier[sleep] ( literal[int] * identifier[delay_factor] )
identifier[self] . identifier[clear_buffer] () | def session_preparation(self):
"""Prepare the session after the connection has been established."""
# 0 will defer to the global delay factor
delay_factor = self.select_delay_factor(delay_factor=0)
self._test_channel_read()
self.set_base_prompt()
cmd = '{}set cli mode -page OFF{}'.format(self.RETURN, self.RETURN)
self.disable_paging(command=cmd)
time.sleep(1 * delay_factor)
self.set_base_prompt()
time.sleep(0.3 * delay_factor)
self.clear_buffer() |
def gff3_parse_attributes(attributes_string):
"""
Parse a string of GFF3 attributes ('key=value' pairs delimited by ';')
and return a dictionary.
"""
attributes = dict()
fields = attributes_string.split(';')
for f in fields:
if '=' in f:
key, value = f.split('=')
attributes[unquote_plus(key).strip()] = unquote_plus(value.strip())
elif len(f) > 0:
# not strictly kosher
attributes[unquote_plus(f).strip()] = True
return attributes | def function[gff3_parse_attributes, parameter[attributes_string]]:
constant[
Parse a string of GFF3 attributes ('key=value' pairs delimited by ';')
and return a dictionary.
]
variable[attributes] assign[=] call[name[dict], parameter[]]
variable[fields] assign[=] call[name[attributes_string].split, parameter[constant[;]]]
for taget[name[f]] in starred[name[fields]] begin[:]
if compare[constant[=] in name[f]] begin[:]
<ast.Tuple object at 0x7da1b265eb60> assign[=] call[name[f].split, parameter[constant[=]]]
call[name[attributes]][call[call[name[unquote_plus], parameter[name[key]]].strip, parameter[]]] assign[=] call[name[unquote_plus], parameter[call[name[value].strip, parameter[]]]]
return[name[attributes]] | keyword[def] identifier[gff3_parse_attributes] ( identifier[attributes_string] ):
literal[string]
identifier[attributes] = identifier[dict] ()
identifier[fields] = identifier[attributes_string] . identifier[split] ( literal[string] )
keyword[for] identifier[f] keyword[in] identifier[fields] :
keyword[if] literal[string] keyword[in] identifier[f] :
identifier[key] , identifier[value] = identifier[f] . identifier[split] ( literal[string] )
identifier[attributes] [ identifier[unquote_plus] ( identifier[key] ). identifier[strip] ()]= identifier[unquote_plus] ( identifier[value] . identifier[strip] ())
keyword[elif] identifier[len] ( identifier[f] )> literal[int] :
identifier[attributes] [ identifier[unquote_plus] ( identifier[f] ). identifier[strip] ()]= keyword[True]
keyword[return] identifier[attributes] | def gff3_parse_attributes(attributes_string):
"""
Parse a string of GFF3 attributes ('key=value' pairs delimited by ';')
and return a dictionary.
"""
attributes = dict()
fields = attributes_string.split(';')
for f in fields:
if '=' in f:
(key, value) = f.split('=')
attributes[unquote_plus(key).strip()] = unquote_plus(value.strip()) # depends on [control=['if'], data=['f']]
elif len(f) > 0:
# not strictly kosher
attributes[unquote_plus(f).strip()] = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
return attributes |
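For example, percent-encoded values are decoded and bare flags become True (assuming unquote_plus is imported from urllib.parse, as the function requires):
attrs = gff3_parse_attributes('ID=gene00001;Name=EDEN;note=protein%20kinase;pseudo')
print(attrs)
# {'ID': 'gene00001', 'Name': 'EDEN', 'note': 'protein kinase', 'pseudo': True}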
def get_info(self):
'''
Get information about the counter
.. note::
GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes
if this is called after sampling data.
'''
if not self.info:
ci = win32pdh.GetCounterInfo(self.handle, 0)
self.info = {
'type': ci[0],
'version': ci[1],
'scale': ci[2],
'default_scale': ci[3],
'user_data': ci[4],
'query_user_data': ci[5],
'full_path': ci[6],
'machine_name': ci[7][0],
'object_name': ci[7][1],
'instance_name': ci[7][2],
'parent_instance': ci[7][3],
'instance_index': ci[7][4],
'counter_name': ci[7][5],
'explain_text': ci[8]
}
return self.info | def function[get_info, parameter[self]]:
constant[
Get information about the counter
.. note::
GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes
if this is called after sampling data.
]
if <ast.UnaryOp object at 0x7da18f00cac0> begin[:]
variable[ci] assign[=] call[name[win32pdh].GetCounterInfo, parameter[name[self].handle, constant[0]]]
name[self].info assign[=] dictionary[[<ast.Constant object at 0x7da18f00faf0>, <ast.Constant object at 0x7da18f00d540>, <ast.Constant object at 0x7da18f00c9d0>, <ast.Constant object at 0x7da18f00d5a0>, <ast.Constant object at 0x7da18f00f9d0>, <ast.Constant object at 0x7da18f00e6b0>, <ast.Constant object at 0x7da18f00d420>, <ast.Constant object at 0x7da18f00d480>, <ast.Constant object at 0x7da18f00f760>, <ast.Constant object at 0x7da18f00e1d0>, <ast.Constant object at 0x7da18f00ec20>, <ast.Constant object at 0x7da18f00f730>, <ast.Constant object at 0x7da18f00d2d0>, <ast.Constant object at 0x7da18f00d570>], [<ast.Subscript object at 0x7da18f00c2e0>, <ast.Subscript object at 0x7da18f00eb90>, <ast.Subscript object at 0x7da18f00e920>, <ast.Subscript object at 0x7da18f00cf10>, <ast.Subscript object at 0x7da18f00fd90>, <ast.Subscript object at 0x7da18f00c7c0>, <ast.Subscript object at 0x7da20c6c6ad0>, <ast.Subscript object at 0x7da20c6c6e00>, <ast.Subscript object at 0x7da20c6c4ac0>, <ast.Subscript object at 0x7da20c6c79a0>, <ast.Subscript object at 0x7da20c6c4e50>, <ast.Subscript object at 0x7da20c6c5300>, <ast.Subscript object at 0x7da20c6c5bd0>, <ast.Subscript object at 0x7da20c6c47f0>]]
return[name[self].info] | keyword[def] identifier[get_info] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[info] :
identifier[ci] = identifier[win32pdh] . identifier[GetCounterInfo] ( identifier[self] . identifier[handle] , literal[int] )
identifier[self] . identifier[info] ={
literal[string] : identifier[ci] [ literal[int] ],
literal[string] : identifier[ci] [ literal[int] ],
literal[string] : identifier[ci] [ literal[int] ],
literal[string] : identifier[ci] [ literal[int] ],
literal[string] : identifier[ci] [ literal[int] ],
literal[string] : identifier[ci] [ literal[int] ],
literal[string] : identifier[ci] [ literal[int] ],
literal[string] : identifier[ci] [ literal[int] ][ literal[int] ],
literal[string] : identifier[ci] [ literal[int] ][ literal[int] ],
literal[string] : identifier[ci] [ literal[int] ][ literal[int] ],
literal[string] : identifier[ci] [ literal[int] ][ literal[int] ],
literal[string] : identifier[ci] [ literal[int] ][ literal[int] ],
literal[string] : identifier[ci] [ literal[int] ][ literal[int] ],
literal[string] : identifier[ci] [ literal[int] ]
}
keyword[return] identifier[self] . identifier[info] | def get_info(self):
"""
Get information about the counter
.. note::
GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes
if this is called after sampling data.
"""
if not self.info:
ci = win32pdh.GetCounterInfo(self.handle, 0)
self.info = {'type': ci[0], 'version': ci[1], 'scale': ci[2], 'default_scale': ci[3], 'user_data': ci[4], 'query_user_data': ci[5], 'full_path': ci[6], 'machine_name': ci[7][0], 'object_name': ci[7][1], 'instance_name': ci[7][2], 'parent_instance': ci[7][3], 'instance_index': ci[7][4], 'counter_name': ci[7][5], 'explain_text': ci[8]} # depends on [control=['if'], data=[]]
return self.info |
def wrap(value):
"""
Wraps the given value in a Document or DocumentList as applicable.
"""
if isinstance(value, Document) or isinstance(value, DocumentList):
return value
elif isinstance(value, dict):
return Document(value)
elif isinstance(value, list):
return DocumentList(value)
else:
return value | def function[wrap, parameter[value]]:
constant[
Wraps the given value in a Document or DocumentList as applicable.
]
if <ast.BoolOp object at 0x7da1b12904f0> begin[:]
return[name[value]] | keyword[def] identifier[wrap] ( identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[Document] ) keyword[or] identifier[isinstance] ( identifier[value] , identifier[DocumentList] ):
keyword[return] identifier[value]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[dict] ):
keyword[return] identifier[Document] ( identifier[value] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[list] ):
keyword[return] identifier[DocumentList] ( identifier[value] )
keyword[else] :
keyword[return] identifier[value] | def wrap(value):
"""
Wraps the given value in a Document or DocumentList as applicable.
"""
if isinstance(value, Document) or isinstance(value, DocumentList):
return value # depends on [control=['if'], data=[]]
elif isinstance(value, dict):
return Document(value) # depends on [control=['if'], data=[]]
elif isinstance(value, list):
return DocumentList(value) # depends on [control=['if'], data=[]]
else:
return value |
def AmericanDateToEpoch(self, date_str):
"""Take a US format date and return epoch."""
try:
epoch = time.strptime(date_str, "%m/%d/%Y")
return int(calendar.timegm(epoch)) * 1000000
except ValueError:
return 0 | def function[AmericanDateToEpoch, parameter[self, date_str]]:
constant[Take a US-format date (MM/DD/YYYY) and return the epoch time in microseconds.]
<ast.Try object at 0x7da1b1b6afe0> | keyword[def] identifier[AmericanDateToEpoch] ( identifier[self] , identifier[date_str] ):
literal[string]
keyword[try] :
identifier[epoch] = identifier[time] . identifier[strptime] ( identifier[date_str] , literal[string] )
keyword[return] identifier[int] ( identifier[calendar] . identifier[timegm] ( identifier[epoch] ))* literal[int]
keyword[except] identifier[ValueError] :
keyword[return] literal[int] | def AmericanDateToEpoch(self, date_str):
"""Take a US format date and return epoch."""
try:
epoch = time.strptime(date_str, '%m/%d/%Y')
return int(calendar.timegm(epoch)) * 1000000 # depends on [control=['try'], data=[]]
except ValueError:
return 0 # depends on [control=['except'], data=[]] |
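Illustrative calls; parser stands in for an instance of the (unshown) enclosing class. Midnight UTC on 2019-07-04 is 1562198400 seconds, so the method returns that value scaled to microseconds, while a date in the wrong format falls back to 0:
print(parser.AmericanDateToEpoch('07/04/2019'))  # 1562198400000000
print(parser.AmericanDateToEpoch('2019-07-04'))  # 0 (does not match %m/%d/%Y)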
def open(self, fname, mode='rb'):
"""
(Re-)opens a backup file
"""
self.close()
self.fp = open(fname, mode)
self.fname = fname | def function[open, parameter[self, fname, mode]]:
constant[
(Re-)opens a backup file
]
call[name[self].close, parameter[]]
name[self].fp assign[=] call[name[open], parameter[name[fname], name[mode]]]
name[self].fname assign[=] name[fname] | keyword[def] identifier[open] ( identifier[self] , identifier[fname] , identifier[mode] = literal[string] ):
literal[string]
identifier[self] . identifier[close] ()
identifier[self] . identifier[fp] = identifier[open] ( identifier[fname] , identifier[mode] )
identifier[self] . identifier[fname] = identifier[fname] | def open(self, fname, mode='rb'):
"""
(Re-)opens a backup file
"""
self.close()
self.fp = open(fname, mode)
self.fname = fname |
def node_is_upstream_leaf(graph: BELGraph, node: BaseEntity) -> bool:
"""Return if the node is an upstream leaf.
An upstream leaf is defined as a node that has no in-edges, and exactly 1 out-edge.
"""
return 0 == len(graph.predecessors(node)) and 1 == len(graph.successors(node)) | def function[node_is_upstream_leaf, parameter[graph, node]]:
constant[Return if the node is an upstream leaf.
An upstream leaf is defined as a node that has no in-edges, and exactly 1 out-edge.
]
return[<ast.BoolOp object at 0x7da20c796a70>] | keyword[def] identifier[node_is_upstream_leaf] ( identifier[graph] : identifier[BELGraph] , identifier[node] : identifier[BaseEntity] )-> identifier[bool] :
literal[string]
keyword[return] literal[int] == identifier[len] ( identifier[graph] . identifier[predecessors] ( identifier[node] )) keyword[and] literal[int] == identifier[len] ( identifier[graph] . identifier[successors] ( identifier[node] )) | def node_is_upstream_leaf(graph: BELGraph, node: BaseEntity) -> bool:
"""Return if the node is an upstream leaf.
An upstream leaf is defined as a node that has no in-edges, and exactly 1 out-edge.
"""
return 0 == len(graph.predecessors(node)) and 1 == len(graph.successors(node)) |
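The same check on a plain networkx graph, a sketch that materializes the predecessor/successor iterators so len() works (BELGraph is a networkx subclass; the version above assumes list-returning accessors):
import networkx as nx

g = nx.MultiDiGraph()
g.add_edge('A', 'B')  # 'A' has no in-edges and exactly one out-edge

def is_upstream_leaf(graph, node):
    return 0 == len(list(graph.predecessors(node))) and \
           1 == len(list(graph.successors(node)))

print(is_upstream_leaf(g, 'A'))  # True
print(is_upstream_leaf(g, 'B'))  # False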
def do_blot(source, source_wcs, blot_wcs, exptime, coeffs=True,
            interp='poly5', sinscl=1.0, stepsize=10, wcsmap=None):
""" Core functionality of performing the 'blot' operation to create a single
blotted image from a single source image.
All distortion information is assumed to be included in the WCS specification
of the 'output' blotted image given in 'blot_wcs'.
This is the simplest interface that can be called for stand-alone
use of the blotting function.
Parameters
----------
source
Input numpy array of undistorted source image in units of 'cps'.
source_wcs
HSTWCS object representing source image distortion-corrected WCS.
blot_wcs
(py)wcs.WCS object representing the blotted image WCS.
exptime
exptime to use for scaling output blot image. A value of 1 will
result in output blot image in units of 'cps'.
coeffs
Flag to specify whether or not to use distortion coefficients
associated with blot_wcs. If False, do not apply any distortion
model.
interp
Form of interpolation to use when blotting pixels. Valid options::
"nearest","linear","poly3", "poly5"(default), "spline3", "sinc"
sinscl
Scale for sinc interpolation kernel (in output, blotted pixels)
stepsize
Number of pixels for WCS interpolation
wcsmap
Custom mapping class to use to provide transformation from
drizzled to blotted WCS. Default will be to use
`drizzlepac.wcs_functions.WCSMap`.
"""
_outsci = np.zeros(blot_wcs.array_shape, dtype=np.float32)
# Now pass numpy objects to callable version of Blot...
build=False
misval = 0.0
kscale = 1.0
xmin = 1
ymin = 1
xmax, ymax = source_wcs.pixel_shape
# compute the undistorted 'natural' plate scale for this chip
if coeffs:
wcslin = distortion.utils.make_orthogonal_cd(blot_wcs)
else:
wcslin = blot_wcs
blot_wcs.sip = None
blot_wcs.cpdis1 = None
blot_wcs.cpdis2 = None
blot_wcs.det2im = None
if wcsmap is None and cdriz is not None:
"""
Use default C mapping function.
"""
print('Using default C-based coordinate transformation...')
mapping = cdriz.DefaultWCSMapping(
blot_wcs, source_wcs,
blot_wcs.pixel_shape[0], blot_wcs.pixel_shape[1],
stepsize
)
pix_ratio = source_wcs.pscale/wcslin.pscale
else:
#
## Using the Python class for the WCS-based transformation
#
# Use user provided mapping function
print('Using coordinate transformation defined by user...')
if wcsmap is None:
wcsmap = wcs_functions.WCSMap
wmap = wcsmap(blot_wcs,source_wcs)
mapping = wmap.forward
pix_ratio = source_wcs.pscale/wcslin.pscale
t = cdriz.tblot(
source, _outsci,xmin,xmax,ymin,ymax,
pix_ratio, kscale, 1.0, 1.0,
'center',interp, exptime,
misval, sinscl, 1, mapping)
del mapping
return _outsci | def function[do_blot, parameter[source, source_wcs, blot_wcs, exptime, coeffs, interp, sinscl, stepsize, wcsmap]]:
constant[ Core functionality of performing the 'blot' operation to create a single
blotted image from a single source image.
All distortion information is assumed to be included in the WCS specification
of the 'output' blotted image given in 'blot_wcs'.
This is the simplest interface that can be called for stand-alone
use of the blotting function.
Parameters
----------
source
Input numpy array of undistorted source image in units of 'cps'.
source_wcs
HSTWCS object representing source image distortion-corrected WCS.
blot_wcs
(py)wcs.WCS object representing the blotted image WCS.
exptime
exptime to use for scaling output blot image. A value of 1 will
result in output blot image in units of 'cps'.
coeffs
Flag to specify whether or not to use distortion coefficients
associated with blot_wcs. If False, do not apply any distortion
model.
interp
Form of interpolation to use when blotting pixels. Valid options::
"nearest","linear","poly3", "poly5"(default), "spline3", "sinc"
sinscl
Scale for sinc interpolation kernel (in output, blotted pixels)
stepsize
Number of pixels for WCS interpolation
wcsmap
Custom mapping class to use to provide transformation from
drizzled to blotted WCS. Default will be to use
`drizzlepac.wcs_functions.WCSMap`.
]
variable[_outsci] assign[=] call[name[np].zeros, parameter[name[blot_wcs].array_shape]]
variable[build] assign[=] constant[False]
variable[misval] assign[=] constant[0.0]
variable[kscale] assign[=] constant[1.0]
variable[xmin] assign[=] constant[1]
variable[ymin] assign[=] constant[1]
<ast.Tuple object at 0x7da18f58d8d0> assign[=] name[source_wcs].pixel_shape
if name[coeffs] begin[:]
variable[wcslin] assign[=] call[name[distortion].utils.make_orthogonal_cd, parameter[name[blot_wcs]]]
if <ast.BoolOp object at 0x7da18f58fd60> begin[:]
constant[
Use default C mapping function.
]
call[name[print], parameter[constant[Using default C-based coordinate transformation...]]]
variable[mapping] assign[=] call[name[cdriz].DefaultWCSMapping, parameter[name[blot_wcs], name[source_wcs], call[name[blot_wcs].pixel_shape][constant[0]], call[name[blot_wcs].pixel_shape][constant[1]], name[stepsize]]]
variable[pix_ratio] assign[=] binary_operation[name[source_wcs].pscale / name[wcslin].pscale]
variable[t] assign[=] call[name[cdriz].tblot, parameter[name[source], name[_outsci], name[xmin], name[xmax], name[ymin], name[ymax], name[pix_ratio], name[kscale], constant[1.0], constant[1.0], constant[center], name[interp], name[exptime], name[misval], name[sinscl], constant[1], name[mapping]]]
<ast.Delete object at 0x7da18f58ccd0>
return[name[_outsci]] | keyword[def] identifier[do_blot] ( identifier[source] , identifier[source_wcs] , identifier[blot_wcs] , identifier[exptime] , identifier[coeffs] = keyword[True] ,
identifier[interp] = literal[string] , identifier[sinscl] = literal[int] , identifier[stepsize] = literal[int] , identifier[wcsmap] = keyword[None] ):
literal[string]
identifier[_outsci] = identifier[np] . identifier[zeros] ( identifier[blot_wcs] . identifier[array_shape] , identifier[dtype] = identifier[np] . identifier[float32] )
identifier[build] = keyword[False]
identifier[misval] = literal[int]
identifier[kscale] = literal[int]
identifier[xmin] = literal[int]
identifier[ymin] = literal[int]
identifier[xmax] , identifier[ymax] = identifier[source_wcs] . identifier[pixel_shape]
keyword[if] identifier[coeffs] :
identifier[wcslin] = identifier[distortion] . identifier[utils] . identifier[make_orthogonal_cd] ( identifier[blot_wcs] )
keyword[else] :
identifier[wcslin] = identifier[blot_wcs]
identifier[blot_wcs] . identifier[sip] = keyword[None]
identifier[blot_wcs] . identifier[cpdis1] = keyword[None]
identifier[blot_wcs] . identifier[cpdis2] = keyword[None]
identifier[blot_wcs] . identifier[det2im] = keyword[None]
keyword[if] identifier[wcsmap] keyword[is] keyword[None] keyword[and] identifier[cdriz] keyword[is] keyword[not] keyword[None] :
literal[string]
identifier[print] ( literal[string] )
identifier[mapping] = identifier[cdriz] . identifier[DefaultWCSMapping] (
identifier[blot_wcs] , identifier[source_wcs] ,
identifier[blot_wcs] . identifier[pixel_shape] [ literal[int] ], identifier[blot_wcs] . identifier[pixel_shape] [ literal[int] ],
identifier[stepsize]
)
identifier[pix_ratio] = identifier[source_wcs] . identifier[pscale] / identifier[wcslin] . identifier[pscale]
keyword[else] :
identifier[print] ( literal[string] )
keyword[if] identifier[wcsmap] keyword[is] keyword[None] :
identifier[wcsmap] = identifier[wcs_functions] . identifier[WCSMap]
identifier[wmap] = identifier[wcsmap] ( identifier[blot_wcs] , identifier[source_wcs] )
identifier[mapping] = identifier[wmap] . identifier[forward]
identifier[pix_ratio] = identifier[source_wcs] . identifier[pscale] / identifier[wcslin] . identifier[pscale]
identifier[t] = identifier[cdriz] . identifier[tblot] (
identifier[source] , identifier[_outsci] , identifier[xmin] , identifier[xmax] , identifier[ymin] , identifier[ymax] ,
identifier[pix_ratio] , identifier[kscale] , literal[int] , literal[int] ,
literal[string] , identifier[interp] , identifier[exptime] ,
identifier[misval] , identifier[sinscl] , literal[int] , identifier[mapping] )
keyword[del] identifier[mapping]
keyword[return] identifier[_outsci] | def do_blot(source, source_wcs, blot_wcs, exptime, coeffs=True, interp='poly5', sinscl=1.0, stepsize=10, wcsmap=None):
""" Core functionality of performing the 'blot' operation to create a single
blotted image from a single source image.
All distortion information is assumed to be included in the WCS specification
of the 'output' blotted image given in 'blot_wcs'.
This is the simplest interface that can be called for stand-alone
use of the blotting function.
Parameters
----------
source
Input numpy array of undistorted source image in units of 'cps'.
source_wcs
HSTWCS object representing source image distortion-corrected WCS.
blot_wcs
(py)wcs.WCS object representing the blotted image WCS.
exptime
exptime to use for scaling output blot image. A value of 1 will
result in output blot image in units of 'cps'.
coeffs
Flag to specify whether or not to use distortion coefficients
associated with blot_wcs. If False, do not apply any distortion
model.
interp
Form of interpolation to use when blotting pixels. Valid options::
"nearest","linear","poly3", "poly5"(default), "spline3", "sinc"
sinscl
Scale for sinc interpolation kernel (in output, blotted pixels)
stepsize
Number of pixels for WCS interpolation
wcsmap
Custom mapping class to use to provide transformation from
drizzled to blotted WCS. Default will be to use
`drizzlepac.wcs_functions.WCSMap`.
"""
_outsci = np.zeros(blot_wcs.array_shape, dtype=np.float32)
# Now pass numpy objects to callable version of Blot...
build = False
misval = 0.0
kscale = 1.0
xmin = 1
ymin = 1
(xmax, ymax) = source_wcs.pixel_shape
# compute the undistorted 'natural' plate scale for this chip
if coeffs:
wcslin = distortion.utils.make_orthogonal_cd(blot_wcs) # depends on [control=['if'], data=[]]
else:
wcslin = blot_wcs
blot_wcs.sip = None
blot_wcs.cpdis1 = None
blot_wcs.cpdis2 = None
blot_wcs.det2im = None
if wcsmap is None and cdriz is not None:
'\n Use default C mapping function.\n '
print('Using default C-based coordinate transformation...')
mapping = cdriz.DefaultWCSMapping(blot_wcs, source_wcs, blot_wcs.pixel_shape[0], blot_wcs.pixel_shape[1], stepsize)
pix_ratio = source_wcs.pscale / wcslin.pscale # depends on [control=['if'], data=[]]
else:
#
## Using the Python class for the WCS-based transformation
#
# Use user provided mapping function
print('Using coordinate transformation defined by user...')
if wcsmap is None:
wcsmap = wcs_functions.WCSMap # depends on [control=['if'], data=['wcsmap']]
wmap = wcsmap(blot_wcs, source_wcs)
mapping = wmap.forward
pix_ratio = source_wcs.pscale / wcslin.pscale
t = cdriz.tblot(source, _outsci, xmin, xmax, ymin, ymax, pix_ratio, kscale, 1.0, 1.0, 'center', interp, exptime, misval, sinscl, 1, mapping)
del mapping
return _outsci |
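A heavily hedged call sketch: the filenames, the extension tuple, and the use of stwcs.wcsutil.HSTWCS for building the two WCS objects are placeholders, not confirmed by the source:
from astropy.io import fits
from stwcs import wcsutil

source = fits.getdata('drizzled_sci.fits')             # undistorted image in cps
source_wcs = wcsutil.HSTWCS('drizzled_sci.fits')       # distortion-corrected WCS
blot_wcs = wcsutil.HSTWCS('flt.fits', ext=('SCI', 1))  # target chip WCS
blotted = do_blot(source, source_wcs, blot_wcs, exptime=1.0,
                  interp='poly5', stepsize=10)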
def print_setting(self):
"""
prints the setting of the parameter class
:return:
"""
num, param_list = self.num_param()
num_linear = self.num_param_linear()
print("The following model options are chosen:")
print("Lens models:", self._lens_model_list)
print("Source models:", self._source_light_model_list)
print("Lens light models:", self._lens_light_model_list)
print("Point source models:", self._point_source_model_list)
print("===================")
print("The following parameters are being fixed:")
print("Lens:", self.lensParams.kwargs_fixed)
print("Source:", self.souceParams.kwargs_fixed)
print("Lens light:", self.lensLightParams.kwargs_fixed)
print("Point source:", self.pointSourceParams.kwargs_fixed)
print("===================")
print("Joint parameters for different models")
print("Joint lens with lens:", self._joint_lens_with_lens)
print("Joint lens with lens light:", self._joint_lens_light_with_lens_light)
print("Joint source with source:", self._joint_source_with_source)
print("Joint lens with light:", self._joint_lens_with_light)
print("Joint source with point source:", self._joint_source_with_point_source)
print("===================")
print("Number of non-linear parameters being sampled: ", num)
print("Parameters being sampled: ", param_list)
print("Number of linear parameters being solved for: ", num_linear) | def function[print_setting, parameter[self]]:
constant[
prints the setting of the parameter class
:return:
]
<ast.Tuple object at 0x7da1b26adc60> assign[=] call[name[self].num_param, parameter[]]
variable[num_linear] assign[=] call[name[self].num_param_linear, parameter[]]
call[name[print], parameter[constant[The following model options are chosen:]]]
call[name[print], parameter[constant[Lens models:], name[self]._lens_model_list]]
call[name[print], parameter[constant[Source models:], name[self]._source_light_model_list]]
call[name[print], parameter[constant[Lens light models:], name[self]._lens_light_model_list]]
call[name[print], parameter[constant[Point source models:], name[self]._point_source_model_list]]
call[name[print], parameter[constant[===================]]]
call[name[print], parameter[constant[The following parameters are being fixed:]]]
call[name[print], parameter[constant[Lens:], name[self].lensParams.kwargs_fixed]]
call[name[print], parameter[constant[Source:], name[self].souceParams.kwargs_fixed]]
call[name[print], parameter[constant[Lens light:], name[self].lensLightParams.kwargs_fixed]]
call[name[print], parameter[constant[Point source:], name[self].pointSourceParams.kwargs_fixed]]
call[name[print], parameter[constant[===================]]]
call[name[print], parameter[constant[Joint parameters for different models]]]
call[name[print], parameter[constant[Joint lens with lens:], name[self]._joint_lens_with_lens]]
call[name[print], parameter[constant[Joint lens with lens light:], name[self]._joint_lens_light_with_lens_light]]
call[name[print], parameter[constant[Joint source with source:], name[self]._joint_source_with_source]]
call[name[print], parameter[constant[Joint lens with light:], name[self]._joint_lens_with_light]]
call[name[print], parameter[constant[Joint source with point source:], name[self]._joint_source_with_point_source]]
call[name[print], parameter[constant[===================]]]
call[name[print], parameter[constant[Number of non-linear parameters being sampled: ], name[num]]]
call[name[print], parameter[constant[Parameters being sampled: ], name[param_list]]]
call[name[print], parameter[constant[Number of linear parameters being solved for: ], name[num_linear]]] | keyword[def] identifier[print_setting] ( identifier[self] ):
literal[string]
identifier[num] , identifier[param_list] = identifier[self] . identifier[num_param] ()
identifier[num_linear] = identifier[self] . identifier[num_param_linear] ()
identifier[print] ( literal[string] )
identifier[print] ( literal[string] , identifier[self] . identifier[_lens_model_list] )
identifier[print] ( literal[string] , identifier[self] . identifier[_source_light_model_list] )
identifier[print] ( literal[string] , identifier[self] . identifier[_lens_light_model_list] )
identifier[print] ( literal[string] , identifier[self] . identifier[_point_source_model_list] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] , identifier[self] . identifier[lensParams] . identifier[kwargs_fixed] )
identifier[print] ( literal[string] , identifier[self] . identifier[souceParams] . identifier[kwargs_fixed] )
identifier[print] ( literal[string] , identifier[self] . identifier[lensLightParams] . identifier[kwargs_fixed] )
identifier[print] ( literal[string] , identifier[self] . identifier[pointSourceParams] . identifier[kwargs_fixed] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] , identifier[self] . identifier[_joint_lens_with_lens] )
identifier[print] ( literal[string] , identifier[self] . identifier[_joint_lens_light_with_lens_light] )
identifier[print] ( literal[string] , identifier[self] . identifier[_joint_source_with_source] )
identifier[print] ( literal[string] , identifier[self] . identifier[_joint_lens_with_light] )
identifier[print] ( literal[string] , identifier[self] . identifier[_joint_source_with_point_source] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] , identifier[num] )
identifier[print] ( literal[string] , identifier[param_list] )
identifier[print] ( literal[string] , identifier[num_linear] ) | def print_setting(self):
"""
prints the setting of the parameter class
:return:
"""
(num, param_list) = self.num_param()
num_linear = self.num_param_linear()
print('The following model options are chosen:')
print('Lens models:', self._lens_model_list)
print('Source models:', self._source_light_model_list)
print('Lens light models:', self._lens_light_model_list)
print('Point source models:', self._point_source_model_list)
print('===================')
print('The following parameters are being fixed:')
print('Lens:', self.lensParams.kwargs_fixed)
print('Source:', self.souceParams.kwargs_fixed)
print('Lens light:', self.lensLightParams.kwargs_fixed)
print('Point source:', self.pointSourceParams.kwargs_fixed)
print('===================')
print('Joint parameters for different models')
print('Joint lens with lens:', self._joint_lens_with_lens)
print('Joint lens with lens light:', self._joint_lens_light_with_lens_light)
print('Joint source with source:', self._joint_source_with_source)
print('Joint lens with light:', self._joint_lens_with_light)
print('Joint source with point source:', self._joint_source_with_point_source)
print('===================')
print('Number of non-linear parameters being sampled: ', num)
print('Parameters being sampled: ', param_list)
print('Number of linear parameters being solved for: ', num_linear) |
def configure(self, options, conf):
"""Configure which kinds of exceptions trigger plugin.
"""
self.conf = conf
self.enabled = options.epdb_debugErrors or options.epdb_debugFailures
self.enabled_for_errors = options.epdb_debugErrors
self.enabled_for_failures = options.epdb_debugFailures | def function[configure, parameter[self, options, conf]]:
constant[Configure which kinds of exceptions trigger the plugin.
]
name[self].conf assign[=] name[conf]
name[self].enabled assign[=] <ast.BoolOp object at 0x7da18dc98ca0>
name[self].enabled_for_errors assign[=] name[options].epdb_debugErrors
name[self].enabled_for_failures assign[=] name[options].epdb_debugFailures | keyword[def] identifier[configure] ( identifier[self] , identifier[options] , identifier[conf] ):
literal[string]
identifier[self] . identifier[conf] = identifier[conf]
identifier[self] . identifier[enabled] = identifier[options] . identifier[epdb_debugErrors] keyword[or] identifier[options] . identifier[epdb_debugFailures]
identifier[self] . identifier[enabled_for_errors] = identifier[options] . identifier[epdb_debugErrors]
identifier[self] . identifier[enabled_for_failures] = identifier[options] . identifier[epdb_debugFailures] | def configure(self, options, conf):
"""Configure which kinds of exceptions trigger plugin.
"""
self.conf = conf
self.enabled = options.epdb_debugErrors or options.epdb_debugFailures
self.enabled_for_errors = options.epdb_debugErrors
self.enabled_for_failures = options.epdb_debugFailures |
def collection_to_included_trees(collection):
"""Takes a collection object (or a filepath to collection object), returns
each element of the `decisions` list that has the decision set to included.
"""
if is_str_type(collection):
collection = read_as_json(collection)
inc = []
for d in collection.get('decisions', []):
if d['decision'] == 'INCLUDED':
inc.append(d)
return inc | def function[collection_to_included_trees, parameter[collection]]:
constant[Takes a collection object (or a filepath to a collection object) and returns
each element of the `decisions` list that has the decision set to included.
]
if call[name[is_str_type], parameter[name[collection]]] begin[:]
variable[collection] assign[=] call[name[read_as_json], parameter[name[collection]]]
variable[inc] assign[=] list[[]]
for taget[name[d]] in starred[call[name[collection].get, parameter[constant[decisions], list[[]]]]] begin[:]
if compare[call[name[d]][constant[decision]] equal[==] constant[INCLUDED]] begin[:]
call[name[inc].append, parameter[name[d]]]
return[name[inc]] | keyword[def] identifier[collection_to_included_trees] ( identifier[collection] ):
literal[string]
keyword[if] identifier[is_str_type] ( identifier[collection] ):
identifier[collection] = identifier[read_as_json] ( identifier[collection] )
identifier[inc] =[]
keyword[for] identifier[d] keyword[in] identifier[collection] . identifier[get] ( literal[string] ,[]):
keyword[if] identifier[d] [ literal[string] ]== literal[string] :
identifier[inc] . identifier[append] ( identifier[d] )
keyword[return] identifier[inc] | def collection_to_included_trees(collection):
"""Takes a collection object (or a filepath to collection object), returns
each element of the `decisions` list that has the decision set to included.
"""
if is_str_type(collection):
collection = read_as_json(collection) # depends on [control=['if'], data=[]]
inc = []
for d in collection.get('decisions', []):
if d['decision'] == 'INCLUDED':
inc.append(d) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['d']]
return inc |
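For example (the studyID/treeID keys are illustrative; only the 'decision' key matters to the filter):
collection = {
    'decisions': [
        {'decision': 'INCLUDED', 'studyID': 'ot_100', 'treeID': 'tree1'},
        {'decision': 'EXCLUDED', 'studyID': 'ot_101', 'treeID': 'tree2'},
    ]
}
print(collection_to_included_trees(collection))
# [{'decision': 'INCLUDED', 'studyID': 'ot_100', 'treeID': 'tree1'}]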
def _read_value(self, file):
"""Read a single value from the given file"""
if self.data_type.nptype is not None:
dtype = (np.dtype(self.data_type.nptype).newbyteorder(
self.endianness))
return fromfile(file, dtype=dtype, count=1)
return self.data_type.read(file, self.endianness) | def function[_read_value, parameter[self, file]]:
constant[Read a single value from the given file]
if compare[name[self].data_type.nptype is_not constant[None]] begin[:]
variable[dtype] assign[=] call[call[name[np].dtype, parameter[name[self].data_type.nptype]].newbyteorder, parameter[name[self].endianness]]
return[call[name[fromfile], parameter[name[file]]]]
return[call[name[self].data_type.read, parameter[name[file], name[self].endianness]]] | keyword[def] identifier[_read_value] ( identifier[self] , identifier[file] ):
literal[string]
keyword[if] identifier[self] . identifier[data_type] . identifier[nptype] keyword[is] keyword[not] keyword[None] :
identifier[dtype] =( identifier[np] . identifier[dtype] ( identifier[self] . identifier[data_type] . identifier[nptype] ). identifier[newbyteorder] (
identifier[self] . identifier[endianness] ))
keyword[return] identifier[fromfile] ( identifier[file] , identifier[dtype] = identifier[dtype] , identifier[count] = literal[int] )
keyword[return] identifier[self] . identifier[data_type] . identifier[read] ( identifier[file] , identifier[self] . identifier[endianness] ) | def _read_value(self, file):
"""Read a single value from the given file"""
if self.data_type.nptype is not None:
dtype = np.dtype(self.data_type.nptype).newbyteorder(self.endianness)
return fromfile(file, dtype=dtype, count=1) # depends on [control=['if'], data=[]]
return self.data_type.read(file, self.endianness) |
def get_mirror(self, target_root, source_root=None):
"""
        Returns a File or Folder object representing what would exist if
        the entire fragment of this directory starting with `source_root`
        were copied to `target_root`.
>>> Folder('/usr/local/hyde/stuff').get_mirror('/usr/tmp',
source_root='/usr/local/hyde')
Folder('/usr/tmp/stuff')
"""
fragment = self.get_relative_path(
source_root if source_root else self.parent)
return Folder(target_root).child(fragment) | def function[get_mirror, parameter[self, target_root, source_root]]:
constant[
Returns a File or Folder object representing what would exist if
the entire fragment of this directory starting with `source_root`
were copied to `target_root`.
>>> Folder('/usr/local/hyde/stuff').get_mirror('/usr/tmp',
source_root='/usr/local/hyde')
Folder('/usr/tmp/stuff')
]
variable[fragment] assign[=] call[name[self].get_relative_path, parameter[<ast.IfExp object at 0x7da1b28f2aa0>]]
return[call[call[name[Folder], parameter[name[target_root]]].child, parameter[name[fragment]]]] | keyword[def] identifier[get_mirror] ( identifier[self] , identifier[target_root] , identifier[source_root] = keyword[None] ):
literal[string]
identifier[fragment] = identifier[self] . identifier[get_relative_path] (
identifier[source_root] keyword[if] identifier[source_root] keyword[else] identifier[self] . identifier[parent] )
keyword[return] identifier[Folder] ( identifier[target_root] ). identifier[child] ( identifier[fragment] ) | def get_mirror(self, target_root, source_root=None):
"""
Returns a File or Folder object that reperesents if the entire
fragment of this directory starting with `source_root` were copied
to `target_root`.
>>> Folder('/usr/local/hyde/stuff').get_mirror('/usr/tmp',
source_root='/usr/local/hyde')
Folder('/usr/tmp/stuff')
"""
fragment = self.get_relative_path(source_root if source_root else self.parent)
return Folder(target_root).child(fragment) |
def get_unique_families(hkls):
"""
Returns unique families of Miller indices. Families must be permutations
of each other.
Args:
hkls ([h, k, l]): List of Miller indices.
Returns:
{hkl: multiplicity}: A dict with unique hkl and multiplicity.
"""
# TODO: Definitely can be sped up.
def is_perm(hkl1, hkl2):
h1 = np.abs(hkl1)
h2 = np.abs(hkl2)
return all([i == j for i, j in zip(sorted(h1), sorted(h2))])
unique = collections.defaultdict(list)
for hkl1 in hkls:
found = False
for hkl2 in unique.keys():
if is_perm(hkl1, hkl2):
found = True
unique[hkl2].append(hkl1)
break
if not found:
unique[hkl1].append(hkl1)
pretty_unique = {}
for k, v in unique.items():
pretty_unique[sorted(v)[-1]] = len(v)
return pretty_unique | def function[get_unique_families, parameter[hkls]]:
constant[
Returns unique families of Miller indices. Families must be permutations
of each other.
Args:
hkls ([h, k, l]): List of Miller indices.
Returns:
{hkl: multiplicity}: A dict with unique hkl and multiplicity.
]
def function[is_perm, parameter[hkl1, hkl2]]:
variable[h1] assign[=] call[name[np].abs, parameter[name[hkl1]]]
variable[h2] assign[=] call[name[np].abs, parameter[name[hkl2]]]
return[call[name[all], parameter[<ast.ListComp object at 0x7da1b26ad450>]]]
variable[unique] assign[=] call[name[collections].defaultdict, parameter[name[list]]]
for taget[name[hkl1]] in starred[name[hkls]] begin[:]
variable[found] assign[=] constant[False]
for taget[name[hkl2]] in starred[call[name[unique].keys, parameter[]]] begin[:]
if call[name[is_perm], parameter[name[hkl1], name[hkl2]]] begin[:]
variable[found] assign[=] constant[True]
call[call[name[unique]][name[hkl2]].append, parameter[name[hkl1]]]
break
if <ast.UnaryOp object at 0x7da1b26ac4c0> begin[:]
call[call[name[unique]][name[hkl1]].append, parameter[name[hkl1]]]
variable[pretty_unique] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b26ad2d0>, <ast.Name object at 0x7da1b26aec20>]]] in starred[call[name[unique].items, parameter[]]] begin[:]
call[name[pretty_unique]][call[call[name[sorted], parameter[name[v]]]][<ast.UnaryOp object at 0x7da1b26acb20>]] assign[=] call[name[len], parameter[name[v]]]
return[name[pretty_unique]] | keyword[def] identifier[get_unique_families] ( identifier[hkls] ):
literal[string]
keyword[def] identifier[is_perm] ( identifier[hkl1] , identifier[hkl2] ):
identifier[h1] = identifier[np] . identifier[abs] ( identifier[hkl1] )
identifier[h2] = identifier[np] . identifier[abs] ( identifier[hkl2] )
keyword[return] identifier[all] ([ identifier[i] == identifier[j] keyword[for] identifier[i] , identifier[j] keyword[in] identifier[zip] ( identifier[sorted] ( identifier[h1] ), identifier[sorted] ( identifier[h2] ))])
identifier[unique] = identifier[collections] . identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[hkl1] keyword[in] identifier[hkls] :
identifier[found] = keyword[False]
keyword[for] identifier[hkl2] keyword[in] identifier[unique] . identifier[keys] ():
keyword[if] identifier[is_perm] ( identifier[hkl1] , identifier[hkl2] ):
identifier[found] = keyword[True]
identifier[unique] [ identifier[hkl2] ]. identifier[append] ( identifier[hkl1] )
keyword[break]
keyword[if] keyword[not] identifier[found] :
identifier[unique] [ identifier[hkl1] ]. identifier[append] ( identifier[hkl1] )
identifier[pretty_unique] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[unique] . identifier[items] ():
identifier[pretty_unique] [ identifier[sorted] ( identifier[v] )[- literal[int] ]]= identifier[len] ( identifier[v] )
keyword[return] identifier[pretty_unique] | def get_unique_families(hkls):
"""
Returns unique families of Miller indices. Families must be permutations
of each other.
Args:
hkls ([h, k, l]): List of Miller indices.
Returns:
{hkl: multiplicity}: A dict with unique hkl and multiplicity.
"""
# TODO: Definitely can be sped up.
def is_perm(hkl1, hkl2):
h1 = np.abs(hkl1)
h2 = np.abs(hkl2)
return all([i == j for (i, j) in zip(sorted(h1), sorted(h2))])
unique = collections.defaultdict(list)
for hkl1 in hkls:
found = False
for hkl2 in unique.keys():
if is_perm(hkl1, hkl2):
found = True
unique[hkl2].append(hkl1)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['hkl2']]
if not found:
unique[hkl1].append(hkl1) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['hkl1']]
pretty_unique = {}
for (k, v) in unique.items():
pretty_unique[sorted(v)[-1]] = len(v) # depends on [control=['for'], data=[]]
return pretty_unique |
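For example, the four permutations of {100} below collapse into one family with multiplicity 4 (the hkls must be hashable, e.g. tuples, since they are used as dict keys; numpy and collections are imported at module level, as the function assumes):
hkls = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (-1, 0, 0)]
print(get_unique_families(hkls))  # {(1, 0, 0): 4}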
def type_and_times(type_: str, start: Timestamp, end: Timestamp, probability: Number = None) -> str:
"""
Format line type and times into the beginning of a spoken line string
"""
if not type_:
return ''
if type_ == 'BECMG':
return f"At {start.dt.hour or 'midnight'} zulu becoming"
ret = f"From {start.dt.hour or 'midnight'} to {end.dt.hour or 'midnight'} zulu,"
if probability and probability.value:
ret += f" there's a {probability.value}% chance for"
if type_ == 'INTER':
ret += ' intermittent'
elif type_ == 'TEMPO':
ret += ' temporary'
return ret | def function[type_and_times, parameter[type_, start, end, probability]]:
constant[
Format line type and times into the beginning of a spoken line string
]
if <ast.UnaryOp object at 0x7da20c6e5570> begin[:]
return[constant[]]
if compare[name[type_] equal[==] constant[BECMG]] begin[:]
return[<ast.JoinedStr object at 0x7da20c76c8e0>]
variable[ret] assign[=] <ast.JoinedStr object at 0x7da20c76de40>
if <ast.BoolOp object at 0x7da20c76c220> begin[:]
<ast.AugAssign object at 0x7da20c76c3a0>
if compare[name[type_] equal[==] constant[INTER]] begin[:]
<ast.AugAssign object at 0x7da204960ee0>
return[name[ret]] | keyword[def] identifier[type_and_times] ( identifier[type_] : identifier[str] , identifier[start] : identifier[Timestamp] , identifier[end] : identifier[Timestamp] , identifier[probability] : identifier[Number] = keyword[None] )-> identifier[str] :
literal[string]
keyword[if] keyword[not] identifier[type_] :
keyword[return] literal[string]
keyword[if] identifier[type_] == literal[string] :
keyword[return] literal[string]
identifier[ret] = literal[string]
keyword[if] identifier[probability] keyword[and] identifier[probability] . identifier[value] :
identifier[ret] += literal[string]
keyword[if] identifier[type_] == literal[string] :
identifier[ret] += literal[string]
keyword[elif] identifier[type_] == literal[string] :
identifier[ret] += literal[string]
keyword[return] identifier[ret] | def type_and_times(type_: str, start: Timestamp, end: Timestamp, probability: Number=None) -> str:
"""
Format line type and times into the beginning of a spoken line string
"""
if not type_:
return '' # depends on [control=['if'], data=[]]
if type_ == 'BECMG':
return f"At {start.dt.hour or 'midnight'} zulu becoming" # depends on [control=['if'], data=[]]
ret = f"From {start.dt.hour or 'midnight'} to {end.dt.hour or 'midnight'} zulu,"
if probability and probability.value:
ret += f" there's a {probability.value}% chance for" # depends on [control=['if'], data=[]]
if type_ == 'INTER':
ret += ' intermittent' # depends on [control=['if'], data=[]]
elif type_ == 'TEMPO':
ret += ' temporary' # depends on [control=['if'], data=[]]
return ret |
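A minimal sketch of calling type_and_times above; the Timestamp and Number stand-ins below are hypothetical objects exposing only the attributes the function actually reads (.dt.hour and .value).

from types import SimpleNamespace

start = SimpleNamespace(dt=SimpleNamespace(hour=6))
end = SimpleNamespace(dt=SimpleNamespace(hour=12))
prob = SimpleNamespace(value=30)

print(type_and_times('TEMPO', start, end, prob))
# From 6 to 12 zulu, there's a 30% chance for temporary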
def init_app(self, app):
'''Initializes the application with the extension.
:param app: The Flask application object.
'''
self.app = app
self._session_conf = app.config.get('BEAKER_SESSION', {
'session.type': 'file',
'session.data_dir': '/tmp/session/data',
'session.lock_dir': '/tmp/session/lock'
})
app.wsgi_app = SessionMiddleware(app.wsgi_app, self._session_conf)
app.session_interface = BeakerSessionInterface() | def function[init_app, parameter[self, app]]:
constant[Initializes the application with the extension.
:param app: The Flask application object.
]
name[self].app assign[=] name[app]
name[self]._session_conf assign[=] call[name[app].config.get, parameter[constant[BEAKER_SESSION], dictionary[[<ast.Constant object at 0x7da1b236ba60>, <ast.Constant object at 0x7da1b236ac80>, <ast.Constant object at 0x7da1b2368f10>], [<ast.Constant object at 0x7da1b2368310>, <ast.Constant object at 0x7da1b236aa40>, <ast.Constant object at 0x7da1b23684c0>]]]]
name[app].wsgi_app assign[=] call[name[SessionMiddleware], parameter[name[app].wsgi_app, name[self]._session_conf]]
name[app].session_interface assign[=] call[name[BeakerSessionInterface], parameter[]] | keyword[def] identifier[init_app] ( identifier[self] , identifier[app] ):
literal[string]
identifier[self] . identifier[app] = identifier[app]
identifier[self] . identifier[_session_conf] = identifier[app] . identifier[config] . identifier[get] ( literal[string] ,{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
})
identifier[app] . identifier[wsgi_app] = identifier[SessionMiddleware] ( identifier[app] . identifier[wsgi_app] , identifier[self] . identifier[_session_conf] )
identifier[app] . identifier[session_interface] = identifier[BeakerSessionInterface] () | def init_app(self, app):
"""Initalizes the application with the extension.
:param app: The Flask application object.
"""
self.app = app
self._session_conf = app.config.get('BEAKER_SESSION', {'session.type': 'file', 'session.data_dir': '/tmp/session/data', 'session.lock_dir': '/tmp/session/lock'})
app.wsgi_app = SessionMiddleware(app.wsgi_app, self._session_conf)
app.session_interface = BeakerSessionInterface() |
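A hedged usage sketch for the Flask/Beaker glue above; FlaskBeakerSessions is a hypothetical name for the extension class that owns init_app, and the in-memory session type is chosen only for illustration.

from flask import Flask

app = Flask(__name__)
app.config['BEAKER_SESSION'] = {'session.type': 'memory'}
sessions = FlaskBeakerSessions()  # hypothetical extension class
sessions.init_app(app)            # wraps app.wsgi_app in SessionMiddleware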
def transition_issue(self, issue, transition, fields=None, comment=None, worklog=None, **fieldargs):
"""Perform a transition on an issue.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored. Field values will be set on the issue as part of the transition process.
:param issue: ID or key of the issue to perform the transition on
:param transition: ID or name of the transition to perform
:param comment: *Optional* String to add as comment to the issue when
performing the transition.
:param fields: a dict containing field names and the values to use.
If present, all other keyword arguments will be ignored
"""
transitionId = None
try:
transitionId = int(transition)
except Exception:
# cannot cast to int, so try to find transitionId by name
transitionId = self.find_transitionid_by_name(issue, transition)
if transitionId is None:
raise JIRAError("Invalid transition name. %s" % transition)
data = {
'transition': {
'id': transitionId}}
if comment:
data['update'] = {'comment': [{'add': {'body': comment}}]}
if worklog:
data['update'] = {'worklog': [{'add': {'timeSpent': worklog}}]}
if fields is not None:
data['fields'] = fields
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field]
data['fields'] = fields_dict
url = self._get_url('issue/' + str(issue) + '/transitions')
r = self._session.post(
url, data=json.dumps(data))
try:
r_json = json_loads(r)
except ValueError as e:
logging.error("%s\n%s" % (e, r.text))
raise e
return r_json | def function[transition_issue, parameter[self, issue, transition, fields, comment, worklog]]:
constant[Perform a transition on an issue.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored. Field values will be set on the issue as part of the transition process.
:param issue: ID or key of the issue to perform the transition on
:param transition: ID or name of the transition to perform
:param comment: *Optional* String to add as comment to the issue when
performing the transition.
:param fields: a dict containing field names and the values to use.
If present, all other keyword arguments will be ignored
]
variable[transitionId] assign[=] constant[None]
<ast.Try object at 0x7da1b21ba680>
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b21bbe80>], [<ast.Dict object at 0x7da1b21ba4a0>]]
if name[comment] begin[:]
call[name[data]][constant[update]] assign[=] dictionary[[<ast.Constant object at 0x7da1b21b8640>], [<ast.List object at 0x7da1b21bb1c0>]]
if name[worklog] begin[:]
call[name[data]][constant[update]] assign[=] dictionary[[<ast.Constant object at 0x7da1b21b8d90>], [<ast.List object at 0x7da1b21bafb0>]]
if compare[name[fields] is_not constant[None]] begin[:]
call[name[data]][constant[fields]] assign[=] name[fields]
variable[url] assign[=] call[name[self]._get_url, parameter[binary_operation[binary_operation[constant[issue/] + call[name[str], parameter[name[issue]]]] + constant[/transitions]]]]
variable[r] assign[=] call[name[self]._session.post, parameter[name[url]]]
<ast.Try object at 0x7da1b21d55a0>
return[name[r_json]] | keyword[def] identifier[transition_issue] ( identifier[self] , identifier[issue] , identifier[transition] , identifier[fields] = keyword[None] , identifier[comment] = keyword[None] , identifier[worklog] = keyword[None] ,** identifier[fieldargs] ):
literal[string]
identifier[transitionId] = keyword[None]
keyword[try] :
identifier[transitionId] = identifier[int] ( identifier[transition] )
keyword[except] identifier[Exception] :
identifier[transitionId] = identifier[self] . identifier[find_transitionid_by_name] ( identifier[issue] , identifier[transition] )
keyword[if] identifier[transitionId] keyword[is] keyword[None] :
keyword[raise] identifier[JIRAError] ( literal[string] % identifier[transition] )
identifier[data] ={
literal[string] :{
literal[string] : identifier[transitionId] }}
keyword[if] identifier[comment] :
identifier[data] [ literal[string] ]={ literal[string] :[{ literal[string] :{ literal[string] : identifier[comment] }}]}
keyword[if] identifier[worklog] :
identifier[data] [ literal[string] ]={ literal[string] :[{ literal[string] :{ literal[string] : identifier[worklog] }}]}
keyword[if] identifier[fields] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[fields]
keyword[else] :
identifier[fields_dict] ={}
keyword[for] identifier[field] keyword[in] identifier[fieldargs] :
identifier[fields_dict] [ identifier[field] ]= identifier[fieldargs] [ identifier[field] ]
identifier[data] [ literal[string] ]= identifier[fields_dict]
identifier[url] = identifier[self] . identifier[_get_url] ( literal[string] + identifier[str] ( identifier[issue] )+ literal[string] )
identifier[r] = identifier[self] . identifier[_session] . identifier[post] (
identifier[url] , identifier[data] = identifier[json] . identifier[dumps] ( identifier[data] ))
keyword[try] :
identifier[r_json] = identifier[json_loads] ( identifier[r] )
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
identifier[logging] . identifier[error] ( literal[string] %( identifier[e] , identifier[r] . identifier[text] ))
keyword[raise] identifier[e]
keyword[return] identifier[r_json] | def transition_issue(self, issue, transition, fields=None, comment=None, worklog=None, **fieldargs):
"""Perform a transition on an issue.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored. Field values will be set on the issue as part of the transition process.
:param issue: ID or key of the issue to perform the transition on
:param transition: ID or name of the transition to perform
:param comment: *Optional* String to add as comment to the issue when
performing the transition.
:param fields: a dict containing field names and the values to use.
If present, all other keyword arguments will be ignored
"""
transitionId = None
try:
transitionId = int(transition) # depends on [control=['try'], data=[]]
except Exception:
# cannot cast to int, so try to find transitionId by name
transitionId = self.find_transitionid_by_name(issue, transition)
if transitionId is None:
raise JIRAError('Invalid transition name. %s' % transition) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
data = {'transition': {'id': transitionId}}
if comment:
data['update'] = {'comment': [{'add': {'body': comment}}]} # depends on [control=['if'], data=[]]
if worklog:
data['update'] = {'worklog': [{'add': {'timeSpent': worklog}}]} # depends on [control=['if'], data=[]]
if fields is not None:
data['fields'] = fields # depends on [control=['if'], data=['fields']]
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field] # depends on [control=['for'], data=['field']]
data['fields'] = fields_dict
url = self._get_url('issue/' + str(issue) + '/transitions')
r = self._session.post(url, data=json.dumps(data))
try:
r_json = json_loads(r) # depends on [control=['try'], data=[]]
except ValueError as e:
logging.error('%s\n%s' % (e, r.text))
raise e # depends on [control=['except'], data=['e']]
return r_json |
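A usage sketch, assuming an authenticated python-jira client; the server URL, credentials, issue key, and field values are placeholders.

from jira import JIRA

jira = JIRA(server='https://jira.example.com', basic_auth=('user', 'secret'))
# transition by name, setting a resolution field and leaving a comment
jira.transition_issue('PROJ-123', 'Done',
                      fields={'resolution': {'name': 'Fixed'}},
                      comment='Closed via API')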
def intensity_at_radius(self, radius):
""" Compute the intensity of the profile at a given radius.
Parameters
----------
radius : float
The distance from the centre of the profile.
"""
return self.intensity * np.exp(
-self.sersic_constant * (((radius / self.effective_radius) ** (1. / self.sersic_index)) - 1)) | def function[intensity_at_radius, parameter[self, radius]]:
constant[ Compute the intensity of the profile at a given radius.
Parameters
----------
radius : float
The distance from the centre of the profile.
]
return[binary_operation[name[self].intensity * call[name[np].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da20c76f670> * binary_operation[binary_operation[binary_operation[name[radius] / name[self].effective_radius] ** binary_operation[constant[1.0] / name[self].sersic_index]] - constant[1]]]]]]] | keyword[def] identifier[intensity_at_radius] ( identifier[self] , identifier[radius] ):
literal[string]
keyword[return] identifier[self] . identifier[intensity] * identifier[np] . identifier[exp] (
- identifier[self] . identifier[sersic_constant] *((( identifier[radius] / identifier[self] . identifier[effective_radius] )**( literal[int] / identifier[self] . identifier[sersic_index] ))- literal[int] )) | def intensity_at_radius(self, radius):
""" Compute the intensity of the profile at a given radius.
Parameters
----------
radius : float
The distance from the centre of the profile.
"""
return self.intensity * np.exp(-self.sersic_constant * ((radius / self.effective_radius) ** (1.0 / self.sersic_index) - 1)) |
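A standalone numerical check of the Sersic formula the method evaluates; the parameter values are illustrative, with sersic_constant set near the usual b_n approximation for n = 4.

import numpy as np

intensity, effective_radius = 1.0, 2.0
sersic_index, sersic_constant = 4.0, 7.669
radius = 2.0
value = intensity * np.exp(
    -sersic_constant * ((radius / effective_radius) ** (1.0 / sersic_index) - 1))
# at radius == effective_radius the exponent vanishes, so value == 1.0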
def _issubclass_Tuple(subclass, superclass, bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check):
"""Helper for _issubclass, a.k.a pytypes.issubtype.
"""
# this function is partly based on code from typing module 3.5.2.2
if subclass in _extra_dict:
subclass = _extra_dict[subclass]
if not is_Type(subclass):
# To TypeError.
return False
if not is_Tuple(subclass):
if is_Generic(subclass):
try:
return _issubclass_Generic(subclass, superclass,
bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs,
_recursion_check)
except:
pass
elif is_Union(subclass):
return all(_issubclass_Tuple(t, superclass, bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check)
for t in get_Union_params(subclass))
else:
return False
super_args = get_Tuple_params(superclass)
if super_args is None:
return True
sub_args = get_Tuple_params(subclass)
if sub_args is None:
return False # ???
# Covariance.
# For now we check ellipsis in most explicit manner.
# Todo: Compactify and Pythonify ellipsis branches (tests required before this).
if is_Tuple_ellipsis(subclass):
if is_Tuple_ellipsis(superclass):
# both are ellipsis, so no length check
common = min(len(super_args), len(sub_args))
for i in range(common):
if not _issubclass(sub_args[i], super_args[i], bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check):
return False
if len(super_args) < len(sub_args):
for i in range(len(super_args), len(sub_args)):
# Check remaining super args against the ellipsis type
if not _issubclass(sub_args[i], super_args[-1], bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check):
return False
elif len(super_args) > len(sub_args):
for i in range(len(sub_args), len(super_args)):
# Check remaining super args against the ellipsis type
if not _issubclass(sub_args[-1], super_args[i], bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check):
return False
return True
else:
# only subclass has ellipsis
if len(super_args) < len(sub_args)-1:
return False
for i in range(len(sub_args)-1):
if not _issubclass(sub_args[i], super_args[i], bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check):
return False
for i in range(len(sub_args), len(super_args)):
# Check remaining super args against the ellipsis type
if not _issubclass(sub_args[-1], super_args[i], bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check):
return False
return True
elif is_Tuple_ellipsis(superclass):
# only superclass has ellipsis
if len(super_args)-1 > len(sub_args):
return False
for i in range(len(super_args)-1):
if not _issubclass(sub_args[i], super_args[i], bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check):
return False
for i in range(len(super_args), len(sub_args)):
# Check remaining sub args against the ellipsis type
if not _issubclass(sub_args[i], super_args[-1], bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check):
return False
return True
else:
# none has ellipsis, so strict length check
return (len(super_args) == len(sub_args) and
all(_issubclass(x, p, bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check)
for x, p in zip(sub_args, super_args))) | def function[_issubclass_Tuple, parameter[subclass, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check]]:
constant[Helper for _issubclass, a.k.a pytypes.issubtype.
]
if compare[name[subclass] in name[_extra_dict]] begin[:]
variable[subclass] assign[=] call[name[_extra_dict]][name[subclass]]
if <ast.UnaryOp object at 0x7da1b0e26470> begin[:]
return[constant[False]]
if <ast.UnaryOp object at 0x7da1b0e26b90> begin[:]
if call[name[is_Generic], parameter[name[subclass]]] begin[:]
<ast.Try object at 0x7da1b0e25b70>
variable[super_args] assign[=] call[name[get_Tuple_params], parameter[name[superclass]]]
if compare[name[super_args] is constant[None]] begin[:]
return[constant[True]]
variable[sub_args] assign[=] call[name[get_Tuple_params], parameter[name[subclass]]]
if compare[name[sub_args] is constant[None]] begin[:]
return[constant[False]]
if call[name[is_Tuple_ellipsis], parameter[name[subclass]]] begin[:]
if call[name[is_Tuple_ellipsis], parameter[name[superclass]]] begin[:]
variable[common] assign[=] call[name[min], parameter[call[name[len], parameter[name[super_args]]], call[name[len], parameter[name[sub_args]]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[common]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b0de8760> begin[:]
return[constant[False]]
if compare[call[name[len], parameter[name[super_args]]] less[<] call[name[len], parameter[name[sub_args]]]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[super_args]]], call[name[len], parameter[name[sub_args]]]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b0de9e70> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[_issubclass_Tuple] ( identifier[subclass] , identifier[superclass] , identifier[bound_Generic] , identifier[bound_typevars] ,
identifier[bound_typevars_readonly] , identifier[follow_fwd_refs] , identifier[_recursion_check] ):
literal[string]
keyword[if] identifier[subclass] keyword[in] identifier[_extra_dict] :
identifier[subclass] = identifier[_extra_dict] [ identifier[subclass] ]
keyword[if] keyword[not] identifier[is_Type] ( identifier[subclass] ):
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[is_Tuple] ( identifier[subclass] ):
keyword[if] identifier[is_Generic] ( identifier[subclass] ):
keyword[try] :
keyword[return] identifier[_issubclass_Generic] ( identifier[subclass] , identifier[superclass] ,
identifier[bound_Generic] , identifier[bound_typevars] ,
identifier[bound_typevars_readonly] , identifier[follow_fwd_refs] ,
identifier[_recursion_check] )
keyword[except] :
keyword[pass]
keyword[elif] identifier[is_Union] ( identifier[subclass] ):
keyword[return] identifier[all] ( identifier[_issubclass_Tuple] ( identifier[t] , identifier[superclass] , identifier[bound_Generic] , identifier[bound_typevars] ,
identifier[bound_typevars_readonly] , identifier[follow_fwd_refs] , identifier[_recursion_check] )
keyword[for] identifier[t] keyword[in] identifier[get_Union_params] ( identifier[subclass] ))
keyword[else] :
keyword[return] keyword[False]
identifier[super_args] = identifier[get_Tuple_params] ( identifier[superclass] )
keyword[if] identifier[super_args] keyword[is] keyword[None] :
keyword[return] keyword[True]
identifier[sub_args] = identifier[get_Tuple_params] ( identifier[subclass] )
keyword[if] identifier[sub_args] keyword[is] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[is_Tuple_ellipsis] ( identifier[subclass] ):
keyword[if] identifier[is_Tuple_ellipsis] ( identifier[superclass] ):
identifier[common] = identifier[min] ( identifier[len] ( identifier[super_args] ), identifier[len] ( identifier[sub_args] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[common] ):
keyword[if] keyword[not] identifier[_issubclass] ( identifier[sub_args] [ identifier[i] ], identifier[super_args] [ identifier[i] ], identifier[bound_Generic] , identifier[bound_typevars] ,
identifier[bound_typevars_readonly] , identifier[follow_fwd_refs] , identifier[_recursion_check] ):
keyword[return] keyword[False]
keyword[if] identifier[len] ( identifier[super_args] )< identifier[len] ( identifier[sub_args] ):
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[super_args] ), identifier[len] ( identifier[sub_args] )):
keyword[if] keyword[not] identifier[_issubclass] ( identifier[sub_args] [ identifier[i] ], identifier[super_args] [- literal[int] ], identifier[bound_Generic] , identifier[bound_typevars] ,
identifier[bound_typevars_readonly] , identifier[follow_fwd_refs] , identifier[_recursion_check] ):
keyword[return] keyword[False]
keyword[elif] identifier[len] ( identifier[super_args] )> identifier[len] ( identifier[sub_args] ):
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[sub_args] ), identifier[len] ( identifier[super_args] )):
keyword[if] keyword[not] identifier[_issubclass] ( identifier[sub_args] [- literal[int] ], identifier[super_args] [ identifier[i] ], identifier[bound_Generic] , identifier[bound_typevars] ,
identifier[bound_typevars_readonly] , identifier[follow_fwd_refs] , identifier[_recursion_check] ):
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[else] :
keyword[if] identifier[len] ( identifier[super_args] )< identifier[len] ( identifier[sub_args] )- literal[int] :
keyword[return] keyword[False]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[sub_args] )- literal[int] ):
keyword[if] keyword[not] identifier[_issubclass] ( identifier[sub_args] [ identifier[i] ], identifier[super_args] [ identifier[i] ], identifier[bound_Generic] , identifier[bound_typevars] ,
identifier[bound_typevars_readonly] , identifier[follow_fwd_refs] , identifier[_recursion_check] ):
keyword[return] keyword[False]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[sub_args] ), identifier[len] ( identifier[super_args] )):
keyword[if] keyword[not] identifier[_issubclass] ( identifier[sub_args] [- literal[int] ], identifier[super_args] [ identifier[i] ], identifier[bound_Generic] , identifier[bound_typevars] ,
identifier[bound_typevars_readonly] , identifier[follow_fwd_refs] , identifier[_recursion_check] ):
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[elif] identifier[is_Tuple_ellipsis] ( identifier[superclass] ):
keyword[if] identifier[len] ( identifier[super_args] )- literal[int] > identifier[len] ( identifier[sub_args] ):
keyword[return] keyword[False]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[super_args] )- literal[int] ):
keyword[if] keyword[not] identifier[_issubclass] ( identifier[sub_args] [ identifier[i] ], identifier[super_args] [ identifier[i] ], identifier[bound_Generic] , identifier[bound_typevars] ,
identifier[bound_typevars_readonly] , identifier[follow_fwd_refs] , identifier[_recursion_check] ):
keyword[return] keyword[False]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[super_args] ), identifier[len] ( identifier[sub_args] )):
keyword[if] keyword[not] identifier[_issubclass] ( identifier[sub_args] [ identifier[i] ], identifier[super_args] [- literal[int] ], identifier[bound_Generic] , identifier[bound_typevars] ,
identifier[bound_typevars_readonly] , identifier[follow_fwd_refs] , identifier[_recursion_check] ):
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[else] :
keyword[return] ( identifier[len] ( identifier[super_args] )== identifier[len] ( identifier[sub_args] ) keyword[and]
identifier[all] ( identifier[_issubclass] ( identifier[x] , identifier[p] , identifier[bound_Generic] , identifier[bound_typevars] ,
identifier[bound_typevars_readonly] , identifier[follow_fwd_refs] , identifier[_recursion_check] )
keyword[for] identifier[x] , identifier[p] keyword[in] identifier[zip] ( identifier[sub_args] , identifier[super_args] ))) | def _issubclass_Tuple(subclass, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check):
"""Helper for _issubclass, a.k.a pytypes.issubtype.
"""
# this function is partly based on code from typing module 3.5.2.2
if subclass in _extra_dict:
subclass = _extra_dict[subclass] # depends on [control=['if'], data=['subclass', '_extra_dict']]
if not is_Type(subclass):
# To TypeError.
return False # depends on [control=['if'], data=[]]
if not is_Tuple(subclass):
if is_Generic(subclass):
try:
return _issubclass_Generic(subclass, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif is_Union(subclass):
return all((_issubclass_Tuple(t, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) for t in get_Union_params(subclass))) # depends on [control=['if'], data=[]]
else:
return False # depends on [control=['if'], data=[]]
super_args = get_Tuple_params(superclass)
if super_args is None:
return True # depends on [control=['if'], data=[]]
sub_args = get_Tuple_params(subclass)
if sub_args is None:
return False # ??? # depends on [control=['if'], data=[]]
# Covariance.
# For now we check ellipsis in most explicit manner.
# Todo: Compactify and Pythonify ellipsis branches (tests required before this).
if is_Tuple_ellipsis(subclass):
if is_Tuple_ellipsis(superclass):
# both are ellipsis, so no length check
common = min(len(super_args), len(sub_args))
for i in range(common):
if not _issubclass(sub_args[i], super_args[i], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
if len(super_args) < len(sub_args):
for i in range(len(super_args), len(sub_args)):
# Check remaining super args against the ellipsis type
if not _issubclass(sub_args[i], super_args[-1], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
elif len(super_args) > len(sub_args):
for i in range(len(sub_args), len(super_args)):
# Check remaining super args against the ellipsis type
if not _issubclass(sub_args[-1], super_args[i], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
return True # depends on [control=['if'], data=[]]
else:
# only subclass has ellipsis
if len(super_args) < len(sub_args) - 1:
return False # depends on [control=['if'], data=[]]
for i in range(len(sub_args) - 1):
if not _issubclass(sub_args[i], super_args[i], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
for i in range(len(sub_args), len(super_args)):
# Check remaining super args against the ellipsis type
if not _issubclass(sub_args[-1], super_args[i], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return True # depends on [control=['if'], data=[]]
elif is_Tuple_ellipsis(superclass):
# only superclass has ellipsis
if len(super_args) - 1 > len(sub_args):
return False # depends on [control=['if'], data=[]]
for i in range(len(super_args) - 1):
if not _issubclass(sub_args[i], super_args[i], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
for i in range(len(super_args), len(sub_args)):
# Check remaining sub args against the ellipsis type
if not _issubclass(sub_args[i], super_args[-1], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return True # depends on [control=['if'], data=[]]
else:
# none has ellipsis, so strict length check
return len(super_args) == len(sub_args) and all((_issubclass(x, p, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) for (x, p) in zip(sub_args, super_args))) |
def _build_path(self, path, metric_type):
"""Return a normalized path.
:param list path: elements of the metric path to record
:param str metric_type: The metric type
:rtype: str
"""
path = self._get_prefixes(metric_type) + list(path)
return '{}.{}'.format(self._namespace,
'.'.join(str(p).replace('.', '-') for p in path)) | def function[_build_path, parameter[self, path, metric_type]]:
constant[Return a normalized path.
:param list path: elements of the metric path to record
:param str metric_type: The metric type
:rtype: str
]
variable[path] assign[=] binary_operation[call[name[self]._get_prefixes, parameter[name[metric_type]]] + call[name[list], parameter[name[path]]]]
return[call[constant[{}.{}].format, parameter[name[self]._namespace, call[constant[.].join, parameter[<ast.GeneratorExp object at 0x7da1b2214130>]]]]] | keyword[def] identifier[_build_path] ( identifier[self] , identifier[path] , identifier[metric_type] ):
literal[string]
identifier[path] = identifier[self] . identifier[_get_prefixes] ( identifier[metric_type] )+ identifier[list] ( identifier[path] )
keyword[return] literal[string] . identifier[format] ( identifier[self] . identifier[_namespace] ,
literal[string] . identifier[join] ( identifier[str] ( identifier[p] ). identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[p] keyword[in] identifier[path] )) | def _build_path(self, path, metric_type):
"""Return a normalized path.
:param list path: elements of the metric path to record
:param str metric_type: The metric type
:rtype: str
"""
path = self._get_prefixes(metric_type) + list(path)
return '{}.{}'.format(self._namespace, '.'.join((str(p).replace('.', '-') for p in path))) |
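A self-contained restatement of the normalisation performed by _build_path, with an assumed namespace and prefix list, since the method itself needs a configured client instance.

def build_path_demo(namespace, prefixes, path):
    # same steps as _build_path: prepend the prefixes, then join all
    # elements with '.', replacing dots inside each element by dashes
    path = prefixes + list(path)
    return '{}.{}'.format(namespace,
                          '.'.join(str(p).replace('.', '-') for p in path))

assert build_path_demo('myapp', ['counters'], ['http', '200.ok']) == \
    'myapp.counters.http.200-ok'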
def _massage_metakeys(dct, prfx):
"""
Returns a copy of the supplied dictionary, prefixing any keys that do
not begin with the specified prefix accordingly.
"""
lowprefix = prfx.lower()
ret = {}
for k, v in list(dct.items()):
if not k.lower().startswith(lowprefix):
k = "%s%s" % (prfx, k)
ret[k] = v
return ret | def function[_massage_metakeys, parameter[dct, prfx]]:
constant[
Returns a copy of the supplied dictionary, prefixing any keys that do
not begin with the specified prefix accordingly.
]
variable[lowprefix] assign[=] call[name[prfx].lower, parameter[]]
variable[ret] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b056e5c0>, <ast.Name object at 0x7da1b056f9d0>]]] in starred[call[name[list], parameter[call[name[dct].items, parameter[]]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b056f820> begin[:]
variable[k] assign[=] binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b056d600>, <ast.Name object at 0x7da1b056fca0>]]]
call[name[ret]][name[k]] assign[=] name[v]
return[name[ret]] | keyword[def] identifier[_massage_metakeys] ( identifier[dct] , identifier[prfx] ):
literal[string]
identifier[lowprefix] = identifier[prfx] . identifier[lower] ()
identifier[ret] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[list] ( identifier[dct] . identifier[items] ()):
keyword[if] keyword[not] identifier[k] . identifier[lower] (). identifier[startswith] ( identifier[lowprefix] ):
identifier[k] = literal[string] %( identifier[prfx] , identifier[k] )
identifier[ret] [ identifier[k] ]= identifier[v]
keyword[return] identifier[ret] | def _massage_metakeys(dct, prfx):
"""
Returns a copy of the supplied dictionary, prefixing any keys that do
not begin with the specified prefix accordingly.
"""
lowprefix = prfx.lower()
ret = {}
for (k, v) in list(dct.items()):
if not k.lower().startswith(lowprefix):
k = '%s%s' % (prfx, k) # depends on [control=['if'], data=[]]
ret[k] = v # depends on [control=['for'], data=[]]
return ret |
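The helper above is module-level, so it can be exercised directly; the prefix mirrors the X-Object-Meta- convention used by Swift-style object stores.

meta = {'color': 'blue', 'X-Object-Meta-Size': 'large'}
print(_massage_metakeys(meta, 'X-Object-Meta-'))
# {'X-Object-Meta-color': 'blue', 'X-Object-Meta-Size': 'large'}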
def pre_approval_cancel(self, code):
""" cancel a subscribe """
response = self.get(url=self.config.PRE_APPROVAL_CANCEL_URL % code)
return PagSeguroPreApprovalCancel(response.content, self.config) | def function[pre_approval_cancel, parameter[self, code]]:
constant[ cancel a subscription ]
variable[response] assign[=] call[name[self].get, parameter[]]
return[call[name[PagSeguroPreApprovalCancel], parameter[name[response].content, name[self].config]]] | keyword[def] identifier[pre_approval_cancel] ( identifier[self] , identifier[code] ):
literal[string]
identifier[response] = identifier[self] . identifier[get] ( identifier[url] = identifier[self] . identifier[config] . identifier[PRE_APPROVAL_CANCEL_URL] % identifier[code] )
keyword[return] identifier[PagSeguroPreApprovalCancel] ( identifier[response] . identifier[content] , identifier[self] . identifier[config] ) | def pre_approval_cancel(self, code):
""" cancel a subscribe """
response = self.get(url=self.config.PRE_APPROVAL_CANCEL_URL % code)
return PagSeguroPreApprovalCancel(response.content, self.config) |
def slice_index(self, *slice_dims, **kwargs):
"""
Returns a tuple of slices, each slice equal to the
slice(lower_extent, upper_extent, 1) of the dimensions
supplied in slice_dims. If the dimension is integer d,
slice(0, d, 1) will be used instead of the lower and upper extents
.. code-block:: python
A = np.ones((ntime, na))
idx = cube.slice_index('ntime','na', 3)
A[idx].sum()
ntime, na, components = cube.slice_index('ntime', 'na', 3)
A[ntime, na, components].sum()
Parameters
----------
*slice_dims : tuple
Tuple of dimensions for which slice
objects should be returned.
Returns
-------
tuple
Tuple of :code:`slice(lower,upper,1)` objects
"""
return tuple(slice(l, u, 1) for l, u in zip(
self.dim_lower_extent(*slice_dims, single=False),
self.dim_upper_extent(*slice_dims, single=False))) | def function[slice_index, parameter[self]]:
constant[
Returns a tuple of slices, each slice equal to the
slice(lower_extent, upper_extent, 1) of the dimensions
supplied in slice_dims. If the dimension is integer d,
slice(0, d, 1) will be used instead of the lower and upper extents
.. code-block:: python
A = np.ones((ntime, na))
idx = cube.slice_index('ntime','na', 3)
A[idx].sum()
ntime, na, components = cube.slice_index('ntime', 'na', 3)
A[ntime, na, components].sum()
Parameters
----------
*slice_dims : tuple
Tuple of dimensions for which slice
objects should be returned.
Returns
-------
tuple
Tuple of :code:`slice(lower,upper,1)` objects
]
return[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da20c76da80>]]] | keyword[def] identifier[slice_index] ( identifier[self] ,* identifier[slice_dims] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[tuple] ( identifier[slice] ( identifier[l] , identifier[u] , literal[int] ) keyword[for] identifier[l] , identifier[u] keyword[in] identifier[zip] (
identifier[self] . identifier[dim_lower_extent] (* identifier[slice_dims] , identifier[single] = keyword[False] ),
identifier[self] . identifier[dim_upper_extent] (* identifier[slice_dims] , identifier[single] = keyword[False] ))) | def slice_index(self, *slice_dims, **kwargs):
"""
Returns a tuple of slices, each slice equal to the
slice(lower_extent, upper_extent, 1) of the dimensions
supplied in slice_dims. If the dimension is integer d,
slice(0, d, 1) will be used instead of the lower and upper extents
.. code-block:: python
A = np.ones((ntime, na))
idx = cube.slice_index('ntime','na', 3)
A[idx].sum()
ntime, na, components = cube.slice_index('ntime', 'na', 3)
A[ntime, na, components].sum()
Parameters
----------
*slice_dims : tuple
Tuple of dimensions for which slice
objects should be returned.
Returns
-------
tuple
Tuple of :code:`slice(lower,upper,1)` objects
"""
return tuple((slice(l, u, 1) for (l, u) in zip(self.dim_lower_extent(*slice_dims, single=False), self.dim_upper_extent(*slice_dims, single=False)))) |
def _get_desired_pkg(name, desired):
'''
Helper function that retrieves and nicely formats the desired pkg (and
version if specified) so that helpful information can be printed in the
comment for the state.
'''
if not desired[name] or desired[name].startswith(('<', '>', '=')):
oper = ''
else:
oper = '='
return '{0}{1}{2}'.format(name, oper,
'' if not desired[name] else desired[name]) | def function[_get_desired_pkg, parameter[name, desired]]:
constant[
Helper function that retrieves and nicely formats the desired pkg (and
version if specified) so that helpful information can be printed in the
comment for the state.
]
if <ast.BoolOp object at 0x7da1b212d900> begin[:]
variable[oper] assign[=] constant[]
return[call[constant[{0}{1}{2}].format, parameter[name[name], name[oper], <ast.IfExp object at 0x7da1b212c250>]]] | keyword[def] identifier[_get_desired_pkg] ( identifier[name] , identifier[desired] ):
literal[string]
keyword[if] keyword[not] identifier[desired] [ identifier[name] ] keyword[or] identifier[desired] [ identifier[name] ]. identifier[startswith] (( literal[string] , literal[string] , literal[string] )):
identifier[oper] = literal[string]
keyword[else] :
identifier[oper] = literal[string]
keyword[return] literal[string] . identifier[format] ( identifier[name] , identifier[oper] ,
literal[string] keyword[if] keyword[not] identifier[desired] [ identifier[name] ] keyword[else] identifier[desired] [ identifier[name] ]) | def _get_desired_pkg(name, desired):
"""
Helper function that retrieves and nicely formats the desired pkg (and
version if specified) so that helpful information can be printed in the
comment for the state.
"""
if not desired[name] or desired[name].startswith(('<', '>', '=')):
oper = '' # depends on [control=['if'], data=[]]
else:
oper = '='
return '{0}{1}{2}'.format(name, oper, '' if not desired[name] else desired[name]) |
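A quick check of the three formatting branches, runnable as-is against the helper above:

desired = {'nginx': '1.14.0', 'vim': '>=8.0', 'curl': None}
print(_get_desired_pkg('nginx', desired))  # nginx=1.14.0
print(_get_desired_pkg('vim', desired))    # vim>=8.0
print(_get_desired_pkg('curl', desired))   # curl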
def run(self):
"""
This method continually runs. If an incoming character is available on the serial port
it is read and placed on the _command_deque
@return: Never Returns
"""
while not self.is_stopped():
# we can get an OSError: [Errno9] Bad file descriptor when shutting down
# just ignore it
try:
if self.arduino.inWaiting():
c = self.arduino.read()
self.command_deque.append(ord(c))
else:
time.sleep(.1)
except OSError:
pass
except IOError:
self.stop()
self.close() | def function[run, parameter[self]]:
constant[
This method continually runs. If an incoming character is available on the serial port
it is read and placed on the _command_deque
@return: Never Returns
]
while <ast.UnaryOp object at 0x7da1b021c7c0> begin[:]
<ast.Try object at 0x7da1b021fc40>
call[name[self].close, parameter[]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[while] keyword[not] identifier[self] . identifier[is_stopped] ():
keyword[try] :
keyword[if] identifier[self] . identifier[arduino] . identifier[inWaiting] ():
identifier[c] = identifier[self] . identifier[arduino] . identifier[read] ()
identifier[self] . identifier[command_deque] . identifier[append] ( identifier[ord] ( identifier[c] ))
keyword[else] :
identifier[time] . identifier[sleep] ( literal[int] )
keyword[except] identifier[OSError] :
keyword[pass]
keyword[except] identifier[IOError] :
identifier[self] . identifier[stop] ()
identifier[self] . identifier[close] () | def run(self):
"""
This method continually runs. If an incoming character is available on the serial port
it is read and placed on the _command_deque
@return: Never Returns
"""
while not self.is_stopped():
# we can get an OSError: [Errno9] Bad file descriptor when shutting down
# just ignore it
try:
if self.arduino.inWaiting():
c = self.arduino.read()
self.command_deque.append(ord(c)) # depends on [control=['if'], data=[]]
else:
time.sleep(0.1) # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]]
except IOError:
self.stop() # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]]
self.close() |
def build_arg_parser2():
"""
Build an argument parser using optparse. Use it when python version is 2.5 or 2.6.
"""
usage_str = "Smatch table calculator -- arguments"
parser = optparse.OptionParser(usage=usage_str)
parser.add_option("--fl", dest="fl", type="string", help='AMR ID list file')
parser.add_option("-f", dest="f", type="string", action="callback", callback=cb, help="AMR IDs (at least one)")
parser.add_option("-p", dest="p", type="string", action="callback", callback=cb, help="User list")
parser.add_option("--fd", dest="fd", type="string", help="file directory")
parser.add_option("-r", "--restart", dest="r", type="int", help='Restart number (Default: 4)')
parser.add_option("-v", "--verbose", action='store_true', dest="v", help='Verbose output (Default:False)')
parser.set_defaults(r=4, v=False, ms=False, fd=isi_dir_pre)
return parser | def function[build_arg_parser2, parameter[]]:
constant[
Build an argument parser using optparse. Use it when python version is 2.5 or 2.6.
]
variable[usage_str] assign[=] constant[Smatch table calculator -- arguments]
variable[parser] assign[=] call[name[optparse].OptionParser, parameter[]]
call[name[parser].add_option, parameter[constant[--fl]]]
call[name[parser].add_option, parameter[constant[-f]]]
call[name[parser].add_option, parameter[constant[-p]]]
call[name[parser].add_option, parameter[constant[--fd]]]
call[name[parser].add_option, parameter[constant[-r], constant[--restart]]]
call[name[parser].add_option, parameter[constant[-v], constant[--verbose]]]
call[name[parser].set_defaults, parameter[]]
return[name[parser]] | keyword[def] identifier[build_arg_parser2] ():
literal[string]
identifier[usage_str] = literal[string]
identifier[parser] = identifier[optparse] . identifier[OptionParser] ( identifier[usage] = identifier[usage_str] )
identifier[parser] . identifier[add_option] ( literal[string] , identifier[dest] = literal[string] , identifier[type] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] ( literal[string] , identifier[dest] = literal[string] , identifier[type] = literal[string] , identifier[action] = literal[string] , identifier[callback] = identifier[cb] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] ( literal[string] , identifier[dest] = literal[string] , identifier[type] = literal[string] , identifier[action] = literal[string] , identifier[callback] = identifier[cb] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] ( literal[string] , identifier[dest] = literal[string] , identifier[type] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] ( literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[type] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[dest] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[set_defaults] ( identifier[r] = literal[int] , identifier[v] = keyword[False] , identifier[ms] = keyword[False] , identifier[fd] = identifier[isi_dir_pre] )
keyword[return] identifier[parser] | def build_arg_parser2():
"""
Build an argument parser using optparse. Use it when python version is 2.5 or 2.6.
"""
usage_str = 'Smatch table calculator -- arguments'
parser = optparse.OptionParser(usage=usage_str)
parser.add_option('--fl', dest='fl', type='string', help='AMR ID list file')
parser.add_option('-f', dest='f', type='string', action='callback', callback=cb, help='AMR IDs (at least one)')
parser.add_option('-p', dest='p', type='string', action='callback', callback=cb, help='User list')
parser.add_option('--fd', dest='fd', type='string', help='file directory')
parser.add_option('-r', '--restart', dest='r', type='int', help='Restart number (Default: 4)')
parser.add_option('-v', '--verbose', action='store_true', dest='v', help='Verbose output (Default:False)')
parser.set_defaults(r=4, v=False, ms=False, fd=isi_dir_pre)
return parser |
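A sketch of driving the parser above; cb and isi_dir_pre live at module level in the original source, so hypothetical stand-ins are defined first.

def cb(option, opt_str, value, parser):
    # stand-in callback; assumed to split comma-separated ID lists
    setattr(parser.values, option.dest, value.split(','))

isi_dir_pre = '/tmp/amr'  # hypothetical default file directory

parser = build_arg_parser2()
opts, args = parser.parse_args(['--fl', 'ids.txt', '-r', '8', '-v'])
# opts.fl == 'ids.txt', opts.r == 8, opts.v is True, opts.fd == '/tmp/amr'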
def check_pdb_status(pdbid):
"""Returns the status and up-to-date entry in the PDB for a given PDB ID"""
url = 'http://www.rcsb.org/pdb/rest/idStatus?structureId=%s' % pdbid
xmlf = urlopen(url)
xml = et.parse(xmlf)
xmlf.close()
status = None
current_pdbid = pdbid
for df in xml.xpath('//record'):
status = df.attrib['status'] # Status of an entry can be either 'UNKNOWN', 'OBSOLETE', or 'CURRENT'
if status == 'OBSOLETE':
current_pdbid = df.attrib['replacedBy'] # Contains the up-to-date PDB ID for obsolete entries
return [status, current_pdbid.lower()] | def function[check_pdb_status, parameter[pdbid]]:
constant[Returns the status and up-to-date entry in the PDB for a given PDB ID]
variable[url] assign[=] binary_operation[constant[http://www.rcsb.org/pdb/rest/idStatus?structureId=%s] <ast.Mod object at 0x7da2590d6920> name[pdbid]]
variable[xmlf] assign[=] call[name[urlopen], parameter[name[url]]]
variable[xml] assign[=] call[name[et].parse, parameter[name[xmlf]]]
call[name[xmlf].close, parameter[]]
variable[status] assign[=] constant[None]
variable[current_pdbid] assign[=] name[pdbid]
for taget[name[df]] in starred[call[name[xml].xpath, parameter[constant[//record]]]] begin[:]
variable[status] assign[=] call[name[df].attrib][constant[status]]
if compare[name[status] equal[==] constant[OBSOLETE]] begin[:]
variable[current_pdbid] assign[=] call[name[df].attrib][constant[replacedBy]]
return[list[[<ast.Name object at 0x7da2054a4670>, <ast.Call object at 0x7da2054a6e30>]]] | keyword[def] identifier[check_pdb_status] ( identifier[pdbid] ):
literal[string]
identifier[url] = literal[string] % identifier[pdbid]
identifier[xmlf] = identifier[urlopen] ( identifier[url] )
identifier[xml] = identifier[et] . identifier[parse] ( identifier[xmlf] )
identifier[xmlf] . identifier[close] ()
identifier[status] = keyword[None]
identifier[current_pdbid] = identifier[pdbid]
keyword[for] identifier[df] keyword[in] identifier[xml] . identifier[xpath] ( literal[string] ):
identifier[status] = identifier[df] . identifier[attrib] [ literal[string] ]
keyword[if] identifier[status] == literal[string] :
identifier[current_pdbid] = identifier[df] . identifier[attrib] [ literal[string] ]
keyword[return] [ identifier[status] , identifier[current_pdbid] . identifier[lower] ()] | def check_pdb_status(pdbid):
"""Returns the status and up-to-date entry in the PDB for a given PDB ID"""
url = 'http://www.rcsb.org/pdb/rest/idStatus?structureId=%s' % pdbid
xmlf = urlopen(url)
xml = et.parse(xmlf)
xmlf.close()
status = None
current_pdbid = pdbid
for df in xml.xpath('//record'):
status = df.attrib['status'] # Status of an entry can be either 'UNKNOWN', 'OBSOLETE', or 'CURRENT' # depends on [control=['for'], data=['df']]
if status == 'OBSOLETE':
current_pdbid = df.attrib['replacedBy'] # Contains the up-to-date PDB ID for obsolete entries # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['df']]
return [status, current_pdbid.lower()] |
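A usage sketch, assuming the module-level urlopen and lxml etree imports from the source; note this hits the legacy RCSB REST endpoint over the network, so it is illustrative rather than a reliable test, and the PDB ID is arbitrary.

status, current = check_pdb_status('2hhb')
# e.g. ['CURRENT', '2hhb'] for an entry that has not been superseded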
def Name(self, number):
"""Returns a string containing the name of an enum value."""
if number in self._enum_type.values_by_number:
return self._enum_type.values_by_number[number].name
raise ValueError('Enum %s has no name defined for value %d' % (
self._enum_type.name, number)) | def function[Name, parameter[self, number]]:
constant[Returns a string containing the name of an enum value.]
if compare[name[number] in name[self]._enum_type.values_by_number] begin[:]
return[call[name[self]._enum_type.values_by_number][name[number]].name]
<ast.Raise object at 0x7da1b21c5c00> | keyword[def] identifier[Name] ( identifier[self] , identifier[number] ):
literal[string]
keyword[if] identifier[number] keyword[in] identifier[self] . identifier[_enum_type] . identifier[values_by_number] :
keyword[return] identifier[self] . identifier[_enum_type] . identifier[values_by_number] [ identifier[number] ]. identifier[name]
keyword[raise] identifier[ValueError] ( literal[string] %(
identifier[self] . identifier[_enum_type] . identifier[name] , identifier[number] )) | def Name(self, number):
"""Returns a string containing the name of an enum value."""
if number in self._enum_type.values_by_number:
return self._enum_type.values_by_number[number].name # depends on [control=['if'], data=['number']]
raise ValueError('Enum %s has no name defined for value %d' % (self._enum_type.name, number)) |
def evolve(self, profile, t, return_log=False):
"""
Compute the probability of the sequence state of the child
at time t later, given the parent profile.
Parameters
----------
profile : numpy.array
Sequence profile. Shape = (L, a),
where L - sequence length, a - alphabet size.
t : double
Time to propagate
return_log: bool
If True, return log-probability
Returns
-------
res : np.array
Profile of the sequence after time t in the future.
Shape = (L, a), where L - sequence length, a - alphabet size.
"""
Qt = self.expQt(t).T
res = profile.dot(Qt)
return np.log(res) if return_log else res | def function[evolve, parameter[self, profile, t, return_log]]:
constant[
Compute the probability of the sequence state of the child
at time t later, given the parent profile.
Parameters
----------
profile : numpy.array
Sequence profile. Shape = (L, a),
where L - sequence length, a - alphabet size.
t : double
Time to propagate
return_log: bool
If True, return log-probability
Returns
-------
res : np.array
Profile of the sequence after time t in the future.
Shape = (L, a), where L - sequence length, a - alphabet size.
]
variable[Qt] assign[=] call[name[self].expQt, parameter[name[t]]].T
variable[res] assign[=] call[name[profile].dot, parameter[name[Qt]]]
return[<ast.IfExp object at 0x7da1b02a45e0>] | keyword[def] identifier[evolve] ( identifier[self] , identifier[profile] , identifier[t] , identifier[return_log] = keyword[False] ):
literal[string]
identifier[Qt] = identifier[self] . identifier[expQt] ( identifier[t] ). identifier[T]
identifier[res] = identifier[profile] . identifier[dot] ( identifier[Qt] )
keyword[return] identifier[np] . identifier[log] ( identifier[res] ) keyword[if] identifier[return_log] keyword[else] identifier[res] | def evolve(self, profile, t, return_log=False):
"""
Compute the probability of the sequence state of the child
at time t later, given the parent profile.
Parameters
----------
profile : numpy.array
Sequence profile. Shape = (L, a),
where L - sequence length, a - alphabet size.
t : double
Time to propagate
return_log: bool
If True, return log-probability
Returns
-------
res : np.array
Profile of the sequence after time t in the future.
Shape = (L, a), where L - sequence length, a - alphabet size.
"""
Qt = self.expQt(t).T
res = profile.dot(Qt)
return np.log(res) if return_log else res |
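A standalone illustration of the propagation step with a toy two-state model; expQt is assumed to return the matrix exponential exp(Qt), computed here with scipy.

import numpy as np
from scipy.linalg import expm

class TwoState:
    # minimal stand-in exposing only what evolve() uses
    def expQt(self, t):
        Q = np.array([[-1.0, 1.0], [1.0, -1.0]])  # symmetric rate matrix
        return expm(Q * t)

profile = np.array([[1.0, 0.0]])         # one site, fully in state 0
print(evolve(TwoState(), profile, 0.5))  # ~[[0.684 0.316]]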
def get_policies_from_aws(client, scope='Local'):
"""Returns a list of all the policies currently applied to an AWS Account. Returns a list containing all the
policies for the specified scope
Args:
client (:obj:`botocore.client.BaseClient`): A boto3 IAM client object
scope (`str`): The policy scope to use. Default: Local
Returns:
:obj:`list` of `dict`
"""
done = False
marker = None
policies = []
while not done:
if marker:
response = client.list_policies(Marker=marker, Scope=scope)
else:
response = client.list_policies(Scope=scope)
policies += response['Policies']
if response['IsTruncated']:
marker = response['Marker']
else:
done = True
return policies | def function[get_policies_from_aws, parameter[client, scope]]:
constant[Returns a list of all the policies currently applied to an AWS account
for the specified scope.
Args:
client (:obj:`botocore.client.BaseClient`): A boto3 IAM client object
scope (`str`): The policy scope to use. Default: Local
Returns:
:obj:`list` of `dict`
]
variable[done] assign[=] constant[False]
variable[marker] assign[=] constant[None]
variable[policies] assign[=] list[[]]
while <ast.UnaryOp object at 0x7da1b2052c50> begin[:]
if name[marker] begin[:]
variable[response] assign[=] call[name[client].list_policies, parameter[]]
<ast.AugAssign object at 0x7da1b2050430>
if call[name[response]][constant[IsTruncated]] begin[:]
variable[marker] assign[=] call[name[response]][constant[Marker]]
return[name[policies]] | keyword[def] identifier[get_policies_from_aws] ( identifier[client] , identifier[scope] = literal[string] ):
literal[string]
identifier[done] = keyword[False]
identifier[marker] = keyword[None]
identifier[policies] =[]
keyword[while] keyword[not] identifier[done] :
keyword[if] identifier[marker] :
identifier[response] = identifier[client] . identifier[list_policies] ( identifier[Marker] = identifier[marker] , identifier[Scope] = identifier[scope] )
keyword[else] :
identifier[response] = identifier[client] . identifier[list_policies] ( identifier[Scope] = identifier[scope] )
identifier[policies] += identifier[response] [ literal[string] ]
keyword[if] identifier[response] [ literal[string] ]:
identifier[marker] = identifier[response] [ literal[string] ]
keyword[else] :
identifier[done] = keyword[True]
keyword[return] identifier[policies] | def get_policies_from_aws(client, scope='Local'):
"""Returns a list of all the policies currently applied to an AWS Account. Returns a list containing all the
policies for the specified scope
Args:
client (:obj:`boto3.session.Session`): A boto3 Session object
scope (`str`): The policy scope to use. Default: Local
Returns:
:obj:`list` of `dict`
"""
done = False
marker = None
policies = []
while not done:
if marker:
response = client.list_policies(Marker=marker, Scope=scope) # depends on [control=['if'], data=[]]
else:
response = client.list_policies(Scope=scope)
policies += response['Policies']
if response['IsTruncated']:
marker = response['Marker'] # depends on [control=['if'], data=[]]
else:
done = True # depends on [control=['while'], data=[]]
return policies |
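A hedged usage sketch; it requires valid AWS credentials and passes a boto3 IAM client, since list_policies is an IAM operation.

import boto3

iam = boto3.client('iam')
customer_managed = get_policies_from_aws(iam)  # Scope='Local' by default
print(len(customer_managed), 'customer-managed policies')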
def wantMethod(self, method):
"""Is the method a test method?
"""
try:
method_name = method.__name__
except AttributeError:
# not a method
return False
if method_name.startswith('_'):
# never collect 'private' methods
return False
declared = getattr(method, '__test__', None)
if declared is not None:
wanted = declared
else:
wanted = self.matches(method_name)
plug_wants = self.plugins.wantMethod(method)
if plug_wants is not None:
wanted = plug_wants
log.debug("wantMethod %s? %s", method, wanted)
return wanted | def function[wantMethod, parameter[self, method]]:
constant[Is the method a test method?
]
<ast.Try object at 0x7da1b021c0d0>
if call[name[method_name].startswith, parameter[constant[_]]] begin[:]
return[constant[False]]
variable[declared] assign[=] call[name[getattr], parameter[name[method], constant[__test__], constant[None]]]
if compare[name[declared] is_not constant[None]] begin[:]
variable[wanted] assign[=] name[declared]
variable[plug_wants] assign[=] call[name[self].plugins.wantMethod, parameter[name[method]]]
if compare[name[plug_wants] is_not constant[None]] begin[:]
variable[wanted] assign[=] name[plug_wants]
call[name[log].debug, parameter[constant[wantMethod %s? %s], name[method], name[wanted]]]
return[name[wanted]] | keyword[def] identifier[wantMethod] ( identifier[self] , identifier[method] ):
literal[string]
keyword[try] :
identifier[method_name] = identifier[method] . identifier[__name__]
keyword[except] identifier[AttributeError] :
keyword[return] keyword[False]
keyword[if] identifier[method_name] . identifier[startswith] ( literal[string] ):
keyword[return] keyword[False]
identifier[declared] = identifier[getattr] ( identifier[method] , literal[string] , keyword[None] )
keyword[if] identifier[declared] keyword[is] keyword[not] keyword[None] :
identifier[wanted] = identifier[declared]
keyword[else] :
identifier[wanted] = identifier[self] . identifier[matches] ( identifier[method_name] )
identifier[plug_wants] = identifier[self] . identifier[plugins] . identifier[wantMethod] ( identifier[method] )
keyword[if] identifier[plug_wants] keyword[is] keyword[not] keyword[None] :
identifier[wanted] = identifier[plug_wants]
identifier[log] . identifier[debug] ( literal[string] , identifier[method] , identifier[wanted] )
keyword[return] identifier[wanted] | def wantMethod(self, method):
"""Is the method a test method?
"""
try:
method_name = method.__name__ # depends on [control=['try'], data=[]]
except AttributeError:
# not a method
return False # depends on [control=['except'], data=[]]
if method_name.startswith('_'):
# never collect 'private' methods
return False # depends on [control=['if'], data=[]]
declared = getattr(method, '__test__', None)
if declared is not None:
wanted = declared # depends on [control=['if'], data=['declared']]
else:
wanted = self.matches(method_name)
plug_wants = self.plugins.wantMethod(method)
if plug_wants is not None:
wanted = plug_wants # depends on [control=['if'], data=['plug_wants']]
log.debug('wantMethod %s? %s', method, wanted)
return wanted |
def log_indexing_error(cls, indexing_errors):
""" Logs indexing errors and raises a general ElasticSearch Exception"""
indexing_errors_log = []
for indexing_error in indexing_errors:
indexing_errors_log.append(str(indexing_error))
raise exceptions.ElasticsearchException(', '.join(indexing_errors_log)) | def function[log_indexing_error, parameter[cls, indexing_errors]]:
constant[ Logs indexing errors and raises a general ElasticSearch Exception]
variable[indexing_errors_log] assign[=] list[[]]
for taget[name[indexing_error]] in starred[name[indexing_errors]] begin[:]
call[name[indexing_errors_log].append, parameter[call[name[str], parameter[name[indexing_error]]]]]
<ast.Raise object at 0x7da1b00ddff0> | keyword[def] identifier[log_indexing_error] ( identifier[cls] , identifier[indexing_errors] ):
literal[string]
identifier[indexing_errors_log] =[]
keyword[for] identifier[indexing_error] keyword[in] identifier[indexing_errors] :
identifier[indexing_errors_log] . identifier[append] ( identifier[str] ( identifier[indexing_error] ))
keyword[raise] identifier[exceptions] . identifier[ElasticsearchException] ( literal[string] . identifier[join] ( identifier[indexing_errors_log] )) | def log_indexing_error(cls, indexing_errors):
""" Logs indexing errors and raises a general ElasticSearch Exception"""
indexing_errors_log = []
for indexing_error in indexing_errors:
indexing_errors_log.append(str(indexing_error)) # depends on [control=['for'], data=['indexing_error']]
raise exceptions.ElasticsearchException(', '.join(indexing_errors_log)) |
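
The same collect-then-raise pattern, sketched without the elasticsearch dependency (a plain RuntimeError stands in for exceptions.ElasticsearchException):

def raise_joined(indexing_errors):
    raise RuntimeError(', '.join(str(err) for err in indexing_errors))

try:
    raise_joined([{'_id': '1', 'error': 'mapper_parsing_exception'},
                  {'_id': '2', 'error': 'version_conflict'}])
except RuntimeError as exc:
    print(exc)  # both error dicts joined into one message
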
def compile_and_process(self, in_path):
"""compile a file, save it to the ouput file if the inline flag true"""
out_path = self.path_mapping[in_path]
if not self.embed:
pdebug("[%s::%s] %s -> %s" % (
self.compiler_name,
self.name,
os.path.relpath(in_path),
os.path.relpath(out_path)),
groups=["build_task"],
autobreak=True)
else:
pdebug("[%s::%s] %s -> <cache>" % (
self.compiler_name,
self.name,
os.path.relpath(in_path)),
groups=["build_task"],
autobreak=True)
compiled_string = self.compile_file(in_path)
if not self.embed:
if compiled_string != "":
with open(out_path, "w") as f:
f.write(compiled_string)
return compiled_string | def function[compile_and_process, parameter[self, in_path]]:
    constant[Compile a file and write it to the output file unless the embed (inline) flag is set.]
variable[out_path] assign[=] call[name[self].path_mapping][name[in_path]]
if <ast.UnaryOp object at 0x7da20e955c90> begin[:]
call[name[pdebug], parameter[binary_operation[constant[[%s::%s] %s -> %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20e954580>, <ast.Attribute object at 0x7da20e957100>, <ast.Call object at 0x7da20e957a60>, <ast.Call object at 0x7da20e9552d0>]]]]]
variable[compiled_string] assign[=] call[name[self].compile_file, parameter[name[in_path]]]
if <ast.UnaryOp object at 0x7da20e957250> begin[:]
if compare[name[compiled_string] not_equal[!=] constant[]] begin[:]
with call[name[open], parameter[name[out_path], constant[w]]] begin[:]
call[name[f].write, parameter[name[compiled_string]]]
return[name[compiled_string]] | keyword[def] identifier[compile_and_process] ( identifier[self] , identifier[in_path] ):
literal[string]
identifier[out_path] = identifier[self] . identifier[path_mapping] [ identifier[in_path] ]
keyword[if] keyword[not] identifier[self] . identifier[embed] :
identifier[pdebug] ( literal[string] %(
identifier[self] . identifier[compiler_name] ,
identifier[self] . identifier[name] ,
identifier[os] . identifier[path] . identifier[relpath] ( identifier[in_path] ),
identifier[os] . identifier[path] . identifier[relpath] ( identifier[out_path] )),
identifier[groups] =[ literal[string] ],
identifier[autobreak] = keyword[True] )
keyword[else] :
identifier[pdebug] ( literal[string] %(
identifier[self] . identifier[compiler_name] ,
identifier[self] . identifier[name] ,
identifier[os] . identifier[path] . identifier[relpath] ( identifier[in_path] )),
identifier[groups] =[ literal[string] ],
identifier[autobreak] = keyword[True] )
identifier[compiled_string] = identifier[self] . identifier[compile_file] ( identifier[in_path] )
keyword[if] keyword[not] identifier[self] . identifier[embed] :
keyword[if] identifier[compiled_string] != literal[string] :
keyword[with] identifier[open] ( identifier[out_path] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[compiled_string] )
keyword[return] identifier[compiled_string] | def compile_and_process(self, in_path):
"""compile a file, save it to the ouput file if the inline flag true"""
out_path = self.path_mapping[in_path]
if not self.embed:
pdebug('[%s::%s] %s -> %s' % (self.compiler_name, self.name, os.path.relpath(in_path), os.path.relpath(out_path)), groups=['build_task'], autobreak=True) # depends on [control=['if'], data=[]]
else:
pdebug('[%s::%s] %s -> <cache>' % (self.compiler_name, self.name, os.path.relpath(in_path)), groups=['build_task'], autobreak=True)
compiled_string = self.compile_file(in_path)
if not self.embed:
if compiled_string != '':
with open(out_path, 'w') as f:
f.write(compiled_string) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=['compiled_string']] # depends on [control=['if'], data=[]]
return compiled_string |
def cmd_balance():
"""
    View the balance of the main account.
"""
r = rocket.operations.cool_feed.get(params={"per_page": 1})
r = handle_error(r)
j = r.json()
template = "".join([
click.style("{rur} {code}, ", fg="green", bold=True),
"{miles} рокетрублей"])
click.echo(template.format(
rur=j["balance"]["amount"],
code=j["balance"]["currency_code"],
miles=int(j["miles"]))) | def function[cmd_balance, parameter[]]:
constant[
    View the balance of the main account.
]
variable[r] assign[=] call[name[rocket].operations.cool_feed.get, parameter[]]
variable[r] assign[=] call[name[handle_error], parameter[name[r]]]
variable[j] assign[=] call[name[r].json, parameter[]]
variable[template] assign[=] call[constant[].join, parameter[list[[<ast.Call object at 0x7da1b25da0b0>, <ast.Constant object at 0x7da1b25da2f0>]]]]
call[name[click].echo, parameter[call[name[template].format, parameter[]]]] | keyword[def] identifier[cmd_balance] ():
literal[string]
identifier[r] = identifier[rocket] . identifier[operations] . identifier[cool_feed] . identifier[get] ( identifier[params] ={ literal[string] : literal[int] })
identifier[r] = identifier[handle_error] ( identifier[r] )
identifier[j] = identifier[r] . identifier[json] ()
identifier[template] = literal[string] . identifier[join] ([
identifier[click] . identifier[style] ( literal[string] , identifier[fg] = literal[string] , identifier[bold] = keyword[True] ),
literal[string] ])
identifier[click] . identifier[echo] ( identifier[template] . identifier[format] (
identifier[rur] = identifier[j] [ literal[string] ][ literal[string] ],
identifier[code] = identifier[j] [ literal[string] ][ literal[string] ],
identifier[miles] = identifier[int] ( identifier[j] [ literal[string] ]))) | def cmd_balance():
"""
    View the balance of the main account.
"""
r = rocket.operations.cool_feed.get(params={'per_page': 1})
r = handle_error(r)
j = r.json()
template = ''.join([click.style('{rur} {code}, ', fg='green', bold=True), '{miles} рокетрублей'])
click.echo(template.format(rur=j['balance']['amount'], code=j['balance']['currency_code'], miles=int(j['miles']))) |
def enumerated_list(self, nth, text):
"""Example::
1. item
"""
if not isinstance(nth, int):
raise Exception("'nth' argument has to be an integer!")
return "{nth}. {text}".format(nth=nth, text=text) | def function[enumerated_list, parameter[self, nth, text]]:
constant[Example::
1. item
]
if <ast.UnaryOp object at 0x7da1b1628c10> begin[:]
<ast.Raise object at 0x7da1b162ad40>
return[call[constant[{nth}. {text}].format, parameter[]]] | keyword[def] identifier[enumerated_list] ( identifier[self] , identifier[nth] , identifier[text] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[nth] , identifier[int] ):
keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] literal[string] . identifier[format] ( identifier[nth] = identifier[nth] , identifier[text] = identifier[text] ) | def enumerated_list(self, nth, text):
"""Example::
1. item
"""
if not isinstance(nth, int):
raise Exception("'nth' argument has to be an integer!") # depends on [control=['if'], data=[]]
return '{nth}. {text}'.format(nth=nth, text=text) |
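
A quick runnable check of the formatting contract above (the enclosing formatter class is not part of this record, so the format call is shown directly):

nth, text = 2, "second item"
line = "{nth}. {text}".format(nth=nth, text=text)
print(line)  # -> "2. second item"
# a non-integer nth would raise: enumerated_list("1", "item") -> Exception
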
def clicks(times=None, frames=None, sr=22050, hop_length=512,
click_freq=1000.0, click_duration=0.1, click=None, length=None):
"""Returns a signal with the signal `click` placed at each specified time
Parameters
----------
times : np.ndarray or None
times to place clicks, in seconds
frames : np.ndarray or None
frame indices to place clicks
sr : number > 0
desired sampling rate of the output signal
hop_length : int > 0
if positions are specified by `frames`, the number of samples between frames.
click_freq : float > 0
    frequency (in Hz) of the default click signal. Default is 1 kHz.
    click_duration : float > 0
    duration (in seconds) of the default click signal. Default is 100 ms.
click : np.ndarray or None
optional click signal sample to use instead of the default blip.
length : int > 0
desired number of samples in the output signal
Returns
-------
click_signal : np.ndarray
Synthesized click signal
Raises
------
ParameterError
- If neither `times` nor `frames` are provided.
- If any of `click_freq`, `click_duration`, or `length` are out of range.
Examples
--------
>>> # Sonify detected beat events
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
>>> y_beats = librosa.clicks(frames=beats, sr=sr)
>>> # Or generate a signal of the same length as y
>>> y_beats = librosa.clicks(frames=beats, sr=sr, length=len(y))
>>> # Or use timing instead of frame indices
>>> times = librosa.frames_to_time(beats, sr=sr)
>>> y_beat_times = librosa.clicks(times=times, sr=sr)
>>> # Or with a click frequency of 880Hz and a 500ms sample
>>> y_beat_times880 = librosa.clicks(times=times, sr=sr,
... click_freq=880, click_duration=0.5)
Display click waveform next to the spectrogram
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> S = librosa.feature.melspectrogram(y=y, sr=sr)
>>> ax = plt.subplot(2,1,2)
>>> librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
... x_axis='time', y_axis='mel')
>>> plt.subplot(2,1,1, sharex=ax)
>>> librosa.display.waveplot(y_beat_times, sr=sr, label='Beat clicks')
>>> plt.legend()
>>> plt.xlim(15, 30)
>>> plt.tight_layout()
"""
# Compute sample positions from time or frames
if times is None:
if frames is None:
raise ParameterError('either "times" or "frames" must be provided')
positions = frames_to_samples(frames, hop_length=hop_length)
else:
# Convert times to positions
positions = time_to_samples(times, sr=sr)
if click is not None:
# Check that we have a well-formed audio buffer
util.valid_audio(click, mono=True)
else:
# Create default click signal
if click_duration <= 0:
raise ParameterError('click_duration must be strictly positive')
if click_freq <= 0:
raise ParameterError('click_freq must be strictly positive')
angular_freq = 2 * np.pi * click_freq / float(sr)
click = np.logspace(0, -10,
num=int(np.round(sr * click_duration)),
base=2.0)
click *= np.sin(angular_freq * np.arange(len(click)))
# Set default length
if length is None:
length = positions.max() + click.shape[0]
else:
if length < 1:
raise ParameterError('length must be a positive integer')
# Filter out any positions past the length boundary
positions = positions[positions < length]
# Pre-allocate click signal
click_signal = np.zeros(length, dtype=np.float32)
# Place clicks
for start in positions:
# Compute the end-point of this click
end = start + click.shape[0]
if end >= length:
click_signal[start:] += click[:length - start]
else:
# Normally, just add a click here
click_signal[start:end] += click
return click_signal | def function[clicks, parameter[times, frames, sr, hop_length, click_freq, click_duration, click, length]]:
constant[Returns a signal with the signal `click` placed at each specified time
Parameters
----------
times : np.ndarray or None
times to place clicks, in seconds
frames : np.ndarray or None
frame indices to place clicks
sr : number > 0
desired sampling rate of the output signal
hop_length : int > 0
if positions are specified by `frames`, the number of samples between frames.
click_freq : float > 0
    frequency (in Hz) of the default click signal. Default is 1 kHz.
    click_duration : float > 0
    duration (in seconds) of the default click signal. Default is 100 ms.
click : np.ndarray or None
optional click signal sample to use instead of the default blip.
length : int > 0
desired number of samples in the output signal
Returns
-------
click_signal : np.ndarray
Synthesized click signal
Raises
------
ParameterError
- If neither `times` nor `frames` are provided.
- If any of `click_freq`, `click_duration`, or `length` are out of range.
Examples
--------
>>> # Sonify detected beat events
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
>>> y_beats = librosa.clicks(frames=beats, sr=sr)
>>> # Or generate a signal of the same length as y
>>> y_beats = librosa.clicks(frames=beats, sr=sr, length=len(y))
>>> # Or use timing instead of frame indices
>>> times = librosa.frames_to_time(beats, sr=sr)
>>> y_beat_times = librosa.clicks(times=times, sr=sr)
>>> # Or with a click frequency of 880Hz and a 500ms sample
>>> y_beat_times880 = librosa.clicks(times=times, sr=sr,
... click_freq=880, click_duration=0.5)
Display click waveform next to the spectrogram
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> S = librosa.feature.melspectrogram(y=y, sr=sr)
>>> ax = plt.subplot(2,1,2)
>>> librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
... x_axis='time', y_axis='mel')
>>> plt.subplot(2,1,1, sharex=ax)
>>> librosa.display.waveplot(y_beat_times, sr=sr, label='Beat clicks')
>>> plt.legend()
>>> plt.xlim(15, 30)
>>> plt.tight_layout()
]
if compare[name[times] is constant[None]] begin[:]
if compare[name[frames] is constant[None]] begin[:]
<ast.Raise object at 0x7da18f09f970>
variable[positions] assign[=] call[name[frames_to_samples], parameter[name[frames]]]
if compare[name[click] is_not constant[None]] begin[:]
call[name[util].valid_audio, parameter[name[click]]]
if compare[name[length] is constant[None]] begin[:]
variable[length] assign[=] binary_operation[call[name[positions].max, parameter[]] + call[name[click].shape][constant[0]]]
variable[click_signal] assign[=] call[name[np].zeros, parameter[name[length]]]
for taget[name[start]] in starred[name[positions]] begin[:]
variable[end] assign[=] binary_operation[name[start] + call[name[click].shape][constant[0]]]
if compare[name[end] greater_or_equal[>=] name[length]] begin[:]
<ast.AugAssign object at 0x7da20c6c4b20>
return[name[click_signal]] | keyword[def] identifier[clicks] ( identifier[times] = keyword[None] , identifier[frames] = keyword[None] , identifier[sr] = literal[int] , identifier[hop_length] = literal[int] ,
identifier[click_freq] = literal[int] , identifier[click_duration] = literal[int] , identifier[click] = keyword[None] , identifier[length] = keyword[None] ):
literal[string]
keyword[if] identifier[times] keyword[is] keyword[None] :
keyword[if] identifier[frames] keyword[is] keyword[None] :
keyword[raise] identifier[ParameterError] ( literal[string] )
identifier[positions] = identifier[frames_to_samples] ( identifier[frames] , identifier[hop_length] = identifier[hop_length] )
keyword[else] :
identifier[positions] = identifier[time_to_samples] ( identifier[times] , identifier[sr] = identifier[sr] )
keyword[if] identifier[click] keyword[is] keyword[not] keyword[None] :
identifier[util] . identifier[valid_audio] ( identifier[click] , identifier[mono] = keyword[True] )
keyword[else] :
keyword[if] identifier[click_duration] <= literal[int] :
keyword[raise] identifier[ParameterError] ( literal[string] )
keyword[if] identifier[click_freq] <= literal[int] :
keyword[raise] identifier[ParameterError] ( literal[string] )
identifier[angular_freq] = literal[int] * identifier[np] . identifier[pi] * identifier[click_freq] / identifier[float] ( identifier[sr] )
identifier[click] = identifier[np] . identifier[logspace] ( literal[int] ,- literal[int] ,
identifier[num] = identifier[int] ( identifier[np] . identifier[round] ( identifier[sr] * identifier[click_duration] )),
identifier[base] = literal[int] )
identifier[click] *= identifier[np] . identifier[sin] ( identifier[angular_freq] * identifier[np] . identifier[arange] ( identifier[len] ( identifier[click] )))
keyword[if] identifier[length] keyword[is] keyword[None] :
identifier[length] = identifier[positions] . identifier[max] ()+ identifier[click] . identifier[shape] [ literal[int] ]
keyword[else] :
keyword[if] identifier[length] < literal[int] :
keyword[raise] identifier[ParameterError] ( literal[string] )
identifier[positions] = identifier[positions] [ identifier[positions] < identifier[length] ]
identifier[click_signal] = identifier[np] . identifier[zeros] ( identifier[length] , identifier[dtype] = identifier[np] . identifier[float32] )
keyword[for] identifier[start] keyword[in] identifier[positions] :
identifier[end] = identifier[start] + identifier[click] . identifier[shape] [ literal[int] ]
keyword[if] identifier[end] >= identifier[length] :
identifier[click_signal] [ identifier[start] :]+= identifier[click] [: identifier[length] - identifier[start] ]
keyword[else] :
identifier[click_signal] [ identifier[start] : identifier[end] ]+= identifier[click]
keyword[return] identifier[click_signal] | def clicks(times=None, frames=None, sr=22050, hop_length=512, click_freq=1000.0, click_duration=0.1, click=None, length=None):
"""Returns a signal with the signal `click` placed at each specified time
Parameters
----------
times : np.ndarray or None
times to place clicks, in seconds
frames : np.ndarray or None
frame indices to place clicks
sr : number > 0
desired sampling rate of the output signal
hop_length : int > 0
if positions are specified by `frames`, the number of samples between frames.
click_freq : float > 0
    frequency (in Hz) of the default click signal. Default is 1 kHz.
    click_duration : float > 0
    duration (in seconds) of the default click signal. Default is 100 ms.
click : np.ndarray or None
optional click signal sample to use instead of the default blip.
length : int > 0
desired number of samples in the output signal
Returns
-------
click_signal : np.ndarray
Synthesized click signal
Raises
------
ParameterError
- If neither `times` nor `frames` are provided.
- If any of `click_freq`, `click_duration`, or `length` are out of range.
Examples
--------
>>> # Sonify detected beat events
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
>>> y_beats = librosa.clicks(frames=beats, sr=sr)
>>> # Or generate a signal of the same length as y
>>> y_beats = librosa.clicks(frames=beats, sr=sr, length=len(y))
>>> # Or use timing instead of frame indices
>>> times = librosa.frames_to_time(beats, sr=sr)
>>> y_beat_times = librosa.clicks(times=times, sr=sr)
>>> # Or with a click frequency of 880Hz and a 500ms sample
>>> y_beat_times880 = librosa.clicks(times=times, sr=sr,
... click_freq=880, click_duration=0.5)
Display click waveform next to the spectrogram
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> S = librosa.feature.melspectrogram(y=y, sr=sr)
>>> ax = plt.subplot(2,1,2)
>>> librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
... x_axis='time', y_axis='mel')
>>> plt.subplot(2,1,1, sharex=ax)
>>> librosa.display.waveplot(y_beat_times, sr=sr, label='Beat clicks')
>>> plt.legend()
>>> plt.xlim(15, 30)
>>> plt.tight_layout()
"""
# Compute sample positions from time or frames
if times is None:
if frames is None:
raise ParameterError('either "times" or "frames" must be provided') # depends on [control=['if'], data=[]]
positions = frames_to_samples(frames, hop_length=hop_length) # depends on [control=['if'], data=[]]
else:
# Convert times to positions
positions = time_to_samples(times, sr=sr)
if click is not None:
# Check that we have a well-formed audio buffer
util.valid_audio(click, mono=True) # depends on [control=['if'], data=['click']]
else:
# Create default click signal
if click_duration <= 0:
raise ParameterError('click_duration must be strictly positive') # depends on [control=['if'], data=[]]
if click_freq <= 0:
raise ParameterError('click_freq must be strictly positive') # depends on [control=['if'], data=[]]
angular_freq = 2 * np.pi * click_freq / float(sr)
click = np.logspace(0, -10, num=int(np.round(sr * click_duration)), base=2.0)
click *= np.sin(angular_freq * np.arange(len(click)))
# Set default length
if length is None:
length = positions.max() + click.shape[0] # depends on [control=['if'], data=['length']]
else:
if length < 1:
raise ParameterError('length must be a positive integer') # depends on [control=['if'], data=[]]
# Filter out any positions past the length boundary
positions = positions[positions < length]
# Pre-allocate click signal
click_signal = np.zeros(length, dtype=np.float32)
# Place clicks
for start in positions:
# Compute the end-point of this click
end = start + click.shape[0]
if end >= length:
click_signal[start:] += click[:length - start] # depends on [control=['if'], data=['length']]
else:
# Normally, just add a click here
click_signal[start:end] += click # depends on [control=['for'], data=['start']]
return click_signal |
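
The default click synthesis used above, isolated as a standalone numpy snippet with the same constants as the function defaults (an exponentially decaying 1 kHz sinusoid lasting 100 ms at sr=22050):

import numpy as np

sr, click_freq, click_duration = 22050, 1000.0, 0.1
angular_freq = 2 * np.pi * click_freq / float(sr)
# amplitude decays from 2**0 down to 2**-10 over the click
click = np.logspace(0, -10, num=int(np.round(sr * click_duration)), base=2.0)
click *= np.sin(angular_freq * np.arange(len(click)))
print(click.shape)  # (2205,) samples, i.e. 100 ms at 22050 Hz
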
def new_gp_object(typename):
""" Create an indirect pointer to a GPhoto2 type, call its matching
constructor function and return the pointer to it.
:param typename: Name of the type to create.
:return: A pointer to the specified data type.
"""
obj_p = backend.ffi.new("{0}**".format(typename))
backend.CONSTRUCTORS[typename](obj_p)
return obj_p[0] | def function[new_gp_object, parameter[typename]]:
constant[ Create an indirect pointer to a GPhoto2 type, call its matching
constructor function and return the pointer to it.
:param typename: Name of the type to create.
:return: A pointer to the specified data type.
]
variable[obj_p] assign[=] call[name[backend].ffi.new, parameter[call[constant[{0}**].format, parameter[name[typename]]]]]
call[call[name[backend].CONSTRUCTORS][name[typename]], parameter[name[obj_p]]]
return[call[name[obj_p]][constant[0]]] | keyword[def] identifier[new_gp_object] ( identifier[typename] ):
literal[string]
identifier[obj_p] = identifier[backend] . identifier[ffi] . identifier[new] ( literal[string] . identifier[format] ( identifier[typename] ))
identifier[backend] . identifier[CONSTRUCTORS] [ identifier[typename] ]( identifier[obj_p] )
keyword[return] identifier[obj_p] [ literal[int] ] | def new_gp_object(typename):
""" Create an indirect pointer to a GPhoto2 type, call its matching
constructor function and return the pointer to it.
:param typename: Name of the type to create.
:return: A pointer to the specified data type.
"""
obj_p = backend.ffi.new('{0}**'.format(typename))
backend.CONSTRUCTORS[typename](obj_p)
return obj_p[0] |
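
The same double-pointer idiom sketched with a generic cffi FFI so it runs without libgphoto2; "int **" stands in for a type such as "Camera**":

import cffi

ffi = cffi.FFI()
obj_p = ffi.new("int **")        # stand-in for e.g. ffi.new("Camera**")
print(obj_p[0] == ffi.NULL)      # True until a constructor fills slot 0
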
def read_text(self, file_handle):
"""Parse the TEXT segment of the FCS file.
    The TEXT segment contains metadata associated with the FCS file.
    Converts all meta keywords to lower case.
"""
header = self.annotation['__header__'] # For convenience
#####
# Read in the TEXT segment of the FCS file
# There are some differences in how the
file_handle.seek(header['text start'], 0)
raw_text = file_handle.read(header['text end'] - header['text start'] + 1)
try:
raw_text = raw_text.decode(self._encoding)
except UnicodeDecodeError as e:
# Catching the exception and logging it in this way kills the traceback, but
# we can worry about this later.
logger.warning(u'Encountered an illegal utf-8 byte in the header.\n Illegal utf-8 '
u'characters will be ignored.\n{}'.format(e))
raw_text = raw_text.decode(self._encoding, errors='ignore')
text = self._extract_text_dict(raw_text)
##
# Extract channel names and convert some of the channel properties
# and other fields into numeric data types (from string)
# Note: do not use regular expressions for manipulations here.
# Regular expressions are too heavy in terms of computation time.
pars = int(text['$PAR'])
if '$P0B' in text.keys(): # Checking whether channel number count starts from 0 or from 1
self.channel_numbers = range(0, pars) # Channel number count starts from 0
else:
self.channel_numbers = range(1, pars + 1) # Channel numbers start from 1
# Extract parameter names
try:
names_n = tuple([text['$P{0}N'.format(i)] for i in self.channel_numbers])
except KeyError:
names_n = []
try:
names_s = tuple([text['$P{0}S'.format(i)] for i in self.channel_numbers])
except KeyError:
names_s = []
self.channel_names_s = names_s
self.channel_names_n = names_n
# Convert some of the fields into integer values
keys_encoding_bits = ['$P{0}B'.format(i) for i in self.channel_numbers]
add_keys_to_convert_to_int = ['$NEXTDATA', '$PAR', '$TOT']
keys_to_convert_to_int = keys_encoding_bits + add_keys_to_convert_to_int
for key in keys_to_convert_to_int:
value = text[key]
text[key] = int(value)
self.annotation.update(text)
# Update data start segments if needed
if self._data_start == 0:
self._data_start = int(text['$BEGINDATA'])
if self._data_end == 0:
self._data_end = int(text['$ENDDATA']) | def function[read_text, parameter[self, file_handle]]:
constant[Parse the TEXT segment of the FCS file.
    The TEXT segment contains metadata associated with the FCS file.
    Converts all meta keywords to lower case.
]
variable[header] assign[=] call[name[self].annotation][constant[__header__]]
call[name[file_handle].seek, parameter[call[name[header]][constant[text start]], constant[0]]]
variable[raw_text] assign[=] call[name[file_handle].read, parameter[binary_operation[binary_operation[call[name[header]][constant[text end]] - call[name[header]][constant[text start]]] + constant[1]]]]
<ast.Try object at 0x7da1b0317160>
variable[text] assign[=] call[name[self]._extract_text_dict, parameter[name[raw_text]]]
variable[pars] assign[=] call[name[int], parameter[call[name[text]][constant[$PAR]]]]
if compare[constant[$P0B] in call[name[text].keys, parameter[]]] begin[:]
name[self].channel_numbers assign[=] call[name[range], parameter[constant[0], name[pars]]]
<ast.Try object at 0x7da1b04d8400>
<ast.Try object at 0x7da1b04d9a50>
name[self].channel_names_s assign[=] name[names_s]
name[self].channel_names_n assign[=] name[names_n]
variable[keys_encoding_bits] assign[=] <ast.ListComp object at 0x7da1b04d8af0>
variable[add_keys_to_convert_to_int] assign[=] list[[<ast.Constant object at 0x7da1b04d8640>, <ast.Constant object at 0x7da1b04d99c0>, <ast.Constant object at 0x7da1b04dab00>]]
variable[keys_to_convert_to_int] assign[=] binary_operation[name[keys_encoding_bits] + name[add_keys_to_convert_to_int]]
for taget[name[key]] in starred[name[keys_to_convert_to_int]] begin[:]
variable[value] assign[=] call[name[text]][name[key]]
call[name[text]][name[key]] assign[=] call[name[int], parameter[name[value]]]
call[name[self].annotation.update, parameter[name[text]]]
if compare[name[self]._data_start equal[==] constant[0]] begin[:]
name[self]._data_start assign[=] call[name[int], parameter[call[name[text]][constant[$BEGINDATA]]]]
if compare[name[self]._data_end equal[==] constant[0]] begin[:]
name[self]._data_end assign[=] call[name[int], parameter[call[name[text]][constant[$ENDDATA]]]] | keyword[def] identifier[read_text] ( identifier[self] , identifier[file_handle] ):
literal[string]
identifier[header] = identifier[self] . identifier[annotation] [ literal[string] ]
identifier[file_handle] . identifier[seek] ( identifier[header] [ literal[string] ], literal[int] )
identifier[raw_text] = identifier[file_handle] . identifier[read] ( identifier[header] [ literal[string] ]- identifier[header] [ literal[string] ]+ literal[int] )
keyword[try] :
identifier[raw_text] = identifier[raw_text] . identifier[decode] ( identifier[self] . identifier[_encoding] )
keyword[except] identifier[UnicodeDecodeError] keyword[as] identifier[e] :
identifier[logger] . identifier[warning] ( literal[string]
literal[string] . identifier[format] ( identifier[e] ))
identifier[raw_text] = identifier[raw_text] . identifier[decode] ( identifier[self] . identifier[_encoding] , identifier[errors] = literal[string] )
identifier[text] = identifier[self] . identifier[_extract_text_dict] ( identifier[raw_text] )
identifier[pars] = identifier[int] ( identifier[text] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[text] . identifier[keys] ():
identifier[self] . identifier[channel_numbers] = identifier[range] ( literal[int] , identifier[pars] )
keyword[else] :
identifier[self] . identifier[channel_numbers] = identifier[range] ( literal[int] , identifier[pars] + literal[int] )
keyword[try] :
identifier[names_n] = identifier[tuple] ([ identifier[text] [ literal[string] . identifier[format] ( identifier[i] )] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[channel_numbers] ])
keyword[except] identifier[KeyError] :
identifier[names_n] =[]
keyword[try] :
identifier[names_s] = identifier[tuple] ([ identifier[text] [ literal[string] . identifier[format] ( identifier[i] )] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[channel_numbers] ])
keyword[except] identifier[KeyError] :
identifier[names_s] =[]
identifier[self] . identifier[channel_names_s] = identifier[names_s]
identifier[self] . identifier[channel_names_n] = identifier[names_n]
identifier[keys_encoding_bits] =[ literal[string] . identifier[format] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[self] . identifier[channel_numbers] ]
identifier[add_keys_to_convert_to_int] =[ literal[string] , literal[string] , literal[string] ]
identifier[keys_to_convert_to_int] = identifier[keys_encoding_bits] + identifier[add_keys_to_convert_to_int]
keyword[for] identifier[key] keyword[in] identifier[keys_to_convert_to_int] :
identifier[value] = identifier[text] [ identifier[key] ]
identifier[text] [ identifier[key] ]= identifier[int] ( identifier[value] )
identifier[self] . identifier[annotation] . identifier[update] ( identifier[text] )
keyword[if] identifier[self] . identifier[_data_start] == literal[int] :
identifier[self] . identifier[_data_start] = identifier[int] ( identifier[text] [ literal[string] ])
keyword[if] identifier[self] . identifier[_data_end] == literal[int] :
identifier[self] . identifier[_data_end] = identifier[int] ( identifier[text] [ literal[string] ]) | def read_text(self, file_handle):
"""Parse the TEXT segment of the FCS file.
    The TEXT segment contains metadata associated with the FCS file.
    Converts all meta keywords to lower case.
"""
header = self.annotation['__header__'] # For convenience
#####
# Read in the TEXT segment of the FCS file
# There are some differences in how the
file_handle.seek(header['text start'], 0)
raw_text = file_handle.read(header['text end'] - header['text start'] + 1)
try:
raw_text = raw_text.decode(self._encoding) # depends on [control=['try'], data=[]]
except UnicodeDecodeError as e:
# Catching the exception and logging it in this way kills the traceback, but
# we can worry about this later.
logger.warning(u'Encountered an illegal utf-8 byte in the header.\n Illegal utf-8 characters will be ignored.\n{}'.format(e))
raw_text = raw_text.decode(self._encoding, errors='ignore') # depends on [control=['except'], data=['e']]
text = self._extract_text_dict(raw_text)
##
# Extract channel names and convert some of the channel properties
# and other fields into numeric data types (from string)
# Note: do not use regular expressions for manipulations here.
# Regular expressions are too heavy in terms of computation time.
pars = int(text['$PAR'])
if '$P0B' in text.keys(): # Checking whether channel number count starts from 0 or from 1
self.channel_numbers = range(0, pars) # Channel number count starts from 0 # depends on [control=['if'], data=[]]
else:
self.channel_numbers = range(1, pars + 1) # Channel numbers start from 1
# Extract parameter names
try:
names_n = tuple([text['$P{0}N'.format(i)] for i in self.channel_numbers]) # depends on [control=['try'], data=[]]
except KeyError:
names_n = [] # depends on [control=['except'], data=[]]
try:
names_s = tuple([text['$P{0}S'.format(i)] for i in self.channel_numbers]) # depends on [control=['try'], data=[]]
except KeyError:
names_s = [] # depends on [control=['except'], data=[]]
self.channel_names_s = names_s
self.channel_names_n = names_n
# Convert some of the fields into integer values
keys_encoding_bits = ['$P{0}B'.format(i) for i in self.channel_numbers]
add_keys_to_convert_to_int = ['$NEXTDATA', '$PAR', '$TOT']
keys_to_convert_to_int = keys_encoding_bits + add_keys_to_convert_to_int
for key in keys_to_convert_to_int:
value = text[key]
text[key] = int(value) # depends on [control=['for'], data=['key']]
self.annotation.update(text)
# Update data start segments if needed
if self._data_start == 0:
self._data_start = int(text['$BEGINDATA']) # depends on [control=['if'], data=[]]
if self._data_end == 0:
self._data_end = int(text['$ENDDATA']) # depends on [control=['if'], data=[]] |
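
The channel-name extraction step above, reduced to a runnable sketch on a hand-made TEXT dict; the keys follow the FCS "$P<n>N" convention used in the code:

text = {'$PAR': '2', '$P1N': 'FSC-A', '$P2N': 'SSC-A'}
pars = int(text['$PAR'])
channel_numbers = range(1, pars + 1)          # no '$P0B' key, so 1-based
names_n = tuple(text['$P{0}N'.format(i)] for i in channel_numbers)
print(names_n)  # ('FSC-A', 'SSC-A')
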
def get_optimal_variant(self, variants, start_words, **kwargs):
"""
    Returns the optimal variant from the sample.
"""
if not start_words:
return (choice(variants), {})
_variants = []
_weights = []
for tok in frozenset(variants):
if not self.token_is_correct(tok):
continue
weight = variants.count(tok)
for word in start_words:
for token in self.ONLY_WORDS.finditer(word.strip().lower()):
if token.group() == tok:
weight <<= 1
_variants.append(tok)
_weights.append(weight)
if not _variants:
return (choice(variants), {})
return (choices(_variants, weights=_weights, k=1)[0], {}) | def function[get_optimal_variant, parameter[self, variants, start_words]]:
constant[
    Returns the optimal variant from the sample.
]
if <ast.UnaryOp object at 0x7da207f03a00> begin[:]
return[tuple[[<ast.Call object at 0x7da207f032b0>, <ast.Dict object at 0x7da207f01000>]]]
variable[_variants] assign[=] list[[]]
variable[_weights] assign[=] list[[]]
for taget[name[tok]] in starred[call[name[frozenset], parameter[name[variants]]]] begin[:]
if <ast.UnaryOp object at 0x7da207f00190> begin[:]
continue
variable[weight] assign[=] call[name[variants].count, parameter[name[tok]]]
for taget[name[word]] in starred[name[start_words]] begin[:]
for taget[name[token]] in starred[call[name[self].ONLY_WORDS.finditer, parameter[call[call[name[word].strip, parameter[]].lower, parameter[]]]]] begin[:]
if compare[call[name[token].group, parameter[]] equal[==] name[tok]] begin[:]
<ast.AugAssign object at 0x7da207f03f10>
call[name[_variants].append, parameter[name[tok]]]
call[name[_weights].append, parameter[name[weight]]]
if <ast.UnaryOp object at 0x7da207f025f0> begin[:]
return[tuple[[<ast.Call object at 0x7da207f033a0>, <ast.Dict object at 0x7da207f00e20>]]]
return[tuple[[<ast.Subscript object at 0x7da207f00a30>, <ast.Dict object at 0x7da207f00b20>]]] | keyword[def] identifier[get_optimal_variant] ( identifier[self] , identifier[variants] , identifier[start_words] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[start_words] :
keyword[return] ( identifier[choice] ( identifier[variants] ),{})
identifier[_variants] =[]
identifier[_weights] =[]
keyword[for] identifier[tok] keyword[in] identifier[frozenset] ( identifier[variants] ):
keyword[if] keyword[not] identifier[self] . identifier[token_is_correct] ( identifier[tok] ):
keyword[continue]
identifier[weight] = identifier[variants] . identifier[count] ( identifier[tok] )
keyword[for] identifier[word] keyword[in] identifier[start_words] :
keyword[for] identifier[token] keyword[in] identifier[self] . identifier[ONLY_WORDS] . identifier[finditer] ( identifier[word] . identifier[strip] (). identifier[lower] ()):
keyword[if] identifier[token] . identifier[group] ()== identifier[tok] :
identifier[weight] <<= literal[int]
identifier[_variants] . identifier[append] ( identifier[tok] )
identifier[_weights] . identifier[append] ( identifier[weight] )
keyword[if] keyword[not] identifier[_variants] :
keyword[return] ( identifier[choice] ( identifier[variants] ),{})
keyword[return] ( identifier[choices] ( identifier[_variants] , identifier[weights] = identifier[_weights] , identifier[k] = literal[int] )[ literal[int] ],{}) | def get_optimal_variant(self, variants, start_words, **kwargs):
"""
    Returns the optimal variant from the sample.
"""
if not start_words:
return (choice(variants), {}) # depends on [control=['if'], data=[]]
_variants = []
_weights = []
for tok in frozenset(variants):
if not self.token_is_correct(tok):
continue # depends on [control=['if'], data=[]]
weight = variants.count(tok)
for word in start_words:
for token in self.ONLY_WORDS.finditer(word.strip().lower()):
if token.group() == tok:
weight <<= 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['token']] # depends on [control=['for'], data=['word']]
_variants.append(tok)
_weights.append(weight) # depends on [control=['for'], data=['tok']]
if not _variants:
return (choice(variants), {}) # depends on [control=['if'], data=[]]
return (choices(_variants, weights=_weights, k=1)[0], {}) |
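
The weighting scheme above in isolation: each candidate starts at its occurrence count and is doubled when it appears among the start words. The class's regex tokenizer and token_is_correct filter are simplified to a whitespace split here:

from random import choices

variants = ['cat', 'dog', 'cat', 'bird']
start_words = ['the cat sat']
pool = sorted(frozenset(variants))
weights = []
for tok in pool:
    weight = variants.count(tok)
    for word in start_words:
        if tok in word.strip().lower().split():
            weight <<= 1
    weights.append(weight)
print(list(zip(pool, weights)))               # [('bird', 1), ('cat', 4), ('dog', 1)]
print(choices(pool, weights=weights, k=1)[0])
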
def custom(self, ref, context=None):
"""
Get whether the specified reference is B{not} an (xs) builtin.
@param ref: A str or qref.
@type ref: (str|qref)
@return: True if B{not} a builtin, else False.
@rtype: bool
"""
if ref is None:
return True
else:
return not self.builtin(ref, context) | def function[custom, parameter[self, ref, context]]:
constant[
Get whether the specified reference is B{not} an (xs) builtin.
@param ref: A str or qref.
@type ref: (str|qref)
@return: True if B{not} a builtin, else False.
@rtype: bool
]
if compare[name[ref] is constant[None]] begin[:]
return[constant[True]] | keyword[def] identifier[custom] ( identifier[self] , identifier[ref] , identifier[context] = keyword[None] ):
literal[string]
keyword[if] identifier[ref] keyword[is] keyword[None] :
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[not] identifier[self] . identifier[builtin] ( identifier[ref] , identifier[context] ) | def custom(self, ref, context=None):
"""
Get whether the specified reference is B{not} an (xs) builtin.
@param ref: A str or qref.
@type ref: (str|qref)
@return: True if B{not} a builtin, else False.
@rtype: bool
"""
if ref is None:
return True # depends on [control=['if'], data=[]]
else:
return not self.builtin(ref, context) |
def _create_messages(self, metrics):
"""Create a list of zabbix messages from a list of ZabbixMetrics.
:type metrics_array: list
:param metrics_array: List of :class:`zabbix.sender.ZabbixMetric`.
:rtype: list
:return: List of zabbix messages.
"""
messages = []
# Fill the list of messages
for m in metrics:
messages.append(str(m))
logger.debug('Messages: %s', messages)
return messages | def function[_create_messages, parameter[self, metrics]]:
constant[Create a list of zabbix messages from a list of ZabbixMetrics.
:type metrics_array: list
:param metrics_array: List of :class:`zabbix.sender.ZabbixMetric`.
:rtype: list
:return: List of zabbix messages.
]
variable[messages] assign[=] list[[]]
for taget[name[m]] in starred[name[metrics]] begin[:]
call[name[messages].append, parameter[call[name[str], parameter[name[m]]]]]
call[name[logger].debug, parameter[constant[Messages: %s], name[messages]]]
return[name[messages]] | keyword[def] identifier[_create_messages] ( identifier[self] , identifier[metrics] ):
literal[string]
identifier[messages] =[]
keyword[for] identifier[m] keyword[in] identifier[metrics] :
identifier[messages] . identifier[append] ( identifier[str] ( identifier[m] ))
identifier[logger] . identifier[debug] ( literal[string] , identifier[messages] )
keyword[return] identifier[messages] | def _create_messages(self, metrics):
"""Create a list of zabbix messages from a list of ZabbixMetrics.
:type metrics_array: list
:param metrics_array: List of :class:`zabbix.sender.ZabbixMetric`.
:rtype: list
:return: List of zabbix messages.
"""
messages = []
# Fill the list of messages
for m in metrics:
messages.append(str(m)) # depends on [control=['for'], data=['m']]
logger.debug('Messages: %s', messages)
return messages |
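
Hypothetical usage of the message builder above; the sender only relies on each metric stringifying to the wire format, so plain strings stand in for ZabbixMetric objects:

metrics = ['host1 cpu.load 1.2', 'host2 cpu.load 0.7']  # stand-ins for ZabbixMetric
messages = [str(m) for m in metrics]   # what _create_messages does per metric
print(messages)
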
def _fetch_issues(self, from_date):
"""Fetch the issues from a project (distribution/package)"""
issues_groups = self.client.issues(start=from_date)
for raw_issues in issues_groups:
issues = json.loads(raw_issues)['entries']
for issue in issues:
issue = self.__init_extra_issue_fields(issue)
issue_id = self.__extract_issue_id(issue['bug_link'])
for field in TARGET_ISSUE_FIELDS:
if not issue[field]:
continue
if field == 'bug_link':
issue['bug_data'] = self.__fetch_issue_data(issue_id)
issue['activity_data'] = [activity for activity in self.__fetch_issue_activities(issue_id)]
issue['messages_data'] = [message for message in self.__fetch_issue_messages(issue_id)]
issue['attachments_data'] = [attachment for attachment in self.__fetch_issue_attachments(issue_id)]
elif field == 'assignee_link':
issue['assignee_data'] = self.__fetch_user_data('{ASSIGNEE}', issue[field])
elif field == 'owner_link':
issue['owner_data'] = self.__fetch_user_data('{OWNER}', issue[field])
yield issue | def function[_fetch_issues, parameter[self, from_date]]:
constant[Fetch the issues from a project (distribution/package)]
variable[issues_groups] assign[=] call[name[self].client.issues, parameter[]]
for taget[name[raw_issues]] in starred[name[issues_groups]] begin[:]
variable[issues] assign[=] call[call[name[json].loads, parameter[name[raw_issues]]]][constant[entries]]
for taget[name[issue]] in starred[name[issues]] begin[:]
variable[issue] assign[=] call[name[self].__init_extra_issue_fields, parameter[name[issue]]]
variable[issue_id] assign[=] call[name[self].__extract_issue_id, parameter[call[name[issue]][constant[bug_link]]]]
for taget[name[field]] in starred[name[TARGET_ISSUE_FIELDS]] begin[:]
if <ast.UnaryOp object at 0x7da1b059ce20> begin[:]
continue
if compare[name[field] equal[==] constant[bug_link]] begin[:]
call[name[issue]][constant[bug_data]] assign[=] call[name[self].__fetch_issue_data, parameter[name[issue_id]]]
call[name[issue]][constant[activity_data]] assign[=] <ast.ListComp object at 0x7da1b059e800>
call[name[issue]][constant[messages_data]] assign[=] <ast.ListComp object at 0x7da1b02f1450>
call[name[issue]][constant[attachments_data]] assign[=] <ast.ListComp object at 0x7da1b02f13f0>
<ast.Yield object at 0x7da1b0284f70> | keyword[def] identifier[_fetch_issues] ( identifier[self] , identifier[from_date] ):
literal[string]
identifier[issues_groups] = identifier[self] . identifier[client] . identifier[issues] ( identifier[start] = identifier[from_date] )
keyword[for] identifier[raw_issues] keyword[in] identifier[issues_groups] :
identifier[issues] = identifier[json] . identifier[loads] ( identifier[raw_issues] )[ literal[string] ]
keyword[for] identifier[issue] keyword[in] identifier[issues] :
identifier[issue] = identifier[self] . identifier[__init_extra_issue_fields] ( identifier[issue] )
identifier[issue_id] = identifier[self] . identifier[__extract_issue_id] ( identifier[issue] [ literal[string] ])
keyword[for] identifier[field] keyword[in] identifier[TARGET_ISSUE_FIELDS] :
keyword[if] keyword[not] identifier[issue] [ identifier[field] ]:
keyword[continue]
keyword[if] identifier[field] == literal[string] :
identifier[issue] [ literal[string] ]= identifier[self] . identifier[__fetch_issue_data] ( identifier[issue_id] )
identifier[issue] [ literal[string] ]=[ identifier[activity] keyword[for] identifier[activity] keyword[in] identifier[self] . identifier[__fetch_issue_activities] ( identifier[issue_id] )]
identifier[issue] [ literal[string] ]=[ identifier[message] keyword[for] identifier[message] keyword[in] identifier[self] . identifier[__fetch_issue_messages] ( identifier[issue_id] )]
identifier[issue] [ literal[string] ]=[ identifier[attachment] keyword[for] identifier[attachment] keyword[in] identifier[self] . identifier[__fetch_issue_attachments] ( identifier[issue_id] )]
keyword[elif] identifier[field] == literal[string] :
identifier[issue] [ literal[string] ]= identifier[self] . identifier[__fetch_user_data] ( literal[string] , identifier[issue] [ identifier[field] ])
keyword[elif] identifier[field] == literal[string] :
identifier[issue] [ literal[string] ]= identifier[self] . identifier[__fetch_user_data] ( literal[string] , identifier[issue] [ identifier[field] ])
keyword[yield] identifier[issue] | def _fetch_issues(self, from_date):
"""Fetch the issues from a project (distribution/package)"""
issues_groups = self.client.issues(start=from_date)
for raw_issues in issues_groups:
issues = json.loads(raw_issues)['entries']
for issue in issues:
issue = self.__init_extra_issue_fields(issue)
issue_id = self.__extract_issue_id(issue['bug_link'])
for field in TARGET_ISSUE_FIELDS:
if not issue[field]:
continue # depends on [control=['if'], data=[]]
if field == 'bug_link':
issue['bug_data'] = self.__fetch_issue_data(issue_id)
issue['activity_data'] = [activity for activity in self.__fetch_issue_activities(issue_id)]
issue['messages_data'] = [message for message in self.__fetch_issue_messages(issue_id)]
issue['attachments_data'] = [attachment for attachment in self.__fetch_issue_attachments(issue_id)] # depends on [control=['if'], data=[]]
elif field == 'assignee_link':
issue['assignee_data'] = self.__fetch_user_data('{ASSIGNEE}', issue[field]) # depends on [control=['if'], data=['field']]
elif field == 'owner_link':
issue['owner_data'] = self.__fetch_user_data('{OWNER}', issue[field]) # depends on [control=['if'], data=['field']] # depends on [control=['for'], data=['field']]
yield issue # depends on [control=['for'], data=['issue']] # depends on [control=['for'], data=['raw_issues']] |
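
The pagination shape used by _fetch_issues, reduced to a generic runnable generator (the per-issue enrichment calls are Launchpad-specific and omitted):

import json

def iter_entries(pages):
    for raw in pages:
        for entry in json.loads(raw)['entries']:
            yield entry

pages = ['{"entries": [{"bug_link": "https://bugs/1"}, {"bug_link": "https://bugs/2"}]}']
print([e['bug_link'] for e in iter_entries(pages)])  # ['https://bugs/1', 'https://bugs/2']
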
def jx_expression_to_function(expr):
"""
RETURN FUNCTION THAT REQUIRES PARAMETERS (row, rownum=None, rows=None):
"""
if is_expression(expr):
if is_op(expr, ScriptOp) and not is_text(expr.script):
return expr.script
else:
return compile_expression(Python[expr].to_python())
if (
expr != None
and not is_data(expr)
and not is_list(expr)
and hasattr(expr, "__call__")
):
return expr
return compile_expression(Python[jx_expression(expr)].to_python()) | def function[jx_expression_to_function, parameter[expr]]:
constant[
RETURN FUNCTION THAT REQUIRES PARAMETERS (row, rownum=None, rows=None):
]
if call[name[is_expression], parameter[name[expr]]] begin[:]
if <ast.BoolOp object at 0x7da20c992830> begin[:]
return[name[expr].script]
if <ast.BoolOp object at 0x7da20c990be0> begin[:]
return[name[expr]]
return[call[name[compile_expression], parameter[call[call[name[Python]][call[name[jx_expression], parameter[name[expr]]]].to_python, parameter[]]]]] | keyword[def] identifier[jx_expression_to_function] ( identifier[expr] ):
literal[string]
keyword[if] identifier[is_expression] ( identifier[expr] ):
keyword[if] identifier[is_op] ( identifier[expr] , identifier[ScriptOp] ) keyword[and] keyword[not] identifier[is_text] ( identifier[expr] . identifier[script] ):
keyword[return] identifier[expr] . identifier[script]
keyword[else] :
keyword[return] identifier[compile_expression] ( identifier[Python] [ identifier[expr] ]. identifier[to_python] ())
keyword[if] (
identifier[expr] != keyword[None]
keyword[and] keyword[not] identifier[is_data] ( identifier[expr] )
keyword[and] keyword[not] identifier[is_list] ( identifier[expr] )
keyword[and] identifier[hasattr] ( identifier[expr] , literal[string] )
):
keyword[return] identifier[expr]
keyword[return] identifier[compile_expression] ( identifier[Python] [ identifier[jx_expression] ( identifier[expr] )]. identifier[to_python] ()) | def jx_expression_to_function(expr):
"""
RETURN FUNCTION THAT REQUIRES PARAMETERS (row, rownum=None, rows=None):
"""
if is_expression(expr):
if is_op(expr, ScriptOp) and (not is_text(expr.script)):
return expr.script # depends on [control=['if'], data=[]]
else:
return compile_expression(Python[expr].to_python()) # depends on [control=['if'], data=[]]
if expr != None and (not is_data(expr)) and (not is_list(expr)) and hasattr(expr, '__call__'):
return expr # depends on [control=['if'], data=[]]
return compile_expression(Python[jx_expression(expr)].to_python()) |
def read_form_data(self):
"""Attempt to read the form data from the request"""
if self.processed_data:
raise exceptions.AlreadyProcessed('The data has already been processed for this form')
if self.readonly:
return
if request.method == self.method:
if self.method == 'POST':
data = request.form
else:
data = request.args
if self.submitted_hidden_input_name in data:
# The form has been submitted
self.processed_data = True
for field in self.all_fields:
# We need to skip readonly fields
if field.readonly:
pass
else:
field.extract_value(data)
# Validate the field
if not field.validate():
log.debug('Validation error in field \'%s\': %s' % (field.name, field.error))
self.has_errors = True | def function[read_form_data, parameter[self]]:
constant[Attempt to read the form data from the request]
if name[self].processed_data begin[:]
<ast.Raise object at 0x7da20c796560>
if name[self].readonly begin[:]
return[None]
if compare[name[request].method equal[==] name[self].method] begin[:]
if compare[name[self].method equal[==] constant[POST]] begin[:]
variable[data] assign[=] name[request].form
if compare[name[self].submitted_hidden_input_name in name[data]] begin[:]
name[self].processed_data assign[=] constant[True]
for taget[name[field]] in starred[name[self].all_fields] begin[:]
if name[field].readonly begin[:]
pass | keyword[def] identifier[read_form_data] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[processed_data] :
keyword[raise] identifier[exceptions] . identifier[AlreadyProcessed] ( literal[string] )
keyword[if] identifier[self] . identifier[readonly] :
keyword[return]
keyword[if] identifier[request] . identifier[method] == identifier[self] . identifier[method] :
keyword[if] identifier[self] . identifier[method] == literal[string] :
identifier[data] = identifier[request] . identifier[form]
keyword[else] :
identifier[data] = identifier[request] . identifier[args]
keyword[if] identifier[self] . identifier[submitted_hidden_input_name] keyword[in] identifier[data] :
identifier[self] . identifier[processed_data] = keyword[True]
keyword[for] identifier[field] keyword[in] identifier[self] . identifier[all_fields] :
keyword[if] identifier[field] . identifier[readonly] :
keyword[pass]
keyword[else] :
identifier[field] . identifier[extract_value] ( identifier[data] )
keyword[if] keyword[not] identifier[field] . identifier[validate] ():
identifier[log] . identifier[debug] ( literal[string] %( identifier[field] . identifier[name] , identifier[field] . identifier[error] ))
identifier[self] . identifier[has_errors] = keyword[True] | def read_form_data(self):
"""Attempt to read the form data from the request"""
if self.processed_data:
raise exceptions.AlreadyProcessed('The data has already been processed for this form') # depends on [control=['if'], data=[]]
if self.readonly:
return # depends on [control=['if'], data=[]]
if request.method == self.method:
if self.method == 'POST':
data = request.form # depends on [control=['if'], data=[]]
else:
data = request.args
if self.submitted_hidden_input_name in data:
# The form has been submitted
self.processed_data = True
for field in self.all_fields:
# We need to skip readonly fields
if field.readonly:
pass # depends on [control=['if'], data=[]]
else:
field.extract_value(data)
# Validate the field
if not field.validate():
log.debug("Validation error in field '%s': %s" % (field.name, field.error))
self.has_errors = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']] # depends on [control=['if'], data=['data']] # depends on [control=['if'], data=[]] |
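
The request-source selection at the heart of read_form_data, isolated from Flask so it runs standalone; request.form and request.args become plain dicts:

def pick_data(method, form, args):
    return form if method == 'POST' else args

print(pick_data('POST', {'name': 'a'}, {}))   # form data wins on POST
print(pick_data('GET', {}, {'page': '2'}))    # query args otherwise
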
def CreateUnit(self, parent=None, value=None, bid_amount=None):
"""Creates a unit node.
Args:
parent: The node that should be this node's parent.
value: The value being partitioned on.
bid_amount: The amount to bid for matching products, in micros.
Returns:
A new unit node.
"""
unit = {
'xsi_type': 'ProductPartition',
'partitionType': 'UNIT'
}
# The root node has neither a parent nor a value.
if parent is not None:
unit['parentCriterionId'] = parent['id']
unit['caseValue'] = value
if bid_amount is not None and bid_amount > 0:
# Note: Showcase ads require that the campaign has a ManualCpc
# BiddingStrategyConfiguration.
bidding_strategy_configuration = {
'bids': [{
'xsi_type': 'CpcBid',
'bid': {
'xsi_type': 'Money',
'microAmount': str(bid_amount)
}
}]
}
adgroup_criterion = {
'xsi_type': 'BiddableAdGroupCriterion',
'biddingStrategyConfiguration': bidding_strategy_configuration
}
else:
adgroup_criterion = {
'xsi_type': 'NegativeAdGroupCriterion'
}
adgroup_criterion['adGroupId'] = self.adgroup_id
adgroup_criterion['criterion'] = unit
self.CreateAddOperation(adgroup_criterion)
return unit | def function[CreateUnit, parameter[self, parent, value, bid_amount]]:
constant[Creates a unit node.
Args:
parent: The node that should be this node's parent.
value: The value being partitioned on.
bid_amount: The amount to bid for matching products, in micros.
Returns:
A new unit node.
]
variable[unit] assign[=] dictionary[[<ast.Constant object at 0x7da1b1b0e4a0>, <ast.Constant object at 0x7da1b1b0d420>], [<ast.Constant object at 0x7da1b1b0d660>, <ast.Constant object at 0x7da1b1b0d6c0>]]
if compare[name[parent] is_not constant[None]] begin[:]
call[name[unit]][constant[parentCriterionId]] assign[=] call[name[parent]][constant[id]]
call[name[unit]][constant[caseValue]] assign[=] name[value]
if <ast.BoolOp object at 0x7da1b1b0c610> begin[:]
variable[bidding_strategy_configuration] assign[=] dictionary[[<ast.Constant object at 0x7da1b1b0eef0>], [<ast.List object at 0x7da1b1b0f0a0>]]
variable[adgroup_criterion] assign[=] dictionary[[<ast.Constant object at 0x7da1b1b0e5c0>, <ast.Constant object at 0x7da1b1bbf460>], [<ast.Constant object at 0x7da1b1bbc430>, <ast.Name object at 0x7da1b1bbc100>]]
call[name[adgroup_criterion]][constant[adGroupId]] assign[=] name[self].adgroup_id
call[name[adgroup_criterion]][constant[criterion]] assign[=] name[unit]
call[name[self].CreateAddOperation, parameter[name[adgroup_criterion]]]
return[name[unit]] | keyword[def] identifier[CreateUnit] ( identifier[self] , identifier[parent] = keyword[None] , identifier[value] = keyword[None] , identifier[bid_amount] = keyword[None] ):
literal[string]
identifier[unit] ={
literal[string] : literal[string] ,
literal[string] : literal[string]
}
keyword[if] identifier[parent] keyword[is] keyword[not] keyword[None] :
identifier[unit] [ literal[string] ]= identifier[parent] [ literal[string] ]
identifier[unit] [ literal[string] ]= identifier[value]
keyword[if] identifier[bid_amount] keyword[is] keyword[not] keyword[None] keyword[and] identifier[bid_amount] > literal[int] :
identifier[bidding_strategy_configuration] ={
literal[string] :[{
literal[string] : literal[string] ,
literal[string] :{
literal[string] : literal[string] ,
literal[string] : identifier[str] ( identifier[bid_amount] )
}
}]
}
identifier[adgroup_criterion] ={
literal[string] : literal[string] ,
literal[string] : identifier[bidding_strategy_configuration]
}
keyword[else] :
identifier[adgroup_criterion] ={
literal[string] : literal[string]
}
identifier[adgroup_criterion] [ literal[string] ]= identifier[self] . identifier[adgroup_id]
identifier[adgroup_criterion] [ literal[string] ]= identifier[unit]
identifier[self] . identifier[CreateAddOperation] ( identifier[adgroup_criterion] )
keyword[return] identifier[unit] | def CreateUnit(self, parent=None, value=None, bid_amount=None):
"""Creates a unit node.
Args:
parent: The node that should be this node's parent.
value: The value being partitioned on.
bid_amount: The amount to bid for matching products, in micros.
Returns:
A new unit node.
"""
unit = {'xsi_type': 'ProductPartition', 'partitionType': 'UNIT'}
# The root node has neither a parent nor a value.
if parent is not None:
unit['parentCriterionId'] = parent['id']
unit['caseValue'] = value # depends on [control=['if'], data=['parent']]
if bid_amount is not None and bid_amount > 0:
# Note: Showcase ads require that the campaign has a ManualCpc
# BiddingStrategyConfiguration.
bidding_strategy_configuration = {'bids': [{'xsi_type': 'CpcBid', 'bid': {'xsi_type': 'Money', 'microAmount': str(bid_amount)}}]}
adgroup_criterion = {'xsi_type': 'BiddableAdGroupCriterion', 'biddingStrategyConfiguration': bidding_strategy_configuration} # depends on [control=['if'], data=[]]
else:
adgroup_criterion = {'xsi_type': 'NegativeAdGroupCriterion'}
adgroup_criterion['adGroupId'] = self.adgroup_id
adgroup_criterion['criterion'] = unit
self.CreateAddOperation(adgroup_criterion)
return unit |
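
A hedged sketch of driving CreateUnit: the enclosing builder class is not in this record, so a minimal stub supplies adgroup_id and collects operations, and it assumes the CreateUnit function above is available at module level. The temp id on the root is invented for illustration; real criterion ids come from the API:

class _StubBuilder:
    adgroup_id = 12345

    def __init__(self):
        self.ops = []

    def CreateAddOperation(self, op):
        self.ops.append(op)

    CreateUnit = CreateUnit  # reuse the function defined above

b = _StubBuilder()
root = b.CreateUnit(bid_amount=500000)   # biddable root unit
root['id'] = -1                          # temp id; normally assigned elsewhere
b.CreateUnit(parent=root,
             value={'xsi_type': 'ProductBrand', 'value': 'ACME'})
print(b.ops[0]['xsi_type'])  # BiddableAdGroupCriterion
print(b.ops[1]['xsi_type'])  # NegativeAdGroupCriterion (no bid -> excluded)
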
def _compute_distance_matrix(self):
"""Compute the full distance matrix on pairs of nodes.
The distance map self._dist_matrix is computed from the graph using
all_pairs_shortest_path_length.
"""
if not self.is_connected():
raise CouplingError("coupling graph not connected")
lengths = nx.all_pairs_shortest_path_length(self.graph.to_undirected(as_view=True))
lengths = dict(lengths)
size = len(lengths)
cmap = np.zeros((size, size))
for idx in range(size):
cmap[idx, np.fromiter(lengths[idx].keys(), dtype=int)] = np.fromiter(
lengths[idx].values(), dtype=int)
self._dist_matrix = cmap | def function[_compute_distance_matrix, parameter[self]]:
constant[Compute the full distance matrix on pairs of nodes.
The distance map self._dist_matrix is computed from the graph using
all_pairs_shortest_path_length.
]
if <ast.UnaryOp object at 0x7da18f00e440> begin[:]
<ast.Raise object at 0x7da18f00de40>
variable[lengths] assign[=] call[name[nx].all_pairs_shortest_path_length, parameter[call[name[self].graph.to_undirected, parameter[]]]]
variable[lengths] assign[=] call[name[dict], parameter[name[lengths]]]
variable[size] assign[=] call[name[len], parameter[name[lengths]]]
variable[cmap] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b03e3190>, <ast.Name object at 0x7da1b03e3e50>]]]]
for taget[name[idx]] in starred[call[name[range], parameter[name[size]]]] begin[:]
call[name[cmap]][tuple[[<ast.Name object at 0x7da1b03e0760>, <ast.Call object at 0x7da1b03e2530>]]] assign[=] call[name[np].fromiter, parameter[call[call[name[lengths]][name[idx]].values, parameter[]]]]
name[self]._dist_matrix assign[=] name[cmap] | keyword[def] identifier[_compute_distance_matrix] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_connected] ():
keyword[raise] identifier[CouplingError] ( literal[string] )
identifier[lengths] = identifier[nx] . identifier[all_pairs_shortest_path_length] ( identifier[self] . identifier[graph] . identifier[to_undirected] ( identifier[as_view] = keyword[True] ))
identifier[lengths] = identifier[dict] ( identifier[lengths] )
identifier[size] = identifier[len] ( identifier[lengths] )
identifier[cmap] = identifier[np] . identifier[zeros] (( identifier[size] , identifier[size] ))
keyword[for] identifier[idx] keyword[in] identifier[range] ( identifier[size] ):
identifier[cmap] [ identifier[idx] , identifier[np] . identifier[fromiter] ( identifier[lengths] [ identifier[idx] ]. identifier[keys] (), identifier[dtype] = identifier[int] )]= identifier[np] . identifier[fromiter] (
identifier[lengths] [ identifier[idx] ]. identifier[values] (), identifier[dtype] = identifier[int] )
identifier[self] . identifier[_dist_matrix] = identifier[cmap] | def _compute_distance_matrix(self):
"""Compute the full distance matrix on pairs of nodes.
The distance map self._dist_matrix is computed from the graph using
all_pairs_shortest_path_length.
"""
if not self.is_connected():
raise CouplingError('coupling graph not connected') # depends on [control=['if'], data=[]]
lengths = nx.all_pairs_shortest_path_length(self.graph.to_undirected(as_view=True))
lengths = dict(lengths)
size = len(lengths)
cmap = np.zeros((size, size))
for idx in range(size):
cmap[idx, np.fromiter(lengths[idx].keys(), dtype=int)] = np.fromiter(lengths[idx].values(), dtype=int) # depends on [control=['for'], data=['idx']]
self._dist_matrix = cmap |
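The matrix-filling recipe above is plain networkx plus numpy and can be exercised standalone; the sketch below applies it to a 4-node path graph instead of the class's coupling graph.

import networkx as nx
import numpy as np

g = nx.path_graph(4)  # 0-1-2-3, undirected and connected
lengths = dict(nx.all_pairs_shortest_path_length(g))
size = len(lengths)
dist = np.zeros((size, size))
for idx in range(size):
    # scatter each row's {node: distance} mapping into the dense matrix
    dist[idx, np.fromiter(lengths[idx].keys(), dtype=int)] = np.fromiter(
        lengths[idx].values(), dtype=int)
assert dist[0, 3] == 3  # three hops from one end of the path to the other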
def dict_query(dic, query):
""" Query a dict with 'dotted notation'. Returns an OrderedDict.
A query of "foo.bar.baz" would retrieve 'wat' from this::
dic = {
'foo': {
'bar': {
'baz': 'wat',
}
}
}
Multiple queries can be specified if comma-separated. For instance, the
query "foo.bar.baz,foo.bar.something_else" would return this::
OrderedDict({
"foo.bar.baz": "wat",
"foo.bar.something_else": None,
})
"""
if not isinstance(query, six.string_types):
raise ValueError("query must be a string, not %r" % type(query))
def _browse(tokens, d):
""" Recurse through a dict to retrieve a value. """
current, rest = tokens[0], tokens[1:]
if not rest:
return d.get(current, None)
if current in d:
if isinstance(d[current], dict):
return _browse(rest, d[current])
elif rest:
return None
else:
return d[current]
keys = [key.strip().split('.') for key in query.split(',')]
return OrderedDict([
('.'.join(tokens), _browse(tokens, dic)) for tokens in keys
]) | def function[dict_query, parameter[dic, query]]:
constant[ Query a dict with 'dotted notation'. Returns an OrderedDict.
A query of "foo.bar.baz" would retrieve 'wat' from this::
dic = {
'foo': {
'bar': {
'baz': 'wat',
}
}
}
Multiple queries can be specified if comma-separated. For instance, the
query "foo.bar.baz,foo.bar.something_else" would return this::
OrderedDict({
"foo.bar.baz": "wat",
"foo.bar.something_else": None,
})
]
if <ast.UnaryOp object at 0x7da1b0478af0> begin[:]
<ast.Raise object at 0x7da20c990f70>
def function[_browse, parameter[tokens, d]]:
constant[ Recurse through a dict to retrieve a value. ]
<ast.Tuple object at 0x7da20c990ac0> assign[=] tuple[[<ast.Subscript object at 0x7da20c991300>, <ast.Subscript object at 0x7da20c991180>]]
if <ast.UnaryOp object at 0x7da20c991de0> begin[:]
return[call[name[d].get, parameter[name[current], constant[None]]]]
if compare[name[current] in name[d]] begin[:]
if call[name[isinstance], parameter[call[name[d]][name[current]], name[dict]]] begin[:]
return[call[name[_browse], parameter[name[rest], call[name[d]][name[current]]]]]
variable[keys] assign[=] <ast.ListComp object at 0x7da20c993280>
return[call[name[OrderedDict], parameter[<ast.ListComp object at 0x7da20c990100>]]] | keyword[def] identifier[dict_query] ( identifier[dic] , identifier[query] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[query] , identifier[six] . identifier[string_types] ):
keyword[raise] identifier[ValueError] ( literal[string] % identifier[type] ( identifier[query] ))
keyword[def] identifier[_browse] ( identifier[tokens] , identifier[d] ):
literal[string]
identifier[current] , identifier[rest] = identifier[tokens] [ literal[int] ], identifier[tokens] [ literal[int] :]
keyword[if] keyword[not] identifier[rest] :
keyword[return] identifier[d] . identifier[get] ( identifier[current] , keyword[None] )
keyword[if] identifier[current] keyword[in] identifier[d] :
keyword[if] identifier[isinstance] ( identifier[d] [ identifier[current] ], identifier[dict] ):
keyword[return] identifier[_browse] ( identifier[rest] , identifier[d] [ identifier[current] ])
keyword[elif] identifier[rest] :
keyword[return] keyword[None]
keyword[else] :
keyword[return] identifier[d] [ identifier[current] ]
identifier[keys] =[ identifier[key] . identifier[strip] (). identifier[split] ( literal[string] ) keyword[for] identifier[key] keyword[in] identifier[query] . identifier[split] ( literal[string] )]
keyword[return] identifier[OrderedDict] ([
( literal[string] . identifier[join] ( identifier[tokens] ), identifier[_browse] ( identifier[tokens] , identifier[dic] )) keyword[for] identifier[tokens] keyword[in] identifier[keys]
]) | def dict_query(dic, query):
""" Query a dict with 'dotted notation'. Returns an OrderedDict.
A query of "foo.bar.baz" would retrieve 'wat' from this::
dic = {
'foo': {
'bar': {
'baz': 'wat',
}
}
}
Multiple queries can be specified if comma-separated. For instance, the
query "foo.bar.baz,foo.bar.something_else" would return this::
OrderedDict({
"foo.bar.baz": "wat",
"foo.bar.something_else": None,
})
"""
if not isinstance(query, six.string_types):
raise ValueError('query must be a string, not %r' % type(query)) # depends on [control=['if'], data=[]]
def _browse(tokens, d):
""" Recurse through a dict to retrieve a value. """
(current, rest) = (tokens[0], tokens[1:])
if not rest:
return d.get(current, None) # depends on [control=['if'], data=[]]
if current in d:
if isinstance(d[current], dict):
return _browse(rest, d[current]) # depends on [control=['if'], data=[]]
elif rest:
return None # depends on [control=['if'], data=[]]
else:
return d[current] # depends on [control=['if'], data=['current', 'd']]
keys = [key.strip().split('.') for key in query.split(',')]
return OrderedDict([('.'.join(tokens), _browse(tokens, dic)) for tokens in keys]) |
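Since dict_query is a pure function, a quick smoke test is cheap; it assumes only the module-level imports the function already relies on (six and collections.OrderedDict).

dic = {'foo': {'bar': {'baz': 'wat'}}}
print(dict_query(dic, 'foo.bar.baz'))
# -> {'foo.bar.baz': 'wat'} (as an OrderedDict)
print(dict_query(dic, 'foo.bar.baz, foo.bar.missing'))
# -> {'foo.bar.baz': 'wat', 'foo.bar.missing': None} (as an OrderedDict)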
def below(self, ref):
"""
Move this object below the referenced object.
"""
if not self._valid_ordering_reference(ref):
raise ValueError(
"%r can only be moved below instances of %r which %s equals %r." % (
self, self.__class__, self.order_with_respect_to,
self._get_order_with_respect_to()
)
)
if self.order == ref.order:
return
if self.order > ref.order:
o = self.get_ordering_queryset().filter(order__gt=ref.order).aggregate(Min('order')).get('order__min') or 0
else:
o = ref.order
self.to(o) | def function[below, parameter[self, ref]]:
constant[
Move this object below the referenced object.
]
if <ast.UnaryOp object at 0x7da1b00e76d0> begin[:]
<ast.Raise object at 0x7da1b00e66e0>
if compare[name[self].order equal[==] name[ref].order] begin[:]
return[None]
if compare[name[self].order greater[>] name[ref].order] begin[:]
variable[o] assign[=] <ast.BoolOp object at 0x7da1b00e6530>
call[name[self].to, parameter[name[o]]] | keyword[def] identifier[below] ( identifier[self] , identifier[ref] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_valid_ordering_reference] ( identifier[ref] ):
keyword[raise] identifier[ValueError] (
literal[string] %(
identifier[self] , identifier[self] . identifier[__class__] , identifier[self] . identifier[order_with_respect_to] ,
identifier[self] . identifier[_get_order_with_respect_to] ()
)
)
keyword[if] identifier[self] . identifier[order] == identifier[ref] . identifier[order] :
keyword[return]
keyword[if] identifier[self] . identifier[order] > identifier[ref] . identifier[order] :
identifier[o] = identifier[self] . identifier[get_ordering_queryset] (). identifier[filter] ( identifier[order__gt] = identifier[ref] . identifier[order] ). identifier[aggregate] ( identifier[Min] ( literal[string] )). identifier[get] ( literal[string] ) keyword[or] literal[int]
keyword[else] :
identifier[o] = identifier[ref] . identifier[order]
identifier[self] . identifier[to] ( identifier[o] ) | def below(self, ref):
"""
Move this object below the referenced object.
"""
if not self._valid_ordering_reference(ref):
raise ValueError('%r can only be moved below instances of %r which %s equals %r.' % (self, self.__class__, self.order_with_respect_to, self._get_order_with_respect_to())) # depends on [control=['if'], data=[]]
if self.order == ref.order:
return # depends on [control=['if'], data=[]]
if self.order > ref.order:
o = self.get_ordering_queryset().filter(order__gt=ref.order).aggregate(Min('order')).get('order__min') or 0 # depends on [control=['if'], data=[]]
else:
o = ref.order
self.to(o) |
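A hedged usage sketch for below(): `Task` is a hypothetical Django model carrying this ordered-model mixin, with no order_with_respect_to grouping, so any two saved instances are valid ordering references.

first = Task.objects.get(pk=1)   # say order == 0
last = Task.objects.get(pk=5)    # say order == 4
first.below(last)                # re-slots `first` directly below `last`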
def _place_secondary_files(inp_tool, inp_binding=None):
"""Put secondaryFiles at the level of the File item to ensure indexes get passed.
"""
    def _is_file(val):
        # Detect CWL File types, including inside list/tuple type unions.
        return (val == "File" or
                (isinstance(val, (list, tuple)) and
                 ("File" in val or
                  any(isinstance(x, dict) and _is_file(x) for x in val))))
secondary_files = inp_tool.pop("secondaryFiles", None)
if secondary_files:
key = []
while (not _is_file(tz.get_in(key + ["type"], inp_tool))
and not _is_file(tz.get_in(key + ["items"], inp_tool))
and not _is_file(tz.get_in(key + ["items", "items"], inp_tool))):
key.append("type")
if tz.get_in(key, inp_tool):
inp_tool["secondaryFiles"] = secondary_files
elif inp_binding:
nested_inp_binding = copy.deepcopy(inp_binding)
nested_inp_binding["prefix"] = "ignore="
nested_inp_binding["secondaryFiles"] = secondary_files
inp_tool = tz.update_in(inp_tool, key, lambda x: nested_inp_binding)
return inp_tool | def function[_place_secondary_files, parameter[inp_tool, inp_binding]]:
constant[Put secondaryFiles at the level of the File item to ensure indexes get passed.
]
def function[_is_file, parameter[val]]:
return[<ast.BoolOp object at 0x7da1b18bcd30>]
variable[secondary_files] assign[=] call[name[inp_tool].pop, parameter[constant[secondaryFiles], constant[None]]]
if name[secondary_files] begin[:]
variable[key] assign[=] list[[]]
while <ast.BoolOp object at 0x7da1b18bdd20> begin[:]
call[name[key].append, parameter[constant[type]]]
if call[name[tz].get_in, parameter[name[key], name[inp_tool]]] begin[:]
call[name[inp_tool]][constant[secondaryFiles]] assign[=] name[secondary_files]
return[name[inp_tool]] | keyword[def] identifier[_place_secondary_files] ( identifier[inp_tool] , identifier[inp_binding] = keyword[None] ):
literal[string]
keyword[def] identifier[_is_file] ( identifier[val] ):
keyword[return] ( identifier[val] == literal[string] keyword[or] ( identifier[isinstance] ( identifier[val] ,( identifier[list] , identifier[tuple] )) keyword[and]
( literal[string] keyword[in] identifier[val] keyword[or] identifier[any] ( identifier[isinstance] ( identifier[x] , identifier[dict] ) keyword[and] identifier[_is_file] ( identifier[val] )) keyword[for] identifier[x] keyword[in] identifier[val] )))
identifier[secondary_files] = identifier[inp_tool] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] identifier[secondary_files] :
identifier[key] =[]
keyword[while] ( keyword[not] identifier[_is_file] ( identifier[tz] . identifier[get_in] ( identifier[key] +[ literal[string] ], identifier[inp_tool] ))
keyword[and] keyword[not] identifier[_is_file] ( identifier[tz] . identifier[get_in] ( identifier[key] +[ literal[string] ], identifier[inp_tool] ))
keyword[and] keyword[not] identifier[_is_file] ( identifier[tz] . identifier[get_in] ( identifier[key] +[ literal[string] , literal[string] ], identifier[inp_tool] ))):
identifier[key] . identifier[append] ( literal[string] )
keyword[if] identifier[tz] . identifier[get_in] ( identifier[key] , identifier[inp_tool] ):
identifier[inp_tool] [ literal[string] ]= identifier[secondary_files]
keyword[elif] identifier[inp_binding] :
identifier[nested_inp_binding] = identifier[copy] . identifier[deepcopy] ( identifier[inp_binding] )
identifier[nested_inp_binding] [ literal[string] ]= literal[string]
identifier[nested_inp_binding] [ literal[string] ]= identifier[secondary_files]
identifier[inp_tool] = identifier[tz] . identifier[update_in] ( identifier[inp_tool] , identifier[key] , keyword[lambda] identifier[x] : identifier[nested_inp_binding] )
keyword[return] identifier[inp_tool] | def _place_secondary_files(inp_tool, inp_binding=None):
"""Put secondaryFiles at the level of the File item to ensure indexes get passed.
"""
def _is_file(val):
        return val == 'File' or (isinstance(val, (list, tuple)) and ('File' in val or any(isinstance(x, dict) and _is_file(x) for x in val)))
secondary_files = inp_tool.pop('secondaryFiles', None)
if secondary_files:
key = []
while not _is_file(tz.get_in(key + ['type'], inp_tool)) and (not _is_file(tz.get_in(key + ['items'], inp_tool))) and (not _is_file(tz.get_in(key + ['items', 'items'], inp_tool))):
key.append('type') # depends on [control=['while'], data=[]]
if tz.get_in(key, inp_tool):
inp_tool['secondaryFiles'] = secondary_files # depends on [control=['if'], data=[]]
elif inp_binding:
nested_inp_binding = copy.deepcopy(inp_binding)
nested_inp_binding['prefix'] = 'ignore='
nested_inp_binding['secondaryFiles'] = secondary_files
inp_tool = tz.update_in(inp_tool, key, lambda x: nested_inp_binding) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return inp_tool |
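The key-path walk above rests on two toolz primitives; this standalone snippet shows just those primitives, under the same `import toolz as tz` convention the module assumes, on a miniature input dict.

import toolz as tz

inp = {"type": {"type": "array", "items": "File"}}
print(tz.get_in(["type", "items"], inp))  # -> 'File'
# update_in rebuilds the nested dict, applying the function at the key path --
# here wrapping the File leaf so secondaryFiles sits at the File level
print(tz.update_in(inp, ["type", "items"],
                   lambda v: {"type": v, "secondaryFiles": [".bai"]}))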
def __run_py_file(self, filepath, module_name):
"""Execute the python file at the passed path
Parameters
----------
filepath: str
the path of the file to execute
module_name: str
the name of the python module
"""
# Import the module
spec = importlib.util.spec_from_file_location(module_name, filepath)
delta_py = importlib.util.module_from_spec(spec)
spec.loader.exec_module(delta_py)
# Get the python file's directory path
# Note: we add a separator for backward compatibility, as existing DeltaPy subclasses
# may assume that delta_dir ends with a separator
dir_ = dirname(filepath) + os.sep
# Search for subclasses of DeltaPy
for name in dir(delta_py):
obj = getattr(delta_py, name)
if inspect.isclass(obj) and not obj == DeltaPy and issubclass(
obj, DeltaPy):
delta_py_inst = obj(
self.current_db_version(), dir_, self.dirs, self.pg_service,
self.upgrades_table, variables=self.variables)
delta_py_inst.run() | def function[__run_py_file, parameter[self, filepath, module_name]]:
constant[Execute the python file at the passed path
Parameters
----------
filepath: str
the path of the file to execute
module_name: str
the name of the python module
]
variable[spec] assign[=] call[name[importlib].util.spec_from_file_location, parameter[name[module_name], name[filepath]]]
variable[delta_py] assign[=] call[name[importlib].util.module_from_spec, parameter[name[spec]]]
call[name[spec].loader.exec_module, parameter[name[delta_py]]]
variable[dir_] assign[=] binary_operation[call[name[dirname], parameter[name[filepath]]] + name[os].sep]
for taget[name[name]] in starred[call[name[dir], parameter[name[delta_py]]]] begin[:]
variable[obj] assign[=] call[name[getattr], parameter[name[delta_py], name[name]]]
if <ast.BoolOp object at 0x7da20c9910f0> begin[:]
variable[delta_py_inst] assign[=] call[name[obj], parameter[call[name[self].current_db_version, parameter[]], name[dir_], name[self].dirs, name[self].pg_service, name[self].upgrades_table]]
call[name[delta_py_inst].run, parameter[]] | keyword[def] identifier[__run_py_file] ( identifier[self] , identifier[filepath] , identifier[module_name] ):
literal[string]
identifier[spec] = identifier[importlib] . identifier[util] . identifier[spec_from_file_location] ( identifier[module_name] , identifier[filepath] )
identifier[delta_py] = identifier[importlib] . identifier[util] . identifier[module_from_spec] ( identifier[spec] )
identifier[spec] . identifier[loader] . identifier[exec_module] ( identifier[delta_py] )
identifier[dir_] = identifier[dirname] ( identifier[filepath] )+ identifier[os] . identifier[sep]
keyword[for] identifier[name] keyword[in] identifier[dir] ( identifier[delta_py] ):
identifier[obj] = identifier[getattr] ( identifier[delta_py] , identifier[name] )
keyword[if] identifier[inspect] . identifier[isclass] ( identifier[obj] ) keyword[and] keyword[not] identifier[obj] == identifier[DeltaPy] keyword[and] identifier[issubclass] (
identifier[obj] , identifier[DeltaPy] ):
identifier[delta_py_inst] = identifier[obj] (
identifier[self] . identifier[current_db_version] (), identifier[dir_] , identifier[self] . identifier[dirs] , identifier[self] . identifier[pg_service] ,
identifier[self] . identifier[upgrades_table] , identifier[variables] = identifier[self] . identifier[variables] )
identifier[delta_py_inst] . identifier[run] () | def __run_py_file(self, filepath, module_name):
"""Execute the python file at the passed path
Parameters
----------
filepath: str
the path of the file to execute
module_name: str
the name of the python module
"""
# Import the module
spec = importlib.util.spec_from_file_location(module_name, filepath)
delta_py = importlib.util.module_from_spec(spec)
spec.loader.exec_module(delta_py)
# Get the python file's directory path
# Note: we add a separator for backward compatibility, as existing DeltaPy subclasses
# may assume that delta_dir ends with a separator
dir_ = dirname(filepath) + os.sep
# Search for subclasses of DeltaPy
for name in dir(delta_py):
obj = getattr(delta_py, name)
if inspect.isclass(obj) and (not obj == DeltaPy) and issubclass(obj, DeltaPy):
delta_py_inst = obj(self.current_db_version(), dir_, self.dirs, self.pg_service, self.upgrades_table, variables=self.variables)
delta_py_inst.run() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']] |
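The spec-based import used above is standard-library machinery and reusable on its own; a minimal standalone sketch (the file path and module name are hypothetical):

import importlib.util
import inspect

def load_py(filepath, module_name):
    # Same three steps: build a spec, materialize a module, execute it.
    spec = importlib.util.spec_from_file_location(module_name, filepath)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# mod = load_py('/path/to/delta_001.py', 'delta_001')   # hypothetical path
# classes = [obj for _, obj in inspect.getmembers(mod, inspect.isclass)]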
def from_spacegroup(cls, sg, lattice, species, coords, site_properties=None,
coords_are_cartesian=False, tol=1e-5):
"""
Generate a structure using a spacegroup. Note that only symmetrically
distinct species and coords should be provided. All equivalent sites
are generated from the spacegroup operations.
Args:
sg (str/int): The spacegroup. If a string, it will be interpreted
as one of the notations supported by
pymatgen.symmetry.groups.Spacegroup. E.g., "R-3c" or "Fm-3m".
If an int, it will be interpreted as an international number.
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
Note that no attempt is made to check that the lattice is
compatible with the spacegroup specified. This may be
introduced in a future version.
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
tol (float): A fractional tolerance to deal with numerical
precision issues in determining if orbits are the same.
"""
from pymatgen.symmetry.groups import SpaceGroup
try:
i = int(sg)
sgp = SpaceGroup.from_int_number(i)
except ValueError:
sgp = SpaceGroup(sg)
if isinstance(lattice, Lattice):
latt = lattice
else:
latt = Lattice(lattice)
if not sgp.is_compatible(latt):
raise ValueError(
"Supplied lattice with parameters %s is incompatible with "
"supplied spacegroup %s!" % (latt.lengths_and_angles,
sgp.symbol)
)
if len(species) != len(coords):
raise ValueError(
"Supplied species and coords lengths (%d vs %d) are "
"different!" % (len(species), len(coords))
)
    # Use `latt` (always a Lattice instance) rather than the raw `lattice`
    # argument, which may be a bare 3x3 array; dtype=float replaces the
    # removed np.float alias.
    frac_coords = np.array(coords, dtype=float) \
        if not coords_are_cartesian else \
        latt.get_fractional_coords(coords)
props = {} if site_properties is None else site_properties
all_sp = []
all_coords = []
all_site_properties = collections.defaultdict(list)
for i, (sp, c) in enumerate(zip(species, frac_coords)):
cc = sgp.get_orbit(c, tol=tol)
all_sp.extend([sp] * len(cc))
all_coords.extend(cc)
for k, v in props.items():
all_site_properties[k].extend([v[i]] * len(cc))
return cls(latt, all_sp, all_coords,
site_properties=all_site_properties) | def function[from_spacegroup, parameter[cls, sg, lattice, species, coords, site_properties, coords_are_cartesian, tol]]:
constant[
Generate a structure using a spacegroup. Note that only symmetrically
distinct species and coords should be provided. All equivalent sites
are generated from the spacegroup operations.
Args:
sg (str/int): The spacegroup. If a string, it will be interpreted
as one of the notations supported by
pymatgen.symmetry.groups.Spacegroup. E.g., "R-3c" or "Fm-3m".
If an int, it will be interpreted as an international number.
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
Note that no attempt is made to check that the lattice is
compatible with the spacegroup specified. This may be
introduced in a future version.
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
tol (float): A fractional tolerance to deal with numerical
precision issues in determining if orbits are the same.
]
from relative_module[pymatgen.symmetry.groups] import module[SpaceGroup]
<ast.Try object at 0x7da18bc706a0>
if call[name[isinstance], parameter[name[lattice], name[Lattice]]] begin[:]
variable[latt] assign[=] name[lattice]
if <ast.UnaryOp object at 0x7da18bc71d50> begin[:]
<ast.Raise object at 0x7da18bc71c00>
if compare[call[name[len], parameter[name[species]]] not_equal[!=] call[name[len], parameter[name[coords]]]] begin[:]
<ast.Raise object at 0x7da18bc724a0>
variable[frac_coords] assign[=] <ast.IfExp object at 0x7da18bc70250>
variable[props] assign[=] <ast.IfExp object at 0x7da18bc73700>
variable[all_sp] assign[=] list[[]]
variable[all_coords] assign[=] list[[]]
variable[all_site_properties] assign[=] call[name[collections].defaultdict, parameter[name[list]]]
for taget[tuple[[<ast.Name object at 0x7da18bc71840>, <ast.Tuple object at 0x7da18bc70b80>]]] in starred[call[name[enumerate], parameter[call[name[zip], parameter[name[species], name[frac_coords]]]]]] begin[:]
variable[cc] assign[=] call[name[sgp].get_orbit, parameter[name[c]]]
call[name[all_sp].extend, parameter[binary_operation[list[[<ast.Name object at 0x7da18bc71420>]] * call[name[len], parameter[name[cc]]]]]]
call[name[all_coords].extend, parameter[name[cc]]]
for taget[tuple[[<ast.Name object at 0x7da18bc71ae0>, <ast.Name object at 0x7da18bc721a0>]]] in starred[call[name[props].items, parameter[]]] begin[:]
call[call[name[all_site_properties]][name[k]].extend, parameter[binary_operation[list[[<ast.Subscript object at 0x7da20c6c5060>]] * call[name[len], parameter[name[cc]]]]]]
return[call[name[cls], parameter[name[latt], name[all_sp], name[all_coords]]]] | keyword[def] identifier[from_spacegroup] ( identifier[cls] , identifier[sg] , identifier[lattice] , identifier[species] , identifier[coords] , identifier[site_properties] = keyword[None] ,
identifier[coords_are_cartesian] = keyword[False] , identifier[tol] = literal[int] ):
literal[string]
keyword[from] identifier[pymatgen] . identifier[symmetry] . identifier[groups] keyword[import] identifier[SpaceGroup]
keyword[try] :
identifier[i] = identifier[int] ( identifier[sg] )
identifier[sgp] = identifier[SpaceGroup] . identifier[from_int_number] ( identifier[i] )
keyword[except] identifier[ValueError] :
identifier[sgp] = identifier[SpaceGroup] ( identifier[sg] )
keyword[if] identifier[isinstance] ( identifier[lattice] , identifier[Lattice] ):
identifier[latt] = identifier[lattice]
keyword[else] :
identifier[latt] = identifier[Lattice] ( identifier[lattice] )
keyword[if] keyword[not] identifier[sgp] . identifier[is_compatible] ( identifier[latt] ):
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] %( identifier[latt] . identifier[lengths_and_angles] ,
identifier[sgp] . identifier[symbol] )
)
keyword[if] identifier[len] ( identifier[species] )!= identifier[len] ( identifier[coords] ):
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] %( identifier[len] ( identifier[species] ), identifier[len] ( identifier[coords] ))
)
identifier[frac_coords] = identifier[np] . identifier[array] ( identifier[coords] , identifier[dtype] = identifier[np] . identifier[float] ) keyword[if] keyword[not] identifier[coords_are_cartesian] keyword[else] identifier[lattice] . identifier[get_fractional_coords] ( identifier[coords] )
identifier[props] ={} keyword[if] identifier[site_properties] keyword[is] keyword[None] keyword[else] identifier[site_properties]
identifier[all_sp] =[]
identifier[all_coords] =[]
identifier[all_site_properties] = identifier[collections] . identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[i] ,( identifier[sp] , identifier[c] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[species] , identifier[frac_coords] )):
identifier[cc] = identifier[sgp] . identifier[get_orbit] ( identifier[c] , identifier[tol] = identifier[tol] )
identifier[all_sp] . identifier[extend] ([ identifier[sp] ]* identifier[len] ( identifier[cc] ))
identifier[all_coords] . identifier[extend] ( identifier[cc] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[props] . identifier[items] ():
identifier[all_site_properties] [ identifier[k] ]. identifier[extend] ([ identifier[v] [ identifier[i] ]]* identifier[len] ( identifier[cc] ))
keyword[return] identifier[cls] ( identifier[latt] , identifier[all_sp] , identifier[all_coords] ,
identifier[site_properties] = identifier[all_site_properties] ) | def from_spacegroup(cls, sg, lattice, species, coords, site_properties=None, coords_are_cartesian=False, tol=1e-05):
"""
Generate a structure using a spacegroup. Note that only symmetrically
distinct species and coords should be provided. All equivalent sites
are generated from the spacegroup operations.
Args:
sg (str/int): The spacegroup. If a string, it will be interpreted
as one of the notations supported by
pymatgen.symmetry.groups.Spacegroup. E.g., "R-3c" or "Fm-3m".
If an int, it will be interpreted as an international number.
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
Note that no attempt is made to check that the lattice is
compatible with the spacegroup specified. This may be
introduced in a future version.
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
tol (float): A fractional tolerance to deal with numerical
precision issues in determining if orbits are the same.
"""
from pymatgen.symmetry.groups import SpaceGroup
try:
i = int(sg)
sgp = SpaceGroup.from_int_number(i) # depends on [control=['try'], data=[]]
except ValueError:
sgp = SpaceGroup(sg) # depends on [control=['except'], data=[]]
if isinstance(lattice, Lattice):
latt = lattice # depends on [control=['if'], data=[]]
else:
latt = Lattice(lattice)
if not sgp.is_compatible(latt):
raise ValueError('Supplied lattice with parameters %s is incompatible with supplied spacegroup %s!' % (latt.lengths_and_angles, sgp.symbol)) # depends on [control=['if'], data=[]]
if len(species) != len(coords):
raise ValueError('Supplied species and coords lengths (%d vs %d) are different!' % (len(species), len(coords))) # depends on [control=['if'], data=[]]
    frac_coords = np.array(coords, dtype=float) if not coords_are_cartesian else latt.get_fractional_coords(coords)
props = {} if site_properties is None else site_properties
all_sp = []
all_coords = []
all_site_properties = collections.defaultdict(list)
for (i, (sp, c)) in enumerate(zip(species, frac_coords)):
cc = sgp.get_orbit(c, tol=tol)
all_sp.extend([sp] * len(cc))
all_coords.extend(cc)
for (k, v) in props.items():
all_site_properties[k].extend([v[i]] * len(cc)) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return cls(latt, all_sp, all_coords, site_properties=all_site_properties) |
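Assuming this classmethod is the one bound as pymatgen's Structure.from_spacegroup, the canonical rock-salt example reads as follows (the lattice constant is illustrative):

from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure

# Two symmetrically distinct sites expand under Fm-3m to the 8-atom cell.
nacl = Structure.from_spacegroup(
    "Fm-3m", Lattice.cubic(5.692),
    ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
print(len(nacl))  # 8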
def build_input_pipeline(x, y, batch_size):
"""Build a Dataset iterator for supervised classification.
Args:
x: Numpy `array` of features, indexed by the first dimension.
y: Numpy `array` of labels, with the same first dimension as `x`.
batch_size: Number of elements in each training batch.
Returns:
    batch_features: `Tensor` feed of features, of shape
      `[batch_size] + x.shape[1:]`.
    batch_labels: `Tensor` feed of labels, of shape
      `[batch_size] + y.shape[1:]`.
"""
training_dataset = tf.data.Dataset.from_tensor_slices((x, y))
training_batches = training_dataset.repeat().batch(batch_size)
training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)
batch_features, batch_labels = training_iterator.get_next()
return batch_features, batch_labels | def function[build_input_pipeline, parameter[x, y, batch_size]]:
constant[Build a Dataset iterator for supervised classification.
Args:
x: Numpy `array` of features, indexed by the first dimension.
y: Numpy `array` of labels, with the same first dimension as `x`.
batch_size: Number of elements in each training batch.
Returns:
batch_features: `Tensor` feed features, of shape
`[batch_size] + x.shape[1:]`.
batch_labels: `Tensor` feed of labels, of shape
`[batch_size] + y.shape[1:]`.
]
variable[training_dataset] assign[=] call[name[tf].data.Dataset.from_tensor_slices, parameter[tuple[[<ast.Name object at 0x7da1b034a8c0>, <ast.Name object at 0x7da1b0348310>]]]]
variable[training_batches] assign[=] call[call[name[training_dataset].repeat, parameter[]].batch, parameter[name[batch_size]]]
variable[training_iterator] assign[=] call[name[tf].compat.v1.data.make_one_shot_iterator, parameter[name[training_batches]]]
<ast.Tuple object at 0x7da18f00ed40> assign[=] call[name[training_iterator].get_next, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b0348be0>, <ast.Name object at 0x7da1b034ad70>]]] | keyword[def] identifier[build_input_pipeline] ( identifier[x] , identifier[y] , identifier[batch_size] ):
literal[string]
identifier[training_dataset] = identifier[tf] . identifier[data] . identifier[Dataset] . identifier[from_tensor_slices] (( identifier[x] , identifier[y] ))
identifier[training_batches] = identifier[training_dataset] . identifier[repeat] (). identifier[batch] ( identifier[batch_size] )
identifier[training_iterator] = identifier[tf] . identifier[compat] . identifier[v1] . identifier[data] . identifier[make_one_shot_iterator] ( identifier[training_batches] )
identifier[batch_features] , identifier[batch_labels] = identifier[training_iterator] . identifier[get_next] ()
keyword[return] identifier[batch_features] , identifier[batch_labels] | def build_input_pipeline(x, y, batch_size):
"""Build a Dataset iterator for supervised classification.
Args:
x: Numpy `array` of features, indexed by the first dimension.
y: Numpy `array` of labels, with the same first dimension as `x`.
batch_size: Number of elements in each training batch.
Returns:
batch_features: `Tensor` feed features, of shape
`[batch_size] + x.shape[1:]`.
batch_labels: `Tensor` feed of labels, of shape
`[batch_size] + y.shape[1:]`.
"""
training_dataset = tf.data.Dataset.from_tensor_slices((x, y))
training_batches = training_dataset.repeat().batch(batch_size)
training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)
(batch_features, batch_labels) = training_iterator.get_next()
return (batch_features, batch_labels) |
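Because the function returns TF1-style graph tensors (note the tf.compat.v1 one-shot iterator), a usage sketch needs graph mode; the data below is synthetic.

import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # assumed: running under a TF2 runtime
x = np.random.rand(100, 3).astype(np.float32)
y = np.random.randint(0, 2, size=100).astype(np.int32)
features, labels = build_input_pipeline(x, y, batch_size=32)
with tf.compat.v1.Session() as sess:
    fx, fy = sess.run([features, labels])  # one batch: shapes (32, 3) and (32,)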
def get_versioned_references_for(self, instance):
"""Returns the versioned references for the given instance
"""
vrefs = []
# Retrieve the referenced objects
refs = instance.getRefs(relationship=self.relationship)
ref_versions = getattr(instance, REFERENCE_VERSIONS, None)
# No versions stored, return the original references
if ref_versions is None:
return refs
for ref in refs:
uid = api.get_uid(ref)
# get the linked version to the reference
version = ref_versions.get(uid)
# append the versioned reference
vrefs.append(self.retrieve_version(ref, version))
return vrefs | def function[get_versioned_references_for, parameter[self, instance]]:
constant[Returns the versioned references for the given instance
]
variable[vrefs] assign[=] list[[]]
variable[refs] assign[=] call[name[instance].getRefs, parameter[]]
variable[ref_versions] assign[=] call[name[getattr], parameter[name[instance], name[REFERENCE_VERSIONS], constant[None]]]
if compare[name[ref_versions] is constant[None]] begin[:]
return[name[refs]]
for taget[name[ref]] in starred[name[refs]] begin[:]
variable[uid] assign[=] call[name[api].get_uid, parameter[name[ref]]]
variable[version] assign[=] call[name[ref_versions].get, parameter[name[uid]]]
call[name[vrefs].append, parameter[call[name[self].retrieve_version, parameter[name[ref], name[version]]]]]
return[name[vrefs]] | keyword[def] identifier[get_versioned_references_for] ( identifier[self] , identifier[instance] ):
literal[string]
identifier[vrefs] =[]
identifier[refs] = identifier[instance] . identifier[getRefs] ( identifier[relationship] = identifier[self] . identifier[relationship] )
identifier[ref_versions] = identifier[getattr] ( identifier[instance] , identifier[REFERENCE_VERSIONS] , keyword[None] )
keyword[if] identifier[ref_versions] keyword[is] keyword[None] :
keyword[return] identifier[refs]
keyword[for] identifier[ref] keyword[in] identifier[refs] :
identifier[uid] = identifier[api] . identifier[get_uid] ( identifier[ref] )
identifier[version] = identifier[ref_versions] . identifier[get] ( identifier[uid] )
identifier[vrefs] . identifier[append] ( identifier[self] . identifier[retrieve_version] ( identifier[ref] , identifier[version] ))
keyword[return] identifier[vrefs] | def get_versioned_references_for(self, instance):
"""Returns the versioned references for the given instance
"""
vrefs = []
# Retrieve the referenced objects
refs = instance.getRefs(relationship=self.relationship)
ref_versions = getattr(instance, REFERENCE_VERSIONS, None)
# No versions stored, return the original references
if ref_versions is None:
return refs # depends on [control=['if'], data=[]]
for ref in refs:
uid = api.get_uid(ref)
# get the linked version to the reference
version = ref_versions.get(uid)
# append the versioned reference
vrefs.append(self.retrieve_version(ref, version)) # depends on [control=['for'], data=['ref']]
return vrefs |
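What the method consumes is a UID-to-version mapping stored on the instance under REFERENCE_VERSIONS; a hedged sketch of the call and the expected stored shape (names and values illustrative):

# Assumed: `adapter` is an instance of the class defining this method, with
# `relationship` set and `retrieve_version(obj, version)` implemented.
# getattr(instance, REFERENCE_VERSIONS) would look roughly like:
#     {'a1b2c3...uid': 3, 'd4e5f6...uid': 1}
vrefs = adapter.get_versioned_references_for(instance)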
def sync_mptt_tree_fields_from_draft_to_published(
draft_copy, dry_run=False, force_update_cached_urls=False):
"""
Sync tree structure changes from a draft publishable object to its
published copy, and updates the published copy's Fluent cached URLs when
necessary. Or simulates doing this if ``dry_run`` is ``True``.
Syncs both actual structural changes (i.e. different parent) and MPTT's
fields which are a cached representation (and may or may not be correct).
"""
mptt_opts = getattr(draft_copy, '_mptt_meta', None)
published_copy = getattr(draft_copy, 'publishing_linked', None)
if not mptt_opts or not published_copy:
return {}
# Identify changed values and prepare dict of changes to apply to DB
parent_changed = draft_copy.parent != published_copy.parent
update_kwargs = {
mptt_opts.parent_attr: draft_copy._mpttfield('parent'),
mptt_opts.tree_id_attr: draft_copy._mpttfield('tree_id'),
mptt_opts.left_attr: draft_copy._mpttfield('left'),
mptt_opts.right_attr: draft_copy._mpttfield('right'),
mptt_opts.level_attr: draft_copy._mpttfield('level'),
}
# Strip out DB update entries for unchanged or invalid tree fields
update_kwargs = dict(
(field, value) for field, value in update_kwargs.items()
if getattr(draft_copy, field) != getattr(published_copy, field)
# Only parent may be None, never set tree_id/left/right/level to None
and not (field != 'parent' and value is None)
)
change_report = []
for field, new_value in update_kwargs.items():
old_value = getattr(published_copy, field)
change_report.append((draft_copy, field, old_value, new_value))
# Forcibly update MPTT field values via UPDATE commands instead of normal
# model attr changes, which MPTT ignores when you `save`
if update_kwargs and not dry_run:
type(published_copy).objects.filter(pk=published_copy.pk).update(
**update_kwargs)
# If real tree structure (not just MPTT fields) has changed we must
# regenerate the cached URLs for published copy translations.
if parent_changed or force_update_cached_urls:
# Make our local published obj aware of DB change made by `update`
published_copy.parent = draft_copy.parent
# Regenerate the cached URLs for published copy translations.
change_report += \
update_fluent_cached_urls(published_copy, dry_run=dry_run)
return change_report | def function[sync_mptt_tree_fields_from_draft_to_published, parameter[draft_copy, dry_run, force_update_cached_urls]]:
constant[
Sync tree structure changes from a draft publishable object to its
published copy, and updates the published copy's Fluent cached URLs when
necessary. Or simulates doing this if ``dry_run`` is ``True``.
Syncs both actual structural changes (i.e. different parent) and MPTT's
fields which are a cached representation (and may or may not be correct).
]
variable[mptt_opts] assign[=] call[name[getattr], parameter[name[draft_copy], constant[_mptt_meta], constant[None]]]
variable[published_copy] assign[=] call[name[getattr], parameter[name[draft_copy], constant[publishing_linked], constant[None]]]
if <ast.BoolOp object at 0x7da204566b60> begin[:]
return[dictionary[[], []]]
variable[parent_changed] assign[=] compare[name[draft_copy].parent not_equal[!=] name[published_copy].parent]
variable[update_kwargs] assign[=] dictionary[[<ast.Attribute object at 0x7da204566da0>, <ast.Attribute object at 0x7da204565c00>, <ast.Attribute object at 0x7da2045641c0>, <ast.Attribute object at 0x7da204566a10>, <ast.Attribute object at 0x7da204566e90>], [<ast.Call object at 0x7da204564160>, <ast.Call object at 0x7da204567b20>, <ast.Call object at 0x7da2045640d0>, <ast.Call object at 0x7da204566f50>, <ast.Call object at 0x7da204566350>]]
variable[update_kwargs] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da204567400>]]
variable[change_report] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da204567df0>, <ast.Name object at 0x7da2045640a0>]]] in starred[call[name[update_kwargs].items, parameter[]]] begin[:]
variable[old_value] assign[=] call[name[getattr], parameter[name[published_copy], name[field]]]
call[name[change_report].append, parameter[tuple[[<ast.Name object at 0x7da2045660e0>, <ast.Name object at 0x7da204565000>, <ast.Name object at 0x7da204566470>, <ast.Name object at 0x7da204566500>]]]]
if <ast.BoolOp object at 0x7da204565330> begin[:]
call[call[call[name[type], parameter[name[published_copy]]].objects.filter, parameter[]].update, parameter[]]
if <ast.BoolOp object at 0x7da204564c10> begin[:]
name[published_copy].parent assign[=] name[draft_copy].parent
<ast.AugAssign object at 0x7da2045651b0>
return[name[change_report]] | keyword[def] identifier[sync_mptt_tree_fields_from_draft_to_published] (
identifier[draft_copy] , identifier[dry_run] = keyword[False] , identifier[force_update_cached_urls] = keyword[False] ):
literal[string]
identifier[mptt_opts] = identifier[getattr] ( identifier[draft_copy] , literal[string] , keyword[None] )
identifier[published_copy] = identifier[getattr] ( identifier[draft_copy] , literal[string] , keyword[None] )
keyword[if] keyword[not] identifier[mptt_opts] keyword[or] keyword[not] identifier[published_copy] :
keyword[return] {}
identifier[parent_changed] = identifier[draft_copy] . identifier[parent] != identifier[published_copy] . identifier[parent]
identifier[update_kwargs] ={
identifier[mptt_opts] . identifier[parent_attr] : identifier[draft_copy] . identifier[_mpttfield] ( literal[string] ),
identifier[mptt_opts] . identifier[tree_id_attr] : identifier[draft_copy] . identifier[_mpttfield] ( literal[string] ),
identifier[mptt_opts] . identifier[left_attr] : identifier[draft_copy] . identifier[_mpttfield] ( literal[string] ),
identifier[mptt_opts] . identifier[right_attr] : identifier[draft_copy] . identifier[_mpttfield] ( literal[string] ),
identifier[mptt_opts] . identifier[level_attr] : identifier[draft_copy] . identifier[_mpttfield] ( literal[string] ),
}
identifier[update_kwargs] = identifier[dict] (
( identifier[field] , identifier[value] ) keyword[for] identifier[field] , identifier[value] keyword[in] identifier[update_kwargs] . identifier[items] ()
keyword[if] identifier[getattr] ( identifier[draft_copy] , identifier[field] )!= identifier[getattr] ( identifier[published_copy] , identifier[field] )
keyword[and] keyword[not] ( identifier[field] != literal[string] keyword[and] identifier[value] keyword[is] keyword[None] )
)
identifier[change_report] =[]
keyword[for] identifier[field] , identifier[new_value] keyword[in] identifier[update_kwargs] . identifier[items] ():
identifier[old_value] = identifier[getattr] ( identifier[published_copy] , identifier[field] )
identifier[change_report] . identifier[append] (( identifier[draft_copy] , identifier[field] , identifier[old_value] , identifier[new_value] ))
keyword[if] identifier[update_kwargs] keyword[and] keyword[not] identifier[dry_run] :
identifier[type] ( identifier[published_copy] ). identifier[objects] . identifier[filter] ( identifier[pk] = identifier[published_copy] . identifier[pk] ). identifier[update] (
** identifier[update_kwargs] )
keyword[if] identifier[parent_changed] keyword[or] identifier[force_update_cached_urls] :
identifier[published_copy] . identifier[parent] = identifier[draft_copy] . identifier[parent]
identifier[change_report] += identifier[update_fluent_cached_urls] ( identifier[published_copy] , identifier[dry_run] = identifier[dry_run] )
keyword[return] identifier[change_report] | def sync_mptt_tree_fields_from_draft_to_published(draft_copy, dry_run=False, force_update_cached_urls=False):
"""
Sync tree structure changes from a draft publishable object to its
published copy, and updates the published copy's Fluent cached URLs when
necessary. Or simulates doing this if ``dry_run`` is ``True``.
Syncs both actual structural changes (i.e. different parent) and MPTT's
fields which are a cached representation (and may or may not be correct).
"""
mptt_opts = getattr(draft_copy, '_mptt_meta', None)
published_copy = getattr(draft_copy, 'publishing_linked', None)
if not mptt_opts or not published_copy:
return {} # depends on [control=['if'], data=[]]
# Identify changed values and prepare dict of changes to apply to DB
parent_changed = draft_copy.parent != published_copy.parent
update_kwargs = {mptt_opts.parent_attr: draft_copy._mpttfield('parent'), mptt_opts.tree_id_attr: draft_copy._mpttfield('tree_id'), mptt_opts.left_attr: draft_copy._mpttfield('left'), mptt_opts.right_attr: draft_copy._mpttfield('right'), mptt_opts.level_attr: draft_copy._mpttfield('level')}
# Strip out DB update entries for unchanged or invalid tree fields
# Only parent may be None, never set tree_id/left/right/level to None
update_kwargs = dict(((field, value) for (field, value) in update_kwargs.items() if getattr(draft_copy, field) != getattr(published_copy, field) and (not (field != 'parent' and value is None))))
change_report = []
for (field, new_value) in update_kwargs.items():
old_value = getattr(published_copy, field)
change_report.append((draft_copy, field, old_value, new_value)) # depends on [control=['for'], data=[]]
# Forcibly update MPTT field values via UPDATE commands instead of normal
# model attr changes, which MPTT ignores when you `save`
if update_kwargs and (not dry_run):
type(published_copy).objects.filter(pk=published_copy.pk).update(**update_kwargs) # depends on [control=['if'], data=[]]
# If real tree structure (not just MPTT fields) has changed we must
# regenerate the cached URLs for published copy translations.
if parent_changed or force_update_cached_urls:
# Make our local published obj aware of DB change made by `update`
published_copy.parent = draft_copy.parent
# Regenerate the cached URLs for published copy translations.
change_report += update_fluent_cached_urls(published_copy, dry_run=dry_run) # depends on [control=['if'], data=[]]
return change_report |
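A dry-run sketch: `page` is a hypothetical draft instance of an MPTT-backed publishable model. The loop assumes the (object, field, old, new) 4-tuples this function emits itself; entries appended by update_fluent_cached_urls may be shaped differently.

report = sync_mptt_tree_fields_from_draft_to_published(page, dry_run=True)
for obj, field, old_value, new_value in report:
    print(obj.pk, field, old_value, '->', new_value)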
def assemble_oligos(dna_list, reference=None):
'''Given a list of DNA sequences, assemble into a single construct.
:param dna_list: List of DNA sequences - they must be single-stranded.
:type dna_list: coral.DNA list
:param reference: Expected sequence - once assembly completed, this will
be used to reorient the DNA (assembly could potentially occur from either
side of a linear DNA construct if oligos are in a random order). If this
fails, an AssemblyError is raised.
:type reference: coral.DNA
:raises: AssemblyError if it can't assemble for any reason.
:returns: A single assembled DNA sequence
:rtype: coral.DNA
'''
# FIXME: this protocol currently only supports 5' ends on the assembly
# Find all matches for every oligo. If more than 2 per side, error.
# Self-oligo is included in case the 3' end is self-complementary.
# 1) Find all unique 3' binders (and non-binders).
match_3 = [bind_unique(seq, dna_list, right=True) for i, seq in
enumerate(dna_list)]
# 2) Find all unique 5' binders (and non-binders).
match_5 = [bind_unique(seq, dna_list, right=False) for i, seq in
enumerate(dna_list)]
# Assemble into 2-tuple
    zipped = list(zip(match_5, match_3))  # materialize: re-iterated and indexed below
# 3) If none found, error out with 'oligo n has no binders'
for i, oligo_match in enumerate(zipped):
if not any(oligo_match):
error = 'Oligo {} has no binding partners.'.format(i + 1)
raise AssemblyError(error)
# 4) There should be exactly 2 oligos that bind at 3' end but
# not 5'.
ends = []
for i, (five, three) in enumerate(zipped):
if five is None and three is not None:
ends.append(i)
# 5) If more than 2, error with 'too many ends'.
if len(ends) > 2:
raise AssemblyError('Too many (>2) end oligos found.')
    # 5) If more than 2, error with 'too many ends'.
if len(ends) < 2:
raise AssemblyError('Not enough (<2) end oligos found.')
# NOTE:If 1-4 are satisfied, unique linear assembly has been found (proof?)
# 8) Start with first end and build iteratively
last_index = ends[0]
assembly = dna_list[last_index]
flip = True
# This would be slightly less complicated if the sequences were tied to
# their match info in a tuple
# Append next region n - 1 times
for i in range(len(dna_list) - 1):
if flip:
# Next oligo needs to be flipped before concatenation
# Grab 3' match from last oligo's info
current_index, matchlen = zipped[last_index][1]
# Get new oligo sequence, make double-stranded for concatenation
next_oligo = dna_list[current_index].to_ds()
# Reverse complement for concatenation
next_oligo = next_oligo.reverse_complement()
# Don't reverse complement the next one
flip = False
else:
# Grab 5' match from last oligo's info
current_index, matchlen = zipped[last_index][0]
# Get new oligo sequence, make double-stranded for concatenation
next_oligo = dna_list[current_index].to_ds()
# Reverse complement the next one
flip = True
# Trim overlap from new sequence
next_oligo = next_oligo[(matchlen - 1):]
# Concatenate and update last oligo's information
assembly += next_oligo
last_index = current_index
if reference:
if assembly == reference or assembly == reference.reverse_complement():
return assembly
else:
raise AssemblyError('Assembly did not match reference')
else:
return assembly | def function[assemble_oligos, parameter[dna_list, reference]]:
constant[Given a list of DNA sequences, assemble into a single construct.
:param dna_list: List of DNA sequences - they must be single-stranded.
:type dna_list: coral.DNA list
:param reference: Expected sequence - once assembly completed, this will
be used to reorient the DNA (assembly could potentially occur from either
side of a linear DNA construct if oligos are in a random order). If this
fails, an AssemblyError is raised.
:type reference: coral.DNA
:raises: AssemblyError if it can't assemble for any reason.
:returns: A single assembled DNA sequence
:rtype: coral.DNA
]
variable[match_3] assign[=] <ast.ListComp object at 0x7da1b06de470>
variable[match_5] assign[=] <ast.ListComp object at 0x7da1b06de710>
variable[zipped] assign[=] call[name[zip], parameter[name[match_5], name[match_3]]]
for taget[tuple[[<ast.Name object at 0x7da1b2344fa0>, <ast.Name object at 0x7da1b23453c0>]]] in starred[call[name[enumerate], parameter[name[zipped]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b2345d50> begin[:]
variable[error] assign[=] call[constant[Oligo {} has no binding partners.].format, parameter[binary_operation[name[i] + constant[1]]]]
<ast.Raise object at 0x7da1b2344400>
variable[ends] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b23472b0>, <ast.Tuple object at 0x7da1b2344370>]]] in starred[call[name[enumerate], parameter[name[zipped]]]] begin[:]
if <ast.BoolOp object at 0x7da1b2344790> begin[:]
call[name[ends].append, parameter[name[i]]]
if compare[call[name[len], parameter[name[ends]]] greater[>] constant[2]] begin[:]
<ast.Raise object at 0x7da1b2344a90>
if compare[call[name[len], parameter[name[ends]]] less[<] constant[2]] begin[:]
<ast.Raise object at 0x7da1b2344ee0>
variable[last_index] assign[=] call[name[ends]][constant[0]]
variable[assembly] assign[=] call[name[dna_list]][name[last_index]]
variable[flip] assign[=] constant[True]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[dna_list]]] - constant[1]]]]] begin[:]
if name[flip] begin[:]
<ast.Tuple object at 0x7da1b23479a0> assign[=] call[call[name[zipped]][name[last_index]]][constant[1]]
variable[next_oligo] assign[=] call[call[name[dna_list]][name[current_index]].to_ds, parameter[]]
variable[next_oligo] assign[=] call[name[next_oligo].reverse_complement, parameter[]]
variable[flip] assign[=] constant[False]
variable[next_oligo] assign[=] call[name[next_oligo]][<ast.Slice object at 0x7da20ed9b820>]
<ast.AugAssign object at 0x7da18f721e70>
variable[last_index] assign[=] name[current_index]
if name[reference] begin[:]
if <ast.BoolOp object at 0x7da1b0545bd0> begin[:]
return[name[assembly]] | keyword[def] identifier[assemble_oligos] ( identifier[dna_list] , identifier[reference] = keyword[None] ):
literal[string]
identifier[match_3] =[ identifier[bind_unique] ( identifier[seq] , identifier[dna_list] , identifier[right] = keyword[True] ) keyword[for] identifier[i] , identifier[seq] keyword[in]
identifier[enumerate] ( identifier[dna_list] )]
identifier[match_5] =[ identifier[bind_unique] ( identifier[seq] , identifier[dna_list] , identifier[right] = keyword[False] ) keyword[for] identifier[i] , identifier[seq] keyword[in]
identifier[enumerate] ( identifier[dna_list] )]
identifier[zipped] = identifier[zip] ( identifier[match_5] , identifier[match_3] )
keyword[for] identifier[i] , identifier[oligo_match] keyword[in] identifier[enumerate] ( identifier[zipped] ):
keyword[if] keyword[not] identifier[any] ( identifier[oligo_match] ):
identifier[error] = literal[string] . identifier[format] ( identifier[i] + literal[int] )
keyword[raise] identifier[AssemblyError] ( identifier[error] )
identifier[ends] =[]
keyword[for] identifier[i] ,( identifier[five] , identifier[three] ) keyword[in] identifier[enumerate] ( identifier[zipped] ):
keyword[if] identifier[five] keyword[is] keyword[None] keyword[and] identifier[three] keyword[is] keyword[not] keyword[None] :
identifier[ends] . identifier[append] ( identifier[i] )
keyword[if] identifier[len] ( identifier[ends] )> literal[int] :
keyword[raise] identifier[AssemblyError] ( literal[string] )
keyword[if] identifier[len] ( identifier[ends] )< literal[int] :
keyword[raise] identifier[AssemblyError] ( literal[string] )
identifier[last_index] = identifier[ends] [ literal[int] ]
identifier[assembly] = identifier[dna_list] [ identifier[last_index] ]
identifier[flip] = keyword[True]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[dna_list] )- literal[int] ):
keyword[if] identifier[flip] :
identifier[current_index] , identifier[matchlen] = identifier[zipped] [ identifier[last_index] ][ literal[int] ]
identifier[next_oligo] = identifier[dna_list] [ identifier[current_index] ]. identifier[to_ds] ()
identifier[next_oligo] = identifier[next_oligo] . identifier[reverse_complement] ()
identifier[flip] = keyword[False]
keyword[else] :
identifier[current_index] , identifier[matchlen] = identifier[zipped] [ identifier[last_index] ][ literal[int] ]
identifier[next_oligo] = identifier[dna_list] [ identifier[current_index] ]. identifier[to_ds] ()
identifier[flip] = keyword[True]
identifier[next_oligo] = identifier[next_oligo] [( identifier[matchlen] - literal[int] ):]
identifier[assembly] += identifier[next_oligo]
identifier[last_index] = identifier[current_index]
keyword[if] identifier[reference] :
keyword[if] identifier[assembly] == identifier[reference] keyword[or] identifier[assembly] == identifier[reference] . identifier[reverse_complement] ():
keyword[return] identifier[assembly]
keyword[else] :
keyword[raise] identifier[AssemblyError] ( literal[string] )
keyword[else] :
keyword[return] identifier[assembly] | def assemble_oligos(dna_list, reference=None):
"""Given a list of DNA sequences, assemble into a single construct.
:param dna_list: List of DNA sequences - they must be single-stranded.
:type dna_list: coral.DNA list
:param reference: Expected sequence - once assembly completed, this will
be used to reorient the DNA (assembly could potentially occur from either
side of a linear DNA construct if oligos are in a random order). If this
fails, an AssemblyError is raised.
:type reference: coral.DNA
:raises: AssemblyError if it can't assemble for any reason.
:returns: A single assembled DNA sequence
:rtype: coral.DNA
"""
# FIXME: this protocol currently only supports 5' ends on the assembly
# Find all matches for every oligo. If more than 2 per side, error.
# Self-oligo is included in case the 3' end is self-complementary.
# 1) Find all unique 3' binders (and non-binders).
match_3 = [bind_unique(seq, dna_list, right=True) for (i, seq) in enumerate(dna_list)]
# 2) Find all unique 5' binders (and non-binders).
match_5 = [bind_unique(seq, dna_list, right=False) for (i, seq) in enumerate(dna_list)]
# Assemble into 2-tuple
zipped = zip(match_5, match_3)
# 3) If none found, error out with 'oligo n has no binders'
for (i, oligo_match) in enumerate(zipped):
if not any(oligo_match):
error = 'Oligo {} has no binding partners.'.format(i + 1)
raise AssemblyError(error) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# 4) There should be exactly 2 oligos that bind at 3' end but
# not 5'.
ends = []
for (i, (five, three)) in enumerate(zipped):
if five is None and three is not None:
ends.append(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# 5) If more than 2, error with 'too many ends'.
if len(ends) > 2:
raise AssemblyError('Too many (>2) end oligos found.') # depends on [control=['if'], data=[]]
    # 6) If fewer than 2, error with 'not enough ends'.
if len(ends) < 2:
raise AssemblyError('Not enough (<2) end oligos found.') # depends on [control=['if'], data=[]]
    # NOTE: If 1-4 are satisfied, a unique linear assembly has been found (proof?)
# 8) Start with first end and build iteratively
last_index = ends[0]
assembly = dna_list[last_index]
flip = True
# This would be slightly less complicated if the sequences were tied to
# their match info in a tuple
# Append next region n - 1 times
for i in range(len(dna_list) - 1):
if flip:
# Next oligo needs to be flipped before concatenation
# Grab 3' match from last oligo's info
(current_index, matchlen) = zipped[last_index][1]
# Get new oligo sequence, make double-stranded for concatenation
next_oligo = dna_list[current_index].to_ds()
# Reverse complement for concatenation
next_oligo = next_oligo.reverse_complement()
# Don't reverse complement the next one
flip = False # depends on [control=['if'], data=[]]
else:
# Grab 5' match from last oligo's info
(current_index, matchlen) = zipped[last_index][0]
# Get new oligo sequence, make double-stranded for concatenation
next_oligo = dna_list[current_index].to_ds()
# Reverse complement the next one
flip = True
# Trim overlap from new sequence
next_oligo = next_oligo[matchlen - 1:]
# Concatenate and update last oligo's information
assembly += next_oligo
last_index = current_index # depends on [control=['for'], data=[]]
if reference:
if assembly == reference or assembly == reference.reverse_complement():
return assembly # depends on [control=['if'], data=[]]
else:
raise AssemblyError('Assembly did not match reference') # depends on [control=['if'], data=[]]
else:
return assembly |
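An aside on the row above: a standalone sketch of its end-detection step, with hypothetical match data. Note that the original indexes `zipped`, which assumes Python 2's list-returning `zip`; on Python 3 it would need `list(zip(...))`.

    # zipped pairs are (match_5, match_3); None means no unique binder on that side.
    zipped = [(None, (1, 20)), ((0, 20), (2, 18)), (None, (1, 18))]
    ends = [i for i, (five, three) in enumerate(zipped)
            if five is None and three is not None]
    assert ends == [0, 2]  # the two terminal oligos of a linear assembly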
def check_migrations_applied(app_configs, **kwargs):
"""
A Django check to see if all migrations have been applied correctly.
"""
from django.db.migrations.loader import MigrationLoader
errors = []
# Load migrations from disk/DB
try:
loader = MigrationLoader(connection, ignore_no_migrations=True)
except (ImproperlyConfigured, ProgrammingError, OperationalError):
msg = "Can't connect to database to check migrations"
return [checks.Info(msg, id=health.INFO_CANT_CHECK_MIGRATIONS)]
if app_configs:
app_labels = [app.label for app in app_configs]
else:
app_labels = loader.migrated_apps
for node, migration in loader.graph.nodes.items():
if migration.app_label not in app_labels:
continue
if node not in loader.applied_migrations:
msg = 'Unapplied migration {}'.format(migration)
# NB: This *must* be a Warning, not an Error, because Errors
# prevent migrations from being run.
errors.append(checks.Warning(msg,
id=health.WARNING_UNAPPLIED_MIGRATION))
return errors | def function[check_migrations_applied, parameter[app_configs]]:
constant[
A Django check to see if all migrations have been applied correctly.
]
from relative_module[django.db.migrations.loader] import module[MigrationLoader]
variable[errors] assign[=] list[[]]
<ast.Try object at 0x7da18eb56920>
if name[app_configs] begin[:]
variable[app_labels] assign[=] <ast.ListComp object at 0x7da1b26af220>
for taget[tuple[[<ast.Name object at 0x7da1b26ae3e0>, <ast.Name object at 0x7da1b26ad030>]]] in starred[call[name[loader].graph.nodes.items, parameter[]]] begin[:]
if compare[name[migration].app_label <ast.NotIn object at 0x7da2590d7190> name[app_labels]] begin[:]
continue
if compare[name[node] <ast.NotIn object at 0x7da2590d7190> name[loader].applied_migrations] begin[:]
variable[msg] assign[=] call[constant[Unapplied migration {}].format, parameter[name[migration]]]
call[name[errors].append, parameter[call[name[checks].Warning, parameter[name[msg]]]]]
return[name[errors]] | keyword[def] identifier[check_migrations_applied] ( identifier[app_configs] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[django] . identifier[db] . identifier[migrations] . identifier[loader] keyword[import] identifier[MigrationLoader]
identifier[errors] =[]
keyword[try] :
identifier[loader] = identifier[MigrationLoader] ( identifier[connection] , identifier[ignore_no_migrations] = keyword[True] )
keyword[except] ( identifier[ImproperlyConfigured] , identifier[ProgrammingError] , identifier[OperationalError] ):
identifier[msg] = literal[string]
keyword[return] [ identifier[checks] . identifier[Info] ( identifier[msg] , identifier[id] = identifier[health] . identifier[INFO_CANT_CHECK_MIGRATIONS] )]
keyword[if] identifier[app_configs] :
identifier[app_labels] =[ identifier[app] . identifier[label] keyword[for] identifier[app] keyword[in] identifier[app_configs] ]
keyword[else] :
identifier[app_labels] = identifier[loader] . identifier[migrated_apps]
keyword[for] identifier[node] , identifier[migration] keyword[in] identifier[loader] . identifier[graph] . identifier[nodes] . identifier[items] ():
keyword[if] identifier[migration] . identifier[app_label] keyword[not] keyword[in] identifier[app_labels] :
keyword[continue]
keyword[if] identifier[node] keyword[not] keyword[in] identifier[loader] . identifier[applied_migrations] :
identifier[msg] = literal[string] . identifier[format] ( identifier[migration] )
identifier[errors] . identifier[append] ( identifier[checks] . identifier[Warning] ( identifier[msg] ,
identifier[id] = identifier[health] . identifier[WARNING_UNAPPLIED_MIGRATION] ))
keyword[return] identifier[errors] | def check_migrations_applied(app_configs, **kwargs):
"""
A Django check to see if all migrations have been applied correctly.
"""
from django.db.migrations.loader import MigrationLoader
errors = []
# Load migrations from disk/DB
try:
loader = MigrationLoader(connection, ignore_no_migrations=True) # depends on [control=['try'], data=[]]
except (ImproperlyConfigured, ProgrammingError, OperationalError):
msg = "Can't connect to database to check migrations"
return [checks.Info(msg, id=health.INFO_CANT_CHECK_MIGRATIONS)] # depends on [control=['except'], data=[]]
if app_configs:
app_labels = [app.label for app in app_configs] # depends on [control=['if'], data=[]]
else:
app_labels = loader.migrated_apps
for (node, migration) in loader.graph.nodes.items():
if migration.app_label not in app_labels:
continue # depends on [control=['if'], data=[]]
if node not in loader.applied_migrations:
msg = 'Unapplied migration {}'.format(migration)
# NB: This *must* be a Warning, not an Error, because Errors
# prevent migrations from being run.
errors.append(checks.Warning(msg, id=health.WARNING_UNAPPLIED_MIGRATION)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return errors |
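A hypothetical registration for the check above (the row only defines the function); wired into Django's system-check framework it would run under `manage.py check`:

    from django.core import checks

    # Tagging it as a database check keeps it out of the default fast path;
    # on recent Django versions it then runs with
    # `manage.py check --database default` and during migrate.
    checks.register(check_migrations_applied, checks.Tags.database)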
def single_from_classes(path:Union[Path, str], classes:Collection[str], ds_tfms:TfmList=None, **kwargs):
"Create an empty `ImageDataBunch` in `path` with `classes`. Typically used for inference."
warn("""This method is deprecated and will be removed in a future version, use `load_learner` after
`Learner.export()`""", DeprecationWarning)
sd = ImageList([], path=path, ignore_empty=True).split_none()
return sd.label_const(0, label_cls=CategoryList, classes=classes).transform(ds_tfms, **kwargs).databunch() | def function[single_from_classes, parameter[path, classes, ds_tfms]]:
constant[Create an empty `ImageDataBunch` in `path` with `classes`. Typically used for inference.]
call[name[warn], parameter[constant[This method is deprecated and will be removed in a future version, use `load_learner` after
`Learner.export()`], name[DeprecationWarning]]]
variable[sd] assign[=] call[call[name[ImageList], parameter[list[[]]]].split_none, parameter[]]
return[call[call[call[name[sd].label_const, parameter[constant[0]]].transform, parameter[name[ds_tfms]]].databunch, parameter[]]] | keyword[def] identifier[single_from_classes] ( identifier[path] : identifier[Union] [ identifier[Path] , identifier[str] ], identifier[classes] : identifier[Collection] [ identifier[str] ], identifier[ds_tfms] : identifier[TfmList] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[warn] ( literal[string] , identifier[DeprecationWarning] )
identifier[sd] = identifier[ImageList] ([], identifier[path] = identifier[path] , identifier[ignore_empty] = keyword[True] ). identifier[split_none] ()
keyword[return] identifier[sd] . identifier[label_const] ( literal[int] , identifier[label_cls] = identifier[CategoryList] , identifier[classes] = identifier[classes] ). identifier[transform] ( identifier[ds_tfms] ,** identifier[kwargs] ). identifier[databunch] () | def single_from_classes(path: Union[Path, str], classes: Collection[str], ds_tfms: TfmList=None, **kwargs):
"""Create an empty `ImageDataBunch` in `path` with `classes`. Typically used for inference."""
warn('This method is deprecated and will be removed in a future version, use `load_learner` after\n `Learner.export()`', DeprecationWarning)
sd = ImageList([], path=path, ignore_empty=True).split_none()
return sd.label_const(0, label_cls=CategoryList, classes=classes).transform(ds_tfms, **kwargs).databunch() |
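The deprecation message above points at the export/load flow; a minimal sketch of that replacement (fastai v1; paths are placeholders):

    from fastai.basic_train import load_learner
    from fastai.vision import open_image

    learn = load_learner('/path/to/export_dir')  # reads export.pkl written by Learner.export()
    prediction = learn.predict(open_image('/path/to/image.jpg'))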
def _basename(fname):
"""Return file name without path."""
if not isinstance(fname, Path):
fname = Path(fname)
path, name, ext = fname.parent, fname.stem, fname.suffix
return path, name, ext | def function[_basename, parameter[fname]]:
constant[Split a file name into its parent path, stem and suffix.]
if <ast.UnaryOp object at 0x7da1b0465c00> begin[:]
variable[fname] assign[=] call[name[Path], parameter[name[fname]]]
<ast.Tuple object at 0x7da18eb544c0> assign[=] tuple[[<ast.Attribute object at 0x7da1b04cb790>, <ast.Attribute object at 0x7da1b04c93f0>, <ast.Attribute object at 0x7da1b04c91b0>]]
return[tuple[[<ast.Name object at 0x7da1b04c94e0>, <ast.Name object at 0x7da1b04c9c30>, <ast.Name object at 0x7da1b04c9ba0>]]] | keyword[def] identifier[_basename] ( identifier[fname] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[fname] , identifier[Path] ):
identifier[fname] = identifier[Path] ( identifier[fname] )
identifier[path] , identifier[name] , identifier[ext] = identifier[fname] . identifier[parent] , identifier[fname] . identifier[stem] , identifier[fname] . identifier[suffix]
keyword[return] identifier[path] , identifier[name] , identifier[ext] | def _basename(fname):
"""Return file name without path."""
if not isinstance(fname, Path):
fname = Path(fname) # depends on [control=['if'], data=[]]
(path, name, ext) = (fname.parent, fname.stem, fname.suffix)
return (path, name, ext) |
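Assuming the `_basename` above is importable, a quick usage check:

    from pathlib import Path

    path, name, ext = _basename(Path('/tmp/data/report.csv'))
    assert (str(path), name, ext) == ('/tmp/data', 'report', '.csv')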
def toEncoding(self, s, encoding=None):
"""Encodes an object to a string in some encoding, or to Unicode.
."""
if isinstance(s, unicode):
if encoding:
s = s.encode(encoding)
elif isinstance(s, str):
if encoding:
s = s.encode(encoding)
else:
s = unicode(s)
else:
if encoding:
s = self.toEncoding(str(s), encoding)
else:
s = unicode(s)
return s | def function[toEncoding, parameter[self, s, encoding]]:
constant[Encodes an object to a string in some encoding, or to Unicode.]
if call[name[isinstance], parameter[name[s], name[unicode]]] begin[:]
if name[encoding] begin[:]
variable[s] assign[=] call[name[s].encode, parameter[name[encoding]]]
return[name[s]] | keyword[def] identifier[toEncoding] ( identifier[self] , identifier[s] , identifier[encoding] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[s] , identifier[unicode] ):
keyword[if] identifier[encoding] :
identifier[s] = identifier[s] . identifier[encode] ( identifier[encoding] )
keyword[elif] identifier[isinstance] ( identifier[s] , identifier[str] ):
keyword[if] identifier[encoding] :
identifier[s] = identifier[s] . identifier[encode] ( identifier[encoding] )
keyword[else] :
identifier[s] = identifier[unicode] ( identifier[s] )
keyword[else] :
keyword[if] identifier[encoding] :
identifier[s] = identifier[self] . identifier[toEncoding] ( identifier[str] ( identifier[s] ), identifier[encoding] )
keyword[else] :
identifier[s] = identifier[unicode] ( identifier[s] )
keyword[return] identifier[s] | def toEncoding(self, s, encoding=None):
"""Encodes an object to a string in some encoding, or to Unicode.
."""
if isinstance(s, unicode):
if encoding:
s = s.encode(encoding) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(s, str):
if encoding:
s = s.encode(encoding) # depends on [control=['if'], data=[]]
else:
s = unicode(s) # depends on [control=['if'], data=[]]
elif encoding:
s = self.toEncoding(str(s), encoding) # depends on [control=['if'], data=[]]
else:
s = unicode(s)
return s |
def st_mtime(self):
"""Return the modification time in seconds."""
mtime = self._st_mtime_ns / 1e9
return mtime if self.use_float else int(mtime) | def function[st_mtime, parameter[self]]:
constant[Return the modification time in seconds.]
variable[mtime] assign[=] binary_operation[name[self]._st_mtime_ns / constant[1000000000.0]]
return[<ast.IfExp object at 0x7da20c6e6aa0>] | keyword[def] identifier[st_mtime] ( identifier[self] ):
literal[string]
identifier[mtime] = identifier[self] . identifier[_st_mtime_ns] / literal[int]
keyword[return] identifier[mtime] keyword[if] identifier[self] . identifier[use_float] keyword[else] identifier[int] ( identifier[mtime] ) | def st_mtime(self):
"""Return the modification time in seconds."""
mtime = self._st_mtime_ns / 1000000000.0
return mtime if self.use_float else int(mtime) |
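A worked example of the nanosecond-to-second conversion in the property above (value arbitrary):

    _st_mtime_ns = 1_700_000_000_123_456_789
    print(_st_mtime_ns / 1e9)       # 1700000000.1234567  (use_float=True)
    print(int(_st_mtime_ns / 1e9))  # 1700000000          (use_float=False)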
def add_asset(self, asset_type, asset_name):
"""
Add an asset to the adversary
Args:
asset_type: (str) Either PHONE, HANDLER, or URL
asset_name: (str) the value for the asset
Returns:
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
if asset_type == 'PHONE':
return self.tc_requests.add_adversary_phone_asset(
self.api_type, self.api_sub_type, self.unique_id, asset_name
)
if asset_type == 'HANDLER':
return self.tc_requests.add_adversary_handler_asset(
self.api_type, self.api_sub_type, self.unique_id, asset_name
)
if asset_type == 'URL':
return self.tc_requests.add_adversary_url_asset(
self.api_type, self.api_sub_type, self.unique_id, asset_name
)
self._tcex.handle_error(
925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]
)
return None | def function[add_asset, parameter[self, asset_type, asset_name]]:
constant[
Add an asset to the adversary
Args:
asset_type: (str) Either PHONE, HANDLER, or URL
asset_name: (str) the value for the asset
Returns:
]
if <ast.UnaryOp object at 0x7da18dc07a90> begin[:]
call[name[self]._tcex.handle_error, parameter[constant[910], list[[<ast.Attribute object at 0x7da18dc04f10>]]]]
if compare[name[asset_type] equal[==] constant[PHONE]] begin[:]
return[call[name[self].tc_requests.add_adversary_phone_asset, parameter[name[self].api_type, name[self].api_sub_type, name[self].unique_id, name[asset_name]]]]
if compare[name[asset_type] equal[==] constant[HANDLER]] begin[:]
return[call[name[self].tc_requests.add_adversary_handler_asset, parameter[name[self].api_type, name[self].api_sub_type, name[self].unique_id, name[asset_name]]]]
if compare[name[asset_type] equal[==] constant[URL]] begin[:]
return[call[name[self].tc_requests.add_adversary_url_asset, parameter[name[self].api_type, name[self].api_sub_type, name[self].unique_id, name[asset_name]]]]
call[name[self]._tcex.handle_error, parameter[constant[925], list[[<ast.Constant object at 0x7da18dc070a0>, <ast.Constant object at 0x7da18dc06c20>, <ast.Constant object at 0x7da18dc05780>, <ast.Constant object at 0x7da18dc05510>, <ast.Name object at 0x7da18dc074c0>]]]]
return[constant[None]] | keyword[def] identifier[add_asset] ( identifier[self] , identifier[asset_type] , identifier[asset_name] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[can_update] ():
identifier[self] . identifier[_tcex] . identifier[handle_error] ( literal[int] ,[ identifier[self] . identifier[type] ])
keyword[if] identifier[asset_type] == literal[string] :
keyword[return] identifier[self] . identifier[tc_requests] . identifier[add_adversary_phone_asset] (
identifier[self] . identifier[api_type] , identifier[self] . identifier[api_sub_type] , identifier[self] . identifier[unique_id] , identifier[asset_name]
)
keyword[if] identifier[asset_type] == literal[string] :
keyword[return] identifier[self] . identifier[tc_requests] . identifier[add_adversary_handler_asset] (
identifier[self] . identifier[api_type] , identifier[self] . identifier[api_sub_type] , identifier[self] . identifier[unique_id] , identifier[asset_name]
)
keyword[if] identifier[asset_type] == literal[string] :
keyword[return] identifier[self] . identifier[tc_requests] . identifier[add_adversary_url_asset] (
identifier[self] . identifier[api_type] , identifier[self] . identifier[api_sub_type] , identifier[self] . identifier[unique_id] , identifier[asset_name]
)
identifier[self] . identifier[_tcex] . identifier[handle_error] (
literal[int] ,[ literal[string] , literal[string] , literal[string] , literal[string] , identifier[asset_type] ]
)
keyword[return] keyword[None] | def add_asset(self, asset_type, asset_name):
"""
Add an asset to the adversary
Args:
asset_type: (str) Either PHONE, HANDLER, or URL
asset_name: (str) the value for the asset
Returns:
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type]) # depends on [control=['if'], data=[]]
if asset_type == 'PHONE':
return self.tc_requests.add_adversary_phone_asset(self.api_type, self.api_sub_type, self.unique_id, asset_name) # depends on [control=['if'], data=[]]
if asset_type == 'HANDLER':
return self.tc_requests.add_adversary_handler_asset(self.api_type, self.api_sub_type, self.unique_id, asset_name) # depends on [control=['if'], data=[]]
if asset_type == 'URL':
return self.tc_requests.add_adversary_url_asset(self.api_type, self.api_sub_type, self.unique_id, asset_name) # depends on [control=['if'], data=[]]
self._tcex.handle_error(925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type])
return None |
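The if-chain above could also be table-driven; a standalone toy (not the ThreatConnect API) showing the shape of that alternative:

    handlers = {
        'PHONE': lambda name: ('phone', name),
        'HANDLER': lambda name: ('handler', name),
        'URL': lambda name: ('url', name),
    }

    def add_asset(asset_type, asset_name):
        try:
            return handlers[asset_type](asset_name)
        except KeyError:
            raise ValueError('unknown asset_type: %r' % asset_type)

    assert add_asset('URL', 'https://x.example') == ('url', 'https://x.example')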
def eval_simple_cor_pattern(self, pattern, hosts, services,
hostgroups, servicegroups, running=False):
"""Parse and build recursively a tree of DependencyNode from a simple pattern
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
        :param running: rules are evaluated both at parsing time and at run time; True means run time
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
"""
node = DependencyNode()
pattern = self.eval_xof_pattern(node, pattern)
# If it's a not value, tag the node and find
# the name without this ! operator
if pattern.startswith('!'):
node.not_value = True
pattern = pattern[1:]
# Is the pattern an expression to be expanded?
if re.search(r"^([%s]+|\*):" % self.host_flags, pattern) or \
re.search(r",\s*([%s]+:.*|\*)$" % self.service_flags, pattern):
            # son is used only for its extracted attributes, then discarded.
son = self.expand_expression(pattern, hosts, services,
hostgroups, servicegroups, running)
if node.operand != 'of:':
node.operand = '&'
node.sons.extend(son.sons)
node.configuration_errors.extend(son.configuration_errors)
node.switch_zeros_of_values()
else:
node.operand = 'object'
obj, error = self.find_object(pattern, hosts, services)
# here we have Alignak SchedulingItem object (Host/Service)
if obj is not None:
# Set host or service
# pylint: disable=E1101
node.operand = obj.__class__.my_type
node.sons.append(obj.uuid) # Only store the uuid, not the full object.
else:
if running is False:
node.configuration_errors.append(error)
else:
# As business rules are re-evaluated at run time on
# each scheduling loop, if the rule becomes invalid
# because of a badly written macro modulation, it
# should be notified upper for the error to be
# displayed in the check output.
raise Exception(error)
return node | def function[eval_simple_cor_pattern, parameter[self, pattern, hosts, services, hostgroups, servicegroups, running]]:
constant[Parse and build recursively a tree of DependencyNode from a simple pattern
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
        :param running: rules are evaluated both at parsing time and at run time; True means run time
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
]
variable[node] assign[=] call[name[DependencyNode], parameter[]]
variable[pattern] assign[=] call[name[self].eval_xof_pattern, parameter[name[node], name[pattern]]]
if call[name[pattern].startswith, parameter[constant[!]]] begin[:]
name[node].not_value assign[=] constant[True]
variable[pattern] assign[=] call[name[pattern]][<ast.Slice object at 0x7da18fe90cd0>]
if <ast.BoolOp object at 0x7da18fe927a0> begin[:]
variable[son] assign[=] call[name[self].expand_expression, parameter[name[pattern], name[hosts], name[services], name[hostgroups], name[servicegroups], name[running]]]
if compare[name[node].operand not_equal[!=] constant[of:]] begin[:]
name[node].operand assign[=] constant[&]
call[name[node].sons.extend, parameter[name[son].sons]]
call[name[node].configuration_errors.extend, parameter[name[son].configuration_errors]]
call[name[node].switch_zeros_of_values, parameter[]]
return[name[node]] | keyword[def] identifier[eval_simple_cor_pattern] ( identifier[self] , identifier[pattern] , identifier[hosts] , identifier[services] ,
identifier[hostgroups] , identifier[servicegroups] , identifier[running] = keyword[False] ):
literal[string]
identifier[node] = identifier[DependencyNode] ()
identifier[pattern] = identifier[self] . identifier[eval_xof_pattern] ( identifier[node] , identifier[pattern] )
keyword[if] identifier[pattern] . identifier[startswith] ( literal[string] ):
identifier[node] . identifier[not_value] = keyword[True]
identifier[pattern] = identifier[pattern] [ literal[int] :]
keyword[if] identifier[re] . identifier[search] ( literal[string] % identifier[self] . identifier[host_flags] , identifier[pattern] ) keyword[or] identifier[re] . identifier[search] ( literal[string] % identifier[self] . identifier[service_flags] , identifier[pattern] ):
identifier[son] = identifier[self] . identifier[expand_expression] ( identifier[pattern] , identifier[hosts] , identifier[services] ,
identifier[hostgroups] , identifier[servicegroups] , identifier[running] )
keyword[if] identifier[node] . identifier[operand] != literal[string] :
identifier[node] . identifier[operand] = literal[string]
identifier[node] . identifier[sons] . identifier[extend] ( identifier[son] . identifier[sons] )
identifier[node] . identifier[configuration_errors] . identifier[extend] ( identifier[son] . identifier[configuration_errors] )
identifier[node] . identifier[switch_zeros_of_values] ()
keyword[else] :
identifier[node] . identifier[operand] = literal[string]
identifier[obj] , identifier[error] = identifier[self] . identifier[find_object] ( identifier[pattern] , identifier[hosts] , identifier[services] )
keyword[if] identifier[obj] keyword[is] keyword[not] keyword[None] :
identifier[node] . identifier[operand] = identifier[obj] . identifier[__class__] . identifier[my_type]
identifier[node] . identifier[sons] . identifier[append] ( identifier[obj] . identifier[uuid] )
keyword[else] :
keyword[if] identifier[running] keyword[is] keyword[False] :
identifier[node] . identifier[configuration_errors] . identifier[append] ( identifier[error] )
keyword[else] :
keyword[raise] identifier[Exception] ( identifier[error] )
keyword[return] identifier[node] | def eval_simple_cor_pattern(self, pattern, hosts, services, hostgroups, servicegroups, running=False):
"""Parse and build recursively a tree of DependencyNode from a simple pattern
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
"""
node = DependencyNode()
pattern = self.eval_xof_pattern(node, pattern)
# If it's a not value, tag the node and find
# the name without this ! operator
if pattern.startswith('!'):
node.not_value = True
pattern = pattern[1:] # depends on [control=['if'], data=[]]
# Is the pattern an expression to be expanded?
if re.search('^([%s]+|\\*):' % self.host_flags, pattern) or re.search(',\\s*([%s]+:.*|\\*)$' % self.service_flags, pattern):
            # son is used only for its extracted attributes, then discarded.
son = self.expand_expression(pattern, hosts, services, hostgroups, servicegroups, running)
if node.operand != 'of:':
node.operand = '&' # depends on [control=['if'], data=[]]
node.sons.extend(son.sons)
node.configuration_errors.extend(son.configuration_errors)
node.switch_zeros_of_values() # depends on [control=['if'], data=[]]
else:
node.operand = 'object'
(obj, error) = self.find_object(pattern, hosts, services)
# here we have Alignak SchedulingItem object (Host/Service)
if obj is not None:
# Set host or service
# pylint: disable=E1101
node.operand = obj.__class__.my_type
node.sons.append(obj.uuid) # Only store the uuid, not the full object. # depends on [control=['if'], data=['obj']]
elif running is False:
node.configuration_errors.append(error) # depends on [control=['if'], data=[]]
else:
# As business rules are re-evaluated at run time on
# each scheduling loop, if the rule becomes invalid
# because of a badly written macro modulation, it
# should be notified upper for the error to be
# displayed in the check output.
raise Exception(error)
return node |
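The '!' negation handling above, in isolation (pattern string hypothetical):

    pattern = '!host_1,service_1'
    not_value = pattern.startswith('!')
    if not_value:
        pattern = pattern[1:]
    assert (not_value, pattern) == (True, 'host_1,service_1')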
def generate_encoded_user_data(
env='dev',
region='us-east-1',
generated=None,
group_name='',
pipeline_type='',
canary=False,
):
r"""Generate base64 encoded User Data.
Args:
env (str): Deployment environment, e.g. dev, stage.
region (str): AWS Region, e.g. us-east-1.
generated (gogoutils.Generator): Generated naming formats.
        group_name (str): Application group name, e.g. core.
pipeline_type (str): Type of Foremast Pipeline to configure.
Returns:
str: base64 encoded User Data script.
#!/bin/bash
export CLOUD_ENVIRONMENT=dev
export CLOUD_ENVIRONMENT_C=dev
export CLOUD_ENVIRONMENT_P=dev
export CLOUD_ENVIRONMENT_S=dev
export CLOUD_APP=coreforrest
export CLOUD_APP_GROUP=forrest
export CLOUD_STACK=forrest
export EC2_REGION=us-east-1
export CLOUD_DOMAIN=dev.example.com
printenv | grep 'CLOUD\|EC2' | awk '$0="export "$0'>> /etc/gogo/cloud_env
"""
# We need to handle the case of prodp and prods for different URL generation
if env in ["prod", "prodp", "prods"]:
env_c, env_p, env_s = "prod", "prodp", "prods"
else:
env_c, env_p, env_s = env, env, env
user_data = get_template(
template_file='infrastructure/user_data.sh.j2',
env=env,
env_c=env_c,
env_p=env_p,
env_s=env_s,
region=region,
app_name=generated.app_name(),
group_name=group_name,
pipeline_type=pipeline_type,
canary=canary,
formats=generated,
)
return base64.b64encode(user_data.encode()).decode() | def function[generate_encoded_user_data, parameter[env, region, generated, group_name, pipeline_type, canary]]:
constant[Generate base64 encoded User Data.
Args:
env (str): Deployment environment, e.g. dev, stage.
region (str): AWS Region, e.g. us-east-1.
generated (gogoutils.Generator): Generated naming formats.
        group_name (str): Application group name, e.g. core.
pipeline_type (str): Type of Foremast Pipeline to configure.
Returns:
str: base64 encoded User Data script.
#!/bin/bash
export CLOUD_ENVIRONMENT=dev
export CLOUD_ENVIRONMENT_C=dev
export CLOUD_ENVIRONMENT_P=dev
export CLOUD_ENVIRONMENT_S=dev
export CLOUD_APP=coreforrest
export CLOUD_APP_GROUP=forrest
export CLOUD_STACK=forrest
export EC2_REGION=us-east-1
export CLOUD_DOMAIN=dev.example.com
printenv | grep 'CLOUD\|EC2' | awk '$0="export "$0'>> /etc/gogo/cloud_env
]
if compare[name[env] in list[[<ast.Constant object at 0x7da204347f40>, <ast.Constant object at 0x7da204344c10>, <ast.Constant object at 0x7da204346a10>]]] begin[:]
<ast.Tuple object at 0x7da2043471f0> assign[=] tuple[[<ast.Constant object at 0x7da204347fd0>, <ast.Constant object at 0x7da204347550>, <ast.Constant object at 0x7da2043462c0>]]
variable[user_data] assign[=] call[name[get_template], parameter[]]
return[call[call[name[base64].b64encode, parameter[call[name[user_data].encode, parameter[]]]].decode, parameter[]]] | keyword[def] identifier[generate_encoded_user_data] (
identifier[env] = literal[string] ,
identifier[region] = literal[string] ,
identifier[generated] = keyword[None] ,
identifier[group_name] = literal[string] ,
identifier[pipeline_type] = literal[string] ,
identifier[canary] = keyword[False] ,
):
literal[string]
keyword[if] identifier[env] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[env_c] , identifier[env_p] , identifier[env_s] = literal[string] , literal[string] , literal[string]
keyword[else] :
identifier[env_c] , identifier[env_p] , identifier[env_s] = identifier[env] , identifier[env] , identifier[env]
identifier[user_data] = identifier[get_template] (
identifier[template_file] = literal[string] ,
identifier[env] = identifier[env] ,
identifier[env_c] = identifier[env_c] ,
identifier[env_p] = identifier[env_p] ,
identifier[env_s] = identifier[env_s] ,
identifier[region] = identifier[region] ,
identifier[app_name] = identifier[generated] . identifier[app_name] (),
identifier[group_name] = identifier[group_name] ,
identifier[pipeline_type] = identifier[pipeline_type] ,
identifier[canary] = identifier[canary] ,
identifier[formats] = identifier[generated] ,
)
keyword[return] identifier[base64] . identifier[b64encode] ( identifier[user_data] . identifier[encode] ()). identifier[decode] () | def generate_encoded_user_data(env='dev', region='us-east-1', generated=None, group_name='', pipeline_type='', canary=False):
"""Generate base64 encoded User Data.
Args:
env (str): Deployment environment, e.g. dev, stage.
region (str): AWS Region, e.g. us-east-1.
generated (gogoutils.Generator): Generated naming formats.
group_name (str): Application group nane, e.g. core.
pipeline_type (str): Type of Foremast Pipeline to configure.
Returns:
str: base64 encoded User Data script.
#!/bin/bash
export CLOUD_ENVIRONMENT=dev
export CLOUD_ENVIRONMENT_C=dev
export CLOUD_ENVIRONMENT_P=dev
export CLOUD_ENVIRONMENT_S=dev
export CLOUD_APP=coreforrest
export CLOUD_APP_GROUP=forrest
export CLOUD_STACK=forrest
export EC2_REGION=us-east-1
export CLOUD_DOMAIN=dev.example.com
printenv | grep 'CLOUD\\|EC2' | awk '$0="export "$0'>> /etc/gogo/cloud_env
"""
# We need to handle the case of prodp and prods for different URL generation
if env in ['prod', 'prodp', 'prods']:
(env_c, env_p, env_s) = ('prod', 'prodp', 'prods') # depends on [control=['if'], data=[]]
else:
(env_c, env_p, env_s) = (env, env, env)
user_data = get_template(template_file='infrastructure/user_data.sh.j2', env=env, env_c=env_c, env_p=env_p, env_s=env_s, region=region, app_name=generated.app_name(), group_name=group_name, pipeline_type=pipeline_type, canary=canary, formats=generated)
return base64.b64encode(user_data.encode()).decode() |
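The base64 round-trip performed by the last line above, shown standalone:

    import base64

    user_data = '#!/bin/bash\nexport CLOUD_ENVIRONMENT=dev\n'
    encoded = base64.b64encode(user_data.encode()).decode()
    assert base64.b64decode(encoded).decode() == user_data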
def formatter(self):
"""
Creates and returns a Formatter capable of nicely formatting Lambda function logs
Returns
-------
LogsFormatter
"""
formatter_chain = [
LambdaLogMsgFormatters.colorize_errors,
# Format JSON "before" highlighting the keywords. Otherwise, JSON will be invalid from all the
# ANSI color codes and fail to pretty print
JSONMsgFormatter.format_json,
KeywordHighlighter(self._filter_pattern).highlight_keywords,
]
return LogsFormatter(self.colored, formatter_chain) | def function[formatter, parameter[self]]:
constant[
Creates and returns a Formatter capable of nicely formatting Lambda function logs
Returns
-------
LogsFormatter
]
variable[formatter_chain] assign[=] list[[<ast.Attribute object at 0x7da1b1f71240>, <ast.Attribute object at 0x7da1b1f736a0>, <ast.Attribute object at 0x7da1b1f73e20>]]
return[call[name[LogsFormatter], parameter[name[self].colored, name[formatter_chain]]]] | keyword[def] identifier[formatter] ( identifier[self] ):
literal[string]
identifier[formatter_chain] =[
identifier[LambdaLogMsgFormatters] . identifier[colorize_errors] ,
identifier[JSONMsgFormatter] . identifier[format_json] ,
identifier[KeywordHighlighter] ( identifier[self] . identifier[_filter_pattern] ). identifier[highlight_keywords] ,
]
keyword[return] identifier[LogsFormatter] ( identifier[self] . identifier[colored] , identifier[formatter_chain] ) | def formatter(self):
"""
Creates and returns a Formatter capable of nicely formatting Lambda function logs
Returns
-------
LogsFormatter
"""
# Format JSON "before" highlighting the keywords. Otherwise, JSON will be invalid from all the
# ANSI color codes and fail to pretty print
formatter_chain = [LambdaLogMsgFormatters.colorize_errors, JSONMsgFormatter.format_json, KeywordHighlighter(self._filter_pattern).highlight_keywords]
return LogsFormatter(self.colored, formatter_chain) |
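The formatter-chain idea above, reduced to a standalone fold over plain functions (the real LogsFormatter call signature may differ):

    from functools import reduce

    chain = [str.strip, str.upper]

    def format_line(line):
        return reduce(lambda acc, fn: fn(acc), chain, line)

    assert format_line('  error: boom \n') == 'ERROR: BOOM'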
def _get_default(self, obj):
''' Internal implementation of instance attribute access for default
values.
        Handles bookkeeping around |PropertyContainer| value, etc.
'''
if self.name in obj._property_values:
# this shouldn't happen because we should have checked before _get_default()
raise RuntimeError("Bokeh internal error, does not handle the case of self.name already in _property_values")
is_themed = obj.themed_values() is not None and self.name in obj.themed_values()
default = self.instance_default(obj)
if is_themed:
unstable_dict = obj._unstable_themed_values
else:
unstable_dict = obj._unstable_default_values
if self.name in unstable_dict:
return unstable_dict[self.name]
if self.property._may_have_unstable_default():
if isinstance(default, PropertyValueContainer):
default._register_owner(obj, self)
unstable_dict[self.name] = default
return default | def function[_get_default, parameter[self, obj]]:
constant[ Internal implementation of instance attribute access for default
values.
        Handles bookkeeping around |PropertyContainer| value, etc.
]
if compare[name[self].name in name[obj]._property_values] begin[:]
<ast.Raise object at 0x7da1b21d7df0>
variable[is_themed] assign[=] <ast.BoolOp object at 0x7da1b21d4e20>
variable[default] assign[=] call[name[self].instance_default, parameter[name[obj]]]
if name[is_themed] begin[:]
variable[unstable_dict] assign[=] name[obj]._unstable_themed_values
if compare[name[self].name in name[unstable_dict]] begin[:]
return[call[name[unstable_dict]][name[self].name]]
if call[name[self].property._may_have_unstable_default, parameter[]] begin[:]
if call[name[isinstance], parameter[name[default], name[PropertyValueContainer]]] begin[:]
call[name[default]._register_owner, parameter[name[obj], name[self]]]
call[name[unstable_dict]][name[self].name] assign[=] name[default]
return[name[default]] | keyword[def] identifier[_get_default] ( identifier[self] , identifier[obj] ):
literal[string]
keyword[if] identifier[self] . identifier[name] keyword[in] identifier[obj] . identifier[_property_values] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[is_themed] = identifier[obj] . identifier[themed_values] () keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[name] keyword[in] identifier[obj] . identifier[themed_values] ()
identifier[default] = identifier[self] . identifier[instance_default] ( identifier[obj] )
keyword[if] identifier[is_themed] :
identifier[unstable_dict] = identifier[obj] . identifier[_unstable_themed_values]
keyword[else] :
identifier[unstable_dict] = identifier[obj] . identifier[_unstable_default_values]
keyword[if] identifier[self] . identifier[name] keyword[in] identifier[unstable_dict] :
keyword[return] identifier[unstable_dict] [ identifier[self] . identifier[name] ]
keyword[if] identifier[self] . identifier[property] . identifier[_may_have_unstable_default] ():
keyword[if] identifier[isinstance] ( identifier[default] , identifier[PropertyValueContainer] ):
identifier[default] . identifier[_register_owner] ( identifier[obj] , identifier[self] )
identifier[unstable_dict] [ identifier[self] . identifier[name] ]= identifier[default]
keyword[return] identifier[default] | def _get_default(self, obj):
""" Internal implementation of instance attribute access for default
values.
        Handles bookkeeping around |PropertyContainer| value, etc.
"""
if self.name in obj._property_values:
# this shouldn't happen because we should have checked before _get_default()
raise RuntimeError('Bokeh internal error, does not handle the case of self.name already in _property_values') # depends on [control=['if'], data=[]]
is_themed = obj.themed_values() is not None and self.name in obj.themed_values()
default = self.instance_default(obj)
if is_themed:
unstable_dict = obj._unstable_themed_values # depends on [control=['if'], data=[]]
else:
unstable_dict = obj._unstable_default_values
if self.name in unstable_dict:
return unstable_dict[self.name] # depends on [control=['if'], data=['unstable_dict']]
if self.property._may_have_unstable_default():
if isinstance(default, PropertyValueContainer):
default._register_owner(obj, self) # depends on [control=['if'], data=[]]
unstable_dict[self.name] = default # depends on [control=['if'], data=[]]
return default |
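A standalone toy of the unstable-default caching above: compute once, return the same container on later reads.

    _unstable_defaults = {}

    def get_default(name, make_default):
        if name in _unstable_defaults:
            return _unstable_defaults[name]
        value = make_default()
        _unstable_defaults[name] = value  # later reads see the same object
        return value

    assert get_default('tags', list) is get_default('tags', list)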
def fire_freerun_worker(self, thread_handler_header):
""" fires free-run worker with no dependencies to track """
try:
assert isinstance(thread_handler_header, ThreadHandlerHeader)
self.logger.info('{0} {{'.format(thread_handler_header.key))
state_machine = self.timetable.state_machines[STATE_MACHINE_FREERUN]
state_machine.manage_schedulable(thread_handler_header.process_entry)
except Exception as e:
self.logger.error('fire_freerun_worker: {0}'.format(e))
finally:
self.logger.info('}') | def function[fire_freerun_worker, parameter[self, thread_handler_header]]:
constant[ fires free-run worker with no dependencies to track ]
<ast.Try object at 0x7da1b2424280> | keyword[def] identifier[fire_freerun_worker] ( identifier[self] , identifier[thread_handler_header] ):
literal[string]
keyword[try] :
keyword[assert] identifier[isinstance] ( identifier[thread_handler_header] , identifier[ThreadHandlerHeader] )
identifier[self] . identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[thread_handler_header] . identifier[key] ))
identifier[state_machine] = identifier[self] . identifier[timetable] . identifier[state_machines] [ identifier[STATE_MACHINE_FREERUN] ]
identifier[state_machine] . identifier[manage_schedulable] ( identifier[thread_handler_header] . identifier[process_entry] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[e] ))
keyword[finally] :
identifier[self] . identifier[logger] . identifier[info] ( literal[string] ) | def fire_freerun_worker(self, thread_handler_header):
""" fires free-run worker with no dependencies to track """
try:
assert isinstance(thread_handler_header, ThreadHandlerHeader)
self.logger.info('{0} {{'.format(thread_handler_header.key))
state_machine = self.timetable.state_machines[STATE_MACHINE_FREERUN]
state_machine.manage_schedulable(thread_handler_header.process_entry) # depends on [control=['try'], data=[]]
except Exception as e:
self.logger.error('fire_freerun_worker: {0}'.format(e)) # depends on [control=['except'], data=['e']]
finally:
self.logger.info('}') |
def Overlay_setSuspended(self, suspended):
"""
Function path: Overlay.setSuspended
Domain: Overlay
Method name: setSuspended
Parameters:
Required arguments:
'suspended' (type: boolean) -> Whether overlay should be suspended and not consume any resources until resumed.
No return value.
"""
assert isinstance(suspended, (bool,)
), "Argument 'suspended' must be of type '['bool']'. Received type: '%s'" % type(
suspended)
subdom_funcs = self.synchronous_command('Overlay.setSuspended', suspended
=suspended)
return subdom_funcs | def function[Overlay_setSuspended, parameter[self, suspended]]:
constant[
Function path: Overlay.setSuspended
Domain: Overlay
Method name: setSuspended
Parameters:
Required arguments:
'suspended' (type: boolean) -> Whether overlay should be suspended and not consume any resources until resumed.
No return value.
]
assert[call[name[isinstance], parameter[name[suspended], tuple[[<ast.Name object at 0x7da1b10287c0>]]]]]
variable[subdom_funcs] assign[=] call[name[self].synchronous_command, parameter[constant[Overlay.setSuspended]]]
return[name[subdom_funcs]] | keyword[def] identifier[Overlay_setSuspended] ( identifier[self] , identifier[suspended] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[suspended] ,( identifier[bool] ,)
), literal[string] % identifier[type] (
identifier[suspended] )
identifier[subdom_funcs] = identifier[self] . identifier[synchronous_command] ( literal[string] , identifier[suspended]
= identifier[suspended] )
keyword[return] identifier[subdom_funcs] | def Overlay_setSuspended(self, suspended):
"""
Function path: Overlay.setSuspended
Domain: Overlay
Method name: setSuspended
Parameters:
Required arguments:
'suspended' (type: boolean) -> Whether overlay should be suspended and not consume any resources until resumed.
No return value.
"""
assert isinstance(suspended, (bool,)), "Argument 'suspended' must be of type '['bool']'. Received type: '%s'" % type(suspended)
subdom_funcs = self.synchronous_command('Overlay.setSuspended', suspended=suspended)
return subdom_funcs |
def WriteClientSnapshot(self, snapshot):
"""Writes new client snapshot."""
client_id = snapshot.client_id
if client_id not in self.metadatas:
raise db.UnknownClientError(client_id)
startup_info = snapshot.startup_info
snapshot.startup_info = None
ts = rdfvalue.RDFDatetime.Now()
history = self.clients.setdefault(client_id, {})
history[ts] = snapshot.SerializeToString()
history = self.startup_history.setdefault(client_id, {})
history[ts] = startup_info.SerializeToString()
snapshot.startup_info = startup_info | def function[WriteClientSnapshot, parameter[self, snapshot]]:
constant[Writes new client snapshot.]
variable[client_id] assign[=] name[snapshot].client_id
if compare[name[client_id] <ast.NotIn object at 0x7da2590d7190> name[self].metadatas] begin[:]
<ast.Raise object at 0x7da1b1b47940>
variable[startup_info] assign[=] name[snapshot].startup_info
name[snapshot].startup_info assign[=] constant[None]
variable[ts] assign[=] call[name[rdfvalue].RDFDatetime.Now, parameter[]]
variable[history] assign[=] call[name[self].clients.setdefault, parameter[name[client_id], dictionary[[], []]]]
call[name[history]][name[ts]] assign[=] call[name[snapshot].SerializeToString, parameter[]]
variable[history] assign[=] call[name[self].startup_history.setdefault, parameter[name[client_id], dictionary[[], []]]]
call[name[history]][name[ts]] assign[=] call[name[startup_info].SerializeToString, parameter[]]
name[snapshot].startup_info assign[=] name[startup_info] | keyword[def] identifier[WriteClientSnapshot] ( identifier[self] , identifier[snapshot] ):
literal[string]
identifier[client_id] = identifier[snapshot] . identifier[client_id]
keyword[if] identifier[client_id] keyword[not] keyword[in] identifier[self] . identifier[metadatas] :
keyword[raise] identifier[db] . identifier[UnknownClientError] ( identifier[client_id] )
identifier[startup_info] = identifier[snapshot] . identifier[startup_info]
identifier[snapshot] . identifier[startup_info] = keyword[None]
identifier[ts] = identifier[rdfvalue] . identifier[RDFDatetime] . identifier[Now] ()
identifier[history] = identifier[self] . identifier[clients] . identifier[setdefault] ( identifier[client_id] ,{})
identifier[history] [ identifier[ts] ]= identifier[snapshot] . identifier[SerializeToString] ()
identifier[history] = identifier[self] . identifier[startup_history] . identifier[setdefault] ( identifier[client_id] ,{})
identifier[history] [ identifier[ts] ]= identifier[startup_info] . identifier[SerializeToString] ()
identifier[snapshot] . identifier[startup_info] = identifier[startup_info] | def WriteClientSnapshot(self, snapshot):
"""Writes new client snapshot."""
client_id = snapshot.client_id
if client_id not in self.metadatas:
raise db.UnknownClientError(client_id) # depends on [control=['if'], data=['client_id']]
startup_info = snapshot.startup_info
snapshot.startup_info = None
ts = rdfvalue.RDFDatetime.Now()
history = self.clients.setdefault(client_id, {})
history[ts] = snapshot.SerializeToString()
history = self.startup_history.setdefault(client_id, {})
history[ts] = startup_info.SerializeToString()
snapshot.startup_info = startup_info |
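The per-client, timestamp-keyed history bookkeeping above, as a plain-dict sketch (IDs and payloads hypothetical):

    clients = {}

    history = clients.setdefault('C.1000000000000000', {})
    history['2024-01-01T00:00:00Z'] = b'<serialized snapshot>'
    history['2024-01-02T00:00:00Z'] = b'<newer snapshot>'

    assert len(clients['C.1000000000000000']) == 2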
def _get_goids(gostr):
"""Return GO IDs from a GO str (e.g., GO:0043473,GO:0009987) or a file."""
if 'GO:' in gostr:
return gostr.split(',')
elif os.path.exists(gostr):
return GetGOs().get_goids(None, gostr, sys.stdout) | def function[_get_goids, parameter[gostr]]:
constant[Return GO IDs from a GO str (e.g., GO:0043473,GO:0009987) or a file.]
if compare[constant[GO:] in name[gostr]] begin[:]
return[call[name[gostr].split, parameter[constant[,]]]] | keyword[def] identifier[_get_goids] ( identifier[gostr] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[gostr] :
keyword[return] identifier[gostr] . identifier[split] ( literal[string] )
keyword[elif] identifier[os] . identifier[path] . identifier[exists] ( identifier[gostr] ):
keyword[return] identifier[GetGOs] (). identifier[get_goids] ( keyword[None] , identifier[gostr] , identifier[sys] . identifier[stdout] ) | def _get_goids(gostr):
"""Return GO IDs from a GO str (e.g., GO:0043473,GO:0009987) or a file."""
if 'GO:' in gostr:
return gostr.split(',') # depends on [control=['if'], data=['gostr']]
elif os.path.exists(gostr):
return GetGOs().get_goids(None, gostr, sys.stdout) # depends on [control=['if'], data=[]] |
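Assuming the function above (plus its `os`/`sys` imports), the comma-separated branch behaves like this; note it implicitly returns None when neither branch matches.

    assert _get_goids('GO:0043473,GO:0009987') == ['GO:0043473', 'GO:0009987']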
def notify(self, msgtype, method, params):
"""Handle an incoming notify request."""
self.dispatch.call(method, params) | def function[notify, parameter[self, msgtype, method, params]]:
constant[Handle an incoming notify request.]
call[name[self].dispatch.call, parameter[name[method], name[params]]] | keyword[def] identifier[notify] ( identifier[self] , identifier[msgtype] , identifier[method] , identifier[params] ):
literal[string]
identifier[self] . identifier[dispatch] . identifier[call] ( identifier[method] , identifier[params] ) | def notify(self, msgtype, method, params):
"""Handle an incoming notify request."""
self.dispatch.call(method, params) |
def wherenot(self, fieldname, value):
"""
Logical opposite of `where`.
"""
return self.where(fieldname, value, negate=True) | def function[wherenot, parameter[self, fieldname, value]]:
constant[
Logical opposite of `where`.
]
return[call[name[self].where, parameter[name[fieldname], name[value]]]] | keyword[def] identifier[wherenot] ( identifier[self] , identifier[fieldname] , identifier[value] ):
literal[string]
keyword[return] identifier[self] . identifier[where] ( identifier[fieldname] , identifier[value] , identifier[negate] = keyword[True] ) | def wherenot(self, fieldname, value):
"""
Logical opposite of `where`.
"""
return self.where(fieldname, value, negate=True) |
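A standalone toy of the `negate` flag that `wherenot` relies on (the real `where` lives on the table class):

    rows = [{'status': 'open'}, {'status': 'closed'}]

    def where(rows, field, value, negate=False):
        return [r for r in rows if (r[field] == value) != negate]

    assert where(rows, 'status', 'closed', negate=True) == [{'status': 'open'}]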
def read_rec(table_name, objid):
"""Generator that yields keyed recs from store."""
req = datastore.LookupRequest()
req.key.extend([make_key(table_name, objid)])
for found in datastore.lookup(req).found:
yield extract_entity(found) | def function[read_rec, parameter[table_name, objid]]:
constant[Generator that yields keyed recs from store.]
variable[req] assign[=] call[name[datastore].LookupRequest, parameter[]]
call[name[req].key.extend, parameter[list[[<ast.Call object at 0x7da20c76fb20>]]]]
for taget[name[found]] in starred[call[name[datastore].lookup, parameter[name[req]]].found] begin[:]
<ast.Yield object at 0x7da20c76c070> | keyword[def] identifier[read_rec] ( identifier[table_name] , identifier[objid] ):
literal[string]
identifier[req] = identifier[datastore] . identifier[LookupRequest] ()
identifier[req] . identifier[key] . identifier[extend] ([ identifier[make_key] ( identifier[table_name] , identifier[objid] )])
keyword[for] identifier[found] keyword[in] identifier[datastore] . identifier[lookup] ( identifier[req] ). identifier[found] :
keyword[yield] identifier[extract_entity] ( identifier[found] ) | def read_rec(table_name, objid):
"""Generator that yields keyed recs from store."""
req = datastore.LookupRequest()
req.key.extend([make_key(table_name, objid)])
for found in datastore.lookup(req).found:
yield extract_entity(found) # depends on [control=['for'], data=['found']] |
def calc_individual_chi_squares(residuals,
long_probabilities,
rows_to_obs):
"""
Calculates individual chi-squared values for each choice situation in the
dataset.
Parameters
----------
residuals : 1D ndarray.
The choice vector minus the predicted probability of each alternative
for each observation.
long_probabilities : 1D ndarray.
The probability of each alternative being chosen in each choice
situation.
rows_to_obs : 2D scipy sparse array.
Should map each row of the long format dataferame to the unique
observations in the dataset.
Returns
-------
ind_chi_squareds : 1D ndarray.
Will have as many elements as there are columns in `rows_to_obs`. Each
element will contain the pearson chi-squared value for the given choice
situation.
"""
chi_squared_terms = np.square(residuals) / long_probabilities
return rows_to_obs.T.dot(chi_squared_terms) | def function[calc_individual_chi_squares, parameter[residuals, long_probabilities, rows_to_obs]]:
constant[
Calculates individual chi-squared values for each choice situation in the
dataset.
Parameters
----------
residuals : 1D ndarray.
The choice vector minus the predicted probability of each alternative
for each observation.
long_probabilities : 1D ndarray.
The probability of each alternative being chosen in each choice
situation.
rows_to_obs : 2D scipy sparse array.
        Should map each row of the long format dataframe to the unique
observations in the dataset.
Returns
-------
ind_chi_squareds : 1D ndarray.
Will have as many elements as there are columns in `rows_to_obs`. Each
element will contain the pearson chi-squared value for the given choice
situation.
]
variable[chi_squared_terms] assign[=] binary_operation[call[name[np].square, parameter[name[residuals]]] / name[long_probabilities]]
return[call[name[rows_to_obs].T.dot, parameter[name[chi_squared_terms]]]] | keyword[def] identifier[calc_individual_chi_squares] ( identifier[residuals] ,
identifier[long_probabilities] ,
identifier[rows_to_obs] ):
literal[string]
identifier[chi_squared_terms] = identifier[np] . identifier[square] ( identifier[residuals] )/ identifier[long_probabilities]
keyword[return] identifier[rows_to_obs] . identifier[T] . identifier[dot] ( identifier[chi_squared_terms] ) | def calc_individual_chi_squares(residuals, long_probabilities, rows_to_obs):
"""
Calculates individual chi-squared values for each choice situation in the
dataset.
Parameters
----------
residuals : 1D ndarray.
The choice vector minus the predicted probability of each alternative
for each observation.
long_probabilities : 1D ndarray.
The probability of each alternative being chosen in each choice
situation.
rows_to_obs : 2D scipy sparse array.
        Should map each row of the long format dataframe to the unique
observations in the dataset.
Returns
-------
ind_chi_squareds : 1D ndarray.
Will have as many elements as there are columns in `rows_to_obs`. Each
element will contain the pearson chi-squared value for the given choice
situation.
"""
chi_squared_terms = np.square(residuals) / long_probabilities
return rows_to_obs.T.dot(chi_squared_terms) |
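A worked example of the calculation above, with two observations of two alternatives each (toy numbers):

    import numpy as np
    from scipy import sparse

    residuals = np.array([0.6, -0.6, -0.3, 0.3])         # choice minus probability
    long_probabilities = np.array([0.4, 0.6, 0.3, 0.7])
    rows_to_obs = sparse.csr_matrix([[1, 0], [1, 0], [0, 1], [0, 1]])

    chi_sq = rows_to_obs.T.dot(np.square(residuals) / long_probabilities)
    print(chi_sq)  # [1.5  0.42857...] -- one Pearson chi-squared per observation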
def get_attached_volume(self, datacenter_id, server_id, volume_id):
"""
Retrieves volume information.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param volume_id: The unique ID of the volume.
:type volume_id: ``str``
"""
response = self._perform_request(
'/datacenters/%s/servers/%s/volumes/%s' % (
datacenter_id,
server_id,
volume_id))
return response | def function[get_attached_volume, parameter[self, datacenter_id, server_id, volume_id]]:
constant[
Retrieves volume information.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param volume_id: The unique ID of the volume.
:type volume_id: ``str``
]
variable[response] assign[=] call[name[self]._perform_request, parameter[binary_operation[constant[/datacenters/%s/servers/%s/volumes/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1affc0b20>, <ast.Name object at 0x7da1affc1210>, <ast.Name object at 0x7da1affc09a0>]]]]]
return[name[response]] | keyword[def] identifier[get_attached_volume] ( identifier[self] , identifier[datacenter_id] , identifier[server_id] , identifier[volume_id] ):
literal[string]
identifier[response] = identifier[self] . identifier[_perform_request] (
literal[string] %(
identifier[datacenter_id] ,
identifier[server_id] ,
identifier[volume_id] ))
keyword[return] identifier[response] | def get_attached_volume(self, datacenter_id, server_id, volume_id):
"""
Retrieves volume information.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param volume_id: The unique ID of the volume.
:type volume_id: ``str``
"""
response = self._perform_request('/datacenters/%s/servers/%s/volumes/%s' % (datacenter_id, server_id, volume_id))
return response |
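The request-path interpolation used above, in isolation (IDs are placeholders):

    path = '/datacenters/%s/servers/%s/volumes/%s' % ('dc-uuid', 'srv-uuid', 'vol-uuid')
    assert path == '/datacenters/dc-uuid/servers/srv-uuid/volumes/vol-uuid'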