code (string, lengths 75–104k) | code_sememe (string, lengths 47–309k) | token_type (string, lengths 215–214k) | code_dependency (string, lengths 75–155k)
---|---|---|---
def _get_symbolic_function_initial_state(self, function_addr, fastpath_mode_state=None):
"""
Symbolically execute the first basic block of the specified function,
then return the resulting state. We prepare the state using the already
existing state in fastpath mode (if available).
:param function_addr: The function address
:return: A symbolic state if succeeded, None otherwise
"""
if function_addr is None:
return None
if function_addr in self._symbolic_function_initial_state:
return self._symbolic_function_initial_state[function_addr]
if fastpath_mode_state is not None:
fastpath_state = fastpath_mode_state
else:
if function_addr in self._function_input_states:
fastpath_state = self._function_input_states[function_addr]
else:
raise AngrCFGError('The impossible happened. Please report to Fish.')
symbolic_initial_state = self.project.factory.entry_state(mode='symbolic')
if fastpath_state is not None:
symbolic_initial_state = self.project.simos.prepare_call_state(fastpath_state,
initial_state=symbolic_initial_state)
# Find number of instructions of start block
func = self.project.kb.functions.get(function_addr)
start_block = func._get_block(function_addr)
num_instr = start_block.instructions - 1
symbolic_initial_state.ip = function_addr
path = self.project.factory.path(symbolic_initial_state)
# We execute all but the last instruction in this basic block, so we have a cleaner
# state
# Start execution!
try:
sim_successors = self.project.factory.successors(path.state, num_inst=num_instr)
except (SimError, AngrError):
return None
exits = sim_successors.flat_successors + sim_successors.unsat_successors
if exits:
final_st = None
for ex in exits:
if ex.satisfiable():
final_st = ex
break
else:
final_st = None
self._symbolic_function_initial_state[function_addr] = final_st
return final_st | def function[_get_symbolic_function_initial_state, parameter[self, function_addr, fastpath_mode_state]]:
constant[
Symbolically execute the first basic block of the specified function,
then return the resulting state. We prepare the state using the already
existing state in fastpath mode (if available).
:param function_addr: The function address
:return: A symbolic state if succeeded, None otherwise
]
if compare[name[function_addr] is constant[None]] begin[:]
return[constant[None]]
if compare[name[function_addr] in name[self]._symbolic_function_initial_state] begin[:]
return[call[name[self]._symbolic_function_initial_state][name[function_addr]]]
if compare[name[fastpath_mode_state] is_not constant[None]] begin[:]
variable[fastpath_state] assign[=] name[fastpath_mode_state]
variable[symbolic_initial_state] assign[=] call[name[self].project.factory.entry_state, parameter[]]
if compare[name[fastpath_state] is_not constant[None]] begin[:]
variable[symbolic_initial_state] assign[=] call[name[self].project.simos.prepare_call_state, parameter[name[fastpath_state]]]
variable[func] assign[=] call[name[self].project.kb.functions.get, parameter[name[function_addr]]]
variable[start_block] assign[=] call[name[func]._get_block, parameter[name[function_addr]]]
variable[num_instr] assign[=] binary_operation[name[start_block].instructions - constant[1]]
name[symbolic_initial_state].ip assign[=] name[function_addr]
variable[path] assign[=] call[name[self].project.factory.path, parameter[name[symbolic_initial_state]]]
<ast.Try object at 0x7da2044c34f0>
variable[exits] assign[=] binary_operation[name[sim_successors].flat_successors + name[sim_successors].unsat_successors]
if name[exits] begin[:]
variable[final_st] assign[=] constant[None]
for taget[name[ex]] in starred[name[exits]] begin[:]
if call[name[ex].satisfiable, parameter[]] begin[:]
variable[final_st] assign[=] name[ex]
break
call[name[self]._symbolic_function_initial_state][name[function_addr]] assign[=] name[final_st]
return[name[final_st]] | keyword[def] identifier[_get_symbolic_function_initial_state] ( identifier[self] , identifier[function_addr] , identifier[fastpath_mode_state] = keyword[None] ):
literal[string]
keyword[if] identifier[function_addr] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] identifier[function_addr] keyword[in] identifier[self] . identifier[_symbolic_function_initial_state] :
keyword[return] identifier[self] . identifier[_symbolic_function_initial_state] [ identifier[function_addr] ]
keyword[if] identifier[fastpath_mode_state] keyword[is] keyword[not] keyword[None] :
identifier[fastpath_state] = identifier[fastpath_mode_state]
keyword[else] :
keyword[if] identifier[function_addr] keyword[in] identifier[self] . identifier[_function_input_states] :
identifier[fastpath_state] = identifier[self] . identifier[_function_input_states] [ identifier[function_addr] ]
keyword[else] :
keyword[raise] identifier[AngrCFGError] ( literal[string] )
identifier[symbolic_initial_state] = identifier[self] . identifier[project] . identifier[factory] . identifier[entry_state] ( identifier[mode] = literal[string] )
keyword[if] identifier[fastpath_state] keyword[is] keyword[not] keyword[None] :
identifier[symbolic_initial_state] = identifier[self] . identifier[project] . identifier[simos] . identifier[prepare_call_state] ( identifier[fastpath_state] ,
identifier[initial_state] = identifier[symbolic_initial_state] )
identifier[func] = identifier[self] . identifier[project] . identifier[kb] . identifier[functions] . identifier[get] ( identifier[function_addr] )
identifier[start_block] = identifier[func] . identifier[_get_block] ( identifier[function_addr] )
identifier[num_instr] = identifier[start_block] . identifier[instructions] - literal[int]
identifier[symbolic_initial_state] . identifier[ip] = identifier[function_addr]
identifier[path] = identifier[self] . identifier[project] . identifier[factory] . identifier[path] ( identifier[symbolic_initial_state] )
keyword[try] :
identifier[sim_successors] = identifier[self] . identifier[project] . identifier[factory] . identifier[successors] ( identifier[path] . identifier[state] , identifier[num_inst] = identifier[num_instr] )
keyword[except] ( identifier[SimError] , identifier[AngrError] ):
keyword[return] keyword[None]
identifier[exits] = identifier[sim_successors] . identifier[flat_successors] + identifier[sim_successors] . identifier[unsat_successors]
keyword[if] identifier[exits] :
identifier[final_st] = keyword[None]
keyword[for] identifier[ex] keyword[in] identifier[exits] :
keyword[if] identifier[ex] . identifier[satisfiable] ():
identifier[final_st] = identifier[ex]
keyword[break]
keyword[else] :
identifier[final_st] = keyword[None]
identifier[self] . identifier[_symbolic_function_initial_state] [ identifier[function_addr] ]= identifier[final_st]
keyword[return] identifier[final_st] | def _get_symbolic_function_initial_state(self, function_addr, fastpath_mode_state=None):
"""
Symbolically execute the first basic block of the specified function,
then return the resulting state. We prepare the state using the already
existing state in fastpath mode (if available).
:param function_addr: The function address
:return: A symbolic state if succeeded, None otherwise
"""
if function_addr is None:
return None # depends on [control=['if'], data=[]]
if function_addr in self._symbolic_function_initial_state:
return self._symbolic_function_initial_state[function_addr] # depends on [control=['if'], data=['function_addr']]
if fastpath_mode_state is not None:
fastpath_state = fastpath_mode_state # depends on [control=['if'], data=['fastpath_mode_state']]
elif function_addr in self._function_input_states:
fastpath_state = self._function_input_states[function_addr] # depends on [control=['if'], data=['function_addr']]
else:
raise AngrCFGError('The impossible happened. Please report to Fish.')
symbolic_initial_state = self.project.factory.entry_state(mode='symbolic')
if fastpath_state is not None:
symbolic_initial_state = self.project.simos.prepare_call_state(fastpath_state, initial_state=symbolic_initial_state) # depends on [control=['if'], data=['fastpath_state']]
# Find number of instructions of start block
func = self.project.kb.functions.get(function_addr)
start_block = func._get_block(function_addr)
num_instr = start_block.instructions - 1
symbolic_initial_state.ip = function_addr
path = self.project.factory.path(symbolic_initial_state)
try:
sim_successors = self.project.factory.successors(path.state, num_inst=num_instr) # depends on [control=['try'], data=[]]
except (SimError, AngrError):
return None # depends on [control=['except'], data=[]]
# We execute all but the last instruction in this basic block, so we have a cleaner
# state
# Start execution!
exits = sim_successors.flat_successors + sim_successors.unsat_successors
if exits:
final_st = None
for ex in exits:
if ex.satisfiable():
final_st = ex
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ex']] # depends on [control=['if'], data=[]]
else:
final_st = None
self._symbolic_function_initial_state[function_addr] = final_st
return final_st |
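The function above memoizes per-address results, caching a None failure the same way as a success so symbolic execution is never re-run for the same address. A minimal standalone sketch of that pattern (all names here are hypothetical stand-ins, not angr API):

```python
# Hypothetical stand-in for the memoization above (not angr API): failures
# (None) are cached alongside successes, mirroring the role of
# self._symbolic_function_initial_state.
class InitialStateCache:
    def __init__(self, compute):
        self._cache = {}          # addr -> state, or None for a cached failure
        self._compute = compute   # callable: addr -> state or None

    def get(self, addr):
        if addr is None:
            return None
        if addr not in self._cache:
            self._cache[addr] = self._compute(addr)
        return self._cache[addr]

calls = []
def fake_symbolic_exec(addr):
    calls.append(addr)            # record how often real work happens
    return None if addr == 0x400100 else "state@%#x" % addr

cache = InitialStateCache(fake_symbolic_exec)
cache.get(0x400000)
cache.get(0x400000)               # served from cache, no second execution
cache.get(0x400100)               # the failure (None) is cached too
cache.get(0x400100)
print(calls)                      # [4194304, 4194560] -> one computation each
```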
def plot_accuracy(data, output_dir_path='.', output_filename='accuracy.png',
width=10, height=8):
"""Plot accuracy.
Args:
data: pandas DataFrame in *the* format.
"""
output_path = os.path.join(output_dir_path, output_filename)
max_val_data = get_epoch_max_val_acc(data)
max_val_label = round(max_val_data['acc'].values[0], 4)
# max_val_epoch = max_val_data['epoch'].values[0]
max_epoch_data = data[data['epoch'] == data['epoch'].max()]
plot = ggplot(data, aes('epoch', 'acc', color='factor(data)')) + \
geom_line(size=1, show_legend=False) + \
geom_vline(aes(xintercept='epoch', color='data'),
data=max_val_data, alpha=0.5, show_legend=False) + \
geom_label(aes('epoch', 'acc'), data=max_val_data,
label=max_val_label, nudge_y=-0.02, va='top', label_size=0,
show_legend=False) + \
geom_text(aes('epoch', 'acc', label='data'), data=max_epoch_data,
nudge_x=2, ha='center', show_legend=False) + \
geom_point(aes('epoch', 'acc'), data=max_val_data,
show_legend=False) + \
labs(y='Accuracy', x='Epochs') + \
theme_bw(base_family='Arial', base_size=15) + \
scale_color_manual(['#ef8a62', '#67a9cf', "#f7f7f7"])
plot.save(output_path, width=width, height=height) | def function[plot_accuracy, parameter[data, output_dir_path, output_filename, width, height]]:
constant[Plot accuracy.
Args:
data: pandas DataFrame in *the* format.
]
variable[output_path] assign[=] call[name[os].path.join, parameter[name[output_dir_path], name[output_filename]]]
variable[max_val_data] assign[=] call[name[get_epoch_max_val_acc], parameter[name[data]]]
variable[max_val_label] assign[=] call[name[round], parameter[call[call[name[max_val_data]][constant[acc]].values][constant[0]], constant[4]]]
variable[max_epoch_data] assign[=] call[name[data]][compare[call[name[data]][constant[epoch]] equal[==] call[call[name[data]][constant[epoch]].max, parameter[]]]]
variable[plot] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[ggplot], parameter[name[data], call[name[aes], parameter[constant[epoch], constant[acc]]]]] + call[name[geom_line], parameter[]]] + call[name[geom_vline], parameter[call[name[aes], parameter[]]]]] + call[name[geom_label], parameter[call[name[aes], parameter[constant[epoch], constant[acc]]]]]] + call[name[geom_text], parameter[call[name[aes], parameter[constant[epoch], constant[acc]]]]]] + call[name[geom_point], parameter[call[name[aes], parameter[constant[epoch], constant[acc]]]]]] + call[name[labs], parameter[]]] + call[name[theme_bw], parameter[]]] + call[name[scale_color_manual], parameter[list[[<ast.Constant object at 0x7da1b0a9c820>, <ast.Constant object at 0x7da1b0a9ca30>, <ast.Constant object at 0x7da1b0a9caf0>]]]]]
call[name[plot].save, parameter[name[output_path]]] | keyword[def] identifier[plot_accuracy] ( identifier[data] , identifier[output_dir_path] = literal[string] , identifier[output_filename] = literal[string] ,
identifier[width] = literal[int] , identifier[height] = literal[int] ):
literal[string]
identifier[output_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[output_dir_path] , identifier[output_filename] )
identifier[max_val_data] = identifier[get_epoch_max_val_acc] ( identifier[data] )
identifier[max_val_label] = identifier[round] ( identifier[max_val_data] [ literal[string] ]. identifier[values] [ literal[int] ], literal[int] )
identifier[max_epoch_data] = identifier[data] [ identifier[data] [ literal[string] ]== identifier[data] [ literal[string] ]. identifier[max] ()]
identifier[plot] = identifier[ggplot] ( identifier[data] , identifier[aes] ( literal[string] , literal[string] , identifier[color] = literal[string] ))+ identifier[geom_line] ( identifier[size] = literal[int] , identifier[show_legend] = keyword[False] )+ identifier[geom_vline] ( identifier[aes] ( identifier[xintercept] = literal[string] , identifier[color] = literal[string] ),
identifier[data] = identifier[max_val_data] , identifier[alpha] = literal[int] , identifier[show_legend] = keyword[False] )+ identifier[geom_label] ( identifier[aes] ( literal[string] , literal[string] ), identifier[data] = identifier[max_val_data] ,
identifier[label] = identifier[max_val_label] , identifier[nudge_y] =- literal[int] , identifier[va] = literal[string] , identifier[label_size] = literal[int] ,
identifier[show_legend] = keyword[False] )+ identifier[geom_text] ( identifier[aes] ( literal[string] , literal[string] , identifier[label] = literal[string] ), identifier[data] = identifier[max_epoch_data] ,
identifier[nudge_x] = literal[int] , identifier[ha] = literal[string] , identifier[show_legend] = keyword[False] )+ identifier[geom_point] ( identifier[aes] ( literal[string] , literal[string] ), identifier[data] = identifier[max_val_data] ,
identifier[show_legend] = keyword[False] )+ identifier[labs] ( identifier[y] = literal[string] , identifier[x] = literal[string] )+ identifier[theme_bw] ( identifier[base_family] = literal[string] , identifier[base_size] = literal[int] )+ identifier[scale_color_manual] ([ literal[string] , literal[string] , literal[string] ])
identifier[plot] . identifier[save] ( identifier[output_path] , identifier[width] = identifier[width] , identifier[height] = identifier[height] ) | def plot_accuracy(data, output_dir_path='.', output_filename='accuracy.png', width=10, height=8):
"""Plot accuracy.
Args:
data: pandas DataFrame in *the* format.
"""
output_path = os.path.join(output_dir_path, output_filename)
max_val_data = get_epoch_max_val_acc(data)
max_val_label = round(max_val_data['acc'].values[0], 4)
# max_val_epoch = max_val_data['epoch'].values[0]
max_epoch_data = data[data['epoch'] == data['epoch'].max()]
plot = ggplot(data, aes('epoch', 'acc', color='factor(data)')) + geom_line(size=1, show_legend=False) + geom_vline(aes(xintercept='epoch', color='data'), data=max_val_data, alpha=0.5, show_legend=False) + geom_label(aes('epoch', 'acc'), data=max_val_data, label=max_val_label, nudge_y=-0.02, va='top', label_size=0, show_legend=False) + geom_text(aes('epoch', 'acc', label='data'), data=max_epoch_data, nudge_x=2, ha='center', show_legend=False) + geom_point(aes('epoch', 'acc'), data=max_val_data, show_legend=False) + labs(y='Accuracy', x='Epochs') + theme_bw(base_family='Arial', base_size=15) + scale_color_manual(['#ef8a62', '#67a9cf', '#f7f7f7'])
plot.save(output_path, width=width, height=height) |
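The docstring only promises *the* format; judging by the aesthetics above, a hypothetical long-form frame with `epoch`, `acc`, and a `data` split column could look like this. The actual call is left commented out since it also needs plotnine and the module-level `get_epoch_max_val_acc` helper.

```python
import pandas as pd

# Hypothetical input in the long format the plot aesthetics imply:
# one row per (split, epoch), accuracy in 'acc', split name in 'data'.
data = pd.DataFrame({
    "epoch": [1, 2, 3, 1, 2, 3],
    "acc":   [0.61, 0.72, 0.80, 0.58, 0.69, 0.74],
    "data":  ["train"] * 3 + ["val"] * 3,
})
# plot_accuracy(data, output_dir_path="/tmp")  # needs plotnine + get_epoch_max_val_acc
```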
def gpsWeek(year, month, day):
"returns (full) gpsWeek for given date (in UTC)"
hr = 12 #make sure you fall into right day, middle is save
return gpsFromUTC(year, month, day, hr, 0, 0.0)[0] | def function[gpsWeek, parameter[year, month, day]]:
constant[returns (full) gpsWeek for given date (in UTC)]
variable[hr] assign[=] constant[12]
return[call[call[name[gpsFromUTC], parameter[name[year], name[month], name[day], name[hr], constant[0], constant[0.0]]]][constant[0]]] | keyword[def] identifier[gpsWeek] ( identifier[year] , identifier[month] , identifier[day] ):
literal[string]
identifier[hr] = literal[int]
keyword[return] identifier[gpsFromUTC] ( identifier[year] , identifier[month] , identifier[day] , identifier[hr] , literal[int] , literal[int] )[ literal[int] ] | def gpsWeek(year, month, day):
"""returns (full) gpsWeek for given date (in UTC)"""
hr = 12 #make sure you fall into right day, middle is save
return gpsFromUTC(year, month, day, hr, 0, 0.0)[0] |
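For reference, a self-contained sketch under the standard assumption that `gpsFromUTC(...)[0]` counts whole weeks since the GPS epoch, Sunday 1980-01-06:

```python
import datetime

# Standalone equivalent, assuming gpsFromUTC(...)[0] is the number of whole
# weeks since the GPS epoch (Sunday 1980-01-06). Picking noon is unnecessary
# here because we work with dates directly.
def gps_week_standalone(year, month, day):
    epoch = datetime.date(1980, 1, 6)
    return (datetime.date(year, month, day) - epoch).days // 7

print(gps_week_standalone(1980, 1, 6))   # 0
print(gps_week_standalone(2024, 1, 6))   # 2295
```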
def depth(args):
"""
%prog depth anchorfile --qbed qbedfile --sbed sbedfile
Calculate the depths in the two genomes in comparison, given in --qbed and
--sbed. The synteny blocks will be layered on the genomes, and the
multiplicity will be summarized to stderr.
"""
from jcvi.utils.range import range_depth
p = OptionParser(depth.__doc__)
p.add_option("--depthfile",
help="Generate file with gene and depth [default: %default]")
p.add_option("--histogram", default=False, action="store_true",
help="Plot histograms in PDF")
p.add_option("--xmax", type="int", help="x-axis maximum to display in plot")
p.add_option("--title", default=None, help="Title to display in plot")
p.add_option("--quota", help="Force to use this quota, e.g. 1:1, 1:2 ...")
p.set_beds()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
anchorfile, = args
qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts)
depthfile = opts.depthfile
ac = AnchorFile(anchorfile)
qranges = []
sranges = []
blocks = ac.blocks
for ib in blocks:
q, s, t = zip(*ib)
q = [qorder[x] for x in q]
s = [sorder[x] for x in s]
qrange = (min(q)[0], max(q)[0])
srange = (min(s)[0], max(s)[0])
qranges.append(qrange)
sranges.append(srange)
if is_self:
qranges.append(srange)
qgenome = op.basename(qbed.filename).split(".")[0]
sgenome = op.basename(sbed.filename).split(".")[0]
qtag = "Genome {0} depths".format(qgenome)
print("{}:".format(qtag), file=sys.stderr)
dsq, details = range_depth(qranges, len(qbed))
if depthfile:
fw = open(depthfile, "w")
write_details(fw, details, qbed)
if is_self:
return
stag = "Genome {0} depths".format(sgenome)
print("{}:".format(stag), file=sys.stderr)
dss, details = range_depth(sranges, len(sbed))
if depthfile:
write_details(fw, details, sbed)
fw.close()
logging.debug("Depth written to `{0}`.".format(depthfile))
if not opts.histogram:
return
from jcvi.graphics.base import plt, quickplot_ax, savefig, normalize_axes
# Plot two histograms one for query genome, one for subject genome
plt.figure(1, (6, 3))
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
xmax = opts.xmax or max(4, max(list(dsq.keys()) + list(dss.keys())))  # list() so the union works on Python 3 dict views
if opts.quota:
speak, qpeak = opts.quota.split(":")
qpeak, speak = int(qpeak), int(speak)
else:
qpeak = find_peak(dsq)
speak = find_peak(dss)
qtag = "# of {} blocks per {} gene".format(sgenome, qgenome)
stag = "# of {} blocks per {} gene".format(qgenome, sgenome)
quickplot_ax(ax1, dss, 0, xmax, stag, ylabel="Percentage of genome",
highlight=range(1, speak + 1))
quickplot_ax(ax2, dsq, 0, xmax, qtag, ylabel=None,
highlight=range(1, qpeak + 1))
title = opts.title or "{} vs {} syntenic depths\n{}:{} pattern"\
.format(qgenome, sgenome, speak, qpeak)
root = f.add_axes([0, 0, 1, 1])
vs, pattern = title.split('\n')
root.text(.5, .97, vs, ha="center", va="center", color="darkslategray")
root.text(.5, .925, pattern, ha="center", va="center",
color="tomato", size=16)
print(title, file=sys.stderr)
normalize_axes(root)
pf = anchorfile.rsplit(".", 1)[0] + ".depth"
image_name = pf + ".pdf"
savefig(image_name) | def function[depth, parameter[args]]:
constant[
%prog depth anchorfile --qbed qbedfile --sbed sbedfile
Calculate the depths in the two genomes in comparison, given in --qbed and
--sbed. The synteny blocks will be layered on the genomes, and the
multiplicity will be summarized to stderr.
]
from relative_module[jcvi.utils.range] import module[range_depth]
variable[p] assign[=] call[name[OptionParser], parameter[name[depth].__doc__]]
call[name[p].add_option, parameter[constant[--depthfile]]]
call[name[p].add_option, parameter[constant[--histogram]]]
call[name[p].add_option, parameter[constant[--xmax]]]
call[name[p].add_option, parameter[constant[--title]]]
call[name[p].add_option, parameter[constant[--quota]]]
call[name[p].set_beds, parameter[]]
<ast.Tuple object at 0x7da1b094da80> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b094d9f0>]]
<ast.Tuple object at 0x7da1b094db40> assign[=] name[args]
<ast.Tuple object at 0x7da1b094ed70> assign[=] call[name[check_beds], parameter[name[anchorfile], name[p], name[opts]]]
variable[depthfile] assign[=] name[opts].depthfile
variable[ac] assign[=] call[name[AnchorFile], parameter[name[anchorfile]]]
variable[qranges] assign[=] list[[]]
variable[sranges] assign[=] list[[]]
variable[blocks] assign[=] name[ac].blocks
for taget[name[ib]] in starred[name[blocks]] begin[:]
<ast.Tuple object at 0x7da1b094df90> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da1b09eb640>]]
variable[q] assign[=] <ast.ListComp object at 0x7da1b09ea3e0>
variable[s] assign[=] <ast.ListComp object at 0x7da1b09e9e70>
variable[qrange] assign[=] tuple[[<ast.Subscript object at 0x7da1b09ea020>, <ast.Subscript object at 0x7da1b09eb1f0>]]
variable[srange] assign[=] tuple[[<ast.Subscript object at 0x7da1b09ea890>, <ast.Subscript object at 0x7da1b09ea380>]]
call[name[qranges].append, parameter[name[qrange]]]
call[name[sranges].append, parameter[name[srange]]]
if name[is_self] begin[:]
call[name[qranges].append, parameter[name[srange]]]
variable[qgenome] assign[=] call[call[call[name[op].basename, parameter[name[qbed].filename]].split, parameter[constant[.]]]][constant[0]]
variable[sgenome] assign[=] call[call[call[name[op].basename, parameter[name[sbed].filename]].split, parameter[constant[.]]]][constant[0]]
variable[qtag] assign[=] call[constant[Genome {0} depths].format, parameter[name[qgenome]]]
call[name[print], parameter[call[constant[{}:].format, parameter[name[qtag]]]]]
<ast.Tuple object at 0x7da1b09eb2e0> assign[=] call[name[range_depth], parameter[name[qranges], call[name[len], parameter[name[qbed]]]]]
if name[depthfile] begin[:]
variable[fw] assign[=] call[name[open], parameter[name[depthfile], constant[w]]]
call[name[write_details], parameter[name[fw], name[details], name[qbed]]]
if name[is_self] begin[:]
return[None]
variable[stag] assign[=] call[constant[Genome {0} depths].format, parameter[name[sgenome]]]
call[name[print], parameter[call[constant[{}:].format, parameter[name[stag]]]]]
<ast.Tuple object at 0x7da1b0832c20> assign[=] call[name[range_depth], parameter[name[sranges], call[name[len], parameter[name[sbed]]]]]
if name[depthfile] begin[:]
call[name[write_details], parameter[name[fw], name[details], name[sbed]]]
call[name[fw].close, parameter[]]
call[name[logging].debug, parameter[call[constant[Depth written to `{0}`.].format, parameter[name[depthfile]]]]]
if <ast.UnaryOp object at 0x7da1b0832bc0> begin[:]
return[None]
from relative_module[jcvi.graphics.base] import module[plt], module[quickplot_ax], module[savefig], module[normalize_axes]
call[name[plt].figure, parameter[constant[1], tuple[[<ast.Constant object at 0x7da1b08331c0>, <ast.Constant object at 0x7da1b08330a0>]]]]
<ast.Tuple object at 0x7da20c6e6ef0> assign[=] call[name[plt].subplots, parameter[constant[1], constant[2]]]
variable[xmax] assign[=] <ast.BoolOp object at 0x7da20c6e4940>
if name[opts].quota begin[:]
<ast.Tuple object at 0x7da20c6e6c80> assign[=] call[name[opts].quota.split, parameter[constant[:]]]
<ast.Tuple object at 0x7da20c6e6aa0> assign[=] tuple[[<ast.Call object at 0x7da20c6e7f70>, <ast.Call object at 0x7da20c6e6f80>]]
variable[qtag] assign[=] call[constant[# of {} blocks per {} gene].format, parameter[name[sgenome], name[qgenome]]]
variable[stag] assign[=] call[constant[# of {} blocks per {} gene].format, parameter[name[qgenome], name[sgenome]]]
call[name[quickplot_ax], parameter[name[ax1], name[dss], constant[0], name[xmax], name[stag]]]
call[name[quickplot_ax], parameter[name[ax2], name[dsq], constant[0], name[xmax], name[qtag]]]
variable[title] assign[=] <ast.BoolOp object at 0x7da1b08a0eb0>
variable[root] assign[=] call[name[f].add_axes, parameter[list[[<ast.Constant object at 0x7da1b08a2200>, <ast.Constant object at 0x7da1b08a2fe0>, <ast.Constant object at 0x7da1b08fc2b0>, <ast.Constant object at 0x7da1b08fc0d0>]]]]
<ast.Tuple object at 0x7da1b08fc370> assign[=] call[name[title].split, parameter[constant[
]]]
call[name[root].text, parameter[constant[0.5], constant[0.97], name[vs]]]
call[name[root].text, parameter[constant[0.5], constant[0.925], name[pattern]]]
call[name[print], parameter[name[title]]]
call[name[normalize_axes], parameter[name[root]]]
variable[pf] assign[=] binary_operation[call[call[name[anchorfile].rsplit, parameter[constant[.], constant[1]]]][constant[0]] + constant[.depth]]
variable[image_name] assign[=] binary_operation[name[pf] + constant[.pdf]]
call[name[savefig], parameter[name[image_name]]] | keyword[def] identifier[depth] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[utils] . identifier[range] keyword[import] identifier[range_depth]
identifier[p] = identifier[OptionParser] ( identifier[depth] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[type] = literal[string] , identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[None] , identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[help] = literal[string] )
identifier[p] . identifier[set_beds] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[anchorfile] ,= identifier[args]
identifier[qbed] , identifier[sbed] , identifier[qorder] , identifier[sorder] , identifier[is_self] = identifier[check_beds] ( identifier[anchorfile] , identifier[p] , identifier[opts] )
identifier[depthfile] = identifier[opts] . identifier[depthfile]
identifier[ac] = identifier[AnchorFile] ( identifier[anchorfile] )
identifier[qranges] =[]
identifier[sranges] =[]
identifier[blocks] = identifier[ac] . identifier[blocks]
keyword[for] identifier[ib] keyword[in] identifier[blocks] :
identifier[q] , identifier[s] , identifier[t] = identifier[zip] (* identifier[ib] )
identifier[q] =[ identifier[qorder] [ identifier[x] ] keyword[for] identifier[x] keyword[in] identifier[q] ]
identifier[s] =[ identifier[sorder] [ identifier[x] ] keyword[for] identifier[x] keyword[in] identifier[s] ]
identifier[qrange] =( identifier[min] ( identifier[q] )[ literal[int] ], identifier[max] ( identifier[q] )[ literal[int] ])
identifier[srange] =( identifier[min] ( identifier[s] )[ literal[int] ], identifier[max] ( identifier[s] )[ literal[int] ])
identifier[qranges] . identifier[append] ( identifier[qrange] )
identifier[sranges] . identifier[append] ( identifier[srange] )
keyword[if] identifier[is_self] :
identifier[qranges] . identifier[append] ( identifier[srange] )
identifier[qgenome] = identifier[op] . identifier[basename] ( identifier[qbed] . identifier[filename] ). identifier[split] ( literal[string] )[ literal[int] ]
identifier[sgenome] = identifier[op] . identifier[basename] ( identifier[sbed] . identifier[filename] ). identifier[split] ( literal[string] )[ literal[int] ]
identifier[qtag] = literal[string] . identifier[format] ( identifier[qgenome] )
identifier[print] ( literal[string] . identifier[format] ( identifier[qtag] ), identifier[file] = identifier[sys] . identifier[stderr] )
identifier[dsq] , identifier[details] = identifier[range_depth] ( identifier[qranges] , identifier[len] ( identifier[qbed] ))
keyword[if] identifier[depthfile] :
identifier[fw] = identifier[open] ( identifier[depthfile] , literal[string] )
identifier[write_details] ( identifier[fw] , identifier[details] , identifier[qbed] )
keyword[if] identifier[is_self] :
keyword[return]
identifier[stag] = literal[string] . identifier[format] ( identifier[sgenome] )
identifier[print] ( literal[string] . identifier[format] ( identifier[stag] ), identifier[file] = identifier[sys] . identifier[stderr] )
identifier[dss] , identifier[details] = identifier[range_depth] ( identifier[sranges] , identifier[len] ( identifier[sbed] ))
keyword[if] identifier[depthfile] :
identifier[write_details] ( identifier[fw] , identifier[details] , identifier[sbed] )
identifier[fw] . identifier[close] ()
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[depthfile] ))
keyword[if] keyword[not] identifier[opts] . identifier[histogram] :
keyword[return]
keyword[from] identifier[jcvi] . identifier[graphics] . identifier[base] keyword[import] identifier[plt] , identifier[quickplot_ax] , identifier[savefig] , identifier[normalize_axes]
identifier[plt] . identifier[figure] ( literal[int] ,( literal[int] , literal[int] ))
identifier[f] ,( identifier[ax1] , identifier[ax2] )= identifier[plt] . identifier[subplots] ( literal[int] , literal[int] , identifier[sharey] = keyword[True] )
identifier[xmax] = identifier[opts] . identifier[xmax] keyword[or] identifier[max] ( literal[int] , identifier[max] ( identifier[dsq] . identifier[keys] ()+ identifier[dss] . identifier[keys] ()))
keyword[if] identifier[opts] . identifier[quota] :
identifier[speak] , identifier[qpeak] = identifier[opts] . identifier[quota] . identifier[split] ( literal[string] )
identifier[qpeak] , identifier[speak] = identifier[int] ( identifier[qpeak] ), identifier[int] ( identifier[speak] )
keyword[else] :
identifier[qpeak] = identifier[find_peak] ( identifier[dsq] )
identifier[speak] = identifier[find_peak] ( identifier[dss] )
identifier[qtag] = literal[string] . identifier[format] ( identifier[sgenome] , identifier[qgenome] )
identifier[stag] = literal[string] . identifier[format] ( identifier[qgenome] , identifier[sgenome] )
identifier[quickplot_ax] ( identifier[ax1] , identifier[dss] , literal[int] , identifier[xmax] , identifier[stag] , identifier[ylabel] = literal[string] ,
identifier[highlight] = identifier[range] ( literal[int] , identifier[speak] + literal[int] ))
identifier[quickplot_ax] ( identifier[ax2] , identifier[dsq] , literal[int] , identifier[xmax] , identifier[qtag] , identifier[ylabel] = keyword[None] ,
identifier[highlight] = identifier[range] ( literal[int] , identifier[qpeak] + literal[int] ))
identifier[title] = identifier[opts] . identifier[title] keyword[or] literal[string] . identifier[format] ( identifier[qgenome] , identifier[sgenome] , identifier[speak] , identifier[qpeak] )
identifier[root] = identifier[f] . identifier[add_axes] ([ literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[vs] , identifier[pattern] = identifier[title] . identifier[split] ( literal[string] )
identifier[root] . identifier[text] ( literal[int] , literal[int] , identifier[vs] , identifier[ha] = literal[string] , identifier[va] = literal[string] , identifier[color] = literal[string] )
identifier[root] . identifier[text] ( literal[int] , literal[int] , identifier[pattern] , identifier[ha] = literal[string] , identifier[va] = literal[string] ,
identifier[color] = literal[string] , identifier[size] = literal[int] )
identifier[print] ( identifier[title] , identifier[file] = identifier[sys] . identifier[stderr] )
identifier[normalize_axes] ( identifier[root] )
identifier[pf] = identifier[anchorfile] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]+ literal[string]
identifier[image_name] = identifier[pf] + literal[string]
identifier[savefig] ( identifier[image_name] ) | def depth(args):
"""
%prog depth anchorfile --qbed qbedfile --sbed sbedfile
Calculate the depths in the two genomes in comparison, given in --qbed and
--sbed. The synteny blocks will be layered on the genomes, and the
multiplicity will be summarized to stderr.
"""
from jcvi.utils.range import range_depth
p = OptionParser(depth.__doc__)
p.add_option('--depthfile', help='Generate file with gene and depth [default: %default]')
p.add_option('--histogram', default=False, action='store_true', help='Plot histograms in PDF')
p.add_option('--xmax', type='int', help='x-axis maximum to display in plot')
p.add_option('--title', default=None, help='Title to display in plot')
p.add_option('--quota', help='Force to use this quota, e.g. 1:1, 1:2 ...')
p.set_beds()
(opts, args) = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(anchorfile,) = args
(qbed, sbed, qorder, sorder, is_self) = check_beds(anchorfile, p, opts)
depthfile = opts.depthfile
ac = AnchorFile(anchorfile)
qranges = []
sranges = []
blocks = ac.blocks
for ib in blocks:
(q, s, t) = zip(*ib)
q = [qorder[x] for x in q]
s = [sorder[x] for x in s]
qrange = (min(q)[0], max(q)[0])
srange = (min(s)[0], max(s)[0])
qranges.append(qrange)
sranges.append(srange)
if is_self:
qranges.append(srange) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ib']]
qgenome = op.basename(qbed.filename).split('.')[0]
sgenome = op.basename(sbed.filename).split('.')[0]
qtag = 'Genome {0} depths'.format(qgenome)
print('{}:'.format(qtag), file=sys.stderr)
(dsq, details) = range_depth(qranges, len(qbed))
if depthfile:
fw = open(depthfile, 'w')
write_details(fw, details, qbed) # depends on [control=['if'], data=[]]
if is_self:
return # depends on [control=['if'], data=[]]
stag = 'Genome {0} depths'.format(sgenome)
print('{}:'.format(stag), file=sys.stderr)
(dss, details) = range_depth(sranges, len(sbed))
if depthfile:
write_details(fw, details, sbed)
fw.close()
logging.debug('Depth written to `{0}`.'.format(depthfile)) # depends on [control=['if'], data=[]]
if not opts.histogram:
return # depends on [control=['if'], data=[]]
from jcvi.graphics.base import plt, quickplot_ax, savefig, normalize_axes
# Plot two histograms one for query genome, one for subject genome
plt.figure(1, (6, 3))
(f, (ax1, ax2)) = plt.subplots(1, 2, sharey=True)
xmax = opts.xmax or max(4, max(list(dsq.keys()) + list(dss.keys())))
if opts.quota:
(speak, qpeak) = opts.quota.split(':')
(qpeak, speak) = (int(qpeak), int(speak)) # depends on [control=['if'], data=[]]
else:
qpeak = find_peak(dsq)
speak = find_peak(dss)
qtag = '# of {} blocks per {} gene'.format(sgenome, qgenome)
stag = '# of {} blocks per {} gene'.format(qgenome, sgenome)
quickplot_ax(ax1, dss, 0, xmax, stag, ylabel='Percentage of genome', highlight=range(1, speak + 1))
quickplot_ax(ax2, dsq, 0, xmax, qtag, ylabel=None, highlight=range(1, qpeak + 1))
title = opts.title or '{} vs {} syntenic depths\n{}:{} pattern'.format(qgenome, sgenome, speak, qpeak)
root = f.add_axes([0, 0, 1, 1])
(vs, pattern) = title.split('\n')
root.text(0.5, 0.97, vs, ha='center', va='center', color='darkslategray')
root.text(0.5, 0.925, pattern, ha='center', va='center', color='tomato', size=16)
print(title, file=sys.stderr)
normalize_axes(root)
pf = anchorfile.rsplit('.', 1)[0] + '.depth'
image_name = pf + '.pdf'
savefig(image_name) |
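The bookkeeping performed by `range_depth` can be illustrated with a toy version (an illustration of the idea only, not jcvi's implementation): count how many synteny blocks cover each gene slot, then histogram the coverage.

```python
from collections import Counter

# Toy illustration of the depth summary: how many blocks cover each gene
# slot, histogrammed by coverage depth. Not jcvi's actual range_depth.
def toy_range_depth(ranges, total):
    cover = Counter()
    for start, end in ranges:            # inclusive gene-index ranges
        for i in range(start, end + 1):
            cover[i] += 1
    return Counter(cover.get(i, 0) for i in range(total))

print(toy_range_depth([(0, 4), (3, 7)], 10))
# depth 1 on 6 slots, depth 2 on 2 slots, depth 0 on 2 uncovered slots
```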
def set_poolmember_state(self, id_pools, pools):
"""
Enable/Disable pool member by list
"""
data = dict()
uri = "api/v3/pool/real/%s/member/status/" % ';'.join(id_pools)
data["server_pools"] = pools
return self.put(uri, data=data) | def function[set_poolmember_state, parameter[self, id_pools, pools]]:
constant[
Enable/Disable pool member by list
]
variable[data] assign[=] call[name[dict], parameter[]]
variable[uri] assign[=] binary_operation[constant[api/v3/pool/real/%s/member/status/] <ast.Mod object at 0x7da2590d6920> call[constant[;].join, parameter[name[id_pools]]]]
call[name[data]][constant[server_pools]] assign[=] name[pools]
return[call[name[self].put, parameter[name[uri]]]] | keyword[def] identifier[set_poolmember_state] ( identifier[self] , identifier[id_pools] , identifier[pools] ):
literal[string]
identifier[data] = identifier[dict] ()
identifier[uri] = literal[string] % literal[string] . identifier[join] ( identifier[id_pools] )
identifier[data] [ literal[string] ]= identifier[pools]
keyword[return] identifier[self] . identifier[put] ( identifier[uri] , identifier[data] = identifier[data] ) | def set_poolmember_state(self, id_pools, pools):
"""
Enable/Disable pool member by list
"""
data = dict()
uri = 'api/v3/pool/real/%s/member/status/' % ';'.join(id_pools)
data['server_pools'] = pools
return self.put(uri, data=data) |
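A hypothetical call sketch; the payload field names below are illustrative assumptions about the expected shape, but the URI shows how the pool ids are joined with ';':

```python
# Hypothetical usage; 'client' would be an instance of the class above, and
# the payload fields are illustrative assumptions about the expected shape.
pools = [{"id": 27, "server_pool_members": [{"id": 301, "member_status": 7}]}]
# client.set_poolmember_state(["27", "28"], pools)
# -> PUT api/v3/pool/real/27;28/member/status/ with {"server_pools": pools}
```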
def image_task(self):
"""
Returns a JSON-schema document that represents a task entity.
"""
uri = "/%s/task" % self.uri_base
resp, resp_body = self.api.method_get(uri)
return resp_body | def function[image_task, parameter[self]]:
constant[
Returns a JSON-schema document that represents a task entity.
]
variable[uri] assign[=] binary_operation[constant[/%s/task] <ast.Mod object at 0x7da2590d6920> name[self].uri_base]
<ast.Tuple object at 0x7da1b052a110> assign[=] call[name[self].api.method_get, parameter[name[uri]]]
return[name[resp_body]] | keyword[def] identifier[image_task] ( identifier[self] ):
literal[string]
identifier[uri] = literal[string] % identifier[self] . identifier[uri_base]
identifier[resp] , identifier[resp_body] = identifier[self] . identifier[api] . identifier[method_get] ( identifier[uri] )
keyword[return] identifier[resp_body] | def image_task(self):
"""
Returns a JSON-schema document that represents a task entity.
"""
uri = '/%s/task' % self.uri_base
(resp, resp_body) = self.api.method_get(uri)
return resp_body |
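A usage sketch; the `uri_base` value is an illustrative assumption:

```python
uri_base = "images"            # illustrative value for self.uri_base
uri = "/%s/task" % uri_base
print(uri)                     # /images/task
# schema = imgs.image_task()   # 'imgs' would be a manager instance;
# print(schema)                # parsed JSON-schema body for a task entity
```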
def put(bucket, path=None, return_bin=False, action=None, local_file=None,
key=None, keyid=None, service_url=None, verify_ssl=None,
kms_keyid=None, location=None, role_arn=None, path_style=None,
https_enable=None, headers=None, full_headers=False):
'''
Create a new bucket, or upload an object to a bucket.
CLI Example to create a bucket:
.. code-block:: bash
salt myminion s3.put mybucket
CLI Example to upload an object to a bucket:
.. code-block:: bash
salt myminion s3.put mybucket remotepath local_file=/path/to/file
'''
if not headers:
headers = {}
else:
full_headers = True
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn, path_style, https_enable = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
location,
role_arn,
path_style,
https_enable,
)
return __utils__['s3.query'](method='PUT',
bucket=bucket,
path=path,
return_bin=return_bin,
local_file=local_file,
action=action,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
role_arn=role_arn,
path_style=path_style,
https_enable=https_enable,
headers=headers,
full_headers=full_headers) | def function[put, parameter[bucket, path, return_bin, action, local_file, key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn, path_style, https_enable, headers, full_headers]]:
constant[
Create a new bucket, or upload an object to a bucket.
CLI Example to create a bucket:
.. code-block:: bash
salt myminion s3.put mybucket
CLI Example to upload an object to a bucket:
.. code-block:: bash
salt myminion s3.put mybucket remotepath local_file=/path/to/file
]
if <ast.UnaryOp object at 0x7da1b216b670> begin[:]
variable[headers] assign[=] dictionary[[], []]
<ast.Tuple object at 0x7da1b21686d0> assign[=] call[name[_get_key], parameter[name[key], name[keyid], name[service_url], name[verify_ssl], name[kms_keyid], name[location], name[role_arn], name[path_style], name[https_enable]]]
return[call[call[name[__utils__]][constant[s3.query]], parameter[]]] | keyword[def] identifier[put] ( identifier[bucket] , identifier[path] = keyword[None] , identifier[return_bin] = keyword[False] , identifier[action] = keyword[None] , identifier[local_file] = keyword[None] ,
identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[service_url] = keyword[None] , identifier[verify_ssl] = keyword[None] ,
identifier[kms_keyid] = keyword[None] , identifier[location] = keyword[None] , identifier[role_arn] = keyword[None] , identifier[path_style] = keyword[None] ,
identifier[https_enable] = keyword[None] , identifier[headers] = keyword[None] , identifier[full_headers] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[headers] :
identifier[headers] ={}
keyword[else] :
identifier[full_headers] = keyword[True]
identifier[key] , identifier[keyid] , identifier[service_url] , identifier[verify_ssl] , identifier[kms_keyid] , identifier[location] , identifier[role_arn] , identifier[path_style] , identifier[https_enable] = identifier[_get_key] (
identifier[key] ,
identifier[keyid] ,
identifier[service_url] ,
identifier[verify_ssl] ,
identifier[kms_keyid] ,
identifier[location] ,
identifier[role_arn] ,
identifier[path_style] ,
identifier[https_enable] ,
)
keyword[return] identifier[__utils__] [ literal[string] ]( identifier[method] = literal[string] ,
identifier[bucket] = identifier[bucket] ,
identifier[path] = identifier[path] ,
identifier[return_bin] = identifier[return_bin] ,
identifier[local_file] = identifier[local_file] ,
identifier[action] = identifier[action] ,
identifier[key] = identifier[key] ,
identifier[keyid] = identifier[keyid] ,
identifier[kms_keyid] = identifier[kms_keyid] ,
identifier[service_url] = identifier[service_url] ,
identifier[verify_ssl] = identifier[verify_ssl] ,
identifier[location] = identifier[location] ,
identifier[role_arn] = identifier[role_arn] ,
identifier[path_style] = identifier[path_style] ,
identifier[https_enable] = identifier[https_enable] ,
identifier[headers] = identifier[headers] ,
identifier[full_headers] = identifier[full_headers] ) | def put(bucket, path=None, return_bin=False, action=None, local_file=None, key=None, keyid=None, service_url=None, verify_ssl=None, kms_keyid=None, location=None, role_arn=None, path_style=None, https_enable=None, headers=None, full_headers=False):
"""
Create a new bucket, or upload an object to a bucket.
CLI Example to create a bucket:
.. code-block:: bash
salt myminion s3.put mybucket
CLI Example to upload an object to a bucket:
.. code-block:: bash
salt myminion s3.put mybucket remotepath local_file=/path/to/file
"""
if not headers:
headers = {} # depends on [control=['if'], data=[]]
else:
full_headers = True
(key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn, path_style, https_enable) = _get_key(key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn, path_style, https_enable)
return __utils__['s3.query'](method='PUT', bucket=bucket, path=path, return_bin=return_bin, local_file=local_file, action=action, key=key, keyid=keyid, kms_keyid=kms_keyid, service_url=service_url, verify_ssl=verify_ssl, location=location, role_arn=role_arn, path_style=path_style, https_enable=https_enable, headers=headers, full_headers=full_headers) |
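One detail worth calling out is the headers handling at the top of `put`: any explicit headers force `full_headers=True`, so they are forwarded verbatim. A plain-Python illustration of that toggle:

```python
# Plain-Python illustration of the headers/full_headers toggle above:
# an empty headers argument yields a fresh dict, anything else flips
# full_headers on so the caller's headers are sent verbatim.
def resolve_headers(headers=None, full_headers=False):
    if not headers:
        headers = {}
    else:
        full_headers = True
    return headers, full_headers

print(resolve_headers())                          # ({}, False)
print(resolve_headers({"x-amz-acl": "private"}))  # ({'x-amz-acl': 'private'}, True)
```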
def file_info(self, path):
"""Returns the file info for the given remote file
:param path: path to the remote file
:returns: file info
:rtype: :class:`FileInfo` object or `None` if file
was not found
:raises: HTTPResponseError in case an HTTP error status was returned
"""
res = self._make_dav_request('PROPFIND', path, headers={'Depth': '0'})
if res:
return res[0]
return None | def function[file_info, parameter[self, path]]:
constant[Returns the file info for the given remote file
:param path: path to the remote file
:returns: file info
:rtype: :class:`FileInfo` object or `None` if file
was not found
:raises: HTTPResponseError in case an HTTP error status was returned
]
variable[res] assign[=] call[name[self]._make_dav_request, parameter[constant[PROPFIND], name[path]]]
if name[res] begin[:]
return[call[name[res]][constant[0]]]
return[constant[None]] | keyword[def] identifier[file_info] ( identifier[self] , identifier[path] ):
literal[string]
identifier[res] = identifier[self] . identifier[_make_dav_request] ( literal[string] , identifier[path] , identifier[headers] ={ literal[string] : literal[string] })
keyword[if] identifier[res] :
keyword[return] identifier[res] [ literal[int] ]
keyword[return] keyword[None] | def file_info(self, path):
"""Returns the file info for the given remote file
:param path: path to the remote file
:returns: file info
:rtype: :class:`FileInfo` object or `None` if file
was not found
:raises: HTTPResponseError in case an HTTP error status was returned
"""
res = self._make_dav_request('PROPFIND', path, headers={'Depth': '0'})
if res:
return res[0] # depends on [control=['if'], data=[]]
return None |
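A usage sketch, assuming an authenticated client instance; with `Depth: 0` the PROPFIND yields at most one entry, which is why the method returns `res[0]` or None:

```python
# Hypothetical usage; 'client' would be an authenticated instance of the
# class above, and the remote path is illustrative.
path = "/Documents/report.odt"
# info = client.file_info(path)
# print(info if info is not None else "not found")
```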
def unmounted(name,
device=None,
config='/etc/fstab',
persist=False,
user=None,
**kwargs):
'''
.. versionadded:: 0.17.0
Verify that a device is not mounted
name
The path to the location where the device is to be unmounted from
device
The device to be unmounted. This is optional because the device could
be mounted in multiple places.
.. versionadded:: 2015.5.0
config
Set an alternative location for the fstab, Default is ``/etc/fstab``
persist
Set if the mount should be purged from the fstab, Default is ``False``
user
The user to own the mount; this defaults to the user salt is
running as on the minion
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
update_mount_cache = False
if not name:
ret['result'] = False
ret['comment'] = 'Must provide name to mount.unmounted'
return ret
# Get the active data
active = __salt__['mount.active'](extended=True)
if name not in active:
# Nothing to unmount
ret['comment'] = 'Target was already unmounted'
if name in active:
# The mount is present! Unmount it
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Mount point {0} is mounted but should not '
'be').format(name)
return ret
if device:
out = __salt__['mount.umount'](name, device, user=user)
update_mount_cache = True
else:
out = __salt__['mount.umount'](name, user=user)
update_mount_cache = True
if isinstance(out, string_types):
# Failed to umount, the state has failed!
ret['comment'] = out
ret['result'] = False
elif out is True:
# umount worked!
ret['comment'] = 'Target was successfully unmounted'
ret['changes']['umount'] = True
else:
ret['comment'] = 'Execute set to False, Target was not unmounted'
ret['result'] = True
if update_mount_cache:
cache_result = __salt__['mount.delete_mount_cache'](name)
if persist:
device_key_name = 'device'
# Override default for Mac OS
if __grains__['os'] in ['MacOS', 'Darwin'] and config == '/etc/fstab':
config = "/etc/auto_salt"
fstab_data = __salt__['mount.automaster'](config)
elif 'AIX' in __grains__['os']:
device_key_name = 'dev'
if config == '/etc/fstab':
config = "/etc/filesystems"
fstab_data = __salt__['mount.filesystems'](config)
elif 'Solaris' in __grains__['os']:
if config == '/etc/fstab':
config = '/etc/vfstab'
fstab_data = __salt__['mount.vfstab'](config)
else:
fstab_data = __salt__['mount.fstab'](config)
if name not in fstab_data:
ret['comment'] += '. fstab entry not found'
else:
if device:
if fstab_data[name][device_key_name] != device:
ret['comment'] += '. fstab entry for device {0} not found'.format(device)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Mount point {0} is unmounted but needs to '
'be purged from {1} to be made '
'persistent').format(name, config)
return ret
else:
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.rm_automaster'](name, device, config)
elif 'AIX' in __grains__['os']:
out = __salt__['mount.rm_filesystems'](name, device, config)
elif 'Solaris' in __grains__['os']:
out = __salt__['mount.rm_vfstab'](name, device, config)
else:
out = __salt__['mount.rm_fstab'](name, device, config)
if out is not True:
ret['result'] = False
ret['comment'] += '. Failed to persist purge'
else:
ret['comment'] += '. Removed target from fstab'
ret['changes']['persist'] = 'purged'
return ret | def function[unmounted, parameter[name, device, config, persist, user]]:
constant[
.. versionadded:: 0.17.0
Verify that a device is not mounted
name
The path to the location where the device is to be unmounted from
device
The device to be unmounted. This is optional because the device could
be mounted in multiple places.
.. versionadded:: 2015.5.0
config
Set an alternative location for the fstab, Default is ``/etc/fstab``
persist
Set if the mount should be purged from the fstab, Default is ``False``
user
The user to own the mount; this defaults to the user salt is
running as on the minion
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da18f721180>, <ast.Constant object at 0x7da18f722bc0>, <ast.Constant object at 0x7da18f723f40>, <ast.Constant object at 0x7da18f721930>], [<ast.Name object at 0x7da18f720520>, <ast.Dict object at 0x7da18f722470>, <ast.Constant object at 0x7da18f721bd0>, <ast.Constant object at 0x7da18f721570>]]
variable[update_mount_cache] assign[=] constant[False]
if <ast.UnaryOp object at 0x7da18f722dd0> begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] constant[Must provide name to mount.unmounted]
return[name[ret]]
variable[active] assign[=] call[call[name[__salt__]][constant[mount.active]], parameter[]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[active]] begin[:]
call[name[ret]][constant[comment]] assign[=] constant[Target was already unmounted]
if compare[name[name] in name[active]] begin[:]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
call[name[ret]][constant[comment]] assign[=] call[constant[Mount point {0} is mounted but should not be].format, parameter[name[name]]]
return[name[ret]]
if name[device] begin[:]
variable[out] assign[=] call[call[name[__salt__]][constant[mount.umount]], parameter[name[name], name[device]]]
variable[update_mount_cache] assign[=] constant[True]
if call[name[isinstance], parameter[name[out], name[string_types]]] begin[:]
call[name[ret]][constant[comment]] assign[=] name[out]
call[name[ret]][constant[result]] assign[=] constant[False]
if name[update_mount_cache] begin[:]
variable[cache_result] assign[=] call[call[name[__salt__]][constant[mount.delete_mount_cache]], parameter[name[name]]]
if name[persist] begin[:]
variable[device_key_name] assign[=] constant[device]
if <ast.BoolOp object at 0x7da1b21e2ef0> begin[:]
variable[config] assign[=] constant[/etc/auto_salt]
variable[fstab_data] assign[=] call[call[name[__salt__]][constant[mount.automaster]], parameter[name[config]]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[fstab_data]] begin[:]
<ast.AugAssign object at 0x7da1b21e1750>
return[name[ret]] | keyword[def] identifier[unmounted] ( identifier[name] ,
identifier[device] = keyword[None] ,
identifier[config] = literal[string] ,
identifier[persist] = keyword[False] ,
identifier[user] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[True] ,
literal[string] : literal[string] }
identifier[update_mount_cache] = keyword[False]
keyword[if] keyword[not] identifier[name] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[active] = identifier[__salt__] [ literal[string] ]( identifier[extended] = keyword[True] )
keyword[if] identifier[name] keyword[not] keyword[in] identifier[active] :
identifier[ret] [ literal[string] ]= literal[string]
keyword[if] identifier[name] keyword[in] identifier[active] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]=( literal[string]
literal[string] ). identifier[format] ( identifier[name] )
keyword[return] identifier[ret]
keyword[if] identifier[device] :
identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[device] , identifier[user] = identifier[user] )
identifier[update_mount_cache] = keyword[True]
keyword[else] :
identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[user] = identifier[user] )
identifier[update_mount_cache] = keyword[True]
keyword[if] identifier[isinstance] ( identifier[out] , identifier[string_types] ):
identifier[ret] [ literal[string] ]= identifier[out]
identifier[ret] [ literal[string] ]= keyword[False]
keyword[elif] identifier[out] keyword[is] keyword[True] :
identifier[ret] [ literal[string] ]= literal[string]
identifier[ret] [ literal[string] ][ literal[string] ]= keyword[True]
keyword[else] :
identifier[ret] [ literal[string] ]= literal[string]
identifier[ret] [ literal[string] ]= keyword[True]
keyword[if] identifier[update_mount_cache] :
identifier[cache_result] = identifier[__salt__] [ literal[string] ]( identifier[name] )
keyword[if] identifier[persist] :
identifier[device_key_name] = literal[string]
keyword[if] identifier[__grains__] [ literal[string] ] keyword[in] [ literal[string] , literal[string] ] keyword[and] identifier[config] == literal[string] :
identifier[config] = literal[string]
identifier[fstab_data] = identifier[__salt__] [ literal[string] ]( identifier[config] )
keyword[elif] literal[string] keyword[in] identifier[__grains__] [ literal[string] ]:
identifier[device_key_name] = literal[string]
keyword[if] identifier[config] == literal[string] :
identifier[config] = literal[string]
identifier[fstab_data] = identifier[__salt__] [ literal[string] ]( identifier[config] )
keyword[elif] literal[string] keyword[in] identifier[__grains__] [ literal[string] ]:
keyword[if] identifier[config] == literal[string] :
identifier[config] = literal[string]
identifier[fstab_data] = identifier[__salt__] [ literal[string] ]( identifier[config] )
keyword[else] :
identifier[fstab_data] = identifier[__salt__] [ literal[string] ]( identifier[config] )
keyword[if] identifier[name] keyword[not] keyword[in] identifier[fstab_data] :
identifier[ret] [ literal[string] ]+= literal[string]
keyword[else] :
keyword[if] identifier[device] :
keyword[if] identifier[fstab_data] [ identifier[name] ][ identifier[device_key_name] ]!= identifier[device] :
identifier[ret] [ literal[string] ]+= literal[string] . identifier[format] ( identifier[device] )
keyword[return] identifier[ret]
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]=( literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[name] , identifier[config] )
keyword[return] identifier[ret]
keyword[else] :
keyword[if] identifier[__grains__] [ literal[string] ] keyword[in] [ literal[string] , literal[string] ]:
identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[device] , identifier[config] )
keyword[elif] literal[string] keyword[in] identifier[__grains__] [ literal[string] ]:
identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[device] , identifier[config] )
keyword[elif] literal[string] keyword[in] identifier[__grains__] [ literal[string] ]:
identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[device] , identifier[config] )
keyword[else] :
identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[device] , identifier[config] )
keyword[if] identifier[out] keyword[is] keyword[not] keyword[True] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]+= literal[string]
keyword[else] :
identifier[ret] [ literal[string] ]+= literal[string]
identifier[ret] [ literal[string] ][ literal[string] ]= literal[string]
keyword[return] identifier[ret] | def unmounted(name, device=None, config='/etc/fstab', persist=False, user=None, **kwargs):
"""
.. versionadded:: 0.17.0
Verify that a device is not mounted
name
The path to the location where the device is to be unmounted from
device
The device to be unmounted. This is optional because the device could
be mounted in multiple places.
.. versionadded:: 2015.5.0
config
Set an alternative location for the fstab, Default is ``/etc/fstab``
persist
Set if the mount should be purged from the fstab, Default is ``False``
user
The user to own the mount; this defaults to the user salt is
running as on the minion
"""
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
update_mount_cache = False
if not name:
ret['result'] = False
ret['comment'] = 'Must provide name to mount.unmounted'
return ret # depends on [control=['if'], data=[]]
# Get the active data
active = __salt__['mount.active'](extended=True)
if name not in active:
# Nothing to unmount
ret['comment'] = 'Target was already unmounted' # depends on [control=['if'], data=[]]
if name in active:
# The mount is present! Unmount it
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Mount point {0} is mounted but should not be'.format(name)
return ret # depends on [control=['if'], data=[]]
if device:
out = __salt__['mount.umount'](name, device, user=user)
update_mount_cache = True # depends on [control=['if'], data=[]]
else:
out = __salt__['mount.umount'](name, user=user)
update_mount_cache = True
if isinstance(out, string_types):
# Failed to umount, the state has failed!
ret['comment'] = out
ret['result'] = False # depends on [control=['if'], data=[]]
elif out is True:
# umount worked!
ret['comment'] = 'Target was successfully unmounted'
ret['changes']['umount'] = True # depends on [control=['if'], data=[]]
else:
ret['comment'] = 'Execute set to False, Target was not unmounted'
ret['result'] = True # depends on [control=['if'], data=['name']]
if update_mount_cache:
cache_result = __salt__['mount.delete_mount_cache'](name) # depends on [control=['if'], data=[]]
if persist:
device_key_name = 'device'
# Override default for Mac OS
if __grains__['os'] in ['MacOS', 'Darwin'] and config == '/etc/fstab':
config = '/etc/auto_salt'
fstab_data = __salt__['mount.automaster'](config) # depends on [control=['if'], data=[]]
elif 'AIX' in __grains__['os']:
device_key_name = 'dev'
if config == '/etc/fstab':
config = '/etc/filesystems' # depends on [control=['if'], data=['config']]
fstab_data = __salt__['mount.filesystems'](config) # depends on [control=['if'], data=[]]
elif 'Solaris' in __grains__['os']:
if config == '/etc/fstab':
config = '/etc/vfstab' # depends on [control=['if'], data=['config']]
fstab_data = __salt__['mount.vfstab'](config) # depends on [control=['if'], data=[]]
else:
fstab_data = __salt__['mount.fstab'](config)
if name not in fstab_data:
ret['comment'] += '. fstab entry not found' # depends on [control=['if'], data=[]]
else:
if device:
if fstab_data[name][device_key_name] != device:
ret['comment'] += '. fstab entry for device {0} not found'.format(device)
return ret # depends on [control=['if'], data=['device']] # depends on [control=['if'], data=[]]
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Mount point {0} is unmounted but needs to be purged from {1} to be made persistent'.format(name, config)
return ret # depends on [control=['if'], data=[]]
else:
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.rm_automaster'](name, device, config) # depends on [control=['if'], data=[]]
elif 'AIX' in __grains__['os']:
out = __salt__['mount.rm_filesystems'](name, device, config) # depends on [control=['if'], data=[]]
elif 'Solaris' in __grains__['os']:
out = __salt__['mount.rm_vfstab'](name, device, config) # depends on [control=['if'], data=[]]
else:
out = __salt__['mount.rm_fstab'](name, device, config)
if out is not True:
ret['result'] = False
ret['comment'] += '. Failed to persist purge' # depends on [control=['if'], data=[]]
else:
ret['comment'] += '. Removed target from fstab'
ret['changes']['persist'] = 'purged' # depends on [control=['if'], data=[]]
return ret |
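A minimal sketch of invoking this state directly; outside a running Salt minion the __salt__/__opts__/__grains__ dunders are absent, so the values below are purely illustrative:

ret = unmounted('/mnt/backup',
                device='/dev/sdb1',   # optional guard: only unmount if this device is mounted there
                config='/etc/fstab',
                persist=True)         # also purge the fstab entry
assert ret['result'] in (True, False, None)   # None is returned in test mode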
def get_bucket(self, environment, name, filename, source):
"""Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
"""
key = self.get_cache_key(name, filename)
checksum = self.get_source_checksum(source)
bucket = Bucket(environment, key, checksum)
self.load_bytecode(bucket)
return bucket | def function[get_bucket, parameter[self, environment, name, filename, source]]:
constant[Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
]
variable[key] assign[=] call[name[self].get_cache_key, parameter[name[name], name[filename]]]
variable[checksum] assign[=] call[name[self].get_source_checksum, parameter[name[source]]]
variable[bucket] assign[=] call[name[Bucket], parameter[name[environment], name[key], name[checksum]]]
call[name[self].load_bytecode, parameter[name[bucket]]]
return[name[bucket]] | keyword[def] identifier[get_bucket] ( identifier[self] , identifier[environment] , identifier[name] , identifier[filename] , identifier[source] ):
literal[string]
identifier[key] = identifier[self] . identifier[get_cache_key] ( identifier[name] , identifier[filename] )
identifier[checksum] = identifier[self] . identifier[get_source_checksum] ( identifier[source] )
identifier[bucket] = identifier[Bucket] ( identifier[environment] , identifier[key] , identifier[checksum] )
identifier[self] . identifier[load_bytecode] ( identifier[bucket] )
keyword[return] identifier[bucket] | def get_bucket(self, environment, name, filename, source):
"""Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
"""
key = self.get_cache_key(name, filename)
checksum = self.get_source_checksum(source)
bucket = Bucket(environment, key, checksum)
self.load_bytecode(bucket)
return bucket |
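This follows the jinja2 bytecode-cache protocol; a sketch of the usual round trip (get_bucket/set_bucket and Bucket.code are jinja2 API, the template source here is assumed):

source = '<h1>{{ title }}</h1>'   # assumed template source
bucket = cache.get_bucket(env, 'page.html', '/tpl/page.html', source)
if bucket.code is None:           # cache miss: compile once, then persist
    bucket.code = env.compile(source, 'page.html', '/tpl/page.html')
    cache.set_bucket(bucket)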
def get_crash_signature(error_line):
"""Try to get a crash signature from the given error_line string."""
search_term = None
match = CRASH_RE.match(error_line)
if match and is_helpful_search_term(match.group(1)):
search_term = match.group(1)
return search_term | def function[get_crash_signature, parameter[error_line]]:
constant[Try to get a crash signature from the given error_line string.]
variable[search_term] assign[=] constant[None]
variable[match] assign[=] call[name[CRASH_RE].match, parameter[name[error_line]]]
if <ast.BoolOp object at 0x7da1b060e680> begin[:]
variable[search_term] assign[=] call[name[match].group, parameter[constant[1]]]
return[name[search_term]] | keyword[def] identifier[get_crash_signature] ( identifier[error_line] ):
literal[string]
identifier[search_term] = keyword[None]
identifier[match] = identifier[CRASH_RE] . identifier[match] ( identifier[error_line] )
keyword[if] identifier[match] keyword[and] identifier[is_helpful_search_term] ( identifier[match] . identifier[group] ( literal[int] )):
identifier[search_term] = identifier[match] . identifier[group] ( literal[int] )
keyword[return] identifier[search_term] | def get_crash_signature(error_line):
"""Try to get a crash signature from the given error_line string."""
search_term = None
match = CRASH_RE.match(error_line)
if match and is_helpful_search_term(match.group(1)):
search_term = match.group(1) # depends on [control=['if'], data=[]]
return search_term |
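CRASH_RE and is_helpful_search_term live elsewhere in the module; a self-contained approximation for illustration (the real pattern and threshold may differ):

import re

CRASH_RE = re.compile(r'.+application crashed \[@ (.+)\]$')   # hypothetical stand-in

def is_helpful_search_term(term):                             # hypothetical stand-in
    return len(term.strip()) > 4

line = 'PROCESS-CRASH | application crashed [@ mozalloc_abort(char const*)]'
print(get_crash_signature(line))   # -> mozalloc_abort(char const*)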
def fb_get(self, quant, fb=16):
"""Return a fixed bit number
quant: number of bits to read
fb: number of bits in the integer and decimal part of the output
default is 16, resulting in a 16.16 fixed bit"""
raw_number = self.s_get(quant)
if quant == 1:
# special case, just return that unsigned value
return raw_number
return raw_number / (1 << fb) | def function[fb_get, parameter[self, quant, fb]]:
        constant[Return a fixed-point number.
        quant: number of bits to read
        fb: number of fractional bits in the result;
        the default of 16 yields a 16.16 fixed-point value]
variable[raw_number] assign[=] call[name[self].s_get, parameter[name[quant]]]
if compare[name[quant] equal[==] constant[1]] begin[:]
return[name[raw_number]]
return[binary_operation[name[raw_number] / binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> name[fb]]]] | keyword[def] identifier[fb_get] ( identifier[self] , identifier[quant] , identifier[fb] = literal[int] ):
literal[string]
identifier[raw_number] = identifier[self] . identifier[s_get] ( identifier[quant] )
keyword[if] identifier[quant] == literal[int] :
keyword[return] identifier[raw_number]
keyword[return] identifier[raw_number] /( literal[int] << identifier[fb] ) | def fb_get(self, quant, fb=16):
"""Return a fixed bit number
quant: number of bits to read
fb: number of bits in the integer and decimal part of the output
default is 16, resulting in a 16.16 fixed bit"""
raw_number = self.s_get(quant)
if quant == 1:
# special case, just return that unsigned value
return raw_number # depends on [control=['if'], data=[]]
return raw_number / (1 << fb) |
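A worked example of the conversion, as plain arithmetic (no bitstream needed):

raw = 0x00018000            # 32 bits read as a 16.16 fixed-point value
print(raw / (1 << 16))      # 98304 / 65536 -> 1.5
raw = 0x0180                # 16 bits read as 8.8
print(raw / (1 << 8))       # 384 / 256 -> 1.5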
def getLogger(*args, **kwargs):
"""
Wrapper around ``logging.getLogger`` that respects `overrideLogLevel <#setOverrideLogLevel>`_.
"""
logger = logging.getLogger(*args, **kwargs)
if _overrideLogLevel is not None:
logger.setLevel(logging.NOTSET)
return logger | def function[getLogger, parameter[]]:
constant[
Wrapper around ``logging.getLogger`` that respects `overrideLogLevel <#setOverrideLogLevel>`_.
]
variable[logger] assign[=] call[name[logging].getLogger, parameter[<ast.Starred object at 0x7da1b2344ee0>]]
if compare[name[_overrideLogLevel] is_not constant[None]] begin[:]
call[name[logger].setLevel, parameter[name[logging].NOTSET]]
return[name[logger]] | keyword[def] identifier[getLogger] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[logger] = identifier[logging] . identifier[getLogger] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[_overrideLogLevel] keyword[is] keyword[not] keyword[None] :
identifier[logger] . identifier[setLevel] ( identifier[logging] . identifier[NOTSET] )
keyword[return] identifier[logger] | def getLogger(*args, **kwargs):
"""
Wrapper around ``logging.getLogger`` that respects `overrideLogLevel <#setOverrideLogLevel>`_.
"""
logger = logging.getLogger(*args, **kwargs)
if _overrideLogLevel is not None:
logger.setLevel(logging.NOTSET) # depends on [control=['if'], data=[]]
return logger |
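Sketch of the intended interplay with the module's setOverrideLogLevel helper (assumed to set the module-level _overrideLogLevel; the exact effect depends on that helper):

setOverrideLogLevel('DEBUG')          # assumed companion helper in this module
log = getLogger('myapp.subsystem')    # per-logger level is reset to NOTSET
log.debug('emitted, assuming the override machinery lowers the effective level')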
def JoinTokens(tokens):
"""Join tokens (which may be a mix of unicode and str values).
See notes on unicode at the top. This function allows mixing encoded utf-8
byte string tokens with unicode tokens. (Python's default encoding is ASCII,
and we don't want to change that.)
We also want to support pure byte strings, so we can't get rid of the
try/except. Two tries necessary.
If someone really wanted to use another encoding, they could monkey patch
jsontemplate.JoinTokens (this function).
"""
try:
return ''.join(tokens)
except UnicodeDecodeError:
# This can still raise UnicodeDecodeError if that data isn't utf-8.
return ''.join(t.decode('utf-8') for t in tokens) | def function[JoinTokens, parameter[tokens]]:
constant[Join tokens (which may be a mix of unicode and str values).
See notes on unicode at the top. This function allows mixing encoded utf-8
byte string tokens with unicode tokens. (Python's default encoding is ASCII,
and we don't want to change that.)
We also want to support pure byte strings, so we can't get rid of the
try/except. Two tries necessary.
If someone really wanted to use another encoding, they could monkey patch
jsontemplate.JoinTokens (this function).
]
<ast.Try object at 0x7da18dc98b50> | keyword[def] identifier[JoinTokens] ( identifier[tokens] ):
literal[string]
keyword[try] :
keyword[return] literal[string] . identifier[join] ( identifier[tokens] )
keyword[except] identifier[UnicodeDecodeError] :
keyword[return] literal[string] . identifier[join] ( identifier[t] . identifier[decode] ( literal[string] ) keyword[for] identifier[t] keyword[in] identifier[tokens] ) | def JoinTokens(tokens):
"""Join tokens (which may be a mix of unicode and str values).
See notes on unicode at the top. This function allows mixing encoded utf-8
byte string tokens with unicode tokens. (Python's default encoding is ASCII,
and we don't want to change that.)
We also want to support pure byte strings, so we can't get rid of the
try/except. Two tries necessary.
If someone really wanted to use another encoding, they could monkey patch
jsontemplate.JoinTokens (this function).
"""
try:
return ''.join(tokens) # depends on [control=['try'], data=[]]
except UnicodeDecodeError:
# This can still raise UnicodeDecodeError if that data isn't utf-8.
return ''.join((t.decode('utf-8') for t in tokens)) # depends on [control=['except'], data=[]] |
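Under Python 2, which this code targets, mixing unicode with UTF-8 byte strings exercises the fallback path:

# ''.join first tries ASCII coercion, fails on the non-ASCII byte string,
# then the except branch decodes every token as UTF-8.
tokens = [u'snowman: ', '\xe2\x98\x83']   # second token is UTF-8-encoded bytes
print(JoinTokens(tokens))                 # -> u'snowman: \u2603'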
def readSentence(self):
"""
        Read every word until an empty word (NULL byte) is received.
:return: Reply word, tuple with read words.
"""
sentence = tuple(word for word in iter(self.readWord, b''))
self.log('--->', *sentence)
reply_word, words = sentence[0], sentence[1:]
if reply_word == '!fatal':
self.transport.close()
raise FatalError(words[0])
else:
return reply_word, words | def function[readSentence, parameter[self]]:
constant[
        Read every word until an empty word (NULL byte) is received.
:return: Reply word, tuple with read words.
]
variable[sentence] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b1083ca0>]]
call[name[self].log, parameter[constant[--->], <ast.Starred object at 0x7da1b1080b50>]]
<ast.Tuple object at 0x7da1b1081030> assign[=] tuple[[<ast.Subscript object at 0x7da1b1083dc0>, <ast.Subscript object at 0x7da1b10805e0>]]
if compare[name[reply_word] equal[==] constant[!fatal]] begin[:]
call[name[self].transport.close, parameter[]]
<ast.Raise object at 0x7da1b10810f0> | keyword[def] identifier[readSentence] ( identifier[self] ):
literal[string]
identifier[sentence] = identifier[tuple] ( identifier[word] keyword[for] identifier[word] keyword[in] identifier[iter] ( identifier[self] . identifier[readWord] , literal[string] ))
identifier[self] . identifier[log] ( literal[string] ,* identifier[sentence] )
identifier[reply_word] , identifier[words] = identifier[sentence] [ literal[int] ], identifier[sentence] [ literal[int] :]
keyword[if] identifier[reply_word] == literal[string] :
identifier[self] . identifier[transport] . identifier[close] ()
keyword[raise] identifier[FatalError] ( identifier[words] [ literal[int] ])
keyword[else] :
keyword[return] identifier[reply_word] , identifier[words] | def readSentence(self):
"""
Read every word untill empty word (NULL byte) is received.
:return: Reply word, tuple with read words.
"""
sentence = tuple((word for word in iter(self.readWord, b'')))
self.log('--->', *sentence)
(reply_word, words) = (sentence[0], sentence[1:])
if reply_word == '!fatal':
self.transport.close()
raise FatalError(words[0]) # depends on [control=['if'], data=[]]
else:
return (reply_word, words) |
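A sketch of the sentence framing this implements (RouterOS-style API words terminated by an empty word); conn stands in for an instance of this protocol class:

# Wire level, illustrative: b'!done'  b'=ret=ebddd18303a5'  b''
reply, words = conn.readSentence()   # conn: assumed instance of this class
if reply == '!trap':                 # non-fatal API error sentence
    raise RuntimeError(words)
print(reply, words)                  # ('!done', ('=ret=ebddd18303a5',))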
def check_available_aac_encoders():
"""Returns the available AAC encoders
Returns
----------
codecs : list(str)
List of available encoder codecs
"""
cmd = [
'ffmpeg',
'-v', 'error',
'-codecs'
]
output = sp.check_output(cmd)
aac_codecs = [
x for x in
output.splitlines() if "AAC (Advanced Audio Coding)" in str(x)
][0]
hay = aac_codecs.decode('ascii')
match = re.findall(r'\(encoders: ([^\)]*) \)', hay)
if match:
return match[0].split(" ")
else:
return None | def function[check_available_aac_encoders, parameter[]]:
constant[Returns the available AAC encoders
Returns
----------
codecs : list(str)
List of available encoder codecs
]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b04d9fc0>, <ast.Constant object at 0x7da1b04daf80>, <ast.Constant object at 0x7da1b04d88b0>, <ast.Constant object at 0x7da1b04d8550>]]
variable[output] assign[=] call[name[sp].check_output, parameter[name[cmd]]]
variable[aac_codecs] assign[=] call[<ast.ListComp object at 0x7da1b04dba30>][constant[0]]
variable[hay] assign[=] call[name[aac_codecs].decode, parameter[constant[ascii]]]
variable[match] assign[=] call[name[re].findall, parameter[constant[\(encoders: ([^\)]*) \)], name[hay]]]
if name[match] begin[:]
return[call[call[name[match]][constant[0]].split, parameter[constant[ ]]]] | keyword[def] identifier[check_available_aac_encoders] ():
literal[string]
identifier[cmd] =[
literal[string] ,
literal[string] , literal[string] ,
literal[string]
]
identifier[output] = identifier[sp] . identifier[check_output] ( identifier[cmd] )
identifier[aac_codecs] =[
identifier[x] keyword[for] identifier[x] keyword[in]
identifier[output] . identifier[splitlines] () keyword[if] literal[string] keyword[in] identifier[str] ( identifier[x] )
][ literal[int] ]
identifier[hay] = identifier[aac_codecs] . identifier[decode] ( literal[string] )
identifier[match] = identifier[re] . identifier[findall] ( literal[string] , identifier[hay] )
keyword[if] identifier[match] :
keyword[return] identifier[match] [ literal[int] ]. identifier[split] ( literal[string] )
keyword[else] :
keyword[return] keyword[None] | def check_available_aac_encoders():
"""Returns the available AAC encoders
Returns
----------
codecs : list(str)
List of available encoder codecs
"""
cmd = ['ffmpeg', '-v', 'error', '-codecs']
output = sp.check_output(cmd)
aac_codecs = [x for x in output.splitlines() if 'AAC (Advanced Audio Coding)' in str(x)][0]
hay = aac_codecs.decode('ascii')
match = re.findall('\\(encoders: ([^\\)]*) \\)', hay)
if match:
return match[0].split(' ') # depends on [control=['if'], data=[]]
else:
return None |
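Typical use is to prefer a higher-quality encoder when ffmpeg was built with it; a small sketch:

encoders = check_available_aac_encoders()
if encoders is not None and 'libfdk_aac' in encoders:
    codec = 'libfdk_aac'   # Fraunhofer FDK AAC, if compiled in
else:
    codec = 'aac'          # ffmpeg's native encoder as a fallback
print('using AAC encoder:', codec)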
def normalize_so_name(name):
"""
Handle different types of python installations
"""
if "cpython" in name:
return os.path.splitext(os.path.splitext(name)[0])[0]
# XXX: Special handling for Fedora python2 distribution
# See: https://github.com/python-rope/rope/issues/211
if name == "timemodule.so":
return "time"
return os.path.splitext(name)[0] | def function[normalize_so_name, parameter[name]]:
constant[
Handle different types of python installations
]
if compare[constant[cpython] in name[name]] begin[:]
return[call[call[name[os].path.splitext, parameter[call[call[name[os].path.splitext, parameter[name[name]]]][constant[0]]]]][constant[0]]]
if compare[name[name] equal[==] constant[timemodule.so]] begin[:]
return[constant[time]]
return[call[call[name[os].path.splitext, parameter[name[name]]]][constant[0]]] | keyword[def] identifier[normalize_so_name] ( identifier[name] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[name] :
keyword[return] identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[splitext] ( identifier[name] )[ literal[int] ])[ literal[int] ]
keyword[if] identifier[name] == literal[string] :
keyword[return] literal[string]
keyword[return] identifier[os] . identifier[path] . identifier[splitext] ( identifier[name] )[ literal[int] ] | def normalize_so_name(name):
"""
Handle different types of python installations
"""
if 'cpython' in name:
return os.path.splitext(os.path.splitext(name)[0])[0] # depends on [control=['if'], data=['name']]
# XXX: Special handling for Fedora python2 distribution
# See: https://github.com/python-rope/rope/issues/211
if name == 'timemodule.so':
return 'time' # depends on [control=['if'], data=[]]
return os.path.splitext(name)[0] |
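Expected behaviour on the three naming schemes it distinguishes:

assert normalize_so_name('math.cpython-311-x86_64-linux-gnu.so') == 'math'
assert normalize_so_name('timemodule.so') == 'time'     # Fedora python2 special case
assert normalize_so_name('readline.so') == 'readline'   # plain extension module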
def _is_version_duplicate(self):
""" Define should new version be created for object or no.
Reasons to provide custom check instead of default `ignore_revision_duplicates`:
- no need to compare all revisions - it is OK if right object version exists in any revision;
- need to compare object attributes (not serialized data) to avoid
version creation on wrong <float> vs <int> comparison;
"""
if self.id is None:
return False
try:
latest_version = Version.objects.get_for_object(self).latest('revision__date_created')
except Version.DoesNotExist:
return False
latest_version_object = latest_version._object_version.object
fields = self.get_version_fields()
return all([getattr(self, f) == getattr(latest_version_object, f) for f in fields]) | def function[_is_version_duplicate, parameter[self]]:
        constant[ Decide whether a new version should be created for the object.
        Reasons to provide a custom check instead of the default `ignore_revision_duplicates`:
        - no need to compare all revisions - it is OK if the right object version exists in any revision;
- need to compare object attributes (not serialized data) to avoid
version creation on wrong <float> vs <int> comparison;
]
if compare[name[self].id is constant[None]] begin[:]
return[constant[False]]
<ast.Try object at 0x7da1b0f05db0>
variable[latest_version_object] assign[=] name[latest_version]._object_version.object
variable[fields] assign[=] call[name[self].get_version_fields, parameter[]]
return[call[name[all], parameter[<ast.ListComp object at 0x7da1b0fc7070>]]] | keyword[def] identifier[_is_version_duplicate] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[id] keyword[is] keyword[None] :
keyword[return] keyword[False]
keyword[try] :
identifier[latest_version] = identifier[Version] . identifier[objects] . identifier[get_for_object] ( identifier[self] ). identifier[latest] ( literal[string] )
keyword[except] identifier[Version] . identifier[DoesNotExist] :
keyword[return] keyword[False]
identifier[latest_version_object] = identifier[latest_version] . identifier[_object_version] . identifier[object]
identifier[fields] = identifier[self] . identifier[get_version_fields] ()
keyword[return] identifier[all] ([ identifier[getattr] ( identifier[self] , identifier[f] )== identifier[getattr] ( identifier[latest_version_object] , identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[fields] ]) | def _is_version_duplicate(self):
""" Define should new version be created for object or no.
Reasons to provide custom check instead of default `ignore_revision_duplicates`:
- no need to compare all revisions - it is OK if right object version exists in any revision;
- need to compare object attributes (not serialized data) to avoid
version creation on wrong <float> vs <int> comparison;
"""
if self.id is None:
return False # depends on [control=['if'], data=[]]
try:
latest_version = Version.objects.get_for_object(self).latest('revision__date_created') # depends on [control=['try'], data=[]]
except Version.DoesNotExist:
return False # depends on [control=['except'], data=[]]
latest_version_object = latest_version._object_version.object
fields = self.get_version_fields()
return all([getattr(self, f) == getattr(latest_version_object, f) for f in fields]) |
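A sketch of how a caller might use this check to avoid redundant revisions (reversion.create_revision is the django-reversion API; the surrounding flow is assumed):

import reversion

def save_with_versioning(instance):
    if instance._is_version_duplicate():
        instance.save()                    # nothing relevant changed: plain save
    else:
        with reversion.create_revision():  # record a new Version alongside the save
            instance.save()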
def download_series_gui(frame, urls, directory, min_file_size, max_file_size, no_redirects):
"""
    Called when the user wants serial downloading.
"""
# create directory to save files
if not os.path.exists(directory):
os.makedirs(directory)
app = progress_class(frame, urls, directory, min_file_size, max_file_size, no_redirects) | def function[download_series_gui, parameter[frame, urls, directory, min_file_size, max_file_size, no_redirects]]:
constant[
    Called when the user wants serial downloading.
]
if <ast.UnaryOp object at 0x7da207f002b0> begin[:]
call[name[os].makedirs, parameter[name[directory]]]
variable[app] assign[=] call[name[progress_class], parameter[name[frame], name[urls], name[directory], name[min_file_size], name[max_file_size], name[no_redirects]]] | keyword[def] identifier[download_series_gui] ( identifier[frame] , identifier[urls] , identifier[directory] , identifier[min_file_size] , identifier[max_file_size] , identifier[no_redirects] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[directory] ):
identifier[os] . identifier[makedirs] ( identifier[directory] )
identifier[app] = identifier[progress_class] ( identifier[frame] , identifier[urls] , identifier[directory] , identifier[min_file_size] , identifier[max_file_size] , identifier[no_redirects] ) | def download_series_gui(frame, urls, directory, min_file_size, max_file_size, no_redirects):
"""
    Called when the user wants serial downloading.
""" # create directory to save files
if not os.path.exists(directory):
os.makedirs(directory) # depends on [control=['if'], data=[]]
app = progress_class(frame, urls, directory, min_file_size, max_file_size, no_redirects) |
def create_typedef(self, typedef_name, unused=None, with_defaults=True):
"""returns string, that contains valid C++ code, that defines typedef
to function type
:param name: the desired name of typedef
"""
return free_function_type_t.TYPEDEF_NAME_TEMPLATE % {
'typedef_name': typedef_name,
'return_type': self.return_type.build_decl_string(with_defaults),
'arguments': ','.join(
[_f(x, with_defaults) for x in self.arguments_types])} | def function[create_typedef, parameter[self, typedef_name, unused, with_defaults]]:
        constant[Return a string of valid C++ code that defines a typedef
        for this function type.
        :param typedef_name: the desired name of the typedef
]
return[binary_operation[name[free_function_type_t].TYPEDEF_NAME_TEMPLATE <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b1306c80>, <ast.Constant object at 0x7da1b1305ba0>, <ast.Constant object at 0x7da1b1306bc0>], [<ast.Name object at 0x7da1b1307b80>, <ast.Call object at 0x7da1b1304ca0>, <ast.Call object at 0x7da1b1306440>]]]] | keyword[def] identifier[create_typedef] ( identifier[self] , identifier[typedef_name] , identifier[unused] = keyword[None] , identifier[with_defaults] = keyword[True] ):
literal[string]
keyword[return] identifier[free_function_type_t] . identifier[TYPEDEF_NAME_TEMPLATE] %{
literal[string] : identifier[typedef_name] ,
literal[string] : identifier[self] . identifier[return_type] . identifier[build_decl_string] ( identifier[with_defaults] ),
literal[string] : literal[string] . identifier[join] (
[ identifier[_f] ( identifier[x] , identifier[with_defaults] ) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[arguments_types] ])} | def create_typedef(self, typedef_name, unused=None, with_defaults=True):
"""returns string, that contains valid C++ code, that defines typedef
to function type
:param name: the desired name of typedef
"""
return free_function_type_t.TYPEDEF_NAME_TEMPLATE % {'typedef_name': typedef_name, 'return_type': self.return_type.build_decl_string(with_defaults), 'arguments': ','.join([_f(x, with_defaults) for x in self.arguments_types])} |
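Presumably the rendered typedef for a function type like int(double, char) looks as follows (exact spacing depends on TYPEDEF_NAME_TEMPLATE):

from pygccxml import declarations

ft = declarations.free_function_type_t(
    return_type=declarations.int_t(),
    arguments_types=[declarations.double_t(), declarations.char_t()])
print(ft.create_typedef('my_callback_t'))
# -> roughly: typedef int ( *my_callback_t )( double, char )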
def dragMoveEvent(self, event):
"""Reimplement Qt method"""
index = self.indexAt(event.pos())
if index:
dst = self.get_filename(index)
if osp.isdir(dst):
event.acceptProposedAction()
else:
event.ignore()
else:
event.ignore() | def function[dragMoveEvent, parameter[self, event]]:
constant[Reimplement Qt method]
variable[index] assign[=] call[name[self].indexAt, parameter[call[name[event].pos, parameter[]]]]
if name[index] begin[:]
variable[dst] assign[=] call[name[self].get_filename, parameter[name[index]]]
if call[name[osp].isdir, parameter[name[dst]]] begin[:]
call[name[event].acceptProposedAction, parameter[]] | keyword[def] identifier[dragMoveEvent] ( identifier[self] , identifier[event] ):
literal[string]
identifier[index] = identifier[self] . identifier[indexAt] ( identifier[event] . identifier[pos] ())
keyword[if] identifier[index] :
identifier[dst] = identifier[self] . identifier[get_filename] ( identifier[index] )
keyword[if] identifier[osp] . identifier[isdir] ( identifier[dst] ):
identifier[event] . identifier[acceptProposedAction] ()
keyword[else] :
identifier[event] . identifier[ignore] ()
keyword[else] :
identifier[event] . identifier[ignore] () | def dragMoveEvent(self, event):
"""Reimplement Qt method"""
index = self.indexAt(event.pos())
if index:
dst = self.get_filename(index)
if osp.isdir(dst):
event.acceptProposedAction() # depends on [control=['if'], data=[]]
else:
event.ignore() # depends on [control=['if'], data=[]]
else:
event.ignore() |
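For this handler to fire at all, the view has to opt into drag and drop; a minimal setup inside the view's __init__ (standard Qt item-view API):

self.setAcceptDrops(True)        # deliver dragMoveEvent/dropEvent to this widget
self.setDragEnabled(True)        # allow drags to start from this view
self.setDropIndicatorShown(True) # visual feedback while hovering a drop target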
def next_occurrences(self, n=None, since=None):
"""Yield the next planned occurrences after the date "since"
        The `since` argument can be either a date or datetime object.
If not given, it defaults to the date of the last event that's
already planned.
If `n` is given, the result is limited to that many dates;
otherwise, infinite results may be generated.
        Note that fewer than `n` results may be yielded.
"""
scheme = self.recurrence_scheme
if scheme is None:
return ()
db = Session.object_session(self)
query = db.query(Event)
query = query.filter(Event.series_slug == self.slug)
query = query.order_by(desc(Event.date))
query = query.limit(1)
last_planned_event = query.one_or_none()
if since is None:
last_planned_event = query.one()
since = last_planned_event.date
elif since < last_planned_event.date:
since = last_planned_event.date
start = getattr(since, 'date', since)
start += relativedelta.relativedelta(days=+1)
if (scheme == 'monthly'
and last_planned_event
and last_planned_event.date.year == start.year
and last_planned_event.date.month == start.month):
# Monthly events try to have one event per month, so exclude
# the current month if there was already a meetup
start += relativedelta.relativedelta(months=+1)
start = start.replace(day=1)
start = datetime.datetime.combine(start, datetime.time(tzinfo=CET))
result = rrule.rrulestr(self.recurrence_rule, dtstart=start)
if n is not None:
result = itertools.islice(result, n)
return result | def function[next_occurrences, parameter[self, n, since]]:
constant[Yield the next planned occurrences after the date "since"
        The `since` argument can be either a date or datetime object.
If not given, it defaults to the date of the last event that's
already planned.
If `n` is given, the result is limited to that many dates;
otherwise, infinite results may be generated.
        Note that fewer than `n` results may be yielded.
]
variable[scheme] assign[=] name[self].recurrence_scheme
if compare[name[scheme] is constant[None]] begin[:]
return[tuple[[]]]
variable[db] assign[=] call[name[Session].object_session, parameter[name[self]]]
variable[query] assign[=] call[name[db].query, parameter[name[Event]]]
variable[query] assign[=] call[name[query].filter, parameter[compare[name[Event].series_slug equal[==] name[self].slug]]]
variable[query] assign[=] call[name[query].order_by, parameter[call[name[desc], parameter[name[Event].date]]]]
variable[query] assign[=] call[name[query].limit, parameter[constant[1]]]
variable[last_planned_event] assign[=] call[name[query].one_or_none, parameter[]]
if compare[name[since] is constant[None]] begin[:]
variable[last_planned_event] assign[=] call[name[query].one, parameter[]]
variable[since] assign[=] name[last_planned_event].date
variable[start] assign[=] call[name[getattr], parameter[name[since], constant[date], name[since]]]
<ast.AugAssign object at 0x7da207f99960>
if <ast.BoolOp object at 0x7da207f9b400> begin[:]
<ast.AugAssign object at 0x7da207f9b8e0>
variable[start] assign[=] call[name[start].replace, parameter[]]
variable[start] assign[=] call[name[datetime].datetime.combine, parameter[name[start], call[name[datetime].time, parameter[]]]]
variable[result] assign[=] call[name[rrule].rrulestr, parameter[name[self].recurrence_rule]]
if compare[name[n] is_not constant[None]] begin[:]
variable[result] assign[=] call[name[itertools].islice, parameter[name[result], name[n]]]
return[name[result]] | keyword[def] identifier[next_occurrences] ( identifier[self] , identifier[n] = keyword[None] , identifier[since] = keyword[None] ):
literal[string]
identifier[scheme] = identifier[self] . identifier[recurrence_scheme]
keyword[if] identifier[scheme] keyword[is] keyword[None] :
keyword[return] ()
identifier[db] = identifier[Session] . identifier[object_session] ( identifier[self] )
identifier[query] = identifier[db] . identifier[query] ( identifier[Event] )
identifier[query] = identifier[query] . identifier[filter] ( identifier[Event] . identifier[series_slug] == identifier[self] . identifier[slug] )
identifier[query] = identifier[query] . identifier[order_by] ( identifier[desc] ( identifier[Event] . identifier[date] ))
identifier[query] = identifier[query] . identifier[limit] ( literal[int] )
identifier[last_planned_event] = identifier[query] . identifier[one_or_none] ()
keyword[if] identifier[since] keyword[is] keyword[None] :
identifier[last_planned_event] = identifier[query] . identifier[one] ()
identifier[since] = identifier[last_planned_event] . identifier[date]
keyword[elif] identifier[since] < identifier[last_planned_event] . identifier[date] :
identifier[since] = identifier[last_planned_event] . identifier[date]
identifier[start] = identifier[getattr] ( identifier[since] , literal[string] , identifier[since] )
identifier[start] += identifier[relativedelta] . identifier[relativedelta] ( identifier[days] =+ literal[int] )
keyword[if] ( identifier[scheme] == literal[string]
keyword[and] identifier[last_planned_event]
keyword[and] identifier[last_planned_event] . identifier[date] . identifier[year] == identifier[start] . identifier[year]
keyword[and] identifier[last_planned_event] . identifier[date] . identifier[month] == identifier[start] . identifier[month] ):
identifier[start] += identifier[relativedelta] . identifier[relativedelta] ( identifier[months] =+ literal[int] )
identifier[start] = identifier[start] . identifier[replace] ( identifier[day] = literal[int] )
identifier[start] = identifier[datetime] . identifier[datetime] . identifier[combine] ( identifier[start] , identifier[datetime] . identifier[time] ( identifier[tzinfo] = identifier[CET] ))
identifier[result] = identifier[rrule] . identifier[rrulestr] ( identifier[self] . identifier[recurrence_rule] , identifier[dtstart] = identifier[start] )
keyword[if] identifier[n] keyword[is] keyword[not] keyword[None] :
identifier[result] = identifier[itertools] . identifier[islice] ( identifier[result] , identifier[n] )
keyword[return] identifier[result] | def next_occurrences(self, n=None, since=None):
"""Yield the next planned occurrences after the date "since"
        The `since` argument can be either a date or datetime object.
If not given, it defaults to the date of the last event that's
already planned.
If `n` is given, the result is limited to that many dates;
otherwise, infinite results may be generated.
        Note that fewer than `n` results may be yielded.
"""
scheme = self.recurrence_scheme
if scheme is None:
return () # depends on [control=['if'], data=[]]
db = Session.object_session(self)
query = db.query(Event)
query = query.filter(Event.series_slug == self.slug)
query = query.order_by(desc(Event.date))
query = query.limit(1)
last_planned_event = query.one_or_none()
if since is None:
last_planned_event = query.one()
since = last_planned_event.date # depends on [control=['if'], data=['since']]
elif since < last_planned_event.date:
since = last_planned_event.date # depends on [control=['if'], data=['since']]
start = getattr(since, 'date', since)
start += relativedelta.relativedelta(days=+1)
if scheme == 'monthly' and last_planned_event and (last_planned_event.date.year == start.year) and (last_planned_event.date.month == start.month):
# Monthly events try to have one event per month, so exclude
# the current month if there was already a meetup
start += relativedelta.relativedelta(months=+1)
start = start.replace(day=1) # depends on [control=['if'], data=[]]
start = datetime.datetime.combine(start, datetime.time(tzinfo=CET))
result = rrule.rrulestr(self.recurrence_rule, dtstart=start)
if n is not None:
result = itertools.islice(result, n) # depends on [control=['if'], data=['n']]
return result |
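The recurrence machinery is plain dateutil; a standalone sketch of the final two steps, with an assumed rule string (first Wednesday of every month):

import datetime, itertools
from dateutil import rrule

start = datetime.datetime(2024, 1, 1)
rule = rrule.rrulestr('FREQ=MONTHLY;BYDAY=+1WE', dtstart=start)  # assumed recurrence_rule
for occurrence in itertools.islice(rule, 3):
    print(occurrence)   # 2024-01-03, 2024-02-07, 2024-03-06 (midnight each)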
async def shutdown(self):
"""
Shut down this Looper.
"""
logger.display("Looper shutting down now...", extra={"cli": False})
self.running = False
start = time.perf_counter()
if not self.runFut.done():
await self.runFut
self.stopall()
logger.display("Looper shut down in {:.3f} seconds.".
format(time.perf_counter() - start), extra={"cli": False})
# Unset signal handlers, bug: https://bugs.python.org/issue23548
for sig_name in self.signals:
logger.debug("Unsetting handler for {}".format(sig_name))
sig_num = getattr(signal, sig_name)
self.loop.remove_signal_handler(sig_num) | <ast.AsyncFunctionDef object at 0x7da1b170c580> | keyword[async] keyword[def] identifier[shutdown] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[display] ( literal[string] , identifier[extra] ={ literal[string] : keyword[False] })
identifier[self] . identifier[running] = keyword[False]
identifier[start] = identifier[time] . identifier[perf_counter] ()
keyword[if] keyword[not] identifier[self] . identifier[runFut] . identifier[done] ():
keyword[await] identifier[self] . identifier[runFut]
identifier[self] . identifier[stopall] ()
identifier[logger] . identifier[display] ( literal[string] .
identifier[format] ( identifier[time] . identifier[perf_counter] ()- identifier[start] ), identifier[extra] ={ literal[string] : keyword[False] })
keyword[for] identifier[sig_name] keyword[in] identifier[self] . identifier[signals] :
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[sig_name] ))
identifier[sig_num] = identifier[getattr] ( identifier[signal] , identifier[sig_name] )
identifier[self] . identifier[loop] . identifier[remove_signal_handler] ( identifier[sig_num] ) | async def shutdown(self):
"""
Shut down this Looper.
"""
logger.display('Looper shutting down now...', extra={'cli': False})
self.running = False
start = time.perf_counter()
if not self.runFut.done():
await self.runFut # depends on [control=['if'], data=[]]
self.stopall()
logger.display('Looper shut down in {:.3f} seconds.'.format(time.perf_counter() - start), extra={'cli': False})
# Unset signal handlers, bug: https://bugs.python.org/issue23548
for sig_name in self.signals:
logger.debug('Unsetting handler for {}'.format(sig_name))
sig_num = getattr(signal, sig_name)
self.loop.remove_signal_handler(sig_num) # depends on [control=['for'], data=['sig_name']] |
def normalized(self):
"""
Return a version of this object represented entirely using integer
values for the relative attributes.
>>> relativedelta(days=1.5, hours=2).normalized()
relativedelta(days=1, hours=14)
:return:
Returns a :class:`dateutil.relativedelta.relativedelta` object.
"""
# Cascade remainders down (rounding each to roughly nearest
# microsecond)
days = int(self.days)
hours_f = round(self.hours + 24 * (self.days - days), 11)
hours = int(hours_f)
minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
minutes = int(minutes_f)
seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
seconds = int(seconds_f)
microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))
# Constructor carries overflow back up with call to _fix()
return self.__class__(years=self.years, months=self.months,
days=days, hours=hours, minutes=minutes,
seconds=seconds, microseconds=microseconds,
leapdays=self.leapdays, year=self.year,
month=self.month, day=self.day,
weekday=self.weekday, hour=self.hour,
minute=self.minute, second=self.second,
microsecond=self.microsecond) | def function[normalized, parameter[self]]:
constant[
Return a version of this object represented entirely using integer
values for the relative attributes.
>>> relativedelta(days=1.5, hours=2).normalized()
relativedelta(days=1, hours=14)
:return:
Returns a :class:`dateutil.relativedelta.relativedelta` object.
]
variable[days] assign[=] call[name[int], parameter[name[self].days]]
variable[hours_f] assign[=] call[name[round], parameter[binary_operation[name[self].hours + binary_operation[constant[24] * binary_operation[name[self].days - name[days]]]], constant[11]]]
variable[hours] assign[=] call[name[int], parameter[name[hours_f]]]
variable[minutes_f] assign[=] call[name[round], parameter[binary_operation[name[self].minutes + binary_operation[constant[60] * binary_operation[name[hours_f] - name[hours]]]], constant[10]]]
variable[minutes] assign[=] call[name[int], parameter[name[minutes_f]]]
variable[seconds_f] assign[=] call[name[round], parameter[binary_operation[name[self].seconds + binary_operation[constant[60] * binary_operation[name[minutes_f] - name[minutes]]]], constant[8]]]
variable[seconds] assign[=] call[name[int], parameter[name[seconds_f]]]
variable[microseconds] assign[=] call[name[round], parameter[binary_operation[name[self].microseconds + binary_operation[constant[1000000.0] * binary_operation[name[seconds_f] - name[seconds]]]]]]
return[call[name[self].__class__, parameter[]]] | keyword[def] identifier[normalized] ( identifier[self] ):
literal[string]
identifier[days] = identifier[int] ( identifier[self] . identifier[days] )
identifier[hours_f] = identifier[round] ( identifier[self] . identifier[hours] + literal[int] *( identifier[self] . identifier[days] - identifier[days] ), literal[int] )
identifier[hours] = identifier[int] ( identifier[hours_f] )
identifier[minutes_f] = identifier[round] ( identifier[self] . identifier[minutes] + literal[int] *( identifier[hours_f] - identifier[hours] ), literal[int] )
identifier[minutes] = identifier[int] ( identifier[minutes_f] )
identifier[seconds_f] = identifier[round] ( identifier[self] . identifier[seconds] + literal[int] *( identifier[minutes_f] - identifier[minutes] ), literal[int] )
identifier[seconds] = identifier[int] ( identifier[seconds_f] )
identifier[microseconds] = identifier[round] ( identifier[self] . identifier[microseconds] + literal[int] *( identifier[seconds_f] - identifier[seconds] ))
keyword[return] identifier[self] . identifier[__class__] ( identifier[years] = identifier[self] . identifier[years] , identifier[months] = identifier[self] . identifier[months] ,
identifier[days] = identifier[days] , identifier[hours] = identifier[hours] , identifier[minutes] = identifier[minutes] ,
identifier[seconds] = identifier[seconds] , identifier[microseconds] = identifier[microseconds] ,
identifier[leapdays] = identifier[self] . identifier[leapdays] , identifier[year] = identifier[self] . identifier[year] ,
identifier[month] = identifier[self] . identifier[month] , identifier[day] = identifier[self] . identifier[day] ,
identifier[weekday] = identifier[self] . identifier[weekday] , identifier[hour] = identifier[self] . identifier[hour] ,
identifier[minute] = identifier[self] . identifier[minute] , identifier[second] = identifier[self] . identifier[second] ,
identifier[microsecond] = identifier[self] . identifier[microsecond] ) | def normalized(self):
"""
Return a version of this object represented entirely using integer
values for the relative attributes.
>>> relativedelta(days=1.5, hours=2).normalized()
relativedelta(days=1, hours=14)
:return:
Returns a :class:`dateutil.relativedelta.relativedelta` object.
"""
# Cascade remainders down (rounding each to roughly nearest
# microsecond)
days = int(self.days)
hours_f = round(self.hours + 24 * (self.days - days), 11)
hours = int(hours_f)
minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
minutes = int(minutes_f)
seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
seconds = int(seconds_f)
microseconds = round(self.microseconds + 1000000.0 * (seconds_f - seconds))
# Constructor carries overflow back up with call to _fix()
return self.__class__(years=self.years, months=self.months, days=days, hours=hours, minutes=minutes, seconds=seconds, microseconds=microseconds, leapdays=self.leapdays, year=self.year, month=self.month, day=self.day, weekday=self.weekday, hour=self.hour, minute=self.minute, second=self.second, microsecond=self.microsecond) |
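A second worked example showing the remainder cascading one unit down:

from dateutil.relativedelta import relativedelta

print(relativedelta(hours=1.75).normalized())
# -> relativedelta(hours=+1, minutes=+45)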
def getABCD(slh, a0=None, doubled_up=True):
"""Calculate the ABCD-linearization of an SLH model
Return the A, B, C, D and (a, c) matrices that linearize an SLH model
about a coherent displacement amplitude a0.
The equations of motion and the input-output relation are then:
dX = (A X + a) dt + B dA_in
dA_out = (C X + c) dt + D dA_in
where, if doubled_up == False
dX = [a_1, ..., a_m]
dA_in = [dA_1, ..., dA_n]
or if doubled_up == True
dX = [a_1, ..., a_m, a_1^*, ... a_m^*]
dA_in = [dA_1, ..., dA_n, dA_1^*, ..., dA_n^*]
Args:
slh: SLH object
a0: dictionary of coherent amplitudes ``{a1: a1_0, a2: a2_0, ...}``
with annihilation mode operators as keys and (numeric or symbolic)
amplitude as values.
doubled_up: boolean, necessary for phase-sensitive / active systems
Returns:
        A tuple (A, B, C, D, a, c)
with
* `A`: coupling of modes to each other
* `B`: coupling of external input fields to modes
* `C`: coupling of internal modes to output
* `D`: coupling of external input fields to output fields
* `a`: constant coherent input vector for mode e.o.m.
* `c`: constant coherent input vector of scattered amplitudes
contributing to the output
"""
from qnet.algebra.library.fock_operators import Create, Destroy
if a0 is None:
a0 = {}
# the different degrees of freedom
full_space = ProductSpace.create(slh.S.space, slh.L.space, slh.H.space)
modes = sorted(full_space.local_factors)
# various dimensions
ncav = len(modes)
cdim = slh.cdim
# initialize the matrices
if doubled_up:
A = np.zeros((2*ncav, 2*ncav), dtype=object)
B = np.zeros((2*ncav, 2*cdim), dtype=object)
C = np.zeros((2*cdim, 2*ncav), dtype=object)
a = np.zeros(2*ncav, dtype=object)
c = np.zeros(2*cdim, dtype=object)
else:
A = np.zeros((ncav, ncav), dtype=object)
B = np.zeros((ncav, cdim), dtype=object)
C = np.zeros((cdim, ncav), dtype=object)
a = np.zeros(ncav, dtype=object)
c = np.zeros(cdim, dtype=object)
def _as_complex(o):
if isinstance(o, Operator):
o = o.expand()
if o is IdentityOperator:
o = 1
elif o is ZeroOperator:
o = 0
elif isinstance(o, ScalarTimesOperator):
assert o.term is IdentityOperator
o = o.coeff
else:
raise ValueError("{} is not trivial operator".format(o))
try:
return complex(o)
except TypeError:
return o
D = np.array([[_as_complex(o) for o in Sjj] for Sjj in slh.S.matrix])
if doubled_up:
# need to explicitly compute D^* because numpy object-dtype array's
# conjugate() method doesn't work
Dc = np.array([[D[ii, jj].conjugate() for jj in range(cdim)]
for ii in range(cdim)])
D = np.vstack((np.hstack((D, np.zeros((cdim, cdim)))),
np.hstack((np.zeros((cdim, cdim)), Dc))))
# create substitutions to displace the model
mode_substitutions = {aj: aj + aj_0 * IdentityOperator
for aj, aj_0 in a0.items()}
mode_substitutions.update({
aj.dag(): aj.dag() + aj_0.conjugate() * IdentityOperator
for aj, aj_0 in a0.items()
})
if len(mode_substitutions):
slh_displaced = (slh.substitute(mode_substitutions).expand()
.simplify_scalar())
else:
slh_displaced = slh
# make symbols for the external field modes
noises = [OperatorSymbol('b_{}'.format(n), hs="ext_{}".format(n))
for n in range(cdim)]
# compute the QSDEs for the internal operators
eoms = [slh_displaced.symbolic_heisenberg_eom(Destroy(hs=s), noises=noises)
for s in modes]
# use the coefficients to generate A, B matrices
for jj in range(len(modes)):
coeffsjj = get_coeffs(eoms[jj])
a[jj] = coeffsjj[IdentityOperator]
if doubled_up:
a[jj+ncav] = coeffsjj[IdentityOperator].conjugate()
for kk, skk in enumerate(modes):
A[jj, kk] = coeffsjj[Destroy(hs=skk)]
if doubled_up:
A[jj+ncav, kk+ncav] = coeffsjj[Destroy(hs=skk)].conjugate()
A[jj, kk + ncav] = coeffsjj[Create(hs=skk)]
A[jj+ncav, kk] = coeffsjj[Create(hs=skk)].conjugate()
for kk, dAkk in enumerate(noises):
B[jj, kk] = coeffsjj[dAkk]
if doubled_up:
B[jj+ncav, kk+cdim] = coeffsjj[dAkk].conjugate()
B[jj, kk+cdim] = coeffsjj[dAkk.dag()]
B[jj + ncav, kk] = coeffsjj[dAkk.dag()].conjugate()
# use the coefficients in the L vector to generate the C, D
# matrices
for jj, Ljj in enumerate(slh_displaced.Ls):
coeffsjj = get_coeffs(Ljj)
c[jj] = coeffsjj[IdentityOperator]
if doubled_up:
c[jj+cdim] = coeffsjj[IdentityOperator].conjugate()
for kk, skk in enumerate(modes):
C[jj, kk] = coeffsjj[Destroy(hs=skk)]
if doubled_up:
C[jj+cdim, kk+ncav] = coeffsjj[Destroy(hs=skk)].conjugate()
C[jj, kk+ncav] = coeffsjj[Create(hs=skk)]
C[jj+cdim, kk] = coeffsjj[Create(hs=skk)].conjugate()
return map(SympyMatrix, (A, B, C, D, a, c)) | def function[getABCD, parameter[slh, a0, doubled_up]]:
constant[Calculate the ABCD-linearization of an SLH model
Return the A, B, C, D and (a, c) matrices that linearize an SLH model
about a coherent displacement amplitude a0.
The equations of motion and the input-output relation are then:
dX = (A X + a) dt + B dA_in
dA_out = (C X + c) dt + D dA_in
where, if doubled_up == False
dX = [a_1, ..., a_m]
dA_in = [dA_1, ..., dA_n]
or if doubled_up == True
dX = [a_1, ..., a_m, a_1^*, ... a_m^*]
dA_in = [dA_1, ..., dA_n, dA_1^*, ..., dA_n^*]
Args:
slh: SLH object
a0: dictionary of coherent amplitudes ``{a1: a1_0, a2: a2_0, ...}``
with annihilation mode operators as keys and (numeric or symbolic)
amplitude as values.
doubled_up: boolean, necessary for phase-sensitive / active systems
Returns:
        A tuple (A, B, C, D, a, c)
with
* `A`: coupling of modes to each other
* `B`: coupling of external input fields to modes
* `C`: coupling of internal modes to output
* `D`: coupling of external input fields to output fields
* `a`: constant coherent input vector for mode e.o.m.
* `c`: constant coherent input vector of scattered amplitudes
contributing to the output
]
from relative_module[qnet.algebra.library.fock_operators] import module[Create], module[Destroy]
if compare[name[a0] is constant[None]] begin[:]
variable[a0] assign[=] dictionary[[], []]
variable[full_space] assign[=] call[name[ProductSpace].create, parameter[name[slh].S.space, name[slh].L.space, name[slh].H.space]]
variable[modes] assign[=] call[name[sorted], parameter[name[full_space].local_factors]]
variable[ncav] assign[=] call[name[len], parameter[name[modes]]]
variable[cdim] assign[=] name[slh].cdim
if name[doubled_up] begin[:]
variable[A] assign[=] call[name[np].zeros, parameter[tuple[[<ast.BinOp object at 0x7da20e955930>, <ast.BinOp object at 0x7da20e957c40>]]]]
variable[B] assign[=] call[name[np].zeros, parameter[tuple[[<ast.BinOp object at 0x7da20e9560e0>, <ast.BinOp object at 0x7da20e956530>]]]]
variable[C] assign[=] call[name[np].zeros, parameter[tuple[[<ast.BinOp object at 0x7da20e957070>, <ast.BinOp object at 0x7da20e957df0>]]]]
variable[a] assign[=] call[name[np].zeros, parameter[binary_operation[constant[2] * name[ncav]]]]
variable[c] assign[=] call[name[np].zeros, parameter[binary_operation[constant[2] * name[cdim]]]]
def function[_as_complex, parameter[o]]:
if call[name[isinstance], parameter[name[o], name[Operator]]] begin[:]
variable[o] assign[=] call[name[o].expand, parameter[]]
if compare[name[o] is name[IdentityOperator]] begin[:]
variable[o] assign[=] constant[1]
<ast.Try object at 0x7da18c4cf490>
variable[D] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da18c4cfc10>]]
if name[doubled_up] begin[:]
variable[Dc] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da18c4cf1c0>]]
variable[D] assign[=] call[name[np].vstack, parameter[tuple[[<ast.Call object at 0x7da18c4ccd00>, <ast.Call object at 0x7da18c4ce7d0>]]]]
variable[mode_substitutions] assign[=] <ast.DictComp object at 0x7da18c4ce290>
call[name[mode_substitutions].update, parameter[<ast.DictComp object at 0x7da18c4cc670>]]
if call[name[len], parameter[name[mode_substitutions]]] begin[:]
variable[slh_displaced] assign[=] call[call[call[name[slh].substitute, parameter[name[mode_substitutions]]].expand, parameter[]].simplify_scalar, parameter[]]
variable[noises] assign[=] <ast.ListComp object at 0x7da18c4cd870>
variable[eoms] assign[=] <ast.ListComp object at 0x7da18c4cf070>
for taget[name[jj]] in starred[call[name[range], parameter[call[name[len], parameter[name[modes]]]]]] begin[:]
variable[coeffsjj] assign[=] call[name[get_coeffs], parameter[call[name[eoms]][name[jj]]]]
call[name[a]][name[jj]] assign[=] call[name[coeffsjj]][name[IdentityOperator]]
if name[doubled_up] begin[:]
call[name[a]][binary_operation[name[jj] + name[ncav]]] assign[=] call[call[name[coeffsjj]][name[IdentityOperator]].conjugate, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18bccbdf0>, <ast.Name object at 0x7da18bcc8e80>]]] in starred[call[name[enumerate], parameter[name[modes]]]] begin[:]
call[name[A]][tuple[[<ast.Name object at 0x7da18bccae90>, <ast.Name object at 0x7da18bccb5e0>]]] assign[=] call[name[coeffsjj]][call[name[Destroy], parameter[]]]
if name[doubled_up] begin[:]
call[name[A]][tuple[[<ast.BinOp object at 0x7da18bccb070>, <ast.BinOp object at 0x7da18bcc9510>]]] assign[=] call[call[name[coeffsjj]][call[name[Destroy], parameter[]]].conjugate, parameter[]]
call[name[A]][tuple[[<ast.Name object at 0x7da18bcc9030>, <ast.BinOp object at 0x7da18bccaad0>]]] assign[=] call[name[coeffsjj]][call[name[Create], parameter[]]]
call[name[A]][tuple[[<ast.BinOp object at 0x7da18bcc8d30>, <ast.Name object at 0x7da18bccbd00>]]] assign[=] call[call[name[coeffsjj]][call[name[Create], parameter[]]].conjugate, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18bcca1d0>, <ast.Name object at 0x7da18bccb430>]]] in starred[call[name[enumerate], parameter[name[noises]]]] begin[:]
call[name[B]][tuple[[<ast.Name object at 0x7da18bccbc70>, <ast.Name object at 0x7da18bccbd60>]]] assign[=] call[name[coeffsjj]][name[dAkk]]
if name[doubled_up] begin[:]
call[name[B]][tuple[[<ast.BinOp object at 0x7da18bcc9f30>, <ast.BinOp object at 0x7da18bcc88e0>]]] assign[=] call[call[name[coeffsjj]][name[dAkk]].conjugate, parameter[]]
call[name[B]][tuple[[<ast.Name object at 0x7da18bccb970>, <ast.BinOp object at 0x7da18bcc88b0>]]] assign[=] call[name[coeffsjj]][call[name[dAkk].dag, parameter[]]]
call[name[B]][tuple[[<ast.BinOp object at 0x7da18bcc8820>, <ast.Name object at 0x7da18bccb730>]]] assign[=] call[call[name[coeffsjj]][call[name[dAkk].dag, parameter[]]].conjugate, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18bcc80a0>, <ast.Name object at 0x7da18bcc9810>]]] in starred[call[name[enumerate], parameter[name[slh_displaced].Ls]]] begin[:]
variable[coeffsjj] assign[=] call[name[get_coeffs], parameter[name[Ljj]]]
call[name[c]][name[jj]] assign[=] call[name[coeffsjj]][name[IdentityOperator]]
if name[doubled_up] begin[:]
call[name[c]][binary_operation[name[jj] + name[cdim]]] assign[=] call[call[name[coeffsjj]][name[IdentityOperator]].conjugate, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18bcc9ff0>, <ast.Name object at 0x7da18bccbb80>]]] in starred[call[name[enumerate], parameter[name[modes]]]] begin[:]
call[name[C]][tuple[[<ast.Name object at 0x7da204344760>, <ast.Name object at 0x7da204344a60>]]] assign[=] call[name[coeffsjj]][call[name[Destroy], parameter[]]]
if name[doubled_up] begin[:]
call[name[C]][tuple[[<ast.BinOp object at 0x7da204346b30>, <ast.BinOp object at 0x7da2043472b0>]]] assign[=] call[call[name[coeffsjj]][call[name[Destroy], parameter[]]].conjugate, parameter[]]
call[name[C]][tuple[[<ast.Name object at 0x7da204345f00>, <ast.BinOp object at 0x7da204345e10>]]] assign[=] call[name[coeffsjj]][call[name[Create], parameter[]]]
call[name[C]][tuple[[<ast.BinOp object at 0x7da2043446d0>, <ast.Name object at 0x7da204346560>]]] assign[=] call[call[name[coeffsjj]][call[name[Create], parameter[]]].conjugate, parameter[]]
return[call[name[map], parameter[name[SympyMatrix], tuple[[<ast.Name object at 0x7da2043474c0>, <ast.Name object at 0x7da2043469e0>, <ast.Name object at 0x7da2043465c0>, <ast.Name object at 0x7da2043444f0>, <ast.Name object at 0x7da204345ed0>, <ast.Name object at 0x7da2043471f0>]]]]] | keyword[def] identifier[getABCD] ( identifier[slh] , identifier[a0] = keyword[None] , identifier[doubled_up] = keyword[True] ):
literal[string]
keyword[from] identifier[qnet] . identifier[algebra] . identifier[library] . identifier[fock_operators] keyword[import] identifier[Create] , identifier[Destroy]
keyword[if] identifier[a0] keyword[is] keyword[None] :
identifier[a0] ={}
identifier[full_space] = identifier[ProductSpace] . identifier[create] ( identifier[slh] . identifier[S] . identifier[space] , identifier[slh] . identifier[L] . identifier[space] , identifier[slh] . identifier[H] . identifier[space] )
identifier[modes] = identifier[sorted] ( identifier[full_space] . identifier[local_factors] )
identifier[ncav] = identifier[len] ( identifier[modes] )
identifier[cdim] = identifier[slh] . identifier[cdim]
keyword[if] identifier[doubled_up] :
identifier[A] = identifier[np] . identifier[zeros] (( literal[int] * identifier[ncav] , literal[int] * identifier[ncav] ), identifier[dtype] = identifier[object] )
identifier[B] = identifier[np] . identifier[zeros] (( literal[int] * identifier[ncav] , literal[int] * identifier[cdim] ), identifier[dtype] = identifier[object] )
identifier[C] = identifier[np] . identifier[zeros] (( literal[int] * identifier[cdim] , literal[int] * identifier[ncav] ), identifier[dtype] = identifier[object] )
identifier[a] = identifier[np] . identifier[zeros] ( literal[int] * identifier[ncav] , identifier[dtype] = identifier[object] )
identifier[c] = identifier[np] . identifier[zeros] ( literal[int] * identifier[cdim] , identifier[dtype] = identifier[object] )
keyword[else] :
identifier[A] = identifier[np] . identifier[zeros] (( identifier[ncav] , identifier[ncav] ), identifier[dtype] = identifier[object] )
identifier[B] = identifier[np] . identifier[zeros] (( identifier[ncav] , identifier[cdim] ), identifier[dtype] = identifier[object] )
identifier[C] = identifier[np] . identifier[zeros] (( identifier[cdim] , identifier[ncav] ), identifier[dtype] = identifier[object] )
identifier[a] = identifier[np] . identifier[zeros] ( identifier[ncav] , identifier[dtype] = identifier[object] )
identifier[c] = identifier[np] . identifier[zeros] ( identifier[cdim] , identifier[dtype] = identifier[object] )
keyword[def] identifier[_as_complex] ( identifier[o] ):
keyword[if] identifier[isinstance] ( identifier[o] , identifier[Operator] ):
identifier[o] = identifier[o] . identifier[expand] ()
keyword[if] identifier[o] keyword[is] identifier[IdentityOperator] :
identifier[o] = literal[int]
keyword[elif] identifier[o] keyword[is] identifier[ZeroOperator] :
identifier[o] = literal[int]
keyword[elif] identifier[isinstance] ( identifier[o] , identifier[ScalarTimesOperator] ):
keyword[assert] identifier[o] . identifier[term] keyword[is] identifier[IdentityOperator]
identifier[o] = identifier[o] . identifier[coeff]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[o] ))
keyword[try] :
keyword[return] identifier[complex] ( identifier[o] )
keyword[except] identifier[TypeError] :
keyword[return] identifier[o]
identifier[D] = identifier[np] . identifier[array] ([[ identifier[_as_complex] ( identifier[o] ) keyword[for] identifier[o] keyword[in] identifier[Sjj] ] keyword[for] identifier[Sjj] keyword[in] identifier[slh] . identifier[S] . identifier[matrix] ])
keyword[if] identifier[doubled_up] :
identifier[Dc] = identifier[np] . identifier[array] ([[ identifier[D] [ identifier[ii] , identifier[jj] ]. identifier[conjugate] () keyword[for] identifier[jj] keyword[in] identifier[range] ( identifier[cdim] )]
keyword[for] identifier[ii] keyword[in] identifier[range] ( identifier[cdim] )])
identifier[D] = identifier[np] . identifier[vstack] (( identifier[np] . identifier[hstack] (( identifier[D] , identifier[np] . identifier[zeros] (( identifier[cdim] , identifier[cdim] )))),
identifier[np] . identifier[hstack] (( identifier[np] . identifier[zeros] (( identifier[cdim] , identifier[cdim] )), identifier[Dc] ))))
identifier[mode_substitutions] ={ identifier[aj] : identifier[aj] + identifier[aj_0] * identifier[IdentityOperator]
keyword[for] identifier[aj] , identifier[aj_0] keyword[in] identifier[a0] . identifier[items] ()}
identifier[mode_substitutions] . identifier[update] ({
identifier[aj] . identifier[dag] (): identifier[aj] . identifier[dag] ()+ identifier[aj_0] . identifier[conjugate] ()* identifier[IdentityOperator]
keyword[for] identifier[aj] , identifier[aj_0] keyword[in] identifier[a0] . identifier[items] ()
})
keyword[if] identifier[len] ( identifier[mode_substitutions] ):
identifier[slh_displaced] =( identifier[slh] . identifier[substitute] ( identifier[mode_substitutions] ). identifier[expand] ()
. identifier[simplify_scalar] ())
keyword[else] :
identifier[slh_displaced] = identifier[slh]
identifier[noises] =[ identifier[OperatorSymbol] ( literal[string] . identifier[format] ( identifier[n] ), identifier[hs] = literal[string] . identifier[format] ( identifier[n] ))
keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[cdim] )]
identifier[eoms] =[ identifier[slh_displaced] . identifier[symbolic_heisenberg_eom] ( identifier[Destroy] ( identifier[hs] = identifier[s] ), identifier[noises] = identifier[noises] )
keyword[for] identifier[s] keyword[in] identifier[modes] ]
keyword[for] identifier[jj] keyword[in] identifier[range] ( identifier[len] ( identifier[modes] )):
identifier[coeffsjj] = identifier[get_coeffs] ( identifier[eoms] [ identifier[jj] ])
identifier[a] [ identifier[jj] ]= identifier[coeffsjj] [ identifier[IdentityOperator] ]
keyword[if] identifier[doubled_up] :
identifier[a] [ identifier[jj] + identifier[ncav] ]= identifier[coeffsjj] [ identifier[IdentityOperator] ]. identifier[conjugate] ()
keyword[for] identifier[kk] , identifier[skk] keyword[in] identifier[enumerate] ( identifier[modes] ):
identifier[A] [ identifier[jj] , identifier[kk] ]= identifier[coeffsjj] [ identifier[Destroy] ( identifier[hs] = identifier[skk] )]
keyword[if] identifier[doubled_up] :
identifier[A] [ identifier[jj] + identifier[ncav] , identifier[kk] + identifier[ncav] ]= identifier[coeffsjj] [ identifier[Destroy] ( identifier[hs] = identifier[skk] )]. identifier[conjugate] ()
identifier[A] [ identifier[jj] , identifier[kk] + identifier[ncav] ]= identifier[coeffsjj] [ identifier[Create] ( identifier[hs] = identifier[skk] )]
identifier[A] [ identifier[jj] + identifier[ncav] , identifier[kk] ]= identifier[coeffsjj] [ identifier[Create] ( identifier[hs] = identifier[skk] )]. identifier[conjugate] ()
keyword[for] identifier[kk] , identifier[dAkk] keyword[in] identifier[enumerate] ( identifier[noises] ):
identifier[B] [ identifier[jj] , identifier[kk] ]= identifier[coeffsjj] [ identifier[dAkk] ]
keyword[if] identifier[doubled_up] :
identifier[B] [ identifier[jj] + identifier[ncav] , identifier[kk] + identifier[cdim] ]= identifier[coeffsjj] [ identifier[dAkk] ]. identifier[conjugate] ()
identifier[B] [ identifier[jj] , identifier[kk] + identifier[cdim] ]= identifier[coeffsjj] [ identifier[dAkk] . identifier[dag] ()]
identifier[B] [ identifier[jj] + identifier[ncav] , identifier[kk] ]= identifier[coeffsjj] [ identifier[dAkk] . identifier[dag] ()]. identifier[conjugate] ()
keyword[for] identifier[jj] , identifier[Ljj] keyword[in] identifier[enumerate] ( identifier[slh_displaced] . identifier[Ls] ):
identifier[coeffsjj] = identifier[get_coeffs] ( identifier[Ljj] )
identifier[c] [ identifier[jj] ]= identifier[coeffsjj] [ identifier[IdentityOperator] ]
keyword[if] identifier[doubled_up] :
identifier[c] [ identifier[jj] + identifier[cdim] ]= identifier[coeffsjj] [ identifier[IdentityOperator] ]. identifier[conjugate] ()
keyword[for] identifier[kk] , identifier[skk] keyword[in] identifier[enumerate] ( identifier[modes] ):
identifier[C] [ identifier[jj] , identifier[kk] ]= identifier[coeffsjj] [ identifier[Destroy] ( identifier[hs] = identifier[skk] )]
keyword[if] identifier[doubled_up] :
identifier[C] [ identifier[jj] + identifier[cdim] , identifier[kk] + identifier[ncav] ]= identifier[coeffsjj] [ identifier[Destroy] ( identifier[hs] = identifier[skk] )]. identifier[conjugate] ()
identifier[C] [ identifier[jj] , identifier[kk] + identifier[ncav] ]= identifier[coeffsjj] [ identifier[Create] ( identifier[hs] = identifier[skk] )]
identifier[C] [ identifier[jj] + identifier[cdim] , identifier[kk] ]= identifier[coeffsjj] [ identifier[Create] ( identifier[hs] = identifier[skk] )]. identifier[conjugate] ()
keyword[return] identifier[map] ( identifier[SympyMatrix] ,( identifier[A] , identifier[B] , identifier[C] , identifier[D] , identifier[a] , identifier[c] )) | def getABCD(slh, a0=None, doubled_up=True):
"""Calculate the ABCD-linearization of an SLH model
Return the A, B, C, D and (a, c) matrices that linearize an SLH model
about a coherent displacement amplitude a0.
The equations of motion and the input-output relation are then:
dX = (A X + a) dt + B dA_in
dA_out = (C X + c) dt + D dA_in
where, if doubled_up == False
dX = [a_1, ..., a_m]
dA_in = [dA_1, ..., dA_n]
or if doubled_up == True
dX = [a_1, ..., a_m, a_1^*, ... a_m^*]
dA_in = [dA_1, ..., dA_n, dA_1^*, ..., dA_n^*]
Args:
slh: SLH object
a0: dictionary of coherent amplitudes ``{a1: a1_0, a2: a2_0, ...}``
with annihilation mode operators as keys and (numeric or symbolic)
amplitude as values.
doubled_up: boolean, necessary for phase-sensitive / active systems
Returns:
    A tuple (A, B, C, D, a, c)
with
* `A`: coupling of modes to each other
* `B`: coupling of external input fields to modes
* `C`: coupling of internal modes to output
* `D`: coupling of external input fields to output fields
* `a`: constant coherent input vector for mode e.o.m.
* `c`: constant coherent input vector of scattered amplitudes
contributing to the output
"""
from qnet.algebra.library.fock_operators import Create, Destroy
if a0 is None:
a0 = {} # depends on [control=['if'], data=['a0']]
# the different degrees of freedom
full_space = ProductSpace.create(slh.S.space, slh.L.space, slh.H.space)
modes = sorted(full_space.local_factors)
# various dimensions
ncav = len(modes)
cdim = slh.cdim
# initialize the matrices
if doubled_up:
A = np.zeros((2 * ncav, 2 * ncav), dtype=object)
B = np.zeros((2 * ncav, 2 * cdim), dtype=object)
C = np.zeros((2 * cdim, 2 * ncav), dtype=object)
a = np.zeros(2 * ncav, dtype=object)
c = np.zeros(2 * cdim, dtype=object) # depends on [control=['if'], data=[]]
else:
A = np.zeros((ncav, ncav), dtype=object)
B = np.zeros((ncav, cdim), dtype=object)
C = np.zeros((cdim, ncav), dtype=object)
a = np.zeros(ncav, dtype=object)
c = np.zeros(cdim, dtype=object)
def _as_complex(o):
if isinstance(o, Operator):
o = o.expand()
if o is IdentityOperator:
o = 1 # depends on [control=['if'], data=['o']]
elif o is ZeroOperator:
o = 0 # depends on [control=['if'], data=['o']]
elif isinstance(o, ScalarTimesOperator):
assert o.term is IdentityOperator
o = o.coeff # depends on [control=['if'], data=[]]
else:
            raise ValueError('{} is not a trivial operator'.format(o)) # depends on [control=['if'], data=[]]
try:
return complex(o) # depends on [control=['try'], data=[]]
except TypeError:
return o # depends on [control=['except'], data=[]]
D = np.array([[_as_complex(o) for o in Sjj] for Sjj in slh.S.matrix])
if doubled_up:
# need to explicitly compute D^* because numpy object-dtype array's
# conjugate() method doesn't work
Dc = np.array([[D[ii, jj].conjugate() for jj in range(cdim)] for ii in range(cdim)])
D = np.vstack((np.hstack((D, np.zeros((cdim, cdim)))), np.hstack((np.zeros((cdim, cdim)), Dc)))) # depends on [control=['if'], data=[]]
# create substitutions to displace the model
mode_substitutions = {aj: aj + aj_0 * IdentityOperator for (aj, aj_0) in a0.items()}
mode_substitutions.update({aj.dag(): aj.dag() + aj_0.conjugate() * IdentityOperator for (aj, aj_0) in a0.items()})
if len(mode_substitutions):
slh_displaced = slh.substitute(mode_substitutions).expand().simplify_scalar() # depends on [control=['if'], data=[]]
else:
slh_displaced = slh
# make symbols for the external field modes
noises = [OperatorSymbol('b_{}'.format(n), hs='ext_{}'.format(n)) for n in range(cdim)]
# compute the QSDEs for the internal operators
eoms = [slh_displaced.symbolic_heisenberg_eom(Destroy(hs=s), noises=noises) for s in modes]
# use the coefficients to generate A, B matrices
for jj in range(len(modes)):
coeffsjj = get_coeffs(eoms[jj])
a[jj] = coeffsjj[IdentityOperator]
if doubled_up:
a[jj + ncav] = coeffsjj[IdentityOperator].conjugate() # depends on [control=['if'], data=[]]
for (kk, skk) in enumerate(modes):
A[jj, kk] = coeffsjj[Destroy(hs=skk)]
if doubled_up:
A[jj + ncav, kk + ncav] = coeffsjj[Destroy(hs=skk)].conjugate()
A[jj, kk + ncav] = coeffsjj[Create(hs=skk)]
A[jj + ncav, kk] = coeffsjj[Create(hs=skk)].conjugate() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
for (kk, dAkk) in enumerate(noises):
B[jj, kk] = coeffsjj[dAkk]
if doubled_up:
B[jj + ncav, kk + cdim] = coeffsjj[dAkk].conjugate()
B[jj, kk + cdim] = coeffsjj[dAkk.dag()]
B[jj + ncav, kk] = coeffsjj[dAkk.dag()].conjugate() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['jj']]
# use the coefficients in the L vector to generate the C, D
# matrices
for (jj, Ljj) in enumerate(slh_displaced.Ls):
coeffsjj = get_coeffs(Ljj)
c[jj] = coeffsjj[IdentityOperator]
if doubled_up:
c[jj + cdim] = coeffsjj[IdentityOperator].conjugate() # depends on [control=['if'], data=[]]
for (kk, skk) in enumerate(modes):
C[jj, kk] = coeffsjj[Destroy(hs=skk)]
if doubled_up:
C[jj + cdim, kk + ncav] = coeffsjj[Destroy(hs=skk)].conjugate()
C[jj, kk + ncav] = coeffsjj[Create(hs=skk)]
C[jj + cdim, kk] = coeffsjj[Create(hs=skk)].conjugate() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return map(SympyMatrix, (A, B, C, D, a, c)) |
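For the doubled_up case, the matrices above are assembled in block form: annihilation-operator couplings in the upper-left block, their conjugates in the lower-right, and the Create-operator couplings on the off-diagonals. A minimal numpy sketch of that block layout, standalone and not using QNET (A_minus and A_plus are hypothetical coefficient blocks for a two-mode system; the values are made up):

import numpy as np

# Hypothetical coefficients: A_minus[j, k] couples a_k into da_j/dt,
# A_plus[j, k] couples the conjugate mode a_k^* in.
A_minus = np.array([[-1.0 + 2.0j, 0.5], [0.5, -1.0 - 2.0j]])
A_plus = np.array([[0.0, 0.1j], [0.1j, 0.0]])

# Doubled-up layout, mirroring the fill pattern in getABCD:
# [[A_minus, A_plus], [conj(A_plus), conj(A_minus)]]
A = np.block([[A_minus, A_plus],
              [A_plus.conj(), A_minus.conj()]])
print(A.shape)  # (4, 4) for ncav = 2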
def torecarray(*args, **kwargs):
"""
Convenient shorthand for ``toarray(*args, **kwargs).view(np.recarray)``.
"""
import numpy as np
return toarray(*args, **kwargs).view(np.recarray) | def function[torecarray, parameter[]]:
constant[
Convenient shorthand for ``toarray(*args, **kwargs).view(np.recarray)``.
]
import module[numpy] as alias[np]
return[call[call[name[toarray], parameter[<ast.Starred object at 0x7da204564490>]].view, parameter[name[np].recarray]]] | keyword[def] identifier[torecarray] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[import] identifier[numpy] keyword[as] identifier[np]
keyword[return] identifier[toarray] (* identifier[args] ,** identifier[kwargs] ). identifier[view] ( identifier[np] . identifier[recarray] ) | def torecarray(*args, **kwargs):
"""
Convenient shorthand for ``toarray(*args, **kwargs).view(np.recarray)``.
"""
import numpy as np
return toarray(*args, **kwargs).view(np.recarray) |
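A usage sketch, assuming this is the petl helper of the same name and that petl and numpy are installed (the table contents are made up):

import petl as etl

table = [('foo', 'bar'), ('a', 1), ('b', 2)]
ra = etl.torecarray(table)
# A recarray exposes each field as an attribute:
print(ra.foo)  # ['a' 'b']
print(ra.bar)  # [1 2]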
def _output_validators(self):
"""Output common validator types based on usage."""
if self._walk_for_type('Boolean'):
print("from .validators import boolean")
if self._walk_for_type('Integer'):
print("from .validators import integer")
vlist = self.override.get_validator_list()
for override in vlist:
if override.startswith('common/'):
            override = override[len('common/'):]  # lstrip() removes characters, not a prefix
filename = "validators"
else:
filename = "%s_validators" % self.filename
print("from .%s import %s" % (filename, override)) | def function[_output_validators, parameter[self]]:
constant[Output common validator types based on usage.]
if call[name[self]._walk_for_type, parameter[constant[Boolean]]] begin[:]
call[name[print], parameter[constant[from .validators import boolean]]]
if call[name[self]._walk_for_type, parameter[constant[Integer]]] begin[:]
call[name[print], parameter[constant[from .validators import integer]]]
variable[vlist] assign[=] call[name[self].override.get_validator_list, parameter[]]
for taget[name[override]] in starred[name[vlist]] begin[:]
if call[name[override].startswith, parameter[constant[common/]]] begin[:]
variable[override] assign[=] call[name[override].lstrip, parameter[constant[common/]]]
variable[filename] assign[=] constant[validators]
call[name[print], parameter[binary_operation[constant[from .%s import %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18c4cda50>, <ast.Name object at 0x7da2041d9a20>]]]]] | keyword[def] identifier[_output_validators] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_walk_for_type] ( literal[string] ):
identifier[print] ( literal[string] )
keyword[if] identifier[self] . identifier[_walk_for_type] ( literal[string] ):
identifier[print] ( literal[string] )
identifier[vlist] = identifier[self] . identifier[override] . identifier[get_validator_list] ()
keyword[for] identifier[override] keyword[in] identifier[vlist] :
keyword[if] identifier[override] . identifier[startswith] ( literal[string] ):
identifier[override] = identifier[override] . identifier[lstrip] ( literal[string] )
identifier[filename] = literal[string]
keyword[else] :
identifier[filename] = literal[string] % identifier[self] . identifier[filename]
identifier[print] ( literal[string] %( identifier[filename] , identifier[override] )) | def _output_validators(self):
"""Output common validator types based on usage."""
if self._walk_for_type('Boolean'):
print('from .validators import boolean') # depends on [control=['if'], data=[]]
if self._walk_for_type('Integer'):
print('from .validators import integer') # depends on [control=['if'], data=[]]
vlist = self.override.get_validator_list()
for override in vlist:
if override.startswith('common/'):
            override = override[len('common/'):]  # lstrip() removes characters, not a prefix
filename = 'validators' # depends on [control=['if'], data=[]]
else:
filename = '%s_validators' % self.filename
print('from .%s import %s' % (filename, override)) # depends on [control=['for'], data=['override']] |
def small_factors(x, max_prime):
"""
    Factorize x using primes up to the max_prime limit.
    :param x: integer to factorize
    :param max_prime: upper bound on the primes considered
    :return: mapping of each prime factor to its multiplicity
"""
factors = DlogFprint.prime_factors(x, limit=max_prime)
return DlogFprint.factor_list_to_map(factors) | def function[small_factors, parameter[x, max_prime]]:
constant[
    Factorize x using primes up to the max_prime limit.
    :param x: integer to factorize
    :param max_prime: upper bound on the primes considered
    :return: mapping of each prime factor to its multiplicity
]
variable[factors] assign[=] call[name[DlogFprint].prime_factors, parameter[name[x]]]
return[call[name[DlogFprint].factor_list_to_map, parameter[name[factors]]]] | keyword[def] identifier[small_factors] ( identifier[x] , identifier[max_prime] ):
literal[string]
identifier[factors] = identifier[DlogFprint] . identifier[prime_factors] ( identifier[x] , identifier[limit] = identifier[max_prime] )
keyword[return] identifier[DlogFprint] . identifier[factor_list_to_map] ( identifier[factors] ) | def small_factors(x, max_prime):
"""
    Factorize x using primes up to the max_prime limit.
    :param x: integer to factorize
    :param max_prime: upper bound on the primes considered
    :return: mapping of each prime factor to its multiplicity
"""
factors = DlogFprint.prime_factors(x, limit=max_prime)
return DlogFprint.factor_list_to_map(factors) |
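The method above delegates to DlogFprint's own prime_factors routine; a standalone trial-division sketch of the same idea (factor with divisors up to a limit, returning a factor -> multiplicity map) might look like:

def small_factors_sketch(x, max_prime):
    """Trial-divide x by 2..max_prime; return {prime: multiplicity}."""
    factors = {}
    for d in range(2, max_prime + 1):
        while x % d == 0:
            factors[d] = factors.get(d, 0) + 1
            x //= d
    return factors

print(small_factors_sketch(360, 10))  # {2: 3, 3: 2, 5: 1}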
def check_between(v_min, v_max, **params):
"""Checks parameters are in a specified range
Parameters
----------
v_min : float, minimum allowed value (inclusive)
v_max : float, maximum allowed value (inclusive)
params : object
Named arguments, parameters to be checked
Raises
------
ValueError : unacceptable choice of parameters
"""
for p in params:
if params[p] < v_min or params[p] > v_max:
raise ValueError("Expected {} between {} and {}, "
"got {}".format(p, v_min, v_max, params[p])) | def function[check_between, parameter[v_min, v_max]]:
constant[Checks parameters are in a specified range
Parameters
----------
v_min : float, minimum allowed value (inclusive)
v_max : float, maximum allowed value (inclusive)
params : object
Named arguments, parameters to be checked
Raises
------
ValueError : unacceptable choice of parameters
]
for taget[name[p]] in starred[name[params]] begin[:]
if <ast.BoolOp object at 0x7da18fe90fa0> begin[:]
<ast.Raise object at 0x7da18fe93130> | keyword[def] identifier[check_between] ( identifier[v_min] , identifier[v_max] ,** identifier[params] ):
literal[string]
keyword[for] identifier[p] keyword[in] identifier[params] :
keyword[if] identifier[params] [ identifier[p] ]< identifier[v_min] keyword[or] identifier[params] [ identifier[p] ]> identifier[v_max] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[p] , identifier[v_min] , identifier[v_max] , identifier[params] [ identifier[p] ])) | def check_between(v_min, v_max, **params):
"""Checks parameters are in a specified range
Parameters
----------
v_min : float, minimum allowed value (inclusive)
v_max : float, maximum allowed value (inclusive)
params : object
Named arguments, parameters to be checked
Raises
------
ValueError : unacceptable choice of parameters
"""
for p in params:
if params[p] < v_min or params[p] > v_max:
raise ValueError('Expected {} between {} and {}, got {}'.format(p, v_min, v_max, params[p])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']] |
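A quick usage sketch (the parameter names here are illustrative):

# In-range parameters pass silently:
check_between(0.0, 1.0, alpha=0.25, beta=0.75)

# An out-of-range value raises ValueError:
try:
    check_between(0.0, 1.0, gamma=1.5)
except ValueError as err:
    print(err)  # Expected gamma between 0.0 and 1.0, got 1.5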
def xVal(self):
"""
Return the ``<c:xVal>`` element for this series as an oxml element.
This element contains the X values for this series.
"""
xml = self._xVal_tmpl.format(**{
'nsdecls': ' %s' % nsdecls('c'),
'numRef_xml': self.numRef_xml(
self._series.x_values_ref, self._series.number_format,
self._series.x_values
),
})
return parse_xml(xml) | def function[xVal, parameter[self]]:
constant[
Return the ``<c:xVal>`` element for this series as an oxml element.
This element contains the X values for this series.
]
variable[xml] assign[=] call[name[self]._xVal_tmpl.format, parameter[]]
return[call[name[parse_xml], parameter[name[xml]]]] | keyword[def] identifier[xVal] ( identifier[self] ):
literal[string]
identifier[xml] = identifier[self] . identifier[_xVal_tmpl] . identifier[format] (**{
literal[string] : literal[string] % identifier[nsdecls] ( literal[string] ),
literal[string] : identifier[self] . identifier[numRef_xml] (
identifier[self] . identifier[_series] . identifier[x_values_ref] , identifier[self] . identifier[_series] . identifier[number_format] ,
identifier[self] . identifier[_series] . identifier[x_values]
),
})
keyword[return] identifier[parse_xml] ( identifier[xml] ) | def xVal(self):
"""
Return the ``<c:xVal>`` element for this series as an oxml element.
This element contains the X values for this series.
"""
xml = self._xVal_tmpl.format(**{'nsdecls': ' %s' % nsdecls('c'), 'numRef_xml': self.numRef_xml(self._series.x_values_ref, self._series.number_format, self._series.x_values)})
return parse_xml(xml) |
def get_stars_of_children_of(self, component):
"""
        Same as get_children_of, except that if any of the children are orbits, this will recursively
        follow the tree and return a list of all descendant stars (children, grandchildren, etc.) under that orbit.
"""
stars = self.get_stars()
orbits = self.get_orbits()
stars_children = []
for child in self.get_children_of(component):
if child in stars:
stars_children.append(child)
elif child in orbits:
stars_children += self.get_stars_of_children_of(child)
else:
# maybe an envelope or eventually spot, ring, etc
pass
return stars_children | def function[get_stars_of_children_of, parameter[self, component]]:
constant[
    Same as get_children_of, except that if any of the children are orbits, this will recursively
    follow the tree and return a list of all descendant stars (children, grandchildren, etc.) under that orbit.
]
variable[stars] assign[=] call[name[self].get_stars, parameter[]]
variable[orbits] assign[=] call[name[self].get_orbits, parameter[]]
variable[stars_children] assign[=] list[[]]
for taget[name[child]] in starred[call[name[self].get_children_of, parameter[name[component]]]] begin[:]
if compare[name[child] in name[stars]] begin[:]
call[name[stars_children].append, parameter[name[child]]]
return[name[stars_children]] | keyword[def] identifier[get_stars_of_children_of] ( identifier[self] , identifier[component] ):
literal[string]
identifier[stars] = identifier[self] . identifier[get_stars] ()
identifier[orbits] = identifier[self] . identifier[get_orbits] ()
identifier[stars_children] =[]
keyword[for] identifier[child] keyword[in] identifier[self] . identifier[get_children_of] ( identifier[component] ):
keyword[if] identifier[child] keyword[in] identifier[stars] :
identifier[stars_children] . identifier[append] ( identifier[child] )
keyword[elif] identifier[child] keyword[in] identifier[orbits] :
identifier[stars_children] += identifier[self] . identifier[get_stars_of_children_of] ( identifier[child] )
keyword[else] :
keyword[pass]
keyword[return] identifier[stars_children] | def get_stars_of_children_of(self, component):
"""
    Same as get_children_of, except that if any of the children are orbits, this will recursively
    follow the tree and return a list of all descendant stars (children, grandchildren, etc.) under that orbit.
"""
stars = self.get_stars()
orbits = self.get_orbits()
stars_children = []
for child in self.get_children_of(component):
if child in stars:
stars_children.append(child) # depends on [control=['if'], data=['child']]
elif child in orbits:
stars_children += self.get_stars_of_children_of(child) # depends on [control=['if'], data=['child']]
else:
# maybe an envelope or eventually spot, ring, etc
pass # depends on [control=['for'], data=['child']]
return stars_children |
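The same recursive-descent pattern, stripped of the PHOEBE specifics and run on a plain dict hierarchy (all names hypothetical):

def stars_under(component, children, stars, orbits):
    """Recursively collect all descendant stars of `component`."""
    found = []
    for child in children.get(component, []):
        if child in stars:
            found.append(child)
        elif child in orbits:
            found += stars_under(child, children, stars, orbits)
    return found

children = {'outer': ['inner', 'starC'], 'inner': ['starA', 'starB']}
print(stars_under('outer', children, {'starA', 'starB', 'starC'}, {'outer', 'inner'}))
# ['starA', 'starB', 'starC']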
def hoverEnterEvent( self, event ):
"""
        Prompts the tool tip for this node based on the given event.
:param event | <QHoverEvent>
"""
# process the parent event
super(XNode, self).hoverEnterEvent(event)
# hover over a hotspot
hotspot = self.hotspotAt(event.pos())
if not hotspot:
hotspot = self.dropzoneAt(event.pos())
old_spot = self._hoverSpot
if hotspot and hotspot != old_spot:
# update the new hotspot
self._hoverSpot = hotspot
if old_spot:
old_spot.hoverLeaveEvent(event)
if hotspot.hoverEnterEvent(event):
self.update()
elif old_spot and not hotspot:
self._hoverSpot = None
if old_spot.hoverLeaveEvent(event):
self.update() | def function[hoverEnterEvent, parameter[self, event]]:
constant[
        Prompts the tool tip for this node based on the given event.
:param event | <QHoverEvent>
]
call[call[name[super], parameter[name[XNode], name[self]]].hoverEnterEvent, parameter[name[event]]]
variable[hotspot] assign[=] call[name[self].hotspotAt, parameter[call[name[event].pos, parameter[]]]]
if <ast.UnaryOp object at 0x7da18f09ea10> begin[:]
variable[hotspot] assign[=] call[name[self].dropzoneAt, parameter[call[name[event].pos, parameter[]]]]
variable[old_spot] assign[=] name[self]._hoverSpot
if <ast.BoolOp object at 0x7da18f09f4f0> begin[:]
name[self]._hoverSpot assign[=] name[hotspot]
if name[old_spot] begin[:]
call[name[old_spot].hoverLeaveEvent, parameter[name[event]]]
if call[name[hotspot].hoverEnterEvent, parameter[name[event]]] begin[:]
call[name[self].update, parameter[]] | keyword[def] identifier[hoverEnterEvent] ( identifier[self] , identifier[event] ):
literal[string]
identifier[super] ( identifier[XNode] , identifier[self] ). identifier[hoverEnterEvent] ( identifier[event] )
identifier[hotspot] = identifier[self] . identifier[hotspotAt] ( identifier[event] . identifier[pos] ())
keyword[if] keyword[not] identifier[hotspot] :
identifier[hotspot] = identifier[self] . identifier[dropzoneAt] ( identifier[event] . identifier[pos] ())
identifier[old_spot] = identifier[self] . identifier[_hoverSpot]
keyword[if] identifier[hotspot] keyword[and] identifier[hotspot] != identifier[old_spot] :
identifier[self] . identifier[_hoverSpot] = identifier[hotspot]
keyword[if] identifier[old_spot] :
identifier[old_spot] . identifier[hoverLeaveEvent] ( identifier[event] )
keyword[if] identifier[hotspot] . identifier[hoverEnterEvent] ( identifier[event] ):
identifier[self] . identifier[update] ()
keyword[elif] identifier[old_spot] keyword[and] keyword[not] identifier[hotspot] :
identifier[self] . identifier[_hoverSpot] = keyword[None]
keyword[if] identifier[old_spot] . identifier[hoverLeaveEvent] ( identifier[event] ):
identifier[self] . identifier[update] () | def hoverEnterEvent(self, event):
"""
        Prompts the tool tip for this node based on the given event.
:param event | <QHoverEvent>
"""
# process the parent event
super(XNode, self).hoverEnterEvent(event)
# hover over a hotspot
hotspot = self.hotspotAt(event.pos())
if not hotspot:
hotspot = self.dropzoneAt(event.pos()) # depends on [control=['if'], data=[]]
old_spot = self._hoverSpot
if hotspot and hotspot != old_spot:
# update the new hotspot
self._hoverSpot = hotspot
if old_spot:
old_spot.hoverLeaveEvent(event) # depends on [control=['if'], data=[]]
if hotspot.hoverEnterEvent(event):
self.update() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif old_spot and (not hotspot):
self._hoverSpot = None
if old_spot.hoverLeaveEvent(event):
self.update() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
async def create_text_channel(self, name, *, overwrites=None, reason=None, **options):
"""|coro|
A shortcut method to :meth:`Guild.create_text_channel` to create a :class:`TextChannel` in the category.
"""
return await self.guild.create_text_channel(name, overwrites=overwrites, category=self, reason=reason, **options) | <ast.AsyncFunctionDef object at 0x7da1b20407f0> | keyword[async] keyword[def] identifier[create_text_channel] ( identifier[self] , identifier[name] ,*, identifier[overwrites] = keyword[None] , identifier[reason] = keyword[None] ,** identifier[options] ):
literal[string]
keyword[return] keyword[await] identifier[self] . identifier[guild] . identifier[create_text_channel] ( identifier[name] , identifier[overwrites] = identifier[overwrites] , identifier[category] = identifier[self] , identifier[reason] = identifier[reason] ,** identifier[options] ) | async def create_text_channel(self, name, *, overwrites=None, reason=None, **options):
"""|coro|
A shortcut method to :meth:`Guild.create_text_channel` to create a :class:`TextChannel` in the category.
"""
return await self.guild.create_text_channel(name, overwrites=overwrites, category=self, reason=reason, **options) |
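A usage sketch for the discord.py shortcut above, assuming a CategoryChannel obtained inside an async command or event handler:

import discord

async def setup_category(category: discord.CategoryChannel):
    # Options and overwrites are forwarded to Guild.create_text_channel,
    # with this category passed as the parent.
    channel = await category.create_text_channel('general')
    print(channel.name)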
def get_record_revisions(recid, from_date):
"""Get record revisions."""
try:
from invenio.dbquery import run_sql
except ImportError:
from invenio.legacy.dbquery import run_sql
return run_sql(
'SELECT job_date, marcxml '
'FROM hstRECORD WHERE id_bibrec = %s AND job_date >= %s '
'ORDER BY job_date ASC', (recid, from_date),
run_on_slave=True) | def function[get_record_revisions, parameter[recid, from_date]]:
constant[Get record revisions.]
<ast.Try object at 0x7da1b013d180>
return[call[name[run_sql], parameter[constant[SELECT job_date, marcxml FROM hstRECORD WHERE id_bibrec = %s AND job_date >= %s ORDER BY job_date ASC], tuple[[<ast.Name object at 0x7da1b013d810>, <ast.Name object at 0x7da1b013f0a0>]]]]] | keyword[def] identifier[get_record_revisions] ( identifier[recid] , identifier[from_date] ):
literal[string]
keyword[try] :
keyword[from] identifier[invenio] . identifier[dbquery] keyword[import] identifier[run_sql]
keyword[except] identifier[ImportError] :
keyword[from] identifier[invenio] . identifier[legacy] . identifier[dbquery] keyword[import] identifier[run_sql]
keyword[return] identifier[run_sql] (
literal[string]
literal[string]
literal[string] ,( identifier[recid] , identifier[from_date] ),
identifier[run_on_slave] = keyword[True] ) | def get_record_revisions(recid, from_date):
"""Get record revisions."""
try:
from invenio.dbquery import run_sql # depends on [control=['try'], data=[]]
except ImportError:
from invenio.legacy.dbquery import run_sql # depends on [control=['except'], data=[]]
return run_sql('SELECT job_date, marcxml FROM hstRECORD WHERE id_bibrec = %s AND job_date >= %s ORDER BY job_date ASC', (recid, from_date), run_on_slave=True) |
def raster_statistics(raster_file):
"""Get basic statistics of raster data.
Args:
raster_file: raster file path.
Returns:
min, max, mean, std.
"""
ds = gdal_Open(raster_file)
band = ds.GetRasterBand(1)
minv, maxv, meanv, std = band.ComputeStatistics(False)
return minv, maxv, meanv, std | def function[raster_statistics, parameter[raster_file]]:
constant[Get basic statistics of raster data.
Args:
raster_file: raster file path.
Returns:
min, max, mean, std.
]
variable[ds] assign[=] call[name[gdal_Open], parameter[name[raster_file]]]
variable[band] assign[=] call[name[ds].GetRasterBand, parameter[constant[1]]]
<ast.Tuple object at 0x7da1b2585f30> assign[=] call[name[band].ComputeStatistics, parameter[constant[False]]]
return[tuple[[<ast.Name object at 0x7da1b2586860>, <ast.Name object at 0x7da1b2586590>, <ast.Name object at 0x7da1b2586650>, <ast.Name object at 0x7da1b25879d0>]]] | keyword[def] identifier[raster_statistics] ( identifier[raster_file] ):
literal[string]
identifier[ds] = identifier[gdal_Open] ( identifier[raster_file] )
identifier[band] = identifier[ds] . identifier[GetRasterBand] ( literal[int] )
identifier[minv] , identifier[maxv] , identifier[meanv] , identifier[std] = identifier[band] . identifier[ComputeStatistics] ( keyword[False] )
keyword[return] identifier[minv] , identifier[maxv] , identifier[meanv] , identifier[std] | def raster_statistics(raster_file):
"""Get basic statistics of raster data.
Args:
raster_file: raster file path.
Returns:
min, max, mean, std.
"""
ds = gdal_Open(raster_file)
band = ds.GetRasterBand(1)
(minv, maxv, meanv, std) = band.ComputeStatistics(False)
return (minv, maxv, meanv, std) |
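A usage sketch of the underlying GDAL calls, assuming the GDAL Python bindings are installed and 'dem.tif' is a hypothetical single-band raster:

from osgeo import gdal

ds = gdal.Open('dem.tif')
band = ds.GetRasterBand(1)
# Passing False asks for exact statistics rather than approximations.
minv, maxv, meanv, std = band.ComputeStatistics(False)
print(minv, maxv, meanv, std)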
def find_node_with_payload(self, point, point_payload, cur_node = None):
"""!
@brief Find node with specified coordinates and payload.
@details If node with specified parameters does not exist then None will be returned,
otherwise required node will be returned.
@param[in] point (list): Coordinates of the point whose node should be found.
@param[in] point_payload (any): Payload of the node that is searched in the tree.
@param[in] cur_node (node): Node from which search should be started.
@return (node) Node if it satisfies to input parameters, otherwise it return None.
"""
rule_search = lambda node, point=point, payload=point_payload: self.__point_comparator(node.data, point) and node.payload == payload
return self.__find_node_by_rule(point, rule_search, cur_node) | def function[find_node_with_payload, parameter[self, point, point_payload, cur_node]]:
constant[!
@brief Find node with specified coordinates and payload.
@details If node with specified parameters does not exist then None will be returned,
otherwise required node will be returned.
@param[in] point (list): Coordinates of the point whose node should be found.
@param[in] point_payload (any): Payload of the node that is searched in the tree.
@param[in] cur_node (node): Node from which search should be started.
@return (node) Node if it satisfies to input parameters, otherwise it return None.
]
variable[rule_search] assign[=] <ast.Lambda object at 0x7da1b014ce80>
return[call[name[self].__find_node_by_rule, parameter[name[point], name[rule_search], name[cur_node]]]] | keyword[def] identifier[find_node_with_payload] ( identifier[self] , identifier[point] , identifier[point_payload] , identifier[cur_node] = keyword[None] ):
literal[string]
identifier[rule_search] = keyword[lambda] identifier[node] , identifier[point] = identifier[point] , identifier[payload] = identifier[point_payload] : identifier[self] . identifier[__point_comparator] ( identifier[node] . identifier[data] , identifier[point] ) keyword[and] identifier[node] . identifier[payload] == identifier[payload]
keyword[return] identifier[self] . identifier[__find_node_by_rule] ( identifier[point] , identifier[rule_search] , identifier[cur_node] ) | def find_node_with_payload(self, point, point_payload, cur_node=None):
"""!
@brief Find node with specified coordinates and payload.
@details If node with specified parameters does not exist then None will be returned,
otherwise required node will be returned.
@param[in] point (list): Coordinates of the point whose node should be found.
@param[in] point_payload (any): Payload of the node that is searched in the tree.
@param[in] cur_node (node): Node from which search should be started.
@return (node) Node if it satisfies to input parameters, otherwise it return None.
"""
rule_search = lambda node, point=point, payload=point_payload: self.__point_comparator(node.data, point) and node.payload == payload
return self.__find_node_by_rule(point, rule_search, cur_node) |
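A hedged usage sketch, assuming this is pyclustering's kd-tree (the constructor signature may differ between versions):

from pyclustering.container.kdtree import kdtree

points = [[1.0, 2.0], [1.0, 2.0], [3.0, 4.0]]
payloads = ['a', 'b', 'c']
tree = kdtree(points, payloads)

# Two nodes share the same coordinates; the payload disambiguates them.
node = tree.find_node_with_payload([1.0, 2.0], 'b')
print(node.payload)  # 'b'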
def ema(eqdata, **kwargs):
"""
Exponential moving average with the given span.
Parameters
----------
eqdata : DataFrame
Must have exactly 1 column on which to calculate EMA
span : int, optional
Span for exponential moving average. Cf. `pandas.stats.moments.ewma
<http://pandas.pydata.org/pandas-docs/stable/generated/pandas.stats.moments.ewma.html>`_ and
`additional Pandas documentation
<http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-moment-functions>`_.
outputcol : str, optional
Column to use for output. Defaults to 'EMA'.
selection : str, optional
Column of eqdata on which to calculate ema. If
`eqdata` has only 1 column, `selection` is ignored,
and ema is calculated on that column. Defaults
to 'Adj Close'.
Returns
    -------
emadf : DataFrame
Exponential moving average using the given `span`.
"""
if len(eqdata.shape) > 1 and eqdata.shape[1] != 1:
_selection = kwargs.get('selection', 'Adj Close')
_eqdata = eqdata.loc[:, _selection]
else:
_eqdata = eqdata
_span = kwargs.get('span', 20)
_col = kwargs.get('outputcol', 'EMA')
_emadf = pd.DataFrame(index=_eqdata.index, columns=[_col], dtype=np.float64)
_emadf.loc[:, _col] = _eqdata.ewm(span=_span, min_periods=0, adjust=True, ignore_na=False).mean().values.flatten()
return _emadf | def function[ema, parameter[eqdata]]:
constant[
Exponential moving average with the given span.
Parameters
----------
eqdata : DataFrame
Must have exactly 1 column on which to calculate EMA
span : int, optional
Span for exponential moving average. Cf. `pandas.stats.moments.ewma
<http://pandas.pydata.org/pandas-docs/stable/generated/pandas.stats.moments.ewma.html>`_ and
`additional Pandas documentation
<http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-moment-functions>`_.
outputcol : str, optional
Column to use for output. Defaults to 'EMA'.
selection : str, optional
Column of eqdata on which to calculate ema. If
`eqdata` has only 1 column, `selection` is ignored,
and ema is calculated on that column. Defaults
to 'Adj Close'.
Returns
---------
emadf : DataFrame
Exponential moving average using the given `span`.
]
if <ast.BoolOp object at 0x7da1b27b9390> begin[:]
variable[_selection] assign[=] call[name[kwargs].get, parameter[constant[selection], constant[Adj Close]]]
variable[_eqdata] assign[=] call[name[eqdata].loc][tuple[[<ast.Slice object at 0x7da2054a71c0>, <ast.Name object at 0x7da2054a4ca0>]]]
variable[_span] assign[=] call[name[kwargs].get, parameter[constant[span], constant[20]]]
variable[_col] assign[=] call[name[kwargs].get, parameter[constant[outputcol], constant[EMA]]]
variable[_emadf] assign[=] call[name[pd].DataFrame, parameter[]]
call[name[_emadf].loc][tuple[[<ast.Slice object at 0x7da2054a5990>, <ast.Name object at 0x7da2054a5a20>]]] assign[=] call[call[call[name[_eqdata].ewm, parameter[]].mean, parameter[]].values.flatten, parameter[]]
return[name[_emadf]] | keyword[def] identifier[ema] ( identifier[eqdata] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[len] ( identifier[eqdata] . identifier[shape] )> literal[int] keyword[and] identifier[eqdata] . identifier[shape] [ literal[int] ]!= literal[int] :
identifier[_selection] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[_eqdata] = identifier[eqdata] . identifier[loc] [:, identifier[_selection] ]
keyword[else] :
identifier[_eqdata] = identifier[eqdata]
identifier[_span] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[_col] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[_emadf] = identifier[pd] . identifier[DataFrame] ( identifier[index] = identifier[_eqdata] . identifier[index] , identifier[columns] =[ identifier[_col] ], identifier[dtype] = identifier[np] . identifier[float64] )
identifier[_emadf] . identifier[loc] [:, identifier[_col] ]= identifier[_eqdata] . identifier[ewm] ( identifier[span] = identifier[_span] , identifier[min_periods] = literal[int] , identifier[adjust] = keyword[True] , identifier[ignore_na] = keyword[False] ). identifier[mean] (). identifier[values] . identifier[flatten] ()
keyword[return] identifier[_emadf] | def ema(eqdata, **kwargs):
"""
Exponential moving average with the given span.
Parameters
----------
eqdata : DataFrame
Must have exactly 1 column on which to calculate EMA
span : int, optional
Span for exponential moving average. Cf. `pandas.stats.moments.ewma
<http://pandas.pydata.org/pandas-docs/stable/generated/pandas.stats.moments.ewma.html>`_ and
`additional Pandas documentation
<http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-moment-functions>`_.
outputcol : str, optional
Column to use for output. Defaults to 'EMA'.
selection : str, optional
Column of eqdata on which to calculate ema. If
`eqdata` has only 1 column, `selection` is ignored,
and ema is calculated on that column. Defaults
to 'Adj Close'.
Returns
    -------
emadf : DataFrame
Exponential moving average using the given `span`.
"""
if len(eqdata.shape) > 1 and eqdata.shape[1] != 1:
_selection = kwargs.get('selection', 'Adj Close')
_eqdata = eqdata.loc[:, _selection] # depends on [control=['if'], data=[]]
else:
_eqdata = eqdata
_span = kwargs.get('span', 20)
_col = kwargs.get('outputcol', 'EMA')
_emadf = pd.DataFrame(index=_eqdata.index, columns=[_col], dtype=np.float64)
_emadf.loc[:, _col] = _eqdata.ewm(span=_span, min_periods=0, adjust=True, ignore_na=False).mean().values.flatten()
return _emadf |
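A usage sketch with synthetic data, assuming the function above is importable; the column name follows its 'Adj Close' default:

import numpy as np
import pandas as pd

prices = pd.DataFrame(
    {'Adj Close': np.linspace(100.0, 110.0, 30)},
    index=pd.date_range('2020-01-01', periods=30),
)
emadf = ema(prices, span=10, outputcol='EMA')
print(emadf.tail())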
async def get_files_to_delete(self) -> List[str]:
"""
Determine the files to delete when rolling over.
"""
dir_name, base_name = os.path.split(self.absolute_file_path)
file_names = await self.loop.run_in_executor(
None, lambda: os.listdir(dir_name)
)
result = []
prefix = base_name + "."
plen = len(prefix)
for file_name in file_names:
if file_name[:plen] == prefix:
suffix = file_name[plen:]
if self.ext_match.match(suffix):
result.append(os.path.join(dir_name, file_name))
if len(result) < self.backup_count:
return []
else:
return result[: len(result) - self.backup_count] | <ast.AsyncFunctionDef object at 0x7da2049606d0> | keyword[async] keyword[def] identifier[get_files_to_delete] ( identifier[self] )-> identifier[List] [ identifier[str] ]:
literal[string]
identifier[dir_name] , identifier[base_name] = identifier[os] . identifier[path] . identifier[split] ( identifier[self] . identifier[absolute_file_path] )
identifier[file_names] = keyword[await] identifier[self] . identifier[loop] . identifier[run_in_executor] (
keyword[None] , keyword[lambda] : identifier[os] . identifier[listdir] ( identifier[dir_name] )
)
identifier[result] =[]
identifier[prefix] = identifier[base_name] + literal[string]
identifier[plen] = identifier[len] ( identifier[prefix] )
keyword[for] identifier[file_name] keyword[in] identifier[file_names] :
keyword[if] identifier[file_name] [: identifier[plen] ]== identifier[prefix] :
identifier[suffix] = identifier[file_name] [ identifier[plen] :]
keyword[if] identifier[self] . identifier[ext_match] . identifier[match] ( identifier[suffix] ):
identifier[result] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dir_name] , identifier[file_name] ))
keyword[if] identifier[len] ( identifier[result] )< identifier[self] . identifier[backup_count] :
keyword[return] []
keyword[else] :
keyword[return] identifier[result] [: identifier[len] ( identifier[result] )- identifier[self] . identifier[backup_count] ] | async def get_files_to_delete(self) -> List[str]:
"""
Determine the files to delete when rolling over.
"""
(dir_name, base_name) = os.path.split(self.absolute_file_path)
file_names = await self.loop.run_in_executor(None, lambda : os.listdir(dir_name))
result = []
prefix = base_name + '.'
plen = len(prefix)
for file_name in file_names:
if file_name[:plen] == prefix:
suffix = file_name[plen:]
if self.ext_match.match(suffix):
result.append(os.path.join(dir_name, file_name)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['file_name']]
if len(result) < self.backup_count:
return [] # depends on [control=['if'], data=[]]
else:
return result[:len(result) - self.backup_count] |
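The same selection logic written synchronously for illustration; the date-suffix regex is an assumption modeled on logging's TimedRotatingFileHandler, and the sort is added so "oldest first" is well defined:

import os
import re

def files_to_delete_sketch(abs_path, backup_count,
                           ext_match=re.compile(r'^\d{4}-\d{2}-\d{2}$')):
    dir_name, base_name = os.path.split(abs_path)
    prefix = base_name + '.'
    candidates = sorted(
        os.path.join(dir_name, name)
        for name in os.listdir(dir_name)
        if name.startswith(prefix) and ext_match.match(name[len(prefix):])
    )
    if len(candidates) < backup_count:
        return []
    # Keep the newest backup_count files; everything older gets deleted.
    return candidates[:len(candidates) - backup_count]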
def set_http_port(port=80):
'''
Configure the port HTTP should listen on
CLI Example:
.. code-block:: bash
salt '*' ilo.set_http_port 8080
'''
_current = global_settings()
if _current['Global Settings']['HTTP_PORT']['VALUE'] == port:
return True
_xml = """<RIBCL VERSION="2.0">
<LOGIN USER_LOGIN="adminname" PASSWORD="password">
<RIB_INFO MODE="write">
<MOD_GLOBAL_SETTINGS>
<HTTP_PORT value="{0}"/>
</MOD_GLOBAL_SETTINGS>
</RIB_INFO>
</LOGIN>
</RIBCL>""".format(port)
return __execute_cmd('Set_HTTP_Port', _xml) | def function[set_http_port, parameter[port]]:
constant[
Configure the port HTTP should listen on
CLI Example:
.. code-block:: bash
salt '*' ilo.set_http_port 8080
]
variable[_current] assign[=] call[name[global_settings], parameter[]]
if compare[call[call[call[name[_current]][constant[Global Settings]]][constant[HTTP_PORT]]][constant[VALUE]] equal[==] name[port]] begin[:]
return[constant[True]]
variable[_xml] assign[=] call[constant[<RIBCL VERSION="2.0">
<LOGIN USER_LOGIN="adminname" PASSWORD="password">
<RIB_INFO MODE="write">
<MOD_GLOBAL_SETTINGS>
<HTTP_PORT value="{0}"/>
</MOD_GLOBAL_SETTINGS>
</RIB_INFO>
</LOGIN>
</RIBCL>].format, parameter[name[port]]]
return[call[name[__execute_cmd], parameter[constant[Set_HTTP_Port], name[_xml]]]] | keyword[def] identifier[set_http_port] ( identifier[port] = literal[int] ):
literal[string]
identifier[_current] = identifier[global_settings] ()
keyword[if] identifier[_current] [ literal[string] ][ literal[string] ][ literal[string] ]== identifier[port] :
keyword[return] keyword[True]
identifier[_xml] = literal[string] . identifier[format] ( identifier[port] )
keyword[return] identifier[__execute_cmd] ( literal[string] , identifier[_xml] ) | def set_http_port(port=80):
"""
Configure the port HTTP should listen on
CLI Example:
.. code-block:: bash
salt '*' ilo.set_http_port 8080
"""
_current = global_settings()
if _current['Global Settings']['HTTP_PORT']['VALUE'] == port:
return True # depends on [control=['if'], data=[]]
_xml = '<RIBCL VERSION="2.0">\n <LOGIN USER_LOGIN="adminname" PASSWORD="password">\n <RIB_INFO MODE="write">\n <MOD_GLOBAL_SETTINGS>\n <HTTP_PORT value="{0}"/>\n </MOD_GLOBAL_SETTINGS>\n </RIB_INFO>\n </LOGIN>\n </RIBCL>'.format(port)
return __execute_cmd('Set_HTTP_Port', _xml) |
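A brief note on the design: the function first reads the current global settings and returns True without issuing a write when the port is already set, so repeated calls are idempotent; only on a mismatch is the RIBCL XML payload sent. (The USER_LOGIN/PASSWORD placeholders above come from the original template and are substituted by the execution layer, not by this function.)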
def consume_vertices(self):
"""
Consumes all consecutive vertices.
NOTE: There is no guarantee this will consume all vertices since other
statements can also occur in the vertex list
"""
while True:
# Vertex color
if len(self.values) == 7:
yield (
float(self.values[1]),
float(self.values[2]),
float(self.values[3]),
float(self.values[4]),
float(self.values[5]),
float(self.values[6]),
)
# Positions only
else:
yield (
float(self.values[1]),
float(self.values[2]),
float(self.values[3]),
)
try:
self.next_line()
except StopIteration:
break
if not self.values:
break
if self.values[0] != "v":
break | def function[consume_vertices, parameter[self]]:
constant[
Consumes all consecutive vertices.
NOTE: There is no guarantee this will consume all vertices since other
statements can also occur in the vertex list
]
while constant[True] begin[:]
if compare[call[name[len], parameter[name[self].values]] equal[==] constant[7]] begin[:]
<ast.Yield object at 0x7da2047ea770>
<ast.Try object at 0x7da2047e8b20>
if <ast.UnaryOp object at 0x7da2047eb070> begin[:]
break
if compare[call[name[self].values][constant[0]] not_equal[!=] constant[v]] begin[:]
break | keyword[def] identifier[consume_vertices] ( identifier[self] ):
literal[string]
keyword[while] keyword[True] :
keyword[if] identifier[len] ( identifier[self] . identifier[values] )== literal[int] :
keyword[yield] (
identifier[float] ( identifier[self] . identifier[values] [ literal[int] ]),
identifier[float] ( identifier[self] . identifier[values] [ literal[int] ]),
identifier[float] ( identifier[self] . identifier[values] [ literal[int] ]),
identifier[float] ( identifier[self] . identifier[values] [ literal[int] ]),
identifier[float] ( identifier[self] . identifier[values] [ literal[int] ]),
identifier[float] ( identifier[self] . identifier[values] [ literal[int] ]),
)
keyword[else] :
keyword[yield] (
identifier[float] ( identifier[self] . identifier[values] [ literal[int] ]),
identifier[float] ( identifier[self] . identifier[values] [ literal[int] ]),
identifier[float] ( identifier[self] . identifier[values] [ literal[int] ]),
)
keyword[try] :
identifier[self] . identifier[next_line] ()
keyword[except] identifier[StopIteration] :
keyword[break]
keyword[if] keyword[not] identifier[self] . identifier[values] :
keyword[break]
keyword[if] identifier[self] . identifier[values] [ literal[int] ]!= literal[string] :
keyword[break] | def consume_vertices(self):
"""
Consumes all consecutive vertices.
NOTE: There is no guarantee this will consume all vertices since other
statements can also occur in the vertex list
"""
while True:
# Vertex color
if len(self.values) == 7:
yield (float(self.values[1]), float(self.values[2]), float(self.values[3]), float(self.values[4]), float(self.values[5]), float(self.values[6])) # depends on [control=['if'], data=[]]
else:
# Positions only
yield (float(self.values[1]), float(self.values[2]), float(self.values[3]))
try:
self.next_line() # depends on [control=['try'], data=[]]
except StopIteration:
break # depends on [control=['except'], data=[]]
if not self.values:
break # depends on [control=['if'], data=[]]
if self.values[0] != 'v':
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
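A standalone sketch of the same idea, parsing consecutive 'v' statements from a tiny OBJ fragment (not the parser class above):

obj_lines = [
    'v 0.0 0.0 0.0',
    'v 1.0 0.0 0.0 1.0 0.0 0.0',  # position plus an RGB vertex color
    'f 1 2 3',                    # a non-vertex statement ends the run
]

def consume_vertices_sketch(lines):
    for line in lines:
        values = line.split()
        if not values or values[0] != 'v':
            break
        # 3 floats -> position only; 6 floats -> position + color.
        yield tuple(float(v) for v in values[1:])

print(list(consume_vertices_sketch(obj_lines)))
# [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0, 1.0, 0.0, 0.0)]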
def getDBusEndpoints(reactor, busAddress, client=True):
"""
Creates DBus endpoints.
@param busAddress: 'session', 'system', or a valid bus address as defined
by the DBus specification. If 'session' (the default) or 'system' is
supplied, the contents of the DBUS_SESSION_BUS_ADDRESS or
DBUS_SYSTEM_BUS_ADDRESS environment variables will be used for the bus
address, respectively. If DBUS_SYSTEM_BUS_ADDRESS is not set, the
well-known address unix:path=/var/run/dbus/system_bus_socket will be
used.
@type busAddress: C{string}
@rtype: C{list} of L{twisted.internet.interfaces.IStreamServerEndpoint}
@returns: A list of endpoint instances
"""
if busAddress == 'session':
addrString = os.environ.get('DBUS_SESSION_BUS_ADDRESS', None)
if addrString is None:
raise Exception('DBus Session environment variable not set')
elif busAddress == 'system':
addrString = os.environ.get(
'DBUS_SYSTEM_BUS_ADDRESS',
'unix:path=/var/run/dbus/system_bus_socket',
)
else:
addrString = busAddress
# XXX Add documentation about extra key=value parameters in address string
# such as nonce-tcp vs tcp which use same endpoint class
epl = []
for ep_addr in addrString.split(';'):
d = {}
kind = None
ep = None
for c in ep_addr.split(','):
if c.startswith('unix:'):
kind = 'unix'
c = c[5:]
elif c.startswith('tcp:'):
kind = 'tcp'
c = c[4:]
elif c.startswith('nonce-tcp:'):
kind = 'tcp'
c = c[10:]
d['nonce-tcp'] = True
elif c.startswith('launchd:'):
kind = 'launchd'
                c = c[8:]  # strip the full 'launchd:' prefix (8 characters)
if '=' in c:
k, v = c.split('=')
d[k] = v
if kind == 'unix':
if 'path' in d:
path = d['path']
elif 'tmpdir' in d:
path = d['tmpdir'] + '/dbus-' + str(os.getpid())
elif 'abstract' in d:
path = '\0' + d['abstract']
if client:
ep = UNIXClientEndpoint(reactor, path=path)
else:
ep = UNIXServerEndpoint(reactor, address=path)
elif kind == 'tcp':
if client:
ep = TCP4ClientEndpoint(reactor, d['host'], int(d['port']))
else:
ep = TCP4ServerEndpoint(reactor, int(
d['port']), interface=d['host'])
if ep:
ep.dbus_args = d
epl.append(ep)
return epl | def function[getDBusEndpoints, parameter[reactor, busAddress, client]]:
constant[
Creates DBus endpoints.
@param busAddress: 'session', 'system', or a valid bus address as defined
by the DBus specification. If 'session' (the default) or 'system' is
supplied, the contents of the DBUS_SESSION_BUS_ADDRESS or
DBUS_SYSTEM_BUS_ADDRESS environment variables will be used for the bus
address, respectively. If DBUS_SYSTEM_BUS_ADDRESS is not set, the
well-known address unix:path=/var/run/dbus/system_bus_socket will be
used.
@type busAddress: C{string}
@rtype: C{list} of L{twisted.internet.interfaces.IStreamServerEndpoint}
@returns: A list of endpoint instances
]
if compare[name[busAddress] equal[==] constant[session]] begin[:]
variable[addrString] assign[=] call[name[os].environ.get, parameter[constant[DBUS_SESSION_BUS_ADDRESS], constant[None]]]
if compare[name[addrString] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0627ac0>
variable[epl] assign[=] list[[]]
for taget[name[ep_addr]] in starred[call[name[addrString].split, parameter[constant[;]]]] begin[:]
variable[d] assign[=] dictionary[[], []]
variable[kind] assign[=] constant[None]
variable[ep] assign[=] constant[None]
for taget[name[c]] in starred[call[name[ep_addr].split, parameter[constant[,]]]] begin[:]
if call[name[c].startswith, parameter[constant[unix:]]] begin[:]
variable[kind] assign[=] constant[unix]
variable[c] assign[=] call[name[c]][<ast.Slice object at 0x7da20e793370>]
if compare[constant[=] in name[c]] begin[:]
<ast.Tuple object at 0x7da1b0626c50> assign[=] call[name[c].split, parameter[constant[=]]]
call[name[d]][name[k]] assign[=] name[v]
if compare[name[kind] equal[==] constant[unix]] begin[:]
if compare[constant[path] in name[d]] begin[:]
variable[path] assign[=] call[name[d]][constant[path]]
if name[client] begin[:]
variable[ep] assign[=] call[name[UNIXClientEndpoint], parameter[name[reactor]]]
if name[ep] begin[:]
name[ep].dbus_args assign[=] name[d]
call[name[epl].append, parameter[name[ep]]]
return[name[epl]] | keyword[def] identifier[getDBusEndpoints] ( identifier[reactor] , identifier[busAddress] , identifier[client] = keyword[True] ):
literal[string]
keyword[if] identifier[busAddress] == literal[string] :
identifier[addrString] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[addrString] keyword[is] keyword[None] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[elif] identifier[busAddress] == literal[string] :
identifier[addrString] = identifier[os] . identifier[environ] . identifier[get] (
literal[string] ,
literal[string] ,
)
keyword[else] :
identifier[addrString] = identifier[busAddress]
identifier[epl] =[]
keyword[for] identifier[ep_addr] keyword[in] identifier[addrString] . identifier[split] ( literal[string] ):
identifier[d] ={}
identifier[kind] = keyword[None]
identifier[ep] = keyword[None]
keyword[for] identifier[c] keyword[in] identifier[ep_addr] . identifier[split] ( literal[string] ):
keyword[if] identifier[c] . identifier[startswith] ( literal[string] ):
identifier[kind] = literal[string]
identifier[c] = identifier[c] [ literal[int] :]
keyword[elif] identifier[c] . identifier[startswith] ( literal[string] ):
identifier[kind] = literal[string]
identifier[c] = identifier[c] [ literal[int] :]
keyword[elif] identifier[c] . identifier[startswith] ( literal[string] ):
identifier[kind] = literal[string]
identifier[c] = identifier[c] [ literal[int] :]
identifier[d] [ literal[string] ]= keyword[True]
keyword[elif] identifier[c] . identifier[startswith] ( literal[string] ):
identifier[kind] = literal[string]
identifier[c] = identifier[c] [ literal[int] :]
keyword[if] literal[string] keyword[in] identifier[c] :
identifier[k] , identifier[v] = identifier[c] . identifier[split] ( literal[string] )
identifier[d] [ identifier[k] ]= identifier[v]
keyword[if] identifier[kind] == literal[string] :
keyword[if] literal[string] keyword[in] identifier[d] :
identifier[path] = identifier[d] [ literal[string] ]
keyword[elif] literal[string] keyword[in] identifier[d] :
identifier[path] = identifier[d] [ literal[string] ]+ literal[string] + identifier[str] ( identifier[os] . identifier[getpid] ())
keyword[elif] literal[string] keyword[in] identifier[d] :
identifier[path] = literal[string] + identifier[d] [ literal[string] ]
keyword[if] identifier[client] :
identifier[ep] = identifier[UNIXClientEndpoint] ( identifier[reactor] , identifier[path] = identifier[path] )
keyword[else] :
identifier[ep] = identifier[UNIXServerEndpoint] ( identifier[reactor] , identifier[address] = identifier[path] )
keyword[elif] identifier[kind] == literal[string] :
keyword[if] identifier[client] :
identifier[ep] = identifier[TCP4ClientEndpoint] ( identifier[reactor] , identifier[d] [ literal[string] ], identifier[int] ( identifier[d] [ literal[string] ]))
keyword[else] :
identifier[ep] = identifier[TCP4ServerEndpoint] ( identifier[reactor] , identifier[int] (
identifier[d] [ literal[string] ]), identifier[interface] = identifier[d] [ literal[string] ])
keyword[if] identifier[ep] :
identifier[ep] . identifier[dbus_args] = identifier[d]
identifier[epl] . identifier[append] ( identifier[ep] )
keyword[return] identifier[epl] | def getDBusEndpoints(reactor, busAddress, client=True):
"""
Creates DBus endpoints.
@param busAddress: 'session', 'system', or a valid bus address as defined
by the DBus specification. If 'session' (the default) or 'system' is
supplied, the contents of the DBUS_SESSION_BUS_ADDRESS or
DBUS_SYSTEM_BUS_ADDRESS environment variables will be used for the bus
address, respectively. If DBUS_SYSTEM_BUS_ADDRESS is not set, the
well-known address unix:path=/var/run/dbus/system_bus_socket will be
used.
@type busAddress: C{string}
@rtype: C{list} of L{twisted.internet.interfaces.IStreamServerEndpoint}
@returns: A list of endpoint instances
"""
if busAddress == 'session':
addrString = os.environ.get('DBUS_SESSION_BUS_ADDRESS', None)
if addrString is None:
raise Exception('DBus Session environment variable not set') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif busAddress == 'system':
addrString = os.environ.get('DBUS_SYSTEM_BUS_ADDRESS', 'unix:path=/var/run/dbus/system_bus_socket') # depends on [control=['if'], data=[]]
else:
addrString = busAddress
# XXX Add documentation about extra key=value parameters in address string
# such as nonce-tcp vs tcp which use same endpoint class
epl = []
for ep_addr in addrString.split(';'):
d = {}
kind = None
ep = None
for c in ep_addr.split(','):
if c.startswith('unix:'):
kind = 'unix'
c = c[5:] # depends on [control=['if'], data=[]]
elif c.startswith('tcp:'):
kind = 'tcp'
c = c[4:] # depends on [control=['if'], data=[]]
elif c.startswith('nonce-tcp:'):
kind = 'tcp'
c = c[10:]
d['nonce-tcp'] = True # depends on [control=['if'], data=[]]
elif c.startswith('launchd:'):
kind = 'launchd'
                c = c[8:]  # strip the full 'launchd:' prefix (8 characters) # depends on [control=['if'], data=[]]
if '=' in c:
(k, v) = c.split('=')
d[k] = v # depends on [control=['if'], data=['c']] # depends on [control=['for'], data=['c']]
if kind == 'unix':
if 'path' in d:
path = d['path'] # depends on [control=['if'], data=['d']]
elif 'tmpdir' in d:
path = d['tmpdir'] + '/dbus-' + str(os.getpid()) # depends on [control=['if'], data=['d']]
elif 'abstract' in d:
path = '\x00' + d['abstract'] # depends on [control=['if'], data=['d']]
if client:
ep = UNIXClientEndpoint(reactor, path=path) # depends on [control=['if'], data=[]]
else:
ep = UNIXServerEndpoint(reactor, address=path) # depends on [control=['if'], data=[]]
elif kind == 'tcp':
if client:
ep = TCP4ClientEndpoint(reactor, d['host'], int(d['port'])) # depends on [control=['if'], data=[]]
else:
ep = TCP4ServerEndpoint(reactor, int(d['port']), interface=d['host']) # depends on [control=['if'], data=[]]
if ep:
ep.dbus_args = d
epl.append(ep) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ep_addr']]
return epl |
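The branching above is driven by a per-block dict of key=value pairs parsed out of the DBus address string. A minimal standalone sketch of that parsing step (the helper name is hypothetical, not part of the function above):

def parse_dbus_address(addr):
    # Split 'kind:key=val,key=val;...' into (kind, params) pairs.
    endpoints = []
    for ep_addr in addr.split(';'):
        kind, _, rest = ep_addr.partition(':')
        params = dict(kv.split('=', 1) for kv in rest.split(',') if '=' in kv)
        endpoints.append((kind, params))
    return endpoints

print(parse_dbus_address('unix:path=/var/run/dbus/system_bus_socket'))
# [('unix', {'path': '/var/run/dbus/system_bus_socket'})]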
def labeled_accumulate(sequence, keygetter=operator.itemgetter(0), valuegetter=operator.itemgetter(1), accumulator=operator.add):
"""
Accumulates input elements as accumulate() does, while keeping per-element
data from the original sequence/iterable (such as keys or legend labels)
in the resulting elements.
:param sequence: input iterable of items
:param keygetter: extracts the key kept alongside each accumulated value
:param valuegetter: extracts the value to accumulate
:param accumulator: binary function combining successive values
:return: iterator of (key, accumulated value) pairs
"""
return izip((keygetter(item) for item in sequence),
accumulate((valuegetter(item) for item in sequence), accumulator)) | def function[labeled_accumulate, parameter[sequence, keygetter, valuegetter, accumulator]]:
constant[
Accumulates input elements as accumulate() does, while keeping per-element
data from the original sequence/iterable (such as keys or legend labels)
in the resulting elements.
:param sequence: input iterable of items
:param keygetter: extracts the key kept alongside each accumulated value
:param valuegetter: extracts the value to accumulate
:param accumulator: binary function combining successive values
:return: iterator of (key, accumulated value) pairs
]
return[call[name[izip], parameter[<ast.GeneratorExp object at 0x7da18f09ccd0>, call[name[accumulate], parameter[<ast.GeneratorExp object at 0x7da18f09fa00>, name[accumulator]]]]]] | keyword[def] identifier[labeled_accumulate] ( identifier[sequence] , identifier[keygetter] = identifier[operator] . identifier[itemgetter] ( literal[int] ), identifier[valuegetter] = identifier[operator] . identifier[itemgetter] ( literal[int] ), identifier[accumulator] = identifier[operator] . identifier[add] ):
literal[string]
keyword[return] identifier[izip] (( identifier[keygetter] ( identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[sequence] ),
identifier[accumulate] (( identifier[valuegetter] ( identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[sequence] ), identifier[accumulator] )) | def labeled_accumulate(sequence, keygetter=operator.itemgetter(0), valuegetter=operator.itemgetter(1), accumulator=operator.add):
"""
Accumulates input elements as accumulate() does, while keeping per-element
data from the original sequence/iterable (such as keys or legend labels)
in the resulting elements.
:param sequence: input iterable of items
:param keygetter: extracts the key kept alongside each accumulated value
:param valuegetter: extracts the value to accumulate
:param accumulator: binary function combining successive values
:return: iterator of (key, accumulated value) pairs
"""
return izip((keygetter(item) for item in sequence), accumulate((valuegetter(item) for item in sequence), accumulator)) |
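Usage sketch: the function above targets Python 2 (`izip`); the same pairing can be reproduced on Python 3 with `zip` and `itertools.accumulate`, which is what is assumed here:

import operator
from itertools import accumulate

data = [('jan', 10), ('feb', 5), ('mar', 7)]
keys = (k for k, _ in data)
totals = accumulate((v for _, v in data), operator.add)
print(list(zip(keys, totals)))
# [('jan', 10), ('feb', 15), ('mar', 22)]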
def signature_base_string(http_method, base_str_uri,
normalized_encoded_request_parameters):
"""**Construct the signature base string.**
Per `section 3.4.1.1`_ of the spec.
For example, the HTTP request::
POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
Host: example.com
Content-Type: application/x-www-form-urlencoded
Authorization: OAuth realm="Example",
oauth_consumer_key="9djdj82h48djs9d2",
oauth_token="kkk9d7dh3k39sjv7",
oauth_signature_method="HMAC-SHA1",
oauth_timestamp="137131201",
oauth_nonce="7d8f3e4a",
oauth_signature="bYT5CMsGcbgUdFHObYMEfcx6bsw%3D"
c2&a3=2+q
is represented by the following signature base string (line breaks
are for display purposes only)::
POST&http%3A%2F%2Fexample.com%2Frequest&a2%3Dr%2520b%26a3%3D2%2520q
%26a3%3Da%26b5%3D%253D%25253D%26c%2540%3D%26c2%3D%26oauth_consumer_
key%3D9djdj82h48djs9d2%26oauth_nonce%3D7d8f3e4a%26oauth_signature_m
ethod%3DHMAC-SHA1%26oauth_timestamp%3D137131201%26oauth_token%3Dkkk
9d7dh3k39sjv7
.. _`section 3.4.1.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.1
"""
# The signature base string is constructed by concatenating together,
# in order, the following HTTP request elements:
# 1. The HTTP request method in uppercase. For example: "HEAD",
# "GET", "POST", etc. If the request uses a custom HTTP method, it
# MUST be encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
base_string = utils.escape(http_method.upper())
# 2. An "&" character (ASCII code 38).
base_string += '&'
# 3. The base string URI from `Section 3.4.1.2`_, after being encoded
# (`Section 3.6`_).
#
# .. _`Section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
base_string += utils.escape(base_str_uri)
# 4. An "&" character (ASCII code 38).
base_string += '&'
# 5. The request parameters as normalized in `Section 3.4.1.3.2`_, after
# being encoded (`Section 3.6`_).
#
# .. _`Section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
base_string += utils.escape(normalized_encoded_request_parameters)
return base_string | def function[signature_base_string, parameter[http_method, base_str_uri, normalized_encoded_request_parameters]]:
constant[**Construct the signature base string.**
Per `section 3.4.1.1`_ of the spec.
For example, the HTTP request::
POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
Host: example.com
Content-Type: application/x-www-form-urlencoded
Authorization: OAuth realm="Example",
oauth_consumer_key="9djdj82h48djs9d2",
oauth_token="kkk9d7dh3k39sjv7",
oauth_signature_method="HMAC-SHA1",
oauth_timestamp="137131201",
oauth_nonce="7d8f3e4a",
oauth_signature="bYT5CMsGcbgUdFHObYMEfcx6bsw%3D"
c2&a3=2+q
is represented by the following signature base string (line breaks
are for display purposes only)::
POST&http%3A%2F%2Fexample.com%2Frequest&a2%3Dr%2520b%26a3%3D2%2520q
%26a3%3Da%26b5%3D%253D%25253D%26c%2540%3D%26c2%3D%26oauth_consumer_
key%3D9djdj82h48djs9d2%26oauth_nonce%3D7d8f3e4a%26oauth_signature_m
ethod%3DHMAC-SHA1%26oauth_timestamp%3D137131201%26oauth_token%3Dkkk
9d7dh3k39sjv7
.. _`section 3.4.1.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.1
]
variable[base_string] assign[=] call[name[utils].escape, parameter[call[name[http_method].upper, parameter[]]]]
<ast.AugAssign object at 0x7da1b175e9e0>
<ast.AugAssign object at 0x7da1b175c820>
<ast.AugAssign object at 0x7da1b175d510>
<ast.AugAssign object at 0x7da1b175da20>
return[name[base_string]] | keyword[def] identifier[signature_base_string] ( identifier[http_method] , identifier[base_str_uri] ,
identifier[normalized_encoded_request_parameters] ):
literal[string]
identifier[base_string] = identifier[utils] . identifier[escape] ( identifier[http_method] . identifier[upper] ())
identifier[base_string] += literal[string]
identifier[base_string] += identifier[utils] . identifier[escape] ( identifier[base_str_uri] )
identifier[base_string] += literal[string]
identifier[base_string] += identifier[utils] . identifier[escape] ( identifier[normalized_encoded_request_parameters] )
keyword[return] identifier[base_string] | def signature_base_string(http_method, base_str_uri, normalized_encoded_request_parameters):
"""**Construct the signature base string.**
Per `section 3.4.1.1`_ of the spec.
For example, the HTTP request::
POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
Host: example.com
Content-Type: application/x-www-form-urlencoded
Authorization: OAuth realm="Example",
oauth_consumer_key="9djdj82h48djs9d2",
oauth_token="kkk9d7dh3k39sjv7",
oauth_signature_method="HMAC-SHA1",
oauth_timestamp="137131201",
oauth_nonce="7d8f3e4a",
oauth_signature="bYT5CMsGcbgUdFHObYMEfcx6bsw%3D"
c2&a3=2+q
is represented by the following signature base string (line breaks
are for display purposes only)::
POST&http%3A%2F%2Fexample.com%2Frequest&a2%3Dr%2520b%26a3%3D2%2520q
%26a3%3Da%26b5%3D%253D%25253D%26c%2540%3D%26c2%3D%26oauth_consumer_
key%3D9djdj82h48djs9d2%26oauth_nonce%3D7d8f3e4a%26oauth_signature_m
ethod%3DHMAC-SHA1%26oauth_timestamp%3D137131201%26oauth_token%3Dkkk
9d7dh3k39sjv7
.. _`section 3.4.1.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.1
"""
# The signature base string is constructed by concatenating together,
# in order, the following HTTP request elements:
# 1. The HTTP request method in uppercase. For example: "HEAD",
# "GET", "POST", etc. If the request uses a custom HTTP method, it
# MUST be encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
base_string = utils.escape(http_method.upper())
# 2. An "&" character (ASCII code 38).
base_string += '&'
# 3. The base string URI from `Section 3.4.1.2`_, after being encoded
# (`Section 3.6`_).
#
# .. _`Section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
base_string += utils.escape(base_str_uri)
# 4. An "&" character (ASCII code 38).
base_string += '&'
# 5. The request parameters as normalized in `Section 3.4.1.3.2`_, after
# being encoded (`Section 3.6`_).
#
# .. _`Section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
base_string += utils.escape(normalized_encoded_request_parameters)
return base_string |
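A standalone check of the concatenation, with `urllib.parse.quote` standing in for `utils.escape` (RFC 5849 percent-encodes everything except unreserved characters, so `safe='~'` is assumed to be a close match for oauthlib's escape):

from urllib.parse import quote

def escape(s):
    return quote(s, safe='~')

base = '&'.join([escape('POST'),
                 escape('http://example.com/request'),
                 escape('a2=r%20b&a3=a')])
print(base)
# POST&http%3A%2F%2Fexample.com%2Frequest&a2%3Dr%2520b%26a3%3Da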
def get_collection(self, url):
""" Pages through an object collection from the bitbucket API.
Returns an iterator that lazily goes through all the 'values'
of all the pages in the collection. """
url = self.BASE_API2 + url
while url is not None:
response = self.get_data(url)
for value in response['values']:
yield value
url = response.get('next', None) | def function[get_collection, parameter[self, url]]:
constant[ Pages through an object collection from the bitbucket API.
Returns an iterator that lazily goes through all the 'values'
of all the pages in the collection. ]
variable[url] assign[=] binary_operation[name[self].BASE_API2 + name[url]]
while compare[name[url] is_not constant[None]] begin[:]
variable[response] assign[=] call[name[self].get_data, parameter[name[url]]]
for taget[name[value]] in starred[call[name[response]][constant[values]]] begin[:]
<ast.Yield object at 0x7da1b027e620>
variable[url] assign[=] call[name[response].get, parameter[constant[next], constant[None]]] | keyword[def] identifier[get_collection] ( identifier[self] , identifier[url] ):
literal[string]
identifier[url] = identifier[self] . identifier[BASE_API2] + identifier[url]
keyword[while] identifier[url] keyword[is] keyword[not] keyword[None] :
identifier[response] = identifier[self] . identifier[get_data] ( identifier[url] )
keyword[for] identifier[value] keyword[in] identifier[response] [ literal[string] ]:
keyword[yield] identifier[value]
identifier[url] = identifier[response] . identifier[get] ( literal[string] , keyword[None] ) | def get_collection(self, url):
""" Pages through an object collection from the bitbucket API.
Returns an iterator that lazily goes through all the 'values'
of all the pages in the collection. """
url = self.BASE_API2 + url
while url is not None:
response = self.get_data(url)
for value in response['values']:
yield value # depends on [control=['for'], data=['value']]
url = response.get('next', None) # depends on [control=['while'], data=['url']] |
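The generator walks pages until no 'next' link remains. A sketch with a stub client (the URLs and payloads are made up) that reuses the function above as a method:

class StubClient:
    BASE_API2 = 'https://api.example.test'
    pages = {'/repos?page=1': {'values': [1, 2], 'next': '/repos?page=2'},
             '/repos?page=2': {'values': [3]}}

    def get_data(self, url):
        return self.pages[url.replace(self.BASE_API2, '')]

    get_collection = get_collection  # the function defined above

print(list(StubClient().get_collection('/repos?page=1')))  # [1, 2, 3]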
def get_synset_by_id(self, mongo_id):
'''
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
'''
cache_hit = None
if self._synset_cache is not None:
cache_hit = self._synset_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
if synset_dict is not None:
synset = Synset(self, synset_dict)
if self._synset_cache is not None:
self._synset_cache.put(mongo_id, synset)
return synset | def function[get_synset_by_id, parameter[self, mongo_id]]:
constant[
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
]
variable[cache_hit] assign[=] constant[None]
if compare[name[self]._synset_cache is_not constant[None]] begin[:]
variable[cache_hit] assign[=] call[name[self]._synset_cache.get, parameter[name[mongo_id]]]
if compare[name[cache_hit] is_not constant[None]] begin[:]
return[name[cache_hit]]
variable[synset_dict] assign[=] call[name[self]._mongo_db.synsets.find_one, parameter[dictionary[[<ast.Constant object at 0x7da1b0fea110>], [<ast.Name object at 0x7da1b0fea200>]]]]
if compare[name[synset_dict] is_not constant[None]] begin[:]
variable[synset] assign[=] call[name[Synset], parameter[name[self], name[synset_dict]]]
if compare[name[self]._synset_cache is_not constant[None]] begin[:]
call[name[self]._synset_cache.put, parameter[name[mongo_id], name[synset]]]
return[name[synset]] | keyword[def] identifier[get_synset_by_id] ( identifier[self] , identifier[mongo_id] ):
literal[string]
identifier[cache_hit] = keyword[None]
keyword[if] identifier[self] . identifier[_synset_cache] keyword[is] keyword[not] keyword[None] :
identifier[cache_hit] = identifier[self] . identifier[_synset_cache] . identifier[get] ( identifier[mongo_id] )
keyword[if] identifier[cache_hit] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[cache_hit]
identifier[synset_dict] = identifier[self] . identifier[_mongo_db] . identifier[synsets] . identifier[find_one] ({ literal[string] : identifier[mongo_id] })
keyword[if] identifier[synset_dict] keyword[is] keyword[not] keyword[None] :
identifier[synset] = identifier[Synset] ( identifier[self] , identifier[synset_dict] )
keyword[if] identifier[self] . identifier[_synset_cache] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_synset_cache] . identifier[put] ( identifier[mongo_id] , identifier[synset] )
keyword[return] identifier[synset] | def get_synset_by_id(self, mongo_id):
"""
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object
"""
cache_hit = None
if self._synset_cache is not None:
cache_hit = self._synset_cache.get(mongo_id) # depends on [control=['if'], data=[]]
if cache_hit is not None:
return cache_hit # depends on [control=['if'], data=['cache_hit']]
synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
if synset_dict is not None:
synset = Synset(self, synset_dict)
if self._synset_cache is not None:
self._synset_cache.put(mongo_id, synset) # depends on [control=['if'], data=[]]
return synset # depends on [control=['if'], data=['synset_dict']] |
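The method is a cache-aside lookup: consult the cache, fall back to MongoDB, then populate the cache. The same pattern in isolation, with plain dicts standing in for the LRU cache and the collection:

def cached_fetch(cache, key, fetch):
    hit = cache.get(key)
    if hit is not None:
        return hit
    value = fetch(key)
    if value is not None:
        cache[key] = value
    return value

db = {42: 'Synset(42)'}
cache = {}
print(cached_fetch(cache, 42, db.get))  # fetched from db, now cached
print(cached_fetch(cache, 42, db.get))  # served from the cache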
def rotated(self, angle_degrees_ccw):
"""Concatenates a rotation matrix on this matrix"""
angle = angle_degrees_ccw / 180.0 * pi
c, s = cos(angle), sin(angle)
return self @ PdfMatrix((c, s, -s, c, 0, 0)) | def function[rotated, parameter[self, angle_degrees_ccw]]:
constant[Concatenates a rotation matrix on this matrix]
variable[angle] assign[=] binary_operation[binary_operation[name[angle_degrees_ccw] / constant[180.0]] * name[pi]]
<ast.Tuple object at 0x7da18ede4190> assign[=] tuple[[<ast.Call object at 0x7da18ede6350>, <ast.Call object at 0x7da18ede7df0>]]
return[binary_operation[name[self] <ast.MatMult object at 0x7da2590d6860> call[name[PdfMatrix], parameter[tuple[[<ast.Name object at 0x7da18ede7d60>, <ast.Name object at 0x7da18ede76a0>, <ast.UnaryOp object at 0x7da18ede49d0>, <ast.Name object at 0x7da18ede7580>, <ast.Constant object at 0x7da18ede46a0>, <ast.Constant object at 0x7da18ede6b60>]]]]]] | keyword[def] identifier[rotated] ( identifier[self] , identifier[angle_degrees_ccw] ):
literal[string]
identifier[angle] = identifier[angle_degrees_ccw] / literal[int] * identifier[pi]
identifier[c] , identifier[s] = identifier[cos] ( identifier[angle] ), identifier[sin] ( identifier[angle] )
keyword[return] identifier[self] @ identifier[PdfMatrix] (( identifier[c] , identifier[s] ,- identifier[s] , identifier[c] , literal[int] , literal[int] )) | def rotated(self, angle_degrees_ccw):
"""Concatenates a rotation matrix on this matrix"""
angle = angle_degrees_ccw / 180.0 * pi
(c, s) = (cos(angle), sin(angle))
return self @ PdfMatrix((c, s, -s, c, 0, 0)) |
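The 6-tuple is a PDF-style affine matrix (a, b, c, d, e, f); for a 90-degree CCW rotation cos is 0 and sin is 1, so the appended matrix is (0, 1, -1, 0, 0, 0). A quick check of the trigonometry (the `@` itself relies on the surrounding class providing `__matmul__`):

from math import pi, cos, sin

angle = 90 / 180.0 * pi
c, s = cos(angle), sin(angle)
print((round(c, 12), round(s, 12), round(-s, 12), round(c, 12), 0, 0))
# (0.0, 1.0, -1.0, 0.0, 0, 0)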
def from_url(cls, url, **kwargs):
"""
Creates an instance of the KubeConfig class from a single URL (useful
for interacting with kubectl proxy).
"""
doc = {
"clusters": [
{
"name": "self",
"cluster": {
"server": url,
},
},
],
"contexts": [
{
"name": "self",
"context": {
"cluster": "self",
},
}
],
"current-context": "self",
}
self = cls(doc, **kwargs)
return self | def function[from_url, parameter[cls, url]]:
constant[
Creates an instance of the KubeConfig class from a single URL (useful
for interacting with kubectl proxy).
]
variable[doc] assign[=] dictionary[[<ast.Constant object at 0x7da1b06fecb0>, <ast.Constant object at 0x7da1b06fece0>, <ast.Constant object at 0x7da1b06fe8c0>], [<ast.List object at 0x7da1b06ffeb0>, <ast.List object at 0x7da1b06fefe0>, <ast.Constant object at 0x7da1b06ffdf0>]]
variable[self] assign[=] call[name[cls], parameter[name[doc]]]
return[name[self]] | keyword[def] identifier[from_url] ( identifier[cls] , identifier[url] ,** identifier[kwargs] ):
literal[string]
identifier[doc] ={
literal[string] :[
{
literal[string] : literal[string] ,
literal[string] :{
literal[string] : identifier[url] ,
},
},
],
literal[string] :[
{
literal[string] : literal[string] ,
literal[string] :{
literal[string] : literal[string] ,
},
}
],
literal[string] : literal[string] ,
}
identifier[self] = identifier[cls] ( identifier[doc] ,** identifier[kwargs] )
keyword[return] identifier[self] | def from_url(cls, url, **kwargs):
"""
Creates an instance of the KubeConfig class from a single URL (useful
for interacting with kubectl proxy).
"""
doc = {'clusters': [{'name': 'self', 'cluster': {'server': url}}], 'contexts': [{'name': 'self', 'context': {'cluster': 'self'}}], 'current-context': 'self'}
self = cls(doc, **kwargs)
return self |
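Typical use is pointing a client at `kubectl proxy`, which serves the API unauthenticated on localhost. A usage sketch (a pykube-style `KubeConfig` exposing the parsed document as a `doc` attribute is assumed):

config = KubeConfig.from_url('http://localhost:8001')
print(config.doc['clusters'][0]['cluster']['server'])
# http://localhost:8001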
async def rollback(self):
"""Roll back this transaction."""
if not self._parent._is_active:
return
await self._do_rollback()
self._is_active = False | <ast.AsyncFunctionDef object at 0x7da1b2262230> | keyword[async] keyword[def] identifier[rollback] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_parent] . identifier[_is_active] :
keyword[return]
keyword[await] identifier[self] . identifier[_do_rollback] ()
identifier[self] . identifier[_is_active] = keyword[False] | async def rollback(self):
"""Roll back this transaction."""
if not self._parent._is_active:
return # depends on [control=['if'], data=[]]
await self._do_rollback()
self._is_active = False |
def update(self):
"""Calculate the smoothing parameter values.
The following example is explained in some detail in module
|smoothtools|:
>>> from hydpy import pub
>>> pub.timegrids = '2000.01.01', '2000.01.03', '1d'
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> remotedischargesafety(0.0)
>>> remotedischargesafety.values[1] = 2.5
>>> derived.remotedischargesmoothpar.update()
>>> from hydpy.cythons.smoothutils import smooth_logistic1
>>> from hydpy import round_
>>> round_(smooth_logistic1(0.1, derived.remotedischargesmoothpar[0]))
1.0
>>> round_(smooth_logistic1(2.5, derived.remotedischargesmoothpar[1]))
0.99
"""
metapar = self.subpars.pars.control.remotedischargesafety
self.shape = metapar.shape
self(tuple(smoothtools.calc_smoothpar_logistic1(mp)
for mp in metapar.values)) | def function[update, parameter[self]]:
constant[Calculate the smoothing parameter values.
The following example is explained in some detail in module
|smoothtools|:
>>> from hydpy import pub
>>> pub.timegrids = '2000.01.01', '2000.01.03', '1d'
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> remotedischargesafety(0.0)
>>> remotedischargesafety.values[1] = 2.5
>>> derived.remotedischargesmoothpar.update()
>>> from hydpy.cythons.smoothutils import smooth_logistic1
>>> from hydpy import round_
>>> round_(smooth_logistic1(0.1, derived.remotedischargesmoothpar[0]))
1.0
>>> round_(smooth_logistic1(2.5, derived.remotedischargesmoothpar[1]))
0.99
]
variable[metapar] assign[=] name[self].subpars.pars.control.remotedischargesafety
name[self].shape assign[=] name[metapar].shape
call[name[self], parameter[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da2044c1060>]]]] | keyword[def] identifier[update] ( identifier[self] ):
literal[string]
identifier[metapar] = identifier[self] . identifier[subpars] . identifier[pars] . identifier[control] . identifier[remotedischargesafety]
identifier[self] . identifier[shape] = identifier[metapar] . identifier[shape]
identifier[self] ( identifier[tuple] ( identifier[smoothtools] . identifier[calc_smoothpar_logistic1] ( identifier[mp] )
keyword[for] identifier[mp] keyword[in] identifier[metapar] . identifier[values] )) | def update(self):
"""Calculate the smoothing parameter values.
The following example is explained in some detail in module
|smoothtools|:
>>> from hydpy import pub
>>> pub.timegrids = '2000.01.01', '2000.01.03', '1d'
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> remotedischargesafety(0.0)
>>> remotedischargesafety.values[1] = 2.5
>>> derived.remotedischargesmoothpar.update()
>>> from hydpy.cythons.smoothutils import smooth_logistic1
>>> from hydpy import round_
>>> round_(smooth_logistic1(0.1, derived.remotedischargesmoothpar[0]))
1.0
>>> round_(smooth_logistic1(2.5, derived.remotedischargesmoothpar[1]))
0.99
"""
metapar = self.subpars.pars.control.remotedischargesafety
self.shape = metapar.shape
self(tuple((smoothtools.calc_smoothpar_logistic1(mp) for mp in metapar.values))) |
def _apply_filters(self, **filters):
"""Determine rows to keep in data for given set of filters
Parameters
----------
filters: dict
dictionary of filters ({col: values}); uses a pseudo-regexp syntax
by default, but accepts `regexp: True` to use regexp directly
"""
regexp = filters.pop('regexp', False)
keep = np.array([True] * len(self.data))
# filter by columns and list of values
for col, values in filters.items():
# treat `_apply_filters(col=None)` as no filter applied
if values is None:
continue
if col in self.meta.columns:
matches = pattern_match(self.meta[col], values, regexp=regexp)
cat_idx = self.meta[matches].index
keep_col = (self.data[META_IDX].set_index(META_IDX)
.index.isin(cat_idx))
elif col == 'variable':
level = filters['level'] if 'level' in filters else None
keep_col = pattern_match(self.data[col], values, level, regexp)
elif col == 'year':
_data = self.data[col] if self.time_col != 'time' \
else self.data['time'].apply(lambda x: x.year)
keep_col = years_match(_data, values)
elif col == 'month' and self.time_col == 'time':
keep_col = month_match(self.data['time']
.apply(lambda x: x.month),
values)
elif col == 'day' and self.time_col == 'time':
if isinstance(values, str):
wday = True
elif isinstance(values, list) and isinstance(values[0], str):
wday = True
else:
wday = False
if wday:
days = self.data['time'].apply(lambda x: x.weekday())
else: # ints or list of ints
days = self.data['time'].apply(lambda x: x.day)
keep_col = day_match(days, values)
elif col == 'hour' and self.time_col == 'time':
keep_col = hour_match(self.data['time']
.apply(lambda x: x.hour),
values)
elif col == 'time' and self.time_col == 'time':
keep_col = datetime_match(self.data[col], values)
elif col == 'level':
if 'variable' not in filters.keys():
keep_col = find_depth(self.data['variable'], level=values)
else:
continue
elif col in self.data.columns:
keep_col = pattern_match(self.data[col], values, regexp=regexp)
else:
_raise_filter_error(col)
keep &= keep_col
return keep | def function[_apply_filters, parameter[self]]:
constant[Determine rows to keep in data for given set of filters
Parameters
----------
filters: dict
dictionary of filters ({col: values}); uses a pseudo-regexp syntax
by default, but accepts `regexp: True` to use regexp directly
]
variable[regexp] assign[=] call[name[filters].pop, parameter[constant[regexp], constant[False]]]
variable[keep] assign[=] call[name[np].array, parameter[binary_operation[list[[<ast.Constant object at 0x7da1b0f2add0>]] * call[name[len], parameter[name[self].data]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0f2b700>, <ast.Name object at 0x7da1b0f2b7c0>]]] in starred[call[name[filters].items, parameter[]]] begin[:]
if compare[name[values] is constant[None]] begin[:]
continue
if compare[name[col] in name[self].meta.columns] begin[:]
variable[matches] assign[=] call[name[pattern_match], parameter[call[name[self].meta][name[col]], name[values]]]
variable[cat_idx] assign[=] call[name[self].meta][name[matches]].index
variable[keep_col] assign[=] call[call[call[name[self].data][name[META_IDX]].set_index, parameter[name[META_IDX]]].index.isin, parameter[name[cat_idx]]]
<ast.AugAssign object at 0x7da18bccab90>
return[name[keep]] | keyword[def] identifier[_apply_filters] ( identifier[self] ,** identifier[filters] ):
literal[string]
identifier[regexp] = identifier[filters] . identifier[pop] ( literal[string] , keyword[False] )
identifier[keep] = identifier[np] . identifier[array] ([ keyword[True] ]* identifier[len] ( identifier[self] . identifier[data] ))
keyword[for] identifier[col] , identifier[values] keyword[in] identifier[filters] . identifier[items] ():
keyword[if] identifier[values] keyword[is] keyword[None] :
keyword[continue]
keyword[if] identifier[col] keyword[in] identifier[self] . identifier[meta] . identifier[columns] :
identifier[matches] = identifier[pattern_match] ( identifier[self] . identifier[meta] [ identifier[col] ], identifier[values] , identifier[regexp] = identifier[regexp] )
identifier[cat_idx] = identifier[self] . identifier[meta] [ identifier[matches] ]. identifier[index]
identifier[keep_col] =( identifier[self] . identifier[data] [ identifier[META_IDX] ]. identifier[set_index] ( identifier[META_IDX] )
. identifier[index] . identifier[isin] ( identifier[cat_idx] ))
keyword[elif] identifier[col] == literal[string] :
identifier[level] = identifier[filters] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[filters] keyword[else] keyword[None]
identifier[keep_col] = identifier[pattern_match] ( identifier[self] . identifier[data] [ identifier[col] ], identifier[values] , identifier[level] , identifier[regexp] )
keyword[elif] identifier[col] == literal[string] :
identifier[_data] = identifier[self] . identifier[data] [ identifier[col] ] keyword[if] identifier[self] . identifier[time_col] != literal[string] keyword[else] identifier[self] . identifier[data] [ literal[string] ]. identifier[apply] ( keyword[lambda] identifier[x] : identifier[x] . identifier[year] )
identifier[keep_col] = identifier[years_match] ( identifier[_data] , identifier[values] )
keyword[elif] identifier[col] == literal[string] keyword[and] identifier[self] . identifier[time_col] == literal[string] :
identifier[keep_col] = identifier[month_match] ( identifier[self] . identifier[data] [ literal[string] ]
. identifier[apply] ( keyword[lambda] identifier[x] : identifier[x] . identifier[month] ),
identifier[values] )
keyword[elif] identifier[col] == literal[string] keyword[and] identifier[self] . identifier[time_col] == literal[string] :
keyword[if] identifier[isinstance] ( identifier[values] , identifier[str] ):
identifier[wday] = keyword[True]
keyword[elif] identifier[isinstance] ( identifier[values] , identifier[list] ) keyword[and] identifier[isinstance] ( identifier[values] [ literal[int] ], identifier[str] ):
identifier[wday] = keyword[True]
keyword[else] :
identifier[wday] = keyword[False]
keyword[if] identifier[wday] :
identifier[days] = identifier[self] . identifier[data] [ literal[string] ]. identifier[apply] ( keyword[lambda] identifier[x] : identifier[x] . identifier[weekday] ())
keyword[else] :
identifier[days] = identifier[self] . identifier[data] [ literal[string] ]. identifier[apply] ( keyword[lambda] identifier[x] : identifier[x] . identifier[day] )
identifier[keep_col] = identifier[day_match] ( identifier[days] , identifier[values] )
keyword[elif] identifier[col] == literal[string] keyword[and] identifier[self] . identifier[time_col] == literal[string] :
identifier[keep_col] = identifier[hour_match] ( identifier[self] . identifier[data] [ literal[string] ]
. identifier[apply] ( keyword[lambda] identifier[x] : identifier[x] . identifier[hour] ),
identifier[values] )
keyword[elif] identifier[col] == literal[string] keyword[and] identifier[self] . identifier[time_col] == literal[string] :
identifier[keep_col] = identifier[datetime_match] ( identifier[self] . identifier[data] [ identifier[col] ], identifier[values] )
keyword[elif] identifier[col] == literal[string] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[filters] . identifier[keys] ():
identifier[keep_col] = identifier[find_depth] ( identifier[self] . identifier[data] [ literal[string] ], identifier[level] = identifier[values] )
keyword[else] :
keyword[continue]
keyword[elif] identifier[col] keyword[in] identifier[self] . identifier[data] . identifier[columns] :
identifier[keep_col] = identifier[pattern_match] ( identifier[self] . identifier[data] [ identifier[col] ], identifier[values] , identifier[regexp] = identifier[regexp] )
keyword[else] :
identifier[_raise_filter_error] ( identifier[col] )
identifier[keep] &= identifier[keep_col]
keyword[return] identifier[keep] | def _apply_filters(self, **filters):
"""Determine rows to keep in data for given set of filters
Parameters
----------
filters: dict
dictionary of filters ({col: values}); uses a pseudo-regexp syntax
by default, but accepts `regexp: True` to use regexp directly
"""
regexp = filters.pop('regexp', False)
keep = np.array([True] * len(self.data))
# filter by columns and list of values
for (col, values) in filters.items():
# treat `_apply_filters(col=None)` as no filter applied
if values is None:
continue # depends on [control=['if'], data=[]]
if col in self.meta.columns:
matches = pattern_match(self.meta[col], values, regexp=regexp)
cat_idx = self.meta[matches].index
keep_col = self.data[META_IDX].set_index(META_IDX).index.isin(cat_idx) # depends on [control=['if'], data=['col']]
elif col == 'variable':
level = filters['level'] if 'level' in filters else None
keep_col = pattern_match(self.data[col], values, level, regexp) # depends on [control=['if'], data=['col']]
elif col == 'year':
_data = self.data[col] if self.time_col != 'time' else self.data['time'].apply(lambda x: x.year)
keep_col = years_match(_data, values) # depends on [control=['if'], data=['col']]
elif col == 'month' and self.time_col == 'time':
keep_col = month_match(self.data['time'].apply(lambda x: x.month), values) # depends on [control=['if'], data=[]]
elif col == 'day' and self.time_col == 'time':
if isinstance(values, str):
wday = True # depends on [control=['if'], data=[]]
elif isinstance(values, list) and isinstance(values[0], str):
wday = True # depends on [control=['if'], data=[]]
else:
wday = False
if wday:
days = self.data['time'].apply(lambda x: x.weekday()) # depends on [control=['if'], data=[]]
else: # ints or list of ints
days = self.data['time'].apply(lambda x: x.day)
keep_col = day_match(days, values) # depends on [control=['if'], data=[]]
elif col == 'hour' and self.time_col == 'time':
keep_col = hour_match(self.data['time'].apply(lambda x: x.hour), values) # depends on [control=['if'], data=[]]
elif col == 'time' and self.time_col == 'time':
keep_col = datetime_match(self.data[col], values) # depends on [control=['if'], data=[]]
elif col == 'level':
if 'variable' not in filters.keys():
keep_col = find_depth(self.data['variable'], level=values) # depends on [control=['if'], data=[]]
else:
continue # depends on [control=['if'], data=[]]
elif col in self.data.columns:
keep_col = pattern_match(self.data[col], values, regexp=regexp) # depends on [control=['if'], data=['col']]
else:
_raise_filter_error(col)
keep &= keep_col # depends on [control=['for'], data=[]]
return keep |
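The core mechanic is independent of the individual filter types: each branch produces a boolean mask over the rows, and the masks are AND-combined via `keep &= keep_col`. The same idea in isolation:

import numpy as np

years = np.array([2005, 2010, 2010, 2015])
regions = np.array(['EU', 'EU', 'US', 'US'])
keep = np.ones(4, dtype=bool)
keep &= years == 2010    # one mask per filter...
keep &= regions == 'EU'  # ...combined by logical AND
print(keep)              # [False  True False False]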
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_url = parse_url(proxy)
if not proxy_url.host:
raise InvalidProxyURL("Please check proxy URL. It is malformed"
" and could be missing the host.")
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn | def function[get_connection, parameter[self, url, proxies]]:
constant[Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
]
variable[proxy] assign[=] call[name[select_proxy], parameter[name[url], name[proxies]]]
if name[proxy] begin[:]
variable[proxy] assign[=] call[name[prepend_scheme_if_needed], parameter[name[proxy], constant[http]]]
variable[proxy_url] assign[=] call[name[parse_url], parameter[name[proxy]]]
if <ast.UnaryOp object at 0x7da18bccb250> begin[:]
<ast.Raise object at 0x7da18bccb8e0>
variable[proxy_manager] assign[=] call[name[self].proxy_manager_for, parameter[name[proxy]]]
variable[conn] assign[=] call[name[proxy_manager].connection_from_url, parameter[name[url]]]
return[name[conn]] | keyword[def] identifier[get_connection] ( identifier[self] , identifier[url] , identifier[proxies] = keyword[None] ):
literal[string]
identifier[proxy] = identifier[select_proxy] ( identifier[url] , identifier[proxies] )
keyword[if] identifier[proxy] :
identifier[proxy] = identifier[prepend_scheme_if_needed] ( identifier[proxy] , literal[string] )
identifier[proxy_url] = identifier[parse_url] ( identifier[proxy] )
keyword[if] keyword[not] identifier[proxy_url] . identifier[host] :
keyword[raise] identifier[InvalidProxyURL] ( literal[string]
literal[string] )
identifier[proxy_manager] = identifier[self] . identifier[proxy_manager_for] ( identifier[proxy] )
identifier[conn] = identifier[proxy_manager] . identifier[connection_from_url] ( identifier[url] )
keyword[else] :
identifier[parsed] = identifier[urlparse] ( identifier[url] )
identifier[url] = identifier[parsed] . identifier[geturl] ()
identifier[conn] = identifier[self] . identifier[poolmanager] . identifier[connection_from_url] ( identifier[url] )
keyword[return] identifier[conn] | def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_url = parse_url(proxy)
if not proxy_url.host:
raise InvalidProxyURL('Please check proxy URL. It is malformed and could be missing the host.') # depends on [control=['if'], data=[]]
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url) # depends on [control=['if'], data=[]]
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn |
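Context for where this runs inside requests: adapters are mounted on a session per URL prefix, and `get_connection` is invoked internally on each request to obtain a urllib3 pool. It can also be called directly, which creates the pool without opening a socket:

import requests

session = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=4, pool_maxsize=8)
session.mount('https://', adapter)
conn = adapter.get_connection('https://example.com/')  # a urllib3 pool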
def tcache(parser, token):
"""
This will cache the contents of a template fragment for a given amount
of time with support tags.
Usage::
{% tcache [expire_time] [fragment_name] [tags='tag1,tag2'] %}
.. some expensive processing ..
{% endtcache %}
This tag also supports varying by a list of arguments:
{% tcache [expire_time] [fragment_name] [var1] [var2] .. [tags=tags] %}
.. some expensive processing ..
{% endtcache %}
Each unique set of arguments will result in a unique cache entry.
"""
nodelist = parser.parse(('endtcache',))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise template.TemplateSyntaxError("%r tag requires at least 2 arguments." % tokens[0])
tags = None
if len(tokens) > 3 and 'tags=' in tokens[-1]:
tags = parser.compile_filter(tokens[-1][5:])
del tokens[-1]
return CacheNode(nodelist,
parser.compile_filter(tokens[1]),
tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(token) for token in tokens[3:]],
tags
) | def function[tcache, parameter[parser, token]]:
constant[
This will cache the contents of a template fragment for a given amount
of time with support tags.
Usage::
{% tcache [expire_time] [fragment_name] [tags='tag1,tag2'] %}
.. some expensive processing ..
{% endtcache %}
This tag also supports varying by a list of arguments:
{% tcache [expire_time] [fragment_name] [var1] [var2] .. [tags=tags] %}
.. some expensive processing ..
{% endtcache %}
Each unique set of arguments will result in a unique cache entry.
]
variable[nodelist] assign[=] call[name[parser].parse, parameter[tuple[[<ast.Constant object at 0x7da18dc048b0>]]]]
call[name[parser].delete_first_token, parameter[]]
variable[tokens] assign[=] call[name[token].split_contents, parameter[]]
if compare[call[name[len], parameter[name[tokens]]] less[<] constant[3]] begin[:]
<ast.Raise object at 0x7da18dc07610>
variable[tags] assign[=] constant[None]
if <ast.BoolOp object at 0x7da18dc042b0> begin[:]
variable[tags] assign[=] call[name[parser].compile_filter, parameter[call[call[name[tokens]][<ast.UnaryOp object at 0x7da18dc06a40>]][<ast.Slice object at 0x7da18f7204c0>]]]
<ast.Delete object at 0x7da18f722320>
return[call[name[CacheNode], parameter[name[nodelist], call[name[parser].compile_filter, parameter[call[name[tokens]][constant[1]]]], call[name[tokens]][constant[2]], <ast.ListComp object at 0x7da18f720310>, name[tags]]]] | keyword[def] identifier[tcache] ( identifier[parser] , identifier[token] ):
literal[string]
identifier[nodelist] = identifier[parser] . identifier[parse] (( literal[string] ,))
identifier[parser] . identifier[delete_first_token] ()
identifier[tokens] = identifier[token] . identifier[split_contents] ()
keyword[if] identifier[len] ( identifier[tokens] )< literal[int] :
keyword[raise] identifier[template] . identifier[TemplateSyntaxError] ( literal[string] % identifier[tokens] [ literal[int] ])
identifier[tags] = keyword[None]
keyword[if] identifier[len] ( identifier[tokens] )> literal[int] keyword[and] literal[string] keyword[in] identifier[tokens] [- literal[int] ]:
identifier[tags] = identifier[parser] . identifier[compile_filter] ( identifier[tokens] [- literal[int] ][ literal[int] :])
keyword[del] identifier[tokens] [- literal[int] ]
keyword[return] identifier[CacheNode] ( identifier[nodelist] ,
identifier[parser] . identifier[compile_filter] ( identifier[tokens] [ literal[int] ]),
identifier[tokens] [ literal[int] ],
[ identifier[parser] . identifier[compile_filter] ( identifier[token] ) keyword[for] identifier[token] keyword[in] identifier[tokens] [ literal[int] :]],
identifier[tags]
) | def tcache(parser, token):
"""
This will cache the contents of a template fragment for a given amount
of time with support tags.
Usage::
{% tcache [expire_time] [fragment_name] [tags='tag1,tag2'] %}
.. some expensive processing ..
{% endtcache %}
This tag also supports varying by a list of arguments:
{% tcache [expire_time] [fragment_name] [var1] [var2] .. [tags=tags] %}
.. some expensive processing ..
{% endtcache %}
Each unique set of arguments will result in a unique cache entry.
"""
nodelist = parser.parse(('endtcache',))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise template.TemplateSyntaxError('%r tag requires at least 2 arguments.' % tokens[0]) # depends on [control=['if'], data=[]]
tags = None
if len(tokens) > 3 and 'tags=' in tokens[-1]:
tags = parser.compile_filter(tokens[-1][5:])
del tokens[-1] # depends on [control=['if'], data=[]] # fragment_name can't be a variable.
return CacheNode(nodelist, parser.compile_filter(tokens[1]), tokens[2], [parser.compile_filter(token) for token in tokens[3:]], tags) |
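To make the tag loadable from templates it still has to be registered with a template library, the standard Django pattern:

from django import template

register = template.Library()
register.tag('tcache', tcache)
# then in a template: {% load <your tag module> %} ... {% tcache 300 sidebar %}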
def on_enter_fit_name(self, event):
"""
Allows the entering of new fit names in the fit combobox
Parameters
----------
event : the wx.ComboBoxEvent that triggers this function
Alters
------
current_fit.name
"""
if self.current_fit is None:
self.on_btn_add_fit(event)
value = self.fit_box.GetValue()
if ':' in value:
name, color = value.split(':')
else:
name, color = value, None
if name in [x.name for x in self.pmag_results_data['specimens'][self.s]]:
print('bad name')
return
self.current_fit.name = name
if color in list(self.color_dict.keys()):
self.current_fit.color = self.color_dict[color]
self.update_fit_boxes()
self.plot_high_levels_data() | def function[on_enter_fit_name, parameter[self, event]]:
constant[
Allows the entering of new fit names in the fit combobox
Parameters
----------
event : the wx.ComboBoxEvent that triggers this function
Alters
------
current_fit.name
]
if compare[name[self].current_fit is constant[None]] begin[:]
call[name[self].on_btn_add_fit, parameter[name[event]]]
variable[value] assign[=] call[name[self].fit_box.GetValue, parameter[]]
if compare[constant[:] in name[value]] begin[:]
<ast.Tuple object at 0x7da1b0474820> assign[=] call[name[value].split, parameter[constant[:]]]
if compare[name[name] in <ast.ListComp object at 0x7da1b04760b0>] begin[:]
call[name[print], parameter[constant[bad name]]]
return[None]
name[self].current_fit.name assign[=] name[name]
if compare[name[color] in call[name[list], parameter[call[name[self].color_dict.keys, parameter[]]]]] begin[:]
name[self].current_fit.color assign[=] call[name[self].color_dict][name[color]]
call[name[self].update_fit_boxes, parameter[]]
call[name[self].plot_high_levels_data, parameter[]] | keyword[def] identifier[on_enter_fit_name] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] identifier[self] . identifier[current_fit] keyword[is] keyword[None] :
identifier[self] . identifier[on_btn_add_fit] ( identifier[event] )
identifier[value] = identifier[self] . identifier[fit_box] . identifier[GetValue] ()
keyword[if] literal[string] keyword[in] identifier[value] :
identifier[name] , identifier[color] = identifier[value] . identifier[split] ( literal[string] )
keyword[else] :
identifier[name] , identifier[color] = identifier[value] , keyword[None]
keyword[if] identifier[name] keyword[in] [ identifier[x] . identifier[name] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[pmag_results_data] [ literal[string] ][ identifier[self] . identifier[s] ]]:
identifier[print] ( literal[string] )
keyword[return]
identifier[self] . identifier[current_fit] . identifier[name] = identifier[name]
keyword[if] identifier[color] keyword[in] identifier[list] ( identifier[self] . identifier[color_dict] . identifier[keys] ()):
identifier[self] . identifier[current_fit] . identifier[color] = identifier[self] . identifier[color_dict] [ identifier[color] ]
identifier[self] . identifier[update_fit_boxes] ()
identifier[self] . identifier[plot_high_levels_data] () | def on_enter_fit_name(self, event):
"""
Allows the entering of new fit names in the fit combobox
Parameters
----------
event : the wx.ComboBoxEvent that triggers this function
Alters
------
current_fit.name
"""
if self.current_fit is None:
self.on_btn_add_fit(event) # depends on [control=['if'], data=[]]
value = self.fit_box.GetValue()
if ':' in value:
(name, color) = value.split(':') # depends on [control=['if'], data=['value']]
else:
(name, color) = (value, None)
if name in [x.name for x in self.pmag_results_data['specimens'][self.s]]:
print('bad name')
return # depends on [control=['if'], data=[]]
self.current_fit.name = name
if color in list(self.color_dict.keys()):
self.current_fit.color = self.color_dict[color] # depends on [control=['if'], data=['color']]
self.update_fit_boxes()
self.plot_high_levels_data() |
def _reconnect(self):
"""Schedule the next connection attempt if the class is not currently
closing.
"""
if self.idle or self.closed:
LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
self.reconnect_delay)
self.io_loop.call_later(self.reconnect_delay, self.connect)
return
LOGGER.warning('Reconnect called while %s', self.state_description) | def function[_reconnect, parameter[self]]:
constant[Schedule the next connection attempt if the class is not currently
closing.
]
if <ast.BoolOp object at 0x7da1b0cca830> begin[:]
call[name[LOGGER].debug, parameter[constant[Attempting RabbitMQ reconnect in %s seconds], name[self].reconnect_delay]]
call[name[self].io_loop.call_later, parameter[name[self].reconnect_delay, name[self].connect]]
return[None]
call[name[LOGGER].warning, parameter[constant[Reconnect called while %s], name[self].state_description]] | keyword[def] identifier[_reconnect] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[idle] keyword[or] identifier[self] . identifier[closed] :
identifier[LOGGER] . identifier[debug] ( literal[string] ,
identifier[self] . identifier[reconnect_delay] )
identifier[self] . identifier[io_loop] . identifier[call_later] ( identifier[self] . identifier[reconnect_delay] , identifier[self] . identifier[connect] )
keyword[return]
identifier[LOGGER] . identifier[warning] ( literal[string] , identifier[self] . identifier[state_description] ) | def _reconnect(self):
"""Schedule the next connection attempt if the class is not currently
closing.
"""
if self.idle or self.closed:
LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds', self.reconnect_delay)
self.io_loop.call_later(self.reconnect_delay, self.connect)
return # depends on [control=['if'], data=[]]
LOGGER.warning('Reconnect called while %s', self.state_description) |
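The deferred retry rests on Tornado's `call_later`; the same primitive in isolation, with a fixed 5-second delay mirroring `reconnect_delay`:

from tornado.ioloop import IOLoop

def connect():
    print('attempting reconnect')
    IOLoop.current().stop()

IOLoop.current().call_later(5.0, connect)
IOLoop.current().start()  # fires connect() once after ~5 seconds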
def get_crop_size(crop_w, crop_h, image_w, image_h):
"""
Determines the correct scale size for the image
when img w == crop w and img h > crop h
Use these dimensions
when img h == crop h and img w > crop w
Use these dimensions
"""
scale1 = float(crop_w) / float(image_w)
scale2 = float(crop_h) / float(image_h)
scale1_w = crop_w # int(round(img_w * scale1))
scale1_h = int(round(image_h * scale1))
scale2_w = int(round(image_w * scale2))
scale2_h = crop_h # int(round(img_h * scale2))
if scale1_h > crop_h: # scale1_w == crop_w
# crop on vertical
return (scale1_w, scale1_h)
else: # scale2_h == crop_h and scale2_w > crop_w
# crop on horizontal
return (scale2_w, scale2_h) | def function[get_crop_size, parameter[crop_w, crop_h, image_w, image_h]]:
constant[
Determines the correct scale size for the image
when img w == crop w and img h > crop h
Use these dimensions
when img h == crop h and img w > crop w
Use these dimensions
]
variable[scale1] assign[=] binary_operation[call[name[float], parameter[name[crop_w]]] / call[name[float], parameter[name[image_w]]]]
variable[scale2] assign[=] binary_operation[call[name[float], parameter[name[crop_h]]] / call[name[float], parameter[name[image_h]]]]
variable[scale1_w] assign[=] name[crop_w]
variable[scale1_h] assign[=] call[name[int], parameter[call[name[round], parameter[binary_operation[name[image_h] * name[scale1]]]]]]
variable[scale2_w] assign[=] call[name[int], parameter[call[name[round], parameter[binary_operation[name[image_w] * name[scale2]]]]]]
variable[scale2_h] assign[=] name[crop_h]
if compare[name[scale1_h] greater[>] name[crop_h]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b1300af0>, <ast.Name object at 0x7da1b1300b50>]]] | keyword[def] identifier[get_crop_size] ( identifier[crop_w] , identifier[crop_h] , identifier[image_w] , identifier[image_h] ):
literal[string]
identifier[scale1] = identifier[float] ( identifier[crop_w] )/ identifier[float] ( identifier[image_w] )
identifier[scale2] = identifier[float] ( identifier[crop_h] )/ identifier[float] ( identifier[image_h] )
identifier[scale1_w] = identifier[crop_w]
identifier[scale1_h] = identifier[int] ( identifier[round] ( identifier[image_h] * identifier[scale1] ))
identifier[scale2_w] = identifier[int] ( identifier[round] ( identifier[image_w] * identifier[scale2] ))
identifier[scale2_h] = identifier[crop_h]
keyword[if] identifier[scale1_h] > identifier[crop_h] :
keyword[return] ( identifier[scale1_w] , identifier[scale1_h] )
keyword[else] :
keyword[return] ( identifier[scale2_w] , identifier[scale2_h] ) | def get_crop_size(crop_w, crop_h, image_w, image_h):
"""
Determines the correct scale size for the image
when img w == crop w and img h > crop h
Use these dimensions
when img h == crop h and img w > crop w
Use these dimensions
"""
scale1 = float(crop_w) / float(image_w)
scale2 = float(crop_h) / float(image_h)
scale1_w = crop_w # int(round(img_w * scale1))
scale1_h = int(round(image_h * scale1))
scale2_w = int(round(image_w * scale2))
scale2_h = crop_h # int(round(img_h * scale2))
if scale1_h > crop_h: # scale1_w == crop_w
# crop on vertical
return (scale1_w, scale1_h) # depends on [control=['if'], data=['scale1_h']]
else: # scale2_h == crop_h and scale2_w > crop_w
# crop on horizontal
return (scale2_w, scale2_h) |
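The function is fully standalone. For a 1200x800 image and a 400x400 crop, scaling by height (0.5) keeps the result covering the crop box, while scaling by width (1/3) would fall short vertically:

print(get_crop_size(400, 400, 1200, 800))  # (600, 400): crop on horizontal
print(get_crop_size(400, 400, 800, 1200))  # (400, 600): crop on vertical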
def getmtime(self, path=None, client_kwargs=None, header=None):
"""
Return the time of last modification of path.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
float: The number of seconds since the epoch
(see the time module).
"""
return self._getmtime_from_header(
self.head(path, client_kwargs, header)) | def function[getmtime, parameter[self, path, client_kwargs, header]]:
constant[
Return the time of last modification of path.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
float: The number of seconds since the epoch
(see the time module).
]
return[call[name[self]._getmtime_from_header, parameter[call[name[self].head, parameter[name[path], name[client_kwargs], name[header]]]]]] | keyword[def] identifier[getmtime] ( identifier[self] , identifier[path] = keyword[None] , identifier[client_kwargs] = keyword[None] , identifier[header] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_getmtime_from_header] (
identifier[self] . identifier[head] ( identifier[path] , identifier[client_kwargs] , identifier[header] )) | def getmtime(self, path=None, client_kwargs=None, header=None):
"""
Return the time of last modification of path.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
float: The number of seconds since the epoch
(see the time module).
"""
return self._getmtime_from_header(self.head(path, client_kwargs, header)) |
def update(self, friendly_name=values.unset, identity=values.unset,
deployment_sid=values.unset, enabled=values.unset):
"""
Update the DeviceInstance
:param unicode friendly_name: A human readable description for this Device.
:param unicode identity: An identifier of the Device user.
:param unicode deployment_sid: The unique SID of the Deployment group.
:param bool enabled: Whether the Device is enabled.
:returns: Updated DeviceInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
identity=identity,
deployment_sid=deployment_sid,
enabled=enabled,
) | def function[update, parameter[self, friendly_name, identity, deployment_sid, enabled]]:
constant[
Update the DeviceInstance
:param unicode friendly_name: A human readable description for this Device.
:param unicode identity: An identifier of the Device user.
:param unicode deployment_sid: The unique SID of the Deployment group.
:param bool enabled: Whether the Device is enabled.
:returns: Updated DeviceInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceInstance
]
return[call[name[self]._proxy.update, parameter[]]] | keyword[def] identifier[update] ( identifier[self] , identifier[friendly_name] = identifier[values] . identifier[unset] , identifier[identity] = identifier[values] . identifier[unset] ,
identifier[deployment_sid] = identifier[values] . identifier[unset] , identifier[enabled] = identifier[values] . identifier[unset] ):
literal[string]
keyword[return] identifier[self] . identifier[_proxy] . identifier[update] (
identifier[friendly_name] = identifier[friendly_name] ,
identifier[identity] = identifier[identity] ,
identifier[deployment_sid] = identifier[deployment_sid] ,
identifier[enabled] = identifier[enabled] ,
) | def update(self, friendly_name=values.unset, identity=values.unset, deployment_sid=values.unset, enabled=values.unset):
"""
Update the DeviceInstance
:param unicode friendly_name: A human readable description for this Device.
:param unicode identity: An identifier of the Device user.
:param unicode deployment_sid: The unique SID of the Deployment group.
:param bool enabled: Whether the Device is enabled.
:returns: Updated DeviceInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceInstance
"""
return self._proxy.update(friendly_name=friendly_name, identity=identity, deployment_sid=deployment_sid, enabled=enabled) |
def current_app(self):
"""
Return: dict(package, activity, pid?)
Raises:
RuntimeError
"""
# try: adb shell dumpsys activity top
_activityRE = re.compile(r'ACTIVITY (?P<package>[^/]+)/(?P<activity>[^/\s]+) \w+ pid=(?P<pid>\d+)')
m = _activityRE.search(self.shell('dumpsys', 'activity', 'top'))
if m:
return dict(package=m.group('package'), activity=m.group('activity'), pid=int(m.group('pid')))
# try: adb shell dumpsys window windows
_focusedRE = re.compile('mFocusedApp=.*ActivityRecord{\w+ \w+ (?P<package>.*)/(?P<activity>.*) .*')
m = _focusedRE.search(self.shell('dumpsys', 'window', 'windows'))
if m:
return dict(package=m.group('package'), activity=m.group('activity'))
raise RuntimeError("Couldn't get focused app") | def function[current_app, parameter[self]]:
constant[
Return: dict(package, activity, pid?)
Raises:
RuntimeError
]
variable[_activityRE] assign[=] call[name[re].compile, parameter[constant[ACTIVITY (?P<package>[^/]+)/(?P<activity>[^/\s]+) \w+ pid=(?P<pid>\d+)]]]
variable[m] assign[=] call[name[_activityRE].search, parameter[call[name[self].shell, parameter[constant[dumpsys], constant[activity], constant[top]]]]]
if name[m] begin[:]
return[call[name[dict], parameter[]]]
variable[_focusedRE] assign[=] call[name[re].compile, parameter[constant[mFocusedApp=.*ActivityRecord{\w+ \w+ (?P<package>.*)/(?P<activity>.*) .*]]]
variable[m] assign[=] call[name[_focusedRE].search, parameter[call[name[self].shell, parameter[constant[dumpsys], constant[window], constant[windows]]]]]
if name[m] begin[:]
return[call[name[dict], parameter[]]]
<ast.Raise object at 0x7da204565150> | keyword[def] identifier[current_app] ( identifier[self] ):
literal[string]
identifier[_activityRE] = identifier[re] . identifier[compile] ( literal[string] )
identifier[m] = identifier[_activityRE] . identifier[search] ( identifier[self] . identifier[shell] ( literal[string] , literal[string] , literal[string] ))
keyword[if] identifier[m] :
keyword[return] identifier[dict] ( identifier[package] = identifier[m] . identifier[group] ( literal[string] ), identifier[activity] = identifier[m] . identifier[group] ( literal[string] ), identifier[pid] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] )))
identifier[_focusedRE] = identifier[re] . identifier[compile] ( literal[string] )
identifier[m] = identifier[_focusedRE] . identifier[search] ( identifier[self] . identifier[shell] ( literal[string] , literal[string] , literal[string] ))
keyword[if] identifier[m] :
keyword[return] identifier[dict] ( identifier[package] = identifier[m] . identifier[group] ( literal[string] ), identifier[activity] = identifier[m] . identifier[group] ( literal[string] ))
keyword[raise] identifier[RuntimeError] ( literal[string] ) | def current_app(self):
"""
Return: dict(package, activity, pid?)
Raises:
RuntimeError
"""
# try: adb shell dumpsys activity top
_activityRE = re.compile('ACTIVITY (?P<package>[^/]+)/(?P<activity>[^/\\s]+) \\w+ pid=(?P<pid>\\d+)')
m = _activityRE.search(self.shell('dumpsys', 'activity', 'top'))
if m:
return dict(package=m.group('package'), activity=m.group('activity'), pid=int(m.group('pid'))) # depends on [control=['if'], data=[]]
# try: adb shell dumpsys window windows
_focusedRE = re.compile('mFocusedApp=.*ActivityRecord{\\w+ \\w+ (?P<package>.*)/(?P<activity>.*) .*')
m = _focusedRE.search(self.shell('dumpsys', 'window', 'windows'))
if m:
return dict(package=m.group('package'), activity=m.group('activity')) # depends on [control=['if'], data=[]]
raise RuntimeError("Couldn't get focused app") |
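How the first regex resolves against a typical `dumpsys activity top` line (the sample line below is made up for illustration):

import re

line = 'ACTIVITY com.android.settings/.Settings f3a2b1c pid=1234'
m = re.search(r'ACTIVITY (?P<package>[^/]+)/(?P<activity>[^/\s]+)'
              r' \w+ pid=(?P<pid>\d+)', line)
print(m.group('package'), m.group('activity'), int(m.group('pid')))
# com.android.settings .Settings 1234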
def get_cimobject_header(obj):
"""
Return the value for the CIM-XML extension header field 'CIMObject', using
the given object.
This function implements the rules defined in DSP0200 section 6.3.7
"CIMObject". The format of the CIMObject value is similar but not identical
to a local WBEM URI (one without namespace type and authority), as defined
in DSP0207.
One difference is that DSP0207 requires a leading slash for a local WBEM
URI, e.g. '/root/cimv2:CIM_Class.k=1', while the CIMObject value has no
leading slash, e.g. 'root/cimv2:CIM_Class.k=1'.
Another difference is that the CIMObject value for instance paths has
provisions for an instance path without keys, while WBEM URIs do not have
that. Pywbem does not support that.
"""
# Local namespace path
if isinstance(obj, six.string_types):
return obj
# Local class path
if isinstance(obj, CIMClassName):
return obj.to_wbem_uri(format='cimobject')
# Local instance path
if isinstance(obj, CIMInstanceName):
return obj.to_wbem_uri(format='cimobject')
raise TypeError(
_format("Invalid object type {0} to generate CIMObject header value "
"from", type(obj))) | def function[get_cimobject_header, parameter[obj]]:
constant[
Return the value for the CIM-XML extension header field 'CIMObject', using
the given object.
This function implements the rules defined in DSP0200 section 6.3.7
"CIMObject". The format of the CIMObject value is similar but not identical
to a local WBEM URI (one without namespace type and authority), as defined
in DSP0207.
One difference is that DSP0207 requires a leading slash for a local WBEM
URI, e.g. '/root/cimv2:CIM_Class.k=1', while the CIMObject value has no
leading slash, e.g. 'root/cimv2:CIM_Class.k=1'.
Another difference is that the CIMObject value for instance paths has
provisions for an instance path without keys, while WBEM URIs do not have
that. Pywbem does not support that.
]
if call[name[isinstance], parameter[name[obj], name[six].string_types]] begin[:]
return[name[obj]]
if call[name[isinstance], parameter[name[obj], name[CIMClassName]]] begin[:]
return[call[name[obj].to_wbem_uri, parameter[]]]
if call[name[isinstance], parameter[name[obj], name[CIMInstanceName]]] begin[:]
return[call[name[obj].to_wbem_uri, parameter[]]]
<ast.Raise object at 0x7da1b0c963e0> | keyword[def] identifier[get_cimobject_header] ( identifier[obj] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[six] . identifier[string_types] ):
keyword[return] identifier[obj]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[CIMClassName] ):
keyword[return] identifier[obj] . identifier[to_wbem_uri] ( identifier[format] = literal[string] )
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[CIMInstanceName] ):
keyword[return] identifier[obj] . identifier[to_wbem_uri] ( identifier[format] = literal[string] )
keyword[raise] identifier[TypeError] (
identifier[_format] ( literal[string]
literal[string] , identifier[type] ( identifier[obj] ))) | def get_cimobject_header(obj):
"""
Return the value for the CIM-XML extension header field 'CIMObject', using
the given object.
This function implements the rules defined in DSP0200 section 6.3.7
"CIMObject". The format of the CIMObject value is similar but not identical
to a local WBEM URI (one without namespace type and authority), as defined
in DSP0207.
One difference is that DSP0207 requires a leading slash for a local WBEM
URI, e.g. '/root/cimv2:CIM_Class.k=1', while the CIMObject value has no
leading slash, e.g. 'root/cimv2:CIM_Class.k=1'.
Another difference is that the CIMObject value for instance paths has
provisions for an instance path without keys, while WBEM URIs do not have
that. Pywbem does not support that.
"""
# Local namespace path
if isinstance(obj, six.string_types):
return obj # depends on [control=['if'], data=[]]
# Local class path
if isinstance(obj, CIMClassName):
return obj.to_wbem_uri(format='cimobject') # depends on [control=['if'], data=[]]
# Local instance path
if isinstance(obj, CIMInstanceName):
return obj.to_wbem_uri(format='cimobject') # depends on [control=['if'], data=[]]
raise TypeError(_format('Invalid object type {0} to generate CIMObject header value from', type(obj))) |
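A minimal usage sketch for the function above, assuming pywbem is installed so that CIMClassName and CIMInstanceName are importable; the class name and key below are made up.
from pywbem import CIMClassName, CIMInstanceName
print(get_cimobject_header('root/cimv2'))  # a namespace string passes through unchanged
path = CIMInstanceName('CIM_Class', keybindings={'k': 1}, namespace='root/cimv2')
print(get_cimobject_header(path))          # root/cimv2:CIM_Class.k=1 (note: no leading slash)
# get_cimobject_header(42) would raise TypeError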
def from_json(cls, data):
"""Create a Design Day from a dictionary.
Args:
data = {
"name": string,
"day_type": string,
"location": ladybug Location schema,
"dry_bulb_condition": ladybug DryBulbCondition schema,
"humidity_condition": ladybug HumidityCondition schema,
"wind_condition": ladybug WindCondition schema,
"sky_condition": ladybug SkyCondition schema}
"""
required_keys = ('name', 'day_type', 'location', 'dry_bulb_condition',
'humidity_condition', 'wind_condition', 'sky_condition')
for key in required_keys:
assert key in data, 'Required key "{}" is missing!'.format(key)
return cls(data['name'], data['day_type'], Location.from_json(data['location']),
DryBulbCondition.from_json(data['dry_bulb_condition']),
HumidityCondition.from_json(data['humidity_condition']),
WindCondition.from_json(data['wind_condition']),
SkyCondition.from_json(data['sky_condition'])) | def function[from_json, parameter[cls, data]]:
constant[Create a Design Day from a dictionary.
Args:
data = {
"name": string,
"day_type": string,
"location": ladybug Location schema,
"dry_bulb_condition": ladybug DryBulbCondition schema,
"humidity_condition": ladybug HumidityCondition schema,
"wind_condition": ladybug WindCondition schema,
"sky_condition": ladybug SkyCondition schema}
]
variable[required_keys] assign[=] tuple[[<ast.Constant object at 0x7da1b12d7820>, <ast.Constant object at 0x7da1b12d77f0>, <ast.Constant object at 0x7da1b12d7760>, <ast.Constant object at 0x7da1b12d7730>, <ast.Constant object at 0x7da1b12d77c0>, <ast.Constant object at 0x7da1b12d7790>, <ast.Constant object at 0x7da1b12d7af0>]]
for taget[name[key]] in starred[name[required_keys]] begin[:]
assert[compare[name[key] in name[data]]]
return[call[name[cls], parameter[call[name[data]][constant[name]], call[name[data]][constant[day_type]], call[name[Location].from_json, parameter[call[name[data]][constant[location]]]], call[name[DryBulbCondition].from_json, parameter[call[name[data]][constant[dry_bulb_condition]]]], call[name[HumidityCondition].from_json, parameter[call[name[data]][constant[humidity_condition]]]], call[name[WindCondition].from_json, parameter[call[name[data]][constant[wind_condition]]]], call[name[SkyCondition].from_json, parameter[call[name[data]][constant[sky_condition]]]]]]] | keyword[def] identifier[from_json] ( identifier[cls] , identifier[data] ):
literal[string]
identifier[required_keys] =( literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] )
keyword[for] identifier[key] keyword[in] identifier[required_keys] :
keyword[assert] identifier[key] keyword[in] identifier[data] , literal[string] . identifier[format] ( identifier[key] )
keyword[return] identifier[cls] ( identifier[data] [ literal[string] ], identifier[data] [ literal[string] ], identifier[Location] . identifier[from_json] ( identifier[data] [ literal[string] ]),
identifier[DryBulbCondition] . identifier[from_json] ( identifier[data] [ literal[string] ]),
identifier[HumidityCondition] . identifier[from_json] ( identifier[data] [ literal[string] ]),
identifier[WindCondition] . identifier[from_json] ( identifier[data] [ literal[string] ]),
identifier[SkyCondition] . identifier[from_json] ( identifier[data] [ literal[string] ])) | def from_json(cls, data):
"""Create a Design Day from a dictionary.
Args:
data = {
"name": string,
"day_type": string,
"location": ladybug Location schema,
"dry_bulb_condition": ladybug DryBulbCondition schema,
"humidity_condition": ladybug HumidityCondition schema,
"wind_condition": ladybug WindCondition schema,
"sky_condition": ladybug SkyCondition schema}
"""
required_keys = ('name', 'day_type', 'location', 'dry_bulb_condition', 'humidity_condition', 'wind_condition', 'sky_condition')
for key in required_keys:
assert key in data, 'Required key "{}" is missing!'.format(key) # depends on [control=['for'], data=['key']]
return cls(data['name'], data['day_type'], Location.from_json(data['location']), DryBulbCondition.from_json(data['dry_bulb_condition']), HumidityCondition.from_json(data['humidity_condition']), WindCondition.from_json(data['wind_condition']), SkyCondition.from_json(data['sky_condition'])) |
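The required-key guard is the part most likely to trip callers; this fragment replays it in isolation with a deliberately incomplete, hypothetical payload.
data = {'name': 'Hot July Day', 'day_type': 'SummerDesignDay'}
required_keys = ('name', 'day_type', 'location', 'dry_bulb_condition',
                 'humidity_condition', 'wind_condition', 'sky_condition')
for key in required_keys:
    assert key in data, 'Required key "{}" is missing!'.format(key)
# AssertionError: Required key "location" is missing!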
def cee_map_priority_table_map_cos6_pgid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
cee_map = ET.SubElement(config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map")
name_key = ET.SubElement(cee_map, "name")
name_key.text = kwargs.pop('name')
priority_table = ET.SubElement(cee_map, "priority-table")
map_cos6_pgid = ET.SubElement(priority_table, "map-cos6-pgid")
map_cos6_pgid.text = kwargs.pop('map_cos6_pgid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[cee_map_priority_table_map_cos6_pgid, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[cee_map] assign[=] call[name[ET].SubElement, parameter[name[config], constant[cee-map]]]
variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[cee_map], constant[name]]]
name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[priority_table] assign[=] call[name[ET].SubElement, parameter[name[cee_map], constant[priority-table]]]
variable[map_cos6_pgid] assign[=] call[name[ET].SubElement, parameter[name[priority_table], constant[map-cos6-pgid]]]
name[map_cos6_pgid].text assign[=] call[name[kwargs].pop, parameter[constant[map_cos6_pgid]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[cee_map_priority_table_map_cos6_pgid] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[cee_map] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[cee_map] , literal[string] )
identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[priority_table] = identifier[ET] . identifier[SubElement] ( identifier[cee_map] , literal[string] )
identifier[map_cos6_pgid] = identifier[ET] . identifier[SubElement] ( identifier[priority_table] , literal[string] )
identifier[map_cos6_pgid] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def cee_map_priority_table_map_cos6_pgid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
cee_map = ET.SubElement(config, 'cee-map', xmlns='urn:brocade.com:mgmt:brocade-cee-map')
name_key = ET.SubElement(cee_map, 'name')
name_key.text = kwargs.pop('name')
priority_table = ET.SubElement(cee_map, 'priority-table')
map_cos6_pgid = ET.SubElement(priority_table, 'map-cos6-pgid')
map_cos6_pgid.text = kwargs.pop('map_cos6_pgid')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
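A standalone sketch of the same ElementTree pattern; the 'default' and '1' values are placeholders. The xmlns keyword simply becomes an attribute on the element, which is how these generated builders pin the Brocade namespace.
import xml.etree.ElementTree as ET
config = ET.Element('config')
cee_map = ET.SubElement(config, 'cee-map', xmlns='urn:brocade.com:mgmt:brocade-cee-map')
ET.SubElement(cee_map, 'name').text = 'default'            # placeholder map name
priority_table = ET.SubElement(cee_map, 'priority-table')
ET.SubElement(priority_table, 'map-cos6-pgid').text = '1'  # placeholder priority group id
print(ET.tostring(config).decode())
# <config><cee-map xmlns="urn:brocade.com:mgmt:brocade-cee-map"><name>default</name>...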
def html_to_fc(html=None, clean_html=None, clean_visible=None, encoding=None, url=None,
timestamp=None, other_features=None):
'''`html` is expected to be a raw string received over the wire from a
remote webserver, and `encoding`, if provided, is used to decode
it. Typically, encoding comes from the Content-Type header field.
The :func:`~streamcorpus_pipeline._clean_html.make_clean_html`
function handles character encodings.
'''
def add_feature(name, xs):
if name not in fc:
fc[name] = StringCounter()
fc[name] += StringCounter(xs)
timestamp = timestamp or int(time.time() * 1000)
other_features = other_features or {}
if clean_html is None:
if html is not None:
try:
clean_html_utf8 = make_clean_html(html, encoding=encoding)
except:
logger.warn('dropping doc because:', exc_info=True)
return
clean_html = clean_html_utf8.decode('utf-8')
else:
clean_html_utf8 = u''
clean_html = u''
else:
clean_html_utf8 = u''
if clean_visible is None or len(clean_visible) == 0:
clean_visible = make_clean_visible(clean_html_utf8).decode('utf-8')
elif isinstance(clean_visible, str):
clean_visible = clean_visible.decode('utf-8')
fc = FeatureCollection()
fc[u'meta_raw'] = html and uni(html, encoding) or u''
fc[u'meta_clean_html'] = clean_html
fc[u'meta_clean_visible'] = clean_visible
fc[u'meta_timestamp'] = unicode(timestamp)
url = url or u''
fc[u'meta_url'] = uni(url)
add_feature(u'icq', features.ICQs(clean_visible))
add_feature(u'skype', features.skypes(clean_visible))
add_feature(u'phone', features.phones(clean_visible))
add_feature(u'email', features.emails(clean_visible))
bowNP, normalizations = features.noun_phrases(
cleanse(clean_visible), included_unnormalized=True)
add_feature(u'bowNP', bowNP)
bowNP_unnorm = chain(*normalizations.values())
add_feature(u'bowNP_unnorm', bowNP_unnorm)
add_feature(u'image_url', features.image_urls(clean_html))
add_feature(u'a_url', features.a_urls(clean_html))
## get parsed versions, extract usernames
fc[u'img_url_path_dirs'] = features.path_dirs(fc[u'image_url'])
fc[u'img_url_hostnames'] = features.host_names(fc[u'image_url'])
fc[u'usernames'] = features.usernames(fc[u'image_url'])
fc[u'a_url_path_dirs'] = features.path_dirs(fc[u'a_url'])
fc[u'a_url_hostnames'] = features.host_names(fc[u'a_url'])
fc[u'usernames'] += features.usernames(fc[u'a_url'])
#fc[u'usernames'] += features.usernames2(
# fc[u'meta_clean_visible'])
# beginning of treating this as a pipeline...
xform = features.entity_names()
fc = xform.process(fc)
for feat_name, feat_val in other_features.iteritems():
fc[feat_name] += StringCounter(feat_val)
return fc | def function[html_to_fc, parameter[html, clean_html, clean_visible, encoding, url, timestamp, other_features]]:
constant[`html` is expected to be a raw string received over the wire from a
remote webserver, and `encoding`, if provided, is used to decode
it. Typically, encoding comes from the Content-Type header field.
The :func:`~streamcorpus_pipeline._clean_html.make_clean_html`
function handles character encodings.
]
def function[add_feature, parameter[name, xs]]:
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[fc]] begin[:]
call[name[fc]][name[name]] assign[=] call[name[StringCounter], parameter[]]
<ast.AugAssign object at 0x7da20c992110>
variable[timestamp] assign[=] <ast.BoolOp object at 0x7da20c992650>
variable[other_features] assign[=] <ast.BoolOp object at 0x7da20c9925f0>
if compare[name[clean_html] is constant[None]] begin[:]
if compare[name[html] is_not constant[None]] begin[:]
<ast.Try object at 0x7da20c992350>
variable[clean_html] assign[=] call[name[clean_html_utf8].decode, parameter[constant[utf-8]]]
if <ast.BoolOp object at 0x7da20c993340> begin[:]
variable[clean_visible] assign[=] call[call[name[make_clean_visible], parameter[name[clean_html_utf8]]].decode, parameter[constant[utf-8]]]
variable[fc] assign[=] call[name[FeatureCollection], parameter[]]
call[name[fc]][constant[meta_raw]] assign[=] <ast.BoolOp object at 0x7da20c9904c0>
call[name[fc]][constant[meta_clean_html]] assign[=] name[clean_html]
call[name[fc]][constant[meta_clean_visible]] assign[=] name[clean_visible]
call[name[fc]][constant[meta_timestamp]] assign[=] call[name[unicode], parameter[name[timestamp]]]
variable[url] assign[=] <ast.BoolOp object at 0x7da20c990ca0>
call[name[fc]][constant[meta_url]] assign[=] call[name[uni], parameter[name[url]]]
call[name[add_feature], parameter[constant[icq], call[name[features].ICQs, parameter[name[clean_visible]]]]]
call[name[add_feature], parameter[constant[skype], call[name[features].skypes, parameter[name[clean_visible]]]]]
call[name[add_feature], parameter[constant[phone], call[name[features].phones, parameter[name[clean_visible]]]]]
call[name[add_feature], parameter[constant[email], call[name[features].emails, parameter[name[clean_visible]]]]]
<ast.Tuple object at 0x7da20c993a30> assign[=] call[name[features].noun_phrases, parameter[call[name[cleanse], parameter[name[clean_visible]]]]]
call[name[add_feature], parameter[constant[bowNP], name[bowNP]]]
variable[bowNP_unnorm] assign[=] call[name[chain], parameter[<ast.Starred object at 0x7da20c9934f0>]]
call[name[add_feature], parameter[constant[bowNP_unnorm], name[bowNP_unnorm]]]
call[name[add_feature], parameter[constant[image_url], call[name[features].image_urls, parameter[name[clean_html]]]]]
call[name[add_feature], parameter[constant[a_url], call[name[features].a_urls, parameter[name[clean_html]]]]]
call[name[fc]][constant[img_url_path_dirs]] assign[=] call[name[features].path_dirs, parameter[call[name[fc]][constant[image_url]]]]
call[name[fc]][constant[img_url_hostnames]] assign[=] call[name[features].host_names, parameter[call[name[fc]][constant[image_url]]]]
call[name[fc]][constant[usernames]] assign[=] call[name[features].usernames, parameter[call[name[fc]][constant[image_url]]]]
call[name[fc]][constant[a_url_path_dirs]] assign[=] call[name[features].path_dirs, parameter[call[name[fc]][constant[a_url]]]]
call[name[fc]][constant[a_url_hostnames]] assign[=] call[name[features].host_names, parameter[call[name[fc]][constant[a_url]]]]
<ast.AugAssign object at 0x7da20c993af0>
variable[xform] assign[=] call[name[features].entity_names, parameter[]]
variable[fc] assign[=] call[name[xform].process, parameter[name[fc]]]
for taget[tuple[[<ast.Name object at 0x7da20c990cd0>, <ast.Name object at 0x7da20c9903a0>]]] in starred[call[name[other_features].iteritems, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da18c4ce080>
return[name[fc]] | keyword[def] identifier[html_to_fc] ( identifier[html] = keyword[None] , identifier[clean_html] = keyword[None] , identifier[clean_visible] = keyword[None] , identifier[encoding] = keyword[None] , identifier[url] = keyword[None] ,
identifier[timestamp] = keyword[None] , identifier[other_features] = keyword[None] ):
literal[string]
keyword[def] identifier[add_feature] ( identifier[name] , identifier[xs] ):
keyword[if] identifier[name] keyword[not] keyword[in] identifier[fc] :
identifier[fc] [ identifier[name] ]= identifier[StringCounter] ()
identifier[fc] [ identifier[name] ]+= identifier[StringCounter] ( identifier[xs] )
identifier[timestamp] = identifier[timestamp] keyword[or] identifier[int] ( identifier[time] . identifier[time] ()* literal[int] )
identifier[other_features] = identifier[other_features] keyword[or] {}
keyword[if] identifier[clean_html] keyword[is] keyword[None] :
keyword[if] identifier[html] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[clean_html_utf8] = identifier[make_clean_html] ( identifier[html] , identifier[encoding] = identifier[encoding] )
keyword[except] :
identifier[logger] . identifier[warn] ( literal[string] , identifier[exc_info] = keyword[True] )
keyword[return]
identifier[clean_html] = identifier[clean_html_utf8] . identifier[decode] ( literal[string] )
keyword[else] :
identifier[clean_html_utf8] = literal[string]
identifier[clean_html] = literal[string]
keyword[else] :
identifier[clean_html_utf8] = literal[string]
keyword[if] identifier[clean_visible] keyword[is] keyword[None] keyword[or] identifier[len] ( identifier[clean_visible] )== literal[int] :
identifier[clean_visible] = identifier[make_clean_visible] ( identifier[clean_html_utf8] ). identifier[decode] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[clean_visible] , identifier[str] ):
identifier[clean_visible] = identifier[clean_visible] . identifier[decode] ( literal[string] )
identifier[fc] = identifier[FeatureCollection] ()
identifier[fc] [ literal[string] ]= identifier[html] keyword[and] identifier[uni] ( identifier[html] , identifier[encoding] ) keyword[or] literal[string]
identifier[fc] [ literal[string] ]= identifier[clean_html]
identifier[fc] [ literal[string] ]= identifier[clean_visible]
identifier[fc] [ literal[string] ]= identifier[unicode] ( identifier[timestamp] )
identifier[url] = identifier[url] keyword[or] literal[string]
identifier[fc] [ literal[string] ]= identifier[uni] ( identifier[url] )
identifier[add_feature] ( literal[string] , identifier[features] . identifier[ICQs] ( identifier[clean_visible] ))
identifier[add_feature] ( literal[string] , identifier[features] . identifier[skypes] ( identifier[clean_visible] ))
identifier[add_feature] ( literal[string] , identifier[features] . identifier[phones] ( identifier[clean_visible] ))
identifier[add_feature] ( literal[string] , identifier[features] . identifier[emails] ( identifier[clean_visible] ))
identifier[bowNP] , identifier[normalizations] = identifier[features] . identifier[noun_phrases] (
identifier[cleanse] ( identifier[clean_visible] ), identifier[included_unnormalized] = keyword[True] )
identifier[add_feature] ( literal[string] , identifier[bowNP] )
identifier[bowNP_unnorm] = identifier[chain] (* identifier[normalizations] . identifier[values] ())
identifier[add_feature] ( literal[string] , identifier[bowNP_unnorm] )
identifier[add_feature] ( literal[string] , identifier[features] . identifier[image_urls] ( identifier[clean_html] ))
identifier[add_feature] ( literal[string] , identifier[features] . identifier[a_urls] ( identifier[clean_html] ))
identifier[fc] [ literal[string] ]= identifier[features] . identifier[path_dirs] ( identifier[fc] [ literal[string] ])
identifier[fc] [ literal[string] ]= identifier[features] . identifier[host_names] ( identifier[fc] [ literal[string] ])
identifier[fc] [ literal[string] ]= identifier[features] . identifier[usernames] ( identifier[fc] [ literal[string] ])
identifier[fc] [ literal[string] ]= identifier[features] . identifier[path_dirs] ( identifier[fc] [ literal[string] ])
identifier[fc] [ literal[string] ]= identifier[features] . identifier[host_names] ( identifier[fc] [ literal[string] ])
identifier[fc] [ literal[string] ]+= identifier[features] . identifier[usernames] ( identifier[fc] [ literal[string] ])
identifier[xform] = identifier[features] . identifier[entity_names] ()
identifier[fc] = identifier[xform] . identifier[process] ( identifier[fc] )
keyword[for] identifier[feat_name] , identifier[feat_val] keyword[in] identifier[other_features] . identifier[iteritems] ():
identifier[fc] [ identifier[feat_name] ]+= identifier[StringCounter] ( identifier[feat_val] )
keyword[return] identifier[fc] | def html_to_fc(html=None, clean_html=None, clean_visible=None, encoding=None, url=None, timestamp=None, other_features=None):
"""`html` is expected to be a raw string received over the wire from a
remote webserver, and `encoding`, if provided, is used to decode
it. Typically, encoding comes from the Content-Type header field.
The :func:`~streamcorpus_pipeline._clean_html.make_clean_html`
function handles character encodings.
"""
def add_feature(name, xs):
if name not in fc:
fc[name] = StringCounter() # depends on [control=['if'], data=['name', 'fc']]
fc[name] += StringCounter(xs)
timestamp = timestamp or int(time.time() * 1000)
other_features = other_features or {}
if clean_html is None:
if html is not None:
try:
clean_html_utf8 = make_clean_html(html, encoding=encoding) # depends on [control=['try'], data=[]]
except:
logger.warn('dropping doc because:', exc_info=True)
return # depends on [control=['except'], data=[]]
clean_html = clean_html_utf8.decode('utf-8') # depends on [control=['if'], data=['html']]
else:
clean_html_utf8 = u''
clean_html = u'' # depends on [control=['if'], data=['clean_html']]
else:
clean_html_utf8 = u''
if clean_visible is None or len(clean_visible) == 0:
clean_visible = make_clean_visible(clean_html_utf8).decode('utf-8') # depends on [control=['if'], data=[]]
elif isinstance(clean_visible, str):
clean_visible = clean_visible.decode('utf-8') # depends on [control=['if'], data=[]]
fc = FeatureCollection()
fc[u'meta_raw'] = html and uni(html, encoding) or u''
fc[u'meta_clean_html'] = clean_html
fc[u'meta_clean_visible'] = clean_visible
fc[u'meta_timestamp'] = unicode(timestamp)
url = url or u''
fc[u'meta_url'] = uni(url)
add_feature(u'icq', features.ICQs(clean_visible))
add_feature(u'skype', features.skypes(clean_visible))
add_feature(u'phone', features.phones(clean_visible))
add_feature(u'email', features.emails(clean_visible))
(bowNP, normalizations) = features.noun_phrases(cleanse(clean_visible), included_unnormalized=True)
add_feature(u'bowNP', bowNP)
bowNP_unnorm = chain(*normalizations.values())
add_feature(u'bowNP_unnorm', bowNP_unnorm)
add_feature(u'image_url', features.image_urls(clean_html))
add_feature(u'a_url', features.a_urls(clean_html))
## get parsed versions, extract usernames
fc[u'img_url_path_dirs'] = features.path_dirs(fc[u'image_url'])
fc[u'img_url_hostnames'] = features.host_names(fc[u'image_url'])
fc[u'usernames'] = features.usernames(fc[u'image_url'])
fc[u'a_url_path_dirs'] = features.path_dirs(fc[u'a_url'])
fc[u'a_url_hostnames'] = features.host_names(fc[u'a_url'])
fc[u'usernames'] += features.usernames(fc[u'a_url'])
#fc[u'usernames'] += features.usernames2(
# fc[u'meta_clean_visible'])
# beginning of treating this as a pipeline...
xform = features.entity_names()
fc = xform.process(fc)
for (feat_name, feat_val) in other_features.iteritems():
fc[feat_name] += StringCounter(feat_val) # depends on [control=['for'], data=[]]
return fc |
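The add_feature closure is the core accumulation trick in html_to_fc; this sketch reproduces it with collections.Counter standing in for StringCounter, showing why repeated calls merge counts rather than overwrite them.
from collections import Counter
fc = {}
def add_feature(name, xs):
    if name not in fc:
        fc[name] = Counter()
    fc[name] += Counter(xs)      # merge, never overwrite
add_feature(u'email', ['a@example.com', 'b@example.com'])
add_feature(u'email', ['a@example.com'])
print(fc[u'email'])              # Counter({'a@example.com': 2, 'b@example.com': 1})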
def process_all(self, texts:Collection[str]) -> List[List[str]]:
"Process a list of `texts`."
if self.n_cpus <= 1: return self._process_all_1(texts)
with ProcessPoolExecutor(self.n_cpus) as e:
return sum(e.map(self._process_all_1, partition_by_cores(texts, self.n_cpus)), []) | def function[process_all, parameter[self, texts]]:
constant[Process a list of `texts`.]
if compare[name[self].n_cpus less_or_equal[<=] constant[1]] begin[:]
return[call[name[self]._process_all_1, parameter[name[texts]]]]
with call[name[ProcessPoolExecutor], parameter[name[self].n_cpus]] begin[:]
return[call[name[sum], parameter[call[name[e].map, parameter[name[self]._process_all_1, call[name[partition_by_cores], parameter[name[texts], name[self].n_cpus]]]], list[[]]]]] | keyword[def] identifier[process_all] ( identifier[self] , identifier[texts] : identifier[Collection] [ identifier[str] ])-> identifier[List] [ identifier[List] [ identifier[str] ]]:
literal[string]
keyword[if] identifier[self] . identifier[n_cpus] <= literal[int] : keyword[return] identifier[self] . identifier[_process_all_1] ( identifier[texts] )
keyword[with] identifier[ProcessPoolExecutor] ( identifier[self] . identifier[n_cpus] ) keyword[as] identifier[e] :
keyword[return] identifier[sum] ( identifier[e] . identifier[map] ( identifier[self] . identifier[_process_all_1] , identifier[partition_by_cores] ( identifier[texts] , identifier[self] . identifier[n_cpus] )),[]) | def process_all(self, texts: Collection[str]) -> List[List[str]]:
"""Process a list of `texts`."""
if self.n_cpus <= 1:
return self._process_all_1(texts) # depends on [control=['if'], data=[]]
with ProcessPoolExecutor(self.n_cpus) as e:
return sum(e.map(self._process_all_1, partition_by_cores(texts, self.n_cpus)), []) # depends on [control=['with'], data=['e']] |
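A self-contained sketch of the same fan-out pattern, with a simple chunking helper standing in for fastai's partition_by_cores; sum(..., []) is what flattens the per-worker lists back into one.
from concurrent.futures import ProcessPoolExecutor
def partition(xs, n_chunks):
    step = max(1, len(xs) // n_chunks)
    return [xs[i:i + step] for i in range(0, len(xs), step)]
def process_chunk(texts):
    return [t.split() for t in texts]   # stand-in for _process_all_1
if __name__ == '__main__':
    texts = ['a b', 'c d', 'e f', 'g h']
    with ProcessPoolExecutor(2) as e:
        print(sum(e.map(process_chunk, partition(texts, 2)), []))
        # [['a', 'b'], ['c', 'd'], ['e', 'f'], ['g', 'h']]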
def _CheckLogFileSize(cursor):
"""Warns if MySQL log file size is not large enough for blob insertions."""
# Do not fail, because users might not be able to change this for their
# database. Instead, warn the user about the impacts.
innodb_log_file_size = int(_ReadVariable("innodb_log_file_size", cursor))
required_size = 10 * mysql_blobs.BLOB_CHUNK_SIZE
if innodb_log_file_size < required_size:
# See MySQL error 1118: The size of BLOB/TEXT data inserted in one
# transaction is greater than 10% of redo log size. Increase the redo log
# size using innodb_log_file_size.
max_blob_size = innodb_log_file_size / 10
max_blob_size_mib = max_blob_size / 2**20
logging.warning(
"MySQL innodb_log_file_size of %d is required, got %d. "
"Storing Blobs bigger than %.4f MiB will fail.", required_size,
innodb_log_file_size, max_blob_size_mib) | def function[_CheckLogFileSize, parameter[cursor]]:
constant[Warns if MySQL log file size is not large enough for blob insertions.]
variable[innodb_log_file_size] assign[=] call[name[int], parameter[call[name[_ReadVariable], parameter[constant[innodb_log_file_size], name[cursor]]]]]
variable[required_size] assign[=] binary_operation[constant[10] * name[mysql_blobs].BLOB_CHUNK_SIZE]
if compare[name[innodb_log_file_size] less[<] name[required_size]] begin[:]
variable[max_blob_size] assign[=] binary_operation[name[innodb_log_file_size] / constant[10]]
variable[max_blob_size_mib] assign[=] binary_operation[name[max_blob_size] / binary_operation[constant[2] ** constant[20]]]
call[name[logging].warning, parameter[constant[MySQL innodb_log_file_size of %d is required, got %d. Storing Blobs bigger than %.4f MiB will fail.], name[required_size], name[innodb_log_file_size], name[max_blob_size_mib]]] | keyword[def] identifier[_CheckLogFileSize] ( identifier[cursor] ):
literal[string]
identifier[innodb_log_file_size] = identifier[int] ( identifier[_ReadVariable] ( literal[string] , identifier[cursor] ))
identifier[required_size] = literal[int] * identifier[mysql_blobs] . identifier[BLOB_CHUNK_SIZE]
keyword[if] identifier[innodb_log_file_size] < identifier[required_size] :
identifier[max_blob_size] = identifier[innodb_log_file_size] / literal[int]
identifier[max_blob_size_mib] = identifier[max_blob_size] / literal[int] ** literal[int]
identifier[logging] . identifier[warning] (
literal[string]
literal[string] , identifier[required_size] ,
identifier[innodb_log_file_size] , identifier[max_blob_size_mib] ) | def _CheckLogFileSize(cursor):
"""Warns if MySQL log file size is not large enough for blob insertions."""
# Do not fail, because users might not be able to change this for their
# database. Instead, warn the user about the impacts.
innodb_log_file_size = int(_ReadVariable('innodb_log_file_size', cursor))
required_size = 10 * mysql_blobs.BLOB_CHUNK_SIZE
if innodb_log_file_size < required_size:
# See MySQL error 1118: The size of BLOB/TEXT data inserted in one
# transaction is greater than 10% of redo log size. Increase the redo log
# size using innodb_log_file_size.
max_blob_size = innodb_log_file_size / 10
max_blob_size_mib = max_blob_size / 2 ** 20
logging.warning('MySQL innodb_log_file_size of %d is required, got %d. Storing Blobs bigger than %.4f MiB will fail.', required_size, innodb_log_file_size, max_blob_size_mib) # depends on [control=['if'], data=['innodb_log_file_size', 'required_size']] |
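Concrete numbers make the 10% rule easier to read; the chunk size below is a placeholder, not the real mysql_blobs constant.
BLOB_CHUNK_SIZE = 16 * 2**20           # assumed 16 MiB chunks (placeholder)
innodb_log_file_size = 48 * 2**20      # a 48 MiB redo log, as MySQL might report it
required_size = 10 * BLOB_CHUNK_SIZE   # 160 MiB needed to hold full-size chunks
max_blob_size_mib = innodb_log_file_size / 10 / 2**20
print(innodb_log_file_size < required_size, max_blob_size_mib)  # True 4.8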
def metadata_converter_help():
"""Help message for metadata converter Dialog.
.. versionadded:: 4.3
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(heading())
message.add(content())
return message | def function[metadata_converter_help, parameter[]]:
constant[Help message for metadata converter Dialog.
.. versionadded:: 4.3
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
]
variable[message] assign[=] call[name[m].Message, parameter[]]
call[name[message].add, parameter[call[name[m].Brand, parameter[]]]]
call[name[message].add, parameter[call[name[heading], parameter[]]]]
call[name[message].add, parameter[call[name[content], parameter[]]]]
return[name[message]] | keyword[def] identifier[metadata_converter_help] ():
literal[string]
identifier[message] = identifier[m] . identifier[Message] ()
identifier[message] . identifier[add] ( identifier[m] . identifier[Brand] ())
identifier[message] . identifier[add] ( identifier[heading] ())
identifier[message] . identifier[add] ( identifier[content] ())
keyword[return] identifier[message] | def metadata_converter_help():
"""Help message for metadata converter Dialog.
.. versionadded:: 4.3
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(heading())
message.add(content())
return message |
def match_check(self, regex, fun):
'''
        Validate a single regex-to-function comparison; the function argument
        can be a list of functions. It is all or nothing for a list of
        functions.
'''
vals = []
if isinstance(fun, six.string_types):
fun = [fun]
for func in fun:
try:
if re.match(regex, func):
vals.append(True)
else:
vals.append(False)
except Exception:
log.error('Invalid regular expression: %s', regex)
return vals and all(vals) | def function[match_check, parameter[self, regex, fun]]:
constant[
Validate a single regex to function comparison, the function argument
can be a list of functions. It is all or nothing for a list of
functions
]
variable[vals] assign[=] list[[]]
if call[name[isinstance], parameter[name[fun], name[six].string_types]] begin[:]
variable[fun] assign[=] list[[<ast.Name object at 0x7da1b2163c40>]]
for taget[name[func]] in starred[name[fun]] begin[:]
<ast.Try object at 0x7da1b2162ad0>
return[<ast.BoolOp object at 0x7da1b2163460>] | keyword[def] identifier[match_check] ( identifier[self] , identifier[regex] , identifier[fun] ):
literal[string]
identifier[vals] =[]
keyword[if] identifier[isinstance] ( identifier[fun] , identifier[six] . identifier[string_types] ):
identifier[fun] =[ identifier[fun] ]
keyword[for] identifier[func] keyword[in] identifier[fun] :
keyword[try] :
keyword[if] identifier[re] . identifier[match] ( identifier[regex] , identifier[func] ):
identifier[vals] . identifier[append] ( keyword[True] )
keyword[else] :
identifier[vals] . identifier[append] ( keyword[False] )
keyword[except] identifier[Exception] :
identifier[log] . identifier[error] ( literal[string] , identifier[regex] )
keyword[return] identifier[vals] keyword[and] identifier[all] ( identifier[vals] ) | def match_check(self, regex, fun):
"""
Validate a single regex to function comparison, the function argument
can be a list of functions. It is all or nothing for a list of
functions
"""
vals = []
if isinstance(fun, six.string_types):
fun = [fun] # depends on [control=['if'], data=[]]
for func in fun:
try:
if re.match(regex, func):
vals.append(True) # depends on [control=['if'], data=[]]
else:
vals.append(False) # depends on [control=['try'], data=[]]
except Exception:
log.error('Invalid regular expression: %s', regex) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['func']]
return vals and all(vals) |
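A trimmed re-implementation of the matcher (without the six, logging, and regex error-handling plumbing) makes the all-or-nothing semantics easy to poke at; note the quirk that an empty function list returns [] rather than True, because `vals and all(vals)` short-circuits on the empty list.
import re
def match_check(regex, fun):
    if isinstance(fun, str):
        fun = [fun]
    vals = [bool(re.match(regex, func)) for func in fun]
    return vals and all(vals)
print(match_check(r'test\..*', ['test.ping', 'test.echo']))  # True
print(match_check(r'test\..*', ['test.ping', 'cmd.run']))    # False
print(match_check(r'test\..*', []))                          # [] (falsy)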
def pluralize(count, item_type):
"""Pluralizes the item_type if the count does not equal one.
For example `pluralize(1, 'apple')` returns '1 apple',
    while `pluralize(0, 'apple')` returns '0 apples'.
    :return: The count and inflected item_type together as a string
    :rtype: string
"""
def pluralize_string(x):
if x.endswith('s'):
return x + 'es'
else:
return x + 's'
text = '{} {}'.format(count, item_type if count == 1 else pluralize_string(item_type))
return text | def function[pluralize, parameter[count, item_type]]:
constant[Pluralizes the item_type if the count does not equal one.
For example `pluralize(1, 'apple')` returns '1 apple',
while `pluralize(0, 'apple') returns '0 apples'.
:return The count and inflected item_type together as a string
:rtype string
]
def function[pluralize_string, parameter[x]]:
if call[name[x].endswith, parameter[constant[s]]] begin[:]
return[binary_operation[name[x] + constant[es]]]
variable[text] assign[=] call[constant[{} {}].format, parameter[name[count], <ast.IfExp object at 0x7da1b1e6bb50>]]
return[name[text]] | keyword[def] identifier[pluralize] ( identifier[count] , identifier[item_type] ):
literal[string]
keyword[def] identifier[pluralize_string] ( identifier[x] ):
keyword[if] identifier[x] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[x] + literal[string]
keyword[else] :
keyword[return] identifier[x] + literal[string]
identifier[text] = literal[string] . identifier[format] ( identifier[count] , identifier[item_type] keyword[if] identifier[count] == literal[int] keyword[else] identifier[pluralize_string] ( identifier[item_type] ))
keyword[return] identifier[text] | def pluralize(count, item_type):
"""Pluralizes the item_type if the count does not equal one.
For example `pluralize(1, 'apple')` returns '1 apple',
while `pluralize(0, 'apple') returns '0 apples'.
:return The count and inflected item_type together as a string
:rtype string
"""
def pluralize_string(x):
if x.endswith('s'):
return x + 'es' # depends on [control=['if'], data=[]]
else:
return x + 's'
text = '{} {}'.format(count, item_type if count == 1 else pluralize_string(item_type))
return text |
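Usage follows the docstring, plus the 's'-ending branch:
print(pluralize(1, 'apple'))   # 1 apple
print(pluralize(3, 'apple'))   # 3 apples
print(pluralize(2, 'class'))   # 2 classes -- the trailing 's' triggers the 'es' suffix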
def collect(self, top, sup, argv=None, parent=""):
""" means this element is part of a larger object, hence a property of
that object """
try:
#objekt = PyGroup("", root=top)
(namespace, tag) = _namespace_and_tag(self, self.ref, top)
try:
if self.xmlns_map[namespace] == top.target_namespace:
cti = get_type_def(tag, top.parts)
try:
return cti.py_class.properties
except ValueError:
return cti.collect(top, sup)
else:
raise Exception(
"Reference to group in other XSD file, not supported")
except KeyError:
raise Exception("Missing namespace definition")
except AttributeError as exc:
print("#!!!!", exc)
return [], [] | def function[collect, parameter[self, top, sup, argv, parent]]:
constant[ means this element is part of a larger object, hence a property of
that object ]
<ast.Try object at 0x7da1b1d54910> | keyword[def] identifier[collect] ( identifier[self] , identifier[top] , identifier[sup] , identifier[argv] = keyword[None] , identifier[parent] = literal[string] ):
literal[string]
keyword[try] :
( identifier[namespace] , identifier[tag] )= identifier[_namespace_and_tag] ( identifier[self] , identifier[self] . identifier[ref] , identifier[top] )
keyword[try] :
keyword[if] identifier[self] . identifier[xmlns_map] [ identifier[namespace] ]== identifier[top] . identifier[target_namespace] :
identifier[cti] = identifier[get_type_def] ( identifier[tag] , identifier[top] . identifier[parts] )
keyword[try] :
keyword[return] identifier[cti] . identifier[py_class] . identifier[properties]
keyword[except] identifier[ValueError] :
keyword[return] identifier[cti] . identifier[collect] ( identifier[top] , identifier[sup] )
keyword[else] :
keyword[raise] identifier[Exception] (
literal[string] )
keyword[except] identifier[KeyError] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[except] identifier[AttributeError] keyword[as] identifier[exc] :
identifier[print] ( literal[string] , identifier[exc] )
keyword[return] [],[] | def collect(self, top, sup, argv=None, parent=''):
""" means this element is part of a larger object, hence a property of
that object """
try:
#objekt = PyGroup("", root=top)
(namespace, tag) = _namespace_and_tag(self, self.ref, top)
try:
if self.xmlns_map[namespace] == top.target_namespace:
cti = get_type_def(tag, top.parts)
try:
return cti.py_class.properties # depends on [control=['try'], data=[]]
except ValueError:
return cti.collect(top, sup) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
raise Exception('Reference to group in other XSD file, not supported') # depends on [control=['try'], data=[]]
except KeyError:
raise Exception('Missing namespace definition') # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]]
except AttributeError as exc:
print('#!!!!', exc)
return ([], []) # depends on [control=['except'], data=['exc']] |
def __rmfile(path):
"""Delete a file.
Args:
path (str): Path to the file that needs to be deleted.
Returns:
bool: True if the operation is successful, False otherwise.
"""
logger.info("rmfile: %s" % path)
try:
os.remove(path)
return True
except Exception as e:
logger.error("rmfile: %s failed! Error: %s" % (path, e))
return False | def function[__rmfile, parameter[path]]:
constant[Delete a file.
Args:
path (str): Path to the file that needs to be deleted.
Returns:
bool: True if the operation is successful, False otherwise.
]
call[name[logger].info, parameter[binary_operation[constant[rmfile: %s] <ast.Mod object at 0x7da2590d6920> name[path]]]]
<ast.Try object at 0x7da1b1131ab0> | keyword[def] identifier[__rmfile] ( identifier[path] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] % identifier[path] )
keyword[try] :
identifier[os] . identifier[remove] ( identifier[path] )
keyword[return] keyword[True]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( literal[string] %( identifier[path] , identifier[e] ))
keyword[return] keyword[False] | def __rmfile(path):
"""Delete a file.
Args:
path (str): Path to the file that needs to be deleted.
Returns:
bool: True if the operation is successful, False otherwise.
"""
logger.info('rmfile: %s' % path)
try:
os.remove(path)
return True # depends on [control=['try'], data=[]]
except Exception as e:
logger.error('rmfile: %s failed! Error: %s' % (path, e))
return False # depends on [control=['except'], data=['e']] |
def load_plugins(self, *args):
"""
Loads all plugins
:param args: Arguments to pass to the plugins
"""
for manifest in self._manifests:
self.load_plugin(manifest, *args) | def function[load_plugins, parameter[self]]:
constant[
Loads all plugins
:param args: Arguments to pass to the plugins
]
for taget[name[manifest]] in starred[name[self]._manifests] begin[:]
call[name[self].load_plugin, parameter[name[manifest], <ast.Starred object at 0x7da20c7965f0>]] | keyword[def] identifier[load_plugins] ( identifier[self] ,* identifier[args] ):
literal[string]
keyword[for] identifier[manifest] keyword[in] identifier[self] . identifier[_manifests] :
identifier[self] . identifier[load_plugin] ( identifier[manifest] ,* identifier[args] ) | def load_plugins(self, *args):
"""
Loads all plugins
:param args: Arguments to pass to the plugins
"""
for manifest in self._manifests:
self.load_plugin(manifest, *args) # depends on [control=['for'], data=['manifest']] |
def delete(self, response, **kwargs):
'''
If a record matching the instance id exists in the database, delete it.
'''
response_cls = self._get_instance(**kwargs)
if response_cls:
_action_and_commit(response_cls, session.delete) | def function[delete, parameter[self, response]]:
constant[
If a record matching the instance id exists in the database, delete it.
]
variable[response_cls] assign[=] call[name[self]._get_instance, parameter[]]
if name[response_cls] begin[:]
call[name[_action_and_commit], parameter[name[response_cls], name[session].delete]] | keyword[def] identifier[delete] ( identifier[self] , identifier[response] ,** identifier[kwargs] ):
literal[string]
identifier[response_cls] = identifier[self] . identifier[_get_instance] (** identifier[kwargs] )
keyword[if] identifier[response_cls] :
identifier[_action_and_commit] ( identifier[response_cls] , identifier[session] . identifier[delete] ) | def delete(self, response, **kwargs):
"""
If a record matching the instance id exists in the database, delete it.
"""
response_cls = self._get_instance(**kwargs)
if response_cls:
_action_and_commit(response_cls, session.delete) # depends on [control=['if'], data=[]] |
def _get_music_services_data_xml(soco=None):
"""Fetch the music services data xml from a Sonos device.
Args:
soco (SoCo): a SoCo instance to query. If none is specified, a
random device will be used. Defaults to `None`.
Returns:
str: a string containing the music services data xml
"""
device = soco or discovery.any_soco()
log.debug("Fetching music services data from %s", device)
available_services = device.musicServices.ListAvailableServices()
descriptor_list_xml = available_services[
'AvailableServiceDescriptorList']
log.debug("Services descriptor list: %s", descriptor_list_xml)
return descriptor_list_xml | def function[_get_music_services_data_xml, parameter[soco]]:
constant[Fetch the music services data xml from a Sonos device.
Args:
soco (SoCo): a SoCo instance to query. If none is specified, a
random device will be used. Defaults to `None`.
Returns:
str: a string containing the music services data xml
]
variable[device] assign[=] <ast.BoolOp object at 0x7da18ede4070>
call[name[log].debug, parameter[constant[Fetching music services data from %s], name[device]]]
variable[available_services] assign[=] call[name[device].musicServices.ListAvailableServices, parameter[]]
variable[descriptor_list_xml] assign[=] call[name[available_services]][constant[AvailableServiceDescriptorList]]
call[name[log].debug, parameter[constant[Services descriptor list: %s], name[descriptor_list_xml]]]
return[name[descriptor_list_xml]] | keyword[def] identifier[_get_music_services_data_xml] ( identifier[soco] = keyword[None] ):
literal[string]
identifier[device] = identifier[soco] keyword[or] identifier[discovery] . identifier[any_soco] ()
identifier[log] . identifier[debug] ( literal[string] , identifier[device] )
identifier[available_services] = identifier[device] . identifier[musicServices] . identifier[ListAvailableServices] ()
identifier[descriptor_list_xml] = identifier[available_services] [
literal[string] ]
identifier[log] . identifier[debug] ( literal[string] , identifier[descriptor_list_xml] )
keyword[return] identifier[descriptor_list_xml] | def _get_music_services_data_xml(soco=None):
"""Fetch the music services data xml from a Sonos device.
Args:
soco (SoCo): a SoCo instance to query. If none is specified, a
random device will be used. Defaults to `None`.
Returns:
str: a string containing the music services data xml
"""
device = soco or discovery.any_soco()
log.debug('Fetching music services data from %s', device)
available_services = device.musicServices.ListAvailableServices()
descriptor_list_xml = available_services['AvailableServiceDescriptorList']
log.debug('Services descriptor list: %s', descriptor_list_xml)
return descriptor_list_xml |
def next_execution(args):
"""
Returns the next execution datetime of a DAG at the command line.
>>> airflow next_execution tutorial
2018-08-31 10:38:00
"""
dag = get_dag(args)
if dag.is_paused:
print("[INFO] Please be reminded this DAG is PAUSED now.")
if dag.latest_execution_date:
next_execution_dttm = dag.following_schedule(dag.latest_execution_date)
if next_execution_dttm is None:
print("[WARN] No following schedule can be found. " +
"This DAG may have schedule interval '@once' or `None`.")
print(next_execution_dttm)
else:
print("[WARN] Only applicable when there is execution record found for the DAG.")
print(None) | def function[next_execution, parameter[args]]:
constant[
Returns the next execution datetime of a DAG at the command line.
>>> airflow next_execution tutorial
2018-08-31 10:38:00
]
variable[dag] assign[=] call[name[get_dag], parameter[name[args]]]
if name[dag].is_paused begin[:]
call[name[print], parameter[constant[[INFO] Please be reminded this DAG is PAUSED now.]]]
if name[dag].latest_execution_date begin[:]
variable[next_execution_dttm] assign[=] call[name[dag].following_schedule, parameter[name[dag].latest_execution_date]]
if compare[name[next_execution_dttm] is constant[None]] begin[:]
call[name[print], parameter[binary_operation[constant[[WARN] No following schedule can be found. ] + constant[This DAG may have schedule interval '@once' or `None`.]]]]
call[name[print], parameter[name[next_execution_dttm]]] | keyword[def] identifier[next_execution] ( identifier[args] ):
literal[string]
identifier[dag] = identifier[get_dag] ( identifier[args] )
keyword[if] identifier[dag] . identifier[is_paused] :
identifier[print] ( literal[string] )
keyword[if] identifier[dag] . identifier[latest_execution_date] :
identifier[next_execution_dttm] = identifier[dag] . identifier[following_schedule] ( identifier[dag] . identifier[latest_execution_date] )
keyword[if] identifier[next_execution_dttm] keyword[is] keyword[None] :
identifier[print] ( literal[string] +
literal[string] )
identifier[print] ( identifier[next_execution_dttm] )
keyword[else] :
identifier[print] ( literal[string] )
identifier[print] ( keyword[None] ) | def next_execution(args):
"""
Returns the next execution datetime of a DAG at the command line.
>>> airflow next_execution tutorial
2018-08-31 10:38:00
"""
dag = get_dag(args)
if dag.is_paused:
print('[INFO] Please be reminded this DAG is PAUSED now.') # depends on [control=['if'], data=[]]
if dag.latest_execution_date:
next_execution_dttm = dag.following_schedule(dag.latest_execution_date)
if next_execution_dttm is None:
print('[WARN] No following schedule can be found. ' + "This DAG may have schedule interval '@once' or `None`.") # depends on [control=['if'], data=[]]
print(next_execution_dttm) # depends on [control=['if'], data=[]]
else:
print('[WARN] Only applicable when there is execution record found for the DAG.')
print(None) |
def diagonal_basis_commutes(pauli_a, pauli_b):
"""
Test if `pauli_a` and `pauli_b` share a diagonal basis
Example:
    Check if [A, B] = 0 under the constraint that A & B must share a one-qubit
diagonalizing basis. If the inputs were [sZ(0), sZ(0) * sZ(1)] then this
function would return True. If the inputs were [sX(5), sZ(4)] this
function would return True. If the inputs were [sX(0), sY(0) * sZ(2)]
this function would return False.
:param pauli_a: Pauli term to check commutation against `pauli_b`
:param pauli_b: Pauli term to check commutation against `pauli_a`
:return: Boolean of commutation result
:rtype: Bool
"""
overlapping_active_qubits = set(pauli_a.get_qubits()) & set(pauli_b.get_qubits())
for qubit_index in overlapping_active_qubits:
if (pauli_a[qubit_index] != 'I' and pauli_b[qubit_index] != 'I' and
pauli_a[qubit_index] != pauli_b[qubit_index]):
return False
return True | def function[diagonal_basis_commutes, parameter[pauli_a, pauli_b]]:
constant[
Test if `pauli_a` and `pauli_b` share a diagonal basis
Example:
Check if [A, B] with the constraint that A & B must share a one-qubit
diagonalizing basis. If the inputs were [sZ(0), sZ(0) * sZ(1)] then this
function would return True. If the inputs were [sX(5), sZ(4)] this
function would return True. If the inputs were [sX(0), sY(0) * sZ(2)]
this function would return False.
:param pauli_a: Pauli term to check commutation against `pauli_b`
:param pauli_b: Pauli term to check commutation against `pauli_a`
:return: Boolean of commutation result
:rtype: Bool
]
variable[overlapping_active_qubits] assign[=] binary_operation[call[name[set], parameter[call[name[pauli_a].get_qubits, parameter[]]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[set], parameter[call[name[pauli_b].get_qubits, parameter[]]]]]
for taget[name[qubit_index]] in starred[name[overlapping_active_qubits]] begin[:]
if <ast.BoolOp object at 0x7da207f02620> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[diagonal_basis_commutes] ( identifier[pauli_a] , identifier[pauli_b] ):
literal[string]
identifier[overlapping_active_qubits] = identifier[set] ( identifier[pauli_a] . identifier[get_qubits] ())& identifier[set] ( identifier[pauli_b] . identifier[get_qubits] ())
keyword[for] identifier[qubit_index] keyword[in] identifier[overlapping_active_qubits] :
keyword[if] ( identifier[pauli_a] [ identifier[qubit_index] ]!= literal[string] keyword[and] identifier[pauli_b] [ identifier[qubit_index] ]!= literal[string] keyword[and]
identifier[pauli_a] [ identifier[qubit_index] ]!= identifier[pauli_b] [ identifier[qubit_index] ]):
keyword[return] keyword[False]
keyword[return] keyword[True] | def diagonal_basis_commutes(pauli_a, pauli_b):
"""
Test if `pauli_a` and `pauli_b` share a diagonal basis
Example:
Check if [A, B] with the constraint that A & B must share a one-qubit
diagonalizing basis. If the inputs were [sZ(0), sZ(0) * sZ(1)] then this
function would return True. If the inputs were [sX(5), sZ(4)] this
function would return True. If the inputs were [sX(0), sY(0) * sZ(2)]
this function would return False.
:param pauli_a: Pauli term to check commutation against `pauli_b`
:param pauli_b: Pauli term to check commutation against `pauli_a`
:return: Boolean of commutation result
:rtype: Bool
"""
overlapping_active_qubits = set(pauli_a.get_qubits()) & set(pauli_b.get_qubits())
for qubit_index in overlapping_active_qubits:
if pauli_a[qubit_index] != 'I' and pauli_b[qubit_index] != 'I' and (pauli_a[qubit_index] != pauli_b[qubit_index]):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['qubit_index']]
return True |
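The docstring examples can be replayed directly, assuming pyquil is installed so that sX, sY and sZ build PauliTerms indexable by qubit:
from pyquil.paulis import sX, sY, sZ
print(diagonal_basis_commutes(sZ(0), sZ(0) * sZ(1)))   # True  (Z agrees on qubit 0)
print(diagonal_basis_commutes(sX(5), sZ(4)))           # True  (no overlapping qubits)
print(diagonal_basis_commutes(sX(0), sY(0) * sZ(2)))   # False (X vs Y on qubit 0)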
def _get_streams(self):
"""
Find the streams for vk.com
:return:
"""
self.session.http.headers.update({'User-Agent': useragents.IPHONE_6})
# If this is a 'videos' catalog URL
        # with a video ID in the GET request, get that instead
url = self.follow_vk_redirect(self.url)
m = self._url_re.match(url)
if not m:
log.error('URL is not compatible: {0}'.format(url))
return
video_id = m.group('video_id')
log.debug('video ID: {0}'.format(video_id))
params = {
'act': 'show_inline',
'al': '1',
'video': video_id,
}
res = self.session.http.post(self.API_URL, params=params)
for _i in itertags(res.text, 'iframe'):
if _i.attributes.get('src'):
iframe_url = update_scheme(self.url, _i.attributes['src'])
log.debug('Found iframe: {0}'.format(iframe_url))
for s in self.session.streams(iframe_url).items():
yield s
for _i in itertags(res.text, 'source'):
if _i.attributes.get('type') == 'application/vnd.apple.mpegurl':
video_url = _i.attributes['src']
# Remove invalid URL
if video_url.startswith('https://vk.com/'):
continue
streams = HLSStream.parse_variant_playlist(self.session,
video_url)
if not streams:
yield 'live', HLSStream(self.session, video_url)
else:
for s in streams.items():
yield s
elif _i.attributes.get('type') == 'video/mp4':
q = 'vod'
video_url = _i.attributes['src']
m = self._vod_quality_re.search(video_url)
if m:
q = '{0}p'.format(m.group(1))
yield q, HTTPStream(self.session, video_url) | def function[_get_streams, parameter[self]]:
constant[
Find the streams for vk.com
:return:
]
call[name[self].session.http.headers.update, parameter[dictionary[[<ast.Constant object at 0x7da1b2344d30>], [<ast.Attribute object at 0x7da1b23454b0>]]]]
variable[url] assign[=] call[name[self].follow_vk_redirect, parameter[name[self].url]]
variable[m] assign[=] call[name[self]._url_re.match, parameter[name[url]]]
if <ast.UnaryOp object at 0x7da1b2345360> begin[:]
call[name[log].error, parameter[call[constant[URL is not compatible: {0}].format, parameter[name[url]]]]]
return[None]
variable[video_id] assign[=] call[name[m].group, parameter[constant[video_id]]]
call[name[log].debug, parameter[call[constant[video ID: {0}].format, parameter[name[video_id]]]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b2344c70>, <ast.Constant object at 0x7da1b2347400>, <ast.Constant object at 0x7da1b2344a90>], [<ast.Constant object at 0x7da1b2346b90>, <ast.Constant object at 0x7da1b2344790>, <ast.Name object at 0x7da1b2346c50>]]
variable[res] assign[=] call[name[self].session.http.post, parameter[name[self].API_URL]]
for taget[name[_i]] in starred[call[name[itertags], parameter[name[res].text, constant[iframe]]]] begin[:]
if call[name[_i].attributes.get, parameter[constant[src]]] begin[:]
variable[iframe_url] assign[=] call[name[update_scheme], parameter[name[self].url, call[name[_i].attributes][constant[src]]]]
call[name[log].debug, parameter[call[constant[Found iframe: {0}].format, parameter[name[iframe_url]]]]]
for taget[name[s]] in starred[call[call[name[self].session.streams, parameter[name[iframe_url]]].items, parameter[]]] begin[:]
<ast.Yield object at 0x7da1b23464d0>
for taget[name[_i]] in starred[call[name[itertags], parameter[name[res].text, constant[source]]]] begin[:]
if compare[call[name[_i].attributes.get, parameter[constant[type]]] equal[==] constant[application/vnd.apple.mpegurl]] begin[:]
variable[video_url] assign[=] call[name[_i].attributes][constant[src]]
if call[name[video_url].startswith, parameter[constant[https://vk.com/]]] begin[:]
continue
variable[streams] assign[=] call[name[HLSStream].parse_variant_playlist, parameter[name[self].session, name[video_url]]]
if <ast.UnaryOp object at 0x7da1b2346b30> begin[:]
<ast.Yield object at 0x7da1b2346dd0> | keyword[def] identifier[_get_streams] ( identifier[self] ):
literal[string]
identifier[self] . identifier[session] . identifier[http] . identifier[headers] . identifier[update] ({ literal[string] : identifier[useragents] . identifier[IPHONE_6] })
identifier[url] = identifier[self] . identifier[follow_vk_redirect] ( identifier[self] . identifier[url] )
identifier[m] = identifier[self] . identifier[_url_re] . identifier[match] ( identifier[url] )
keyword[if] keyword[not] identifier[m] :
identifier[log] . identifier[error] ( literal[string] . identifier[format] ( identifier[url] ))
keyword[return]
identifier[video_id] = identifier[m] . identifier[group] ( literal[string] )
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[video_id] ))
identifier[params] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[video_id] ,
}
identifier[res] = identifier[self] . identifier[session] . identifier[http] . identifier[post] ( identifier[self] . identifier[API_URL] , identifier[params] = identifier[params] )
keyword[for] identifier[_i] keyword[in] identifier[itertags] ( identifier[res] . identifier[text] , literal[string] ):
keyword[if] identifier[_i] . identifier[attributes] . identifier[get] ( literal[string] ):
identifier[iframe_url] = identifier[update_scheme] ( identifier[self] . identifier[url] , identifier[_i] . identifier[attributes] [ literal[string] ])
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[iframe_url] ))
keyword[for] identifier[s] keyword[in] identifier[self] . identifier[session] . identifier[streams] ( identifier[iframe_url] ). identifier[items] ():
keyword[yield] identifier[s]
keyword[for] identifier[_i] keyword[in] identifier[itertags] ( identifier[res] . identifier[text] , literal[string] ):
keyword[if] identifier[_i] . identifier[attributes] . identifier[get] ( literal[string] )== literal[string] :
identifier[video_url] = identifier[_i] . identifier[attributes] [ literal[string] ]
keyword[if] identifier[video_url] . identifier[startswith] ( literal[string] ):
keyword[continue]
identifier[streams] = identifier[HLSStream] . identifier[parse_variant_playlist] ( identifier[self] . identifier[session] ,
identifier[video_url] )
keyword[if] keyword[not] identifier[streams] :
keyword[yield] literal[string] , identifier[HLSStream] ( identifier[self] . identifier[session] , identifier[video_url] )
keyword[else] :
keyword[for] identifier[s] keyword[in] identifier[streams] . identifier[items] ():
keyword[yield] identifier[s]
keyword[elif] identifier[_i] . identifier[attributes] . identifier[get] ( literal[string] )== literal[string] :
identifier[q] = literal[string]
identifier[video_url] = identifier[_i] . identifier[attributes] [ literal[string] ]
identifier[m] = identifier[self] . identifier[_vod_quality_re] . identifier[search] ( identifier[video_url] )
keyword[if] identifier[m] :
identifier[q] = literal[string] . identifier[format] ( identifier[m] . identifier[group] ( literal[int] ))
keyword[yield] identifier[q] , identifier[HTTPStream] ( identifier[self] . identifier[session] , identifier[video_url] ) | def _get_streams(self):
"""
Find the streams for vk.com
:return:
"""
self.session.http.headers.update({'User-Agent': useragents.IPHONE_6})
# If this is a 'videos' catalog URL
# with an video ID in the GET request, get that instead
url = self.follow_vk_redirect(self.url)
m = self._url_re.match(url)
if not m:
log.error('URL is not compatible: {0}'.format(url))
return # depends on [control=['if'], data=[]]
video_id = m.group('video_id')
log.debug('video ID: {0}'.format(video_id))
params = {'act': 'show_inline', 'al': '1', 'video': video_id}
res = self.session.http.post(self.API_URL, params=params)
for _i in itertags(res.text, 'iframe'):
if _i.attributes.get('src'):
iframe_url = update_scheme(self.url, _i.attributes['src'])
log.debug('Found iframe: {0}'.format(iframe_url))
for s in self.session.streams(iframe_url).items():
yield s # depends on [control=['for'], data=['s']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['_i']]
for _i in itertags(res.text, 'source'):
if _i.attributes.get('type') == 'application/vnd.apple.mpegurl':
video_url = _i.attributes['src']
# Remove invalid URL
if video_url.startswith('https://vk.com/'):
continue # depends on [control=['if'], data=[]]
streams = HLSStream.parse_variant_playlist(self.session, video_url)
if not streams:
yield ('live', HLSStream(self.session, video_url)) # depends on [control=['if'], data=[]]
else:
for s in streams.items():
yield s # depends on [control=['for'], data=['s']] # depends on [control=['if'], data=[]]
elif _i.attributes.get('type') == 'video/mp4':
q = 'vod'
video_url = _i.attributes['src']
m = self._vod_quality_re.search(video_url)
if m:
q = '{0}p'.format(m.group(1)) # depends on [control=['if'], data=[]]
yield (q, HTTPStream(self.session, video_url)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['_i']] |
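
A minimal standalone sketch of the source-tag dispatch above, with plain dicts standing in for parsed <source> tags; the stream classes and itertags helper are Streamlink internals, so the names below are illustrative only:

def classify_sources(sources):
    # sources: iterable of {'type': ..., 'src': ...} dicts
    for tag in sources:
        if tag.get('type') == 'application/vnd.apple.mpegurl':
            yield 'hls', tag['src']
        elif tag.get('type') == 'video/mp4':
            yield 'vod', tag['src']

demo = [{'type': 'application/vnd.apple.mpegurl', 'src': 'https://cdn.example/master.m3u8'},
        {'type': 'video/mp4', 'src': 'https://cdn.example/720.mp4'}]
print(list(classify_sources(demo)))  # [('hls', '...m3u8'), ('vod', '...mp4')]
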
def safe_mkdir(self, d):
"""If a directory doesn't exist, create it.
If it does exist, print a warning to the logger.
        If it exists as a file, raise a FileExistsError.
:param d: directory path to create
:type d: str
"""
if os.path.isfile(d):
            raise FileExistsError(
                "Cannot create directory %s, a file by that name already exists." % d)
if os.path.isdir(d):
logger.warning("%s already exists, using existing directory.", d)
return
os.makedirs(d) | def function[safe_mkdir, parameter[self, d]]:
constant[If a directory doesn't exist, create it.
If it does exist, print a warning to the logger.
    If it exists as a file, raise a FileExistsError.
:param d: directory path to create
:type d: str
]
if call[name[os].path.isfile, parameter[name[d]]] begin[:]
<ast.Raise object at 0x7da1b09b9f60>
if call[name[os].path.isdir, parameter[name[d]]] begin[:]
call[name[logger].warning, parameter[constant[%s already exists, using existing directory.], name[d]]]
return[None]
call[name[os].makedirs, parameter[name[d]]] | keyword[def] identifier[safe_mkdir] ( identifier[self] , identifier[d] ):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[d] ):
keyword[raise] identifier[FileExistsError] (
literal[string] % identifier[d] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[d] ):
identifier[logger] . identifier[warning] ( literal[string] , identifier[d] )
keyword[return]
identifier[os] . identifier[makedirs] ( identifier[d] ) | def safe_mkdir(self, d):
"""If a directory doesn't exist, create it.
If it does exist, print a warning to the logger.
    If it exists as a file, raise a FileExistsError.
:param d: directory path to create
:type d: str
"""
if os.path.isfile(d):
        raise FileExistsError('Cannot create directory %s, a file by that name already exists.' % d) # depends on [control=['if'], data=[]]
if os.path.isdir(d):
logger.warning('%s already exists, using existing directory.', d)
return # depends on [control=['if'], data=[]]
os.makedirs(d) |
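
A standalone sketch of the same guard logic, assuming only the standard library (the original is a method, so self is dropped here):

import logging
import os
import tempfile

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

def safe_mkdir(d):
    if os.path.isfile(d):
        raise FileExistsError("Cannot create directory %s, a file by that name already exists." % d)
    if os.path.isdir(d):
        logger.warning("%s already exists, using existing directory.", d)
        return
    os.makedirs(d)

target = os.path.join(tempfile.mkdtemp(), "out")
safe_mkdir(target)  # creates the directory
safe_mkdir(target)  # logs a warning and returns quietly
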
def submit(self, command, blocksize, job_name="parsl.auto"):
        ''' Submits the command onto a Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
If tasks_per_node < 1:
1/tasks_per_node is provisioned
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float) - Not really used for local
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
'''
job_name = "{0}.{1}".format(job_name, time.time())
# Set script path
script_path = "{0}/{1}.sh".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
wrap_command = self.launcher(command, self.tasks_per_node, self.nodes_per_block)
self._write_submit_script(wrap_command, script_path)
job_id, proc = self.channel.execute_no_wait('bash {0}'.format(script_path), 3)
self.resources[job_id] = {'job_id': job_id, 'status': 'RUNNING', 'blocksize': blocksize, 'proc': proc}
return job_id | def function[submit, parameter[self, command, blocksize, job_name]]:
    constant[ Submits the command onto a Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
If tasks_per_node < 1:
1/tasks_per_node is provisioned
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float) - Not really used for local
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
]
variable[job_name] assign[=] call[constant[{0}.{1}].format, parameter[name[job_name], call[name[time].time, parameter[]]]]
variable[script_path] assign[=] call[constant[{0}/{1}.sh].format, parameter[name[self].script_dir, name[job_name]]]
variable[script_path] assign[=] call[name[os].path.abspath, parameter[name[script_path]]]
variable[wrap_command] assign[=] call[name[self].launcher, parameter[name[command], name[self].tasks_per_node, name[self].nodes_per_block]]
call[name[self]._write_submit_script, parameter[name[wrap_command], name[script_path]]]
<ast.Tuple object at 0x7da1b0af0c40> assign[=] call[name[self].channel.execute_no_wait, parameter[call[constant[bash {0}].format, parameter[name[script_path]]], constant[3]]]
call[name[self].resources][name[job_id]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0af1c60>, <ast.Constant object at 0x7da1b0a06170>, <ast.Constant object at 0x7da1b0a066e0>, <ast.Constant object at 0x7da1b0a04d60>], [<ast.Name object at 0x7da1b0a06500>, <ast.Constant object at 0x7da1b0a05c00>, <ast.Name object at 0x7da1b0a06860>, <ast.Name object at 0x7da1b0a04c10>]]
return[name[job_id]] | keyword[def] identifier[submit] ( identifier[self] , identifier[command] , identifier[blocksize] , identifier[job_name] = literal[string] ):
literal[string]
identifier[job_name] = literal[string] . identifier[format] ( identifier[job_name] , identifier[time] . identifier[time] ())
identifier[script_path] = literal[string] . identifier[format] ( identifier[self] . identifier[script_dir] , identifier[job_name] )
identifier[script_path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[script_path] )
identifier[wrap_command] = identifier[self] . identifier[launcher] ( identifier[command] , identifier[self] . identifier[tasks_per_node] , identifier[self] . identifier[nodes_per_block] )
identifier[self] . identifier[_write_submit_script] ( identifier[wrap_command] , identifier[script_path] )
identifier[job_id] , identifier[proc] = identifier[self] . identifier[channel] . identifier[execute_no_wait] ( literal[string] . identifier[format] ( identifier[script_path] ), literal[int] )
identifier[self] . identifier[resources] [ identifier[job_id] ]={ literal[string] : identifier[job_id] , literal[string] : literal[string] , literal[string] : identifier[blocksize] , literal[string] : identifier[proc] }
    keyword[return] identifier[job_id] | def submit(self, command, blocksize, job_name='parsl.auto'):
    """ Submits the command onto a Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
If tasks_per_node < 1:
1/tasks_per_node is provisioned
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float) - Not really used for local
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
"""
job_name = '{0}.{1}'.format(job_name, time.time())
# Set script path
script_path = '{0}/{1}.sh'.format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
wrap_command = self.launcher(command, self.tasks_per_node, self.nodes_per_block)
self._write_submit_script(wrap_command, script_path)
(job_id, proc) = self.channel.execute_no_wait('bash {0}'.format(script_path), 3)
self.resources[job_id] = {'job_id': job_id, 'status': 'RUNNING', 'blocksize': blocksize, 'proc': proc}
return job_id |
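
A sketch of the submission bookkeeping above with the channel and launcher stubbed out; execute_no_wait and _write_submit_script are parsl internals, so the stand-ins below only mimic their shape:

import os
import time
import tempfile

script_dir = tempfile.mkdtemp()
resources = {}

def submit_stub(command, blocksize, job_name="parsl.auto"):
    job_name = "{0}.{1}".format(job_name, time.time())  # timestamp keeps names unique
    script_path = os.path.abspath("{0}/{1}.sh".format(script_dir, job_name))
    with open(script_path, "w") as f:                   # stand-in for _write_submit_script
        f.write("#!/bin/bash\n" + command + "\n")
    job_id = job_name                                   # stand-in for execute_no_wait's id
    resources[job_id] = {'job_id': job_id, 'status': 'RUNNING', 'blocksize': blocksize}
    return job_id

print(submit_stub("echo hello", blocksize=1))
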
def _format_files(cls, files, kind):
        '''Format the list of files (e.g. assistants or snippets)'''
lines = []
if files:
lines.append('The following {kind} are contained in this DAP:'.format(kind=kind.title()))
for f in files:
lines.append('* ' + strip_prefix(f, kind).replace(os.path.sep, ' ').strip())
return lines
else:
return ['No {kind} are contained in this DAP'.format(kind=kind.title())] | def function[_format_files, parameter[cls, files, kind]]:
    constant[Format the list of files (e.g. assistants or snippets)]
variable[lines] assign[=] list[[]]
if name[files] begin[:]
call[name[lines].append, parameter[call[constant[The following {kind} are contained in this DAP:].format, parameter[]]]]
for taget[name[f]] in starred[name[files]] begin[:]
call[name[lines].append, parameter[binary_operation[constant[* ] + call[call[call[name[strip_prefix], parameter[name[f], name[kind]]].replace, parameter[name[os].path.sep, constant[ ]]].strip, parameter[]]]]]
return[name[lines]] | keyword[def] identifier[_format_files] ( identifier[cls] , identifier[files] , identifier[kind] ):
literal[string]
identifier[lines] =[]
keyword[if] identifier[files] :
identifier[lines] . identifier[append] ( literal[string] . identifier[format] ( identifier[kind] = identifier[kind] . identifier[title] ()))
keyword[for] identifier[f] keyword[in] identifier[files] :
identifier[lines] . identifier[append] ( literal[string] + identifier[strip_prefix] ( identifier[f] , identifier[kind] ). identifier[replace] ( identifier[os] . identifier[path] . identifier[sep] , literal[string] ). identifier[strip] ())
keyword[return] identifier[lines]
keyword[else] :
    keyword[return] [ literal[string] . identifier[format] ( identifier[kind] = identifier[kind] . identifier[title] ())] | def _format_files(cls, files, kind):
    """Format the list of files (e.g. assistants or snippets)"""
lines = []
if files:
lines.append('The following {kind} are contained in this DAP:'.format(kind=kind.title()))
for f in files:
lines.append('* ' + strip_prefix(f, kind).replace(os.path.sep, ' ').strip()) # depends on [control=['for'], data=['f']]
return lines # depends on [control=['if'], data=[]]
else:
return ['No {kind} are contained in this DAP'.format(kind=kind.title())] |
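
An illustrative standalone run of the same listing logic; strip_prefix below is a guess at the helper's behavior (drop a leading 'kind/' path component), not the actual implementation:

import os

def strip_prefix(path, kind):  # assumed behavior, for illustration only
    prefix = kind + os.path.sep
    return path[len(prefix):] if path.startswith(prefix) else path

def format_files(files, kind):
    if not files:
        return ['No {kind} are contained in this DAP'.format(kind=kind.title())]
    lines = ['The following {kind} are contained in this DAP:'.format(kind=kind.title())]
    for f in files:
        lines.append('* ' + strip_prefix(f, kind).replace(os.path.sep, ' ').strip())
    return lines

print('\n'.join(format_files(['snippets/python/flask.yaml'], 'snippets')))
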
def _resolve_subkeys(key, separator="."):
"""Resolve a potentially nested key.
If the key contains the ``separator`` (e.g. ``.``) then the key will be
split on the first instance of the subkey::
>>> _resolve_subkeys('a.b.c')
('a', 'b.c')
>>> _resolve_subkeys('d|e|f', separator='|')
('d', 'e|f')
If not, the subkey will be :data:`None`::
>>> _resolve_subkeys('foo')
('foo', None)
Args:
key (str): A string that may or may not contain the separator.
separator (str): The namespace separator. Defaults to `.`.
Returns:
Tuple[str, str]: The key and subkey(s).
"""
parts = key.split(separator, 1)
if len(parts) > 1:
return parts
else:
return parts[0], None | def function[_resolve_subkeys, parameter[key, separator]]:
constant[Resolve a potentially nested key.
If the key contains the ``separator`` (e.g. ``.``) then the key will be
split on the first instance of the subkey::
>>> _resolve_subkeys('a.b.c')
('a', 'b.c')
>>> _resolve_subkeys('d|e|f', separator='|')
('d', 'e|f')
If not, the subkey will be :data:`None`::
>>> _resolve_subkeys('foo')
('foo', None)
Args:
key (str): A string that may or may not contain the separator.
separator (str): The namespace separator. Defaults to `.`.
Returns:
Tuple[str, str]: The key and subkey(s).
]
variable[parts] assign[=] call[name[key].split, parameter[name[separator], constant[1]]]
if compare[call[name[len], parameter[name[parts]]] greater[>] constant[1]] begin[:]
return[name[parts]] | keyword[def] identifier[_resolve_subkeys] ( identifier[key] , identifier[separator] = literal[string] ):
literal[string]
identifier[parts] = identifier[key] . identifier[split] ( identifier[separator] , literal[int] )
keyword[if] identifier[len] ( identifier[parts] )> literal[int] :
keyword[return] identifier[parts]
keyword[else] :
keyword[return] identifier[parts] [ literal[int] ], keyword[None] | def _resolve_subkeys(key, separator='.'):
"""Resolve a potentially nested key.
If the key contains the ``separator`` (e.g. ``.``) then the key will be
split on the first instance of the subkey::
>>> _resolve_subkeys('a.b.c')
('a', 'b.c')
>>> _resolve_subkeys('d|e|f', separator='|')
('d', 'e|f')
If not, the subkey will be :data:`None`::
>>> _resolve_subkeys('foo')
('foo', None)
Args:
key (str): A string that may or may not contain the separator.
separator (str): The namespace separator. Defaults to `.`.
Returns:
Tuple[str, str]: The key and subkey(s).
"""
parts = key.split(separator, 1)
if len(parts) > 1:
return parts # depends on [control=['if'], data=[]]
else:
return (parts[0], None) |
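
A quick self-contained check of the behavior above (note that the nested case returns the two-element list from split, while the flat case returns a tuple, so the docstring's doctest tuples are approximate):

print(_resolve_subkeys('a.b.c'))                 # ['a', 'b.c']
print(_resolve_subkeys('d|e|f', separator='|'))  # ['d', 'e|f']
print(_resolve_subkeys('foo'))                   # ('foo', None)
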
def make_query(args, other=None, limit=None, strand=None, featuretype=None,
extra=None, order_by=None, reverse=False,
completely_within=False):
"""
Multi-purpose, bare-bones ORM function.
This function composes queries given some commonly-used kwargs that can be
passed to FeatureDB methods (like .parents(), .children(), .all_features(),
.features_of_type()). It handles, in one place, things like restricting to
featuretype, limiting to a genomic range, limiting to one strand, or
returning results ordered by different criteria.
Additional filtering/subsetting/sorting behavior should be added here.
(Note: this ended up having better performance (and flexibility) than
sqlalchemy)
This function also provides support for additional JOINs etc (supplied via
the `other` kwarg) and extra conditional clauses (`extra` kwarg). See the
`_QUERY` var below for the order in which they are used.
For example, FeatureDB._relation uses `other` to supply the JOIN
    substatement, and that same method also uses `extra` to supply the
    "relations.level = ?" substatement (see the source for FeatureDB._relation
for more details).
`args` contains the arguments that will ultimately be supplied to the
sqlite3.connection.execute function. It may be further populated below --
for example, if strand="+", then the query will include a strand clause,
and the strand will be appended to the args.
`args` can be pre-filled with args that are passed to `other` and `extra`.
"""
_QUERY = ("{_SELECT} {OTHER} {EXTRA} {FEATURETYPE} "
"{LIMIT} {STRAND} {ORDER_BY}")
# Construct a dictionary `d` that will be used later as _QUERY.format(**d).
# Default is just _SELECT, which returns all records in the features table.
# (Recall that constants._SELECT gets the fields in the order needed to
# reconstruct a Feature)
d = dict(_SELECT=constants._SELECT, OTHER="", FEATURETYPE="", LIMIT="",
STRAND="", ORDER_BY="", EXTRA="")
if other:
d['OTHER'] = other
if extra:
d['EXTRA'] = extra
# If `other` and `extra` take args (that is, they have "?" in them), then
# they should have been provided in `args`.
required_args = (d['EXTRA'] + d['OTHER']).count('?')
if len(args) != required_args:
raise ValueError('Not enough args (%s) for subquery' % args)
# Below, if a kwarg is specified, then we create sections of the query --
# appending to args as necessary.
#
# IMPORTANT: the order in which things are processed here is the same as
# the order of the placeholders in _QUERY. That is, we need to build the
# args in parallel with the query to avoid putting the wrong args in the
# wrong place.
if featuretype:
# Handle single or iterables of featuretypes.
#
# e.g., "featuretype = 'exon'"
#
# or, "featuretype IN ('exon', 'CDS')"
if isinstance(featuretype, six.string_types):
d['FEATURETYPE'] = "features.featuretype = ?"
args.append(featuretype)
else:
d['FEATURETYPE'] = (
"features.featuretype IN (%s)"
% (','.join(["?" for _ in featuretype]))
)
args.extend(featuretype)
if limit:
# Restrict to a genomic region. Makes use of the UCSC binning strategy
# for performance.
#
# `limit` is a string or a tuple of (chrom, start, stop)
#
# e.g., "seqid = 'chr2L' AND start > 1000 AND end < 5000"
if isinstance(limit, six.string_types):
seqid, startstop = limit.split(':')
start, end = startstop.split('-')
else:
seqid, start, end = limit
# Identify possible bins
_bins = bins.bins(int(start), int(end), one=False)
# Use different overlap conditions
if completely_within:
d['LIMIT'] = (
"features.seqid = ? AND features.start >= ? "
"AND features.end <= ?"
)
args.extend([seqid, start, end])
else:
d['LIMIT'] = (
"features.seqid = ? AND features.start <= ? "
"AND features.end >= ?"
)
# Note order (end, start)
args.extend([seqid, end, start])
# Add bin clause. See issue #45.
if len(_bins) < 900:
d['LIMIT'] += " AND features.bin IN (%s)" % (','.join(map(str, _bins)))
if strand:
# e.g., "strand = '+'"
d['STRAND'] = "features.strand = ?"
args.append(strand)
# TODO: implement file_order!
valid_order_by = constants._gffkeys_extra + ['file_order', 'length']
_order_by = []
if order_by:
# Default is essentially random order.
#
# e.g. "ORDER BY seqid, start DESC"
if isinstance(order_by, six.string_types):
_order_by.append(order_by)
else:
for k in order_by:
if k not in valid_order_by:
raise ValueError("%s not a valid order-by value in %s"
% (k, valid_order_by))
# There's no length field, so order by end - start
if k == 'length':
k = '(end - start)'
_order_by.append(k)
_order_by = ','.join(_order_by)
if reverse:
direction = 'DESC'
else:
direction = 'ASC'
d['ORDER_BY'] = 'ORDER BY %s %s' % (_order_by, direction)
# Ensure only one "WHERE" is included; the rest get "AND ". This is ugly.
where = False
if "where" in d['OTHER'].lower():
where = True
for i in ['EXTRA', 'FEATURETYPE', 'LIMIT', 'STRAND']:
if d[i]:
if not where:
d[i] = "WHERE " + d[i]
where = True
else:
d[i] = "AND " + d[i]
return _QUERY.format(**d), args | def function[make_query, parameter[args, other, limit, strand, featuretype, extra, order_by, reverse, completely_within]]:
constant[
Multi-purpose, bare-bones ORM function.
This function composes queries given some commonly-used kwargs that can be
passed to FeatureDB methods (like .parents(), .children(), .all_features(),
.features_of_type()). It handles, in one place, things like restricting to
featuretype, limiting to a genomic range, limiting to one strand, or
returning results ordered by different criteria.
Additional filtering/subsetting/sorting behavior should be added here.
(Note: this ended up having better performance (and flexibility) than
sqlalchemy)
This function also provides support for additional JOINs etc (supplied via
the `other` kwarg) and extra conditional clauses (`extra` kwarg). See the
`_QUERY` var below for the order in which they are used.
For example, FeatureDB._relation uses `other` to supply the JOIN
    substatement, and that same method also uses `extra` to supply the
    "relations.level = ?" substatement (see the source for FeatureDB._relation
for more details).
`args` contains the arguments that will ultimately be supplied to the
sqlite3.connection.execute function. It may be further populated below --
for example, if strand="+", then the query will include a strand clause,
and the strand will be appended to the args.
`args` can be pre-filled with args that are passed to `other` and `extra`.
]
variable[_QUERY] assign[=] constant[{_SELECT} {OTHER} {EXTRA} {FEATURETYPE} {LIMIT} {STRAND} {ORDER_BY}]
variable[d] assign[=] call[name[dict], parameter[]]
if name[other] begin[:]
call[name[d]][constant[OTHER]] assign[=] name[other]
if name[extra] begin[:]
call[name[d]][constant[EXTRA]] assign[=] name[extra]
variable[required_args] assign[=] call[binary_operation[call[name[d]][constant[EXTRA]] + call[name[d]][constant[OTHER]]].count, parameter[constant[?]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] name[required_args]] begin[:]
<ast.Raise object at 0x7da18fe92920>
if name[featuretype] begin[:]
if call[name[isinstance], parameter[name[featuretype], name[six].string_types]] begin[:]
call[name[d]][constant[FEATURETYPE]] assign[=] constant[features.featuretype = ?]
call[name[args].append, parameter[name[featuretype]]]
if name[limit] begin[:]
if call[name[isinstance], parameter[name[limit], name[six].string_types]] begin[:]
<ast.Tuple object at 0x7da20c991ab0> assign[=] call[name[limit].split, parameter[constant[:]]]
<ast.Tuple object at 0x7da20c992590> assign[=] call[name[startstop].split, parameter[constant[-]]]
variable[_bins] assign[=] call[name[bins].bins, parameter[call[name[int], parameter[name[start]]], call[name[int], parameter[name[end]]]]]
if name[completely_within] begin[:]
call[name[d]][constant[LIMIT]] assign[=] constant[features.seqid = ? AND features.start >= ? AND features.end <= ?]
call[name[args].extend, parameter[list[[<ast.Name object at 0x7da20c990640>, <ast.Name object at 0x7da20c992fe0>, <ast.Name object at 0x7da20c992860>]]]]
if compare[call[name[len], parameter[name[_bins]]] less[<] constant[900]] begin[:]
<ast.AugAssign object at 0x7da1b2347640>
if name[strand] begin[:]
call[name[d]][constant[STRAND]] assign[=] constant[features.strand = ?]
call[name[args].append, parameter[name[strand]]]
variable[valid_order_by] assign[=] binary_operation[name[constants]._gffkeys_extra + list[[<ast.Constant object at 0x7da1b23458d0>, <ast.Constant object at 0x7da1b2346ef0>]]]
variable[_order_by] assign[=] list[[]]
if name[order_by] begin[:]
if call[name[isinstance], parameter[name[order_by], name[six].string_types]] begin[:]
call[name[_order_by].append, parameter[name[order_by]]]
variable[_order_by] assign[=] call[constant[,].join, parameter[name[_order_by]]]
if name[reverse] begin[:]
variable[direction] assign[=] constant[DESC]
call[name[d]][constant[ORDER_BY]] assign[=] binary_operation[constant[ORDER BY %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2346410>, <ast.Name object at 0x7da1b23471c0>]]]
variable[where] assign[=] constant[False]
if compare[constant[where] in call[call[name[d]][constant[OTHER]].lower, parameter[]]] begin[:]
variable[where] assign[=] constant[True]
for taget[name[i]] in starred[list[[<ast.Constant object at 0x7da1b2345150>, <ast.Constant object at 0x7da1b2345ea0>, <ast.Constant object at 0x7da1b2345510>, <ast.Constant object at 0x7da1b23465c0>]]] begin[:]
if call[name[d]][name[i]] begin[:]
if <ast.UnaryOp object at 0x7da1b2346cb0> begin[:]
call[name[d]][name[i]] assign[=] binary_operation[constant[WHERE ] + call[name[d]][name[i]]]
variable[where] assign[=] constant[True]
return[tuple[[<ast.Call object at 0x7da1b2344ac0>, <ast.Name object at 0x7da1b2347f40>]]] | keyword[def] identifier[make_query] ( identifier[args] , identifier[other] = keyword[None] , identifier[limit] = keyword[None] , identifier[strand] = keyword[None] , identifier[featuretype] = keyword[None] ,
identifier[extra] = keyword[None] , identifier[order_by] = keyword[None] , identifier[reverse] = keyword[False] ,
identifier[completely_within] = keyword[False] ):
literal[string]
identifier[_QUERY] =( literal[string]
literal[string] )
identifier[d] = identifier[dict] ( identifier[_SELECT] = identifier[constants] . identifier[_SELECT] , identifier[OTHER] = literal[string] , identifier[FEATURETYPE] = literal[string] , identifier[LIMIT] = literal[string] ,
identifier[STRAND] = literal[string] , identifier[ORDER_BY] = literal[string] , identifier[EXTRA] = literal[string] )
keyword[if] identifier[other] :
identifier[d] [ literal[string] ]= identifier[other]
keyword[if] identifier[extra] :
identifier[d] [ literal[string] ]= identifier[extra]
identifier[required_args] =( identifier[d] [ literal[string] ]+ identifier[d] [ literal[string] ]). identifier[count] ( literal[string] )
keyword[if] identifier[len] ( identifier[args] )!= identifier[required_args] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[args] )
keyword[if] identifier[featuretype] :
keyword[if] identifier[isinstance] ( identifier[featuretype] , identifier[six] . identifier[string_types] ):
identifier[d] [ literal[string] ]= literal[string]
identifier[args] . identifier[append] ( identifier[featuretype] )
keyword[else] :
identifier[d] [ literal[string] ]=(
literal[string]
%( literal[string] . identifier[join] ([ literal[string] keyword[for] identifier[_] keyword[in] identifier[featuretype] ]))
)
identifier[args] . identifier[extend] ( identifier[featuretype] )
keyword[if] identifier[limit] :
keyword[if] identifier[isinstance] ( identifier[limit] , identifier[six] . identifier[string_types] ):
identifier[seqid] , identifier[startstop] = identifier[limit] . identifier[split] ( literal[string] )
identifier[start] , identifier[end] = identifier[startstop] . identifier[split] ( literal[string] )
keyword[else] :
identifier[seqid] , identifier[start] , identifier[end] = identifier[limit]
identifier[_bins] = identifier[bins] . identifier[bins] ( identifier[int] ( identifier[start] ), identifier[int] ( identifier[end] ), identifier[one] = keyword[False] )
keyword[if] identifier[completely_within] :
identifier[d] [ literal[string] ]=(
literal[string]
literal[string]
)
identifier[args] . identifier[extend] ([ identifier[seqid] , identifier[start] , identifier[end] ])
keyword[else] :
identifier[d] [ literal[string] ]=(
literal[string]
literal[string]
)
identifier[args] . identifier[extend] ([ identifier[seqid] , identifier[end] , identifier[start] ])
keyword[if] identifier[len] ( identifier[_bins] )< literal[int] :
identifier[d] [ literal[string] ]+= literal[string] %( literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[_bins] )))
keyword[if] identifier[strand] :
identifier[d] [ literal[string] ]= literal[string]
identifier[args] . identifier[append] ( identifier[strand] )
identifier[valid_order_by] = identifier[constants] . identifier[_gffkeys_extra] +[ literal[string] , literal[string] ]
identifier[_order_by] =[]
keyword[if] identifier[order_by] :
keyword[if] identifier[isinstance] ( identifier[order_by] , identifier[six] . identifier[string_types] ):
identifier[_order_by] . identifier[append] ( identifier[order_by] )
keyword[else] :
keyword[for] identifier[k] keyword[in] identifier[order_by] :
keyword[if] identifier[k] keyword[not] keyword[in] identifier[valid_order_by] :
keyword[raise] identifier[ValueError] ( literal[string]
%( identifier[k] , identifier[valid_order_by] ))
keyword[if] identifier[k] == literal[string] :
identifier[k] = literal[string]
identifier[_order_by] . identifier[append] ( identifier[k] )
identifier[_order_by] = literal[string] . identifier[join] ( identifier[_order_by] )
keyword[if] identifier[reverse] :
identifier[direction] = literal[string]
keyword[else] :
identifier[direction] = literal[string]
identifier[d] [ literal[string] ]= literal[string] %( identifier[_order_by] , identifier[direction] )
identifier[where] = keyword[False]
keyword[if] literal[string] keyword[in] identifier[d] [ literal[string] ]. identifier[lower] ():
identifier[where] = keyword[True]
keyword[for] identifier[i] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
keyword[if] identifier[d] [ identifier[i] ]:
keyword[if] keyword[not] identifier[where] :
identifier[d] [ identifier[i] ]= literal[string] + identifier[d] [ identifier[i] ]
identifier[where] = keyword[True]
keyword[else] :
identifier[d] [ identifier[i] ]= literal[string] + identifier[d] [ identifier[i] ]
keyword[return] identifier[_QUERY] . identifier[format] (** identifier[d] ), identifier[args] | def make_query(args, other=None, limit=None, strand=None, featuretype=None, extra=None, order_by=None, reverse=False, completely_within=False):
"""
Multi-purpose, bare-bones ORM function.
This function composes queries given some commonly-used kwargs that can be
passed to FeatureDB methods (like .parents(), .children(), .all_features(),
.features_of_type()). It handles, in one place, things like restricting to
featuretype, limiting to a genomic range, limiting to one strand, or
returning results ordered by different criteria.
Additional filtering/subsetting/sorting behavior should be added here.
(Note: this ended up having better performance (and flexibility) than
sqlalchemy)
This function also provides support for additional JOINs etc (supplied via
the `other` kwarg) and extra conditional clauses (`extra` kwarg). See the
`_QUERY` var below for the order in which they are used.
For example, FeatureDB._relation uses `other` to supply the JOIN
    substatement, and that same method also uses `extra` to supply the
    "relations.level = ?" substatement (see the source for FeatureDB._relation
for more details).
`args` contains the arguments that will ultimately be supplied to the
sqlite3.connection.execute function. It may be further populated below --
for example, if strand="+", then the query will include a strand clause,
and the strand will be appended to the args.
`args` can be pre-filled with args that are passed to `other` and `extra`.
"""
_QUERY = '{_SELECT} {OTHER} {EXTRA} {FEATURETYPE} {LIMIT} {STRAND} {ORDER_BY}'
# Construct a dictionary `d` that will be used later as _QUERY.format(**d).
# Default is just _SELECT, which returns all records in the features table.
# (Recall that constants._SELECT gets the fields in the order needed to
# reconstruct a Feature)
d = dict(_SELECT=constants._SELECT, OTHER='', FEATURETYPE='', LIMIT='', STRAND='', ORDER_BY='', EXTRA='')
if other:
d['OTHER'] = other # depends on [control=['if'], data=[]]
if extra:
d['EXTRA'] = extra # depends on [control=['if'], data=[]]
# If `other` and `extra` take args (that is, they have "?" in them), then
# they should have been provided in `args`.
required_args = (d['EXTRA'] + d['OTHER']).count('?')
if len(args) != required_args:
raise ValueError('Not enough args (%s) for subquery' % args) # depends on [control=['if'], data=[]]
# Below, if a kwarg is specified, then we create sections of the query --
# appending to args as necessary.
#
# IMPORTANT: the order in which things are processed here is the same as
# the order of the placeholders in _QUERY. That is, we need to build the
# args in parallel with the query to avoid putting the wrong args in the
# wrong place.
if featuretype:
# Handle single or iterables of featuretypes.
#
# e.g., "featuretype = 'exon'"
#
# or, "featuretype IN ('exon', 'CDS')"
if isinstance(featuretype, six.string_types):
d['FEATURETYPE'] = 'features.featuretype = ?'
args.append(featuretype) # depends on [control=['if'], data=[]]
else:
d['FEATURETYPE'] = 'features.featuretype IN (%s)' % ','.join(['?' for _ in featuretype])
args.extend(featuretype) # depends on [control=['if'], data=[]]
if limit:
# Restrict to a genomic region. Makes use of the UCSC binning strategy
# for performance.
#
# `limit` is a string or a tuple of (chrom, start, stop)
#
# e.g., "seqid = 'chr2L' AND start > 1000 AND end < 5000"
if isinstance(limit, six.string_types):
(seqid, startstop) = limit.split(':')
(start, end) = startstop.split('-') # depends on [control=['if'], data=[]]
else:
(seqid, start, end) = limit
# Identify possible bins
_bins = bins.bins(int(start), int(end), one=False)
# Use different overlap conditions
if completely_within:
d['LIMIT'] = 'features.seqid = ? AND features.start >= ? AND features.end <= ?'
args.extend([seqid, start, end]) # depends on [control=['if'], data=[]]
else:
d['LIMIT'] = 'features.seqid = ? AND features.start <= ? AND features.end >= ?'
# Note order (end, start)
args.extend([seqid, end, start])
# Add bin clause. See issue #45.
if len(_bins) < 900:
d['LIMIT'] += ' AND features.bin IN (%s)' % ','.join(map(str, _bins)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if strand:
# e.g., "strand = '+'"
d['STRAND'] = 'features.strand = ?'
args.append(strand) # depends on [control=['if'], data=[]]
# TODO: implement file_order!
valid_order_by = constants._gffkeys_extra + ['file_order', 'length']
_order_by = []
if order_by:
# Default is essentially random order.
#
# e.g. "ORDER BY seqid, start DESC"
if isinstance(order_by, six.string_types):
_order_by.append(order_by) # depends on [control=['if'], data=[]]
else:
for k in order_by:
if k not in valid_order_by:
raise ValueError('%s not a valid order-by value in %s' % (k, valid_order_by)) # depends on [control=['if'], data=['k', 'valid_order_by']]
# There's no length field, so order by end - start
if k == 'length':
k = '(end - start)' # depends on [control=['if'], data=['k']]
_order_by.append(k) # depends on [control=['for'], data=['k']]
_order_by = ','.join(_order_by)
if reverse:
direction = 'DESC' # depends on [control=['if'], data=[]]
else:
direction = 'ASC'
d['ORDER_BY'] = 'ORDER BY %s %s' % (_order_by, direction) # depends on [control=['if'], data=[]]
# Ensure only one "WHERE" is included; the rest get "AND ". This is ugly.
where = False
if 'where' in d['OTHER'].lower():
where = True # depends on [control=['if'], data=[]]
for i in ['EXTRA', 'FEATURETYPE', 'LIMIT', 'STRAND']:
if d[i]:
if not where:
d[i] = 'WHERE ' + d[i]
where = True # depends on [control=['if'], data=[]]
else:
d[i] = 'AND ' + d[i] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return (_QUERY.format(**d), args) |
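
A reduced sketch of the LIMIT-clause assembly above, with a hard-coded bin list standing in for bins.bins() (the real code computes UCSC bins; the values below are illustrative only):

def build_limit_clause(seqid, start, end, completely_within, candidate_bins):
    args = []
    if completely_within:
        clause = "features.seqid = ? AND features.start >= ? AND features.end <= ?"
        args.extend([seqid, start, end])
    else:
        clause = "features.seqid = ? AND features.start <= ? AND features.end >= ?"
        args.extend([seqid, end, start])  # note the (end, start) order for overlap tests
    if len(candidate_bins) < 900:
        clause += " AND features.bin IN (%s)" % ",".join(map(str, candidate_bins))
    return clause, args

print(build_limit_clause("chr2L", 1000, 5000, False, [0, 1, 9, 73, 585]))
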
def deep_documents(self):
"""
        list of all documents found in subtrees of this node
"""
tree = []
for entry in self.contents:
if isinstance(entry, Document):
tree.append(entry)
else:
tree += entry.deep_documents
return tree | def function[deep_documents, parameter[self]]:
constant[
    list of all documents found in subtrees of this node
]
variable[tree] assign[=] list[[]]
for taget[name[entry]] in starred[name[self].contents] begin[:]
if call[name[isinstance], parameter[name[entry], name[Document]]] begin[:]
call[name[tree].append, parameter[name[entry]]]
return[name[tree]] | keyword[def] identifier[deep_documents] ( identifier[self] ):
literal[string]
identifier[tree] =[]
keyword[for] identifier[entry] keyword[in] identifier[self] . identifier[contents] :
keyword[if] identifier[isinstance] ( identifier[entry] , identifier[Document] ):
identifier[tree] . identifier[append] ( identifier[entry] )
keyword[else] :
identifier[tree] += identifier[entry] . identifier[deep_documents]
keyword[return] identifier[tree] | def deep_documents(self):
"""
    list of all documents found in subtrees of this node
"""
tree = []
for entry in self.contents:
if isinstance(entry, Document):
tree.append(entry) # depends on [control=['if'], data=[]]
else:
tree += entry.deep_documents # depends on [control=['for'], data=['entry']]
return tree |
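
A self-contained sketch of the recursive flattening, with minimal stand-in classes (the real Document and node types live elsewhere in the project):

class Document:
    def __init__(self, name):
        self.name = name

class Node:
    def __init__(self, contents):
        self.contents = contents

    @property
    def deep_documents(self):
        tree = []
        for entry in self.contents:
            if isinstance(entry, Document):
                tree.append(entry)
            else:
                tree += entry.deep_documents  # recurse into child nodes
        return tree

root = Node([Document("a"), Node([Document("b"), Document("c")])])
print([d.name for d in root.deep_documents])  # ['a', 'b', 'c']
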
def model_performance(self, test_data=None, train=False, valid=False, xval=False):
"""
Generate model metrics for this model on test_data.
:param test_data: Data set for which model metrics shall be computed against. All three of train, valid
and xval arguments are ignored if test_data is not None.
:param train: Report the training metrics for the model.
:param valid: Report the validation metrics for the model.
        :param xval: Report the cross-validation metrics for the model.
:return: An object of class H2OModelMetrics.
"""
return {model.model_id: model.model_performance(test_data, train, valid, xval) for model in self.models} | def function[model_performance, parameter[self, test_data, train, valid, xval]]:
constant[
Generate model metrics for this model on test_data.
:param test_data: Data set for which model metrics shall be computed against. All three of train, valid
and xval arguments are ignored if test_data is not None.
:param train: Report the training metrics for the model.
:param valid: Report the validation metrics for the model.
    :param xval: Report the cross-validation metrics for the model.
:return: An object of class H2OModelMetrics.
]
return[<ast.DictComp object at 0x7da18c4cc730>] | keyword[def] identifier[model_performance] ( identifier[self] , identifier[test_data] = keyword[None] , identifier[train] = keyword[False] , identifier[valid] = keyword[False] , identifier[xval] = keyword[False] ):
literal[string]
keyword[return] { identifier[model] . identifier[model_id] : identifier[model] . identifier[model_performance] ( identifier[test_data] , identifier[train] , identifier[valid] , identifier[xval] ) keyword[for] identifier[model] keyword[in] identifier[self] . identifier[models] } | def model_performance(self, test_data=None, train=False, valid=False, xval=False):
"""
Generate model metrics for this model on test_data.
:param test_data: Data set for which model metrics shall be computed against. All three of train, valid
and xval arguments are ignored if test_data is not None.
:param train: Report the training metrics for the model.
:param valid: Report the validation metrics for the model.
    :param xval: Report the cross-validation metrics for the model.
:return: An object of class H2OModelMetrics.
"""
return {model.model_id: model.model_performance(test_data, train, valid, xval) for model in self.models} |
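
The grid method simply fans the call out over its member models and keys the results by model_id; a stub-based sketch (real H2O models return H2OModelMetrics objects rather than strings):

class StubModel:
    def __init__(self, model_id):
        self.model_id = model_id

    def model_performance(self, test_data, train, valid, xval):
        return "metrics for %s" % self.model_id  # placeholder for H2OModelMetrics

models = [StubModel("gbm_1"), StubModel("gbm_2")]
perf = {m.model_id: m.model_performance(None, True, False, False) for m in models}
print(perf)  # {'gbm_1': 'metrics for gbm_1', 'gbm_2': 'metrics for gbm_2'}
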
def __FindSupportedVersion(protocol, server, port, path, preferredApiVersions, sslContext):
"""
Private method that returns the most preferred API version supported by the
    specified server.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred.
@type preferredApiVersions: string or string list
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
serviceVersionDescription = __GetServiceVersionDescription(protocol,
server,
port,
path,
sslContext)
if serviceVersionDescription is None:
return None
if not isinstance(preferredApiVersions, list):
preferredApiVersions = [ preferredApiVersions ]
for desiredVersion in preferredApiVersions:
if __VersionIsSupported(desiredVersion, serviceVersionDescription):
return desiredVersion
return None | def function[__FindSupportedVersion, parameter[protocol, server, port, path, preferredApiVersions, sslContext]]:
constant[
Private method that returns the most preferred API version supported by the
    specified server.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred.
@type preferredApiVersions: string or string list
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
]
variable[serviceVersionDescription] assign[=] call[name[__GetServiceVersionDescription], parameter[name[protocol], name[server], name[port], name[path], name[sslContext]]]
if compare[name[serviceVersionDescription] is constant[None]] begin[:]
return[constant[None]]
if <ast.UnaryOp object at 0x7da1b235b850> begin[:]
variable[preferredApiVersions] assign[=] list[[<ast.Name object at 0x7da1b2358610>]]
for taget[name[desiredVersion]] in starred[name[preferredApiVersions]] begin[:]
if call[name[__VersionIsSupported], parameter[name[desiredVersion], name[serviceVersionDescription]]] begin[:]
return[name[desiredVersion]]
return[constant[None]] | keyword[def] identifier[__FindSupportedVersion] ( identifier[protocol] , identifier[server] , identifier[port] , identifier[path] , identifier[preferredApiVersions] , identifier[sslContext] ):
literal[string]
identifier[serviceVersionDescription] = identifier[__GetServiceVersionDescription] ( identifier[protocol] ,
identifier[server] ,
identifier[port] ,
identifier[path] ,
identifier[sslContext] )
keyword[if] identifier[serviceVersionDescription] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[isinstance] ( identifier[preferredApiVersions] , identifier[list] ):
identifier[preferredApiVersions] =[ identifier[preferredApiVersions] ]
keyword[for] identifier[desiredVersion] keyword[in] identifier[preferredApiVersions] :
keyword[if] identifier[__VersionIsSupported] ( identifier[desiredVersion] , identifier[serviceVersionDescription] ):
keyword[return] identifier[desiredVersion]
keyword[return] keyword[None] | def __FindSupportedVersion(protocol, server, port, path, preferredApiVersions, sslContext):
"""
Private method that returns the most preferred API version supported by the
    specified server.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred.
@type preferredApiVersions: string or string list
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
serviceVersionDescription = __GetServiceVersionDescription(protocol, server, port, path, sslContext)
if serviceVersionDescription is None:
return None # depends on [control=['if'], data=[]]
if not isinstance(preferredApiVersions, list):
preferredApiVersions = [preferredApiVersions] # depends on [control=['if'], data=[]]
for desiredVersion in preferredApiVersions:
if __VersionIsSupported(desiredVersion, serviceVersionDescription):
return desiredVersion # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['desiredVersion']]
return None |
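
Once the service description is fetched, the negotiation reduces to picking the first acceptable entry from a preference-ordered list; a sketch with a plain set standing in for the parsed version description:

def pick_version(preferred, supported):
    if not isinstance(preferred, list):
        preferred = [preferred]
    for version in preferred:  # ordered from most- to least-preferred
        if version in supported:
            return version
    return None

supported = {"vim.version.version6", "vim.version.version5"}
print(pick_version(["vim.version.version7", "vim.version.version6"], supported))
# -> 'vim.version.version6'
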
def has_role(self, role):
"""Returns `True` if the user identifies with the specified role.
:param role: A role name or `Role` instance"""
if isinstance(role, string_types):
return role in (role.name for role in self.roles)
else:
return role in self.roles | def function[has_role, parameter[self, role]]:
constant[Returns `True` if the user identifies with the specified role.
:param role: A role name or `Role` instance]
if call[name[isinstance], parameter[name[role], name[string_types]]] begin[:]
return[compare[name[role] in <ast.GeneratorExp object at 0x7da207f028f0>]] | keyword[def] identifier[has_role] ( identifier[self] , identifier[role] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[role] , identifier[string_types] ):
keyword[return] identifier[role] keyword[in] ( identifier[role] . identifier[name] keyword[for] identifier[role] keyword[in] identifier[self] . identifier[roles] )
keyword[else] :
keyword[return] identifier[role] keyword[in] identifier[self] . identifier[roles] | def has_role(self, role):
"""Returns `True` if the user identifies with the specified role.
:param role: A role name or `Role` instance"""
if isinstance(role, string_types):
return role in (role.name for role in self.roles) # depends on [control=['if'], data=[]]
else:
return role in self.roles |
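
A quick check of the string-or-object dispatch with a minimal Role stand-in, using Python 3's str in place of six's string_types (Flask-Security's real Role model carries more fields):

class Role:
    def __init__(self, name):
        self.name = name

class User:
    def __init__(self, roles):
        self.roles = roles

    def has_role(self, role):
        if isinstance(role, str):
            return role in (r.name for r in self.roles)
        return role in self.roles

admin = Role("admin")
user = User([admin])
print(user.has_role("admin"), user.has_role(admin))  # True True
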
def corruptVector(v1, noiseLevel, numActiveCols):
"""
Corrupts a copy of a binary vector by inverting noiseLevel percent of its bits.
@param v1 (array) binary vector whose copy will be corrupted
@param noiseLevel (float) amount of noise to be applied on the new vector
@param numActiveCols (int) number of sparse columns that represent an input
@return v2 (array) corrupted binary vector
"""
size = len(v1)
v2 = np.zeros(size, dtype="uint32")
bitsToSwap = int(noiseLevel * numActiveCols)
# Copy the contents of v1 into v2
for i in range(size):
v2[i] = v1[i]
for _ in range(bitsToSwap):
i = random.randrange(size)
if v2[i] == 1:
v2[i] = 0
else:
v2[i] = 1
return v2 | def function[corruptVector, parameter[v1, noiseLevel, numActiveCols]]:
constant[
Corrupts a copy of a binary vector by inverting noiseLevel percent of its bits.
@param v1 (array) binary vector whose copy will be corrupted
@param noiseLevel (float) amount of noise to be applied on the new vector
@param numActiveCols (int) number of sparse columns that represent an input
@return v2 (array) corrupted binary vector
]
variable[size] assign[=] call[name[len], parameter[name[v1]]]
variable[v2] assign[=] call[name[np].zeros, parameter[name[size]]]
variable[bitsToSwap] assign[=] call[name[int], parameter[binary_operation[name[noiseLevel] * name[numActiveCols]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[size]]]] begin[:]
call[name[v2]][name[i]] assign[=] call[name[v1]][name[i]]
for taget[name[_]] in starred[call[name[range], parameter[name[bitsToSwap]]]] begin[:]
variable[i] assign[=] call[name[random].randrange, parameter[name[size]]]
if compare[call[name[v2]][name[i]] equal[==] constant[1]] begin[:]
call[name[v2]][name[i]] assign[=] constant[0]
return[name[v2]] | keyword[def] identifier[corruptVector] ( identifier[v1] , identifier[noiseLevel] , identifier[numActiveCols] ):
literal[string]
identifier[size] = identifier[len] ( identifier[v1] )
identifier[v2] = identifier[np] . identifier[zeros] ( identifier[size] , identifier[dtype] = literal[string] )
identifier[bitsToSwap] = identifier[int] ( identifier[noiseLevel] * identifier[numActiveCols] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[size] ):
identifier[v2] [ identifier[i] ]= identifier[v1] [ identifier[i] ]
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[bitsToSwap] ):
identifier[i] = identifier[random] . identifier[randrange] ( identifier[size] )
keyword[if] identifier[v2] [ identifier[i] ]== literal[int] :
identifier[v2] [ identifier[i] ]= literal[int]
keyword[else] :
identifier[v2] [ identifier[i] ]= literal[int]
keyword[return] identifier[v2] | def corruptVector(v1, noiseLevel, numActiveCols):
"""
Corrupts a copy of a binary vector by inverting noiseLevel percent of its bits.
@param v1 (array) binary vector whose copy will be corrupted
@param noiseLevel (float) amount of noise to be applied on the new vector
@param numActiveCols (int) number of sparse columns that represent an input
@return v2 (array) corrupted binary vector
"""
size = len(v1)
v2 = np.zeros(size, dtype='uint32')
bitsToSwap = int(noiseLevel * numActiveCols)
# Copy the contents of v1 into v2
for i in range(size):
v2[i] = v1[i] # depends on [control=['for'], data=['i']]
for _ in range(bitsToSwap):
i = random.randrange(size)
if v2[i] == 1:
v2[i] = 0 # depends on [control=['if'], data=[]]
else:
v2[i] = 1 # depends on [control=['for'], data=[]]
return v2 |
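
Usage is straightforward given a sparse binary vector; with noiseLevel=0.2 and 40 active columns, up to 8 bits are flipped (fewer if randrange picks the same index twice):

import random
import numpy as np

v = np.zeros(100, dtype="uint32")
v[random.sample(range(100), 40)] = 1  # 40 active bits
noisy = corruptVector(v, noiseLevel=0.2, numActiveCols=40)
print(int(np.sum(v != noisy)))        # number of bits that actually changed
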
def commit_config(self):
"""
If replacement operation, perform 'configure replace' for the entire config.
If merge operation, perform copy <file> running-config.
"""
# Always generate a rollback config on commit
self._gen_rollback_cfg()
if self.config_replace:
# Replace operation
filename = self.candidate_cfg
cfg_file = self._gen_full_path(filename)
if not self._check_file_exists(cfg_file):
raise ReplaceConfigException("Candidate config file does not exist")
if self.auto_rollback_on_error:
cmd = 'configure replace {} force revert trigger error'.format(cfg_file)
else:
cmd = 'configure replace {} force'.format(cfg_file)
output = self._commit_hostname_handler(cmd)
if ('original configuration has been successfully restored' in output) or \
('error' in output.lower()) or \
('failed' in output.lower()):
msg = "Candidate config could not be applied\n{}".format(output)
raise ReplaceConfigException(msg)
elif '%Please turn config archive on' in output:
msg = "napalm-ios replace() requires Cisco 'archive' feature to be enabled."
raise ReplaceConfigException(msg)
else:
# Merge operation
filename = self.merge_cfg
cfg_file = self._gen_full_path(filename)
if not self._check_file_exists(cfg_file):
raise MergeConfigException("Merge source config file does not exist")
cmd = 'copy {} running-config'.format(cfg_file)
self._disable_confirm()
output = self._commit_hostname_handler(cmd)
self._enable_confirm()
if 'Invalid input detected' in output:
self.rollback()
err_header = "Configuration merge failed; automatic rollback attempted"
merge_error = "{0}:\n{1}".format(err_header, output)
raise MergeConfigException(merge_error)
# Save config to startup (both replace and merge)
output += self.device.send_command_expect("write mem") | def function[commit_config, parameter[self]]:
constant[
If replacement operation, perform 'configure replace' for the entire config.
If merge operation, perform copy <file> running-config.
]
call[name[self]._gen_rollback_cfg, parameter[]]
if name[self].config_replace begin[:]
variable[filename] assign[=] name[self].candidate_cfg
variable[cfg_file] assign[=] call[name[self]._gen_full_path, parameter[name[filename]]]
if <ast.UnaryOp object at 0x7da18c4cd390> begin[:]
<ast.Raise object at 0x7da18c4cfdf0>
if name[self].auto_rollback_on_error begin[:]
variable[cmd] assign[=] call[constant[configure replace {} force revert trigger error].format, parameter[name[cfg_file]]]
variable[output] assign[=] call[name[self]._commit_hostname_handler, parameter[name[cmd]]]
if <ast.BoolOp object at 0x7da1b11e1420> begin[:]
variable[msg] assign[=] call[constant[Candidate config could not be applied
{}].format, parameter[name[output]]]
<ast.Raise object at 0x7da204963070>
<ast.AugAssign object at 0x7da18c4cdc00> | keyword[def] identifier[commit_config] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_gen_rollback_cfg] ()
keyword[if] identifier[self] . identifier[config_replace] :
identifier[filename] = identifier[self] . identifier[candidate_cfg]
identifier[cfg_file] = identifier[self] . identifier[_gen_full_path] ( identifier[filename] )
keyword[if] keyword[not] identifier[self] . identifier[_check_file_exists] ( identifier[cfg_file] ):
keyword[raise] identifier[ReplaceConfigException] ( literal[string] )
keyword[if] identifier[self] . identifier[auto_rollback_on_error] :
identifier[cmd] = literal[string] . identifier[format] ( identifier[cfg_file] )
keyword[else] :
identifier[cmd] = literal[string] . identifier[format] ( identifier[cfg_file] )
identifier[output] = identifier[self] . identifier[_commit_hostname_handler] ( identifier[cmd] )
keyword[if] ( literal[string] keyword[in] identifier[output] ) keyword[or] ( literal[string] keyword[in] identifier[output] . identifier[lower] ()) keyword[or] ( literal[string] keyword[in] identifier[output] . identifier[lower] ()):
identifier[msg] = literal[string] . identifier[format] ( identifier[output] )
keyword[raise] identifier[ReplaceConfigException] ( identifier[msg] )
keyword[elif] literal[string] keyword[in] identifier[output] :
identifier[msg] = literal[string]
keyword[raise] identifier[ReplaceConfigException] ( identifier[msg] )
keyword[else] :
identifier[filename] = identifier[self] . identifier[merge_cfg]
identifier[cfg_file] = identifier[self] . identifier[_gen_full_path] ( identifier[filename] )
keyword[if] keyword[not] identifier[self] . identifier[_check_file_exists] ( identifier[cfg_file] ):
keyword[raise] identifier[MergeConfigException] ( literal[string] )
identifier[cmd] = literal[string] . identifier[format] ( identifier[cfg_file] )
identifier[self] . identifier[_disable_confirm] ()
identifier[output] = identifier[self] . identifier[_commit_hostname_handler] ( identifier[cmd] )
identifier[self] . identifier[_enable_confirm] ()
keyword[if] literal[string] keyword[in] identifier[output] :
identifier[self] . identifier[rollback] ()
identifier[err_header] = literal[string]
identifier[merge_error] = literal[string] . identifier[format] ( identifier[err_header] , identifier[output] )
keyword[raise] identifier[MergeConfigException] ( identifier[merge_error] )
identifier[output] += identifier[self] . identifier[device] . identifier[send_command_expect] ( literal[string] ) | def commit_config(self):
"""
If replacement operation, perform 'configure replace' for the entire config.
If merge operation, perform copy <file> running-config.
"""
# Always generate a rollback config on commit
self._gen_rollback_cfg()
if self.config_replace:
# Replace operation
filename = self.candidate_cfg
cfg_file = self._gen_full_path(filename)
if not self._check_file_exists(cfg_file):
raise ReplaceConfigException('Candidate config file does not exist') # depends on [control=['if'], data=[]]
if self.auto_rollback_on_error:
cmd = 'configure replace {} force revert trigger error'.format(cfg_file) # depends on [control=['if'], data=[]]
else:
cmd = 'configure replace {} force'.format(cfg_file)
output = self._commit_hostname_handler(cmd)
if 'original configuration has been successfully restored' in output or 'error' in output.lower() or 'failed' in output.lower():
msg = 'Candidate config could not be applied\n{}'.format(output)
raise ReplaceConfigException(msg) # depends on [control=['if'], data=[]]
elif '%Please turn config archive on' in output:
msg = "napalm-ios replace() requires Cisco 'archive' feature to be enabled."
raise ReplaceConfigException(msg) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# Merge operation
filename = self.merge_cfg
cfg_file = self._gen_full_path(filename)
if not self._check_file_exists(cfg_file):
raise MergeConfigException('Merge source config file does not exist') # depends on [control=['if'], data=[]]
cmd = 'copy {} running-config'.format(cfg_file)
self._disable_confirm()
output = self._commit_hostname_handler(cmd)
self._enable_confirm()
if 'Invalid input detected' in output:
self.rollback()
err_header = 'Configuration merge failed; automatic rollback attempted'
merge_error = '{0}:\n{1}'.format(err_header, output)
raise MergeConfigException(merge_error) # depends on [control=['if'], data=['output']]
# Save config to startup (both replace and merge)
output += self.device.send_command_expect('write mem') |
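A minimal call sequence for the commit method above, as a hedged sketch: it assumes `device` is an already-opened napalm IOS driver instance and that a merge candidate file has been staged first; the file name is an assumption.

# Hypothetical usage sketch (napalm-style driver; `device` and the file name are assumptions).
device.load_merge_candidate(filename="merge.cfg")  # stage the merge source file
device.commit_config()                             # merge (or replace), then save with 'write mem'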
def get_times_from_cli(cli_token):
"""Convert a CLI token to a datetime tuple.
Argument:
cli_token (str): an isoformat datetime token ([ISO date]:[ISO date])
or a special value among:
* thisday
* thisweek
* thismonth
* thisyear
Returns:
tuple: a pair of datetime.date objects, where the first item is
the start of the time frame and the second item is the end of the
time frame. Both elements can also be None, if no date was
provided.
Raises:
ValueError: when the CLI token is not in the right format
(no colon in the token, not one of the special values, dates
are not in proper ISO-8601 format.)
See Also:
`ISO-8601 specification <https://en.wikipedia.org/wiki/ISO_8601>`_.
"""
today = datetime.date.today()
if cli_token=="thisday":
return today, today
elif cli_token=="thisweek":
return today, today - dateutil.relativedelta.relativedelta(days=7)
elif cli_token=="thismonth":
return today, today - dateutil.relativedelta.relativedelta(months=1)
elif cli_token=="thisyear":
return today, today - dateutil.relativedelta.relativedelta(years=1)
else:
try:
start_date, stop_date = cli_token.split(':')
except ValueError:
raise ValueError("--time parameter must contain a colon (:)")
if not start_date and not stop_date: # ':', no start date, no stop date
return None, None
try:
start_date = date_from_isoformat(start_date) if start_date else None
stop_date = date_from_isoformat(stop_date) if stop_date else None
except ValueError:
raise ValueError("--time parameter was not provided ISO formatted dates")
return start_date, stop_date | def function[get_times_from_cli, parameter[cli_token]]:
constant[Convert a CLI token to a datetime tuple.
Argument:
cli_token (str): an isoformat datetime token ([ISO date]:[ISO date])
or a special value among:
* thisday
* thisweek
* thismonth
* thisyear
Returns:
tuple: a pair of datetime.date objects, where the first item is
the start of the time frame and the second item is the end of the
time frame. Both elements can also be None, if no date was
provided.
Raises:
ValueError: when the CLI token is not in the right format
(no colon in the token, not one of the special values, dates
are not in proper ISO-8601 format.)
See Also:
`ISO-8601 specification <https://en.wikipedia.org/wiki/ISO_8601>`_.
]
variable[today] assign[=] call[name[datetime].date.today, parameter[]]
if compare[name[cli_token] equal[==] constant[thisday]] begin[:]
return[tuple[[<ast.Name object at 0x7da20c6a93f0>, <ast.Name object at 0x7da20c6a95a0>]]] | keyword[def] identifier[get_times_from_cli] ( identifier[cli_token] ):
literal[string]
identifier[today] = identifier[datetime] . identifier[date] . identifier[today] ()
keyword[if] identifier[cli_token] == literal[string] :
keyword[return] identifier[today] , identifier[today]
keyword[elif] identifier[cli_token] == literal[string] :
keyword[return] identifier[today] , identifier[today] - identifier[dateutil] . identifier[relativedelta] . identifier[relativedelta] ( identifier[days] = literal[int] )
keyword[elif] identifier[cli_token] == literal[string] :
keyword[return] identifier[today] , identifier[today] - identifier[dateutil] . identifier[relativedelta] . identifier[relativedelta] ( identifier[months] = literal[int] )
keyword[elif] identifier[cli_token] == literal[string] :
keyword[return] identifier[today] , identifier[today] - identifier[dateutil] . identifier[relativedelta] . identifier[relativedelta] ( identifier[years] = literal[int] )
keyword[else] :
keyword[try] :
identifier[start_date] , identifier[stop_date] = identifier[cli_token] . identifier[split] ( literal[string] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[start_date] keyword[and] keyword[not] identifier[stop_date] :
keyword[return] keyword[None] , keyword[None]
keyword[try] :
identifier[start_date] = identifier[date_from_isoformat] ( identifier[start_date] ) keyword[if] identifier[start_date] keyword[else] keyword[None]
identifier[stop_date] = identifier[date_from_isoformat] ( identifier[stop_date] ) keyword[if] identifier[stop_date] keyword[else] keyword[None]
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[start_date] , identifier[stop_date] | def get_times_from_cli(cli_token):
"""Convert a CLI token to a datetime tuple.
Argument:
cli_token (str): an isoformat datetime token ([ISO date]:[ISO date])
or a special value among:
* thisday
* thisweek
* thismonth
* thisyear
Returns:
tuple: a datetime.date objects couple, where the first item is
the start of a time frame and the second item the end of the
time frame. Both elements can also be None, if no date was
provided.
Raises:
ValueError: when the CLI token is not in the right format
(no colon in the token, not one of the special values, dates
are not in proper ISO-8601 format.)
See Also:
`ISO-8601 specification <https://en.wikipedia.org/wiki/ISO_8601>`_.
"""
today = datetime.date.today()
if cli_token == 'thisday':
return (today, today) # depends on [control=['if'], data=[]]
elif cli_token == 'thisweek':
return (today, today - dateutil.relativedelta.relativedelta(days=7)) # depends on [control=['if'], data=[]]
elif cli_token == 'thismonth':
return (today, today - dateutil.relativedelta.relativedelta(months=1)) # depends on [control=['if'], data=[]]
elif cli_token == 'thisyear':
return (today, today - dateutil.relativedelta.relativedelta(years=1)) # depends on [control=['if'], data=[]]
else:
try:
(start_date, stop_date) = cli_token.split(':') # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('--time parameter must contain a colon (:)') # depends on [control=['except'], data=[]]
if not start_date and (not stop_date): # ':', no start date, no stop date
return (None, None) # depends on [control=['if'], data=[]]
try:
start_date = date_from_isoformat(start_date) if start_date else None
stop_date = date_from_isoformat(stop_date) if stop_date else None # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('--time parameter was not given ISO-formatted dates') # depends on [control=['except'], data=[]]
return (start_date, stop_date) |
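A usage sketch for the parser above, assuming the function is importable; note that the special tokens return the pair as (today, today - delta), i.e. the newer date first, so callers must order the bounds themselves.

# Hypothetical usage sketch; `get_times_from_cli` is the function shown above.
start, stop = get_times_from_cli("thisweek")               # (today, today - 7 days)
start, stop = get_times_from_cli("2020-01-01:2020-02-01")  # two ISO-8601 dates
start, stop = get_times_from_cli(":")                      # (None, None)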
def decrypt(self, orig_pkt, assoclen=None):
"""decrypt a MACsec frame for this Secure Association"""
hdr = copy.deepcopy(orig_pkt)
del hdr[MACsec].payload
pktlen = len(orig_pkt)
if self.send_sci:
hdrlen = NOSCI_LEN + SCI_LEN
else:
hdrlen = NOSCI_LEN
if assoclen is None or not self.do_encrypt:
if self.do_encrypt:
assoclen = hdrlen
else:
assoclen = pktlen - self.icvlen
iv = self.make_iv(hdr)
assoc, ct, icv = MACsecSA.split_pkt(orig_pkt, assoclen, self.icvlen)
decryptor = Cipher(
algorithms.AES(self.key),
modes.GCM(iv, icv),
backend=default_backend()
).decryptor()
decryptor.authenticate_additional_data(assoc)
pt = assoc[hdrlen:assoclen]
pt += decryptor.update(ct)
pt += decryptor.finalize()
hdr[MACsec].type = struct.unpack('!H', pt[0:2])[0]
hdr[MACsec].payload = Raw(pt[2:])
return hdr | def function[decrypt, parameter[self, orig_pkt, assoclen]]:
constant[decrypt a MACsec frame for this Secure Association]
variable[hdr] assign[=] call[name[copy].deepcopy, parameter[name[orig_pkt]]]
<ast.Delete object at 0x7da1b1fcb7c0>
variable[pktlen] assign[=] call[name[len], parameter[name[orig_pkt]]]
if name[self].send_sci begin[:]
variable[hdrlen] assign[=] binary_operation[name[NOSCI_LEN] + name[SCI_LEN]]
if <ast.BoolOp object at 0x7da1b1fcb400> begin[:]
if name[self].do_encrypt begin[:]
variable[assoclen] assign[=] name[hdrlen]
variable[iv] assign[=] call[name[self].make_iv, parameter[name[hdr]]]
<ast.Tuple object at 0x7da1b2098f10> assign[=] call[name[MACsecSA].split_pkt, parameter[name[orig_pkt], name[assoclen], name[self].icvlen]]
variable[decryptor] assign[=] call[call[name[Cipher], parameter[call[name[algorithms].AES, parameter[name[self].key]], call[name[modes].GCM, parameter[name[iv], name[icv]]]]].decryptor, parameter[]]
call[name[decryptor].authenticate_additional_data, parameter[name[assoc]]]
variable[pt] assign[=] call[name[assoc]][<ast.Slice object at 0x7da1b1fc8a60>]
<ast.AugAssign object at 0x7da1b1fc9c90>
<ast.AugAssign object at 0x7da1b1fca080>
call[name[hdr]][name[MACsec]].type assign[=] call[call[name[struct].unpack, parameter[constant[!H], call[name[pt]][<ast.Slice object at 0x7da1b1f966b0>]]]][constant[0]]
call[name[hdr]][name[MACsec]].payload assign[=] call[name[Raw], parameter[call[name[pt]][<ast.Slice object at 0x7da1b1f94040>]]]
return[name[hdr]] | keyword[def] identifier[decrypt] ( identifier[self] , identifier[orig_pkt] , identifier[assoclen] = keyword[None] ):
literal[string]
identifier[hdr] = identifier[copy] . identifier[deepcopy] ( identifier[orig_pkt] )
keyword[del] identifier[hdr] [ identifier[MACsec] ]. identifier[payload]
identifier[pktlen] = identifier[len] ( identifier[orig_pkt] )
keyword[if] identifier[self] . identifier[send_sci] :
identifier[hdrlen] = identifier[NOSCI_LEN] + identifier[SCI_LEN]
keyword[else] :
identifier[hdrlen] = identifier[NOSCI_LEN]
keyword[if] identifier[assoclen] keyword[is] keyword[None] keyword[or] keyword[not] identifier[self] . identifier[do_encrypt] :
keyword[if] identifier[self] . identifier[do_encrypt] :
identifier[assoclen] = identifier[hdrlen]
keyword[else] :
identifier[assoclen] = identifier[pktlen] - identifier[self] . identifier[icvlen]
identifier[iv] = identifier[self] . identifier[make_iv] ( identifier[hdr] )
identifier[assoc] , identifier[ct] , identifier[icv] = identifier[MACsecSA] . identifier[split_pkt] ( identifier[orig_pkt] , identifier[assoclen] , identifier[self] . identifier[icvlen] )
identifier[decryptor] = identifier[Cipher] (
identifier[algorithms] . identifier[AES] ( identifier[self] . identifier[key] ),
identifier[modes] . identifier[GCM] ( identifier[iv] , identifier[icv] ),
identifier[backend] = identifier[default_backend] ()
). identifier[decryptor] ()
identifier[decryptor] . identifier[authenticate_additional_data] ( identifier[assoc] )
identifier[pt] = identifier[assoc] [ identifier[hdrlen] : identifier[assoclen] ]
identifier[pt] += identifier[decryptor] . identifier[update] ( identifier[ct] )
identifier[pt] += identifier[decryptor] . identifier[finalize] ()
identifier[hdr] [ identifier[MACsec] ]. identifier[type] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[pt] [ literal[int] : literal[int] ])[ literal[int] ]
identifier[hdr] [ identifier[MACsec] ]. identifier[payload] = identifier[Raw] ( identifier[pt] [ literal[int] :])
keyword[return] identifier[hdr] | def decrypt(self, orig_pkt, assoclen=None):
"""decrypt a MACsec frame for this Secure Association"""
hdr = copy.deepcopy(orig_pkt)
del hdr[MACsec].payload
pktlen = len(orig_pkt)
if self.send_sci:
hdrlen = NOSCI_LEN + SCI_LEN # depends on [control=['if'], data=[]]
else:
hdrlen = NOSCI_LEN
if assoclen is None or not self.do_encrypt:
if self.do_encrypt:
assoclen = hdrlen # depends on [control=['if'], data=[]]
else:
assoclen = pktlen - self.icvlen # depends on [control=['if'], data=[]]
iv = self.make_iv(hdr)
(assoc, ct, icv) = MACsecSA.split_pkt(orig_pkt, assoclen, self.icvlen)
decryptor = Cipher(algorithms.AES(self.key), modes.GCM(iv, icv), backend=default_backend()).decryptor()
decryptor.authenticate_additional_data(assoc)
pt = assoc[hdrlen:assoclen]
pt += decryptor.update(ct)
pt += decryptor.finalize()
hdr[MACsec].type = struct.unpack('!H', pt[0:2])[0]
hdr[MACsec].payload = Raw(pt[2:])
return hdr |
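The decryptor built above is standard AES-GCM with associated data from the `cryptography` package. A self-contained round trip, minus the MACsec framing (key, IV, and header bytes below are arbitrary):

import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

key, iv, aad = os.urandom(16), os.urandom(12), b"frame-header"
enc = Cipher(algorithms.AES(key), modes.GCM(iv), backend=default_backend()).encryptor()
enc.authenticate_additional_data(aad)                  # bind the cleartext header
ct = enc.update(b"secret payload") + enc.finalize()
dec = Cipher(algorithms.AES(key), modes.GCM(iv, enc.tag), backend=default_backend()).decryptor()
dec.authenticate_additional_data(aad)                  # must match on decrypt
pt = dec.update(ct) + dec.finalize()                   # raises InvalidTag on tampering
assert pt == b"secret payload"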
def _get_ema(cls, df, column, windows):
""" get exponential moving average
:param df: data
:param column: column to calculate
:param windows: collection of window of exponential moving average
:return: None
"""
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_ema'.format(column, window)
if len(df[column]) > 0:
df[column_name] = df[column].ewm(
ignore_na=False, span=window,
min_periods=0, adjust=True).mean()
else:
df[column_name] = [] | def function[_get_ema, parameter[cls, df, column, windows]]:
constant[ get exponential moving average
:param df: data
:param column: column to calculate
:param windows: collection of window of exponential moving average
:return: None
]
variable[window] assign[=] call[name[cls].get_only_one_positive_int, parameter[name[windows]]]
variable[column_name] assign[=] call[constant[{}_{}_ema].format, parameter[name[column], name[window]]]
if compare[call[name[len], parameter[call[name[df]][name[column]]]] greater[>] constant[0]] begin[:]
call[name[df]][name[column_name]] assign[=] call[call[call[name[df]][name[column]].ewm, parameter[]].mean, parameter[]] | keyword[def] identifier[_get_ema] ( identifier[cls] , identifier[df] , identifier[column] , identifier[windows] ):
literal[string]
identifier[window] = identifier[cls] . identifier[get_only_one_positive_int] ( identifier[windows] )
identifier[column_name] = literal[string] . identifier[format] ( identifier[column] , identifier[window] )
keyword[if] identifier[len] ( identifier[df] [ identifier[column] ])> literal[int] :
identifier[df] [ identifier[column_name] ]= identifier[df] [ identifier[column] ]. identifier[ewm] (
identifier[ignore_na] = keyword[False] , identifier[span] = identifier[window] ,
identifier[min_periods] = literal[int] , identifier[adjust] = keyword[True] ). identifier[mean] ()
keyword[else] :
identifier[df] [ identifier[column_name] ]=[] | def _get_ema(cls, df, column, windows):
""" get exponential moving average
:param df: data
:param column: column to calculate
:param windows: collection of window of exponential moving average
:return: None
"""
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_ema'.format(column, window)
if len(df[column]) > 0:
df[column_name] = df[column].ewm(ignore_na=False, span=window, min_periods=0, adjust=True).mean() # depends on [control=['if'], data=[]]
else:
df[column_name] = [] |
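The EWM call wrapped above is plain pandas; a standalone illustration with made-up prices (column name and window are arbitrary):

import pandas as pd

df = pd.DataFrame({"close": [10.0, 10.5, 10.2, 10.8, 11.0]})
window = 5
df["close_5_ema"] = df["close"].ewm(
    ignore_na=False, span=window, min_periods=0, adjust=True).mean()
print(df)  # adds a close_5_ema column alongside close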
def _plot(self):
"""Plot all dots for series"""
r_max = min(
self.view.x(1) - self.view.x(0),
(self.view.y(0) or 0) - self.view.y(1)
) / (2 * 1.05)
for serie in self.series:
self.dot(serie, r_max) | def function[_plot, parameter[self]]:
constant[Plot all dots for series]
variable[r_max] assign[=] binary_operation[call[name[min], parameter[binary_operation[call[name[self].view.x, parameter[constant[1]]] - call[name[self].view.x, parameter[constant[0]]]], binary_operation[<ast.BoolOp object at 0x7da20c795750> - call[name[self].view.y, parameter[constant[1]]]]]] / binary_operation[constant[2] * constant[1.05]]]
for taget[name[serie]] in starred[name[self].series] begin[:]
call[name[self].dot, parameter[name[serie], name[r_max]]] | keyword[def] identifier[_plot] ( identifier[self] ):
literal[string]
identifier[r_max] = identifier[min] (
identifier[self] . identifier[view] . identifier[x] ( literal[int] )- identifier[self] . identifier[view] . identifier[x] ( literal[int] ),
( identifier[self] . identifier[view] . identifier[y] ( literal[int] ) keyword[or] literal[int] )- identifier[self] . identifier[view] . identifier[y] ( literal[int] )
)/( literal[int] * literal[int] )
keyword[for] identifier[serie] keyword[in] identifier[self] . identifier[series] :
identifier[self] . identifier[dot] ( identifier[serie] , identifier[r_max] ) | def _plot(self):
"""Plot all dots for series"""
r_max = min(self.view.x(1) - self.view.x(0), (self.view.y(0) or 0) - self.view.y(1)) / (2 * 1.05)
for serie in self.series:
self.dot(serie, r_max) # depends on [control=['for'], data=['serie']] |
def discharge(self):
"""Discharge of the element in each layer
"""
rv = np.zeros(self.aq[0].naq)
Qls = self.parameters[:, 0] * self.dischargeinf()
Qls.shape = (self.nls, self.nlayers, self.order + 1)
Qls = np.sum(Qls, 2)
for i, q in enumerate(Qls):
rv[self.layers[i]] += q
#rv[self.layers] = np.sum(Qls.reshape(self.nls * (self.order + 1), self.nlayers), 0)
return rv | def function[discharge, parameter[self]]:
constant[Discharge of the element in each layer
]
variable[rv] assign[=] call[name[np].zeros, parameter[call[name[self].aq][constant[0]].naq]]
variable[Qls] assign[=] binary_operation[call[name[self].parameters][tuple[[<ast.Slice object at 0x7da1b0b2f190>, <ast.Constant object at 0x7da1b0b2ef80>]]] * call[name[self].dischargeinf, parameter[]]]
name[Qls].shape assign[=] tuple[[<ast.Attribute object at 0x7da1b0b2f0d0>, <ast.Attribute object at 0x7da1b0b2f5e0>, <ast.BinOp object at 0x7da1b0b2f2b0>]]
variable[Qls] assign[=] call[name[np].sum, parameter[name[Qls], constant[2]]]
for taget[tuple[[<ast.Name object at 0x7da1b0b2c670>, <ast.Name object at 0x7da1b0b2ceb0>]]] in starred[call[name[enumerate], parameter[name[Qls]]]] begin[:]
<ast.AugAssign object at 0x7da1b0e27dc0>
return[name[rv]] | keyword[def] identifier[discharge] ( identifier[self] ):
literal[string]
identifier[rv] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[aq] [ literal[int] ]. identifier[naq] )
identifier[Qls] = identifier[self] . identifier[parameters] [:, literal[int] ]* identifier[self] . identifier[dischargeinf] ()
identifier[Qls] . identifier[shape] =( identifier[self] . identifier[nls] , identifier[self] . identifier[nlayers] , identifier[self] . identifier[order] + literal[int] )
identifier[Qls] = identifier[np] . identifier[sum] ( identifier[Qls] , literal[int] )
keyword[for] identifier[i] , identifier[q] keyword[in] identifier[enumerate] ( identifier[Qls] ):
identifier[rv] [ identifier[self] . identifier[layers] [ identifier[i] ]]+= identifier[q]
keyword[return] identifier[rv] | def discharge(self):
"""Discharge of the element in each layer
"""
rv = np.zeros(self.aq[0].naq)
Qls = self.parameters[:, 0] * self.dischargeinf()
Qls.shape = (self.nls, self.nlayers, self.order + 1)
Qls = np.sum(Qls, 2)
for (i, q) in enumerate(Qls):
rv[self.layers[i]] += q # depends on [control=['for'], data=[]]
#rv[self.layers] = np.sum(Qls.reshape(self.nls * (self.order + 1), self.nlayers), 0)
return rv |
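A plausible reason for the per-row loop above (see the commented-out line) is that fancy-indexed `+=` drops repeated indices; `np.add.at` is the vectorized form that accumulates duplicates correctly, shown here with toy values:

import numpy as np

rv = np.zeros(3)
layers = np.array([0, 0, 2])   # index 0 appears twice
q = np.array([1.0, 2.0, 5.0])
np.add.at(rv, layers, q)       # rv[0] receives both contributions
print(rv)                      # [3. 0. 5.]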
def simplesurface(idf, bsd, deletebsd=True, setto000=False):
"""convert a bsd (buildingsurface:detailed) into a simple surface"""
funcs = (wallexterior,
walladiabatic,
wallunderground,
wallinterzone,
roof,
ceilingadiabatic,
ceilinginterzone,
floorgroundcontact,
flooradiabatic,
floorinterzone,)
for func in funcs:
surface = func(idf, bsd, deletebsd=deletebsd, setto000=setto000)
if surface:
return surface
return None | def function[simplesurface, parameter[idf, bsd, deletebsd, setto000]]:
constant[convert a bsd (buildingsurface:detailed) into a simple surface]
variable[funcs] assign[=] tuple[[<ast.Name object at 0x7da2041db4c0>, <ast.Name object at 0x7da2041da380>, <ast.Name object at 0x7da2041dbd90>, <ast.Name object at 0x7da2041d8c40>, <ast.Name object at 0x7da2041d9630>, <ast.Name object at 0x7da2041daec0>, <ast.Name object at 0x7da2041da9b0>, <ast.Name object at 0x7da2041d8400>, <ast.Name object at 0x7da2041d9f90>, <ast.Name object at 0x7da2041d9330>]]
for taget[name[func]] in starred[name[funcs]] begin[:]
variable[surface] assign[=] call[name[func], parameter[name[idf], name[bsd]]]
if name[surface] begin[:]
return[name[surface]]
return[constant[None]] | keyword[def] identifier[simplesurface] ( identifier[idf] , identifier[bsd] , identifier[deletebsd] = keyword[True] , identifier[setto000] = keyword[False] ):
literal[string]
identifier[funcs] =( identifier[wallexterior] ,
identifier[walladiabatic] ,
identifier[wallunderground] ,
identifier[wallinterzone] ,
identifier[roof] ,
identifier[ceilingadiabatic] ,
identifier[ceilinginterzone] ,
identifier[floorgroundcontact] ,
identifier[flooradiabatic] ,
identifier[floorinterzone] ,)
keyword[for] identifier[func] keyword[in] identifier[funcs] :
identifier[surface] = identifier[func] ( identifier[idf] , identifier[bsd] , identifier[deletebsd] = identifier[deletebsd] , identifier[setto000] = identifier[setto000] )
keyword[if] identifier[surface] :
keyword[return] identifier[surface]
keyword[return] keyword[None] | def simplesurface(idf, bsd, deletebsd=True, setto000=False):
"""convert a bsd (buildingsurface:detailed) into a simple surface"""
funcs = (wallexterior, walladiabatic, wallunderground, wallinterzone, roof, ceilingadiabatic, ceilinginterzone, floorgroundcontact, flooradiabatic, floorinterzone)
for func in funcs:
surface = func(idf, bsd, deletebsd=deletebsd, setto000=setto000)
if surface:
return surface # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['func']]
return None |
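The converter chain above is a first-match dispatch; the same pattern in isolation, with generic names (purely illustrative):

def first_match(funcs, *args, **kwargs):
    """Return the first truthy result from a chain of converter functions."""
    for func in funcs:
        result = func(*args, **kwargs)
        if result:
            return result
    return None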
def voxelize_ray(mesh,
pitch,
per_cell=[2, 2],
**kwargs):
"""
Voxelize a mesh using ray queries.
Parameters
-------------
mesh : Trimesh object
Mesh to be voxelized
pitch : float
Length of voxel cube
per_cell : (2,) int
How many ray queries to make per cell
Returns
-------------
voxels : (n, 3) int
Voxel positions
origin : (3, ) int
Origin of voxels
"""
# how many rays per cell
per_cell = np.array(per_cell).astype(np.int).reshape(2)
# edge length of cube voxels
pitch = float(pitch)
# create the ray origins in a grid
bounds = mesh.bounds[:, :2].copy()
# offset start so we get the requested number per cell
bounds[0] += pitch / (1.0 + per_cell)
# offset end so arange doesn't short us
bounds[1] += pitch
# on X we are doing multiple rays per voxel step
step = pitch / per_cell
# 2D grid
ray_ori = util.grid_arange(bounds, step=step)
# a Z position below the mesh
z = np.ones(len(ray_ori)) * (mesh.bounds[0][2] - pitch)
ray_ori = np.column_stack((ray_ori, z))
# all rays are along positive Z
ray_dir = np.ones_like(ray_ori) * [0, 0, 1]
# if you have pyembree this should be decently fast
hits = mesh.ray.intersects_location(ray_ori, ray_dir)[0]
# just convert hit locations to integer positions
voxels = np.round(hits / pitch).astype(np.int64)
# offset voxels by min, so matrix isn't huge
origin = voxels.min(axis=0)
voxels -= origin
return voxels, origin | def function[voxelize_ray, parameter[mesh, pitch, per_cell]]:
constant[
Voxelize a mesh using ray queries.
Parameters
-------------
mesh : Trimesh object
Mesh to be voxelized
pitch : float
Length of voxel cube
per_cell : (2,) int
How many ray queries to make per cell
Returns
-------------
voxels : (n, 3) int
Voxel positions
origin : (3, ) int
Origin of voxels
]
variable[per_cell] assign[=] call[call[call[name[np].array, parameter[name[per_cell]]].astype, parameter[name[np].int]].reshape, parameter[constant[2]]]
variable[pitch] assign[=] call[name[float], parameter[name[pitch]]]
variable[bounds] assign[=] call[call[name[mesh].bounds][tuple[[<ast.Slice object at 0x7da1b23e51e0>, <ast.Slice object at 0x7da1b23e50f0>]]].copy, parameter[]]
<ast.AugAssign object at 0x7da1b23e52a0>
<ast.AugAssign object at 0x7da1b23e4760>
variable[step] assign[=] binary_operation[name[pitch] / name[per_cell]]
variable[ray_ori] assign[=] call[name[util].grid_arange, parameter[name[bounds]]]
variable[z] assign[=] binary_operation[call[name[np].ones, parameter[call[name[len], parameter[name[ray_ori]]]]] * binary_operation[call[call[name[mesh].bounds][constant[0]]][constant[2]] - name[pitch]]]
variable[ray_ori] assign[=] call[name[np].column_stack, parameter[tuple[[<ast.Name object at 0x7da1b23c5e40>, <ast.Name object at 0x7da1b23c5060>]]]]
variable[ray_dir] assign[=] binary_operation[call[name[np].ones_like, parameter[name[ray_ori]]] * list[[<ast.Constant object at 0x7da1b23c6650>, <ast.Constant object at 0x7da1b23c59f0>, <ast.Constant object at 0x7da1b23c4310>]]]
variable[hits] assign[=] call[call[name[mesh].ray.intersects_location, parameter[name[ray_ori], name[ray_dir]]]][constant[0]]
variable[voxels] assign[=] call[call[name[np].round, parameter[binary_operation[name[hits] / name[pitch]]]].astype, parameter[name[np].int64]]
variable[origin] assign[=] call[name[voxels].min, parameter[]]
<ast.AugAssign object at 0x7da1b23c5cc0>
return[tuple[[<ast.Name object at 0x7da20c990790>, <ast.Name object at 0x7da20c991000>]]] | keyword[def] identifier[voxelize_ray] ( identifier[mesh] ,
identifier[pitch] ,
identifier[per_cell] =[ literal[int] , literal[int] ],
** identifier[kwargs] ):
literal[string]
identifier[per_cell] = identifier[np] . identifier[array] ( identifier[per_cell] ). identifier[astype] ( identifier[np] . identifier[int] ). identifier[reshape] ( literal[int] )
identifier[pitch] = identifier[float] ( identifier[pitch] )
identifier[bounds] = identifier[mesh] . identifier[bounds] [:,: literal[int] ]. identifier[copy] ()
identifier[bounds] [ literal[int] ]+= identifier[pitch] /( literal[int] + identifier[per_cell] )
identifier[bounds] [ literal[int] ]+= identifier[pitch]
identifier[step] = identifier[pitch] / identifier[per_cell]
identifier[ray_ori] = identifier[util] . identifier[grid_arange] ( identifier[bounds] , identifier[step] = identifier[step] )
identifier[z] = identifier[np] . identifier[ones] ( identifier[len] ( identifier[ray_ori] ))*( identifier[mesh] . identifier[bounds] [ literal[int] ][ literal[int] ]- identifier[pitch] )
identifier[ray_ori] = identifier[np] . identifier[column_stack] (( identifier[ray_ori] , identifier[z] ))
identifier[ray_dir] = identifier[np] . identifier[ones_like] ( identifier[ray_ori] )*[ literal[int] , literal[int] , literal[int] ]
identifier[hits] = identifier[mesh] . identifier[ray] . identifier[intersects_location] ( identifier[ray_ori] , identifier[ray_dir] )[ literal[int] ]
identifier[voxels] = identifier[np] . identifier[round] ( identifier[hits] / identifier[pitch] ). identifier[astype] ( identifier[np] . identifier[int64] )
identifier[origin] = identifier[voxels] . identifier[min] ( identifier[axis] = literal[int] )
identifier[voxels] -= identifier[origin]
keyword[return] identifier[voxels] , identifier[origin] | def voxelize_ray(mesh, pitch, per_cell=[2, 2], **kwargs):
"""
Voxelize a mesh using ray queries.
Parameters
-------------
mesh : Trimesh object
Mesh to be voxelized
pitch : float
Length of voxel cube
per_cell : (2,) int
How many ray queries to make per cell
Returns
-------------
voxels : (n, 3) int
Voxel positions
origin : (3, ) int
Origin of voxels
"""
# how many rays per cell
per_cell = np.array(per_cell).astype(np.int).reshape(2)
# edge length of cube voxels
pitch = float(pitch)
# create the ray origins in a grid
bounds = mesh.bounds[:, :2].copy()
# offset start so we get the requested number per cell
bounds[0] += pitch / (1.0 + per_cell)
# offset end so arange doesn't short us
bounds[1] += pitch
# on X we are doing multiple rays per voxel step
step = pitch / per_cell
# 2D grid
ray_ori = util.grid_arange(bounds, step=step)
# a Z position below the mesh
z = np.ones(len(ray_ori)) * (mesh.bounds[0][2] - pitch)
ray_ori = np.column_stack((ray_ori, z))
# all rays are along positive Z
ray_dir = np.ones_like(ray_ori) * [0, 0, 1]
# if you have pyembree this should be decently fast
hits = mesh.ray.intersects_location(ray_ori, ray_dir)[0]
# just convert hit locations to integer positions
voxels = np.round(hits / pitch).astype(np.int64)
# offset voxels by min, so matrix isn't huge
origin = voxels.min(axis=0)
voxels -= origin
return (voxels, origin) |
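The final hit-to-voxel step above needs no mesh at all; with fabricated intersection points it reduces to the following (pitch value arbitrary):

import numpy as np

pitch = 0.5
hits = np.array([[0.1, 0.2, 0.9],    # fake ray/mesh intersection points
                 [1.4, 0.2, 0.9]])
voxels = np.round(hits / pitch).astype(np.int64)
origin = voxels.min(axis=0)          # shift so indices start at zero
voxels -= origin
print(voxels, origin)                # [[0 0 0] [3 0 0]] and [0 0 2]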
def get_node_details(self, node_id: list) -> dict:
"""Get details of a node.
Only the manager nodes can retrieve details of a node
Args:
node_id (list): List of node ID
Returns:
dict, details of the node
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can '
'retrieve node details.')
node = self._client.nodes.get(node_id)
return node.attrs | def function[get_node_details, parameter[self, node_id]]:
constant[Get details of a node.
Only the manager nodes can retrieve details of a node
Args:
node_id (list): List of node ID
Returns:
dict, details of the node
]
if <ast.UnaryOp object at 0x7da18ede6fe0> begin[:]
<ast.Raise object at 0x7da18ede4f40>
variable[node] assign[=] call[name[self]._client.nodes.get, parameter[name[node_id]]]
return[name[node].attrs] | keyword[def] identifier[get_node_details] ( identifier[self] , identifier[node_id] : identifier[list] )-> identifier[dict] :
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_manager] :
keyword[raise] identifier[RuntimeError] ( literal[string]
literal[string] )
identifier[node] = identifier[self] . identifier[_client] . identifier[nodes] . identifier[get] ( identifier[node_id] )
keyword[return] identifier[node] . identifier[attrs] | def get_node_details(self, node_id: list) -> dict:
"""Get details of a node.
Only the manager nodes can retrieve details of a node
Args:
node_id (list): List of node ID
Returns:
dict, details of the node
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can retrieve node details.') # depends on [control=['if'], data=[]]
node = self._client.nodes.get(node_id)
return node.attrs |
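A hypothetical call site for the method above, on an instance constructed on a manager node; note that despite the `list` annotation, the docker SDK's `nodes.get` takes a single node ID, so a plain string is what actually works here. The instance name, node ID, and attribute path are assumptions.

details = swarm.get_node_details("x1k2node")          # raises RuntimeError off-manager
print(details.get("Description", {}).get("Hostname"))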
def children(self):
"""
Returns list of children changesets.
"""
rev_filter = settings.GIT_REV_FILTER
cmd = "rev-list %s --children | grep '^%s'" % (rev_filter, self.raw_id)
so, se = self.repository.run_git_command(cmd)
children = []
for l in so.splitlines():
childs = l.split(' ')[1:]
children.extend(childs)
return [self.repository.get_changeset(cs) for cs in children] | def function[children, parameter[self]]:
constant[
Returns list of children changesets.
]
variable[rev_filter] assign[=] name[settings].GIT_REV_FILTER
variable[cmd] assign[=] binary_operation[constant[rev-list %s --children | grep '^%s'] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2043454b0>, <ast.Attribute object at 0x7da204344a00>]]]
<ast.Tuple object at 0x7da204344c10> assign[=] call[name[self].repository.run_git_command, parameter[name[cmd]]]
variable[children] assign[=] list[[]]
for taget[name[l]] in starred[call[name[so].splitlines, parameter[]]] begin[:]
variable[childs] assign[=] call[call[name[l].split, parameter[constant[ ]]]][<ast.Slice object at 0x7da204344c40>]
call[name[children].extend, parameter[name[childs]]]
return[<ast.ListComp object at 0x7da204346bf0>] | keyword[def] identifier[children] ( identifier[self] ):
literal[string]
identifier[rev_filter] = identifier[settings] . identifier[GIT_REV_FILTER]
identifier[cmd] = literal[string] %( identifier[rev_filter] , identifier[self] . identifier[raw_id] )
identifier[so] , identifier[se] = identifier[self] . identifier[repository] . identifier[run_git_command] ( identifier[cmd] )
identifier[children] =[]
keyword[for] identifier[l] keyword[in] identifier[so] . identifier[splitlines] ():
identifier[childs] = identifier[l] . identifier[split] ( literal[string] )[ literal[int] :]
identifier[children] . identifier[extend] ( identifier[childs] )
keyword[return] [ identifier[self] . identifier[repository] . identifier[get_changeset] ( identifier[cs] ) keyword[for] identifier[cs] keyword[in] identifier[children] ] | def children(self):
"""
Returns list of children changesets.
"""
rev_filter = settings.GIT_REV_FILTER
cmd = "rev-list %s --children | grep '^%s'" % (rev_filter, self.raw_id)
(so, se) = self.repository.run_git_command(cmd)
children = []
for l in so.splitlines():
childs = l.split(' ')[1:]
children.extend(childs) # depends on [control=['for'], data=['l']]
return [self.repository.get_changeset(cs) for cs in children] |
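A standalone demo of the parsing step above: `git rev-list --children` emits lines of the form `<sha> <child> <child> ...`, and the method keeps everything after the first field (sample output fabricated):

so = "aaa bbb ccc\naaa ddd\n"          # fabricated rev-list --children output
children = []
for l in so.splitlines():
    children.extend(l.split(' ')[1:])  # drop the parent sha, keep the children
print(children)                        # ['bbb', 'ccc', 'ddd']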
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'word_count') and self.word_count is not None:
_dict['word_count'] = self.word_count
if hasattr(self,
'character_count') and self.character_count is not None:
_dict['character_count'] = self.character_count
if hasattr(self, 'translations') and self.translations is not None:
_dict['translations'] = [x._to_dict() for x in self.translations]
return _dict | def function[_to_dict, parameter[self]]:
constant[Return a json dictionary representing this model.]
variable[_dict] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da1b1b47940> begin[:]
call[name[_dict]][constant[word_count]] assign[=] name[self].word_count
if <ast.BoolOp object at 0x7da1b1b47eb0> begin[:]
call[name[_dict]][constant[character_count]] assign[=] name[self].character_count
if <ast.BoolOp object at 0x7da1b1b47fd0> begin[:]
call[name[_dict]][constant[translations]] assign[=] <ast.ListComp object at 0x7da1b1b47be0>
return[name[_dict]] | keyword[def] identifier[_to_dict] ( identifier[self] ):
literal[string]
identifier[_dict] ={}
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[word_count] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[word_count]
keyword[if] identifier[hasattr] ( identifier[self] ,
literal[string] ) keyword[and] identifier[self] . identifier[character_count] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[character_count]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[translations] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]=[ identifier[x] . identifier[_to_dict] () keyword[for] identifier[x] keyword[in] identifier[self] . identifier[translations] ]
keyword[return] identifier[_dict] | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'word_count') and self.word_count is not None:
_dict['word_count'] = self.word_count # depends on [control=['if'], data=[]]
if hasattr(self, 'character_count') and self.character_count is not None:
_dict['character_count'] = self.character_count # depends on [control=['if'], data=[]]
if hasattr(self, 'translations') and self.translations is not None:
_dict['translations'] = [x._to_dict() for x in self.translations] # depends on [control=['if'], data=[]]
return _dict |
def account_overview(object):
"""Create layout for user profile"""
return Layout(
Container(
Row(
Column2(
Panel(
'Avatar',
Img(src="{}{}".format(settings.MEDIA_URL, object.avatar)),
collapse=True,
),
),
Column10(
Panel(
'Account information',
DescriptionList(
'email',
'first_name',
'last_name',
),
)
),
)
)
) | def function[account_overview, parameter[object]]:
constant[Create layout for user profile]
return[call[name[Layout], parameter[call[name[Container], parameter[call[name[Row], parameter[call[name[Column2], parameter[call[name[Panel], parameter[constant[Avatar], call[name[Img], parameter[]]]]]], call[name[Column10], parameter[call[name[Panel], parameter[constant[Account information], call[name[DescriptionList], parameter[constant[email], constant[first_name], constant[last_name]]]]]]]]]]]]]] | keyword[def] identifier[account_overview] ( identifier[object] ):
literal[string]
keyword[return] identifier[Layout] (
identifier[Container] (
identifier[Row] (
identifier[Column2] (
identifier[Panel] (
literal[string] ,
identifier[Img] ( identifier[src] = literal[string] . identifier[format] ( identifier[settings] . identifier[MEDIA_URL] , identifier[object] . identifier[avatar] )),
identifier[collapse] = keyword[True] ,
),
),
identifier[Column10] (
identifier[Panel] (
literal[string] ,
identifier[DescriptionList] (
literal[string] ,
literal[string] ,
literal[string] ,
),
)
),
)
)
) | def account_overview(object):
"""Create layout for user profile"""
return Layout(Container(Row(Column2(Panel('Avatar', Img(src='{}{}'.format(settings.MEDIA_URL, object.avatar)), collapse=True)), Column10(Panel('Account information', DescriptionList('email', 'first_name', 'last_name')))))) |
def _column_type(values, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
return reduce(_more_generic, [type(v) for v in values], int) | def function[_column_type, parameter[values, has_invisible]]:
constant[The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", 'пять']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
]
return[call[name[reduce], parameter[name[_more_generic], <ast.ListComp object at 0x7da18fe92ce0>, name[int]]]] | keyword[def] identifier[_column_type] ( identifier[values] , identifier[has_invisible] = keyword[True] ):
literal[string]
keyword[return] identifier[reduce] ( identifier[_more_generic] ,[ identifier[type] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[values] ], identifier[int] ) | def _column_type(values, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", 'пять']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
return reduce(_more_generic, [type(v) for v in values], int) |
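The reduce above folds the column's value types through `_more_generic`, which is defined elsewhere; a minimal stand-in that widens int → float → text reproduces the doctest behaviour (the helper body below is an assumption, not the original):

from functools import reduce

def _more_generic(a, b):        # hypothetical stand-in for the real helper
    order = {int: 0, float: 1}  # anything else is treated as text
    return a if order.get(a, 2) >= order.get(b, 2) else b

print(reduce(_more_generic, [type(v) for v in [1, 2.3]], int))     # <class 'float'>
print(reduce(_more_generic, [type(v) for v in [1, "four"]], int))  # <class 'str'>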
def spm_hrf_compat(t,
peak_delay=6,
under_delay=16,
peak_disp=1,
under_disp=1,
p_u_ratio=6,
normalize=True,
):
""" SPM HRF function from sum of two gamma PDFs
This function is designed to be partially compatible with SPM's `spm_hrf.m`
function.
The SPM HRF is a *peak* gamma PDF (with location `peak_delay` and
dispersion `peak_disp`), minus an *undershoot* gamma PDF (with location
`under_delay` and dispersion `under_disp`, and divided by the `p_u_ratio`).
Parameters
----------
t : array-like
vector of times at which to sample HRF
peak_delay : float, optional
delay of peak
peak_disp : float, optional
width (dispersion) of peak
under_delay : float, optional
delay of undershoot
under_disp : float, optional
width (dispersion) of undershoot
p_u_ratio : float, optional
peak to undershoot ratio. Undershoot divided by this value before
subtracting from peak.
normalize : {True, False}, optional
If True, divide HRF values by their sum before returning. SPM does this
by default.
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
See ``spm_hrf.m`` in the SPM distribution.
"""
if len([v for v in [peak_delay, peak_disp, under_delay, under_disp]
if v <= 0]):
raise ValueError("delays and dispersions must be > 0")
# gamma.pdf only defined for t > 0
hrf = np.zeros(t.shape, dtype=np.float)
pos_t = t[t > 0]
peak = sps.gamma.pdf(pos_t,
peak_delay / peak_disp,
loc=0,
scale=peak_disp)
undershoot = sps.gamma.pdf(pos_t,
under_delay / under_disp,
loc=0,
scale=under_disp)
hrf[t > 0] = peak - undershoot / p_u_ratio
if not normalize:
return hrf
return hrf / np.max(hrf) | def function[spm_hrf_compat, parameter[t, peak_delay, under_delay, peak_disp, under_disp, p_u_ratio, normalize]]:
constant[ SPM HRF function from sum of two gamma PDFs
This function is designed to be partially compatible with SPM's `spm_hrf.m`
function.
The SPM HRF is a *peak* gamma PDF (with location `peak_delay` and
dispersion `peak_disp`), minus an *undershoot* gamma PDF (with location
`under_delay` and dispersion `under_disp`, and divided by the `p_u_ratio`).
Parameters
----------
t : array-like
vector of times at which to sample HRF
peak_delay : float, optional
delay of peak
peak_disp : float, optional
width (dispersion) of peak
under_delay : float, optional
delay of undershoot
under_disp : float, optional
width (dispersion) of undershoot
p_u_ratio : float, optional
peak to undershoot ratio. Undershoot divided by this value before
subtracting from peak.
normalize : {True, False}, optional
If True, divide HRF values by their sum before returning. SPM does this
by default.
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
See ``spm_hrf.m`` in the SPM distribution.
]
if call[name[len], parameter[<ast.ListComp object at 0x7da1b10366e0>]] begin[:]
<ast.Raise object at 0x7da1b1037be0>
variable[hrf] assign[=] call[name[np].zeros, parameter[name[t].shape]]
variable[pos_t] assign[=] call[name[t]][compare[name[t] greater[>] constant[0]]]
variable[peak] assign[=] call[name[sps].gamma.pdf, parameter[name[pos_t], binary_operation[name[peak_delay] / name[peak_disp]]]]
variable[undershoot] assign[=] call[name[sps].gamma.pdf, parameter[name[pos_t], binary_operation[name[under_delay] / name[under_disp]]]]
call[name[hrf]][compare[name[t] greater[>] constant[0]]] assign[=] binary_operation[name[peak] - binary_operation[name[undershoot] / name[p_u_ratio]]]
if <ast.UnaryOp object at 0x7da1b1034d00> begin[:]
return[name[hrf]]
return[binary_operation[name[hrf] / call[name[np].max, parameter[name[hrf]]]]] | keyword[def] identifier[spm_hrf_compat] ( identifier[t] ,
identifier[peak_delay] = literal[int] ,
identifier[under_delay] = literal[int] ,
identifier[peak_disp] = literal[int] ,
identifier[under_disp] = literal[int] ,
identifier[p_u_ratio] = literal[int] ,
identifier[normalize] = keyword[True] ,
):
literal[string]
keyword[if] identifier[len] ([ identifier[v] keyword[for] identifier[v] keyword[in] [ identifier[peak_delay] , identifier[peak_disp] , identifier[under_delay] , identifier[under_disp] ]
keyword[if] identifier[v] <= literal[int] ]):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[hrf] = identifier[np] . identifier[zeros] ( identifier[t] . identifier[shape] , identifier[dtype] = identifier[np] . identifier[float] )
identifier[pos_t] = identifier[t] [ identifier[t] > literal[int] ]
identifier[peak] = identifier[sps] . identifier[gamma] . identifier[pdf] ( identifier[pos_t] ,
identifier[peak_delay] / identifier[peak_disp] ,
identifier[loc] = literal[int] ,
identifier[scale] = identifier[peak_disp] )
identifier[undershoot] = identifier[sps] . identifier[gamma] . identifier[pdf] ( identifier[pos_t] ,
identifier[under_delay] / identifier[under_disp] ,
identifier[loc] = literal[int] ,
identifier[scale] = identifier[under_disp] )
identifier[hrf] [ identifier[t] > literal[int] ]= identifier[peak] - identifier[undershoot] / identifier[p_u_ratio]
keyword[if] keyword[not] identifier[normalize] :
keyword[return] identifier[hrf]
keyword[return] identifier[hrf] / identifier[np] . identifier[max] ( identifier[hrf] ) | def spm_hrf_compat(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1, p_u_ratio=6, normalize=True):
""" SPM HRF function from sum of two gamma PDFs
This function is designed to be partially compatible with SPM's `spm_hrf.m`
function.
The SPM HRF is a *peak* gamma PDF (with location `peak_delay` and
dispersion `peak_disp`), minus an *undershoot* gamma PDF (with location
`under_delay` and dispersion `under_disp`, and divided by the `p_u_ratio`).
Parameters
----------
t : array-like
vector of times at which to sample HRF
peak_delay : float, optional
delay of peak
peak_disp : float, optional
width (dispersion) of peak
under_delay : float, optional
delay of undershoot
under_disp : float, optional
width (dispersion) of undershoot
p_u_ratio : float, optional
peak to undershoot ratio. Undershoot divided by this value before
subtracting from peak.
normalize : {True, False}, optional
If True, divide HRF values by their sum before returning. SPM does this
by default.
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
See ``spm_hrf.m`` in the SPM distribution.
"""
if len([v for v in [peak_delay, peak_disp, under_delay, under_disp] if v <= 0]):
raise ValueError('delays and dispersions must be > 0') # depends on [control=['if'], data=[]]
# gamma.pdf only defined for t > 0
hrf = np.zeros(t.shape, dtype=np.float)
pos_t = t[t > 0]
peak = sps.gamma.pdf(pos_t, peak_delay / peak_disp, loc=0, scale=peak_disp)
undershoot = sps.gamma.pdf(pos_t, under_delay / under_disp, loc=0, scale=under_disp)
hrf[t > 0] = peak - undershoot / p_u_ratio
if not normalize:
return hrf # depends on [control=['if'], data=[]]
return hrf / np.max(hrf) |
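Sampling the response above on a fine grid, assuming the function is in scope with numpy as `np` and scipy.stats as `sps`: with the defaults the underlying peak gamma has its mode at (peak_delay/peak_disp - 1) * peak_disp = 5 s, and the sampled maximum lands close to that.

import numpy as np

t = np.arange(0, 32, 0.1)
hrf = spm_hrf_compat(t)             # default parameters
print(round(t[np.argmax(hrf)], 1))  # close to 5.0 s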
def set_auto_shutoff(self, timer):
"""
:param timer: an int, one of [None (never), -1, 30, 60, 120]
:return: nothing
"""
values = {
"desired_state": {
"auto_shutoff": timer
}
}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response) | def function[set_auto_shutoff, parameter[self, timer]]:
constant[
:param timer: an int, one of [None (never), -1, 30, 60, 120]
:return: nothing
]
variable[values] assign[=] dictionary[[<ast.Constant object at 0x7da1b2631660>], [<ast.Dict object at 0x7da1b26322c0>]]
variable[response] assign[=] call[name[self].api_interface.set_device_state, parameter[name[self], name[values]]]
call[name[self]._update_state_from_response, parameter[name[response]]] | keyword[def] identifier[set_auto_shutoff] ( identifier[self] , identifier[timer] ):
literal[string]
identifier[values] ={
literal[string] :{
literal[string] : identifier[timer]
}
}
identifier[response] = identifier[self] . identifier[api_interface] . identifier[set_device_state] ( identifier[self] , identifier[values] )
identifier[self] . identifier[_update_state_from_response] ( identifier[response] ) | def set_auto_shutoff(self, timer):
"""
:param timer: an int, one of [None (never), -1, 30, 60, 120]
:return: nothing
"""
values = {'desired_state': {'auto_shutoff': timer}}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response) |
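A hypothetical call for the setter above; `faucet` stands in for whatever device object this API returns, and the argument is one of the values the docstring lists.

faucet.set_auto_shutoff(60)     # one of the documented timer values
faucet.set_auto_shutoff(None)   # never shut off automatically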